scala.collection.concurrent Scala Examples
The following examples show how to use scala.collection.concurrent.
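Most of the examples rely on scala.collection.concurrent.TrieMap, the standard lock-free, thread-safe implementation of the scala.collection.concurrent.Map trait. As a quick orientation, here is a minimal, self-contained sketch (the names are illustrative, not taken from the examples) of the operations used throughout:

import scala.collection.concurrent

object TrieMapBasics extends App {
  // TrieMap implements concurrent.Map: reads and updates are atomic without external locking
  val cache: concurrent.Map[String, Int] = concurrent.TrieMap.empty

  // putIfAbsent returns Some(existing) if the key was already present, else None
  cache.putIfAbsent("a", 1)

  // getOrElseUpdate inserts the computed value only if the key is missing; in recent
  // Scala versions the insert itself is atomic, though the by-name default may be
  // evaluated more than once under contention (all callers still see one winning value)
  val v = cache.getOrElseUpdate("b", 2)

  println(cache.get("a")) // Some(1)
  println(v)              // 2
}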
Example 1
Source File: KuduService.scala From pulse with Apache License 2.0
package io.phdata.pulse.logcollector

import java.util.{ ArrayList, Collections }

import com.typesafe.scalalogging.LazyLogging
import io.phdata.pulse.common.domain.TimeseriesEvent
import org.apache.kudu.client.SessionConfiguration.FlushMode
import org.apache.kudu.client.{ CreateTableOptions, KuduClient, KuduException, KuduTable }
import org.apache.kudu.{ ColumnSchema, Schema, Type }

import scala.collection.concurrent

object TimeseriesEventColumns {
  val TIMESTAMP = "ts"
  val KEY       = "key"
  val TAG       = "tag"
  val VALUE     = "value"
}

// Excerpt: this method belongs to a service class (not shown) that holds a
// KuduClient ("client"), a concurrent table cache ("tableCache"), and a logger.
private[logcollector] def getOrCreateTable(tableName: String): KuduTable =
  KerberosContext.runPrivileged {
    if (tableCache.contains(tableName)) {
      tableCache(tableName)
    } else if (!client.tableExists(tableName)) {
      logger.info(s"Kudu table not found: $tableName")
      val columns = new ArrayList[ColumnSchema]
      columns.add(
        new ColumnSchema.ColumnSchemaBuilder(TimeseriesEventColumns.TIMESTAMP,
                                             Type.UNIXTIME_MICROS).key(true).build)
      columns.add(
        new ColumnSchema.ColumnSchemaBuilder(TimeseriesEventColumns.KEY, Type.STRING)
          .key(true)
          .build)
      columns.add(
        new ColumnSchema.ColumnSchemaBuilder(TimeseriesEventColumns.TAG, Type.STRING)
          .key(true)
          .build)
      columns.add(
        new ColumnSchema.ColumnSchemaBuilder(TimeseriesEventColumns.VALUE, Type.DOUBLE)
          .key(false)
          .build)
      val schema = new Schema(columns)
      val opts = new CreateTableOptions()
        .setRangePartitionColumns(Collections.singletonList(TimeseriesEventColumns.TIMESTAMP))
        .addHashPartitions(Collections.singletonList(TimeseriesEventColumns.KEY), 4)
      val table = client.createTable(tableName, schema, opts)
      tableCache.put(tableName, table)
      logger.info(s"Created Kudu table $tableName")
      table
    } else {
      val table = client.openTable(tableName)
      tableCache.put(tableName, table)
      table
    }
  }
Example 2
Source File: OffsetStore.scala From lagom with Apache License 2.0
package com.lightbend.lagom.spi.persistence

import akka.Done
import akka.persistence.query.NoOffset
import akka.persistence.query.Offset

import scala.concurrent.Future
import scala.collection.concurrent

class InMemoryOffsetStore extends OffsetStore {
  private final val store: concurrent.Map[String, Offset] = concurrent.TrieMap.empty

  override def prepare(eventProcessorId: String, tag: String): Future[OffsetDao] = {
    val key = s"$eventProcessorId-$tag"
    Future.successful(new OffsetDao {
      override def saveOffset(offset: Offset): Future[Done] = {
        store.put(key, offset)
        Future.successful(Done)
      }
      override val loadedOffset: Offset = store.getOrElse(key, NoOffset)
    })
  }
}
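A minimal sketch of how this store might be driven (the processor id and tag are made up; Offset.sequence is Akka's standard helper for sequence-number offsets):

import akka.persistence.query.Offset
import scala.concurrent.ExecutionContext.Implicits.global

val store = new InMemoryOffsetStore
// prepare returns a DAO scoped to one (eventProcessorId, tag) pair
store.prepare("my-processor", "tag-1").foreach { dao =>
  println(dao.loadedOffset)            // NoOffset on the first run
  dao.saveOffset(Offset.sequence(42L)) // stored in the backing TrieMap
}

Note that loadedOffset is read once, when prepare builds the DAO, so a fresh DAO is needed to observe offsets saved later.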
Example 3
Source File: LiftOver.scala From hail with MIT License
package is.hail.io.reference

import is.hail.backend.BroadcastValue
import is.hail.expr.ir.ExecuteContext
import is.hail.variant.{Locus, ReferenceGenome}
import is.hail.utils._
import is.hail.io.fs.FS

import scala.collection.JavaConverters._
import scala.collection.concurrent
import scala.language.implicitConversions

class SerializableHtsjdkLiftOver(val tmpdir: String, val fsBc: BroadcastValue[FS], val chainFile: String) extends Serializable {
  @transient lazy val value: htsjdk.samtools.liftover.LiftOver = {
    val localChainFile = LiftOver.getLocalChainFile(tmpdir, fsBc.value, chainFile)
    new htsjdk.samtools.liftover.LiftOver(new java.io.File(uriPath(localChainFile)))
  }
}

object LiftOver {
  private[this] val localChainFiles: concurrent.Map[String, String] = new concurrent.TrieMap()

  def getLocalChainFile(tmpdir: String, fs: FS, chainFile: String): String =
    localChainFiles.getOrElseUpdate(chainFile, LiftOver.setup(tmpdir, fs, chainFile))

  def setup(tmpdir: String, fs: FS, chainFile: String): String = {
    val localChainFile = ExecuteContext.createTmpPathNoCleanup(tmpdir, "lift-over", "chain")
    fs.copyRecode(chainFile, localChainFile)

    if (!fs.exists(localChainFile))
      fatal(s"Error while copying chain file to local file system. Did not find '$localChainFile'.")

    localChainFile
  }

  def apply(tmpdir: String, fs: FS, chainFile: String): LiftOver =
    new LiftOver(tmpdir, fs.broadcast, chainFile)
}

class LiftOver(val tmpdir: String, val fsBc: BroadcastValue[FS], val chainFile: String) extends Serializable {
  val lo = new SerializableHtsjdkLiftOver(tmpdir, fsBc, chainFile)

  def queryInterval(
    interval: is.hail.utils.Interval,
    minMatch: Double = htsjdk.samtools.liftover.LiftOver.DEFAULT_LIFTOVER_MINMATCH
  ): (is.hail.utils.Interval, Boolean) = {
    val start = interval.start.asInstanceOf[Locus]
    val end = interval.end.asInstanceOf[Locus]

    if (start.contig != end.contig)
      fatal(s"'start' and 'end' contigs must be identical. Found '$interval'.")

    val contig = start.contig
    val startPos = if (interval.includesStart) start.position else start.position + 1
    val endPos = if (interval.includesEnd) end.position else end.position - 1

    if (startPos == endPos)
      fatal(s"Cannot liftover a 0-length interval: ${ interval.toString }.\nDid you mean to use 'liftover_locus'?")

    val result = lo.value.liftOver(new htsjdk.samtools.util.Interval(contig, startPos, endPos), minMatch)
    if (result != null)
      (Interval(
        Locus(result.getContig, result.getStart),
        Locus(result.getContig, result.getEnd),
        includesStart = true,
        includesEnd = true),
       result.isNegativeStrand)
    else
      null
  }

  def queryLocus(
    l: Locus,
    minMatch: Double = htsjdk.samtools.liftover.LiftOver.DEFAULT_LIFTOVER_MINMATCH
  ): (Locus, Boolean) = {
    val result = lo.value.liftOver(new htsjdk.samtools.util.Interval(l.contig, l.position, l.position), minMatch)
    if (result != null)
      (Locus(result.getContig, result.getStart), result.isNegativeStrand)
    else
      null
  }

  def checkChainFile(srcRG: ReferenceGenome, destRG: ReferenceGenome) {
    val cMap = lo.value.getContigMap.asScala
    cMap.foreach { case (srcContig, destContigs) =>
      srcRG.checkContig(srcContig)
      destContigs.asScala.foreach(destRG.checkContig)
    }
  }
}
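The concurrency pattern worth noting here is localChainFiles.getOrElseUpdate: the TrieMap acts as a memo table, so each chain file is copied to the local file system effectively once per process. A stripped-down sketch of the same pattern (expensiveCopy is a hypothetical stand-in for LiftOver.setup):

import scala.collection.concurrent

object LocalFileCache {
  private val localPaths: concurrent.Map[String, String] = concurrent.TrieMap.empty

  // Hypothetical: copies the remote file and returns its local path
  private def expensiveCopy(remote: String): String = {
    println(s"copying $remote")
    s"/tmp/${remote.hashCode}"
  }

  // Concurrent callers race on the insert; everyone observes the same winning path.
  // The copy may still run more than once under contention, so it must be idempotent.
  def localPath(remote: String): String =
    localPaths.getOrElseUpdate(remote, expensiveCopy(remote))
}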
Example 4
Source File: ObservableStream.scala From fotm-info with MIT License
package info.fotm.util

import com.google.common.collect.MapMaker

import scala.collection.concurrent
import scala.collection.JavaConverters._

trait Subscription {
  def unsubscribe(): Unit
}

trait ObservableReadStream[+T] {
  def foreach(observer: T => Unit): Subscription
  def filter(p: T => Boolean): ObservableReadStream[T]
  def map[U](f: T => U): ObservableReadStream[U]
  def flatMap[U](f: T => ObservableStream[U]): ObservableReadStream[U]
}

trait ObservableWriteStream[-T] {
  def publish(value: T): Unit
}

trait ObservableStream[T] extends ObservableReadStream[T] with ObservableWriteStream[T] { self =>
  type Observer = T => Unit

  // private interface
  private val subs: concurrent.Map[Subscription, Observer] =
    new concurrent.TrieMap[Subscription, Observer]()

  private val weaksubs: concurrent.Map[Subscription, Observer] =
    new MapMaker().concurrencyLevel(4).weakKeys.makeMap[Subscription, Observer].asScala

  private def addsub(pool: concurrent.Map[Subscription, Observer], observer: Observer): Subscription = {
    val sub = new Subscription {
      def unsubscribe(): Unit = pool -= this
    }
    pool.put(sub, observer)
    sub
  }

  private def weaksub(observer: Observer): Subscription = addsub(weaksubs, observer)

  // implementers interface
  def publish(value: T) = {
    for { (_, o) <- subs } o(value)
    for { (_, o) <- weaksubs } o(value)
  }

  // public interface
  def foreach(observer: Observer) = addsub(subs, observer)

  def filter(p: T => Boolean) = new ObservableStream[T] {
    val sub = self.weaksub(t => if (p(t)) publish(t))
  }.asInstanceOf[ObservableReadStream[T]] // TODO: make map expose ObservableReadStream

  def map[U](f: T => U) = new ObservableStream[U] {
    val sub = self.weaksub(f andThen publish)
  }

  def flatMap[U](f: T => ObservableStream[U]) = new ObservableStream[U] {
    val refs = scala.collection.mutable.Set.empty[Subscription]
    val sub = self.map(f).weaksub { refs += _.weaksub(publish) }
  }.asInstanceOf[ObservableReadStream[U]]
}
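Since ObservableStream defines all of its own members, a concrete stream can be created inline. A small usage sketch (the demo object and values are illustrative):

object ObservableStreamDemo extends App {
  val ints = new ObservableStream[Int] {}

  // foreach registers a strongly-referenced observer and returns a Subscription
  val sub = ints.foreach(n => println(s"got $n"))

  ints.publish(1) // prints "got 1"
  ints.publish(2) // prints "got 2"

  sub.unsubscribe()
  ints.publish(3) // no output: the observer was removed
}

The design point of the two pools: direct foreach observers are held strongly, while derived streams (filter, map, flatMap) register their upstream link in the weak-keyed Guava map, so a derived stream stays subscribed only as long as the caller keeps a reference to it.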
Example 5
Source File: MemoryStore.scala From shield with MIT License
package shield.kvstore

import java.util.concurrent.atomic.AtomicInteger

import com.googlecode.concurrentlinkedhashmap.ConcurrentLinkedHashMap
import com.typesafe.scalalogging.LazyLogging
import shield.metrics.Instrumented
import spray.http.{MediaType, HttpResponse}

import scala.concurrent.{ExecutionContext, Future}
import scala.collection.concurrent

class LazyWrapper[A](builder: => A) {
  lazy val value: A = builder
}

class MemoryStore(id: String, maxHashCapacity: Int, maxKeyCapacity: Int, maxLimitCapacity: Int)(implicit context: ExecutionContext)
  extends KVStore with LazyLogging with Instrumented {

  def getMillis(): Long = System.currentTimeMillis

  private val setStore = new ConcurrentLinkedHashMap.Builder[String, LazyWrapper[TrieSet[String]]]
    .initialCapacity(1000)
    .maximumWeightedCapacity(Math.max(1000, maxHashCapacity))
    .build()

  // todo: tweak capacity - can we do it by memory size? (weigher to weigh by memory footprint)
  private val keyStore = new ConcurrentLinkedHashMap.Builder[String, HttpResponse]
    .initialCapacity(1000)
    .maximumWeightedCapacity(Math.max(1000, maxKeyCapacity))
    .build()

  private val limitStore = new ConcurrentLinkedHashMap.Builder[String, AtomicInteger]
    .initialCapacity(1000)
    .maximumWeightedCapacity(Math.max(1000, maxLimitCapacity))
    .build()

  // todo: profiling optimization - TrieSets are expensive to build. Is there a better data structure we can use?
  private def getOrSet[V](set: ConcurrentLinkedHashMap[String, V], key: String, default: V) =
    set.putIfAbsent(key, default) match {
      case null => default
      case existing => existing
    }

  val setGetTimer = timing("setGet", id)
  def setGet(key: String): Future[Seq[String]] = setGetTimer {
    Future.successful(getOrSet(setStore, key, new LazyWrapper[TrieSet[String]](TrieSet[String]())).value.toSeq)
  }

  val setDeleteTimer = timing("setDelete", id)
  def setDelete(key: String): Future[Long] = setDeleteTimer {
    setStore.remove(key)
    // todo: implement these according to the same semantics as RedisStore
    Future.successful(0L)
  }

  val setAddTimer = timing("setAdd", id)
  def setAdd(key: String, value: String): Future[Long] = setAddTimer {
    getOrSet(setStore, key, new LazyWrapper[TrieSet[String]](TrieSet[String]())).value += value
    Future.successful(0L)
  }

  val setRemoveTimer = timing("setRemove", id)
  def setRemove(key: String, value: String): Future[Long] = setRemoveTimer {
    getOrSet(setStore, key, new LazyWrapper[TrieSet[String]](TrieSet[String]())).value -= value
    Future.successful(0L)
  }

  val keyGetTimer = timing("keyGet", id)
  def keyGet(key: String): Future[Option[HttpResponse]] = keyGetTimer {
    Future.successful(Option(keyStore.get(key)))
  }

  val keySetTimer = timing("keySet", id)
  def keySet(key: String, value: HttpResponse): Future[Boolean] = keySetTimer {
    keyStore.put(key, value)
    Future.successful(true)
  }

  val keyDeleteTimer = timing("keyDelete", id)
  def keyDelete(key: String): Future[Long] = keyDeleteTimer {
    keyStore.remove(key)
    Future.successful(0L)
  }

  val tokenTimer = timing("tokenRateLimit", id)
  def tokenRateLimit(key: String, rate: Int, perSeconds: Int): Future[Boolean] = tokenTimer {
    // we could set up a concurrent system for actively pruning expired entries, or...
    // we could just let them get evicted via the LRU policy
    val floored = Math.floor(getMillis() / (perSeconds * 1000)).toLong
    val fullKey = s"rl:$floored:$key"
    val counter = getOrSet(limitStore, fullKey, new AtomicInteger(0))
    // it doesn't matter if we increment past the limit (i.e. count rate-limited requests),
    // since it won't spill over into the next bucket
    Future.successful(counter.incrementAndGet() <= rate)
  }
}
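tokenRateLimit is a fixed-window counter: time is divided into perSeconds-long buckets, and each bucket gets its own counter key, so stale buckets simply age out of the LRU map. For example, with perSeconds = 60 and a call at t = 125,000 ms, floored = floor(125000 / 60000) = 2, so the key is "rl:2:<key>" and every request between t = 120 s and t = 180 s increments the same counter. A standalone sketch of the same idea, using a TrieMap in place of the LRU map (class and method names are illustrative):

import java.util.concurrent.atomic.AtomicInteger
import scala.collection.concurrent

class FixedWindowLimiter(rate: Int, perSeconds: Int) {
  private val counters: concurrent.Map[String, AtomicInteger] = concurrent.TrieMap.empty

  def allow(key: String, nowMillis: Long = System.currentTimeMillis): Boolean = {
    val bucket = nowMillis / (perSeconds * 1000L)  // fixed-window index
    val counter = counters.getOrElseUpdate(s"rl:$bucket:$key", new AtomicInteger(0))
    // over-counting rejected requests is harmless: it cannot spill into the next bucket
    counter.incrementAndGet() <= rate
  }
}

Unlike the LRU-backed original, this sketch never evicts old buckets, so a real implementation would also need to prune stale keys.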
Example 6
Source File: TrieSet.scala From shield with MIT License
package shield.kvstore

import scala.collection.concurrent
import scala.collection.generic.{GenericSetTemplate, MutableSetFactory}

object TrieSet extends MutableSetFactory[TrieSet] {
  override def empty[T]: TrieSet[T] = new TrieSet[T]
}

class TrieSet[T]
  extends scala.collection.mutable.Set[T]
  with GenericSetTemplate[T, TrieSet]
  with scala.collection.mutable.SetLike[T, TrieSet[T]] {

  private val map = concurrent.TrieMap[T, Null]()

  override def empty: TrieSet[T] = new TrieSet[T]

  override def companion = TrieSet

  override def +=(elem: T): TrieSet.this.type = {
    map.put(elem, null)
    this
  }

  override def -=(elem: T): TrieSet.this.type = {
    map.remove(elem)
    this
  }

  override def contains(elem: T): Boolean = map.contains(elem)

  override def iterator: Iterator[T] = map.keysIterator

  override def foreach[U](f: (T) => U): Unit = map.keys.foreach(f)

  override def size: Int = map.size
}
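TrieSet builds a thread-safe mutable Set by wrapping a TrieMap and ignoring the values (every key maps to null). Since the companion extends MutableSetFactory, it picks up the standard apply constructor, so usage is ordinary mutable-Set code (the values below are illustrative):

val set = TrieSet("a", "b")
set += "c"
set -= "a"
println(set.contains("b")) // true
println(set.size)          // 2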