com.google.common.cache.LoadingCache Scala Examples
The following examples show how to use com.google.common.cache.LoadingCache.
Each example is taken from an open-source project; the source file, project, and license are noted above the code.
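For orientation, here is a minimal, self-contained sketch of the LoadingCache API that the examples below build on; the key type, value type, and loader body are illustrative, not taken from any of the projects:

import java.util.concurrent.TimeUnit

import com.google.common.cache.{CacheBuilder, CacheLoader, LoadingCache}

object LoadingCacheSketch {
  // A LoadingCache computes absent values on demand through its CacheLoader.
  val lengths: LoadingCache[String, Integer] = CacheBuilder.newBuilder()
    .maximumSize(1000)                       // cap the cache; entries beyond this are evicted
    .expireAfterWrite(10L, TimeUnit.MINUTES) // drop entries ten minutes after they are written
    .build(new CacheLoader[String, Integer] {
      override def load(key: String): Integer = key.length // runs only on a cache miss
    })

  def main(args: Array[String]): Unit = {
    println(lengths.get("guava")) // miss: invokes load and caches the result
    println(lengths.get("guava")) // hit: returns the cached value without loading
  }
}

Each example below applies this same pattern: choose eviction and expiration policies on the CacheBuilder, and put the expensive computation inside CacheLoader.load.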
Example 1
Source File: ExactQualifier.scala, from incubator-s2graph (Apache License 2.0)
package org.apache.s2graph.counter.core

import java.util

import com.google.common.cache.{CacheBuilder, CacheLoader, LoadingCache}
import org.apache.s2graph.counter.core.TimedQualifier.IntervalUnit.IntervalUnit

import scala.collection.JavaConversions._

case class ExactQualifier(tq: TimedQualifier, dimKeyValues: Map[String, String], dimension: String) {
  def checkDimensionEquality(dimQuery: Map[String, Set[String]]): Boolean = {
    // println(s"self: $dimKeyValues, query: $dimQuery")
    dimQuery.size == dimKeyValues.size && {
      for {
        (k, v) <- dimKeyValues
      } yield {
        dimQuery.get(k).exists(qv => qv.isEmpty || qv.contains(v))
      }
    }.forall(x => x)
  }
}

object ExactQualifier {
  val cache: LoadingCache[String, Map[String, String]] = CacheBuilder.newBuilder()
    .maximumSize(10000)
    .build(
      new CacheLoader[String, Map[String, String]]() {
        def load(s: String): Map[String, String] = {
          strToDimensionMap(s)
        }
      }
    )

  def apply(tq: TimedQualifier, dimension: String): ExactQualifier = {
    ExactQualifier(tq, cache.get(dimension), dimension)
  }

  def apply(tq: TimedQualifier, dimKeyValues: Map[String, String]): ExactQualifier = {
    ExactQualifier(tq, dimKeyValues, makeDimensionStr(dimKeyValues))
  }

  def makeSortedDimension(dimKeyValues: Map[String, String]): Iterator[String] = {
    val sortedDimKeyValues = new util.TreeMap[String, String](dimKeyValues)
    sortedDimKeyValues.keysIterator ++ sortedDimKeyValues.valuesIterator
  }

  def makeDimensionStr(dimKeyValues: Map[String, String]): String = {
    makeSortedDimension(dimKeyValues).mkString(".")
  }

  def getQualifiers(intervals: Seq[IntervalUnit], ts: Long, dimKeyValues: Map[String, String]): Seq[ExactQualifier] = {
    for {
      tq <- TimedQualifier.getQualifiers(intervals, ts)
    } yield {
      ExactQualifier(tq, dimKeyValues, makeDimensionStr(dimKeyValues))
    }
  }

  def strToDimensionMap(dimension: String): Map[String, String] = {
    val dimSp = {
      val sp = dimension.split('.')
      if (dimension == ".") {
        Array("", "")
      } else if (dimension.nonEmpty && dimension.last == '.') {
        sp ++ Array("")
      } else {
        sp
      }
    }
    val dimKey = dimSp.take(dimSp.length / 2)
    val dimVal = dimSp.takeRight(dimSp.length / 2)
    dimKey.zip(dimVal).toMap
  }
}
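The dimension string built by makeDimensionStr lists the sorted keys followed by their values, joined with dots, and strToDimensionMap inverts that encoding; the LoadingCache simply memoizes the parse so hot dimension strings are decoded only once. A rough REPL-style round-trip sketch, with made-up key/value pairs and the ExactQualifier object above in scope:

val dims = Map("os" -> "ios", "country" -> "kr")
val str  = ExactQualifier.makeDimensionStr(dims)      // "country.os.kr.ios": sorted keys, then values
assert(ExactQualifier.strToDimensionMap(str) == dims) // the encoding round-trips
ExactQualifier.cache.get(str)                         // repeated lookups reuse the memoized parse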
Example 2
Source File: RankingCounter.scala, from incubator-s2graph (Apache License 2.0)
package org.apache.s2graph.counter.core

import java.util.concurrent.TimeUnit

import com.google.common.cache.{CacheBuilder, CacheLoader, LoadingCache}
import com.typesafe.config.Config
import org.apache.s2graph.counter.core.RankingCounter.RankingValueMap
import org.apache.s2graph.counter.models.Counter
import org.apache.s2graph.counter.util.{CollectionCacheConfig, CollectionCache}
import org.slf4j.LoggerFactory

import scala.collection.JavaConversions._

case class RankingRow(key: RankingKey, value: Map[String, RankingValue])
case class RateRankingRow(key: RankingKey, value: Map[String, RateRankingValue])

class RankingCounter(config: Config, storage: RankingStorage) {
  private val log = LoggerFactory.getLogger(getClass)

  val storageStatusCache = new CollectionCache[Option[Boolean]](CollectionCacheConfig(1000, 60, negativeCache = false, 60))

  val cache: LoadingCache[RankingKey, RankingResult] = CacheBuilder.newBuilder()
    .maximumSize(1000000)
    .expireAfterWrite(10l, TimeUnit.MINUTES)
    .build(
      new CacheLoader[RankingKey, RankingResult]() {
        def load(rankingKey: RankingKey): RankingResult = {
          // log.warn(s"cache load: $rankingKey")
          storage.getTopK(rankingKey, Int.MaxValue).getOrElse(RankingResult(-1, Nil))
        }
      }
    )

  def getTopK(rankingKey: RankingKey, k: Int = Int.MaxValue): Option[RankingResult] = {
    val tq = rankingKey.eq.tq
    if (TimedQualifier.getQualifiers(Seq(tq.q), System.currentTimeMillis()).head == tq) {
      // do not use cache
      storage.getTopK(rankingKey, k)
    } else {
      val result = cache.get(rankingKey)
      if (result.values.nonEmpty) {
        Some(result.copy(values = result.values.take(k)))
      } else {
        None
      }
    }
  }

  def update(key: RankingKey, value: RankingValueMap, k: Int): Unit = {
    storage.update(key, value, k)
  }

  def update(values: Seq[(RankingKey, RankingValueMap)], k: Int): Unit = {
    storage.update(values, k)
  }

  def delete(key: RankingKey): Unit = {
    storage.delete(key)
  }

  def getAllItems(keys: Seq[RankingKey], k: Int = Int.MaxValue): Seq[String] = {
    val oldKeys = keys.filter(key => TimedQualifier.getQualifiers(Seq(key.eq.tq.q), System.currentTimeMillis()).head != key.eq.tq)
    val cached = cache.getAllPresent(oldKeys)
    val missed = keys.diff(cached.keys.toSeq)
    val found = storage.getTopK(missed, k)
    // log.warn(s"cached: ${cached.size()}, missed: ${missed.size}")
    for {
      (key, result) <- found
    } {
      cache.put(key, result)
    }
    for {
      (key, RankingResult(totalScore, values)) <- cached ++ found
      (item, score) <- values
    } yield {
      item
    }
  }.toSeq.distinct

  def prepare(policy: Counter): Unit = {
    storage.prepare(policy)
  }

  def destroy(policy: Counter): Unit = {
    storage.destroy(policy)
  }

  def ready(policy: Counter): Boolean = {
    storageStatusCache.withCache(s"${policy.id}") {
      Some(storage.ready(policy))
    }.getOrElse(false)
  }
}

object RankingCounter {
  type RankingValueMap = Map[String, RankingValue]
}
Example 3
Source File: BlockchainCache.scala, from matcher (MIT License)
package com.wavesplatform.dex.grpc.integration.caches

import java.time.Duration

import com.google.common.cache.{CacheBuilder, CacheLoader, LoadingCache}
import com.wavesplatform.dex.domain.utils.ScorexLogging

import scala.concurrent.{ExecutionContext, Future}
import scala.util.{Failure, Success}

abstract class BlockchainCache[K <: AnyRef, V <: AnyRef](loader: K => Future[V], expiration: Option[Duration], invalidationPredicate: V => Boolean)(
    implicit ec: ExecutionContext)
    extends ScorexLogging {

  lazy private val cache: LoadingCache[K, Future[V]] = {
    val builder = CacheBuilder.newBuilder
    expiration
      .fold(builder)(builder.expireAfterWrite)
      .build {
        new CacheLoader[K, Future[V]] {
          override def load(key: K): Future[V] = loader(key) andThen {
            case Success(value) if invalidationPredicate(value) =>
              cache.invalidate(key) // value may persist for a little longer than expected due to the fact that all the threads in the EC may be busy
            case Failure(exception) =>
              log.error(s"Error while value loading occurred: ", exception)
              cache.invalidate(key)
          }
        }
      }
  }

  def get(key: K): Future[V] = cache.get(key)

  def put(key: K, value: Future[V]): Unit = cache.put(key, value)
}

object BlockchainCache {
  def noCustomInvalidationLogic[V](value: V): Boolean = false
}
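Storing Future[V] rather than V means concurrent callers for the same key share a single in-flight load; the andThen callback then evicts values that fail or match the invalidation predicate, so they are not served until natural expiration. A hypothetical concrete subclass (the balance-fetching function and the -1 sentinel are invented for illustration):

import java.time.Duration

import scala.concurrent.{ExecutionContext, Future}

// Hypothetical: cache account balances fetched from a remote node for 30 seconds,
// and never cache the sentinel -1 returned while the node is still catching up.
class BalanceCache(fetchBalance: String => Future[java.lang.Long])(implicit ec: ExecutionContext)
    extends BlockchainCache[String, java.lang.Long](
      loader = fetchBalance,
      expiration = Some(Duration.ofSeconds(30)),
      invalidationPredicate = balance => balance.longValue == -1L
    )

When no value should ever be invalidated early, BlockchainCache.noCustomInvalidationLogic can be passed as the predicate.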
Example 4
Source File: LshFunctionCache.scala, from elastiknn (Apache License 2.0)
package com.klibisz.elastiknn.query

import java.time.Duration

import com.google.common.cache.{CacheBuilder, CacheLoader, LoadingCache}
import com.klibisz.elastiknn.api.{Mapping, Vec}
import com.klibisz.elastiknn.models.LshFunction
import com.klibisz.elastiknn.storage.StoredVec

// The Lsh Functions tend to be expensive to instantiate (i.e. initializing hashing parameters), hence a cache.
sealed trait LshFunctionCache[M <: Mapping, V <: Vec, S <: StoredVec] extends (M => LshFunction[M, V, S]) { self =>
  private val cache: LoadingCache[M, LshFunction[M, V, S]] = CacheBuilder.newBuilder
    .expireAfterWrite(Duration.ofSeconds(60))
    .build(new CacheLoader[M, LshFunction[M, V, S]] {
      override def load(m: M): LshFunction[M, V, S] = self.load(m)
    })

  override final def apply(mapping: M): LshFunction[M, V, S] = cache.get(mapping)

  protected def load(m: M): LshFunction[M, V, S]
}

object LshFunctionCache {
  implicit object Jaccard extends LshFunctionCache[Mapping.JaccardLsh, Vec.SparseBool, StoredVec.SparseBool] {
    def load(m: Mapping.JaccardLsh): LshFunction[Mapping.JaccardLsh, Vec.SparseBool, StoredVec.SparseBool] = new LshFunction.Jaccard(m)
  }
  implicit object Hamming extends LshFunctionCache[Mapping.HammingLsh, Vec.SparseBool, StoredVec.SparseBool] {
    def load(m: Mapping.HammingLsh): LshFunction[Mapping.HammingLsh, Vec.SparseBool, StoredVec.SparseBool] = new LshFunction.Hamming(m)
  }
  implicit object Angular extends LshFunctionCache[Mapping.AngularLsh, Vec.DenseFloat, StoredVec.DenseFloat] {
    def load(m: Mapping.AngularLsh): LshFunction[Mapping.AngularLsh, Vec.DenseFloat, StoredVec.DenseFloat] = new LshFunction.Angular(m)
  }
  implicit object L2 extends LshFunctionCache[Mapping.L2Lsh, Vec.DenseFloat, StoredVec.DenseFloat] {
    def load(m: Mapping.L2Lsh): LshFunction[Mapping.L2Lsh, Vec.DenseFloat, StoredVec.DenseFloat] = new LshFunction.L2(m)
  }
}
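Since each implicit object is itself a function from mapping to LSH function, call sites apply it directly and stay oblivious to the caching. A brief sketch of a lookup (hypothetical helper, using only the types defined above):

// Resolve the implicit cache and apply it; equal mappings within the 60-second
// expiry window reuse the already-initialized LshFunction instance.
def jaccardLsh(m: Mapping.JaccardLsh): LshFunction[Mapping.JaccardLsh, Vec.SparseBool, StoredVec.SparseBool] =
  implicitly[LshFunctionCache[Mapping.JaccardLsh, Vec.SparseBool, StoredVec.SparseBool]].apply(m)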
Example 5
Source File: LockManager.scala, from metronome (Apache License 2.0)
package dcos.metronome.utils.state

import com.google.common.cache.{LoadingCache, CacheLoader, CacheBuilder}
import java.util.concurrent.Semaphore

import scala.concurrent.{ExecutionContext, Future}

trait LockManager {
  def executeSequentially[T](key: String)(future: => Future[T])(implicit ec: ExecutionContext): Future[T]
}

object LockManager {
  def create(): LockManager = new LockManager {
    val locks = loadingCache[String]()

    override def executeSequentially[T](
        key: String
    )(future: => Future[T])(implicit ec: ExecutionContext): Future[T] = {
      val lock = locks.get(key)
      scala.concurrent.blocking {
        lock.acquire()
      }
      val result = future
      result.onComplete { _ =>
        lock.release()
      }
      result
    }
  }

  private[this] def loadingCache[A <: AnyRef](): LoadingCache[A, Semaphore] = {
    CacheBuilder
      .newBuilder()
      .weakValues()
      .build[A, Semaphore](new CacheLoader[A, Semaphore] {
        override def load(key: A): Semaphore = new Semaphore(1)
      })
  }
}
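A sketch of serializing a read-modify-write per key with the manager; the load and save functions are assumptions for illustration:

import scala.concurrent.{ExecutionContext, Future}

object LockManagerUsage {
  import ExecutionContext.Implicits.global

  val lockManager: LockManager = LockManager.create()

  // Without the per-key lock, two concurrent increments of the same key could
  // both read the old value; executeSequentially runs them one at a time.
  def increment(key: String, load: String => Future[Int], save: (String, Int) => Future[Unit]): Future[Unit] =
    lockManager.executeSequentially(key) {
      for {
        current <- load(key)
        _       <- save(key, current + 1)
      } yield ()
    }
}

Note that weakValues() lets the Semaphore for an idle key be garbage-collected once no in-flight operation references it, so the cache does not accumulate a semaphore per key forever.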
Example 6
Source File: ObservedLoadingCache.scala, from Waves (MIT License)
package com.wavesplatform.utils

import com.google.common.cache.{ForwardingLoadingCache, LoadingCache}
import monix.reactive.Observer

import scala.reflect.ClassTag

class ObservedLoadingCache[K, V](override val delegate: LoadingCache[K, V], changed: Observer[K])(implicit ct: ClassTag[K])
    extends ForwardingLoadingCache[K, V] {

  override def refresh(key: K): Unit = {
    super.refresh(key)
    changed.onNext(key)
  }

  override def put(key: K, value: V): Unit = {
    super.put(key, value)
    changed.onNext(key)
  }

  override def putAll(m: java.util.Map[_ <: K, _ <: V]): Unit = {
    super.putAll(m)
    m.keySet().forEach(k => changed.onNext(k))
  }

  override def invalidate(key: Any): Unit = {
    super.invalidate(key)
    onNext(key)
  }

  override def invalidateAll(keys: java.lang.Iterable[_]): Unit = {
    super.invalidateAll(keys)
    keys.forEach(onNext)
  }

  private def onNext(key: Any): Unit = key match {
    case k: K => changed.onNext(k)
    case _    =>
  }
}
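To see the wrapper in context: it is typically constructed around a plain Guava cache together with a live Observer, so that every mutation is broadcast to subscribers. A rough wiring sketch using a monix PublishSubject; the delegate cache and the subscription body are illustrative, not from the project:

import com.google.common.cache.{CacheBuilder, CacheLoader}
import monix.execution.Scheduler.Implicits.global
import monix.reactive.subjects.PublishSubject

object ObservedCacheWiring {
  // PublishSubject is both an Observer (fed by the cache) and an Observable (for subscribers).
  val changes = PublishSubject[String]()
  changes.foreach(key => println(s"cache entry changed: $key"))

  val delegate = CacheBuilder
    .newBuilder()
    .build(new CacheLoader[String, Integer] {
      override def load(key: String): Integer = key.length
    })

  val cache = new ObservedLoadingCache(delegate, changes)

  def main(args: Array[String]): Unit = {
    cache.put("foo", 3)     // emits "foo" to subscribers
    cache.invalidate("foo") // emits "foo" again
  }
}

The specification that follows exercises exactly these notification paths with a mocked Observer.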
Example 7
Source File: ObservedLoadingCacheSpecification.scala, from Waves (MIT License)
package com.wavesplatform.utils

import java.util.concurrent.TimeUnit
import java.util.concurrent.atomic.AtomicLong

import com.google.common.base.Ticker
import com.google.common.cache.{CacheBuilder, CacheLoader, LoadingCache}
import com.wavesplatform.utils.ObservedLoadingCacheSpecification.FakeTicker
import monix.execution.Ack
import monix.reactive.Observer
import org.scalamock.scalatest.MockFactory
import org.scalatest.{FreeSpec, Matchers}

import scala.jdk.CollectionConverters._
import scala.concurrent.Future
import scala.concurrent.duration.DurationInt

class ObservedLoadingCacheSpecification extends FreeSpec with Matchers with MockFactory {
  private val ExpiringTime = 10.minutes

  "notifies" - {
    "on refresh" in test { (loadingCache, changes, _) =>
      (changes.onNext _).expects("foo").returning(Future.successful(Ack.Continue)).once()
      loadingCache.refresh("foo")
    }

    "on put" in test { (loadingCache, changes, _) =>
      (changes.onNext _).expects("foo").returning(Future.successful(Ack.Continue)).once()
      loadingCache.put("foo", 10)
    }

    "on putAll" in test { (loadingCache, changes, _) =>
      (changes.onNext _).expects("foo").returning(Future.successful(Ack.Continue)).once()
      (changes.onNext _).expects("bar").returning(Future.successful(Ack.Continue)).once()
      loadingCache.putAll(Map[String, Integer]("foo" -> 10, "bar" -> 11).asJava)
    }

    "on invalidate" in test { (loadingCache, changes, _) =>
      (changes.onNext _).expects("foo").returning(Future.successful(Ack.Continue)).once()
      loadingCache.invalidate("foo")
    }

    "on invalidateAll" in test { (loadingCache, changes, _) =>
      (changes.onNext _).expects("foo").returning(Future.successful(Ack.Continue)).once()
      (changes.onNext _).expects("bar").returning(Future.successful(Ack.Continue)).once()
      loadingCache.invalidateAll(Seq("foo", "bar").asJava)
    }
  }

  "don't notify" - {
    "on cache expiration" in test { (loadingCache, changes, ticker) =>
      (changes.onNext _).expects("foo").returning(Future.successful(Ack.Continue)).once()
      loadingCache.put("foo", 1)
      ticker.advance(ExpiringTime.toMillis + 100, TimeUnit.MILLISECONDS)
    }
  }

  private def test(f: (LoadingCache[String, Integer], Observer[String], FakeTicker) => Unit): Unit = {
    val changes = mock[Observer[String]]
    val ticker  = new FakeTicker()
    val delegate = CacheBuilder
      .newBuilder()
      .expireAfterWrite(ExpiringTime.toMillis, TimeUnit.MILLISECONDS)
      .ticker(ticker)
      .build(new CacheLoader[String, Integer] {
        override def load(key: String): Integer = key.length
      })

    val loadingCache = new ObservedLoadingCache(delegate, changes)
    f(loadingCache, changes, ticker)
  }
}

private object ObservedLoadingCacheSpecification {
  // see https://github.com/google/guava/blob/master/guava-testlib/src/com/google/common/testing/FakeTicker.java
  class FakeTicker extends Ticker {
    private val nanos                  = new AtomicLong()
    private var autoIncrementStepNanos = 0L

    def advance(time: Long, timeUnit: TimeUnit): FakeTicker = advance(timeUnit.toNanos(time))

    def advance(nanoseconds: Long): FakeTicker = {
      nanos.addAndGet(nanoseconds)
      this
    }

    def setAutoIncrementStep(autoIncrementStep: Long, timeUnit: TimeUnit): FakeTicker = {
      require(autoIncrementStep >= 0, "May not auto-increment by a negative amount")
      this.autoIncrementStepNanos = timeUnit.toNanos(autoIncrementStep)
      this
    }

    override def read: Long = nanos.getAndAdd(autoIncrementStepNanos)
  }
}