com.google.common.cache.Cache Scala Examples
The following examples show how to use com.google.common.cache.Cache from Scala. Each snippet is taken from an open-source project; the source file, project, and license are noted above each example.
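Before diving in, it may help to see the small Guava surface all seven examples share: CacheBuilder configures size and expiry bounds and builds a Cache, Option(...) wraps the nullable getIfPresent, and get(key, loader) computes and stores a value atomically on a miss. A minimal, self-contained sketch (all names here are illustrative, not taken from any project below):

import java.util.concurrent.TimeUnit
import com.google.common.cache.{Cache, CacheBuilder}

object GuavaCacheBasics extends App {
  // Bounded cache whose entries expire 10 minutes after they are written.
  val cache: Cache[String, String] = CacheBuilder.newBuilder()
    .maximumSize(1000)
    .expireAfterWrite(10, TimeUnit.MINUTES)
    .build[String, String]()

  cache.put("k", "v")
  // getIfPresent returns null on a miss (it is a Java API), so wrap it in Option.
  val hit: Option[String] = Option(cache.getIfPresent("k"))
  // get(key, loader) runs the loader and caches its result on a miss.
  val loaded: String = cache.get("k2", () => "computed")
  println((hit, loaded))
}

Guava's Cache signals misses with null rather than None, so the Option(...) guard above recurs throughout the examples below.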
Example 1
Source File: JwksVerifier.scala, from daml (Apache License 2.0)
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.jwt

import java.net.{URI, URL}
import java.security.interfaces.RSAPublicKey
import java.util.concurrent.TimeUnit

import com.auth0.jwk.UrlJwkProvider
import com.daml.jwt.JwtVerifier.Error
import com.google.common.cache.{Cache, CacheBuilder}
import scalaz.{-\/, Show, \/}
import scalaz.syntax.show._

// NOTE: this excerpt omits the enclosing `class JwksVerifier(url: URL)`, which
// builds the Guava `cache: Cache[String, JwtVerifier]` and defines the
// `getVerifier` helper used below.

  private[this] def getCachedVerifier(keyId: String): Error \/ JwtVerifier = {
    if (keyId == null)
      -\/(Error('getCachedVerifier, "No Key ID found"))
    else
      \/.fromTryCatchNonFatal(
        cache.get(keyId, () => getVerifier(keyId).fold(e => sys.error(e.shows), x => x))
      ).leftMap(e => Error('getCachedVerifier, e.getMessage))
  }

  def verify(jwt: domain.Jwt): Error \/ domain.DecodedJwt[String] = {
    for {
      keyId <- \/.fromTryCatchNonFatal(com.auth0.jwt.JWT.decode(jwt.value).getKeyId)
        .leftMap(e => Error('verify, e.getMessage))
      verifier <- getCachedVerifier(keyId)
      decoded <- verifier.verify(jwt)
    } yield decoded
  }
} // end of class JwksVerifier (declaration elided in this excerpt)

object JwksVerifier {
  def apply(url: String) = new JwksVerifier(new URI(url).toURL)

  final case class Error(what: Symbol, message: String)

  object Error {
    implicit val showInstance: Show[Error] =
      Show.shows(e => s"JwksVerifier.Error: ${e.what}, ${e.message}")
  }
}
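The design here memoizes one JwtVerifier per JWKS key ID ("kid"), so repeated tokens signed with the same key skip the remote JWKS fetch. A hypothetical usage sketch; the URL and token are placeholders, and the DecodedJwt payload field is assumed here rather than taken from the excerpt:

import scalaz.{-\/, \/-}
import com.daml.jwt.{domain, JwksVerifier}

val verifier = JwksVerifier("https://example.com/.well-known/jwks.json")
verifier.verify(domain.Jwt("<serialized-jwt>")) match {
  case \/-(decoded) => println(s"verified payload: ${decoded.payload}")
  case -\/(error)   => println(s"verification failed: $error")
}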
Example 2
Source File: CollectionCache.scala, from incubator-s2graph (Apache License 2.0)
package org.apache.s2graph.counter.util

import java.net.InetAddress
import java.util.concurrent.TimeUnit

import com.google.common.cache.{Cache, CacheBuilder}
import org.slf4j.LoggerFactory

import scala.concurrent.{ExecutionContext, Future}
import scala.language.{postfixOps, reflectiveCalls}

case class CollectionCacheConfig(maxSize: Int, ttl: Int, negativeCache: Boolean = false, negativeTTL: Int = 600)

class CollectionCache[C <: { def nonEmpty: Boolean; def isEmpty: Boolean }](config: CollectionCacheConfig) {
  private val cache: Cache[String, C] = CacheBuilder.newBuilder()
    .expireAfterWrite(config.ttl, TimeUnit.SECONDS)
    .maximumSize(config.maxSize)
    .build[String, C]()

//  private lazy val cache = new SynchronizedLruMap[String, (C, Int)](config.maxSize)
  private lazy val className = this.getClass.getSimpleName
  private lazy val log = LoggerFactory.getLogger(this.getClass)

  val localHostname = InetAddress.getLocalHost.getHostName

  def size = cache.size
  val maxSize = config.maxSize

  // cache statistics
  def getStatsString: String =
    s"$localHostname ${cache.stats().toString}"

  def withCache(key: String)(op: => C): C = {
    Option(cache.getIfPresent(key)) match {
      case Some(r) => r
      case None =>
        val r = op
        if (r.nonEmpty || config.negativeCache) {
          cache.put(key, r)
        }
        r
    }
  }

  def withCacheAsync(key: String)(op: => Future[C])(implicit ec: ExecutionContext): Future[C] = {
    Option(cache.getIfPresent(key)) match {
      case Some(r) => Future.successful(r)
      case None =>
        op.map { r =>
          if (r.nonEmpty || config.negativeCache) {
            cache.put(key, r)
          }
          r
        }
    }
  }

  def purgeKey(key: String) = {
    cache.invalidate(key)
  }

  def contains(key: String): Boolean = {
    Option(cache.getIfPresent(key)).nonEmpty
  }
}
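The structural bound on C exists so the cache can check nonEmpty: by default only non-empty collections are stored, and negativeCache opts empty results in as well. An illustrative usage sketch (the key scheme and values are made up):

import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.Future
import org.apache.s2graph.counter.util.{CollectionCache, CollectionCacheConfig}

val userCache = new CollectionCache[Seq[String]](CollectionCacheConfig(maxSize = 1000, ttl = 60))

// Synchronous: the by-name block runs only on a miss.
val names: Seq[String] = userCache.withCache("user:42") { Seq("alice") }

// Asynchronous variant for Future-producing lookups.
val namesF: Future[Seq[String]] = userCache.withCacheAsync("user:43") { Future(Seq("bob")) }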
Example 3
Source File: CacheableRPCInterceptor.scala, from Linkis (Apache License 2.0)
package com.webank.wedatasphere.linkis.rpc.interceptor.common

import java.util.concurrent.{Callable, TimeUnit}

import com.google.common.cache.{Cache, CacheBuilder, RemovalListener, RemovalNotification}
import com.webank.wedatasphere.linkis.common.exception.WarnException
import com.webank.wedatasphere.linkis.common.utils.Logging
import com.webank.wedatasphere.linkis.protocol.CacheableProtocol
import com.webank.wedatasphere.linkis.rpc.interceptor.{RPCInterceptor, RPCInterceptorChain, RPCInterceptorExchange}
import org.springframework.stereotype.Component

@Component
class CacheableRPCInterceptor extends RPCInterceptor with Logging {

  private val guavaCache: Cache[Any, Any] = CacheBuilder.newBuilder().concurrencyLevel(5)
    .expireAfterAccess(120000, TimeUnit.MILLISECONDS).initialCapacity(20) // TODO: make these values configurable parameters
    .maximumSize(1000).recordStats().removalListener(new RemovalListener[Any, Any] {
      override def onRemoval(removalNotification: RemovalNotification[Any, Any]): Unit = {
        debug(s"CacheSender removed key => ${removalNotification.getKey}, value => ${removalNotification.getValue}.")
      }
    }).asInstanceOf[CacheBuilder[Any, Any]].build()

  override val order: Int = 10

  override def intercept(interceptorExchange: RPCInterceptorExchange, chain: RPCInterceptorChain): Any =
    interceptorExchange.getProtocol match {
      case cacheable: CacheableProtocol =>
        guavaCache.get(cacheable.toString, new Callable[Any] {
          override def call(): Any = {
            val returnMsg = chain.handle(interceptorExchange)
            returnMsg match {
              // Throwing inside the Callable keeps failed replies out of the cache.
              case warn: WarnException => throw warn
              case _ => returnMsg
            }
          }
        })
      case _ => chain.handle(interceptorExchange)
    }
}
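The WarnException rethrow relies on a Guava guarantee: a value computed inside Cache.get's Callable is stored only if the Callable returns normally, and an unchecked exception surfaces as an UncheckedExecutionException without populating the entry. A minimal demonstration of that behavior (names are illustrative):

import java.util.concurrent.Callable
import com.google.common.cache.{Cache, CacheBuilder}
import com.google.common.util.concurrent.UncheckedExecutionException

object LoaderFailureDemo extends App {
  val cache: Cache[String, String] = CacheBuilder.newBuilder().build[String, String]()

  def load(ok: Boolean): String =
    cache.get("reply", new Callable[String] {
      override def call(): String =
        if (ok) "pong" else throw new RuntimeException("boom")
    })

  // The failed load is not cached...
  try load(ok = false)
  catch { case e: UncheckedExecutionException => println(e.getCause.getMessage) } // prints "boom"
  // ...so the next call recomputes and caches successfully.
  println(load(ok = true)) // prints "pong"
}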
Example 4
Source File: KafkaProducerCacheSpec.scala, from spark-kafka-writer (Apache License 2.0)
package com.github.benfradet.spark.kafka.writer

import com.google.common.cache.Cache
import org.apache.kafka.clients.producer._
import org.apache.kafka.common.serialization.StringSerializer
import org.scalatest.PrivateMethodTester

import scala.concurrent.duration._

class KafkaProducerCacheSpec extends SKRSpec with PrivateMethodTester {
  val cache = PrivateMethod[Cache[Seq[(String, Object)], KafkaProducer[_, _]]]('cache)
  val m1 = Map(
    "bootstrap.servers" -> "127.0.0.1:9092",
    "key.serializer" -> classOf[StringSerializer].getName,
    "value.serializer" -> classOf[StringSerializer].getName
  )
  val m2 = m1 + ("acks" -> "0")

  override def beforeAll(): Unit = {
    super.beforeAll()
    KafkaProducerCache.invokePrivate(cache()).invalidateAll()
  }

  "A KafkaProducerCache" when {
    "calling getProducer" should {
      "create the producer if it doesn't exist and retrieve it if it exists" in {
        cacheSize shouldBe 0
        val p1 = KafkaProducerCache.getProducer[String, String](m1)
        cacheSize shouldBe 1
        val p2 = KafkaProducerCache.getProducer[String, String](m1)
        p1 shouldBe p2
        cacheSize shouldBe 1
      }
    }

    "closing a producer" should {
      "close the correct producer" in {
        cacheSize shouldBe 1
        val p1 = KafkaProducerCache.getProducer[String, String](m1)
        cacheSize shouldBe 1
        val p2 = KafkaProducerCache.getProducer[String, String](m2)
        cacheSize shouldBe 2
        p1 should not be p2
        KafkaProducerCache.close(m1)
        cacheSize shouldBe 1
      }
    }
  }

  private def cacheSize: Int = KafkaProducerCache.invokePrivate(cache()).asMap.size
}
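The spec reaches the private Guava cache through ScalaTest's PrivateMethodTester, which is why each assertion can count entries directly. For context, here is a hedged sketch of the kind of producer cache such a spec exercises; KafkaProducerCache's real internals may differ (the PrivateMethod type above suggests it is actually keyed by Seq[(String, Object)], while this sketch keys by Map for brevity):

import com.google.common.cache.{Cache, CacheBuilder}
import org.apache.kafka.clients.producer.KafkaProducer
import scala.jdk.CollectionConverters._

object ProducerCacheSketch {
  // One producer per distinct config map, created lazily on first request.
  private val cache: Cache[Map[String, Object], KafkaProducer[_, _]] =
    CacheBuilder.newBuilder().build[Map[String, Object], KafkaProducer[_, _]]()

  def getProducer[K, V](config: Map[String, Object]): KafkaProducer[K, V] =
    cache.get(config, () => new KafkaProducer[K, V](config.asJava))
      .asInstanceOf[KafkaProducer[K, V]]

  def close(config: Map[String, Object]): Unit =
    Option(cache.getIfPresent(config)).foreach { producer =>
      cache.invalidate(config)
      producer.close()
    }
}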
Example 5
Source File: L1Cache.scala, from CM-Well (Apache License 2.0)
package cmwell.zcache

import java.util.concurrent.TimeUnit

import com.google.common.cache.{Cache, CacheBuilder}

import scala.concurrent.{ExecutionContext, Future}
import scala.util.{Failure, Success}

object L1Cache {

  def memoize[K, V](task: K => Future[V])(
    digest: K => String,
    isCachable: V => Boolean = (_: V) => true
  )(l1Size: Int = 1024, ttlSeconds: Int = 10)(implicit ec: ExecutionContext): K => Future[V] = {
    val cache: Cache[String, Future[V]] =
      CacheBuilder.newBuilder().maximumSize(l1Size).expireAfterWrite(ttlSeconds, TimeUnit.SECONDS).build()

    (input: K) => {
      val key = digest(input)
      Option(cache.getIfPresent(key)) match {
        case Some(cachedFuture) => cachedFuture.asInstanceOf[Future[V]]
        case None =>
          val fut = task(input)
          cache.put(key, fut)
          fut.andThen {
            // Drop results the caller declared non-cachable once they arrive.
            case Success(v) => if (!isCachable(v)) cache.invalidate(key)
            // On failure, invalidate only if our own future is still the cached
            // entry, so a newer in-flight computation is not evicted by mistake.
            case Failure(_) =>
              cache.getIfPresent(key) match {
                case `fut` => cache.invalidate(key)
                case _ => // Do Nothing
              }
          }
      }
    }
  }

  def memoizeWithCache[K, V](task: K => Future[V])(digest: K => String, isCachable: V => Boolean = (_: V) => true)(
    cache: Cache[String, Future[V]]
  )(implicit ec: ExecutionContext): K => Future[V] = { (input: K) =>
    {
      val key = digest(input)
      Option(cache.getIfPresent(key)).getOrElse {
        val fut = task(input)
        cache.put(key, fut)
        fut.andThen {
          case Success(v) =>
            if (!isCachable(v)) cache.getIfPresent(key) match {
              case `fut` => cache.invalidate(key)
              case _ => // Do Nothing
            }
          case Failure(_) =>
            cache.getIfPresent(key) match {
              case `fut` => cache.invalidate(key)
              case _ => // Do Nothing
            }
        }
      }
    }
  }
}
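Note that the cache stores Future[V] rather than V: concurrent callers with the same key share the single in-flight computation instead of stampeding the backend. An illustrative usage sketch (the lookup function and key scheme are made up):

import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.Future
import cmwell.zcache.L1Cache

def fetchUser(id: Int): Future[String] = Future { s"user-$id" } // stand-in for a real I/O call

val cachedFetch: Int => Future[String] =
  L1Cache.memoize(fetchUser)(digest = (id: Int) => id.toString)(l1Size = 512, ttlSeconds = 30)

cachedFetch(7) // runs fetchUser and caches the Future
cachedFetch(7) // within 30 seconds, returns the same cached Future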
Example 6
Source File: BigtableDoFnTest.scala, from scio (Apache License 2.0)
package com.spotify.scio.bigtable

import java.util.concurrent.ConcurrentLinkedQueue

import com.google.cloud.bigtable.grpc.BigtableSession
import com.google.common.cache.{Cache, CacheBuilder}
import com.google.common.util.concurrent.{Futures, ListenableFuture}
import com.spotify.scio.testing._
import com.spotify.scio.transforms.BaseAsyncLookupDoFn.CacheSupplier

import scala.jdk.CollectionConverters._
import scala.util.{Failure, Success}

class BigtableDoFnTest extends PipelineSpec {
  "BigtableDoFn" should "work" in {
    val fn = new TestBigtableDoFn
    val output = runWithData(1 to 10)(_.parDo(fn))
      .map(kv => (kv.getKey, kv.getValue.get()))
    output should contain theSameElementsAs (1 to 10).map(x => (x, x.toString))
  }

  it should "work with cache" in {
    val fn = new TestCachingBigtableDoFn
    val output = runWithData((1 to 10) ++ (6 to 15))(_.parDo(fn))
      .map(kv => (kv.getKey, kv.getValue.get()))
    output should contain theSameElementsAs ((1 to 10) ++ (6 to 15)).map(x => (x, x.toString))
    BigtableDoFnTest.queue.asScala.toSet should contain theSameElementsAs (1 to 15)
    BigtableDoFnTest.queue.size() should be <= 20
  }

  it should "work with failures" in {
    val fn = new TestFailingBigtableDoFn
    val output = runWithData(1 to 10)(_.parDo(fn)).map { kv =>
      val r = kv.getValue.asScala match {
        case Success(v) => v
        case Failure(e) => e.getMessage
      }
      (kv.getKey, r)
    }
    output should contain theSameElementsAs (1 to 10).map { x =>
      val prefix = if (x % 2 == 0) "success" else "failure"
      (x, prefix + x.toString)
    }
  }
}

object BigtableDoFnTest {
  val queue: ConcurrentLinkedQueue[Int] = new ConcurrentLinkedQueue[Int]()
}

class TestBigtableDoFn extends BigtableDoFn[Int, String](null) {
  override def newClient(): BigtableSession = null
  override def asyncLookup(session: BigtableSession, input: Int): ListenableFuture[String] =
    Futures.immediateFuture(input.toString)
}

class TestCachingBigtableDoFn extends BigtableDoFn[Int, String](null, 100, new TestCacheSupplier) {
  override def newClient(): BigtableSession = null
  override def asyncLookup(session: BigtableSession, input: Int): ListenableFuture[String] = {
    BigtableDoFnTest.queue.add(input)
    Futures.immediateFuture(input.toString)
  }
}

class TestFailingBigtableDoFn extends BigtableDoFn[Int, String](null) {
  override def newClient(): BigtableSession = null
  override def asyncLookup(session: BigtableSession, input: Int): ListenableFuture[String] =
    if (input % 2 == 0) {
      Futures.immediateFuture("success" + input)
    } else {
      Futures.immediateFailedFuture(new RuntimeException("failure" + input))
    }
}

class TestCacheSupplier extends CacheSupplier[Int, String, java.lang.Long] {
  override def createCache(): Cache[java.lang.Long, String] =
    CacheBuilder.newBuilder().build[java.lang.Long, String]()
  override def getKey(input: Int): java.lang.Long = input.toLong
}
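TestCacheSupplier above builds an unbounded cache, which is fine for a test but rarely what a pipeline wants. A hedged variation following the same CacheSupplier shape, with the bound and TTL values picked arbitrarily for illustration:

import java.util.concurrent.TimeUnit
import com.google.common.cache.{Cache, CacheBuilder}
import com.spotify.scio.transforms.BaseAsyncLookupDoFn.CacheSupplier

class BoundedCacheSupplier extends CacheSupplier[Int, String, java.lang.Long] {
  override def createCache(): Cache[java.lang.Long, String] =
    CacheBuilder.newBuilder()
      .maximumSize(10000)                    // cap memory held by lookups
      .expireAfterWrite(5, TimeUnit.MINUTES) // refresh stale Bigtable reads
      .build[java.lang.Long, String]()
  override def getKey(input: Int): java.lang.Long = input.toLong
}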
Example 7
Source File: FetchWithCacheConfigClient.scala, from izanami (Apache License 2.0)
package izanami.configs

import java.util.concurrent.TimeUnit

import akka.actor.ActorSystem
import akka.event.Logging
import akka.http.scaladsl.util.FastFuture
import akka.stream.Materializer
import akka.util.Timeout
import com.google.common.cache.{Cache, CacheBuilder}
import izanami.Strategy.FetchWithCacheStrategy
import izanami.scaladsl._
import izanami._
import play.api.libs.json.Json

import scala.concurrent.Future
import scala.concurrent.duration.DurationInt
import scala.util.{Failure, Success}

object FetchWithCacheConfigClient {
  def apply(
      clientConfig: ClientConfig,
      fallback: Configs,
      underlyingStrategy: ConfigClient,
      cacheConfig: FetchWithCacheStrategy
  )(implicit izanamiDispatcher: IzanamiDispatcher,
    actorSystem: ActorSystem,
    materializer: Materializer): FetchWithCacheConfigClient =
    new FetchWithCacheConfigClient(clientConfig,
                                   fallback,
                                   underlyingStrategy,
                                   cacheConfig,
                                   underlyingStrategy.cudConfigClient)
}

private[configs] class FetchWithCacheConfigClient(
    clientConfig: ClientConfig,
    fallback: Configs,
    underlyingStrategy: ConfigClient,
    cacheConfig: FetchWithCacheStrategy,
    override val cudConfigClient: CUDConfigClient
)(implicit val izanamiDispatcher: IzanamiDispatcher,
  actorSystem: ActorSystem,
  val materializer: Materializer)
    extends ConfigClient {

  import actorSystem.dispatcher

  implicit val timeout = Timeout(10.second)

  private val logger = Logging(actorSystem, this.getClass.getName)
  private val cache: Cache[String, Seq[Config]] = CacheBuilder
    .newBuilder()
    .maximumSize(cacheConfig.maxElement)
    .expireAfterWrite(cacheConfig.duration.toMillis, TimeUnit.MILLISECONDS)
    .build[String, Seq[Config]]()

  override def configs(pattern: Seq[String]): Future[Configs] = {
    val convertedPattern =
      Option(pattern).map(_.map(_.replace(".", ":")).mkString(",")).getOrElse("*")
    Option(cache.getIfPresent(convertedPattern)) match {
      case Some(configs) => FastFuture.successful(Configs(configs))
      case None =>
        val futureConfigs = underlyingStrategy.configs(convertedPattern)
        futureConfigs.onComplete {
          case Success(c) => cache.put(convertedPattern, c.configs)
          case Failure(e) => logger.error(e, "Error fetching configs")
        }
        futureConfigs
    }
  }

  override def config(key: String) = {
    require(key != null, "key should not be null")
    val convertedKey: String = key.replace(".", ":")
    Option(cache.getIfPresent(convertedKey)) match {
      case Some(configs) =>
        FastFuture.successful(configs.find(_.id == convertedKey).map(_.value).getOrElse(Json.obj()))
      case None =>
        val futureConfig: Future[Configs] = underlyingStrategy.configs(convertedKey)
        futureConfig.onComplete {
          case Success(configs) => cache.put(convertedKey, configs.configs)
          case Failure(e) => logger.error(e, "Error fetching configs")
        }
        futureConfig
          .map(
            _.configs
              .find(_.id == convertedKey)
              .map(c => c.value)
              .getOrElse(Json.obj())
          )
    }
  }

  override def configsSource(pattern: String) = underlyingStrategy.configsSource(pattern)

  override def configsStream(pattern: String) = underlyingStrategy.configsStream(pattern)
}
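Both configs and config above follow the same read-through shape: consult the cache, otherwise delegate to the underlying client and populate the cache only when the Future succeeds, leaving failures uncached so the next call retries. A minimal sketch of that shape in isolation (class and parameter names are illustrative, not from izanami):

import java.util.concurrent.TimeUnit
import com.google.common.cache.{Cache, CacheBuilder}
import scala.concurrent.{ExecutionContext, Future}
import scala.util.{Failure, Success}

class ReadThrough[V <: AnyRef](fetch: String => Future[V], maxSize: Long, ttlMillis: Long)(
    implicit ec: ExecutionContext) {
  private val cache: Cache[String, V] = CacheBuilder.newBuilder()
    .maximumSize(maxSize)
    .expireAfterWrite(ttlMillis, TimeUnit.MILLISECONDS)
    .build[String, V]()

  def get(key: String): Future[V] =
    Option(cache.getIfPresent(key)) match {
      case Some(v) => Future.successful(v)
      case None =>
        val f = fetch(key)
        f.onComplete {
          case Success(v) => cache.put(key, v)
          case Failure(_) => // leave the cache untouched so the next call retries
        }
        f
    }
}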