com.google.common.cache.CacheBuilder Scala Examples
The following examples show how to use com.google.common.cache.CacheBuilder.
Each example is taken from an open-source project; the source file, project, and license are noted above each listing.
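Most of the examples below follow one of two patterns: configure a builder with size and expiration limits and call build() to get a manual Cache, or pass a CacheLoader so that build() returns a LoadingCache which computes missing values on access. The following minimal sketch is not taken from any of the projects below; the key and value types and the loader body are illustrative assumptions.

import java.util.concurrent.TimeUnit

import com.google.common.cache.{Cache, CacheBuilder, CacheLoader, LoadingCache}

object CacheBuilderSketch {

  // Manual cache: the caller decides when to put and get entries.
  val manual: Cache[String, String] = CacheBuilder
    .newBuilder()
    .maximumSize(1000)
    .expireAfterWrite(10, TimeUnit.MINUTES)
    .build[String, String]()

  // Loading cache: missing keys are computed by the CacheLoader on get.
  val loading: LoadingCache[String, Integer] = CacheBuilder
    .newBuilder()
    .maximumSize(1000)
    .build(new CacheLoader[String, Integer] {
      override def load(key: String): Integer = key.length // illustrative loader
    })

  def main(args: Array[String]): Unit = {
    manual.put("k", "v")
    println(Option(manual.getIfPresent("k"))) // Some(v)
    println(loading.get("guava"))             // 5
  }
}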
Example 1
Source File: JdbcConnector.scala From bandar-log with Apache License 2.0
package com.aol.one.dwh.bandarlog.connectors

import java.sql.{Connection, Statement}

import com.aol.one.dwh.infra.sql.Setting
import com.aol.one.dwh.infra.sql.pool.HikariConnectionPool
import com.aol.one.dwh.infra.util.LogTrait
import com.aol.one.dwh.infra.sql.Query
import com.aol.one.dwh.infra.sql.pool.SqlSource.{PRESTO, VERTICA}
import com.facebook.presto.jdbc.PrestoConnection
import com.google.common.cache.CacheBuilder
import com.vertica.jdbc.VerticaConnection
import org.apache.commons.dbutils.ResultSetHandler
import resource.managed

import scala.concurrent.duration._
import scala.util.Try
import scalacache.guava.GuavaCache
import scalacache.memoization._
import scalacache.{CacheConfig, ScalaCache}

abstract class JdbcConnector(@cacheKeyExclude pool: HikariConnectionPool) extends LogTrait {

  implicit val scalaCache = ScalaCache(
    GuavaCache(CacheBuilder.newBuilder().maximumSize(100).build[String, Object]),
    cacheConfig = CacheConfig(keyPrefix = Some(pool.getName))
  )

  def runQuery[V](query: Query, @cacheKeyExclude handler: ResultSetHandler[V]): V = memoizeSync(50.seconds) {
    val rm = for {
      connection <- managed(pool.getConnection)
      statement  <- managed(connection.createStatement())
    } yield {
      applySettings(connection, statement, query.settings)
      logger.info(s"Running query:[${query.sql}] source:[${query.source}] settings:[${query.settings.mkString(",")}]")
      val resultSet = statement.executeQuery(query.sql)
      handler.handle(resultSet)
    }

    Try(rm.acquireAndGet(identity)).getOrElse(throw new RuntimeException(s"Failure:[$query]"))
  }

  private def applySettings(connection: Connection, statement: Statement, settings: Seq[Setting]) = {
    settings.foreach(setting => applySetting(connection, statement, setting))
  }

  def applySetting(connection: Connection, statement: Statement, setting: Setting)
}

object JdbcConnector {

  private class PrestoConnector(connectionPool: HikariConnectionPool) extends JdbcConnector(connectionPool) {
    override def applySetting(connection: Connection, statement: Statement, setting: Setting): Unit = {
      connection.unwrap(classOf[PrestoConnection]).setSessionProperty(setting.key, setting.value)
    }
  }

  private class VerticaConnector(connectionPool: HikariConnectionPool) extends JdbcConnector(connectionPool) {
    override def applySetting(connection: Connection, statement: Statement, setting: Setting): Unit = {
      connection.unwrap(classOf[VerticaConnection]).setProperty(setting.key, setting.value)
    }
  }

  def apply(connectorType: String, connectionPool: HikariConnectionPool): JdbcConnector = connectorType match {
    case VERTICA => new VerticaConnector(connectionPool)
    case PRESTO  => new PrestoConnector(connectionPool)
    case _       => throw new IllegalArgumentException(s"Can't create connector for SQL source:[$connectorType]")
  }
}
Example 2
Source File: FetchWithCacheConfigClient.scala From izanami with Apache License 2.0
package izanami.configs

import java.util.concurrent.TimeUnit

import akka.actor.ActorSystem
import akka.event.Logging
import akka.http.scaladsl.util.FastFuture
import akka.stream.Materializer
import akka.util.Timeout
import com.google.common.cache.{Cache, CacheBuilder}
import izanami.Strategy.FetchWithCacheStrategy
import izanami.scaladsl._
import izanami._
import play.api.libs.json.Json

import scala.concurrent.Future
import scala.concurrent.duration.DurationInt
import scala.util.{Failure, Success}

object FetchWithCacheConfigClient {
  def apply(
      clientConfig: ClientConfig,
      fallback: Configs,
      underlyingStrategy: ConfigClient,
      cacheConfig: FetchWithCacheStrategy
  )(implicit izanamiDispatcher: IzanamiDispatcher,
    actorSystem: ActorSystem,
    materializer: Materializer): FetchWithCacheConfigClient =
    new FetchWithCacheConfigClient(clientConfig,
                                   fallback,
                                   underlyingStrategy,
                                   cacheConfig,
                                   underlyingStrategy.cudConfigClient)
}

private[configs] class FetchWithCacheConfigClient(
    clientConfig: ClientConfig,
    fallback: Configs,
    underlyingStrategy: ConfigClient,
    cacheConfig: FetchWithCacheStrategy,
    override val cudConfigClient: CUDConfigClient
)(implicit val izanamiDispatcher: IzanamiDispatcher,
  actorSystem: ActorSystem,
  val materializer: Materializer)
    extends ConfigClient {

  import actorSystem.dispatcher

  implicit val timeout = Timeout(10.second)

  private val logger = Logging(actorSystem, this.getClass.getName)

  private val cache: Cache[String, Seq[Config]] = CacheBuilder
    .newBuilder()
    .maximumSize(cacheConfig.maxElement)
    .expireAfterWrite(cacheConfig.duration.toMillis, TimeUnit.MILLISECONDS)
    .build[String, Seq[Config]]()

  override def configs(pattern: Seq[String]): Future[Configs] = {
    val convertedPattern =
      Option(pattern).map(_.map(_.replace(".", ":")).mkString(",")).getOrElse("*")
    Option(cache.getIfPresent(convertedPattern)) match {
      case Some(configs) => FastFuture.successful(Configs(configs))
      case None =>
        val futureConfigs = underlyingStrategy.configs(convertedPattern)
        futureConfigs.onComplete {
          case Success(c) => cache.put(convertedPattern, c.configs)
          case Failure(e) => logger.error(e, "Error fetching configs")
        }
        futureConfigs
    }
  }

  override def config(key: String) = {
    require(key != null, "key should not be null")
    val convertedKey: String = key.replace(".", ":")
    Option(cache.getIfPresent(convertedKey)) match {
      case Some(configs) =>
        FastFuture.successful(configs.find(_.id == convertedKey).map(_.value).getOrElse(Json.obj()))
      case None =>
        val futureConfig: Future[Configs] = underlyingStrategy.configs(convertedKey)
        futureConfig.onComplete {
          case Success(configs) => cache.put(convertedKey, configs.configs)
          case Failure(e)       => logger.error(e, "Error fetching features")
        }
        futureConfig
          .map(
            _.configs
              .find(_.id == convertedKey)
              .map(c => c.value)
              .getOrElse(Json.obj())
          )
    }
  }

  override def configsSource(pattern: String) = underlyingStrategy.configsSource(pattern)

  override def configsStream(pattern: String) = underlyingStrategy.configsStream(pattern)
}
Example 3
Source File: BigtableDoFnTest.scala From scio with Apache License 2.0
package com.spotify.scio.bigtable

import java.util.concurrent.ConcurrentLinkedQueue

import com.google.cloud.bigtable.grpc.BigtableSession
import com.google.common.cache.{Cache, CacheBuilder}
import com.google.common.util.concurrent.{Futures, ListenableFuture}
import com.spotify.scio.testing._
import com.spotify.scio.transforms.BaseAsyncLookupDoFn.CacheSupplier

import scala.jdk.CollectionConverters._
import scala.util.{Failure, Success}

class BigtableDoFnTest extends PipelineSpec {
  "BigtableDoFn" should "work" in {
    val fn = new TestBigtableDoFn
    val output = runWithData(1 to 10)(_.parDo(fn))
      .map(kv => (kv.getKey, kv.getValue.get()))
    output should contain theSameElementsAs (1 to 10).map(x => (x, x.toString))
  }

  it should "work with cache" in {
    val fn = new TestCachingBigtableDoFn
    val output = runWithData((1 to 10) ++ (6 to 15))(_.parDo(fn))
      .map(kv => (kv.getKey, kv.getValue.get()))
    output should contain theSameElementsAs ((1 to 10) ++ (6 to 15)).map(x => (x, x.toString))
    BigtableDoFnTest.queue.asScala.toSet should contain theSameElementsAs (1 to 15)
    BigtableDoFnTest.queue.size() should be <= 20
  }

  it should "work with failures" in {
    val fn = new TestFailingBigtableDoFn
    val output = runWithData(1 to 10)(_.parDo(fn)).map { kv =>
      val r = kv.getValue.asScala match {
        case Success(v) => v
        case Failure(e) => e.getMessage
      }
      (kv.getKey, r)
    }
    output should contain theSameElementsAs (1 to 10).map { x =>
      val prefix = if (x % 2 == 0) "success" else "failure"
      (x, prefix + x.toString)
    }
  }
}

object BigtableDoFnTest {
  val queue: ConcurrentLinkedQueue[Int] = new ConcurrentLinkedQueue[Int]()
}

class TestBigtableDoFn extends BigtableDoFn[Int, String](null) {
  override def newClient(): BigtableSession = null
  override def asyncLookup(session: BigtableSession, input: Int): ListenableFuture[String] =
    Futures.immediateFuture(input.toString)
}

class TestCachingBigtableDoFn extends BigtableDoFn[Int, String](null, 100, new TestCacheSupplier) {
  override def newClient(): BigtableSession = null
  override def asyncLookup(session: BigtableSession, input: Int): ListenableFuture[String] = {
    BigtableDoFnTest.queue.add(input)
    Futures.immediateFuture(input.toString)
  }
}

class TestFailingBigtableDoFn extends BigtableDoFn[Int, String](null) {
  override def newClient(): BigtableSession = null
  override def asyncLookup(session: BigtableSession, input: Int): ListenableFuture[String] =
    if (input % 2 == 0) {
      Futures.immediateFuture("success" + input)
    } else {
      Futures.immediateFailedFuture(new RuntimeException("failure" + input))
    }
}

class TestCacheSupplier extends CacheSupplier[Int, String, java.lang.Long] {
  override def createCache(): Cache[java.lang.Long, String] =
    CacheBuilder.newBuilder().build[java.lang.Long, String]()
  override def getKey(input: Int): java.lang.Long = input.toLong
}
Example 4
Source File: ObservedLoadingCacheSpecification.scala From Waves with MIT License
package com.wavesplatform.utils

import java.util.concurrent.TimeUnit
import java.util.concurrent.atomic.AtomicLong

import com.google.common.base.Ticker
import com.google.common.cache.{CacheBuilder, CacheLoader, LoadingCache}
import com.wavesplatform.utils.ObservedLoadingCacheSpecification.FakeTicker
import monix.execution.Ack
import monix.reactive.Observer
import org.scalamock.scalatest.MockFactory
import org.scalatest.{FreeSpec, Matchers}

import scala.jdk.CollectionConverters._
import scala.concurrent.Future
import scala.concurrent.duration.DurationInt

class ObservedLoadingCacheSpecification extends FreeSpec with Matchers with MockFactory {
  private val ExpiringTime = 10.minutes

  "notifies" - {
    "on refresh" in test { (loadingCache, changes, _) =>
      (changes.onNext _).expects("foo").returning(Future.successful(Ack.Continue)).once()

      loadingCache.refresh("foo")
    }

    "on put" in test { (loadingCache, changes, _) =>
      (changes.onNext _).expects("foo").returning(Future.successful(Ack.Continue)).once()

      loadingCache.put("foo", 10)
    }

    "on putAll" in test { (loadingCache, changes, _) =>
      (changes.onNext _).expects("foo").returning(Future.successful(Ack.Continue)).once()
      (changes.onNext _).expects("bar").returning(Future.successful(Ack.Continue)).once()

      loadingCache.putAll(Map[String, Integer]("foo" -> 10, "bar" -> 11).asJava)
    }

    "on invalidate" in test { (loadingCache, changes, _) =>
      (changes.onNext _).expects("foo").returning(Future.successful(Ack.Continue)).once()

      loadingCache.invalidate("foo")
    }

    "on invalidateAll" in test { (loadingCache, changes, _) =>
      (changes.onNext _).expects("foo").returning(Future.successful(Ack.Continue)).once()
      (changes.onNext _).expects("bar").returning(Future.successful(Ack.Continue)).once()

      loadingCache.invalidateAll(Seq("foo", "bar").asJava)
    }
  }

  "don't notify" - {
    "on cache expiration" in test { (loadingCache, changes, ticker) =>
      (changes.onNext _).expects("foo").returning(Future.successful(Ack.Continue)).once()
      loadingCache.put("foo", 1)
      ticker.advance(ExpiringTime.toMillis + 100, TimeUnit.MILLISECONDS)
    }
  }

  private def test(f: (LoadingCache[String, Integer], Observer[String], FakeTicker) => Unit): Unit = {
    val changes = mock[Observer[String]]
    val ticker  = new FakeTicker()

    val delegate = CacheBuilder
      .newBuilder()
      .expireAfterWrite(ExpiringTime.toMillis, TimeUnit.MILLISECONDS)
      .ticker(ticker)
      .build(new CacheLoader[String, Integer] {
        override def load(key: String): Integer = key.length
      })

    val loadingCache = new ObservedLoadingCache(delegate, changes)
    f(loadingCache, changes, ticker)
  }
}

private object ObservedLoadingCacheSpecification {

  // see https://github.com/google/guava/blob/master/guava-testlib/src/com/google/common/testing/FakeTicker.java
  class FakeTicker extends Ticker {
    private val nanos                  = new AtomicLong()
    private var autoIncrementStepNanos = 0L

    def advance(time: Long, timeUnit: TimeUnit): FakeTicker = advance(timeUnit.toNanos(time))

    def advance(nanoseconds: Long): FakeTicker = {
      nanos.addAndGet(nanoseconds)
      this
    }

    def setAutoIncrementStep(autoIncrementStep: Long, timeUnit: TimeUnit): FakeTicker = {
      require(autoIncrementStep >= 0, "May not auto-increment by a negative amount")
      this.autoIncrementStepNanos = timeUnit.toNanos(autoIncrementStep)
      this
    }

    override def read: Long = nanos.getAndAdd(autoIncrementStepNanos)
  }
}
Example 5
Source File: LegacyFrameCodec.scala From Waves with MIT License
package com.wavesplatform.network

import java.util

import com.google.common.cache.CacheBuilder
import com.wavesplatform.block.Block
import com.wavesplatform.common.utils.Base64
import com.wavesplatform.crypto
import com.wavesplatform.network.message.Message._
import com.wavesplatform.transaction.Transaction
import com.wavesplatform.utils.ScorexLogging
import io.netty.buffer.ByteBuf
import io.netty.buffer.Unpooled._
import io.netty.channel.ChannelHandlerContext
import io.netty.handler.codec.{ByteToMessageCodec, DecoderException}

import scala.concurrent.duration.FiniteDuration
import scala.util.control.NonFatal

class LegacyFrameCodec(peerDatabase: PeerDatabase, receivedTxsCacheTimeout: FiniteDuration)
    extends ByteToMessageCodec[Any]
    with ScorexLogging {

  import BasicMessagesRepo.specsByCodes
  import LegacyFrameCodec._

  private val receivedTxsCache = CacheBuilder
    .newBuilder()
    .expireAfterWrite(receivedTxsCacheTimeout.length, receivedTxsCacheTimeout.unit)
    .build[String, Object]()

  override def exceptionCaught(ctx: ChannelHandlerContext, cause: Throwable): Unit = cause match {
    case e: DecoderException => peerDatabase.blacklistAndClose(ctx.channel(), s"Corrupted message frame: $e")
    case _                   => super.exceptionCaught(ctx, cause)
  }

  override def decode(ctx: ChannelHandlerContext, in: ByteBuf, out: util.List[AnyRef]): Unit =
    try {
      require(in.readInt() == Magic, "invalid magic number")

      val code = in.readByte()
      require(specsByCodes.contains(code), s"Unexpected message code $code")

      val spec   = specsByCodes(code)
      val length = in.readInt()
      require(length <= spec.maxLength, s"${spec.messageName} message length $length exceeds ${spec.maxLength}")

      val dataBytes = new Array[Byte](length)
      val pushToPipeline = length == 0 || {
        val declaredChecksum = in.readSlice(ChecksumLength)
        in.readBytes(dataBytes)
        val rawChecksum    = crypto.fastHash(dataBytes)
        val actualChecksum = wrappedBuffer(rawChecksum, 0, ChecksumLength)

        require(declaredChecksum.equals(actualChecksum), "invalid checksum")
        actualChecksum.release()

        spec != TransactionSpec || {
          val actualChecksumStr = Base64.encode(rawChecksum)
          if (receivedTxsCache.getIfPresent(actualChecksumStr) == null) {
            receivedTxsCache.put(actualChecksumStr, LegacyFrameCodec.dummy)
            true
          } else false
        }
      }

      if (pushToPipeline) out.add(RawBytes(code, dataBytes))
    } catch {
      case NonFatal(e) =>
        log.warn(s"${id(ctx)} Malformed network message", e)
        peerDatabase.blacklistAndClose(ctx.channel(), s"Malformed network message: $e")
        in.resetReaderIndex() // Cancels subsequent read tries, see Netty decode() documentation
    }

  override def encode(ctx: ChannelHandlerContext, msg1: Any, out: ByteBuf): Unit = {
    val msg = msg1 match {
      case rb: RawBytes           => rb
      case tx: Transaction        => RawBytes.fromTransaction(tx)
      case block: Block           => RawBytes.fromBlock(block)
      case mb: MicroBlockResponse => RawBytes.fromMicroBlock(mb)
    }

    out.writeInt(Magic)
    out.writeByte(msg.code)
    if (msg.data.length > 0) {
      out.writeInt(msg.data.length)
      out.writeBytes(crypto.fastHash(msg.data), 0, ChecksumLength)
      out.writeBytes(msg.data)
    } else {
      out.writeInt(0)
    }
  }
}

object LegacyFrameCodec {
  val Magic = 0x12345678

  private val dummy = new Object()
}
Example 6
Source File: LockManager.scala From metronome with Apache License 2.0
package dcos.metronome.utils.state

import com.google.common.cache.{LoadingCache, CacheLoader, CacheBuilder}
import java.util.concurrent.Semaphore

import scala.concurrent.{ExecutionContext, Future}

trait LockManager {
  def executeSequentially[T](key: String)(future: => Future[T])(implicit ec: ExecutionContext): Future[T]
}

object LockManager {

  def create(): LockManager =
    new LockManager {
      val locks = loadingCache[String]()

      override def executeSequentially[T](
          key: String
      )(future: => Future[T])(implicit ec: ExecutionContext): Future[T] = {
        val lock = locks.get(key)
        scala.concurrent.blocking {
          lock.acquire()
        }
        val result = future
        result.onComplete { _ => lock.release() }
        result
      }
    }

  private[this] def loadingCache[A <: AnyRef](): LoadingCache[A, Semaphore] = {
    CacheBuilder
      .newBuilder()
      .weakValues()
      .build[A, Semaphore](new CacheLoader[A, Semaphore] {
        override def load(key: A): Semaphore = new Semaphore(1)
      })
  }
}
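A minimal usage sketch of the LockManager trait above: futures created under the same key run one at a time, because a per-key Semaphore (held in a weak-valued Guava cache) is acquired before the future body is evaluated. The key string and the future body are illustrative assumptions, not part of the Metronome code.

import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.Future

import dcos.metronome.utils.state.LockManager

object LockManagerUsage {
  val lockManager: LockManager = LockManager.create()

  // Calls with the same key are serialized; calls with different keys run independently.
  def writeState(): Future[Unit] =
    lockManager.executeSequentially("some-key") {
      Future { () /* mutate shared state here */ }
    }
}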
Example 7
Source File: LshFunctionCache.scala From elastiknn with Apache License 2.0
package com.klibisz.elastiknn.query

import java.time.Duration

import com.google.common.cache.{CacheBuilder, CacheLoader, LoadingCache}
import com.klibisz.elastiknn.api.{Mapping, Vec}
import com.klibisz.elastiknn.models.LshFunction
import com.klibisz.elastiknn.storage.StoredVec

// The Lsh Functions tend to be expensive to instantiate (i.e. initializing hashing parameters), hence a cache.
sealed trait LshFunctionCache[M <: Mapping, V <: Vec, S <: StoredVec] extends (M => LshFunction[M, V, S]) { self =>

  private val cache: LoadingCache[M, LshFunction[M, V, S]] = CacheBuilder.newBuilder
    .expireAfterWrite(Duration.ofSeconds(60))
    .build(new CacheLoader[M, LshFunction[M, V, S]] {
      override def load(m: M): LshFunction[M, V, S] = self.load(m)
    })

  override final def apply(mapping: M): LshFunction[M, V, S] = cache.get(mapping)

  protected def load(m: M): LshFunction[M, V, S]
}

object LshFunctionCache {
  implicit object Jaccard extends LshFunctionCache[Mapping.JaccardLsh, Vec.SparseBool, StoredVec.SparseBool] {
    def load(m: Mapping.JaccardLsh): LshFunction[Mapping.JaccardLsh, Vec.SparseBool, StoredVec.SparseBool] =
      new LshFunction.Jaccard(m)
  }
  implicit object Hamming extends LshFunctionCache[Mapping.HammingLsh, Vec.SparseBool, StoredVec.SparseBool] {
    def load(m: Mapping.HammingLsh): LshFunction[Mapping.HammingLsh, Vec.SparseBool, StoredVec.SparseBool] =
      new LshFunction.Hamming(m)
  }
  implicit object Angular extends LshFunctionCache[Mapping.AngularLsh, Vec.DenseFloat, StoredVec.DenseFloat] {
    def load(m: Mapping.AngularLsh): LshFunction[Mapping.AngularLsh, Vec.DenseFloat, StoredVec.DenseFloat] =
      new LshFunction.Angular(m)
  }
  implicit object L2 extends LshFunctionCache[Mapping.L2Lsh, Vec.DenseFloat, StoredVec.DenseFloat] {
    def load(m: Mapping.L2Lsh): LshFunction[Mapping.L2Lsh, Vec.DenseFloat, StoredVec.DenseFloat] =
      new LshFunction.L2(m)
  }
}
Example 8
Source File: PrepareStatementCache.scala From quill with Apache License 2.0
package io.getquill.context.cassandra

import java.util.concurrent.Callable

import com.google.common.base.Charsets
import com.google.common.cache.CacheBuilder
import com.google.common.hash.Hashing

class PrepareStatementCache[V <: AnyRef](size: Long) {

  private val cache =
    CacheBuilder
      .newBuilder
      .maximumSize(size)
      .build[java.lang.Long, V]()

  private val hasher = Hashing.goodFastHash(128)

  def apply(stmt: String)(prepare: String => V): V = {
    cache.get(
      hash(stmt),
      new Callable[V] {
        override def call: V = prepare(stmt)
      }
    )
  }

  def invalidate(stmt: String): Unit = cache.invalidate(hash(stmt))

  private def hash(string: String): java.lang.Long = {
    hasher
      .hashString(string, Charsets.UTF_8)
      .asLong()
  }
}
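A minimal usage sketch of the cache above: the cache key is a 128-bit hash of the statement text reduced to a Long, so long CQL strings are not retained as map keys. The value type is generic; a plain String value and a stand-in prepare function are assumed here for illustration.

import io.getquill.context.cassandra.PrepareStatementCache

object PrepareStatementCacheUsage {
  val cache = new PrepareStatementCache[String](size = 100)

  def prepared(cql: String): String =
    cache(cql) { stmt =>
      // In the real Cassandra context this callback would prepare the statement
      // against a session; here the statement text itself is returned as a stand-in.
      stmt
    }
}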
Example 9
Source File: L1Cache.scala From CM-Well with Apache License 2.0
package cmwell.zcache

import java.util.concurrent.TimeUnit

import com.google.common.cache.{Cache, CacheBuilder}

import scala.concurrent.{ExecutionContext, Future}
import scala.util.{Failure, Success}

object L1Cache {

  def memoize[K, V](task: K => Future[V])(
    digest: K => String,
    isCachable: V => Boolean = (_: V) => true
  )(l1Size: Int = 1024, ttlSeconds: Int = 10)(implicit ec: ExecutionContext): K => Future[V] = {
    val cache: Cache[String, Future[V]] =
      CacheBuilder.newBuilder().maximumSize(l1Size).expireAfterWrite(ttlSeconds, TimeUnit.SECONDS).build()
    (input: K) => {
      val key = digest(input)
      Option(cache.getIfPresent(key)) match {
        case Some(cachedFuture) => cachedFuture.asInstanceOf[Future[V]]
        case None => {
          val fut = task(input)
          cache.put(key, fut)
          fut.andThen {
            case Success(v) => if (!isCachable(v)) cache.invalidate(key)
            case Failure(_) =>
              cache.getIfPresent(key) match {
                case `fut` => cache.invalidate(key)
                case _     => //Do Nothing
              }
          }
        }
      }
    }
  }

  def memoizeWithCache[K, V](task: K => Future[V])(digest: K => String, isCachable: V => Boolean = (_: V) => true)(
    cache: Cache[String, Future[V]]
  )(implicit ec: ExecutionContext): K => Future[V] = { (input: K) =>
    {
      val key = digest(input)
      Option(cache.getIfPresent(key)).getOrElse {
        val fut = task(input)
        cache.put(key, fut)
        fut.andThen {
          case Success(v) =>
            if (!isCachable(v)) cache.getIfPresent(key) match {
              case `fut` => cache.invalidate(key)
              case _     => //Do Nothing
            }
          case Failure(_) =>
            cache.getIfPresent(key) match {
              case `fut` => cache.invalidate(key)
              case _     => //Do Nothing
            }
        }
      }
    }
  }
}
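A minimal usage sketch of L1Cache.memoize: the memoized function returns the same in-flight Future for repeated keys within the TTL, and failed or non-cachable results are invalidated. The lookup function, digest, and sizing parameters are illustrative assumptions.

import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.Future

import cmwell.zcache.L1Cache

object L1CacheUsage {
  // Hypothetical expensive lookup to be memoized; the digest simply reuses the key.
  def fetchUser(id: String): Future[String] = Future(s"user-$id")

  val cachedFetchUser: String => Future[String] =
    L1Cache.memoize(fetchUser)(digest = identity)(l1Size = 512, ttlSeconds = 30)

  // Repeated calls with the same id within the TTL share the same Future.
  val first  = cachedFetchUser("42")
  val second = cachedFetchUser("42")
}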
Example 10
Source File: JwksVerifier.scala From daml with Apache License 2.0
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.jwt

import java.net.{URI, URL}
import java.security.interfaces.RSAPublicKey
import java.util.concurrent.TimeUnit

import com.auth0.jwk.UrlJwkProvider
import com.daml.jwt.JwtVerifier.Error
import com.google.common.cache.{Cache, CacheBuilder}
import scalaz.{-\/, Show, \/}
import scalaz.syntax.show._

// NOTE: the enclosing class declaration (class JwksVerifier(url: URL, ...)), its `cache` field
// and its getVerifier(keyId) helper are not included in this excerpt.

  private[this] def getCachedVerifier(keyId: String): Error \/ JwtVerifier = {
    if (keyId == null) -\/(Error('getCachedVerifier, "No Key ID found"))
    else
      \/.fromTryCatchNonFatal(
        cache.get(keyId, () => getVerifier(keyId).fold(e => sys.error(e.shows), x => x))
      ).leftMap(e => Error('getCachedVerifier, e.getMessage))
  }

  def verify(jwt: domain.Jwt): Error \/ domain.DecodedJwt[String] = {
    for {
      keyId <- \/.fromTryCatchNonFatal(com.auth0.jwt.JWT.decode(jwt.value).getKeyId)
        .leftMap(e => Error('verify, e.getMessage))
      verifier <- getCachedVerifier(keyId)
      decoded  <- verifier.verify(jwt)
    } yield decoded
  }
}

object JwksVerifier {
  def apply(url: String) = new JwksVerifier(new URI(url).toURL)

  final case class Error(what: Symbol, message: String)

  object Error {
    implicit val showInstance: Show[Error] =
      Show.shows(e => s"JwksVerifier.Error: ${e.what}, ${e.message}")
  }
}
Example 11
Source File: DateTimeFormatterHelper.scala From XSQL with Apache License 2.0
package org.apache.spark.sql.catalyst.util

import java.time._
import java.time.chrono.IsoChronology
import java.time.format.{DateTimeFormatter, DateTimeFormatterBuilder, ResolverStyle}
import java.time.temporal.{ChronoField, TemporalAccessor, TemporalQueries}
import java.util.Locale

import com.google.common.cache.CacheBuilder

import org.apache.spark.sql.catalyst.util.DateTimeFormatterHelper._

trait DateTimeFormatterHelper {
  protected def toInstantWithZoneId(temporalAccessor: TemporalAccessor, zoneId: ZoneId): Instant = {
    val localTime = if (temporalAccessor.query(TemporalQueries.localTime) == null) {
      LocalTime.ofNanoOfDay(0)
    } else {
      LocalTime.from(temporalAccessor)
    }
    val localDate = LocalDate.from(temporalAccessor)
    val localDateTime = LocalDateTime.of(localDate, localTime)
    val zonedDateTime = ZonedDateTime.of(localDateTime, zoneId)
    Instant.from(zonedDateTime)
  }

  // Gets a formatter from the cache or creates new one. The buildFormatter method can be called
  // a few times with the same parameters in parallel if the cache does not contain values
  // associated to those parameters. Since the formatter is immutable, it does not matter.
  // In this way, synchronised is intentionally omitted in this method to make parallel calls
  // less synchronised.
  // The Cache.get method is not used here to avoid creation of additional instances of Callable.
  protected def getOrCreateFormatter(pattern: String, locale: Locale): DateTimeFormatter = {
    val key = (pattern, locale)
    var formatter = cache.getIfPresent(key)
    if (formatter == null) {
      formatter = buildFormatter(pattern, locale)
      cache.put(key, formatter)
    }
    formatter
  }
}

private object DateTimeFormatterHelper {
  val cache = CacheBuilder.newBuilder()
    .maximumSize(128)
    .build[(String, Locale), DateTimeFormatter]()

  def buildFormatter(pattern: String, locale: Locale): DateTimeFormatter = {
    new DateTimeFormatterBuilder()
      .parseCaseInsensitive()
      .appendPattern(pattern)
      .parseDefaulting(ChronoField.ERA, 1)
      .parseDefaulting(ChronoField.MONTH_OF_YEAR, 1)
      .parseDefaulting(ChronoField.DAY_OF_MONTH, 1)
      .parseDefaulting(ChronoField.MINUTE_OF_HOUR, 0)
      .parseDefaulting(ChronoField.SECOND_OF_MINUTE, 0)
      .toFormatter(locale)
      .withChronology(IsoChronology.INSTANCE)
      .withResolverStyle(ResolverStyle.STRICT)
  }
}
Example 12
Source File: ConfluentSchemaRegistry.scala From hydra with Apache License 2.0
package hydra.avro.registry

import com.google.common.cache.{CacheBuilder, CacheLoader}
import com.typesafe.config.{Config, ConfigFactory}
import hydra.common.logging.LoggingAdapter
import io.confluent.kafka.schemaregistry.client.{
  CachedSchemaRegistryClient,
  MockSchemaRegistryClient,
  SchemaMetadata,
  SchemaRegistryClient
}

import scala.collection.JavaConverters._
import scala.concurrent.{ExecutionContext, Future}

case class ConfluentSchemaRegistry(
    registryClient: SchemaRegistryClient,
    registryUrl: String
) extends SchemaRegistryComponent {

  def getAllSubjects()(implicit ec: ExecutionContext): Future[Seq[String]] =
    Future(
      registryClient
        .getAllSubjects()
        .asScala
        .map(s => if (s.endsWith("-value")) s.dropRight(6) else s)
        .toSeq
    )

  def getById(id: Int, suffix: String = "-value")(
      implicit ec: ExecutionContext
  ): Future[SchemaMetadata] = Future {
    val schema = registryClient.getById(id)
    val subject = schema.getFullName + suffix
    registryClient.getLatestSchemaMetadata(subject)
  }
}

object ConfluentSchemaRegistry extends LoggingAdapter {

  import hydra.common.config.ConfigSupport._

  case class SchemaRegistryClientInfo(
      url: String,
      schemaRegistryMaxCapacity: Int
  )

  private val cachedClients = CacheBuilder
    .newBuilder()
    .build(
      new CacheLoader[SchemaRegistryClientInfo, ConfluentSchemaRegistry] {
        def load(info: SchemaRegistryClientInfo): ConfluentSchemaRegistry = {
          log.debug(s"Creating new schema registry client for ${info.url}")
          val client = if (info.url == "mock") {
            mockRegistry
          } else {
            new CachedSchemaRegistryClient(
              info.url,
              info.schemaRegistryMaxCapacity
            )
          }
          ConfluentSchemaRegistry(client, info.url)
        }
      }
    )

  val mockRegistry = new MockSchemaRegistryClient()

  def registryUrl(config: Config): String =
    config.getStringOpt("schema.registry.url")
      .getOrElse(throw new IllegalArgumentException("A schema registry url is required."))

  def forConfig(
      config: Config = ConfigFactory.load()
  ): ConfluentSchemaRegistry = {
    val identityMapCapacity =
      config.getIntOpt("max.schemas.per.subject").getOrElse(1000)
    cachedClients.get(
      SchemaRegistryClientInfo(registryUrl(config), identityMapCapacity)
    )
  }
}
Example 13
Source File: CacheableRPCInterceptor.scala From Linkis with Apache License 2.0
package com.webank.wedatasphere.linkis.rpc.interceptor.common

import java.util.concurrent.{Callable, TimeUnit}

import com.google.common.cache.{Cache, CacheBuilder, RemovalListener, RemovalNotification}
import com.webank.wedatasphere.linkis.common.exception.WarnException
import com.webank.wedatasphere.linkis.common.utils.Logging
import com.webank.wedatasphere.linkis.protocol.CacheableProtocol
import com.webank.wedatasphere.linkis.rpc.interceptor.{RPCInterceptor, RPCInterceptorChain, RPCInterceptorExchange}
import org.springframework.stereotype.Component

@Component
class CacheableRPCInterceptor extends RPCInterceptor with Logging {

  private val guavaCache: Cache[Any, Any] = CacheBuilder.newBuilder().concurrencyLevel(5)
    .expireAfterAccess(120000, TimeUnit.MILLISECONDS).initialCapacity(20) // TODO Make parameters (make these values configurable)
    .maximumSize(1000).recordStats().removalListener(new RemovalListener[Any, Any] {
      override def onRemoval(removalNotification: RemovalNotification[Any, Any]): Unit = {
        debug(s"CacheSender removed key => ${removalNotification.getKey}, value => ${removalNotification.getValue}.")
      }
    }).asInstanceOf[CacheBuilder[Any, Any]].build()

  override val order: Int = 10

  override def intercept(interceptorExchange: RPCInterceptorExchange, chain: RPCInterceptorChain): Any = interceptorExchange.getProtocol match {
    case cacheable: CacheableProtocol =>
      guavaCache.get(cacheable.toString, new Callable[Any] {
        override def call(): Any = {
          val returnMsg = chain.handle(interceptorExchange)
          returnMsg match {
            case warn: WarnException =>
              throw warn
            case _ => returnMsg
          }
        }
      })
    case _ => chain.handle(interceptorExchange)
  }
}
Example 14
Source File: BlockchainCache.scala From matcher with MIT License
package com.wavesplatform.dex.grpc.integration.caches

import java.time.Duration

import com.google.common.cache.{CacheBuilder, CacheLoader, LoadingCache}
import com.wavesplatform.dex.domain.utils.ScorexLogging

import scala.concurrent.{ExecutionContext, Future}
import scala.util.{Failure, Success}

abstract class BlockchainCache[K <: AnyRef, V <: AnyRef](loader: K => Future[V],
                                                         expiration: Option[Duration],
                                                         invalidationPredicate: V => Boolean)(implicit ec: ExecutionContext)
    extends ScorexLogging {

  lazy private val cache: LoadingCache[K, Future[V]] = {
    val builder = CacheBuilder.newBuilder
    expiration
      .fold(builder)(builder.expireAfterWrite)
      .build {
        new CacheLoader[K, Future[V]] {
          override def load(key: K): Future[V] = loader(key) andThen {
            case Success(value) if invalidationPredicate(value) =>
              cache.invalidate(key) // value may persist for a little longer than expected due to the fact that all the threads in the EC may be busy
            case Failure(exception) => log.error(s"Error while value loading occurred: ", exception); cache.invalidate(key)
          }
        }
      }
  }

  def get(key: K): Future[V] = cache.get(key)

  def put(key: K, value: Future[V]): Unit = cache.put(key, value)
}

object BlockchainCache {
  def noCustomInvalidationLogic[V](value: V): Boolean = false
}
Example 15
Source File: RankingCounter.scala From incubator-s2graph with Apache License 2.0
package org.apache.s2graph.counter.core

import java.util.concurrent.TimeUnit

import com.google.common.cache.{CacheBuilder, CacheLoader, LoadingCache}
import com.typesafe.config.Config
import org.apache.s2graph.counter.core.RankingCounter.RankingValueMap
import org.apache.s2graph.counter.models.Counter
import org.apache.s2graph.counter.util.{CollectionCacheConfig, CollectionCache}
import org.slf4j.LoggerFactory

import scala.collection.JavaConversions._

case class RankingRow(key: RankingKey, value: Map[String, RankingValue])
case class RateRankingRow(key: RankingKey, value: Map[String, RateRankingValue])

class RankingCounter(config: Config, storage: RankingStorage) {
  private val log = LoggerFactory.getLogger(getClass)

  val storageStatusCache = new CollectionCache[Option[Boolean]](CollectionCacheConfig(1000, 60, negativeCache = false, 60))

  val cache: LoadingCache[RankingKey, RankingResult] = CacheBuilder.newBuilder()
    .maximumSize(1000000)
    .expireAfterWrite(10l, TimeUnit.MINUTES)
    .build(
      new CacheLoader[RankingKey, RankingResult]() {
        def load(rankingKey: RankingKey): RankingResult = {
          // log.warn(s"cache load: $rankingKey")
          storage.getTopK(rankingKey, Int.MaxValue).getOrElse(RankingResult(-1, Nil))
        }
      }
    )

  def getTopK(rankingKey: RankingKey, k: Int = Int.MaxValue): Option[RankingResult] = {
    val tq = rankingKey.eq.tq
    if (TimedQualifier.getQualifiers(Seq(tq.q), System.currentTimeMillis()).head == tq) {
      // do not use cache
      storage.getTopK(rankingKey, k)
    }
    else {
      val result = cache.get(rankingKey)
      if (result.values.nonEmpty) {
        Some(result.copy(values = result.values.take(k)))
      }
      else {
        None
      }
    }
  }

  def update(key: RankingKey, value: RankingValueMap, k: Int): Unit = {
    storage.update(key, value, k)
  }

  def update(values: Seq[(RankingKey, RankingValueMap)], k: Int): Unit = {
    storage.update(values, k)
  }

  def delete(key: RankingKey): Unit = {
    storage.delete(key)
  }

  def getAllItems(keys: Seq[RankingKey], k: Int = Int.MaxValue): Seq[String] = {
    val oldKeys = keys.filter(key => TimedQualifier.getQualifiers(Seq(key.eq.tq.q), System.currentTimeMillis()).head != key.eq.tq)
    val cached = cache.getAllPresent(oldKeys)
    val missed = keys.diff(cached.keys.toSeq)
    val found = storage.getTopK(missed, k)

    // log.warn(s"cached: ${cached.size()}, missed: ${missed.size}")

    for {
      (key, result) <- found
    } {
      cache.put(key, result)
    }

    for {
      (key, RankingResult(totalScore, values)) <- cached ++ found
      (item, score) <- values
    } yield {
      item
    }
  }.toSeq.distinct

  def prepare(policy: Counter): Unit = {
    storage.prepare(policy)
  }

  def destroy(policy: Counter): Unit = {
    storage.destroy(policy)
  }

  def ready(policy: Counter): Boolean = {
    storageStatusCache.withCache(s"${policy.id}") {
      Some(storage.ready(policy))
    }.getOrElse(false)
  }
}

object RankingCounter {
  type RankingValueMap = Map[String, RankingValue]
}
Example 16
Source File: ExactQualifier.scala From incubator-s2graph with Apache License 2.0
package org.apache.s2graph.counter.core

import java.util

import com.google.common.cache.{CacheBuilder, CacheLoader, LoadingCache}
import org.apache.s2graph.counter.core.TimedQualifier.IntervalUnit.IntervalUnit

import scala.collection.JavaConversions._

case class ExactQualifier(tq: TimedQualifier, dimKeyValues: Map[String, String], dimension: String) {
  def checkDimensionEquality(dimQuery: Map[String, Set[String]]): Boolean = {
    // println(s"self: $dimKeyValues, query: $dimQuery")
    dimQuery.size == dimKeyValues.size && {
      for {
        (k, v) <- dimKeyValues
      } yield {
        dimQuery.get(k).exists(qv => qv.isEmpty || qv.contains(v))
      }
    }.forall(x => x)
  }
}

object ExactQualifier {
  val cache: LoadingCache[String, Map[String, String]] = CacheBuilder.newBuilder()
    .maximumSize(10000)
    .build(
      new CacheLoader[String, Map[String, String]]() {
        def load(s: String): Map[String, String] = {
          strToDimensionMap(s)
        }
      }
    )

  def apply(tq: TimedQualifier, dimension: String): ExactQualifier = {
    ExactQualifier(tq, cache.get(dimension), dimension)
  }

  def apply(tq: TimedQualifier, dimKeyValues: Map[String, String]): ExactQualifier = {
    ExactQualifier(tq, dimKeyValues, makeDimensionStr(dimKeyValues))
  }

  def makeSortedDimension(dimKeyValues: Map[String, String]): Iterator[String] = {
    val sortedDimKeyValues = new util.TreeMap[String, String](dimKeyValues)
    sortedDimKeyValues.keysIterator ++ sortedDimKeyValues.valuesIterator
  }

  def makeDimensionStr(dimKeyValues: Map[String, String]): String = {
    makeSortedDimension(dimKeyValues).mkString(".")
  }

  def getQualifiers(intervals: Seq[IntervalUnit], ts: Long, dimKeyValues: Map[String, String]): Seq[ExactQualifier] = {
    for {
      tq <- TimedQualifier.getQualifiers(intervals, ts)
    } yield {
      ExactQualifier(tq, dimKeyValues, makeDimensionStr(dimKeyValues))
    }
  }

  def strToDimensionMap(dimension: String): Map[String, String] = {
    val dimSp = {
      val sp = dimension.split('.')
      if (dimension == ".") {
        Array("", "")
      } else if (dimension.nonEmpty && dimension.last == '.') {
        sp ++ Array("")
      } else {
        sp
      }
    }
    val dimKey = dimSp.take(dimSp.length / 2)
    val dimVal = dimSp.takeRight(dimSp.length / 2)
    dimKey.zip(dimVal).toMap
  }
}
Example 17
Source File: CollectionCache.scala From incubator-s2graph with Apache License 2.0
package org.apache.s2graph.counter.util

import java.net.InetAddress
import java.util.concurrent.TimeUnit

import com.google.common.cache.{Cache, CacheBuilder}
import org.slf4j.LoggerFactory

import scala.concurrent.{ExecutionContext, Future}
import scala.language.{postfixOps, reflectiveCalls}

case class CollectionCacheConfig(maxSize: Int, ttl: Int, negativeCache: Boolean = false, negativeTTL: Int = 600)

class CollectionCache[C <: { def nonEmpty: Boolean; def isEmpty: Boolean }](config: CollectionCacheConfig) {
  private val cache: Cache[String, C] = CacheBuilder.newBuilder()
    .expireAfterWrite(config.ttl, TimeUnit.SECONDS)
    .maximumSize(config.maxSize)
    .build[String, C]()

  // private lazy val cache = new SynchronizedLruMap[String, (C, Int)](config.maxSize)
  private lazy val className = this.getClass.getSimpleName

  private lazy val log = LoggerFactory.getLogger(this.getClass)
  val localHostname = InetAddress.getLocalHost.getHostName

  def size = cache.size
  val maxSize = config.maxSize

  // cache statistics
  def getStatsString: String = {
    s"$localHostname ${cache.stats().toString}"
  }

  def withCache(key: String)(op: => C): C = {
    Option(cache.getIfPresent(key)) match {
      case Some(r) => r
      case None =>
        val r = op
        if (r.nonEmpty || config.negativeCache) {
          cache.put(key, r)
        }
        r
    }
  }

  def withCacheAsync(key: String)(op: => Future[C])(implicit ec: ExecutionContext): Future[C] = {
    Option(cache.getIfPresent(key)) match {
      case Some(r) => Future.successful(r)
      case None =>
        op.map { r =>
          if (r.nonEmpty || config.negativeCache) {
            cache.put(key, r)
          }
          r
        }
    }
  }

  def purgeKey(key: String) = {
    cache.invalidate(key)
  }

  def contains(key: String): Boolean = {
    Option(cache.getIfPresent(key)).nonEmpty
  }
}
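A minimal usage sketch of CollectionCache: any result type exposing nonEmpty and isEmpty (here Seq[String]) satisfies the structural bound, and empty results are only cached when negativeCache is enabled. The key and the lookup body are illustrative assumptions.

import org.apache.s2graph.counter.util.{CollectionCache, CollectionCacheConfig}

object CollectionCacheUsage {
  val cache = new CollectionCache[Seq[String]](CollectionCacheConfig(maxSize = 1000, ttl = 60))

  def members(group: String): Seq[String] =
    cache.withCache(group) {
      Seq("alice", "bob") // stand-in for an expensive lookup
    }
}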
Example 18
Source File: JWTAuthenticatorRepository.scala From crm-seed with Apache License 2.0
package com.dataengi.crm.identities.repositories

import com.dataengi.crm.identities.daos.JWTAuthenticatorDAO
import com.dataengi.crm.identities.models.JWTAuthenticatorData
import com.google.common.cache.CacheBuilder
import com.google.inject.{Inject, Singleton}
import com.mohiva.play.silhouette.api.crypto.AuthenticatorEncoder
import com.mohiva.play.silhouette.api.repositories.AuthenticatorRepository
import com.mohiva.play.silhouette.impl.authenticators.{JWTAuthenticator, JWTAuthenticatorSettings}

import scala.collection.concurrent.TrieMap
import scala.concurrent.{ExecutionContext, Future}
import scalacache.guava.GuavaCache

trait JWTAuthenticatorRepository extends AuthenticatorRepository[JWTAuthenticator]

@Singleton
class JWTAuthenticatorRepositoryInMemoryImplementation @Inject()(implicit val executionContext: ExecutionContext)
    extends JWTAuthenticatorRepository {

  protected val repository = TrieMap[String, JWTAuthenticator]()

  override def find(id: String): Future[Option[JWTAuthenticator]] = Future {
    repository.get(id)
  }

  override def add(authenticator: JWTAuthenticator): Future[JWTAuthenticator] = Future {
    repository.put(authenticator.id, authenticator)
    authenticator
  }

  override def update(authenticator: JWTAuthenticator): Future[JWTAuthenticator] = Future {
    repository.update(authenticator.id, authenticator)
    authenticator
  }

  override def remove(id: String): Future[Unit] = Future {
    repository.remove(id)
  }
}

@Singleton
class JWTAuthenticatorSerializableRepositoryImplementation @Inject()(implicit val executionContext: ExecutionContext,
                                                                     authenticatorDAO: JWTAuthenticatorDAO,
                                                                     authenticatorEncoder: AuthenticatorEncoder,
                                                                     conf: JWTAuthenticatorSettings)
    extends JWTAuthenticatorRepository {

  override def find(id: String): Future[Option[JWTAuthenticator]] =
    authenticatorDAO
      .getOption(id)
      .map(_.map(data => JWTAuthenticator.unserialize(data.authenticator, authenticatorEncoder, conf).get))

  override def add(authenticator: JWTAuthenticator): Future[JWTAuthenticator] =
    for {
      data      <- serializeData(authenticator)
      addResult <- authenticatorDAO.add(data)
    } yield authenticator

  private def serializeData(authenticator: JWTAuthenticator): Future[JWTAuthenticatorData] = {
    Future {
      val serializedData = JWTAuthenticator.serialize(authenticator, authenticatorEncoder, conf)
      JWTAuthenticatorData(serializedData, authenticator.id)
    }
  }

  override def update(authenticator: JWTAuthenticator): Future[JWTAuthenticator] =
    for {
      updatedAuthenticator <- authenticatorDAO.get(authenticator.id)
      data                 <- serializeData(authenticator)
      updateResult         <- authenticatorDAO.update(data.copy(id = updatedAuthenticator.id))
    } yield authenticator

  override def remove(id: String): Future[Unit] = authenticatorDAO.delete(id)
}

@Singleton
class JWTAuthenticatorCacheRepositoryImplementation @Inject()(implicit val executionContext: ExecutionContext)
    extends JWTAuthenticatorRepository {

  import scalacache._

  val underlyingGuavaCache = CacheBuilder.newBuilder().maximumSize(10000L).build[String, Object]
  implicit val scalaCache  = ScalaCache(GuavaCache(underlyingGuavaCache))
  val cache                = typed[JWTAuthenticator, NoSerialization]

  override def find(id: String): Future[Option[JWTAuthenticator]] = cache.get(id)

  override def add(authenticator: JWTAuthenticator): Future[JWTAuthenticator] =
    cache.put(authenticator.id)(authenticator).map(_ => authenticator)

  override def update(authenticator: JWTAuthenticator): Future[JWTAuthenticator] =
    cache.put(authenticator.id)(authenticator).map(_ => authenticator)

  override def remove(id: String): Future[Unit] = cache.remove(id)
}