cats.effect.Timer Scala Examples
The following examples show how to use cats.effect.Timer.
Each example is taken from an open-source project; the project name, source file, and license are noted above each snippet.
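As a quick orientation before the project examples, here is a minimal, self-contained sketch of the Timer[IO] API as it exists in cats-effect 2.x: a Timer provides a Clock (monotonic and wall-clock time) plus a non-blocking sleep. The object and method names in this sketch are illustrative and are not taken from any of the projects below.

import java.util.concurrent.TimeUnit

import cats.effect.{IO, Timer}
import cats.implicits._

import scala.concurrent.ExecutionContext
import scala.concurrent.duration._

object TimerBasics {
  // A Timer[IO] is usually built from an ExecutionContext (IOApp provides one automatically).
  implicit val timer: Timer[IO] = IO.timer(ExecutionContext.global)

  // Measure how long an action takes using the Timer's monotonic clock.
  def timed[A](fa: IO[A]): IO[(FiniteDuration, A)] =
    for {
      start <- timer.clock.monotonic(TimeUnit.NANOSECONDS)
      a <- fa
      end <- timer.clock.monotonic(TimeUnit.NANOSECONDS)
    } yield ((end - start).nanos, a)

  // sleep is semantic: the fiber is suspended, no thread is blocked.
  val program: IO[(FiniteDuration, String)] =
    timed(timer.sleep(500.millis) *> IO.pure("done"))
}

Inside an IOApp a Timer[IO] is already in implicit scope; IO.timer(ExecutionContext.global) is only needed in standalone code and tests, which is what most of the examples below do.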
Example 1
Source File: FOpsTest.scala From scala-server-toolkit with MIT License

package com.avast.sst.catseffect.syntax

import cats.effect.concurrent.Ref
import cats.effect.{Clock, IO, Timer}
import com.avast.sst.catseffect.syntax.time._
import org.scalatest.funsuite.AsyncFunSuite

import scala.concurrent.ExecutionContext
import scala.concurrent.duration.{Duration, TimeUnit}

class FOpsTest extends AsyncFunSuite {

  implicit private val timer: Timer[IO] = IO.timer(ExecutionContext.global)

  test("time") {
    val sleepTime = Duration.fromNanos(500000000)
    implicit val mockClock: Clock[IO] = new Clock[IO] {
      var values = List(0L, sleepTime.toNanos)

      override def monotonic(unit: TimeUnit): IO[Long] = {
        val time = values.head
        values = values.tail
        IO.pure(time)
      }

      override def realTime(unit: TimeUnit): IO[Long] = ???
    }

    val io = for {
      ref <- Ref.of[IO, Option[Duration]](None)
      _ <- IO.sleep(sleepTime).time(d => ref.set(Some(d)))
      result <- ref.get
    } yield assert(result.isDefined && result.get.toMillis === sleepTime.toMillis)

    io.unsafeToFuture()
  }
}
Example 2
Source File: Fs2KafkaModule.scala From scala-server-toolkit with MIT License

package com.avast.sst.fs2kafka

import cats.effect.{Blocker, ConcurrentEffect, ContextShift, Resource, Timer}
import fs2.kafka._

object Fs2KafkaModule {

  def makeConsumer[F[_]: ConcurrentEffect: ContextShift: Timer, K: Deserializer[F, *], V: Deserializer[F, *]](
      config: ConsumerConfig,
      blocker: Option[Blocker] = None,
      createConsumer: Option[Map[String, String] => F[KafkaByteConsumer]] = None
  ): Resource[F, KafkaConsumer[F, K, V]] = {
    def setOpt[A](maybeValue: Option[A])(
        setter: (ConsumerSettings[F, K, V], A) => ConsumerSettings[F, K, V]
    )(initial: ConsumerSettings[F, K, V]): ConsumerSettings[F, K, V] =
      maybeValue match {
        case Some(value) => setter(initial, value)
        case None => initial
      }

    val settings = ConsumerSettings(implicitly[Deserializer[F, K]], implicitly[Deserializer[F, V]])
      .withBootstrapServers(config.bootstrapServers.mkString(","))
      .withGroupId(config.groupId)
      .pipe(setOpt(config.groupInstanceId)(_.withGroupInstanceId(_)))
      .pipe(setOpt(config.clientId)(_.withClientId(_)))
      .pipe(setOpt(config.clientRack)(_.withClientRack(_)))
      .withAutoOffsetReset(config.autoOffsetReset)
      .withEnableAutoCommit(config.enableAutoCommit)
      .withAutoCommitInterval(config.autoCommitInterval)
      .withAllowAutoCreateTopics(config.allowAutoCreateTopics)
      .withCloseTimeout(config.closeTimeout)
      .withCommitRecovery(config.commitRecovery)
      .withCommitTimeout(config.closeTimeout)
      .withDefaultApiTimeout(config.defaultApiTimeout)
      .withHeartbeatInterval(config.heartbeatInterval)
      .withIsolationLevel(config.isolationLevel)
      .withMaxPrefetchBatches(config.maxPrefetchBatches)
      .withPollInterval(config.pollInterval)
      .withPollTimeout(config.pollTimeout)
      .withMaxPollInterval(config.maxPollInterval)
      .withMaxPollRecords(config.maxPollRecords)
      .withRequestTimeout(config.requestTimeout)
      .withSessionTimeout(config.sessionTimeout)
      .pipe(setOpt(blocker)(_.withBlocker(_)))
      .withProperties(config.properties)
      .pipe(setOpt(createConsumer)(_.withCreateConsumer(_)))

    makeConsumer(settings)
  }

  def makeConsumer[F[_]: ConcurrentEffect: ContextShift: Timer, K, V](
      settings: ConsumerSettings[F, K, V]
  ): Resource[F, KafkaConsumer[F, K, V]] = consumerResource[F].using(settings)

  def makeProducer[F[_]: ConcurrentEffect: ContextShift, K: Serializer[F, *], V: Serializer[F, *]](
      config: ProducerConfig,
      blocker: Option[Blocker] = None,
      createProducer: Option[Map[String, String] => F[KafkaByteProducer]] = None
  ): Resource[F, KafkaProducer[F, K, V]] = {
    def setOpt[A](maybeValue: Option[A])(
        setter: (ProducerSettings[F, K, V], A) => ProducerSettings[F, K, V]
    )(initial: ProducerSettings[F, K, V]): ProducerSettings[F, K, V] =
      maybeValue match {
        case Some(value) => setter(initial, value)
        case None => initial
      }

    val settings = ProducerSettings(implicitly[Serializer[F, K]], implicitly[Serializer[F, V]])
      .withBootstrapServers(config.bootstrapServers.mkString(","))
      .pipe(setOpt(config.clientId)(_.withClientId(_)))
      .withAcks(config.acks)
      .withBatchSize(config.batchSize)
      .withCloseTimeout(config.closeTimeout)
      .withDeliveryTimeout(config.deliveryTimeout)
      .withRequestTimeout(config.requestTimeout)
      .withLinger(config.linger)
      .withEnableIdempotence(config.enableIdempotence)
      .withMaxInFlightRequestsPerConnection(config.maxInFlightRequestsPerConnection)
      .withParallelism(config.parallelism)
      .withRetries(config.retries)
      .pipe(setOpt(blocker)(_.withBlocker(_)))
      .withProperties(config.properties)
      .pipe(setOpt(createProducer)(_.withCreateProducer(_)))

    makeProducer(settings)
  }

  def makeProducer[F[_]: ConcurrentEffect: ContextShift, K, V](settings: ProducerSettings[F, K, V]): Resource[F, KafkaProducer[F, K, V]] =
    producerResource[F].using(settings)

  implicit private final class ChainingOps[A](private val self: A) extends AnyVal {
    def pipe[B](f: A => B): B = f(self)
  }
}
Example 3
Source File: CorrelationIdMiddlewareTest.scala From scala-server-toolkit with MIT License

package com.avast.sst.http4s.server.middleware

import java.net.InetSocketAddress

import cats.effect.{ContextShift, IO, Resource, Timer}
import com.avast.sst.http4s.server.Http4sRouting
import org.http4s.client.blaze.BlazeClientBuilder
import org.http4s.dsl.Http4sDsl
import org.http4s.server.blaze.BlazeServerBuilder
import org.http4s.util.CaseInsensitiveString
import org.http4s.{Header, HttpRoutes, Request, Uri}
import org.scalatest.funsuite.AsyncFunSuite

import scala.concurrent.ExecutionContext

@SuppressWarnings(Array("scalafix:Disable.get", "scalafix:Disable.toString", "scalafix:Disable.createUnresolved"))
class CorrelationIdMiddlewareTest extends AsyncFunSuite with Http4sDsl[IO] {

  implicit private val cs: ContextShift[IO] = IO.contextShift(ExecutionContext.global)
  implicit private val timer: Timer[IO] = IO.timer(ExecutionContext.global)

  test("CorrelationIdMiddleware fills Request attributes and HTTP response header") {
    val test = for {
      middleware <- Resource.liftF(CorrelationIdMiddleware.default[IO])
      routes = Http4sRouting.make {
        middleware.wrap {
          HttpRoutes.of[IO] {
            case req @ GET -> Root / "test" =>
              val id = middleware.retrieveCorrelationId(req)
              Ok("test").map(_.withHeaders(Header("Attribute-Value", id.toString)))
          }
        }
      }
      server <- BlazeServerBuilder[IO](ExecutionContext.global)
        .bindSocketAddress(InetSocketAddress.createUnresolved("127.0.0.1", 0))
        .withHttpApp(routes)
        .resource
      client <- BlazeClientBuilder[IO](ExecutionContext.global).resource
    } yield (server, client)

    test
      .use {
        case (server, client) =>
          client
            .run(
              Request[IO](uri = Uri.unsafeFromString(s"http://${server.address.getHostString}:${server.address.getPort}/test"))
                .withHeaders(Header("Correlation-Id", "test-value"))
            )
            .use { response =>
              IO.delay {
                assert(response.headers.get(CaseInsensitiveString("Correlation-Id")).get.value === "test-value")
                assert(response.headers.get(CaseInsensitiveString("Attribute-Value")).get.value === "Some(CorrelationId(test-value))")
              }
            }
      }
      .unsafeToFuture()
  }
}
Example 4
Source File: Http4sRpcServer.scala From iotchain with MIT License

package jbok.network.rpc.http

import cats.effect.{ConcurrentEffect, Resource, Sync, Timer}
import cats.implicits._
import io.circe.Json
import io.circe.syntax._
import jbok.network.rpc.{RpcRequest, RpcService}
import org.http4s.HttpRoutes
import org.http4s.circe.CirceEntityCodec._
import org.http4s.dsl.Http4sDsl
import org.http4s.implicits._
import org.http4s.server.Server
import org.http4s.server.blaze.BlazeServerBuilder

object Http4sRpcServer {
  def routes[F[_]](service: RpcService[F, Json])(implicit F: Sync[F]): HttpRoutes[F] = {
    val dsl = Http4sDsl[F]
    import dsl._

    HttpRoutes.of[F] {
      case req @ POST -> path =>
        for {
          json <- req.as[Json]
          result <- service.handle(RpcRequest(path.toList, json))
          resp <- Ok(result.asJson)
        } yield resp
    }
  }

  def server[F[_]](service: RpcService[F, Json])(implicit F: ConcurrentEffect[F], T: Timer[F]): Resource[F, Server[F]] =
    BlazeServerBuilder[F]
      .bindLocal(0)
      .withHttpApp(routes[F](service).orNotFound)
      .withWebSockets(true)
      .resource
}
Example 5
Source File: StoreUpdateService.scala From iotchain with MIT License

package jbok.app.service

import cats.data.OptionT
import cats.effect.{Sync, Timer}
import cats.implicits._
import fs2._
import jbok.app.service.store.{BlockStore, TransactionStore}
import jbok.common.log.Logger
import jbok.common.math.N
import jbok.core.ledger.History
import spire.compat._

import scala.concurrent.duration._

final class StoreUpdateService[F[_]](history: History[F], blockStore: BlockStore[F], txStore: TransactionStore[F])(implicit F: Sync[F], T: Timer[F]) {
  private[this] val log = Logger[F]

  def findForkPoint(start: N): F[N] =
    for {
      hash1 <- blockStore.getBlockHashByNumber(start)
      hash2 <- history.getHashByBlockNumber(start)
      number <- (hash1, hash2) match {
        case (Some(h1), Some(h2)) if h1 == h2 => F.pure(start)
        case (Some(_), Some(_)) => findForkPoint(start - 1)
        case _ => F.raiseError(new Exception(s"fatal error"))
      }
    } yield number

  private def delRange(start: N, end: N): F[Unit] =
    List.range(start, end + 1).traverse_ { number =>
      blockStore.delByBlockNumber(number) >> txStore.delByBlockNumber(number)
    }

  private def syncRange(start: N, end: N): F[Unit] =
    List.range(start, end + 1).traverse_ { number =>
      syncBlock(number) >> syncTransactions(number)
    }

  private def syncBlock(number: N): F[Unit] =
    for {
      header <- history.getBlockHeaderByNumber(number)
      _ <- header.fold(F.unit)(header => blockStore.insert(header.number, header.hash))
    } yield ()

  private def syncTransactions(number: N): F[Unit] =
    (for {
      hash <- OptionT(history.getHashByBlockNumber(number))
      block <- OptionT(history.getBlockByHash(hash))
      receipts <- OptionT(history.getReceiptsByHash(hash))
      _ <- OptionT.liftF(txStore.insertBlockTransactions(block, receipts))
    } yield ()).value.void

  def sync: F[Unit] =
    for {
      currentOpt <- blockStore.getBestBlockNumber
      fork <- currentOpt.fold(N(0).pure[F])(current => findForkPoint(current))
      best <- history.getBestBlockNumber
      _ <- log.i(s"current: ${fork}, best: ${best}")
      _ <- if (fork == best) {
        F.unit
      } else {
        delRange(fork, best) >> syncRange(fork, best)
      }
    } yield ()

  val stream: Stream[F, Unit] =
    Stream.eval(log.i(s"starting App/StoreUpdateService")) ++ Stream.repeatEval(sync).metered(10.seconds)
}
Example 6
Source File: HttpService.scala From iotchain with MIT License

package jbok.app.service

import cats.effect.{ConcurrentEffect, Resource, Timer}
import io.circe.Json
import cats.implicits._
import fs2._
import javax.net.ssl.SSLContext
import jbok.network.http.server.middleware.{CORSMiddleware, GzipMiddleware, LoggerMiddleware, MetricsMiddleware}
import jbok.core.config.ServiceConfig
import jbok.core.api._
import jbok.crypto.ssl.SSLConfig
import jbok.network.rpc.RpcService
import jbok.network.rpc.http.Http4sRpcServer
import org.http4s.HttpRoutes
import org.http4s.implicits._
import org.http4s.server.{SSLClientAuthMode, Server}
import org.http4s.server.blaze.BlazeServerBuilder

final class HttpService[F[_]](
    config: ServiceConfig,
    sslConfig: SSLConfig,
    account: AccountAPI[F],
    admin: AdminAPI[F],
    block: BlockAPI[F],
    contract: ContractAPI[F],
    miner: MinerAPI[F],
    personal: PersonalAPI[F],
    transaction: TransactionAPI[F],
    sslOpt: Option[SSLContext]
)(implicit F: ConcurrentEffect[F], T: Timer[F]) {
  import jbok.codec.impl.circe._
  import _root_.io.circe.generic.auto._
  import jbok.codec.json.implicits._

  val rpcService: RpcService[F, Json] = {
    var service = RpcService[F, Json]
    if (config.apis.contains("account")) service = service.mount(account) else ()
    if (config.apis.contains("admin")) service = service.mount(admin) else ()
    if (config.apis.contains("block")) service = service.mount(block) else ()
    if (config.apis.contains("contract")) service = service.mount(contract) else ()
    if (config.apis.contains("miner")) service = service.mount(miner) else ()
    if (config.apis.contains("personal")) service = service.mount(personal) else ()
    if (config.apis.contains("transaction")) service = service.mount(transaction) else ()
    service
  }

  val routes: HttpRoutes[F] = Http4sRpcServer.routes(rpcService)

  private val builder: F[BlazeServerBuilder[F]] = {
    val httpApp = for {
      exportRoute <- MetricsMiddleware.exportService[F]
      withMetrics <- MetricsMiddleware[F](routes, config.enableMetrics)
      withLogger = LoggerMiddleware[F](config.logHeaders, config.logBody)((withMetrics <+> exportRoute).orNotFound)
      withCORS = CORSMiddleware[F](withLogger, config.allowedOrigins)
      app = GzipMiddleware[F](withCORS)
    } yield app

    val builder = httpApp.map { app =>
      BlazeServerBuilder[F]
        .withHttpApp(app)
        .withNio2(true)
        .enableHttp2(config.enableHttp2)
        .withWebSockets(config.enableWebsockets)
        .bindHttp(config.port, config.local)
    }

    val sslLClientAuthMode = sslConfig.clientAuth match {
      case "NotRequested" => SSLClientAuthMode.NotRequested
      case "Requested" => SSLClientAuthMode.Requested
      case "Required" => SSLClientAuthMode.Requested
      case x => throw new IllegalArgumentException(s"SSLClientAuthMode ${x} is not supported")
    }

    sslOpt match {
      case Some(ssl) => builder.map(_.withSSLContext(ssl, sslLClientAuthMode))
      case None => builder.map(_.enableHttp2(false))
    }
  }

  val resource: Resource[F, Server[F]] =
    Resource.liftF(builder).flatMap(_.resource)

  val stream: Stream[F, Unit] =
    if (config.enable) {
      Stream.eval(builder).flatMap(_.serve).drain
    } else {
      Stream.empty
    }
}
Example 7
Source File: Metrics.scala From iotchain with MIT License

package jbok.common.metrics

import cats.effect.{Resource, Sync, Timer}
import cats.implicits._
import fs2._

import scala.concurrent.duration._

trait EffectMetrics[F[_]] { self: Metrics[F] =>
  def observed[A](name: String, labels: String*)(fa: F[A])(implicit F: Sync[F], T: Timer[F]): F[A] =
    for {
      start <- T.clock.monotonic(NANOSECONDS)
      attempt <- fa.attempt
      end <- T.clock.monotonic(NANOSECONDS)
      elapsed = end - start
      a <- attempt match {
        case Left(e) => self.observe(name, "failure" :: labels.toList: _*)(elapsed.toDouble) >> F.raiseError(e)
        case Right(a) => self.observe(name, "success" :: labels.toList: _*)(elapsed.toDouble).as(a)
      }
    } yield a

  def monitored[A](name: String, labels: String*)(res: Resource[F, A])(implicit F: Sync[F]): Resource[F, A] = {
    val r = Resource {
      for {
        _ <- self.inc(name, labels: _*)(1.0)
      } yield () -> self.dec(name, labels: _*)(1.0)
    }
    r.flatMap(_ => res)
  }
}

trait StreamMetrics[F[_]] { self: Metrics[F] =>
  // observe events occur in the stream
  def observePipe[A](name: String, labels: String*): Pipe[F, A, Unit] =
    _.chunks.through(observeChunkPipe[A](name, labels: _*))

  def observeChunkPipe[A](name: String, labels: String*): Pipe[F, Chunk[A], Unit] =
    _.evalMap(c => self.observe(name, labels: _*)(c.size.toDouble))
}

trait Metrics[F[_]] extends EffectMetrics[F] with StreamMetrics[F] {
  type Registry

  def registry: Registry

  // accumulate, e.g. the number of requests served, tasks completed, or errors.
  def acc(name: String, labels: String*)(n: Double = 1.0): F[Unit]

  // increase, e.g. the current memory usage, queue size, or active requests.
  def inc(name: String, labels: String*)(n: Double = 1.0): F[Unit]

  // decrease, e.g. the current memory usage, queue size, or active requests.
  def dec(name: String, labels: String*)(n: Double = 1.0): F[Unit]

  // equivalent to inc(name, labels)(delta)
  def set(name: String, labels: String*)(n: Double): F[Unit]

  // e.g. the request response latency, or the size of the response body
  def observe(name: String, labels: String*)(n: Double): F[Unit]
}

object Metrics {
  val METRIC_PREFIX = "iotchain"
  val TIMER_SUFFIX = "seconds"
  val GAUGE_SUFFIX = "active"

  sealed trait NoopRegistry
  object NoopRegistry extends NoopRegistry

  def nop[F[_]: Sync]: Metrics[F] = new Metrics[F] {
    override type Registry = NoopRegistry
    override def registry: Registry = NoopRegistry
    override def acc(name: String, labels: String*)(n: Double): F[Unit] = Sync[F].unit
    override def inc(name: String, labels: String*)(n: Double): F[Unit] = Sync[F].unit
    override def dec(name: String, labels: String*)(n: Double): F[Unit] = Sync[F].unit
    override def set(name: String, labels: String*)(n: Double): F[Unit] = Sync[F].unit
    override def observe(name: String, labels: String*)(n: Double): F[Unit] = Sync[F].unit
  }
}
Example 8
Source File: CoreNode.scala From iotchain with MIT License

package jbok.core

import cats.effect.concurrent.Ref
import cats.effect.{ConcurrentEffect, Timer}
import cats.implicits._
import fs2._
import jbok.common.log.Logger
import jbok.core.config.FullConfig
import jbok.core.ledger.{BlockExecutor, History}
import jbok.core.mining.BlockMiner
import jbok.core.peer.{PeerManager, PeerMessageHandler}
import jbok.core.pool.TxPool
import jbok.core.sync.SyncClient

import scala.concurrent.duration._

final case class CoreNode[F[_]](
    config: FullConfig,
    nodeStatus: Ref[F, NodeStatus],
    history: History[F],
    peerManager: PeerManager[F],
    executor: BlockExecutor[F],
    miner: BlockMiner[F],
    txPool: TxPool[F],
    handler: PeerMessageHandler[F],
    syncClient: SyncClient[F]
)(implicit F: ConcurrentEffect[F], T: Timer[F]) {
  private val log = Logger[F]

  val logStatus: Stream[F, Unit] =
    Stream.eval {
      for {
        number <- history.getBestBlockNumber
        td <- history.getTotalDifficultyByNumber(number).map(_.getOrElse(BigInt(0)))
        status <- nodeStatus.get
        _ <- log.i(s"status=${status},bestNumber=${number},td=${td}")
        _ <- T.sleep(10.seconds)
      } yield ()
    }.repeat

  val stream: Stream[F, Unit] = Stream(
    peerManager.stream,
    miner.stream,
    txPool.stream,
    executor.stream,
    handler.stream,
    syncClient.stream,
    syncClient.checkSeedConnect,
    syncClient.heartBeatStream,
    syncClient.statusStream,
    logStatus
  ).parJoinUnbounded
    .handleErrorWith(e => Stream.eval(log.e("CoreNode has an unhandled failure", e)))
}
Example 9
Source File: BatchProducerIT.scala From Scala-Programming-Projects with MIT License

package coinyser

import java.sql.Timestamp
import java.time.Instant
import java.util.concurrent.TimeUnit

import cats.effect.{IO, Timer}
import org.apache.spark.sql.test.SharedSparkSession
import org.scalatest.{Matchers, WordSpec}

import scala.concurrent.duration.FiniteDuration

class BatchProducerIT extends WordSpec with Matchers with SharedSparkSession {

  import testImplicits._

  "BatchProducer.save" should {
    "save a Dataset[Transaction] to parquet" in withTempDir { tmpDir =>
      val transaction1 = Transaction(timestamp = new Timestamp(1532365695000L), tid = 70683282, price = 7740.00, sell = false, amount = 0.10041719)
      val transaction2 = Transaction(timestamp = new Timestamp(1532365693000L), tid = 70683281, price = 7739.99, sell = false, amount = 0.00148564)
      val sourceDS = Seq(transaction1, transaction2).toDS()

      val uri = tmpDir.toURI
      BatchProducer.save(sourceDS, uri).unsafeRunSync()
      tmpDir.list() should contain("date=2018-07-23")
      val readDS = spark.read.parquet(uri.toString).as[Transaction]
      spark.read.parquet(uri + "/date=2018-07-23").show()
      sourceDS.collect() should contain theSameElementsAs readDS.collect()
    }
  }

  "BatchProducer.processOneBatch" should {
    "filter and save a batch of transaction, wait 59 mn, fetch the next batch" in withTempDir { tmpDir =>
      implicit object FakeTimer extends Timer[IO] {
        private var clockRealTimeInMillis: Long = Instant.parse("2018-08-02T01:00:00Z").toEpochMilli

        def clockRealTime(unit: TimeUnit): IO[Long] =
          IO(unit.convert(clockRealTimeInMillis, TimeUnit.MILLISECONDS))

        def sleep(duration: FiniteDuration): IO[Unit] = IO {
          clockRealTimeInMillis = clockRealTimeInMillis + duration.toMillis
        }

        def shift: IO[Unit] = ???

        def clockMonotonic(unit: TimeUnit): IO[Long] = ???
      }
      implicit val appContext: AppContext = new AppContext(transactionStorePath = tmpDir.toURI)

      implicit def toTimestamp(str: String): Timestamp = Timestamp.from(Instant.parse(str))
      val tx1 = Transaction("2018-08-01T23:00:00Z", 1, 7657.58, true, 0.021762)
      val tx2 = Transaction("2018-08-02T01:00:00Z", 2, 7663.85, false, 0.01385517)
      val tx3 = Transaction("2018-08-02T01:58:30Z", 3, 7663.85, false, 0.03782426)
      val tx4 = Transaction("2018-08-02T01:58:59Z", 4, 7663.86, false, 0.15750809)
      val tx5 = Transaction("2018-08-02T02:30:00Z", 5, 7661.49, true, 0.1)

      // Start at 01:00, tx 2 ignored (too soon)
      val txs0 = Seq(tx1)
      // Fetch at 01:59, get nb 2 and 3, but will miss nb 4 because of Api lag
      val txs1 = Seq(tx2, tx3)
      // Fetch at 02:58, get nb 3, 4, 5
      val txs2 = Seq(tx3, tx4, tx5)
      // Fetch at 03:57, get nothing
      val txs3 = Seq.empty[Transaction]

      val start0 = Instant.parse("2018-08-02T00:00:00Z")
      val end0 = Instant.parse("2018-08-02T00:59:55Z")
      val threeBatchesIO =
        for {
          tuple1 <- BatchProducer.processOneBatch(IO(txs1.toDS()), txs0.toDS(), start0, end0) // end - Api lag
          (ds1, start1, end1) = tuple1

          tuple2 <- BatchProducer.processOneBatch(IO(txs2.toDS()), ds1, start1, end1)
          (ds2, start2, end2) = tuple2

          _ <- BatchProducer.processOneBatch(IO(txs3.toDS()), ds2, start2, end2)
        } yield (ds1, start1, end1, ds2, start2, end2)

      val (ds1, start1, end1, ds2, start2, end2) = threeBatchesIO.unsafeRunSync()

      ds1.collect() should contain theSameElementsAs txs1
      start1 should ===(end0)
      end1 should ===(Instant.parse("2018-08-02T01:58:55Z")) // initialClock + 1mn - 15s - 5s

      ds2.collect() should contain theSameElementsAs txs2
      start2 should ===(end1)
      end2 should ===(Instant.parse("2018-08-02T02:57:55Z")) // initialClock + 1mn -15s + 1mn -15s -5s = end1 + 45s

      val lastClock = Instant.ofEpochMilli(FakeTimer.clockRealTime(TimeUnit.MILLISECONDS).unsafeRunSync())
      lastClock should ===(Instant.parse("2018-08-02T03:57:00Z"))

      val savedTransactions = spark.read.parquet(tmpDir.toString).as[Transaction].collect()
      val expectedTxs = Seq(tx2, tx3, tx4, tx5)
      savedTransactions should contain theSameElementsAs expectedTxs
    }
  }
}
Example 10
Source File: ServerSentEventsTests.scala From lolhttp with Apache License 2.0

package lol.http

import cats.implicits._
import cats.effect.{ContextShift, IO, Timer}
import fs2.concurrent.SignallingRef
import fs2.{Chunk, Stream}
import lol.http.ServerSentEvents._

import scala.concurrent.ExecutionContext
import scala.concurrent.duration._

class ServerSentEventsTests extends Tests {
  implicit val ec: ExecutionContext = scala.concurrent.ExecutionContext.Implicits.global
  implicit val timer: Timer[IO] = IO.timer(ec)
  implicit val cs: ContextShift[IO] = IO.contextShift(ec)

  val App: Service = {
    case url"/" =>
      Ok("Hello")
    case url"/stream" =>
      Ok(Stream(Event("Hello"), Event("World")).covaryAll[IO, Event[String]])
    case url"/fakeStream" =>
      Ok("Hello").addHeaders(h"Content-Type" -> h"text/event-stream")
  }

  test("Valid string events stream") {
    withServer(Server.listen()(App)) { server =>
      await() {
        Client("localhost", server.port).runAndStop { client =>
          client.run(Get("/stream")) { response =>
            response.readAs[Stream[IO, Event[String]]].flatMap { eventStream =>
              eventStream.compile.toVector.map(_.toList)
            }
          }
        }
      } should be(List(Event("Hello"), Event("World")))
    }
  }

  test("Events stream should be stopped by server when client closes the connection") {
    val isRunning = SignallingRef[IO, Boolean](true).unsafeRunSync()
    val App: Service = {
      case url"/infiniteStream" =>
        val infiniteStream =
          Stream.sleep[IO](100.milliseconds).flatMap(_ => Stream.chunk(Chunk.bytes("LOL\n".getBytes("utf-8")))).repeat
        Ok(Content(infiniteStream.onFinalize(isRunning.set(false))))
    }
    withServer(Server.listen()(App)) { server =>
      await() {
        val client = Client("localhost", server.port)
        (IO.sleep(1.second) >> IO(client.stopSync())).unsafeRunAsync(_ => ())
        client.run(Get("/infiniteStream")) { response =>
          response.readAs[String]
        }
      }
      eventually({
        val t = isRunning.get.unsafeRunSync()
        t shouldBe false
      })
    }
  }

  test("Not an events stream") {
    withServer(Server.listen()(App)) { server =>
      the[Error] thrownBy await() {
        Client("localhost", server.port).runAndStop { client =>
          client.run(Get("/")) { response =>
            response.readAs[Stream[IO, Event[String]]].flatMap { eventStream =>
              eventStream.compile.toVector.map(_.toList)
            }
          }
        }
      } should be(Error.UnexpectedContentType())
    }
  }

  test("Invalid events stream ") {
    withServer(Server.listen()(App)) { server =>
      await() {
        Client("localhost", server.port).runAndStop { client =>
          client.run(Get("/fakeStream")) { response =>
            response.readAs[Stream[IO, Event[String]]].flatMap { eventStream =>
              eventStream.compile.toVector.map(_.toList)
            }
          }
        }
      } should be(Nil)
    }
  }
}
Example 11
Source File: StreamingTests.scala From lolhttp with Apache License 2.0

package lol.http.examples

import lol.http._

import cats.effect.{ContextShift, IO, Timer}
import fs2.{Chunk, Pipe, Pull, Stream}

import scala.concurrent.ExecutionContext
import scala.concurrent.duration._

class StreamingTests extends Tests {
  implicit val ec: ExecutionContext = scala.concurrent.ExecutionContext.Implicits.global
  implicit val timer: Timer[IO] = IO.timer(ec)
  implicit val cs: ContextShift[IO] = IO.contextShift(ec)

  def now = System.currentTimeMillis
  val `10Meg` = 10 * 1024 * 1024

  // Transform the stream into packets of 10Meg
  def rechunk: Pipe[IO, Byte, Chunk[Byte]] =
    _.repeatPull(_.unconsN(`10Meg`, true).flatMap {
      case Some((chunks, h)) => Pull.output1(chunks) as Some(h)
      case None => Pull.pure(None)
    })

  test("Slow server read", Slow) {
    withServer(Server.listen() { req =>
      val start = now
      // Read at most 3Meg per second
      req.read(
        _.through(rechunk)
          .evalMap(c => IO(println(s"${c.size} bytes received")))
          .flatMap(_ => Stream.sleep[IO](3.seconds))
          .compile.drain
      ).map { _ => Ok(s"${now - start}") }
    }) { server =>
      val start = now
      var end = 0: Long

      // Send 100M as fast as possible
      val timeToReceive = contentString(
        Post(
          s"http://localhost:${server.port}/",
          content = Content(
            stream = Stream.eval(IO {
              println(s"${`10Meg`} bytes sent")
              end = now
              Chunk.bytes(("." * `10Meg`).getBytes)
            }).repeat.take(10).flatMap(c => Stream.chunk(c))
          )
        ).addHeaders(h"Content-Length" -> h"${10 * `10Meg`}"),
        atMost = 2.minutes
      ).toInt
      val timeToSend = (end - start).toInt

      println(s"Received in ${timeToReceive / 1000}s")
      println(s"Sent in ${timeToSend / 1000}s")

      timeToReceive should be > 25000
      timeToSend should be > 15000
    }
  }

  test("Client read compressed", Slow) {
    withServer(Server.listen() { req =>
      Ok(Content(Stream.eval(IO {
        println(s"sent ${`10Meg`} bytes")
        Chunk.bytes(("." * `10Meg`).getBytes)
      }).repeat.take(10).flatMap(c => Stream.chunk(c))))
        .addHeaders(h"Content-Length" -> h"${10 * `10Meg`}")
    }) { server =>
      await(atMost = 2.minutes) {
        Client("localhost", server.port).runAndStop { client =>
          for {
            length1 <- client.run(Get("/a"))(_.readSuccess { stream =>
              stream.chunks.map(_.size).compile.fold(0)(_ + _)
            })
            length2 <- client.run(Get("/b").addHeaders(h"Accept-Encoding" -> h"gzip"))(_.readSuccess { stream =>
              stream.chunks.map(_.size).compile.fold(0)(_ + _)
            })
            length3 <- client.run(Get("/c").addHeaders(h"Accept-Encoding" -> h"deflate"))(_.readSuccess { stream =>
              stream.chunks.map(_.size).compile.fold(0)(_ + _)
            })
          } yield {
            length1 shouldEqual 10 * `10Meg`
            length2 shouldEqual length1
            length3 shouldEqual length1
          }
        }
      }
    }
  }
}
Example 12
Source File: DoobieCheckSpec.scala From sup with Apache License 2.0

package sup

import _root_.doobie.Transactor
import cats.effect.Async
import cats.effect.ContextShift
import cats.effect.IO
import cats.effect.Timer
import scala.concurrent.duration._
import cats.implicits._
import scala.concurrent.ExecutionContext

class DoobieCheckSpec extends BaseIOTest {

  def goodTransactor[F[_]: Async: ContextShift]: Transactor[F] =
    Transactor.fromDriverManager[F]("org.h2.Driver", "jdbc:h2:mem:")

  def badTransactor[F[_]: Async: ContextShift]: Transactor[F] =
    Transactor.fromDriverManager[F]("org.h2.Driver", "jdbcfoobarnope")

  implicit val cs: ContextShift[IO] = IO.contextShift(ExecutionContext.global)
  implicit val timer: Timer[IO] = IO.timer(ExecutionContext.global)

  "IO H2 check" when {
    "the database responds before the timeout" should {
      "be Healthy" in runIO {
        val healthCheck = modules.doobie.connectionCheck(goodTransactor[IO])(timeout = 5.seconds.some)

        healthCheck.check.map {
          _.value shouldBe Health.Healthy
        }
      }
    }

    "there is no timeout" should {
      "be Healthy" in runIO {
        val healthCheck = modules.doobie.connectionCheck(goodTransactor[IO])(timeout = none)

        healthCheck.check.map {
          _.value shouldBe Health.Healthy
        }
      }
    }
  }
}
Example 13
Source File: MetadataAlgebraSpec.scala From hydra with Apache License 2.0

package hydra.kafka.algebras

import java.time.Instant

import cats.data.NonEmptyList
import cats.effect.{Concurrent, ContextShift, IO, Sync, Timer}
import cats.implicits._
import hydra.avro.registry.SchemaRegistry
import hydra.core.marshallers.History
import hydra.kafka.algebras.MetadataAlgebra.TopicMetadataContainer
import hydra.kafka.model.ContactMethod.Slack
import hydra.kafka.model.TopicMetadataV2Request.Subject
import hydra.kafka.model.{Public, StreamTypeV2, TopicMetadataV2, TopicMetadataV2Key, TopicMetadataV2Request, TopicMetadataV2Value}
import io.chrisdavenport.log4cats.SelfAwareStructuredLogger
import io.chrisdavenport.log4cats.slf4j.Slf4jLogger
import org.apache.avro.generic.GenericRecord
import org.scalatest.Assertion
import org.scalatest.matchers.should.Matchers
import org.scalatest.wordspec.AnyWordSpecLike
import retry.RetryPolicies._
import retry.syntax.all._
import retry.{RetryPolicy, _}

import scala.concurrent.ExecutionContext
import scala.concurrent.duration._

class MetadataAlgebraSpec extends AnyWordSpecLike with Matchers {

  implicit private val contextShift: ContextShift[IO] = IO.contextShift(ExecutionContext.global)
  private implicit val concurrentEffect: Concurrent[IO] = IO.ioConcurrentEffect

  private implicit val policy: RetryPolicy[IO] = limitRetries[IO](5) |+| exponentialBackoff[IO](500.milliseconds)
  private implicit val timer: Timer[IO] = IO.timer(ExecutionContext.global)
  private implicit def noop[A]: (A, RetryDetails) => IO[Unit] = retry.noop[IO, A]

  implicit private def unsafeLogger[F[_]: Sync]: SelfAwareStructuredLogger[F] =
    Slf4jLogger.getLogger[F]

  private implicit class RetryAndAssert[A](boolIO: IO[A]) {
    def retryIfFalse(check: A => Boolean): IO[Assertion] =
      boolIO.map(check).retryingM(identity, policy, noop).map(assert(_))
  }

  private val metadataTopicName = "_internal.metadataTopic"
  private val consumerGroup = "Consumer Group"

  (for {
    kafkaClient <- KafkaClientAlgebra.test[IO]
    schemaRegistry <- SchemaRegistry.test[IO]
    metadata <- MetadataAlgebra.make(metadataTopicName, consumerGroup, kafkaClient, schemaRegistry, consumeMetadataEnabled = true)
  } yield {
    runTests(metadata, kafkaClient)
  }).unsafeRunSync()

  private def runTests(metadataAlgebra: MetadataAlgebra[IO], kafkaClientAlgebra: KafkaClientAlgebra[IO]): Unit = {
    "MetadataAlgebraSpec" should {
      "retrieve none for non-existant topic" in {
        val subject = Subject.createValidated("Non-existantTopic").get
        metadataAlgebra.getMetadataFor(subject).unsafeRunSync() shouldBe None
      }

      "retrieve metadata" in {
        val subject = Subject.createValidated("subject1").get
        val (genericRecordsIO, key, value) = getMetadataGenericRecords(subject)

        (for {
          record <- genericRecordsIO
          _ <- kafkaClientAlgebra.publishMessage(record, metadataTopicName)
          _ <- metadataAlgebra.getMetadataFor(subject).retryIfFalse(_.isDefined)
          metadata <- metadataAlgebra.getMetadataFor(subject)
        } yield metadata shouldBe Some(TopicMetadataContainer(key, value, None, None))).unsafeRunSync()
      }

      "retrieve all metadata" in {
        val subject = Subject.createValidated("subject2").get
        val (genericRecordsIO, key, value) = getMetadataGenericRecords(subject)
        (for {
          record <- genericRecordsIO
          _ <- kafkaClientAlgebra.publishMessage(record, metadataTopicName)
          _ <- metadataAlgebra.getMetadataFor(subject).retryIfFalse(_.isDefined)
          allMetadata <- metadataAlgebra.getAllMetadata
        } yield allMetadata should have length 2).unsafeRunSync()
      }
    }
  }

  private def getMetadataGenericRecords(subject: Subject): (IO[(GenericRecord, Option[GenericRecord])], TopicMetadataV2Key, TopicMetadataV2Value) = {
    val key = TopicMetadataV2Key(subject)
    val value = TopicMetadataV2Value(
      StreamTypeV2.Entity,
      deprecated = false,
      Public,
      NonEmptyList.one(Slack.create("#channel").get),
      Instant.now,
      List(),
      None)
    (TopicMetadataV2.encode[IO](key, Some(value)), key, value)
  }
}
Example 14
Source File: Sleep.scala From cats-retry with Apache License 2.0

package retry

import cats.effect.Timer

import scala.concurrent.duration.FiniteDuration

trait Sleep[M[_]] {
  def sleep(delay: FiniteDuration): M[Unit]
}

object Sleep {
  def apply[M[_]](implicit sleep: Sleep[M]): Sleep[M] = sleep

  implicit def sleepUsingTimer[F[_]](implicit timer: Timer[F]): Sleep[F] =
    new Sleep[F] {
      def sleep(delay: FiniteDuration): F[Unit] = timer.sleep(delay)
    }
}
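A brief usage note (the IO and timer setup here are illustrative, not part of the cats-retry source): because sleepUsingTimer is implicit, any F[_] with a Timer instance in scope automatically gets a Sleep instance, which is what cats-retry uses to wait between retry attempts.

import cats.effect.{IO, Timer}
import retry.Sleep

import scala.concurrent.ExecutionContext
import scala.concurrent.duration._

object SleepUsage {
  // With a Timer[IO] in scope, Sleep[IO] is derived via sleepUsingTimer above.
  implicit val timer: Timer[IO] = IO.timer(ExecutionContext.global)

  val pause: IO[Unit] = Sleep[IO].sleep(100.millis)
}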
Example 15
Source File: TimerAlarm.scala From canoe with MIT License

package samples

import canoe.api._
import canoe.syntax._
import canoe.models.messages.TextMessage
import cats.effect.{ExitCode, IO, IOApp, Timer}
import cats.syntax.functor._
import fs2.Stream

import scala.util.Try
import scala.concurrent.duration._

object TimerAlarm extends IOApp {
  val token: String = "<your telegram token>"

  def run(args: List[String]): IO[ExitCode] =
    Stream
      .resource(TelegramClient.global[IO](token))
      .flatMap { implicit client => Bot.polling[IO].follow(alarm) }
      .compile.drain.as(ExitCode.Success)

  def alarm[F[_]: TelegramClient: Timer]: Scenario[F, Unit] =
    for {
      chat <- Scenario.expect(command("alarm").chat)
      _ <- Scenario.eval(chat.send("Tell me in how many seconds you want to be notified?"))
      in <- Scenario.expect(textMessage)
      sec = Try(in.text.toInt).toOption.filter(_ > 0)
      _ <- sec match {
        case Some(i) => setTimer(in, i)
        case None => Scenario.eval(in.reply("I'm sorry, but I couldn't get that (expecting positive number)"))
      }
    } yield ()

  def setTimer[F[_]: TelegramClient: Timer](m: TextMessage, i: Int): Scenario[F, Unit] =
    for {
      _ <- Scenario.eval(m.chat.send(s"Timer is set. You will receive a reply after $i seconds"))
      _ <- Scenario.eval(Timer[F].sleep(i.seconds))
      _ <- Scenario.eval(m.reply("Time's up."))
    } yield ()
}
Example 16
Source File: SemanticBlocking.scala From canoe with MIT License

package samples

import canoe.api._
import canoe.models.Chat
import canoe.syntax._
import cats.effect.{ExitCode, IO, IOApp, Timer}
import cats.syntax.functor._
import fs2.Stream

import scala.concurrent.duration._
import scala.util.Try

object SemanticBlocking extends IOApp {
  val token: String = "<your telegram token>"

  def run(args: List[String]): IO[ExitCode] =
    Stream
      .resource(TelegramClient.global[IO](token))
      .flatMap { implicit client => Bot.polling[IO].follow(count) }
      .compile.drain.as(ExitCode.Success)

  def count[F[_]: TelegramClient: Timer]: Scenario[F, Unit] =
    for {
      m <- Scenario.expect(command("count"))
      start = Try(m.text.split(" ")(1).toInt).getOrElse(10)
      _ <- repeat(m.chat, start)
    } yield ()

  def repeat[F[_]: TelegramClient: Timer](chat: Chat, i: Int): Scenario[F, Unit] =
    if (i <= 0) Scenario.eval(chat.send("Done.")).void
    else
      for {
        _ <- Scenario.eval(chat.send(s"$i.."))
        _ <- Scenario.eval(Timer[F].sleep(1.second))
        _ <- repeat(chat, i - 1)
      } yield ()
}
Example 17
Source File: Hook.scala From canoe with MIT License

package canoe.api.sources

import canoe.api.{TelegramClient}
import canoe.methods.webhooks.{DeleteWebhook, SetWebhook}
import canoe.models.{InputFile, Update}
import canoe.syntax.methodOps
import cats.Monad
import cats.effect.{ConcurrentEffect, Resource, Timer}
import cats.syntax.all._
import fs2.Stream
import fs2.concurrent.Queue
import io.chrisdavenport.log4cats.Logger
import io.chrisdavenport.log4cats.slf4j.Slf4jLogger
import org.http4s._
import org.http4s.circe.jsonOf
import org.http4s.dsl.Http4sDsl
import org.http4s.implicits._
import org.http4s.server.Server
import org.http4s.server.blaze.BlazeServerBuilder

class Hook[F[_]](queue: Queue[F, Update]) {
  def updates: Stream[F, Update] = queue.dequeue
}

object Hook {

  private def listenServer[F[_]: ConcurrentEffect: Timer: Logger](port: Int): Resource[F, Hook[F]] = {
    val dsl = Http4sDsl[F]
    import dsl._

    def app(queue: Queue[F, Update]): HttpApp[F] =
      HttpRoutes
        .of[F] {
          case req @ POST -> Root =>
            req
              .decodeWith(jsonOf[F, Update], strict = true)(queue.enqueue1(_) *> Ok())
              .recoverWith {
                case InvalidMessageBodyFailure(details, _) =>
                  Logger[F].error(s"Received unknown type of update. $details") *> Ok()
              }
        }
        .orNotFound

    def server(queue: Queue[F, Update]): Resource[F, Server[F]] =
      BlazeServerBuilder[F].bindHttp(port).withHttpApp(app(queue)).resource

    Resource.suspend(Queue.unbounded[F, Update].map(q => server(q).map(_ => new Hook[F](q))))
  }
}
Example 18
Source File: ServerInterpreterTest.scala From endpoints4s with MIT License

package endpoints4s.http4s.server

import java.net.ServerSocket

import cats.effect.{ContextShift, IO, Timer}
import endpoints4s.{Invalid, Valid}
import endpoints4s.algebra.server.{
  BasicAuthenticationTestSuite,
  DecodedUrl,
  EndpointsTestSuite,
  JsonEntitiesFromSchemasTestSuite,
  SumTypedEntitiesTestSuite,
  TextEntitiesTestSuite
}
import org.http4s.server.Router
import org.http4s.{HttpRoutes, Uri}
import org.http4s.server.blaze.BlazeServerBuilder
import org.http4s.syntax.kleisli._

import scala.concurrent.ExecutionContext

class ServerInterpreterTest
    extends EndpointsTestSuite[EndpointsTestApi]
    with BasicAuthenticationTestSuite[EndpointsTestApi]
    with JsonEntitiesFromSchemasTestSuite[EndpointsTestApi]
    with TextEntitiesTestSuite[EndpointsTestApi]
    with SumTypedEntitiesTestSuite[EndpointsTestApi] {

  val serverApi = new EndpointsTestApi()

  def decodeUrl[A](url: serverApi.Url[A])(rawValue: String): DecodedUrl[A] = {
    val uri = Uri.fromString(rawValue).getOrElse(sys.error(s"Illegal URI: $rawValue"))

    url.decodeUrl(uri) match {
      case None => DecodedUrl.NotMatched
      case Some(Invalid(errors)) => DecodedUrl.Malformed(errors)
      case Some(Valid(a)) => DecodedUrl.Matched(a)
    }
  }

  private def serveGeneralEndpoint[Req, Resp](
      endpoint: serverApi.Endpoint[Req, Resp],
      request2response: Req => Resp
  )(runTests: Int => Unit): Unit = {
    val port = {
      val socket = new ServerSocket(0)
      try socket.getLocalPort
      finally if (socket != null) socket.close()
    }
    implicit val cs: ContextShift[IO] = IO.contextShift(ExecutionContext.global)
    implicit val timer: Timer[IO] = IO.timer(ExecutionContext.global)
    val service = HttpRoutes.of[IO](endpoint.implementedBy(request2response))
    val httpApp = Router("/" -> service).orNotFound
    val server =
      BlazeServerBuilder[IO](ExecutionContext.global)
        .bindHttp(port, "localhost")
        .withHttpApp(httpApp)
    server.resource.use(_ => IO(runTests(port))).unsafeRunSync()
  }

  def serveEndpoint[Resp](
      endpoint: serverApi.Endpoint[_, Resp],
      response: => Resp
  )(runTests: Int => Unit): Unit =
    serveGeneralEndpoint(endpoint, (_: Any) => response)(runTests)

  def serveIdentityEndpoint[Resp](
      endpoint: serverApi.Endpoint[Resp, Resp]
  )(runTests: Int => Unit): Unit =
    serveGeneralEndpoint(endpoint, identity[Resp])(runTests)
}
Example 19
Source File: RerunnableTimerSuite.scala From catbird with Apache License 2.0

package io.catbird.util.effect

import cats.effect.Timer
import org.scalatest.Outcome
import org.scalatest.funsuite.FixtureAnyFunSuite
import com.twitter.util
import com.twitter.util.{ Await, Future }
import io.catbird.util.Rerunnable

import scala.concurrent.duration._

class RerunnableTimerSuite extends FixtureAnyFunSuite {

  protected final class FixtureParam {
    val twitterTimer: util.Timer = new util.JavaTimer()
  }

  test("A timer can be used to delay execution") { f =>
    implicit val timer: Timer[Rerunnable] = RerunnableTimer(f.twitterTimer)

    val result = Await.result(
      Future.selectIndex(
        Vector(
          for {
            _ <- Timer[Rerunnable].sleep(100.milliseconds).run
            r <- Future.value("slow")
          } yield r,
          Future.value("fast").delayed(util.Duration.fromMilliseconds(50))(f.twitterTimer)
        )
      )
    )

    assert(result == 1) // The first future is delayed for longer, so we expect the second one to win
  }

  override protected def withFixture(test: OneArgTest): Outcome =
    withFixture(test.toNoArgTest(new FixtureParam))
}
Example 20
Source File: ProcessAlg.scala From scala-steward with Apache License 2.0

package org.scalasteward.core.io

import better.files.File
import cats.effect.{Blocker, Concurrent, ContextShift, Timer}
import cats.implicits._
import io.chrisdavenport.log4cats.Logger
import org.scalasteward.core.application.Cli.EnvVar
import org.scalasteward.core.application.Config
import org.scalasteward.core.util.Nel

trait ProcessAlg[F[_]] {
  def exec(command: Nel[String], cwd: File, extraEnv: (String, String)*): F[List[String]]

  def execSandboxed(command: Nel[String], cwd: File): F[List[String]]
}

object ProcessAlg {
  abstract class UsingFirejail[F[_]](config: Config) extends ProcessAlg[F] {
    override def execSandboxed(command: Nel[String], cwd: File): F[List[String]] = {
      val envVars = config.envVars.map(EnvVar.unapply(_).get)
      if (config.disableSandbox)
        exec(command, cwd, envVars: _*)
      else {
        val whitelisted = (cwd.pathAsString :: config.whitelistedDirectories)
          .map(dir => s"--whitelist=$dir")
        val readOnly = config.readOnlyDirectories
          .map(dir => s"--read-only=$dir")
        exec(Nel("firejail", whitelisted ++ readOnly) ::: command, cwd, envVars: _*)
      }
    }
  }

  def create[F[_]](blocker: Blocker)(implicit
      config: Config,
      contextShift: ContextShift[F],
      logger: Logger[F],
      timer: Timer[F],
      F: Concurrent[F]
  ): ProcessAlg[F] =
    new UsingFirejail[F](config) {
      override def exec(
          command: Nel[String],
          cwd: File,
          extraEnv: (String, String)*
      ): F[List[String]] =
        logger.debug(s"Execute ${command.mkString_(" ")}") >>
          process.slurp[F](
            command,
            Some(cwd.toJava),
            extraEnv.toMap,
            config.processTimeout,
            logger.trace(_),
            blocker
          )
    }
}
Example 21
Source File: TestInstances.scala From scala-steward with Apache License 2.0

package org.scalasteward.core

import _root_.io.chrisdavenport.log4cats.Logger
import _root_.io.chrisdavenport.log4cats.slf4j.Slf4jLogger
import cats.effect.{ContextShift, IO, Timer}
import org.scalacheck.{Arbitrary, Cogen, Gen}
import org.scalasteward.core.data.Version
import org.scalasteward.core.util.Change
import org.scalasteward.core.util.Change.{Changed, Unchanged}

import scala.concurrent.ExecutionContext

object TestInstances {
  implicit def changeArbitrary[T](implicit arbT: Arbitrary[T]): Arbitrary[Change[T]] =
    Arbitrary(arbT.arbitrary.flatMap(t => Gen.oneOf(Changed(t), Unchanged(t))))

  implicit val ioContextShift: ContextShift[IO] =
    IO.contextShift(ExecutionContext.global)

  implicit val ioLogger: Logger[IO] =
    Slf4jLogger.getLogger[IO]

  implicit val ioTimer: Timer[IO] =
    IO.timer(ExecutionContext.global)

  implicit val versionArbitrary: Arbitrary[Version] = {
    val versionChar = Gen.frequency(
      (8, Gen.numChar),
      (5, Gen.const('.')),
      (3, Gen.alphaChar),
      (2, Gen.const('-'))
    )
    Arbitrary(Gen.listOf(versionChar).map(_.mkString).map(Version.apply))
  }

  implicit val versionCogen: Cogen[Version] =
    Cogen(_.alnumComponents.map {
      case Version.Component.Numeric(value) => BigInt(value).toLong
      case a @ Version.Component.Alpha(_) => a.order.toLong
      case _ => 0L
    }.sum)
}
Example 22
Source File: CassandraSync.scala From kafka-journal with MIT License

package com.evolutiongaming.kafka.journal.eventual.cassandra

import cats.arrow.FunctionK
import cats.effect.concurrent.Semaphore
import cats.effect.{Concurrent, Sync, Timer}
import cats.implicits._
import cats.~>
import com.evolutiongaming.cassandra
import com.evolutiongaming.cassandra.sync.AutoCreate
import com.evolutiongaming.kafka.journal.Origin

trait CassandraSync[F[_]] {
  def apply[A](fa: F[A]): F[A]
}

object CassandraSync {

  def empty[F[_]]: CassandraSync[F] = new CassandraSync[F] {
    def apply[A](fa: F[A]) = fa
  }

  def apply[F[_]](implicit F: CassandraSync[F]): CassandraSync[F] = F

  def apply[F[_] : Sync : Timer : CassandraSession](
    config: SchemaConfig,
    origin: Option[Origin],
  ): CassandraSync[F] = {

    val keyspace = config.keyspace
    val autoCreate = if (keyspace.autoCreate) AutoCreate.Table else AutoCreate.None
    apply(
      keyspace = keyspace.name,
      table = config.locksTable,
      autoCreate = autoCreate,
      metadata = origin.map(_.value))
  }

  def apply[F[_] : Sync : Timer : CassandraSession](
    keyspace: String,
    table: String,
    autoCreate: AutoCreate,
    metadata: Option[String],
  ): CassandraSync[F] = {

    new CassandraSync[F] {

      def apply[A](fa: F[A]) = {
        val cassandraSync = cassandra.sync.CassandraSync.of[F](
          session = CassandraSession[F].unsafe,
          keyspace = keyspace,
          table = table,
          autoCreate = autoCreate)

        for {
          cassandraSync <- cassandraSync
          result <- cassandraSync(id = "kafka-journal", metadata = metadata)(fa)
        } yield result
      }
    }
  }

  def of[F[_] : Concurrent : Timer : CassandraSession](
    config: SchemaConfig,
    origin: Option[Origin]
  ): F[CassandraSync[F]] = {

    for {
      semaphore <- Semaphore[F](1)
    } yield {
      val cassandraSync = apply[F](config, origin)
      val serial = new (F ~> F) {
        def apply[A](fa: F[A]) = semaphore.withPermit(fa)
      }
      cassandraSync.mapK(serial, FunctionK.id)
    }
  }

  implicit class CassandraSyncOps[F[_]](val self: CassandraSync[F]) extends AnyVal {

    def mapK[G[_]](fg: F ~> G, gf: G ~> F): CassandraSync[G] = new CassandraSync[G] {
      def apply[A](fa: G[A]) = fg(self(gf(fa)))
    }
  }
}
Example 23
Source File: SetupSchema.scala From kafka-journal with MIT License

package com.evolutiongaming.kafka.journal.eventual.cassandra

import cats.Parallel
import cats.effect.{Concurrent, Timer}
import cats.implicits._
import com.evolutiongaming.kafka.journal.{Origin, Setting, Settings}
import com.evolutiongaming.scassandra.TableName
import com.evolutiongaming.catshelper.CatsHelper._
import com.evolutiongaming.catshelper.{BracketThrowable, FromFuture, LogOf, ToFuture}
import com.evolutiongaming.kafka.journal.eventual.cassandra.CassandraHelper._

import scala.util.Try

object SetupSchema { self =>

  def migrate[F[_] : BracketThrowable : CassandraSession : CassandraSync : Settings](
    schema: Schema,
    fresh: CreateSchema.Fresh
  ): F[Unit] = {

    def addHeaders(table: TableName)(implicit cassandraSync: CassandraSync[F]) = {
      val query = JournalStatements.addHeaders(table)
      val fa = query.execute.first.redeem[Unit](_ => (), _ => ())
      cassandraSync { fa }
    }

    val schemaVersion = "schema-version"

    def version(setting: Option[Setting]) = {
      for {
        setting <- setting
        version <- Try(setting.value.toInt).toOption
      } yield version
    }

    def migrate = {
      for {
        _ <- if (fresh) ().pure[F] else addHeaders(schema.journal)
        _ <- Settings[F].setIfEmpty(schemaVersion, "0")
      } yield {}
    }

    for {
      setting <- Settings[F].get(schemaVersion)
      _ <- version(setting).fold(migrate)(_ => ().pure[F])
    } yield {}
  }

  def apply[F[_] : Concurrent : Parallel : Timer : CassandraCluster : CassandraSession : FromFuture : ToFuture : LogOf](
    config: SchemaConfig,
    origin: Option[Origin]
  ): F[Schema] = {

    def migrate(
      schema: Schema,
      fresh: CreateSchema.Fresh)(implicit
      cassandraSync: CassandraSync[F],
      settings: Settings[F]
    ) = {
      self.migrate[F](schema, fresh)
    }

    def createSchema(implicit cassandraSync: CassandraSync[F]) = CreateSchema(config)

    for {
      cassandraSync <- CassandraSync.of[F](config, origin)
      ab <- createSchema(cassandraSync)
      (schema, fresh) = ab
      settings <- SettingsCassandra.of[F](schema, origin)
      _ <- migrate(schema, fresh)(cassandraSync, settings)
    } yield schema
  }
}
Example 24
Source File: CacheOf.scala From kafka-journal with MIT License

package com.evolutiongaming.kafka.journal.replicator

import cats.Parallel
import cats.effect.{Concurrent, Resource, Timer}
import cats.implicits._
import com.evolutiongaming.catshelper.{BracketThrowable, Runtime}
import com.evolutiongaming.scache
import com.evolutiongaming.scache.{CacheMetrics, Releasable}
import com.evolutiongaming.skafka.Topic
import com.evolutiongaming.smetrics.MeasureDuration

import scala.concurrent.duration.FiniteDuration

trait CacheOf[F[_]] {
  def apply[K, V](topic: Topic): Resource[F, Cache[F, K, V]]
}

object CacheOf {

  def empty[F[_] : BracketThrowable]: CacheOf[F] = new CacheOf[F] {

    def apply[K, V](topic: Topic) = {

      val cache = new Cache[F, K, V] {

        def getOrUpdate(key: K)(value: => Resource[F, V]) = value.use(_.pure[F])

        def remove(key: K) = ().pure[F]
      }

      Resource.liftF(cache.pure[F])
    }
  }

  def apply[F[_] : Concurrent : Timer : Runtime : Parallel : MeasureDuration](
    expireAfter: FiniteDuration,
    cacheMetrics: Option[CacheMetrics.Name => CacheMetrics[F]]
  ): CacheOf[F] = {

    new CacheOf[F] {

      def apply[K, V](topic: Topic) = {
        for {
          cache <- scache.Cache.expiring[F, K, V](expireAfter)
          cache <- cacheMetrics.fold { Resource.liftF(cache.pure[F]) } { cacheMetrics => cache.withMetrics(cacheMetrics(topic)) }
        } yield {
          new Cache[F, K, V] {

            def getOrUpdate(key: K)(value: => Resource[F, V]) = {
              cache.getOrUpdateReleasable(key) { Releasable.of(value) }
            }

            def remove(key: K) = cache.remove(key).flatten.void
          }
        }
      }
    }
  }
}
Example 25
Source File: KafkaSingletonTest.scala From kafka-journal with MIT License

package com.evolutiongaming.kafka.journal.replicator

import cats.data.{NonEmptySet => Nes}
import cats.effect.concurrent.{Deferred, Ref}
import cats.effect.{Concurrent, IO, Resource, Timer}
import cats.implicits._
import com.evolutiongaming.catshelper.Log
import com.evolutiongaming.kafka.journal.IOSuite._
import com.evolutiongaming.skafka.consumer.RebalanceListener
import com.evolutiongaming.skafka.{Partition, TopicPartition}
import com.evolutiongaming.sstream.Stream
import org.scalatest.funsuite.AsyncFunSuite
import org.scalatest.matchers.should.Matchers

import scala.concurrent.duration._

class KafkaSingletonTest extends AsyncFunSuite with Matchers {

  test("allocate & release when partition assigned or revoked") {
    `allocate & release when partition assigned or revoked`[IO]().run()
  }

  private def `allocate & release when partition assigned or revoked`[F[_] : Concurrent : Timer](): F[Unit] = {

    val topic = "topic"

    def consumer(deferred: Deferred[F, RebalanceListener[F]]) = {
      new TopicConsumer[F] {

        def subscribe(listener: RebalanceListener[F]) = deferred.complete(listener)

        def poll = Stream.empty

        def commit = TopicCommit.empty
      }
    }

    def topicPartition(partition: Partition) = TopicPartition(topic, partition)

    val result = for {
      listener <- Resource.liftF(Deferred[F, RebalanceListener[F]])
      allocated <- Resource.liftF(Ref[F].of(false))
      resource = Resource.make { allocated.set(true) } { _ => allocated.set(false) }
      singleton <- KafkaSingleton.of(topic, consumer(listener).pure[Resource[F, *]], resource, Log.empty[F])
      listener <- Resource.liftF(listener.get)
      _ <- Resource.liftF {
        for {
          a <- singleton.get
          _ = a shouldEqual none[Unit]
          a <- allocated.get
          _ = a shouldEqual false
          _ <- listener.onPartitionsAssigned(Nes.of(topicPartition(Partition.max)))
          a <- singleton.get
          _ = a shouldEqual none[Unit]
          a <- allocated.get
          _ = a shouldEqual false
          _ <- listener.onPartitionsAssigned(Nes.of(topicPartition(Partition.min)))
          _ <- Timer[F].sleep(10.millis)
          a <- singleton.get
          _ = a shouldEqual ().some
          a <- allocated.get
          _ = a shouldEqual true
          _ <- listener.onPartitionsRevoked(Nes.of(topicPartition(Partition.max)))
          a <- singleton.get
          _ = a shouldEqual ().some
          a <- allocated.get
          _ = a shouldEqual true
          _ <- listener.onPartitionsRevoked(Nes.of(topicPartition(Partition.min)))
          _ <- Timer[F].sleep(10.millis)
          a <- singleton.get
          _ = a shouldEqual none[Unit]
          a <- allocated.get
          _ = a shouldEqual false
        } yield {}
      }
    } yield {}
    result.use { _ => ().pure[F] }
  }
}
Example 26
Source File: HeadCacheOf.scala From kafka-journal with MIT License

package com.evolutiongaming.kafka.journal

import cats.{Applicative, Parallel}
import cats.implicits._
import cats.effect.{Concurrent, Resource, Timer}
import com.evolutiongaming.catshelper.{FromTry, LogOf}
import com.evolutiongaming.kafka.journal.eventual.EventualJournal
import com.evolutiongaming.skafka.consumer.ConsumerConfig
import com.evolutiongaming.smetrics.MeasureDuration

trait HeadCacheOf[F[_]] {

  def apply(
    consumerConfig: ConsumerConfig,
    eventualJournal: EventualJournal[F]
  ): Resource[F, HeadCache[F]]
}

object HeadCacheOf {

  def empty[F[_] : Applicative]: HeadCacheOf[F] = const(Resource.liftF(HeadCache.empty[F].pure[F]))

  def const[F[_]](value: Resource[F, HeadCache[F]]): HeadCacheOf[F] = {
    (_: ConsumerConfig, _: EventualJournal[F]) => value
  }

  def apply[F[_]](implicit F: HeadCacheOf[F]): HeadCacheOf[F] = F

  def apply[
    F[_] : Concurrent : Parallel : Timer : LogOf : KafkaConsumerOf : MeasureDuration : FromTry : FromAttempt : FromJsResult : JsonCodec.Decode
  ](
    metrics: Option[HeadCacheMetrics[F]]
  ): HeadCacheOf[F] = {
    (consumerConfig: ConsumerConfig, eventualJournal: EventualJournal[F]) => {
      for {
        headCache <- HeadCache.of[F](consumerConfig, eventualJournal, metrics)
        log <- Resource.liftF(LogOf[F].apply(HeadCache.getClass))
      } yield {
        headCache.withLog(log)
      }
    }
  }
}
Example 27
Source File: IOSuite.scala From kafka-journal with MIT License

package com.evolutiongaming.kafka.journal

import cats.Parallel
import cats.effect.{Clock, Concurrent, ContextShift, IO, Timer}
import cats.implicits._
import com.evolutiongaming.smetrics.MeasureDuration
import org.scalatest.Succeeded

import scala.concurrent.duration._
import scala.concurrent.{ExecutionContext, ExecutionContextExecutor, Future}

object IOSuite {
  val Timeout: FiniteDuration = 5.seconds

  implicit val executor: ExecutionContextExecutor = ExecutionContext.global

  implicit val contextShiftIO: ContextShift[IO] = IO.contextShift(executor)
  implicit val concurrentIO: Concurrent[IO] = IO.ioConcurrentEffect
  implicit val timerIO: Timer[IO] = IO.timer(executor)
  implicit val parallel: Parallel[IO] = IO.ioParallel
  implicit val measureDuration: MeasureDuration[IO] = MeasureDuration.fromClock(Clock[IO])

  def runIO[A](io: IO[A], timeout: FiniteDuration = Timeout): Future[Succeeded.type] = {
    io.timeout(timeout).as(Succeeded).unsafeToFuture
  }

  implicit class IOOps[A](val self: IO[A]) extends AnyVal {

    def run(timeout: FiniteDuration = Timeout): Future[Succeeded.type] = runIO(self, timeout)
  }
}
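A sketch of how a test might use these helpers (the suite name and assertion are hypothetical; the .run() pattern is the one used by KafkaSingletonTest in Example 25): importing IOSuite._ brings the Timer[IO] and the IOOps syntax into scope, so an IO-based test body can be turned into the Future that AsyncFunSuite expects.

import cats.effect.IO
import com.evolutiongaming.kafka.journal.IOSuite._
import org.scalatest.funsuite.AsyncFunSuite

import scala.concurrent.duration._

class ExampleTimerSpec extends AsyncFunSuite {
  test("sleep completes within the timeout") {
    // IO.sleep uses the implicit timerIO from IOSuite; run converts the IO into a Future[Succeeded.type]
    IO.sleep(10.millis).run(1.second)
  }
}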
Example 28
Source File: ResilientStream.scala From fs2-rabbit with Apache License 2.0

package dev.profunktor.fs2rabbit.resiliency

import cats.effect.{Sync, Timer}
import cats.syntax.apply._
import dev.profunktor.fs2rabbit.effects.Log
import fs2.Stream

import scala.concurrent.duration._
import scala.util.control.NonFatal

object ResilientStream {

  def runF[F[_]: Log: Sync: Timer](program: F[Unit], retry: FiniteDuration = 5.seconds): F[Unit] =
    run(Stream.eval(program), retry)

  def run[F[_]: Log: Sync: Timer](
      program: Stream[F, Unit],
      retry: FiniteDuration = 5.seconds
  ): F[Unit] =
    loop(program, retry, 1).compile.drain

  private def loop[F[_]: Log: Sync: Timer](
      program: Stream[F, Unit],
      retry: FiniteDuration,
      count: Int
  ): Stream[F, Unit] =
    program.handleErrorWith {
      case NonFatal(err) =>
        Stream.eval(Log[F].error(err.getMessage) *> Log[F].info(s"Restarting in ${retry.toSeconds * count}...")) >>
          loop[F](Stream.sleep(retry) >> program, retry, count + 1)
    }
}
Example 29
Source File: TVarTest.scala From cats-stm with Apache License 2.0

package io.github.timwspence.cats.stm

import cats.effect.{ContextShift, IO, Timer}
import org.scalatest.matchers.should.Matchers
import org.scalatest.funsuite.AsyncFunSuite

import scala.concurrent.ExecutionContext

class TVarTest extends AsyncFunSuite with Matchers {
  implicit override def executionContext: ExecutionContext = ExecutionContext.Implicits.global
  implicit val timer: Timer[IO] = IO.timer(executionContext)
  implicit val cs: ContextShift[IO] = IO.contextShift(executionContext)

  test("Get returns current value") {
    val prog: STM[String] = for {
      tvar <- TVar.of("hello")
      value <- tvar.get
    } yield value

    for (value <- prog.commit[IO].unsafeToFuture) yield {
      value shouldBe "hello"
    }
  }

  test("Set changes current value") {
    val prog: STM[String] = for {
      tvar <- TVar.of("hello")
      _ <- tvar.set("world")
      value <- tvar.get
    } yield value

    for (value <- prog.commit[IO].unsafeToFuture) yield {
      value shouldBe "world"
      value shouldBe "world"
    }
  }

  test("Modify changes current value") {
    val prog: STM[String] = for {
      tvar <- TVar.of("hello")
      _ <- tvar.modify(_.toUpperCase)
      value <- tvar.get
    } yield value

    for (value <- prog.commit[IO].unsafeToFuture) yield {
      value shouldBe "HELLO"
    }
  }

  test("Pending transaction is removed on success") {
    val tvar = TVar.of("foo").commit[IO].unsafeRunSync

    val prog: STM[String] = for {
      _ <- tvar.modify(_.toUpperCase)
      value <- tvar.get
    } yield value

    for (value <- prog.commit[IO].unsafeToFuture) yield {
      value shouldBe "FOO"

      tvar.value shouldBe "FOO"

      tvar.pending.get.isEmpty shouldBe true
    }
  }

  test("Pending transaction is removed on failure") {
    val tvar = TVar.of("foo").commit[IO].unsafeRunSync

    val prog: STM[String] = for {
      _ <- tvar.modify(_.toUpperCase)
      _ <- STM.abort[String](new RuntimeException("boom"))
      value <- tvar.get
    } yield value

    for (_ <- prog.commit[IO].attempt.unsafeToFuture) yield {
      tvar.value shouldBe "foo"

      tvar.pending.get.isEmpty shouldBe true
    }
  }
}
Example 30
Source File: PropertyTests.scala From cats-stm with Apache License 2.0

package io.github.timwspence.cats.stm

import cats.effect.{ContextShift, IO, Timer}
import cats.instances.list._
import cats.syntax.functor._
import cats.syntax.traverse._
import org.scalacheck._
import org.scalatest.matchers.should.Matchers
import org.scalatest.funsuite.AnyFunSuite
import org.scalatestplus.scalacheck.ScalaCheckDrivenPropertyChecks

import scala.concurrent.ExecutionContext
import scala.util.Random

class MaintainsInvariants extends AnyFunSuite with ScalaCheckDrivenPropertyChecks with Matchers {
  implicit val executionContext: ExecutionContext = ExecutionContext.Implicits.global
  implicit val timer: Timer[IO] = IO.timer(executionContext)
  implicit val cs: ContextShift[IO] = IO.contextShift(executionContext)

  val tvarGen: Gen[TVar[Long]] = for {
    value <- Gen.posNum[Long]
  } yield TVar.of(value).commit[IO].unsafeRunSync

  val txnGen: List[TVar[Long]] => Gen[STM[Unit]] = tvars =>
    for {
      fromIdx <- Gen.choose(0, tvars.length - 1)
      toIdx <- Gen.choose(0, tvars.length - 1) suchThat (_ != fromIdx)
      txn <- for {
        balance <- tvars(fromIdx).get
        transfer = Math.abs(Random.nextLong()) % balance
        _ <- tvars(fromIdx).modify(_ - transfer)
        _ <- tvars(toIdx).modify(_ + transfer)
      } yield ()
    } yield txn

  val gen: Gen[(Long, List[TVar[Long]], IO[Unit])] = for {
    tvars <- Gen.listOfN(50, tvarGen)
    total = tvars.foldLeft(0L)((acc, tvar) => acc + tvar.value)
    txns <- Gen.listOf(txnGen(tvars))
    commit = txns.traverse(_.commit[IO].start)
    run = commit.flatMap(l => l.traverse(_.join)).void
  } yield (total, tvars, run)

  test("Transactions maintain invariants") {
    forAll(gen) { g =>
      val total = g._1
      val tvars = g._2
      val txn = g._3

      txn.unsafeRunSync()

      tvars.map(_.value).sum shouldBe total
    }
  }
}
Example 31
Source File: TQueueTest.scala From cats-stm with Apache License 2.0 | 5 votes |
package io.github.timwspence.cats.stm import cats.effect.{ContextShift, IO, Timer} import cats.instances.string._ import cats.syntax.semigroup._ import org.scalatest.matchers.should.Matchers import org.scalatest.funsuite.AsyncFunSuite import scala.concurrent.ExecutionContext class TQueueTest extends AsyncFunSuite with Matchers { implicit override def executionContext: ExecutionContext = ExecutionContext.Implicits.global implicit val timer: Timer[IO] = IO.timer(executionContext) implicit val cs: ContextShift[IO] = IO.contextShift(executionContext) test("Read removes the first element") { val prog: STM[(String, Boolean)] = for { tqueue <- TQueue.empty[String] _ <- tqueue.put("hello") value <- tqueue.read empty <- tqueue.isEmpty } yield value -> empty for (value <- prog.commit[IO].unsafeToFuture) yield { value._1 shouldBe "hello" value._2 shouldBe true } } test("Peek does not remove the first element") { val prog: STM[(String, Boolean)] = for { tqueue <- TQueue.empty[String] _ <- tqueue.put("hello") value <- tqueue.peek empty <- tqueue.isEmpty } yield value -> empty for (value <- prog.commit[IO].unsafeToFuture) yield { value._1 shouldBe "hello" value._2 shouldBe false } } test("TQueue is FIFO") { val prog: STM[String] = for { tqueue <- TQueue.empty[String] _ <- tqueue.put("hello") _ <- tqueue.put("world") hello <- tqueue.read world <- tqueue.peek } yield hello |+| world for (value <- prog.commit[IO].unsafeToFuture) yield { value shouldBe "helloworld" } } }
Example 32
Source File: TSemaphoreTest.scala From cats-stm with Apache License 2.0 | 5 votes |
package io.github.timwspence.cats.stm import cats.effect.{ContextShift, IO, Timer} import org.scalatest.matchers.should.Matchers import org.scalatest.funsuite.AsyncFunSuite import scala.concurrent.ExecutionContext class TSemaphoreTest extends AsyncFunSuite with Matchers { implicit override def executionContext: ExecutionContext = ExecutionContext.Implicits.global implicit val timer: Timer[IO] = IO.timer(executionContext) implicit val cs: ContextShift[IO] = IO.contextShift(executionContext) test("Acquire decrements the number of permits") { val prog: STM[Long] = for { tsem <- TSemaphore.make(1) _ <- tsem.acquire value <- tsem.available } yield value for (value <- prog.commit[IO].unsafeToFuture) yield { value shouldBe 0 } } test("Release increments the number of permits") { val prog: STM[Long] = for { tsem <- TSemaphore.make(0) _ <- tsem.release value <- tsem.available } yield value for (value <- prog.commit[IO].unsafeToFuture) yield { value shouldBe 1 } } }
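Assuming TSemaphore.acquire follows the usual STM blocking semantics and retries until a permit is available, pairing the committed action with a Timer-backed timeout keeps a caller from waiting forever. A small hypothetical sketch, not part of the original test:

import cats.effect.{ContextShift, IO, Timer}
import io.github.timwspence.cats.stm.TSemaphore
import scala.concurrent.ExecutionContext
import scala.concurrent.duration._

implicit val cs: ContextShift[IO] = IO.contextShift(ExecutionContext.global)
implicit val timer: Timer[IO]     = IO.timer(ExecutionContext.global)

// Fails with a TimeoutException if no permit becomes available within one second.
def acquireOrGiveUp(tsem: TSemaphore): IO[Unit] =
  tsem.acquire.commit[IO].timeout(1.second)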
Example 33
Source File: AkkaActorIntermediator.scala From nexus with Apache License 2.0 | 5 votes |
package ch.epfl.bluebrain.nexus.sourcing.akka import akka.actor.ActorSystem import akka.pattern.ask import akka.util.Timeout import cats.effect.{ContextShift, Effect, IO, Timer} import cats.syntax.all._ import ch.epfl.bluebrain.nexus.sourcing.akka.Msg._ import retry.CatsEffect._ import retry.syntax.all._ import retry.{RetryDetails, RetryPolicy} import scala.reflect.ClassTag abstract private[akka] class AkkaActorIntermediator[F[_]: Timer]( name: String, selection: ActorRefSelection[F], askTimeout: Timeout )(implicit F: Effect[F], as: ActorSystem, policy: RetryPolicy[F]) { implicit private[akka] val contextShift: ContextShift[IO] = IO.contextShift(as.dispatcher) implicit private[akka] def noop[A]: (A, RetryDetails) => F[Unit] = retry.noop[F, A] implicit private val timeout: Timeout = askTimeout private[akka] def send[M <: Msg, Reply, A](id: String, msg: M, f: Reply => A)(implicit Reply: ClassTag[Reply] ): F[A] = selection(name, id).flatMap { ref => val future = IO(ref ? msg) val fa = IO.fromFuture(future).to[F] fa.flatMap[A] { case Reply(value) => F.pure(f(value)) case te: TypeError => F.raiseError(te) case um: UnexpectedMsgId => F.raiseError(um) case cet: CommandEvaluationTimeout[_] => F.raiseError(cet) case cee: CommandEvaluationError[_] => F.raiseError(cee) case other => F.raiseError(TypeError(id, Reply.runtimeClass.getSimpleName, other)) } .retryingOnAllErrors[Throwable] } }
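The retries above come from cats-retry: retry.CatsEffect._ derives the Sleep capability from Timer[F], while the policy and a no-op log handler are picked up implicitly by retryingOnAllErrors. A minimal hypothetical sketch with IO that mirrors the implicit-based syntax used in this codebase; the limit-of-three policy and the failing call are illustrative, and the exact combinators depend on the cats-retry version:

import cats.effect.{IO, Timer}
import retry.CatsEffect._
import retry.syntax.all._
import retry.{RetryDetails, RetryPolicies, RetryPolicy}
import scala.concurrent.ExecutionContext

implicit val timer: Timer[IO] = IO.timer(ExecutionContext.global)

implicit val policy: RetryPolicy[IO] = RetryPolicies.limitRetries[IO](3)
implicit def onError[A]: (A, RetryDetails) => IO[Unit] = retry.noop[IO, A]

// A flaky call that is retried up to three times before the error is surfaced.
def flaky: IO[String] = IO.raiseError(new RuntimeException("actor unavailable"))

val resilient: IO[String] = flaky.retryingOnAllErrors[Throwable]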
Example 34
Source File: InMemoryAggregateSpec.scala From nexus with Apache License 2.0 | 5 votes |
package ch.epfl.bluebrain.nexus.sourcing import cats.effect.{ContextShift, IO, Timer} import ch.epfl.bluebrain.nexus.sourcing.AggregateFixture._ import ch.epfl.bluebrain.nexus.sourcing.Command.{Increment, IncrementAsync, Initialize} import ch.epfl.bluebrain.nexus.sourcing.Event.{Incremented, Initialized} import ch.epfl.bluebrain.nexus.sourcing.State.Current import scala.concurrent.ExecutionContext import scala.concurrent.duration._ class InMemoryAggregateSpec extends SourcingSpec { implicit val ctx: ContextShift[IO] = IO.contextShift(ExecutionContext.global) implicit val timer: Timer[IO] = IO.timer(ExecutionContext.global) "An InMemoryAggregate" should { val agg = Aggregate .inMemory[IO, Int]("global", initialState, AggregateFixture.next, AggregateFixture.evaluate[IO]) .unsafeRunSync() "return its name" in { agg.name shouldEqual "global" } "update its state when accepting commands" in { agg.evaluateE(1, Increment(0, 2)).unsafeRunSync().rightValue shouldEqual Incremented(1, 2) agg .evaluate(1, IncrementAsync(1, 5, 200.millis)) .unsafeRunSync() .rightValue shouldEqual (Current(2, 7) -> Incremented(2, 5)) agg.currentState(1).unsafeRunSync() shouldEqual Current(2, 7) } "return its current seq nr" in { agg.lastSequenceNr(1).unsafeRunSync() shouldEqual 2L } "test without applying changes" in { agg.test(1, Initialize(0)).unsafeRunSync().leftValue agg.testE(1, Initialize(2)).unsafeRunSync().rightValue shouldEqual Initialized(3) agg.testS(1, Initialize(2)).unsafeRunSync().rightValue shouldEqual Current(3, 0) agg.currentState(1).unsafeRunSync() shouldEqual Current(2, 7) } "not update its state if evaluation fails" in { agg.evaluate(1, Initialize(0)).unsafeRunSync().leftValue agg.currentState(1).unsafeRunSync() shouldEqual Current(2, 7) } "evaluate commands one at a time" in { agg.evaluateS(1, Initialize(2)).unsafeRunSync().rightValue shouldEqual Current(3, 0) agg.currentState(1).unsafeRunSync() shouldEqual Current(3, 0) agg.evaluateS(1, IncrementAsync(3, 2, 300.millis)).unsafeToFuture() agg.evaluateE(1, IncrementAsync(4, 2, 20.millis)).unsafeRunSync().rightValue shouldEqual Incremented(5, 2) agg.currentState(1).unsafeRunSync() shouldEqual Current(5, 4) } "fold over the event stream in order" in { agg .foldLeft(1, (0, true)) { case ((lastRev, succeeded), event) => (event.rev, succeeded && event.rev - lastRev == 1) } .unsafeRunSync() ._2 shouldEqual true } "return all events" in { agg.foldLeft(1, 0) { case (acc, _) => acc + 1 }.unsafeRunSync() shouldEqual 5 } "append events" in { agg.append(2, Incremented(1, 2)).unsafeRunSync() shouldEqual 1L agg.currentState(1).unsafeRunSync() shouldEqual Current(5, 4) } "return true for existing ids" in { agg.exists(1).unsafeRunSync() shouldEqual true } "return false for unknown ids" in { agg.exists(Int.MaxValue).unsafeRunSync() shouldEqual false } "return the sequence number for a snapshot" in { agg.snapshot(1).unsafeRunSync() shouldEqual 5L } } }
Example 35
Source File: InMemoryStateMachineSpec.scala From nexus with Apache License 2.0 | 5 votes |
package ch.epfl.bluebrain.nexus.sourcing import cats.effect.{ContextShift, IO, Timer} import ch.epfl.bluebrain.nexus.sourcing.Command.{Increment, IncrementAsync, Initialize} import ch.epfl.bluebrain.nexus.sourcing.State.Current import ch.epfl.bluebrain.nexus.sourcing.StateMachineFixture._ import scala.concurrent.ExecutionContext import scala.concurrent.duration._ class InMemoryStateMachineSpec extends SourcingSpec { implicit val ctx: ContextShift[IO] = IO.contextShift(ExecutionContext.global) implicit val timer: Timer[IO] = IO.timer(ExecutionContext.global) "An InMemoryStateMachine" should { val cache = StateMachine.inMemory[IO, Int]("global", initialState, evaluate[IO]).unsafeRunSync() "return its name" in { cache.name shouldEqual "global" } "update its state when accepting commands" in { cache.evaluate(1, Increment(0, 2)).unsafeRunSync().rightValue shouldEqual Current(1, 2) cache.evaluate(1, IncrementAsync(1, 5, 200.millis)).unsafeRunSync().rightValue shouldEqual Current(2, 7) cache.currentState(1).unsafeRunSync() shouldEqual Current(2, 7) } "test without applying changes" in { cache.test(1, Initialize(0)).unsafeRunSync().leftValue cache.test(1, Initialize(2)).unsafeRunSync().rightValue shouldEqual Current(3, 0) cache.test(1, Initialize(2)).unsafeRunSync().rightValue shouldEqual Current(3, 0) cache.currentState(1).unsafeRunSync() shouldEqual Current(2, 7) } "not update its state if evaluation fails" in { cache.evaluate(1, Initialize(0)).unsafeRunSync().leftValue cache.currentState(1).unsafeRunSync() shouldEqual Current(2, 7) } "evaluate commands one at a time" in { cache.evaluate(1, Initialize(2)).unsafeRunSync().rightValue shouldEqual Current(3, 0) cache.currentState(1).unsafeRunSync() shouldEqual Current(3, 0) cache.evaluate(1, IncrementAsync(3, 2, 300.millis)).unsafeToFuture() cache.evaluate(1, IncrementAsync(4, 2, 20.millis)).unsafeRunSync().rightValue shouldEqual Current(5, 4) cache.currentState(1).unsafeRunSync() shouldEqual Current(5, 4) } } }
Example 36
Source File: StateMachineFixture.scala From nexus with Apache License 2.0 | 5 votes |
package ch.epfl.bluebrain.nexus.sourcing import cats.effect.{Async, Timer} import cats.implicits._ import ch.epfl.bluebrain.nexus.sourcing.Command._ import ch.epfl.bluebrain.nexus.sourcing.Rejection.InvalidRevision import ch.epfl.bluebrain.nexus.sourcing.State.{Current, Initial} object StateMachineFixture { val initialState: State = State.Initial def evaluate[F[_]](state: State, cmd: Command)(implicit F: Async[F], T: Timer[F]): F[Either[Rejection, State]] = (state, cmd) match { case (Current(revS, _), Boom(revC, message)) if revS == revC => F.raiseError(new RuntimeException(message)) case (Initial, Boom(rev, message)) if rev == 0 => F.raiseError(new RuntimeException(message)) case (_, Boom(rev, _)) => F.pure(Left(InvalidRevision(rev))) case (Current(revS, _), Never(revC)) if revS == revC => F.never case (Initial, Never(rev)) if rev == 0 => F.never case (_, Never(rev)) => F.pure(Left(InvalidRevision(rev))) case (Initial, Increment(rev, step)) if rev == 0 => F.pure(Right(State.Current(1, step))) case (Initial, Increment(rev, _)) => F.pure(Left(InvalidRevision(rev))) case (Initial, IncrementAsync(rev, step, duration)) if rev == 0 => T.sleep(duration) >> F.pure(Right(State.Current(1, step))) case (Initial, IncrementAsync(rev, _, _)) => F.pure(Left(InvalidRevision(rev))) case (Initial, Initialize(rev)) if rev == 0 => F.pure(Right(State.Current(1, 0))) case (Initial, Initialize(rev)) => F.pure(Left(InvalidRevision(rev))) case (Current(revS, value), Increment(revC, step)) if revS == revC => F.pure(Right(State.Current(revS + 1, value + step))) case (Current(_, _), Increment(revC, _)) => F.pure(Left(InvalidRevision(revC))) case (Current(revS, value), IncrementAsync(revC, step, duration)) if revS == revC => T.sleep(duration) >> F.pure(Right(State.Current(revS + 1, value + step))) case (Current(_, _), IncrementAsync(revC, _, duration)) => T.sleep(duration) >> F.pure(Left(InvalidRevision(revC))) case (Current(revS, _), Initialize(revC)) if revS == revC => F.pure(Right(State.Current(revS + 1, 0))) case (Current(_, _), Initialize(rev)) => F.pure(Left(InvalidRevision(rev))) } }
Example 37
Source File: AggregateFixture.scala From nexus with Apache License 2.0 | 5 votes |
package ch.epfl.bluebrain.nexus.sourcing import cats.effect.{Async, Timer} import cats.implicits._ import ch.epfl.bluebrain.nexus.sourcing.Command._ import ch.epfl.bluebrain.nexus.sourcing.Event.{Incremented, Initialized} import ch.epfl.bluebrain.nexus.sourcing.Rejection.InvalidRevision import ch.epfl.bluebrain.nexus.sourcing.State.{Current, Initial} object AggregateFixture { val initialState: State = State.Initial val next: (State, Event) => State = { case (Initial, Incremented(1, step)) => State.Current(1, step) case (Initial, Initialized(rev)) => State.Current(rev, 0) case (Current(_, value), Incremented(rev, step)) => State.Current(rev, value + step) case (Current(_, _), Initialized(rev)) => State.Current(rev, 0) case (other, _) => other } def evaluate[F[_]](state: State, cmd: Command)(implicit F: Async[F], T: Timer[F]): F[Either[Rejection, Event]] = (state, cmd) match { case (Current(revS, _), Boom(revC, message)) if revS == revC => F.raiseError(new RuntimeException(message)) case (Initial, Boom(rev, message)) if rev == 0 => F.raiseError(new RuntimeException(message)) case (_, Boom(rev, _)) => F.pure(Left(InvalidRevision(rev))) case (Current(revS, _), Never(revC)) if revS == revC => F.never case (Initial, Never(rev)) if rev == 0 => F.never case (_, Never(rev)) => F.pure(Left(InvalidRevision(rev))) case (Initial, Increment(rev, step)) if rev == 0 => F.pure(Right(Incremented(1, step))) case (Initial, Increment(rev, _)) => F.pure(Left(InvalidRevision(rev))) case (Initial, IncrementAsync(rev, step, duration)) if rev == 0 => T.sleep(duration) >> F.pure(Right(Incremented(1, step))) case (Initial, IncrementAsync(rev, _, _)) => F.pure(Left(InvalidRevision(rev))) case (Initial, Initialize(rev)) if rev == 0 => F.pure(Right(Initialized(1))) case (Initial, Initialize(rev)) => F.pure(Left(InvalidRevision(rev))) case (Current(revS, _), Increment(revC, step)) if revS == revC => F.pure(Right(Incremented(revS + 1, step))) case (Current(_, _), Increment(revC, _)) => F.pure(Left(InvalidRevision(revC))) case (Current(revS, _), IncrementAsync(revC, step, duration)) if revS == revC => T.sleep(duration) >> F.pure(Right(Incremented(revS + 1, step))) case (Current(_, _), IncrementAsync(revC, _, duration)) => T.sleep(duration) >> F.pure(Left(InvalidRevision(revC))) case (Current(revS, _), Initialize(revC)) if revS == revC => F.pure(Right(Initialized(revS + 1))) case (Current(_, _), Initialize(rev)) => F.pure(Left(InvalidRevision(rev))) } }
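Both fixtures model an asynchronous command by sleeping on the abstract Timer[F] before producing the outcome. Stripped of the revision bookkeeping, the pattern is just a delayed pure value; a small hypothetical helper:

import cats.effect.{Async, Timer}
import cats.implicits._
import scala.concurrent.duration.FiniteDuration

// Produce `value` only after `wait` has elapsed, for any F with Async and Timer instances.
def delayed[F[_]: Async: Timer, A](wait: FiniteDuration)(value: A): F[A] =
  Timer[F].sleep(wait) >> Async[F].pure(value)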
Example 38
Source File: ElasticSearchBaseClient.scala From nexus with Apache License 2.0 | 5 votes |
package ch.epfl.bluebrain.nexus.commons.es.client import akka.http.scaladsl.model.StatusCodes.GatewayTimeout import akka.http.scaladsl.model.{HttpRequest, StatusCode, StatusCodes} import cats.effect.{Effect, Timer} import cats.implicits._ import ch.epfl.bluebrain.nexus.commons.es.client.ElasticSearchBaseClient._ import ch.epfl.bluebrain.nexus.commons.es.client.ElasticSearchFailure.{ElasticServerError, ElasticUnexpectedError} import ch.epfl.bluebrain.nexus.commons.http.HttpClient.UntypedHttpClient import ch.epfl.bluebrain.nexus.sourcing.RetryStrategyConfig import com.typesafe.scalalogging.Logger import retry.CatsEffect._ import retry.syntax.all._ import retry.{RetryDetails, RetryPolicy} import scala.util.control.NonFatal private[client] def sanitize(index: String, allowWildCard: Boolean): String = { val regex = if (allowWildCard) """[\s|"|\\|<|>|\||,|/|?]""" else """[\s|"|*|\\|<|>|\||,|/|?]""" index.replaceAll(regex, "_").dropWhile(_ == '_') } } object ElasticSearchBaseClient { private[client] val docType = "_doc" private[client] val source = "_source" private[client] val anyIndexPath = "_all" private[client] val ignoreUnavailable = "ignore_unavailable" private[client] val allowNoIndices = "allow_no_indices" private[client] val trackTotalHits = "track_total_hits" }
Example 39
Source File: ArchiveCache.scala From nexus with Apache License 2.0 | 5 votes |
package ch.epfl.bluebrain.nexus.kg.archives import akka.actor.{ActorSystem, NotInfluenceReceiveTimeout} import cats.Monad import cats.data.OptionT import cats.effect.{Effect, Timer} import cats.implicits._ import ch.epfl.bluebrain.nexus.kg.archives.ArchiveCache._ import ch.epfl.bluebrain.nexus.kg.config.KgConfig.ArchivesConfig import ch.epfl.bluebrain.nexus.kg.resources.ResId import ch.epfl.bluebrain.nexus.sourcing.StateMachine import ch.epfl.bluebrain.nexus.sourcing.akka.StopStrategy import ch.epfl.bluebrain.nexus.sourcing.akka.statemachine.AkkaStateMachine import retry.RetryPolicy class ArchiveCache[F[_]: Monad](ref: StateMachine[F, String, State, Command, Unit]) { def put(value: Archive): OptionT[F, Archive] = OptionT(ref.evaluate(value.resId.show, Write(value)).map(_.toOption.flatten)) } object ArchiveCache { private[archives] type State = Option[Archive] private[archives] type Command = Write final private[archives] case class Write(bundle: Archive) extends NotInfluenceReceiveTimeout final def apply[F[_]: Timer](implicit as: ActorSystem, cfg: ArchivesConfig, F: Effect[F]): F[ArchiveCache[F]] = { implicit val retryPolicy: RetryPolicy[F] = cfg.cache.retry.retryPolicy[F] val invalidationStrategy = StopStrategy.lapsedSinceLastInteraction[State, Command](cfg.cacheInvalidateAfter) val evaluate: (State, Command) => F[Either[Unit, State]] = { case (None, Write(bundle)) => F.pure(Right(Some(bundle))) case (Some(_), _) => F.pure(Left(())) // It already exists, so we don't want to replace it } AkkaStateMachine .sharded[F]("archives", None, evaluate, invalidationStrategy, cfg.cache.akkaStateMachineConfig, cfg.cache.shards) .map(new ArchiveCache[F](_)) } }
Example 40
Source File: ElasticSearchIndexer.scala From nexus with Apache License 2.0 | 5 votes |
package ch.epfl.bluebrain.nexus.kg.indexing import akka.actor.{ActorRef, ActorSystem, Props} import akka.stream.scaladsl.Source import akka.util.Timeout import cats.effect.{Effect, Timer} import cats.implicits._ import ch.epfl.bluebrain.nexus.admin.client.types.Project import ch.epfl.bluebrain.nexus.commons.es.client.ElasticSearchClient import ch.epfl.bluebrain.nexus.commons.es.client.ElasticSearchClient.BulkOp import ch.epfl.bluebrain.nexus.kg.indexing.View.ElasticSearchView import ch.epfl.bluebrain.nexus.kg.resources._ import ch.epfl.bluebrain.nexus.kg.routes.Clients import ch.epfl.bluebrain.nexus.service.config.ServiceConfig import ch.epfl.bluebrain.nexus.sourcing.projections.ProgressFlow.ProgressFlowElem import ch.epfl.bluebrain.nexus.sourcing.projections.ProjectionProgress.NoProgress import ch.epfl.bluebrain.nexus.sourcing.projections._ import com.typesafe.scalalogging.Logger import scala.concurrent.ExecutionContext // $COVERAGE-OFF$ @SuppressWarnings(Array("MaxParameters")) object ElasticSearchIndexer { implicit private val log: Logger = Logger[ElasticSearchIndexer.type] final def start[F[_]: Timer]( view: ElasticSearchView, resources: Resources[F], project: Project, restartOffset: Boolean )(implicit as: ActorSystem, actorInitializer: (Props, String) => ActorRef, projections: Projections[F, String], F: Effect[F], clients: Clients[F], config: ServiceConfig ): StreamSupervisor[F, ProjectionProgress] = { implicit val ec: ExecutionContext = as.dispatcher implicit val p: Project = project implicit val indexing: IndexingConfig = config.kg.elasticSearch.indexing implicit val metadataOpts: MetadataOptions = MetadataOptions(linksAsIri = true, expandedLinks = true) implicit val tm: Timeout = Timeout(config.kg.elasticSearch.askTimeout) val client: ElasticSearchClient[F] = clients.elasticSearch.withRetryPolicy(config.kg.elasticSearch.indexing.retry) def deleteOrIndex(res: ResourceV): Option[BulkOp] = if (res.deprecated && !view.filter.includeDeprecated) Some(delete(res)) else view.toDocument(res).map(doc => BulkOp.Index(view.index, res.id.value.asString, doc)) def delete(res: ResourceV): BulkOp = BulkOp.Delete(view.index, res.id.value.asString) val initFetchProgressF: F[ProjectionProgress] = if (restartOffset) projections.recordProgress(view.progressId, NoProgress) >> view.createIndex >> F.pure(NoProgress) else view.createIndex >> projections.progress(view.progressId) val sourceF: F[Source[ProjectionProgress, _]] = initFetchProgressF.map { initial => val flow = ProgressFlowElem[F, Any] .collectCast[Event] .groupedWithin(indexing.batch, indexing.batchTimeout) .distinct() .mapAsync(view.toResource(resources, _)) .collectSome[ResourceV] .collect { case res if view.allowedSchemas(res) && view.allowedTypes(res) => deleteOrIndex(res) case res if view.allowedSchemas(res) => Some(delete(res)) } .collectSome[BulkOp] .runAsyncBatch(client.bulk(_))() .mergeEmit() .toPersistedProgress(view.progressId, initial) cassandraSource(s"project=${view.ref.id}", view.progressId, initial.minProgress.offset) .via(flow) .via(kamonViewMetricsFlow(view, project)) } StreamSupervisor.start(sourceF, view.progressId, actorInitializer) } } // $COVERAGE-ON$
Example 41
Source File: StorageIndexer.scala From nexus with Apache License 2.0 | 5 votes |
package ch.epfl.bluebrain.nexus.kg.indexing import java.time.Instant import akka.actor.ActorSystem import akka.stream.scaladsl.{Flow, Source} import akka.util.Timeout import cats.effect.{Effect, Timer} import cats.implicits._ import ch.epfl.bluebrain.nexus.admin.client.AdminClient import ch.epfl.bluebrain.nexus.iam.auth.AccessToken import ch.epfl.bluebrain.nexus.kg.cache.{ProjectCache, StorageCache} import ch.epfl.bluebrain.nexus.kg.config.KgConfig.StorageConfig import ch.epfl.bluebrain.nexus.kg.resources._ import ch.epfl.bluebrain.nexus.kg.storage.Storage import ch.epfl.bluebrain.nexus.service.config.ServiceConfig import ch.epfl.bluebrain.nexus.service.config.Vocabulary.nxv import ch.epfl.bluebrain.nexus.sourcing.projections.ProgressFlow.{PairMsg, ProgressFlowElem} import ch.epfl.bluebrain.nexus.sourcing.projections._ import com.typesafe.scalalogging.Logger import scala.concurrent.ExecutionContext // $COVERAGE-OFF$ object StorageIndexer { implicit private val log = Logger[StorageIndexer.type] def start[F[_]: Timer](storages: Storages[F], storageCache: StorageCache[F])(implicit projectCache: ProjectCache[F], F: Effect[F], as: ActorSystem, projectInitializer: ProjectInitializer[F], adminClient: AdminClient[F], config: ServiceConfig ): StreamSupervisor[F, Unit] = { implicit val authToken: Option[AccessToken] = config.serviceAccount.credentials implicit val indexing: IndexingConfig = config.kg.keyValueStore.indexing implicit val ec: ExecutionContext = as.dispatcher implicit val tm: Timeout = Timeout(config.kg.keyValueStore.askTimeout) implicit val storageConfig: StorageConfig = config.kg.storage val name = "storage-indexer" def toStorage(event: Event): F[Option[(Storage, Instant)]] = fetchProject(event.organization, event.id.parent, event.subject).flatMap { implicit project => storages.fetchStorage(event.id).value.map { case Left(err) => log.error(s"Error on event '${event.id.show} (rev = ${event.rev})', cause: '${err.msg}'") None case Right(timedStorage) => Some(timedStorage) } } val source: Source[PairMsg[Any], _] = cassandraSource(s"type=${nxv.Storage.value.show}", name) val flow: Flow[PairMsg[Any], Unit, _] = ProgressFlowElem[F, Any] .collectCast[Event] .groupedWithin(indexing.batch, indexing.batchTimeout) .distinct() .mergeEmit() .mapAsync(toStorage) .collectSome[(Storage, Instant)] .runAsync { case (storage, instant) => storageCache.put(storage)(instant) }() .flow .map(_ => ()) StreamSupervisor.startSingleton(F.delay(source.via(flow)), name) } } // $COVERAGE-ON$
Example 42
Source File: SparqlIndexer.scala From nexus with Apache License 2.0 | 5 votes |
package ch.epfl.bluebrain.nexus.kg.indexing import akka.actor.{ActorRef, ActorSystem, Props} import akka.stream.scaladsl.Source import akka.util.Timeout import cats.effect.{Effect, Timer} import cats.implicits._ import ch.epfl.bluebrain.nexus.admin.client.types.Project import ch.epfl.bluebrain.nexus.commons.sparql.client.{BlazegraphClient, SparqlWriteQuery} import ch.epfl.bluebrain.nexus.kg.indexing.View.SparqlView import ch.epfl.bluebrain.nexus.kg.resources._ import ch.epfl.bluebrain.nexus.kg.routes.Clients import ch.epfl.bluebrain.nexus.service.config.ServiceConfig import ch.epfl.bluebrain.nexus.sourcing.projections.ProgressFlow.ProgressFlowElem import ch.epfl.bluebrain.nexus.sourcing.projections.ProjectionProgress.NoProgress import ch.epfl.bluebrain.nexus.sourcing.projections._ import scala.concurrent.ExecutionContext // $COVERAGE-OFF$ @SuppressWarnings(Array("MaxParameters")) object SparqlIndexer { final def start[F[_]: Timer]( view: SparqlView, resources: Resources[F], project: Project, restartOffset: Boolean )(implicit as: ActorSystem, actorInitializer: (Props, String) => ActorRef, projections: Projections[F, String], F: Effect[F], clients: Clients[F], config: ServiceConfig ): StreamSupervisor[F, ProjectionProgress] = { implicit val ec: ExecutionContext = as.dispatcher implicit val p: Project = project implicit val indexing: IndexingConfig = config.kg.sparql.indexing implicit val metadataOpts: MetadataOptions = MetadataOptions(linksAsIri = true, expandedLinks = true) implicit val tm: Timeout = Timeout(config.kg.sparql.askTimeout) val client: BlazegraphClient[F] = clients.sparql.copy(namespace = view.index).withRetryPolicy(config.kg.sparql.indexing.retry) def buildInsertOrDeleteQuery(res: ResourceV): SparqlWriteQuery = if (res.deprecated && !view.filter.includeDeprecated) view.buildDeleteQuery(res) else view.buildInsertQuery(res) val initFetchProgressF: F[ProjectionProgress] = if (restartOffset) projections.recordProgress(view.progressId, NoProgress) >> view.createIndex >> F.pure(NoProgress) else view.createIndex >> projections.progress(view.progressId) val sourceF: F[Source[ProjectionProgress, _]] = initFetchProgressF.map { initial => val flow = ProgressFlowElem[F, Any] .collectCast[Event] .groupedWithin(indexing.batch, indexing.batchTimeout) .distinct() .mapAsync(view.toResource(resources, _)) .collectSome[ResourceV] .collect { case res if view.allowedSchemas(res) && view.allowedTypes(res) => buildInsertOrDeleteQuery(res) case res if view.allowedSchemas(res) => view.buildDeleteQuery(res) } .runAsyncBatch(client.bulk(_))() .mergeEmit() .toPersistedProgress(view.progressId, initial) cassandraSource(s"project=${view.ref.id}", view.progressId, initial.minProgress.offset) .via(flow) .via(kamonViewMetricsFlow(view, project)) } StreamSupervisor.start(sourceF, view.progressId, actorInitializer) } } // $COVERAGE-ON$
Example 43
Source File: ResolverIndexer.scala From nexus with Apache License 2.0 | 5 votes |
package ch.epfl.bluebrain.nexus.kg.indexing import akka.actor.ActorSystem import akka.stream.scaladsl.{Flow, Source} import akka.util.Timeout import cats.effect.{Effect, Timer} import cats.implicits._ import ch.epfl.bluebrain.nexus.admin.client.AdminClient import ch.epfl.bluebrain.nexus.iam.auth.AccessToken import ch.epfl.bluebrain.nexus.kg.cache.{ProjectCache, ResolverCache} import ch.epfl.bluebrain.nexus.kg.resolve.Resolver import ch.epfl.bluebrain.nexus.kg.resources._ import ch.epfl.bluebrain.nexus.service.config.ServiceConfig import ch.epfl.bluebrain.nexus.service.config.Vocabulary.nxv import ch.epfl.bluebrain.nexus.sourcing.projections.ProgressFlow.{PairMsg, ProgressFlowElem} import ch.epfl.bluebrain.nexus.sourcing.projections._ import com.typesafe.scalalogging.Logger import scala.concurrent.ExecutionContext // $COVERAGE-OFF$ object ResolverIndexer { implicit private val log = Logger[ResolverIndexer.type] final def start[F[_]: Timer](resolvers: Resolvers[F], resolverCache: ResolverCache[F])(implicit projectCache: ProjectCache[F], as: ActorSystem, F: Effect[F], projectInitializer: ProjectInitializer[F], adminClient: AdminClient[F], config: ServiceConfig ): StreamSupervisor[F, Unit] = { implicit val authToken: Option[AccessToken] = config.serviceAccount.credentials implicit val indexing: IndexingConfig = config.kg.keyValueStore.indexing implicit val ec: ExecutionContext = as.dispatcher implicit val tm: Timeout = Timeout(config.kg.keyValueStore.askTimeout) val name = "resolver-indexer" def toResolver(event: Event): F[Option[Resolver]] = fetchProject(event.organization, event.id.parent, event.subject).flatMap { implicit project => resolvers.fetchResolver(event.id).value.map { case Left(err) => log.error(s"Error on event '${event.id.show} (rev = ${event.rev})', cause: '${err.msg}'") None case Right(resolver) => Some(resolver) } } val source: Source[PairMsg[Any], _] = cassandraSource(s"type=${nxv.Resolver.value.show}", name) val flow: Flow[PairMsg[Any], Unit, _] = ProgressFlowElem[F, Any] .collectCast[Event] .groupedWithin(indexing.batch, indexing.batchTimeout) .distinct() .mergeEmit() .mapAsync(toResolver) .collectSome[Resolver] .runAsync(resolverCache.put)() .flow .map(_ => ()) StreamSupervisor.startSingleton(F.delay(source.via(flow)), name) } } // $COVERAGE-ON$
Example 44
Source File: ViewIndexer.scala From nexus with Apache License 2.0 | 5 votes |
package ch.epfl.bluebrain.nexus.kg.indexing import akka.actor.ActorSystem import akka.stream.scaladsl.{Flow, Source} import akka.util.Timeout import cats.effect.{Effect, Timer} import cats.implicits._ import ch.epfl.bluebrain.nexus.admin.client.AdminClient import ch.epfl.bluebrain.nexus.iam.auth.AccessToken import ch.epfl.bluebrain.nexus.kg.cache.{ProjectCache, ViewCache} import ch.epfl.bluebrain.nexus.kg.resources._ import ch.epfl.bluebrain.nexus.service.config.ServiceConfig import ch.epfl.bluebrain.nexus.service.config.Vocabulary.nxv import ch.epfl.bluebrain.nexus.sourcing.projections.ProgressFlow.{PairMsg, ProgressFlowElem} import ch.epfl.bluebrain.nexus.sourcing.projections._ import com.typesafe.scalalogging.Logger import scala.concurrent.ExecutionContext // $COVERAGE-OFF$ object ViewIndexer { implicit private val log = Logger[ViewIndexer.type] def start[F[_]: Timer](views: Views[F], viewCache: ViewCache[F])(implicit projectCache: ProjectCache[F], F: Effect[F], as: ActorSystem, projectInitializer: ProjectInitializer[F], adminClient: AdminClient[F], config: ServiceConfig ): StreamSupervisor[F, Unit] = { implicit val authToken: Option[AccessToken] = config.serviceAccount.credentials implicit val indexing: IndexingConfig = config.kg.keyValueStore.indexing implicit val ec: ExecutionContext = as.dispatcher implicit val tm: Timeout = Timeout(config.kg.keyValueStore.askTimeout) val name = "view-indexer" def toView(event: Event): F[Option[View]] = fetchProject(event.organization, event.id.parent, event.subject).flatMap { implicit project => views.fetchView(event.id).value.map { case Left(err) => log.error(s"Error on event '${event.id.show} (rev = ${event.rev})', cause: '${err.msg}'") None case Right(view) => Some(view) } } val source: Source[PairMsg[Any], _] = cassandraSource(s"type=${nxv.View.value.show}", name) val flow: Flow[PairMsg[Any], Unit, _] = ProgressFlowElem[F, Any] .collectCast[Event] .groupedWithin(indexing.batch, indexing.batchTimeout) .distinct() .mergeEmit() .mapAsync(toView) .collectSome[View] .runAsync(viewCache.put)() .flow .map(_ => ()) StreamSupervisor.startSingleton(F.delay(source.via(flow)), name) } } // $COVERAGE-ON$
Example 45
Source File: ResolverCache.scala From nexus with Apache License 2.0 | 5 votes |
package ch.epfl.bluebrain.nexus.kg.cache import java.util.UUID import java.util.concurrent.ConcurrentHashMap import akka.actor.ActorSystem import cats.Monad import cats.effect.{Effect, Timer} import cats.implicits._ import ch.epfl.bluebrain.nexus.commons.cache.{KeyValueStore, KeyValueStoreConfig} import ch.epfl.bluebrain.nexus.kg.cache.Cache._ import ch.epfl.bluebrain.nexus.kg.resolve.Resolver import ch.epfl.bluebrain.nexus.kg.resources.ProjectIdentifier.ProjectRef import ch.epfl.bluebrain.nexus.rdf.Iri.AbsoluteIri class ResolverCache[F[_]: Effect: Timer] private (projectToCache: ConcurrentHashMap[UUID, ResolverProjectCache[F]])( implicit as: ActorSystem, config: KeyValueStoreConfig ) { private class ResolverProjectCache[F[_]: Monad] private (store: KeyValueStore[F, AbsoluteIri, Resolver]) extends Cache[F, AbsoluteIri, Resolver](store) { implicit private val ordering: Ordering[Resolver] = Ordering.by(_.priority) def get: F[List[Resolver]] = store.values.map(_.toList.sorted) def put(resolver: Resolver): F[Unit] = if (resolver.deprecated) store.remove(resolver.id) else store.put(resolver.id, resolver) } private object ResolverProjectCache { def apply[F[_]: Effect: Timer]( project: ProjectRef )(implicit as: ActorSystem, config: KeyValueStoreConfig): ResolverProjectCache[F] = new ResolverProjectCache(KeyValueStore.distributed(s"resolver-${project.id}", (_, resolver) => resolver.rev)) } object ResolverCache { def apply[F[_]: Effect: Timer](implicit as: ActorSystem, config: KeyValueStoreConfig): ResolverCache[F] = new ResolverCache(new ConcurrentHashMap[UUID, ResolverProjectCache[F]]()) }
Example 46
Source File: StorageCache.scala From nexus with Apache License 2.0 | 5 votes |
package ch.epfl.bluebrain.nexus.kg.cache import java.time.{Clock, Instant} import java.util.UUID import java.util.concurrent.ConcurrentHashMap import akka.actor.ActorSystem import cats.Monad import cats.effect.{Effect, Timer} import cats.implicits._ import ch.epfl.bluebrain.nexus.commons.cache.{KeyValueStore, KeyValueStoreConfig} import ch.epfl.bluebrain.nexus.kg.RevisionedValue import ch.epfl.bluebrain.nexus.kg.cache.Cache._ import ch.epfl.bluebrain.nexus.kg.cache.StorageProjectCache._ import ch.epfl.bluebrain.nexus.kg.resources.ProjectIdentifier.ProjectRef import ch.epfl.bluebrain.nexus.kg.storage.Storage import ch.epfl.bluebrain.nexus.rdf.Iri.AbsoluteIri class StorageCache[F[_]: Effect: Timer] private (projectToCache: ConcurrentHashMap[UUID, StorageProjectCache[F]])( implicit as: ActorSystem, config: KeyValueStoreConfig, clock: Clock ) { private class StorageProjectCache[F[_]: Monad] private (store: KeyValueStore[F, AbsoluteIri, RevisionedStorage]) extends Cache[F, AbsoluteIri, RevisionedStorage](store) { implicit private val ordering: Ordering[RevisionedStorage] = Ordering.by((s: RevisionedStorage) => s.rev).reverse implicit private def revisioned(storage: Storage)(implicit instant: Instant): RevisionedStorage = RevisionedValue(instant.toEpochMilli, storage) def get: F[List[Storage]] = store.values.map(_.toList.sorted.map(_.value)) def getDefault: F[Option[Storage]] = get.map(_.collectFirst { case storage if storage.default => storage }) def getBy(id: AbsoluteIri): F[Option[Storage]] = get(id).map(_.collectFirst { case RevisionedValue(_, storage) if storage.id == id => storage }) def put(storage: Storage)(implicit instant: Instant): F[Unit] = if (storage.deprecated) store.remove(storage.id) else store.put(storage.id, storage) } private object StorageProjectCache { type RevisionedStorage = RevisionedValue[Storage] def apply[F[_]: Effect: Timer]( project: ProjectRef )(implicit as: ActorSystem, config: KeyValueStoreConfig): StorageProjectCache[F] = new StorageProjectCache( KeyValueStore.distributed(s"storage-${project.id}", (_, storage) => storage.value.rev) ) } object StorageCache { def apply[F[_]: Timer: Effect](implicit as: ActorSystem, config: KeyValueStoreConfig, clock: Clock): StorageCache[F] = new StorageCache(new ConcurrentHashMap[UUID, StorageProjectCache[F]]()) }
Example 47
Source File: ArchiveCacheSpec.scala From nexus with Apache License 2.0 | 5 votes |
package ch.epfl.bluebrain.nexus.kg.archives import java.time.{Clock, Instant, ZoneId} import cats.effect.{IO, Timer} import ch.epfl.bluebrain.nexus.admin.client.types.Project import ch.epfl.bluebrain.nexus.commons.test.ActorSystemFixture import ch.epfl.bluebrain.nexus.commons.test.io.IOOptionValues import ch.epfl.bluebrain.nexus.iam.types.Identity.Anonymous import ch.epfl.bluebrain.nexus.kg.TestHelper import ch.epfl.bluebrain.nexus.kg.archives.Archive.{File, Resource, ResourceDescription} import ch.epfl.bluebrain.nexus.kg.resources.Id import ch.epfl.bluebrain.nexus.kg.resources.syntax._ import ch.epfl.bluebrain.nexus.service.config.Settings import org.scalatest.concurrent.Eventually import org.scalatest.matchers.should.Matchers import org.scalatest.wordspec.AnyWordSpecLike import scala.concurrent.duration._ class ArchiveCacheSpec extends ActorSystemFixture("ArchiveCacheSpec", true) with TestHelper with AnyWordSpecLike with Matchers with IOOptionValues with Eventually { implicit override def patienceConfig: PatienceConfig = PatienceConfig(10.second, 50.milliseconds) private val appConfig = Settings(system).serviceConfig implicit private val config = appConfig.copy(kg = appConfig.kg.copy(archives = appConfig.kg.archives.copy(cacheInvalidateAfter = 500.millis, maxResources = 100)) ) implicit private val timer: Timer[IO] = IO.timer(system.dispatcher) implicit private val archivesCfg = config.kg.archives private val cache: ArchiveCache[IO] = ArchiveCache[IO].unsafeToFuture().futureValue implicit private val clock = Clock.fixed(Instant.EPOCH, ZoneId.systemDefault()) private val instant = clock.instant() def randomProject() = { val instant = Instant.EPOCH // format: off Project(genIri, genString(), genString(), None, genIri, genIri, Map.empty, genUUID, genUUID, 1L, false, instant, genIri, instant, genIri) // format: on } "An archive cache" should { "write and read an Archive" in { val resId = Id(randomProject().ref, genIri) val resource1 = Resource(genIri, randomProject(), None, None, originalSource = true, None) val file1 = File(genIri, randomProject(), None, None, None) val archive = Archive(resId, instant, Anonymous, Set(resource1, file1)) val _ = cache.put(archive).value.some cache.get(archive.resId).value.some shouldEqual archive } "read a non existing resource" in { val resId = Id(randomProject().ref, genIri) cache.get(resId).value.ioValue shouldEqual None } "read after timeout" in { val resId = Id(randomProject().ref, genIri) val set = Set[ResourceDescription](Resource(genIri, randomProject(), None, None, originalSource = true, None)) val archive = Archive(resId, instant, Anonymous, set) val _ = cache.put(archive).value.some val time = System.currentTimeMillis() cache.get(resId).value.some shouldEqual archive eventually { cache.get(resId).value.ioValue shouldEqual None } val diff = System.currentTimeMillis() - time diff should be > config.kg.archives.cacheInvalidateAfter.toMillis diff should be < config.kg.archives.cacheInvalidateAfter.toMillis + 300 } } }
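The spec above brackets eventually with System.currentTimeMillis to check how long invalidation took; the same elapsed-time measurement can be written against the Clock carried by Timer[IO]. A small hypothetical helper, not part of the original spec:

import java.util.concurrent.TimeUnit.MILLISECONDS
import cats.effect.{IO, Timer}
import scala.concurrent.ExecutionContext

implicit val timer: Timer[IO] = IO.timer(ExecutionContext.global)

// Run an action and return its result together with the elapsed milliseconds.
def timed[A](io: IO[A]): IO[(A, Long)] =
  for {
    start <- timer.clock.monotonic(MILLISECONDS)
    a     <- io
    end   <- timer.clock.monotonic(MILLISECONDS)
  } yield (a, end - start)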
Example 48
Source File: Main.scala From nexus with Apache License 2.0 | 5 votes |
package ch.epfl.bluebrain.nexus.cli import cats.Parallel import cats.effect.{ContextShift, ExitCode, Timer} import cats.syntax.all._ import monix.catnap.SchedulerEffect import monix.eval.{Task, TaskApp} // $COVERAGE-OFF$ object Main extends TaskApp { override def run(args: List[String]): Task[ExitCode] = { implicit val cs: ContextShift[Task] = SchedulerEffect.contextShift[Task](scheduler) implicit val tm: Timer[Task] = SchedulerEffect.timer[Task](scheduler) implicit val pl: Parallel[Task] = Task.catsParallel Cli(args, sys.env).recoverWith { case err: CliError => Task.delay(println(err.show)).as(ExitCode.Error) } } }
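SchedulerEffect bridges the Monix Scheduler to cats-effect's ContextShift and Timer, after which Timer-based combinators work for Task just as for IO. A small hypothetical fragment; the delay and message are illustrative:

import cats.effect.Timer
import monix.catnap.SchedulerEffect
import monix.eval.Task
import monix.execution.Scheduler
import scala.concurrent.duration._

// Sleep through the cats-effect Timer derived from a Monix Scheduler, then log.
def delayedGreeting(scheduler: Scheduler): Task[Unit] = {
  implicit val tm: Timer[Task] = SchedulerEffect.timer[Task](scheduler)
  Timer[Task].sleep(500.millis).flatMap(_ => Task.delay(println("ready")))
}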
Example 49
Source File: Influx.scala From nexus with Apache License 2.0 | 5 votes |
package ch.epfl.bluebrain.nexus.cli.modules.influx import cats.Parallel import cats.effect.{ConcurrentEffect, ContextShift, ExitCode, Timer} import cats.implicits._ import ch.epfl.bluebrain.nexus.cli.AbstractCommand import com.monovore.decline.Opts import distage.TagK import izumi.distage.model.recursive.LocatorRef final class Influx[F[_]: Timer: Parallel: ContextShift: TagK](locatorOpt: Option[LocatorRef])(implicit F: ConcurrentEffect[F] ) extends AbstractCommand[F](locatorOpt) { def subcommand: Opts[F[ExitCode]] = Opts.subcommand("influxdb", "influxDB projection.") { run } def run: Opts[F[ExitCode]] = Opts.subcommand("run", "Runs the influxDB projection") { locatorResource.map { _.use { locator => locator.get[InfluxProjection[F]].run.as(ExitCode.Success) } } } } object Influx { final def apply[F[_]: TagK: ConcurrentEffect: Timer: Parallel: ContextShift]( locatorOpt: Option[LocatorRef] = None ): Influx[F] = new Influx[F](locatorOpt) }
Example 50
Source File: Postgres.scala From nexus with Apache License 2.0 | 5 votes |
package ch.epfl.bluebrain.nexus.cli.modules.postgres import cats.Parallel import cats.effect.{ConcurrentEffect, ContextShift, ExitCode, Timer} import cats.implicits._ import ch.epfl.bluebrain.nexus.cli.AbstractCommand import com.monovore.decline.Opts import distage.TagK import izumi.distage.model.recursive.LocatorRef final class Postgres[F[_]: Timer: Parallel: ContextShift: TagK](locatorOpt: Option[LocatorRef])(implicit F: ConcurrentEffect[F] ) extends AbstractCommand[F](locatorOpt) { def subcommand: Opts[F[ExitCode]] = Opts.subcommand("postgres", "Postgres database projection.") { run } def run: Opts[F[ExitCode]] = Opts.subcommand("run", "Runs the postgres database projection") { locatorResource.map { _.use { locator => locator.get[PostgresProjection[F]].run.as(ExitCode.Success) } } } } object Postgres { final def apply[F[_]: TagK: ConcurrentEffect: Timer: Parallel: ContextShift]( locatorOpt: Option[LocatorRef] = None ): Postgres[F] = new Postgres[F](locatorOpt) }
Example 51
Source File: PostgresModule.scala From nexus with Apache License 2.0 | 5 votes |
package ch.epfl.bluebrain.nexus.cli.modules.postgres import cats.Parallel import cats.effect.{ConcurrentEffect, ContextShift, Timer} import ch.epfl.bluebrain.nexus.cli.config.AppConfig import distage.{ModuleDef, TagK} import doobie.util.transactor.Transactor import izumi.distage.model.definition.StandardAxis.Repo import izumi.distage.model.recursive.LocatorRef final class PostgresModule[F[_]: Parallel: ContextShift: ConcurrentEffect: Timer: TagK] extends ModuleDef { make[Postgres[F]].tagged(Repo.Prod).from { locatorRef: LocatorRef => Postgres[F](Some(locatorRef)) } make[Transactor[F]].tagged(Repo.Prod).from { (cfg: AppConfig) => Transactor.fromDriverManager[F]( "org.postgresql.Driver", cfg.postgres.jdbcUrl, cfg.postgres.username, cfg.postgres.password ) } make[PostgresProjection[F]].tagged(Repo.Prod) } object PostgresModule { final def apply[F[_]: Parallel: ContextShift: ConcurrentEffect: Timer: TagK]: PostgresModule[F] = new PostgresModule[F] }
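Once the module provides a Transactor[F], doobie queries run through it with transact. A minimal hypothetical usage; the table name and the implicit ContextShift are assumptions for the sketch, not part of this module:

import cats.effect.IO
import doobie.implicits._
import doobie.util.transactor.Transactor

// Assumes an implicit ContextShift[IO] is in scope, as Transactor.fromDriverManager requires.
def countOffsets(xa: Transactor[IO]): IO[Int] =
  sql"select count(*) from offsets".query[Int].unique.transact(xa)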
Example 52
Source File: Config.scala From nexus with Apache License 2.0 | 5 votes |
package ch.epfl.bluebrain.nexus.cli.modules.config import cats.Parallel import cats.effect.{ConcurrentEffect, ContextShift, ExitCode, Timer} import cats.implicits._ import ch.epfl.bluebrain.nexus.cli.config.AppConfig import ch.epfl.bluebrain.nexus.cli.{AbstractCommand, Console} import com.monovore.decline.Opts import com.typesafe.config.ConfigRenderOptions import distage.TagK import izumi.distage.model.recursive.LocatorRef import pureconfig.ConfigWriter final class Config[F[_]: Timer: Parallel: ContextShift: TagK](locatorOpt: Option[LocatorRef])(implicit F: ConcurrentEffect[F] ) extends AbstractCommand[F](locatorOpt) { def subcommand: Opts[F[ExitCode]] = Opts.subcommand("config", "Read or write the tool configuration.") { show } private def show: Opts[F[ExitCode]] = Opts.subcommand("show", "Print the current configuration") { locatorResource.map { _.use { locator => val console = locator.get[Console[F]] val cfg = locator.get[AppConfig] console.println(renderConfig(cfg)).as(ExitCode.Success) } } } private def renderConfig(cfg: AppConfig): String = { val opts = ConfigRenderOptions.concise().setComments(false).setJson(false).setFormatted(true) ConfigWriter[AppConfig].to(cfg).render(opts) } } object Config { final def apply[F[_]: TagK: ConcurrentEffect: Timer: Parallel: ContextShift]( locatorOpt: Option[LocatorRef] = None ): Config[F] = new Config[F](locatorOpt) }
Example 53
Source File: CliModule.scala From nexus with Apache License 2.0 | 5 votes |
package ch.epfl.bluebrain.nexus.cli import cats.effect.concurrent.Ref import cats.effect.{ConcurrentEffect, Timer} import cats.implicits._ import ch.epfl.bluebrain.nexus.cli.clients._ import ch.epfl.bluebrain.nexus.cli.config.AppConfig import ch.epfl.bluebrain.nexus.cli.sse.{OrgLabel, OrgUuid, ProjectLabel, ProjectUuid} import distage.{ModuleDef, TagK} import izumi.distage.model.definition.StandardAxis.Repo import org.http4s.client.Client import org.http4s.client.blaze.BlazeClientBuilder import scala.concurrent.ExecutionContext import scala.concurrent.duration.Duration final class CliModule[F[_]: ConcurrentEffect: Timer: TagK] extends ModuleDef { make[Console[F]].tagged(Repo.Prod).from(Console[F]) make[Client[F]].tagged(Repo.Prod).fromResource { BlazeClientBuilder[F](ExecutionContext.global).withIdleTimeout(Duration.Inf).resource } make[ProjectClient[F]].tagged(Repo.Prod).fromEffect { (cfg: AppConfig, client: Client[F], console: Console[F]) => Ref.of[F, Map[(OrgUuid, ProjectUuid), (OrgLabel, ProjectLabel)]](Map.empty).map { cache => ProjectClient(client, cfg.env, cache, console) } } make[SparqlClient[F]].tagged(Repo.Prod).from { (cfg: AppConfig, client: Client[F], console: Console[F]) => SparqlClient(client, cfg.env, console) } make[EventStreamClient[F]].tagged(Repo.Prod).from { (cfg: AppConfig, client: Client[F], pc: ProjectClient[F]) => EventStreamClient(client, pc, cfg.env) } make[InfluxClient[F]].tagged(Repo.Prod).from { (cfg: AppConfig, client: Client[F], console: Console[F]) => InfluxClient(client, cfg, console) } } object CliModule { final def apply[F[_]: ConcurrentEffect: Timer: TagK]: CliModule[F] = new CliModule[F] }
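The Client[F] wired above is produced by BlazeClientBuilder as a Resource; a minimal hypothetical usage that acquires it, performs one request and releases it (the URL is illustrative):

import cats.effect.{ContextShift, IO}
import org.http4s.client.blaze.BlazeClientBuilder
import scala.concurrent.ExecutionContext

// ConcurrentEffect[IO], which BlazeClientBuilder needs, is derived from this ContextShift.
implicit val cs: ContextShift[IO] = IO.contextShift(ExecutionContext.global)

// Acquire the client, perform one GET, and release it when the Resource closes.
val fetchRoot: IO[String] =
  BlazeClientBuilder[IO](ExecutionContext.global).resource.use { client =>
    client.expect[String]("http://localhost:8080/")
  }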
Example 54
Source File: AbstractHttpClient.scala From nexus with Apache License 2.0 | 5 votes |
package ch.epfl.bluebrain.nexus.cli.clients import cats.effect.{Sync, Timer} import cats.implicits._ import ch.epfl.bluebrain.nexus.cli.CliError.ClientError import ch.epfl.bluebrain.nexus.cli.CliError.ClientError.{SerializationError, Unexpected} import ch.epfl.bluebrain.nexus.cli.config.EnvConfig import ch.epfl.bluebrain.nexus.cli.{logRetryErrors, ClientErrOr, Console} import io.circe.Decoder import org.http4s.circe.CirceEntityDecoder._ import org.http4s.client.Client import org.http4s.{Request, Response} import retry.CatsEffect._ import retry.RetryPolicy import retry.syntax.all._ import scala.reflect.ClassTag import scala.util.control.NonFatal class AbstractHttpClient[F[_]: Timer](client: Client[F], env: EnvConfig)(implicit protected val F: Sync[F], protected val console: Console[F] ) { protected val retry = env.httpClient.retry protected def successCondition[A] = retry.condition.notRetryFromEither[A] _ implicit protected val retryPolicy: RetryPolicy[F] = retry.retryPolicy implicit protected def logOnError[A] = logRetryErrors[F, A]("interacting with an HTTP API") protected def executeDiscard[A](req: Request[F], returnValue: => A): F[ClientErrOr[A]] = execute(req, _.body.compile.drain.as(Right(returnValue))) protected def executeParse[A: Decoder](req: Request[F])(implicit A: ClassTag[A]): F[ClientErrOr[A]] = execute( req, _.attemptAs[A].value.map( _.leftMap(err => SerializationError(err.message, s"The response payload was not of type '${A.runtimeClass.getSimpleName}'") ) ) ) private def execute[A](req: Request[F], f: Response[F] => F[ClientErrOr[A]]): F[ClientErrOr[A]] = client .fetch(req)(ClientError.errorOr[F, A](r => f(r))) .recoverWith { case NonFatal(err) => F.delay(Left(Unexpected(Option(err.getMessage).getOrElse("").take(30)))) } .retryingM(successCondition[A]) }
Example 55
Source File: SparqlClient.scala From nexus with Apache License 2.0 | 5 votes |
package ch.epfl.bluebrain.nexus.cli.clients import cats.effect.{Sync, Timer} import ch.epfl.bluebrain.nexus.cli.config.EnvConfig import ch.epfl.bluebrain.nexus.cli.sse.{OrgLabel, ProjectLabel} import ch.epfl.bluebrain.nexus.cli.{ClientErrOr, Console} import org.http4s._ import org.http4s.client.Client import org.http4s.headers.`Content-Type` trait SparqlClient[F[_]] { final def apply[F[_]: Sync: Timer](client: Client[F], env: EnvConfig, console: Console[F]): SparqlClient[F] = { implicit val c: Console[F] = console new LiveSparqlClient[F](client, env) } final val `application/sparql-query`: MediaType = new MediaType("application", "sparql-query") final private class LiveSparqlClient[F[_]: Timer: Console: Sync](client: Client[F], env: EnvConfig) extends AbstractHttpClient(client, env) with SparqlClient[F] { override def query( org: OrgLabel, proj: ProjectLabel, view: Option[Uri], queryStr: String ): F[ClientErrOr[SparqlResults]] = { val uri = env.sparql(org, proj, view.getOrElse(env.defaultSparqlView)) val headers = Headers(env.authorizationHeader.toList) val req = Request[F](method = Method.POST, uri = uri, headers = headers) .withEntity(queryStr) .withContentType(`Content-Type`(`application/sparql-query`)) executeParse[SparqlResults](req) } } }
Example 56
Source File: ProjectClient.scala From nexus with Apache License 2.0 | 5 votes |
package ch.epfl.bluebrain.nexus.cli.clients import cats.effect.concurrent.Ref import cats.effect.{Sync, Timer} import cats.implicits._ import ch.epfl.bluebrain.nexus.cli.config.EnvConfig import ch.epfl.bluebrain.nexus.cli.sse.{OrgLabel, OrgUuid, ProjectLabel, ProjectUuid} import ch.epfl.bluebrain.nexus.cli.{ClientErrOr, Console} import io.circe.Decoder import io.circe.generic.semiauto.deriveDecoder import org.http4s.client.Client import org.http4s.{Headers, Request} trait ProjectClient[F[_]] { final def apply[F[_]: Sync: Timer]( client: Client[F], env: EnvConfig, cache: Ref[F, Map[(OrgUuid, ProjectUuid), (OrgLabel, ProjectLabel)]], console: Console[F] ): ProjectClient[F] = { implicit val c: Console[F] = console new LiveProjectClient[F](client, env, cache) } private class LiveProjectClient[F[_]: Timer: Console: Sync]( client: Client[F], env: EnvConfig, cache: Ref[F, Map[(OrgUuid, ProjectUuid), (OrgLabel, ProjectLabel)]] ) extends AbstractHttpClient[F](client, env) with ProjectClient[F] { override def labels(org: OrgUuid, proj: ProjectUuid): F[ClientErrOr[(OrgLabel, ProjectLabel)]] = cache.get.flatMap { map => map.get((org, proj)) match { // value in cache, return case Some(value) => F.pure(Right(value)) // value not in cache, fetch, update and return case None => get(org, proj).flatMap { // propagate error case l @ Left(_) => F.pure(l) // success, update cache and return case r @ Right(value) => cache.modify(m => (m.updated((org, proj), value), value)) *> F.pure(r) } } } private def get(org: OrgUuid, proj: ProjectUuid): F[ClientErrOr[(OrgLabel, ProjectLabel)]] = { val uri = env.project(org, proj) val req = Request[F](uri = uri, headers = Headers(env.authorizationHeader.toList)) executeParse[NexusAPIProject](req).map { case Right(NexusAPIProject(orgLabel, projectLabel)) => Right((orgLabel, projectLabel)) case Left(err) => Left(err) } } } final private[ProjectClient] case class NexusAPIProject(`_organizationLabel`: OrgLabel, `_label`: ProjectLabel) private[ProjectClient] object NexusAPIProject { implicit val nexusAPIProjectDecoder: Decoder[NexusAPIProject] = deriveDecoder[NexusAPIProject] } }
Example 57
Source File: InfluxClient.scala From nexus with Apache License 2.0 | 5 votes |
package ch.epfl.bluebrain.nexus.cli.clients import cats.effect.{Sync, Timer} import cats.implicits._ import ch.epfl.bluebrain.nexus.cli._ import ch.epfl.bluebrain.nexus.cli.config.influx.InfluxConfig import ch.epfl.bluebrain.nexus.cli.config.{AppConfig, EnvConfig} import io.circe.Json import org.http4s.client.Client import org.http4s.{Method, Request, UrlForm} trait InfluxClient[F[_]] { final def apply[F[_]: Sync: Timer]( client: Client[F], config: AppConfig, console: Console[F] ): InfluxClient[F] = { implicit val c: Console[F] = console new LiveInfluxDbClient[F](client, config.influx, config.env) } }
Example 58
Source File: AbstractCommand.scala From nexus with Apache License 2.0 | 5 votes |
package ch.epfl.bluebrain.nexus.cli import cats.Parallel import cats.effect.{ConcurrentEffect, ContextShift, Resource, Timer} import cats.implicits._ import ch.epfl.bluebrain.nexus.cli.CliOpts._ import ch.epfl.bluebrain.nexus.cli.config.AppConfig import ch.epfl.bluebrain.nexus.cli.modules.config.ConfigModule import ch.epfl.bluebrain.nexus.cli.modules.influx.InfluxModule import ch.epfl.bluebrain.nexus.cli.modules.postgres.PostgresModule import com.monovore.decline.Opts import distage.{Injector, TagK} import izumi.distage.model.Locator import izumi.distage.model.definition.StandardAxis.Repo import izumi.distage.model.definition.{Activation, Module, ModuleDef} import izumi.distage.model.plan.Roots import izumi.distage.model.recursive.LocatorRef abstract class AbstractCommand[F[_]: TagK: Timer: ContextShift: Parallel](locatorOpt: Option[LocatorRef])(implicit F: ConcurrentEffect[F] ) { protected def locatorResource: Opts[Resource[F, Locator]] = locatorOpt match { case Some(value) => Opts(Resource.make(F.delay(value.get))(_ => F.unit)) case None => (envConfig.orNone, postgresConfig.orNone, influxConfig.orNone, token.orNone).mapN { case (e, p, i, t) => val res: Resource[F, Module] = Resource.make({ AppConfig.load[F](e, p, i, t).flatMap[Module] { case Left(err) => F.raiseError(err) case Right(value) => val effects = EffectModule[F] val cli = CliModule[F] val config = ConfigModule[F] val postgres = PostgresModule[F] val influx = InfluxModule[F] val modules = effects ++ cli ++ config ++ postgres ++ influx ++ new ModuleDef { make[AppConfig].from(value) } F.pure(modules) } })(_ => F.unit) res.flatMap { modules => Injector(Activation(Repo -> Repo.Prod)).produceF[F](modules, Roots.Everything).toCats } } } }
Example 59
Source File: InfluxDocker.scala From nexus with Apache License 2.0 | 5 votes |
package ch.epfl.bluebrain.nexus.cli.influx import cats.effect.{ConcurrentEffect, ContextShift, Timer} import distage.TagK import izumi.distage.docker.Docker.{ContainerConfig, DockerPort} import izumi.distage.docker.modules.DockerSupportModule import izumi.distage.docker.{ContainerDef, Docker} import izumi.distage.model.definition.ModuleDef import org.http4s.Uri object InfluxDocker extends ContainerDef { val primaryPort: DockerPort = DockerPort.TCP(8086) override def config: InfluxDocker.Config = ContainerConfig( image = "library/influxdb:1.8.0", ports = Seq(primaryPort), env = Map("INFLUXDB_REPORTING_DISABLED" -> "true", "INFLUXDB_HTTP_FLUX_ENABLED" -> "true"), reuse = true ) class Module[F[_]: ConcurrentEffect: ContextShift: Timer: TagK] extends ModuleDef { make[InfluxDocker.Container].fromResource { InfluxDocker.make[F] } make[InfluxHostConfig].from { docker: InfluxDocker.Container => val knownAddress = docker.availablePorts.availablePorts(primaryPort).head InfluxHostConfig(knownAddress.hostV4, knownAddress.port) } // add docker dependencies and override default configuration include(new DockerSupportModule[F] overridenBy new ModuleDef { make[Docker.ClientConfig].from { Docker.ClientConfig( readTimeoutMs = 60000, // long timeout for gh actions connectTimeoutMs = 500, allowReuse = false, useRemote = false, useRegistry = true, remote = None, registry = None ) } }) } final case class InfluxHostConfig(host: String, port: Int) { def endpoint: Uri = Uri.unsafeFromString(s"http://$host:$port") } }
Example 60
Source File: PostgresDocker.scala From nexus with Apache License 2.0 | 5 votes |
package ch.epfl.bluebrain.nexus.cli.postgres import cats.effect.{ConcurrentEffect, ContextShift, Timer} import distage.TagK import izumi.distage.docker.Docker.{ContainerConfig, DockerPort} import izumi.distage.docker.modules.DockerSupportModule import izumi.distage.docker.{ContainerDef, Docker} import izumi.distage.model.definition.ModuleDef object PostgresDocker extends ContainerDef { val primaryPort: DockerPort = DockerPort.TCP(5432) val password: String = "postgres" override def config: PostgresDocker.Config = ContainerConfig( image = "library/postgres:12.2", ports = Seq(primaryPort), env = Map("POSTGRES_PASSWORD" -> password), reuse = true ) class Module[F[_]: ConcurrentEffect: ContextShift: Timer: TagK] extends ModuleDef { make[PostgresDocker.Container].fromResource { PostgresDocker.make[F] } make[PostgresHostConfig].from { docker: PostgresDocker.Container => val knownAddress = docker.availablePorts.availablePorts(primaryPort).head PostgresHostConfig(knownAddress.hostV4, knownAddress.port) } // add docker dependencies and override default configuration include(new DockerSupportModule[F] overridenBy new ModuleDef { make[Docker.ClientConfig].from { Docker.ClientConfig( readTimeoutMs = 60000, // long timeout for gh actions connectTimeoutMs = 500, allowReuse = false, useRemote = false, useRegistry = true, remote = None, registry = None ) } }) } final case class PostgresHostConfig(host: String, port: Int) }
Example 61
Source File: TestCliModule.scala From nexus with Apache License 2.0 | 5 votes |
package ch.epfl.bluebrain.nexus.cli.dummies import cats.Parallel import cats.effect.{ConcurrentEffect, ContextShift, Timer} import ch.epfl.bluebrain.nexus.cli.clients.{EventStreamClient, ProjectClient, SparqlClient} import ch.epfl.bluebrain.nexus.cli.sse.Event import ch.epfl.bluebrain.nexus.cli.sse.OrgUuid.unsafe._ import ch.epfl.bluebrain.nexus.cli.sse.ProjectUuid.unsafe._ import ch.epfl.bluebrain.nexus.cli.{Cli, Console} import distage.{ModuleDef, TagK} import izumi.distage.model.definition.StandardAxis.Repo import izumi.distage.model.recursive.LocatorRef final class TestCliModule[F[_]: Parallel: ContextShift: Timer: ConcurrentEffect: TagK](events: List[Event]) extends ModuleDef { make[TestConsole[F]].tagged(Repo.Dummy).fromEffect(TestConsole[F]) make[Console[F]].tagged(Repo.Dummy).from { tc: TestConsole[F] => tc } make[ProjectClient[F]] .tagged(Repo.Dummy) .from( new TestProjectClient[F]( // matches the uuids from the events.json file used for testing Map( ( ("e6a84231-5df7-41cf-9d18-286892d119ec", "d576d282-1049-4a0c-9240-ff34b5e879f2"), ("tutorialnexus", "datamodels") ), ( ("a605b71a-377d-4df3-95f8-923149d04106", "a7d69059-8d1d-4dac-800f-90b6b6ab94ee"), ("bbp", "atlas") ) ) ) ) make[SparqlClient[F]].tagged(Repo.Dummy).fromEffect { TestSparqlClient[F](events) } make[EventStreamClient[F]].tagged(Repo.Dummy).from { pc: ProjectClient[F] => new TestEventStreamClient[F](events, pc) } make[Cli[F]].tagged(Repo.Dummy).from { locatorRef: LocatorRef => new Cli[F](Some(locatorRef)) } } object TestCliModule { final def apply[F[_]: Parallel: ContextShift: Timer: ConcurrentEffect: TagK](events: List[Event]): TestCliModule[F] = new TestCliModule[F](events) }
Example 62
Source File: ProcessWirings.scala From ticket-booking-aecor with Apache License 2.0 | 5 votes |
package ru.pavkin.booking import aecor.data._ import aecor.distributedprocessing.DistributedProcessing import aecor.journal.postgres.Offset import akka.actor.ActorSystem import cats.effect.{ Clock, ConcurrentEffect, Timer } import cats.implicits._ import cats.temp.par._ import io.chrisdavenport.log4cats.slf4j.Slf4jLogger import ru.pavkin.booking.booking.entity.{ BookingEvent, EventMetadata, EventsourcedBooking } import ru.pavkin.booking.booking.process.{ BookingPaymentProcess, _ } import ru.pavkin.booking.booking.view.BookingViewProjectionWiring import ru.pavkin.booking.common.models.BookingKey import scala.concurrent.duration._ final class ProcessWirings[F[_]: Timer: ConcurrentEffect: Par](system: ActorSystem, clock: Clock[F], postgresWirings: PostgresWirings[F], kafkaWirings: KafkaWirings[F], serviceWirings: ServiceWirings[F], entityWirings: EntityWirings[F]) { import serviceWirings._ import postgresWirings._ import kafkaWirings._ import entityWirings._ val distributedProcessing = DistributedProcessing(system) val bookingQueries = bookingsJournal.queries(journals.booking.pollingInterval).withOffsetStore(offsetStore) def bookingEvents( eventTag: EventTag, consumerId: ConsumerId ): fs2.Stream[F, Committable[F, (Offset, EntityEvent[BookingKey, Enriched[EventMetadata, BookingEvent]])]] = fs2.Stream.force(bookingQueries.eventsByTag(eventTag, consumerId)) val bookingViewProjection = new BookingViewProjectionWiring( bookingViewRepo, bookingEvents(_, _).map(_.map(_._2)), EventsourcedBooking.tagging ) val bookingConfirmationProcess = new BookingConfirmationProcess( bookings, confirmationService, Slf4jLogger.unsafeFromName("BookingConfirmationProcess") ) val bookingConfirmationProcessWiring = new BookingConfirmationProcessWiring( bookingEvents(_, _).map(_.map(_._2.map(_.event))), EventsourcedBooking.tagging, bookingConfirmationProcess ) val bookingExpirationProcess = new BookingExpirationProcess(bookings, bookingViewRepo) val bookingExpirationProcessWiring = new BookingExpirationProcessWiring(clock, frequency = 30.seconds, bookingExpirationProcess) val bookingPaymentProcess = new BookingPaymentProcess(bookings, Slf4jLogger.unsafeFromName("BookingPaymentProcess")) val bookingPaymentProcessWiring = new BookingPaymentProcessWiring(paymentReceivedEventStream, bookingPaymentProcess) // Launcher val launchProcesses: F[List[DistributedProcessing.KillSwitch[F]]] = List( "BookingViewProjectionProcessing" -> bookingViewProjection.processes, "BookingConfirmationProcessing" -> bookingConfirmationProcessWiring.processes, "BookingExpirationProcessing" -> bookingExpirationProcessWiring.processes, "BookingPaymentProcessing" -> bookingPaymentProcessWiring.processes ).parTraverse { case (name, processes) => distributedProcessing.start(name, processes) } }
Example 63
Source File: TimedOut.scala From ticket-booking-aecor with Apache License 2.0 | 5 votes |
package ru.pavkin.booking.common.effect

import cats.effect.{Concurrent, Timer}
import cats.~>

import scala.concurrent.TimeoutException
import scala.concurrent.duration.FiniteDuration

object TimedOut {

  def apply[F[_]](timeout: FiniteDuration)(implicit timer: Timer[F], F: Concurrent[F]): F ~> F =
    new (F ~> F) {
      def apply[A](fa: F[A]): F[A] =
        Concurrent.timeoutTo(
          fa,
          timeout,
          F.raiseError(new TimeoutException(s"Call timed out after $timeout"))
        )
    }
}
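Usage note: a minimal sketch of applying the natural transformation above, assuming a plain cats-effect 2 IO runtime with its standard Timer and ContextShift instances; slowCall is a hypothetical effect used only to show the timeout firing.

import cats.~>
import cats.effect.{ContextShift, IO, Timer}
import scala.concurrent.ExecutionContext
import scala.concurrent.duration._

implicit val cs: ContextShift[IO] = IO.contextShift(ExecutionContext.global)
implicit val timer: Timer[IO]     = IO.timer(ExecutionContext.global)

// wrap every call in a 2-second budget
val guarded: IO ~> IO = TimedOut[IO](2.seconds)

// hypothetical slow effect
val slowCall: IO[Int] = IO.sleep(5.seconds).map(_ => 42)

// guarded(slowCall).attempt.unsafeRunSync() is expected to be a Left(_: TimeoutException)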
Example 64
Source File: BookingPaymentProcessWiring.scala From ticket-booking-aecor with Apache License 2.0 | 5 votes |
package ru.pavkin.booking.booking.process import aecor.data.{ Committable, ConsumerId } import aecor.distributedprocessing.DistributedProcessing import cats.effect.{ ConcurrentEffect, Timer } import ru.pavkin.booking.common.streaming.Fs2Process import ru.pavkin.payment.event.PaymentReceived import cats.syntax.all._ class BookingPaymentProcessWiring[F[_]: ConcurrentEffect: Timer]( source: ConsumerId => fs2.Stream[F, Committable[F, PaymentReceived]], process: PaymentReceived => F[Unit] ) { val consumerId = ConsumerId("BookingPaymentProcess") val processStream: fs2.Stream[F, Unit] = source(consumerId).evalMap(c => process(c.value) >> c.commit) // Topic has 4 partitions, so we can run up to 4 processes in our cluster def processes: List[DistributedProcessing.Process[F]] = List.fill(4)(Fs2Process(processStream)) }
Example 65
Source File: BookingExpirationProcessWiring.scala From ticket-booking-aecor with Apache License 2.0 | 5 votes |
package ru.pavkin.booking.booking.process

import java.time.Instant
import java.util.concurrent.TimeUnit

import aecor.distributedprocessing.DistributedProcessing
import cats.effect.{Clock, ConcurrentEffect, Timer}
import cats.implicits._
import ru.pavkin.booking.common.streaming.Fs2Process

import scala.concurrent.duration.FiniteDuration

class BookingExpirationProcessWiring[F[_]: ConcurrentEffect: Timer](clock: Clock[F],
                                                                    frequency: FiniteDuration,
                                                                    process: Instant => F[Unit]) {

  val processStream: fs2.Stream[F, Unit] =
    fs2.Stream
      .fixedDelay[F](frequency)
      .evalMap(_ => clock.realTime(TimeUnit.MILLISECONDS).map(Instant.ofEpochMilli))
      .evalMap(process)

  def processes: List[DistributedProcessing.Process[F]] = List(Fs2Process(processStream))
}
Example 66
Source File: EndpointWirings.scala From ticket-booking-aecor with Apache License 2.0 | 5 votes |
package ru.pavkin.booking import cats.effect.{ConcurrentEffect, Timer} import org.http4s.HttpRoutes import org.http4s.implicits._ import org.http4s.server.Router import org.http4s.server.blaze.BlazeServerBuilder import ru.pavkin.booking.booking.endpoint.{BookingRoutes, DefaultBookingEndpoint} import ru.pavkin.booking.config.HttpServer import scala.concurrent.duration.{Duration => _} final class EndpointWirings[F[_] : ConcurrentEffect : Timer]( httpServer: HttpServer, postgresWirings: PostgresWirings[F], entityWirings: EntityWirings[F]) { import entityWirings._ import postgresWirings._ val bookingsEndpoint = new DefaultBookingEndpoint(bookings, bookingViewRepo) val bookingRoutes = new BookingRoutes(bookingsEndpoint) val routes: HttpRoutes[F] = bookingRoutes.routes def launchHttpService: F[Unit] = BlazeServerBuilder[F] .bindHttp(httpServer.port, httpServer.interface) .withHttpApp(Router("/" -> routes).orNotFound) .serve .compile .drain }
Example 67
Source File: StaticLoggerBinder.scala From odin with Apache License 2.0 | 5 votes |
package org.slf4j.impl import cats.effect.{ConcurrentEffect, ContextShift, IO, Timer} import io.odin._ import io.odin.slf4j.{BufferingLogger, OdinLoggerBinder} import scala.concurrent.ExecutionContext class StaticLoggerBinder extends OdinLoggerBinder[IO] { val ec: ExecutionContext = scala.concurrent.ExecutionContext.global implicit val timer: Timer[IO] = IO.timer(ec) implicit val cs: ContextShift[IO] = IO.contextShift(ec) implicit val F: ConcurrentEffect[IO] = IO.ioConcurrentEffect val loggers: PartialFunction[String, Logger[IO]] = { case Level.Trace.toString => new BufferingLogger[IO](Level.Trace) case Level.Debug.toString => new BufferingLogger[IO](Level.Debug) case Level.Info.toString => new BufferingLogger[IO](Level.Info) case Level.Warn.toString => new BufferingLogger[IO](Level.Warn) case Level.Error.toString => new BufferingLogger[IO](Level.Error) case _ => new BufferingLogger[IO](Level.Trace) } } object StaticLoggerBinder extends StaticLoggerBinder { var REQUESTED_API_VERSION: String = "1.7" def getSingleton: StaticLoggerBinder = this }
Example 68
Source File: ConditionalLogger.scala From odin with Apache License 2.0 | 5 votes |
package io.odin.extras.loggers import cats.MonadError import cats.effect.{Concurrent, ContextShift, ExitCase, Resource, Timer} import cats.syntax.applicativeError._ import cats.syntax.flatMap._ import cats.syntax.functor._ import cats.syntax.order._ import io.odin.loggers.DefaultLogger import io.odin.{Level, Logger, LoggerMessage} import monix.catnap.ConcurrentQueue import monix.execution.{BufferCapacity, ChannelType} final case class ConditionalLogger[F[_]: Timer] private ( queue: ConcurrentQueue[F, LoggerMessage], inner: Logger[F], override val minLevel: Level )(implicit F: MonadError[F, Throwable]) extends DefaultLogger[F](minLevel) { def log(msg: LoggerMessage): F[Unit] = queue.tryOffer(msg).void private def drain(exitCase: ExitCase[Throwable]): F[Unit] = { val level = exitCase match { case ExitCase.Completed => inner.minLevel case _ => minLevel } queue .drain(0, Int.MaxValue) .flatMap(msgs => inner.log(msgs.filter(_.level >= level).toList)) .attempt .void } } object ConditionalLogger { def create[F[_]: Timer: Concurrent: ContextShift]( inner: Logger[F], minLevelOnError: Level, maxBufferSize: Option[Int] ): Resource[F, Logger[F]] = { val queueCapacity = maxBufferSize match { case Some(value) => BufferCapacity.Bounded(value) case None => BufferCapacity.Unbounded() } def acquire: F[ConditionalLogger[F]] = for { queue <- ConcurrentQueue.withConfig[F, LoggerMessage](queueCapacity, ChannelType.MPSC) } yield ConditionalLogger(queue, inner, minLevelOnError) def release(logger: ConditionalLogger[F], exitCase: ExitCase[Throwable]): F[Unit] = logger.drain(exitCase) Resource.makeCase(acquire)(release).widen } }
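Usage note: a sketch of wrapping an existing logger with the factory above so that buffered debug output is only forwarded when the enclosed effect fails; the consoleLogger constructor and the level/buffer choices are assumptions based on odin's public API, not part of this file.

import cats.effect.{ContextShift, IO, Timer}
import io.odin._
import io.odin.extras.loggers.ConditionalLogger
import scala.concurrent.ExecutionContext

implicit val cs: ContextShift[IO] = IO.contextShift(ExecutionContext.global)
implicit val timer: Timer[IO]     = IO.timer(ExecutionContext.global)

val inner: Logger[IO] = consoleLogger[IO](minLevel = Level.Info)

// buffer up to 100 messages; on failure, messages down to Debug are replayed to the inner logger
val program: IO[Unit] =
  ConditionalLogger.create[IO](inner, minLevelOnError = Level.Debug, maxBufferSize = Some(100)).use { logger =>
    logger.debug("buffered; only forwarded if the surrounding block fails")
  }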
Example 69
Source File: EnclosureRouting.scala From odin with Apache License 2.0 | 5 votes |
package io.odin.config import cats.Monad import cats.effect.Timer import cats.instances.list._ import cats.instances.map._ import cats.syntax.all._ import io.odin.loggers.DefaultLogger import io.odin.{Logger, LoggerMessage} import scala.annotation.tailrec private[config] class EnclosureRouting[F[_]: Timer](fallback: Logger[F], router: List[(String, Logger[F])])( implicit F: Monad[F] ) extends DefaultLogger { private val indexedRouter = router.mapWithIndex { case ((packageName, logger), idx) => (packageName, (idx, logger)) } def log(msg: LoggerMessage): F[Unit] = recLog(indexedRouter, msg) override def log(msgs: List[LoggerMessage]): F[Unit] = { msgs .map { msg => indexedRouter .collectFirst { case (key, indexedLogger) if msg.position.enclosureName.startsWith(key) => indexedLogger } .getOrElse(-1 -> fallback) -> List(msg) } .foldLeft(Map.empty[(Int, Logger[F]), List[LoggerMessage]]) { case (map, kv) => map |+| Map(kv) } .toList .traverse_ { case ((_, logger), ms) => logger.log(ms.filter(_.level >= logger.minLevel)) } } @tailrec private def recLog(router: List[(String, (Int, Logger[F]))], msg: LoggerMessage): F[Unit] = router match { case Nil => if (msg.level >= fallback.minLevel) fallback.log(msg) else F.unit case (key, (_, logger)) :: _ if msg.position.enclosureName.startsWith(key) && msg.level >= logger.minLevel => logger.log(msg) case _ :: tail => recLog(tail, msg) } }
Example 70
Source File: package.scala From odin with Apache License 2.0 | 5 votes |
package io.odin import java.time.LocalDateTime import cats.Monad import cats.effect.Timer import cats.instances.list._ import cats.syntax.all._ import io.odin.internal.StringContextLength import io.odin.loggers.DefaultLogger import scala.annotation.tailrec package object config extends FileNamePatternSyntax { def levelRouting[F[_]: Timer: Monad](router: Map[Level, Logger[F]]): DefaultBuilder[F] = new DefaultBuilder[F]({ default: Logger[F] => new DefaultLogger[F]() { def log(msg: LoggerMessage): F[Unit] = router.getOrElse(msg.level, default).log(msg) override def log(msgs: List[LoggerMessage]): F[Unit] = { msgs.groupBy(_.level).toList.traverse_ { case (level, msgs) => router.getOrElse(level, default).log(msgs) } } } }) implicit class FileNamePatternInterpolator(private val sc: StringContext) extends AnyVal { def file(ps: FileNamePattern*): LocalDateTime => String = { StringContextLength.checkLength(sc, ps) dt => { @tailrec def rec(args: List[FileNamePattern], parts: List[String], acc: StringBuilder): String = { args match { case Nil => acc.append(parts.head).toString() case head :: tail => rec(tail, parts.tail, acc.append(parts.head).append(head.extract(dt))) } } rec(ps.toList, sc.parts.toList, new StringBuilder()) } } } implicit def str2fileNamePattern(str: String): FileNamePattern = { Value(str) } }
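Usage note: a small sketch of wiring the levelRouting builder above; consoleLogger and the DefaultBuilder withFallback method are assumptions about odin's public API (Formatter.colorful does appear elsewhere in these examples), so treat the exact builder call as illustrative.

import cats.effect.{IO, Timer}
import io.odin._
import io.odin.config._
import io.odin.formatter.Formatter
import scala.concurrent.ExecutionContext

implicit val timer: Timer[IO] = IO.timer(ExecutionContext.global)

// errors go to a colorful console logger, everything else to the plain fallback
val routed: Logger[IO] =
  levelRouting(Map(Level.Error -> consoleLogger[IO](formatter = Formatter.colorful)))
    .withFallback(consoleLogger[IO]())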
Example 71
Source File: ConsoleLogger.scala From odin with Apache License 2.0 | 5 votes |
package io.odin.loggers

import java.io.PrintStream

import cats.effect.{Sync, Timer}
import cats.syntax.all._
import io.odin.formatter.Formatter
import io.odin.{Level, Logger, LoggerMessage}

case class ConsoleLogger[F[_]: Timer](
    formatter: Formatter,
    out: PrintStream,
    err: PrintStream,
    override val minLevel: Level
)(implicit F: Sync[F])
    extends DefaultLogger[F](minLevel) {

  private def println(out: PrintStream, msg: LoggerMessage, formatter: Formatter): F[Unit] =
    F.delay(out.println(formatter.format(msg)))

  def log(msg: LoggerMessage): F[Unit] =
    if (msg.level < Level.Warn) {
      println(out, msg, formatter)
    } else {
      println(err, msg, formatter)
    }
}

object ConsoleLogger {
  def apply[F[_]: Timer: Sync](formatter: Formatter, minLevel: Level): Logger[F] =
    ConsoleLogger(formatter, scala.Console.out, scala.Console.err, minLevel)
}
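Usage note: a tiny sketch of constructing the logger above for IO and emitting one message; the formatter and minimum level are arbitrary choices for illustration.

import cats.effect.{IO, Timer}
import io.odin.Level
import io.odin.formatter.Formatter
import io.odin.loggers.ConsoleLogger
import scala.concurrent.ExecutionContext

implicit val timer: Timer[IO] = IO.timer(ExecutionContext.global)

val logger = ConsoleLogger[IO](Formatter.default, Level.Info)

// per the routing in `log` above, messages below WARN go to stdout, WARN and above to stderr
logger.info("service started").unsafeRunSync()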
Example 72
Source File: FileLogger.scala From odin with Apache License 2.0 | 5 votes |
package io.odin.loggers

import java.io.BufferedWriter
import java.nio.file.{Files, Paths}

import cats.effect.syntax.all._
import cats.effect.{Resource, Sync, Timer}
import cats.instances.list._
import cats.syntax.all._
import io.odin.formatter.Formatter
import io.odin.{Level, Logger, LoggerMessage}

case class FileLogger[F[_]: Timer](buffer: BufferedWriter, formatter: Formatter, override val minLevel: Level)(
    implicit F: Sync[F]
) extends DefaultLogger[F](minLevel) {

  def log(msg: LoggerMessage): F[Unit] =
    write(msg, formatter).guarantee(flush)

  override def log(msgs: List[LoggerMessage]): F[Unit] =
    msgs.traverse(write(_, formatter)).void.guarantee(flush)

  private def write(msg: LoggerMessage, formatter: Formatter): F[Unit] =
    F.delay {
      buffer.write(formatter.format(msg) + System.lineSeparator())
    }

  private def flush: F[Unit] = F.delay(buffer.flush()).handleErrorWith(_ => F.unit)
}

object FileLogger {
  def apply[F[_]: Timer](fileName: String, formatter: Formatter, minLevel: Level)(
      implicit F: Sync[F]
  ): Resource[F, Logger[F]] = {
    def mkBuffer: F[BufferedWriter] = F.delay(Files.newBufferedWriter(Paths.get(fileName)))
    def closeBuffer(buffer: BufferedWriter): F[Unit] = F.delay(buffer.close()).handleErrorWith(_ => F.unit)

    Resource.make(mkBuffer)(closeBuffer).map { buffer =>
      FileLogger(buffer, formatter, minLevel)
    }
  }
}
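Usage note: a sketch of using the Resource-based constructor above; the file name and message are placeholders.

import cats.effect.{IO, Timer}
import io.odin.Level
import io.odin.formatter.Formatter
import io.odin.loggers.FileLogger
import scala.concurrent.ExecutionContext

implicit val timer: Timer[IO] = IO.timer(ExecutionContext.global)

// the buffer is opened on acquire and closed when the Resource is released
val program: IO[Unit] =
  FileLogger[IO]("app.log", Formatter.default, Level.Info).use { logger =>
    logger.info("written and flushed to app.log")
  }

// program.unsafeRunSync()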
Example 73
Source File: ConstContextLogger.scala From odin with Apache License 2.0 | 5 votes |
package io.odin.loggers

import cats.Monad
import cats.effect.Timer
import io.odin.{Logger, LoggerMessage}

case class ConstContextLogger[F[_]: Timer: Monad](ctx: Map[String, String], inner: Logger[F])
    extends DefaultLogger(inner.minLevel) {

  def log(msg: LoggerMessage): F[Unit] =
    inner.log(msg.copy(context = msg.context ++ ctx))

  override def log(msgs: List[LoggerMessage]): F[Unit] =
    inner.log(msgs.map(msg => msg.copy(context = msg.context ++ ctx)))
}

object ConstContextLogger {
  def withConstContext[F[_]: Timer: Monad](ctx: Map[String, String], inner: Logger[F]): Logger[F] =
    ConstContextLogger(ctx, inner)
}
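Usage note: withConstContext is also exposed as syntax on any Logger (the specs further down use it that way); a brief sketch, assuming odin's consoleLogger as the inner logger.

import cats.effect.{IO, Timer}
import io.odin._
import io.odin.syntax._
import scala.concurrent.ExecutionContext

implicit val timer: Timer[IO] = IO.timer(ExecutionContext.global)

// every message logged through `withCtx` carries the static key/value pairs
val withCtx: Logger[IO] =
  consoleLogger[IO]().withConstContext(Map("service" -> "payments", "env" -> "staging"))

withCtx.info("request handled").unsafeRunSync()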
Example 74
Source File: OdinSpec.scala From odin with Apache License 2.0 | 5 votes |
package io.odin import java.time.LocalDateTime import cats.effect.{Clock, Timer} import cats.{Applicative, Eval} import io.odin.formatter.Formatter import io.odin.meta.Position import org.scalacheck.{Arbitrary, Cogen, Gen} import org.scalatest.flatspec.AnyFlatSpec import org.scalatest.matchers.should.Matchers import org.scalatestplus.scalacheck.{Checkers, ScalaCheckDrivenPropertyChecks} import org.typelevel.discipline.Laws import scala.concurrent.duration.{FiniteDuration, TimeUnit} trait OdinSpec extends AnyFlatSpec with Matchers with Checkers with ScalaCheckDrivenPropertyChecks with EqInstances { def checkAll(name: String, ruleSet: Laws#RuleSet): Unit = { for ((id, prop) <- ruleSet.all.properties) it should (name + "." + id) in { check(prop) } } def zeroTimer[F[_]](implicit F: Applicative[F]): Timer[F] = new Timer[F] { def clock: Clock[F] = new Clock[F] { def realTime(unit: TimeUnit): F[Long] = F.pure(0L) def monotonic(unit: TimeUnit): F[Long] = F.pure(0L) } def sleep(duration: FiniteDuration): F[Unit] = ??? } val lineSeparator: String = System.lineSeparator() val nonEmptyStringGen: Gen[String] = Gen.nonEmptyListOf(Gen.alphaNumChar).map(_.mkString) val levelGen: Gen[Level] = Gen.oneOf(Level.Trace, Level.Debug, Level.Info, Level.Warn, Level.Error) implicit val levelArbitrary: Arbitrary[Level] = Arbitrary(levelGen) val positionGen: Gen[Position] = for { fileName <- nonEmptyStringGen enclosureName <- Gen.uuid.map(_.toString) packageName <- nonEmptyStringGen line <- Gen.posNum[Int] } yield { Position(fileName, enclosureName, packageName, line) } implicit val positionArbitrary: Arbitrary[Position] = Arbitrary(positionGen) val loggerMessageGen: Gen[LoggerMessage] = { val startTime = System.currentTimeMillis() for { level <- levelGen msg <- Gen.alphaNumStr context <- Gen.mapOfN(20, nonEmptyStringGen.flatMap(key => nonEmptyStringGen.map(key -> _))) exception <- Gen.option(Arbitrary.arbitrary[Throwable]) position <- positionGen threadName <- nonEmptyStringGen timestamp <- Gen.choose(0, startTime) } yield { LoggerMessage( level = level, message = Eval.now(msg), context = context, exception = exception, position = position, threadName = threadName, timestamp = timestamp ) } } implicit val loggerMessageArbitrary: Arbitrary[LoggerMessage] = Arbitrary(loggerMessageGen) implicit val cogenLoggerMessage: Cogen[LoggerMessage] = Cogen[LoggerMessage]((msg: LoggerMessage) => msg.level.hashCode().toLong + msg.message.value.hashCode().toLong) val formatterGen: Gen[Formatter] = Gen.oneOf(Formatter.default, Formatter.colorful) implicit val formatterArbitrary: Arbitrary[Formatter] = Arbitrary(formatterGen) val localDateTimeGen: Gen[LocalDateTime] = for { year <- Gen.choose(0, LocalDateTime.now().getYear) month <- Gen.choose(1, 12) day <- Gen.choose(1, 28) hour <- Gen.choose(0, 23) minute <- Gen.choose(0, 59) second <- Gen.choose(0, 59) } yield { LocalDateTime.of(year, month, day, hour, minute, second) } implicit val localDateTimeArbitrary: Arbitrary[LocalDateTime] = Arbitrary(localDateTimeGen) }
Example 75
Source File: ContextualLoggerSpec.scala From odin with Apache License 2.0 | 5 votes |
package io.odin.loggers import cats.arrow.FunctionK import cats.data.{ReaderT, WriterT} import cats.effect.{Clock, IO, Timer} import cats.instances.list._ import cats.mtl.instances.all._ import io.odin.syntax._ import io.odin.{LoggerMessage, OdinSpec} import scala.concurrent.duration.{FiniteDuration, TimeUnit} class ContextualLoggerSpec extends OdinSpec { type W[A] = WriterT[IO, List[LoggerMessage], A] type F[A] = ReaderT[W, Map[String, String], A] implicit val hasContext: HasContext[Map[String, String]] = (env: Map[String, String]) => env implicit val timer: Timer[IO] = new Timer[IO] { def clock: Clock[IO] = new Clock[IO] { def realTime(unit: TimeUnit): IO[Long] = IO.pure(0) def monotonic(unit: TimeUnit): IO[Long] = IO.pure(0) } def sleep(duration: FiniteDuration): IO[Unit] = ??? } private val logger = new WriterTLogger[IO].mapK(λ[FunctionK[W, F]](ReaderT.liftF(_))).withContext checkAll("ContContextLogger", LoggerTests[F](logger, reader => reader.run(Map()).written.unsafeRunSync()).all) it should "pick up context from F[_]" in { forAll { (loggerMessage: LoggerMessage, ctx: Map[String, String]) => val List(written) = logger.log(loggerMessage).apply(ctx).written.unsafeRunSync() written.context shouldBe loggerMessage.context ++ ctx } } it should "embed context in all messages" in { forAll { (msgs: List[LoggerMessage], ctx: Map[String, String]) => val written = logger.log(msgs).apply(ctx).written.unsafeRunSync() written.map(_.context) shouldBe msgs.map(_.context ++ ctx) } } }
Example 76
Source File: WriterTLoggerSpec.scala From odin with Apache License 2.0 | 5 votes |
package io.odin.loggers import cats.Id import cats.data.Writer import cats.effect.Timer import cats.instances.list._ import io.odin.{LoggerMessage, OdinSpec} class WriterTLoggerSpec extends OdinSpec { type F[A] = Writer[List[LoggerMessage], A] implicit val timer: Timer[Id] = zeroTimer checkAll( "WriterTLogger", LoggerTests[F](new WriterTLogger[Id], _.written).all ) it should "write log into list" in { val logger = new WriterTLogger[Id]() forAll { msg: LoggerMessage => logger.log(msg).written shouldBe List(msg) } } it should "write all the logs into list" in { val logger = new WriterTLogger[Id]() forAll { msgs: List[LoggerMessage] => logger.log(msgs).written shouldBe msgs } } }
Example 77
Source File: ConstContextLoggerSpec.scala From odin with Apache License 2.0 | 5 votes |
package io.odin.loggers import cats.data.WriterT import cats.effect.{IO, Timer} import cats.instances.list._ import io.odin.syntax._ import io.odin.{LoggerMessage, OdinSpec} class ConstContextLoggerSpec extends OdinSpec { type F[A] = WriterT[IO, List[LoggerMessage], A] implicit val timer: Timer[IO] = zeroTimer checkAll( "ContextualLogger", LoggerTests[F]( new WriterTLogger[IO].withConstContext(Map.empty), _.written.unsafeRunSync() ).all ) it should "add constant context to the record" in { forAll { (loggerMessage: LoggerMessage, ctx: Map[String, String]) => val logger = new WriterTLogger[IO].withConstContext(ctx) val List(written) = logger.log(loggerMessage).written.unsafeRunSync() written.context shouldBe loggerMessage.context ++ ctx } } it should "add constant context to the records" in { forAll { (messages: List[LoggerMessage], ctx: Map[String, String]) => val logger = new WriterTLogger[IO].withConstContext(ctx) val written = logger.log(messages).written.unsafeRunSync() written.map(_.context) shouldBe messages.map(_.context ++ ctx) } } }
Example 78
Source File: FilterLoggerSpec.scala From odin with Apache License 2.0 | 5 votes |
package io.odin.loggers import cats.data.WriterT import cats.effect.{IO, Timer} import io.odin._ import io.odin.syntax._ import cats.instances.list._ import cats.syntax.all._ class FilterLoggerSpec extends OdinSpec { type F[A] = WriterT[IO, List[LoggerMessage], A] implicit val timer: Timer[IO] = zeroTimer checkAll( "FilterLogger", LoggerTests[F](new WriterTLogger[IO].filter(_.exception.isDefined), _.written.unsafeRunSync()).all ) it should "logger.filter(p).log(msg) <-> F.whenA(p)(log(msg))" in { forAll { (msgs: List[LoggerMessage], p: LoggerMessage => Boolean) => { val logger = new WriterTLogger[IO].filter(p) val written = msgs.traverse(logger.log).written.unsafeRunSync() val batchWritten = logger.log(msgs).written.unsafeRunSync() written shouldBe msgs.filter(p) batchWritten shouldBe written } } } }
Example 79
Source File: ConsoleLoggerSpec.scala From odin with Apache License 2.0 | 5 votes |
package io.odin.loggers import java.io.{ByteArrayOutputStream, PrintStream} import cats.effect.{IO, Timer} import cats.syntax.all._ import io.odin.Level._ import io.odin.formatter.Formatter import io.odin.{Level, LoggerMessage, OdinSpec} class ConsoleLoggerSpec extends OdinSpec { implicit val timer: Timer[IO] = IO.timer(scala.concurrent.ExecutionContext.global) it should "route all messages with level <= INFO to stdout" in { forAll { (loggerMessage: LoggerMessage, formatter: Formatter) => whenever(loggerMessage.level <= Info) { val outBaos = new ByteArrayOutputStream() val stdOut = new PrintStream(outBaos) val errBaos = new ByteArrayOutputStream() val stdErr = new PrintStream(errBaos) val consoleLogger = ConsoleLogger[IO](formatter, stdOut, stdErr, Level.Trace) consoleLogger.log(loggerMessage).unsafeRunSync() outBaos.toString() shouldBe (formatter.format(loggerMessage) + System.lineSeparator()) } } } it should "route all messages with level >= WARN to stderr" in { forAll { (loggerMessage: LoggerMessage, formatter: Formatter) => whenever(loggerMessage.level > Info) { val outBaos = new ByteArrayOutputStream() val stdOut = new PrintStream(outBaos) val errBaos = new ByteArrayOutputStream() val stdErr = new PrintStream(errBaos) val consoleLogger = ConsoleLogger[IO](formatter, stdOut, stdErr, Level.Trace) consoleLogger.log(loggerMessage).unsafeRunSync() errBaos.toString() shouldBe (formatter.format(loggerMessage) + System.lineSeparator()) } } } }
Example 80
Source File: LoggerNatTransformSpec.scala From odin with Apache License 2.0 | 5 votes |
package io.odin.loggers import cats.data.{Writer, WriterT} import cats.effect.{Clock, IO, Timer} import cats.{~>, Id} import io.odin.{Level, Logger, LoggerMessage, OdinSpec} import scala.concurrent.duration.{FiniteDuration, TimeUnit} class LoggerNatTransformSpec extends OdinSpec { type F[A] = Writer[List[LoggerMessage], A] type FF[A] = WriterT[IO, List[LoggerMessage], A] it should "transform each method" in { forAll { (msg: String, ctx: Map[String, String], throwable: Throwable, timestamp: Long) => implicit val clk: Timer[Id] = clock(timestamp) val logF = logger.withMinimalLevel(Level.Trace) val logFF = logF.mapK(nat).withMinimalLevel(Level.Trace) check(logF.trace(msg), logFF.trace(msg)) check(logF.trace(msg, throwable), logFF.trace(msg, throwable)) check(logF.trace(msg, ctx), logFF.trace(msg, ctx)) check(logF.trace(msg, ctx, throwable), logFF.trace(msg, ctx, throwable)) check(logF.debug(msg), logFF.debug(msg)) check(logF.debug(msg, throwable), logFF.debug(msg, throwable)) check(logF.debug(msg, ctx), logFF.debug(msg, ctx)) check(logF.debug(msg, ctx, throwable), logFF.debug(msg, ctx, throwable)) check(logF.info(msg), logFF.info(msg)) check(logF.info(msg, throwable), logFF.info(msg, throwable)) check(logF.info(msg, ctx), logFF.info(msg, ctx)) check(logF.info(msg, ctx, throwable), logFF.info(msg, ctx, throwable)) check(logF.warn(msg), logFF.warn(msg)) check(logF.warn(msg, throwable), logFF.warn(msg, throwable)) check(logF.warn(msg, ctx), logFF.warn(msg, ctx)) check(logF.warn(msg, ctx, throwable), logFF.warn(msg, ctx, throwable)) check(logF.error(msg), logFF.error(msg)) check(logF.error(msg, throwable), logFF.error(msg, throwable)) check(logF.error(msg, ctx), logFF.error(msg, ctx)) check(logF.error(msg, ctx, throwable), logFF.error(msg, ctx, throwable)) } } private val nat: F ~> FF = new (F ~> FF) { private val idToIo = new (Id ~> IO) { def apply[A](fa: Id[A]): IO[A] = IO.pure(fa) } def apply[A](fa: F[A]): FF[A] = fa.mapK(idToIo) } private def clock(timestamp: Long): Timer[Id] = new Timer[Id] { def clock: Clock[Id] = new Clock[Id] { def realTime(unit: TimeUnit): Id[Long] = timestamp def monotonic(unit: TimeUnit): Id[Long] = timestamp } def sleep(duration: FiniteDuration): Id[Unit] = ??? } private def logger(implicit timer: Timer[Id]): Logger[F] = new WriterTLogger[Id] private def check(fnF: => F[Unit], fnFF: => FF[Unit]) = { val List(loggerMessageF) = fnF.written val List(loggerMessageFF) = fnFF.written.unsafeRunSync() loggerMessageEq.eqv(loggerMessageF, loggerMessageFF) shouldBe true } }
Example 81
Source File: LoggerMonoidSpec.scala From odin with Apache License 2.0 | 5 votes |
package io.odin.loggers import java.util.UUID import cats.data.WriterT import cats.effect.{Clock, IO, Timer} import cats.instances.list._ import cats.instances.tuple._ import cats.instances.unit._ import cats.instances.uuid._ import cats.kernel.laws.discipline.MonoidTests import cats.syntax.all._ import io.odin.{Level, Logger, LoggerMessage, OdinSpec} import org.scalacheck.{Arbitrary, Gen} import scala.concurrent.duration.{FiniteDuration, TimeUnit} class LoggerMonoidSpec extends OdinSpec { type F[A] = WriterT[IO, List[(UUID, LoggerMessage)], A] checkAll("Logger", MonoidTests[Logger[F]].monoid) it should "(logger1 |+| logger2).log <-> (logger1.log |+| logger2.log)" in { forAll { (uuid1: UUID, uuid2: UUID, msg: LoggerMessage) => val logger1: Logger[F] = NamedLogger(uuid1) val logger2: Logger[F] = NamedLogger(uuid2) val a = (logger1 |+| logger2).log(msg) val b = logger1.log(msg) |+| logger2.log(msg) a.written.unsafeRunSync() shouldBe b.written.unsafeRunSync() } } it should "(logger1 |+| logger2).log(list) <-> (logger1.log |+| logger2.log(list))" in { forAll { (uuid1: UUID, uuid2: UUID, msg: List[LoggerMessage]) => val logger1: Logger[F] = NamedLogger(uuid1) val logger2: Logger[F] = NamedLogger(uuid2) val a = (logger1 |+| logger2).log(msg) val b = logger1.log(msg) |+| logger2.log(msg) a.written.unsafeRunSync() shouldBe b.written.unsafeRunSync() } } it should "set minimal level for underlying loggers" in { forAll { (uuid1: UUID, uuid2: UUID, level: Level, msg: List[LoggerMessage]) => val logger1: Logger[F] = NamedLogger(uuid1) val logger2: Logger[F] = NamedLogger(uuid2) val a = (logger1 |+| logger2).withMinimalLevel(level).log(msg) val b = (logger1.withMinimalLevel(level) |+| logger2.withMinimalLevel(level)).log(msg) a.written.unsafeRunSync() shouldBe b.written.unsafeRunSync() } } case class NamedLogger(loggerId: UUID) extends DefaultLogger[F] { def log(msg: LoggerMessage): F[Unit] = WriterT.tell(List(loggerId -> msg)) } implicit def timer: Timer[IO] = new Timer[IO] { def clock: Clock[IO] = new Clock[IO] { def realTime(unit: TimeUnit): IO[Long] = IO.pure(0) def monotonic(unit: TimeUnit): IO[Long] = IO.pure(0) } def sleep(duration: FiniteDuration): IO[Unit] = ??? } implicit def arbitraryWriterLogger: Arbitrary[Logger[F]] = Arbitrary( Gen.uuid.map(NamedLogger) ) }
Example 82
Source File: ContramapLoggerSpec.scala From odin with Apache License 2.0 | 5 votes |
package io.odin.loggers import cats.data.WriterT import cats.effect.{IO, Timer} import cats.instances.list._ import cats.syntax.all._ import io.odin.{LoggerMessage, OdinSpec} import io.odin.syntax._ class ContramapLoggerSpec extends OdinSpec { type F[A] = WriterT[IO, List[LoggerMessage], A] implicit val timer: Timer[IO] = zeroTimer checkAll("ContramapLogger", LoggerTests[F](new WriterTLogger[IO].contramap(identity), _.written.unsafeRunSync()).all) it should "contramap(identity).log(msg) <-> log(msg)" in { val logger = new WriterTLogger[IO].contramap(identity) forAll { msgs: List[LoggerMessage] => val written = msgs.traverse(logger.log).written.unsafeRunSync() val batchWritten = logger.log(msgs).written.unsafeRunSync() written shouldBe msgs batchWritten shouldBe written } } it should "contramap(f).log(msg) <-> log(f(msg))" in { forAll { (msgs: List[LoggerMessage], fn: LoggerMessage => LoggerMessage) => val logger = new WriterTLogger[IO].contramap(fn) val written = msgs.traverse(logger.log).written.unsafeRunSync() val batchWritten = logger.log(msgs).written.unsafeRunSync() written shouldBe msgs.map(fn) batchWritten shouldBe written } } }
Example 83
Source File: IOSuite.scala From skafka with MIT License | 5 votes |
package com.evolutiongaming.skafka

import cats.Parallel
import cats.effect.{Clock, Concurrent, ContextShift, IO, Timer}
import cats.implicits._
import com.evolutiongaming.catshelper.FromFuture
import com.evolutiongaming.smetrics.MeasureDuration
import org.scalatest.Succeeded

import scala.concurrent.duration._
import scala.concurrent.{ExecutionContext, ExecutionContextExecutor, Future}

object IOSuite {
  val Timeout: FiniteDuration = 10.seconds

  implicit val executor: ExecutionContextExecutor = ExecutionContext.global

  implicit val contextShiftIO: ContextShift[IO] = IO.contextShift(executor)
  implicit val concurrentIO: Concurrent[IO] = IO.ioConcurrentEffect
  implicit val timerIO: Timer[IO] = IO.timer(executor)
  implicit val parallelIO: Parallel[IO] = IO.ioParallel
  implicit val fromFutureIO: FromFuture[IO] = FromFuture.lift[IO]
  implicit val measureDuration: MeasureDuration[IO] = MeasureDuration.fromClock[IO](Clock[IO])

  def runIO[A](io: IO[A], timeout: FiniteDuration = Timeout): Future[Succeeded.type] = {
    io.timeout(timeout).as(Succeeded).unsafeToFuture
  }

  implicit class IOOps[A](val self: IO[A]) extends AnyVal {
    def run(timeout: FiniteDuration = Timeout): Future[Succeeded.type] = runIO(self, timeout)
  }
}
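Usage note: a sketch of how a helper like this is typically consumed from an async ScalaTest suite; the test class below is invented for illustration and simply relies on the implicits and IOOps syntax imported from IOSuite.

import cats.effect.IO
import com.evolutiongaming.skafka.IOSuite._
import org.scalatest.funsuite.AsyncFunSuite
import scala.concurrent.duration._

class ExampleSpec extends AsyncFunSuite {
  test("sleep completes within the suite timeout") {
    IO.sleep(10.millis).run() // Future[Succeeded.type] via the IOOps syntax above
  }
}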
Example 84
Source File: DeterministicIOTestSuite.scala From cats-effect-testing with Apache License 2.0 | 5 votes |
package cats.effect.testing.minitest import scala.concurrent.ExecutionContext import cats.effect.{ContextShift, IO, Timer} import cats.effect.laws.util.TestContext import scala.concurrent.duration._ import minitest.api.{DefaultExecutionContext, TestSpec} abstract class DeterministicIOTestSuite extends BaseIOTestSuite[TestContext] { override protected final def makeExecutionContext(): TestContext = TestContext() override protected[effect] implicit def suiteEc: ExecutionContext = DefaultExecutionContext override final implicit def ioContextShift: ContextShift[IO] = executionContext.contextShift[IO](IO.ioEffect) override final implicit def ioTimer: Timer[IO] = executionContext.timer[IO](IO.ioEffect) override protected[effect] def mkSpec(name: String, ec: TestContext, io: => IO[Unit]): TestSpec[Unit, Unit] = TestSpec.sync(name, _ => { val f = io.unsafeToFuture() ec.tick(365.days) f.value match { case Some(value) => value.get case None => throw new RuntimeException( s"The IO in ${this.getClass.getName}.$name did not terminate.\n" + "It's possible that you are using a ContextShift that is backed by other ExecutionContext or" + "the test code is waiting indefinitely." ) } }) }
Example 85
Source File: IOTestSuite.scala From cats-effect-testing with Apache License 2.0 | 5 votes |
package cats.effect.testing.utest import scala.concurrent.duration._ import scala.concurrent.{ExecutionContext, Future} import cats.effect.{ContextShift, IO, Timer} import utest._ abstract class IOTestSuite extends TestSuite { protected def makeExecutionContext(): ExecutionContext = ExecutionContext.global protected def timeout: FiniteDuration = 10.seconds protected def allowNonIOTests: Boolean = false protected lazy val executionContext: ExecutionContext = makeExecutionContext() implicit def ioContextShift: ContextShift[IO] = IO.contextShift(executionContext) implicit def ioTimer: Timer[IO] = IO.timer(executionContext) override def utestWrap(path: Seq[String], runBody: => Future[Any])(implicit ec: ExecutionContext): Future[Any] = { // Shadow the parameter EC with our EC implicit val ec: ExecutionContext = this.executionContext runBody.flatMap { case io: IO[Any] => io.timeout(timeout).unsafeToFuture() case other if allowNonIOTests => Future.successful(other) case other => throw new RuntimeException(s"Test body must return an IO value. Got $other") } } }
Example 86
Source File: DeterministicIOTestSuite.scala From cats-effect-testing with Apache License 2.0 | 5 votes |
package cats.effect.testing.utest import scala.concurrent.{ExecutionContext, Future} import scala.concurrent.duration._ import cats.effect.{ContextShift, IO, Timer} import cats.effect.laws.util.TestContext import utest.TestSuite abstract class DeterministicIOTestSuite extends TestSuite { protected val testContext: TestContext = TestContext() protected def allowNonIOTests: Boolean = false implicit def ioContextShift: ContextShift[IO] = testContext.contextShift(IO.ioEffect) implicit def ioTimer: Timer[IO] = testContext.timer(IO.ioEffect) override def utestWrap(path: Seq[String], runBody: => Future[Any])(implicit ec: ExecutionContext): Future[Any] = { runBody.flatMap { case io: IO[Any] => val f = io.unsafeToFuture() testContext.tick(365.days) assert(testContext.state.tasks.isEmpty) f.value match { case Some(_) => f case None => throw new RuntimeException( s"The IO in ${path.mkString(".")} did not terminate.\n" + "It's possible that you are using a ContextShift that is backed by other ExecutionContext or" + "the test code is waiting indefinitely." ) } case other if allowNonIOTests => Future.successful(other) case other => throw new RuntimeException(s"Test body must return an IO value. Got $other") }(new ExecutionContext { def execute(runnable: Runnable): Unit = runnable.run() def reportFailure(cause: Throwable): Unit = throw cause }) } }
Example 87
Source File: TaskSchedulerSpec.scala From vinyldns with Apache License 2.0 | 5 votes |
package vinyldns.core.task import cats.effect.{ContextShift, IO, Timer} import org.mockito.Mockito import org.mockito.Mockito._ import org.scalatestplus.mockito.MockitoSugar import org.scalatest.BeforeAndAfterEach import scala.concurrent.duration._ import org.scalatest.matchers.should.Matchers import org.scalatest.wordspec.AnyWordSpec class TaskSchedulerSpec extends AnyWordSpec with Matchers with MockitoSugar with BeforeAndAfterEach { private implicit val cs: ContextShift[IO] = IO.contextShift(scala.concurrent.ExecutionContext.global) private implicit val timer: Timer[IO] = IO.timer(scala.concurrent.ExecutionContext.global) private val mockRepo = mock[TaskRepository] class TestTask( val name: String, val timeout: FiniteDuration, val runEvery: FiniteDuration, val checkInterval: FiniteDuration, testResult: IO[Unit] = IO.unit ) extends Task { def run(): IO[Unit] = testResult } override def beforeEach() = Mockito.reset(mockRepo) "TaskScheduler" should { "run a scheduled task" in { val task = new TestTask("test", 5.seconds, 500.millis, 500.millis) val spied = spy(task) doReturn(IO.unit).when(mockRepo).saveTask(task.name) doReturn(IO.pure(true)).when(mockRepo).claimTask(task.name, task.timeout, task.runEvery) doReturn(IO.unit).when(mockRepo).releaseTask(task.name) TaskScheduler.schedule(spied, mockRepo).take(1).compile.drain.unsafeRunSync() // We run twice because we run once on start up verify(spied, times(2)).run() verify(mockRepo, times(2)).claimTask(task.name, task.timeout, task.runEvery) verify(mockRepo, times(2)).releaseTask(task.name) } "release the task even on error" in { val task = new TestTask( "test", 5.seconds, 500.millis, 500.millis, IO.raiseError(new RuntimeException("fail")) ) doReturn(IO.unit).when(mockRepo).saveTask(task.name) doReturn(IO.pure(true)).when(mockRepo).claimTask(task.name, task.timeout, task.runEvery) doReturn(IO.unit).when(mockRepo).releaseTask(task.name) TaskScheduler.schedule(task, mockRepo).take(1).compile.drain.unsafeRunSync() // We release the task twice, once on start and once on the run verify(mockRepo, times(2)).releaseTask(task.name) } "fail to start if the task cannot be saved" in { val task = new TestTask("test", 5.seconds, 500.millis, 500.millis) val spied = spy(task) doReturn(IO.raiseError(new RuntimeException("fail"))).when(mockRepo).saveTask(task.name) a[RuntimeException] should be thrownBy TaskScheduler .schedule(task, mockRepo) .take(1) .compile .drain .unsafeRunSync() verify(spied, never()).run() } } }
Example 88
Source File: DefaultTransactionService.scala From aecor with MIT License | 5 votes |
package aecor.example.transaction import aecor.example.account.AccountId import aecor.example.common.Amount import aecor.example.transaction.TransactionRoute.ApiResult import aecor.example.transaction.transaction.Transactions import cats.effect.{ Concurrent, Timer } import cats.implicits._ import scala.concurrent.duration._ final class DefaultTransactionService[F[_]](transactions: Transactions[F])( implicit F: Concurrent[F], timer: Timer[F] ) extends TransactionService[F] { def authorizePayment(transactionId: TransactionId, from: From[AccountId], to: To[AccountId], amount: Amount): F[TransactionRoute.ApiResult] = transactions(transactionId) .create(from, to, amount) .flatMap { _ => val getTransaction = transactions(transactionId).getInfo .flatMap { case Right(t) => t.pure[F] case _ => F.raiseError[Algebra.TransactionInfo](new IllegalStateException("Something went bad")) } def loop: F[Boolean] = getTransaction.flatMap { case Algebra.TransactionInfo(_, _, _, Some(value)) => value.pure[F] case _ => timer.sleep(10.millis) >> loop } Concurrent.timeout(loop, 10.seconds) } .map { succeeded => if (succeeded) { ApiResult.Authorized } else { ApiResult.Declined("You suck") } } } object DefaultTransactionService { def apply[F[_]](transactions: Transactions[F])(implicit F: Concurrent[F], timer: Timer[F]): TransactionService[F] = new DefaultTransactionService[F](transactions) }
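Usage note: the authorization call above is essentially a bounded polling loop (sleep, re-check, give up after a deadline). Extracted into a generic helper it looks like the following sketch; the helper name and parameters are invented, not part of the project.

import cats.effect.{Concurrent, Timer}
import cats.implicits._
import scala.concurrent.duration._

// Poll `check` every `interval` until it yields Some(a), giving up after `limit`.
def pollUntil[F[_]: Concurrent: Timer, A](check: F[Option[A]],
                                          interval: FiniteDuration,
                                          limit: FiniteDuration): F[A] = {
  def loop: F[A] =
    check.flatMap {
      case Some(a) => a.pure[F]
      case None    => Timer[F].sleep(interval) >> loop
    }
  Concurrent.timeout(loop, limit)
}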
Example 89
Source File: DistributedProcessing.scala From aecor with MIT License | 5 votes |
package aecor.kafkadistributedprocessing import java.util.Properties import aecor.kafkadistributedprocessing.internal.Kafka import aecor.kafkadistributedprocessing.internal.Kafka._ import cats.effect.{ ConcurrentEffect, ContextShift, Timer } import cats.implicits._ import cats.effect.implicits._ import fs2.Stream import org.apache.kafka.clients.consumer.ConsumerConfig import scala.concurrent.duration._ final class DistributedProcessing(settings: DistributedProcessingSettings) { private def assignRange(size: Int, partitionCount: Int, partition: Int): Option[(Int, Int)] = { val even = size / partitionCount val reminder = size % partitionCount if (partition >= partitionCount) { none } else { if (partition < reminder) { (partition * (even + 1), even + 1).some } else if (even > 0) { (reminder + partition * even, even).some } else none } } def start[F[_]: ConcurrentEffect: Timer: ContextShift](name: String, processes: List[F[Unit]]): F[Unit] = Kafka .assignPartitions( settings.asProperties(name), settings.topicName, settings.pollingInterval, settings.pollTimeout ) .parEvalMapUnordered(Int.MaxValue) { case AssignedPartition(partition, partitionCount, watchRevocation, release) => assignRange(processes.size, partitionCount, partition).fold(release) { case (offset, processCount) => Stream .range[F](offset, offset + processCount) .parEvalMapUnordered(processCount)(processes) .compile .drain .race(watchRevocation) .flatMap { case Left(_) => release case Right(callback) => callback } } } .compile .drain } object DistributedProcessing { def apply(settings: DistributedProcessingSettings): DistributedProcessing = new DistributedProcessing(settings) } final case class DistributedProcessingSettings(brokers: Set[String], topicName: String, pollingInterval: FiniteDuration = 500.millis, pollTimeout: FiniteDuration = 50.millis, consumerSettings: Map[String, String] = Map.empty) { def withClientId(clientId: String): DistributedProcessingSettings = withConsumerSetting(ConsumerConfig.CLIENT_ID_CONFIG, clientId) def clientId: Option[String] = consumerSettings.get(ConsumerConfig.CLIENT_ID_CONFIG) def withConsumerSetting(key: String, value: String): DistributedProcessingSettings = copy(consumerSettings = consumerSettings.updated(key, value)) def asProperties(groupId: String): Properties = { val properties = new Properties() consumerSettings.foreach { case (key, value) => properties.setProperty(key, value) } properties.setProperty(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, brokers.mkString(",")) properties.setProperty(ConsumerConfig.GROUP_ID_CONFIG, groupId) properties } }
Example 90
Source File: Supervision.scala From aecor with MIT License | 5 votes |
package aecor.kafkadistributedprocessing

import cats.effect.{ Sync, Timer }
import fs2.Stream.retry

import scala.concurrent.duration._

object Supervision {
  type Supervision[F[_]] = F[Unit] => F[Unit]

  def exponentialBackoff[F[_]: Timer: Sync](minBackoff: FiniteDuration = 2.seconds,
                                            maxBackoff: FiniteDuration = 10.seconds,
                                            randomFactor: Double = 0.2,
                                            maxAttempts: Int = Int.MaxValue): Supervision[F] = {
    def nextDelay(in: FiniteDuration): FiniteDuration =
      FiniteDuration((in.toMillis * (1 + randomFactor)).toLong, MILLISECONDS).min(maxBackoff)

    fa => retry(fa, minBackoff, nextDelay, maxAttempts, Function.const(true)).compile.drain
  }

  def noop[F[_]]: Supervision[F] = identity
}
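Usage note: a quick sketch of supervising a worker with the backoff combinator above; flakyTask stands in for any F[Unit] worker and is not part of the library.

import cats.effect.{IO, Timer}
import scala.concurrent.ExecutionContext
import scala.concurrent.duration._

implicit val timer: Timer[IO] = IO.timer(ExecutionContext.global)

// stand-in for a real worker that may fail
val flakyTask: IO[Unit] = IO(println("attempting..."))

// restart the task with exponential backoff, at most 5 attempts
val supervised: IO[Unit] =
  Supervision.exponentialBackoff[IO](minBackoff = 1.second, maxAttempts = 5)(flakyTask)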
Example 91
Source File: DeletableTerminated.scala From kubernetes-client with Apache License 2.0 | 5 votes |
package com.goyeau.kubernetes.client.operation import scala.concurrent.duration._ import cats.Applicative import cats.effect.Timer import cats.implicits._ import io.k8s.apimachinery.pkg.apis.meta.v1.DeleteOptions import org.http4s._ import org.http4s.client.dsl.Http4sClientDsl private[client] trait DeletableTerminated[F[_]] extends Http4sClientDsl[F] { this: Deletable[F] => def deleteTerminated(name: String, deleteOptions: Option[DeleteOptions] = None)(implicit timer: Timer[F] ): F[Status] = { def deleteTerminated(firstTry: Boolean): F[Status] = { def retry() = timer.sleep(1.second) *> deleteTerminated(firstTry = false) delete(name, deleteOptions).flatMap { case status if status.isSuccess => retry() case Status.Conflict => retry() case response @ Status.NotFound => if (firstTry) Applicative[F].pure(response) else Applicative[F].pure(Status.Ok) case error => Applicative[F].pure(error) } } deleteTerminated(firstTry = true) } }
Example 92
Source File: Timeout.scala From tofu with Apache License 2.0 | 5 votes |
package tofu

import cats.effect.{Concurrent, ContextShift, IO, Timer}
import simulacrum.typeclass
import tofu.syntax.feither._
import tofu.internal.NonTofu

import scala.concurrent.duration.FiniteDuration

@typeclass
trait Timeout[F[_]] {
  def timeoutTo[A](fa: F[A], after: FiniteDuration, fallback: F[A]): F[A]
}

object Timeout extends LowPriorTimeoutImplicits {
  implicit def io(implicit timer: Timer[IO], cs: ContextShift[IO]): Timeout[IO] = new Timeout[IO] {
    override def timeoutTo[A](fa: IO[A], after: FiniteDuration, fallback: IO[A]): IO[A] =
      fa.timeoutTo(after, fallback)
  }
}

trait LowPriorTimeoutImplicits { self: Timeout.type =>
  implicit def concurrent[F[_]: NonTofu](implicit F: Concurrent[F], timer: Timer[F]): Timeout[F] =
    new Timeout[F] {
      override def timeoutTo[A](fa: F[A], after: FiniteDuration, fallback: F[A]): F[A] =
        F.race(timer.sleep(after), fa).getOrElseF(fallback)
    }
}
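Usage note: a sketch of summoning the typeclass above for IO; it assumes a cats-effect 2 runtime with Timer and ContextShift in scope so the companion's IO instance resolves.

import cats.effect.{ContextShift, IO, Timer}
import scala.concurrent.ExecutionContext
import scala.concurrent.duration._
import tofu.Timeout

implicit val cs: ContextShift[IO] = IO.contextShift(ExecutionContext.global)
implicit val timer: Timer[IO]     = IO.timer(ExecutionContext.global)

val slow: IO[String]     = IO.sleep(3.seconds).map(_ => "finished")
val fallback: IO[String] = IO.pure("fell back")

// resolves the Timeout[IO] instance defined in the companion above
val bounded: IO[String] = Timeout[IO].timeoutTo(slow, 1.second, fallback)
// bounded.unsafeRunSync() is expected to be "fell back"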
Example 93
Source File: SchedulerSrv.scala From gospeak with Apache License 2.0 | 5 votes |
package gospeak.web.services import java.time.Instant import cats.effect.{IO, Timer} import cron4s.CronExpr import eu.timepit.fs2cron.awakeEveryCron import fs2.Stream import gospeak.core.domain.utils.{AdminCtx, Constants} import gospeak.core.services.storage.AdminVideoRepo import gospeak.core.services.twitter.TwitterSrv import gospeak.libs.scala.Extensions._ import gospeak.libs.scala.TimeUtils import gospeak.web.services.SchedulerSrv.{Conf, Exec, Scheduler} import scala.collection.mutable import scala.concurrent.ExecutionContext import scala.concurrent.duration.FiniteDuration import scala.util.control.NonFatal class SchedulerSrv(videoRepo: AdminVideoRepo, twitterSrv: TwitterSrv)(implicit ec: ExecutionContext) { implicit private val timer: Timer[IO] = IO.timer(ec) private val schedulers = mutable.ListBuffer[Scheduler]() private val execs: mutable.ListBuffer[Exec] = mutable.ListBuffer[Exec]() def getSchedulers: List[Scheduler] = schedulers.toList def getExecs: List[Exec] = execs.toList def init(conf: Conf): Unit = { schedule("tweet random video", conf.tweetRandomVideo, tweetRandomVideo()) } def exec(name: String)(implicit ctx: AdminCtx): IO[Option[Exec]] = schedulers.find(_.name == name).map(exec(_, s"manual (${ctx.user.name.value})")).sequence private def tweetRandomVideo(): IO[(String, Option[String])] = for { video <- videoRepo.findRandom() tweet <- (for { v <- video.toRight("No video available") } yield twitterSrv.tweet(s"#OneDayOneTalk [${v.lang}] ${v.title} on ${v.channel.name} in ${v.publishedAt.getYear(Constants.defaultZoneId)} ${v.url.value}")).sequence } yield (tweet.map(t => s"Tweet sent: ${t.text}").getOrElse("Tweet not sent"), tweet.swap.toOption) // TODO be able to stop/start a scheduler private def schedule(name: String, cron: CronExpr, task: IO[(String, Option[String])]): Unit = { schedulers.find(_.name == name).map(_ => ()).getOrElse { val scheduler = Scheduler(name, cron, Some(Instant.now()), task) schedulers += scheduler val stream = awakeEveryCron[IO](cron).flatMap { _ => Stream.eval(exec(scheduler, "auto")) } stream.compile.drain.unsafeRunAsyncAndForget } } private def exec(s: Scheduler, source: String): IO[Exec] = IO(Instant.now()).flatMap { start => s.task.map { case (res, None) => Exec(s.name, source, start, Instant.now(), res, None) case (res, Some(err)) => Exec(s.name, source, start, Instant.now(), res, Some(err)) }.recover { case NonFatal(e) => Exec(s.name, source, start, Instant.now(), s"Finished with ${e.getClass.getSimpleName}", Some(e.getMessage)) }.map { e => execs += e e } } } object SchedulerSrv { final case class Conf(tweetRandomVideo: CronExpr) final case class Scheduler(name: String, schedule: CronExpr, started: Option[Instant], private[SchedulerSrv] val task: IO[(String, Option[String])]) final case class Exec(name: String, source: String, started: Instant, finished: Instant, result: String, error: Option[String]) { def duration: FiniteDuration = TimeUtils.toFiniteDuration(started, finished) } }
Example 94
Source File: CatsHelpers.scala From nelson with Apache License 2.0 | 5 votes |
package nelson import cats.Eval import cats.effect.{Effect, IO, Timer} import cats.free.Cofree import cats.syntax.functor._ import cats.syntax.monadError._ import fs2.{Pipe, Sink, Stream} import quiver.{Context, Decomp, Graph} import java.util.concurrent.TimeoutException import scala.concurrent.ExecutionContext import scala.concurrent.duration.FiniteDuration import scala.collection.immutable.{Stream => SStream} object CatsHelpers { implicit class NelsonEnrichedIO[A](val io: IO[A]) extends AnyVal { private type Tree[A] = Cofree[SStream, A] private def flattenTree[A](tree: Tree[A]): SStream[A] = { def go(tree: Tree[A], xs: SStream[A]): SStream[A] = SStream.cons(tree.head, tree.tail.value.foldRight(xs)(go(_, _))) go(tree, SStream.Empty) } private def Node[A](root: A, forest: => SStream[Tree[A]]): Tree[A] = Cofree[SStream, A](root, Eval.later(forest)) implicit class NelsonEnrichedGraph[N, A, B](val graph: Graph[N, A, B]) extends AnyVal { def reachable(v: N): Vector[N] = xdfWith(Seq(v), _.successors, _.vertex)._1.flatMap(flattenTree) def xdfWith[C](vs: Seq[N], d: Context[N, A, B] => Seq[N], f: Context[N, A, B] => C): (Vector[Tree[C]], Graph[N, A, B]) = if (vs.isEmpty || graph.isEmpty) (Vector(), graph) else graph.decomp(vs.head) match { case Decomp(None, g) => g.xdfWith(vs.tail, d, f) case Decomp(Some(c), g) => val (xs, _) = g.xdfWith(d(c), d, f) val (ys, g3) = g.xdfWith(vs.tail, d, f) (Node(f(c), xs.toStream) +: ys, g3) } } }
Example 95
Source File: package.scala From fs2-cron with Apache License 2.0 | 5 votes |
package eu.timepit

import java.time.LocalDateTime
import java.time.temporal.ChronoUnit
import java.util.concurrent.TimeUnit

import cats.ApplicativeError
import cats.effect.{Concurrent, Sync, Timer}
import cron4s.expr.CronExpr
import cron4s.lib.javatime._
import fs2.Stream

import scala.concurrent.duration.FiniteDuration

package object fs2cron {

  def sleepCron[F[_]: Sync](cronExpr: CronExpr)(implicit timer: Timer[F]): Stream[F, Unit] =
    durationFromNow(cronExpr).flatMap(Stream.sleep[F])

  def schedule[F[_]: Concurrent, A](tasks: List[(CronExpr, Stream[F, A])])(
      implicit timer: Timer[F]
  ): Stream[F, A] = {
    val scheduled = tasks.map { case (cronExpr, task) => awakeEveryCron[F](cronExpr) >> task }
    Stream.emits(scheduled).covary[F].parJoinUnbounded
  }
}
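Usage note: a sketch of scheduling a small IO job with the combinators above; the cron expression and the job are placeholders, and Cron.unsafeParse comes from cron4s as in the test below.

import cats.effect.{ContextShift, IO, Timer}
import cron4s.Cron
import eu.timepit.fs2cron._
import fs2.Stream
import scala.concurrent.ExecutionContext

implicit val cs: ContextShift[IO] = IO.contextShift(ExecutionContext.global)
implicit val timer: Timer[IO]     = IO.timer(ExecutionContext.global)

val everyMinute = Cron.unsafeParse("0 * * ? * *") // top of every minute

val job: Stream[IO, Unit] = Stream.eval(IO(println("tick")))

// one stream per (cron, task) pair, joined in parallel; needs Concurrent, hence the ContextShift above
val scheduled: Stream[IO, Unit] = schedule(List(everyMinute -> job))
// scheduled.compile.drain.unsafeRunSync() would run until interrupted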
Example 96
Source File: FS2CronTest.scala From fs2-cron with Apache License 2.0 | 5 votes |
package eu.timepit.fs2cron import cats.effect.{ContextShift, IO, Timer} import cron4s.Cron import cron4s.expr.CronExpr import scala.concurrent.ExecutionContext import org.scalatest.funsuite.AnyFunSuite import org.scalatest.matchers.should.Matchers class FS2CronTest extends AnyFunSuite with Matchers { implicit val timer: Timer[IO] = IO.timer(ExecutionContext.global) val evenSeconds: CronExpr = Cron.unsafeParse("*/2 * * ? * *") def isEven(i: Int): Boolean = i % 2 == 0 test("awakeEveryCron") { val s1 = awakeEveryCron[IO](evenSeconds) >> evalNow[IO] val s2 = s1.map(_.getSecond).take(2).forall(isEven) s2.compile.last.map(_.getOrElse(false)).unsafeRunSync() } test("sleepCron") { val s1 = sleepCron[IO](evenSeconds) >> evalNow[IO] val s2 = s1.map(_.getSecond).forall(isEven) s2.compile.last.map(_.getOrElse(false)).unsafeRunSync() } test("schedule") { implicit val ctxShift: ContextShift[IO] = IO.contextShift(ExecutionContext.global) val everySecond: CronExpr = Cron.unsafeParse("* * * ? * *") val s1 = schedule(List(everySecond -> evalNow[IO], evenSeconds -> evalNow[IO])).map(_.getSecond) val seconds = s1.take(3).compile.toList.unsafeRunSync() seconds.count(isEven) shouldBe 2 seconds.count(!isEven(_)) shouldBe 1 } }
Example 97
Source File: ArchiveCache.scala From nexus-kg with Apache License 2.0 | 5 votes |
package ch.epfl.bluebrain.nexus.kg.archives import akka.actor.{ActorSystem, NotInfluenceReceiveTimeout} import cats.Monad import cats.data.OptionT import cats.effect.{Effect, Timer} import cats.implicits._ import ch.epfl.bluebrain.nexus.kg.archives.ArchiveCache._ import ch.epfl.bluebrain.nexus.kg.config.AppConfig._ import ch.epfl.bluebrain.nexus.kg.resources.ResId import ch.epfl.bluebrain.nexus.sourcing.StateMachine import ch.epfl.bluebrain.nexus.sourcing.akka.StopStrategy import ch.epfl.bluebrain.nexus.sourcing.akka.statemachine.AkkaStateMachine import retry.RetryPolicy class ArchiveCache[F[_]: Monad](ref: StateMachine[F, String, State, Command, Unit]) { def put(value: Archive): OptionT[F, Archive] = OptionT(ref.evaluate(value.resId.show, Write(value)).map(_.toOption.flatten)) } object ArchiveCache { private[archives] type State = Option[Archive] private[archives] type Command = Write private[archives] final case class Write(bundle: Archive) extends NotInfluenceReceiveTimeout final def apply[F[_]: Timer](implicit as: ActorSystem, cfg: ArchivesConfig, F: Effect[F]): F[ArchiveCache[F]] = { implicit val retryPolicy: RetryPolicy[F] = cfg.cache.retry.retryPolicy[F] val invalidationStrategy = StopStrategy.lapsedSinceLastInteraction[State, Command](cfg.cacheInvalidateAfter) val evaluate: (State, Command) => F[Either[Unit, State]] = { case (None, Write(bundle)) => F.pure(Right(Some(bundle))) case (Some(_), _) => F.pure(Left(())) // It already exists, so we don't want to replace it } AkkaStateMachine .sharded[F]("archives", None, evaluate, invalidationStrategy, cfg.cache.akkaStateMachineConfig, cfg.cache.shards) .map(new ArchiveCache[F](_)) } }
Example 98
Source File: ElasticSearchIndexer.scala From nexus-kg with Apache License 2.0 | 5 votes |
package ch.epfl.bluebrain.nexus.kg.indexing import akka.actor.{ActorRef, ActorSystem, Props} import akka.stream.scaladsl.Source import akka.util.Timeout import cats.effect.{Effect, Timer} import cats.implicits._ import ch.epfl.bluebrain.nexus.admin.client.types.Project import ch.epfl.bluebrain.nexus.commons.es.client.ElasticSearchClient import ch.epfl.bluebrain.nexus.commons.es.client.ElasticSearchClient.BulkOp import ch.epfl.bluebrain.nexus.kg.config.AppConfig import ch.epfl.bluebrain.nexus.kg.config.AppConfig._ import ch.epfl.bluebrain.nexus.kg.indexing.View.ElasticSearchView import ch.epfl.bluebrain.nexus.kg.resources._ import ch.epfl.bluebrain.nexus.kg.routes.Clients import ch.epfl.bluebrain.nexus.sourcing.projections.ProgressFlow.ProgressFlowElem import ch.epfl.bluebrain.nexus.sourcing.projections.ProjectionProgress.NoProgress import ch.epfl.bluebrain.nexus.sourcing.projections._ import com.typesafe.scalalogging.Logger import scala.concurrent.ExecutionContext // $COVERAGE-OFF$ @SuppressWarnings(Array("MaxParameters")) object ElasticSearchIndexer { private implicit val log: Logger = Logger[ElasticSearchIndexer.type] final def start[F[_]: Timer]( view: ElasticSearchView, resources: Resources[F], project: Project, restartOffset: Boolean )( implicit as: ActorSystem, actorInitializer: (Props, String) => ActorRef, projections: Projections[F, String], F: Effect[F], clients: Clients[F], config: AppConfig ): StreamSupervisor[F, ProjectionProgress] = { implicit val ec: ExecutionContext = as.dispatcher implicit val p: Project = project implicit val indexing: IndexingConfig = config.elasticSearch.indexing implicit val metadataOpts: MetadataOptions = MetadataOptions(linksAsIri = true, expandedLinks = true) implicit val tm: Timeout = Timeout(config.elasticSearch.askTimeout) val client: ElasticSearchClient[F] = clients.elasticSearch.withRetryPolicy(config.elasticSearch.indexing.retry) def deleteOrIndex(res: ResourceV): Option[BulkOp] = if (res.deprecated && !view.filter.includeDeprecated) Some(delete(res)) else view.toDocument(res).map(doc => BulkOp.Index(view.index, res.id.value.asString, doc)) def delete(res: ResourceV): BulkOp = BulkOp.Delete(view.index, res.id.value.asString) val initFetchProgressF: F[ProjectionProgress] = if (restartOffset) projections.recordProgress(view.progressId, NoProgress) >> view.createIndex >> F.pure(NoProgress) else view.createIndex >> projections.progress(view.progressId) val sourceF: F[Source[ProjectionProgress, _]] = initFetchProgressF.map { initial => val flow = ProgressFlowElem[F, Any] .collectCast[Event] .groupedWithin(indexing.batch, indexing.batchTimeout) .distinct() .mapAsync(view.toResource(resources, _)) .collectSome[ResourceV] .collect { case res if view.allowedSchemas(res) && view.allowedTypes(res) => deleteOrIndex(res) case res if view.allowedSchemas(res) => Some(delete(res)) } .collectSome[BulkOp] .runAsyncBatch(client.bulk(_))() .mergeEmit() .toPersistedProgress(view.progressId, initial) cassandraSource(s"project=${view.ref.id}", view.progressId, initial.minProgress.offset) .via(flow) .via(kamonViewMetricsFlow(view, project)) } StreamSupervisor.start(sourceF, view.progressId, actorInitializer) } } // $COVERAGE-ON$
Example 99
Source File: StorageIndexer.scala From nexus-kg with Apache License 2.0 | 5 votes |
package ch.epfl.bluebrain.nexus.kg.indexing import java.time.Instant import akka.actor.ActorSystem import akka.stream.scaladsl.{Flow, Source} import akka.util.Timeout import cats.effect.{Effect, Timer} import cats.implicits._ import ch.epfl.bluebrain.nexus.admin.client.AdminClient import ch.epfl.bluebrain.nexus.kg.cache.{ProjectCache, StorageCache} import ch.epfl.bluebrain.nexus.kg.config.AppConfig import ch.epfl.bluebrain.nexus.kg.config.AppConfig._ import ch.epfl.bluebrain.nexus.kg.config.Vocabulary.nxv import ch.epfl.bluebrain.nexus.kg.resources._ import ch.epfl.bluebrain.nexus.kg.storage.Storage import ch.epfl.bluebrain.nexus.sourcing.projections.ProgressFlow.{PairMsg, ProgressFlowElem} import ch.epfl.bluebrain.nexus.sourcing.projections._ import com.typesafe.scalalogging.Logger import scala.concurrent.ExecutionContext // $COVERAGE-OFF$ object StorageIndexer { private implicit val log = Logger[StorageIndexer.type] def start[F[_]: Timer](storages: Storages[F], storageCache: StorageCache[F])( implicit projectCache: ProjectCache[F], F: Effect[F], as: ActorSystem, projectInitializer: ProjectInitializer[F], adminClient: AdminClient[F], config: AppConfig ): StreamSupervisor[F, Unit] = { implicit val authToken = config.iam.serviceAccountToken implicit val indexing: IndexingConfig = config.keyValueStore.indexing implicit val ec: ExecutionContext = as.dispatcher implicit val tm: Timeout = Timeout(config.keyValueStore.askTimeout) val name = "storage-indexer" def toStorage(event: Event): F[Option[(Storage, Instant)]] = fetchProject(event.organization, event.id.parent, event.subject).flatMap { implicit project => storages.fetchStorage(event.id).value.map { case Left(err) => log.error(s"Error on event '${event.id.show} (rev = ${event.rev})', cause: '${err.msg}'") None case Right(timedStorage) => Some(timedStorage) } } val source: Source[PairMsg[Any], _] = cassandraSource(s"type=${nxv.Storage.value.show}", name) val flow: Flow[PairMsg[Any], Unit, _] = ProgressFlowElem[F, Any] .collectCast[Event] .groupedWithin(indexing.batch, indexing.batchTimeout) .distinct() .mergeEmit() .mapAsync(toStorage) .collectSome[(Storage, Instant)] .runAsync { case (storage, instant) => storageCache.put(storage)(instant) }() .flow .map(_ => ()) StreamSupervisor.startSingleton(F.delay(source.via(flow)), name) } } // $COVERAGE-ON$
Example 100
Source File: SparqlIndexer.scala From nexus-kg with Apache License 2.0 | 5 votes |
package ch.epfl.bluebrain.nexus.kg.indexing import akka.actor.{ActorRef, ActorSystem, Props} import akka.stream.scaladsl.Source import akka.util.Timeout import cats.effect.{Effect, Timer} import cats.implicits._ import ch.epfl.bluebrain.nexus.admin.client.types.Project import ch.epfl.bluebrain.nexus.commons.sparql.client.{BlazegraphClient, SparqlWriteQuery} import ch.epfl.bluebrain.nexus.kg.config.AppConfig import ch.epfl.bluebrain.nexus.kg.config.AppConfig._ import ch.epfl.bluebrain.nexus.kg.indexing.View.SparqlView import ch.epfl.bluebrain.nexus.kg.resources._ import ch.epfl.bluebrain.nexus.kg.routes.Clients import ch.epfl.bluebrain.nexus.sourcing.projections.ProgressFlow.ProgressFlowElem import ch.epfl.bluebrain.nexus.sourcing.projections.ProjectionProgress.NoProgress import ch.epfl.bluebrain.nexus.sourcing.projections._ import scala.concurrent.ExecutionContext // $COVERAGE-OFF$ @SuppressWarnings(Array("MaxParameters")) object SparqlIndexer { final def start[F[_]: Timer]( view: SparqlView, resources: Resources[F], project: Project, restartOffset: Boolean )( implicit as: ActorSystem, actorInitializer: (Props, String) => ActorRef, projections: Projections[F, String], F: Effect[F], clients: Clients[F], config: AppConfig ): StreamSupervisor[F, ProjectionProgress] = { implicit val ec: ExecutionContext = as.dispatcher implicit val p: Project = project implicit val indexing: IndexingConfig = config.sparql.indexing implicit val metadataOpts: MetadataOptions = MetadataOptions(linksAsIri = true, expandedLinks = true) implicit val tm: Timeout = Timeout(config.sparql.askTimeout) val client: BlazegraphClient[F] = clients.sparql.copy(namespace = view.index).withRetryPolicy(config.sparql.indexing.retry) def buildInsertOrDeleteQuery(res: ResourceV): SparqlWriteQuery = if (res.deprecated && !view.filter.includeDeprecated) view.buildDeleteQuery(res) else view.buildInsertQuery(res) val initFetchProgressF: F[ProjectionProgress] = if (restartOffset) projections.recordProgress(view.progressId, NoProgress) >> view.createIndex >> F.pure(NoProgress) else view.createIndex >> projections.progress(view.progressId) val sourceF: F[Source[ProjectionProgress, _]] = initFetchProgressF.map { initial => val flow = ProgressFlowElem[F, Any] .collectCast[Event] .groupedWithin(indexing.batch, indexing.batchTimeout) .distinct() .mapAsync(view.toResource(resources, _)) .collectSome[ResourceV] .collect { case res if view.allowedSchemas(res) && view.allowedTypes(res) => buildInsertOrDeleteQuery(res) case res if view.allowedSchemas(res) => view.buildDeleteQuery(res) } .runAsyncBatch(client.bulk(_))() .mergeEmit() .toPersistedProgress(view.progressId, initial) cassandraSource(s"project=${view.ref.id}", view.progressId, initial.minProgress.offset) .via(flow) .via(kamonViewMetricsFlow(view, project)) } StreamSupervisor.start(sourceF, view.progressId, actorInitializer) } } // $COVERAGE-ON$
Example 101
Source File: ResolverIndexer.scala From nexus-kg with Apache License 2.0 | 5 votes |
package ch.epfl.bluebrain.nexus.kg.indexing import akka.actor.ActorSystem import akka.stream.scaladsl.{Flow, Source} import akka.util.Timeout import cats.effect.{Effect, Timer} import cats.implicits._ import ch.epfl.bluebrain.nexus.admin.client.AdminClient import ch.epfl.bluebrain.nexus.kg.cache.{ProjectCache, ResolverCache} import ch.epfl.bluebrain.nexus.kg.config.AppConfig import ch.epfl.bluebrain.nexus.kg.config.AppConfig._ import ch.epfl.bluebrain.nexus.kg.config.Vocabulary.nxv import ch.epfl.bluebrain.nexus.kg.resolve.Resolver import ch.epfl.bluebrain.nexus.kg.resources._ import ch.epfl.bluebrain.nexus.sourcing.projections.ProgressFlow.{PairMsg, ProgressFlowElem} import ch.epfl.bluebrain.nexus.sourcing.projections._ import com.typesafe.scalalogging.Logger import scala.concurrent.ExecutionContext // $COVERAGE-OFF$ object ResolverIndexer { private implicit val log = Logger[ResolverIndexer.type] final def start[F[_]: Timer](resolvers: Resolvers[F], resolverCache: ResolverCache[F])( implicit projectCache: ProjectCache[F], as: ActorSystem, F: Effect[F], projectInitializer: ProjectInitializer[F], adminClient: AdminClient[F], config: AppConfig ): StreamSupervisor[F, Unit] = { implicit val authToken = config.iam.serviceAccountToken implicit val indexing: IndexingConfig = config.keyValueStore.indexing implicit val ec: ExecutionContext = as.dispatcher implicit val tm: Timeout = Timeout(config.keyValueStore.askTimeout) val name = "resolver-indexer" def toResolver(event: Event): F[Option[Resolver]] = fetchProject(event.organization, event.id.parent, event.subject).flatMap { implicit project => resolvers.fetchResolver(event.id).value.map { case Left(err) => log.error(s"Error on event '${event.id.show} (rev = ${event.rev})', cause: '${err.msg}'") None case Right(resolver) => Some(resolver) } } val source: Source[PairMsg[Any], _] = cassandraSource(s"type=${nxv.Resolver.value.show}", name) val flow: Flow[PairMsg[Any], Unit, _] = ProgressFlowElem[F, Any] .collectCast[Event] .groupedWithin(indexing.batch, indexing.batchTimeout) .distinct() .mergeEmit() .mapAsync(toResolver) .collectSome[Resolver] .runAsync(resolverCache.put)() .flow .map(_ => ()) StreamSupervisor.startSingleton(F.delay(source.via(flow)), name) } } // $COVERAGE-ON$
Example 102
Source File: ViewIndexer.scala From nexus-kg with Apache License 2.0 | 5 votes |
package ch.epfl.bluebrain.nexus.kg.indexing import akka.actor.ActorSystem import akka.stream.scaladsl.{Flow, Source} import akka.util.Timeout import cats.effect.{Effect, Timer} import cats.implicits._ import ch.epfl.bluebrain.nexus.admin.client.AdminClient import ch.epfl.bluebrain.nexus.kg.cache.{ProjectCache, ViewCache} import ch.epfl.bluebrain.nexus.kg.config.AppConfig import ch.epfl.bluebrain.nexus.kg.config.AppConfig._ import ch.epfl.bluebrain.nexus.kg.config.Vocabulary.nxv import ch.epfl.bluebrain.nexus.kg.resources._ import ch.epfl.bluebrain.nexus.sourcing.projections.ProgressFlow.{PairMsg, ProgressFlowElem} import ch.epfl.bluebrain.nexus.sourcing.projections._ import com.typesafe.scalalogging.Logger import scala.concurrent.ExecutionContext // $COVERAGE-OFF$ object ViewIndexer { private implicit val log = Logger[ViewIndexer.type] def start[F[_]: Timer](views: Views[F], viewCache: ViewCache[F])( implicit projectCache: ProjectCache[F], F: Effect[F], as: ActorSystem, projectInitializer: ProjectInitializer[F], adminClient: AdminClient[F], config: AppConfig ): StreamSupervisor[F, Unit] = { implicit val authToken = config.iam.serviceAccountToken implicit val indexing: IndexingConfig = config.keyValueStore.indexing implicit val ec: ExecutionContext = as.dispatcher implicit val tm: Timeout = Timeout(config.keyValueStore.askTimeout) val name = "view-indexer" def toView(event: Event): F[Option[View]] = fetchProject(event.organization, event.id.parent, event.subject).flatMap { implicit project => views.fetchView(event.id).value.map { case Left(err) => log.error(s"Error on event '${event.id.show} (rev = ${event.rev})', cause: '${err.msg}'") None case Right(view) => Some(view) } } val source: Source[PairMsg[Any], _] = cassandraSource(s"type=${nxv.View.value.show}", name) val flow: Flow[PairMsg[Any], Unit, _] = ProgressFlowElem[F, Any] .collectCast[Event] .groupedWithin(indexing.batch, indexing.batchTimeout) .distinct() .mergeEmit() .mapAsync(toView) .collectSome[View] .runAsync(viewCache.put)() .flow .map(_ => ()) StreamSupervisor.startSingleton(F.delay(source.via(flow)), name) } } // $COVERAGE-ON$
Example 103
Source File: ResolverCache.scala From nexus-kg with Apache License 2.0 | 5 votes |
package ch.epfl.bluebrain.nexus.kg.cache

import java.util.UUID
import java.util.concurrent.ConcurrentHashMap

import akka.actor.ActorSystem
import cats.Monad
import cats.effect.{Effect, Timer}
import cats.implicits._
import ch.epfl.bluebrain.nexus.commons.cache.{KeyValueStore, KeyValueStoreConfig}
import ch.epfl.bluebrain.nexus.kg.cache.Cache._
import ch.epfl.bluebrain.nexus.kg.resolve.Resolver
import ch.epfl.bluebrain.nexus.kg.resources.ProjectIdentifier.ProjectRef
import ch.epfl.bluebrain.nexus.rdf.Iri.AbsoluteIri

class ResolverCache[F[_]: Effect: Timer] private (projectToCache: ConcurrentHashMap[UUID, ResolverProjectCache[F]])(
    implicit as: ActorSystem,
    config: KeyValueStoreConfig
) {
  // The project-level lookup and update operations of ResolverCache were elided in the original listing.
}

private class ResolverProjectCache[F[_]: Monad] private (store: KeyValueStore[F, AbsoluteIri, Resolver])
    extends Cache[F, AbsoluteIri, Resolver](store) {

  private implicit val ordering: Ordering[Resolver] = Ordering.by(_.priority)

  def get: F[List[Resolver]] = store.values.map(_.toList.sorted)

  def put(resolver: Resolver): F[Unit] =
    if (resolver.deprecated) store.remove(resolver.id) else store.put(resolver.id, resolver)
}

private object ResolverProjectCache {

  def apply[F[_]: Effect: Timer](
      project: ProjectRef
  )(implicit as: ActorSystem, config: KeyValueStoreConfig): ResolverProjectCache[F] =
    new ResolverProjectCache(KeyValueStore.distributed(s"resolver-${project.id}", (_, resolver) => resolver.rev))
}

object ResolverCache {

  def apply[F[_]: Effect: Timer](implicit as: ActorSystem, config: KeyValueStoreConfig): ResolverCache[F] =
    new ResolverCache(new ConcurrentHashMap[UUID, ResolverProjectCache[F]]())
}
Example 104
Source File: StorageCache.scala From nexus-kg with Apache License 2.0 | 5 votes |
package ch.epfl.bluebrain.nexus.kg.cache

import java.time.{Clock, Instant}
import java.util.UUID
import java.util.concurrent.ConcurrentHashMap

import akka.actor.ActorSystem
import cats.Monad
import cats.effect.{Effect, Timer}
import cats.implicits._
import ch.epfl.bluebrain.nexus.commons.cache.{KeyValueStore, KeyValueStoreConfig}
import ch.epfl.bluebrain.nexus.kg.RevisionedValue
import ch.epfl.bluebrain.nexus.kg.cache.Cache._
import ch.epfl.bluebrain.nexus.kg.cache.StorageProjectCache._
import ch.epfl.bluebrain.nexus.kg.resources.ProjectIdentifier.ProjectRef
import ch.epfl.bluebrain.nexus.kg.storage.Storage
import ch.epfl.bluebrain.nexus.rdf.Iri.AbsoluteIri

class StorageCache[F[_]: Effect: Timer] private (projectToCache: ConcurrentHashMap[UUID, StorageProjectCache[F]])(
    implicit as: ActorSystem,
    config: KeyValueStoreConfig,
    clock: Clock
) {
  // The project-level lookup and update operations of StorageCache were elided in the original listing.
}

private class StorageProjectCache[F[_]: Monad] private (store: KeyValueStore[F, AbsoluteIri, RevisionedStorage])
    extends Cache[F, AbsoluteIri, RevisionedStorage](store) {

  private implicit val ordering: Ordering[RevisionedStorage] =
    Ordering.by((s: RevisionedStorage) => s.rev).reverse

  private implicit def revisioned(storage: Storage)(implicit instant: Instant): RevisionedStorage =
    RevisionedValue(instant.toEpochMilli, storage)

  def get: F[List[Storage]] =
    store.values.map(_.toList.sorted.map(_.value))

  def getDefault: F[Option[Storage]] =
    get.map(_.collectFirst { case storage if storage.default => storage })

  def getBy(id: AbsoluteIri): F[Option[Storage]] =
    get(id).map(_.collectFirst { case RevisionedValue(_, storage) if storage.id == id => storage })

  def put(storage: Storage)(implicit instant: Instant): F[Unit] =
    if (storage.deprecated) store.remove(storage.id) else store.put(storage.id, storage)
}

private object StorageProjectCache {

  type RevisionedStorage = RevisionedValue[Storage]

  def apply[F[_]: Effect: Timer](
      project: ProjectRef
  )(implicit as: ActorSystem, config: KeyValueStoreConfig): StorageProjectCache[F] =
    new StorageProjectCache(
      KeyValueStore.distributed(s"storage-${project.id}", (_, storage) => storage.value.rev)
    )
}

object StorageCache {

  def apply[F[_]: Timer: Effect](implicit as: ActorSystem, config: KeyValueStoreConfig, clock: Clock): StorageCache[F] =
    new StorageCache(new ConcurrentHashMap[UUID, StorageProjectCache[F]]())
}
Example 105
Source File: AclsCache.scala From nexus-kg with Apache License 2.0 | 5 votes |
package ch.epfl.bluebrain.nexus.kg.cache

import akka.actor.ActorSystem
import cats.Monad
import cats.effect.{Effect, Timer}
import cats.implicits._
import ch.epfl.bluebrain.nexus.commons.cache.KeyValueStore
import ch.epfl.bluebrain.nexus.iam.client.IamClient
import ch.epfl.bluebrain.nexus.iam.client.types.events.Event._
import ch.epfl.bluebrain.nexus.iam.client.types.{AccessControlList, AccessControlLists, ResourceAccessControlList}
import ch.epfl.bluebrain.nexus.kg.config.AppConfig
import ch.epfl.bluebrain.nexus.kg.config.AppConfig._
import ch.epfl.bluebrain.nexus.kg.config.Vocabulary._
import ch.epfl.bluebrain.nexus.rdf.Iri.Path

// Note: the AclsCache class definition and the toResourceAcl helper referenced below were
// elided in the original listing; only the companion's apply method survives here.
object AclsCache {

  def apply[F[_]: Effect: Timer](
      iamClient: IamClient[F]
  )(implicit as: ActorSystem, config: AppConfig): AclsCache[F] = {
    val cache = new AclsCache(KeyValueStore.distributed("acls", (_, acls) => acls.rev))
    val handle: AclEvent => F[Unit] = {
      case event: AclReplaced   => cache.replace(event.path, toResourceAcl(event, event.acl))
      case event: AclAppended   => cache.append(event.path, toResourceAcl(event, event.acl))
      case event: AclSubtracted => cache.subtract(event.path, toResourceAcl(event, event.acl))
      case event: AclDeleted    => cache.remove(event.path)
    }
    iamClient.aclEvents(handle)(config.iam.serviceAccountToken)
    cache
  }
}
Example 106
Source File: ArchiveCacheSpec.scala From nexus-kg with Apache License 2.0 | 5 votes |
package ch.epfl.bluebrain.nexus.kg.archives import java.time.{Clock, Instant, ZoneId} import cats.effect.{IO, Timer} import ch.epfl.bluebrain.nexus.admin.client.types.Project import ch.epfl.bluebrain.nexus.commons.test.ActorSystemFixture import ch.epfl.bluebrain.nexus.commons.test.io.IOOptionValues import ch.epfl.bluebrain.nexus.iam.client.types.Identity.Anonymous import ch.epfl.bluebrain.nexus.kg.TestHelper import ch.epfl.bluebrain.nexus.kg.archives.Archive.{File, Resource, ResourceDescription} import ch.epfl.bluebrain.nexus.kg.config.Settings import ch.epfl.bluebrain.nexus.kg.resources.Id import ch.epfl.bluebrain.nexus.kg.resources.syntax._ import org.scalatest.concurrent.Eventually import org.scalatest.matchers.should.Matchers import org.scalatest.wordspec.AnyWordSpecLike import scala.concurrent.duration._ class ArchiveCacheSpec extends ActorSystemFixture("ArchiveCacheSpec", true) with TestHelper with AnyWordSpecLike with Matchers with IOOptionValues with Eventually { override implicit def patienceConfig: PatienceConfig = PatienceConfig(10.second, 50.milliseconds) private val appConfig = Settings(system).appConfig private implicit val config = appConfig.copy(archives = appConfig.archives.copy(cacheInvalidateAfter = 500.millis, maxResources = 100)) private implicit val timer: Timer[IO] = IO.timer(system.dispatcher) private val cache: ArchiveCache[IO] = ArchiveCache[IO].unsafeToFuture().futureValue private implicit val clock = Clock.fixed(Instant.EPOCH, ZoneId.systemDefault()) private val instant = clock.instant() def randomProject() = { val instant = Instant.EPOCH // format: off Project(genIri, genString(), genString(), None, genIri, genIri, Map.empty, genUUID, genUUID, 1L, false, instant, genIri, instant, genIri) // format: on } "An archive cache" should { "write and read an Archive" in { val resId = Id(randomProject().ref, genIri) val resource1 = Resource(genIri, randomProject(), None, None, originalSource = true, None) val file1 = File(genIri, randomProject(), None, None, None) val archive = Archive(resId, instant, Anonymous, Set(resource1, file1)) val _ = cache.put(archive).value.some cache.get(archive.resId).value.some shouldEqual archive } "read a non existing resource" in { val resId = Id(randomProject().ref, genIri) cache.get(resId).value.ioValue shouldEqual None } "read after timeout" in { val resId = Id(randomProject().ref, genIri) val set = Set[ResourceDescription](Resource(genIri, randomProject(), None, None, originalSource = true, None)) val archive = Archive(resId, instant, Anonymous, set) val _ = cache.put(archive).value.some val time = System.currentTimeMillis() cache.get(resId).value.some shouldEqual archive eventually { cache.get(resId).value.ioValue shouldEqual None } val diff = System.currentTimeMillis() - time diff should be > config.archives.cacheInvalidateAfter.toMillis diff should be < config.archives.cacheInvalidateAfter.toMillis + 300 } } }
Example 107
Source File: JvmMonitoring.scala From datadog4s with MIT License | 5 votes |
package com.avast.datadog4s.extension.jvm

import java.time.Duration

import cats.effect.{ConcurrentEffect, Resource, Sync, Timer}
import com.avast.cloud.datadog4s.helpers.Repeated
import com.avast.datadog4s.api.MetricFactory

object JvmMonitoring {
  type ErrorHandler[F[_]] = Throwable => F[Unit]

  case class Config(
    delay: Duration = Duration.ofSeconds(60),
    timeout: Duration = Duration.ofSeconds(10)
  )

  def default[F[_]: ConcurrentEffect: Timer](factory: MetricFactory[F]): Resource[F, Unit] =
    configured(factory, Config(), defaultErrorHandler)

  def configured[F[_]: ConcurrentEffect: Timer](
    factory: MetricFactory[F],
    config: Config,
    errorHandler: ErrorHandler[F]
  ): Resource[F, Unit] = {
    val reporter = new JvmReporter[F](factory)

    Repeated.run[F](config.delay, config.timeout, errorHandler)(reporter.collect).map(_ => ())
  }

  private def defaultErrorHandler[F[_]: Sync]: ErrorHandler[F] =
    err =>
      Sync[F].delay {
        println(s"Error during metrics collection: ${err.getMessage}")
        err.printStackTrace()
      }
}
Example 108
Source File: JvmMonitoringTest.scala From datadog4s with MIT License | 5 votes |
package com.avast.datadog4s.extension.jvm import java.time.Duration import cats.effect.{ ContextShift, IO, Timer } import com.avast.cloud.datadog4s.inmemory.MockMetricsFactory import com.avast.datadog4s.extension.jvm.JvmMonitoring.Config import org.scalatest.flatspec.AnyFlatSpec import org.scalatest.matchers.must.Matchers import cats.syntax.flatMap._ import scala.concurrent.ExecutionContext import scala.concurrent.duration._ class JvmMonitoringTest extends AnyFlatSpec with Matchers { private val ec: ExecutionContext = scala.concurrent.ExecutionContext.Implicits.global implicit val contextShift: ContextShift[IO] = cats.effect.IO.contextShift(ec) implicit val timer: Timer[IO] = IO.timer(ec) val noopErrHandler: Throwable => IO[Unit] = (_: Throwable) => IO.unit "JvmMonitoring" should "create all expected metrics and update them periodically" in { val testEffect = MockMetricsFactory.make[IO].flatMap { inmemory => val runTest = JvmMonitoring .configured(inmemory, Config().copy(delay = Duration.ofMillis(10)), noopErrHandler) .use(_ => IO.never) .timeout(100.millis) .attempt runTest >> inmemory.state.get } val result = testEffect.unsafeRunSync() result.keySet must equal(expectedAspects) result.values.foreach { vector => vector.groupBy(_.tags).foreach { case (_, records) => records.size must be > 0 records.size must be < 15 } } } val minorGcParams = if (System.getProperty("java.version").startsWith("1.8.")) Set.empty else Set("jvm.gc.minor_collection_time", "jvm.gc.minor_collection_count") val expectedAspects: Set[String] = Set( "jvm.cpu.load", "jvm.cpu.time", "jvm.filedescriptor.open", "jvm.heap_memory", "jvm.heap_memory_committed", "jvm.heap_memory_init", "jvm.heap_memory_max", "jvm.heap_memory.eden", "jvm.heap_memory.eden_committed", "jvm.heap_memory.eden_max", "jvm.heap_memory.survivor", "jvm.heap_memory.survivor_committed", "jvm.heap_memory.survivor_max", "jvm.heap_memory.old_gen", "jvm.heap_memory.old_gen_committed", "jvm.heap_memory.old_gen_max", "jvm.non_heap_memory", "jvm.non_heap_memory_committed", "jvm.non_heap_memory_init", "jvm.non_heap_memory_max", "jvm.non_heap_memory.code_cache", "jvm.non_heap_memory.code_cache_committed", "jvm.non_heap_memory.code_cache_max", "jvm.non_heap_memory.metaspace", "jvm.non_heap_memory.metaspace_committed", "jvm.non_heap_memory.metaspace_max", "jvm.non_heap_memory.compressed_class_space", "jvm.non_heap_memory.compressed_class_space_committed", "jvm.non_heap_memory.compressed_class_space_max", "jvm.uptime", "jvm.thread_count", "jvm.thread_daemon", "jvm.thread_started", "jvm.loaded_classes", "jvm.bufferpool.instances", "jvm.bufferpool.bytes", "jvm.gc.major_collection_time", "jvm.gc.major_collection_count" ) ++ minorGcParams }
Example 109
Source File: Repeated.scala From datadog4s with MIT License | 5 votes |
package com.avast.cloud.datadog4s.helpers

import java.time.Duration

import cats.effect.{Concurrent, Resource, Timer}
import cats.syntax.applicativeError._
import cats.syntax.flatMap._
import cats.syntax.apply._
import cats.syntax.applicative._

import scala.concurrent.duration._

object Repeated {

  def run[F[_]: Concurrent: Timer](
    delay: Duration,
    iterationTimeout: Duration,
    errorHandler: Throwable => F[Unit]
  )(task: F[Unit]): Resource[F, F[Unit]] = {
    val safeTask = Concurrent.timeout(task, toScala(iterationTimeout)).attempt.flatMap {
      case Right(a) => a.pure[F]
      case Left(e)  => errorHandler(e)
    }

    val snooze  = Timer[F].sleep(toScala(delay))
    val process = (safeTask *> snooze).foreverM[Unit]

    Concurrent[F].background(process)
  }

  private def toScala(duration: Duration): FiniteDuration =
    duration.toMillis.millis
}
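Repeated.run measures the delay from the end of one iteration to the start of the next, so the effective period is roughly the task duration plus the delay. A hedged usage sketch (the task and error handler are illustrative, not from the library):

import java.time.Duration

import cats.effect.{ContextShift, IO, Timer}
import com.avast.cloud.datadog4s.helpers.Repeated

import scala.concurrent.ExecutionContext

object RepeatedSketch {
  implicit val cs: ContextShift[IO] = IO.contextShift(ExecutionContext.global)
  implicit val timer: Timer[IO]     = IO.timer(ExecutionContext.global)

  val tick: IO[Unit]                 = IO(println("tick"))
  val onError: Throwable => IO[Unit] = e => IO(println(s"iteration failed: ${e.getMessage}"))

  // Runs `tick` every 5 seconds (measured from completion of the previous run),
  // timing each iteration out after 1 second; the loop lives while the Resource is held.
  val scheduled = Repeated.run[IO](Duration.ofSeconds(5), Duration.ofSeconds(1), onError)(tick)

  // scheduled.use(_ => IO.never) keeps the loop alive until the Resource is released.
}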
Example 110
Source File: PromiseMapper.scala From laserdisc with MIT License | 5 votes |
package laserdisc
package fs2

import cats.effect.concurrent.Deferred
import cats.effect.syntax.concurrent._
import cats.effect.{Concurrent, Timer}
import cats.syntax.flatMap._
import cats.syntax.monadError._
import shapeless.Poly1

import scala.concurrent.TimeoutException

object PromiseMapper extends Poly1 {
  private[this] final def mapper[F[_]: Concurrent: Timer, A](protocol: Protocol.Aux[A]): Env[F] => F[Maybe[A]] = {
    case (queue, duration) =>
      Deferred[F, Maybe[A]] >>= { promise =>
        queue.enqueue1(Request(protocol, promise.complete)) >> {
          promise.get
            .timeout(duration)
            .adaptError { case _: TimeoutException => RequestTimedOut(protocol) }
        }
      }
  }

  implicit def mkOne[F[_]: Timer: Concurrent, A]: Case.Aux[Protocol.Aux[A], Env[F] => F[Maybe[A]]] =
    at[Protocol.Aux[A]](mapper(_))
}
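The mapper above waits on a Deferred and converts a TimeoutException into a domain error. A stripped-down sketch of that pattern with plain IO (the error type and names are illustrative, not laserdisc's):

import cats.effect.concurrent.Deferred
import cats.effect.{ContextShift, IO, Timer}
import cats.syntax.monadError._

import scala.concurrent.ExecutionContext
import scala.concurrent.TimeoutException
import scala.concurrent.duration._

object DeferredTimeoutSketch {
  implicit val cs: ContextShift[IO] = IO.contextShift(ExecutionContext.global)
  implicit val timer: Timer[IO]     = IO.timer(ExecutionContext.global)

  final case class ReplyTimedOut(request: String) extends RuntimeException(request)

  // The caller semantically blocks on the Deferred; another fiber is expected to complete
  // it with the reply. If nothing arrives within one second the wait fails with the
  // domain error instead of a bare TimeoutException.
  def await(request: String, promise: Deferred[IO, String]): IO[String] =
    promise.get
      .timeout(1.second)
      .adaptError { case _: TimeoutException => ReplyTimedOut(request) }
}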
Example 111
Source File: LaserdiscFs2Suite.scala From laserdisc with MIT License | 5 votes |
package laserdisc
package fs2

import cats.effect.syntax.effect._
import cats.effect.{ConcurrentEffect, ContextShift, Timer}
import laserdisc.auto._
import munit.FunSuite

abstract class LaserdiscFs2Suite[F[_]: ContextShift: Timer: ConcurrentEffect](p: Port) extends FunSuite {

  private var cleanUp: F[Unit]               = _
  protected final var client: RedisClient[F] = _

  override final def beforeAll(): Unit = {
    val (cl, cu) = RedisClient.to("127.0.0.1", p).allocated.toIO.unsafeRunSync()
    cleanUp = cu
    client = cl
  }

  override final def afterAll(): Unit =
    cleanUp.toIO.unsafeRunSync()

  protected def assertAllEqual[A](as: List[A], a: A): Unit =
    as.foreach(assertEquals(_, a))
}
Example 112
Source File: ReadmeExampleSpec.scala From laserdisc with MIT License | 5 votes |
import java.util.concurrent.ForkJoinPool import cats.effect.{ContextShift, IO, Timer} import munit.FunSuite import scala.concurrent.ExecutionContext import scala.concurrent.ExecutionContext.fromExecutor final class ReadmeExampleSpec extends FunSuite with TestLogCapture { private[this] val ec: ExecutionContext = fromExecutor(new ForkJoinPool()) private[this] implicit val timer: Timer[IO] = IO.timer(ec) private[this] implicit val contextShift: ContextShift[IO] = IO.contextShift(ec) test("The readme example gives the expected output and logs when a LogWriter is in scope") { import cats.syntax.flatMap._ import laserdisc._ import laserdisc.all._ import laserdisc.auto._ import laserdisc.fs2._ import log.effect.LogWriter import log.effect.fs2.SyncLogWriter def redisTest(implicit log: LogWriter[IO]): IO[Unit] = RedisClient.to("localhost", 6379).use { client => client.send( set("a", 23), set("b", 55), get[PosInt]("b"), get[PosInt]("a") ) >>= { case (Right(OK), Right(OK), Right(Some(getOfb)), Right(Some(getOfa))) if getOfb.value == 55 && getOfa.value == 23 => log info "yay!" case other => log.error(s"something went terribly wrong $other") >> IO.raiseError(new RuntimeException("boom")) } } val logged = capturedConsoleOutOf { redisTest(SyncLogWriter.consoleLog[IO]) } assert(logged contains "Starting connection") assert(logged contains "Connected to server localhost:6379") assert(logged contains "sending Arr(Bulk(SET),Bulk(a),Bulk(23))") assert(logged contains "receiving Str(OK)") assert(logged contains "sending Arr(Bulk(SET),Bulk(b),Bulk(55))") assert(logged contains "receiving Str(OK)") assert(logged contains "sending Arr(Bulk(GET),Bulk(b))") assert(logged contains "receiving Bulk(55)") assert(logged contains "sending Arr(Bulk(GET),Bulk(a))") assert(logged contains "receiving Bulk(23)") assert(logged contains "yay!") assert(logged contains "Shutting down connection") assert(logged contains "Shutdown complete") assert(logged contains "Connection terminated: No issues") } }
Example 113
Source File: DefaultLoggerSpec.scala From laserdisc with MIT License | 5 votes |
import java.util.concurrent.ForkJoinPool import cats.effect.{ContextShift, IO, Timer} import munit.FunSuite import scala.concurrent.ExecutionContext import scala.concurrent.ExecutionContext.fromExecutor final class DefaultLoggerSpec extends FunSuite with TestLogCapture { private def assertNot(c: =>Boolean): Unit = assert(!c) private[this] val ec: ExecutionContext = fromExecutor(new ForkJoinPool()) private[this] implicit val timer: Timer[IO] = IO.timer(ec) private[this] implicit val contextShift: ContextShift[IO] = IO.contextShift(ec) test("The readme example doesn't log when no LogWriter is given") { import cats.syntax.flatMap._ import laserdisc._ import laserdisc.all._ import laserdisc.auto._ import laserdisc.fs2._ import log.effect.fs2.SyncLogWriter.consoleLog val redisTest: IO[Unit] = RedisClient.to("localhost", 6379).use { client => client.send( set("a", 23), set("b", 55), get[PosInt]("b"), get[PosInt]("a") ) >>= { case (Right(OK), Right(OK), Right(Some(getOfb)), Right(Some(getOfa))) if getOfb.value == 55 && getOfa.value == 23 => consoleLog[IO].info("yay!") case other => consoleLog[IO].error(s"something went terribly wrong $other") >> IO.raiseError(new RuntimeException("boom")) } } val logged = capturedConsoleOutOf(redisTest) assertNot(logged contains "Starting connection") assertNot(logged contains "Server available for publishing: localhost:6379") assertNot(logged contains "sending Arr(Bulk(SET),Bulk(a),Bulk(23))") assertNot(logged contains "receiving Str(OK)") assertNot(logged contains "sending Arr(Bulk(SET),Bulk(b),Bulk(55))") assertNot(logged contains "receiving Str(OK)") assertNot(logged contains "sending Arr(Bulk(GET),Bulk(b))") assertNot(logged contains "receiving Bulk(55)") assertNot(logged contains "sending Arr(Bulk(GET),Bulk(a))") assertNot(logged contains "receiving Bulk(23)") assertNot(logged contains "Shutting down connection") assertNot(logged contains "Shutdown complete") assertNot(logged contains "Connection terminated: No issues") } }
Example 114
Source File: CatsIoTestRunner.scala From laserdisc with MIT License | 5 votes |
package laserdisc
package fs2

import java.util.concurrent.{Executors, TimeUnit}

import cats.effect.{ContextShift, IO, Timer}
import cats.syntax.flatMap._
import laserdisc.auto._
import log.effect.fs2.SyncLogWriter.consoleLogUpToLevel
import log.effect.{LogLevels, LogWriter}

import scala.concurrent.ExecutionContext
import scala.concurrent.ExecutionContext.fromExecutor

object CatsIoTestRunner extends TestCases {

  private[this] val ec: ExecutionContext = fromExecutor(Executors.newFixedThreadPool(8))

  private[this] implicit val timer: Timer[IO]                = IO.timer(ec)
  private[this] implicit val contextShift: ContextShift[IO]  = IO.contextShift(ec)
  private[this] implicit val logWriter: LogWriter[IO]        = consoleLogUpToLevel(LogLevels.Error)

  def main(args: Array[String]): Unit = {
    val task = timer.clock.monotonic(TimeUnit.MINUTES) >>= { start: Long =>
      RedisClient.to("localhost", 6379).use { cl =>
        def loop(count: Long): IO[Long] =
          case1(cl) >> timer.clock.monotonic(TimeUnit.MINUTES) >>= { current =>
            if (current - start >= 2) IO.pure(count)
            else loop(count + 1)
          }

        loop(0)
      }
    }

    println(s"Avg send/s: ${task.unsafeRunSync() * 24.0 / 2 / 60}")
    sys.exit()
  }
}
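The runner above reads timer.clock.monotonic to decide when two minutes have elapsed. A small sketch of the same measurement idea (the sleep duration is illustrative):

import java.util.concurrent.TimeUnit

import cats.effect.{IO, Timer}

import scala.concurrent.ExecutionContext
import scala.concurrent.duration._

object ElapsedTimeSketch {
  implicit val timer: Timer[IO] = IO.timer(ExecutionContext.global)

  // Reads the monotonic clock before and after a sleep and reports the elapsed milliseconds.
  val elapsedMillis: IO[Long] =
    for {
      start <- timer.clock.monotonic(TimeUnit.MILLISECONDS)
      _     <- IO.sleep(250.millis)
      end   <- timer.clock.monotonic(TimeUnit.MILLISECONDS)
    } yield end - start
}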
Example 115
Source File: checkout.scala From pfps-shopping-cart with Apache License 2.0 | 5 votes |
package shop.programs import cats.effect.Timer import cats.implicits._ import io.chrisdavenport.log4cats.Logger import retry._ import retry.RetryDetails._ import scala.concurrent.duration._ import shop.algebras._ import shop.domain.auth.UserId import shop.domain.cart._ import shop.domain.checkout._ import shop.domain.order._ import shop.domain.payment._ import shop.effects._ import shop.http.clients.PaymentClient import squants.market.Money final class CheckoutProgram[F[_]: Background: Logger: MonadThrow: Timer]( paymentClient: PaymentClient[F], shoppingCart: ShoppingCart[F], orders: Orders[F], retryPolicy: RetryPolicy[F] ) { private def logError(action: String)(e: Throwable, details: RetryDetails): F[Unit] = details match { case r: WillDelayAndRetry => Logger[F].error( s"Failed to process $action with ${e.getMessage}. So far we have retried ${r.retriesSoFar} times." ) case g: GivingUp => Logger[F].error(s"Giving up on $action after ${g.totalRetries} retries.") } private def processPayment(payment: Payment): F[PaymentId] = { val action = retryingOnAllErrors[PaymentId]( policy = retryPolicy, onError = logError("Payments") )(paymentClient.process(payment)) action.adaptError { case e => PaymentError(Option(e.getMessage).getOrElse("Unknown")) } } private def createOrder(userId: UserId, paymentId: PaymentId, items: List[CartItem], total: Money): F[OrderId] = { val action = retryingOnAllErrors[OrderId]( policy = retryPolicy, onError = logError("Order") )(orders.create(userId, paymentId, items, total)) def bgAction(fa: F[OrderId]): F[OrderId] = fa.adaptError { case e => OrderError(e.getMessage) } .onError { case _ => Logger[F].error(s"Failed to create order for Payment: ${paymentId}. Rescheduling as a background action") *> Background[F].schedule(bgAction(fa), 1.hour) } bgAction(action) } def checkout(userId: UserId, card: Card): F[OrderId] = shoppingCart .get(userId) .ensure(EmptyCartError)(_.items.nonEmpty) .flatMap { case CartTotal(items, total) => for { pid <- processPayment(Payment(userId, total, card)) order <- createOrder(userId, pid, items, total) _ <- shoppingCart.delete(userId).attempt.void } yield order } }
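CheckoutProgram receives its RetryPolicy[F] from the caller. A hedged sketch of a policy that would fit here, built from cats-retry combinators (the retry cap and base delay are illustrative assumptions, not taken from the project):

import cats.effect.IO
import retry.{RetryPolicies, RetryPolicy}

import scala.concurrent.duration._

object RetryPolicySketch {
  // Retry at most three times, backing off exponentially starting at 10 milliseconds.
  val retryPolicy: RetryPolicy[IO] =
    RetryPolicies.limitRetries[IO](3) join RetryPolicies.exponentialBackoff[IO](10.milliseconds)
}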
Example 116
Source File: ProductIntegration.scala From http4s-poc-api with MIT License | 5 votes |
package integration import cats.effect.syntax.concurrent._ import cats.effect.{Concurrent, ContextShift, IO, Timer} import cats.syntax.flatMap._ import errors.PriceServiceError.{ProductErr, ProductPriceErr} import external._ import external.library.IoAdapt.--> import external.library.syntax.errorAdapt._ import external.library.syntax.ioAdapt._ import model.DomainModel._ import scala.concurrent.Future import scala.concurrent.duration.FiniteDuration sealed trait ProductIntegration[F[_]] { def product: ProductId => F[Option[Product]] def productPrice: Product => UserPreferences => F[Price] } object ProductIntegration { @inline def apply[F[_]: Concurrent: Timer: IO --> *[_]: Future --> *[_]]( productDep: TeamTwoHttpApi, pricesDep: TeamOneHttpApi, t: FiniteDuration )( implicit CS: ContextShift[F] ): ProductIntegration[F] = new ProductIntegration[F] { def product: ProductId => F[Option[Product]] = { ps => CS.shift >> productDep.product(ps).adaptedTo[F].timeout(t).narrowFailureTo[ProductErr] } def productPrice: Product => UserPreferences => F[Price] = { p => pref => CS.shift >> pricesDep.productPrice(p)(pref).adaptedTo[F].timeout(t).narrowFailureTo[ProductPriceErr] } } }
Example 117
Source File: CacheIntegration.scala From http4s-poc-api with MIT License | 5 votes |
package integration import cats.effect.syntax.concurrent._ import cats.effect.{Concurrent, ContextShift, IO, Timer} import cats.syntax.flatMap._ import errors.PriceServiceError.{CacheLookupError, CacheStoreError} import external.TeamThreeCacheApi import external.library.IoAdapt.--> import external.library.syntax.errorAdapt._ import external.library.syntax.ioAdapt._ import model.DomainModel._ import scala.concurrent.duration.FiniteDuration sealed trait CacheIntegration[F[_]] { def cachedProduct: ProductId => F[Option[Product]] def storeProductToCache: ProductId => Product => F[Unit] } object CacheIntegration { @inline def apply[F[_]: Concurrent: Timer: IO --> *[_]]( cache: TeamThreeCacheApi[ProductId, Product], t: FiniteDuration )( implicit CS: ContextShift[F] ): CacheIntegration[F] = new CacheIntegration[F] { def cachedProduct: ProductId => F[Option[Product]] = pId => CS.shift >> cache.get(pId).adaptedTo[F].timeout(t).narrowFailureTo[CacheLookupError] def storeProductToCache: ProductId => Product => F[Unit] = pId => p => CS.shift >> cache.put(pId)(p).adaptedTo[F].timeout(t).narrowFailureTo[CacheStoreError] } }
Example 118
Source File: UserIntegration.scala From http4s-poc-api with MIT License | 5 votes |
package integration import cats.effect.syntax.concurrent._ import cats.effect.{Concurrent, ContextShift, IO, Timer} import cats.syntax.flatMap._ import errors.PriceServiceError.{PreferenceErr, UserErr} import external._ import external.library.IoAdapt.--> import external.library.syntax.errorAdapt._ import external.library.syntax.ioAdapt._ import model.DomainModel._ import scala.concurrent.Future import scala.concurrent.duration.FiniteDuration sealed trait UserIntegration[F[_]] { def user: UserId => F[User] def usersPreferences: UserId => F[UserPreferences] } object UserIntegration { @inline def apply[F[_]: Concurrent: Timer: IO --> *[_]: Future --> *[_]]( userDep: TeamTwoHttpApi, preferencesDep: TeamOneHttpApi, t: FiniteDuration )( implicit CS: ContextShift[F] ): UserIntegration[F] = new UserIntegration[F] { def user: UserId => F[User] = { id => CS.shift >> userDep.user(id).adaptedTo[F].timeout(t).narrowFailureTo[UserErr] } def usersPreferences: UserId => F[UserPreferences] = { id => CS.shift >> preferencesDep.usersPreferences(id).adaptedTo[F].timeout(t).narrowFailureTo[PreferenceErr] } } }
Example 119
Source File: PriceService.scala From http4s-poc-api with MIT License | 5 votes |
package service import cats.Parallel import cats.effect.{Concurrent, ContextShift, IO, Timer} import cats.syntax.apply._ import cats.syntax.flatMap._ import cats.syntax.parallel._ import external.library.IoAdapt.--> import external.{TeamOneHttpApi, TeamThreeCacheApi, TeamTwoHttpApi} import integration.{CacheIntegration, ProductIntegration, UserIntegration} import log.effect.LogWriter import model.DomainModel._ import scala.concurrent.Future import scala.concurrent.duration._ final case class PriceService[F[_]: Concurrent: Timer: ContextShift: Parallel[*[_]]]( cacheDep: TeamThreeCacheApi[ProductId, Product], teamOneStupidName: TeamOneHttpApi, teamTwoStupidName: TeamTwoHttpApi, logger: LogWriter[F] )( implicit ev1: IO --> F, ev2: Future --> F ) { private[this] val cache = CacheIntegration[F](cacheDep, 10.seconds) private[this] val userInt = UserIntegration[F](teamTwoStupidName, teamOneStupidName, 10.seconds) private[this] val productInt = ProductIntegration[F](teamTwoStupidName, teamOneStupidName, 10.seconds) private[this] lazy val productRepo: ProductRepo[F] = ProductRepo(cache, productInt, logger) private[this] lazy val priceCalculator: PriceCalculator[F] = PriceCalculator(productInt, logger) private[this] lazy val preferenceFetcher: PreferenceFetcher[F] = PreferenceFetcher(userInt, logger) def prices(userId: UserId, productIds: Seq[ProductId]): F[List[Price]] = (userFor(userId), productsFor(productIds), preferencesFor(userId)) .parMapN(priceCalculator.finalPrices) .flatten private[this] def userFor(userId: UserId): F[User] = logger.debug(s"Collecting user details for id $userId") >> userInt.user(userId) <* logger.debug(s"User details collected for id $userId") private[this] def preferencesFor(userId: UserId): F[UserPreferences] = logger.debug(s"Looking up user preferences for user $userId") >> preferenceFetcher.userPreferences(userId) <* logger.debug(s"User preferences look up for $userId completed") private[this] def productsFor(productIds: Seq[ProductId]): F[List[Product]] = logger.debug(s"Collecting product details for products $productIds") >> productRepo.storedProducts(productIds) <* logger.debug(s"Product details collection for $productIds completed") }
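The prices method fans three lookups out in parallel with parMapN and combines them once all complete. A reduced sketch of that combinator with plain IO values (the values are illustrative):

import cats.effect.{ContextShift, IO}
import cats.implicits._

import scala.concurrent.ExecutionContext

object ParMapNSketch {
  implicit val cs: ContextShift[IO] = IO.contextShift(ExecutionContext.global)

  val user: IO[String]           = IO.pure("user-1")
  val products: IO[List[String]] = IO.pure(List("p1", "p2"))
  val prefs: IO[String]          = IO.pure("EUR")

  // The three effects run concurrently; the function is applied once all have finished.
  val combined: IO[String] =
    (user, products, prefs).parMapN((u, ps, pf) => s"$u sees ${ps.size} products priced in $pf")
}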
Example 120
Source File: TestTeamThreeCacheApi.scala From http4s-poc-api with MIT License | 5 votes |
package interpreters import cats.effect.{ConcurrentEffect, IO, Timer} import cats.syntax.flatMap._ import external.TeamThreeCacheApi import log.effect.LogWriter import model.DomainModel.{Product, ProductId} import zio.clock.Clock import zio.interop.catz._ import zio.{Runtime, Task} import scala.concurrent.duration._ object TestTeamThreeCacheApi { @inline def make(productsInCache: Map[ProductId, Product])(testLogger: LogWriter[Task])( implicit t: Timer[IO], rt: Runtime[Clock] ): TeamThreeCacheApi[ProductId, Product] = new TeamThreeCacheApi[ProductId, Product] { def get: ProductId => IO[Option[Product]] = { id => ConcurrentEffect[Task].toIO( testLogger.debug(s"DEP cachedProduct -> Getting the product $id from the cache in test") ) >> t.sleep(200.milliseconds) >> IO(productsInCache.get(id)) } def put: ProductId => Product => IO[Unit] = { id => _ => ConcurrentEffect[Task].toIO( testLogger.debug(s"DEP storeProductToCache -> Storing the product $id to the repo in test") ) >> t.sleep(200.milliseconds) >> IO.unit } } @inline def makeFail(implicit t: Timer[IO]): TeamThreeCacheApi[ProductId, Product] = new TeamThreeCacheApi[ProductId, Product] { def get: ProductId => IO[Option[Product]] = { _ => t.sleep(300.milliseconds) >> IO.delay( throw new Throwable( "DependencyFailure. The dependency def cachedProduct: ProductId => Task[Option[Product]] failed with message not responding" ) ) } def put: ProductId => Product => IO[Unit] = { _ => _ => t.sleep(150.milliseconds) >> IO.delay( throw new Throwable( "DependencyFailure. The dependency def storeProductToCache: ProductId => Product => Task[Unit] failed with message not responding" ) ) } } }
Example 121
Source File: TestTeamTwoHttpApi.scala From http4s-poc-api with MIT License | 5 votes |
package interpreters import cats.effect.{ConcurrentEffect, IO, Timer} import cats.syntax.flatMap._ import external.TeamTwoHttpApi import log.effect.LogWriter import model.DomainModel.{Product, ProductId, User, UserId} import zio.clock.Clock import zio.interop.catz._ import zio.{Runtime, Task} import scala.concurrent.duration._ object TestTeamTwoHttpApi { @inline def make(aUser: User, productsInStore: Map[ProductId, Product])(testLogger: LogWriter[Task])( implicit t: Timer[IO], rt: Runtime[Clock] ): TeamTwoHttpApi = new TeamTwoHttpApi { def user: UserId => IO[User] = { id => ConcurrentEffect[Task].toIO( testLogger.debug(s"DEP user -> Getting the user $id in test") ) >> t.sleep(1.second) >> IO.delay(aUser) } def product: ProductId => IO[Option[Product]] = { id => ConcurrentEffect[Task].toIO( testLogger.debug(s"DEP product -> Getting the product $id from the store in test") ) >> t.sleep(1.second) >> IO(productsInStore.get(id)) } } @inline def makeFail(implicit t: Timer[IO]): TeamTwoHttpApi = new TeamTwoHttpApi { def user: UserId => IO[User] = { _ => t.sleep(200.milliseconds) >> IO.delay( throw new Throwable( "DependencyFailure. The dependency `UserId => IO[User]` failed with message network failure" ) ) } def product: ProductId => IO[Option[Product]] = { _ => t.sleep(400.milliseconds) >> IO.delay( throw new Throwable( "DependencyFailure. The dependency `ProductId => IO[Option[Product]]` failed with message network failure" ) ) } } }
Example 122
Source File: FinatraServerTests.scala From tapir with Apache License 2.0 | 5 votes |
package sttp.tapir.server.finatra import cats.data.NonEmptyList import cats.effect.{ContextShift, IO, Resource, Timer} import com.github.ghik.silencer.silent import com.twitter.finagle.http.Request import com.twitter.finatra.http.filters.{AccessLoggingFilter, ExceptionMappingFilter} import com.twitter.finatra.http.{Controller, EmbeddedHttpServer, HttpServer} import com.twitter.finatra.http.routing.HttpRouter import com.twitter.util.{Future, FuturePool} import sttp.tapir.Endpoint import sttp.tapir.server.{DecodeFailureHandler, ServerDefaults, ServerEndpoint} import sttp.tapir.server.tests.ServerTests import sttp.tapir.tests.{Port, PortCounter} import scala.concurrent.ExecutionContext import scala.reflect.ClassTag import scala.concurrent.duration._ class FinatraServerTests extends ServerTests[Future, Nothing, FinatraRoute] { override def streamingSupport: Boolean = false private val futurePool = FuturePool.unboundedPool implicit val ec: ExecutionContext = scala.concurrent.ExecutionContext.Implicits.global implicit val contextShift: ContextShift[IO] = IO.contextShift(ec) implicit val timer: Timer[IO] = IO.timer(ec) override def pureResult[T](t: T): Future[T] = Future.value(t) override def suspendResult[T](t: => T): Future[T] = futurePool { t } override def route[I, E, O]( e: ServerEndpoint[I, E, O, Nothing, Future], decodeFailureHandler: Option[DecodeFailureHandler] = None ): FinatraRoute = { implicit val serverOptions: FinatraServerOptions = FinatraServerOptions.default.copy(decodeFailureHandler = decodeFailureHandler.getOrElse(ServerDefaults.decodeFailureHandler)) e.toRoute } override def routeRecoverErrors[I, E <: Throwable, O](e: Endpoint[I, E, O, Nothing], fn: I => Future[O])(implicit eClassTag: ClassTag[E] ): FinatraRoute = { e.toRouteRecoverErrors(fn) } override def server(routes: NonEmptyList[FinatraRoute], port: Port): Resource[IO, Unit] = FinatraServerTests.server(routes, port) override lazy val portCounter: PortCounter = new PortCounter(58000) } object FinatraServerTests { def server(routes: NonEmptyList[FinatraRoute], port: Port)(implicit ioTimer: Timer[IO]): Resource[IO, Unit] = { def waitUntilHealthy(s: EmbeddedHttpServer, count: Int): IO[EmbeddedHttpServer] = if (s.isHealthy) IO.pure(s) else if (count > 1000) IO.raiseError(new IllegalStateException("Server unhealthy")) else IO.sleep(10.milliseconds).flatMap(_ => waitUntilHealthy(s, count + 1)) val bind = IO { class TestController extends Controller with TapirController { routes.toList.foreach(addTapirRoute) } class TestServer extends HttpServer { @silent("discarded") override protected def configureHttp(router: HttpRouter): Unit = { router .filter[AccessLoggingFilter[Request]] .filter[ExceptionMappingFilter[Request]] .add(new TestController) } } val server = new EmbeddedHttpServer( new TestServer, Map( "http.port" -> s":$port" ), // in the default implementation waitForWarmup suspends the thread for 1 second between healthy checks // we improve on that by checking every 10ms waitForWarmup = false ) server.start() server }.flatMap(waitUntilHealthy(_, 0)) Resource .make(bind)(httpServer => IO(httpServer.close())) .map(_ => ()) } }
Example 123
Source File: FinatraServerCatsTests.scala From tapir with Apache License 2.0 | 5 votes |
package sttp.tapir.server.finatra.cats import cats.data.NonEmptyList import cats.effect.{ContextShift, IO, Resource, Timer} import sttp.tapir.Endpoint import sttp.tapir.server.finatra.{FinatraRoute, FinatraServerOptions, FinatraServerTests} import sttp.tapir.server.tests.ServerTests import sttp.tapir.server.{DecodeFailureHandler, ServerDefaults, ServerEndpoint} import sttp.tapir.tests.{Port, PortCounter} import scala.concurrent.ExecutionContext import scala.reflect.ClassTag class FinatraServerCatsTests extends ServerTests[IO, Nothing, FinatraRoute] { override def streamingSupport: Boolean = false implicit val ec: ExecutionContext = scala.concurrent.ExecutionContext.Implicits.global implicit val contextShift: ContextShift[IO] = IO.contextShift(ec) implicit val timer: Timer[IO] = IO.timer(ec) override def pureResult[T](t: T): IO[T] = IO.pure(t) override def suspendResult[T](t: => T): IO[T] = IO.apply(t) override def route[I, E, O]( e: ServerEndpoint[I, E, O, Nothing, IO], decodeFailureHandler: Option[DecodeFailureHandler] = None ): FinatraRoute = { implicit val serverOptions: FinatraServerOptions = FinatraServerOptions.default.copy(decodeFailureHandler = decodeFailureHandler.getOrElse(ServerDefaults.decodeFailureHandler)) e.toRoute } override def routeRecoverErrors[I, E <: Throwable, O](e: Endpoint[I, E, O, Nothing], fn: I => IO[O])(implicit eClassTag: ClassTag[E] ): FinatraRoute = e.toRouteRecoverErrors(fn) override def server(routes: NonEmptyList[FinatraRoute], port: Port): Resource[IO, Unit] = FinatraServerTests.server(routes, port) override lazy val portCounter: PortCounter = new PortCounter(59000) }
Example 124
Source File: InvoicesApi.scala From event-sourcing-kafka-streams with MIT License | 5 votes |
package org.amitayh.invoices.web import java.util.UUID import cats.effect.{Concurrent, Timer} import cats.implicits._ import fs2.Stream import fs2.concurrent.Topic import io.circe._ import io.circe.generic.auto._ import io.circe.syntax._ import org.amitayh.invoices.common.domain.CommandResult.{Failure, Success} import org.amitayh.invoices.common.domain.{Command, CommandResult} import org.amitayh.invoices.dao.InvoiceList import org.amitayh.invoices.web.CommandDto._ import org.amitayh.invoices.web.PushEvents.CommandResultRecord import org.http4s.circe._ import org.http4s.dsl.Http4sDsl import org.http4s.{EntityDecoder, HttpRoutes, Response} import scala.concurrent.duration._ class InvoicesApi[F[_]: Concurrent: Timer] extends Http4sDsl[F] { private val maxQueued = 16 implicit val commandEntityDecoder: EntityDecoder[F, Command] = jsonOf[F, Command] def service(invoiceList: InvoiceList[F], producer: Kafka.Producer[F, UUID, Command], commandResultsTopic: Topic[F, CommandResultRecord]): HttpRoutes[F] = HttpRoutes.of[F] { case GET -> Root / "invoices" => invoiceList.get.flatMap(invoices => Ok(invoices.asJson)) case request @ POST -> Root / "execute" / "async" / UuidVar(invoiceId) => request .as[Command] .flatMap(producer.send(invoiceId, _)) .flatMap(metaData => Accepted(Json.fromLong(metaData.timestamp))) case request @ POST -> Root / "execute" / UuidVar(invoiceId) => request.as[Command].flatMap { command => val response = resultStream(commandResultsTopic, command.commandId) merge timeoutStream producer.send(invoiceId, command) *> response.head.compile.toList.map(_.head) } } private def resultStream(commandResultsTopic: Topic[F, CommandResultRecord], commandId: UUID): Stream[F, Response[F]] = commandResultsTopic.subscribe(maxQueued).collectFirst { case Some((_, CommandResult(_, `commandId`, outcome))) => outcome }.flatMap { case Success(_, _, snapshot) => Stream.eval(Ok(snapshot.asJson)) case Failure(cause) => Stream.eval(UnprocessableEntity(cause.message)) } private def timeoutStream: Stream[F, Response[F]] = Stream.eval(Timer[F].sleep(5.seconds) *> RequestTimeout("timeout")) } object InvoicesApi { def apply[F[_]: Concurrent: Timer]: InvoicesApi[F] = new InvoicesApi[F] }
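The synchronous execute route above races the command-result stream against a timeout stream and takes whichever emits first. A reduced sketch of that race with plain fs2 streams (the values and durations are illustrative):

import cats.effect.{ContextShift, IO, Timer}
import cats.syntax.apply._
import fs2.Stream

import scala.concurrent.ExecutionContext
import scala.concurrent.duration._

object FirstResponseSketch {
  implicit val cs: ContextShift[IO] = IO.contextShift(ExecutionContext.global)
  implicit val timer: Timer[IO]     = IO.timer(ExecutionContext.global)

  val result: Stream[IO, String]  = Stream.eval(IO.sleep(1.second) *> IO.pure("result"))
  val timeout: Stream[IO, String] = Stream.eval(IO.sleep(5.seconds) *> IO.pure("timeout"))

  // merge emits from whichever stream produces first; head keeps only that first element.
  val first: IO[String] = (result merge timeout).head.compile.toList.map(_.head)
}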
Example 125
Source File: Http4sBlazeServerModule.scala From scala-server-toolkit with MIT License | 5 votes |
package com.avast.sst.http4s.server import java.net.{InetSocketAddress, StandardSocketOptions} import cats.effect.{ConcurrentEffect, Resource, Timer} import org.http4s.HttpApp import org.http4s.server.Server import org.http4s.server.blaze.BlazeServerBuilder import scala.concurrent.ExecutionContext import scala.concurrent.duration.Duration object Http4sBlazeServerModule { def make[F[_]: ConcurrentEffect: Timer]( config: Http4sBlazeServerConfig, httpApp: HttpApp[F], executionContext: ExecutionContext ): Resource[F, Server[F]] = { for { inetSocketAddress <- Resource.liftF( ConcurrentEffect[F].delay( InetSocketAddress.createUnresolved(config.listenAddress, config.listenPort) ) ) server <- BlazeServerBuilder[F](executionContext) .bindSocketAddress(inetSocketAddress) .withHttpApp(httpApp) .withoutBanner .withNio2(config.nio2Enabled) .withWebSockets(config.webSocketsEnabled) .enableHttp2(config.http2Enabled) .withResponseHeaderTimeout(Duration.fromNanos(config.responseHeaderTimeout.toNanos)) .withIdleTimeout(Duration.fromNanos(config.idleTimeout.toNanos)) .withBufferSize(config.bufferSize) .withMaxRequestLineLength(config.maxRequestLineLength) .withMaxHeadersLength(config.maxHeadersLength) .withChunkBufferMaxSize(config.chunkBufferMaxSize) .withConnectorPoolSize(config.connectorPoolSize) .withChannelOption[java.lang.Boolean](StandardSocketOptions.TCP_NODELAY, config.socketOptions.tcpNoDelay) .resource } yield server } }
Example 126
Source File: Http4sBlazeServerModuleTest.scala From scala-server-toolkit with MIT License | 5 votes |
package com.avast.sst.http4s.server import cats.effect.{ContextShift, IO, Timer} import com.avast.sst.http4s.client.{Http4sBlazeClientConfig, Http4sBlazeClientModule} import org.http4s.HttpRoutes import org.http4s.dsl.Http4sDsl import org.scalatest.funsuite.AsyncFunSuite import scala.concurrent.ExecutionContext class Http4sBlazeServerModuleTest extends AsyncFunSuite with Http4sDsl[IO] { implicit private val cs: ContextShift[IO] = IO.contextShift(ExecutionContext.global) implicit private val timer: Timer[IO] = IO.timer(ExecutionContext.global) test("Simple HTTP server") { val routes = Http4sRouting.make(HttpRoutes.of[IO] { case GET -> Root / "test" => Ok("test") }) val test = for { server <- Http4sBlazeServerModule.make[IO](Http4sBlazeServerConfig("127.0.0.1", 0), routes, ExecutionContext.global) client <- Http4sBlazeClientModule.make[IO](Http4sBlazeClientConfig(), ExecutionContext.global) } yield (server, client) test .use { case (server, client) => client .expect[String](s"http://${server.address.getHostString}:${server.address.getPort}/test") .map(response => assert(response === "test")) } .unsafeToFuture() } }
Example 127
Source File: Algebras.scala From hydra with Apache License 2.0 | 4 votes |
package hydra.ingest.modules

import cats.effect.{Async, ConcurrentEffect, ContextShift, Timer}
import cats.implicits._
import hydra.avro.registry.SchemaRegistry
import hydra.ingest.app.AppConfig.AppConfig
import hydra.kafka.algebras.{KafkaAdminAlgebra, KafkaClientAlgebra, MetadataAlgebra}
import io.chrisdavenport.log4cats.Logger

final class Algebras[F[_]] private (
    val schemaRegistry: SchemaRegistry[F],
    val kafkaAdmin: KafkaAdminAlgebra[F],
    val kafkaClient: KafkaClientAlgebra[F],
    val metadata: MetadataAlgebra[F]
)

object Algebras {

  def make[F[_]: Async: ConcurrentEffect: ContextShift: Timer: Logger](config: AppConfig): F[Algebras[F]] =
    for {
      schemaRegistry <- SchemaRegistry.live[F](
                         config.createTopicConfig.schemaRegistryConfig.fullUrl,
                         config.createTopicConfig.schemaRegistryConfig.maxCacheSize
                       )
      kafkaAdmin  <- KafkaAdminAlgebra.live[F](config.createTopicConfig.bootstrapServers)
      kafkaClient <- KafkaClientAlgebra.live[F](config.createTopicConfig.bootstrapServers, schemaRegistry, config.ingestConfig.recordSizeLimitBytes)
      metadata <- MetadataAlgebra.make[F](
                   config.v2MetadataTopicConfig.topicName.value,
                   config.v2MetadataTopicConfig.consumerGroup,
                   kafkaClient,
                   schemaRegistry,
                   config.v2MetadataTopicConfig.createOnStartup
                 )
    } yield new Algebras[F](schemaRegistry, kafkaAdmin, kafkaClient, metadata)
}
Example 128
Source File: RollingFileLogger.scala From odin with Apache License 2.0 | 4 votes |
package io.odin.loggers import java.nio.file.{Files, Path, Paths} import java.time.{Instant, LocalDateTime} import java.time.format.DateTimeFormatter import java.util.TimeZone import java.util.concurrent.TimeUnit import cats.Monad import cats.effect.concurrent.Ref import cats.effect.{Concurrent, ContextShift, Fiber, Resource, Timer} import cats.syntax.all._ import io.odin.formatter.Formatter import io.odin.{Level, Logger, LoggerMessage} import scala.concurrent.duration.{FiniteDuration, _} object RollingFileLogger { def apply[F[_]]( fileNamePattern: LocalDateTime => String, maxFileSizeInBytes: Option[Long], rolloverInterval: Option[FiniteDuration], formatter: Formatter, minLevel: Level )(implicit F: Concurrent[F], timer: Timer[F], cs: ContextShift[F]): Resource[F, Logger[F]] = { new RollingFileLoggerFactory( fileNamePattern, maxFileSizeInBytes, rolloverInterval, formatter, minLevel, FileLogger.apply[F] ).mk } private[odin] class RefLogger[F[_]: Timer: Monad]( current: Ref[F, Logger[F]], override val minLevel: Level ) extends DefaultLogger[F](minLevel) { def log(msg: LoggerMessage): F[Unit] = current.get.flatMap(_.log(msg)) override def log(msgs: List[LoggerMessage]): F[Unit] = current.get.flatMap(_.log(msgs)) } private[odin] class RollingFileLoggerFactory[F[_]]( fileNamePattern: LocalDateTime => String, maxFileSizeInBytes: Option[Long], rolloverInterval: Option[FiniteDuration], formatter: Formatter, minLevel: Level, underlyingLogger: (String, Formatter, Level) => Resource[F, Logger[F]], fileSizeCheck: Path => Long = Files.size )(implicit F: Concurrent[F], timer: Timer[F], cs: ContextShift[F]) { val df: DateTimeFormatter = DateTimeFormatter.ofPattern("yyyy-MM-dd-HH-mm-ss") def mk: Resource[F, Logger[F]] = { val logger = for { ((logger, watcherFiber), release) <- allocate.allocated refLogger <- Ref.of(logger) refRelease <- Ref.of(release) _ <- F.start(rollingLoop(watcherFiber, refLogger, refRelease)) } yield { (new RefLogger(refLogger, minLevel), refRelease) } Resource.make(logger)(_._2.get.flatten).map { case (logger, _) => logger } } def now: F[Long] = timer.clock.realTime(TimeUnit.MILLISECONDS) def rollingLoop(watcher: Fiber[F, Unit], logger: Ref[F, Logger[F]], release: Ref[F, F[Unit]]): F[Unit] = for { _ <- watcher.join oldRelease <- release.get ((newLogger, newWatcher), newRelease) <- allocate.allocated _ <- logger.set(newLogger) _ <- release.set(newRelease) _ <- oldRelease _ <- rollingLoop(newWatcher, logger, release) } yield () } }
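A hedged usage sketch for the factory above: build a rolling logger that rolls hourly or at roughly 10 MiB, then log through it while the Resource is held. The file name pattern, size limit, and level are illustrative assumptions, not values from the library:

import java.time.LocalDateTime
import java.time.format.DateTimeFormatter

import cats.effect.{ContextShift, IO, Timer}
import io.odin.Level
import io.odin.formatter.Formatter

import scala.concurrent.ExecutionContext
import scala.concurrent.duration._

object RollingFileLoggerSketch {
  implicit val cs: ContextShift[IO] = IO.contextShift(ExecutionContext.global)
  implicit val timer: Timer[IO]     = IO.timer(ExecutionContext.global)

  private val df = DateTimeFormatter.ofPattern("yyyy-MM-dd-HH-mm-ss")

  // Rolls over whenever the file reaches ~10 MiB or an hour has passed, whichever comes first.
  val loggerResource =
    RollingFileLogger[IO](
      fileNamePattern = (dt: LocalDateTime) => s"app-${dt.format(df)}.log",
      maxFileSizeInBytes = Some(10L * 1024 * 1024),
      rolloverInterval = Some(1.hour),
      formatter = Formatter.default,
      minLevel = Level.Info
    )

  // loggerResource.use(logger => logger.info("application started"))
}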