cats.effect.Resource Scala Examples
The following examples show how to use cats.effect.Resource.
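Before the examples, it helps to recall the core shape of the API: a Resource[F, A] pairs an acquire action with a release action, and use runs both around a body, releasing even on error or cancellation. A minimal sketch (the file name and the use of IO here are illustrative, not drawn from the examples below):

import java.io.{BufferedReader, FileReader}
import cats.effect.{IO, Resource}

// Acquire a reader; Resource guarantees close() runs even if `use` fails.
val reader: Resource[IO, BufferedReader] =
  Resource.make(
    IO(new BufferedReader(new FileReader("data.txt"))) // acquire
  )(r => IO(r.close()))                                // release

val firstLine: IO[String] = reader.use(r => IO(r.readLine()))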
Example 1
Source File: NatchezHttp4sModule.scala From skunk with MIT License
// Copyright (c) 2018-2020 by Rob Norris
// This software is licensed under the MIT License (MIT).
// For more information see LICENSE or https://opensource.org/licenses/MIT

package natchez.http4s

import cats.~>
import cats.data.{ Kleisli, OptionT }
import cats.effect.Bracket
import cats.implicits._
import natchez.{ EntryPoint, Kernel, Span }
import org.http4s.HttpRoutes
import natchez.Trace
import natchez.Tags
import scala.util.control.NonFatal
import org.http4s.Response
import cats.effect.Resource
import cats.Defer
import natchez.TraceValue
import cats.Monad

object implicits {

  // Given an entry point and HTTP Routes in Kleisli[F, Span[F], ?] return routes in F. A new span
  // is created with the URI path as the name, either as a continuation of the incoming trace, if
  // any, or as a new root. This can likely be simplified, I just did what the types were saying
  // and it works so :shrug:
  private def liftT[F[_]: Bracket[?[_], Throwable]](
    entryPoint: EntryPoint[F])(
    routes:     HttpRoutes[Kleisli[F, Span[F], ?]]
  ): HttpRoutes[F] =
    Kleisli { req =>
      type G[A] = Kleisli[F, Span[F], A]
      val lift   = λ[F ~> G](fa => Kleisli(_ => fa))
      val kernel = Kernel(req.headers.toList.map(h => (h.name.value -> h.value)).toMap)
      val spanR  = entryPoint.continueOrElseRoot(req.uri.path, kernel)
      OptionT {
        spanR.use { span =>
          val lower = λ[G ~> F](_(span))
          routes.run(req.mapK(lift)).mapK(lower).map(_.mapK(lower)).value
        }
      }
    }

  implicit class EntryPointOps[F[_]](self: EntryPoint[F]) {

    private def dummySpan(
      implicit ev: Monad[F]
    ): Span[F] =
      new Span[F] {
        val kernel: F[Kernel] = Kernel(Map.empty).pure[F]
        def put(fields: (String, TraceValue)*): F[Unit] = Monad[F].unit
        def span(name: String): Resource[F, Span[F]] = Monad[Resource[F, ?]].pure(this)
      }

    def liftT(routes: HttpRoutes[Kleisli[F, Span[F], ?]])(
      implicit ev: Bracket[F, Throwable]
    ): HttpRoutes[F] =
      implicits.liftT(self)(routes)
  }

  def natchezMiddleware[F[_]: Bracket[?[_], Throwable]: Trace](routes: HttpRoutes[F]): HttpRoutes[F] =
    Kleisli { req =>

      val addRequestFields: F[Unit] =
        Trace[F].put(
          Tags.http.method(req.method.name),
          Tags.http.url(req.uri.renderString)
        )

      def addResponseFields(res: Response[F]): F[Unit] =
        Trace[F].put(
          Tags.http.status_code(res.status.code.toString)
        )

      def addErrorFields(e: Throwable): F[Unit] =
        Trace[F].put(
          Tags.error(true),
          "error.message"    -> e.getMessage,
          "error.stacktrace" -> e.getStackTrace.mkString("\n"),
        )

      OptionT {
        routes(req).onError {
          case NonFatal(e) => OptionT.liftF(addRequestFields *> addErrorFields(e))
        }.value.flatMap {
          case Some(handler) => addRequestFields *> addResponseFields(handler).as(handler.some)
          case None          => Option.empty[Response[F]].pure[F]
        }
      }
    }
}
Example 2
Source File: Http4sRpcServer.scala From iotchain with MIT License
package jbok.network.rpc.http

import cats.effect.{ConcurrentEffect, Resource, Sync, Timer}
import cats.implicits._
import io.circe.Json
import io.circe.syntax._
import jbok.network.rpc.{RpcRequest, RpcService}
import org.http4s.HttpRoutes
import org.http4s.circe.CirceEntityCodec._
import org.http4s.dsl.Http4sDsl
import org.http4s.implicits._
import org.http4s.server.Server
import org.http4s.server.blaze.BlazeServerBuilder

object Http4sRpcServer {
  def routes[F[_]](service: RpcService[F, Json])(implicit F: Sync[F]): HttpRoutes[F] = {
    val dsl = Http4sDsl[F]
    import dsl._

    HttpRoutes.of[F] {
      case req @ POST -> path =>
        for {
          json   <- req.as[Json]
          result <- service.handle(RpcRequest(path.toList, json))
          resp   <- Ok(result.asJson)
        } yield resp
    }
  }

  def server[F[_]](service: RpcService[F, Json])(implicit F: ConcurrentEffect[F], T: Timer[F]): Resource[F, Server[F]] =
    BlazeServerBuilder[F]
      .bindLocal(0)
      .withHttpApp(routes[F](service).orNotFound)
      .withWebSockets(true)
      .resource
}
Example 3
Source File: HttpService.scala From iotchain with MIT License
package jbok.app.service

import cats.effect.{ConcurrentEffect, Resource, Timer}
import cats.implicits._
import fs2._
import io.circe.Json
import javax.net.ssl.SSLContext
import jbok.network.http.server.middleware.{CORSMiddleware, GzipMiddleware, LoggerMiddleware, MetricsMiddleware}
import jbok.core.config.ServiceConfig
import jbok.core.api._
import jbok.crypto.ssl.SSLConfig
import jbok.network.rpc.RpcService
import jbok.network.rpc.http.Http4sRpcServer
import org.http4s.HttpRoutes
import org.http4s.implicits._
import org.http4s.server.{SSLClientAuthMode, Server}
import org.http4s.server.blaze.BlazeServerBuilder

final class HttpService[F[_]](
    config: ServiceConfig,
    sslConfig: SSLConfig,
    account: AccountAPI[F],
    admin: AdminAPI[F],
    block: BlockAPI[F],
    contract: ContractAPI[F],
    miner: MinerAPI[F],
    personal: PersonalAPI[F],
    transaction: TransactionAPI[F],
    sslOpt: Option[SSLContext]
)(implicit F: ConcurrentEffect[F], T: Timer[F]) {
  import jbok.codec.impl.circe._
  import _root_.io.circe.generic.auto._
  import jbok.codec.json.implicits._

  val rpcService: RpcService[F, Json] = {
    var service = RpcService[F, Json]
    if (config.apis.contains("account")) service = service.mount(account)
    if (config.apis.contains("admin")) service = service.mount(admin)
    if (config.apis.contains("block")) service = service.mount(block)
    if (config.apis.contains("contract")) service = service.mount(contract)
    if (config.apis.contains("miner")) service = service.mount(miner)
    if (config.apis.contains("personal")) service = service.mount(personal)
    if (config.apis.contains("transaction")) service = service.mount(transaction)
    service
  }

  val routes: HttpRoutes[F] = Http4sRpcServer.routes(rpcService)

  private val builder: F[BlazeServerBuilder[F]] = {
    val httpApp = for {
      exportRoute <- MetricsMiddleware.exportService[F]
      withMetrics <- MetricsMiddleware[F](routes, config.enableMetrics)
      withLogger = LoggerMiddleware[F](config.logHeaders, config.logBody)((withMetrics <+> exportRoute).orNotFound)
      withCORS   = CORSMiddleware[F](withLogger, config.allowedOrigins)
      app        = GzipMiddleware[F](withCORS)
    } yield app

    val builder = httpApp.map { app =>
      BlazeServerBuilder[F]
        .withHttpApp(app)
        .withNio2(true)
        .enableHttp2(config.enableHttp2)
        .withWebSockets(config.enableWebsockets)
        .bindHttp(config.port, config.local)
    }

    val sslClientAuthMode = sslConfig.clientAuth match {
      case "NotRequested" => SSLClientAuthMode.NotRequested
      case "Requested"    => SSLClientAuthMode.Requested
      case "Required"     => SSLClientAuthMode.Required
      case x              => throw new IllegalArgumentException(s"SSLClientAuthMode ${x} is not supported")
    }

    sslOpt match {
      case Some(ssl) => builder.map(_.withSSLContext(ssl, sslClientAuthMode))
      case None      => builder.map(_.enableHttp2(false))
    }
  }

  val resource: Resource[F, Server[F]] =
    Resource.liftF(builder).flatMap(_.resource)

  val stream: Stream[F, Unit] =
    if (config.enable) {
      Stream.eval(builder).flatMap(_.serve).drain
    } else {
      Stream.empty
    }
}
Example 4
Source File: Doobie.scala From iotchain with MIT License
package jbok.app.service.store.doobie

import cats.effect.{Async, ContextShift, Resource}
import doobie._
import doobie.hikari.HikariTransactor
import jbok.core.config.DatabaseConfig

object Doobie {
  def xa[F[_]](config: DatabaseConfig)(implicit F: Async[F], cs: ContextShift[F]): Resource[F, Transactor[F]] =
    for {
      ce <- ExecutionContexts.fixedThreadPool[F](32) // our connect EC
      te <- ExecutionContexts.cachedThreadPool[F]    // our transaction EC
      xa <- HikariTransactor.newHikariTransactor[F](
        config.driver,
        config.url,
        config.user,     // username
        config.password, // password
        ce,              // await connection here
        te               // execute JDBC operations here
      )
    } yield xa
}
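A hedged usage sketch for the transactor above (the query is illustrative, and an implicit ContextShift[IO] is assumed in scope): the Resource is typically acquired once, with the database work running inside use so both thread pools and the connection pool are shut down afterwards.

import cats.effect.IO
import doobie.implicits._

// Illustrative only: run one query inside the lifetime of the transactor.
def countUsers(dbConfig: DatabaseConfig): IO[Int] =
  Doobie.xa[IO](dbConfig).use { xa =>
    sql"SELECT COUNT(*) FROM users".query[Int].unique.transact(xa)
  }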
Example 5
Source File: ThreadUtil.scala From iotchain with MIT License
package jbok.common.thread

import java.lang.Thread.UncaughtExceptionHandler
import java.nio.channels.AsynchronousChannelGroup
import java.nio.channels.spi.AsynchronousChannelProvider
import java.util.concurrent.atomic.AtomicInteger
import java.util.concurrent.{Executors, ThreadFactory}

import cats.effect.{Resource, Sync}

import scala.concurrent.ExecutionContext
import scala.util.control.NonFatal

object ThreadUtil {
  def named(threadPrefix: String, daemon: Boolean, exitJvmOnFatalError: Boolean = true): ThreadFactory =
    new ThreadFactory {
      val defaultThreadFactory = Executors.defaultThreadFactory()
      val idx                  = new AtomicInteger(0)

      def newThread(r: Runnable) = {
        val t = defaultThreadFactory.newThread(r)
        t.setDaemon(daemon)
        t.setName(s"$threadPrefix-${idx.incrementAndGet()}")
        t.setUncaughtExceptionHandler(new UncaughtExceptionHandler {
          def uncaughtException(t: Thread, e: Throwable): Unit = {
            ExecutionContext.defaultReporter(e)
            if (exitJvmOnFatalError) {
              e match {
                case NonFatal(_) => ()
                case _           => System.exit(-1)
              }
            }
          }
        })
        t
      }
    }

  def blockingThreadPool[F[_]](name: String)(implicit F: Sync[F]): Resource[F, ExecutionContext] =
    Resource(F.delay {
      val factory  = named(name, daemon = true)
      val executor = Executors.newCachedThreadPool(factory)
      val ec       = ExecutionContext.fromExecutor(executor)
      (ec, F.delay(executor.shutdown()))
    })

  def acg[F[_]](implicit F: Sync[F]): Resource[F, AsynchronousChannelGroup] =
    Resource(F.delay {
      val acg = acgUnsafe
      (acg, F.delay(acg.shutdownNow()))
    })

  def acgUnsafe: AsynchronousChannelGroup =
    AsynchronousChannelProvider
      .provider()
      .openAsynchronousChannelGroup(8, named("jbok-ag-tcp", daemon = true))

  lazy val acgGlobal: AsynchronousChannelGroup = acgUnsafe
}
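blockingThreadPool above is meant for shifting blocking calls off the compute pool; a sketch of one way to use it under cats-effect 2 (the file-reading body is illustrative):

import java.nio.file.{Files, Paths}
import cats.effect.{ContextShift, IO}

// Illustrative: evaluate a blocking read on the dedicated pool, then shift back.
def readFile(path: String)(implicit cs: ContextShift[IO]): IO[String] =
  ThreadUtil.blockingThreadPool[IO]("blocking-io").use { ec =>
    cs.evalOn(ec)(IO(new String(Files.readAllBytes(Paths.get(path)))))
  }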
Example 6
Source File: PrometheusMetricsSpec.scala From iotchain with MIT License
package jbok.common.metrics

import cats.effect.{IO, Resource}
import jbok.common.CommonSpec
import jbok.common.metrics.implicits._

import scala.concurrent.duration._

class PrometheusMetricsSpec extends CommonSpec {
  implicit val prometheus = new PrometheusMetrics[IO]()

  "Prometheus" should {
    "observed" in {
      val name = "important_fun"
      val ioa  = timer.sleep(1.second)
      ioa.observed(name).unsafeRunSync()
      IO.raiseError(new Exception("boom")).observed(name).attempt.unsafeRunSync()
      val report = PrometheusMetrics.textReport[IO]().unsafeRunSync()
      report.contains(s"""iotchain_${name}_seconds_count{success="success",} 1.0""") shouldBe true
      report.contains(s"""iotchain_${name}_seconds_count{success="failure",} 1.0""") shouldBe true
    }

    "monitored" in {
      val name = "running_programs"
      val res  = Resource.liftF(IO.raiseError[String](new Exception("boom")))
      res
        .monitored(name)
        .use { _ => IO.unit }
        .attempt
        .unsafeRunSync()
      val report = PrometheusMetrics.textReport[IO]().unsafeRunSync()
      report.contains(s"""iotchain_${name}_active 0.0""") shouldBe true
    }
  }
}
Example 7
Source File: CommonSpec.scala From iotchain with MIT License
package jbok.common

import cats.effect.{IO, Resource}
import cats.implicits._
import jbok.common.log.{Level, Logger}
import jbok.common.thread.ThreadUtil
import org.scalacheck.{Arbitrary, Gen}
import org.scalatest._
import org.scalatest.concurrent.TimeLimitedTests
import org.scalatest.prop.PropertyChecks
import org.scalatest.time.Span

import scala.concurrent.ExecutionContext
import scala.concurrent.duration._

trait CommonSpec
    extends WordSpecLike
    with Matchers
    with PropertyChecks
    with BeforeAndAfterAll
    with BeforeAndAfterEach
    with TimeLimitedTests
    with CancelAfterFailure
    with CommonArb {

  implicit val cs    = IO.contextShift(ExecutionContext.global)
  implicit val timer = IO.timer(ExecutionContext.global)
  implicit val ce    = IO.ioConcurrentEffect(cs)
  implicit val acg   = ThreadUtil.acgGlobal

  override def timeLimit: Span = 60.seconds

  Logger.setRootHandlers[IO](Logger.consoleHandler(minimumLevel = Some(Level.Info))).unsafeRunSync()

  def withResource[A](res: Resource[IO, A])(f: A => IO[Unit]): Unit =
    res.use(a => f(a)).unsafeRunSync()

  def withIO[A](ioa: IO[A]): Unit =
    ioa.void.unsafeRunSync()

  def random[A](implicit arb: Arbitrary[A]): A =
    arb.arbitrary.sample.get

  def random[A](gen: Gen[A]): A =
    gen.sample.get
}

object CommonSpec extends CommonSpec
Example 8
Source File: Metrics.scala From iotchain with MIT License
package jbok.common.metrics

import cats.effect.{Resource, Sync, Timer}
import cats.implicits._
import fs2._

import scala.concurrent.duration._

trait EffectMetrics[F[_]] { self: Metrics[F] =>
  def observed[A](name: String, labels: String*)(fa: F[A])(implicit F: Sync[F], T: Timer[F]): F[A] =
    for {
      start   <- T.clock.monotonic(NANOSECONDS)
      attempt <- fa.attempt
      end     <- T.clock.monotonic(NANOSECONDS)
      elapsed = end - start
      a <- attempt match {
        case Left(e)  => self.observe(name, "failure" :: labels.toList: _*)(elapsed.toDouble) >> F.raiseError(e)
        case Right(a) => self.observe(name, "success" :: labels.toList: _*)(elapsed.toDouble).as(a)
      }
    } yield a

  def monitored[A](name: String, labels: String*)(res: Resource[F, A])(implicit F: Sync[F]): Resource[F, A] = {
    val r = Resource {
      for {
        _ <- self.inc(name, labels: _*)(1.0)
      } yield () -> self.dec(name, labels: _*)(1.0)
    }
    r.flatMap(_ => res)
  }
}

trait StreamMetrics[F[_]] { self: Metrics[F] =>
  // observe events occurring in the stream
  def observePipe[A](name: String, labels: String*): Pipe[F, A, Unit] =
    _.chunks.through(observeChunkPipe[A](name, labels: _*))

  def observeChunkPipe[A](name: String, labels: String*): Pipe[F, Chunk[A], Unit] =
    _.evalMap(c => self.observe(name, labels: _*)(c.size.toDouble))
}

trait Metrics[F[_]] extends EffectMetrics[F] with StreamMetrics[F] {
  type Registry

  def registry: Registry

  // accumulate, e.g. the number of requests served, tasks completed, or errors.
  def acc(name: String, labels: String*)(n: Double = 1.0): F[Unit]

  // increase, e.g. the current memory usage, queue size, or active requests.
  def inc(name: String, labels: String*)(n: Double = 1.0): F[Unit]

  // decrease, e.g. the current memory usage, queue size, or active requests.
  def dec(name: String, labels: String*)(n: Double = 1.0): F[Unit]

  // set a gauge to an absolute value.
  def set(name: String, labels: String*)(n: Double): F[Unit]

  // e.g. the request response latency, or the size of the response body
  def observe(name: String, labels: String*)(n: Double): F[Unit]
}

object Metrics {
  val METRIC_PREFIX = "iotchain"
  val TIMER_SUFFIX  = "seconds"
  val GAUGE_SUFFIX  = "active"

  sealed trait NoopRegistry
  object NoopRegistry extends NoopRegistry

  def nop[F[_]: Sync]: Metrics[F] = new Metrics[F] {
    override type Registry = NoopRegistry
    override def registry: Registry = NoopRegistry
    override def acc(name: String, labels: String*)(n: Double): F[Unit] = Sync[F].unit
    override def inc(name: String, labels: String*)(n: Double): F[Unit] = Sync[F].unit
    override def dec(name: String, labels: String*)(n: Double): F[Unit] = Sync[F].unit
    override def set(name: String, labels: String*)(n: Double): F[Unit] = Sync[F].unit
    override def observe(name: String, labels: String*)(n: Double): F[Unit] = Sync[F].unit
  }
}
Example 9
Source File: testkit.scala From iotchain with MIT License
package jbok.persistent

import cats.effect.{IO, Resource}
import cats.implicits._
import jbok.codec.HexPrefix
import jbok.codec.rlp.implicits._
import jbok.common.{gen, FileUtil}
import jbok.persistent.mpt.MptNode
import jbok.persistent.mpt.MptNode._
import jbok.persistent.rocksdb.RocksKVStore
import org.scalacheck.{Arbitrary, Gen}
import scodec.bits.ByteVector

object testkit {
  implicit def arbColumnFamily: Arbitrary[ColumnFamily] = Arbitrary {
    Gen.alphaNumStr.map(ColumnFamily.apply)
  }

  def testRocksKVStore(cfs: List[ColumnFamily] = List(ColumnFamily.default)): Resource[IO, KVStore[IO]] =
    FileUtil[IO].temporaryDir().flatMap { dir =>
      RocksKVStore.resource[IO](dir.path, cfs)
    }

  val testMemoryKVStore: Resource[IO, KVStore[IO]] =
    Resource.liftF(MemoryKVStore[IO])

  def testRocksStageStore(cfs: List[ColumnFamily] = List(ColumnFamily.default)): Resource[IO, StageKVStore[IO, ByteVector, ByteVector]] =
    testRocksKVStore(cfs).map(inner => StageKVStore(SingleColumnKVStore[IO, ByteVector, ByteVector](ColumnFamily.default, inner)))

  val testMemoryStageStore: Resource[IO, StageKVStore[IO, ByteVector, ByteVector]] =
    testMemoryKVStore.map(inner => StageKVStore(SingleColumnKVStore[IO, ByteVector, ByteVector](ColumnFamily.default, inner)))

  implicit lazy val arbLeafNode: Arbitrary[LeafNode] = Arbitrary {
    for {
      key   <- gen.boundedByteVector(0, 1024)
      value <- gen.boundedByteVector(0, 1024)
    } yield LeafNode(HexPrefix.encodedToNibbles(key.encoded), value.encoded)
  }

  implicit lazy val arbBranchNode: Arbitrary[BranchNode] = Arbitrary {
    for {
      children <- Gen
        .listOfN(16, Gen.oneOf(gen.sizedByteVector(32).map(_.asLeft), arbMptNode.arbitrary.map(_.asRight)))
        .map(childrenList => childrenList.map(child => Some(child)))
      value <- gen.byteVector
    } yield BranchNode(children, Some(value.encoded))
  }

  implicit lazy val arbExtensionNode: Arbitrary[ExtensionNode] = Arbitrary {
    for {
      key   <- gen.boundedByteVector(0, 1024)
      value <- gen.boundedByteVector(0, 1024)
    } yield ExtensionNode(HexPrefix.encodedToNibbles(key.encoded), Left(value))
  }

  implicit lazy val arbMptNode: Arbitrary[MptNode] = Arbitrary {
    Gen.oneOf[MptNode](arbLeafNode.arbitrary, arbExtensionNode.arbitrary, arbBranchNode.arbitrary)
  }
}
Example 10
Source File: KVStoreSpec.scala From iotchain with MIT License
package jbok.persistent

import cats.effect.{IO, Resource}
import cats.implicits._
import jbok.common.CommonSpec
import jbok.persistent.testkit._
import scodec.bits.ByteVector

class KVStoreSpec extends CommonSpec {
  val default = ColumnFamily.default
  val cfa     = ColumnFamily("a")
  val cfb     = ColumnFamily("b")
  val cfs     = List(default, cfa, cfb)

  def test(name: String, resource: Resource[IO, KVStore[IO]]): Unit =
    s"KVStore ${name}" should {
      "respect column family" in withResource(resource) { store =>
        val key = "key".getBytes
        val a   = "a".getBytes
        val b   = "b".getBytes
        for {
          _     <- store.put(cfa, key, a)
          _     <- store.put(cfb, key, b)
          value <- store.get(default, key)
          _ = value shouldBe None
          size <- store.size(default)
          _ = size shouldBe 0
          value <- store.get(cfa, key)
          _ = value.get shouldEqual a
          size <- store.size(cfa)
          _ = size shouldBe 1
          value <- store.get(cfb, key)
          _ = value.get shouldEqual b
          size <- store.size(cfb)
          _ = size shouldBe 1
        } yield ()
      }

      "write batch" in {
        val cf = default
        forAll { m: Map[ByteVector, ByteVector] =>
          val p = resource.use { store =>
            val kvs = m.toList.map { case (k, v) => k.toArray -> v.toArray }
            for {
              _   <- store.writeBatch(cf, kvs, Nil)
              res <- store.toList(cf)
              _ = res.map { case (k, v) => ByteVector(k) -> ByteVector(v) }.toMap shouldBe m
              res <- store.size(cf)
              _ = res shouldBe kvs.size
            } yield ()
          }
          p.unsafeRunSync()
        }
      }

      "toList" in {
        forAll { (m1: Map[ByteVector, ByteVector], m2: Map[ByteVector, ByteVector]) =>
          val kvs1 = m1.toList.map { case (k, v) => k.toArray -> v.toArray }
          val kvs2 = m2.toList.map { case (k, v) => k.toArray -> v.toArray }
          val p = resource.use { store =>
            for {
              _   <- kvs1.traverse(t => store.put(cfa, t._1, t._2))
              _   <- kvs2.traverse(t => store.put(cfb, t._1, t._2))
              res <- store.toList(cfa)
              _ = res.map { case (k, v) => ByteVector(k) -> ByteVector(v) }.toMap shouldBe m1
              res <- store.toList(cfb)
              _ = res.map { case (k, v) => ByteVector(k) -> ByteVector(v) }.toMap shouldBe m2
            } yield ()
          }
          p.unsafeRunSync()
        }
      }
    }

  test("rocksdb", testRocksKVStore(cfs))
  test("memory", testMemoryKVStore)
}
Example 11
Source File: StageKVStoreSpec.scala From iotchain with MIT License
package jbok.persistent

import cats.effect.{IO, Resource}
import jbok.common.CommonSpec
import jbok.persistent.testkit._
import scodec.bits.ByteVector

class StageKVStoreSpec extends CommonSpec {
  def test(name: String, resource: Resource[IO, StageKVStore[IO, ByteVector, ByteVector]]): Unit =
    s"StageKVStore $name" should {
      val key   = ByteVector("key".getBytes)
      val value = ByteVector("value".getBytes)

      "not write inserts until commit" in withResource(resource) { stage =>
        val updated = stage.put(key, value)
        for {
          res <- updated.mustGet(key)
          _ = res shouldBe value
          res <- updated.inner.get(key)
          _ = res shouldBe None
          committed <- updated.commit
          res       <- committed.inner.get(key)
          _ = res shouldBe Some(value)
          res <- stage.mustGet(key)
          _ = res shouldBe value
        } yield ()
      }
    }

  test("rocksdb", testRocksStageStore())
  test("memory", testMemoryStageStore)
}
Example 12
Source File: StageMPTSpec.scala From iotchain with MIT License
package jbok.persistent.mpt

import cats.effect.{IO, Resource}
import jbok.common.{CommonSpec, FileUtil}
import jbok.persistent._
import jbok.persistent.rocksdb.RocksKVStore

class StageMPTSpec extends CommonSpec {
  def check(name: String, resource: Resource[IO, StageKVStore[IO, String, String]]): Unit =
    s"Staged MPT ${name}" should {
      "not write inserts until commit" in withResource(resource) { stage =>
        val updated = stage.put("1", "1")
        for {
          res <- updated.get("1")
          _ = res shouldBe Some("1")
          res <- updated.inner.get("1")
          _ = res shouldBe None
          res <- stage.get("1")
          _ = res shouldBe None
          res <- stage.inner.get("1")
          _ = res shouldBe None
          committed <- updated.commit
          res       <- committed.inner.get("1")
          _ = res shouldBe Some("1")
          res <- stage.get("1")
          _ = res shouldBe Some("1")
        } yield ()
      }
    }

  val memory = Resource.liftF(for {
    store <- MemoryKVStore[IO]
    mpt   <- MerklePatriciaTrie[IO, String, String](ColumnFamily.default, store)
    stage = StageKVStore(mpt)
  } yield stage)

  val rocksdb = for {
    file  <- FileUtil[IO].temporaryDir()
    store <- RocksKVStore.resource[IO](file.path, List(ColumnFamily.default))
    mpt   <- Resource.liftF(MerklePatriciaTrie[IO, String, String](ColumnFamily.default, store))
    stage = StageKVStore(mpt)
  } yield stage

  check("memory", memory)
  check("rocksdb", rocksdb)
}
Example 13
Source File: CoreSpec.scala From iotchain with MIT License
package jbok.core

import cats.effect.{IO, Resource}
import com.github.pshirshov.izumi.distage.model.definition.ModuleDef
import distage.{Injector, Locator}
import jbok.codec.rlp
import jbok.codec.json
import jbok.common.CommonSpec
import jbok.common.math.N
import jbok.core.config.{FullConfig, GenesisBuilder}
import jbok.core.keystore.{KeyStore, MockingKeyStore}
import jbok.core.models.{Address, ChainId}
import jbok.crypto.signature.KeyPair
import monocle.macros.syntax.lens._

object CoreSpecFixture {
  val chainId: ChainId = ChainId(1)

  val testKeyPair = KeyPair(
    KeyPair.Public("a4991b82cb3f6b2818ce8fedc00ef919ba505bf9e67d96439b63937d24e4d19d509dd07ac95949e815b307769f4e4d6c3ed5d6bd4883af23cb679b251468a8bc"),
    KeyPair.Secret("1a3c21bb6e303a384154a56a882f5b760a2d166161f6ccff15fc70e147161788")
  )
  val testAllocAddress = Address(testKeyPair)
  val testAllocBalance = BigInt("1" + "0" * 30)

  val testGenesis = GenesisBuilder()
    .withChainId(chainId)
    .addAlloc(testAllocAddress, testAllocBalance)
    .addMiner(testAllocAddress)
    .build

  val config = CoreModule.testConfig.lens(_.genesis).set(testGenesis)

  val keystoreModule: ModuleDef = new ModuleDef {
    make[KeyStore[IO]].fromEffect(MockingKeyStore.withInitKeys[IO](testKeyPair :: Nil))
  }
}

trait CoreSpec
    extends CommonSpec
    with N.implicits
    with StatelessArb
    with StatefulArb
    with rlp.implicits
    with json.implicits {

  implicit val chainId = CoreSpecFixture.chainId
  implicit val config  = CoreSpecFixture.config

  val genesis          = CoreSpecFixture.testGenesis
  val testKeyPair      = CoreSpecFixture.testKeyPair
  val testAllocAddress = CoreSpecFixture.testAllocAddress

  def testCoreModule(config: FullConfig) =
    new CoreModule[IO](config).overridenBy(CoreSpecFixture.keystoreModule)

  def testCoreResource(config: FullConfig): Resource[IO, Locator] =
    Injector().produceF[IO](testCoreModule(config)).toCats

  val locator: IO[Locator] = testCoreResource(config).allocated.map(_._1)

  def check(f: Locator => IO[Unit]): Unit =
    check(config)(f)

  def check(config: FullConfig)(f: Locator => IO[Unit]): Unit = {
    val p = testCoreResource(config).use { objects =>
      f(objects)
    }
    p.unsafeRunSync()
  }
}

object CoreSpec extends CoreSpec
Example 14
Source File: TracingClient.scala From opencensus-scala with Apache License 2.0
package io.opencensus.scala.http4s

import cats.effect.{Effect, Resource}
import cats.implicits._
import io.opencensus.scala.Tracing
import io.opencensus.scala.http.propagation.Propagation
import io.opencensus.scala.http.{HttpAttributes => BaseHttpAttributes}
import io.opencensus.scala.http4s.HttpAttributes._
import io.opencensus.scala.http4s.TracingUtils.recordResponse
import io.opencensus.scala.http4s.propagation.Http4sFormatPropagation
import io.opencensus.trace.{Span, Status}
import org.http4s.client.Client
import org.http4s.{Header, Request, Response}

abstract class TracingClient[F[_]: Effect] {

  protected val tracing: Tracing
  protected val propagation: Propagation[Header, Request[F]]

  def trace(client: Client[F], parentSpan: Option[Span] = None): Client[F] = {
    val tracedOpen: Request[F] => Resource[F, Response[F]] = req =>
      for {
        span        <- Resource.liftF(startSpan(parentSpan, req))
        enrichedReq = addTraceHeaders(req, span)
        res <- client
          .run(enrichedReq)
          .onError(traceError(span).andThen(x => Resource.liftF(x)))
      } yield recordResponse(span, tracing)(res)

    Client(tracedOpen)
  }

  private def traceError(span: Span): PartialFunction[Throwable, F[Unit]] = {
    case _ => recordException(span)
  }

  private def startSpan(parentSpan: Option[Span], req: Request[F]) =
    Effect[F].delay(startAndEnrichSpan(req, parentSpan))

  private def startAndEnrichSpan(
      req: Request[F],
      parentSpan: Option[Span]
  ): Span = {
    val name = req.uri.path.toString
    val span = parentSpan.fold(tracing.startSpan(name))(span =>
      tracing.startSpanWithParent(name, span)
    )
    BaseHttpAttributes.setAttributesForRequest(span, req)
    span
  }

  private def addTraceHeaders(request: Request[F], span: Span): Request[F] =
    request.withHeaders(
      request.headers.put(propagation.headersWithTracingContext(span): _*)
    )

  private def recordException(span: Span) =
    Effect[F].delay(tracing.endSpan(span, Status.INTERNAL))
}

object TracingClient {
  def apply[F[_]: Effect]: TracingClient[F] = new TracingClient[F] {
    override protected val tracing: Tracing = Tracing
    override protected val propagation: Propagation[Header, Request[F]] =
      new Http4sFormatPropagation[F] {}
  }
}
Example 15
Source File: Server.scala From seals with Apache License 2.0
package com.example.server

import java.net.{ InetSocketAddress, InetAddress }

import scala.concurrent.duration._

import cats.implicits._
import cats.effect.{ IO, IOApp, ExitCode, Resource, Blocker }

import fs2.{ Stream, Chunk }
import fs2.io.tcp

import scodec.bits.BitVector
import scodec.Codec

import dev.tauri.seals.scodec.Codecs._

import com.example.proto._

object Server extends IOApp {

  final val bufferSize = 32 * 1024
  final val timeout    = Some(2.seconds)
  final val maxClients = 200
  final val port       = 8080

  val rnd = new scala.util.Random

  def addr(port: Int): InetSocketAddress =
    new InetSocketAddress(InetAddress.getLoopbackAddress, port)

  override def run(args: List[String]): IO[ExitCode] = {
    Blocker[IO].use { bl =>
      tcp.SocketGroup[IO](bl).use { sg =>
        serve(port, sg).compile.drain.as(ExitCode.Success)
      }
    }
  }

  def serve(port: Int, sg: tcp.SocketGroup): Stream[IO, Unit] = {
    Stream.resource(sg.serverResource[IO](addr(port))).flatMap {
      case (localAddr, sockets) =>
        val s = sockets.map { socket =>
          Stream.resource(socket).flatMap { socket =>
            val bvs: Stream[IO, BitVector] =
              socket.reads(bufferSize, timeout).chunks.map(ch => BitVector.view(ch.toArray))
            val tsk: IO[BitVector] =
              bvs.compile.toVector.map(_.foldLeft(BitVector.empty)(_ ++ _))
            val request: IO[Request] = tsk.flatMap { bv =>
              Codec[Request].decode(bv).fold(
                err => IO.raiseError(new Exception(err.toString)),
                result => IO.pure(result.value)
              )
            }
            val response: IO[Response] = request.flatMap(logic)
            val encoded: Stream[IO, Byte] = Stream.eval(response)
              .map(r => Codec[Response].encode(r).require)
              .flatMap { bv => Stream.chunk(Chunk.bytes(bv.bytes.toArray)) }

            encoded.through(socket.writes(timeout)).onFinalize(socket.endOfOutput)
          }
        }
        s.parJoin[IO, Unit](maxClients)
    }
  }

  def logic(req: Request): IO[Response] = req match {
    case RandomNumber(min, max) =>
      if (min < max) {
        IO {
          val v = rnd.nextInt(max - min + 1) + min
          Number(v)
        }
      } else if (min === max) {
        IO.pure(Number(min))
      } else {
        IO.raiseError(new IllegalArgumentException("min must not be greater than max"))
      }
    case ReSeed(s) =>
      IO {
        rnd.setSeed(s)
        Ok
      }
  }
}
Example 16
Source File: SftpStore.scala From fs2-blobstore with Apache License 2.0
package blobstore
package sftp

import java.util.Date
import java.io.OutputStream

import com.jcraft.jsch._
import cats.instances.option._
import cats.Traverse
import cats.effect.{Blocker, ConcurrentEffect, ContextShift, IO, Resource}
import cats.effect.concurrent.{MVar, Semaphore}
import fs2.concurrent.Queue

import scala.util.Try

final class SftpStore[F[_]](
  absRoot: String,
  session: Session,
  blocker: Blocker,
  mVar: MVar[F, ChannelSftp],
  semaphore: Option[Semaphore[F]],
  connectTimeout: Int
)(implicit F: ConcurrentEffect[F], CS: ContextShift[F])
  extends Store[F] {
  import implicits._
  import Path.SEP

  private val openChannel: F[ChannelSftp] = {
    val openF = blocker.delay {
      val ch = session.openChannel("sftp").asInstanceOf[ChannelSftp]
      ch.connect(connectTimeout)
      ch
    }
    semaphore.fold(openF) { s =>
      F.ifM(s.tryAcquire)(openF, getChannel)
    }
  }

  private val getChannel = F.flatMap(mVar.tryTake) {
    case Some(channel) => F.pure(channel)
    case None          => openChannel
  }

  private def channelResource: Resource[F, ChannelSftp] = Resource.make {
    getChannel
  } {
    case ch if ch.isClosed => F.unit
    case ch                => F.ifM(mVar.tryPut(ch))(F.unit, SftpStore.closeChannel(semaphore, blocker)(ch))
  }
}

object SftpStore {

  def apply[F[_]](
    absRoot: String,
    fa: F[Session],
    blocker: Blocker,
    maxChannels: Option[Long] = None,
    connectTimeout: Int = 10000
  )(implicit F: ConcurrentEffect[F], CS: ContextShift[F]): fs2.Stream[F, SftpStore[F]] =
    if (maxChannels.exists(_ < 1)) {
      fs2.Stream.raiseError[F](new IllegalArgumentException(s"maxChannels must be >= 1"))
    } else {
      for {
        session   <- fs2.Stream.bracket(fa)(session => F.delay(session.disconnect()))
        semaphore <- fs2.Stream.eval(Traverse[Option].sequence(maxChannels.map(Semaphore.apply[F])))
        mVar <- fs2.Stream.bracket(MVar.empty[F, ChannelSftp])(mVar =>
          F.flatMap(mVar.tryTake)(_.fold(F.unit)(closeChannel[F](semaphore, blocker))))
      } yield new SftpStore[F](absRoot, session, blocker, mVar, semaphore, connectTimeout)
    }

  private def closeChannel[F[_]](semaphore: Option[Semaphore[F]], blocker: Blocker)(ch: ChannelSftp)(
    implicit F: ConcurrentEffect[F], CS: ContextShift[F]): F[Unit] =
    F.productR(semaphore.fold(F.unit)(_.release))(blocker.delay(ch.disconnect()))
}
Example 17
Source File: KafkaAdminAlgebra.scala From hydra with Apache License 2.0
package hydra.kafka.algebras

import cats.effect.concurrent.Ref
import cats.effect.{Async, Concurrent, ContextShift, Resource, Sync}
import cats.implicits._
import fs2.kafka._
import hydra.core.protocol._
import hydra.kafka.util.KafkaUtils.TopicDetails
import org.apache.kafka.clients.admin.NewTopic
import org.apache.kafka.common.errors.UnknownTopicOrPartitionException

import scala.util.control.NoStackTrace

trait KafkaAdminAlgebra[F[_]] {
  import KafkaAdminAlgebra._

  def describeTopic(name: TopicName): F[Option[Topic]]
  def getTopicNames: F[List[TopicName]]
  def createTopic(name: TopicName, details: TopicDetails): F[Unit]
  def deleteTopic(name: String): F[Unit]
}

object KafkaAdminAlgebra {
  type TopicName = String
  final case class Topic(name: TopicName, numberPartitions: Int)

  def live[F[_]: Sync: Concurrent: ContextShift](
      bootstrapServers: String,
  ): F[KafkaAdminAlgebra[F]] = Sync[F].delay {
    new KafkaAdminAlgebra[F] {
      override def describeTopic(name: TopicName): F[Option[Topic]] = {
        getAdminClientResource
          .use(_.describeTopics(name :: Nil))
          .map(_.headOption.map(_._2).map { td =>
            Topic(td.name(), td.partitions().size())
          })
          .recover {
            case _: UnknownTopicOrPartitionException => None
          }
      }

      override def getTopicNames: F[List[TopicName]] =
        getAdminClientResource.use(_.listTopics.names.map(_.toList))

      override def createTopic(name: TopicName, d: TopicDetails): F[Unit] = {
        import scala.collection.JavaConverters._
        val newTopic = new NewTopic(name, d.numPartitions, d.replicationFactor)
          .configs(d.configs.asJava)
        getAdminClientResource.use(_.createTopic(newTopic))
      }

      override def deleteTopic(name: String): F[Unit] =
        getAdminClientResource.use(_.deleteTopic(name))

      private def getAdminClientResource: Resource[F, KafkaAdminClient[F]] = {
        adminClientResource(
          AdminClientSettings.apply.withBootstrapServers(bootstrapServers)
        )
      }
    }
  }

  def test[F[_]: Sync]: F[KafkaAdminAlgebra[F]] =
    Ref[F].of(Map[TopicName, Topic]()).flatMap(getTestKafkaClient[F])

  private[this] def getTestKafkaClient[F[_]: Sync](
      ref: Ref[F, Map[TopicName, Topic]]
  ): F[KafkaAdminAlgebra[F]] = Sync[F].delay {
    new KafkaAdminAlgebra[F] {
      override def describeTopic(name: TopicName): F[Option[Topic]] =
        ref.get.map(_.get(name))

      override def getTopicNames: F[List[TopicName]] =
        ref.get.map(_.keys.toList)

      override def createTopic(
          name: TopicName,
          details: TopicDetails
      ): F[Unit] = {
        val entry = name -> Topic(name, details.numPartitions)
        ref.update(old => old + entry)
      }

      override def deleteTopic(name: String): F[Unit] =
        ref.update(_ - name)
    }
  }
}
Example 18
Source File: KamonSupport.scala From kamon-http4s with Apache License 2.0
package kamon.http4s
package middleware.client

import cats.effect.{Effect, Resource}
import cats.implicits._
import com.typesafe.config.Config
import kamon.Kamon
import kamon.context.Context
import kamon.instrumentation.http.HttpClientInstrumentation
import org.http4s.{Request, Response}
import org.http4s.client.Client

object KamonSupport {

  private var _instrumentation = instrumentation(Kamon.config())

  private def instrumentation(kamonConfig: Config): HttpClientInstrumentation = {
    val httpClientConfig = kamonConfig.getConfig("kamon.instrumentation.http4s.client")
    HttpClientInstrumentation.from(httpClientConfig, "http4s.client")
  }

  Kamon.onReconfigure(newConfig => _instrumentation = instrumentation(newConfig))

  def apply[F[_]](underlying: Client[F])(implicit F: Effect[F]): Client[F] = Client { request =>
    for {
      ctx <- Resource.liftF(F.delay(Kamon.currentContext()))
      k   <- kamonClient(underlying)(request)(ctx)(_instrumentation)
    } yield k
  }

  private def kamonClient[F[_]](underlying: Client[F])
                               (request: Request[F])
                               (ctx: Context)
                               (instrumentation: HttpClientInstrumentation)
                               (implicit F: Effect[F]): Resource[F, Response[F]] =
    for {
      requestHandler  <- Resource.liftF(F.delay(instrumentation.createHandler(getRequestBuilder(request), ctx)))
      response        <- underlying.run(requestHandler.request).attempt
      trackedResponse <- Resource.liftF(handleResponse(response, requestHandler, instrumentation.settings))
    } yield trackedResponse

  def handleResponse[F[_]](
      response: Either[Throwable, Response[F]],
      requestHandler: HttpClientInstrumentation.RequestHandler[Request[F]],
      settings: HttpClientInstrumentation.Settings
  )(implicit F: Effect[F]): F[Response[F]] = response match {
    case Right(res) =>
      if (res.status.code == 404)
        requestHandler.span.name(settings.defaultOperationName)
      requestHandler.processResponse(getResponseBuilder(res))
      F.delay(res)
    case Left(error) =>
      requestHandler.span.fail(error).finish()
      F.raiseError(error)
  }
}
Example 19
Source File: KamonSupport.scala From kamon-http4s with Apache License 2.0
package kamon.http4s
package middleware.server

import cats.data.{Kleisli, OptionT}
import cats.effect.{Resource, Sync}
import cats.implicits._
import kamon.Kamon
import kamon.context.Storage
import kamon.instrumentation.http.HttpServerInstrumentation
import kamon.instrumentation.http.HttpServerInstrumentation.RequestHandler
import org.http4s.{HttpRoutes, Request, Response}

object KamonSupport {

  def apply[F[_]: Sync](service: HttpRoutes[F], interface: String, port: Int): HttpRoutes[F] = {
    val httpServerConfig = Kamon.config().getConfig("kamon.instrumentation.http4s.server")
    val instrumentation  = HttpServerInstrumentation.from(httpServerConfig, "http4s.server", interface, port)
    Kleisli(kamonService[F](service, instrumentation)(_))
  }

  private def kamonService[F[_]](service: HttpRoutes[F], instrumentation: HttpServerInstrumentation)
                                (request: Request[F])
                                (implicit F: Sync[F]): OptionT[F, Response[F]] =
    OptionT {
      getHandler(instrumentation)(request).use { handler =>
        for {
          resOrUnhandled  <- service(request).value.attempt
          respWithContext <- kamonServiceHandler(handler, resOrUnhandled, instrumentation.settings)
        } yield respWithContext
      }
    }

  private def processRequest[F[_]](requestHandler: RequestHandler)(implicit F: Sync[F]): Resource[F, RequestHandler] =
    Resource.make(F.delay(requestHandler.requestReceived()))(h => F.delay(h.responseSent()))

  private def withContext[F[_]](requestHandler: RequestHandler)(implicit F: Sync[F]): Resource[F, Storage.Scope] =
    Resource.make(F.delay(Kamon.storeContext(requestHandler.context)))(scope => F.delay(scope.close()))

  private def getHandler[F[_]](instrumentation: HttpServerInstrumentation)(request: Request[F])(implicit F: Sync[F]): Resource[F, RequestHandler] =
    for {
      handler <- Resource.liftF(F.delay(instrumentation.createHandler(buildRequestMessage(request))))
      _       <- processRequest(handler)
      _       <- withContext(handler)
    } yield handler

  private def kamonServiceHandler[F[_]](requestHandler: RequestHandler,
                                        e: Either[Throwable, Option[Response[F]]],
                                        settings: HttpServerInstrumentation.Settings)
                                       (implicit F: Sync[F]): F[Option[Response[F]]] =
    e match {
      case Left(e) =>
        F.delay {
          requestHandler.span.fail(e.getMessage)
          Some(requestHandler.buildResponse(errorResponseBuilder, requestHandler.context))
        } *> F.raiseError(e)
      case Right(None) =>
        F.delay {
          requestHandler.span.name(settings.unhandledOperationName)
          val response: Response[F] = requestHandler.buildResponse[Response[F]](
            notFoundResponseBuilder,
            requestHandler.context
          )
          Some(response)
        }
      case Right(Some(response)) =>
        F.delay {
          val a = requestHandler.buildResponse(getResponseBuilder(response), requestHandler.context)
          Some(a)
        }
    }
}
Example 20
Source File: FileUtils.scala From skeuomorph with Apache License 2.0
package higherkindness.skeuomorph

import java.io.{File, FileOutputStream, InputStream}
import java.nio.file.{Files, Paths, StandardOpenOption}

import cats.effect.{Resource, Sync}

object FileUtils {
  def fileHandle[F[_]: Sync](name: String): Resource[F, File] =
    Resource.make(
      Sync[F].delay(new File(name))
    )(file => Sync[F].delay(file.deleteOnExit()))

  def fileOutputStream[F[_]: Sync](file: File): Resource[F, FileOutputStream] =
    Resource.make(
      Sync[F].delay(new FileOutputStream(file))
    )(fos => Sync[F].delay(fos.close()))

  def fileInputStream[F[_]: Sync](name: String): Resource[F, InputStream] =
    Resource.make(
      Sync[F].delay(Files.newInputStream(Paths.get(name), StandardOpenOption.DELETE_ON_CLOSE))
    )(is => Sync[F].delay(is.close()))
}
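Since each helper returns a Resource, they compose in a for-comprehension and are released in reverse order of acquisition. A sketch under the assumption that the goal is copying one file into another (the file names are illustrative, and InputStream.transferTo needs Java 9+):

import cats.effect.IO

// Illustrative composition: the input stream, output stream and file handle
// are all released when `use` completes, in reverse order.
val copy: IO[Unit] =
  (for {
    file <- FileUtils.fileHandle[IO]("out.bin")
    fos  <- FileUtils.fileOutputStream[IO](file)
    is   <- FileUtils.fileInputStream[IO]("in.bin")
  } yield (is, fos)).use { case (is, fos) =>
    IO(is.transferTo(fos)).map(_ => ())
  }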
Example 21
Source File: Hook.scala From canoe with MIT License
package canoe.api.sources

import canoe.api.TelegramClient
import canoe.methods.webhooks.{DeleteWebhook, SetWebhook}
import canoe.models.{InputFile, Update}
import canoe.syntax.methodOps
import cats.Monad
import cats.effect.{ConcurrentEffect, Resource, Timer}
import cats.syntax.all._
import fs2.Stream
import fs2.concurrent.Queue
import io.chrisdavenport.log4cats.Logger
import io.chrisdavenport.log4cats.slf4j.Slf4jLogger
import org.http4s._
import org.http4s.circe.jsonOf
import org.http4s.dsl.Http4sDsl
import org.http4s.implicits._
import org.http4s.server.Server
import org.http4s.server.blaze.BlazeServerBuilder

class Hook[F[_]](queue: Queue[F, Update]) {
  def updates: Stream[F, Update] = queue.dequeue
}

object Hook {

  private def listenServer[F[_]: ConcurrentEffect: Timer: Logger](port: Int): Resource[F, Hook[F]] = {
    val dsl = Http4sDsl[F]
    import dsl._

    def app(queue: Queue[F, Update]): HttpApp[F] =
      HttpRoutes
        .of[F] {
          case req @ POST -> Root =>
            req
              .decodeWith(jsonOf[F, Update], strict = true)(queue.enqueue1(_) *> Ok())
              .recoverWith {
                case InvalidMessageBodyFailure(details, _) =>
                  Logger[F].error(s"Received unknown type of update. $details") *> Ok()
              }
        }
        .orNotFound

    def server(queue: Queue[F, Update]): Resource[F, Server[F]] =
      BlazeServerBuilder[F].bindHttp(port).withHttpApp(app(queue)).resource

    Resource.suspend(Queue.unbounded[F, Update].map(q => server(q).map(_ => new Hook[F](q))))
  }
}
Example 22
Source File: package.scala From fs2-blobstore with Apache License 2.0
import java.io.OutputStream
import java.nio.file.Files

import cats.effect.{Blocker, Concurrent, ContextShift, Resource, Sync}
import cats.implicits._
import fs2.{Chunk, Hotswap, Pipe, Pull, RaiseThrowable, Stream}

package object blobstore {

  protected[blobstore] def _writeAllToOutputStream1[F[_]](in: Stream[F, Byte], out: OutputStream, blocker: Blocker)(
    implicit F: Sync[F],
    CS: ContextShift[F]
  ): Pull[F, Nothing, Unit] =
    in.pull.uncons.flatMap {
      case None => Pull.done
      case Some((hd, tl)) =>
        Pull.eval[F, Unit](blocker.delay(out.write(hd.toArray))) >> _writeAllToOutputStream1(tl, out, blocker)
    }

  protected[blobstore] def bufferToDisk[F[_]](
    chunkSize: Int,
    blocker: Blocker
  )(implicit F: Sync[F], CS: ContextShift[F]): Pipe[F, Byte, (Long, Stream[F, Byte])] = { in =>
    Stream.bracket(F.delay(Files.createTempFile("bufferToDisk", ".bin")))(p => F.delay(p.toFile.delete).void).flatMap { p =>
      in.through(fs2.io.file.writeAll(p, blocker)).drain ++
        Stream.emit((p.toFile.length, fs2.io.file.readAll(p, blocker, chunkSize)))
    }
  }

  private[blobstore] def putRotateBase[F[_]: Concurrent, T](
    limit: Long,
    openNewFile: Resource[F, T]
  )(consume: T => Chunk[Byte] => F[Unit]): Pipe[F, Byte, Unit] = { in =>
    Stream
      .resource(Hotswap(openNewFile))
      .flatMap {
        case (hotswap, newFile) =>
          goRotate(limit, 0L, in, newFile, hotswap, openNewFile)(
            consume = consumer => bytes => Pull.eval(consume(consumer)(bytes)).as(consumer),
            extract = Stream.emit
          ).stream
      }
  }

  private[blobstore] def goRotate[F[_]: RaiseThrowable, A, B](
    limit: Long,
    acc: Long,
    s: Stream[F, Byte],
    consumer: B,
    hotswap: Hotswap[F, A],
    resource: Resource[F, A]
  )(
    consume: B => Chunk[Byte] => Pull[F, Unit, B],
    extract: A => Stream[F, B]
  ): Pull[F, Unit, Unit] = {
    val toWrite = (limit - acc).min(Int.MaxValue.toLong).toInt
    s.pull.unconsLimit(toWrite).flatMap {
      case Some((hd, tl)) =>
        val newAcc = acc + hd.size
        consume(consumer)(hd).flatMap { consumer =>
          if (newAcc >= limit) {
            Pull
              .eval(hotswap.swap(resource))
              .flatMap(a => extract(a).pull.headOrError)
              .flatMap(nc => goRotate(limit, 0L, tl, nc, hotswap, resource)(consume, extract))
          } else {
            goRotate(limit, newAcc, tl, consumer, hotswap, resource)(consume, extract)
          }
        }
      case None => Pull.done
    }
  }
}
Example 23
Source File: HttpExistenceClient.scala From scala-steward with Apache License 2.0
package org.scalasteward.core.util

import cats.effect.{Async, Resource}
import cats.implicits._
import com.github.benmanes.caffeine.cache.Caffeine
import io.chrisdavenport.log4cats.Logger
import org.http4s.client.Client
import org.http4s.{Method, Request, Status, Uri}
import org.scalasteward.core.application.Config
import scalacache.CatsEffect.modes._
import scalacache.caffeine.CaffeineCache
import scalacache.{Async => _, _}

final class HttpExistenceClient[F[_]](statusCache: Cache[Status])(implicit
    client: Client[F],
    logger: Logger[F],
    mode: Mode[F],
    F: MonadThrowable[F]
) {
  def exists(uri: Uri): F[Boolean] =
    status(uri).map(_ === Status.Ok).handleErrorWith { throwable =>
      logger.debug(throwable)(s"Failed to check if $uri exists").as(false)
    }

  private def status(uri: Uri): F[Status] =
    statusCache.cachingForMemoizeF(uri.renderString)(None) {
      client.status(Request[F](method = Method.HEAD, uri = uri))
    }
}

object HttpExistenceClient {
  def create[F[_]](implicit
      config: Config,
      client: Client[F],
      logger: Logger[F],
      F: Async[F]
  ): Resource[F, HttpExistenceClient[F]] = {
    val buildCache = F.delay {
      CaffeineCache(
        Caffeine
          .newBuilder()
          .maximumSize(16384L)
          .expireAfterWrite(config.cacheTtl.length, config.cacheTtl.unit)
          .build[String, Entry[Status]]()
      )
    }
    Resource.make(buildCache)(_.close().void).map(new HttpExistenceClient[F](_))
  }
}
Example 24
Source File: CassandraCluster.scala From kafka-journal with MIT License
package com.evolutiongaming.kafka.journal.eventual.cassandra

import cats.effect.{Concurrent, Resource}
import cats.implicits._
import com.evolutiongaming.catshelper.FromFuture
import com.evolutiongaming.scassandra
import com.evolutiongaming.scassandra.{CassandraClusterOf, CassandraConfig}
import com.evolutiongaming.scassandra.util.FromGFuture

trait CassandraCluster[F[_]] {
  def session: Resource[F, CassandraSession[F]]
  def metadata: F[CassandraMetadata[F]]
}

object CassandraCluster {

  def apply[F[_]](implicit F: CassandraCluster[F]): CassandraCluster[F] = F

  def apply[F[_]: Concurrent: FromGFuture](
    cluster: scassandra.CassandraCluster[F],
    retries: Int
  ): CassandraCluster[F] = new CassandraCluster[F] {

    def session = {
      for {
        session <- cluster.connect
        session <- CassandraSession.of[F](session)
      } yield {
        CassandraSession(session, retries)
      }
    }

    def metadata = {
      for {
        metadata <- cluster.metadata
      } yield {
        CassandraMetadata[F](metadata)
      }
    }
  }

  def of[F[_]: Concurrent: FromFuture: FromGFuture](
    config: CassandraConfig,
    cassandraClusterOf: CassandraClusterOf[F],
    retries: Int,
  ): Resource[F, CassandraCluster[F]] = {
    for {
      cluster <- cassandraClusterOf(config)
    } yield {
      apply[F](cluster, retries)
    }
  }
}
Example 25
Source File: CassandraHealthCheckSpec.scala From kafka-journal with MIT License
package com.evolutiongaming.kafka.journal.eventual.cassandra

import cats.effect.{IO, Resource}
import cats.implicits._
import com.evolutiongaming.catshelper.Log
import com.evolutiongaming.kafka.journal.IOSuite._
import org.scalatest.funsuite.AsyncFunSuite
import org.scalatest.matchers.should.Matchers

import scala.concurrent.duration._
import scala.util.control.NoStackTrace

class CassandraHealthCheckSpec extends AsyncFunSuite with Matchers {

  test("CassandraHealthCheck") {
    val error = (new RuntimeException with NoStackTrace).raiseError[IO, Unit]
    val healthCheck = CassandraHealthCheck.of[IO](
      initial = 0.seconds,
      interval = 1.second,
      statement = Resource.pure[IO, IO[Unit]](error),
      log = Log.empty[IO])
    val result = for {
      error <- healthCheck.use { _.error.untilDefinedM }
    } yield {
      error shouldEqual error
    }
    result.run()
  }
}
Example 26
Source File: CacheOf.scala From kafka-journal with MIT License
package com.evolutiongaming.kafka.journal.replicator

import cats.Parallel
import cats.effect.{Concurrent, Resource, Timer}
import cats.implicits._
import com.evolutiongaming.catshelper.{BracketThrowable, Runtime}
import com.evolutiongaming.scache
import com.evolutiongaming.scache.{CacheMetrics, Releasable}
import com.evolutiongaming.skafka.Topic
import com.evolutiongaming.smetrics.MeasureDuration

import scala.concurrent.duration.FiniteDuration

trait CacheOf[F[_]] {
  def apply[K, V](topic: Topic): Resource[F, Cache[F, K, V]]
}

object CacheOf {

  def empty[F[_]: BracketThrowable]: CacheOf[F] = new CacheOf[F] {

    def apply[K, V](topic: Topic) = {
      val cache = new Cache[F, K, V] {
        def getOrUpdate(key: K)(value: => Resource[F, V]) = value.use(_.pure[F])
        def remove(key: K) = ().pure[F]
      }
      Resource.liftF(cache.pure[F])
    }
  }

  def apply[F[_]: Concurrent: Timer: Runtime: Parallel: MeasureDuration](
    expireAfter: FiniteDuration,
    cacheMetrics: Option[CacheMetrics.Name => CacheMetrics[F]]
  ): CacheOf[F] = new CacheOf[F] {

    def apply[K, V](topic: Topic) = {
      for {
        cache <- scache.Cache.expiring[F, K, V](expireAfter)
        cache <- cacheMetrics.fold {
          Resource.liftF(cache.pure[F])
        } { cacheMetrics =>
          cache.withMetrics(cacheMetrics(topic))
        }
      } yield {
        new Cache[F, K, V] {

          def getOrUpdate(key: K)(value: => Resource[F, V]) = {
            cache.getOrUpdateReleasable(key) {
              Releasable.of(value)
            }
          }

          def remove(key: K) = cache.remove(key).flatten.void
        }
      }
    }
  }
}
Example 27
Source File: KafkaSingletonTest.scala From kafka-journal with MIT License
package com.evolutiongaming.kafka.journal.replicator

import cats.data.{NonEmptySet => Nes}
import cats.effect.concurrent.{Deferred, Ref}
import cats.effect.{Concurrent, IO, Resource, Timer}
import cats.implicits._
import com.evolutiongaming.catshelper.Log
import com.evolutiongaming.kafka.journal.IOSuite._
import com.evolutiongaming.skafka.consumer.RebalanceListener
import com.evolutiongaming.skafka.{Partition, TopicPartition}
import com.evolutiongaming.sstream.Stream
import org.scalatest.funsuite.AsyncFunSuite
import org.scalatest.matchers.should.Matchers

import scala.concurrent.duration._

class KafkaSingletonTest extends AsyncFunSuite with Matchers {

  test("allocate & release when partition assigned or revoked") {
    `allocate & release when partition assigned or revoked`[IO]().run()
  }

  private def `allocate & release when partition assigned or revoked`[F[_]: Concurrent: Timer](): F[Unit] = {

    val topic = "topic"

    def consumer(deferred: Deferred[F, RebalanceListener[F]]) =
      new TopicConsumer[F] {
        def subscribe(listener: RebalanceListener[F]) = deferred.complete(listener)
        def poll = Stream.empty
        def commit = TopicCommit.empty
      }

    def topicPartition(partition: Partition) = TopicPartition(topic, partition)

    val result = for {
      listener  <- Resource.liftF(Deferred[F, RebalanceListener[F]])
      allocated <- Resource.liftF(Ref[F].of(false))
      resource  = Resource.make { allocated.set(true) } { _ => allocated.set(false) }
      singleton <- KafkaSingleton.of(topic, consumer(listener).pure[Resource[F, *]], resource, Log.empty[F])
      listener  <- Resource.liftF(listener.get)
      _ <- Resource.liftF {
        for {
          a <- singleton.get
          _ = a shouldEqual none[Unit]
          a <- allocated.get
          _ = a shouldEqual false
          _ <- listener.onPartitionsAssigned(Nes.of(topicPartition(Partition.max)))
          a <- singleton.get
          _ = a shouldEqual none[Unit]
          a <- allocated.get
          _ = a shouldEqual false
          _ <- listener.onPartitionsAssigned(Nes.of(topicPartition(Partition.min)))
          _ <- Timer[F].sleep(10.millis)
          a <- singleton.get
          _ = a shouldEqual ().some
          a <- allocated.get
          _ = a shouldEqual true
          _ <- listener.onPartitionsRevoked(Nes.of(topicPartition(Partition.max)))
          a <- singleton.get
          _ = a shouldEqual ().some
          a <- allocated.get
          _ = a shouldEqual true
          _ <- listener.onPartitionsRevoked(Nes.of(topicPartition(Partition.min)))
          _ <- Timer[F].sleep(10.millis)
          a <- singleton.get
          _ = a shouldEqual none[Unit]
          a <- allocated.get
          _ = a shouldEqual false
        } yield {}
      }
    } yield {}
    result.use { _ => ().pure[F] }
  }
}
Example 28
Source File: ReplicatorSpec.scala From kafka-journal with MIT License
package com.evolutiongaming.kafka.journal.replicator

import cats.effect.{IO, Resource}
import cats.implicits._
import com.evolutiongaming.catshelper.LogOf
import com.evolutiongaming.kafka.journal.IOSuite._
import com.evolutiongaming.kafka.journal.replicator.Replicator.Consumer
import com.evolutiongaming.skafka.Topic
import org.scalatest.matchers.should.Matchers
import org.scalatest.wordspec.AsyncWordSpec

import scala.concurrent.duration._
import scala.util.control.NoStackTrace

class ReplicatorSpec extends AsyncWordSpec with Matchers {

  "Replicator" should {

    "fail if any of replicators failed" in {
      implicit val logOf = LogOf.empty[IO]

      val error = new RuntimeException with NoStackTrace

      val consumer = new Consumer[IO] {
        def topics = Set("journal").pure[IO]
      }

      val start = (_: Topic) => Resource.pure[IO, IO[Unit]](error.raiseError[IO, Unit])

      val result = for {
        result <- Replicator.of(
          Replicator.Config(topicDiscoveryInterval = 0.millis),
          Resource.pure[IO, Consumer[IO]](consumer),
          start).use(identity).attempt
      } yield {
        result shouldEqual error.asLeft
      }
      result.run()
    }
  }
}
Example 29
Source File: HeadCacheFenced.scala From kafka-journal with MIT License
package com.evolutiongaming.kafka.journal

import cats.Apply
import cats.effect.concurrent.Ref
import cats.effect.{Concurrent, Resource}
import cats.implicits._
import com.evolutiongaming.catshelper.CatsHelper._
import com.evolutiongaming.skafka.{Offset, Partition}

object HeadCacheFenced {

  def of[F[_]: Concurrent](headCache: Resource[F, HeadCache[F]]): Resource[F, HeadCache[F]] = {

    val fence = Resource.make {
      Ref[F].of(().pure[F])
    } { fence =>
      fence.set(HeadCacheReleasedError.raiseError[F, Unit])
    }

    val result = for {
      headCache <- headCache
      fence     <- fence
    } yield {
      apply(headCache, fence.get.flatten)
    }
    result.fenced
  }

  def apply[F[_]: Apply](headCache: HeadCache[F], fence: F[Unit]): HeadCache[F] = {
    (key: Key, partition: Partition, offset: Offset) => {
      fence *> headCache.get(key, partition, offset)
    }
  }
}
Example 30
Source File: ThreadPoolOf.scala From kafka-journal with MIT License
package com.evolutiongaming.kafka.journal.execution

import java.util.concurrent.{SynchronousQueue, ThreadFactory, ThreadPoolExecutor}

import cats.effect.{Resource, Sync}
import cats.implicits._

import scala.concurrent.duration._

object ThreadPoolOf {

  def apply[F[_]: Sync](
    minSize: Int,
    maxSize: Int,
    threadFactory: ThreadFactory,
    keepAlive: FiniteDuration = 1.minute,
  ): Resource[F, ThreadPoolExecutor] = {

    val result = for {
      result <- Sync[F].delay {
        new ThreadPoolExecutor(
          minSize,
          maxSize,
          keepAlive.length,
          keepAlive.unit,
          new SynchronousQueue[Runnable],
          threadFactory)
      }
    } yield {
      val release = Sync[F].delay { result.shutdown() }
      (result, release)
    }
    Resource(result)
  }
}
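A sketch of adapting the resulting pool for use as an ExecutionContext (the sizes and factory are illustrative):

import java.util.concurrent.Executors
import cats.effect.{IO, Resource}
import scala.concurrent.ExecutionContext

// Illustrative: a bounded pool exposed as an ExecutionContext, shut down on release.
val boundedEc: Resource[IO, ExecutionContext] =
  ThreadPoolOf[IO](
    minSize = 1,
    maxSize = 8,
    threadFactory = Executors.defaultThreadFactory()
  ).map(ExecutionContext.fromExecutor)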
Example 31
Source File: ForkJoinPoolOf.scala From kafka-journal with MIT License
package com.evolutiongaming.kafka.journal.execution

import java.util.concurrent.ForkJoinPool
import java.util.concurrent.ForkJoinPool.ForkJoinWorkerThreadFactory

import cats.effect.{Resource, Sync}
import cats.implicits._

object ForkJoinPoolOf {

  def apply[F[_]: Sync](
    name: String,
    parallelism: Int
  ): Resource[F, ForkJoinPool] = {

    val threadFactory = ForkJoinPool.defaultForkJoinWorkerThreadFactory.withPrefix(name)

    val threadPool = Sync[F].delay {
      new ForkJoinPool(
        parallelism,
        threadFactory,
        UncaughtExceptionHandler.default,
        true)
    }

    val result = for {
      threadPool <- threadPool
    } yield {
      val release = Sync[F].delay { threadPool.shutdown() }
      (threadPool, release)
    }

    Resource(result)
  }

  implicit class ForkJoinWorkerThreadFactoryOps(val self: ForkJoinWorkerThreadFactory) extends AnyVal {

    def withPrefix(prefix: String): ForkJoinWorkerThreadFactory = new ForkJoinWorkerThreadFactory {
      def newThread(pool: ForkJoinPool) = {
        val thread   = self.newThread(pool)
        val threadId = thread.getId
        thread.setName(s"$prefix-$threadId")
        thread
      }
    }
  }
}
Example 32
Source File: ScheduledExecutorServiceOf.scala From kafka-journal with MIT License
package com.evolutiongaming.kafka.journal.execution

import java.util.concurrent.{ScheduledExecutorService, ThreadFactory, Executors => ExecutorsJ}

import cats.effect.{Resource, Sync}
import cats.implicits._

object ScheduledExecutorServiceOf {

  def apply[F[_]: Sync](
    parallelism: Int,
    threadFactory: ThreadFactory
  ): Resource[F, ScheduledExecutorService] = {

    val result = for {
      threadPool <- Sync[F].delay { ExecutorsJ.newScheduledThreadPool(parallelism, threadFactory) }
    } yield {
      val release = Sync[F].delay { threadPool.shutdown() }
      (threadPool, release)
    }
    Resource(result)
  }
}
Example 33
Source File: HeadCacheMetrics.scala From kafka-journal with MIT License
package com.evolutiongaming.kafka.journal

import cats.effect.Resource
import cats.{Applicative, Monad}
import com.evolutiongaming.scache.CacheMetrics
import com.evolutiongaming.smetrics.CollectorRegistry

final case class HeadCacheMetrics[F[_]](headCache: HeadCache.Metrics[F], cache: CacheMetrics[F])

object HeadCacheMetrics {

  def empty[F[_]: Applicative]: HeadCacheMetrics[F] =
    apply(HeadCache.Metrics.empty, CacheMetrics.empty)

  def of[F[_]: Monad](
    registry: CollectorRegistry[F],
    prefix: HeadCache.Metrics.Prefix = HeadCache.Metrics.Prefix.default
  ): Resource[F, HeadCacheMetrics[F]] = {
    for {
      headCache <- HeadCache.Metrics.of(registry, prefix)
      cache     <- CacheMetrics.of(registry, s"${ prefix }_${ CacheMetrics.Prefix.Default }")
    } yield {
      apply(headCache, cache(prefix))
    }
  }
}
Example 34
Source File: ConsumeActionRecords.scala From kafka-journal with MIT License
package com.evolutiongaming.kafka.journal

import cats.data.{NonEmptyList => Nel, NonEmptySet => Nes}
import cats.effect.Resource
import cats.implicits._
import cats.~>
import com.evolutiongaming.catshelper.{BracketThrowable, Log}
import com.evolutiongaming.kafka.journal.conversions.ConsRecordToActionRecord
import com.evolutiongaming.skafka.{Offset, Partition, TopicPartition}
import com.evolutiongaming.sstream.Stream

trait ConsumeActionRecords[F[_]] {
  def apply(key: Key, partition: Partition, from: Offset): Stream[F, ActionRecord[Action]]
}

object ConsumeActionRecords {

  def apply[F[_]: BracketThrowable](
    consumer: Resource[F, Journals.Consumer[F]],
    log: Log[F])(implicit
    consRecordToActionRecord: ConsRecordToActionRecord[F]
  ): ConsumeActionRecords[F] = {
    (key: Key, partition: Partition, from: Offset) => {

      val topicPartition = TopicPartition(topic = key.topic, partition = partition)

      def seek(consumer: Journals.Consumer[F]) = {
        for {
          _ <- consumer.assign(Nes.of(topicPartition))
          _ <- consumer.seek(topicPartition, from)
          _ <- log.debug(s"$key consuming from $partition:$from")
        } yield {}
      }

      def filter(records: List[Nel[ConsRecord]]) = {
        for {
          records <- records
          record  <- records.toList if record.key.exists { _.value === key.id }
        } yield record
      }

      def poll(consumer: Journals.Consumer[F]) = {
        for {
          records0 <- consumer.poll
          records   = filter(records0.values.values.toList)
          actions  <- records.traverseFilter { a => consRecordToActionRecord(a).value }
        } yield actions
      }

      for {
        consumer <- Stream.fromResource(consumer)
        _        <- Stream.lift(seek(consumer))
        records  <- Stream.repeat(poll(consumer))
        record   <- Stream[F].apply(records)
      } yield record
    }
  }

  implicit class ConsumeActionRecordsOps[F[_]](val self: ConsumeActionRecords[F]) extends AnyVal {

    def mapK[G[_]](fg: F ~> G, gf: G ~> F): ConsumeActionRecords[G] = {
      (key: Key, partition: Partition, from1: Offset) => {
        self(key, partition, from1).mapK[G](fg, gf)
      }
    }
  }
}
Example 35
Source File: HeadCacheOf.scala From kafka-journal with MIT License | 5 votes |
package com.evolutiongaming.kafka.journal import cats.{Applicative, Parallel} import cats.implicits._ import cats.effect.{Concurrent, Resource, Timer} import com.evolutiongaming.catshelper.{FromTry, LogOf} import com.evolutiongaming.kafka.journal.eventual.EventualJournal import com.evolutiongaming.skafka.consumer.ConsumerConfig import com.evolutiongaming.smetrics.MeasureDuration trait HeadCacheOf[F[_]] { def apply( consumerConfig: ConsumerConfig, eventualJournal: EventualJournal[F] ): Resource[F, HeadCache[F]] } object HeadCacheOf { def empty[F[_] : Applicative]: HeadCacheOf[F] = const(Resource.liftF(HeadCache.empty[F].pure[F])) def const[F[_]](value: Resource[F, HeadCache[F]]): HeadCacheOf[F] = { (_: ConsumerConfig, _: EventualJournal[F]) => value } def apply[F[_]](implicit F: HeadCacheOf[F]): HeadCacheOf[F] = F def apply[ F[_] : Concurrent : Parallel : Timer : LogOf : KafkaConsumerOf : MeasureDuration : FromTry : FromAttempt : FromJsResult : JsonCodec.Decode ]( metrics: Option[HeadCacheMetrics[F]] ): HeadCacheOf[F] = { (consumerConfig: ConsumerConfig, eventualJournal: EventualJournal[F]) => { for { headCache <- HeadCache.of[F](consumerConfig, eventualJournal, metrics) log <- Resource.liftF(LogOf[F].apply(HeadCache.getClass)) } yield { headCache.withLog(log) } } } }
Example 36
Source File: ResourceRef.scala From kafka-journal with MIT License | 5 votes |
package com.evolutiongaming.kafka.journal.util import cats.effect.concurrent.Ref import cats.effect.implicits._ import cats.effect.{Resource, Sync} import cats.implicits._ import scala.util.control.NoStackTrace trait ResourceRef[F[_], A] { def get: F[A] def set(a: A, release: F[Unit]): F[Unit] def set(a: Resource[F, A]): F[Unit] } object ResourceRef { def of[F[_] : Sync, A](resource: Resource[F, A]): Resource[F, ResourceRef[F, A]] = { case class State(a: A, release: F[Unit]) Resource .make { for { ab <- resource.allocated (a, release) = ab ref <- Ref[F].of(State(a, release).some) } yield ref } { ref => ref .getAndSet(none) .flatMap { _.foldMapM { _.release } } } .map { ref => new ResourceRef[F, A] { def get = { ref .get .flatMap { case Some(state) => state.a.pure[F] case None => ResourceReleasedError.raiseError[F, A] } } def set(a: A, release: F[Unit]) = { ref .modify { case Some(state) => (State(a, release).some, state.release ) case None => (none, ResourceReleasedError.raiseError[F, Unit]) } .flatten .uncancelable } def set(a: Resource[F, A]) = { a .allocated .flatMap { case (a, release) => set(a, release) } } } } } } case object ResourceReleasedError extends RuntimeException("Resource released") with NoStackTrace
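A usage sketch for ResourceRef, using cats-effect 2.x IO with illustrative resource names: set releases the previously held resource before storing the new one, and whatever is held at the end is released when the outer resource closes.

import cats.effect.{IO, Resource}
import cats.implicits._

def named(name: String): Resource[IO, String] =
  Resource.make(IO(println(s"acquire $name")).as(name))(_ => IO(println(s"release $name")))

val program: IO[(String, String)] =
  ResourceRef.of[IO, String](named("a")).use { ref =>
    for {
      a <- ref.get
      _ <- ref.set(named("b")) // "a" is released here
      b <- ref.get
    } yield (a, b)
  }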
Example 37
Source File: ActorSystemOf.scala From kafka-journal with MIT License | 5 votes |
package com.evolutiongaming.kafka.journal.util import akka.actor.ActorSystem import cats.effect.{Resource, Sync} import cats.implicits._ import com.evolutiongaming.catshelper.FromFuture import com.typesafe.config.Config object ActorSystemOf { def apply[F[_] : Sync : FromFuture]( name: String, config: Option[Config] = None): Resource[F, ActorSystem] = { val system = Sync[F].delay { config.fold(ActorSystem(name)) { config => ActorSystem(name, config) } } for { system <- Resource.liftF(system) result <- apply(system) } yield result } def apply[F[_] : Sync : FromFuture](system: ActorSystem): Resource[F, ActorSystem] = { val release = FromFuture[F].apply { system.terminate() }.void val result = (system, release).pure[F] Resource(result) } }
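A usage sketch, assuming an implicit FromFuture[IO] from cats-helper is in scope; the actor system is terminated when the resource is released.

import cats.effect.IO

val program: IO[Unit] =
  ActorSystemOf[IO]("demo").use { system =>
    IO(println(s"actor system ${system.name} is running"))
  }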
Example 38
Source File: Executors.scala From kafka-journal with MIT License | 5 votes |
package com.evolutiongaming.kafka.journal.util import java.util.concurrent.ScheduledExecutorService import cats.effect.{Resource, Sync} import com.evolutiongaming.catshelper.Runtime import com.evolutiongaming.kafka.journal.execution.{ForkJoinPoolOf, ScheduledExecutorServiceOf, ThreadFactoryOf, ThreadPoolOf} import scala.concurrent.{ExecutionContext, ExecutionContextExecutorService} object Executors { def blocking[F[_] : Sync]( name: String, ): Resource[F, ExecutionContextExecutorService] = { for { threadFactory <- Resource.liftF(ThreadFactoryOf[F](name)) threadPool <- ThreadPoolOf[F](2, Int.MaxValue, threadFactory) } yield { ExecutionContext.fromExecutorService(threadPool) } } def nonBlocking[F[_] : Sync]( name: String, ): Resource[F, ExecutionContextExecutorService] = { for { cores <- Resource.liftF(Runtime[F].availableCores) parallelism = cores + 1 forkJoinPool <- ForkJoinPoolOf[F](name, parallelism) } yield { ExecutionContext.fromExecutorService(forkJoinPool) } } def scheduled[F[_] : Sync]( name: String, parallelism: Int ): Resource[F, ScheduledExecutorService] = { for { threadFactory <- Resource.liftF(ThreadFactoryOf[F](name)) result <- ScheduledExecutorServiceOf[F](parallelism, threadFactory) } yield result } }
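For instance (cats-effect 2.x IO; the pool name is illustrative), a blocking pool that lives only for the duration of use:

import cats.effect.IO

val program: IO[Unit] =
  Executors.blocking[IO]("demo-blocking").use { blockingEc =>
    IO(println(s"got a blocking ExecutionContext: $blockingEc"))
  }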
Example 39
Source File: KafkaReadMetrics.scala From kafka-journal with MIT License | 5 votes |
package com.evolutiongaming.kafka.journal.conversions import cats.effect.Resource import cats.{Applicative, Monad} import com.evolutiongaming.kafka.journal._ import com.evolutiongaming.smetrics.MetricsHelper._ import com.evolutiongaming.smetrics.{CollectorRegistry, LabelNames, Quantile, Quantiles} import scala.concurrent.duration.FiniteDuration trait KafkaReadMetrics[F[_]] { def apply(payloadAndType: PayloadAndType, latency: FiniteDuration): F[Unit] } object KafkaReadMetrics { def empty[F[_]: Applicative]: KafkaReadMetrics[F] = (_, _) => Applicative[F].unit def of[F[_]: Monad]( registry: CollectorRegistry[F], prefix: String = "journal" ): Resource[F, KafkaReadMetrics[F]] = { val durationSummary = registry.summary( name = s"${prefix}_payload_to_events_duration", help = "Journal payload to events conversion duration in seconds", quantiles = Quantiles(Quantile(0.9, 0.05), Quantile(0.99, 0.005)), labels = LabelNames("payload_type") ) for { durationSummary <- durationSummary } yield { new KafkaReadMetrics[F] { def apply(payloadAndType: PayloadAndType, latency: FiniteDuration): F[Unit] = durationSummary .labels(payloadAndType.payloadType.name) .observe(latency.toNanos.nanosToSeconds) } } } }
Example 40
Source File: KafkaWriteMetrics.scala From kafka-journal with MIT License | 5 votes |
package com.evolutiongaming.kafka.journal.conversions import cats.effect.Resource import cats.{Applicative, Monad} import com.evolutiongaming.kafka.journal._ import com.evolutiongaming.smetrics.MetricsHelper._ import com.evolutiongaming.smetrics.{ CollectorRegistry, LabelNames, Quantile, Quantiles } import scala.concurrent.duration.FiniteDuration trait KafkaWriteMetrics[F[_]] { def apply[A](events: Events[A], payloadAndType: PayloadAndType, latency: FiniteDuration): F[Unit] } object KafkaWriteMetrics { def empty[F[_]: Applicative]: KafkaWriteMetrics[F] = new KafkaWriteMetrics[F] { override def apply[A](events: Events[A], payloadAndType: PayloadAndType, latency: FiniteDuration): F[Unit] = Applicative[F].unit } def of[F[_]: Monad]( registry: CollectorRegistry[F], prefix: String = "journal" ): Resource[F, KafkaWriteMetrics[F]] = { val durationSummary = registry.summary( name = s"${prefix}_events_to_payload_duration", help = "Journal events to payload conversion duration in seconds", quantiles = Quantiles(Quantile(0.9, 0.05), Quantile(0.99, 0.005)), labels = LabelNames("payload_type") ) for { durationSummary <- durationSummary } yield { new KafkaWriteMetrics[F] { def apply[A](events: Events[A], payloadAndType: PayloadAndType, latency: FiniteDuration): F[Unit] = durationSummary .labels(payloadAndType.payloadType.name) .observe(latency.toNanos.nanosToSeconds) } } } }
Example 41
Source File: KafkaJournalCirce.scala From kafka-journal with MIT License | 5 votes |
package akka.persistence.kafka.journal.circe import akka.persistence.kafka.journal.EventSerializer.PersistentRepresentation import akka.persistence.kafka.journal._ import akka.persistence.kafka.journal.circe.KafkaJournalCirce._ import cats.effect.{IO, Resource} import cats.implicits._ import com.evolutiongaming.catshelper.MonadThrowable import com.evolutiongaming.kafka.journal._ import com.evolutiongaming.kafka.journal.circe.Codecs._ import com.evolutiongaming.kafka.journal.circe.FromCirceResult import com.evolutiongaming.kafka.journal.circe.Instances._ import com.evolutiongaming.kafka.journal.util.Fail import com.typesafe.config.Config import io.circe._ import io.circe.generic.semiauto._ import io.circe.syntax._ class KafkaJournalCirce(config: Config) extends KafkaJournal(config) { override def adapterIO: Resource[IO, JournalAdapter[IO]] = { for { serializer <- circeEventSerializer journalReadWrite <- Resource.liftF(circeJournalReadWrite) adapter <- adapterIO(serializer, journalReadWrite) } yield adapter } def circeEventSerializer: Resource[IO, EventSerializer[IO, Json]] = { val serializer = JsonEventSerializer.of[IO].pure[IO] Resource.liftF(serializer) } def circeJournalReadWrite: IO[JournalReadWrite[IO, Json]] = JournalReadWrite.of[IO, Json].pure[IO] } object KafkaJournalCirce { implicit def persistentJsonEncoder[A : Encoder]: Encoder[PersistentJson[A]] = deriveEncoder implicit def persistentJsonDecoder[A : Decoder]: Decoder[PersistentJson[A]] = deriveDecoder object JsonEventSerializer { def of[F[_] : MonadThrowable : FromCirceResult]: EventSerializer[F, Json] = { def toEventPayload(repr: PersistentRepresentation): F[Json] = { def json(json: Json, payloadType: Option[PayloadType.TextOrJson] = None) = { val persistent = PersistentJson( manifest = repr.manifest, writerUuid = repr.writerUuid, payloadType = payloadType, payload = json ) persistent.asJson.dropNullValues } repr.payload match { case payload: Json => json(payload).pure[F] case payload: String => json(Json.fromString(payload), PayloadType.Text.some).pure[F] case other => Fail.lift[F].fail(s"Event.payload is not supported, payload: $other") } } def fromEventPayload(json: Json): F[PersistentRepresentation] = { val fromCirceResult = FromCirceResult.summon[F] for { persistentJson <- fromCirceResult(json.as[PersistentJson[Json]]) payloadType = persistentJson.payloadType getOrElse PayloadType.Json payload = persistentJson.payload anyRef <- payloadType match { case PayloadType.Text => fromCirceResult(payload.as[String]).widen[AnyRef] case PayloadType.Json => payload.pure[F].widen[AnyRef] } } yield { PersistentRepresentation( payload = anyRef, manifest = persistentJson.manifest, writerUuid = persistentJson.writerUuid ) } } EventSerializer(toEventPayload, fromEventPayload) } } }
Example 42
Source File: RedisLiftKDemo.scala From redis4cats with Apache License 2.0 | 5 votes |
package dev.profunktor.redis4cats import cats.data.EitherT import cats.effect.{ IO, Resource } import dev.profunktor.redis4cats.effect.Log.NoOp._ object RedisLiftKDemo extends LoggerIOApp { import Demo._ val program: IO[Unit] = { val usernameKey = "test" val showResult: Option[String] => IO[Unit] = _.fold(putStrLn(s"Not found key: $usernameKey"))(s => putStrLn(s)) val commandsApi: Resource[IO, RedisCommands[IO, String, String]] = Redis[IO].utf8(redisURI) commandsApi.use( _.liftK[EitherT[IO, String, *]] .get(usernameKey) .semiflatMap(x => showResult(x)) .value .void ) } }
Example 43
Source File: RedisMasterReplicaStringsDemo.scala From redis4cats with Apache License 2.0 | 5 votes |
package dev.profunktor.redis4cats import cats.effect.{ IO, Resource } import dev.profunktor.redis4cats.connection._ import dev.profunktor.redis4cats.data.ReadFrom import dev.profunktor.redis4cats.effect.Log.NoOp._ object RedisMasterReplicaStringsDemo extends LoggerIOApp { import Demo._ val program: IO[Unit] = { val usernameKey = "test" val showResult: Option[String] => IO[Unit] = _.fold(putStrLn(s"Not found key: $usernameKey"))(s => putStrLn(s)) val connection: Resource[IO, RedisCommands[IO, String, String]] = for { uri <- Resource.liftF(RedisURI.make[IO](redisURI)) conn <- RedisMasterReplica[IO].make(stringCodec, uri)(Some(ReadFrom.MasterPreferred)) cmds <- Redis[IO].masterReplica(conn) } yield cmds connection .use { cmd => for { x <- cmd.get(usernameKey) _ <- showResult(x) _ <- cmd.set(usernameKey, "some value") y <- cmd.get(usernameKey) _ <- showResult(y) _ <- cmd.setNx(usernameKey, "should not happen") w <- cmd.get(usernameKey) _ <- showResult(w) _ <- cmd.del(usernameKey) z <- cmd.get(usernameKey) _ <- showResult(z) } yield () } } }
Example 44
Source File: JsonCodecDemo.scala From redis4cats with Apache License 2.0 | 5 votes |
package dev.profunktor.redis4cats import cats.effect.{ IO, Resource } import dev.profunktor.redis4cats.codecs.Codecs import dev.profunktor.redis4cats.codecs.splits.SplitEpi import dev.profunktor.redis4cats.data.RedisCodec import dev.profunktor.redis4cats.effect.Log.NoOp._ import io.circe.generic.auto._ import io.circe.parser.{ decode => jsonDecode } import io.circe.syntax._ object JsonCodecDemo extends LoggerIOApp { import Demo._ sealed trait Event object Event { case class Ack(id: Long) extends Event case class Message(id: Long, payload: String) extends Event case object Unknown extends Event } val program: IO[Unit] = { val eventsKey = "events" val eventSplitEpi: SplitEpi[String, Event] = SplitEpi[String, Event]( str => jsonDecode[Event](str).getOrElse(Event.Unknown), _.asJson.noSpaces ) val eventsCodec: RedisCodec[String, Event] = Codecs.derive(RedisCodec.Utf8, eventSplitEpi) val commandsApi: Resource[IO, RedisCommands[IO, String, Event]] = Redis[IO].simple(redisURI, eventsCodec) commandsApi .use { cmd => for { x <- cmd.sCard(eventsKey) _ <- putStrLn(s"Number of events: $x") _ <- cmd.sAdd(eventsKey, Event.Ack(1), Event.Message(23, "foo")) y <- cmd.sMembers(eventsKey) _ <- putStrLn(s"Events: $y") } yield () } } }
Example 45
Source File: RedisStringsDemo.scala From redis4cats with Apache License 2.0 | 5 votes |
package dev.profunktor.redis4cats import cats.effect.{ IO, Resource } import dev.profunktor.redis4cats.algebra.StringCommands import dev.profunktor.redis4cats.effect.Log.NoOp._ object RedisStringsDemo extends LoggerIOApp { import Demo._ val program: IO[Unit] = { val usernameKey = "test" val showResult: Option[String] => IO[Unit] = _.fold(putStrLn(s"Not found key: $usernameKey"))(s => putStrLn(s)) val commandsApi: Resource[IO, StringCommands[IO, String, String]] = Redis[IO].utf8(redisURI) commandsApi .use { cmd => for { x <- cmd.get(usernameKey) _ <- showResult(x) _ <- cmd.set(usernameKey, "some value") y <- cmd.get(usernameKey) _ <- showResult(y) _ <- cmd.setNx(usernameKey, "should not happen") w <- cmd.get(usernameKey) _ <- showResult(w) } yield () } } }
Example 46
Source File: RedisSortedSetsDemo.scala From redis4cats with Apache License 2.0 | 5 votes |
package dev.profunktor.redis4cats import cats.effect.{ IO, Resource } import dev.profunktor.redis4cats.algebra.SortedSetCommands import dev.profunktor.redis4cats.effect.Log.NoOp._ import dev.profunktor.redis4cats.effects.{ Score, ScoreWithValue, ZRange } object RedisSortedSetsDemo extends LoggerIOApp { import Demo._ val program: IO[Unit] = { val testKey = "zztop" val commandsApi: Resource[IO, SortedSetCommands[IO, String, Long]] = Redis[IO].simple(redisURI, longCodec) commandsApi .use { cmd => for { _ <- cmd.zAdd(testKey, args = None, ScoreWithValue(Score(1), 1), ScoreWithValue(Score(3), 2)) x <- cmd.zRevRangeByScore(testKey, ZRange(0, 2), limit = None) _ <- putStrLn(s"Score: $x") y <- cmd.zCard(testKey) _ <- putStrLn(s"Size: $y") z <- cmd.zCount(testKey, ZRange(0, 1)) _ <- putStrLn(s"Count: $z") } yield () } } }
Example 47
Source File: RedisListsDemo.scala From redis4cats with Apache License 2.0 | 5 votes |
package dev.profunktor.redis4cats import cats.effect.{ IO, Resource } import dev.profunktor.redis4cats.algebra.ListCommands import dev.profunktor.redis4cats.effect.Log.NoOp._ object RedisListsDemo extends LoggerIOApp { import Demo._ val program: IO[Unit] = { val testKey = "listos" val commandsApi: Resource[IO, ListCommands[IO, String, String]] = Redis[IO].utf8(redisURI) commandsApi .use { cmd => for { _ <- cmd.rPush(testKey, "one", "two", "three") x <- cmd.lRange(testKey, 0, 10) _ <- putStrLn(s"Range: $x") y <- cmd.lLen(testKey) _ <- putStrLn(s"Length: $y") a <- cmd.lPop(testKey) _ <- putStrLn(s"Left Pop: $a") b <- cmd.rPop(testKey) _ <- putStrLn(s"Right Pop: $b") z <- cmd.lRange(testKey, 0, 10) _ <- putStrLn(s"Range: $z") } yield () } } }
Example 48
Source File: RedisGeoDemo.scala From redis4cats with Apache License 2.0 | 5 votes |
package dev.profunktor.redis4cats

import cats.effect.{ IO, Resource }
import dev.profunktor.redis4cats.algebra.GeoCommands
import dev.profunktor.redis4cats.effect.Log.NoOp._
import dev.profunktor.redis4cats.effects._
import io.lettuce.core.GeoArgs

object RedisGeoDemo extends LoggerIOApp {

  import Demo._

  val program: IO[Unit] = {
    val testKey = "location"

    val commandsApi: Resource[IO, GeoCommands[IO, String, String]] =
      Redis[IO].utf8(redisURI)

    val _BuenosAires  = GeoLocation(Longitude(-58.3816), Latitude(-34.6037), "Buenos Aires")
    val _RioDeJaneiro = GeoLocation(Longitude(-43.1729), Latitude(-22.9068), "Rio de Janeiro")
    val _Montevideo   = GeoLocation(Longitude(-56.164532), Latitude(-34.901112), "Montevideo")
    val _Tokyo        = GeoLocation(Longitude(139.6917), Latitude(35.6895), "Tokyo")

    commandsApi
      .use { cmd =>
        for {
          _ <- cmd.geoAdd(testKey, _BuenosAires)
          _ <- cmd.geoAdd(testKey, _RioDeJaneiro)
          _ <- cmd.geoAdd(testKey, _Montevideo)
          _ <- cmd.geoAdd(testKey, _Tokyo)
          x <- cmd.geoDist(testKey, _BuenosAires.value, _Tokyo.value, GeoArgs.Unit.km)
          _ <- putStrLn(s"Distance from ${_BuenosAires.value} to Tokyo: $x km")
          y <- cmd.geoPos(testKey, _RioDeJaneiro.value)
          _ <- putStrLn(s"Geo Pos of ${_RioDeJaneiro.value}: ${y.headOption}")
          z <- cmd.geoRadius(testKey, GeoRadius(_Montevideo.lon, _Montevideo.lat, Distance(10000.0)), GeoArgs.Unit.km)
          _ <- putStrLn(s"Geo Radius in 10000 km: $z")
        } yield ()
      }
  }
}
Example 49
Source File: RedisScriptsDemo.scala From redis4cats with Apache License 2.0 | 5 votes |
package dev.profunktor.redis4cats import cats.effect.{ IO, Resource } import dev.profunktor.redis4cats.algebra.ScriptCommands import dev.profunktor.redis4cats.effect.Log.NoOp._ import dev.profunktor.redis4cats.effects.ScriptOutputType object RedisScriptsDemo extends LoggerIOApp { import Demo._ val program: IO[Unit] = { val commandsApi: Resource[IO, ScriptCommands[IO, String, String]] = Redis[IO].utf8(redisURI) commandsApi .use { cmd => for { greeting <- cmd.eval("return 'Hello World'", ScriptOutputType.Value) _ <- putStrLn(s"Greetings from Lua: $greeting") fortyTwo <- cmd.eval("return 42", ScriptOutputType.Integer) _ <- putStrLn(s"Answer to the Ultimate Question of Life, the Universe, and Everything: $fortyTwo") list <- cmd.eval( "return {'Let', 'us', ARGV[1], ARGV[2]}", ScriptOutputType.Multi, Nil, List("have", "fun") ) _ <- putStrLn(s"We can even return lists: $list") shaRandom <- cmd.scriptLoad("math.randomseed(tonumber(ARGV[1])); return math.random() * 1000") List(exists) <- cmd.scriptExists(shaRandom) _ <- putStrLn(s"Script is cached on Redis: $exists") // seeding the RNG with 7 random <- cmd.evalSha(shaRandom, ScriptOutputType.Integer, Nil, List("7")) _ <- putStrLn(s"Execution of cached script returns a pseudo-random number: $random") () <- cmd.scriptFlush _ <- putStrLn("Flushed all cached scripts!") List(exists2) <- cmd.scriptExists(shaRandom) _ <- putStrLn(s"Script is still cached on Redis: $exists2") } yield () } } }
Example 50
Source File: RedisHashesDemo.scala From redis4cats with Apache License 2.0 | 5 votes |
package dev.profunktor.redis4cats import cats.effect.{ IO, Resource } import dev.profunktor.redis4cats.algebra.HashCommands import dev.profunktor.redis4cats.effect.Log.NoOp._ object RedisHashesDemo extends LoggerIOApp { import Demo._ val program: IO[Unit] = { val testKey = "foo" val testField = "bar" val showResult: Option[String] => IO[Unit] = _.fold(putStrLn(s"Not found key: $testKey | field: $testField"))(s => putStrLn(s)) val commandsApi: Resource[IO, HashCommands[IO, String, String]] = Redis[IO].utf8(redisURI) commandsApi .use { cmd => for { x <- cmd.hGet(testKey, testField) _ <- showResult(x) _ <- cmd.hSet(testKey, testField, "some value") y <- cmd.hGet(testKey, testField) _ <- showResult(y) _ <- cmd.hSetNx(testKey, testField, "should not happen") w <- cmd.hGet(testKey, testField) _ <- showResult(w) _ <- cmd.hDel(testKey, testField) z <- cmd.hGet(testKey, testField) _ <- showResult(z) } yield () } } }
Example 51
Source File: RedisClusterTransactionsDemo.scala From redis4cats with Apache License 2.0 | 5 votes |
package dev.profunktor.redis4cats import cats.effect.{ IO, Resource } import cats.implicits._ import dev.profunktor.redis4cats.connection._ import dev.profunktor.redis4cats.effect.Log.NoOp._ import dev.profunktor.redis4cats.transactions._ object RedisClusterTransactionsDemo extends LoggerIOApp { import Demo._ val program: IO[Unit] = { val key1 = "test1" val showResult: String => Option[String] => IO[Unit] = key => _.fold(putStrLn(s"Not found key: $key"))(s => putStrLn(s)) val commandsApi: Resource[IO, (RedisClusterClient, RedisCommands[IO, String, String])] = for { uri <- Resource.liftF(RedisURI.make[IO](redisClusterURI)) client <- RedisClusterClient[IO](uri) redis <- Redis[IO].fromClusterClient(client, stringCodec) } yield client -> redis commandsApi .use { case (client, cmd) => val nodeCmdResource = for { _ <- Resource.liftF(cmd.set(key1, "empty")) nodeId <- Resource.liftF(RedisClusterClient.nodeId[IO](client, key1)) nodeCmd <- Redis[IO].fromClusterClientByNode(client, stringCodec, nodeId) } yield nodeCmd // Transactions are only supported on a single node val notAllowed: IO[Unit] = cmd.multi .bracket(_ => cmd.set(key1, "nope") >> cmd.exec.void)(_ => cmd.discard) .handleErrorWith { case e: OperationNotSupported => putStrLn(e) } .void notAllowed *> // Transaction runs in a single shard, where "key1" is stored nodeCmdResource.use { nodeCmd => val tx = RedisTransaction(nodeCmd) val getter = cmd.get(key1).flatTap(showResult(key1)) val tx1 = putStrLn(tx) //.run(cmd.set(key1, "foo")) getter *> tx1 *> getter.void } } } }
Example 52
Source File: RedisClusterStringsDemo.scala From redis4cats with Apache License 2.0 | 5 votes |
package dev.profunktor.redis4cats import cats.effect.{ IO, Resource } import dev.profunktor.redis4cats.algebra.StringCommands import dev.profunktor.redis4cats.effect.Log.NoOp._ object RedisClusterStringsDemo extends LoggerIOApp { import Demo._ val program: IO[Unit] = { val usernameKey = "test" val showResult: Option[String] => IO[Unit] = _.fold(putStrLn(s"Not found key: $usernameKey"))(s => putStrLn(s)) val commandsApi: Resource[IO, StringCommands[IO, String, String]] = Redis[IO].clusterUtf8(redisClusterURI) commandsApi .use { cmd => for { x <- cmd.get(usernameKey) _ <- showResult(x) _ <- cmd.set(usernameKey, "some value") y <- cmd.get(usernameKey) _ <- showResult(y) _ <- cmd.setNx(usernameKey, "should not happen") w <- cmd.get(usernameKey) _ <- showResult(w) } yield () } } }
Example 53
Source File: RedisSetsDemo.scala From redis4cats with Apache License 2.0 | 5 votes |
package dev.profunktor.redis4cats import cats.effect.{ IO, Resource } import dev.profunktor.redis4cats.algebra.SetCommands import dev.profunktor.redis4cats.effect.Log.NoOp._ object RedisSetsDemo extends LoggerIOApp { import Demo._ val program: IO[Unit] = { val testKey = "foos" val showResult: Set[String] => IO[Unit] = x => putStrLn(s"$testKey members: $x") val commandsApi: Resource[IO, SetCommands[IO, String, String]] = Redis[IO].utf8(redisURI) commandsApi .use { cmd => for { x <- cmd.sMembers(testKey) _ <- showResult(x) _ <- cmd.sAdd(testKey, "set value") y <- cmd.sMembers(testKey) _ <- showResult(y) _ <- cmd.sCard(testKey).flatMap(s => putStrLn(s"size: $s")) _ <- cmd.sRem("non-existing", "random") w <- cmd.sMembers(testKey) _ <- showResult(w) _ <- cmd.sRem(testKey, "set value") z <- cmd.sMembers(testKey) _ <- showResult(z) _ <- cmd.sCard(testKey).flatMap(s => putStrLn(s"size: $s")) } yield () } } }
Example 54
Source File: RedisKeysDemo.scala From redis4cats with Apache License 2.0 | 5 votes |
package dev.profunktor.redis4cats

import cats.effect.{ IO, Resource }
import dev.profunktor.redis4cats.algebra.{ KeyCommands, StringCommands }
import dev.profunktor.redis4cats.effect.Log.NoOp._

object RedisKeysDemo extends LoggerIOApp {

  import Demo._

  val program: IO[Unit] = {
    val usernameKey = "test"

    val showResult: Option[String] => IO[Unit] =
      _.fold(putStrLn(s"Not found key: $usernameKey"))(s => putStrLn(s))

    val commandsApi: Resource[IO, KeyCommands[IO, String] with StringCommands[IO, String, String]] =
      Redis[IO].utf8(redisURI)

    commandsApi
      .use { cmd =>
        for {
          x <- cmd.get(usernameKey)
          _ <- showResult(x)
          _ <- cmd.set(usernameKey, "some value")
          y <- cmd.get(usernameKey)
          _ <- showResult(y)
          _ <- cmd.setNx(usernameKey, "should not happen")
          w <- cmd.get(usernameKey)
          _ <- showResult(w)
          _ <- cmd.del(usernameKey)
          z <- cmd.get(usernameKey)
          _ <- showResult(z)
        } yield ()
      }
  }
}
Example 55
Source File: ZIOAutoAckConsumer.scala From fs2-rabbit with Apache License 2.0 | 5 votes |
package dev.profunktor.fs2rabbit.examples import dev.profunktor.fs2rabbit.config.Fs2RabbitConfig import dev.profunktor.fs2rabbit.interpreter.RabbitClient import cats.effect.{Blocker, Resource} import zio._ import zio.interop.catz._ import zio.interop.catz.implicits._ import dev.profunktor.fs2rabbit.resiliency.ResilientStream import java.util.concurrent.Executors object ZIOAutoAckConsumer extends CatsApp { val config = Fs2RabbitConfig( virtualHost = "/", host = "127.0.0.1", username = Some("guest"), password = Some("guest"), port = 5672, ssl = false, connectionTimeout = 3, requeueOnNack = false, requeueOnReject = false, internalQueueSize = Some(500) ) val blockerResource = Resource .make(Task(Executors.newCachedThreadPool()))(es => Task(es.shutdown())) .map(Blocker.liftExecutorService) override def run(args: List[String]): UIO[Int] = blockerResource .use { blocker => RabbitClient[Task](config, blocker).flatMap { client => ResilientStream .runF(new AutoAckConsumerDemo[Task](client).program) } } .run .map(_ => 0) }
Example 56
Source File: Connection.scala From fs2-rabbit with Apache License 2.0 | 5 votes |
package dev.profunktor.fs2rabbit.algebra import cats.data.NonEmptyList import cats.effect.{Resource, Sync} import cats.implicits._ import com.rabbitmq.client.{Address, ConnectionFactory, DefaultSaslConfig, SaslConfig} import dev.profunktor.fs2rabbit.config.Fs2RabbitConfig import dev.profunktor.fs2rabbit.effects.Log import dev.profunktor.fs2rabbit.javaConversion._ import dev.profunktor.fs2rabbit.model.{AMQPChannel, AMQPConnection, RabbitChannel, RabbitConnection} import javax.net.ssl.SSLContext object ConnectionResource { type ConnectionResource[F[_]] = Connection[Resource[F, ?]] def make[F[_]: Sync: Log]( conf: Fs2RabbitConfig, sslCtx: Option[SSLContext] = None, // Unlike SSLContext, SaslConfig is not optional because it is always set // by the underlying Java library, even if the user doesn't set it. saslConf: SaslConfig = DefaultSaslConfig.PLAIN ): F[Connection[Resource[F, ?]]] = Sync[F].delay { new Connection[Resource[F, ?]] { private[fs2rabbit] def mkConnectionFactory: F[(ConnectionFactory, NonEmptyList[Address])] = Sync[F].delay { val factory = new ConnectionFactory() val firstNode = conf.nodes.head factory.setHost(firstNode.host) factory.setPort(firstNode.port) factory.setVirtualHost(conf.virtualHost) factory.setConnectionTimeout(conf.connectionTimeout) factory.setAutomaticRecoveryEnabled(conf.automaticRecovery) if (conf.ssl) { sslCtx.fold(factory.useSslProtocol())(factory.useSslProtocol) } factory.setSaslConfig(saslConf) conf.username.foreach(factory.setUsername) conf.password.foreach(factory.setPassword) val addresses = conf.nodes.map(node => new Address(node.host, node.port)) (factory, addresses) } private[fs2rabbit] def acquireChannel(connection: AMQPConnection): F[AMQPChannel] = Sync[F] .delay(connection.value.createChannel) .flatTap(c => Log[F].info(s"Acquired channel: $c")) .map(RabbitChannel) private[fs2rabbit] val acquireConnection: F[AMQPConnection] = mkConnectionFactory.flatMap { case (factory, addresses) => Sync[F] .delay(factory.newConnection(addresses.toList.asJava)) .flatTap(c => Log[F].info(s"Acquired connection: $c")) .map(RabbitConnection) } override def createConnection: Resource[F, AMQPConnection] = Resource.make(acquireConnection) { case RabbitConnection(conn) => Log[F].info(s"Releasing connection: $conn previously acquired.") *> Sync[F].delay { if (conn.isOpen) conn.close() } } override def createChannel(connection: AMQPConnection): Resource[F, AMQPChannel] = Resource.make(acquireChannel(connection)) { case RabbitChannel(channel) => Sync[F].delay { if (channel.isOpen) channel.close() } } } } } trait Connection[F[_]] { def createConnection: F[AMQPConnection] def createChannel(connection: AMQPConnection): F[AMQPChannel] }
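A wiring sketch for the algebra above, assuming an implicit Log[IO] instance is available: acquire a connection, then a channel, as nested resources, so the channel always closes before the connection.

import cats.effect.{IO, Resource}
import dev.profunktor.fs2rabbit.config.Fs2RabbitConfig
import dev.profunktor.fs2rabbit.effects.Log
import dev.profunktor.fs2rabbit.model.AMQPChannel

def channel(conf: Fs2RabbitConfig)(implicit log: Log[IO]): Resource[IO, AMQPChannel] =
  for {
    connection <- Resource.liftF(ConnectionResource.make[IO](conf))
    conn       <- connection.createConnection
    chan       <- connection.createChannel(conn)
  } yield chan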
Example 57
Source File: OTag.scala From docspell with GNU General Public License v3.0 | 5 votes |
package docspell.backend.ops import cats.effect.{Effect, Resource} import cats.implicits._ import docspell.common.{AccountId, Ident} import docspell.store.records.{RTag, RTagItem} import docspell.store.{AddResult, Store} trait OTag[F[_]] { def findAll(account: AccountId, nameQuery: Option[String]): F[Vector[RTag]] def add(s: RTag): F[AddResult] def update(s: RTag): F[AddResult] def delete(id: Ident, collective: Ident): F[AddResult] def loadAll(ids: List[Ident]): F[Vector[RTag]] } object OTag { def apply[F[_]: Effect](store: Store[F]): Resource[F, OTag[F]] = Resource.pure[F, OTag[F]](new OTag[F] { def findAll(account: AccountId, nameQuery: Option[String]): F[Vector[RTag]] = store.transact(RTag.findAll(account.collective, nameQuery, _.name)) def add(t: RTag): F[AddResult] = { def insert = RTag.insert(t) def exists = RTag.existsByName(t) val msg = s"A tag '${t.name}' already exists" store.add(insert, exists).map(_.fold(identity, _.withMsg(msg), identity)) } def update(t: RTag): F[AddResult] = { def insert = RTag.update(t) def exists = RTag.existsByName(t) val msg = s"A tag '${t.name}' already exists" store.add(insert, exists).map(_.fold(identity, _.withMsg(msg), identity)) } def delete(id: Ident, collective: Ident): F[AddResult] = { val io = for { optTag <- RTag.findByIdAndCollective(id, collective) n0 <- optTag.traverse(t => RTagItem.deleteTag(t.tagId)) n1 <- optTag.traverse(t => RTag.delete(t.tagId, collective)) } yield n0.getOrElse(0) + n1.getOrElse(0) store.transact(io).attempt.map(AddResult.fromUpdate) } def loadAll(ids: List[Ident]): F[Vector[RTag]] = if (ids.isEmpty) Vector.empty.pure[F] else store.transact(RTag.findAllById(ids)) }) }
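A usage sketch; the Store[IO] and AccountId values are assumed to come from the application's wiring.

import cats.effect.IO

def listTags(store: Store[IO], account: AccountId): IO[Vector[RTag]] =
  OTag[IO](store).use(_.findAll(account, nameQuery = None))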
Example 58
Source File: ONode.scala From docspell with GNU General Public License v3.0 | 5 votes |
package docspell.backend.ops import cats.effect.{Effect, Resource} import cats.implicits._ import docspell.common.syntax.all._ import docspell.common.{Ident, LenientUri, NodeType} import docspell.store.Store import docspell.store.records.RNode import org.log4s._ trait ONode[F[_]] { def register(appId: Ident, nodeType: NodeType, uri: LenientUri): F[Unit] def unregister(appId: Ident): F[Unit] } object ONode { private[this] val logger = getLogger def apply[F[_]: Effect](store: Store[F]): Resource[F, ONode[F]] = Resource.pure[F, ONode[F]](new ONode[F] { def register(appId: Ident, nodeType: NodeType, uri: LenientUri): F[Unit] = for { node <- RNode(appId, nodeType, uri) _ <- logger.finfo(s"Registering node ${node.id.id}") _ <- store.transact(RNode.set(node)) } yield () def unregister(appId: Ident): F[Unit] = logger.finfo(s"Unregister app ${appId.id}") *> store.transact(RNode.delete(appId)).map(_ => ()) }) }
Example 59
Source File: OEquipment.scala From docspell with GNU General Public License v3.0 | 5 votes |
package docspell.backend.ops import cats.effect.{Effect, Resource} import cats.implicits._ import docspell.common.{AccountId, Ident} import docspell.store.records.{REquipment, RItem} import docspell.store.{AddResult, Store} trait OEquipment[F[_]] { def findAll(account: AccountId, nameQuery: Option[String]): F[Vector[REquipment]] def add(s: REquipment): F[AddResult] def update(s: REquipment): F[AddResult] def delete(id: Ident, collective: Ident): F[AddResult] } object OEquipment { def apply[F[_]: Effect](store: Store[F]): Resource[F, OEquipment[F]] = Resource.pure[F, OEquipment[F]](new OEquipment[F] { def findAll(account: AccountId, nameQuery: Option[String]): F[Vector[REquipment]] = store.transact(REquipment.findAll(account.collective, nameQuery, _.name)) def add(e: REquipment): F[AddResult] = { def insert = REquipment.insert(e) def exists = REquipment.existsByName(e.cid, e.name) val msg = s"An equipment '${e.name}' already exists" store.add(insert, exists).map(_.fold(identity, _.withMsg(msg), identity)) } def update(e: REquipment): F[AddResult] = { def insert = REquipment.update(e) def exists = REquipment.existsByName(e.cid, e.name) val msg = s"An equipment '${e.name}' already exists" store.add(insert, exists).map(_.fold(identity, _.withMsg(msg), identity)) } def delete(id: Ident, collective: Ident): F[AddResult] = { val io = for { n0 <- RItem.removeConcEquip(collective, id) n1 <- REquipment.delete(id, collective) } yield n0 + n1 store.transact(io).attempt.map(AddResult.fromUpdate) } }) }
Example 60
Source File: JobQueue.scala From docspell with GNU General Public License v3.0 | 5 votes |
package docspell.store.queue import cats.effect.{Effect, Resource} import cats.implicits._ import docspell.common._ import docspell.common.syntax.all._ import docspell.store.Store import docspell.store.queries.QJob import docspell.store.records.RJob import org.log4s._ trait JobQueue[F[_]] { def insertIfNew(job: RJob): F[Unit] def insertAll(jobs: Seq[RJob]): F[Unit] def nextJob( prio: Ident => F[Priority], worker: Ident, retryPause: Duration ): F[Option[RJob]] } object JobQueue { private[this] val logger = getLogger def apply[F[_]: Effect](store: Store[F]): Resource[F, JobQueue[F]] = Resource.pure[F, JobQueue[F]](new JobQueue[F] { def nextJob( prio: Ident => F[Priority], worker: Ident, retryPause: Duration ): F[Option[RJob]] = logger .ftrace("Select next job") *> QJob.takeNextJob(store)(prio, worker, retryPause) def insert(job: RJob): F[Unit] = store .transact(RJob.insert(job)) .flatMap { n => if (n != 1) Effect[F] .raiseError(new Exception(s"Inserting job failed. Update count: $n")) else ().pure[F] } def insertIfNew(job: RJob): F[Unit] = for { rj <- job.tracker match { case Some(tid) => store.transact(RJob.findNonFinalByTracker(tid)) case None => None.pure[F] } ret <- if (rj.isDefined) ().pure[F] else insert(job) } yield ret def insertAll(jobs: Seq[RJob]): F[Unit] = jobs.toList .traverse(j => insert(j).attempt) .map(_.foreach { case Right(()) => case Left(ex) => logger.error(ex)("Could not insert job. Skipping it.") }) }) }
Example 61
Source File: TestClient.scala From franklin with Apache License 2.0 | 5 votes |
package com.azavea.franklin.api import cats.effect.Resource import cats.effect.Sync import cats.implicits._ import com.azavea.franklin.api.services.{CollectionItemsService, CollectionsService} import com.azavea.stac4s.{StacCollection, StacItem} import eu.timepit.refined.auto._ import io.circe.syntax._ import org.http4s.circe.CirceEntityDecoder._ import org.http4s.circe.CirceEntityEncoder._ import org.http4s.implicits._ import org.http4s.{Method, Request, Uri} import java.net.URLEncoder import java.nio.charset.StandardCharsets class TestClient[F[_]: Sync]( collectionsService: CollectionsService[F], collectionItemsService: CollectionItemsService[F] ) { private def createCollection(collection: StacCollection): F[StacCollection] = collectionsService.routes.orNotFound.run( Request( method = Method.POST, uri = Uri.unsafeFromString("/collections") ).withEntity(collection.asJson) ) flatMap { _.as[StacCollection] } private def deleteCollection(collection: StacCollection): F[Unit] = { val encodedCollectionId = URLEncoder.encode(collection.id, StandardCharsets.UTF_8.toString) collectionsService.routes.orNotFound .run( Request( method = Method.DELETE, uri = Uri.unsafeFromString(s"/collections/$encodedCollectionId") ) ) .void } private def createItemInCollection(collection: StacCollection, item: StacItem): F[StacItem] = { val encodedCollectionId = URLEncoder.encode(collection.id, StandardCharsets.UTF_8.toString) collectionItemsService.routes.orNotFound.run( Request( method = Method.POST, uri = Uri.unsafeFromString(s"/collections/$encodedCollectionId/items") ).withEntity(item) ) flatMap { _.as[StacItem] } } private def deleteItemInCollection(collection: StacCollection, item: StacItem): F[Unit] = { val encodedCollectionId = URLEncoder.encode(collection.id, StandardCharsets.UTF_8.toString) val encodedItemId = URLEncoder.encode(item.id, StandardCharsets.UTF_8.toString) collectionItemsService.routes.orNotFound .run( Request( method = Method.DELETE, uri = Uri.unsafeFromString(s"/collections/$encodedCollectionId/items/$encodedItemId") ) ) .void } def getItemResource(collection: StacCollection, item: StacItem): Resource[F, StacItem] = Resource.make(createItemInCollection(collection, item.copy(collection = Some(collection.id))))( item => deleteItemInCollection(collection, item) ) def getCollectionResource(collection: StacCollection): Resource[F, StacCollection] = Resource.make(createCollection(collection))(collection => deleteCollection(collection)) def getCollectionItemResource( item: StacItem, collection: StacCollection ): Resource[F, (StacItem, StacCollection)] = (getItemResource(collection, item), getCollectionResource(collection)).tupled }
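A usage sketch with assumed test fixtures: composing the resource guarantees the created collection and item are deleted once the test body finishes, keeping tests isolated.

import cats.effect.IO

def withFixtures(client: TestClient[IO], item: StacItem, collection: StacCollection): IO[Unit] =
  client.getCollectionItemResource(item, collection).use {
    case (createdItem, createdCollection) =>
      IO(println(s"testing ${createdItem.id} in ${createdCollection.id}"))
  }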
Example 62
Source File: AbstractCommand.scala From nexus with Apache License 2.0 | 5 votes |
package ch.epfl.bluebrain.nexus.cli import cats.Parallel import cats.effect.{ConcurrentEffect, ContextShift, Resource, Timer} import cats.implicits._ import ch.epfl.bluebrain.nexus.cli.CliOpts._ import ch.epfl.bluebrain.nexus.cli.config.AppConfig import ch.epfl.bluebrain.nexus.cli.modules.config.ConfigModule import ch.epfl.bluebrain.nexus.cli.modules.influx.InfluxModule import ch.epfl.bluebrain.nexus.cli.modules.postgres.PostgresModule import com.monovore.decline.Opts import distage.{Injector, TagK} import izumi.distage.model.Locator import izumi.distage.model.definition.StandardAxis.Repo import izumi.distage.model.definition.{Activation, Module, ModuleDef} import izumi.distage.model.plan.Roots import izumi.distage.model.recursive.LocatorRef abstract class AbstractCommand[F[_]: TagK: Timer: ContextShift: Parallel](locatorOpt: Option[LocatorRef])(implicit F: ConcurrentEffect[F] ) { protected def locatorResource: Opts[Resource[F, Locator]] = locatorOpt match { case Some(value) => Opts(Resource.make(F.delay(value.get))(_ => F.unit)) case None => (envConfig.orNone, postgresConfig.orNone, influxConfig.orNone, token.orNone).mapN { case (e, p, i, t) => val res: Resource[F, Module] = Resource.make({ AppConfig.load[F](e, p, i, t).flatMap[Module] { case Left(err) => F.raiseError(err) case Right(value) => val effects = EffectModule[F] val cli = CliModule[F] val config = ConfigModule[F] val postgres = PostgresModule[F] val influx = InfluxModule[F] val modules = effects ++ cli ++ config ++ postgres ++ influx ++ new ModuleDef { make[AppConfig].from(value) } F.pure(modules) } })(_ => F.unit) res.flatMap { modules => Injector(Activation(Repo -> Repo.Prod)).produceF[F](modules, Roots.Everything).toCats } } } }
Example 63
Source File: AbstractInfluxSpec.scala From nexus with Apache License 2.0 | 5 votes |
package ch.epfl.bluebrain.nexus.cli.influx import cats.effect.{Blocker, IO, Resource} import ch.epfl.bluebrain.nexus.cli.{AbstractCliSpec, Console} import ch.epfl.bluebrain.nexus.cli.clients.InfluxClient import ch.epfl.bluebrain.nexus.cli.config.AppConfig import ch.epfl.bluebrain.nexus.cli.influx.InfluxDocker.InfluxHostConfig import izumi.distage.model.definition.{Module, ModuleDef} import org.http4s.client.blaze.BlazeClientBuilder import scala.concurrent.duration._ class AbstractInfluxSpec extends AbstractCliSpec { override protected def defaultModules: Module = { super.defaultModules ++ new InfluxDocker.Module[IO] } override def testModule: ModuleDef = new ModuleDef { make[AppConfig].fromEffect { host: InfluxHostConfig => copyConfigs.flatMap { case (envFile, _, influxFile) => AppConfig.load[IO](Some(envFile), influxConfigFile = Some(influxFile)).flatMap { case Left(value) => IO.raiseError(value) case Right(value) => val influxOffsetFile = influxFile.getParent.resolve("influx.offset") val cfg = value.copy(influx = value.influx.copy( endpoint = host.endpoint, offsetFile = influxOffsetFile, offsetSaveInterval = 100.milliseconds ) ) IO.pure(cfg) } } } make[InfluxClient[IO]].fromResource { (_: InfluxDocker.Container, cfg: AppConfig, blocker: Blocker, console: Console[IO]) => BlazeClientBuilder[IO](blocker.blockingContext).resource.flatMap { client => val influxClient = InfluxClient(client, cfg, console) waitForInfluxReady(influxClient).map(_ => influxClient) } } } private def waitForInfluxReady( client: InfluxClient[IO], maxDelay: FiniteDuration = 90.seconds ): Resource[IO, Unit] = { import retry.CatsEffect._ import retry.RetryPolicies._ import retry._ val policy = limitRetriesByCumulativeDelay[IO](maxDelay, constantDelay(5.second)) val healthIO = retryingOnAllErrors( policy = policy, onError = (_: Throwable, _) => IO.delay(println("Influx Container not ready, retrying...")) ) { client.health.map { case Left(err) => throw err case Right(_) => () } } Resource.liftF(healthIO) } }
Example 64
Source File: MongoClient.scala From fs2-mongodb with MIT License | 5 votes |
package org.lyranthe.fs2_mongodb import com.mongodb.async.client.{MongoClient, MongoClients} import com.mongodb.MongoClientSettings import cats.effect.{Resource, Sync} object Mongo { def fromUrl[F[_]](url: String)(implicit F: Sync[F]): Resource[F, MongoClient] = Resource.make(F.delay(MongoClients.create(url))){ client => F.delay(client.close()) } def fromSettings[F[_]](settings: MongoClientSettings)( implicit F: Sync[F]): Resource[F, MongoClient] = { Resource.make(F.delay(MongoClients.create(settings)))(client => F.delay(client.close())) } }
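A usage sketch (connection string and database name are illustrative); the client is closed when the resource is released.

import cats.effect.IO

val program: IO[Unit] =
  Mongo.fromUrl[IO]("mongodb://localhost:27017").use { client =>
    IO(println(s"opened database: ${client.getDatabase("test").getName}"))
  }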
Example 65
Source File: PostgresTransactor.scala From ticket-booking-aecor with Apache License 2.0 | 5 votes |
package ru.pavkin.booking.common.postgres import cats.effect.{Async, ContextShift, Resource} import doobie.hikari.HikariTransactor import doobie.util.ExecutionContexts import ru.pavkin.booking.config.PostgresConfig object PostgresTransactor { def transactor[F[_]]( config: PostgresConfig )(implicit F: Async[F], C: ContextShift[F]): Resource[F, HikariTransactor[F]] = for { ce <- ExecutionContexts.fixedThreadPool[F](32) te <- ExecutionContexts.cachedThreadPool[F] tr <- HikariTransactor.newHikariTransactor[F]( "org.postgresql.Driver", s"jdbc:postgresql://${config.contactPoints}:${config.port}/${config.database}", config.username, config.password, ce, te ) _ <- Resource.liftF(tr.configure(ds => F.delay(ds.setAutoCommit(false)))) } yield tr }
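A usage sketch, assuming an implicit ContextShift[IO] and a PostgresConfig value from the surrounding project: run a simple doobie query through the pooled transactor.

import cats.effect.IO
import doobie.implicits._

def answer(config: PostgresConfig): IO[Int] =
  PostgresTransactor.transactor[IO](config).use { xa =>
    sql"select 42".query[Int].unique.transact(xa)
  }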
Example 66
Source File: package.scala From odin with Apache License 2.0 | 5 votes |
package io.odin import io.odin.formatter.Formatter import _root_.monix.eval.Task import cats.effect.Resource import scala.concurrent.duration._ package object monix { def asyncFileLogger( fileName: String, formatter: Formatter = Formatter.default, timeWindow: FiniteDuration = 1.second, maxBufferSize: Option[Int] = None, minLevel: Level = Level.Trace ): Resource[Task, Logger[Task]] = io.odin.asyncFileLogger[Task](fileName, formatter, timeWindow, maxBufferSize, minLevel) }
Example 67
Source File: ConditionalLogger.scala From odin with Apache License 2.0 | 5 votes |
package io.odin.extras.loggers import cats.MonadError import cats.effect.{Concurrent, ContextShift, ExitCase, Resource, Timer} import cats.syntax.applicativeError._ import cats.syntax.flatMap._ import cats.syntax.functor._ import cats.syntax.order._ import io.odin.loggers.DefaultLogger import io.odin.{Level, Logger, LoggerMessage} import monix.catnap.ConcurrentQueue import monix.execution.{BufferCapacity, ChannelType} final case class ConditionalLogger[F[_]: Timer] private ( queue: ConcurrentQueue[F, LoggerMessage], inner: Logger[F], override val minLevel: Level )(implicit F: MonadError[F, Throwable]) extends DefaultLogger[F](minLevel) { def log(msg: LoggerMessage): F[Unit] = queue.tryOffer(msg).void private def drain(exitCase: ExitCase[Throwable]): F[Unit] = { val level = exitCase match { case ExitCase.Completed => inner.minLevel case _ => minLevel } queue .drain(0, Int.MaxValue) .flatMap(msgs => inner.log(msgs.filter(_.level >= level).toList)) .attempt .void } } object ConditionalLogger { def create[F[_]: Timer: Concurrent: ContextShift]( inner: Logger[F], minLevelOnError: Level, maxBufferSize: Option[Int] ): Resource[F, Logger[F]] = { val queueCapacity = maxBufferSize match { case Some(value) => BufferCapacity.Bounded(value) case None => BufferCapacity.Unbounded() } def acquire: F[ConditionalLogger[F]] = for { queue <- ConcurrentQueue.withConfig[F, LoggerMessage](queueCapacity, ChannelType.MPSC) } yield ConditionalLogger(queue, inner, minLevelOnError) def release(logger: ConditionalLogger[F], exitCase: ExitCase[Throwable]): F[Unit] = logger.drain(exitCase) Resource.makeCase(acquire)(release).widen } }
Example 68
Source File: FileLogger.scala From odin with Apache License 2.0 | 5 votes |
package io.odin.loggers import java.io.BufferedWriter import java.nio.file.{Files, Paths} import cats.effect.syntax.all._ import cats.effect.{Resource, Sync, Timer} import cats.instances.list._ import cats.syntax.all._ import io.odin.formatter.Formatter import io.odin.{Level, Logger, LoggerMessage} case class FileLogger[F[_]: Timer](buffer: BufferedWriter, formatter: Formatter, override val minLevel: Level)( implicit F: Sync[F] ) extends DefaultLogger[F](minLevel) { def log(msg: LoggerMessage): F[Unit] = write(msg, formatter).guarantee(flush) override def log(msgs: List[LoggerMessage]): F[Unit] = msgs.traverse(write(_, formatter)).void.guarantee(flush) private def write(msg: LoggerMessage, formatter: Formatter): F[Unit] = F.delay { buffer.write(formatter.format(msg) + System.lineSeparator()) } private def flush: F[Unit] = F.delay(buffer.flush()).handleErrorWith(_ => F.unit) } object FileLogger { def apply[F[_]: Timer](fileName: String, formatter: Formatter, minLevel: Level)( implicit F: Sync[F] ): Resource[F, Logger[F]] = { def mkBuffer: F[BufferedWriter] = F.delay(Files.newBufferedWriter(Paths.get(fileName))) def closeBuffer(buffer: BufferedWriter): F[Unit] = F.delay(buffer.close()).handleErrorWith(_ => F.unit) Resource.make(mkBuffer)(closeBuffer).map { buffer => FileLogger(buffer, formatter, minLevel) } } }
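A usage sketch (implicit Timer[IO] assumed; the file name is illustrative): messages below Level.Info are discarded, the rest are formatted, written, and flushed, and the writer is closed on release.

import cats.effect.IO
import cats.implicits._
import io.odin.Level
import io.odin.formatter.Formatter

val program: IO[Unit] =
  FileLogger[IO]("app.log", Formatter.default, Level.Info).use { logger =>
    logger.info("started") *> logger.error("something went wrong")
  }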
Example 69
Source File: AsyncLoggerSpec.scala From odin with Apache License 2.0 | 5 votes |
package io.odin.loggers import cats.effect.Resource import cats.effect.concurrent.Ref import cats.instances.list._ import cats.syntax.all._ import io.odin.{Level, Logger, LoggerMessage, OdinSpec} import monix.catnap.ConcurrentQueue import monix.eval.Task import monix.execution.schedulers.TestScheduler import io.odin.syntax._ import scala.concurrent.duration._ class AsyncLoggerSpec extends OdinSpec { implicit private val scheduler: TestScheduler = TestScheduler() case class RefLogger(ref: Ref[Task, List[LoggerMessage]]) extends DefaultLogger[Task] { def log(msg: LoggerMessage): Task[Unit] = Task.raiseError(new IllegalStateException("Async should always batch")) override def log(msgs: List[LoggerMessage]): Task[Unit] = { ref.update(_ ::: msgs) } } it should "push logs down the chain" in { forAll { msgs: List[LoggerMessage] => (for { ref <- Resource.liftF(Ref.of[Task, List[LoggerMessage]](List.empty)) logger <- RefLogger(ref).withMinimalLevel(Level.Trace).withAsync() _ <- Resource.liftF(msgs.traverse(logger.log)) _ = scheduler.tick(10.millis) reported <- Resource.liftF(ref.get) } yield { reported shouldBe msgs }).use(Task(_)).runSyncUnsafe() } } it should "push logs to the queue" in { forAll { msgs: List[LoggerMessage] => (for { queue <- ConcurrentQueue.unbounded[Task, LoggerMessage]() logger = AsyncLogger(queue, 1.millis, Logger.noop[Task]) _ <- msgs.traverse(logger.log) reported <- queue.drain(0, Int.MaxValue) } yield { reported shouldBe msgs }).runSyncUnsafe() } } it should "ignore errors in underlying logger" in { val errorLogger = new DefaultLogger[Task] { def log(msg: LoggerMessage): Task[Unit] = Task.raiseError(new Error) } forAll { msgs: List[LoggerMessage] => (for { queue <- ConcurrentQueue.unbounded[Task, LoggerMessage]() logger = AsyncLogger(queue, 1.millis, errorLogger) _ <- logger.log(msgs) result <- logger.drain } yield { result shouldBe (()) }).runSyncUnsafe() } } }
Example 70
Source File: FileLoggerSpec.scala From odin with Apache License 2.0 | 5 votes |
package io.odin.loggers import java.nio.file.{Files, Path, Paths} import java.util.UUID import cats.effect.Resource import io.odin._ import io.odin.formatter.Formatter import io.odin.{LoggerMessage, OdinSpec} import scala.concurrent.duration._ import monix.eval.Task import monix.execution.schedulers.TestScheduler class FileLoggerSpec extends OdinSpec { implicit private val scheduler: TestScheduler = TestScheduler() private val fileResource = Resource.make[Task, Path] { Task.delay(Files.createTempFile(UUID.randomUUID().toString, "")) } { file => Task.delay(Files.delete(file)) } it should "write formatted message into file" in { forAll { (loggerMessage: LoggerMessage, formatter: Formatter) => (for { path <- fileResource fileName = path.toString logger <- FileLogger[Task](fileName, formatter, Level.Trace) _ <- Resource.liftF(logger.log(loggerMessage)) } yield { new String(Files.readAllBytes(Paths.get(fileName))) shouldBe formatter.format(loggerMessage) + lineSeparator }).use(Task(_)) .runSyncUnsafe() } } it should "write formatted messages into file" in { forAll { (loggerMessage: List[LoggerMessage], formatter: Formatter) => (for { path <- fileResource fileName = path.toString logger <- FileLogger[Task](fileName, formatter, Level.Trace) _ <- Resource.liftF(logger.log(loggerMessage)) } yield { new String(Files.readAllBytes(Paths.get(fileName))) shouldBe loggerMessage .map(formatter.format) .mkString(lineSeparator) + (if (loggerMessage.isEmpty) "" else lineSeparator) }).use(Task(_)) .runSyncUnsafe() } } it should "write in async mode" in { forAll { (loggerMessage: List[LoggerMessage], formatter: Formatter) => (for { path <- fileResource fileName = path.toString logger <- asyncFileLogger[Task](fileName, formatter) _ <- Resource.liftF(logger.withMinimalLevel(Level.Trace).log(loggerMessage)) _ <- Resource.liftF(Task(scheduler.tick(2.seconds))) } yield { new String(Files.readAllBytes(Paths.get(fileName))) shouldBe loggerMessage .map(formatter.format) .mkString(lineSeparator) + (if (loggerMessage.isEmpty) "" else lineSeparator) }).use(Task(_)) .runSyncUnsafe() } } }
Example 71
Source File: ProducerOf.scala From skafka with MIT License | 5 votes |
package com.evolutiongaming.skafka.producer import cats.effect.{Bracket, ContextShift, Effect, Resource} import cats.{Defer, Monad, ~>} import com.evolutiongaming.smetrics.MeasureDuration import scala.concurrent.ExecutionContext trait ProducerOf[F[_]] { def apply(config: ProducerConfig): Resource[F, Producer[F]] } object ProducerOf { def apply[F[_] : Effect : ContextShift : MeasureDuration]( executorBlocking: ExecutionContext, metrics: Option[ProducerMetrics[F]] = None ): ProducerOf[F] = new ProducerOf[F] { def apply(config: ProducerConfig) = { for { producer <- Producer.of[F](config, executorBlocking) } yield { metrics.fold(producer)(producer.withMetrics[Throwable]) } } } implicit class ProducerOfOps[F[_]](val self: ProducerOf[F]) extends AnyVal { def mapK[G[_] : Monad : Defer]( fg: F ~> G, gf: G ~> F)(implicit B: Bracket[F, Throwable] ): ProducerOf[G] = new ProducerOf[G] { def apply(config: ProducerConfig) = { for { a <- self(config).mapK(fg) } yield { a.mapK(fg, gf) } } } } }
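A wiring sketch; the ProducerConfig.Default name is an assumption, and implicit Effect, ContextShift, and MeasureDuration instances for IO must be in scope.

import cats.effect.{IO, Resource}
import scala.concurrent.ExecutionContext

def producerResource(blocking: ExecutionContext): Resource[IO, Producer[IO]] =
  ProducerOf[IO](blocking, metrics = None)
    .apply(ProducerConfig.Default) // config value name assumed here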
Example 72
Source File: ConsumerOf.scala From skafka with MIT License | 5 votes |
package com.evolutiongaming.skafka.consumer import cats.effect.{Bracket, Concurrent, ContextShift, Resource} import cats.{Applicative, Defer, ~>} import com.evolutiongaming.catshelper.{ToFuture, ToTry} import com.evolutiongaming.skafka.FromBytes import com.evolutiongaming.smetrics.MeasureDuration import scala.concurrent.ExecutionContext trait ConsumerOf[F[_]] { def apply[K, V]( config: ConsumerConfig)(implicit fromBytesK: FromBytes[F, K], fromBytesV: FromBytes[F, V] ): Resource[F, Consumer[F, K, V]] } object ConsumerOf { def apply[F[_] : Concurrent : ContextShift : ToTry : ToFuture : MeasureDuration]( executorBlocking: ExecutionContext, metrics: Option[ConsumerMetrics[F]] = None ): ConsumerOf[F] = new ConsumerOf[F] { def apply[K, V]( config: ConsumerConfig)(implicit fromBytesK: FromBytes[F, K], fromBytesV: FromBytes[F, V] ) = { for { consumer <- Consumer.of[F, K, V](config, executorBlocking) } yield { metrics.fold(consumer)(consumer.withMetrics[Throwable]) } } } implicit class ConsumerOfOps[F[_]](val self: ConsumerOf[F]) extends AnyVal { def mapK[G[_] : Applicative : Defer]( fg: F ~> G, gf: G ~> F)(implicit B: Bracket[F, Throwable] ): ConsumerOf[G] = new ConsumerOf[G] { def apply[K, V]( config: ConsumerConfig)(implicit fromBytesK: FromBytes[G, K], fromBytesV: FromBytes[G, V] ) = { for { a <- self[K, V](config)(fromBytesK.mapK(gf), fromBytesV.mapK(gf)).mapK(fg) } yield { a.mapK(fg, gf) } } } } }
Example 73
Source File: CatsEffect.scala From cats-effect-testing with Apache License 2.0 | 5 votes |
package cats.effect.testing.specs2 import cats.effect.{Effect, Resource, Sync} import cats.effect.syntax.effect._ import org.specs2.execute.{AsResult, Failure, Result} import scala.concurrent.duration._ import scala.language.higherKinds trait CatsEffect { protected val Timeout: Duration = 10.seconds implicit def effectAsResult[F[_]: Effect, R](implicit R: AsResult[R]): AsResult[F[R]] = new AsResult[F[R]] { def asResult(t: => F[R]): Result = t.toIO.unsafeRunTimed(Timeout) .map(R.asResult(_)) .getOrElse(Failure(s"expectation timed out after $Timeout")) } implicit def resourceAsResult[F[_]: Effect, R](implicit R: AsResult[R]): AsResult[Resource[F,R]] = new AsResult[Resource[F,R]]{ def asResult(t: => Resource[F, R]): Result = t.use(r => Sync[F].delay(R.asResult(r))) .toIO .unsafeRunTimed(Timeout) .getOrElse(Failure(s"expectation timed out after $Timeout")) } }
Example 74
Source File: CatsEffectSpecs.scala From cats-effect-testing with Apache License 2.0 | 5 votes |
package cats.effect.testing.specs2 import cats.effect.{IO, Resource} import cats.effect.concurrent.{Ref, Deferred} import cats.implicits._ import org.specs2.mutable.Specification class CatsEffectSpecs extends Specification with CatsEffect { "cats effect specifications" should { "run a non-effectful test" in { true must beTrue } "run a simple effectful test" in IO { true must beTrue false must beFalse } "run a simple resource test" in { true must beTrue }.pure[Resource[IO, *]] "resource must be live for use" in { Resource.make(Ref[IO].of(true))(_.set(false)).map{ _.get.map(_ must beTrue) } } "really execute effects" in { "First, this check creates a deferred value.".br val deferredValue = Deferred.unsafeUncancelable[IO, Boolean] "Then it executes two mutually associated steps:".br.tab "forcibly attempt to get the deferred value" in { deferredValue.get.unsafeRunTimed(Timeout) must beSome(true) } "Since specs2 executes steps in parallel by default, the second step gets executed anyway.".br "complete the deferred value inside IO context" in { deferredValue.complete(true) *> IO.pure(success) } "If effects didn't get executed then the previous step would fail after timeout.".br } // "timeout a failing test" in (IO.never: IO[Boolean]) } }
Example 75
Source File: EmailNotifierIntegrationSpec.scala From vinyldns with Apache License 2.0 | 5 votes |
package vinyldns.api.notifier.email import com.typesafe.config.{Config, ConfigFactory} import vinyldns.core.notifier._ import vinyldns.api.MySqlApiIntegrationSpec import vinyldns.mysql.MySqlIntegrationSpec import org.scalatest.matchers.should.Matchers import org.scalatest.wordspec.AnyWordSpecLike import vinyldns.core.domain.batch._ import vinyldns.core.domain.record.RecordType import vinyldns.core.domain.record.AData import org.joda.time.DateTime import vinyldns.core.TestMembershipData._ import java.nio.file.{Files, Path, Paths} import cats.effect.{IO, Resource} import scala.collection.JavaConverters._ import org.scalatest.BeforeAndAfterEach import cats.implicits._ class EmailNotifierIntegrationSpec extends MySqlApiIntegrationSpec with MySqlIntegrationSpec with Matchers with AnyWordSpecLike with BeforeAndAfterEach { import vinyldns.api.domain.DomainValidations._ val emailConfig: Config = ConfigFactory.load().getConfig("vinyldns.email.settings") val targetDirectory = Paths.get("../../docker/email") override def beforeEach: Unit = deleteEmailFiles(targetDirectory).unsafeRunSync() override def afterEach: Unit = deleteEmailFiles(targetDirectory).unsafeRunSync() "Email Notifier" should { "send an email" in { val batchChange = BatchChange( okUser.id, okUser.userName, None, DateTime.now, List( SingleAddChange( Some("some-zone-id"), Some("zone-name"), Some("record-name"), "a" * HOST_MAX_LENGTH, RecordType.A, 300, AData("1.1.1.1"), SingleChangeStatus.Complete, None, None, None ) ), approvalStatus = BatchChangeApprovalStatus.AutoApproved ) val program = for { _ <- userRepository.save(okUser) notifier <- new EmailNotifierProvider() .load(NotifierConfig("", emailConfig), userRepository) _ <- notifier.notify(Notification(batchChange)) emailFiles <- retrieveEmailFiles(targetDirectory) } yield emailFiles val files = program.unsafeRunSync() files.length should be(1) } } def deleteEmailFiles(path: Path): IO[Unit] = for { files <- retrieveEmailFiles(path) _ <- files.traverse { file => IO(Files.delete(file)) } } yield () def retrieveEmailFiles(path: Path): IO[List[Path]] = Resource.fromAutoCloseable(IO(Files.newDirectoryStream(path, "*.eml"))).use { s => IO { s.iterator.asScala.toList } } }
Example 76
Source File: KafkaConsumer.scala From aecor with MIT License | 5 votes |
package aecor.kafkadistributedprocessing.internal import java.time.Duration import java.util.Properties import java.util.concurrent.Executors import cats.effect.{ Async, ContextShift, Resource } import cats.~> import org.apache.kafka.clients.consumer.{ Consumer, ConsumerRebalanceListener, ConsumerRecords } import org.apache.kafka.common.PartitionInfo import org.apache.kafka.common.serialization.Deserializer import scala.collection.JavaConverters._ import scala.concurrent.ExecutionContext import scala.concurrent.duration.FiniteDuration private[kafkadistributedprocessing] final class KafkaConsumer[F[_], K, V]( withConsumer: (Consumer[K, V] => *) ~> F ) { def subscribe(topics: Set[String], listener: ConsumerRebalanceListener): F[Unit] = withConsumer(_.subscribe(topics.asJava, listener)) def subscribe(topics: Set[String]): F[Unit] = withConsumer(_.subscribe(topics.asJava)) val unsubscribe: F[Unit] = withConsumer(_.unsubscribe()) def partitionsFor(topic: String): F[Set[PartitionInfo]] = withConsumer(_.partitionsFor(topic).asScala.toSet) def close: F[Unit] = withConsumer(_.close()) def poll(timeout: FiniteDuration): F[ConsumerRecords[K, V]] = withConsumer(_.poll(Duration.ofNanos(timeout.toNanos))) } private[kafkadistributedprocessing] object KafkaConsumer { final class Create[F[_]] { def apply[K, V]( config: Properties, keyDeserializer: Deserializer[K], valueDeserializer: Deserializer[V] )(implicit F: Async[F], contextShift: ContextShift[F]): Resource[F, KafkaConsumer[F, K, V]] = { val create = F.suspend { val executor = Executors.newSingleThreadExecutor() def eval[A](a: => A): F[A] = contextShift.evalOn(ExecutionContext.fromExecutor(executor)) { F.async[A] { cb => executor.execute(new Runnable { override def run(): Unit = cb { try Right(a) catch { case e: Throwable => Left(e) } } }) } } eval { val original = Thread.currentThread.getContextClassLoader Thread.currentThread.setContextClassLoader(null) val consumer = new org.apache.kafka.clients.consumer.KafkaConsumer[K, V]( config, keyDeserializer, valueDeserializer ) Thread.currentThread.setContextClassLoader(original) val withConsumer = new ((Consumer[K, V] => *) ~> F) { def apply[A](f: Consumer[K, V] => A): F[A] = eval(f(consumer)) } new KafkaConsumer[F, K, V](withConsumer) } } Resource.make(create)(_.close) } } def create[F[_]]: Create[F] = new Create[F] }
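The consumer is created inside Resource.make, so a consumer that was built is always closed, even if the subscribing code fails; everything else in the class is just routing calls onto the consumer's dedicated thread. Reduced to its essentials, the shape is (names illustrative):

import java.util.concurrent.{ExecutorService, Executors}
import cats.effect.{Resource, Sync}

// whatever happens inside `use`, an executor that was acquired is shut down
def singleThread[F[_]: Sync]: Resource[F, ExecutorService] =
  Resource.make(Sync[F].delay(Executors.newSingleThreadExecutor()))(es => Sync[F].delay(es.shutdown()))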
Example 77
package tofu.concurrent.syntax import cats.data.OptionT import cats.effect.Resource import cats.effect.concurrent.Ref import cats.{Functor, Monad} import tofu.BracketThrow import tofu.concurrent.impl.FocusedRef import tofu.optics.{Contains, PProperty} import tofu.syntax.monadic._ object ref { implicit final class TofuRefOps[F[_], A](private val self: Ref[F, A]) extends AnyVal { def focused[B](focus: A Contains B)(implicit F: Functor[F]): Ref[F, B] = FocusedRef(self, focus) def optimisticModifyRes[B, X, R](prop: PProperty[A, A, R, X])(init: => Resource[F, X])(f: X => R)(implicit F: BracketThrow[F] ): F[R] = OptionT(self.get.map(prop.downcast)).getOrElseF( init.use(x => self.modify(a => prop.downcast(a).fold((prop.set(a, x), f(x)))((a, _)))) ) } }
Example 78
Source File: Math2.scala From skunk with MIT License | 5 votes |
// Copyright (c) 2018-2020 by Rob Norris // This software is licensed under the MIT License (MIT). // For more information see LICENSE or https://opensource.org/licenses/MIT package example import cats.Monad import cats.effect.{ ExitCode, IO, IOApp, Resource } import skunk.Session import skunk.implicits._ import skunk.codec.numeric.{ int4, float8 } import natchez.Trace.Implicits.noop object Math2 extends IOApp { val session: Resource[IO, Session[IO]] = Session.single( host = "localhost", port = 5432, user = "jimmy", database = "world", password = Some("banana"), debug = true ) // An algebra for doing math. trait Math[F[_]] { def add(a: Int, b: Int): F[Int] def sqrt(d: Double): F[Double] } object Math { object Statements { val add = sql"select $int4 + $int4".query(int4) val sqrt = sql"select sqrt($float8)".query(float8) } def fromSession[F[_]: Monad](sess: Session[F]): Resource[F, Math[F]] = for { pAdd <- sess.prepare(Statements.add) pSqrt <- sess.prepare(Statements.sqrt) } yield new Math[F] { def add(a: Int, b: Int) = pAdd.unique(a ~ b) def sqrt(d: Double) = pSqrt.unique(d) } } def run(args: List[String]): IO[ExitCode] = session.flatMap(Math.fromSession(_)).use { m => for { n <- m.add(42, 71) d <- m.sqrt(2) d2 <- m.sqrt(42) _ <- IO(println(s"The answers were $n and $d and $d2")) } yield ExitCode.Success } }
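fromSession composes two prepared-statement Resources in one for-comprehension, so both are prepared when the outer Resource opens and released together, in reverse order, when use ends. The composition rule is generic; a sketch with printlns standing in for prepare/close:

import cats.effect.{IO, Resource}

def numbered(n: Int): Resource[IO, Int] =
  Resource.make(IO(println(s"acquire $n")).map(_ => n))(_ => IO(println(s"release $n")))

// prints: acquire 1, acquire 2, use 1 2, release 2, release 1
val demo: IO[Unit] =
  (for { a <- numbered(1); b <- numbered(2) } yield (a, b)).use {
    case (x, y) => IO(println(s"use $x $y"))
  }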
Example 79
Source File: Math1.scala From skunk with MIT License | 5 votes |
// Copyright (c) 2018-2020 by Rob Norris // This software is licensed under the MIT License (MIT). // For more information see LICENSE or https://opensource.org/licenses/MIT package example import cats.effect.{ Bracket, ExitCode, IO, IOApp, Resource } import skunk.Session import skunk.implicits._ import skunk.codec.numeric.{ int4, float8 } import natchez.Trace.Implicits.noop object Math1 extends IOApp { val session: Resource[IO, Session[IO]] = Session.single( host = "localhost", port = 5432, user = "jimmy", database = "world", password = Some("banana") ) // An algebra for doing math. trait Math[F[_]] { def add(a: Int, b: Int): F[Int] def sqrt(d: Double): F[Double] } object Math { object Statements { val add = sql"select $int4 + $int4".query(int4) val sqrt = sql"select sqrt($float8)".query(float8) } // `Math` implementation that delegates its work to Postgres. def fromSession[F[_]: Bracket[?[_], Throwable]](sess: Session[F]): Math[F] = new Math[F] { def add(a: Int, b: Int) = sess.prepare(Statements.add).use(_.unique(a ~ b)) def sqrt(d: Double) = sess.prepare(Statements.sqrt).use(_.unique(d)) } } def run(args: List[String]): IO[ExitCode] = session.map(Math.fromSession(_)).use { m => for { n <- m.add(42, 71) d <- m.sqrt(2) d2 <- m.sqrt(42) _ <- IO(println(s"The answers were $n and $d and $d2")) } yield ExitCode.Success } }
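Compare with Math2 above: here fromSession returns Math[F] directly and every call re-prepares its statement via sess.prepare(...).use, which is simpler but pays a prepare round-trip per operation, whereas Math2 prepares once and returns Resource[F, Math[F]].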
Example 80
Source File: SkunkTest.scala From skunk with MIT License | 5 votes |
// Copyright (c) 2018-2020 by Rob Norris // This software is licensed under the MIT License (MIT). // For more information see LICENSE or https://opensource.org/licenses/MIT package tests import cats.effect.{ IO, Resource } import cats.implicits._ import skunk.Session import skunk.data._ import skunk.codec.all._ import skunk.implicits._ import skunk.util.Typer import natchez.Trace.Implicits.noop abstract class SkunkTest(debug: Boolean = false, strategy: Typer.Strategy = Typer.Strategy.BuiltinsOnly) extends ffstest.FTest { val session: Resource[IO, Session[IO]] = Session.single( host = "localhost", port = 5432, user = "jimmy", database = "world", password = Some("banana"), strategy = strategy, debug = debug ) def sessionTest[A](name: String)(fa: Session[IO] => IO[A]): Unit = test(name)(session.use(fa)) implicit class SkunkTestSessionOps(s: Session[IO]) { def assertTransactionStatus(msg: String, xas: TransactionStatus): IO[Unit] = s.transactionStatus.get.flatMap(a => assert(s"$msg (expected $xas, got $a)", a === xas)) def assertHealthy: IO[Unit] = for { _ <- assertTransactionStatus("sanity check", TransactionStatus.Idle) n <- s.unique(sql"select 'SkunkTest Health Check'::varchar".query(varchar)) _ <- assert("sanity check", n == "SkunkTest Health Check") } yield () } }
Example 81
Source File: Prepare.scala From skunk with MIT License | 5 votes |
// Copyright (c) 2018-2020 by Rob Norris // This software is licensed under the MIT License (MIT). // For more information see LICENSE or https://opensource.org/licenses/MIT package skunk.net.protocol import cats.effect.Resource import cats.MonadError import skunk.~ import skunk.data.Completion import skunk.net.MessageSocket import skunk.net.Protocol.{ PreparedCommand, PreparedQuery, CommandPortal, QueryPortal } import skunk.util.{ Origin, Namer } import skunk.util.Typer import natchez.Trace trait Prepare[F[_]] { def apply[A](command: skunk.Command[A], ty: Typer): Resource[F, PreparedCommand[F, A]] def apply[A, B](query: skunk.Query[A, B], ty: Typer): Resource[F, PreparedQuery[F, A, B]] } object Prepare { def apply[F[_]: MonadError[?[_], Throwable]: Exchange: MessageSocket: Namer: Trace]: Prepare[F] = new Prepare[F] { override def apply[A](command: skunk.Command[A], ty: Typer): Resource[F, PreparedCommand[F, A]] = for { id <- Parse[F].apply(command, ty) _ <- Resource.liftF(Describe[F].apply(command, id, ty)) } yield new PreparedCommand[F, A](id, command) { pc => def bind(args: A, origin: Origin): Resource[F, CommandPortal[F, A]] = Bind[F].apply(this, args, origin).map { new CommandPortal[F, A](_, pc, args, origin) { val execute: F[Completion] = Execute[F].apply(this) } } } override def apply[A, B](query: skunk.Query[A, B], ty: Typer): Resource[F, PreparedQuery[F, A, B]] = for { id <- Parse[F].apply(query, ty) rd <- Resource.liftF(Describe[F].apply(query, id, ty)) } yield new PreparedQuery[F, A, B](id, query, rd) { pq => def bind(args: A, origin: Origin): Resource[F, QueryPortal[F, A, B]] = Bind[F].apply(this, args, origin).map { new QueryPortal[F, A, B](_, pq, args, origin) { def execute(maxRows: Int): F[List[B] ~ Boolean] = Execute[F].apply(this, maxRows, ty) } } } } }
Example 82
Source File: Bind.scala From skunk with MIT License | 5 votes |
// Copyright (c) 2018-2020 by Rob Norris // This software is licensed under the MIT License (MIT). // For more information see LICENSE or https://opensource.org/licenses/MIT package skunk.net.protocol import cats.effect.Resource import cats.implicits._ import cats.MonadError import skunk.exception.PostgresErrorException import skunk.net.message.{ Bind => BindMessage, Close => _, _ } import skunk.net.MessageSocket import skunk.net.Protocol.{ PreparedStatement, PortalId } import skunk.util.{ Origin, Namer } import natchez.Trace trait Bind[F[_]] { def apply[A]( statement: PreparedStatement[F, A], args: A, argsOrigin: Origin ): Resource[F, PortalId] } object Bind { def apply[F[_]: MonadError[?[_], Throwable]: Exchange: MessageSocket: Namer: Trace]: Bind[F] = new Bind[F] { override def apply[A]( statement: PreparedStatement[F, A], args: A, argsOrigin: Origin ): Resource[F, PortalId] = Resource.make { exchange("bind") { for { pn <- nextName("portal").map(PortalId) _ <- Trace[F].put( "arguments" -> args.toString, "portal-id" -> pn.value ) _ <- send(BindMessage(pn.value, statement.id.value, statement.statement.encoder.encode(args))) _ <- send(Flush) _ <- flatExpect { case BindComplete => ().pure[F] case ErrorResponse(info) => syncAndFail(statement, args, argsOrigin, info) } } yield pn } } { Close[F].apply } def syncAndFail[A]( statement: PreparedStatement[F, A], args: A, argsOrigin: Origin, info: Map[Char, String] ): F[Unit] = for { hi <- history(Int.MaxValue) _ <- send(Sync) _ <- expect { case ReadyForQuery(_) => } a <- PostgresErrorException.raiseError[F, Unit]( sql = statement.statement.sql, sqlOrigin = Some(statement.statement.origin), info = info, history = hi, arguments = statement.statement.encoder.types.zip(statement.statement.encoder.encode(args)), argumentsOrigin = Some(argsOrigin) ) } yield a } }
Example 83
Source File: Parse.scala From skunk with MIT License | 5 votes |
// Copyright (c) 2018-2020 by Rob Norris // This software is licensed under the MIT License (MIT). // For more information see LICENSE or https://opensource.org/licenses/MIT package skunk.net.protocol import cats.effect.Resource import cats.implicits._ import cats.MonadError import skunk.exception.PostgresErrorException import skunk.net.message.{ Parse => ParseMessage, Close => _, _ } import skunk.net.MessageSocket import skunk.net.Protocol.StatementId import skunk.Statement import skunk.util.Namer import skunk.util.Typer import skunk.exception.UnknownTypeException import natchez.Trace trait Parse[F[_]] { def apply[A](statement: Statement[A], ty: Typer): Resource[F, StatementId] } object Parse { def apply[F[_]: MonadError[?[_], Throwable]: Exchange: MessageSocket: Namer: Trace]: Parse[F] = new Parse[F] { override def apply[A](statement: Statement[A], ty: Typer): Resource[F, StatementId] = statement.encoder.oids(ty) match { case Right(os) => Resource.make { exchange("parse") { for { id <- nextName("statement").map(StatementId) _ <- Trace[F].put( "statement-name" -> id.value, "statement-sql" -> statement.sql, "statement-parameter-types" -> os.map(n => ty.typeForOid(n, -1).getOrElse(n)).mkString("[", ", ", "]") ) _ <- send(ParseMessage(id.value, statement.sql, os)) _ <- send(Flush) _ <- flatExpect { case ParseComplete => ().pure[F] case ErrorResponse(info) => syncAndFail(statement, info) } } yield id } } { Close[F].apply } case Left(err) => Resource.liftF(UnknownTypeException(statement, err).raiseError[F, StatementId]) } def syncAndFail(statement: Statement[_], info: Map[Char, String]): F[Unit] = for { hi <- history(Int.MaxValue) _ <- send(Sync) _ <- expect { case ReadyForQuery(_) => } a <- new PostgresErrorException( sql = statement.sql, sqlOrigin = Some(statement.origin), info = info, history = hi, ).raiseError[F, Unit] } yield a } }
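Parse mirrors Bind: acquire performs the wire exchange and yields a StatementId, release is Close[F].apply, and the unknown-type branch short-circuits with Resource.liftF over a failed effect. A lifted failure acquires nothing, so there is nothing to release (a minimal sketch):

import cats.effect.{IO, Resource}

// `use` fails immediately; no finalizer runs because nothing was acquired
val failed: Resource[IO, Int] =
  Resource.liftF(IO.raiseError[Int](new Exception("unknown type")))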
Example 84
Source File: DatabaseConfig.scala From scala-pet-store with Apache License 2.0 | 5 votes |
package io.github.pauljamescleary.petstore.config import cats.syntax.functor._ import cats.effect.{Async, Blocker, ContextShift, Resource, Sync} import doobie.hikari.HikariTransactor import org.flywaydb.core.Flyway import scala.concurrent.ExecutionContext case class DatabaseConnectionsConfig(poolSize: Int) case class DatabaseConfig( url: String, driver: String, user: String, password: String, connections: DatabaseConnectionsConfig, ) object DatabaseConfig { def dbTransactor[F[_]: Async: ContextShift]( dbc: DatabaseConfig, connEc: ExecutionContext, blocker: Blocker, ): Resource[F, HikariTransactor[F]] = HikariTransactor .newHikariTransactor[F](dbc.driver, dbc.url, dbc.user, dbc.password, connEc, blocker) def initializeDb[F[_]](cfg: DatabaseConfig)(implicit S: Sync[F]): F[Unit] = S.delay { val fw: Flyway = { Flyway .configure() .dataSource(cfg.url, cfg.user, cfg.password) .load() } fw.migrate() } .as(()) }
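dbTransactor yields a pooled transactor as a Resource, while initializeDb is a one-shot effect; a typical wiring runs the migration once the pool is open. A hedged sketch (parameter values are assumed to come from configuration):

import cats.effect.{Async, Blocker, ContextShift}
import scala.concurrent.ExecutionContext

// assumed wiring: run the Flyway migration once the pool is available
def setup[F[_]: Async: ContextShift](cfg: DatabaseConfig, connEc: ExecutionContext, blocker: Blocker): F[Unit] =
  DatabaseConfig.dbTransactor[F](cfg, connEc, blocker).use(_ => DatabaseConfig.initializeDb[F](cfg))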
Example 85
Source File: HttpClientMonixBackend.scala From sttp with Apache License 2.0 | 5 votes |
package sttp.client.httpclient.monix import java.io.InputStream import java.net.http.HttpRequest.BodyPublishers import java.net.http.{HttpClient, HttpRequest} import java.nio.ByteBuffer import cats.effect.Resource import monix.eval.Task import monix.execution.Scheduler import monix.reactive.Observable import org.reactivestreams.FlowAdapters import sttp.client.httpclient.HttpClientBackend.EncodingHandler import sttp.client.httpclient.{HttpClientAsyncBackend, HttpClientBackend, WebSocketHandler} import sttp.client.impl.monix.TaskMonadAsyncError import sttp.client.testing.SttpBackendStub import sttp.client.{FollowRedirectsBackend, SttpBackend, SttpBackendOptions} import scala.util.{Success, Try} class HttpClientMonixBackend private ( client: HttpClient, closeClient: Boolean, customizeRequest: HttpRequest => HttpRequest, customEncodingHandler: EncodingHandler )(implicit s: Scheduler) extends HttpClientAsyncBackend[Task, Observable[ByteBuffer]]( client, TaskMonadAsyncError, closeClient, customizeRequest, customEncodingHandler ) { override def streamToRequestBody(stream: Observable[ByteBuffer]): Task[HttpRequest.BodyPublisher] = { monad.eval(BodyPublishers.fromPublisher(FlowAdapters.toFlowPublisher(stream.toReactivePublisher))) } override def responseBodyToStream(responseBody: InputStream): Try[Observable[ByteBuffer]] = { Success( Observable .fromInputStream(Task.now(responseBody)) .map(ByteBuffer.wrap) .guaranteeCase(_ => Task(responseBody.close())) ) } } object HttpClientMonixBackend { private def apply( client: HttpClient, closeClient: Boolean, customizeRequest: HttpRequest => HttpRequest, customEncodingHandler: EncodingHandler )(implicit s: Scheduler ): SttpBackend[Task, Observable[ByteBuffer], WebSocketHandler] = new FollowRedirectsBackend( new HttpClientMonixBackend(client, closeClient, customizeRequest, customEncodingHandler)(s) ) def apply( options: SttpBackendOptions = SttpBackendOptions.Default, customizeRequest: HttpRequest => HttpRequest = identity, customEncodingHandler: EncodingHandler = PartialFunction.empty )(implicit s: Scheduler = Scheduler.global ): Task[SttpBackend[Task, Observable[ByteBuffer], WebSocketHandler]] = Task.eval( HttpClientMonixBackend( HttpClientBackend.defaultClient(options), closeClient = true, customizeRequest, customEncodingHandler )(s) ) def resource( options: SttpBackendOptions = SttpBackendOptions.Default, customizeRequest: HttpRequest => HttpRequest = identity, customEncodingHandler: EncodingHandler = PartialFunction.empty )(implicit s: Scheduler = Scheduler.global ): Resource[Task, SttpBackend[Task, Observable[ByteBuffer], WebSocketHandler]] = Resource.make(apply(options, customizeRequest, customEncodingHandler))(_.close()) def usingClient( client: HttpClient, customizeRequest: HttpRequest => HttpRequest = identity, customEncodingHandler: EncodingHandler = PartialFunction.empty )(implicit s: Scheduler = Scheduler.global): SttpBackend[Task, Observable[ByteBuffer], WebSocketHandler] = HttpClientMonixBackend(client, closeClient = false, customizeRequest, customEncodingHandler)(s) def stub: SttpBackendStub[Task, Observable[ByteBuffer], WebSocketHandler] = SttpBackendStub(TaskMonadAsyncError) }
Example 86
Source File: LightstepSpan.scala From natchez with MIT License | 5 votes |
// Copyright (c) 2019 by Rob Norris // This software is licensed under the MIT License (MIT). // For more information see LICENSE or https://opensource.org/licenses/MIT package natchez package lightstep import cats.effect.{ Resource, Sync } import cats.implicits._ import io.{ opentracing => ot } import io.opentracing.propagation.{ Format, TextMapAdapter } import scala.jdk.CollectionConverters._ private[lightstep] final case class LightstepSpan[F[_]: Sync]( tracer: ot.Tracer, span: ot.Span ) extends Span[F] { import TraceValue._ override def kernel: F[Kernel] = Sync[F].delay { val m = new java.util.HashMap[String, String] tracer.inject(span.context, Format.Builtin.HTTP_HEADERS, new TextMapAdapter(m)) Kernel(m.asScala.toMap) } override def put(fields: (String, TraceValue)*): F[Unit] = fields.toList.traverse_ { case (k, StringValue(v)) => Sync[F].delay(span.setTag(k, v)) case (k, NumberValue(v)) => Sync[F].delay(span.setTag(k, v)) case (k, BooleanValue(v)) => Sync[F].delay(span.setTag(k, v)) } override def span(name: String): Resource[F,Span[F]] = Resource .make(Sync[F].delay(tracer.buildSpan(name).asChildOf(span).start()))(s => Sync[F].delay(s.finish())) .map(LightstepSpan(tracer, _)) }
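Each child span is itself a Resource: acquire starts the span and release finishes it, so spans always close, in reverse nesting order, even when the traced code throws. The lifecycle shape, with println standing in for the tracer:

import cats.effect.{IO, Resource}

// started on acquire, finished on release, closed in reverse nesting order
def span(name: String): Resource[IO, String] =
  Resource.make(IO(println(s"start $name")).map(_ => name))(n => IO(println(s"finish $n")))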
Example 87
Source File: Lightstep.scala From natchez with MIT License | 5 votes |
// Copyright (c) 2019 by Rob Norris // This software is licensed under the MIT License (MIT). // For more information see LICENSE or https://opensource.org/licenses/MIT package natchez package lightstep import cats.effect.{ Resource, Sync } import cats.syntax.applicative._ import com.lightstep.tracer.shared.Options.OptionsBuilder import io.opentracing.Tracer import io.opentracing.propagation.{ Format, TextMapAdapter } import scala.jdk.CollectionConverters._ object Lightstep { def entryPoint[F[_]: Sync](configure: OptionsBuilder => F[Tracer]): Resource[F, EntryPoint[F]] = Resource.make(configure(new OptionsBuilder()))(t => Sync[F].delay(t.close())).map { t => new EntryPoint[F] { override def root(name: String): Resource[F, Span[F]] = Resource .make(Sync[F].delay(t.buildSpan(name).start()))(s => Sync[F].delay(s.finish())) .map(LightstepSpan(t, _)) override def continue(name: String, kernel: Kernel): Resource[F, Span[F]] = Resource.make( Sync[F].delay { val p = t.extract(Format.Builtin.HTTP_HEADERS, new TextMapAdapter(kernel.toHeaders.asJava)) t.buildSpan(name).asChildOf(p).start() } )(s => Sync[F].delay(s.finish())).map(LightstepSpan(t, _)) override def continueOrElseRoot(name: String, kernel: Kernel): Resource[F, Span[F]] = continue(name, kernel).flatMap { case null => root(name) case a => a.pure[Resource[F, *]] } } } }
Example 88
Source File: Honeycomb.scala From natchez with MIT License | 5 votes |
// Copyright (c) 2019 by Rob Norris // This software is licensed under the MIT License (MIT). // For more information see LICENSE or https://opensource.org/licenses/MIT package natchez package honeycomb import cats.effect.{ Resource, Sync } import cats.implicits._ import io.honeycomb.libhoney._ import io.honeycomb.libhoney.responses._ import org.slf4j.LoggerFactory import scala.jdk.CollectionConverters._ object Honeycomb { def entryPoint[F[_]: Sync]( service: String, responseObserver: ResponseObserver = DefaultResponseObserver )(f: Options.Builder => F[Options]): Resource[F, EntryPoint[F]] = Resource.make { for { b <- Sync[F].delay(LibHoney.options.setGlobalFields(Map("service_name" -> service).asJava)) o <- f(b) c <- Sync[F].delay(LibHoney.create(o)) _ <- Sync[F].delay(c.addResponseObserver(responseObserver)) } yield c } (c => Sync[F].delay(c.close)) map { c => new EntryPoint[F] { def continue(name: String, kernel: Kernel): Resource[F, Span[F]] = Resource.makeCase(HoneycombSpan.fromKernel(c, name, kernel))(HoneycombSpan.finish).widen def root(name: String): Resource[F, Span[F]] = Resource.makeCase(HoneycombSpan.root(c, name))(HoneycombSpan.finish).widen def continueOrElseRoot(name: String, kernel: Kernel): Resource[F,Span[F]] = Resource.makeCase(HoneycombSpan.fromKernelOrElseRoot(c, name, kernel))(HoneycombSpan.finish).widen } } // a minimal side-effecting observer val DefaultResponseObserver: ResponseObserver = new ResponseObserver { val log = LoggerFactory.getLogger("natchez.Honeycomb") def onServerAccepted(serverAccepted: ServerAccepted): Unit = () def onClientRejected(clientRejected: ClientRejected): Unit = log.warn( s"ResponseObserver: ClientRejected: ${clientRejected.getReason}", clientRejected.getException ) def onServerRejected(serverRejected: ServerRejected): Unit = log.warn( s"ResponseObserver: ServerRejected: ${serverRejected.getMessage}" ) def onUnknown(unknown: Unknown): Unit = log.warn( s"ResponseObserver: Unknown: ${unknown.getReason}", unknown.getException ) } }
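Note the use of Resource.makeCase for the spans: unlike make, the finalizer also receives the ExitCase, so a backend can record whether the span completed, failed, or was canceled (the Monix example further below relies on the same ExitCase distinction). A minimal sketch:

import cats.effect.{ExitCase, IO, Resource}

// the finalizer observes how `use` ended
val observed: Resource[IO, Unit] =
  Resource.makeCase(IO.unit) {
    case (_, ExitCase.Completed) => IO(println("completed"))
    case (_, ExitCase.Error(e))  => IO(println(s"error: ${e.getMessage}"))
    case (_, ExitCase.Canceled)  => IO(println("canceled"))
  }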
Example 89
Source File: OpenCensus.scala From natchez with MIT License | 5 votes |
// Copyright (c) 2019 by Rob Norris // This software is licensed under the MIT License (MIT). // For more information see LICENSE or https://opensource.org/licenses/MIT package natchez package opencensus import cats.effect.{Resource, Sync} import cats.syntax.functor._ import io.opencensus.exporter.trace.ocagent.{OcAgentTraceExporter, OcAgentTraceExporterConfiguration} import io.opencensus.trace.{Sampler, Tracing} object OpenCensus { def ocAgentEntryPoint[F[_]: Sync](system: String)( configure: OcAgentTraceExporterConfiguration.Builder => OcAgentTraceExporterConfiguration.Builder, sampler: Sampler): Resource[F, EntryPoint[F]] = Resource .make( Sync[F].delay( OcAgentTraceExporter.createAndRegister(configure( OcAgentTraceExporterConfiguration.builder().setServiceName(system)) .build())))(_ => Sync[F].delay( OcAgentTraceExporter.unregister() )) .flatMap(_ => Resource.liftF(entryPoint[F](sampler))) def entryPoint[F[_]: Sync](sampler: Sampler): F[EntryPoint[F]] = Sync[F] .delay(Tracing.getTracer) .map { t => new EntryPoint[F] { def continue(name: String, kernel: Kernel): Resource[F, Span[F]] = Resource.makeCase(OpenCensusSpan.fromKernel(t, name, kernel))(OpenCensusSpan.finish).widen def root(name: String): Resource[F, Span[F]] = Resource.makeCase(OpenCensusSpan.root(t, name, sampler))(OpenCensusSpan.finish).widen def continueOrElseRoot(name: String, kernel: Kernel): Resource[F,Span[F]] = Resource.makeCase(OpenCensusSpan.fromKernelOrElseRoot(t, name, kernel, sampler))(OpenCensusSpan.finish).widen } } }
Example 90
Source File: JaegerSpan.scala From natchez with MIT License | 5 votes |
// Copyright (c) 2019 by Rob Norris // This software is licensed under the MIT License (MIT). // For more information see LICENSE or https://opensource.org/licenses/MIT package natchez package jaeger import io.{ opentracing => ot } import cats.effect.Sync import cats.effect.Resource import cats.implicits._ import io.opentracing.propagation.Format import io.opentracing.propagation.TextMapAdapter import scala.jdk.CollectionConverters._ private[jaeger] final case class JaegerSpan[F[_]: Sync]( tracer: ot.Tracer, span: ot.Span ) extends Span[F] { import TraceValue._ def kernel: F[Kernel] = Sync[F].delay { val m = new java.util.HashMap[String, String] tracer.inject( span.context, Format.Builtin.HTTP_HEADERS, new TextMapAdapter(m) ) Kernel(m.asScala.toMap) } def put(fields: (String, TraceValue)*): F[Unit] = fields.toList.traverse_ { case (k, StringValue(v)) => Sync[F].delay(span.setTag(k, v)) case (k, NumberValue(v)) => Sync[F].delay(span.setTag(k, v)) case (k, BooleanValue(v)) => Sync[F].delay(span.setTag(k, v)) } def span(name: String): Resource[F,Span[F]] = Resource.make( Sync[F].delay(tracer.buildSpan(name).asChildOf(span).start))( s => Sync[F].delay(s.finish) ).map(JaegerSpan(tracer, _)) }
Example 91
Source File: CatsImplicitsSpec.scala From neotypes with MIT License | 5 votes |
package neotypes.cats.effect

import cats.{Applicative, Monad}
import cats.effect.{Async, IO, Resource}
import cats.effect.implicits._
import cats.implicits._
import neotypes.{BaseIntegrationSpec, Driver, Session}
import neotypes.cats.effect.implicits._
import neotypes.implicits.all._
import org.neo4j.driver.v1.exceptions.ClientException

final class CatsImplicitsSpec extends BaseIntegrationSpec[IO](IOTestkit) {
  it should "work with cats implicits and neotypes implicits" in {
    def test1[F[_]: Applicative]: F[Unit] = Applicative[F].unit
    def test2[F[_]: Monad]: F[Unit] = ().pure[F]

    def makeSession[F[_]: Async]: Resource[F, Session[F]] =
      Resource
        .make(Async[F].delay(new Driver[F](this.driver)))(_.close)
        .flatMap(_.session)

    def useSession[F[_]: Async]: F[String] =
      makeSession[F].use { s =>
        (test1[F] *> test2[F]).flatMap { _ =>
          """match (p:Person {name: "Charlize Theron"}) return p.name"""
            .query[String]
            .single(s)
        }
      }

    useSession[IO].unsafeToFuture().map { name =>
      assert(name == "Charlize Theron")
    }
  }

  override val initQuery: String = BaseIntegrationSpec.DEFAULT_INIT_QUERY
}
Example 92
Source File: Monix.scala From neotypes with MIT License | 5 votes |
package neotypes.monix import cats.effect.{ExitCase, Resource} import monix.eval.Task trait Monix { implicit final def monixAsync: neotypes.Async.Aux[Task, Monix.TaskResource] = Monix.instance } object Monix { private[neotypes] final type TaskResource[A] = Resource[Task, A] private final val instance: neotypes.Async.Aux[Task, TaskResource] = new neotypes.Async[Task] { override final type R[A] = Resource[Task, A] override final def async[T](cb: (Either[Throwable, T] => Unit) => Unit): Task[T] = Task.async(cb) override final def delay[A](t: => A): Task[A] = Task.delay(t) override final def failed[T](e: Throwable): Task[T] = Task.raiseError(e) override final def flatMap[T, U](m: Task[T])(f: T => Task[U]): Task[U] = m.flatMap(f) override final def guarantee[A, B](fa: Task[A]) (f: A => Task[B]) (finalizer: (A, Option[Throwable]) => Task[Unit]): Task[B] = Resource.makeCase(fa) { case (a, ExitCase.Completed | ExitCase.Canceled) => finalizer(a, None) case (a, ExitCase.Error(ex)) => finalizer(a, Some(ex)) }.use(f) override final def map[T, U](m: Task[T])(f: T => U): Task[U] = m.map(f) override final def recoverWith[T, U >: T](m: Task[T])(f: PartialFunction[Throwable, Task[U]]): Task[U] = m.onErrorRecoverWith(f) override final def resource[A](input: => A)(close: A => Task[Unit]): Resource[Task, A] = Resource.make(Task.delay(input))(close) } }
Example 93
Source File: ECBenchmark.scala From cats-effect with Apache License 2.0 | 5 votes |
package cats.effect.benchmarks import java.util.concurrent._ import cats.effect.{ExitCode, IO, IOApp, Resource, SyncIO} import cats.implicits._ import org.openjdk.jmh.annotations._ import scala.concurrent.ExecutionContext @State(Scope.Thread) @BenchmarkMode(Array(Mode.Throughput)) @OutputTimeUnit(TimeUnit.SECONDS) class ECBenchmark { trait Run { self: IOApp => val size = 100000 def run(args: List[String]) = { def loop(i: Int): IO[Int] = if (i < size) IO.shift.flatMap(_ => IO.pure(i + 1)).flatMap(loop) else IO.shift.flatMap(_ => IO.pure(i)) IO(0).flatMap(loop).as(ExitCode.Success) } } private val ioApp = new IOApp with Run private val ioAppCtx = new IOApp.WithContext with Run { protected def executionContextResource: Resource[SyncIO, ExecutionContext] = Resource.liftF(SyncIO.pure(ExecutionContext.Implicits.global)) } @Benchmark def app(): Unit = { val _ = ioApp.main(Array.empty) } @Benchmark def appWithCtx(): Unit = { val _ = ioAppCtx.main(Array.empty) } }
Example 94
Source File: JvmMonitoring.scala From datadog4s with MIT License | 5 votes |
package com.avast.datadog4s.extension.jvm import java.time.Duration import cats.effect.{ ConcurrentEffect, Resource, Sync, Timer } import com.avast.cloud.datadog4s.helpers.Repeated import com.avast.datadog4s.api.MetricFactory object JvmMonitoring { type ErrorHandler[F[_]] = Throwable => F[Unit] case class Config( delay: Duration = Duration.ofSeconds(60), timeout: Duration = Duration.ofSeconds(10) ) def default[F[_]: ConcurrentEffect: Timer](factory: MetricFactory[F]): Resource[F, Unit] = configured(factory, Config(), defaultErrorHandler) def configured[F[_]: ConcurrentEffect: Timer]( factory: MetricFactory[F], config: Config, errorHandler: ErrorHandler[F] ): Resource[F, Unit] = { val reporter = new JvmReporter[F](factory) Repeated.run[F](config.delay, config.timeout, errorHandler)(reporter.collect).map(_ => ()) } private def defaultErrorHandler[F[_]: Sync]: ErrorHandler[F] = err => Sync[F].delay { println(s"Error during metrics collection: ${err.getMessage}") err.printStackTrace() } }
Example 95
Source File: StatsDClient.scala From datadog4s with MIT License | 5 votes |
package com.avast.datadog4s.statsd import java.net.InetSocketAddress import cats.effect.{ Resource, Sync } import com.timgroup.statsd.{ NonBlockingStatsDClient, NonBlockingStatsDClientBuilder } object StatsDClient { def make[F[_]: Sync](statsDServer: InetSocketAddress, queueSize: Int): Resource[F, NonBlockingStatsDClient] = { val builder = new NonBlockingStatsDClientBuilder() .hostname(statsDServer.getHostName) .port(statsDServer.getPort) .queueSize(queueSize) .prefix("") fromBuilder(builder) } def fromBuilder[F[_]: Sync](builder: NonBlockingStatsDClientBuilder): Resource[F, NonBlockingStatsDClient] = Resource.fromAutoCloseable(Sync[F].delay(builder.build())) }
Example 96
Source File: Repeated.scala From datadog4s with MIT License | 5 votes |
package com.avast.cloud.datadog4s.helpers import java.time.Duration import cats.effect.{ Concurrent, Resource, Timer } import cats.syntax.applicativeError._ import cats.syntax.flatMap._ import cats.syntax.apply._ import cats.syntax.applicative._ import scala.concurrent.duration._ object Repeated { def run[F[_]: Concurrent: Timer]( delay: Duration, iterationTimeout: Duration, errorHandler: Throwable => F[Unit] )(task: F[Unit]): Resource[F, F[Unit]] = { val safeTask = Concurrent.timeout(task, toScala(iterationTimeout)).attempt.flatMap { case Right(a) => a.pure[F] case Left(e) => errorHandler(e) } val snooze = Timer[F].sleep(toScala(delay)) val process = (safeTask *> snooze).foreverM[Unit] Concurrent[F].background(process) } private def toScala(duration: Duration): FiniteDuration = duration.toMillis.millis }
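Concurrent[F].background is the key move: it returns Resource[F, F[A]] where the inner effect joins the fiber, and leaving use cancels the loop, which is what eventually stops the collection started by JvmMonitoring above. A reduced sketch:

import cats.effect.{Concurrent, ContextShift, IO}
import scala.concurrent.ExecutionContext

implicit val cs: ContextShift[IO] = IO.contextShift(ExecutionContext.global)

// the inner IO would join the fiber; leaving `use` cancels it automatically
val demo: IO[Unit] =
  Concurrent[IO].background(IO.never).use(_ => IO(println("worker is running")))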
Example 97
Source File: MkResource.scala From laserdisc with MIT License | 5 votes |
package laserdisc package fs2 import java.util.concurrent.TimeUnit.SECONDS import cats.effect.{Resource, Sync} import scala.concurrent.{ExecutionContext, ExecutionContextExecutorService} object MkResource { sealed trait CanShutdown[A] { def shutdown[F[_]](implicit F: Sync[F]): A => F[Unit] } final object CanShutdown { implicit val canShutdownExecutionContextExecutorService: CanShutdown[ExecutionContextExecutorService] = new CanShutdown[ExecutionContextExecutorService] { override def shutdown[F[_]](implicit F: Sync[F]): ExecutionContextExecutorService => F[Unit] = ec => F.delay { ec.shutdown() ec.awaitTermination(3, SECONDS) () } } } private[laserdisc] final def apply[F[_]: Sync, A](acquire: =>F[A])(implicit A: CanShutdown[A]): Resource[F, A] = Resource.make(acquire)(A.shutdown) @inline final def of[F[_]: Sync](fe: F[ExecutionContextExecutorService]): Resource[F, ExecutionContext] = MkResource(fe).widenRight[ExecutionContext] }
Example 98
Source File: RedisChannel.scala From laserdisc with MIT License | 5 votes |
package laserdisc package fs2 import java.net.InetSocketAddress import _root_.fs2._ import _root_.fs2.io.tcp.{Socket, SocketGroup} import cats.MonadError import cats.effect.{Blocker, Concurrent, ContextShift, Resource} import cats.syntax.flatMap._ import laserdisc.protocol._ import log.effect.LogWriter import scodec.Codec import scodec.stream.{StreamDecoder, StreamEncoder} import scala.concurrent.duration.FiniteDuration object RedisChannel { private[this] final val streamDecoder = StreamDecoder.many(Codec[RESP]) private[this] final val streamEncoder = StreamEncoder.many(Codec[RESP]) private[fs2] final def apply[F[_]: ContextShift: LogWriter: Concurrent]( address: InetSocketAddress, writeTimeout: Option[FiniteDuration], readMaxBytes: Int )(blocker: Blocker): Pipe[F, RESP, RESP] = { def connectedSocket: Resource[F, Socket[F]] = SocketGroup(blocker, nonBlockingThreadCount = 4) >>= (_.client(address, noDelay = true)) stream => Stream.resource(connectedSocket) >>= { socket => val send = stream.through(impl.send(socket.writes(writeTimeout))) val receive = socket.reads(readMaxBytes).through(impl.receiveResp) send.drain .covaryOutput[RESP] .mergeHaltBoth(receive) .onFinalizeWeak(socket.endOfOutput) } } private[this] final object impl { def send[F[_]: MonadError[*[_], Throwable]](socketChannel: Pipe[F, Byte, Unit])( implicit log: LogWriter[F] ): Pipe[F, RESP, Unit] = _.evalTap(resp => log.trace(s"sending $resp")) .through(streamEncoder.encode[F]) .flatMap(bits => Stream.chunk(Chunk.bytes(bits.toByteArray))) .through(socketChannel) def receiveResp[F[_]: MonadError[*[_], Throwable]](implicit log: LogWriter[F]): Pipe[F, Byte, RESP] = { def framing: Pipe[F, Byte, CompleteFrame] = { def loopScan(bytesIn: Stream[F, Byte], previous: RESPFrame): Pull[F, CompleteFrame, Unit] = bytesIn.pull.uncons.flatMap { case Some((chunk, rest)) => previous.append(chunk.toByteBuffer) match { case Left(ex) => Pull.raiseError(ex) case Right(frame: CompleteFrame) => Pull.output1(frame) >> loopScan(rest, EmptyFrame) case Right(frame: MoreThanOneFrame) => Pull.output(Chunk.vector(frame.complete)) >> { if (frame.remainder.isEmpty) loopScan(rest, EmptyFrame) else loopScan(rest, IncompleteFrame(frame.remainder, 0L)) } case Right(frame: IncompleteFrame) => loopScan(rest, frame) } case _ => Pull.done } bytesIn => loopScan(bytesIn, EmptyFrame).stream } pipeIn => streamDecoder .decode(pipeIn.through(framing) map (_.bits)) .evalTap(resp => log.trace(s"receiving $resp")) } } }
Example 99
Source File: AdserverApp.scala From scala-openrtb with Apache License 2.0 | 5 votes |
package com.powerspace.openrtb.examples.rtb.http4s.adserver import cats.effect.Resource import com.google.openrtb.{BidRequest, BidResponse} import com.powerspace.openrtb.examples.rtb.http4s.common.ExampleSerdeModule import io.circe.{Encoder, Json} import monix.eval.Task import org.http4s.client.Client import org.http4s.client.blaze.BlazeClientBuilder import scala.concurrent.duration.Duration object AdserverApp extends App { import monix.execution.Scheduler.Implicits.global val httpClient: Resource[Task, Client[Task]] = buildHttpClient() val potentialBidResponse = httpBid(httpClient) private val bidRequest = Adserver.buildBidRequest() potentialBidResponse .map(bidResponse => { bidResponse.foreach(br => println(buildAuctionString(br))) }) .runSyncUnsafe(Duration.Inf) private def buildHttpClient(): Resource[Task, Client[Task]] = { BlazeClientBuilder[Task](global).resource } private def httpBid(httpClient: Resource[Task, Client[Task]]) = httpClient.use(AdserverHttpClientBuilder.bid(_, bidRequest)) private def buildAuctionString(bidResponse: BidResponse) = { case class Auction(bidRequest: BidRequest, bidResponse: BidResponse) val auctionEncoder = new Encoder[Auction] { override def apply(auction: Auction): Json = Json.obj( ("request", ExampleSerdeModule.bidRequestEncoder.apply(auction.bidRequest)), ("response", ExampleSerdeModule.bidResponseEncoder.apply(auction.bidResponse)) ) } auctionEncoder(Auction(bidRequest, bidResponse)).toString() } }
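Because use brackets the whole client lifecycle, each call to httpBid here opens and closes a Blaze client. For several requests it is usually better to allocate once and reuse the client inside a single use block; a sketch reusing the names above (bid's exact return type is elided):

// sketch: allocate the client once and reuse it for several bids
val twoBids = buildHttpClient().use { client =>
  AdserverHttpClientBuilder.bid(client, bidRequest)
    .flatMap(_ => AdserverHttpClientBuilder.bid(client, bidRequest))
}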
Example 100
Source File: CatsInteropSpec.scala From interop-cats with Apache License 2.0 | 5 votes |
package zio.interop import cats.effect.{ Concurrent, Resource } import org.specs2.Specification import org.specs2.specification.AroundTimeout import zio.{ Promise, Runtime, Task } import zio.interop.catz._ class CatsInteropSpec extends Specification with AroundTimeout { def is = s2""" Resource cats fiber wrapped in Resource can be canceled $catsResourceInterruptible """ def catsResourceInterruptible = { val io = for { p <- Promise.make[Nothing, Int] resource = Resource.make(Concurrent[Task].start(p.succeed(1) *> Task.never))(_.cancel) _ <- resource.use(_ => p.await) } yield 0 Runtime.default.unsafeRun(io) must be_===(0) } }
Example 101
Source File: PlayServerTests.scala From tapir with Apache License 2.0 | 5 votes |
package sttp.tapir.server.play import akka.actor.ActorSystem import cats.data.NonEmptyList import cats.effect.{IO, Resource} import play.api.Mode import play.api.mvc.{Handler, RequestHeader} import play.api.routing.Router import play.api.routing.Router.Routes import play.core.server.{DefaultAkkaHttpServerComponents, ServerConfig} import sttp.tapir.Endpoint import sttp.tapir.server.tests.ServerTests import sttp.tapir.server.{DecodeFailureHandler, ServerDefaults, ServerEndpoint} import sttp.tapir.tests.{Port, PortCounter} import scala.concurrent.duration._ import scala.concurrent.ExecutionContext.Implicits.global import scala.concurrent.{Await, Future} import scala.reflect.ClassTag class PlayServerTests extends ServerTests[Future, Nothing, Router.Routes] { override def multipleValueHeaderSupport: Boolean = false override def multipartInlineHeaderSupport: Boolean = false override def streamingSupport: Boolean = false private implicit val actorSystem: ActorSystem = ActorSystem() override protected def afterAll(): Unit = { Await.result(actorSystem.terminate(), 5.seconds) super.afterAll() } override def pureResult[T](t: T): Future[T] = Future.successful(t) override def suspendResult[T](t: => T): Future[T] = Future(t) override def route[I, E, O]( e: ServerEndpoint[I, E, O, Nothing, Future], decodeFailureHandler: Option[DecodeFailureHandler] ): Routes = { implicit val serverOptions: PlayServerOptions = PlayServerOptions.default.copy(decodeFailureHandler = decodeFailureHandler.getOrElse(ServerDefaults.decodeFailureHandler)) e.toRoute } override def routeRecoverErrors[I, E <: Throwable, O](e: Endpoint[I, E, O, Nothing], fn: I => Future[O])(implicit eClassTag: ClassTag[E] ): Routes = { e.toRouteRecoverErrors(fn) } override def server(routes: NonEmptyList[Routes], port: Port): Resource[IO, Unit] = { val components = new DefaultAkkaHttpServerComponents { override lazy val serverConfig: ServerConfig = ServerConfig(port = Some(port), address = "127.0.0.1", mode = Mode.Test) override def router: Router = Router.from( routes.reduce((a: Routes, b: Routes) => { val handler: PartialFunction[RequestHeader, Handler] = { case request => a.applyOrElse(request, b) } handler }) ) } val bind = IO { components.server } Resource.make(bind)(s => IO(s.stop())).map(_ => ()) } override val portCounter: PortCounter = new PortCounter(38000) }
Example 102
Source File: VertxServerTests.scala From tapir with Apache License 2.0 | 5 votes |
package sttp.tapir.server.vertx import cats.data.NonEmptyList import cats.effect.{IO, Resource} import cats.implicits._ import io.vertx.lang.scala.VertxExecutionContext import io.vertx.scala.core.Vertx import io.vertx.scala.core.http.HttpServerOptions import io.vertx.scala.ext.web.{Route, Router} import org.scalatest.BeforeAndAfterEach import sttp.tapir._ import sttp.tapir.server.tests.ServerTests import sttp.tapir.server.{DecodeFailureHandler, ServerDefaults, ServerEndpoint} import sttp.tapir.tests.{Port, PortCounter} import scala.concurrent.Future import scala.reflect.ClassTag class VertxServerTests extends ServerTests[Future, String, Router => Route] with BeforeAndAfterEach { implicit val options: VertxEndpointOptions = VertxEndpointOptions() .logWhenHandled(true) .logAllDecodeFailures(true) override def multipartInlineHeaderSupport: Boolean = false // README: doesn't seem supported but I may be wrong protected var vertx: Vertx = _ override def beforeAll(): Unit = { super.beforeAll() vertx = Vertx.vertx() } override def afterAll(): Unit = { super.afterAll() vertx.close() } override def pureResult[T](t: T): Future[T] = Future.successful(t) override def suspendResult[T](t: => T): Future[T] = Future(t)(VertxExecutionContext(vertx.getOrCreateContext())) override def route[I, E, O]( e: ServerEndpoint[I, E, O, String, Future], decodeFailureHandler: Option[DecodeFailureHandler] ): Router => Route = e.route(options.copy(decodeFailureHandler.getOrElse(ServerDefaults.decodeFailureHandler))) override def routeRecoverErrors[I, E <: Throwable, O](e: Endpoint[I, E, O, String], fn: I => Future[O])(implicit eClassTag: ClassTag[E] ): Router => Route = e.routeRecoverErrors(fn) override def server(routes: NonEmptyList[Router => Route], port: Port): Resource[IO, Unit] = { val router = Router.router(vertx) val server = vertx.createHttpServer(HttpServerOptions().setPort(port)).requestHandler(router) val listenIO = IO.fromFuture(IO(server.listenFuture(port))) routes.toList.foreach(_.apply(router)) Resource.make(listenIO)(s => IO(s.closeFuture())).void } override lazy val portCounter: PortCounter = new PortCounter(54000) }
Example 103
Source File: AkkaHttpServerTests.scala From tapir with Apache License 2.0 | 5 votes |
package sttp.tapir.server.akkahttp import cats.implicits._ import akka.actor.ActorSystem import akka.http.scaladsl.Http import akka.http.scaladsl.server.Route import akka.http.scaladsl.server.Directives import akka.http.scaladsl.server.Directives._ import cats.data.NonEmptyList import cats.effect.{IO, Resource} import sttp.client._ import com.typesafe.scalalogging.StrictLogging import sttp.tapir.{Endpoint, endpoint, stringBody} import sttp.tapir.server.tests.ServerTests import sttp.tapir._ import sttp.tapir.server.{DecodeFailureHandler, ServerDefaults, ServerEndpoint} import sttp.tapir.tests.{Port, PortCounter} import scala.concurrent.duration._ import scala.concurrent.{Await, Future} import scala.reflect.ClassTag class AkkaHttpServerTests extends ServerTests[Future, AkkaStream, Route] with StrictLogging { private implicit var actorSystem: ActorSystem = _ override protected def beforeAll(): Unit = { super.beforeAll() actorSystem = ActorSystem() } override protected def afterAll(): Unit = { Await.result(actorSystem.terminate(), 5.seconds) super.afterAll() } override def route[I, E, O]( e: ServerEndpoint[I, E, O, AkkaStream, Future], decodeFailureHandler: Option[DecodeFailureHandler] = None ): Route = { implicit val serverOptions: AkkaHttpServerOptions = AkkaHttpServerOptions.default.copy( decodeFailureHandler = decodeFailureHandler.getOrElse(ServerDefaults.decodeFailureHandler) ) e.toRoute } override def routeRecoverErrors[I, E <: Throwable, O](e: Endpoint[I, E, O, AkkaStream], fn: I => Future[O])(implicit eClassTag: ClassTag[E] ): Route = { e.toRouteRecoverErrors(fn) } override def server(routes: NonEmptyList[Route], port: Port): Resource[IO, Unit] = { val bind = IO.fromFuture(IO(Http().bindAndHandle(routes.toList.reduce(_ ~ _), "localhost", port))) Resource.make(bind)(binding => IO.fromFuture(IO(binding.unbind())).void).void } override def pureResult[T](t: T): Future[T] = Future.successful(t) override def suspendResult[T](t: => T): Future[T] = { import scala.concurrent.ExecutionContext.Implicits.global Future { t } } override lazy val portCounter: PortCounter = new PortCounter(57000) if (testNameFilter.isEmpty) { test("endpoint nested in a path directive") { val e = endpoint.get.in("test" and "directive").out(stringBody).serverLogic(_ => pureResult("ok".asRight[Unit])) val port = portCounter.next() val route = Directives.pathPrefix("api")(e.toRoute) server(NonEmptyList.of(route), port).use { _ => basicRequest.get(uri"http://localhost:$port/api/test/directive").send().map(_.body shouldBe Right("ok")) }.unsafeRunSync } } }
Example 104
Source File: FinatraServerTests.scala From tapir with Apache License 2.0 | 5 votes |
package sttp.tapir.server.finatra import cats.data.NonEmptyList import cats.effect.{ContextShift, IO, Resource, Timer} import com.github.ghik.silencer.silent import com.twitter.finagle.http.Request import com.twitter.finatra.http.filters.{AccessLoggingFilter, ExceptionMappingFilter} import com.twitter.finatra.http.{Controller, EmbeddedHttpServer, HttpServer} import com.twitter.finatra.http.routing.HttpRouter import com.twitter.util.{Future, FuturePool} import sttp.tapir.Endpoint import sttp.tapir.server.{DecodeFailureHandler, ServerDefaults, ServerEndpoint} import sttp.tapir.server.tests.ServerTests import sttp.tapir.tests.{Port, PortCounter} import scala.concurrent.ExecutionContext import scala.reflect.ClassTag import scala.concurrent.duration._ class FinatraServerTests extends ServerTests[Future, Nothing, FinatraRoute] { override def streamingSupport: Boolean = false private val futurePool = FuturePool.unboundedPool implicit val ec: ExecutionContext = scala.concurrent.ExecutionContext.Implicits.global implicit val contextShift: ContextShift[IO] = IO.contextShift(ec) implicit val timer: Timer[IO] = IO.timer(ec) override def pureResult[T](t: T): Future[T] = Future.value(t) override def suspendResult[T](t: => T): Future[T] = futurePool { t } override def route[I, E, O]( e: ServerEndpoint[I, E, O, Nothing, Future], decodeFailureHandler: Option[DecodeFailureHandler] = None ): FinatraRoute = { implicit val serverOptions: FinatraServerOptions = FinatraServerOptions.default.copy(decodeFailureHandler = decodeFailureHandler.getOrElse(ServerDefaults.decodeFailureHandler)) e.toRoute } override def routeRecoverErrors[I, E <: Throwable, O](e: Endpoint[I, E, O, Nothing], fn: I => Future[O])(implicit eClassTag: ClassTag[E] ): FinatraRoute = { e.toRouteRecoverErrors(fn) } override def server(routes: NonEmptyList[FinatraRoute], port: Port): Resource[IO, Unit] = FinatraServerTests.server(routes, port) override lazy val portCounter: PortCounter = new PortCounter(58000) } object FinatraServerTests { def server(routes: NonEmptyList[FinatraRoute], port: Port)(implicit ioTimer: Timer[IO]): Resource[IO, Unit] = { def waitUntilHealthy(s: EmbeddedHttpServer, count: Int): IO[EmbeddedHttpServer] = if (s.isHealthy) IO.pure(s) else if (count > 1000) IO.raiseError(new IllegalStateException("Server unhealthy")) else IO.sleep(10.milliseconds).flatMap(_ => waitUntilHealthy(s, count + 1)) val bind = IO { class TestController extends Controller with TapirController { routes.toList.foreach(addTapirRoute) } class TestServer extends HttpServer { @silent("discarded") override protected def configureHttp(router: HttpRouter): Unit = { router .filter[AccessLoggingFilter[Request]] .filter[ExceptionMappingFilter[Request]] .add(new TestController) } } val server = new EmbeddedHttpServer( new TestServer, Map( "http.port" -> s":$port" ), // in the default implementation waitForWarmup suspends the thread for 1 second between healthy checks // we improve on that by checking every 10ms waitForWarmup = false ) server.start() server }.flatMap(waitUntilHealthy(_, 0)) Resource .make(bind)(httpServer => IO(httpServer.close())) .map(_ => ()) } }
Example 105
Source File: FinatraServerCatsTests.scala From tapir with Apache License 2.0 | 5 votes |
package sttp.tapir.server.finatra.cats import cats.data.NonEmptyList import cats.effect.{ContextShift, IO, Resource, Timer} import sttp.tapir.Endpoint import sttp.tapir.server.finatra.{FinatraRoute, FinatraServerOptions, FinatraServerTests} import sttp.tapir.server.tests.ServerTests import sttp.tapir.server.{DecodeFailureHandler, ServerDefaults, ServerEndpoint} import sttp.tapir.tests.{Port, PortCounter} import scala.concurrent.ExecutionContext import scala.reflect.ClassTag class FinatraServerCatsTests extends ServerTests[IO, Nothing, FinatraRoute] { override def streamingSupport: Boolean = false implicit val ec: ExecutionContext = scala.concurrent.ExecutionContext.Implicits.global implicit val contextShift: ContextShift[IO] = IO.contextShift(ec) implicit val timer: Timer[IO] = IO.timer(ec) override def pureResult[T](t: T): IO[T] = IO.pure(t) override def suspendResult[T](t: => T): IO[T] = IO.apply(t) override def route[I, E, O]( e: ServerEndpoint[I, E, O, Nothing, IO], decodeFailureHandler: Option[DecodeFailureHandler] = None ): FinatraRoute = { implicit val serverOptions: FinatraServerOptions = FinatraServerOptions.default.copy(decodeFailureHandler = decodeFailureHandler.getOrElse(ServerDefaults.decodeFailureHandler)) e.toRoute } override def routeRecoverErrors[I, E <: Throwable, O](e: Endpoint[I, E, O, Nothing], fn: I => IO[O])(implicit eClassTag: ClassTag[E] ): FinatraRoute = e.toRouteRecoverErrors(fn) override def server(routes: NonEmptyList[FinatraRoute], port: Port): Resource[IO, Unit] = FinatraServerTests.server(routes, port) override lazy val portCounter: PortCounter = new PortCounter(59000) }
Example 106
Source File: MySqlInvoiceList.scala From event-sourcing-kafka-streams with MIT License | 5 votes |
package org.amitayh.invoices.dao import cats.Monad import cats.effect.{Async, ContextShift, Resource} import cats.syntax.functor._ import doobie.free.connection.ConnectionIO import doobie.hikari.HikariTransactor import doobie.implicits._ import doobie.util.ExecutionContexts import doobie.util.transactor.Transactor class MySqlInvoiceList[F[_]: Monad](transactor: Transactor[F]) extends InvoiceList[F] { override def save(record: InvoiceRecord): F[Unit] = MySqlInvoiceList.save(record).transact(transactor) override def get: F[List[InvoiceRecord]] = MySqlInvoiceList.get.transact(transactor) } object MySqlInvoiceList { def save(record: InvoiceRecord): ConnectionIO[Unit] = { import record._ val sql = sql""" INSERT INTO invoices (id, version, updated_at, customer_name, customer_email, issue_date, due_date, total, status) VALUES ($id, $version, $updatedAt, $customerName, $customerEmail, $issueDate, $dueDate, $total, $status) ON DUPLICATE KEY UPDATE version = VALUES(version), updated_at = VALUES(updated_at), customer_name = VALUES(customer_name), customer_email = VALUES(customer_email), issue_date = VALUES(issue_date), due_date = VALUES(due_date), total = VALUES(total), status = VALUES(status) """ sql.update.run.void } def get: ConnectionIO[List[InvoiceRecord]] = { val sql = sql""" SELECT id, version, updated_at, customer_name, customer_email, issue_date, due_date, total, status FROM invoices WHERE status IN ('New', 'Paid') ORDER BY created_at DESC """ sql.query[InvoiceRecord].to[List] } def resource[F[_]: Async: ContextShift]: Resource[F, MySqlInvoiceList[F]] = for { connectEC <- ExecutionContexts.fixedThreadPool[F](32) transactEC <- ExecutionContexts.cachedThreadPool[F] transactor <- HikariTransactor.newHikariTransactor[F]( driverClassName = sys.env("DB_DRIVER"), url = sys.env("DB_URL"), user = sys.env("DB_USER"), pass = sys.env("DB_PASS"), connectEC = connectEC, transactEC = transactEC) } yield new MySqlInvoiceList[F](transactor) }
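resource composes three Resources: a fixed connect pool, a cached transact pool, and the Hikari transactor built on top of them, so a single use brackets the lifetime of all three. A usage sketch (assumes the DB_* environment variables used above are set):

import cats.effect.{ContextShift, IO}
import scala.concurrent.ExecutionContext

implicit val cs: ContextShift[IO] = IO.contextShift(ExecutionContext.global)

// both thread pools and the Hikari pool are shut down when `use` returns
val records: IO[List[InvoiceRecord]] = MySqlInvoiceList.resource[IO].use(_.get)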
Example 107
Source File: Main.scala From scala-server-toolkit with MIT License | 5 votes |
package com.avast.sst.example import java.util.concurrent.TimeUnit import cats.effect.{Clock, Resource} import com.avast.sst.bundle.ZioServerApp import com.avast.sst.doobie.DoobieHikariModule import com.avast.sst.example.config.Configuration import com.avast.sst.example.module.Http4sRoutingModule import com.avast.sst.example.service.RandomService import com.avast.sst.http4s.client.Http4sBlazeClientModule import com.avast.sst.http4s.client.monix.catnap.Http4sClientCircuitBreakerModule import com.avast.sst.http4s.server.Http4sBlazeServerModule import com.avast.sst.http4s.server.micrometer.MicrometerHttp4sServerMetricsModule import com.avast.sst.jvm.execution.ConfigurableThreadFactory.Config import com.avast.sst.jvm.execution.{ConfigurableThreadFactory, ExecutorModule} import com.avast.sst.jvm.micrometer.MicrometerJvmModule import com.avast.sst.jvm.system.console.{Console, ConsoleModule} import com.avast.sst.micrometer.jmx.MicrometerJmxModule import com.avast.sst.monix.catnap.CircuitBreakerModule import com.avast.sst.monix.catnap.CircuitBreakerModule.{withLogging, withMetrics} import com.avast.sst.monix.catnap.micrometer.MicrometerCircuitBreakerMetricsModule import com.avast.sst.pureconfig.PureConfigModule import com.zaxxer.hikari.metrics.micrometer.MicrometerMetricsTrackerFactory import org.http4s.server.Server import zio.Task import zio.interop.catz._ import zio.interop.catz.implicits._ import scala.concurrent.ExecutionContext object Main extends ZioServerApp { def program: Resource[Task, Server[Task]] = { for { configuration <- Resource.liftF(PureConfigModule.makeOrRaise[Task, Configuration]) executorModule <- ExecutorModule.makeFromExecutionContext[Task](runtime.platform.executor.asEC) clock = Clock.create[Task] currentTime <- Resource.liftF(clock.realTime(TimeUnit.MILLISECONDS)) console <- Resource.pure[Task, Console[Task]](ConsoleModule.make[Task]) _ <- Resource.liftF( console.printLine(s"The current Unix epoch time is $currentTime. This system has ${executorModule.numOfCpus} CPUs.") ) meterRegistry <- MicrometerJmxModule.make[Task](configuration.jmx) _ <- Resource.liftF(MicrometerJvmModule.make[Task](meterRegistry)) serverMetricsModule <- Resource.liftF(MicrometerHttp4sServerMetricsModule.make[Task](meterRegistry, clock)) boundedConnectExecutionContext <- executorModule .makeThreadPoolExecutor( configuration.boundedConnectExecutor, new ConfigurableThreadFactory(Config(Some("hikari-connect-%02d"))) ) .map(ExecutionContext.fromExecutorService) hikariMetricsFactory = new MicrometerMetricsTrackerFactory(meterRegistry) doobieTransactor <- DoobieHikariModule .make[Task](configuration.database, boundedConnectExecutionContext, executorModule.blocker, Some(hikariMetricsFactory)) randomService = RandomService(doobieTransactor) httpClient <- Http4sBlazeClientModule.make[Task](configuration.client, executorModule.executionContext) circuitBreakerMetrics <- Resource.liftF(MicrometerCircuitBreakerMetricsModule.make[Task]("test-http-client", meterRegistry)) circuitBreaker <- Resource.liftF(CircuitBreakerModule[Task].make(configuration.circuitBreaker, clock)) enrichedCircuitBreaker = withLogging("test-http-client", withMetrics(circuitBreakerMetrics, circuitBreaker)) client = Http4sClientCircuitBreakerModule.make[Task](httpClient, enrichedCircuitBreaker) routingModule = new Http4sRoutingModule(randomService, client, serverMetricsModule) server <- Http4sBlazeServerModule.make[Task](configuration.server, routingModule.router, executorModule.executionContext) } yield server } }
Example 108
Source File: GrpcServerModule.scala From scala-server-toolkit with MIT License | 5 votes |
package com.avast.sst.grpc.server import java.util.concurrent.TimeUnit import cats.effect.{Resource, Sync} import io.grpc.{Server, ServerBuilder, ServerInterceptor, ServerServiceDefinition} import scala.collection.immutable.Seq import scala.concurrent.ExecutionContext object GrpcServerModule { def make[F[_]: Sync]( config: GrpcServerConfig, services: Seq[ServerServiceDefinition], executionContext: ExecutionContext, interceptors: Seq[ServerInterceptor] = List.empty ): Resource[F, Server] = Resource.make { Sync[F].delay { val builder = ServerBuilder .forPort(config.port) .handshakeTimeout(config.handshakeTimeout.toMillis, TimeUnit.MILLISECONDS) .maxInboundMessageSize(config.maxInboundMessageSize) .maxInboundMetadataSize(config.maxInboundMetadataSize) .executor(executionContext.execute) services.foreach(builder.addService) interceptors.foreach(builder.intercept) builder.build.start() } } { s => Sync[F].delay { s.shutdown().awaitTermination(config.serverShutdownTimeout.toMillis, TimeUnit.MILLISECONDS) () } } }
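The server is started inside acquire and the release action performs a graceful shutdown bounded by serverShutdownTimeout. A typical long-running usage holds the Resource open until the effect is canceled (config, services and executionContext are assumed to be defined):

import cats.effect.IO

// sketch: keep serving until the surrounding effect is canceled
val run: IO[Nothing] =
  GrpcServerModule.make[IO](config, services, executionContext).use(_ => IO.never)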
Example 109
Source File: Http4sClientCircuitBreakerModule.scala From scala-server-toolkit with MIT License | 5 votes |
package com.avast.sst.http4s.client.monix.catnap import cats.effect.{Resource, Sync} import cats.syntax.applicativeError._ import cats.syntax.flatMap._ import monix.catnap.CircuitBreaker import org.http4s.Response import org.http4s.client.Client object Http4sClientCircuitBreakerModule { def make[F[_]: Sync]( client: Client[F], circuitBreaker: CircuitBreaker[F], httpStatusClassifier: HttpStatusClassifier = HttpStatusClassifier.default ): Client[F] = { val F = Sync[F] class ServerFailure(val response: Response[F], val close: F[Unit]) extends Exception Client[F] { request => val raisedInternal = client.run(request).allocated.flatMap { case tuple @ (response, _) if !httpStatusClassifier.isServerFailure(response.status) => F.pure(tuple) case (response, close) => F.raiseError[(Response[F], F[Unit])](new ServerFailure(response, close)) } val lifted = circuitBreaker.protect(raisedInternal).recover { case serverFailure: ServerFailure => (serverFailure.response, serverFailure.close) } Resource(lifted) } } }
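The allocated call is the subtle part: it splits the response Resource into the Response and its finalizer so the circuit breaker can inspect the status without closing the body early, and Resource(lifted) reassembles the pair afterwards. In isolation:

import cats.effect.{IO, Resource}

// allocated splits a Resource into its value and finalizer;
// Resource(...) reassembles the pair, restoring normal release semantics
val original: Resource[IO, Int] = Resource.make(IO.pure(42))(_ => IO(println("closed")))
val reassembled: Resource[IO, Int] = Resource(original.allocated)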
Example 110
Source File: Http4sBlazeClientModule.scala From scala-server-toolkit with MIT License | 5 votes |
package com.avast.sst.http4s.client

import cats.effect.{ConcurrentEffect, Resource}
import javax.net.ssl.SSLContext
import org.http4s.client.Client
import org.http4s.client.blaze.BlazeClientBuilder

import scala.concurrent.ExecutionContext

object Http4sBlazeClientModule {

  def make[F[_]: ConcurrentEffect](
      config: Http4sBlazeClientConfig,
      executionContext: ExecutionContext,
      sslContext: Option[SSLContext] = None
  ): Resource[F, Client[F]] = {
    val builder = BlazeClientBuilder[F](executionContext)
      .withResponseHeaderTimeout(config.responseHeaderTimeout)
      .withIdleTimeout(config.idleTimeout)
      .withRequestTimeout(config.requestTimeout)
      .withConnectTimeout(config.connectTimeout)
      .withUserAgent(config.userAgent)
      .withMaxTotalConnections(config.maxTotalConnections)
      .withMaxWaitQueueLimit(config.maxWaitQueueLimit)
      .withMaxConnectionsPerRequestKey(Function.const(config.maxConnectionsPerRequestkey))
      .withCheckEndpointAuthentication(config.checkEndpointIdentification)
      .withMaxResponseLineSize(config.maxResponseLineSize)
      .withMaxHeaderLength(config.maxHeaderLength)
      .withMaxChunkSize(config.maxChunkSize)
      .withChunkBufferMaxSize(config.chunkBufferMaxSize)
      .withParserMode(config.parserMode)
      .withBufferSize(config.bufferSize)

    sslContext.map(builder.withSslContext).getOrElse(builder).resource
  }

}
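A short usage sketch; Http4sBlazeClientConfig() assumes the config case class defaults all of its fields, which may not hold, so adjust to your configuration loading (e.g. PureConfig, as in the Main example above):

import cats.effect.{ExitCode, IO, IOApp}
import scala.concurrent.ExecutionContext

object HttpClientExample extends IOApp {
  def run(args: List[String]): IO[ExitCode] =
    Http4sBlazeClientModule
      .make[IO](Http4sBlazeClientConfig(), ExecutionContext.global) // assumes defaults exist
      .use(client => client.expect[String]("https://example.com"))
      .map(_ => ExitCode.Success)
}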
Example 111
Source File: Http4sBlazeServerModule.scala From scala-server-toolkit with MIT License | 5 votes |
package com.avast.sst.http4s.server

import java.net.{InetSocketAddress, StandardSocketOptions}

import cats.effect.{ConcurrentEffect, Resource, Timer}
import org.http4s.HttpApp
import org.http4s.server.Server
import org.http4s.server.blaze.BlazeServerBuilder

import scala.concurrent.ExecutionContext
import scala.concurrent.duration.Duration

object Http4sBlazeServerModule {

  def make[F[_]: ConcurrentEffect: Timer](
      config: Http4sBlazeServerConfig,
      httpApp: HttpApp[F],
      executionContext: ExecutionContext
  ): Resource[F, Server[F]] = {
    for {
      inetSocketAddress <- Resource.liftF(
        ConcurrentEffect[F].delay(
          InetSocketAddress.createUnresolved(config.listenAddress, config.listenPort)
        )
      )
      server <- BlazeServerBuilder[F](executionContext)
        .bindSocketAddress(inetSocketAddress)
        .withHttpApp(httpApp)
        .withoutBanner
        .withNio2(config.nio2Enabled)
        .withWebSockets(config.webSocketsEnabled)
        .enableHttp2(config.http2Enabled)
        .withResponseHeaderTimeout(Duration.fromNanos(config.responseHeaderTimeout.toNanos))
        .withIdleTimeout(Duration.fromNanos(config.idleTimeout.toNanos))
        .withBufferSize(config.bufferSize)
        .withMaxRequestLineLength(config.maxRequestLineLength)
        .withMaxHeadersLength(config.maxHeadersLength)
        .withChunkBufferMaxSize(config.chunkBufferMaxSize)
        .withConnectorPoolSize(config.connectorPoolSize)
        .withChannelOption[java.lang.Boolean](StandardSocketOptions.TCP_NODELAY, config.socketOptions.tcpNoDelay)
        .resource
    } yield server
  }

}
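A sketch of serving a trivial route with the module; the Http4sBlazeServerConfig constructor arguments (listen address and port) are assumptions based on the fields read above:

import cats.effect.{ExitCode, IO, IOApp}
import org.http4s.HttpRoutes
import org.http4s.dsl.io._
import org.http4s.implicits._
import scala.concurrent.ExecutionContext

object HttpServerExample extends IOApp {
  private val routes = HttpRoutes.of[IO] { case GET -> Root / "ping" => Ok("pong") }

  def run(args: List[String]): IO[ExitCode] =
    Http4sBlazeServerModule
      .make[IO](Http4sBlazeServerConfig("127.0.0.1", 8080), routes.orNotFound, ExecutionContext.global)
      .use(_ => IO.never)
}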
Example 112
Source File: MicrometerJmxModule.scala From scala-server-toolkit with MIT License | 5 votes |
package com.avast.sst.micrometer.jmx

import java.time.Duration

import cats.effect.{Resource, Sync}
import com.codahale.metrics.MetricRegistry
import com.codahale.metrics.jmx.JmxReporter
import io.micrometer.core.instrument.Clock
import io.micrometer.core.instrument.config.NamingConvention
import io.micrometer.core.instrument.util.HierarchicalNameMapper
import io.micrometer.jmx.{JmxConfig, JmxMeterRegistry}

object MicrometerJmxModule {

  def make[F[_]: Sync](
      config: MicrometerJmxConfig,
      clock: Clock = Clock.SYSTEM,
      nameMapper: HierarchicalNameMapper = HierarchicalNameMapper.DEFAULT
  ): Resource[F, JmxMeterRegistry] = {
    Resource
      .make {
        Sync[F].delay {
          if (config.enableTypeScopeNameHierarchy) {
            val dropwizardRegistry = new MetricRegistry
            val registry = new JmxMeterRegistry(
              new CustomJmxConfig(config),
              clock,
              nameMapper,
              dropwizardRegistry,
              makeJmxReporter(dropwizardRegistry, config.domain)
            )
            registry.config.namingConvention(NamingConvention.dot)
            registry
          } else {
            new JmxMeterRegistry(new CustomJmxConfig(config), clock, nameMapper)
          }
        }
      }(registry => Sync[F].delay(registry.close()))
  }

  private def makeJmxReporter(metricRegistry: MetricRegistry, domain: String) = {
    JmxReporter
      .forRegistry(metricRegistry)
      .inDomain(domain)
      .createsObjectNamesWith(new TypeScopeNameObjectNameFactory())
      .build
  }

  private class CustomJmxConfig(c: MicrometerJmxConfig) extends JmxConfig {

    override val domain: String = c.domain
    override val step: Duration = Duration.ofMillis(c.step.toMillis)

    // the method is @Nullable and we don't need to implement it here
    @SuppressWarnings(Array("scalafix:DisableSyntax.null"))
    override def get(key: String): String = null

  }

}
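A sketch of acquiring the registry and recording a metric; passing only the JMX domain to MicrometerJmxConfig is an assumption (the class also carries a step interval and the hierarchy flag read above):

import cats.effect.IO

object JmxMetricsExample {
  val meters: IO[Unit] =
    MicrometerJmxModule
      .make[IO](MicrometerJmxConfig("com.example.app")) // domain value is an assumption
      .use(registry => IO(registry.counter("example.started").increment()))
}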
Example 113
Source File: MonixResourceApp.scala From scala-server-toolkit with MIT License | 5 votes |
package com.avast.sst.bundle

import cats.effect.{ExitCode, Resource}
import monix.eval.{Task, TaskApp}
import org.slf4j.LoggerFactory

trait MonixResourceApp[A] extends TaskApp {

  private val logger = LoggerFactory.getLogger(this.getClass)

  def program: Resource[Task, A]

  override def run(args: List[String]): Task[ExitCode] = {
    program
      .use(_ => Task.unit)
      .redeem(
        ex => {
          logger.error("Application initialization failed!", ex)
          ExitCode.Error
        },
        _ => ExitCode.Success
      )
  }

}
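Using the trait is just a matter of implementing program; a minimal, self-contained sketch:

import cats.effect.Resource
import monix.eval.Task

object ExampleJob extends MonixResourceApp[Unit] {
  def program: Resource[Task, Unit] =
    Resource.make(Task(println("resources acquired")))(_ => Task(println("resources released")))
}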
Example 114
Source File: MonixServerApp.scala From scala-server-toolkit with MIT License | 5 votes |
package com.avast.sst.bundle

import cats.effect.{ExitCode, Resource}
import monix.eval.{Task, TaskApp}
import org.http4s.server.Server
import org.slf4j.LoggerFactory

trait MonixServerApp extends TaskApp {

  private val logger = LoggerFactory.getLogger(this.getClass)

  def program: Resource[Task, Server[Task]]

  override def run(args: List[String]): Task[ExitCode] = {
    program
      .use { server =>
        for {
          _ <- Task.delay(logger.info(s"Server started @ ${server.address.getHostString}:${server.address.getPort}"))
          _ <- Task.never[Unit]
        } yield server
      }
      .redeem(
        ex => {
          logger.error("Server initialization failed!", ex)
          ExitCode.Error
        },
        _ => ExitCode.Success
      )
  }

}
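The server variant differs only in what program yields: a running Server[Task] that the trait keeps alive until shutdown. A sketch reusing the Blaze server module from Example 111 (the config values are assumptions):

import cats.effect.Resource
import com.avast.sst.http4s.server.{Http4sBlazeServerConfig, Http4sBlazeServerModule}
import monix.eval.Task
import monix.execution.Scheduler.Implicits.global
import org.http4s.HttpApp
import org.http4s.server.Server

object ExampleServer extends MonixServerApp {
  def program: Resource[Task, Server[Task]] =
    Http4sBlazeServerModule.make[Task](
      Http4sBlazeServerConfig("127.0.0.1", 8080), // assumed constructor arguments
      HttpApp.notFound[Task],
      scala.concurrent.ExecutionContext.global
    )
}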
Example 115
Source File: DoobieHikariModule.scala From scala-server-toolkit with MIT License | 5 votes |
package com.avast.sst.doobie

import java.util.Properties
import java.util.concurrent.{ScheduledExecutorService, ThreadFactory}

import cats.Show
import cats.effect.{Async, Blocker, ContextShift, Resource, Sync}
import cats.syntax.show._
import com.zaxxer.hikari.HikariConfig
import com.zaxxer.hikari.metrics.MetricsTrackerFactory
import doobie.enum.TransactionIsolation
import doobie.hikari.HikariTransactor

import scala.concurrent.ExecutionContext

object DoobieHikariModule {

  def make[F[_]: Async](
      config: DoobieHikariConfig,
      boundedConnectExecutionContext: ExecutionContext,
      blocker: Blocker,
      metricsTrackerFactory: Option[MetricsTrackerFactory] = None
  )(implicit cs: ContextShift[F]): Resource[F, HikariTransactor[F]] = {
    for {
      hikariConfig <- Resource.liftF(makeHikariConfig(config, metricsTrackerFactory))
      transactor <- HikariTransactor.fromHikariConfig(hikariConfig, boundedConnectExecutionContext, blocker)
    } yield transactor
  }

  implicit private val transactionIsolationShow: Show[TransactionIsolation] = {
    case TransactionIsolation.TransactionNone            => "TRANSACTION_NONE"
    case TransactionIsolation.TransactionReadUncommitted => "TRANSACTION_READ_UNCOMMITTED"
    case TransactionIsolation.TransactionReadCommitted   => "TRANSACTION_READ_COMMITTED"
    case TransactionIsolation.TransactionRepeatableRead  => "TRANSACTION_REPEATABLE_READ"
    case TransactionIsolation.TransactionSerializable    => "TRANSACTION_SERIALIZABLE"
  }

  private def makeHikariConfig[F[_]: Sync](
      config: DoobieHikariConfig,
      metricsTrackerFactory: Option[MetricsTrackerFactory],
      scheduledExecutorService: Option[ScheduledExecutorService] = None,
      threadFactory: Option[ThreadFactory] = None
  ): F[HikariConfig] = {
    Sync[F].delay {
      val c = new HikariConfig()

      c.setDriverClassName(config.driver)
      c.setJdbcUrl(config.url)
      c.setUsername(config.username)
      c.setPassword(config.password)
      c.setAutoCommit(config.autoCommit)
      c.setConnectionTimeout(config.connectionTimeout.toMillis)
      c.setIdleTimeout(config.idleTimeout.toMillis)
      c.setMaxLifetime(config.maxLifeTime.toMillis)
      c.setMinimumIdle(config.minimumIdle)
      c.setMaximumPoolSize(config.maximumPoolSize)
      c.setReadOnly(config.readOnly)
      c.setAllowPoolSuspension(config.allowPoolSuspension)
      c.setIsolateInternalQueries(config.isolateInternalQueries)
      c.setRegisterMbeans(config.registerMBeans)

      val dataSourceProperties = new Properties()
      config.dataSourceProperties.foreach { case (k, v) => dataSourceProperties.put(k, v) }
      c.setDataSourceProperties(dataSourceProperties)

      config.leakDetectionThreshold.map(_.toMillis).foreach(c.setLeakDetectionThreshold)
      config.initializationFailTimeout.map(_.toMillis).foreach(c.setInitializationFailTimeout)
      config.poolName.foreach(c.setPoolName)
      config.validationTimeout.map(_.toMillis).foreach(c.setValidationTimeout)
      config.transactionIsolation.map(_.show).foreach(c.setTransactionIsolation)

      scheduledExecutorService.foreach(c.setScheduledExecutor)
      threadFactory.foreach(c.setThreadFactory)

      metricsTrackerFactory.foreach(c.setMetricsTrackerFactory)
      c
    }
  }

}
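A sketch of building a transactor and running a query; dbConfig stands for a DoobieHikariConfig loaded elsewhere (its fields mirror the setters above):

import cats.effect.{Blocker, ContextShift, IO}
import doobie.implicits._
import scala.concurrent.ExecutionContext

object DoobieExample {
  implicit val cs: ContextShift[IO] = IO.contextShift(ExecutionContext.global)

  def selectOne(dbConfig: DoobieHikariConfig): IO[Int] =
    DoobieHikariModule
      .make[IO](dbConfig, ExecutionContext.global, Blocker.liftExecutionContext(ExecutionContext.global))
      .use(xa => sql"SELECT 1".query[Int].unique.transact(xa))
}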
Example 116
Source File: ZioResourceApp.scala From scala-server-toolkit with MIT License | 5 votes |
package com.avast.sst.bundle

import cats.effect.Resource
import org.slf4j.LoggerFactory
import zio._
import zio.interop.catz._

trait ZioResourceApp[A] extends CatsApp {

  private val logger = LoggerFactory.getLogger(this.getClass)

  def program: Resource[Task, A]

  override def run(args: List[String]): ZIO[ZEnv, Nothing, ExitCode] = {
    program
      .use(_ => Task.unit)
      .fold(
        ex => {
          logger.error("Application initialization failed!", ex)
          ExitCode.failure
        },
        _ => ExitCode.success
      )
  }

}
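A minimal sketch of the ZIO variant; zio.interop.catz._ supplies the cats-effect instances for Task that Resource needs:

import cats.effect.Resource
import zio.Task
import zio.interop.catz._

object ExampleZioJob extends ZioResourceApp[Unit] {
  def program: Resource[Task, Unit] =
    Resource.make(Task(println("resources acquired")))(_ => Task(println("resources released")))
}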
Example 117
Source File: ZioServerApp.scala From scala-server-toolkit with MIT License | 5 votes |
package com.avast.sst.bundle

import cats.effect.Resource
import com.github.ghik.silencer.silent
import org.http4s.server.Server
import org.slf4j.LoggerFactory
import zio._
import zio.interop.catz._

trait ZioServerApp extends CatsApp {

  private val logger = LoggerFactory.getLogger(this.getClass)

  def program: Resource[Task, Server[Task]]

  @silent("dead code")
  override def run(args: List[String]): ZIO[ZEnv, Nothing, ExitCode] = {
    program
      .use { server =>
        for {
          _ <- UIO.effectTotal(logger.info(s"Server started @ ${server.address.getHostString}:${server.address.getPort}"))
          _ <- Task.never
        } yield server
      }
      .fold(
        ex => {
          logger.error("Server initialization failed!", ex)
          ExitCode.failure
        },
        _ => ExitCode.success
      )
  }

}
Example 118
Source File: SentryModule.scala From scala-server-toolkit with MIT License | 5 votes |
package com.avast.sst.sentry

import cats.effect.{Resource, Sync}
import io.sentry.{SentryClient, SentryClientFactory}

import scala.jdk.CollectionConverters._
import scala.reflect.ClassTag

object SentryModule {

  // Derives the Sentry `release` tag from the Implementation-Title/Implementation-Version
  // manifest entries of the package containing `Main`, then delegates to the plain `make`
  // constructor (`make`, not shown in this excerpt, builds the SentryClient via SentryClientFactory).
  def makeWithReleaseFromPackage[F[_]: Sync, Main: ClassTag](config: SentryConfig): Resource[F, SentryClient] = {
    for {
      customizedConfig <- Resource.liftF {
        Sync[F].delay {
          for {
            pkg <- Option(implicitly[ClassTag[Main]].runtimeClass.getPackage)
            title <- Option(pkg.getImplementationTitle)
            version <- Option(pkg.getImplementationVersion)
          } yield config.copy(release = Some(s"$title@$version"))
        }
      }
      sentryClient <- make[F](customizedConfig.getOrElse(config))
    } yield sentryClient
  }

}
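A usage sketch; sentryConfig is a hypothetical SentryConfig value (its DSN and other fields live in the toolkit's config class), and Main is assumed to be the application's main object whose JAR manifest supplies the release:

import cats.effect.IO

object SentryExample {
  def initSentry(sentryConfig: SentryConfig): IO[Unit] =
    SentryModule
      .makeWithReleaseFromPackage[IO, Main.type](sentryConfig)
      .use(client => IO(client.sendMessage("application started")))
}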
Example 119
Source File: Fs2KafkaModule.scala From scala-server-toolkit with MIT License | 5 votes |
package com.avast.sst.fs2kafka

import cats.effect.{Blocker, ConcurrentEffect, ContextShift, Resource, Timer}
import fs2.kafka._

object Fs2KafkaModule {

  def makeConsumer[F[_]: ConcurrentEffect: ContextShift: Timer, K: Deserializer[F, *], V: Deserializer[F, *]](
      config: ConsumerConfig,
      blocker: Option[Blocker] = None,
      createConsumer: Option[Map[String, String] => F[KafkaByteConsumer]] = None
  ): Resource[F, KafkaConsumer[F, K, V]] = {
    def setOpt[A](maybeValue: Option[A])(
        setter: (ConsumerSettings[F, K, V], A) => ConsumerSettings[F, K, V]
    )(initial: ConsumerSettings[F, K, V]): ConsumerSettings[F, K, V] =
      maybeValue match {
        case Some(value) => setter(initial, value)
        case None        => initial
      }

    val settings = ConsumerSettings(implicitly[Deserializer[F, K]], implicitly[Deserializer[F, V]])
      .withBootstrapServers(config.bootstrapServers.mkString(","))
      .withGroupId(config.groupId)
      .pipe(setOpt(config.groupInstanceId)(_.withGroupInstanceId(_)))
      .pipe(setOpt(config.clientId)(_.withClientId(_)))
      .pipe(setOpt(config.clientRack)(_.withClientRack(_)))
      .withAutoOffsetReset(config.autoOffsetReset)
      .withEnableAutoCommit(config.enableAutoCommit)
      .withAutoCommitInterval(config.autoCommitInterval)
      .withAllowAutoCreateTopics(config.allowAutoCreateTopics)
      .withCloseTimeout(config.closeTimeout)
      .withCommitRecovery(config.commitRecovery)
      .withCommitTimeout(config.commitTimeout)
      .withDefaultApiTimeout(config.defaultApiTimeout)
      .withHeartbeatInterval(config.heartbeatInterval)
      .withIsolationLevel(config.isolationLevel)
      .withMaxPrefetchBatches(config.maxPrefetchBatches)
      .withPollInterval(config.pollInterval)
      .withPollTimeout(config.pollTimeout)
      .withMaxPollInterval(config.maxPollInterval)
      .withMaxPollRecords(config.maxPollRecords)
      .withRequestTimeout(config.requestTimeout)
      .withSessionTimeout(config.sessionTimeout)
      .pipe(setOpt(blocker)(_.withBlocker(_)))
      .withProperties(config.properties)
      .pipe(setOpt(createConsumer)(_.withCreateConsumer(_)))

    makeConsumer(settings)
  }

  def makeConsumer[F[_]: ConcurrentEffect: ContextShift: Timer, K, V](
      settings: ConsumerSettings[F, K, V]
  ): Resource[F, KafkaConsumer[F, K, V]] = consumerResource[F].using(settings)

  def makeProducer[F[_]: ConcurrentEffect: ContextShift, K: Serializer[F, *], V: Serializer[F, *]](
      config: ProducerConfig,
      blocker: Option[Blocker] = None,
      createProducer: Option[Map[String, String] => F[KafkaByteProducer]] = None
  ): Resource[F, KafkaProducer[F, K, V]] = {
    def setOpt[A](maybeValue: Option[A])(
        setter: (ProducerSettings[F, K, V], A) => ProducerSettings[F, K, V]
    )(initial: ProducerSettings[F, K, V]): ProducerSettings[F, K, V] =
      maybeValue match {
        case Some(value) => setter(initial, value)
        case None        => initial
      }

    val settings = ProducerSettings(implicitly[Serializer[F, K]], implicitly[Serializer[F, V]])
      .withBootstrapServers(config.bootstrapServers.mkString(","))
      .pipe(setOpt(config.clientId)(_.withClientId(_)))
      .withAcks(config.acks)
      .withBatchSize(config.batchSize)
      .withCloseTimeout(config.closeTimeout)
      .withDeliveryTimeout(config.deliveryTimeout)
      .withRequestTimeout(config.requestTimeout)
      .withLinger(config.linger)
      .withEnableIdempotence(config.enableIdempotence)
      .withMaxInFlightRequestsPerConnection(config.maxInFlightRequestsPerConnection)
      .withParallelism(config.parallelism)
      .withRetries(config.retries)
      .pipe(setOpt(blocker)(_.withBlocker(_)))
      .withProperties(config.properties)
      .pipe(setOpt(createProducer)(_.withCreateProducer(_)))

    makeProducer(settings)
  }

  def makeProducer[F[_]: ConcurrentEffect: ContextShift, K, V](settings: ProducerSettings[F, K, V]): Resource[F, KafkaProducer[F, K, V]] =
    producerResource[F].using(settings)

  implicit private final class ChainingOps[A](private val self: A) extends AnyVal {
    def pipe[B](f: A => B): B = f(self)
  }

}
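A distilled producer sketch (the broker address and topic are placeholders, and the remaining ProducerConfig fields are assumed to have defaults; the full produce/consume round trip is shown in the test below):

import cats.effect.{ContextShift, IO}
import cats.syntax.flatMap._
import fs2.kafka.{ProducerRecord, ProducerRecords}
import scala.concurrent.ExecutionContext

object ProducerExample {
  implicit val cs: ContextShift[IO] = IO.contextShift(ExecutionContext.global)

  val sendOne =
    Fs2KafkaModule
      .makeProducer[IO, String, String](ProducerConfig(List("localhost:9092")))
      .use(_.produce(ProducerRecords.one(ProducerRecord("topic", "key", "value"))).flatten)
}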
Example 120
Source File: Fs2KafkaModuleTest.scala From scala-server-toolkit with MIT License | 5 votes |
package com.avast.sst.fs2kafka

import cats.effect.{ContextShift, IO, Resource, Timer}
import cats.syntax.flatMap._
import com.dimafeng.testcontainers.{ForAllTestContainer, KafkaContainer}
import fs2.kafka.{AutoOffsetReset, ProducerRecord, ProducerRecords}
import org.scalatest.funsuite.AsyncFunSuite

import scala.concurrent.ExecutionContext.Implicits.global

class Fs2KafkaModuleTest extends AsyncFunSuite with ForAllTestContainer {

  override val container = KafkaContainer()

  implicit private val cs: ContextShift[IO] = IO.contextShift(global)
  implicit private val timer: Timer[IO] = IO.timer(global)

  test("producer") {
    val io = for {
      producer <- Fs2KafkaModule.makeProducer[IO, String, String](ProducerConfig(List(container.bootstrapServers)))
      consumer <- Fs2KafkaModule.makeConsumer[IO, String, String](
        ConsumerConfig(List(container.bootstrapServers), groupId = "test", autoOffsetReset = AutoOffsetReset.Earliest)
      )
      _ <- Resource.liftF(consumer.subscribeTo("test"))
      _ <- Resource.liftF(producer.produce(ProducerRecords.one(ProducerRecord("test", "key", "value"))).flatten)
      event <- Resource.liftF(consumer.stream.head.compile.toList)
    } yield assert(event.head.record.key === "key" && event.head.record.value === "value")

    io.use(IO.pure).unsafeToFuture()
  }

}
Example 121
Source File: CorrelationIdMiddlewareTest.scala From scala-server-toolkit with MIT License | 5 votes |
package com.avast.sst.http4s.server.middleware

import java.net.InetSocketAddress

import cats.effect.{ContextShift, IO, Resource, Timer}
import com.avast.sst.http4s.server.Http4sRouting
import org.http4s.client.blaze.BlazeClientBuilder
import org.http4s.dsl.Http4sDsl
import org.http4s.server.blaze.BlazeServerBuilder
import org.http4s.util.CaseInsensitiveString
import org.http4s.{Header, HttpRoutes, Request, Uri}
import org.scalatest.funsuite.AsyncFunSuite

import scala.concurrent.ExecutionContext

@SuppressWarnings(Array("scalafix:Disable.get", "scalafix:Disable.toString", "scalafix:Disable.createUnresolved"))
class CorrelationIdMiddlewareTest extends AsyncFunSuite with Http4sDsl[IO] {

  implicit private val cs: ContextShift[IO] = IO.contextShift(ExecutionContext.global)
  implicit private val timer: Timer[IO] = IO.timer(ExecutionContext.global)

  test("CorrelationIdMiddleware fills Request attributes and HTTP response header") {
    val test = for {
      middleware <- Resource.liftF(CorrelationIdMiddleware.default[IO])
      routes = Http4sRouting.make {
        middleware.wrap {
          HttpRoutes.of[IO] {
            case req @ GET -> Root / "test" =>
              val id = middleware.retrieveCorrelationId(req)
              Ok("test").map(_.withHeaders(Header("Attribute-Value", id.toString)))
          }
        }
      }
      server <- BlazeServerBuilder[IO](ExecutionContext.global)
        .bindSocketAddress(InetSocketAddress.createUnresolved("127.0.0.1", 0))
        .withHttpApp(routes)
        .resource
      client <- BlazeClientBuilder[IO](ExecutionContext.global).resource
    } yield (server, client)

    test
      .use {
        case (server, client) =>
          client
            .run(
              Request[IO](uri = Uri.unsafeFromString(s"http://${server.address.getHostString}:${server.address.getPort}/test"))
                .withHeaders(Header("Correlation-Id", "test-value"))
            )
            .use { response =>
              IO.delay {
                assert(response.headers.get(CaseInsensitiveString("Correlation-Id")).get.value === "test-value")
                assert(response.headers.get(CaseInsensitiveString("Attribute-Value")).get.value === "Some(CorrelationId(test-value))")
              }
            }
      }
      .unsafeToFuture()
  }

}
Example 122
Source File: RollingFileLogger.scala From odin with Apache License 2.0 | 4 votes |
package io.odin.loggers

import java.nio.file.{Files, Path, Paths}
import java.time.{Instant, LocalDateTime}
import java.time.format.DateTimeFormatter
import java.util.TimeZone
import java.util.concurrent.TimeUnit

import cats.Monad
import cats.effect.concurrent.Ref
import cats.effect.{Concurrent, ContextShift, Fiber, Resource, Timer}
import cats.syntax.all._
import io.odin.formatter.Formatter
import io.odin.{Level, Logger, LoggerMessage}

import scala.concurrent.duration.{FiniteDuration, _}

object RollingFileLogger {

  def apply[F[_]](
      fileNamePattern: LocalDateTime => String,
      maxFileSizeInBytes: Option[Long],
      rolloverInterval: Option[FiniteDuration],
      formatter: Formatter,
      minLevel: Level
  )(implicit F: Concurrent[F], timer: Timer[F], cs: ContextShift[F]): Resource[F, Logger[F]] = {
    new RollingFileLoggerFactory(
      fileNamePattern,
      maxFileSizeInBytes,
      rolloverInterval,
      formatter,
      minLevel,
      FileLogger.apply[F]
    ).mk
  }

  // Delegates all logging to whatever logger is currently held in the Ref, so the
  // factory below can swap in a fresh file logger on each rollover.
  private[odin] class RefLogger[F[_]: Timer: Monad](
      current: Ref[F, Logger[F]],
      override val minLevel: Level
  ) extends DefaultLogger[F](minLevel) {

    def log(msg: LoggerMessage): F[Unit] = current.get.flatMap(_.log(msg))

    override def log(msgs: List[LoggerMessage]): F[Unit] = current.get.flatMap(_.log(msgs))

  }

  private[odin] class RollingFileLoggerFactory[F[_]](
      fileNamePattern: LocalDateTime => String,
      maxFileSizeInBytes: Option[Long],
      rolloverInterval: Option[FiniteDuration],
      formatter: Formatter,
      minLevel: Level,
      underlyingLogger: (String, Formatter, Level) => Resource[F, Logger[F]],
      fileSizeCheck: Path => Long = Files.size
  )(implicit F: Concurrent[F], timer: Timer[F], cs: ContextShift[F]) {

    val df: DateTimeFormatter = DateTimeFormatter.ofPattern("yyyy-MM-dd-HH-mm-ss")

    // `allocate` (not shown in this excerpt) opens the next log file via `underlyingLogger`
    // and starts the watcher fiber that completes once a rollover condition is met.
    def mk: Resource[F, Logger[F]] = {
      val logger = for {
        ((logger, watcherFiber), release) <- allocate.allocated
        refLogger <- Ref.of(logger)
        refRelease <- Ref.of(release)
        _ <- F.start(rollingLoop(watcherFiber, refLogger, refRelease))
      } yield {
        (new RefLogger(refLogger, minLevel), refRelease)
      }
      Resource.make(logger)(_._2.get.flatten).map {
        case (logger, _) => logger
      }
    }

    def now: F[Long] = timer.clock.realTime(TimeUnit.MILLISECONDS)

    // Waits for the watcher to signal a rollover, swaps in a freshly allocated logger,
    // releases the previous one, and recurses.
    def rollingLoop(watcher: Fiber[F, Unit], logger: Ref[F, Logger[F]], release: Ref[F, F[Unit]]): F[Unit] =
      for {
        _ <- watcher.join
        oldRelease <- release.get
        ((newLogger, newWatcher), newRelease) <- allocate.allocated
        _ <- logger.set(newLogger)
        _ <- release.set(newRelease)
        _ <- oldRelease
        _ <- rollingLoop(newWatcher, logger, release)
      } yield ()

  }

}
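A usage sketch for the factory above; the file name pattern, size cap, and interval are illustrative, while Formatter.default and Level.Info come from odin itself:

import cats.effect.{ContextShift, IO, Timer}
import io.odin.Level
import io.odin.formatter.Formatter
import java.time.format.DateTimeFormatter
import scala.concurrent.ExecutionContext
import scala.concurrent.duration._

object RollingLoggerExample {
  implicit val cs: ContextShift[IO] = IO.contextShift(ExecutionContext.global)
  implicit val timer: Timer[IO] = IO.timer(ExecutionContext.global)

  val rollingLogger =
    RollingFileLogger[IO](
      fileNamePattern = dt => s"app-${dt.format(DateTimeFormatter.ISO_LOCAL_DATE)}.log",
      maxFileSizeInBytes = Some(10L * 1024 * 1024), // roll over at ~10 MiB
      rolloverInterval = Some(1.day),               // ...or once a day, whichever comes first
      formatter = Formatter.default,
      minLevel = Level.Info
    )

  val run = rollingLogger.use(_.info("started"))
}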