cats.effect.concurrent.Ref Scala Examples
The following examples show how to use cats.effect.concurrent.Ref, the atomic, purely functional mutable reference from cats-effect 2.x. Each example is a complete source file taken from an open-source project; the project and license are noted above each listing.
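Before the project examples, here is a minimal, self-contained sketch of the core Ref API on cats-effect 2.x (Ref.of to allocate state, update to modify it atomically, get to read it). It is not taken from any project below; the object name RefQuickStart is purely illustrative.

import cats.effect.{ExitCode, IO, IOApp}
import cats.effect.concurrent.Ref
import cats.implicits._

object RefQuickStart extends IOApp {
  def run(args: List[String]): IO[ExitCode] =
    for {
      counter <- Ref.of[IO, Int](0)                                 // allocate a mutable cell inside IO
      _       <- List.fill(10)(counter.update(_ + 1)).parSequence   // ten concurrent, atomic increments
      n       <- counter.get
      _       <- IO(println(s"final count: $n"))                    // prints 10
    } yield ExitCode.Success
}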
Example 1
Source File: NatPmpClient.scala From iotchain with MIT License
package jbok.network.nat

import cats.effect.Sync
import cats.effect.concurrent.Ref
import cats.implicits._
import com.offbynull.portmapper.gateways.network.NetworkGateway
import com.offbynull.portmapper.gateways.network.internalmessages.KillNetworkRequest
import com.offbynull.portmapper.gateways.process.ProcessGateway
import com.offbynull.portmapper.gateways.process.internalmessages.KillProcessRequest
import com.offbynull.portmapper.mapper.{MappedPort, PortType}
import com.offbynull.portmapper.mappers.natpmp.NatPmpPortMapper
import jbok.common.log.Logger

object NatPmpClient {
  def apply[F[_]](implicit F: Sync[F]): F[Nat[F]] =
    for {
      network     <- F.delay(NetworkGateway.create)
      networkBus   = network.getBus
      process     <- F.delay(ProcessGateway.create)
      processBus   = process.getBus
      mappers     <- F.delay(NatPmpPortMapper.identify(networkBus, processBus))
      mapper      <- F.delay(mappers.get(0))
      mappedPorts <- Ref.of[F, Map[Int, MappedPort]](Map.empty)
      _           <- F.delay(network.getBus.send(new KillNetworkRequest()))
      _           <- F.delay(process.getBus.send(new KillProcessRequest()))
    } yield new Nat[F] {
      private[this] val log = Logger[F]

      override def addMapping(internalPort: Int, externalPort: Int, lifetime: Long): F[Unit] =
        for {
          _ <- deleteMapping(externalPort)
          port <- F
            .delay(mapper.mapPort(PortType.TCP, internalPort, externalPort, lifetime))
            .handleErrorWith { e =>
              log.error(s"add port mapping from ${internalPort} to ${externalPort} failed", e) >> F.raiseError(e)
            }
          _ <- mappedPorts.update(_ + (externalPort -> port))
        } yield ()

      override def deleteMapping(externalPort: Int): F[Unit] =
        for {
          portOpt <- mappedPorts.get.map(_.get(externalPort))
          _       <- portOpt.fold(F.unit)(port => F.delay(mapper.unmapPort(port)))
          _       <- mappedPorts.update(_ - externalPort)
        } yield ()
    }
}
Example 2
Source File: package.scala From fs2-aws with MIT License
package fs2.aws

import cats.effect.Concurrent
import cats.effect.concurrent.Ref
import cats.implicits._
import fs2.concurrent.Queue
import fs2.{ Pipe, Stream }

package object core {
  def groupBy[F[_], A, K](selector: A => F[K])(implicit F: Concurrent[F]): Pipe[F, A, (K, Stream[F, A])] = { in =>
    Stream.eval(Ref.of[F, Map[K, Queue[F, Option[A]]]](Map.empty)).flatMap { queueMap =>
      val cleanup = {
        queueMap.get.flatMap(_.values.toList.traverse_(_.enqueue1(None)))
      }

      (in ++ Stream.eval_(cleanup))
        .evalMap { elem =>
          (selector(elem), queueMap.get).mapN { (key, queues) =>
            queues
              .get(key)
              .fold {
                for {
                  newQ <- Queue.unbounded[F, Option[A]] // Create a new queue
                  _    <- queueMap.modify(queues => (queues + (key -> newQ), queues))
                  _    <- newQ.enqueue1(elem.some) // Enqueue the element lifted into an Option to the new queue
                } yield (key -> newQ.dequeue.unNoneTerminate).some
              }(_.enqueue1(elem.some) as None)
          }.flatten
        }
        .unNone
        .onFinalize(cleanup)
    }
  }
}
Example 3
Source File: TestKinesisProducerClient.scala From fs2-aws with MIT License
package fs2.aws.testkit

import java.nio.ByteBuffer

import cats.effect.Sync
import cats.effect.concurrent.Ref
import com.amazonaws.services.kinesis.producer.{ Attempt, UserRecordResult }
import com.google.common.util.concurrent.{ ListenableFuture, SettableFuture }
import fs2.aws.internal.KinesisProducerClient
import cats.implicits._
import io.circe.Decoder
import io.circe.jawn.CirceSupportParser

import scala.collection.JavaConverters._

case class TestKinesisProducerClient[F[_], T](state: Ref[F, List[T]])(
    implicit decoder: Decoder[T]
) extends KinesisProducerClient[F] {
  override def putData(
      streamName: String,
      partitionKey: String,
      data: ByteBuffer
  )(implicit F: Sync[F]): F[ListenableFuture[UserRecordResult]] =
    for {
      t <- CirceSupportParser
            .parseFromByteBuffer(data)
            .toEither
            .flatMap(_.as[T])
            .liftTo[F]
      _ <- state.modify(orig => (t :: orig, orig))
      res = {
        val future: SettableFuture[UserRecordResult] = SettableFuture.create()
        future.set(new UserRecordResult(List[Attempt]().asJava, "seq #", "shard #", true))
        future
      }
    } yield res
}
Example 4
Source File: lift.scala From tofu with Apache License 2.0
package tofu.syntax

import cats.{Functor, ~>}
import cats.effect.concurrent.{Deferred, MVar, Ref, Semaphore}
import cats.tagless.{FunctorK, InvariantK}
import tofu.lift.{IsoK, Lift, Unlift}

object lift {
  implicit final class LiftSyntax[F[_], A](private val fa: F[A]) extends AnyVal {
    def lift[G[_]](implicit lift: Lift[F, G]): G[A] = lift.lift(fa)
  }

  implicit final class MVarLiftSyntax[F[_], A](private val mvar: MVar[F, A]) extends AnyVal {
    def lift[G[_]](implicit lift: Lift[F, G]): MVar[G, A] = mvar.mapK(lift.liftF)
  }

  implicit final class RefLiftSyntax[F[_], A](private val ref: Ref[F, A]) extends AnyVal {
    def lift[G[_]](implicit lift: Lift[F, G], F: Functor[F]): Ref[G, A] = ref.mapK(lift.liftF)
  }

  implicit final class DeferredLiftSyntax[F[_], A](private val deferred: Deferred[F, A]) extends AnyVal {
    def lift[G[_]](implicit lift: Lift[F, G]): Deferred[G, A] = deferred.mapK(lift.liftF)
  }

  implicit final class SemaphoreLiftSyntax[F[_]](private val semaphore: Semaphore[F]) extends AnyVal {
    def ilift[G[_]](implicit lift: IsoK[F, G]): Semaphore[G] = semaphore.imapK(lift.tof, lift.fromF)
    def unlift[G[_]](implicit unlift: Unlift[F, G], G: Functor[G]): G[Semaphore[G]] =
      G.map(unlift.unlift)(backf => semaphore.imapK(unlift.liftF, backf))
  }

  implicit final class CatsTaglessLiftSyntax[T[_[_]], F[_]](private val tf: T[F]) extends AnyVal {
    def lift[G[_]](implicit lift: Lift[F, G], fk: FunctorK[T]): T[G] = fk.mapK(tf)(lift.liftF)
    def ilift[G[_]](implicit lift: IsoK[F, G], fk: InvariantK[T]): T[G] = fk.imapK(tf)(lift.tof)(lift.fromF)
    def unlift[G[_]](implicit unlift: Unlift[F, G], G: Functor[G], fk: InvariantK[T]): G[T[G]] =
      G.map(unlift.unlift)(backf => fk.imapK(tf)(unlift.liftF)(backf))
  }

  implicit final class CatsTagless1LiftSyntax[T[_[_], _], F[_], A](private val tf: T[F, A]) extends AnyVal {
    def mapK1[G[_]](f: F ~> G)(implicit fk: FunctorK[T[*[_], A]]): T[G, A] = fk.mapK(tf)(f)
    def imapK1[G[_]](f: F ~> G)(g: G ~> F)(implicit fk: InvariantK[T[*[_], A]]): T[G, A] = fk.imapK(tf)(f)(g)

    def lift1[G[_]](implicit lift: Lift[F, G], fk: FunctorK[T[*[_], A]]): T[G, A] = fk.mapK(tf)(lift.liftF)
    def ilift1[G[_]](implicit lift: IsoK[F, G], fk: InvariantK[T[*[_], A]]): T[G, A] =
      fk.imapK(tf)(lift.tof)(lift.fromF)
    def unlift1[G[_]](implicit unlift: Unlift[F, G], G: Functor[G], fk: InvariantK[T[*[_], A]]): G[T[G, A]] =
      G.map(unlift.unlift)(backf => fk.imapK(tf)(unlift.liftF)(backf))
  }

  implicit final class CatsTagless2LiftSyntax[T[_[_], _, _], F[_], A, B](private val tf: T[F, A, B]) extends AnyVal {
    def mapK2[G[_]](f: F ~> G)(implicit fk: FunctorK[T[*[_], A, B]]): T[G, A, B] = fk.mapK(tf)(f)
    def imapK2[G[_]](f: F ~> G)(g: G ~> F)(implicit fk: InvariantK[T[*[_], A, B]]): T[G, A, B] = fk.imapK(tf)(f)(g)

    def lift2[G[_]](implicit lift: Lift[F, G], fk: FunctorK[T[*[_], A, B]]): T[G, A, B] = fk.mapK(tf)(lift.liftF)
    def ilift2[G[_]](implicit lift: IsoK[F, G], fk: InvariantK[T[*[_], A, B]]): T[G, A, B] =
      fk.imapK(tf)(lift.tof)(lift.fromF)
    def unlift2[G[_]](implicit unlift: Unlift[F, G], G: Functor[G], fk: InvariantK[T[*[_], A, B]]): G[T[G, A, B]] =
      G.map(unlift.unlift)(backf => fk.imapK(tf)(unlift.liftF)(backf))
  }
}
Example 5
Source File: FocusedRef.scala From tofu with Apache License 2.0
package tofu.concurrent.impl

import cats.Functor
import cats.data.State
import cats.effect.concurrent.Ref
import tofu.optics.Contains
import cats.syntax.functor._

final case class FocusedRef[F[_]: Functor, A, B](ref: Ref[F, A], focus: Contains[A, B]) extends Ref[F, B] {
  private def focusedMod[X](f: B => (B, X))(a: A): (A, X) = {
    val (next, res) = f(focus.extract(a))
    focus.set(a, next) -> res
  }

  def get: F[B]          = ref.get.map(focus.extract)
  def set(b: B): F[Unit] = ref.update(a => focus.set(a, b))

  def update(f: B => B): F[Unit]              = ref.update(focus.update(_, f))
  def modify[X](f: B => (B, X)): F[X]         = ref.modify(focusedMod(f))
  def modifyState[X](state: State[B, X]): F[X] = ref.modifyState(focus.focusState(state))

  def tryUpdate(f: B => B): F[Boolean]                        = ref.tryUpdate(focus.update(_, f))
  def tryModify[X](f: B => (B, X)): F[Option[X]]              = ref.tryModify(focusedMod(f))
  def tryModifyState[X](state: State[B, X]): F[Option[X]]     = ref.tryModifyState(focus.focusState(state))

  def access: F[(B, B => F[Boolean])] = ref.access.map {
    case (a, update) => (focus.extract(a), b => update(focus.set(a, b)))
  }
}
Example 6
package tofu
package concurrent

import cats.Functor
import cats.effect.Bracket
import cats.effect.concurrent.{MVar, Ref}
import cats.syntax.functor._
import tofu.concurrent.Mut.FocusedMut
import tofu.optics.Contains
import tofu.syntax.bracket._

@deprecated("use Atom / qvar.toAtom", since = "0.5.6")
trait Mut[F[_], A] {
  def get: F[A]
  def update(f: A => A): F[Unit]
  def set(a: A): F[Unit] = update(_ => a)

  def focused[B](implicit focus: A Contains B, F: Functor[F]): Mut[F, B] = new FocusedMut(this, focus)
}

object Mut {
  def ref[F[_], A](ref: Ref[F, A]): Mut[F, A]                                           = new RefMut(ref)
  def mvar[F[_], E, A](mvar: MVar[F, A])(implicit bracket: Bracket[F, E]): Mut[F, A]    = new MVarMut(mvar)

  private class RefMut[F[_], A](ref: Ref[F, A]) extends Mut[F, A] {
    def get: F[A]                   = ref.get
    override def set(a: A): F[Unit] = ref.set(a)
    def update(f: A => A): F[Unit]  = ref.update(f)
  }

  private class MVarMut[F[_]: Bracket[*[_], E], E, A](mvar: MVar[F, A]) extends Mut[F, A] {
    def get: F[A]                  = mvar.read
    def update(f: A => A): F[Unit] = mvar.take.bracketIncomplete(f andThen mvar.put)(mvar.put)
  }

  private[Mut] class FocusedMut[F[_], A, B](v: Mut[F, A], focus: Contains[A, B])(implicit F: Functor[F])
      extends Mut[F, B] {
    def get: F[B]                   = v.get.map(focus.extract)
    def update(f: B => B): F[Unit]  = v.update(focus.update(_, f))
    override def set(b: B): F[Unit] = v.update(focus.set(_, b))

    override def focused[C](implicit next: B Contains C, F: Functor[F]): Mut[F, C] =
      new FocusedMut[F, A, C](v, focus >> next)
  }
}
Example 7
package tofu.concurrent.syntax

import cats.data.OptionT
import cats.effect.Resource
import cats.effect.concurrent.Ref
import cats.{Functor, Monad}
import tofu.BracketThrow
import tofu.concurrent.impl.FocusedRef
import tofu.optics.{Contains, PProperty}
import tofu.syntax.monadic._

object ref {
  implicit final class TofuRefOps[F[_], A](private val self: Ref[F, A]) extends AnyVal {
    def focused[B](focus: A Contains B)(implicit F: Functor[F]): Ref[F, B] = FocusedRef(self, focus)

    def optimisticModifyRes[B, X, R](prop: PProperty[A, A, R, X])(init: => Resource[F, X])(f: X => R)(implicit
        F: BracketThrow[F]
    ): F[R] =
      OptionT(self.get.map(prop.downcast)).getOrElseF(
        init.use(x => self.modify(a => prop.downcast(a).fold((prop.set(a, x), f(x)))((a, _))))
      )
  }
}
Example 8
Source File: Memoize.scala From tofu with Apache License 2.0
package tofu.memo

import cats.effect.{Concurrent, ExitCase}
import cats.effect.concurrent.{Deferred, Ref}
import simulacrum.typeclass
import tofu.syntax.monadic._
import cats.syntax.option._
import cats.effect.syntax.concurrent._
import cats.effect.syntax.bracket._

// NOTE: the trait header was lost when this listing was extracted; the abstract
// methods below are assumed from the companion object that follows.
@typeclass
trait Memoize[F[_]] {
  def memoize[A](fa: F[A]): F[F[A]]
  def memoizeOnSuccess[A](fa: F[A]): F[F[A]]
}

object Memoize {
  def concurrentMemoize[F[_]](implicit F: Concurrent[F]): Memoize[F] =
    new Memoize[F] {
      def memoize[A](fa: F[A]): F[F[A]] = Concurrent.memoize(fa)

      //copy of Concurrent.memoize accepting success only
      def memoizeOnSuccess[A](f: F[A]): F[F[A]] = {
        {
          sealed trait State
          case class Subs(n: Int) extends State
          case object Done        extends State
          case class Fetch(state: State, v: Deferred[F, A], stop: Deferred[F, F[Unit]])

          Ref[F].of(Option.empty[Fetch]).map { state =>
            (Deferred[F, A] product Deferred[F, F[Unit]]).flatMap {
              case (v, stop) =>
                def endState(ec: ExitCase[Throwable]) =
                  state.modify {
                    case None                        => throw new AssertionError("unreachable")
                    case s @ Some(Fetch(Done, _, _)) => s -> F.unit
                    case Some(Fetch(Subs(n), v, stop)) =>
                      if (ec == ExitCase.Canceled && n == 1) None -> stop.get.flatten
                      else if (ec == ExitCase.Canceled) Fetch(Subs(n - 1), v, stop).some -> F.unit
                      else Fetch(Done, v, stop).some -> F.unit
                  }.flatten

                def fetch =
                  f.flatMap(v.complete)
                    .start
                    .flatMap(fiber => stop.complete(fiber.cancel))

                state.modify {
                  case s @ Some(Fetch(Done, v, _)) => s -> v.get
                  case Some(Fetch(Subs(n), v, stop)) =>
                    Fetch(Subs(n + 1), v, stop).some -> v.get.guaranteeCase(endState)
                  case None =>
                    Fetch(Subs(1), v, stop).some -> fetch.bracketCase(_ => v.get) { case (_, ec) => endState(ec) }
                }.flatten
            }
          }
        }
      }
    }
}
Example 9
Source File: CacheState.scala From tofu with Apache License 2.0
package tofu
package memo

import cats.effect.concurrent.{MVar, Ref}
import cats.tagless.InvariantK
import cats.{Functor, Monad, ~>}
import tofu.concurrent.{MVars, MakeMVar, MakeRef, Refs}
import tofu.memo.CacheOperation.{CleanUp, GetOrElse}
import tofu.syntax.bracket._
import tofu.syntax.monadic._

abstract class CacheState[F[_], A] {
  def runOperation[B](op: CacheOperation[F, A, B]): F[B]
  def value: F[CacheVal[A]]

  def getOrElse(process: F[A], now: Long, after: Long): F[A] = runOperation(GetOrElse(process, now, after))
  def cleanUp(after: Long): F[Boolean]                       = runOperation(CleanUp(after))
}

object CacheState {
  def apply[F[_]: Monad: Guarantee: Refs: MVars, A](
      method: CacheMethod,
      initial: CacheVal[A] = CacheVal.None
  ): F[CacheState[F, A]] =
    in[F, F, A](method, initial)

  def in[I[_]: Functor, F[_]: Monad: Guarantee, A](method: CacheMethod, initial: CacheVal[A] = CacheVal.None)(implicit
      mvar: MakeMVar[I, F],
      refs: MakeRef[I, F]
  ): I[CacheState[F, A]] =
    method match {
      case CacheMethod.MVar => mvarIn[I, F, A](initial)
      case CacheMethod.Ref  => refIn[I, F, A](initial)
    }

  def mvarIn[I[_]: Functor, F[_]: Monad: Guarantee, A](initial: CacheVal[A] = CacheVal.none)(implicit
      mvars: MakeMVar[I, F]
  ): I[CacheState[F, A]] = mvars.mvarOf(initial).map(CacheStateMVar(_))

  def refIn[I[_]: Functor, F[_]: Monad, A](
      initial: CacheVal[A] = CacheVal.none
  )(implicit refs: MakeRef[I, F]): I[CacheState[F, A]] =
    refs.refOf(initial).map(CacheStateRef(_))

  def mvar[F[_]: Monad: Guarantee: MVars, A](initial: CacheVal[A] = CacheVal.none): F[CacheState[F, A]] =
    mvarIn[F, F, A](initial)

  def ref[F[_]: Monad: Refs, A](initial: CacheVal[A] = CacheVal.none): F[CacheState[F, A]] =
    refIn[F, F, A](initial)

  implicit def invariantK[A]: InvariantK[CacheState[*[_], A]] = new InvariantK[CacheState[*[_], A]] {
    def imapK[F[_], G[_]](af: CacheState[F, A])(fk: F ~> G)(gk: G ~> F): CacheState[G, A] =
      new CacheState[G, A] {
        def runOperation[B](op: CacheOperation[G, A, B]): G[B] = fk(af.runOperation(op.mapK(gk)))
        def value: G[CacheVal[A]]                              = fk(af.value)
      }
  }
}

final case class CacheStateMVar[F[_]: Monad: Guarantee, A](state: MVar[F, CacheVal[A]]) extends CacheState[F, A] {
  override def value: F[CacheVal[A]] = state.read

  override def runOperation[B](op: CacheOperation[F, A, B]): F[B] =
    for {
      cur <- state.read
      res <- op.getPureOrElse(cur)(
              state.bracketModify(fresh => op.update(fresh))
            )
    } yield res
}

final case class CacheStateRef[F[_]: Monad, A](state: Ref[F, CacheVal[A]]) extends CacheState[F, A] {
  override def value: F[CacheVal[A]] = state.get

  override def runOperation[B](op: CacheOperation[F, A, B]): F[B] =
    for {
      (cur, update) <- state.access
      res <- op.getPureOrElse(cur)(
              for {
                (newVal, res) <- op.update(cur)
                _             <- update(newVal)
              } yield res
            )
    } yield res
}
Example 10
Source File: Counter.scala From aecor with MIT License
package aecor.runtime.akkageneric

import aecor.encoding.{ KeyDecoder, KeyEncoder, WireProtocol }
import aecor.macros.boopickle.BoopickleWireProtocol
import cats.effect.Sync
import cats.effect.concurrent.Ref
import cats.implicits._
import cats.tagless.FunctorK

trait Counter[F[_]] {
  def increment: F[Long]
  def decrement: F[Long]
  def value: F[Long]
}

object Counter {
  def inmem[F[_]: Sync]: F[Counter[F]] =
    Ref[F].of(0L).map { ref =>
      new Counter[F] {
        override def increment: F[Long] = ref.update(_ + 1L) >> value
        override def decrement: F[Long] = ref.update(_ - 1L) >> value
        override def value: F[Long] = ref.get
      }
    }

  import boopickle.Default._
  implicit def wireProtocol: WireProtocol[Counter] = BoopickleWireProtocol.derive
  implicit def functorK: FunctorK[Counter] = cats.tagless.Derive.functorK
}

final case class CounterId(value: String) extends AnyVal

object CounterId {
  implicit val keyEncoder: KeyEncoder[CounterId] = KeyEncoder.anyVal
  implicit val keyDecoder: KeyDecoder[CounterId] = KeyDecoder.anyVal
}
Example 11
Source File: KafkaDistributedProcessingTest.scala From aecor with MIT License
package aecor.kafkadistributedprocessing

import cats.effect.concurrent.{ Deferred, Ref }
import cats.effect.{ ExitCase, IO }
import cats.implicits._
import fs2.concurrent.Queue
import org.apache.kafka.clients.consumer.ConsumerConfig
import org.scalatest.funsuite.AnyFunSuiteLike

import scala.concurrent.duration._

class KafkaDistributedProcessingTest extends AnyFunSuiteLike with KafkaSupport with IOSupport {

  val topicName = "process-distribution"

  createCustomTopic(topicName, partitions = 4)

  val settings =
    DistributedProcessingSettings(Set(s"localhost:${kafkaConfig.kafkaPort}"), topicName)

  test("Process error propagation") {
    val exception = new RuntimeException("Oops!")

    val result = DistributedProcessing(settings)
      .start("Process error propagation", List(IO.raiseError[Unit](exception)))
      .attempt
      .timeout(20.seconds)
      .unsafeRunSync()

    assert(result == Left(exception))
  }

  test("Process lifecycle") {
    val test = Ref.of[IO, (Boolean, Boolean)]((false, false)).flatMap { ref =>
      Deferred[IO, Unit]
        .flatMap { done =>
          val process =
            ref.set((true, false)) >> done.complete(()) >> IO.never.guaranteeCase {
              case ExitCase.Canceled => ref.set((true, true))
              case _                 => IO.unit
            }.void

          val run = DistributedProcessing(settings)
            .start("Process lifecycle", List(process))

          IO.race(run, done.get) >> ref.get
        }
    }

    val (started, finished) = test.timeout(20.seconds).unsafeRunSync()

    assert(started)
    assert(finished)
  }

  test("Process distribution") {
    val test = Queue.unbounded[IO, Int].flatMap { queue =>
      def run(client: Int) =
        DistributedProcessing(
          settings.withConsumerSetting(ConsumerConfig.CLIENT_ID_CONFIG, client.toString)
        ).start(
          "Process distribution",
          Stream
            .from(0)
            .take(8)
            .map { n =>
              val idx = client * 10 + n
              (queue.enqueue1(idx) >> IO.cancelBoundary <* IO.never)
                .guarantee(queue.enqueue1(-idx))
            }
            .toList
        )

      def dequeue(size: Long): IO[List[Int]] =
        queue.dequeue.take(size).compile.to[List]

      for {
        d1 <- run(1).start
        s1 <- dequeue(8)
        d2 <- run(2).start
        s2 <- dequeue(16)
        _ <- d1.cancel
        s3 <- dequeue(16)
        _ <- d2.cancel
        s4 <- dequeue(8)
      } yield (s1, s2, s3, s4)
    }

    val (s1, s2, s3, s4) = test.timeout(20.seconds).unsafeRunSync()

    assert(s1.toSet == Set(10, 11, 12, 13, 14, 15, 16, 17))
    assert((s1 ++ s2 ++ s3 ++ s4).sum == 0)
  }
}
Example 12
Source File: CatsEffectSpecs.scala From cats-effect-testing with Apache License 2.0
package cats.effect.testing.specs2

import cats.effect.{IO, Resource}
import cats.effect.concurrent.{Ref, Deferred}
import cats.implicits._
import org.specs2.mutable.Specification

class CatsEffectSpecs extends Specification with CatsEffect {

  "cats effect specifications" should {
    "run a non-effectful test" in {
      true must beTrue
    }

    "run a simple effectful test" in IO {
      true must beTrue
      false must beFalse
    }

    "run a simple resource test" in {
      true must beTrue
    }.pure[Resource[IO, *]]

    "resource must be live for use" in {
      Resource.make(Ref[IO].of(true))(_.set(false)).map { _.get.map(_ must beTrue) }
    }

    "really execute effects" in {
      "First, this check creates a deferred value.".br

      val deferredValue = Deferred.unsafeUncancelable[IO, Boolean]

      "Then it executes two mutually associated steps:".br.tab

      "forcibly attempt to get the deferred value" in {
        deferredValue.get.unsafeRunTimed(Timeout) must beSome(true)
      }

      "Since specs2 executes steps in parallel by default, the second step gets executed anyway.".br

      "complete the deferred value inside IO context" in {
        deferredValue.complete(true) *> IO.pure(success)
      }

      "If effects didn't get executed then the previous step would fail after timeout.".br
    }

    // "timeout a failing test" in (IO.never: IO[Boolean])
  }
}
Example 13
Source File: CatsResourceSpecs.scala From cats-effect-testing with Apache License 2.0
package cats.effect.testing.specs2

import cats.effect._
import cats.effect.concurrent.Ref
import org.specs2.mutable.Specification

class CatsResourceSpecs extends Specification with CatsResourceIO[Ref[IO, Int]] {
  sequential

  override def resource: Resource[IO, Ref[IO, Int]] =
    Resource.make(Ref[IO].of(0))(_.set(Int.MinValue))

  "cats resource specifications" should {
    "run a resource modification" in withResource { ref =>
      ref.modify { a =>
        (a + 1, a)
      }.map(_ must_=== 0)
    }

    "be shared between tests" in withResource { ref =>
      ref.modify { a =>
        (a + 1, a)
      }.map(_ must_=== 1)
    }
  }
}
Example 14
Source File: Main.scala From frdomain-extras with Apache License 2.0
package frdomain.ch6.domain
package io
package app

import cats._
import cats.data._
import cats.implicits._
import cats.instances.all._
import cats.effect._
import cats.effect.concurrent.Ref
import cats.mtl._

import squants.market._

import repository._
import service._
import service.interpreter._
import model.account._
import common._
import programs._
import Implicits._

object Main extends IOApp {

  override def run(args: List[String]): IO[ExitCode] =
    config.load[IO].flatMap { cfg =>                     // load config values
      AppResources.make[IO](cfg).use { res =>            // make resources based on config
        Algebras.make[IO](res.psql).flatMap { algebras => // make appropriate interpreters for algebras
          implicit val repositoryAsk =
            DefaultApplicativeAsk.constant[IO, AccountRepository[IO]](algebras.accountRepository)

          programNormalOps[IO](new AccountServiceInterpreter[IO], new ReportingServiceInterpreter[IO]).map { result =>
            println(result)
            ExitCode.Success
          }
        }
      }
    }
}
Example 15
Source File: AccountRepositoryInMemory.scala From frdomain-extras with Apache License 2.0
package frdomain.ch6
package domain
package repository
package interpreter

import java.time.LocalDateTime
import scala.collection.immutable.Map

import cats._
import cats.data._
import cats.implicits._
import cats.instances.all._
import cats.effect.concurrent.Ref
import cats.effect.Sync

import common._
import model.account._

// Constructor private for the interpreter to prevent the Ref from leaking
// access through smart constructor below
final class AccountRepositoryInMemory[M[_]: Monad] private (repo: Ref[M, Map[AccountNo, Account]])
    extends AccountRepository[M] {

  def query(no: AccountNo): M[Option[Account]] = repo.get.map(_.get(no))

  def store(a: Account): M[Account] = repo.update(_ + ((a.no, a))).map(_ => a)

  def query(openedOn: LocalDateTime): M[List[Account]] =
    repo.get.map(_.values.filter(_.dateOfOpen.getOrElse(today) == openedOn).toList)

  def all: M[List[Account]] = repo.get.map(_.values.toList)

  def balance(no: AccountNo): M[Option[Balance]] = query(no).map(_.map(_.balance))
}

// Smart constructor
object AccountRepositoryInMemory {
  def make[M[_]: Sync]: M[AccountRepositoryInMemory[M]] =
    Ref.of[M, Map[AccountNo, Account]](Map.empty).map(new AccountRepositoryInMemory(_))
}
Example 16
Source File: AsyncLoggerSpec.scala From odin with Apache License 2.0
package io.odin.loggers

import cats.effect.Resource
import cats.effect.concurrent.Ref
import cats.instances.list._
import cats.syntax.all._
import io.odin.{Level, Logger, LoggerMessage, OdinSpec}
import monix.catnap.ConcurrentQueue
import monix.eval.Task
import monix.execution.schedulers.TestScheduler
import io.odin.syntax._

import scala.concurrent.duration._

class AsyncLoggerSpec extends OdinSpec {
  implicit private val scheduler: TestScheduler = TestScheduler()

  case class RefLogger(ref: Ref[Task, List[LoggerMessage]]) extends DefaultLogger[Task] {
    def log(msg: LoggerMessage): Task[Unit] =
      Task.raiseError(new IllegalStateException("Async should always batch"))

    override def log(msgs: List[LoggerMessage]): Task[Unit] = {
      ref.update(_ ::: msgs)
    }
  }

  it should "push logs down the chain" in {
    forAll { msgs: List[LoggerMessage] =>
      (for {
        ref <- Resource.liftF(Ref.of[Task, List[LoggerMessage]](List.empty))
        logger <- RefLogger(ref).withMinimalLevel(Level.Trace).withAsync()
        _ <- Resource.liftF(msgs.traverse(logger.log))
        _ = scheduler.tick(10.millis)
        reported <- Resource.liftF(ref.get)
      } yield {
        reported shouldBe msgs
      }).use(Task(_)).runSyncUnsafe()
    }
  }

  it should "push logs to the queue" in {
    forAll { msgs: List[LoggerMessage] =>
      (for {
        queue <- ConcurrentQueue.unbounded[Task, LoggerMessage]()
        logger = AsyncLogger(queue, 1.millis, Logger.noop[Task])
        _ <- msgs.traverse(logger.log)
        reported <- queue.drain(0, Int.MaxValue)
      } yield {
        reported shouldBe msgs
      }).runSyncUnsafe()
    }
  }

  it should "ignore errors in underlying logger" in {
    val errorLogger = new DefaultLogger[Task] {
      def log(msg: LoggerMessage): Task[Unit] = Task.raiseError(new Error)
    }
    forAll { msgs: List[LoggerMessage] =>
      (for {
        queue <- ConcurrentQueue.unbounded[Task, LoggerMessage]()
        logger = AsyncLogger(queue, 1.millis, errorLogger)
        _ <- logger.log(msgs)
        result <- logger.drain
      } yield {
        result shouldBe (())
      }).runSyncUnsafe()
    }
  }
}
Example 17
Source File: ConditionalLoggerSpec.scala From odin with Apache License 2.0
package io.odin.extras.loggers

import cats.data.Kleisli
import cats.effect.Sync
import cats.effect.concurrent.Ref
import cats.mtl.instances.all._
import cats.syntax.applicativeError._
import cats.syntax.flatMap._
import cats.syntax.order._
import io.odin.loggers.{DefaultLogger, HasContext}
import io.odin.syntax._
import io.odin.extras.syntax._
import io.odin.{Level, LoggerMessage, OdinSpec}
import monix.eval.Task
import monix.execution.schedulers.TestScheduler

class ConditionalLoggerSpec extends OdinSpec {
  implicit private val scheduler: TestScheduler = TestScheduler()

  type F[A] = Kleisli[Task, Map[String, String], A]

  case class RefLogger(ref: Ref[F, List[LoggerMessage]]) extends DefaultLogger[F] {
    def log(msg: LoggerMessage): F[Unit] = ref.update(_ :+ msg)
  }

  implicit private val hasContext: HasContext[Map[String, String]] = (env: Map[String, String]) => env

  it should "use log level of the inner logger in case of success" in {
    forAll { (messages: List[LoggerMessage], ctx: Map[String, String]) =>
      val fa =
        for {
          ref <- Ref.of[F, List[LoggerMessage]](List.empty)
          _ <- RefLogger(ref)
                .withMinimalLevel(Level.Info)
                .withContext
                .withErrorLevel(Level.Debug)(logger => logger.log(messages))
          written <- ref.get
        } yield written

      val written = fa.run(ctx).runSyncUnsafe()
      val expected = messages.filter(_.level >= Level.Info).map(m => m.copy(context = m.context ++ ctx))

      written shouldBe expected
    }
  }

  it should "use log level of the conditional logger in case of error" in {
    forAll { (messages: List[LoggerMessage], ctx: Map[String, String]) =>
      val error = new RuntimeException("Boom")

      val fa =
        for {
          ref <- Ref.of[F, List[LoggerMessage]](List.empty)
          attempt <- RefLogger(ref)
                      .withMinimalLevel(Level.Info)
                      .withContext
                      .withErrorLevel(Level.Debug)(logger => logger.log(messages) >> Sync[F].raiseError[Unit](error))
                      .attempt
          written <- ref.get
        } yield (attempt, written)

      val (attempt, written) = fa.run(ctx).runSyncUnsafe()
      val expected = messages.filter(_.level >= Level.Debug).map(m => m.copy(context = m.context ++ ctx))

      attempt shouldBe Left(error)
      written shouldBe expected
    }
  }
}
Example 18
Source File: StubConfirmationService.scala From ticket-booking-aecor with Apache License 2.0
package ru.pavkin.booking.booking.service

import java.time.temporal.ChronoUnit
import java.time.{ Duration, Instant }
import java.util.concurrent.TimeUnit

import cats.Monad
import cats.data.NonEmptyList
import cats.effect.{ Clock, Sync }
import cats.effect.concurrent.Ref
import cats.implicits._
import ru.pavkin.booking.booking.service.TicketReservationService._
import ru.pavkin.booking.booking.service.StubConfirmationService.ConcertState
import ru.pavkin.booking.common.models._

class StubConfirmationService[F[_]: Monad](clock: Clock[F], state: Ref[F, Map[ConcertId, ConcertState]])
    extends TicketReservationService[F] {

  val expireAfter: Duration = Duration.of(6, ChronoUnit.HOURS)

  def reserve(bookingId: BookingKey,
              concertId: ConcertId,
              seats: NonEmptyList[Seat]): F[Either[ReservationFailure, Reservation]] =
    clock
      .realTime(TimeUnit.MILLISECONDS)
      .map(Instant.ofEpochMilli)
      .flatMap(
        now =>
          state.modify[Either[ReservationFailure, Reservation]](
            concerts =>
              concerts.get(concertId) match {
                case None => concerts -> Left(UnknownSeats)
                case Some(concertState) =>
                  concertState
                    .book(bookingId, seats)
                    .fold(e => concerts -> Left(e), {
                      case (c, t) =>
                        concerts.updated(concertId, c) -> Right(
                          Reservation(t, Some(now.plus(expireAfter)))
                        )
                    })
            }
        )
      )

  def release(bookingId: BookingKey): F[Either[ReleaseFailure, Unit]] =
    state.modify[Either[ReleaseFailure, Unit]](
      concerts =>
        Either
          .fromOption(concerts.find(_._2.bookedSeats.contains(bookingId)), UnknownBooking)
          .flatMap {
            case (concertId, concertState) => concertState.release(bookingId).map(concertId -> _)
          } match {
          case Left(value)                  => concerts -> Left(value)
          case Right((concertId, newState)) => concerts.updated(concertId, newState) -> Right(())
      }
    )
}

object StubConfirmationService {

  def apply[F[_]: Sync](clock: Clock[F], initial: Map[ConcertId, ConcertState]): F[StubConfirmationService[F]] =
    Ref.of(initial).map(new StubConfirmationService(clock, _))

  case class ConcertState(prices: Map[Seat, Money],
                          availableSeats: Set[Seat],
                          bookedSeats: Map[BookingKey, NonEmptyList[Seat]]) {

    def book(
      bookingId: BookingKey,
      seats: NonEmptyList[Seat]
    ): Either[ReservationFailure, (ConcertState, NonEmptyList[Ticket])] =
      if (bookedSeats.contains(bookingId)) Left(SeatsAlreadyBooked)
      else if (!seats.forall(availableSeats)) Left(SeatsAlreadyBooked)
      else if (!seats.forall(prices.contains)) Left(UnknownSeats)
      else
        Right(
          copy(
            availableSeats = availableSeats.diff(seats.toList.toSet),
            bookedSeats = bookedSeats.updated(bookingId, seats)
          ) -> seats.map(s => Ticket(s, prices(s)))
        )

    def release(bookingId: BookingKey): Either[ReleaseFailure, ConcertState] =
      bookedSeats.get(bookingId) match {
        case Some(booked) =>
          Right(
            copy(
              availableSeats = availableSeats ++ booked.toList.toSet,
              bookedSeats = bookedSeats - bookingId
            )
          )
        case None => Left(UnknownBooking)
      }
  }
}
Example 19
Source File: TestEventStreamClient.scala From nexus with Apache License 2.0
package ch.epfl.bluebrain.nexus.cli.dummies

import java.util.UUID

import cats.effect.Sync
import cats.effect.concurrent.Ref
import cats.implicits._
import ch.epfl.bluebrain.nexus.cli.clients.{EventStreamClient, ProjectClient}
import ch.epfl.bluebrain.nexus.cli.sse._
import ch.epfl.bluebrain.nexus.cli.{ClientErrOr, LabeledEvent}
import fs2.{Pipe, Stream}

class TestEventStreamClient[F[_]](events: List[Event], projectClient: ProjectClient[F])(implicit F: Sync[F])
    extends EventStreamClient[F] {

  private val noOffset: Offset = Offset(new UUID(0L, 0L))

  private val offsetEvents: Seq[(Offset, Event)] = events.map { ev =>
    (Offset(new UUID(ev.instant.toEpochMilli, 0L)), ev)
  }

  private def saveOffset(lastEventIdCache: Ref[F, Option[Offset]]): Pipe[F, (Offset, Event), Event] =
    _.evalMap { case (offset, event) => lastEventIdCache.update(_ => Some(offset)) >> F.pure(event) }

  private def eventsFrom(lastEventIdCache: Ref[F, Option[Offset]]): F[Seq[(Offset, Event)]] =
    lastEventIdCache.get.map(lastEventId =>
      offsetEvents.dropWhile {
        case (offset, _) =>
          offset.value.getMostSignificantBits <= lastEventId.getOrElse(noOffset).value.getMostSignificantBits
      }
    )

  private def eventAndLabels(event: Event): F[ClientErrOr[LabeledEvent]] =
    projectClient.labels(event.organization, event.project).map(_.map { case (org, proj) => (event, org, proj) })

  override def apply(lastEventId: Option[Offset]): F[EventStream[F]] =
    Ref.of(lastEventId).flatMap { ref =>
      val stream = eventsFrom(ref).map { events =>
        Stream.fromIterator[F](events.iterator).through(saveOffset(ref)).evalMap(eventAndLabels)
      }
      F.delay(EventStream(stream, ref))
    }

  override def apply(organization: OrgLabel, lastEventId: Option[Offset]): F[EventStream[F]] =
    Ref.of(lastEventId).flatMap { ref =>
      val stream = eventsFrom(ref).map { events =>
        Stream.fromIterator[F](events.iterator).through(saveOffset(ref)).evalMap(eventAndLabels).filter {
          case Right((_, org, _)) => org == organization
          case Left(_)            => true
        }
      }
      F.delay(EventStream(stream, ref))
    }

  override def apply(organization: OrgLabel, project: ProjectLabel, lastEventId: Option[Offset]): F[EventStream[F]] =
    Ref.of(lastEventId).flatMap { ref =>
      val stream = eventsFrom(ref).map { events =>
        Stream.fromIterator[F](events.iterator).through(saveOffset(ref)).evalMap(eventAndLabels).filter {
          case Right((_, org, proj)) => org == organization && proj == project
          case Left(_)               => true
        }
      }
      F.delay(EventStream(stream, ref))
    }
}
Example 20
Source File: ProjectClientSpec.scala From nexus with Apache License 2.0
package ch.epfl.bluebrain.nexus.cli.clients

import java.util.UUID

import cats.effect.IO
import cats.effect.concurrent.Ref
import cats.implicits._
import ch.epfl.bluebrain.nexus.cli.{AbstractCliSpec, Console}
import ch.epfl.bluebrain.nexus.cli.CliError.ClientError.ClientStatusError
import ch.epfl.bluebrain.nexus.cli.config.{AppConfig, EnvConfig}
import ch.epfl.bluebrain.nexus.cli.sse._
import ch.epfl.bluebrain.nexus.cli.utils.Http4sExtras
import izumi.distage.model.definition.ModuleDef
import org.http4s.circe.CirceEntityEncoder._
import org.http4s.client.Client
import org.http4s.dsl.io._
import org.http4s.{HttpApp, Response, Status}

class ProjectClientSpec extends AbstractCliSpec with Http4sExtras {

  private val projectJson = jsonContentOf("/templates/project.json", replacements)

  type Cache    = Map[(OrgUuid, ProjectUuid), (OrgLabel, ProjectLabel)]
  type CacheRef = Ref[IO, Cache]

  override def overrides: ModuleDef = new ModuleDef {
    include(defaultModules)
    make[Client[IO]].from { cfg: AppConfig =>
      val token = cfg.env.token
      val httpApp = HttpApp[IO] {
        case GET -> `v1` / "projects" / OrgUuidVar(`orgUuid`) / ProjectUuidVar(`projectUuid`) optbearer `token` =>
          Response[IO](Status.Ok).withEntity(projectJson).pure[IO]
        case GET -> `v1` / "projects" / OrgUuidVar(_) / ProjectUuidVar(_) optbearer `token` =>
          Response[IO](Status.NotFound).withEntity(notFoundJson).pure[IO]
        case GET -> `v1` / "projects" / OrgUuidVar(_) / ProjectUuidVar(_) bearer (_: BearerToken) =>
          Response[IO](Status.Forbidden).withEntity(authFailedJson).pure[IO]
      }
      Client.fromHttpApp(httpApp)
    }
    make[CacheRef].fromEffect {
      Ref.of[IO, Cache](Map.empty)
    }
  }

  "A ProjectClient" should {
    "resolve a known (orgUuid, projUuid) pair" in {
      (client: Client[IO], console: Console[IO], cache: CacheRef, env: EnvConfig) =>
        val cl = ProjectClient[IO](client, env, cache, console)
        for {
          labels <- cl.labels(orgUuid, projectUuid)
          _       = labels shouldEqual Right((orgLabel, projectLabel))
        } yield ()
    }
    "resolve from cache a known (orgUuid, projUuid) pair" in {
      (client: Client[IO], console: Console[IO], cache: CacheRef, env: EnvConfig) =>
        val errClient = Client.fromHttpApp(HttpApp[IO] {
          case GET -> Root => IO.pure(Response[IO](Status.NotFound))
        })
        for {
          _      <- ProjectClient[IO](client, env, cache, console).labels(orgUuid, projectUuid)
          labels <- ProjectClient[IO](errClient, env, cache, console).labels(orgUuid, projectUuid)
          _       = labels shouldEqual Right((orgLabel, projectLabel))
        } yield ()
    }
    "fail to resolve an unknown (orgUuid, projUuid) pair" in {
      (client: Client[IO], console: Console[IO], cache: CacheRef, env: EnvConfig) =>
        val cl = ProjectClient[IO](client, env, cache, console)
        for {
          labels <- cl.labels(OrgUuid(UUID.randomUUID()), projectUuid)
          _       = labels shouldEqual Left(ClientStatusError(Status.NotFound, notFoundJson.noSpaces))
        } yield ()
    }
    "fail to resolve a known (orgUuid, projUuid) pair with bad credentials" in {
      (client: Client[IO], console: Console[IO], cache: CacheRef, env: EnvConfig) =>
        val cl = ProjectClient[IO](client, env.copy(token = Some(BearerToken("bad"))), cache, console)
        for {
          labels <- cl.labels(orgUuid, projectUuid)
          _       = labels shouldEqual Left(ClientStatusError(Status.Forbidden, authFailedJson.noSpaces))
        } yield ()
    }
  }
}
Example 21
Source File: DefaultMetricsOps.scala From datadog4s with MIT License
package com.avast.datadog4s.extension.http4s.impl

import java.time.Duration

import cats.effect.Sync
import cats.effect.concurrent.Ref
import cats.syntax.flatMap._
import com.avast.datadog4s.api.MetricFactory
import com.avast.datadog4s.api.tag.Tagger
import com.avast.datadog4s.extension.http4s.DatadogMetricsOps.ClassifierTags
import com.avast.datadog4s.extension.http4s._
import com.github.ghik.silencer.silent
import org.http4s.metrics.{ MetricsOps, TerminationType }
import org.http4s.{ Method, Status }

private[http4s] class DefaultMetricsOps[F[_]](
  metricFactory: MetricFactory[F],
  classifierTags: ClassifierTags,
  activeConnectionsRef: Ref[F, ActiveConnections]
)(implicit
  F: Sync[F]
) extends MetricsOps[F] {
  private[this] val methodTagger = Tagger.make[Method]("method")
  @deprecated("please use terminationTypeTagger - this will be removed in next release 0.8.0", "0.6.3")
  private[this] val typeTagger            = Tagger.make[TerminationType]("type")
  private[this] val terminationTypeTagger = Tagger.make[TerminationType]("termination_type")
  private[this] val statusCodeTagger      = Tagger.make[Status]("status_code")
  private[this] val statusBucketTagger    = Tagger.make[String]("status_bucket")
  private[this] val activeRequests        = metricFactory.gauge.long("active_requests")

  override def increaseActiveRequests(classifier: Option[String]): F[Unit] =
    modifyActiveRequests(classifier, 0, 1)

  override def decreaseActiveRequests(classifier: Option[String]): F[Unit] =
    // if we try to decrement non existing classifier, make sure it's zero
    modifyActiveRequests(classifier, 1, -1)

  private def modifyActiveRequests(classifier: Option[String], default: Int, delta: Int): F[Unit] =
    activeConnectionsRef.modify { activeConnections =>
      val current               = activeConnections.getOrElse(classifier, default)
      val next                  = current + delta
      val nextActiveConnections = activeConnections.updated(classifier, next)
      val action                = activeRequests.set(
        next.toLong,
        classifier.toList.flatMap(classifierTags): _*
      )
      (nextActiveConnections, action)
    }.flatten

  private[this] val headersTime = metricFactory.timer("headers_time")

  override def recordHeadersTime(method: Method, elapsed: Long, classifier: Option[String]): F[Unit] =
    headersTime
      .record(
        Duration.ofNanos(elapsed),
        methodTagger.tag(method) :: classifier.toList.flatMap(classifierTags): _*
      )

  private[this] val requestCount   = metricFactory.count("requests_count")
  private[this] val requestLatency = metricFactory.timer("requests_latency")

  override def recordTotalTime(method: Method, status: Status, elapsed: Long, classifier: Option[String]): F[Unit] = {
    val tags = methodTagger.tag(method) ::
      statusBucketTagger.tag(s"${status.code / 100}xx") ::
      statusCodeTagger.tag(status) ::
      classifier.toList.flatMap(classifierTags)
    requestCount.inc(tags: _*) >> requestLatency.record(Duration.ofNanos(elapsed), tags: _*)
  }

  private[this] val abnormalCount   = metricFactory.count("abnormal_count")
  private[this] val abnormalLatency = metricFactory.timer("abnormal_latency")

  override def recordAbnormalTermination(
    elapsed: Long,
    terminationType: TerminationType,
    classifier: Option[String]
  ): F[Unit] = {
    val terminationTpe = terminationTypeTagger.tag(terminationType)
    @silent("deprecated")
    val tpe = typeTagger.tag(terminationType)
    val tags = tpe :: terminationTpe :: classifier.toList.flatMap(classifierTags)
    abnormalCount.inc(tags: _*) >> abnormalLatency.record(Duration.ofNanos(elapsed), tags: _*)
  }
}
Example 22
Source File: MicrometerHttp4sMetricsOpsModule.scala From scala-server-toolkit with MIT License
package com.avast.sst.http4s.server.micrometer

import java.util.concurrent.TimeUnit

import cats.effect.Effect
import cats.effect.concurrent.Ref
import cats.syntax.functor._
import io.micrometer.core.instrument.MeterRegistry
import org.http4s.metrics.{MetricsOps, TerminationType}
import org.http4s.{Method, Status}

object MicrometerHttp4sMetricsOpsModule {

  def make[F[_]: Effect](meterRegistry: MeterRegistry): F[MetricsOps[F]] = {
    val F = Effect[F]
    for {
      activeRequests <- Ref.of[F, Long](0L)
    } yield new MetricsOps[F] {

      private val prefix = "http.global"
      private val failureTime = meterRegistry.timer(s"$prefix.failure-time")

      meterRegistry.gauge(
        s"$prefix.active-requests",
        activeRequests,
        (_: Ref[F, Long]) => Effect[F].toIO(activeRequests.get).unsafeRunSync().toDouble
      )

      override def increaseActiveRequests(classifier: Option[String]): F[Unit] = activeRequests.update(_ + 1)

      override def decreaseActiveRequests(classifier: Option[String]): F[Unit] = activeRequests.update(_ - 1)

      override def recordHeadersTime(method: Method, elapsed: Long, classifier: Option[String]): F[Unit] = {
        F.delay(meterRegistry.timer(s"$prefix.headers-time", "method", method.name).record(elapsed, TimeUnit.NANOSECONDS))
      }

      override def recordTotalTime(method: Method, status: Status, elapsed: Long, classifier: Option[String]): F[Unit] = {
        F.delay(
          meterRegistry
            .timer(s"$prefix.total-time", "status", s"${status.code}", "status-class", s"${status.code / 100}xx")
            .record(elapsed, TimeUnit.NANOSECONDS)
        )
      }

      override def recordAbnormalTermination(elapsed: Long, terminationType: TerminationType, classifier: Option[String]): F[Unit] = {
        F.delay(failureTime.record(elapsed, TimeUnit.NANOSECONDS))
      }
    }
  }
}
Example 23
Source File: FOpsTest.scala From scala-server-toolkit with MIT License
package com.avast.sst.catseffect.syntax

import cats.effect.concurrent.Ref
import cats.effect.{Clock, IO, Timer}
import com.avast.sst.catseffect.syntax.time._
import org.scalatest.funsuite.AsyncFunSuite

import scala.concurrent.ExecutionContext
import scala.concurrent.duration.{Duration, TimeUnit}

class FOpsTest extends AsyncFunSuite {

  implicit private val timer: Timer[IO] = IO.timer(ExecutionContext.global)

  test("time") {
    val sleepTime = Duration.fromNanos(500000000)
    implicit val mockClock: Clock[IO] = new Clock[IO] {
      var values = List(0L, sleepTime.toNanos)
      override def monotonic(unit: TimeUnit): IO[Long] = {
        val time = values.head
        values = values.tail
        IO.pure(time)
      }
      override def realTime(unit: TimeUnit): IO[Long] = ???
    }

    val io = for {
      ref <- Ref.of[IO, Option[Duration]](None)
      _ <- IO.sleep(sleepTime).time(d => ref.set(Some(d)))
      result <- ref.get
    } yield assert(result.isDefined && result.get.toMillis === sleepTime.toMillis)

    io.unsafeToFuture()
  }
}
Example 24
Source File: background.scala From pfps-shopping-cart with Apache License 2.0
package shop

import cats.effect._
import cats.effect.concurrent.Ref
import shop.effects.Background

import scala.concurrent.duration.FiniteDuration

object background {

  val NoOp: Background[IO] =
    new Background[IO] {
      def schedule[A](fa: IO[A], duration: FiniteDuration): IO[Unit] = IO.unit
    }

  def counter(ref: Ref[IO, Int]): Background[IO] =
    new Background[IO] {
      def schedule[A](fa: IO[A], duration: FiniteDuration): IO[Unit] =
        ref.update(_ + 1)
    }
}
Example 25
Source File: logger.scala From pfps-shopping-cart with Apache License 2.0
package shop

import cats.effect.IO
import cats.effect.concurrent.Ref
import io.chrisdavenport.log4cats.Logger

object logger {

  implicit object NoOp extends NoLogger

  def acc(ref: Ref[IO, List[String]]): Logger[IO] =
    new NoLogger {
      override def error(message: => String): IO[Unit] =
        ref.update(xs => message :: xs)
    }

  private[logger] class NoLogger extends Logger[IO] {
    def warn(message: => String): IO[Unit]                 = IO.unit
    def warn(t: Throwable)(message: => String): IO[Unit]   = IO.unit
    def debug(t: Throwable)(message: => String): IO[Unit]  = IO.unit
    def debug(message: => String): IO[Unit]                = IO.unit
    def error(t: Throwable)(message: => String): IO[Unit]  = IO.unit
    def error(message: => String): IO[Unit]                = IO.unit
    def info(t: Throwable)(message: => String): IO[Unit]   = IO.unit
    def info(message: => String): IO[Unit]                 = IO.unit
    def trace(t: Throwable)(message: => String): IO[Unit]  = IO.unit
    def trace(message: => String): IO[Unit]                = IO.unit
  }
}
Example 26
Source File: Server.scala From typed-schema with Apache License 2.0
package ru.tinkoff.tschema.example

import cats.effect.ExitCode
import cats.effect.concurrent.Ref
import cats.instances.list._
import cats.syntax.foldable._
import cats.syntax.semigroupk._
import com.twitter.finagle
import com.twitter.finagle.http.Response
import com.twitter.util.{Await, Duration}
import monix.eval.{Task, TaskApp}
import ru.tinkoff.tschema.example.sample.SampleModule
import ru.tinkoff.tschema.finagle.RunHttp
import tofu.Void

object Server extends TaskApp {
  def modules[H[_]]: List[ExampleModule[Http]] =
    List(
      new Greeting[Http, Example](),
      new SampleModule[Http, Example](),
      new FiltersModule(),
      new FormFieldsModule(),
      new MultiParameters(),
      new ProxyModule(),
      new VersionModule(),
      new Authorize
    )

  val svc: Http[Response] = modules.foldMapK(_.route) <+> ExampleSwagger.route

  val server = for {
    srv <- RunHttp.run[Example](svc)
    list <- Example.delay(finagle.Http.serve("0.0.0.0:9191", srv))
    _ <- Example.delay(println(s"started at ${list.boundAddress}"))
    _ <- Example.delay(Await.ready(list, Duration.Top)).fork
    res <- Example.fromTask(Task.never[Void])
  } yield res

  def run(args: List[String]): Task[ExitCode] =
    for {
      ref <- Ref[Task].of(0)
      _ <- server
            .onErrorHandle(ex => println(ex.getMessage))
            .run(ExampleEnv("lol", ref))
    } yield ExitCode.Success
}
Example 27
Source File: ExampleEnv.scala From typed-schema with Apache License 2.0
package ru.tinkoff.tschema.example

import cats.effect.concurrent.Ref
import monix.eval.Task
import ru.tinkoff.tschema.finagle.routing._
import tofu.env.EnvSpecializedFunctions

final case class ExampleEnv(trackingId: String, alohas: Ref[Task, Int])

trait AlohasState[F[_]] {
  def incrementAlohas(): F[Int]
}

object ExampleEnv {
  implicit val alohasState: AlohasState[Example] = () => Example(_.alohas.modify(i => (i + 1, i + 1)))
}

object Example extends EnvSpecializedFunctions[ExampleEnv]
object Http    extends EnvSpecializedFunctions[EnvRouting[ExampleEnv]]
Example 28
Source File: Server.scala From typed-schema with Apache License 2.0
package ru.tinkoff.tschema.example

import Swagger._
import cats.effect.ExitCode
import cats.effect.concurrent.Ref
import cats.instances.list._
import cats.syntax.foldable._
import cats.syntax.semigroupk._
import com.twitter.finagle
import finagle.{Http, http}
import com.twitter.finagle.http.Response
import com.twitter.util.{Await, Duration}
import monix.eval.{Task, TaskApp}
import ru.tinkoff.tschema.finagle.Runnable
import tofu.Void

object Server extends TaskApp {
  implicit val greeting: Greeting[Example] = Greeting[Example]

  val modules: List[ExampleModule] =
    List(GreetingModule[Example, Http](),
         TestModule,
         FiltersModule,
         FormFieldsModule,
         MultiParameters,
         ProxyModule,
         VersionModule,
         Authorize)

  val svc: Http[Response] = modules.foldMapK(_.route) <+> Swagger.route

  val server = for {
    srv <- Runnable.run[Example](svc)
    list <- Example.delay(finagle.Http.serve("0.0.0.0:9191", srv))
    _ <- Example.delay(println(s"started at ${list.boundAddress}"))
    _ <- Example.delay(Await.ready(list, Duration.Top)).fork
    res <- Example.fromTask(Task.never[Void])
  } yield res

  def run(args: List[String]): Task[ExitCode] =
    for {
      ref <- Ref[Task].of(0)
      _ <- server
            .onErrorHandle(ex => println(ex.getMessage))
            .run(ExampleEnv("lol", ref))
    } yield ExitCode.Success
}
Example 29
Source File: AutoFetchingCacheSpec.scala From mules with MIT License
package io.chrisdavenport.mules.reload

import cats.effect.IO
import cats.effect.concurrent.Ref
import cats.implicits._
import io.chrisdavenport.mules._
import org.specs2.mutable.Specification

import scala.concurrent.ExecutionContext
import scala.concurrent.duration._

class AutoFetchingCacheSpec extends Specification {

  implicit val ctx = IO.contextShift(ExecutionContext.global)
  implicit val timer = IO.timer(ExecutionContext.Implicits.global)

  "AutoFetchingCache" should {
    "get a value in a quicker period than the timeout" in {
      val setup = for {
        count <- Ref.of[IO, Int](0)
        cache <- AutoFetchingCache.createCache[IO, String, Int](Some(TimeSpec.unsafeFromDuration(1.second)), None)(_ =>
          count.update(_ + 1).as(1)
        )
        value <- cache.lookupCurrent("Foo")
        cValue <- count.get
      } yield (cValue, value)
      setup.unsafeRunSync must_=== ((1, 1))
    }

    "refetch value after expiration timeout" in {
      val setup = for {
        count <- Ref.of[IO, Int](0)
        cache <- AutoFetchingCache.createCache[IO, String, Int](Some(TimeSpec.unsafeFromDuration(1.second)), None)(_ =>
          count.update(_ + 1).as(1)
        )
        _ <- cache.lookupCurrent("Foo")
        _ <- timer.sleep(2.seconds)
        value <- cache.lookupCurrent("Foo")
        cValue <- count.get
      } yield (cValue, value)
      setup.unsafeRunSync must_=== ((2, 1))
    }

    "refetch value after autoReload timeout" in {
      val setup = for {
        count <- Ref.of[IO, Int](0)
        cache <- AutoFetchingCache.createCache[IO, String, Int](None,
          Some(AutoFetchingCache.RefreshConfig(TimeSpec.unsafeFromDuration(500.milliseconds))))(_ =>
          count.update(_ + 1).as(1)
        )
        _ <- cache.lookupCurrent("Foo")
        _ <- timer.sleep(2.seconds)
        value <- cache.lookupCurrent("Foo")
        cValue <- count.get
      } yield (cValue, value)

      val (cValue, value) = setup.unsafeRunSync
      (value must_=== 1).and(cValue >= 4)
    }

    "refetch value after autoReload timeout and before default expiration" in {
      val setup = for {
        count <- Ref.of[IO, Int](0)
        cache <- AutoFetchingCache.createCache[IO, String, Int](
          TimeSpec.fromDuration(3.second),
          Some(AutoFetchingCache.RefreshConfig(TimeSpec.unsafeFromDuration(500.milliseconds))))(_ =>
          count.update(_ + 1) *> count.get
        )
        _ <- cache.lookupCurrent("Foo")
        _ <- timer.sleep(2.seconds)
        value <- cache.lookupCurrent("Foo")
        cValue <- count.get
      } yield (cValue, value)

      val (cValue, value) = setup.unsafeRunSync
      (value must be >= 4).and(cValue >= 4)
    }
  }
}
Example 30
Source File: MockMetricsFactory.scala From datadog4s with MIT License
package com.avast.cloud.datadog4s.inmemory

import java.time.Duration

import cats.effect.Sync
import cats.effect.concurrent.Ref
import cats.syntax.flatMap._
import cats.syntax.functor._
import com.avast.datadog4s.api.metric._
import com.avast.datadog4s.api.{ DistributionFactory, GaugeFactory, HistogramFactory, MetricFactory, Tag }

class MockMetricsFactory[F[_]: Sync](val state: Ref[F, Map[String, Vector[Record[Any]]]]) extends MetricFactory[F] {

  private def updateState[A](aspect: String, value: A, tags: Tag*): F[Unit] =
    state.update { oldState =>
      val updatedField = oldState.getOrElse(aspect, Vector.empty) :+ Record[Any](value, tags)
      oldState.updated(aspect, updatedField)
    }.void

  override def histogram: HistogramFactory[F] = new HistogramFactory[F] {
    override def long(aspect: String, sampleRate: Option[Double]): Histogram[F, Long] = new Histogram[F, Long] {
      override def record(value: Long, tags: Tag*): F[Unit] =
        updateState(aspect, value, tags: _*)
    }

    override def double(aspect: String, sampleRate: Option[Double]): Histogram[F, Double] = new Histogram[F, Double] {
      override def record(value: Double, tags: Tag*): F[Unit] =
        updateState(aspect, value, tags: _*)
    }
  }

  override def gauge: GaugeFactory[F] = new GaugeFactory[F] {
    override def long(aspect: String, sampleRate: Option[Double]): Gauge[F, Long] = new Gauge[F, Long] {
      override def set(value: Long, tags: Tag*): F[Unit] = updateState(aspect, value, tags: _*)
    }

    override def double(aspect: String, sampleRate: Option[Double]): Gauge[F, Double] = new Gauge[F, Double] {
      override def set(value: Double, tags: Tag*): F[Unit] = updateState(aspect, value, tags: _*)
    }
  }

  override def timer(aspect: String, sampleRate: Option[Double]): Timer[F] = new Timer[F] {
    override def time[A](f: F[A], tags: Tag*): F[A] =
      f.flatMap(a => updateState(aspect, a, tags: _*).as(a))

    override def record(duration: Duration, tags: Tag*): F[Unit] =
      updateState[Duration](aspect, duration, tags: _*)
  }

  override def count(aspect: String, sampleRate: Option[Double]): Count[F] = new Count[F] {
    override def modify(delta: Int, tags: Tag*): F[Unit] = updateState(aspect, delta, tags: _*)
  }

  override def uniqueSet(aspect: String): UniqueSet[F] = new UniqueSet[F] {
    override def record(value: String, tags: Tag*): F[Unit] = updateState(aspect, value, tags: _*)
  }

  override def distribution: DistributionFactory[F] = new DistributionFactory[F] {
    override def long(aspect: String, sampleRate: Option[Double]): Distribution[F, Long] = new Distribution[F, Long] {
      override def record(value: Long, tags: Tag*): F[Unit] = updateState(aspect, value, tags: _*)
    }

    override def double(aspect: String, sampleRate: Option[Double]): Distribution[F, Double] = new Distribution[F, Double] {
      override def record(value: Double, tags: Tag*): F[Unit] = updateState(aspect, value, tags: _*)
    }
  }

  override def withTags(tags: Tag*): MetricFactory[F] = this

  override def withScope(name: String): MetricFactory[F] = this
}

object MockMetricsFactory {
  def make[F[_]: Sync]: F[MockMetricsFactory[F]] =
    Ref.of(Map.empty[String, Vector[Record[Any]]]).map(state => new MockMetricsFactory[F](state))
}
Example 31
Source File: EventStream.scala From nexus with Apache License 2.0
package ch.epfl.bluebrain.nexus.cli.sse

import cats.effect.concurrent.Ref
import ch.epfl.bluebrain.nexus.cli.{ClientErrOr, LabeledEvent}
import fs2.Stream

// NOTE: the trait header was lost when this listing was extracted; the two
// methods below are assumed from the companion object that follows.
trait EventStream[F[_]] {
  def value: F[Stream[F, ClientErrOr[LabeledEvent]]]
  def currentEventId(): F[Option[Offset]]
}

object EventStream {

  final def apply[F[_]](
      stream: F[Stream[F, ClientErrOr[LabeledEvent]]],
      ref: Ref[F, Option[Offset]]
  ): EventStream[F] =
    new EventStream[F] {
      override def value: F[Stream[F, ClientErrOr[LabeledEvent]]] = stream
      override def currentEventId(): F[Option[Offset]]            = ref.get
    }
}
Example 32
Source File: AsyncTests.scala From cats-effect with Apache License 2.0
package cats.effect

import cats.Eq
import cats.effect.concurrent.Ref
import cats.effect.implicits._
import cats.implicits._
import org.scalatest.compatible.Assertion
import org.scalatest.funsuite.AsyncFunSuite
import org.scalatest.Succeeded
import org.scalatest.matchers.should.Matchers

import scala.concurrent.duration._
import scala.concurrent.{ExecutionContext, Future}

class AsyncTests extends AsyncFunSuite with Matchers {
  implicit override def executionContext: ExecutionContext = ExecutionContext.Implicits.global
  implicit val timer: Timer[IO] = IO.timer(executionContext)
  implicit val cs: ContextShift[IO] = IO.contextShift(executionContext)

  private val smallDelay: IO[Unit] = timer.sleep(20.millis)

  private def awaitEqual[A: Eq](t: IO[A], success: A): IO[Unit] =
    t.flatMap(a => if (Eq[A].eqv(a, success)) IO.unit else smallDelay *> awaitEqual(t, success))

  private def run(t: IO[Unit]): Future[Assertion] = t.as(Succeeded).unsafeToFuture()

  test("F.parTraverseN(n)(collection)(f)") {
    val finalValue = 100
    val r = Ref.unsafe[IO, Int](0)
    val list = List.range(0, finalValue)
    val modifies = implicitly[Async[IO]].parTraverseN(3)(list)(_ => IO.shift *> r.update(_ + 1))
    run(IO.shift *> modifies.start *> awaitEqual(r.get, finalValue))
  }

  test("F.parSequenceN(n)(collection)") {
    val finalValue = 100
    val r = Ref.unsafe[IO, Int](0)
    val list = List.fill(finalValue)(IO.shift *> r.update(_ + 1))
    val modifies = implicitly[Async[IO]].parSequenceN(3)(list)
    run(IO.shift *> modifies.start *> awaitEqual(r.get, finalValue))
  }
}
Example 33
Source File: ConcurrentTests.scala From cats-effect with Apache License 2.0
package cats.effect

import cats.Eq
import cats.effect.concurrent.Ref
import cats.effect.implicits._
import cats.implicits._
import org.scalatest.compatible.Assertion
import org.scalatest.funsuite.AsyncFunSuite
import org.scalatest.Succeeded
import org.scalatest.matchers.should.Matchers

import scala.concurrent.duration._
import scala.concurrent.{ExecutionContext, Future}

class ConcurrentTests extends AsyncFunSuite with Matchers {
  implicit override def executionContext: ExecutionContext = ExecutionContext.Implicits.global
  implicit val timer: Timer[IO] = IO.timer(executionContext)
  implicit val cs: ContextShift[IO] = IO.contextShift(executionContext)

  private val smallDelay: IO[Unit] = timer.sleep(20.millis)

  private def awaitEqual[A: Eq](t: IO[A], success: A): IO[Unit] =
    t.flatMap(a => if (Eq[A].eqv(a, success)) IO.unit else smallDelay *> awaitEqual(t, success))

  private def run(t: IO[Unit]): Future[Assertion] = t.as(Succeeded).unsafeToFuture()

  test("F.parTraverseN(n)(collection)(f)") {
    val finalValue = 100
    val r = Ref.unsafe[IO, Int](0)
    val list = List.range(0, finalValue)
    val modifies = implicitly[Concurrent[IO]].parTraverseN(3)(list)(_ => IO.shift *> r.update(_ + 1))
    run(IO.shift *> modifies.start *> awaitEqual(r.get, finalValue))
  }

  test("collection.parTraverseN(n)(f)") {
    val finalValue = 100
    val r = Ref.unsafe[IO, Int](0)
    val list = List.range(0, finalValue)
    val modifies = list.parTraverseN(3)(_ => IO.shift *> r.update(_ + 1))
    run(IO.shift *> modifies.start *> awaitEqual(r.get, finalValue))
  }

  test("F.parSequenceN(n)(collection)") {
    val finalValue = 100
    val r = Ref.unsafe[IO, Int](0)
    val list = List.fill(finalValue)(IO.shift *> r.update(_ + 1))
    val modifies = implicitly[Concurrent[IO]].parSequenceN(3)(list)
    run(IO.shift *> modifies.start *> awaitEqual(r.get, finalValue))
  }

  test("collection.parSequenceN(n)") {
    val finalValue = 100
    val r = Ref.unsafe[IO, Int](0)
    val list = List.fill(finalValue)(IO.shift *> r.update(_ + 1))
    val modifies = list.parSequenceN(3)
    run(IO.shift *> modifies.start *> awaitEqual(r.get, finalValue))
  }
}
Example 34
Source File: package.scala From origami with MIT License | 5 votes |
package org.atnos.origami.addon.fs2 import org.atnos.origami.Fold import fs2.Stream import cats.syntax.functor._ import cats.syntax.flatMap._ import cats.effect.Sync import cats.effect.concurrent.Ref package object stream { outer => def scanEval[F[_]: Sync, S, A](p: Stream[F, A])(start: F[S])(f: (S, A) => F[S]): Stream[F, S] = { def zipper(ref: Ref[F, S]): Stream[F, S] = p.zip(Stream.eval(ref.get).repeat).evalMap { case (a, s) => for { ns <- f(s, a) _ <- ref.set(ns) } yield ns } for { st <- Stream.eval(start) ref <- Stream.eval(Ref.of[F, S](st)) rs <- zipper(ref) } yield rs } implicit class StreamSyntax[F[_]: Sync, A](p: Stream[F, A]) { def scanEval[S](start: F[S])(f: (S, A) => F[S]): Stream[F, S] = outer.scanEval(p)(start)(f) def foldWith[B](fold: Fold[F, A, B]): F[B] = { p.scanEval(fold.start)(fold.fold).compile.last.flatMap { case Some(s) => fold.end(s) case None => fold.start.flatMap(fold.end) } } } }
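A minimal usage sketch of the scanEval syntax above (not part of the origami sources), assuming fs2 2.x and cats-effect 2.x: the Ref created inside scanEval carries the accumulator between stream elements, so the output is the stream of running sums.

import cats.effect.IO
import fs2.Stream
import org.atnos.origami.addon.fs2.stream._

// running sum over a pure stream; each emitted value is the updated state
val sums: IO[List[Int]] =
  Stream.emits(List(1, 2, 3)).covary[IO]
    .scanEval(IO.pure(0))((s, a) => IO.pure(s + a))
    .compile
    .toList
// expected: List(1, 3, 6)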
Example 35
Source File: MockEnv.scala From polynote with Apache License 2.0 | 5 votes |
package polynote.testing.kernel import cats.effect.concurrent.Ref import fs2.Stream import fs2.concurrent.{Queue, SignallingRef, Topic} import polynote.config.PolynoteConfig import polynote.kernel.Kernel.Factory import polynote.kernel.environment.{CurrentNotebook, CurrentRuntime, NotebookUpdates} import polynote.kernel.interpreter.{CellExecutor, Interpreter} import polynote.kernel.logging.Logging import polynote.kernel.task.TaskManager import polynote.kernel.util.Publish import polynote.kernel.{BaseEnv, CellEnv, GlobalEnv, InterpreterEnv, KernelStatusUpdate, NotebookRef, Result, StreamingHandles, TaskInfo} import polynote.messages._ import polynote.runtime.{KernelRuntime, StreamingDataRepr, TableOp} import polynote.testing.MockPublish import zio.blocking.Blocking import zio.clock.Clock import zio.interop.catz._ import zio.{Has, RIO, Runtime, Task, URIO, ZIO, ZLayer} case class MockEnv( baseEnv: BaseEnv, cellID: CellID, currentTask: SignallingRef[Task, TaskInfo], publishResult: MockPublish[Result], publishStatus: MockPublish[KernelStatusUpdate], runtime: Runtime[Any] ) { val currentRuntime: KernelRuntime = runtime.unsafeRun(CurrentRuntime.from(cellID, publishResult, publishStatus, currentTask)) val logging: Logging.Service = new Logging.Service.Default(System.err, baseEnv.get[Blocking.Service]) val baseLayer: ZLayer[Any, Nothing, BaseEnv with InterpreterEnv] = ZLayer.succeedMany(baseEnv) ++ ZLayer.succeed(logging) ++ ZLayer.succeed(currentRuntime) ++ ZLayer.succeed(publishResult: Publish[Task, Result]) ++ ZLayer.succeed(publishStatus: Publish[Task, KernelStatusUpdate]) ++ ZLayer.succeed(currentTask: Ref[Task, TaskInfo]) def toCellEnv(classLoader: ClassLoader): ZLayer[Any, Throwable, BaseEnv with InterpreterEnv] = baseLayer ++ (baseLayer >>> CellExecutor.layer(classLoader)) } object MockEnv { def init: ZLayer[BaseEnv, Nothing, BaseEnv with InterpreterEnv] = ZLayer.fromManagedMany(MockEnv(-1).toManaged_.flatMap(_.baseLayer.build)) def apply(cellID: Int): URIO[BaseEnv, MockEnv] = for { env <- ZIO.access[BaseEnv](identity) runtime <- ZIO.runtime[Any] currentTask <- SignallingRef[Task, TaskInfo](TaskInfo(s"Cell$cellID")).orDie } yield new MockEnv(env, CellID(cellID), currentTask, new MockPublish, new MockPublish, runtime) def layer(cellID: Int): ZLayer[BaseEnv, Nothing, BaseEnv with InterpreterEnv] = ZLayer.fromManagedMany(MockEnv(cellID).toManaged_.flatMap(_.baseLayer.build)) type Env = BaseEnv with GlobalEnv with CellEnv with StreamingHandles with NotebookUpdates } case class MockKernelEnv( baseEnv: BaseEnv, kernelFactory: Factory.Service, publishResult: MockPublish[Result], publishStatus: MockPublish[KernelStatusUpdate], interpreterFactories: Map[String, List[Interpreter.Factory]], taskManager: TaskManager.Service, updateTopic: Topic[Task, Option[NotebookUpdate]], currentNotebook: MockNotebookRef, streamingHandles: StreamingHandles.Service, sessionID: Int = 0, polynoteConfig: PolynoteConfig = PolynoteConfig() ) { val logging: Logging.Service = new Logging.Service.Default(System.err, baseEnv.get[Blocking.Service]) val notebookUpdates: Stream[Task, NotebookUpdate] = updateTopic.subscribe(128).unNone val baseLayer: ZLayer[Any, Nothing, MockEnv.Env] = ZLayer.succeedMany { baseEnv ++ Has.allOf(kernelFactory, interpreterFactories, taskManager, notebookUpdates, polynoteConfig) ++ Has(streamingHandles) ++ Has(publishResult: Publish[Task, Result]) ++ Has(publishStatus: Publish[Task, KernelStatusUpdate]) } ++ CurrentNotebook.layer(currentNotebook) } object MockKernelEnv { def apply(kernelFactory: 
Factory.Service, config: PolynoteConfig, sessionId: Int): RIO[BaseEnv, MockKernelEnv] = for { baseEnv <- ZIO.access[BaseEnv](identity) currentNotebook <- MockNotebookRef(Notebook("empty", ShortList.Nil, None)) updateTopic <- Topic[Task, Option[NotebookUpdate]](None) publishUpdates = new MockPublish[KernelStatusUpdate] taskManager <- TaskManager(publishUpdates) handles <- StreamingHandles.make(sessionId) } yield new MockKernelEnv(baseEnv, kernelFactory, new MockPublish, publishUpdates, Map.empty, taskManager, updateTopic, currentNotebook, handles, handles.sessionID, config) def apply(kernelFactory: Factory.Service, sessionId: Int): RIO[BaseEnv, MockKernelEnv] = apply(kernelFactory, PolynoteConfig(), sessionId) def apply(kernelFactory: Factory.Service, config: PolynoteConfig): RIO[BaseEnv, MockKernelEnv] = apply(kernelFactory, config, 0) def apply(kernelFactory: Factory.Service): RIO[BaseEnv, MockKernelEnv] = apply(kernelFactory, 0) }
Example 36
Source File: package.scala From polynote with Apache License 2.0 | 5 votes |
package polynote.kernel import cats.effect.concurrent.Ref import fs2.Stream import polynote.app.{Args, MainArgs} import polynote.config.PolynoteConfig import polynote.kernel.util.Publish import polynote.messages.{Message, Notebook, NotebookUpdate} import polynote.runtime.KernelRuntime import zio.{Has, Task, ZLayer} package object environment { type Config = Has[PolynoteConfig] type PublishStatus = Has[Publish[Task, KernelStatusUpdate]] type PublishResult = Has[Publish[Task, Result]] type PublishMessage = Has[Publish[Task, Message]] type CurrentTask = Has[Ref[Task, TaskInfo]] type CurrentRuntime = Has[KernelRuntime] type CurrentNotebook = Has[NotebookRef] type NotebookUpdates = Has[Stream[Task, NotebookUpdate]] }
Example 37
Source File: EffectInstancesLawsSuite.scala From meow-mtl with MIT License | 5 votes |
package com.olegpy.meow.effects import cats.effect.IO import cats.effect.concurrent.Ref import cats.effect.laws.util.TestContext import cats.implicits._ import cats.effect.laws.discipline.arbitrary._ import cats.effect.laws.util.TestInstances._ import cats.mtl.laws.discipline._ import minitest.SimpleTestSuite import minitest.laws.Checkers import org.typelevel.discipline.Laws import scala.concurrent.duration._ object EffectInstancesLawsSuite extends SimpleTestSuite with Checkers { private def checkAll(name: String)(ruleSet: TestContext => Laws#RuleSet) = { implicit val ctx = TestContext() for ((id, prop) <- ruleSet(ctx).all.properties) test(name + "." + id) { ctx.tick(1.day) check(prop) } } checkAll("Ref.runAsk") { implicit ctx => Ref.unsafe[IO, Int](0).runAsk(ev => ApplicativeAskTests(ev).applicativeAsk[Int] ) } checkAll("Ref.runState") { implicit ctx => Ref.unsafe[IO, Int](0).runState(ev => MonadStateTests(ev).monadState[Int] ) } checkAll("Ref.runTell") { implicit ctx => Ref.unsafe[IO, Int](0).runTell(ev => FunctorTellTests(ev).functorTell[Int] ) } checkAll("Consumer.runTell") { implicit ctx => case object DummyErr extends Throwable def fun(x: Int) = if (x == 1) IO.raiseError[Unit](DummyErr) else IO.unit Consumer(fun _).runTell(ev => FunctorTellTests(ev).functorTell[Int] ) } }
Example 38
Source File: CatsEffectMtlInstances.scala From meow-mtl with MIT License | 5 votes |
package com.olegpy.meow.internal import cats.effect.concurrent.Ref import cats.kernel.Semigroup import cats.mtl._ import cats.syntax.functor._ import cats.syntax.semigroup._ import cats.{Applicative, Functor, Monad} private[meow] object CatsEffectMtlInstances { class RefMonadState[F[_]: Monad, S](ref: Ref[F, S]) extends MonadState[F, S] { val monad: Monad[F] = implicitly def get: F[S] = ref.get def set(s: S): F[Unit] = ref.set(s) def inspect[A](f: S => A): F[A] = ref.get.map(f) def modify(f: S => S): F[Unit] = ref.update(f) } class RefFunctorTell[F[_]: Functor, L: Semigroup](ref: Ref[F, L]) extends FunctorTell[F, L] with DefaultFunctorTell[F, L] { val functor: Functor[F] = implicitly def tell(l: L): F[Unit] = ref.update(_ |+| l) } class RefApplicativeAsk[F[_]: Applicative, S](ref: Ref[F, S]) extends ApplicativeAsk[F, S] with DefaultApplicativeAsk[F, S] { val applicative: Applicative[F] = implicitly def ask: F[S] = ref.get } }
Example 39
Source File: AsyncHttpClientPipedFs2WebsocketsTest.scala From sttp with Apache License 2.0 | 5 votes |
package sttp.client.asynchttpclient.fs2 import cats.effect.concurrent.Ref import cats.effect.IO import cats.implicits._ import fs2._ import sttp.client._ import sttp.client.asynchttpclient.WebSocketHandler import sttp.client.impl.cats.CatsTestBase import sttp.client.impl.fs2.Fs2WebSockets import sttp.client.testing.ToFutureWrapper import sttp.client.ws.WebSocket import sttp.model.ws.WebSocketFrame import sttp.client.testing.HttpTest.wsEndpoint import scala.collection.immutable.Queue import org.scalatest.flatspec.AsyncFlatSpec import org.scalatest.matchers.should.Matchers class AsyncHttpClientPipedFs2WebsocketsTest extends AsyncFlatSpec with Matchers with ToFutureWrapper with CatsTestBase { implicit val backend: SttpBackend[IO, Nothing, WebSocketHandler] = AsyncHttpClientFs2Backend[IO]().unsafeRunSync() def createHandler: Option[Int] => IO[WebSocketHandler[WebSocket[IO]]] = Fs2WebSocketHandler[IO](_) it should "run a simple echo pipe" in { basicRequest .get(uri"$wsEndpoint/ws/echo") .openWebsocketF(createHandler(None)) .product(Ref.of[IO, Queue[String]](Queue.empty)) .flatMap { case (response, results) => Fs2WebSockets.handleSocketThroughTextPipe(response.result) { in => val receive = in.evalMap(m => results.update(_.enqueue(m))) val send = Stream("Message 1".asRight, "Message 2".asRight, WebSocketFrame.close.asLeft) send merge receive.drain } >> results.get.map(_ should contain theSameElementsInOrderAs List("echo: Message 1", "echo: Message 2")) } .toFuture() } it should "run a simple read-only client" in { basicRequest .get(uri"$wsEndpoint/ws/send_and_wait") .openWebsocketF(createHandler(None)) .product(Ref.of[IO, Queue[String]](Queue.empty)) .flatMap { case (response, results) => Fs2WebSockets.handleSocketThroughTextPipe(response.result) { in => in.evalMap(m => results.update(_.enqueue(m)).flatMap(_ => results.get.map(_.size))).flatMap { case 2 => Stream(None) // terminating the stream case _ => Stream.empty // waiting for more messages }.unNoneTerminate } >> results.get.map(_ should contain theSameElementsInOrderAs List("test10", "test20")) } .toFuture() } }
Example 40
Source File: Namer.scala From skunk with MIT License | 5 votes |
// Copyright (c) 2018-2020 by Rob Norris // This software is licensed under the MIT License (MIT). // For more information see LICENSE or https://opensource.org/licenses/MIT package skunk.util import cats.effect.Sync import cats.effect.concurrent.Ref import cats.implicits._ trait Namer[F[_]] { def nextName(prefix: String): F[String] } object Namer { def apply[F[_]: Sync]: F[Namer[F]] = Ref[F].of(1).map { ctr => new Namer[F] { override def nextName(prefix: String): F[String] = ctr.modify(n => (n + 1, s"${prefix}_$n")) } } }
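A small usage sketch (not from the skunk sources): each nextName call runs Ref.modify, bumping the counter and embedding the previous value in the generated name.

import cats.effect.IO
import cats.implicits._
import skunk.util.Namer

// two sequential names share the counter held by the Ref
val names: IO[(String, String)] =
  Namer[IO].flatMap { namer =>
    (namer.nextName("stmt"), namer.nextName("stmt")).tupled
  }
// running this yields ("stmt_1", "stmt_2")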
Example 41
Source File: BroadcastSpec.scala From canoe with MIT License | 5 votes |
package canoe.api import canoe.TestIO._ import cats.effect.IO import fs2.{Pipe, Stream} import org.scalatest.freespec.AnyFreeSpec import cats.effect.concurrent.Ref import scala.concurrent.duration._ class BroadcastSpec extends AnyFreeSpec { def broadcast[A]: Stream[IO, Broadcast[IO, A]] = Stream.eval(Broadcast[IO, A]) def recordPulled[A](b: Broadcast[IO, A], duration: FiniteDuration): Pipe[IO, A, List[A]] = input => Stream.eval(Ref[IO].of(List.empty[A])).flatMap { ref => input .evalTap(i => ref.update(i :: _)) .through(b.publish) .drain .interruptAfter(duration) .append(Stream.eval(ref.get)) } "Broadcast" - { val input = Stream.range(1, 100) "subscriber" - { "sees all elements after subscription" in { val res = broadcast[Int].flatMap { b => val pop = Stream.sleep_(0.05.second) ++ input.through(b.publish) val sub = b.subscribe(1).take(input.size()) sub.concurrently(pop) } assert(res.toList() == input.toList) } "is deregistered after it is done pulling" in { val pulled = broadcast[Int].flatMap { b => val pop = Stream.sleep_(0.1.second) ++ input.through(recordPulled(b, 1.second)) val consumer = b.subscribe(1).metered(0.1.second).take(5) pop.concurrently(consumer) } assert(pulled.value() == input.toList.reverse) } } "pulls from publisher" - { "one element before it's blocked by the subscriber" in { val pulled = broadcast[Int].flatMap { b => val pop = Stream.sleep_(0.05.second) ++ input.through(recordPulled(b, 0.2.second)) val consumer = b.subscribe(0).evalMap(_ => IO.never) pop.concurrently(consumer) } assert(pulled.value() == input.head.toList) } "maxQueued + 2 elements for non-empty blocking consumer" in { val maxQueued = 3 val pulled = broadcast[Int].flatMap { b => val pop = Stream.sleep_(0.05.second) ++ input.through(recordPulled(b, 0.2.second)) val consumer = b.subscribe(maxQueued).evalMap(_ => IO.never) pop.concurrently(consumer) } assert(pulled.value() == input.take(maxQueued + 2).toList.reverse) } "all elements" - { "for non-blocking consumer" in { val pulled = broadcast[Int].flatMap { b => val pop = input.through(recordPulled(b, 0.2.second)) val consumer = b.subscribe(1) pop.concurrently(consumer) } assert(pulled.value() == input.toList.reverse) } "for no consumer" in { val pulled = broadcast[Int].flatMap { b => input.through(recordPulled(b, 0.2.second)) } assert(pulled.value() == input.toList.reverse) } } } } }
Example 42
Source File: ResourceRegistry.scala From kafka-journal with MIT License | 5 votes |
package com.evolutiongaming.kafka.journal.util import cats.effect._ import cats.effect.concurrent.Ref import cats.effect.implicits._ import cats.implicits._ import cats.Applicative trait ResourceRegistry[F[_]] { def allocate[A](resource: Resource[F, A]): F[A] } object ResourceRegistry { def of[F[_] : Concurrent]: Resource[F, ResourceRegistry[F]] = { implicit val monoidUnit = Applicative.monoid[F, Unit] val result = for { releases <- Ref.of[F, List[F[Unit]]](List.empty[F[Unit]]) } yield { val registry = apply[F](releases) val release = for { releases <- releases.get ignore = (_: Throwable) => () _ <- releases.foldMap { _.handleError(ignore) } } yield {} (registry, release) } Resource(result) } def apply[F[_] : Sync](releases: Ref[F, List[F[Unit]]]): ResourceRegistry[F] = { new ResourceRegistry[F] { def allocate[B](resource: Resource[F, B]) = { resource.allocated.bracketCase { case (b, release) => for { _ <- releases.update(release :: _) } yield b } { case ((_, release), exitCase) => exitCase match { case ExitCase.Completed => ().pure[F] case _: ExitCase.Error[Throwable] => release case ExitCase.Canceled => release } } } } } }
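A hedged sketch of how the registry above could be used (the resource names are illustrative): anything allocated through it is released when the registry's own Resource is released.

import cats.effect.{ContextShift, IO, Resource}
import com.evolutiongaming.kafka.journal.util.ResourceRegistry
import scala.concurrent.ExecutionContext

implicit val cs: ContextShift[IO] = IO.contextShift(ExecutionContext.global)

val program: IO[Unit] =
  ResourceRegistry.of[IO].use { registry =>
    for {
      _ <- registry.allocate(Resource.make(IO(println("acquire a")))(_ => IO(println("release a"))))
      _ <- registry.allocate(Resource.make(IO(println("acquire b")))(_ => IO(println("release b"))))
    } yield () // both releases run once this block (and the registry) finishes
  }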
Example 43
Source File: ResourceRef.scala From kafka-journal with MIT License | 5 votes |
package com.evolutiongaming.kafka.journal.util import cats.effect.concurrent.Ref import cats.effect.implicits._ import cats.effect.{Resource, Sync} import cats.implicits._ import scala.util.control.NoStackTrace trait ResourceRef[F[_], A] { def get: F[A] def set(a: A, release: F[Unit]): F[Unit] def set(a: Resource[F, A]): F[Unit] } object ResourceRef { def of[F[_] : Sync, A](resource: Resource[F, A]): Resource[F, ResourceRef[F, A]] = { case class State(a: A, release: F[Unit]) Resource .make { for { ab <- resource.allocated (a, release) = ab ref <- Ref[F].of(State(a, release).some) } yield ref } { ref => ref .getAndSet(none) .flatMap { _.foldMapM { _.release } } } .map { ref => new ResourceRef[F, A] { def get = { ref .get .flatMap { case Some(state) => state.a.pure[F] case None => ResourceReleasedError.raiseError[F, A] } } def set(a: A, release: F[Unit]) = { ref .modify { case Some(state) => (State(a, release).some, state.release ) case None => (none, ResourceReleasedError.raiseError[F, Unit]) } .flatten .uncancelable } def set(a: Resource[F, A]) = { a .allocated .flatMap { case (a, release) => set(a, release) } } } } } } case object ResourceReleasedError extends RuntimeException("Resource released") with NoStackTrace
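A hedged sketch (not from the project sources): set swaps in a new resource and releases the previous one, while get returns the currently installed value.

import cats.effect.{IO, Resource}
import com.evolutiongaming.kafka.journal.util.ResourceRef

val program: IO[Int] =
  ResourceRef.of(Resource.pure[IO, Int](1)).use { ref =>
    for {
      a <- ref.get                            // 1
      _ <- ref.set(Resource.pure[IO, Int](2)) // releases the old value, installs 2
      b <- ref.get                            // 2
    } yield a + b                             // 3
  }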
Example 44
Source File: HeadCacheFenced.scala From kafka-journal with MIT License | 5 votes |
package com.evolutiongaming.kafka.journal import cats.Apply import cats.effect.concurrent.Ref import cats.effect.{Concurrent, Resource} import cats.implicits._ import com.evolutiongaming.catshelper.CatsHelper._ import com.evolutiongaming.skafka.{Offset, Partition} object HeadCacheFenced { def of[F[_] : Concurrent](headCache: Resource[F, HeadCache[F]]): Resource[F, HeadCache[F]] = { val fence = Resource.make { Ref[F].of(().pure[F]) } { fence => fence.set(HeadCacheReleasedError.raiseError[F, Unit]) } val result = for { headCache <- headCache fence <- fence } yield { apply(headCache, fence.get.flatten) } result.fenced } def apply[F[_] : Apply](headCache: HeadCache[F], fence: F[Unit]): HeadCache[F] = { (key: Key, partition: Partition, offset: Offset) => { fence *> headCache.get(key, partition, offset) } } }
Example 45
Source File: TopicCommitTest.scala From kafka-journal with MIT License | 5 votes |
package com.evolutiongaming.kafka.journal.replicator import cats.data.{NonEmptyMap => Nem} import cats.implicits._ import cats.effect.{Clock, IO} import com.evolutiongaming.kafka.journal.IOSuite._ import cats.effect.concurrent.{Deferred, Ref} import com.evolutiongaming.skafka.{Offset, Partition} import org.scalatest.funsuite.AsyncFunSuite import org.scalatest.matchers.should.Matchers import scala.concurrent.duration._ class TopicCommitTest extends AsyncFunSuite with Matchers{ test("delayed") { def commitOf( deferred: Deferred[IO, Unit], commitsRef: Ref[IO, List[Nem[Partition, Offset]]])(implicit clock: Clock[IO] ) = { val commit = new TopicCommit[IO] { def apply(offsets: Nem[Partition, Offset]) = { commitsRef.update { offsets :: _ } *> deferred.complete(()) } } TopicCommit.delayed(10.millis, commit) } def clockOf(ref: Ref[IO, FiniteDuration]): Clock[IO] = { new Clock[IO] { def realTime(unit: TimeUnit): IO[Long] = monotonic(unit) def monotonic(unit: TimeUnit): IO[Long] = ref.get.map { _.toUnit(unit).toLong } } } val result = for { commitsRef <- Ref[IO].of(List.empty[Nem[Partition, Offset]]) deferred <- Deferred[IO, Unit] clockRef <- Ref[IO].of(0.millis) clock = clockOf(clockRef) commit <- commitOf(deferred, commitsRef)(clock) _ <- commit(Nem.of((Partition.min, Offset.min))) offsets <- commitsRef.get _ = offsets shouldEqual List.empty _ <- clockRef.set(20.millis) _ <- commit(Nem.of((Partition.unsafe(1), Offset.unsafe(1)))) _ <- deferred.get offsets <- commitsRef.get _ = offsets shouldEqual List(Nem.of((Partition.min, Offset.min), (Partition.unsafe(1), Offset.unsafe(1)))) } yield {} result.run() } }
Example 46
Source File: KafkaSingletonTest.scala From kafka-journal with MIT License | 5 votes |
package com.evolutiongaming.kafka.journal.replicator import cats.data.{NonEmptySet => Nes} import cats.effect.concurrent.{Deferred, Ref} import cats.effect.{Concurrent, IO, Resource, Timer} import cats.implicits._ import com.evolutiongaming.catshelper.Log import com.evolutiongaming.kafka.journal.IOSuite._ import com.evolutiongaming.skafka.consumer.RebalanceListener import com.evolutiongaming.skafka.{Partition, TopicPartition} import com.evolutiongaming.sstream.Stream import org.scalatest.funsuite.AsyncFunSuite import org.scalatest.matchers.should.Matchers import scala.concurrent.duration._ class KafkaSingletonTest extends AsyncFunSuite with Matchers { test("allocate & release when partition assigned or revoked") { `allocate & release when partition assigned or revoked`[IO]().run() } private def `allocate & release when partition assigned or revoked`[F[_] : Concurrent : Timer](): F[Unit] = { val topic = "topic" def consumer(deferred: Deferred[F, RebalanceListener[F]]) = { new TopicConsumer[F] { def subscribe(listener: RebalanceListener[F]) = deferred.complete(listener) def poll = Stream.empty def commit = TopicCommit.empty } } def topicPartition(partition: Partition) = TopicPartition(topic, partition) val result = for { listener <- Resource.liftF(Deferred[F, RebalanceListener[F]]) allocated <- Resource.liftF(Ref[F].of(false)) resource = Resource.make { allocated.set(true) } { _ => allocated.set(false) } singleton <- KafkaSingleton.of(topic, consumer(listener).pure[Resource[F, *]], resource, Log.empty[F]) listener <- Resource.liftF(listener.get) _ <- Resource.liftF { for { a <- singleton.get _ = a shouldEqual none[Unit] a <- allocated.get _ = a shouldEqual false _ <- listener.onPartitionsAssigned(Nes.of(topicPartition(Partition.max))) a <- singleton.get _ = a shouldEqual none[Unit] a <- allocated.get _ = a shouldEqual false _ <- listener.onPartitionsAssigned(Nes.of(topicPartition(Partition.min))) _ <- Timer[F].sleep(10.millis) a <- singleton.get _ = a shouldEqual ().some a <- allocated.get _ = a shouldEqual true _ <- listener.onPartitionsRevoked(Nes.of(topicPartition(Partition.max))) a <- singleton.get _ = a shouldEqual ().some a <- allocated.get _ = a shouldEqual true _ <- listener.onPartitionsRevoked(Nes.of(topicPartition(Partition.min))) _ <- Timer[F].sleep(10.millis) a <- singleton.get _ = a shouldEqual none[Unit] a <- allocated.get _ = a shouldEqual false } yield {} } } yield {} result.use { _ => ().pure[F] } } }
Example 47
Source File: TopicCommit.scala From kafka-journal with MIT License | 5 votes |
package com.evolutiongaming.kafka.journal.replicator import java.time.Instant import cats.Applicative import cats.data.{NonEmptyMap => Nem} import cats.effect.concurrent.Ref import cats.effect.{Clock, Sync} import cats.implicits._ import com.evolutiongaming.catshelper.ClockHelper._ import com.evolutiongaming.catshelper.DataHelper._ import com.evolutiongaming.kafka.journal.util.TemporalHelper._ import com.evolutiongaming.kafka.journal.KafkaConsumer import com.evolutiongaming.skafka._ import scala.collection.immutable.SortedMap import scala.concurrent.duration._ trait TopicCommit[F[_]] { def apply(offsets: Nem[Partition, Offset]): F[Unit] } object TopicCommit { def empty[F[_] : Applicative]: TopicCommit[F] = (_: Nem[Partition, Offset]) => ().pure[F] def apply[F[_]]( topic: Topic, metadata: String, consumer: KafkaConsumer[F, _, _], ): TopicCommit[F] = { offsets: Nem[Partition, Offset] => { val offsets1 = offsets.mapKV { (partition, offset) => val offset1 = OffsetAndMetadata(offset, metadata) val partition1 = TopicPartition(topic, partition) (partition1, offset1) } consumer.commit(offsets1) } } def delayed[F[_] : Sync : Clock]( delay: FiniteDuration, commit: TopicCommit[F] ): F[TopicCommit[F]] = { case class State(until: Instant, offsets: SortedMap[Partition, Offset] = SortedMap.empty) for { timestamp <- Clock[F].instant stateRef <- Ref[F].of(State(timestamp + delay)) } yield { new TopicCommit[F] { def apply(offsets: Nem[Partition, Offset]) = { def apply(state: State, timestamp: Instant) = { val offsets1 = state.offsets ++ offsets.toSortedMap if (state.until <= timestamp) { offsets1 .toNem .foldMapM { offsets => commit(offsets) } .as(State(timestamp + delay)) } else { state .copy(offsets = offsets1) .pure[F] } } for { timestamp <- Clock[F].instant state <- stateRef.get state <- apply(state, timestamp) _ <- stateRef.set(state) } yield {} } } } } }
Example 48
Source File: ResultSetSpec.scala From kafka-journal with MIT License | 5 votes |
package com.evolutiongaming.kafka.journal.eventual.cassandra import cats.effect.concurrent.Ref import cats.effect.{Concurrent, IO} import cats.implicits._ import com.evolutiongaming.kafka.journal.IOSuite._ import org.scalatest.funsuite.AsyncFunSuite import org.scalatest.matchers.should.Matchers import scala.util.control.NoStackTrace class ResultSetSpec extends AsyncFunSuite with Matchers { for { size <- 0 to 5 take <- 1 to 5 fetchSize <- 1 to 5 } { test(s"size: $size, take: $take, fetchSize: $fetchSize") { testF[IO](size = size, take = take, fetchSize = fetchSize).run() } } private def testF[F[_] : Concurrent](size: Int, take: Int, fetchSize: Int) = { type Row = Int val all = (0 until size).toList for { fetches <- Ref[F].of(0) left <- Ref[F].of(all) fetched <- Ref[F].of(List.empty[Row]) next = fetched.modify { rows => (List.empty, rows) } fetch = for { _ <- fetches.update(_ + 1) toFetch1 <- left.get result <- { if (toFetch1.isEmpty) ().pure[F] else for { taken <- left.modify { rows => val fetched = rows.take(fetchSize) val left = rows.drop(fetchSize) (left, fetched) } _ <- fetched.set(taken) } yield {} } } yield result resultSet = ResultSet[F, Row](fetch, left.get.map(_.isEmpty), next) rows <- resultSet.take(take.toLong).toList fetches <- fetches.get } yield { rows shouldEqual all.take(take) if (take >= size) { val expected = { val n = size / fetchSize if (size % fetchSize == 0) n else n + 1 } fetches shouldEqual expected } } } case object NotImplemented extends RuntimeException with NoStackTrace }
Example 49
Source File: CassandraHealthCheck.scala From kafka-journal with MIT License | 5 votes |
package com.evolutiongaming.kafka.journal.eventual.cassandra import cats.Monad import cats.effect._ import cats.effect.concurrent.Ref import cats.implicits._ import com.evolutiongaming.catshelper.{Log, LogOf} import com.evolutiongaming.kafka.journal.util.CatsHelper._ import com.evolutiongaming.kafka.journal.eventual.cassandra.CassandraHelper._ import scala.concurrent.duration._ trait CassandraHealthCheck[F[_]] { def error: F[Option[Throwable]] } object CassandraHealthCheck { def of[F[_] : Concurrent : Timer : LogOf]( session: Resource[F, CassandraSession[F]] ): Resource[F, CassandraHealthCheck[F]] = { val statement = for { session <- session statement <- { implicit val session1 = session Resource.liftF(Statement.of[F]) } } yield statement for { log <- Resource.liftF(LogOf[F].apply(CassandraHealthCheck.getClass)) result <- of(initial = 10.seconds, interval = 1.second, statement = statement, log = log) } yield result } def of[F[_] : Concurrent : Timer]( initial: FiniteDuration, interval: FiniteDuration, statement: Resource[F, Statement[F]], log: Log[F] ): Resource[F, CassandraHealthCheck[F]] = { val result = for { ref <- Ref.of[F, Option[Throwable]](none) fiber <- statement.start { statement => for { _ <- Timer[F].sleep(initial) _ <- { for { e <- statement.error[Throwable] _ <- e.fold(().pure[F]) { e => log.error(s"failed with $e", e) } _ <- ref.set(e) _ <- Timer[F].sleep(interval) } yield ().asLeft }.foreverM[Unit] } yield {} } } yield { val result = new CassandraHealthCheck[F] { def error = ref.get } (result, fiber.cancel) } Resource(result) } type Statement[F[_]] = F[Unit] object Statement { def of[F[_] : Monad : CassandraSession]: F[Statement[F]] = { for { prepared <- "SELECT now() FROM system.local".prepare } yield { prepared.bind().first.void } } } }
Example 50
Source File: actors.scala From actors-cats-effect-fs2 with Apache License 2.0 | 5 votes |
package app import app.syntax._ import cats.effect.Concurrent import cats.effect.concurrent.{Deferred, Ref} import cats.effect.syntax.concurrent._ import cats.syntax.flatMap._ import cats.syntax.functor._ import fs2.concurrent.Queue object actors { def actor[F[_], S, O]( initialState: S, receive: Ref[F, S] => F[O] )(implicit F: Concurrent[F]): F[F[O]] = for { ref <- Ref.of[F, S](initialState) queue <- Queue.unbounded[F, Deferred[F, O]] fiber <- (for { deferred <- queue.dequeue1 output <- receive(ref) _ <- deferred.complete(output) } yield ()).foreverM.void.start ask = for { deferred <- Deferred[F, O] _ <- queue.offer1(deferred) output <- (fiber.join race deferred.get) .collect { case Right(o) => o } } yield output } yield ask def actorWithInput[F[_], S, I, O]( initialState: S, receive: (I, Ref[F, S]) => F[O] )(implicit F: Concurrent[F]): F[I => F[O]] = for { ref <- Ref.of[F, S](initialState) queue <- Queue.unbounded[F, (I, Deferred[F, O])] fiber <- (for { inputAndDeferred <- queue.dequeue1 (input, deferred) = inputAndDeferred output <- receive(input, ref) _ <- deferred.complete(output) } yield ()).foreverM.void.start ask = (input: I) => for { deferred <- Deferred[F, O] _ <- queue.offer1((input, deferred)) output <- (fiber.join race deferred.get) .collect { case Right(o) => o } } yield output } yield ask }
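A hedged sketch of a counter built on actorWithInput (the names here are illustrative): inputs are processed one at a time off the queue, so every caller sees a consistent running total.

import cats.effect.{ContextShift, IO}
import scala.concurrent.ExecutionContext
import app.actors.actorWithInput

implicit val cs: ContextShift[IO] = IO.contextShift(ExecutionContext.global)

val total: IO[Int] =
  for {
    add  <- actorWithInput[IO, Int, Int, Int](0, (i, ref) => ref.modify(s => (s + i, s + i)))
    _    <- add(1)
    _    <- add(2)
    last <- add(3)
  } yield last // 6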
Example 51
Source File: GracefulFiber.scala From kafka-journal with MIT License | 5 votes |
package com.evolutiongaming.kafka.journal.util import cats.effect.concurrent.Ref import cats.effect.{Concurrent, Fiber} import cats.implicits._ trait GracefulFiber[F[_]] { def apply[A](f: F[Boolean] => F[Fiber[F, A]]): F[Fiber[F, A]] } object GracefulFiber { def apply[F[_] : Concurrent]: GracefulFiber[F] = { new GracefulFiber[F] { def apply[A](f: F[Boolean] => F[Fiber[F, A]]) = { for { cancelRef <- Ref.of[F, Boolean](false) fiber <- f(cancelRef.get) } yield { new Fiber[F, A] { def join = fiber.join def cancel = { for { cancel <- cancelRef.getAndSet(true) _ <- if (cancel) ().pure[F] else fiber.join } yield {} } } } } } } }
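A hedged sketch of cooperative cancellation with GracefulFiber (the polling loop is illustrative, not from the project): the started fiber keeps checking the Ref-backed flag, and cancel flips the flag and then joins until the loop observes it.

import cats.effect.{ContextShift, IO}
import cats.implicits._
import scala.concurrent.ExecutionContext
import com.evolutiongaming.kafka.journal.util.GracefulFiber

implicit val cs: ContextShift[IO] = IO.contextShift(ExecutionContext.global)

// keep running until the cancel flag becomes true
def loop(cancelled: IO[Boolean]): IO[Unit] =
  cancelled.flatMap(stop => if (stop) IO.unit else IO.shift >> loop(cancelled))

val program: IO[Unit] =
  for {
    fiber <- GracefulFiber[IO].apply[Unit](cancel => loop(cancel).start)
    _     <- fiber.cancel // waits for the loop to stop instead of interrupting it
  } yield ()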
Example 52
Source File: Broadcast.scala From canoe with MIT License | 5 votes |
package canoe.api import cats.syntax.all._ import cats.instances.list._ import cats.effect.{Concurrent} import cats.effect.concurrent.Ref import fs2.{Pipe, Stream} import fs2.concurrent.{Queue, Topic} private[api] class Broadcast[F[_], A](subs: Ref[F, List[Queue[F, A]]])(implicit C: Concurrent[F]) extends Topic[F, A] { def publish: Pipe[F, A, Unit] = _.evalMap(publish1) def publish1(a: A): F[Unit] = subs.get.flatMap(_.traverse_(_.enqueue1(a))) def subscribe(maxQueued: Int): Stream[F, A] = subscription(maxQueued).evalTap(q => subs.update(q :: _)).flatMap(_.dequeue) private def subscription(maxQueued: Int): Stream[F, Queue[F, A]] = Stream.bracket(Queue.bounded[F, A](maxQueued)) { q => subs.update(_.filter(_ ne q)) *> q.tryDequeue1.void } def subscribeSize(maxQueued: Int): Stream[F, (A, Int)] = subscribe(maxQueued).zip(subscribers) def subscribers: Stream[F, Int] = Stream.repeatEval(subs.get).map(_.size) } object Broadcast { private [api] def apply[F[_], A](implicit C: Concurrent[F]): F[Broadcast[F, A]] = Ref.of[F, List[Queue[F, A]]](List.empty).map(new Broadcast(_)) }
Example 53
Source File: KafkaAdminAlgebra.scala From hydra with Apache License 2.0 | 5 votes |
package hydra.kafka.algebras import cats.effect.concurrent.Ref import cats.effect.{Async, Concurrent, ContextShift, Resource, Sync} import cats.implicits._ import fs2.kafka._ import hydra.core.protocol._ import hydra.kafka.util.KafkaUtils.TopicDetails import org.apache.kafka.clients.admin.NewTopic import org.apache.kafka.common.errors.UnknownTopicOrPartitionException import scala.util.control.NoStackTrace def deleteTopic(name: String): F[Unit] } object KafkaAdminAlgebra { type TopicName = String final case class Topic(name: TopicName, numberPartitions: Int) def live[F[_]: Sync: Concurrent: ContextShift]( bootstrapServers: String, ): F[KafkaAdminAlgebra[F]] = Sync[F].delay { new KafkaAdminAlgebra[F] { override def describeTopic(name: TopicName): F[Option[Topic]] = { getAdminClientResource .use(_.describeTopics(name :: Nil)) .map(_.headOption.map(_._2).map { td => Topic(td.name(), td.partitions().size()) }) .recover { case _: UnknownTopicOrPartitionException => None } } override def getTopicNames: F[List[TopicName]] = getAdminClientResource.use(_.listTopics.names.map(_.toList)) override def createTopic(name: TopicName, d: TopicDetails): F[Unit] = { import scala.collection.JavaConverters._ val newTopic = new NewTopic(name, d.numPartitions, d.replicationFactor) .configs(d.configs.asJava) getAdminClientResource.use(_.createTopic(newTopic)) } override def deleteTopic(name: String): F[Unit] = getAdminClientResource.use(_.deleteTopic(name)) private def getAdminClientResource: Resource[F, KafkaAdminClient[F]] = { adminClientResource( AdminClientSettings.apply.withBootstrapServers(bootstrapServers) ) } } } def test[F[_]: Sync]: F[KafkaAdminAlgebra[F]] = Ref[F].of(Map[TopicName, Topic]()).flatMap(getTestKafkaClient[F]) private[this] def getTestKafkaClient[F[_]: Sync]( ref: Ref[F, Map[TopicName, Topic]] ): F[KafkaAdminAlgebra[F]] = Sync[F].delay { new KafkaAdminAlgebra[F] { override def describeTopic(name: TopicName): F[Option[Topic]] = ref.get.map(_.get(name)) override def getTopicNames: F[List[TopicName]] = ref.get.map(_.keys.toList) override def createTopic( name: TopicName, details: TopicDetails ): F[Unit] = { val entry = name -> Topic(name, details.numPartitions) ref.update(old => old + entry) } override def deleteTopic(name: String): F[Unit] = ref.update(_ - name) } } }
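A hedged sketch against the in-memory test interpreter above; TopicDetails is taken as a parameter because its constructor is not shown in this listing.

import cats.effect.IO
import hydra.kafka.algebras.KafkaAdminAlgebra
import hydra.kafka.util.KafkaUtils.TopicDetails

def createAndList(details: TopicDetails): IO[List[KafkaAdminAlgebra.TopicName]] =
  for {
    kafka <- KafkaAdminAlgebra.test[IO]
    _     <- kafka.createTopic("topic-a", details)
    names <- kafka.getTopicNames // List("topic-a")
  } yield names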
Example 54
Source File: fixtures.scala From sonar-scala with GNU Lesser General Public License v3.0 | 5 votes |
package com.mwz.sonar.scala import java.io.File import java.nio.file.{Files, Path} import cats.effect.IO import cats.effect.concurrent.Ref import com.mwz.sonar.scala.util.Logger trait WithFiles { def withFiles(paths: String*)(test: Seq[File] => Any): Unit = { val tmpDir: Path = Files.createTempDirectory("") val files: Seq[File] = paths.map(path => Files.createFile(tmpDir.resolve(path)).toFile) try test(files) finally { files.foreach(f => Files.deleteIfExists(f.toPath)) Files.deleteIfExists(tmpDir) } } } trait WithTracing { def withTracing(test: Ref[IO, List[String]] => Any): Unit = test(Ref.unsafe[IO, List[String]](List.empty)) } trait WithLogging { object LogLevel { sealed trait Level final case object Debug extends Level final case object Info extends Level final case object Warn extends Level final case object Error extends Level } def withLogging(test: (Ref[IO, List[(LogLevel.Level, String)]], Logger[IO]) => Any): Unit = { val logs = Ref.unsafe[IO, List[(LogLevel.Level, String)]](List.empty) val logger: Logger[IO] = new Logger[IO] { def debug(s: String): IO[Unit] = logs.update((LogLevel.Debug, s) :: _) def info(s: String): IO[Unit] = logs.update((LogLevel.Info, s) :: _) def warn(s: String): IO[Unit] = logs.update((LogLevel.Warn, s) :: _) def error(s: String): IO[Unit] = logs.update((LogLevel.Error, s) :: _) def error(s: String, e: Throwable): IO[Unit] = logs.update((LogLevel.Error, s) :: _) } test(logs, logger) } }
Example 55
Source File: MockingKeyStore.scala From iotchain with MIT License | 5 votes |
package jbok.core.keystore import cats.effect.Sync import cats.implicits._ import cats.effect.concurrent.Ref import jbok.core.models.Address import jbok.crypto.signature.{ECDSA, KeyPair, Signature} import scodec.bits.ByteVector final class MockingKeyStore[F[_]](implicit F: Sync[F]) extends KeyStore[F] { private val m: Ref[F, Map[Address, KeyPair]] = Ref.unsafe(Map.empty) override def newAccount(passphrase: String): F[Address] = for { kp <- Signature[ECDSA].generateKeyPair[F]() _ <- m.update(_ + (Address(kp) -> kp)) } yield Address(kp) override def readPassphrase(prompt: String): F[String] = ??? override def importPrivateKey(key: ByteVector, passphrase: String): F[Address] = for { secret <- KeyPair.Secret(key).pure[F] public <- Signature[ECDSA].generatePublicKey[F](secret) kp = KeyPair(public, secret) address = Address(kp) _ <- m.update(_ + (address -> kp)) } yield address override def listAccounts: F[List[Address]] = m.get.map(_.keys.toList) override def unlockAccount(address: Address, passphrase: String): F[Wallet] = m.get.map(_(address)).map { kp => Wallet(Address(kp), kp) } override def deleteAccount(address: Address): F[Boolean] = m.update(_ - address).as(true) override def changePassphrase(address: Address, oldPassphrase: String, newPassphrase: String): F[Boolean] = F.pure(true) } object MockingKeyStore { def withInitKeys[F[_]: Sync](initKeys: List[KeyPair]): F[MockingKeyStore[F]] = { val keystore = new MockingKeyStore[F]() initKeys.traverse(kp => keystore.importPrivateKey(kp.secret.bytes, "")).as(keystore) } }
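A hedged usage sketch (not from the iotchain sources): accounts are kept in the unsafe Ref, so freshly created ones show up in listAccounts.

import cats.effect.IO
import jbok.core.keystore.MockingKeyStore
import jbok.core.models.Address

val accounts: IO[List[Address]] =
  for {
    keystore <- IO(new MockingKeyStore[IO]())
    _        <- keystore.newAccount("passphrase")
    _        <- keystore.newAccount("passphrase")
    all      <- keystore.listAccounts // two generated addresses
  } yield all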
Example 56
Source File: Peer.scala From iotchain with MIT License | 5 votes |
package jbok.core.peer import cats.effect.concurrent.Ref import cats.effect.{Concurrent, Sync} import cats.implicits._ import fs2.concurrent.Queue import jbok.core.messages.{SignedTransactions, Status} import jbok.network.Message import scodec.bits.ByteVector import jbok.codec.rlp.implicits._ import jbok.common.log.Logger import jbok.common.math.N import jbok.crypto._ final case class Peer[F[_]]( uri: PeerUri, queue: Queue[F, Message[F]], status: Ref[F, Status], knownBlocks: Ref[F, Set[ByteVector]], knownTxs: Ref[F, Set[ByteVector]] )(implicit F: Sync[F]) { import Peer._ private[this] val log = Logger[F] def hasBlock(blockHash: ByteVector): F[Boolean] = knownBlocks.get.map(_.contains(blockHash)) def hasTxs(stxs: SignedTransactions): F[Boolean] = knownTxs.get.map(_.contains(stxs.encoded.bytes.kec256)) def markBlock(blockHash: ByteVector, number: N): F[Unit] = knownBlocks.update(s => s.take(MaxKnownBlocks - 1) + blockHash) >> status.update(s => s.copy(bestNumber = s.bestNumber.max(number))) def markTxs(stxs: SignedTransactions): F[Unit] = knownTxs.update(known => known.take(MaxKnownTxs - 1) + stxs.encoded.bytes.kec256) def markStatus(newStatus: Status): F[Unit] = status.update(s => if (newStatus.td > s.td) s.copy(bestNumber = newStatus.bestNumber, td = newStatus.td) else s) } object Peer { val MaxKnownTxs = 32768 val MaxKnownBlocks = 1024 def apply[F[_]: Concurrent](uri: PeerUri, status: Status): F[Peer[F]] = for { queue <- Queue.circularBuffer[F, Message[F]](100000) status <- Ref.of[F, Status](status) knownBlocks <- Ref.of[F, Set[ByteVector]](Set.empty) knownTxs <- Ref.of[F, Set[ByteVector]](Set.empty) } yield Peer[F](uri, queue, status, knownBlocks, knownTxs) }
Example 57
Source File: PeerMessageHandler.scala From iotchain with MIT License | 5 votes |
package jbok.core.peer import cats.effect.Concurrent import cats.effect.concurrent.Ref import cats.implicits._ import fs2._ import jbok.codec.rlp.implicits._ import jbok.common.log.Logger import jbok.core.NodeStatus import jbok.core.messages.{BlockHash, NewBlock, NewBlockHashes, SignedTransactions, Status} import jbok.core.models.Block import jbok.core.peer.PeerSelector.PeerSelector import jbok.core.queue.{Consumer, Producer} import jbok.network.Request class PeerMessageHandler[F[_]]( txInbound: Producer[F, Peer[F], SignedTransactions], txOutbound: Consumer[F, PeerSelector[F], SignedTransactions], blockInbound: Producer[F, Peer[F], Block], blockOutbound: Consumer[F, PeerSelector[F], Block], statusInbound: Producer[F, Peer[F], Status], statusOutbound: Consumer[F, PeerSelector[F], Status], peerManager: PeerManager[F], status: Ref[F, NodeStatus] )(implicit F: Concurrent[F]) { private[this] val log = Logger[F] def onNewBlockHashes(peer: Peer[F], hashes: List[BlockHash]): F[Unit] = hashes.traverse_(hash => peer.markBlock(hash.hash, hash.number)) def onNewBlock(peer: Peer[F], block: Block): F[Unit] = status.get.flatMap { case NodeStatus.Done => blockInbound.produce(peer, block) case _ => F.unit } def onSignedTransactions(peer: Peer[F], stxs: SignedTransactions): F[Unit] = txInbound.produce(peer, stxs) def onStatus(peer: Peer[F], remoteStatus: Status):F[Unit] = for { localStatus <- peerManager.outgoing.localStatus _ <- if (!localStatus.isCompatible(remoteStatus)) { F.raiseError(Incompatible(localStatus, remoteStatus)) }else{ peer.markStatus(remoteStatus) } } yield () val consume: Stream[F, Unit] = peerManager.inbound.evalMap { case (peer, req @ Request(_, NewBlockHashes.name, _, _)) => for { hashes <- req.as[NewBlockHashes].map(_.hashes) _ <- onNewBlockHashes(peer, hashes) } yield () case (peer, req @ Request(_, NewBlock.name, _, _)) => for { block <- req.as[NewBlock].map(_.block) _ <- onNewBlock(peer, block) } yield () case (peer, req @ Request(_, SignedTransactions.name, _, _)) => for { stxs <- req.as[SignedTransactions] _ <- onSignedTransactions(peer, stxs) } yield () case (peer, req @ Request(_, Status.name, _, _)) => for { status <- req.as[Status] _ <- onStatus(peer, status) } yield () case _ => F.unit } val produce: Stream[F, Unit] = { Stream( blockOutbound.consume.map { case (selector, block) => selector -> Request.binary[F, NewBlock](NewBlock.name, NewBlock(block).encoded) }, txOutbound.consume.map { case (selector, tx) => selector -> Request.binary[F, SignedTransactions](SignedTransactions.name, tx.encoded) }, statusOutbound.consume.map { case (selector, st) => selector -> Request.binary[F, Status](Status.name, st.encoded) } ).parJoinUnbounded .through(peerManager.outbound) } val stream: Stream[F, Unit] = Stream.eval_(log.i(s"starting Core/PeerMessageHandler")) ++ consume merge produce }
Example 58
Source File: BaseManager.scala From iotchain with MIT License | 5 votes |
package jbok.core.peer import java.net.InetSocketAddress import cats.effect._ import cats.effect.concurrent.Ref import cats.implicits._ import fs2.io.tcp.Socket import jbok.codec.rlp.implicits._ import jbok.common.math.N import jbok.core.config.FullConfig import jbok.core.ledger.History import jbok.core.messages.Status import jbok.core.queue.Queue import jbok.network.tcp.implicits._ import jbok.network.{Message, Request} import scala.util.control.NoStackTrace final case class Incompatible(local: Status, remote: Status) extends NoStackTrace { override def toString: String = s"peer incompatible chainId:${local.chainId}/${remote.chainId} genesis:${local.genesisHash.toHex}/${remote.genesisHash.toHex}" } abstract class BaseManager[F[_]](config: FullConfig, history: History[F])(implicit F: Concurrent[F]) { def inbound: Queue[F, Peer[F], Message[F]] val connected: Ref[F, Map[PeerUri, (Peer[F], Socket[F])]] = Ref.unsafe(Map.empty) def isConnected(uri: PeerUri): F[Boolean] = connected.get.map(_.get(uri).isDefined) def close(uri: PeerUri): F[Unit] = connected.get.map(_.get(uri)).flatMap { case Some((_, socket)) => socket.endOfOutput >> socket.close case _ => F.unit } val localStatus: F[Status] = for { genesis <- history.genesisHeader number <- history.getBestBlockNumber td <- history.getTotalDifficultyByNumber(number).map(_.getOrElse(N(0))) } yield Status(history.chainId, genesis.hash, number, td, config.service.uri) def handshake(socket: Socket[F]): F[Peer[F]] = for { localStatus <- localStatus request = Request.binary[F, Status](Status.name, localStatus.encoded) _ <- socket.writeMessage(request) remoteStatus <- socket.readMessage.flatMap(_.as[Status]) remote <- socket.remoteAddress.map(_.asInstanceOf[InetSocketAddress]) peer <- if (!localStatus.isCompatible(remoteStatus)) { F.raiseError(Incompatible(localStatus, remoteStatus)) } else { Peer[F](PeerUri.fromTcpAddr(remote), remoteStatus) } } yield peer val seedDisconnects: F[List[PeerUri]] = config.peer.seedUris.filterA(uri => isConnected(uri).map(b => !b)) val seedConnects: F[List[PeerUri]] = config.peer.seedUris.filterA(uri => isConnected(uri)) }
Example 59
Source File: CoreNode.scala From iotchain with MIT License | 5 votes |
package jbok.core import cats.effect.concurrent.Ref import cats.effect.{ConcurrentEffect, Timer} import cats.implicits._ import fs2._ import jbok.common.log.Logger import jbok.core.config.FullConfig import jbok.core.ledger.{BlockExecutor, History} import jbok.core.mining.BlockMiner import jbok.core.peer.{PeerManager, PeerMessageHandler} import jbok.core.pool.TxPool import jbok.core.sync.SyncClient import scala.concurrent.duration._ final case class CoreNode[F[_]]( config: FullConfig, nodeStatus: Ref[F, NodeStatus], history: History[F], peerManager: PeerManager[F], executor: BlockExecutor[F], miner: BlockMiner[F], txPool: TxPool[F], handler: PeerMessageHandler[F], syncClient: SyncClient[F] )(implicit F: ConcurrentEffect[F], T: Timer[F]) { private val log = Logger[F] val logStatus: Stream[F, Unit] = Stream.eval { for { number <- history.getBestBlockNumber td <- history.getTotalDifficultyByNumber(number).map(_.getOrElse(BigInt(0))) status <- nodeStatus.get _ <- log.i(s"status=${status},bestNumber=${number},td=${td}") _ <- T.sleep(10.seconds) } yield () }.repeat val stream: Stream[F, Unit] = Stream( peerManager.stream, miner.stream, txPool.stream, executor.stream, handler.stream, syncClient.stream, syncClient.checkSeedConnect, syncClient.heartBeatStream, syncClient.statusStream, logStatus ).parJoinUnbounded .handleErrorWith(e => Stream.eval(log.e("CoreNode has an unhandled failure", e))) }
Example 60
Source File: MemoryKVStore.scala From iotchain with MIT License | 5 votes |
package jbok.persistent import cats.effect.Sync import cats.effect.concurrent.Ref import cats.implicits._ import fs2._ final class MemoryKVStore[F[_]](m: Ref[F, Map[ColumnFamily, Map[Seq[Byte], Array[Byte]]]])(implicit F: Sync[F]) extends KVStore[F] { override def put(cf: ColumnFamily, key: Array[Byte], value: Array[Byte]): F[Unit] = m.update(m => m.updated(cf, m.getOrElse(cf, Map.empty) + (key.toSeq -> value))) override def del(cf: ColumnFamily, key: Array[Byte]): F[Unit] = m.update(m => m.updated(cf, m.getOrElse(cf, Map.empty) - key)) override def writeBatch(cf: ColumnFamily, puts: List[(Array[Byte], Array[Byte])], dels: List[Array[Byte]]): F[Unit] = for { _ <- puts.traverse { case (key, value) => put(cf, key, value) } _ <- dels.traverse { key => del(cf, key) } } yield () override def writeBatch(cf: ColumnFamily, ops: List[(Array[Byte], Option[Array[Byte]])]): F[Unit] = ops.traverse_ { case (key, Some(value)) => put(cf, key, value) case (key, None) => del(cf, key) } override def writeBatch(puts: List[Put], dels: List[Del]): F[Unit] = for { _ <- puts.traverse { case (cf, key, value) => put(cf, key, value) } _ <- dels.traverse { case (cf, key) => del(cf, key) } } yield () override def get(cf: ColumnFamily, key: Array[Byte]): F[Option[Array[Byte]]] = m.get.map(_.get(cf).flatMap(_.get(key))) override def toStream(cf: ColumnFamily): Stream[F, (Array[Byte], Array[Byte])] = Stream.eval(toList(cf)).flatMap(Stream.emits) override def toList(cf: ColumnFamily): F[List[(Array[Byte], Array[Byte])]] = toMap(cf).map(_.toList) override def toMap(cf: ColumnFamily): F[Map[Array[Byte], Array[Byte]]] = m.get.map(_.getOrElse(cf, Map.empty).map { case (k, v) => k.toArray -> v }) override def size(cf: ColumnFamily): F[Int] = m.get.map(_.get(cf).map(_.size).getOrElse(0)) } object MemoryKVStore { def apply[F[_]](implicit F: Sync[F]): F[KVStore[F]] = Ref.of[F, Map[ColumnFamily, Map[Seq[Byte], Array[Byte]]]](Map.empty).map(ref => new MemoryKVStore[F](ref)) }
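A minimal round-trip sketch (not from the iotchain sources); the ColumnFamily is passed in because its constructor is not part of this listing.

import cats.effect.IO
import jbok.persistent.{ColumnFamily, MemoryKVStore}

def roundTrip(cf: ColumnFamily): IO[Option[String]] =
  for {
    store <- MemoryKVStore[IO]
    _     <- store.put(cf, "key".getBytes, "value".getBytes)
    value <- store.get(cf, "key".getBytes)
  } yield value.map(new String(_))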
Example 61
Source File: Subscriber.scala From redis4cats with Apache License 2.0 | 5 votes |
package dev.profunktor.redis4cats package pubsub import cats.effect._ import cats.effect.concurrent.Ref import cats.effect.implicits._ import cats.syntax.all._ import dev.profunktor.redis4cats.pubsub.internals.{ PubSubInternals, PubSubState } import dev.profunktor.redis4cats.data.RedisChannel import dev.profunktor.redis4cats.effect.{ JRFuture, Log } import fs2.Stream import io.lettuce.core.pubsub.StatefulRedisPubSubConnection class Subscriber[F[_]: ConcurrentEffect: ContextShift: Log, K, V]( state: Ref[F, PubSubState[F, K, V]], subConnection: StatefulRedisPubSubConnection[K, V], blocker: Blocker ) extends SubscribeCommands[Stream[F, *], K, V] { override def subscribe(channel: RedisChannel[K]): Stream[F, V] = Stream .eval( state.get.flatMap { st => PubSubInternals[F, K, V](state, subConnection).apply(channel)(st) <* JRFuture(F.delay(subConnection.async().subscribe(channel.underlying)))(blocker) } ) .flatMap(_.subscribe(500).unNone) override def unsubscribe(channel: RedisChannel[K]): Stream[F, Unit] = Stream.eval { JRFuture(F.delay(subConnection.async().unsubscribe(channel.underlying)))(blocker).void .guarantee(state.get.flatMap { st => st.get(channel.underlying).fold(().pure)(_.publish1(none[V])) *> state.update(_ - channel.underlying) }) } }
Example 62
Source File: CliModule.scala From nexus with Apache License 2.0 | 5 votes |
package ch.epfl.bluebrain.nexus.cli import cats.effect.concurrent.Ref import cats.effect.{ConcurrentEffect, Timer} import cats.implicits._ import ch.epfl.bluebrain.nexus.cli.clients._ import ch.epfl.bluebrain.nexus.cli.config.AppConfig import ch.epfl.bluebrain.nexus.cli.sse.{OrgLabel, OrgUuid, ProjectLabel, ProjectUuid} import distage.{ModuleDef, TagK} import izumi.distage.model.definition.StandardAxis.Repo import org.http4s.client.Client import org.http4s.client.blaze.BlazeClientBuilder import scala.concurrent.ExecutionContext import scala.concurrent.duration.Duration final class CliModule[F[_]: ConcurrentEffect: Timer: TagK] extends ModuleDef { make[Console[F]].tagged(Repo.Prod).from(Console[F]) make[Client[F]].tagged(Repo.Prod).fromResource { BlazeClientBuilder[F](ExecutionContext.global).withIdleTimeout(Duration.Inf).resource } make[ProjectClient[F]].tagged(Repo.Prod).fromEffect { (cfg: AppConfig, client: Client[F], console: Console[F]) => Ref.of[F, Map[(OrgUuid, ProjectUuid), (OrgLabel, ProjectLabel)]](Map.empty).map { cache => ProjectClient(client, cfg.env, cache, console) } } make[SparqlClient[F]].tagged(Repo.Prod).from { (cfg: AppConfig, client: Client[F], console: Console[F]) => SparqlClient(client, cfg.env, console) } make[EventStreamClient[F]].tagged(Repo.Prod).from { (cfg: AppConfig, client: Client[F], pc: ProjectClient[F]) => EventStreamClient(client, pc, cfg.env) } make[InfluxClient[F]].tagged(Repo.Prod).from { (cfg: AppConfig, client: Client[F], console: Console[F]) => InfluxClient(client, cfg, console) } } object CliModule { final def apply[F[_]: ConcurrentEffect: Timer: TagK]: CliModule[F] = new CliModule[F] }
Example 63
Source File: Fixture.scala From nexus with Apache License 2.0 | 5 votes |
package ch.epfl.bluebrain.nexus.sourcing.projections import akka.persistence.journal.{Tagged, WriteEventAdapter} import cats.effect.{IO, Sync} import cats.effect.concurrent.Ref import scala.annotation.nowarn object Fixture { sealed trait Event final case object Executed extends Event final case object OtherExecuted extends Event final case object AnotherExecuted extends Event final case object YetAnotherExecuted extends Event final case object RetryExecuted extends Event final case object IgnoreExecuted extends Event final case object NotDiscarded extends Event final case object Discarded extends Event sealed trait EventTransform final case object ExecutedTransform extends EventTransform final case object OtherExecutedTransform extends EventTransform final case object AnotherExecutedTransform extends EventTransform final case object YetAnotherExecutedTransform extends EventTransform final case object RetryExecutedTransform extends EventTransform final case object IgnoreExecutedTransform extends EventTransform final case object NotDiscardedTransform extends EventTransform final case object DiscardedTransform extends EventTransform sealed trait Cmd final case object Execute extends Cmd final case object ExecuteOther extends Cmd final case object ExecuteAnother extends Cmd final case object ExecuteYetAnother extends Cmd final case object ExecuteRetry extends Cmd final case object ExecuteIgnore extends Cmd sealed trait State final case object Perpetual extends State sealed trait Rejection final case object Reject extends Rejection class TaggingAdapter extends WriteEventAdapter { override def manifest(event: Any): String = "" override def toJournal(event: Any): Any = event match { case Executed => Tagged(event, Set("executed")) case OtherExecuted => Tagged(event, Set("other")) case AnotherExecuted => Tagged(event, Set("another")) case YetAnotherExecuted => Tagged(event, Set("yetanother")) case RetryExecuted => Tagged(event, Set("retry")) case IgnoreExecuted => Tagged(event, Set("ignore")) case NotDiscarded => Tagged(event, Set("discard")) case Discarded => Tagged(event, Set("discard")) } } val initial: State = Perpetual @nowarn("cat=unused") def next(state: State, event: Event): State = Perpetual @nowarn("cat=unused") def eval(state: State, cmd: Cmd): IO[Either[Rejection, Event]] = cmd match { case Execute => IO.pure(Right(Executed)) case ExecuteOther => IO.pure(Right(OtherExecuted)) case ExecuteAnother => IO.pure(Right(AnotherExecuted)) case ExecuteYetAnother => IO.pure(Right(YetAnotherExecuted)) case ExecuteRetry => IO.pure(Right(RetryExecuted)) case ExecuteIgnore => IO.pure(Right(IgnoreExecuted)) } def memoize[F[_], A](fa: F[A])(implicit F: Sync[F]): F[F[A]] = { import cats.implicits._ for { ref <- Ref[F].of(fa.attempt) _ <- ref.update(_.flatTap(a => ref.set(a.pure[F]))) } yield ref.get.flatten.rethrow } }
Example 64
Source File: MemUserRepository.scala From http4s-tracer with Apache License 2.0 | 5 votes |
package dev.profunktor.tracer.repository.interpreter import cats.effect.Sync import cats.effect.concurrent.Ref import cats.syntax.all._ import dev.profunktor.tracer.model.user.{User, Username} import dev.profunktor.tracer.repository.algebra.UserRepository object MemUserRepository { def create[F[_]: Sync]: F[UserRepository[F]] = Ref.of[F, Map[Username, User]](Map.empty).map(new MemUserRepository[F](_)) } class MemUserRepository[F[_]: Sync] private ( state: Ref[F, Map[Username, User]] ) extends UserRepository[F] { def find(username: Username): F[Option[User]] = state.get.map(_.get(username)) def persist(user: User): F[Unit] = state.update(_.updated(user.username, user)) }
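A hedged sketch (not from the http4s-tracer sources): the User model itself is not shown here, so it is taken as a parameter and looked up again through its username field.

import cats.effect.IO
import dev.profunktor.tracer.model.user.User
import dev.profunktor.tracer.repository.interpreter.MemUserRepository

def persistAndFind(user: User): IO[Option[User]] =
  for {
    repo  <- MemUserRepository.create[IO]
    _     <- repo.persist(user)
    found <- repo.find(user.username) // Some(user)
  } yield found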
Example 65
Source File: ConsoleSpec.scala From console4cats with Apache License 2.0 | 5 votes |
package cats.effect import cats._ import cats.data._ import cats.effect.concurrent.Ref import cats.effect.test.TestConsole import cats.implicits._ import munit.FunSuite class ConsoleSpec extends FunSuite { def program[F[_]: Console: FlatMap]: F[List[String]] = for { _ <- F.putStrLn("a") _ <- F.putStrLn(true) _ <- F.putStr(123) _ <- F.putStr("b") rd1 <- F.readLn _ <- F.putError(rd1) _ <- F.putError(1.5) rd2 <- F.readLn rd3 <- F.readLn } yield List(rd1, rd2, rd3) override def munitValueTransforms = super.munitValueTransforms :+ new ValueTransform("IO", { case ioa: IO[_] => ioa.unsafeToFuture }) test("Console") { for { out1 <- Ref[IO].of(Chain.empty[String]) out2 <- Ref[IO].of(Chain.empty[String]) out3 <- Ref[IO].of(Chain.empty[String]) in1 <- TestConsole.inputs .sequenceAndDefault[IO](Chain("foo", "bar", "baz"), "") implicit0(console: Console[IO]) <- TestConsole.make(out1, out2, out3, in1) rs <- program[IO] rs1 <- out1.get rs2 <- out2.get rs3 <- out3.get } yield { assert(rs == List("foo", "bar", "baz")) assert(rs1.mkString_("", ",", "") == "a,true") assert(rs2.mkString_("", ",", "") == "123,b") assert(rs3.mkString_("", ",", "") == "foo,1.5") } } test("mapK") { type E[A] = EitherT[IO, String, A] for { out1 <- Ref[IO].of(Chain.empty[String]) out2 <- Ref[IO].of(Chain.empty[String]) out3 <- Ref[IO].of(Chain.empty[String]) in1 <- TestConsole.inputs .sequenceAndDefault[IO](Chain("foo"), "undefined") implicit0(console: Console[E]) <- TestConsole .make(out1, out2, out3, in1) .map(_.mapK(EitherT.liftK[IO, String])) rs <- program[E].value rs1 <- out1.get rs2 <- out2.get rs3 <- out3.get } yield { assert(rs.getOrElse(fail("Either.Left")) == List("foo", "undefined", "undefined")) assert(rs1.mkString_("", ",", "") == "a,true") assert(rs2.mkString_("", ",", "") == "123,b") assert(rs3.mkString_("", ",", "") == "foo,1.5") } } // Monad Transformer instances def instances[F[_]: Applicative: Console] = { Console[OptionT[F, *]] Console[EitherT[F, String, *]] Console[IorT[F, String, *]] Console[Kleisli[F, String, *]] Console[ReaderWriterStateT[F, String, Int, Boolean, *]] Console[StateT[F, Int, *]] Console[WriterT[F, String, *]] } }
Example 66
Source File: TestConsole.scala From console4cats with Apache License 2.0 | 5 votes |
package cats.effect.test import cats._ import cats.data.Chain import cats.effect.concurrent.Ref import cats.effect.{ Console, Sync } import cats.syntax.functor._ import cats.syntax.show._ private class TestConsole[F[_]: Applicative]( outLines: Ref[F, Chain[String]], outWords: Ref[F, Chain[String]], outErrors: Ref[F, Chain[String]], val readLn: F[String] ) extends Console[F] { override def putStrLn[A: Show](a: A): F[Unit] = outLines.update(_.append(a.show)) override def putStr[A: Show](a: A): F[Unit] = outWords.update(_.append(a.show)) override def putError[A: Show](a: A): F[Unit] = outErrors.update(_.append(a.show)) } object TestConsole { def sequenceAndDefault[F[_]: Sync]( inputs: Chain[String], default: String ): F[F[String]] = Ref[F].of(inputs).map { _.modify { _.uncons match { case Some((head, tail)) => (tail, head) case None => (Chain.nil, default) } } } } }
Example 67
Source File: JoexServer.scala From docspell with GNU General Public License v3.0 | 5 votes |
package docspell.joex import cats.effect._ import cats.effect.concurrent.Ref import fs2.Stream import fs2.concurrent.SignallingRef import docspell.common.Pools import docspell.joex.routes._ import org.http4s.HttpApp import org.http4s.implicits._ import org.http4s.server.Router import org.http4s.server.blaze.BlazeServerBuilder import org.http4s.server.middleware.Logger object JoexServer { private case class App[F[_]]( httpApp: HttpApp[F], termSig: SignallingRef[F, Boolean], exitRef: Ref[F, ExitCode] ) def stream[F[_]: ConcurrentEffect: ContextShift]( cfg: Config, pools: Pools )(implicit T: Timer[F]): Stream[F, Nothing] = { val app = for { signal <- Resource.liftF(SignallingRef[F, Boolean](false)) exitCode <- Resource.liftF(Ref[F].of(ExitCode.Success)) joexApp <- JoexAppImpl .create[F](cfg, signal, pools.connectEC, pools.httpClientEC, pools.blocker) httpApp = Router( "/api/info" -> InfoRoutes(), "/api/v1" -> JoexRoutes(joexApp) ).orNotFound // With Middlewares in place finalHttpApp = Logger.httpApp(false, false)(httpApp) } yield App(finalHttpApp, signal, exitCode) Stream .resource(app) .flatMap(app => BlazeServerBuilder[F](pools.restEC) .bindHttp(cfg.bind.port, cfg.bind.address) .withHttpApp(app.httpApp) .withoutBanner .serveWhile(app.termSig, app.exitRef) ) }.drain }
Example 68
Source File: ResilientStreamSpec.scala From fs2-rabbit with Apache License 2.0 | 5 votes |
package dev.profunktor.fs2rabbit.resiliency

import cats.effect.IO
import cats.effect.concurrent.Ref
import cats.implicits._
import dev.profunktor.fs2rabbit.BaseSpec
import fs2._

import scala.concurrent.duration._
import org.scalatest.compatible.Assertion

class ResilientStreamSpec extends BaseSpec {

  private val sink: Pipe[IO, Int, Unit] = _.evalMap(putStrLn)

  val emptyAssertion: Assertion = true shouldBe true

  it should "run a stream until it's finished" in {
    val program = Stream(1, 2, 3).covary[IO].through(sink)
    ResilientStream.run(program).as(emptyAssertion).unsafeToFuture
  }

  it should "run a stream and recover in case of failure" in {
    val errorProgram = Stream.raiseError[IO](new Exception("on purpose")).through(sink)

    def p(ref: Ref[IO, Int]): Stream[IO, Unit] =
      errorProgram.handleErrorWith { t =>
        Stream.eval(ref.get) flatMap { n =>
          if (n == 0) Stream.eval(IO.unit)
          else Stream.eval(ref.update(_ - 1) *> IO.raiseError(t))
        }
      }

    Ref.of[IO, Int](2).flatMap(r => ResilientStream.run(p(r), 1.second)).as(emptyAssertion).unsafeToFuture
  }
}
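The recovery test above relies on a Ref[IO, Int] acting as a countdown of remaining failures. A minimal sketch of the same idea outside of fs2, with illustrative names only:

import cats.effect.IO
import cats.effect.concurrent.Ref

// Fails while the counter is positive, decrementing it each time; succeeds once it reaches zero.
def flaky(remaining: Ref[IO, Int]): IO[String] =
  remaining.modify(n => ((n - 1).max(0), n)).flatMap { n =>
    if (n > 0) IO.raiseError(new Exception("boom")) else IO.pure("done")
  }

// Usage sketch: with Ref.of[IO, Int](2), the first two calls fail and the third succeeds.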
Example 69
Source File: Fs2Streaming.scala From redis4cats with Apache License 2.0 | 5 votes |
package dev.profunktor.redis4cats
package streams

import cats.effect._
import cats.effect.concurrent.Ref
import cats.implicits._
import dev.profunktor.redis4cats.connection._
import dev.profunktor.redis4cats.data._
import dev.profunktor.redis4cats.effect.{ JRFuture, Log }
import dev.profunktor.redis4cats.effect.JRFuture._
import dev.profunktor.redis4cats.streams.data._
import fs2.Stream
import io.lettuce.core.{ ReadFrom => JReadFrom }

object RedisStream {

  def mkStreamingConnection[F[_]: Concurrent: ContextShift: Log, K, V](
      client: RedisClient,
      codec: RedisCodec[K, V]
  ): Stream[F, Streaming[Stream[F, *], K, V]] =
    Stream.resource(mkBlocker[F]).flatMap { blocker =>
      val acquire = JRFuture
        .fromConnectionFuture(F.delay(client.underlying.connectAsync[K, V](codec.underlying, client.uri.underlying)))(
          blocker
        )
        .map(new RedisRawStreaming(_, blocker))

      val release: RedisRawStreaming[F, K, V] => F[Unit] = c =>
        JRFuture.fromCompletableFuture(F.delay(c.client.closeAsync()))(blocker) *>
            F.info(s"Releasing Streaming connection: ${client.uri.underlying}")

      Stream.bracket(acquire)(release).map(rs => new RedisStream(rs))
    }

  def mkMasterReplicaConnection[F[_]: Concurrent: ContextShift: Log, K, V](
      codec: RedisCodec[K, V],
      uris: RedisURI*
  )(readFrom: Option[JReadFrom] = None): Stream[F, Streaming[Stream[F, *], K, V]] =
    Stream.resource(mkBlocker[F]).flatMap { blocker =>
      Stream.resource(RedisMasterReplica[F].make(codec, uris: _*)(readFrom)).map { conn =>
        new RedisStream(new RedisRawStreaming(conn.underlying, blocker))
      }
    }

}

class RedisStream[F[_]: Concurrent, K, V](rawStreaming: RedisRawStreaming[F, K, V])
    extends Streaming[Stream[F, *], K, V] {

  private[streams] val nextOffset: K => StreamingMessageWithId[K, V] => StreamingOffset[K] =
    key => msg => StreamingOffset.Custom(key, (msg.id.value.dropRight(2).toLong + 1).toString)

  private[streams] val offsetsByKey: List[StreamingMessageWithId[K, V]] => Map[K, Option[StreamingOffset[K]]] =
    list => list.groupBy(_.key).map { case (k, values) => k -> values.lastOption.map(nextOffset(k)) }

  override def append: Stream[F, StreamingMessage[K, V]] => Stream[F, Unit] =
    _.evalMap(msg => rawStreaming.xAdd(msg.key, msg.body).void)

  override def read(keys: Set[K], initialOffset: K => StreamingOffset[K]): Stream[F, StreamingMessageWithId[K, V]] = {
    val initial = keys.map(k => k -> initialOffset(k)).toMap
    Stream.eval(Ref.of[F, Map[K, StreamingOffset[K]]](initial)).flatMap { ref =>
      (for {
        offsets <- Stream.eval(ref.get)
        list    <- Stream.eval(rawStreaming.xRead(offsets.values.toSet))
        newOffsets = offsetsByKey(list).collect { case (key, Some(value)) => key -> value }.toList
        _       <- Stream.eval(newOffsets.map { case (k, v) => ref.update(_.updated(k, v)) }.sequence)
        result  <- Stream.fromIterator(list.iterator)
      } yield result).repeat
    }
  }
}
Example 70
Source File: ProjectClient.scala From nexus with Apache License 2.0 | 5 votes |
package ch.epfl.bluebrain.nexus.cli.clients

import cats.effect.concurrent.Ref
import cats.effect.{Sync, Timer}
import cats.implicits._
import ch.epfl.bluebrain.nexus.cli.config.EnvConfig
import ch.epfl.bluebrain.nexus.cli.sse.{OrgLabel, OrgUuid, ProjectLabel, ProjectUuid}
import ch.epfl.bluebrain.nexus.cli.{ClientErrOr, Console}
import io.circe.Decoder
import io.circe.generic.semiauto.deriveDecoder
import org.http4s.client.Client
import org.http4s.{Headers, Request}

trait ProjectClient[F[_]] {
  def labels(org: OrgUuid, proj: ProjectUuid): F[ClientErrOr[(OrgLabel, ProjectLabel)]]
}

object ProjectClient {

  final def apply[F[_]: Sync: Timer](
      client: Client[F],
      env: EnvConfig,
      cache: Ref[F, Map[(OrgUuid, ProjectUuid), (OrgLabel, ProjectLabel)]],
      console: Console[F]
  ): ProjectClient[F] = {
    implicit val c: Console[F] = console
    new LiveProjectClient[F](client, env, cache)
  }

  private class LiveProjectClient[F[_]: Timer: Console: Sync](
      client: Client[F],
      env: EnvConfig,
      cache: Ref[F, Map[(OrgUuid, ProjectUuid), (OrgLabel, ProjectLabel)]]
  ) extends AbstractHttpClient[F](client, env)
      with ProjectClient[F] {

    override def labels(org: OrgUuid, proj: ProjectUuid): F[ClientErrOr[(OrgLabel, ProjectLabel)]] =
      cache.get.flatMap { map =>
        map.get((org, proj)) match {
          // value in cache, return
          case Some(value) => F.pure(Right(value))
          // value not in cache, fetch, update and return
          case None =>
            get(org, proj).flatMap {
              // propagate error
              case l @ Left(_) => F.pure(l)
              // success, update cache and return
              case r @ Right(value) =>
                cache.modify(m => (m.updated((org, proj), value), value)) *> F.pure(r)
            }
        }
      }

    private def get(org: OrgUuid, proj: ProjectUuid): F[ClientErrOr[(OrgLabel, ProjectLabel)]] = {
      val uri = env.project(org, proj)
      val req = Request[F](uri = uri, headers = Headers(env.authorizationHeader.toList))
      executeParse[NexusAPIProject](req).map {
        case Right(NexusAPIProject(orgLabel, projectLabel)) => Right((orgLabel, projectLabel))
        case Left(err)                                      => Left(err)
      }
    }
  }

  final private[ProjectClient] case class NexusAPIProject(`_organizationLabel`: OrgLabel, `_label`: ProjectLabel)

  private[ProjectClient] object NexusAPIProject {
    implicit val nexusAPIProjectDecoder: Decoder[NexusAPIProject] = deriveDecoder[NexusAPIProject]
  }
}
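The labels implementation above is a read-through cache built on Ref: look up the key, and only on a miss perform the remote call and record the result. A minimal, generic sketch of that pattern, with a placeholder fetch function that is not part of the nexus code base:

import cats.effect.Sync
import cats.effect.concurrent.Ref
import cats.implicits._

// Return the cached value for `key`, or fetch it, store it, and return it.
def cachedLookup[F[_]: Sync, K, V](cache: Ref[F, Map[K, V]], fetch: K => F[V])(key: K): F[V] =
  cache.get.map(_.get(key)).flatMap {
    case Some(v) => v.pure[F]
    case None    => fetch(key).flatTap(v => cache.update(_ + (key -> v)))
  }

As in the original, concurrent misses for the same key may fetch more than once and the last write wins, which is acceptable for an idempotent lookup.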
Example 71
Source File: PubSub.scala From redis4cats with Apache License 2.0 | 5 votes |
package dev.profunktor.redis4cats
package pubsub

import cats.effect._
import cats.effect.concurrent.Ref
import cats.syntax.all._
import dev.profunktor.redis4cats.data._
import dev.profunktor.redis4cats.connection.RedisClient
import dev.profunktor.redis4cats.effect.{ JRFuture, Log }
import dev.profunktor.redis4cats.effect.JRFuture._
import fs2.Stream
import fs2.concurrent.Topic
import io.lettuce.core.pubsub.StatefulRedisPubSubConnection

object PubSub {

  private[redis4cats] def acquireAndRelease[F[_]: ConcurrentEffect: ContextShift: Log, K, V](
      client: RedisClient,
      codec: RedisCodec[K, V],
      blocker: Blocker
  ): (F[StatefulRedisPubSubConnection[K, V]], StatefulRedisPubSubConnection[K, V] => F[Unit]) = {

    val acquire: F[StatefulRedisPubSubConnection[K, V]] =
      JRFuture.fromConnectionFuture(
        F.delay(client.underlying.connectPubSubAsync(codec.underlying, client.uri.underlying))
      )(blocker)

    val release: StatefulRedisPubSubConnection[K, V] => F[Unit] = c =>
      JRFuture.fromCompletableFuture(F.delay(c.closeAsync()))(blocker) *>
          F.info(s"Releasing PubSub connection: ${client.uri.underlying}")

    (acquire, release)
  }

  def mkSubscriberConnection[F[_]: ConcurrentEffect: ContextShift: Log, K, V](
      client: RedisClient,
      codec: RedisCodec[K, V]
  ): Stream[F, SubscribeCommands[Stream[F, *], K, V]] =
    Stream.resource(mkBlocker[F]).flatMap { blocker =>
      val (acquire, release) = acquireAndRelease[F, K, V](client, codec, blocker)
      Stream.eval(Ref.of(Map.empty[K, Topic[F, Option[V]]])).flatMap { st =>
        Stream.bracket(acquire)(release).map(new Subscriber(st, _, blocker))
      }
    }

}
Example 72
Source File: PubSubInternals.scala From redis4cats with Apache License 2.0 | 5 votes |
package dev.profunktor.redis4cats.pubsub.internals

import cats.effect.ConcurrentEffect
import cats.effect.concurrent.Ref
import cats.effect.syntax.effect._
import cats.syntax.all._
import dev.profunktor.redis4cats.data.RedisChannel
import dev.profunktor.redis4cats.effect.Log
import fs2.concurrent.Topic
import io.lettuce.core.pubsub.{ RedisPubSubListener, StatefulRedisPubSubConnection }

object PubSubInternals {

  private[redis4cats] def defaultListener[F[_]: ConcurrentEffect, K, V](
      channel: RedisChannel[K],
      topic: Topic[F, Option[V]]
  ): RedisPubSubListener[K, V] =
    new RedisPubSubListener[K, V] {
      override def message(ch: K, msg: V): Unit =
        if (ch == channel.underlying) {
          topic.publish1(Option(msg)).toIO.unsafeRunAsync(_ => ())
        }
      override def message(pattern: K, channel: K, message: V): Unit = this.message(channel, message)
      override def psubscribed(pattern: K, count: Long): Unit        = ()
      override def subscribed(channel: K, count: Long): Unit         = ()
      override def unsubscribed(channel: K, count: Long): Unit       = ()
      override def punsubscribed(pattern: K, count: Long): Unit      = ()
    }

  private[redis4cats] def apply[F[_]: ConcurrentEffect: Log, K, V](
      state: Ref[F, PubSubState[F, K, V]],
      subConnection: StatefulRedisPubSubConnection[K, V]
  ): GetOrCreateTopicListener[F, K, V] = { channel => st =>
    st.get(channel.underlying)
      .fold {
        Topic[F, Option[V]](None).flatTap { topic =>
          val listener = defaultListener(channel, topic)
          F.info(s"Creating listener for channel: $channel") *>
            F.delay(subConnection.addListener(listener)) *>
            state.update(_.updated(channel.underlying, topic))
        }
      }(F.pure)
  }

}
Example 73
Source File: LivePubSubCommands.scala From redis4cats with Apache License 2.0 | 5 votes |
package dev.profunktor.redis4cats
package pubsub

import cats.effect._
import cats.effect.concurrent.Ref
import cats.syntax.all._
import dev.profunktor.redis4cats.data.RedisChannel
import dev.profunktor.redis4cats.pubsub.data.Subscription
import dev.profunktor.redis4cats.pubsub.internals.{ PubSubInternals, PubSubState }
import dev.profunktor.redis4cats.effect.{ JRFuture, Log }
import fs2.Stream
import io.lettuce.core.pubsub.StatefulRedisPubSubConnection

class LivePubSubCommands[F[_]: ConcurrentEffect: ContextShift: Log, K, V](
    state: Ref[F, PubSubState[F, K, V]],
    subConnection: StatefulRedisPubSubConnection[K, V],
    pubConnection: StatefulRedisPubSubConnection[K, V],
    blocker: Blocker
) extends PubSubCommands[Stream[F, *], K, V] {

  private[redis4cats] val subCommands: SubscribeCommands[Stream[F, *], K, V] =
    new Subscriber[F, K, V](state, subConnection, blocker)
  private[redis4cats] val pubSubStats: PubSubStats[Stream[F, *], K] =
    new LivePubSubStats(pubConnection, blocker)

  override def subscribe(channel: RedisChannel[K]): Stream[F, V] =
    subCommands.subscribe(channel)

  override def unsubscribe(channel: RedisChannel[K]): Stream[F, Unit] =
    subCommands.unsubscribe(channel)

  override def publish(channel: RedisChannel[K]): Stream[F, V] => Stream[F, Unit] =
    _.evalMap { message =>
      state.get.flatMap { st =>
        PubSubInternals[F, K, V](state, subConnection).apply(channel)(st) *>
          JRFuture(F.delay(pubConnection.async().publish(channel.underlying, message)))(blocker)
      }.void
    }

  override def pubSubChannels: Stream[F, List[K]] =
    pubSubStats.pubSubChannels

  override def pubSubSubscriptions(channel: RedisChannel[K]): Stream[F, Subscription[K]] =
    pubSubStats.pubSubSubscriptions(channel)

  override def pubSubSubscriptions(channels: List[RedisChannel[K]]): Stream[F, List[Subscription[K]]] =
    pubSubStats.pubSubSubscriptions(channels)
}
Example 74
Source File: Fs2UnaryServerCallListener.scala From fs2-grpc with MIT License | 5 votes |
package org.lyranthe.fs2_grpc
package java_runtime
package server

import cats.effect.{ConcurrentEffect, Effect}
import cats.effect.concurrent.{Deferred, Ref}
import cats.syntax.all._
import io.grpc._

class Fs2UnaryServerCallListener[F[_], Request, Response] private (
    request: Ref[F, Option[Request]],
    isComplete: Deferred[F, Unit],
    val isCancelled: Deferred[F, Unit],
    val call: Fs2ServerCall[F, Request, Response]
)(implicit F: Effect[F])
    extends ServerCall.Listener[Request]
    with Fs2ServerCallListener[F, F, Request, Response] {

  import Fs2UnaryServerCallListener._

  override def onCancel(): Unit = {
    isCancelled.complete(()).unsafeRun()
  }

  override def onMessage(message: Request): Unit = {
    request.access
      .flatMap[Unit] { case (curValue, modify) =>
        if (curValue.isDefined)
          F.raiseError(statusException(TooManyRequests))
        else
          modify(message.some).void
      }
      .unsafeRun()
  }

  override def onHalfClose(): Unit =
    isComplete.complete(()).unsafeRun()

  override def source: F[Request] =
    for {
      _           <- isComplete.get
      valueOrNone <- request.get
      value       <- valueOrNone.fold[F[Request]](F.raiseError(statusException(NoMessage)))(F.pure)
    } yield value
}

object Fs2UnaryServerCallListener {

  val TooManyRequests: String = "Too many requests"
  val NoMessage: String       = "No message for unary call"

  private val statusException: String => StatusRuntimeException = msg =>
    new StatusRuntimeException(Status.INTERNAL.withDescription(msg))

  class PartialFs2UnaryServerCallListener[F[_]](val dummy: Boolean = false) extends AnyVal {

    def apply[Request, Response](
        call: ServerCall[Request, Response],
        options: ServerCallOptions = ServerCallOptions.default
    )(implicit
        F: ConcurrentEffect[F]
    ): F[Fs2UnaryServerCallListener[F, Request, Response]] =
      for {
        request     <- Ref.of[F, Option[Request]](none)
        isComplete  <- Deferred[F, Unit]
        isCancelled <- Deferred[F, Unit]
        serverCall  <- Fs2ServerCall[F, Request, Response](call, options)
      } yield new Fs2UnaryServerCallListener[F, Request, Response](request, isComplete, isCancelled, serverCall)
  }

  def apply[F[_]] = new PartialFs2UnaryServerCallListener[F]
}
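onMessage above uses Ref#access, which returns the current value together with a setter that only succeeds if the Ref has not been modified in between, to enforce at most one request per unary call. A minimal sketch of that check-and-set, with illustrative names:

import cats.effect.IO
import cats.effect.concurrent.Ref

// Set the Ref exactly once; fail if a value is already present.
def setOnce[A](ref: Ref[IO, Option[A]], a: A): IO[Unit] =
  ref.access.flatMap {
    case (Some(_), _)   => IO.raiseError(new IllegalStateException("already set"))
    case (None, setter) => setter(Some(a)).void
  }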
Example 75
Source File: Fs2UnaryClientCallListener.scala From fs2-grpc with MIT License | 5 votes |
package org.lyranthe.fs2_grpc
package java_runtime
package client

import cats.effect._
import cats.effect.concurrent.{Deferred, Ref}
import cats.implicits._
import io.grpc._

class Fs2UnaryClientCallListener[F[_], Response](grpcStatus: Deferred[F, GrpcStatus], value: Ref[F, Option[Response]])(
    implicit F: Effect[F]
) extends ClientCall.Listener[Response] {

  override def onClose(status: Status, trailers: Metadata): Unit =
    grpcStatus.complete(GrpcStatus(status, trailers)).unsafeRun()

  override def onMessage(message: Response): Unit =
    value.set(message.some).unsafeRun()

  def getValue: F[Response] = {
    for {
      r <- grpcStatus.get
      v <- value.get
      result <- {
        if (!r.status.isOk)
          F.raiseError(r.status.asRuntimeException(r.trailers))
        else {
          v match {
            case None =>
              F.raiseError(
                Status.INTERNAL
                  .withDescription("No value received for unary call")
                  .asRuntimeException(r.trailers)
              )
            case Some(v1) =>
              F.pure(v1)
          }
        }
      }
    } yield result
  }
}

object Fs2UnaryClientCallListener {

  def apply[F[_]: ConcurrentEffect, Response]: F[Fs2UnaryClientCallListener[F, Response]] = {
    (Deferred[F, GrpcStatus], Ref.of[F, Option[Response]](none)).mapN((response, value) =>
      new Fs2UnaryClientCallListener[F, Response](response, value)
    )
  }
}
Example 76
Source File: StartResourceSpec.scala From kafka-journal with MIT License | 5 votes |
package com.evolutiongaming.kafka.journal.util

import cats.effect._
import cats.effect.concurrent.{Deferred, Ref}
import cats.implicits._
import com.evolutiongaming.kafka.journal.IOSuite._
import org.scalatest.funsuite.AsyncFunSuite
import org.scalatest.matchers.should.Matchers

class StartResourceSpec extends AsyncFunSuite with Matchers {

  test("StartResource") {
    val result = for {
      deferred <- Deferred[IO, Unit]
      ref      <- Ref.of[IO, Boolean](false)
      res       = Resource.make(IO.unit)(_ => ref.set(true))
      fiber    <- StartResource(res)(_ => deferred.complete(()) *> IO.never.as(()))
      _        <- deferred.get
      _        <- fiber.cancel
      result   <- ref.get
    } yield {
      result shouldEqual true
    }
    result.run()
  }
}
Example 77
Source File: ResourceRefTest.scala From kafka-journal with MIT License | 5 votes |
package com.evolutiongaming.kafka.journal.util

import cats.effect._
import cats.effect.concurrent.Ref
import cats.implicits._
import com.evolutiongaming.kafka.journal.IOSuite._
import org.scalatest.funsuite.AsyncFunSuite
import org.scalatest.matchers.should.Matchers

class ResourceRefTest extends AsyncFunSuite with Matchers {

  test("ResourceRef") {

    def resourceOf[A](a: A, ref: Ref[IO, Boolean]) = {
      Resource.make {
        ref.set(true).as(a)
      } { _ =>
        ref.set(false)
      }
    }

    val result = for {
      ref0 <- Ref[IO].of(false)
      ref1 <- Ref[IO].of(false)
      ref  <- ResourceRef.of(resourceOf(0, ref0)).use { ref =>
        for {
          a <- ref.get
          _  = a shouldEqual 0
          a <- ref0.get
          _  = a shouldEqual true
          _ <- ref.set(resourceOf(1, ref1))
          a <- ref.get
          _  = a shouldEqual 1
          a <- ref0.get
          _  = a shouldEqual false
          a <- ref1.get
          _  = a shouldEqual true
        } yield ref
      }
      a <- ref1.get
      _  = a shouldEqual false
      _ <- ().pure[IO]
      a <- ref.get.attempt
      _  = a shouldEqual ResourceReleasedError.asLeft
    } yield {}
    result.run()
  }
}
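Both kafka-journal tests above use a Ref[IO, Boolean] purely as a probe that records whether a Resource is currently acquired. The idiom in isolation, with illustrative names:

import cats.effect.{IO, Resource}
import cats.effect.concurrent.Ref

// A resource whose lifecycle is observable through the `alive` flag.
def tracked(alive: Ref[IO, Boolean]): Resource[IO, Unit] =
  Resource.make(alive.set(true))(_ => alive.set(false))

// Usage sketch: `alive.get` yields true inside `tracked(alive).use(...)` and false afterwards.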
Example 78
Source File: GracefulFiberSpec.scala From kafka-journal with MIT License | 5 votes |
package com.evolutiongaming.kafka.journal.util

import cats.effect._
import cats.effect.concurrent.{Deferred, Ref}
import cats.implicits._
import com.evolutiongaming.kafka.journal.IOSuite._
import org.scalatest.funsuite.AsyncFunSuite
import org.scalatest.matchers.should.Matchers

class GracefulFiberSpec extends AsyncFunSuite with Matchers {

  test("GracefulFiber") {
    val result = for {
      deferred <- Deferred[IO, Unit]
      ref      <- Ref.of[IO, Boolean](false)
      fiber    <- GracefulFiber[IO].apply { cancel =>
        Concurrent[IO].start[Unit] {
          val loop = for {
            cancel <- cancel
            _      <- ref.set(cancel)
          } yield {
            if (cancel) ().some else none
          }
          for {
            _ <- deferred.complete(())
            _ <- loop.untilDefinedM
          } yield {}
        }
      }
      _      <- fiber.cancel
      result <- ref.get
    } yield {
      result shouldEqual true
    }
    result.run()
  }
}
Example 79
Source File: ResourceRegistrySpec.scala From kafka-journal with MIT License | 5 votes |
package com.evolutiongaming.kafka.journal.util

import cats.effect._
import cats.effect.concurrent.{Deferred, Ref}
import cats.implicits._
import cats.{Applicative, Foldable}
import com.evolutiongaming.kafka.journal.IOSuite._
import org.scalatest.funsuite.AsyncFunSuite
import org.scalatest.matchers.should.Matchers

import scala.util.control.NoStackTrace

class ResourceRegistrySpec extends AsyncFunSuite with Matchers {

  val error: Throwable = new RuntimeException with NoStackTrace

  for {
    exitCase <- List(
      ExitCase.complete,
      ExitCase.error(error),
      ExitCase.canceled)
  } yield {
    test(s"ResRegistry releases resources, exitCase: $exitCase") {
      val result = exitCase match {
        case ExitCase.Completed    => testError(none)
        case ExitCase.Canceled     => testCancel
        case ExitCase.Error(error) => testError(error.some)
      }
      result.run()
    }
  }

  private def testError(error: Option[Throwable]) = {
    val n = 3

    def logic(release: IO[Unit]) = {
      ResourceRegistry.of[IO].use { registry =>
        val resource = Resource.make(().pure[IO]) { _ => release }
        val fa = registry.allocate(resource)
        implicit val monoidUnit = Applicative.monoid[IO, Unit]
        for {
          _ <- Foldable[List].fold(List.fill(n)(fa))
          _ <- error.fold(().pure[IO])(_.raiseError[IO, Unit])
        } yield {}
      }
    }

    for {
      ref      <- Ref.of[IO, Int](0)
      fa        = logic(ref.update(_ + 1))
      result   <- fa.redeem(_.some, _ => none)
      releases <- ref.get
    } yield {
      result shouldEqual error
      releases shouldEqual n
    }
  }

  private def testCancel = {
    for {
      released <- Ref.of[IO, Int](0)
      started  <- Deferred[IO, Unit]
      fiber    <- Concurrent[IO].start {
        ResourceRegistry.of[IO].use { registry =>
          val resource = Resource.make(().pure[IO]) { _ => released.update(_ + 1) }
          for {
            _ <- registry.allocate(resource)
            _ <- started.complete(())
            _ <- IO.never.as(())
          } yield {}
        }
      }
      _        <- started.get
      _        <- fiber.cancel
      released <- released.get
    } yield {
      released shouldEqual 1
    }
  }
}
Example 80
Source File: RollingFileLogger.scala From odin with Apache License 2.0 | 4 votes |
package io.odin.loggers

import java.nio.file.{Files, Path, Paths}
import java.time.{Instant, LocalDateTime}
import java.time.format.DateTimeFormatter
import java.util.TimeZone
import java.util.concurrent.TimeUnit

import cats.Monad
import cats.effect.concurrent.Ref
import cats.effect.{Concurrent, ContextShift, Fiber, Resource, Timer}
import cats.syntax.all._
import io.odin.formatter.Formatter
import io.odin.{Level, Logger, LoggerMessage}

import scala.concurrent.duration.{FiniteDuration, _}

object RollingFileLogger {

  def apply[F[_]](
      fileNamePattern: LocalDateTime => String,
      maxFileSizeInBytes: Option[Long],
      rolloverInterval: Option[FiniteDuration],
      formatter: Formatter,
      minLevel: Level
  )(implicit F: Concurrent[F], timer: Timer[F], cs: ContextShift[F]): Resource[F, Logger[F]] = {
    new RollingFileLoggerFactory(
      fileNamePattern,
      maxFileSizeInBytes,
      rolloverInterval,
      formatter,
      minLevel,
      FileLogger.apply[F]
    ).mk
  }

  private[odin] class RefLogger[F[_]: Timer: Monad](
      current: Ref[F, Logger[F]],
      override val minLevel: Level
  ) extends DefaultLogger[F](minLevel) {

    def log(msg: LoggerMessage): F[Unit] = current.get.flatMap(_.log(msg))

    override def log(msgs: List[LoggerMessage]): F[Unit] = current.get.flatMap(_.log(msgs))

  }

  private[odin] class RollingFileLoggerFactory[F[_]](
      fileNamePattern: LocalDateTime => String,
      maxFileSizeInBytes: Option[Long],
      rolloverInterval: Option[FiniteDuration],
      formatter: Formatter,
      minLevel: Level,
      underlyingLogger: (String, Formatter, Level) => Resource[F, Logger[F]],
      fileSizeCheck: Path => Long = Files.size
  )(implicit F: Concurrent[F], timer: Timer[F], cs: ContextShift[F]) {

    val df: DateTimeFormatter = DateTimeFormatter.ofPattern("yyyy-MM-dd-HH-mm-ss")

    def mk: Resource[F, Logger[F]] = {
      val logger = for {
        ((logger, watcherFiber), release) <- allocate.allocated
        refLogger  <- Ref.of(logger)
        refRelease <- Ref.of(release)
        _          <- F.start(rollingLoop(watcherFiber, refLogger, refRelease))
      } yield {
        (new RefLogger(refLogger, minLevel), refRelease)
      }
      Resource.make(logger)(_._2.get.flatten).map {
        case (logger, _) => logger
      }
    }

    def now: F[Long] = timer.clock.realTime(TimeUnit.MILLISECONDS)

    def rollingLoop(watcher: Fiber[F, Unit], logger: Ref[F, Logger[F]], release: Ref[F, F[Unit]]): F[Unit] =
      for {
        _          <- watcher.join
        oldRelease <- release.get
        ((newLogger, newWatcher), newRelease) <- allocate.allocated
        _          <- logger.set(newLogger)
        _          <- release.set(newRelease)
        _          <- oldRelease
        _          <- rollingLoop(newWatcher, logger, release)
      } yield ()

  }

}
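RefLogger above is the hot-swap pattern: callers always read the current delegate from a Ref, so rollingLoop can atomically replace the underlying FileLogger without interrupting writers. A stripped-down sketch of the same idea, with an illustrative Log trait standing in for odin's Logger:

import cats.effect.IO
import cats.effect.concurrent.Ref

trait Log { def write(msg: String): IO[Unit] }

// Every call goes through the Ref, so `swap` can replace the delegate at any time.
final class SwappableLog(current: Ref[IO, Log]) extends Log {
  def write(msg: String): IO[Unit] = current.get.flatMap(_.write(msg))
  def swap(next: Log): IO[Unit]    = current.set(next)
}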