zio.clock.Clock Scala Examples

The following examples show how to use zio.clock.Clock. They are drawn from open-source projects; the source file, project, and license are noted above each example.
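As a quick orientation before the project examples, here is a minimal, self-contained sketch of the service in isolation. It is not taken from any of the projects below and assumes ZIO 1.x, where zio.clock.Clock is the Has-based environment type and the zio.clock package object exposes accessors such as currentTime, nanoTime and sleep; the object and value names are purely illustrative.

import java.util.concurrent.TimeUnit

import zio.{ ExitCode, URIO, ZIO }
import zio.clock.{ currentTime, sleep, Clock }
import zio.console.{ putStrLn, Console }
import zio.duration._

// Illustrative only: measure how long a one-second sleep takes using the Clock service.
object ClockUsageExample extends zio.App {

  val program: ZIO[Clock with Console, java.io.IOException, Unit] =
    for {
      start <- currentTime(TimeUnit.MILLISECONDS) // current epoch time, provided by Clock
      _     <- sleep(1.second)                    // non-blocking sleep, also provided by Clock
      end   <- currentTime(TimeUnit.MILLISECONDS)
      _     <- putStrLn(s"Slept for roughly ${end - start} ms")
    } yield ()

  // ZEnv already contains Clock and Console, so the program needs no extra layers.
  override def run(args: List[String]): URIO[zio.ZEnv, ExitCode] =
    program.exitCode
}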
Example 1
Source File: IteratorTest.scala    From spark-tools   with Apache License 2.0
package io.univalence.sparkzio

import zio.{ DefaultRuntime, IO, Ref, Task, UIO, ZIO, ZManaged }
import zio.clock.Clock
import zio.stream.{ Stream, ZSink, ZStream }
import zio.test.DefaultRunnableSpec
import zio.test._
import zio.test.Assertion._

object StreamTest {

  def assertForAll[R, E, A](zstream: ZStream[R, E, A])(f: A => TestResult): ZIO[R, E, TestResult] =
    zstream.fold(assert(Unit, Assertion.anything))((as, a) => as && f(a))

  def isSorted[A: Ordering]: Assertion[Iterable[A]] =
    Assertion.assertion("sorted")()(x => {
      val y = x.toList
      y.sorted == y
    })
}

object IteratorTest
    extends DefaultRunnableSpec(
      suite("iterator")(
        testM("to iterator should be lazy")({
          case class Element(n: Int, time: Long)

          (for {
            clock      <- ZIO.environment[Clock]
            n          <- Ref.make(0)
            incCounter <- n.update(_ + 1).forever.fork

          } yield {
            def element: UIO[Element] = n.get.zipWith(clock.clock.nanoTime)(Element)

            val in = Stream.repeatEffect(element)

            val iterator = Iterator.unwrapManaged(Iterator.fromStream(in))

            val out: ZStream[Any, Nothing, Element] =
              ZStream.fromIterator(iterator).mapConcatM(e => element.map(List(e, _)))

            implicit val ordering: Ordering[Element] = Ordering.by(x => x.n -> x.time)

            out.take(2000).runCollect.map(e => assert(e, StreamTest.isSorted))
          }).flatten
        }),
        testM("<=>")({
          val in: List[Int] = (1 to 100).toList

          (for {
            _ <- UIO.unit.toManaged_
            stream1 = ZStream.fromIterator(UIO(in.toIterator))
            iterator <- Iterator.fromStream(stream1)
            stream2 = ZStream.fromIterator(UIO(iterator))
            out <- stream2.runCollect.toManaged_
          } yield {
            assert(in, equalTo(out))
          }).use(x => ZIO.effect(x))

        }),
        testM("on exit")(
          (for {
            isOpen <- Ref.make(false).toManaged_
            stream = ZStream.managed(ZManaged.make(isOpen.update(_ => true))(_ => isOpen.set(false)))
            iterator <- Iterator.fromStream(stream)
          } yield {
            assert(iterator.toList, equalTo(List(true)))
          }).use(x => IO.effect(x))
        )
      )
    ) 
Example 2
Source File: ExampleApp.scala    From caliban   with Apache License 2.0
package caliban.finch

import caliban.ExampleData.sampleCharacters
import caliban.ExampleService.ExampleService
import caliban.{ ExampleApi, ExampleService, FinchAdapter }
import com.twitter.io.{ Buf, BufReader, Reader }
import com.twitter.util.Await
import io.finch.Endpoint
import zio.clock.Clock
import zio.console.Console
import zio.internal.Platform
import zio.interop.catz._
import zio.{ Runtime, Task }

object ExampleApp extends App with Endpoint.Module[Task] {

  implicit val runtime: Runtime[ExampleService with Console with Clock] =
    Runtime.unsafeFromLayer(ExampleService.make(sampleCharacters) ++ Console.live ++ Clock.live, Platform.default)

  val interpreter = runtime.unsafeRun(ExampleApi.api.interpreter)

  
  import com.twitter.finagle.Http
  import io.finch._
  import io.finch.circe._

  val endpoint = "api" :: "graphql" :: FinchAdapter.makeHttpService(interpreter)

  val graphiqlBuf = {
    val stream = getClass.getResourceAsStream("/graphiql.html")
    BufReader.readAll(Reader.fromStream(stream))
  }

  val graphiql: Endpoint[Task, Buf] = get("graphiql") {
    graphiqlBuf.map(Ok)
  }

  val services = Bootstrap.serve[Application.Json](endpoint).serve[Text.Html](graphiql).toService

  val server = Http.server.serve(":8088", services)

  println(s"Server online at http://localhost:8088/\nPress RETURN to stop...")
  Await.ready(server)

} 
Example 3
Source File: ProtocolRecorder.scala    From zio-keeper   with Apache License 2.0
package zio.keeper

import izumi.reflect.Tags.Tag
import zio._
import zio.clock.Clock
import zio.keeper.swim.{ Message, Nodes, Protocol }
import zio.logging.Logging
import zio.stream.{ Sink, ZStream }

object ProtocolRecorder {
  type ProtocolRecorder[A] = Has[ProtocolRecorder.Service[A]]

  trait Service[A] {
    def withBehavior(pf: PartialFunction[Message.Direct[A], Message[A]]): UIO[Service[A]]
    def collectN[B](n: Long)(pr: PartialFunction[Message[A], B]): UIO[List[B]]
    def send(msg: Message.Direct[A]): IO[zio.keeper.Error, Message[A]]
  }

  def apply[A: Tag](
    pf: PartialFunction[Message.Direct[A], Message[A]] = PartialFunction.empty
  ): ZIO[ProtocolRecorder[A], Nothing, Service[A]] =
    ZIO.accessM[ProtocolRecorder[A]](recorder => recorder.get.withBehavior(pf))

  def make[R, E, A: Tag](
    protocolFactory: ZIO[R, E, Protocol[A]]
  ): ZLayer[Clock with Logging with Nodes with R, E, ProtocolRecorder[A]] =
    ZLayer.fromEffect {
      for {
        behaviorRef  <- Ref.make[PartialFunction[Message.Direct[A], Message[A]]](PartialFunction.empty)
        protocol     <- protocolFactory
        messageQueue <- ZQueue.bounded[Message[A]](100)
        _            <- protocol.produceMessages.foreach(consumeMessages(messageQueue, _, behaviorRef, protocol)).fork
        stream       = ZStream.fromQueue(messageQueue)
      } yield new Service[A] {

        override def withBehavior(pf: PartialFunction[Message.Direct[A], Message[A]]): UIO[Service[A]] =
          behaviorRef.set(pf).as(this)

        override def collectN[B](n: Long)(pf: PartialFunction[Message[A], B]): UIO[List[B]] =
          stream.collect(pf).run(Sink.collectAllN[B](n))

        override def send(msg: Message.Direct[A]): IO[zio.keeper.Error, Message[A]] =
          protocol.onMessage(msg)
      }
    }

  private def consumeMessages[A](
    messageQueue: zio.Queue[Message[A]],
    message: Message[A],
    behaviorRef: Ref[PartialFunction[Message.Direct[A], Message[A]]],
    protocol: Protocol[A]
  ): ZIO[Clock with Logging with Nodes, zio.keeper.Error, Unit] =
    message match {
      case Message.WithTimeout(message, action, timeout) =>
        consumeMessages(messageQueue, message, behaviorRef, protocol).unit *>
          action.delay(timeout).flatMap(consumeMessages(messageQueue, _, behaviorRef, protocol)).fork.unit
      case md: Message.Direct[A] =>
        messageQueue.offer(md) *>
          behaviorRef.get.flatMap { fn =>
            ZIO.whenCase(fn.lift(md)) {
              case Some(d: Message.Direct[A]) => protocol.onMessage(d)
            }
          }
      case msg =>
        messageQueue.offer(msg).unit
    }
} 
Example 4
Source File: ProtocolRecorder.scala    From zio-keeper   with Apache License 2.0
package zio.keeper.swim

import izumi.reflect.Tags.Tag
import zio._
import zio.clock.Clock
import zio.logging.Logging
import zio.stream.{ Sink, ZStream }

object ProtocolRecorder {
  type ProtocolRecorder[A] = Has[ProtocolRecorder.Service[A]]

  trait Service[A] {
    def withBehavior(pf: PartialFunction[Message.Direct[A], Message[A]]): UIO[Service[A]]
    def collectN[B](n: Long)(pr: PartialFunction[Message[A], B]): UIO[List[B]]
    def send(msg: Message.Direct[A]): IO[zio.keeper.Error, Message[A]]
  }

  def apply[A: Tag](
    pf: PartialFunction[Message.Direct[A], Message[A]] = PartialFunction.empty
  ): ZIO[ProtocolRecorder[A], Nothing, Service[A]] =
    ZIO.accessM[ProtocolRecorder[A]](recorder => recorder.get.withBehavior(pf))

  def make[R, E, A: Tag](
    protocolFactory: ZIO[R, E, Protocol[A]]
  ): ZLayer[Clock with Logging with Nodes with R, E, ProtocolRecorder[A]] =
    ZLayer.fromEffect {
      for {
        behaviorRef  <- Ref.make[PartialFunction[Message.Direct[A], Message[A]]](PartialFunction.empty)
        protocol     <- protocolFactory
        messageQueue <- ZQueue.bounded[Message[A]](100)
        _            <- protocol.produceMessages.foreach(consumeMessages(messageQueue, _, behaviorRef, protocol)).fork
        stream       = ZStream.fromQueue(messageQueue)
      } yield new Service[A] {

        override def withBehavior(pf: PartialFunction[Message.Direct[A], Message[A]]): UIO[Service[A]] =
          behaviorRef.set(pf).as(this)

        override def collectN[B](n: Long)(pf: PartialFunction[Message[A], B]): UIO[List[B]] =
          stream.collect(pf).run(Sink.collectAllN[B](n))

        override def send(msg: Message.Direct[A]): IO[zio.keeper.Error, Message[A]] =
          protocol.onMessage(msg)
      }
    }

  private def consumeMessages[A](
    messageQueue: zio.Queue[Message[A]],
    message: Message[A],
    behaviorRef: Ref[PartialFunction[Message.Direct[A], Message[A]]],
    protocol: Protocol[A]
  ): ZIO[Clock with Logging with Nodes, zio.keeper.Error, Unit] =
    message match {
      case Message.WithTimeout(message, action, timeout) =>
        consumeMessages(messageQueue, message, behaviorRef, protocol).unit *>
          action.delay(timeout).flatMap(consumeMessages(messageQueue, _, behaviorRef, protocol)).unit
      case md: Message.Direct[A] =>
        messageQueue.offer(md) *>
          behaviorRef.get.flatMap { fn =>
            ZIO.whenCase(fn.lift(md)) {
              case Some(d: Message.Direct[A]) => protocol.onMessage(d)
            }
          }
      case msg =>
        messageQueue.offer(msg).unit
    }
} 
Example 5
Source File: NodesSpec.scala    From zio-keeper   with Apache License 2.0
package zio.keeper.swim

import zio.ZLayer
import zio.clock.Clock
import zio.keeper.MembershipEvent
import zio.keeper.swim.Nodes._
import zio.keeper.{ KeeperSpec, NodeAddress }
import zio.logging.Logging
import zio.stream.Sink
import zio.test.Assertion._
import zio.test._

object NodesSpec extends KeeperSpec {

  val logger = Logging.console((_, line) => line)

  val spec = suite("nodes")(
    testM("add node") {
      val testNodeAddress = NodeAddress(Array(1, 2, 3, 4), 1111)
      for {
        next0 <- nextNode
        _     <- addNode(testNodeAddress)
        next1 <- nextNode
        _     <- changeNodeState(testNodeAddress, NodeState.Healthy)
        next2 <- nextNode
      } yield assert(next0)(isNone) && assert(next1)(isNone) && assert(next2)(isSome(equalTo(testNodeAddress)))
    },
    testM("add node twice") {
      val testNodeAddress = NodeAddress(Array(1, 2, 3, 4), 1111)
      for {
        _    <- addNode(testNodeAddress)
        _    <- changeNodeState(testNodeAddress, NodeState.Healthy)
        _    <- addNode(testNodeAddress)
        next <- nextNode
      } yield assert(next)(isSome(equalTo(testNodeAddress)))
    },
    testM("should propagate events") {
      val testNodeAddress1 = NodeAddress(Array(1, 2, 3, 4), 1111)
      val testNodeAddress2 = NodeAddress(Array(1, 2, 3, 4), 1112)
      for {
        _       <- addNode(testNodeAddress1)
        _       <- changeNodeState(testNodeAddress1, NodeState.Healthy)
        _       <- changeNodeState(testNodeAddress1, NodeState.Suspicion)
        _       <- changeNodeState(testNodeAddress1, NodeState.Dead)
        events1 <- Nodes.events.run(Sink.collectAllN[MembershipEvent](2))
        _       <- addNode(testNodeAddress2)
        _       <- changeNodeState(testNodeAddress2, NodeState.Healthy)
        _       <- changeNodeState(testNodeAddress2, NodeState.Suspicion)
        _       <- changeNodeState(testNodeAddress2, NodeState.Dead)
        events2 <- Nodes.events.run(Sink.collectAllN[MembershipEvent](2))
      } yield assert(events1)(
        hasSameElements(
          List(
            MembershipEvent.Join(testNodeAddress1),
            MembershipEvent.Leave(testNodeAddress1)
          )
        )
      ) && assert(events2)(
        hasSameElements(
          List(
            MembershipEvent.Join(testNodeAddress2),
            MembershipEvent.Leave(testNodeAddress2)
          )
        )
      )
    }
  ).provideCustomLayer((ZLayer.requires[Clock] ++ logger) >>> Nodes.live)

} 
Example 6
Source File: AsyncHttpClientHighLevelZioWebsocketTest.scala    From sttp   with Apache License 2.0
package sttp.client.asynchttpclient.zio

import sttp.client._
import sttp.client.asynchttpclient.{AsyncHttpClientHighLevelWebsocketTest, WebSocketHandler}
import sttp.client.impl.zio.{RIOMonadAsyncError, convertZioTaskToFuture, runtime}
import sttp.client.monad.MonadError
import sttp.client.testing.ConvertToFuture
import sttp.client.ws.WebSocket
import zio.clock.Clock
import zio.{Schedule, Task, ZIO}
import zio.duration._

import scala.concurrent.duration.FiniteDuration

class AsyncHttpClientHighLevelZioWebsocketTest extends AsyncHttpClientHighLevelWebsocketTest[Task] {
  override implicit val backend: SttpBackend[Task, Nothing, WebSocketHandler] =
    runtime.unsafeRun(AsyncHttpClientZioBackend())
  override implicit val convertToFuture: ConvertToFuture[Task] = convertZioTaskToFuture
  override implicit val monad: MonadError[Task] = new RIOMonadAsyncError

  override def createHandler: Option[Int] => Task[WebSocketHandler[WebSocket[Task]]] =
    bufferCapacity => ZioWebSocketHandler(bufferCapacity)

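  // Sleep for `interval` before each run of `f`, retry up to `attempts` times, and discharge the Clock requirement with the live clock layer.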
  override def eventually[T](interval: FiniteDuration, attempts: Int)(f: => Task[T]): Task[T] = {
    ZIO.sleep(interval.toMillis.millis).andThen(f).retry(Schedule.recurs(attempts)).provideLayer(Clock.live)
  }
} 
Example 7
Source File: RetryZio.scala    From sttp   with Apache License 2.0
package sttp.client.examples

import sttp.client._
import sttp.client.asynchttpclient.zio.AsyncHttpClientZioBackend
import zio.{ExitCode, Schedule, ZIO}
import zio.clock.Clock
import zio.duration._

object RetryZio extends zio.App {
  override def run(args: List[String]): ZIO[zio.ZEnv, Nothing, ExitCode] = {
    AsyncHttpClientZioBackend()
      .flatMap { implicit backend =>
        val localhostRequest = basicRequest
          .get(uri"http://localhost/test")
          .response(asStringAlways)

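        // Repeat the request (wrapped in `.either`) every second, at most 10 times, while the default retry predicate holds; the Schedule is what puts Clock in the environment.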
        val sendWithRetries: ZIO[Clock, Throwable, Response[String]] = localhostRequest
          .send()
          .either
          .repeat(
            Schedule.spaced(1.second) *>
              Schedule.recurs(10) *>
              Schedule.doWhile(result => RetryWhen.Default(localhostRequest, result))
          )
          .absolve

        sendWithRetries.ensuring(backend.close().ignore)
      }
      .fold(_ => ExitCode.failure, _ => ExitCode.success)
  }
} 
Example 8
Source File: HttpClientHighLevelZioWebsocketTest.scala    From sttp   with Apache License 2.0
package sttp.client.httpclient.zio

import sttp.client._
import sttp.client.httpclient.WebSocketHandler
import sttp.client.impl.zio.RIOMonadAsyncError
import sttp.client.monad.MonadError
import sttp.client.testing.ConvertToFuture
import sttp.client.testing.HttpTest.wsEndpoint
import sttp.client.testing.websocket.HighLevelWebsocketTest
import sttp.client.ws.WebSocket
import zio.blocking.Blocking
import zio.stream.ZStream
import sttp.client.impl.zio._
import zio.clock.Clock
import zio.{Schedule, ZIO}

import scala.concurrent.duration._
import zio.duration.Duration

class HttpClientHighLevelZioWebsocketTest extends HighLevelWebsocketTest[BlockingTask, WebSocketHandler] {
  implicit val backend: SttpBackend[BlockingTask, ZStream[Blocking, Throwable, Byte], WebSocketHandler] =
    runtime.unsafeRun(HttpClientZioBackend())
  implicit val convertToFuture: ConvertToFuture[BlockingTask] = convertZioBlockingTaskToFuture
  implicit val monad: MonadError[BlockingTask] = new RIOMonadAsyncError

  def createHandler: Option[Int] => BlockingTask[WebSocketHandler[WebSocket[BlockingTask]]] = _ => ZioWebSocketHandler()

  it should "handle backpressure correctly" in {
    new ConvertToFutureDecorator(
      basicRequest
        .get(uri"$wsEndpoint/ws/echo")
        .openWebsocketF(createHandler(None))
        .flatMap { response =>
          val ws = response.result
          send(ws, 1000).flatMap(_ =>
            eventually(10.millis, 500) {
              ws.isOpen.map(_ shouldBe true)
            }
          )
        }
    ).toFuture()
  }

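  // Sleep for `interval`, then retry `f` up to `attempts` times; the live Clock layer is added on top of the remaining Blocking requirement.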
  override def eventually[T](interval: FiniteDuration, attempts: Int)(f: => BlockingTask[T]): BlockingTask[T] = {
    (ZIO.sleep(Duration.fromScala(interval)) *> f.retry(Schedule.recurs(attempts)))
      .provideSomeLayer[Blocking](Clock.live)
  }
} 
Example 9
Source File: ZIOSpec.scala    From polynote   with Apache License 2.0
package polynote.testing

import polynote.config.PolynoteConfig
import polynote.env.ops.Enrich
import polynote.kernel.Kernel.Factory
import polynote.kernel.{BaseEnv, CellEnv, GlobalEnv, Kernel, ResultValue, interpreter}
import polynote.kernel.environment.{Config, Env, NotebookUpdates}
import interpreter.Interpreter
import org.scalatest.{BeforeAndAfterAll, Suite}
import polynote.kernel.logging.Logging
import zio.blocking.Blocking
import zio.clock.Clock
import zio.console.Console
import zio.internal.Platform
import zio.random.Random
import zio.system.System
import zio.{Has, RIO, Runtime, Tagged, ZIO, ZLayer}

abstract class TestRuntime
object TestRuntime {
  val runtime: Runtime.Managed[zio.ZEnv with Logging] = ZIOSpecBase.runtime
  def fiberDump(): List[zio.Fiber.Dump] = runtime.unsafeRun(zio.Fiber.dumpAll).toList
}

trait ZIOSpecBase[Env <: Has[_]] {
  import ZIOSpecBase.BaseEnv
  type Environment = Env
  val baseLayer: ZLayer[Any, Nothing, BaseEnv] = ZIOSpecBase.baseLayer
  def envLayer: ZLayer[zio.ZEnv with Logging, Nothing, Env]
  val runtime: Runtime.Managed[BaseEnv] = ZIOSpecBase.runtime

  // TODO: should test platform behave differently? Isolate per suite?
  implicit class IORunOps[A](val self: ZIO[BaseEnv, Throwable, A]) {
    def runIO(): A = ZIOSpecBase.this.runIO(self)
  }

  implicit class IORunWithOps[R <: Has[_], A](val self: ZIO[R, Throwable, A]) {
    def runWith[R1](env: R1)(implicit ev: Env with Has[R1] <:< R, ev1: Tagged[R1], ev2: Tagged[Has[R1]], ev3: Tagged[Env]): A =
      ZIOSpecBase.this.runIO(self.provideSomeLayer[Env](ZLayer.succeed(env)).provideSomeLayer[BaseEnv](envLayer))
  }

  def runIO[A](io: ZIO[BaseEnv, Throwable, A]): A = runtime.unsafeRunSync(io).getOrElse {
    c => throw c.squash
  }


}

object ZIOSpecBase {

  type BaseEnv = zio.ZEnv with Logging
  val baseLayer: ZLayer[Any, Nothing, zio.ZEnv with Logging] = Clock.live ++ Console.live ++ System.live ++ Random.live ++ Blocking.live ++ (Blocking.live >>> Logging.live)
  val platform: Platform = Platform.default
    .withReportFailure(_ => ()) // suppress printing error stack traces by default
  val runtime: Runtime.Managed[zio.ZEnv with Logging] = Runtime.unsafeFromLayer(baseLayer, platform)
}

trait ZIOSpec extends ZIOSpecBase[Clock with Console with System with Random with Blocking with Logging] {
  override lazy val envLayer: ZLayer[zio.ZEnv, Nothing, Environment] = baseLayer
  implicit class ConfigIORunOps[A](val self: ZIO[Environment with Config, Throwable, A]) {
    def runWithConfig(config: PolynoteConfig): A = ZIOSpec.this.runIO(self.provideSomeLayer[Environment](ZLayer.succeed(config)))
  }
}

trait ConfiguredZIOSpec extends ZIOSpecBase[BaseEnv with Config] { this: Suite =>
  def config: PolynoteConfig = PolynoteConfig()
  override lazy val envLayer: ZLayer[zio.ZEnv, Nothing, BaseEnv with Config] =
    baseLayer ++ ZLayer.succeed(config)
}

trait ExtConfiguredZIOSpec[Env <: Has[_]] extends ZIOSpecBase[BaseEnv with Config with Env] {
  def tagged: Tagged[Env]
  def configuredEnvLayer: ZLayer[zio.ZEnv with Config, Nothing, Env]

  private implicit def _tagged: Tagged[Env] = tagged

  def config: PolynoteConfig = PolynoteConfig()
  lazy val configLayer: ZLayer[Any, Nothing, Config] = ZLayer.succeed(config)
  override final lazy val envLayer: ZLayer[zio.ZEnv, Nothing, BaseEnv with Config with Env] = baseLayer ++ Logging.live ++ ((baseLayer ++ configLayer) >>> configuredEnvLayer) ++ configLayer
}

object ValueMap {
  def unapply(values: List[ResultValue]): Option[Map[String, Any]] = Some(apply(values))
  def apply(values: List[ResultValue]): Map[String, Any] = values.map(v => v.name -> v.value).toMap
} 
Example 10
Source File: MockEnv.scala    From polynote   with Apache License 2.0
package polynote.testing.kernel

import cats.effect.concurrent.Ref
import fs2.Stream
import fs2.concurrent.{Queue, SignallingRef, Topic}
import polynote.config.PolynoteConfig
import polynote.kernel.Kernel.Factory
import polynote.kernel.environment.{CurrentNotebook, CurrentRuntime, NotebookUpdates}
import polynote.kernel.interpreter.{CellExecutor, Interpreter}
import polynote.kernel.logging.Logging
import polynote.kernel.task.TaskManager
import polynote.kernel.util.Publish
import polynote.kernel.{BaseEnv, CellEnv, GlobalEnv, InterpreterEnv, KernelStatusUpdate, NotebookRef, Result, StreamingHandles, TaskInfo}
import polynote.messages._
import polynote.runtime.{KernelRuntime, StreamingDataRepr, TableOp}
import polynote.testing.MockPublish
import zio.blocking.Blocking
import zio.clock.Clock
import zio.interop.catz._
import zio.{Has, RIO, Runtime, Task, URIO, ZIO, ZLayer}

case class MockEnv(
  baseEnv: BaseEnv,
  cellID: CellID,
  currentTask: SignallingRef[Task, TaskInfo],
  publishResult: MockPublish[Result],
  publishStatus: MockPublish[KernelStatusUpdate],
  runtime: Runtime[Any]
) {
  val currentRuntime: KernelRuntime = runtime.unsafeRun(CurrentRuntime.from(cellID, publishResult, publishStatus, currentTask))
  val logging: Logging.Service = new Logging.Service.Default(System.err, baseEnv.get[Blocking.Service])
  val baseLayer: ZLayer[Any, Nothing, BaseEnv with InterpreterEnv] =
    ZLayer.succeedMany(baseEnv) ++
      ZLayer.succeed(logging) ++
      ZLayer.succeed(currentRuntime) ++
      ZLayer.succeed(publishResult: Publish[Task, Result]) ++
      ZLayer.succeed(publishStatus: Publish[Task, KernelStatusUpdate]) ++
      ZLayer.succeed(currentTask: Ref[Task, TaskInfo])

  def toCellEnv(classLoader: ClassLoader): ZLayer[Any, Throwable, BaseEnv with InterpreterEnv] = baseLayer ++ (baseLayer >>> CellExecutor.layer(classLoader))
}

object MockEnv {

  def init: ZLayer[BaseEnv, Nothing, BaseEnv with InterpreterEnv] = ZLayer.fromManagedMany(MockEnv(-1).toManaged_.flatMap(_.baseLayer.build))

  def apply(cellID: Int): URIO[BaseEnv, MockEnv] = for {
    env <- ZIO.access[BaseEnv](identity)
    runtime <- ZIO.runtime[Any]
    currentTask <- SignallingRef[Task, TaskInfo](TaskInfo(s"Cell$cellID")).orDie
  } yield new MockEnv(env, CellID(cellID), currentTask, new MockPublish, new MockPublish, runtime)

  def layer(cellID: Int): ZLayer[BaseEnv, Nothing, BaseEnv with InterpreterEnv] = ZLayer.fromManagedMany(MockEnv(cellID).toManaged_.flatMap(_.baseLayer.build))

  type Env = BaseEnv with GlobalEnv with CellEnv with StreamingHandles with NotebookUpdates
}

case class MockKernelEnv(
  baseEnv: BaseEnv,
  kernelFactory: Factory.Service,
  publishResult: MockPublish[Result],
  publishStatus: MockPublish[KernelStatusUpdate],
  interpreterFactories: Map[String, List[Interpreter.Factory]],
  taskManager: TaskManager.Service,
  updateTopic: Topic[Task, Option[NotebookUpdate]],
  currentNotebook: MockNotebookRef,
  streamingHandles: StreamingHandles.Service,
  sessionID: Int = 0,
  polynoteConfig: PolynoteConfig = PolynoteConfig()
) {

  val logging: Logging.Service = new Logging.Service.Default(System.err, baseEnv.get[Blocking.Service])
  val notebookUpdates: Stream[Task, NotebookUpdate] = updateTopic.subscribe(128).unNone

  val baseLayer: ZLayer[Any, Nothing, MockEnv.Env] =
    ZLayer.succeedMany {
      baseEnv ++ Has.allOf(kernelFactory, interpreterFactories, taskManager, notebookUpdates, polynoteConfig) ++
        Has(streamingHandles) ++ Has(publishResult: Publish[Task, Result]) ++ Has(publishStatus: Publish[Task, KernelStatusUpdate])
    } ++ CurrentNotebook.layer(currentNotebook)

}

object MockKernelEnv {
  def apply(kernelFactory: Factory.Service, config: PolynoteConfig, sessionId: Int): RIO[BaseEnv, MockKernelEnv] = for {
    baseEnv         <- ZIO.access[BaseEnv](identity)
    currentNotebook <- MockNotebookRef(Notebook("empty", ShortList.Nil, None))
    updateTopic     <- Topic[Task, Option[NotebookUpdate]](None)
    publishUpdates   = new MockPublish[KernelStatusUpdate]
    taskManager     <- TaskManager(publishUpdates)
    handles         <- StreamingHandles.make(sessionId)
  } yield new MockKernelEnv(baseEnv, kernelFactory, new MockPublish, publishUpdates, Map.empty, taskManager, updateTopic, currentNotebook, handles, handles.sessionID, config)

  def apply(kernelFactory: Factory.Service, sessionId: Int): RIO[BaseEnv, MockKernelEnv] = apply(kernelFactory, PolynoteConfig(), sessionId)
  def apply(kernelFactory: Factory.Service, config: PolynoteConfig): RIO[BaseEnv, MockKernelEnv] = apply(kernelFactory, config, 0)
  def apply(kernelFactory: Factory.Service): RIO[BaseEnv, MockKernelEnv] = apply(kernelFactory, 0)
} 
Example 11
Source File: InterpreterSpec.scala    From polynote   with Apache License 2.0
package polynote.testing

import java.io.File

import cats.data.StateT
import cats.syntax.traverse._
import cats.instances.list._
import org.scalatest.Suite
import polynote.config.PolynoteConfig
import polynote.kernel.environment.Config
import polynote.kernel.{Output, Result, ScalaCompiler}
import polynote.kernel.interpreter.{Interpreter, State}
import polynote.kernel.logging.Logging
import polynote.testing.kernel.MockEnv
import zio.{RIO, ZIO}
import zio.blocking.Blocking
import zio.clock.Clock
import zio.console.Console
import zio.random.Random
import zio.system.System
import zio.interop.catz._

import scala.reflect.internal.util.AbstractFileClassLoader
import scala.reflect.io.VirtualDirectory
import scala.tools.nsc.Settings
import scala.tools.nsc.io.AbstractFile

trait InterpreterSpec extends ZIOSpec {
  import runtime.{unsafeRun, unsafeRunSync}
  val classpath: List[File] = sys.props("java.class.path").split(File.pathSeparator).toList.map(new File(_))
  val settings: Settings = ScalaCompiler.defaultSettings(new Settings(), classpath)

  def outDir: AbstractFile = new VirtualDirectory("(memory)", None)
  settings.outputDirs.setSingleOutput(outDir)

  val classLoader: AbstractFileClassLoader = unsafeRun(ScalaCompiler.makeClassLoader(settings, Nil).provide(Config.of(PolynoteConfig())))
  val compiler: ScalaCompiler = ScalaCompiler(settings, classLoader).runIO()

  def interpreter: Interpreter

  lazy val initialState: State = unsafeRun(interpreter.init(State.Root).provideSomeLayer[Environment](MockEnv.layer(State.Root.id + 1)))
  def cellState: State = State.id(1, initialState)

  def assertOutput(code: String)(assertion: (Map[String, Any], Seq[Result]) => Unit): Unit =
    assertOutput(List(code))(assertion)

  def assertOutput(code: Seq[String])(assertion: (Map[String, Any], Seq[Result]) => Unit): Unit= {
    val (finalState, interpResults) = code.toList.map(interp).sequence.run(cellState).runIO()
    val terminalResults = interpResults.foldLeft((Map.empty[String, Any], List.empty[Result])) {
      case ((vars, results), next) =>
        val nextVars = vars ++ next.state.values.map(v => v.name -> v.value).toMap
        val nextOutputs = results ++ next.env.publishResult.toList.runIO()
        (nextVars, nextOutputs)
    }
    assertion.tupled(terminalResults)
  }

  case class InterpResult(state: State, env: MockEnv)

  type ITask[A] = RIO[Clock with Console with System with Random with Blocking with Logging, A]

  def interp(code: String): StateT[ITask, State, InterpResult] = StateT[ITask, State, InterpResult] {
    state => MockEnv(state.id).flatMap {
      env => interpreter.run(code, state).map {
        newState => State.id(newState.id + 1, newState) -> InterpResult(newState, env)
      }.provideSomeLayer[Environment](env.toCellEnv(classLoader))
    }
  }

  def interp1(code: String): InterpResult = unsafeRun {
    MockEnv(cellState.id).flatMap {
      env =>
        interpreter.run(code, cellState).provideSomeLayer(env.toCellEnv(getClass.getClassLoader)).map {
          state => InterpResult(state, env)
        }
    }
  }

  def stdOut(results: Seq[Result]): String = results.foldLeft("") {
    case (accum, Output("text/plain; rel=stdout", next)) => accum + next.mkString
    case (accum, _) => accum
  }

} 
Example 12
Source File: WAL.scala    From polynote   with Apache License 2.0
package polynote.server
package repository.fs

import java.nio.ByteBuffer
import java.nio.charset.StandardCharsets
import java.time.Instant

import polynote.messages.{Message, Notebook}
import scodec.bits.{BitVector, ByteVector}
import scodec.{Attempt, Codec, codecs}
import scodec.stream.decode
import scodec.stream.decode.StreamDecoder
import zio.{RIO, Task, ZIO}
import zio.blocking.Blocking
import zio.clock.{Clock, currentDateTime}

import scala.util.Try

object WAL {

  val WALMagicNumber: Array[Byte] = "PNWAL".getBytes(StandardCharsets.US_ASCII)
  val WALVersion: Short = 1

  // Timestamp for each update is stored in 32 bits unsigned, epoch UTC seconds.
  // So we'll have to change the format by February of 2106. Apologies to my great-great-great grandchildren.
  private val instantCodec = codecs.uint32.exmap[Instant](
    epochSeconds => Attempt.fromTry(Try(Instant.ofEpochSecond(epochSeconds))),
    instant      => Attempt.successful(instant.getEpochSecond)
  )

  def encodeTimestamp(instant: Instant): Task[BitVector] =
    ZIO.fromEither(instantCodec.encode(instant).toEither)
      .mapError(err => new RuntimeException(err.message))

  val messageCodec: Codec[(Instant, Message)] = codecs.variableSizeBytes(codecs.int32, instantCodec ~ Message.codec)

  val decoder: StreamDecoder[(Instant, Message)] = {
    val readMagic = decode.once(codecs.constant(ByteVector(WALMagicNumber)))
    val readVersion = decode.once(codecs.int16)
    def readMessages(version: Int): StreamDecoder[(Instant, Message)] = version match {
      case 1 => decode.many(messageCodec)
      case v => decode.raiseError(new Exception(s"Unknown WAL version $v"))
    }

    for {
      _       <- readMagic
      ver     <- readVersion
      message <- readMessages(ver)
    } yield message
  }

  trait WALWriter {
    protected def append(bytes: Array[Byte]): RIO[Blocking, Unit] = append(ByteBuffer.wrap(bytes))
    protected def append(bytes: BitVector): RIO[Blocking, Unit] = append(bytes.toByteBuffer)
    protected def append(bytes: ByteBuffer): RIO[Blocking, Unit]

    def writeHeader(notebook: Notebook): RIO[Blocking with Clock, Unit] =
      append(WALMagicNumber) *>
        append(BitVector.fromShort(WALVersion)) *>
        appendMessage(notebook.withoutResults)

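    // Read the current time from the Clock, pair it with the message, encode both, and append the resulting bytes to the log.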
    def appendMessage(message: Message): RIO[Blocking with Clock, Unit] = for {
      ts    <- currentDateTime.map(_.toInstant)
      bytes <- ZIO.fromEither(messageCodec.encode((ts, message)).toEither).mapError(err => new RuntimeException(err.message))
      _     <- append(bytes)
    } yield ()

    def sync(): RIO[Blocking, Unit]

    def close(): RIO[Blocking, Unit]
  }

  object WALWriter {
    object NoWAL extends WALWriter {
      override protected def append(bytes: ByteBuffer): RIO[Blocking, Unit] = ZIO.unit
      override def sync(): RIO[Blocking, Unit] = ZIO.unit
      override def close(): RIO[Blocking, Unit] = ZIO.unit
    }
  }
} 
Example 13
Source File: TamerApp.scala    From tamer   with MIT License
package tamer

import tamer.config.Config
import tamer.db.Db
import tamer.kafka.Kafka
import zio._
import zio.blocking.Blocking
import zio.clock.Clock

abstract class TamerApp[K, V, State](private val setup: IO[SetupError, Setup[K, V, State]]) extends App {
  final val run: ZIO[Blocking with Clock with Config with Db with Kafka, TamerError, Unit] =
    for {
      setup      <- setup
      config     <- Config.>.load
      blockingEC <- blocking.blockingExecutor.map(_.asEC)
      program <- Db.mkTransactor(config.db, platform.executor.asEC, blockingEC).use { tnx =>
        Kafka.>.run(config.kafka, setup)(Db.>.runQuery(tnx, setup, config.query))
      }
    } yield program

  override final def run(args: List[String]): ZIO[ZEnv, Nothing, Int] =
    run
      .provide(new Blocking.Live with Clock.Live with Config.Live with Db.Live with Kafka.Live {})
      .foldM(
        err => console.putStrLn(s"Execution failed with: $err") *> IO.succeed(1),
        _ => IO.succeed(0)
      )
} 
Example 14
Source File: ExampleApp.scala    From caliban   with Apache License 2.0
package caliban.akkahttp

import scala.concurrent.ExecutionContextExecutor
import scala.io.StdIn
import akka.actor.ActorSystem
import akka.http.scaladsl.Http
import akka.http.scaladsl.server.Directives._
import caliban.ExampleData.sampleCharacters
import caliban.ExampleService.ExampleService
import caliban.interop.circe.AkkaHttpCirceAdapter
import caliban.{ ExampleApi, ExampleService }
import zio.clock.Clock
import zio.console.Console
import zio.internal.Platform
import zio.Runtime

object ExampleApp extends App with AkkaHttpCirceAdapter {

  implicit val system: ActorSystem                        = ActorSystem()
  implicit val executionContext: ExecutionContextExecutor = system.dispatcher

  implicit val runtime: Runtime[ExampleService with Console with Clock] =
    Runtime.unsafeFromLayer(ExampleService.make(sampleCharacters) ++ Console.live ++ Clock.live, Platform.default)

  val interpreter = runtime.unsafeRun(ExampleApi.api.interpreter)

  val route =
    path("api" / "graphql") {
      adapter.makeHttpService(interpreter)
    } ~ path("ws" / "graphql") {
      adapter.makeWebSocketService(interpreter)
    } ~ path("graphiql") {
      getFromResource("graphiql.html")
    }

  val bindingFuture = Http().bindAndHandle(route, "localhost", 8088)
  println(s"Server online at http://localhost:8088/\nPress RETURN to stop...")
  StdIn.readLine()
  bindingFuture
    .flatMap(_.unbind())
    .onComplete(_ => system.terminate())

} 
Example 15
Source File: HyParView.scala    From zio-keeper   with Apache License 2.0
package zio.keeper.hyparview

import zio.clock.Clock
import zio.keeper.transport.Transport
import zio.keeper.{ Error, NodeAddress, SendError, TransportError }
import zio.logging.Logging
import zio.logging.log
import zio.stream.{ Stream, Take, ZStream }
import zio._
import zio.keeper.hyparview.ActiveProtocol._
import zio.duration._

object HyParView {

  def live[R <: Transport with TRandom with Logging with Clock with HyParViewConfig](
    localAddr: NodeAddress,
    seedNodes: List[NodeAddress],
    shuffleSchedule: Schedule[R, ViewState, Any]
  ): ZLayer[R, Error, PeerService] = {
    type R1 = R with Views
    val layer = ZLayer.identity[R] ++ Views.fromConfig(localAddr)
    layer >>> ZLayer.fromManaged {
      for {
        env <- ZManaged.environment[R1]
        cfg <- getConfig.toManaged_
        _ <- log
              .info(s"Starting HyParView on $localAddr with configuration:\n${cfg.prettyPrint}")
              .toManaged(_ => log.info("Shut down HyParView"))
        scope <- ZManaged.scope
        connections <- Queue
                        .bounded[
                          (NodeAddress, Chunk[Byte] => IO[TransportError, Unit], Stream[Error, Chunk[Byte]], UIO[_])
                        ](
                          cfg.connectionBuffer
                        )
                        .toManaged(_.shutdown)
        plumTreeMessages <- Queue
                             .sliding[Take[Error, (NodeAddress, PlumTreeProtocol)]](cfg.userMessagesBuffer)
                             .toManaged(_.shutdown)
        peerEvents <- Queue.sliding[PeerEvent](128).toManaged(_.shutdown)
        sendInitial0 = (to: NodeAddress, msg: InitialProtocol.InitialMessage) =>
          sendInitial(to, msg, scope, connections).provide(env)
        _ <- receiveInitialProtocol[R1, Error](Transport.bind(localAddr), cfg.concurrentIncomingConnections)
              .merge(ZStream.fromQueue(connections))
              .merge(neighborProtocol.scheduleElements(Schedule.spaced(2.seconds)))
              .flatMapParSwitch(cfg.activeViewCapacity) {
                case (addr, send, receive, release) =>
                  ZStream
                    .fromEffect(peerEvents.offer(PeerEvent.NeighborUp(addr)))
                    .ensuring(peerEvents.offer(PeerEvent.NeighborDown(addr)))
                    .flatMap(_ => runActiveProtocol[R1, Error](addr, send, sendInitial0)(receive).ensuring(release))
              }
              .into(plumTreeMessages)
              .toManaged_
              .fork
        _ <- periodic.doShuffle
              .repeat(shuffleSchedule)
              .toManaged_
              .fork
        _ <- periodic.doReport
              .repeat(Schedule.spaced(2.seconds))
              .toManaged_
              .fork
        _ <- ZIO.foreach_(seedNodes)(sendInitial0(_, InitialProtocol.Join(localAddr))).toManaged_
      } yield new PeerService.Service {
        override val identity: UIO[NodeAddress] =
          ZIO.succeed(localAddr)

        override val getPeers: UIO[Set[NodeAddress]] =
          Views.activeView.commit.provide(env)

        override def send(to: NodeAddress, message: PlumTreeProtocol): IO[SendError, Unit] =
          Views.send(to, message).provide(env)

        override val receive: ZStream[Any, Error, (NodeAddress, PlumTreeProtocol)] =
          ZStream.fromQueue(plumTreeMessages).unTake

        override val events: ZStream[Any, Nothing, PeerEvent] =
          ZStream.fromQueue(peerEvents)
      }
    }
  }
} 
Example 16
Source File: ExampleApp.scala    From caliban   with Apache License 2.0
package caliban.play

import caliban.{ ExampleApi, ExampleService, PlayRouter }
import caliban.ExampleData.sampleCharacters
import caliban.ExampleService.ExampleService
import play.api.Mode
import play.api.mvc.DefaultControllerComponents
import play.core.server.{ AkkaHttpServer, ServerConfig }
import zio.clock.Clock
import zio.console.Console
import zio.internal.Platform
import zio.Runtime
import scala.io.StdIn.readLine

object ExampleApp extends App {

  implicit val runtime: Runtime[ExampleService with Console with Clock] =
    Runtime.unsafeFromLayer(ExampleService.make(sampleCharacters) ++ Console.live ++ Clock.live, Platform.default)

  val interpreter = runtime.unsafeRun(ExampleApi.api.interpreter)

  val server = AkkaHttpServer.fromRouterWithComponents(
    ServerConfig(
      mode = Mode.Dev,
      port = Some(8088),
      address = "127.0.0.1"
    )
  ) { components =>
    PlayRouter(
      interpreter,
      DefaultControllerComponents(
        components.defaultActionBuilder,
        components.playBodyParsers,
        components.messagesApi,
        components.langs,
        components.fileMimeTypes,
        components.executionContext
      )
    )(runtime, components.materializer).routes
  }

  println("Server online at http://localhost:8088/\nPress RETURN to stop...")
  readLine()
  server.stop()

} 
Example 17
Source File: ExampleApi.scala    From caliban   with Apache License 2.0
package caliban

import scala.language.postfixOps
import caliban.ExampleData._
import caliban.ExampleService.ExampleService
import caliban.GraphQL.graphQL
import caliban.schema.Annotations.{ GQLDeprecated, GQLDescription }
import caliban.schema.GenericSchema
import caliban.wrappers.ApolloTracing.apolloTracing
import caliban.wrappers.Wrappers.{ maxDepth, maxFields, printSlowQueries, timeout }
import zio.URIO
import zio.clock.Clock
import zio.console.Console
import zio.duration._
import zio.stream.ZStream

object ExampleApi extends GenericSchema[ExampleService] {

  case class Queries(
    @GQLDescription("Return all characters from a given origin")
    characters: CharactersArgs => URIO[ExampleService, List[Character]],
    @GQLDeprecated("Use `characters`")
    character: CharacterArgs => URIO[ExampleService, Option[Character]]
  )
  case class Mutations(deleteCharacter: CharacterArgs => URIO[ExampleService, Boolean])
  case class Subscriptions(characterDeleted: ZStream[ExampleService, Nothing, String])

  implicit val roleSchema           = gen[Role]
  implicit val characterSchema      = gen[Character]
  implicit val characterArgsSchema  = gen[CharacterArgs]
  implicit val charactersArgsSchema = gen[CharactersArgs]

  val api: GraphQL[Console with Clock with ExampleService] =
    graphQL(
      RootResolver(
        Queries(
          args => ExampleService.getCharacters(args.origin),
          args => ExampleService.findCharacter(args.name)
        ),
        Mutations(args => ExampleService.deleteCharacter(args.name)),
        Subscriptions(ExampleService.deletedEvents)
      )
    ) @@
      maxFields(200) @@               // query analyzer that limits query fields
      maxDepth(30) @@                 // query analyzer that limits query depth
      timeout(3 seconds) @@           // wrapper that fails slow queries
      printSlowQueries(500 millis) @@ // wrapper that logs slow queries
      apolloTracing // wrapper for https://github.com/apollographql/apollo-tracing

} 
Example 18
Source File: Wrappers.scala    From caliban   with Apache License 2.0
package caliban.wrappers

import caliban.CalibanError.{ ExecutionError, ValidationError }
import caliban.{ GraphQLRequest, GraphQLResponse }
import caliban.Value.NullValue
import caliban.execution.Field
import caliban.parsing.adt.Document
import caliban.wrappers.Wrapper.{ OverallWrapper, ValidationWrapper }
import zio.clock.Clock
import zio.console.{ putStrLn, Console }
import zio.duration.Duration
import zio.{ IO, UIO, URIO, ZIO }

object Wrappers {

  
  def maxFields(maxFields: Int): ValidationWrapper[Any] =
    ValidationWrapper { process => (doc: Document) =>
      for {
        req    <- process(doc)
        fields <- countFields(req.field)
        _ <- IO.when(fields > maxFields)(
              IO.fail(ValidationError(s"Query has too many fields: $fields. Max fields: $maxFields.", ""))
            )
      } yield req
    }

  private def countFields(field: Field): UIO[Int] =
    for {
      inner <- innerFields(field.fields)
      conditional <- IO.foreach(field.conditionalFields.values)(innerFields).map {
                      case Nil  => 0
                      case list => list.max
                    }
    } yield inner + conditional

  private def innerFields(fields: List[Field]): UIO[Int] =
    IO.foreach(fields)(countFields).map(_.sum + fields.length)

} 
Example 19
Source File: ImplicitSuite.scala    From typed-schema   with Apache License 2.0
package ru.tinkoff.tschema.finagle.zioRouting

import ru.tinkoff.tschema.finagle.{LiftHttp, Routed, RoutedPlus}
import zio.{Has, UIO, URIO, ZIO}
import zio.blocking.Blocking
import zio.clock.Clock

case class TestEnv()

class ImplicitSuite {
  type FullEnv       = Has[TestEnv] with Blocking with Clock
  type Test[+A]      = URIO[FullEnv, A]
  type Test2[+A]     = ZIO[FullEnv, String, A]
  type TestH[+A]     = URIOH[FullEnv, A]
  type Test2H[+A]    = ZIOH[FullEnv, String, A]
  type TestHttp[+A]  = URIOHttp[FullEnv, A]
  type Test2Http[+A] = ZIOHttp[FullEnv, String, A]

  implicitly[LiftHttp[TestHttp, Test]]
  implicitly[LiftHttp[TestH, Test]]
  implicitly[LiftHttp[UIOHttp, UIO]]

  implicitly[RoutedPlus[TestHttp]]
  implicitly[RoutedPlus[Test2Http]]
  implicitly[RoutedPlus[TestH]]
  implicitly[RoutedPlus[Test2H]]
  implicitly[RoutedPlus[UIOHttp]]
  implicitly[RoutedPlus[UIOH]]
} 
Example 20
Source File: JaegerTracer.scala    From zio-telemetry   with Apache License 2.0
package zio.telemetry.opentracing.example

import io.jaegertracing.Configuration
import io.jaegertracing.internal.samplers.ConstSampler
import io.jaegertracing.zipkin.ZipkinV2Reporter
import org.apache.http.client.utils.URIBuilder
import zio.ZLayer
import zio.clock.Clock
import zio.telemetry.opentracing.OpenTracing
import zipkin2.reporter.AsyncReporter
import zipkin2.reporter.okhttp3.OkHttpSender

object JaegerTracer {

  def makeService(host: String, serviceName: String): ZLayer[Clock, Throwable, Clock with OpenTracing] = {
    val url           = new URIBuilder().setScheme("http").setHost(host).setPath("/api/v2/spans").build.toString
    val senderBuilder = OkHttpSender.newBuilder.compressionEnabled(true).endpoint(url)

    val tracer = new Configuration(serviceName).getTracerBuilder
      .withSampler(new ConstSampler(true))
      .withReporter(new ZipkinV2Reporter(AsyncReporter.create(senderBuilder.build)))
      .build

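    // Combine the OpenTracing layer built from this tracer with the live Clock, producing the declared Clock with OpenTracing output.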
    OpenTracing.live(tracer) ++ Clock.live
  }
} 
Example 21
Source File: StatusService.scala    From zio-telemetry   with Apache License 2.0
package zio.telemetry.opentracing.example.http

import io.circe.Encoder
import io.circe.syntax._
import io.opentracing.propagation.Format.Builtin.{ HTTP_HEADERS => HttpHeadersFormat }
import io.opentracing.propagation.TextMapAdapter
import org.http4s._
import org.http4s.circe.jsonEncoderOf
import org.http4s.dsl.Http4sDsl
import zio.clock.Clock
import zio.interop.catz._
import zio.telemetry.opentracing.example.http.{ Status => ServiceStatus }
import zio.telemetry.opentracing._
import zio.ZIO
import zio.ZLayer

import scala.jdk.CollectionConverters._

object StatusService {

  val dsl: Http4sDsl[AppTask] = Http4sDsl[AppTask]
  import dsl._

  implicit def encoder[A: Encoder]: EntityEncoder[AppTask, A] = jsonEncoderOf[AppTask, A]

  def status(service: ZLayer[Clock, Throwable, Clock with OpenTracing]): HttpRoutes[AppTask] =
    HttpRoutes.of[AppTask] {
      case request @ GET -> Root / "status" =>
        val headers = request.headers.toList.map(h => h.name.value -> h.value).toMap
        ZIO.unit
          .spanFrom(HttpHeadersFormat, new TextMapAdapter(headers.asJava), "/status")
          .provideLayer(service) *> Ok(ServiceStatus.up("backend").asJson)
    }

} 
Example 22
Source File: StatusesService.scala    From zio-telemetry   with Apache License 2.0
package zio.telemetry.opentracing.example.http

import io.circe.Encoder
import io.opentracing.propagation.Format.Builtin.{ HTTP_HEADERS => HttpHeadersFormat }
import io.opentracing.propagation.TextMapAdapter
import io.opentracing.tag.Tags
import org.http4s.circe.jsonEncoderOf
import org.http4s.dsl.Http4sDsl
import org.http4s.{ EntityEncoder, HttpRoutes }
import sttp.model.Uri
import zio.clock.Clock
import zio.interop.catz._
import zio.telemetry.opentracing.OpenTracing
import zio.UIO
import zio.ZIO
import zio.ZLayer

import scala.collection.mutable
import scala.jdk.CollectionConverters._

object StatusesService {

  def statuses(backendUri: Uri, service: ZLayer[Clock, Throwable, Clock with OpenTracing]): HttpRoutes[AppTask] = {
    val dsl: Http4sDsl[AppTask] = Http4sDsl[AppTask]
    import dsl._

    implicit def encoder[A: Encoder]: EntityEncoder[AppTask, A] = jsonEncoderOf[AppTask, A]

    HttpRoutes.of[AppTask] {
      case GET -> Root / "statuses" =>
        val zio =
          for {
            env     <- ZIO.environment[OpenTracing]
            _       <- env.get.root("/statuses")
            _       <- OpenTracing.tag(Tags.SPAN_KIND.getKey, Tags.SPAN_KIND_CLIENT)
            _       <- OpenTracing.tag(Tags.HTTP_METHOD.getKey, GET.name)
            _       <- OpenTracing.setBaggageItem("proxy-baggage-item-key", "proxy-baggage-item-value")
            buffer  <- UIO.succeed(new TextMapAdapter(mutable.Map.empty[String, String].asJava))
            _       <- OpenTracing.inject(HttpHeadersFormat, buffer)
            headers <- extractHeaders(buffer)
            up      = Status.up("proxy")
            res <- Client
                    .status(backendUri.path("status"), headers)
                    .map(_.body)
                    .flatMap {
                      case Right(s) => Ok(Statuses(List(s, up)))
                      case _        => Ok(Statuses(List(Status.down("backend"), up)))
                    }
          } yield res

        zio.provideLayer(service)
    }
  }

  private def extractHeaders(adapter: TextMapAdapter): UIO[Map[String, String]] = {
    val m = mutable.Map.empty[String, String]
    UIO(adapter.forEach { entry =>
      m.put(entry.getKey, entry.getValue)
      ()
    }).as(m.toMap)
  }

} 
Example 23
Source File: BackendServer.scala    From zio-telemetry   with Apache License 2.0
package zio.telemetry.opentelemetry.example

import org.http4s.server.{ Router, _ }
import org.http4s.server.blaze.BlazeServerBuilder
import zio.clock.Clock
import zio.interop.catz._
import zio.telemetry.opentelemetry.Tracing
import zio.telemetry.opentelemetry.example.config.{ Config, Configuration }
import zio.telemetry.opentelemetry.example.http.{ AppEnv, AppTask, Client, StatusService }
import zio.{ ExitCode, Managed, ZIO, ZLayer }
import org.http4s.syntax.kleisli._
import sttp.client.asynchttpclient.zio.AsyncHttpClientZioBackend

object BackendServer extends zio.App {
  val router = Router[AppTask]("/" -> StatusService.routes).orNotFound

  val server =
    ZIO
      .runtime[AppEnv]
      .flatMap(implicit runtime =>
        BlazeServerBuilder[AppTask]
          .bindHttp(
            runtime.environment.get[Config].backend.host.port.getOrElse(defaults.HttpPort),
            runtime.environment.get[Config].backend.host.host
          )
          .withHttpApp(router)
          .serve
          .compile
          .drain
      )

  val httpBackend = ZLayer.fromManaged(Managed.make(AsyncHttpClientZioBackend())(_.close.ignore))
  val client      = Configuration.live ++ httpBackend >>> Client.live
  val tracer      = Configuration.live >>> JaegerTracer.live("zio-backend")
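  // `++` binds tighter than `>>>`, so this reads as (tracer ++ Clock.live) >>> (Tracing.live ++ Configuration.live ++ client).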
  val envLayer    = tracer ++ Clock.live >>> Tracing.live ++ Configuration.live ++ client

  override def run(args: List[String]) =
    server.provideCustomLayer(envLayer).fold(_ => ExitCode.failure, _ => ExitCode.success)
} 
Example 24
Source File: ProxyServer.scala    From zio-telemetry   with Apache License 2.0 5 votes vote down vote up
package zio.telemetry.opentelemetry.example

import org.http4s.server.blaze.BlazeServerBuilder
import org.http4s.server.{ defaults, Router }
import zio.clock.Clock
import zio.interop.catz._
import zio.telemetry.opentelemetry.Tracing
import zio.telemetry.opentelemetry.example.config.{ Config, Configuration }
import zio.telemetry.opentelemetry.example.http.{ AppEnv, AppTask, Client, StatusesService }
import zio.{ ExitCode, Managed, ZIO, ZLayer }
import org.http4s.syntax.kleisli._
import sttp.client.asynchttpclient.zio.AsyncHttpClientZioBackend

object ProxyServer extends zio.App {

  val router = Router[AppTask]("/" -> StatusesService.routes).orNotFound

  val server =
    ZIO
      .runtime[AppEnv]
      .flatMap(implicit runtime =>
        BlazeServerBuilder[AppTask]
          .bindHttp(
            runtime.environment.get[Config].proxy.host.port.getOrElse(defaults.HttpPort),
            runtime.environment.get[Config].proxy.host.host
          )
          .withHttpApp(router)
          .serve
          .compile
          .drain
      )

  val httpBackend = ZLayer.fromManaged(Managed.make(AsyncHttpClientZioBackend())(_.close.ignore))
  val client      = Configuration.live ++ httpBackend >>> Client.live
  val tracer      = Configuration.live >>> JaegerTracer.live("zio-proxy")
  val envLayer    = tracer ++ Clock.live >>> Tracing.live ++ Configuration.live ++ client

  override def run(args: List[String]) =
    server.provideCustomLayer(envLayer).fold(_ => ExitCode.failure, _ => ExitCode.success)
} 
Example 25
Source File: TestTeamThreeCacheApi.scala    From http4s-poc-api   with MIT License
package interpreters

import cats.effect.{ConcurrentEffect, IO, Timer}
import cats.syntax.flatMap._
import external.TeamThreeCacheApi
import log.effect.LogWriter
import model.DomainModel.{Product, ProductId}
import zio.clock.Clock
import zio.interop.catz._
import zio.{Runtime, Task}

import scala.concurrent.duration._

object TestTeamThreeCacheApi {
  @inline def make(productsInCache: Map[ProductId, Product])(testLogger: LogWriter[Task])(
    implicit t: Timer[IO],
    rt: Runtime[Clock]
  ): TeamThreeCacheApi[ProductId, Product] =
    new TeamThreeCacheApi[ProductId, Product] {
      def get: ProductId => IO[Option[Product]] = { id =>
        ConcurrentEffect[Task].toIO(
          testLogger.debug(s"DEP cachedProduct -> Getting the product $id from the cache in test")
        ) >> t.sleep(200.milliseconds) >> IO(productsInCache.get(id))
      }

      def put: ProductId => Product => IO[Unit] = { id => _ =>
        ConcurrentEffect[Task].toIO(
          testLogger.debug(s"DEP storeProductToCache -> Storing the product $id to the repo in test")
        ) >> t.sleep(200.milliseconds) >> IO.unit
      }
    }

  @inline def makeFail(implicit t: Timer[IO]): TeamThreeCacheApi[ProductId, Product] =
    new TeamThreeCacheApi[ProductId, Product] {
      def get: ProductId => IO[Option[Product]] = { _ =>
        t.sleep(300.milliseconds) >> IO.delay(
          throw new Throwable(
            "DependencyFailure. The dependency def cachedProduct: ProductId => Task[Option[Product]] failed with message not responding"
          )
        )
      }

      def put: ProductId => Product => IO[Unit] = { _ => _ =>
        t.sleep(150.milliseconds) >> IO.delay(
          throw new Throwable(
            "DependencyFailure. The dependency def storeProductToCache: ProductId => Product => Task[Unit] failed with message not responding"
          )
        )
      }
    }
} 
Example 26
Source File: TestTeamTwoHttpApi.scala    From http4s-poc-api   with MIT License
package interpreters

import cats.effect.{ConcurrentEffect, IO, Timer}
import cats.syntax.flatMap._
import external.TeamTwoHttpApi
import log.effect.LogWriter
import model.DomainModel.{Product, ProductId, User, UserId}
import zio.clock.Clock
import zio.interop.catz._
import zio.{Runtime, Task}

import scala.concurrent.duration._

object TestTeamTwoHttpApi {
  @inline def make(aUser: User, productsInStore: Map[ProductId, Product])(testLogger: LogWriter[Task])(
    implicit t: Timer[IO],
    rt: Runtime[Clock]
  ): TeamTwoHttpApi =
    new TeamTwoHttpApi {
      def user: UserId => IO[User] = { id =>
        ConcurrentEffect[Task].toIO(
          testLogger.debug(s"DEP user -> Getting the user $id in test")
        ) >> t.sleep(1.second) >> IO.delay(aUser)
      }

      def product: ProductId => IO[Option[Product]] = { id =>
        ConcurrentEffect[Task].toIO(
          testLogger.debug(s"DEP product -> Getting the product $id from the store in test")
        ) >> t.sleep(1.second) >> IO(productsInStore.get(id))
      }
    }

  @inline def makeFail(implicit t: Timer[IO]): TeamTwoHttpApi =
    new TeamTwoHttpApi {
      def user: UserId => IO[User] = { _ =>
        t.sleep(200.milliseconds) >> IO.delay(
          throw new Throwable(
            "DependencyFailure. The dependency `UserId => IO[User]` failed with message network failure"
          )
        )
      }

      def product: ProductId => IO[Option[Product]] = { _ =>
        t.sleep(400.milliseconds) >> IO.delay(
          throw new Throwable(
            "DependencyFailure. The dependency `ProductId => IO[Option[Product]]` failed with message network failure"
          )
        )
      }
    }
} 
Example 27
Source File: catzSpecBase.scala    From interop-cats   with Apache License 2.0
package zio.interop

import cats.Eq
import cats.effect.laws.util.{ TestContext, TestInstances }
import cats.implicits._
import org.scalacheck.Arbitrary
import org.scalatest.funsuite.AnyFunSuite
import org.scalatest.prop.Configuration
import org.typelevel.discipline.Laws
import org.typelevel.discipline.scalatest.FunSuiteDiscipline
import zio.clock.Clock
import zio.console.Console
import zio.internal.{ Executor, Platform, Tracing }
import zio.interop.catz.taskEffectInstance
import zio.random.Random
import zio.system.System
import zio.{ =!=, Cause, IO, Runtime, Task, UIO, ZIO, ZManaged }

private[zio] trait catzSpecBase
    extends AnyFunSuite
    with FunSuiteDiscipline
    with Configuration
    with TestInstances
    with catzSpecBaseLowPriority {

  type Env = Clock with Console with System with Random

  implicit def rts(implicit tc: TestContext): Runtime[Unit] = Runtime(
    (),
    Platform
      .fromExecutor(Executor.fromExecutionContext(Int.MaxValue)(tc))
      .withTracing(Tracing.disabled)
      .withReportFailure(_ => ())
  )

  implicit val zioEqCauseNothing: Eq[Cause[Nothing]] = Eq.fromUniversalEquals

  implicit def zioEqIO[E: Eq, A: Eq](implicit rts: Runtime[Any], tc: TestContext): Eq[IO[E, A]] =
    Eq.by(_.either)

  implicit def zioEqTask[A: Eq](implicit rts: Runtime[Any], tc: TestContext): Eq[Task[A]] =
    Eq.by(_.either)

  implicit def zioEqUIO[A: Eq](implicit rts: Runtime[Any], tc: TestContext): Eq[UIO[A]] =
    Eq.by(uio => taskEffectInstance.toIO(uio.sandbox.either))

  implicit def zioEqZManaged[E: Eq, A: Eq](implicit rts: Runtime[Any], tc: TestContext): Eq[ZManaged[Any, E, A]] =
    Eq.by(
      zm => ZManaged.ReleaseMap.make.flatMap(releaseMap => zm.zio.provideSome[Any]((_, releaseMap)).map(_._2).either)
    )

  def checkAllAsync(name: String, f: TestContext => Laws#RuleSet): Unit =
    checkAll(name, f(TestContext()))

}

private[interop] sealed trait catzSpecBaseLowPriority { this: catzSpecBase =>

  implicit def zioEq[R: Arbitrary, E: Eq, A: Eq](implicit rts: Runtime[Any], tc: TestContext): Eq[ZIO[R, E, A]] = {
    def run(r: R, zio: ZIO[R, E, A]) = taskEffectInstance.toIO(zio.provide(r).either)
    Eq.instance((io1, io2) => Arbitrary.arbitrary[R].sample.fold(false)(r => catsSyntaxEq(run(r, io1)) eqv run(r, io2)))
  }

  // 'R =!= Any' evidence fixes the 'diverging implicit expansion for type Arbitrary' error reproducible on scala 2.12 and 2.11.
  implicit def zmanagedEq[R: * =!= Any: Arbitrary, E: Eq, A: Eq](
    implicit rts: Runtime[Any],
    tc: TestContext
  ): Eq[ZManaged[R, E, A]] = {
    def run(r: R, zm: ZManaged[R, E, A]) =
      taskEffectInstance.toIO(
        ZManaged.ReleaseMap.make.flatMap(releaseMap => zm.zio.provide((r, releaseMap)).map(_._2).either)
      )
    Eq.instance((io1, io2) => Arbitrary.arbitrary[R].sample.fold(false)(r => catsSyntaxEq(run(r, io1)) eqv run(r, io2)))
  }

} 
Example 28
Source File: RunnableSpec.scala    From zio   with Apache License 2.0 5 votes vote down vote up
package zio.test

import zio.clock.Clock
import zio.test.Spec.TestCase
import zio.{ Has, UIO, URIO }


  final def main(args: Array[String]): Unit = {
    val testArgs     = TestArgs.parse(args)
    val filteredSpec = FilteredSpec(spec, testArgs)
    val runtime      = runner.runtime
    if (TestPlatform.isJVM) {
      val exitCode = runtime.unsafeRun(run(filteredSpec).provideLayer(runner.bootstrap))
      doExit(exitCode)
    } else if (TestPlatform.isJS) {
      runtime.unsafeRunAsync[Nothing, Int](run(filteredSpec).provideLayer(runner.bootstrap)) { exit =>
        val exitCode = exit.getOrElse(_ => 1)
        doExit(exitCode)
      }
    }
  }

  private def doExit(exitCode: Int): Unit =
    try if (!isAmmonite) sys.exit(exitCode)
    catch { case _: SecurityException => }

  private def isAmmonite: Boolean =
    sys.env.exists {
      case (k, v) => k.contains("JAVA_MAIN_CLASS") && v == "ammonite.Main"
    }
} 
Example 29
Source File: Server.scala    From zio-metrics   with Apache License 2.0 5 votes vote down vote up
package zio.metrics.dropwizard

import scala.util.Properties.envOrNone

import cats.data.Kleisli
import org.http4s.server.blaze._
import org.http4s.{ Request, Response }

import zio.{ RIO, ZIO }
import zio.system.System
import zio.clock.Clock
import zio.console.Console
import zio.random.Random
import zio.blocking.Blocking
import zio.interop.catz._
import io.circe.Json
import org.http4s.circe._
import org.http4s.dsl.impl.Root
import org.http4s.dsl.io._
import org.http4s.HttpRoutes
import zio.metrics.dropwizard.typeclasses._
import zio.metrics.dropwizard.DropwizardExtractor._
import cats.instances.list._
import com.codahale.metrics.MetricRegistry

object Server {
  val port: Int = envOrNone("HTTP_PORT").fold(9090)(_.toInt)

  type HttpEnvironment = Clock with Console with System with Random with Blocking
  type HttpTask[A]     = RIO[HttpEnvironment, A]

  type KleisliApp = Kleisli[HttpTask, Request[HttpTask], Response[HttpTask]]

  //type HttpApp[R <: Registry] = R => KleisliApp

  def builder[Ctx]: KleisliApp => HttpTask[Unit] =
    (app: KleisliApp) =>
      ZIO
        .runtime[HttpEnvironment]
        .flatMap { implicit rts =>
          BlazeServerBuilder[HttpTask]
            .bindHttp(port)
            .withHttpApp(app)
            .serve
            .compile
            .drain
        }

  def serveMetrics: MetricRegistry => HttpRoutes[Server.HttpTask] =
    registry =>
      HttpRoutes.of[Server.HttpTask] {
        case GET -> Root / filter => {
          println(s"filter: $filter")
          val optFilter = if (filter == "ALL") None else Some(filter)
          RegistryPrinter
            .report[List, Json](registry, optFilter)(
              (k: String, v: Json) => Json.obj((k, v))
            )
            .map(m => Response[Server.HttpTask](Ok).withEntity(m))
        }
      }
} 
Example 30
Source File: Client.scala    From zio-metrics   with Apache License 2.0 5 votes vote down vote up
package zio.metrics

import zio.{ Fiber, Queue, RIO, Task, UIO, URIO, ZManaged, ZQueue }
import zio.clock.Clock
import zio.stream.ZStream
import zio.duration.Duration.Finite
import zio.metrics.encoders._
import java.util.concurrent.ThreadLocalRandom

class Client(val bufferSize: Long, val timeout: Long, val queueCapacity: Int, host: Option[String], port: Option[Int]) {

  type UDPQueue = ZQueue[Nothing, Any, Encoder, Throwable, Nothing, Metric]

  val queue: UIO[Queue[Metric]] = ZQueue.bounded[Metric](queueCapacity)
  private val duration: Finite  = Finite(timeout)

  val udpClient: ZManaged[Any, Throwable, UDPClient] = (host, port) match {
    case (None, None)       => UDPClient()
    case (Some(h), Some(p)) => UDPClient(h, p)
    case (Some(h), None)    => UDPClient(h, 8125)
    case (None, Some(p))    => UDPClient("localhost", p)
  }

  val sample: List[Metric] => Task[List[Metric]] = metrics =>
    Task(
      metrics.filter(
        m =>
          m match {
            case sm: SampledMetric =>
              if (sm.sampleRate >= 1.0 || ThreadLocalRandom.current.nextDouble <= sm.sampleRate) true else false
            case _ => true
          }
      )
    )

  val udp: List[Metric] => RIO[Encoder, List[Int]] = metrics =>
    for {
      sde  <- RIO.environment[Encoder]
      flt  <- sample(metrics)
      msgs <- RIO.foreach(flt)(sde.get.encode(_))
      ints <- RIO.foreach(msgs.flatten)(s => udpClient.use(_.send(s)))
    } yield ints

  def listen(implicit queue: UDPQueue): URIO[Client.ClientEnv, Fiber[Throwable, Unit]] =
    listen[List, Int](udp)

  def listen[F[_], A](
    f: List[Metric] => RIO[Encoder, F[A]]
  )(implicit queue: UDPQueue): URIO[Client.ClientEnv, Fiber[Throwable, Unit]] =
    ZStream
      .fromQueue[Encoder, Throwable, Metric](queue)
      .groupedWithin(bufferSize, duration)
      .mapM(l => f(l))
      .runDrain
      .fork

  val send: Queue[Metric] => Metric => Task[Unit] = q =>
    metric =>
      for {
        _ <- q.offer(metric)
      } yield ()

  val sendAsync: Queue[Metric] => Metric => Task[Unit] = q =>
    metric =>
      for {
        _ <- q.offer(metric).fork
      } yield ()
}

object Client {

  type ClientEnv = Encoder with Clock //with Console

  def apply(): Client = apply(5, 5000, 100, None, None)

  def apply(bufferSize: Long, timeout: Long): Client =
    apply(bufferSize, timeout, 100, None, None)

  def apply(bufferSize: Long, timeout: Long, queueCapacity: Int): Client =
    apply(bufferSize, timeout, queueCapacity, None, None)

  def apply(bufferSize: Long, timeout: Long, queueCapacity: Int, host: Option[String], port: Option[Int]): Client =
    new Client(bufferSize, timeout, queueCapacity, host, port)

} 
Example 31
Source File: StatsDClientTest.scala    From zio-metrics   with Apache License 2.0 5 votes vote down vote up
package zio.metrics

import zio.{ Queue, RIO, Runtime, Schedule }
import zio.clock.Clock
import zio.console._
import zio.metrics.encoders._
import zio.metrics.statsd._
import zio.duration.Duration
import java.util.concurrent.TimeUnit

object StatsDClientTest {

  val rt = Runtime.unsafeFromLayer(Encoder.statsd ++ Console.live ++ Clock.live)

  val schd = Schedule.recurs(10)

  val client = StatsDClient()

  def program(r: Long)(implicit queue: Queue[Metric]) =
    for {
      clock <- RIO.environment[Clock]
      _     <- client.listen
      t1    <- clock.get.currentTime(TimeUnit.MILLISECONDS)
      _     <- client.increment("zmetrics.counter", 0.9)
      _     <- putStrLn(s"waiting for $r s") *> clock.get.sleep(Duration(r, TimeUnit.SECONDS))
      t2    <- clock.get.currentTime(TimeUnit.MILLISECONDS)
      _     <- client.timer("zmetrics.timer", (t2 - t1).toDouble, 0.9)
    } yield ()

  def main(args: Array[String]): Unit = {
    val timeouts = Seq(4L, 6L, 2L)
    rt.unsafeRun(
      client.queue >>= (
        q =>
          RIO
            .foreach(timeouts)(l => program(l)(q))
            .repeat(schd)
        )
    )
    Thread.sleep(10000)
  }

} 
Example 32
Source File: ClientTest.scala    From zio-metrics   with Apache License 2.0 5 votes vote down vote up
package zio.metrics

import zio.{ RIO, Runtime, Task }
import zio.clock.Clock
import zio.console._
import zio.metrics.encoders._

object ClientTest {

  val rt = Runtime.unsafeFromLayer(Encoder.statsd ++ Console.live ++ Clock.live)

  val myudp: List[Metric] => RIO[Encoder with Console, List[Int]] = msgs =>
    for {
      sde <- RIO.environment[Encoder]
      opt <- RIO.foreach(msgs)(sde.get.encode(_))
      _   <- putStrLn(s"udp: $opt")
      l   <- RIO.foreach(opt.flatten)(s => UDPClient().use(_.send(s)))
    } yield l

  val program = {
    val messages = List(1.0, 2.2, 3.4, 4.6, 5.1, 6.0, 7.9)
    val client   = Client()
    client.queue >>= (queue => {
      implicit val q = queue
      for {
        f <- client.listen[List, Int] { l =>
              myudp(l).provideSomeLayer[Encoder](Console.live)
            }
        _   <- putStrLn(s"implicit queue: $q")
        opt <- RIO.foreach(messages)(d => Task(Counter("clientbar", d, 1.0, Seq.empty[Tag])))
        _   <- RIO.foreach(opt)(m => client.sendAsync(q)(m))
        _   <- f.join
      } yield queue
    })
  }

  def main(args: Array[String]): Unit =
    rt.unsafeRun(program >>= (q => q.shutdown *> putStrLn("Bye bye").provideSomeLayer(Console.live)))

} 
Example 33
Source File: DogStatsDClientTest.scala    From zio-metrics   with Apache License 2.0 5 votes vote down vote up
package zio.metrics

import zio.{ Queue, RIO, Runtime, Schedule }
import zio.clock.Clock
import zio.console._
import java.util.concurrent.TimeUnit
import zio.duration.Duration
import zio.metrics.dogstatsd._
import zio.metrics.encoders._

object DogStatsDClientTest {

  val rt = Runtime.unsafeFromLayer(Encoder.dogstatsd ++ Console.live ++ Clock.live)

  val schd = Schedule.recurs(10)

  val client = DogStatsDClient()

  def program(r: Long)(implicit queue: Queue[Metric]) =
    for {
      clock <- RIO.environment[Clock]
      _     <- client.listen
      t1    <- clock.get.currentTime(TimeUnit.MILLISECONDS)
      _     <- client.increment("zmetrics.dog.counter", 0.9)
      _     <- putStrLn(s"waiting for $r ms") *> clock.get.sleep(Duration(r, TimeUnit.MILLISECONDS))
      t2    <- clock.get.currentTime(TimeUnit.MILLISECONDS)
      d     = (t2 - t1).toDouble
      _     <- client.timer("zmetrics.dog.timer", d, 0.9)
      _     <- client.histogram("zmetrics.dog.hist", d)
      _     <- client.serviceCheck("zmetrics.dog.check", ServiceCheckOk)
      _     <- client.event("zmetrics.dog.event", "something amazing happened")
    } yield ()

  def main(args: Array[String]): Unit = {
    val timeouts = Seq(34L, 76L, 52L)
    rt.unsafeRun(
      client.queue >>= (
        q =>
          RIO
            .foreach(timeouts)(l => program(l)(q))
            .repeat(schd)
        )
    )
    Thread.sleep(10000)
  }

} 
Example 34
Source File: Render.scala    From didactic-computing-machine   with GNU Affero General Public License v3.0 5 votes vote down vote up
package me.benetis.visual

import me.benetis.initial_states.RandomState
import me.benetis.{GameOfLifeRules, LifeState, Point}
import org.scalajs.dom
import org.scalajs.dom.{CanvasRenderingContext2D, document, html}
import scala.util.Random
import zio.clock.Clock
import zio.{IO, Ref, Schedule, Task, UIO, ZIO}
import zio.duration._

object Render {
  case class RenderConfig(scale: Int,
                          blobSize: Int,
                          browserWidth: Double,
                          browserHeight: Double)

  val config =
    RenderConfig(15, 10, dom.window.innerWidth, dom.window.innerHeight)

  val rng = new Random()

  val program: ZIO[Clock, Throwable, Unit] = {
    for {
      renderer <- prepareScreen(config)
      refState <- Ref.make(RandomState.randomCenter(config))
      _ <- (render(renderer, config, refState) *> updateState(refState) *> UIO(
        dom.console.log("tick")
      )).repeat(Schedule.fixed(1.second)).forever
    } yield ()
  }

  private def render(renderer: CanvasRenderingContext2D,
                     config: RenderConfig,
                     ref: Ref[LifeState]): ZIO[Any, Nothing, Unit] = {
    for {
      _ <- clearCanvas(renderer, config)
      state <- ref.get
      _ = state.foreach(p => renderPoint(renderer, config, p))
    } yield ()
  }

  private def updateState(ref: Ref[LifeState]): ZIO[Any, Nothing, Unit] =
    for {
      _ <- ref.update(state => GameOfLifeRules.nextState(state))
    } yield ()

  private def clearCanvas(renderer: CanvasRenderingContext2D,
                          config: RenderConfig): UIO[Unit] = UIO {
    renderer.fillStyle = "black"
    renderer.fillRect(0, 0, config.browserWidth, config.browserHeight)
  }

  private def renderPoint(renderer: CanvasRenderingContext2D,
                          config: RenderConfig,
                          point: Point): Unit = {

    val colors = Vector("red", "yellow")

    val randomColor: String = colors(rng.nextInt(colors.length))
    renderer.fillStyle = randomColor
    renderer.fillRect(
      point.x * config.scale,
      point.y * config.scale,
      config.scale,
      config.scale
    )
  }

  private def prepareScreen(
    config: RenderConfig
  ): UIO[CanvasRenderingContext2D] = UIO {
    val canvas: html.Canvas =
      document.querySelector("#canvas").asInstanceOf[html.Canvas]

    canvas.width = config.browserWidth.toInt
    canvas.height = config.browserHeight.toInt

    val renderer = canvas
      .getContext("2d")
      .asInstanceOf[dom.CanvasRenderingContext2D]

    renderer
  }

} 
Example 35
Source File: Slf4jMdc.scala    From zio-logging   with Apache License 2.0 5 votes vote down vote up
package zio.logging

import java.util.UUID

import zio.logging.slf4j.Slf4jLogger
import zio.clock.Clock
import zio._
import zio.duration._

object Slf4jMdc extends App {

  val userId = LogAnnotation[UUID](
    name = "user-id",
    initialValue = UUID.fromString("0-0-0-0-0"),
    combine = (_, newValue) => newValue,
    render = _.toString
  )

  val logLayer = Slf4jLogger.makeWithAnnotationsAsMdc(List(userId))
  val users    = List.fill(2)(UUID.randomUUID())

  override def run(args: List[String]): ZIO[zio.ZEnv, Nothing, ExitCode] =
    (for {
      _             <- log.info("Start...")
      correlationId <- UIO(Some(UUID.randomUUID()))
      _             <- ZIO.foreachPar(users) { uId =>
                         log.locally(_.annotate(userId, uId).annotate(LogAnnotation.CorrelationId, correlationId)) {
                           log.info("Starting operation") *>
                             ZIO.sleep(500.millis) *>
                             log.info("Stopping operation")
                         }
                       }
    } yield ExitCode.success).provideSomeLayer[Clock](logLayer)
} 
Example 36
Source File: HTTPLogger.scala    From zio-logging   with Apache License 2.0 5 votes vote down vote up
package zio.logging.js

import java.time.OffsetDateTime
import java.util.UUID

import org.scalajs.dom.ext.Ajax
import zio.{ ZIO, ZLayer }
import zio.clock.{ currentDateTime, Clock }
import zio.logging.{ LogAnnotation, LogContext, LogLevel, Logging }

import scala.scalajs.js
import scala.scalajs.js.JSON

object HTTPLogger {

  private def sendMessage(url: String, msg: js.Object) =
    Ajax.post(url, JSON.stringify(msg), headers = Map("Content-Type" -> "application/json"))

  
  type MessageFormatter = (OffsetDateTime, String, LogLevel, String, String, Throwable) => js.Object

  val defaultFormatter: MessageFormatter = (date, clientId, level, name, msg, cause) =>
    js.Dynamic.literal(
      date = date.toString,
      clientId = clientId,
      level = level match {
        case LogLevel.Fatal => "fatal"
        case LogLevel.Error => "error"
        case LogLevel.Warn  => "warn"
        case LogLevel.Info  => "info"
        case LogLevel.Debug => "debug"
        case LogLevel.Trace => "trace"
        case LogLevel.Off   => ""
      },
      name = name,
      msg = msg,
      cause = if (cause == null) "" else cause.toString
    )

  def makeWithName(
    url: String,
    clientId: String = UUID.randomUUID().toString,
    formatter: MessageFormatter = defaultFormatter
  )(name: String)(logFormat: (LogContext, => String) => String): ZLayer[Clock, Nothing, Logging] =
    make(url, clientId, formatter)((context, line) =>
      logFormat(context.annotate(LogAnnotation.Name, name :: Nil), line)
    )

  def make(url: String, clientId: String = UUID.randomUUID().toString, formatter: MessageFormatter = defaultFormatter)(
    logFormat: (LogContext, => String) => String
  ): ZLayer[Clock, Nothing, Logging] =
    Logging.make { (context, line) =>
      for {
        date      <- currentDateTime.orDie
        level      = context.get(LogAnnotation.Level)
        loggerName = LogAnnotation.Name.render(context.get(LogAnnotation.Name))
        msg        = formatter(date, clientId, level, loggerName, logFormat(context, line), null)
        _         <- ZIO.effectTotal(sendMessage(url, msg))
      } yield ()
    }

} 
Example 37
Source File: Logging.scala    From zio-logging   with Apache License 2.0 5 votes vote down vote up
package zio.logging

import zio._
import zio.clock.Clock
import zio.console.Console

object Logging {

  def console(
    format: (LogContext, => String) => String = (_, s) => s,
    rootLoggerName: Option[String] = None
  ): ZLayer[Console with Clock, Nothing, Logging] =
    make(
      LogWriter.ColoredLogWriter(format),
      rootLoggerName
    )

  val context: URIO[Logging, LogContext] =
    ZIO.accessM[Logging](_.get.logContext)

  def debug(line: => String): ZIO[Logging, Nothing, Unit] =
    ZIO.accessM[Logging](_.get.debug(line))

  def error(line: => String): ZIO[Logging, Nothing, Unit] =
    ZIO.accessM[Logging](_.get.error(line))

  def error(line: => String, cause: Cause[Any]): ZIO[Logging, Nothing, Unit] =
    ZIO.accessM[Logging](_.get.error(line, cause))

  val ignore: Layer[Nothing, Logging] =
    make((_, _) => ZIO.unit)

  def info(line: => String): ZIO[Logging, Nothing, Unit] =
    ZIO.accessM[Logging](_.get.info(line))

  def log(level: LogLevel)(line: => String): ZIO[Logging, Nothing, Unit] =
    ZIO.accessM[Logging](_.get.log(level)(line))

  def locally[A, R <: Logging, E, A1](fn: LogContext => LogContext)(zio: ZIO[R, E, A1]): ZIO[Logging with R, E, A1] =
    ZIO.accessM(_.get.locally(fn)(zio))

  def locallyM[A, R <: Logging, E, A1](
    fn: LogContext => URIO[R, LogContext]
  )(zio: ZIO[R, E, A1]): ZIO[Logging with R, E, A1] =
    ZIO.accessM(_.get.locallyM(fn)(zio))

  def make[R](
    logger: LogWriter[R],
    rootLoggerName: Option[String] = None
  ): ZLayer[R, Nothing, Logging] =
    ZLayer.fromEffect(
      ZIO
        .environment[R]
        .flatMap(env =>
          FiberRef
            .make(LogContext.empty)
            .tap(_.getAndUpdateSome {
              case ctx if rootLoggerName.isDefined =>
                ctx.annotate(LogAnnotation.Name, rootLoggerName.toList)
            })
            .map { ref =>
              new Logger[String] {
                def locally[R1, E, A](f: LogContext => LogContext)(zio: ZIO[R1, E, A]): ZIO[R1, E, A] =
                  ref.get.flatMap(context => ref.locally(f(context))(zio))

                def log(line: => String): UIO[Unit] =
                  ref.get.flatMap(context => logger.writeLog(context, line).provide(env))

                def logContext: UIO[LogContext] = ref.get
              }
            }
        )
    )

  def throwable(line: => String, t: Throwable): ZIO[Logging, Nothing, Unit] =
    ZIO.accessM[Logging](_.get.throwable(line, t))

  def trace(line: => String): ZIO[Logging, Nothing, Unit] =
    ZIO.accessM[Logging](_.get.trace(line))

  def warn(line: => String): ZIO[Logging, Nothing, Unit] =
    ZIO.accessM[Logging](_.get.warn(line))
} 
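A minimal usage sketch (not part of the original file) for the Logging.console layer defined above, wired into a small zio.App; the object name and log line are illustrative, and the Console and Clock needed to build the layer are taken from ZEnv via provideCustomLayer.

import zio._
import zio.clock.Clock
import zio.console.Console
import zio.logging._

object ConsoleLoggingExample extends App {

  // Logging layer backed by the colored console writer; building it needs Console and Clock.
  val loggerLayer: ZLayer[Console with Clock, Nothing, Logging] =
    Logging.console(rootLoggerName = Some("example"))

  def run(args: List[String]): ZIO[ZEnv, Nothing, ExitCode] =
    log.info("hello from zio-logging")
      .provideCustomLayer(loggerLayer) // Console and Clock are supplied by ZEnv
      .as(ExitCode.success)
} 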
Example 38
Source File: LogWriter.scala    From zio-logging   with Apache License 2.0 5 votes vote down vote up
package zio.logging

import java.time.OffsetDateTime

import zio.{ Cause, URIO }
import zio.clock.{ currentDateTime, Clock }
import zio.console.{ putStrLn, Console }
import zio.logging.LogDatetimeFormatter.humanReadableDateTimeFormatter
import zio.logging.LogLevel._

import scala.io.AnsiColor._

trait LogWriter[R] {
  def writeLog(context: LogContext, line: => String): URIO[R, Unit]
}

object LogWriter {

  private val NL = System.lineSeparator()

  type LineFormatter = (LogContext, => String) => String

  case class SimpleConsoleLogWriter(format: LineFormatter = (_, s) => s) extends LogWriter[Console with Clock] {
    override def writeLog(context: LogContext, line: => String): URIO[Console with Clock, Unit] =
      for {
        date      <- currentDateTime.orDie
        level      = context(LogAnnotation.Level)
        loggerName = context(LogAnnotation.Name)
        maybeError = context
                       .get(LogAnnotation.Throwable)
                       .map(Cause.fail)
                       .orElse(context.get(LogAnnotation.Cause))
                       .map(cause => NL + cause.prettyPrint)
                       .getOrElse("")
        _         <- putStrLn(
                       humanReadableDateTimeFormatter
                         .format(date) + " " + level + " " + loggerName + " " + format(context, line) + " " + maybeError
                     )
      } yield ()
  }

  case class ColoredLogWriter(lineFormat: LineFormatter = (_, s) => s) extends LogWriter[Console with Clock] {
    private def withColor(color: String, s: String): String = s"$color$s$RESET"

    private def highlightLog(level: LogLevel, message: String): String = {
      val color = level match {
        case Error => RED
        case Warn  => YELLOW
        case Info  => CYAN
        case Debug => GREEN
        case Trace => MAGENTA
        case _     => RESET
      }
      withColor(color, message)
    }

    private def format(
      line: => String,
      time: OffsetDateTime,
      level: LogLevel,
      loggerName: String,
      maybeError: Option[String]
    ): String = {
      val logTag  = highlightLog(level, level.render)
      val logTime = withColor(BLUE, humanReadableDateTimeFormatter.format(time))
      val logMsg  =
        f"$logTime $logTag%14s [${withColor(WHITE, loggerName)}] ${highlightLog(level, line)}"
      maybeError.fold(logMsg)(err => s"$logMsg$NL${highlightLog(level, err)}")
    }

    override def writeLog(context: LogContext, line: => String): URIO[Console with Clock, Unit] =
      for {
        date      <- currentDateTime.orDie
        level      = context.get(LogAnnotation.Level)
        loggerName = context(LogAnnotation.Name)
        maybeError = context
                       .get(LogAnnotation.Throwable)
                       .map(Cause.fail)
                       .orElse(context.get(LogAnnotation.Cause))
                       .map(_.prettyPrint)
        _         <- putStrLn(format(lineFormat(context, line), date, level, loggerName, maybeError))
      } yield ()
  }
} 
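LogWriter is the extension point here: implementing writeLog gives a custom logging backend that Logging.make from the previous example can turn into a layer. A bare-bones sketch, not part of the original file, of a writer that needs only Console and prefixes each line with its level:

import zio.URIO
import zio.ZLayer
import zio.console.{ putStrLn, Console }
import zio.logging.{ LogAnnotation, LogContext, LogWriter, Logging }

// Minimal writer: render the log level from the context and print the line to the console.
object LevelPrefixedLogWriter extends LogWriter[Console] {
  def writeLog(context: LogContext, line: => String): URIO[Console, Unit] =
    putStrLn(s"[${context(LogAnnotation.Level).render}] $line")
}

// Turned into a Logging layer with the constructor from the previous example.
val levelPrefixedLogging: ZLayer[Console, Nothing, Logging] =
  Logging.make(LevelPrefixedLogWriter) 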
Example 39
Source File: BaseTestTask.scala    From zio   with Apache License 2.0 5 votes vote down vote up
package zio.test.sbt

import sbt.testing.{ EventHandler, Logger, Task, TaskDef }

import zio.UIO
import zio.clock.Clock
import zio.test.{ AbstractRunnableSpec, FilteredSpec, SummaryBuilder, TestArgs, TestLogger }
import zio.{ Layer, Runtime, ZIO, ZLayer }

abstract class BaseTestTask(
  val taskDef: TaskDef,
  val testClassLoader: ClassLoader,
  val sendSummary: SendSummary,
  val args: TestArgs
) extends Task {

  protected lazy val specInstance: AbstractRunnableSpec = {
    import org.portablescala.reflect._
    val fqn = taskDef.fullyQualifiedName.stripSuffix("$") + "$"
    Reflect
      .lookupLoadableModuleClass(fqn, testClassLoader)
      .getOrElse(throw new ClassNotFoundException("failed to load object: " + fqn))
      .loadModule()
      .asInstanceOf[AbstractRunnableSpec]
  }

  protected def run(eventHandler: EventHandler): ZIO[TestLogger with Clock, Throwable, Unit] =
    for {
      spec    <- specInstance.runSpec(FilteredSpec(specInstance.spec, args))
      summary <- SummaryBuilder.buildSummary(spec)
      _       <- sendSummary.provide(summary)
      events  <- ZTestEvent.from(spec, taskDef.fullyQualifiedName, taskDef.fingerprint)
      _       <- ZIO.foreach[Any, Throwable, ZTestEvent, Unit](events)(e => ZIO.effect(eventHandler.handle(e)))
    } yield ()

  protected def sbtTestLayer(loggers: Array[Logger]): Layer[Nothing, TestLogger with Clock] =
    ZLayer.succeed[TestLogger.Service](new TestLogger.Service {
      def logLine(line: String): UIO[Unit] =
        ZIO.effect(loggers.foreach(_.info(colored(line)))).ignore
    }) ++ Clock.live

  override def execute(eventHandler: EventHandler, loggers: Array[Logger]): Array[Task] =
    try {
      Runtime((), specInstance.platform).unsafeRun {
        run(eventHandler)
          .provideLayer(sbtTestLayer(loggers))
          .onError(e => UIO(println(e.prettyPrint)))
      }
      Array()
    } catch {
      case t: Throwable =>
        t.printStackTrace()
        throw t
    }

  override def tags(): Array[String] = Array.empty

} 
Example 40
Source File: MockClock.scala    From zio   with Apache License 2.0 5 votes vote down vote up
package zio.test.mock

import java.time.{ DateTimeException, OffsetDateTime }
import java.util.concurrent.TimeUnit

import zio.clock.Clock
import zio.duration.Duration
import zio.{ Has, IO, UIO, URLayer, ZLayer }

object MockClock extends Mock[Clock] {

  object CurrentTime     extends Effect[TimeUnit, Nothing, Long]
  object CurrentDateTime extends Effect[Unit, DateTimeException, OffsetDateTime]
  object NanoTime        extends Effect[Unit, Nothing, Long]
  object Sleep           extends Effect[Duration, Nothing, Unit]

  val compose: URLayer[Has[Proxy], Clock] =
    ZLayer.fromService(proxy =>
      new Clock.Service {
        def currentTime(unit: TimeUnit): UIO[Long]                 = proxy(CurrentTime, unit)
        def currentDateTime: IO[DateTimeException, OffsetDateTime] = proxy(CurrentDateTime)
        val nanoTime: UIO[Long]                                    = proxy(NanoTime)
        def sleep(duration: Duration): UIO[Unit]                   = proxy(Sleep, duration)
      }
    )
} 
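A minimal sketch (not part of the original file) of how one of these capability expectations is typically provided to code under test, mirroring the style of ComposedMockSpec further below; it assumes zio-test's implicit conversion from an Expectation to a ULayer when calling provideLayer.

import zio.clock
import zio.test.Assertion.equalTo
import zio.test.mock.Expectation.value
import zio.test.mock.MockClock
import zio.test.{ assertM, testM }

// Expect a single call to nanoTime and have the mock return 42.
val mockedClock = MockClock.NanoTime(value(42L))

val nanoTimeIsMocked = testM("nanoTime returns the mocked value") {
  assertM(clock.nanoTime.provideLayer(mockedClock))(equalTo(42L))
} 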
Example 41
Source File: udp.scala    From zio-keeper   with Apache License 2.0 5 votes vote down vote up
package zio.keeper.transport

import zio._
import zio.clock.Clock
import zio.keeper.TransportError
import zio.keeper.TransportError._
import zio.logging.Logging
import zio.logging.log
import zio.nio.channels.{ Channel => _, _ }
import zio.nio.core.{ Buffer, SocketAddress }

object udp {

  
  def live(mtu: Int): ZLayer[Clock with Logging, Nothing, ConnectionLessTransport] =
    ZLayer.fromFunction { env =>
      new ConnectionLessTransport.Service {
        def bind(addr: SocketAddress)(connectionHandler: Channel => UIO[Unit]): Managed[TransportError, Bind] =
          DatagramChannel
            .bind(Some(addr))
            .mapError(BindFailed(addr, _))
            .withEarlyRelease
            .onExit { _ =>
              log.info("shutting down server")
            }
            .mapM {
              case (close, server) =>
                Buffer
                  .byte(mtu)
                  .flatMap(
                    buffer =>
                      server
                        .receive(buffer)
                        .mapError(ExceptionWrapper)
                        .tap(_ => buffer.flip)
                        .map {
                          case Some(addr) =>
                            new Channel(
                              bytes => buffer.getChunk(bytes).mapError(ExceptionWrapper),
                              chunk => Buffer.byte(chunk).flatMap(server.send(_, addr)).mapError(ExceptionWrapper).unit,
                              ZIO.succeed(true),
                              ZIO.unit
                            )
                          case None =>
                            new Channel(
                              bytes => buffer.flip.flatMap(_ => buffer.getChunk(bytes)).mapError(ExceptionWrapper),
                              _ => ZIO.fail(new RuntimeException("Cannot reply")).mapError(ExceptionWrapper).unit,
                              ZIO.succeed(true),
                              ZIO.unit
                            )
                        }
                        .flatMap(
                          connectionHandler
                        )
                  )
                  .forever
                  .fork
                  .as {
                    val local = server.localAddress
                      .flatMap(opt => IO.effect(opt.get).orDie)
                      .mapError(ExceptionWrapper(_))
                    new Bind(server.isOpen, close.unit, local)
                  }
            }
            .provide(env)

        def connect(to: SocketAddress): Managed[TransportError, Channel] =
          DatagramChannel
            .connect(to)
            .mapM(
              channel =>
                Channel.withLock(
                  channel.read(_).mapError(ExceptionWrapper),
                  channel.write(_).mapError(ExceptionWrapper).unit,
                  ZIO.succeed(true),
                  ZIO.unit
                )
            )
            .mapError(ExceptionWrapper)
      }
    }
} 
Example 42
Source File: ComposedMockSpec.scala    From zio   with Apache License 2.0 5 votes vote down vote up
package zio.test.mock

import zio.clock.Clock
import zio.console.Console
import zio.duration._
import zio.random.Random
import zio.system.System
import zio.test.{ assertM, suite, testM, Assertion, ZIOBaseSpec }
import zio.{ clock, console, random, system, Has, Tag, ULayer, ZIO }

object ComposedMockSpec extends ZIOBaseSpec {

  import Assertion._
  import Expectation._

  private def testValueComposed[R1 <: Has[_]: Tag, E, A](name: String)(
    mock: ULayer[R1],
    app: ZIO[R1, E, A],
    check: Assertion[A]
  ) = testM(name) {
    val result = mock.build.use[R1, E, A](app.provide _)
    assertM(result)(check)
  }

  def spec = suite("ComposedMockSpec")(
    suite("mocking composed environments")(
      {
        val cmd1     = MockClock.NanoTime(value(42L))
        val cmd2     = MockConsole.PutStrLn(equalTo("42"))
        val composed = (cmd1 ++ cmd2)

        val program =
          for {
            time <- clock.nanoTime
            _    <- console.putStrLn(time.toString)
          } yield ()

        testValueComposed[Clock with Console, Nothing, Unit]("Console with Clock")(composed, program, isUnit)
      }, {
        val cmd1 = MockRandom.NextInt(value(42))
        val cmd2 = MockClock.Sleep(equalTo(42.seconds))
        val cmd3 = MockSystem.Property(equalTo("foo"), value(None))
        val cmd4 = MockConsole.PutStrLn(equalTo("None"))

        val composed = (cmd1 ++ cmd2 ++ cmd3 ++ cmd4)

        val program =
          for {
            n <- random.nextInt
            _ <- clock.sleep(n.seconds)
            v <- system.property("foo")
            _ <- console.putStrLn(v.toString)
          } yield ()

        testValueComposed[Random with Clock with System with Console, Throwable, Unit](
          "Random with Clock with System with Console"
        )(composed, program, isUnit)
      }
    )
  )
} 
Example 43
Source File: PlatformSpecific.scala    From zio   with Apache License 2.0 5 votes vote down vote up
package zio

import zio.blocking.Blocking
import zio.clock.Clock
import zio.console.Console
import zio.random.Random
import zio.system.System

private[zio] trait PlatformSpecific {
  type ZEnv = Clock with Console with System with Random with Blocking

  object ZEnv {

    private[zio] object Services {
      val live: ZEnv =
        Has.allOf[Clock.Service, Console.Service, System.Service, Random.Service, Blocking.Service](
          Clock.Service.live,
          Console.Service.live,
          System.Service.live,
          Random.Service.live,
          Blocking.Service.live
        )
    }

    val any: ZLayer[ZEnv, Nothing, ZEnv] =
      ZLayer.requires[ZEnv]

    val live: Layer[Nothing, ZEnv] =
      Clock.live ++ Console.live ++ System.live ++ Random.live ++ Blocking.live
  }
} 
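ZEnv.live bundles the default services into a single layer, so a program that needs any mix of them can be fully provided in one call. A minimal sketch, not part of the original file:

import zio._
import zio.clock.nanoTime
import zio.console.putStrLn

// A program that needs Clock and Console, both members of ZEnv.
val program: ZIO[ZEnv, Nothing, Unit] =
  nanoTime.flatMap(t => putStrLn(s"current nanoTime: $t"))

// After providing the live services, nothing is left to supply.
val fullyProvided: UIO[Unit] = program.provideLayer(ZEnv.live) 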
Example 44
Source File: PlatformSpecific.scala    From zio   with Apache License 2.0 5 votes vote down vote up
package zio

import zio.clock.Clock
import zio.console.Console
import zio.random.Random
import zio.system.System

private[zio] trait PlatformSpecific {
  type ZEnv = Clock with Console with System with Random

  object ZEnv {

    private[zio] object Services {
      val live: ZEnv =
        Has.allOf[Clock.Service, Console.Service, System.Service, Random.Service](
          Clock.Service.live,
          Console.Service.live,
          System.Service.live,
          Random.Service.live
        )
    }

    val any: ZLayer[ZEnv, Nothing, ZEnv] =
      ZLayer.requires[ZEnv]

    val live: Layer[Nothing, ZEnv] =
      Clock.live ++ Console.live ++ System.live ++ Random.live
  }
} 
Example 45
Source File: PlatformSpecific.scala    From zio   with Apache License 2.0 5 votes vote down vote up
package zio

import zio.clock.Clock
import zio.console.Console
import zio.random.Random
import zio.system.System

private[zio] trait PlatformSpecific {
  type ZEnv = Clock with Console with System with Random

  object ZEnv {

    object Services {
      val live: ZEnv =
        Has.allOf[Clock.Service, Console.Service, System.Service, Random.Service](
          Clock.Service.live,
          Console.Service.live,
          System.Service.live,
          Random.Service.live
        )
    }

    val any: ZLayer[ZEnv, Nothing, ZEnv] =
      ZLayer.requires[ZEnv]

    val live: Layer[Nothing, ZEnv] =
      Clock.live ++ Console.live ++ System.live ++ Random.live
  }
} 
Example 46
Source File: SubscribedConsumer.scala    From zio-kafka   with Apache License 2.0 5 votes vote down vote up
package zio.kafka.consumer

import org.apache.kafka.common.TopicPartition
import zio.RIO
import zio.blocking.Blocking
import zio.clock.Clock
import zio.stream.ZStream
import zio.kafka.serde.Deserializer

class SubscribedConsumer(
  private val underlying: RIO[Blocking, Consumer.Service]
) {

  def partitionedStream[R, K, V](keyDeserializer: Deserializer[R, K], valueDeserializer: Deserializer[R, V]): ZStream[
    Clock with Blocking,
    Throwable,
    (TopicPartition, ZStream[R, Throwable, CommittableRecord[K, V]])
  ] =
    ZStream.fromEffect(underlying).flatMap(_.partitionedStream(keyDeserializer, valueDeserializer))

  def plainStream[R, K, V](
    keyDeserializer: Deserializer[R, K],
    valueDeserializer: Deserializer[R, V]
  ): ZStream[R with Clock with Blocking, Throwable, CommittableRecord[K, V]] =
    partitionedStream(keyDeserializer, valueDeserializer).flatMapPar(n = Int.MaxValue)(_._2)
}

class SubscribedConsumerFromEnvironment(
  private val underlying: RIO[Blocking with Consumer, Consumer.Service]
) {

  def partitionedStream[R, K, V](keyDeserializer: Deserializer[R, K], valueDeserializer: Deserializer[R, V]): ZStream[
    Clock with Blocking with Consumer,
    Throwable,
    (TopicPartition, ZStream[R, Throwable, CommittableRecord[K, V]])
  ] =
    ZStream.fromEffect(underlying).flatMap(_.partitionedStream(keyDeserializer, valueDeserializer))

  def plainStream[R, K, V](
    keyDeserializer: Deserializer[R, K],
    valueDeserializer: Deserializer[R, V]
  ): ZStream[R with Clock with Blocking with Consumer, Throwable, CommittableRecord[K, V]] =
    partitionedStream(keyDeserializer, valueDeserializer).flatMapPar(n = Int.MaxValue)(_._2)
} 
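A hedged sketch (not part of the original file) of where a SubscribedConsumerFromEnvironment usually comes from: the companion accessor Consumer.subscribeAnd, which is assumed to exist in this zio-kafka line. The merged stream still needs a Consumer layer plus Clock and Blocking to run.

import zio.blocking.Blocking
import zio.clock.Clock
import zio.kafka.consumer.{ CommittableRecord, Consumer, Subscription }
import zio.kafka.serde.Serde
import zio.stream.ZStream

// Subscribe to one topic and flatten all partitions into a single stream of records.
val records: ZStream[Clock with Blocking with Consumer, Throwable, CommittableRecord[String, String]] =
  Consumer
    .subscribeAnd(Subscription.Topics(Set("example-topic")))
    .plainStream(Serde.string, Serde.string) 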
Example 47
Source File: ProducerSpec.scala    From zio-kafka   with Apache License 2.0 5 votes vote down vote up
package zio.kafka.producer

import org.apache.kafka.clients.producer.ProducerRecord
import zio._
import zio.clock.Clock
import zio.kafka.KafkaTestUtils._
import zio.kafka.consumer.{ Consumer, ConsumerSettings, Subscription }
import zio.kafka.embedded.Kafka
import zio.kafka.serde.Serde
import zio.test.Assertion._
import zio.test._
import zio.test.environment.TestEnvironment

object ProducerSpec extends DefaultRunnableSpec {
  override def spec =
    suite("producer test suite")(
      testM("one record") {
        for {
          _ <- Producer.produce[Any, String, String](new ProducerRecord("topic", "boo", "baa"))
        } yield assertCompletes
      },
      testM("a non-empty chunk of records") {
        import Subscription._

        val (topic1, key1, value1) = ("topic1", "boo", "baa")
        val (topic2, key2, value2) = ("topic2", "baa", "boo")
        val chunks = Chunk.fromIterable(
          List(new ProducerRecord(topic1, key1, value1), new ProducerRecord(topic2, key2, value2))
        )
        def withConsumer(subscription: Subscription, settings: ConsumerSettings) =
          Consumer.make(settings).flatMap { c =>
            (c.subscribe(subscription).toManaged_ *> c.plainStream(Serde.string, Serde.string).toQueue())
          }

        for {
          outcome  <- Producer.produceChunk[Any, String, String](chunks)
          settings <- consumerSettings("testGroup", "testClient")
          record1 <- withConsumer(Topics(Set(topic1)), settings).use { consumer =>
                      for {
                        messages <- consumer.take.flatMap(_.done).mapError(_.getOrElse(new NoSuchElementException))
                        record = messages
                          .filter(rec => rec.record.key == key1 && rec.record.value == value1)
                          .toSeq
                      } yield record
                    }
          record2 <- withConsumer(Topics(Set(topic2)), settings).use { consumer =>
                      for {
                        messages <- consumer.take.flatMap(_.done).mapError(_.getOrElse(new NoSuchElementException))
                        record   = messages.filter(rec => rec.record.key == key2 && rec.record.value == value2)
                      } yield record
                    }
        } yield {
          assert(outcome.length)(equalTo(2)) &&
          assert(record1)(isNonEmpty) &&
          assert(record2.length)(isGreaterThan(0))
        }
      },
      testM("an empty chunk of records") {
        val chunks = Chunk.fromIterable(List.empty)
        for {
          outcome <- Producer.produceChunk[Any, String, String](chunks)
        } yield assert(outcome.length)(equalTo(0))
      }
    ).provideSomeLayerShared[TestEnvironment](
      ((Kafka.embedded >>> stringProducer) ++ Kafka.embedded).mapError(TestFailure.fail) ++ Clock.live
    )
} 
Example 48
Source File: ZLogsSuite.scala    From tofu   with Apache License 2.0 5 votes vote down vote up
package tofu.logging.zlogs

import ch.qos.logback.classic.Logger
import ch.qos.logback.classic.spi.ILoggingEvent
import ch.qos.logback.core.read.ListAppender
import derevo.derive
import io.circe.JsonObject
import io.circe.syntax._
import org.scalatest.flatspec.AnyFlatSpec
import org.scalatest.matchers.should.Matchers
import org.slf4j.LoggerFactory
import tofu.logging.LogTree
import tofu.logging.derivation.loggable
import tofu.logging.impl.ContextMarker
import tofu.syntax.logging._
import zio.blocking.Blocking
import zio.clock.Clock
import zio.console.Console
import zio.{Has, Runtime, URIO, URLayer, ZLayer}

import scala.jdk.CollectionConverters._

class ZLogsSuite extends AnyFlatSpec with Matchers {
  import ZLogsSuite.MyLogging

  val expr = debug"hello" *> info"world"

  "ZLogs" should "log the context" in {
    val appender = ZLogsSuite.attachList()
    Runtime.default.unsafeRun(expr.provideLayer(ZLogsSuite.fullLayer))
    val items    = appender.list.asScala

    val expected = JsonObject("foo" -> "kojima".asJson, "bar" -> 2.asJson).asJson

    items.map(_.getMarker).collect {
      case ContextMarker(ctx, _) => LogTree(ctx)
    } should ===(List.fill(2)(expected))
  }
}

object ZLogsSuite {
  val Name = "zio logs suite"

  @derive(loggable)
  case class FooService(foo: String)

  val fooLayer = ZLayer.succeed(FooService("kojima"))

  @derive(loggable)
  case class BarService(bar: Int)

  val barLayer = ZLayer.succeed(BarService(2))

  type Foo = Has[FooService]
  type Bar = Has[BarService]

  type LogEnv    = Foo with Bar
  type SystemEnv = Blocking with Clock with Console
  type MyEnv     = SystemEnv with LogEnv with ZLog[LogEnv]
  type TIO[+A]   = URIO[MyEnv, A]

  val logs: ZLogs[Foo with Bar] = ZLogs.build.of[Foo].of[Bar].make

  implicit val MyLogging: ZLogging[MyEnv] = ZLogs.access[MyEnv, LogEnv]

  implicitly[ZioHasBuilder.UnHas[Foo]](ZioHasBuilder.UnHas.unHas[FooService])
  val fullLayer: URLayer[Blocking with Console with Clock, MyEnv] =
    ZLayer.identity[SystemEnv] ++ fooLayer ++ barLayer ++ ZLogs.named(logs, Name)

  def attachList() = {
    val logger   = LoggerFactory.getLogger(Name).asInstanceOf[Logger]
    val appender = new ListAppender[ILoggingEvent]
    appender.start()
    logger.addAppender(appender)
    appender
  }
} 
Example 49
Source File: implicits.scala    From tofu   with Apache License 2.0 5 votes vote down vote up
package tofu.zioInstances
import java.io.IOException

import tofu.optics.{Contains, Extract}
import zio.clock.Clock
import zio.console.Console
import zio.random.Random
import zio.{Has, Tag}

object implicits extends ZioTofuImplicits1

private[zioInstances] class ZioTofuImplicits1 extends ZioTofuImplicits2 {
  @inline final implicit def rioTofuImplicit[R]: RioTofuInstance[R] = rioTofuInstance

  @inline final implicit def zioTofuErrorsToImplicit[R, E]: ZioTofuErrorsToInstance[R, E, Nothing] =
    zioTofuErrorsToInstance

  @inline final implicit def zioTofuErrorsExtractToImplicit[R, E, E1: * Extract E]: ZioTofuErrorsToInstance[R, E, E1] =
    zioTofuExtractErrorsInstance

  @inline final implicit def zioTofuTimeoutImplicit[R <: Clock, E]: ZioTofuTimeoutInstance[R, E] =
    zioTofuTimeoutInstance

  @inline final implicit def zioTofuConcurrentImplicit[R1, E1, R, E]: ZioTofuConcurrentInstance[R1, E1, R, E] =
    zioTofuConcurrentInstance

  @inline final implicit def zioTofuConsoleImplicit[R <: Console, E >: IOException]: ZioTofuConsoleInstance[R, E] =
    zioTofuConsoleInstance

  @inline final implicit def zioTofuRandomImplicit[R <: Random, E]: ZioTofuRandomInstance[R, E] = zioTofuRandomInstance

  @inline final implicit def zioTofuContainsUnliftImplicit[R1, R2: * Contains R1, E]
      : ZioTofuContainsUnliftInstance[R1, R2, E] =
    zioTofuContainsUnliftInstance[R1, R2, E]

  @inline final implicit def rioTofuUnliftIOImplicit[R]: RioTofuUnliftIOInstance[R] = rioTofuUnliftIOInstance

}
private[zioInstances] trait ZioTofuImplicits2 extends ZioTofuImplicits3 {
  @inline final implicit def zioTofuImplicit[R, E]: ZioTofuInstance[R, E]               = zioTofuInstance
  @inline final implicit def zioTofuWithRunImplicit[R, E]: ZioTofuWithRunInstance[R, E] = zioTofuWithRunInstance
}

private[zioInstances] trait ZioTofuImplicits3 {
  @inline final implicit def zioTofuUnliftHasImplicit[R <: Has[_], E, C: Tag]: ZioTofuUnliftHasInstance[R, E, C] =
    zioTofuUnliftHasInstance
} 
Example 50
Source File: ZioInstances.scala    From tofu   with Apache License 2.0 5 votes vote down vote up
package tofu
package zioInstances

import java.io.IOException
import tofu.optics.functions.extractSubtype
import tofu.optics.{Contains, Extract}
import zio.clock.Clock
import zio.console.Console
import zio.random.Random
import zio.{Has, Tag}

private[zioInstances] class ZioInstances {
  private[this] val rioTofuInstanceAny: RioTofuInstance[Any] = new RioTofuInstance
  final def rioTofuInstance[R]: RioTofuInstance[R]           = rioTofuInstanceAny.asInstanceOf[RioTofuInstance[R]]

  private[this] val zioErrorsToInstanceAny: ZioTofuErrorsToInstance[Any, Any, Nothing]             =
    new ZioTofuErrorsToInstance[Any, Any, Nothing]()(extractSubtype[Nothing, Any])
  final def zioTofuErrorsToInstance[R, E]: ZioTofuErrorsToInstance[R, E, Nothing]                  =
    zioErrorsToInstanceAny.asInstanceOf[ZioTofuErrorsToInstance[R, E, Nothing]]
  final def zioTofuExtractErrorsInstance[R, E, E1: * Extract E]: ZioTofuErrorsToInstance[R, E, E1] =
    new ZioTofuErrorsToInstance

  private[this] val zioTofuTimeoutInstanceAny: ZioTofuTimeoutInstance[Clock, Any] = new ZioTofuTimeoutInstance
  final def zioTofuTimeoutInstance[R <: Clock, E]: ZioTofuTimeoutInstance[R, E]   =
    zioTofuTimeoutInstanceAny.asInstanceOf[ZioTofuTimeoutInstance[R, E]]

  private[this] val zioTofuConcurrentInstanceAny: ZioTofuConcurrentInstance[Any, Nothing, Any, Nothing] =
    new ZioTofuConcurrentInstanceUIO

  final def zioTofuConcurrentInstance[R1, E1, R, E]: ZioTofuConcurrentInstance[R1, E1, R, E] =
    zioTofuConcurrentInstanceAny.asInstanceOf[ZioTofuConcurrentInstance[R1, E1, R, E]]

  private[this] val zioTofuConsoleInstanceAny: ZioTofuConsoleInstance[Console, IOException] = new ZioTofuConsoleInstance

  final def zioTofuConsoleInstance[R <: Console, E >: IOException]: ZioTofuConsoleInstance[R, E] =
    zioTofuConsoleInstanceAny.asInstanceOf[ZioTofuConsoleInstance[R, E]]

  private[this] val zioTofuRandomInstanceAny: ZioTofuRandomInstance[Random, Nothing] = new ZioTofuRandomInstance

  final def zioTofuRandomInstance[R <: Random, E]: ZioTofuRandomInstance[R, E] =
    zioTofuRandomInstanceAny.asInstanceOf[ZioTofuRandomInstance[R, E]]

  final def zioTofuContainsUnliftInstance[R1, R2: * Contains R1, E]: ZioTofuContainsUnliftInstance[R1, R2, E] =
    new ZioTofuContainsUnliftInstance[R1, R2, E]

  private[this] val rioTofuUnliftIOInstanceAny: RioTofuUnliftIOInstance[Any] = new RioTofuUnliftIOInstance
  final def rioTofuUnliftIOInstance[R]: RioTofuUnliftIOInstance[R]           =
    rioTofuUnliftIOInstanceAny.asInstanceOf[RioTofuUnliftIOInstance[R]]

  private[this] val rioTofuUnsafeExecFutureInstanceAny: RioTofuUnsafeExecFutureInstance[Any] =
    new RioTofuUnsafeExecFutureInstance
  final def rioTofuUnsafeExecFutureInstance[R]: RioTofuUnsafeExecFutureInstance[R]           =
    rioTofuUnsafeExecFutureInstanceAny.asInstanceOf[RioTofuUnsafeExecFutureInstance[R]]

  private[this] val zioTofuInstanceAny: ZioTofuInstance[Any, Any] = new ZioTofuInstance
  final def zioTofuInstance[R, E]: ZioTofuInstance[R, E]          = zioTofuInstanceAny.asInstanceOf[ZioTofuInstance[R, E]]

  private[this] val zioTofuWithRunInstanceAny                          = new ZioTofuWithRunInstance[Any, Any]
  final def zioTofuWithRunInstance[R, E]: ZioTofuWithRunInstance[R, E] =
    zioTofuWithRunInstanceAny.asInstanceOf[ZioTofuWithRunInstance[R, E]]

  final def zioTofuUnliftHasInstance[R <: Has[_], E, C: Tag]: ZioTofuUnliftHasInstance[R, E, C] =
    new ZioTofuUnliftHasInstance
} 
Example 51
Source File: UdpServer.scala    From zio-keeper   with Apache License 2.0 5 votes vote down vote up
package zio.keeper.example

import zio.{ Chunk, Schedule }
import zio.clock.Clock
import zio.console.{ Console, putStrLn }
import zio.keeper.transport.Channel
import zio.logging.Logging
import zio.nio.core.{ InetAddress, SocketAddress }

object UdpServer extends zio.App {
  import zio._
  import zio.keeper.transport._

  val logging = Logging.console((_, msg) => msg)

  val transport = (Clock.live ++ logging) >>> udp.live(128)

  val localEnvironment = Console.live ++ transport

  override def run(args: List[String]) =
    (for {
      localHost <- InetAddress.localHost.orDie
      publicAddress <- SocketAddress
                        .inetSocketAddress(localHost, 8010)
                        .orDie
      console <- ZIO.environment[Console]
      handler = (channel: Channel) => {
        for {
          data <- channel.read
          _    <- putStrLn(new String(data.toArray))
          _    <- channel.send(data)
        } yield ()
      }.catchAll(ex => putStrLn("error: " + ex.msg))
        .provide(console)

      _ <- putStrLn("public address: " + publicAddress.toString())
      _ <- bind(publicAddress)(handler)
            .use(ch => ZIO.never.ensuring(ch.close.ignore))

    } yield ()).ignore.as(0).provideLayer(localEnvironment)
}

object UdpClient extends zio.App {
  import zio.keeper.transport._

  val logging = Logging.console((_, msg) => msg)

  val transport = (Clock.live ++ logging) >>> udp.live(128)

  val localEnvironment = Console.live ++ transport

  override def run(args: List[String]) =
    (for {
      localHost <- InetAddress.localHost.orDie
      publicAddress <- SocketAddress
                        .inetSocketAddress(localHost, 5557)
                        .orDie
      _ <- putStrLn("connect to address: " + publicAddress.toString())
      _ <- connect(publicAddress)
            .use(_.send(Chunk.fromArray("message from client".getBytes)).repeat(Schedule.recurs(100)))
    } yield ()).ignore.as(0).provideLayer(localEnvironment)
} 
Example 52
Source File: Swim.scala    From zio-keeper   with Apache License 2.0 5 votes vote down vote up
package zio.keeper.swim

import izumi.reflect.Tags.Tag
import zio.{ IO, Queue, Schedule, UIO, ZLayer }
import zio.clock.Clock
import zio.config._
import zio.duration._
import zio.keeper._
import zio.keeper.discovery.Discovery
import zio.keeper.swim.protocols._
import zio.logging.Logging
import zio.logging._
import zio.stream._
import zio.ZManaged
import zio.keeper.discovery._
import zio.clock._

object Swim {

  trait Service[A] extends Membership.Service[A] {
    def broadcast(data: A): IO[zio.keeper.Error, Unit]
    def events: Stream[Nothing, MembershipEvent]
    def localMember: NodeAddress
    def nodes: UIO[Set[NodeAddress]]
    def receive: Stream[Nothing, (NodeAddress, A)]
    def send(data: A, receipt: NodeAddress): UIO[Unit]
  }

  type SwimEnv = Config[SwimConfig] with Discovery with Logging with Clock

  final private[this] val QueueSize = 1000

  def live[B: ByteCodec: Tag]: ZLayer[SwimEnv, Error, Swim[B]] = {
    val internalLayer = ZLayer.requires[SwimEnv] ++ ConversationId.live ++ Nodes.live

    val managed =
      for {
        env              <- ZManaged.environment[ConversationId with Nodes]
        swimConfig       <- config[SwimConfig].toManaged_
        _                <- log.info("starting SWIM on port: " + swimConfig.port).toManaged_
        udpTransport     <- transport.udp.live(swimConfig.messageSizeLimit).build.map(_.get)
        userIn           <- Queue.bounded[Message.Direct[B]](QueueSize).toManaged(_.shutdown)
        userOut          <- Queue.bounded[Message.Direct[B]](QueueSize).toManaged(_.shutdown)
        localNodeAddress <- NodeAddress.local(swimConfig.port).toManaged_
        _                <- Nodes.prettyPrint.flatMap(log.info(_)).repeat(Schedule.spaced(5.seconds)).toManaged_.fork
        initial          <- Initial.protocol(localNodeAddress).flatMap(_.debug).toManaged_

        failureDetection <- FailureDetection
                             .protocol(swimConfig.protocolInterval, swimConfig.protocolTimeout)
                             .flatMap(_.debug)
                             .map(_.binary)
                             .toManaged_
        suspicion <- Suspicion
                      .protocol(localNodeAddress, swimConfig.suspicionTimeout)
                      .flatMap(_.debug)
                      .map(_.binary)
                      .toManaged_

        user       <- User.protocol[B](userIn, userOut).map(_.binary).toManaged_
        deadLetter <- DeadLetter.protocol.toManaged_
        swim       = Protocol.compose(initial.binary, failureDetection, suspicion, user, deadLetter)
        broadcast0 <- Broadcast.make(swimConfig.messageSizeLimit, swimConfig.broadcastResent).toManaged_
        messages0  <- Messages.make(localNodeAddress, broadcast0, udpTransport)
        _          <- messages0.process(swim).toManaged_
      } yield new Swim.Service[B] {

        override def broadcast(data: B): IO[SerializationError, Unit] =
          (for {
            bytes <- ByteCodec.encode[User[B]](User(data))
            _     <- broadcast0.add(Message.Broadcast(bytes))
          } yield ())

        override val receive: Stream[Nothing, (NodeAddress, B)] =
          ZStream.fromQueue(userIn).collect {
            case Message.Direct(n, _, m) => (n, m)
          }

        override def send(data: B, receipt: NodeAddress): UIO[Unit] =
          Message.direct(receipt, data).provide(env).flatMap(userOut.offer(_).unit)

        override def events: Stream[Nothing, MembershipEvent] =
          env.get[Nodes.Service].events

        override def localMember: NodeAddress = localNodeAddress

        override def nodes: UIO[Set[NodeAddress]] =
          env.get[Nodes.Service].healthyNodes.map(_.map(_._1).toSet)
      }

    internalLayer >>> ZLayer.fromManaged(managed)
  }
}