zio.stream.Stream Scala Examples

The following examples show how to use zio.stream.Stream. Each example is taken from an open-source project; the source file, project, and license are noted in the header above the code.
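Before the project-level examples, here is a minimal, self-contained sketch (not taken from any of the projects below) of the basic API: build a Stream from an in-memory collection, transform it, and collect the results. It assumes the ZIO 1.0 RC-era API that these examples use, where DefaultRuntime is available and runCollect yields a List (newer releases return a Chunk).

import zio.DefaultRuntime
import zio.stream.Stream

object StreamBasics {

  // Keep the even numbers, double them, and collect the results into a single
  // effect value that any ZIO runtime can run.
  val doubledEvens =
    Stream.fromIterable(1 to 10).filter(_ % 2 == 0).map(_ * 2).runCollect

  def main(args: Array[String]): Unit = {
    val runtime = new DefaultRuntime {}
    // Prints List(4, 8, 12, 16, 20) on the RC-era runtime assumed here.
    println(runtime.unsafeRun(doubledEvens))
  }
}
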
Example 1
Source File: IteratorTest.scala    From spark-tools   with Apache License 2.0
package io.univalence.sparkzio

import zio.{ DefaultRuntime, IO, Ref, Task, UIO, ZIO, ZManaged }
import zio.clock.Clock
import zio.stream.{ Stream, ZSink, ZStream }
import zio.test.DefaultRunnableSpec
import zio.test._
import zio.test.Assertion._

object StreamTest {

  def assertForAll[R, E, A](zstream: ZStream[R, E, A])(f: A => TestResult): ZIO[R, E, TestResult] =
    zstream.fold(assert(Unit, Assertion.anything))((as, a) => as && f(a))

  def isSorted[A: Ordering]: Assertion[Iterable[A]] =
    Assertion.assertion("sorted")()(x => {
      val y = x.toList
      y.sorted == y
    })
}

object IteratorTest
    extends DefaultRunnableSpec(
      suite("iterator")(
        testM("to iterator should be lazy")({
          case class Element(n: Int, time: Long)

          (for {
            clock      <- ZIO.environment[Clock]
            n          <- Ref.make(0)
            incCounter <- n.update(_ + 1).forever.fork

          } yield {
            def element: UIO[Element] = n.get.zipWith(clock.clock.nanoTime)(Element)

            val in = Stream.repeatEffect(element)

            val iterator = Iterator.unwrapManaged(Iterator.fromStream(in))

            val out: ZStream[Any, Nothing, Element] =
              ZStream.fromIterator(iterator).mapConcatM(e => element.map(List(e, _)))

            implicit val ordering: Ordering[Element] = Ordering.by(x => x.n -> x.time)

            out.take(2000).runCollect.map(e => assert(e, StreamTest.isSorted))
          }).flatten
        }),
        testM("<=>")({
          val in: List[Int] = (1 to 100).toList

          (for {
            _ <- UIO.unit.toManaged_
            stream1 = ZStream.fromIterator(UIO(in.toIterator))
            iterator <- Iterator.fromStream(stream1)
            stream2 = ZStream.fromIterator(UIO(iterator))
            out <- stream2.runCollect.toManaged_
          } yield {
            assert(in, equalTo(out))
          }).use(x => ZIO.effect(x))

        }),
        testM("on exit")(
          (for {
            isOpen <- Ref.make(false).toManaged_
            stream = ZStream.managed(ZManaged.make(isOpen.update(_ => true))(_ => isOpen.set(false)))
            iterator <- Iterator.fromStream(stream)
          } yield {
            assert(iterator.toList, equalTo(List(true)))
          }).use(x => IO.effect(x))
        )
      )
    ) 
Example 2
Source File: SqsStream.scala    From zio-sqs   with Apache License 2.0
package zio.sqs

import software.amazon.awssdk.services.sqs.SqsAsyncClient
import software.amazon.awssdk.services.sqs.model._
import zio.stream.Stream
import zio.{ IO, Task }

import scala.jdk.CollectionConverters._

object SqsStream {

  def apply(
    client: SqsAsyncClient,
    queueUrl: String,
    settings: SqsStreamSettings = SqsStreamSettings()
  ): Stream[Throwable, Message] = {

    val builder = ReceiveMessageRequest.builder
      .queueUrl(queueUrl)
      .attributeNamesWithStrings(settings.attributeNames.asJava)
      .messageAttributeNames(settings.messageAttributeNames.asJava)
      .maxNumberOfMessages(settings.maxNumberOfMessages)

    settings.visibilityTimeout.foreach(builder.visibilityTimeout(_))
    settings.waitTimeSeconds.foreach(builder.waitTimeSeconds(_))

    val request = builder.build

    Stream.fromEffect {
      Task.effectAsync[List[Message]] { cb =>
        client
          .receiveMessage(request)
          .handle[Unit] { (result, err) =>
            err match {
              case null => cb(IO.succeed(result.messages.asScala.toList))
              case ex   => cb(IO.fail(ex))
            }
          }
        ()
      }
    }.forever
      .takeWhile(_.nonEmpty || !settings.stopWhenQueueEmpty)
      .flatMap[Any, Throwable, Message](Stream.fromIterable(_))
      .mapM(msg => IO.when(settings.autoDelete)(deleteMessage(client, queueUrl, msg)).as(msg))
  }

  def deleteMessage(client: SqsAsyncClient, queueUrl: String, msg: Message): Task[Unit] =
    Task.effectAsync[Unit] { cb =>
      client
        .deleteMessage(
          DeleteMessageRequest
            .builder()
            .queueUrl(queueUrl)
            .receiptHandle(msg.receiptHandle())
            .build()
        )
        .handle[Unit] { (_, err) =>
          err match {
            case null => cb(IO.unit)
            case ex   => cb(IO.fail(ex))
          }
        }
      ()
    }
} 
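A hedged usage sketch for the helper above, not part of the zio-sqs sources: the queue URL is a placeholder, SqsAsyncClient.create() is the stock AWS SDK v2 factory, and passing stopWhenQueueEmpty = true by name assumes SqsStreamSettings is a case class exposing that field (the field itself is read in the code above). Client shutdown is omitted for brevity.

import software.amazon.awssdk.services.sqs.SqsAsyncClient
import zio.Task
import zio.sqs.{ SqsStream, SqsStreamSettings }

object SqsStreamUsage {

  // Placeholder queue URL; substitute a real one.
  val queueUrl = "https://sqs.us-east-1.amazonaws.com/123456789012/my-queue"

  // Print each message body and stop once the queue is empty.
  val consumeAll: Task[Unit] =
    Task(SqsAsyncClient.create()).flatMap { client =>
      SqsStream(client, queueUrl, SqsStreamSettings(stopWhenQueueEmpty = true))
        .foreach(msg => Task(println(msg.body())))
    }
}
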
Example 3
Source File: HyParView.scala    From zio-keeper   with Apache License 2.0
package zio.keeper.hyparview

import zio.clock.Clock
import zio.keeper.transport.Transport
import zio.keeper.{ Error, NodeAddress, SendError, TransportError }
import zio.logging.Logging
import zio.logging.log
import zio.stream.{ Stream, Take, ZStream }
import zio._
import zio.keeper.hyparview.ActiveProtocol._
import zio.duration._

object HyParView {

  def live[R <: Transport with TRandom with Logging with Clock with HyParViewConfig](
    localAddr: NodeAddress,
    seedNodes: List[NodeAddress],
    shuffleSchedule: Schedule[R, ViewState, Any]
  ): ZLayer[R, Error, PeerService] = {
    type R1 = R with Views
    val layer = ZLayer.identity[R] ++ Views.fromConfig(localAddr)
    layer >>> ZLayer.fromManaged {
      for {
        env <- ZManaged.environment[R1]
        cfg <- getConfig.toManaged_
        _ <- log
              .info(s"Starting HyParView on $localAddr with configuration:\n${cfg.prettyPrint}")
              .toManaged(_ => log.info("Shut down HyParView"))
        scope <- ZManaged.scope
        connections <- Queue
                        .bounded[
                          (NodeAddress, Chunk[Byte] => IO[TransportError, Unit], Stream[Error, Chunk[Byte]], UIO[_])
                        ](
                          cfg.connectionBuffer
                        )
                        .toManaged(_.shutdown)
        plumTreeMessages <- Queue
                             .sliding[Take[Error, (NodeAddress, PlumTreeProtocol)]](cfg.userMessagesBuffer)
                             .toManaged(_.shutdown)
        peerEvents <- Queue.sliding[PeerEvent](128).toManaged(_.shutdown)
        sendInitial0 = (to: NodeAddress, msg: InitialProtocol.InitialMessage) =>
          sendInitial(to, msg, scope, connections).provide(env)
        _ <- receiveInitialProtocol[R1, Error](Transport.bind(localAddr), cfg.concurrentIncomingConnections)
              .merge(ZStream.fromQueue(connections))
              .merge(neighborProtocol.scheduleElements(Schedule.spaced(2.seconds)))
              .flatMapParSwitch(cfg.activeViewCapacity) {
                case (addr, send, receive, release) =>
                  ZStream
                    .fromEffect(peerEvents.offer(PeerEvent.NeighborUp(addr)))
                    .ensuring(peerEvents.offer(PeerEvent.NeighborDown(addr)))
                    .flatMap(_ => runActiveProtocol[R1, Error](addr, send, sendInitial0)(receive).ensuring(release))
              }
              .into(plumTreeMessages)
              .toManaged_
              .fork
        _ <- periodic.doShuffle
              .repeat(shuffleSchedule)
              .toManaged_
              .fork
        _ <- periodic.doReport
              .repeat(Schedule.spaced(2.seconds))
              .toManaged_
              .fork
        _ <- ZIO.foreach_(seedNodes)(sendInitial0(_, InitialProtocol.Join(localAddr))).toManaged_
      } yield new PeerService.Service {
        override val identity: UIO[NodeAddress] =
          ZIO.succeed(localAddr)

        override val getPeers: UIO[Set[NodeAddress]] =
          Views.activeView.commit.provide(env)

        override def send(to: NodeAddress, message: PlumTreeProtocol): IO[SendError, Unit] =
          Views.send(to, message).provide(env)

        override val receive: ZStream[Any, Error, (NodeAddress, PlumTreeProtocol)] =
          ZStream.fromQueue(plumTreeMessages).unTake

        override val events: ZStream[Any, Nothing, PeerEvent] =
          ZStream.fromQueue(peerEvents)
      }
    }
  }
} 
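The wiring above leans heavily on bridging queues into streams (Queue.bounded, Queue.sliding, ZStream.fromQueue, .into). As a stand-alone illustration of that pattern, here is a minimal sketch using only core zio and zio-stream calls, with nothing from zio-keeper: one fiber offers values to a bounded queue while the stream side consumes them.

import zio.{ Queue, ZIO }
import zio.stream.ZStream

object QueueToStream {

  // Offer five values from a forked fiber and read them back through a stream.
  val firstFive =
    for {
      queue  <- Queue.bounded[Int](16)
      _      <- ZIO.foreach_(1 to 5)(queue.offer).fork
      values <- ZStream.fromQueue(queue).take(5).runCollect
    } yield values
}
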
Example 4
Source File: SocketSession.scala    From polynote   with Apache License 2.0
package polynote.server

import cats.instances.list._
import cats.syntax.traverse._
import fs2.concurrent.Topic
import polynote.buildinfo.BuildInfo
import polynote.kernel.util.Publish
import polynote.kernel.{BaseEnv, StreamThrowableOps}
import polynote.kernel.environment.{Env, PublishMessage, Config}
import polynote.kernel.interpreter.Interpreter
import polynote.kernel.logging.Logging
import polynote.messages._
import polynote.server.auth.IdentityProvider.checkPermission
import polynote.server.auth.{IdentityProvider, Permission, UserIdentity}
import uzhttp.websocket.Frame
import zio.stream.ZStream
import zio.stream.{Stream, Take}
import zio.Queue
import zio.{Promise, RIO, Task, URIO, ZIO}

import scala.collection.immutable.SortedMap

object SocketSession {

  def apply(in: Stream[Throwable, Frame], broadcastAll: Topic[Task, Option[Message]]): URIO[SessionEnv with NotebookManager, Stream[Throwable, Frame]] =
    for {
      output          <- Queue.unbounded[Take[Nothing, Message]]
      publishMessage  <- Env.add[SessionEnv with NotebookManager](Publish(output): Publish[Task, Message])
      env             <- ZIO.environment[SessionEnv with NotebookManager with PublishMessage]
      closed          <- Promise.make[Throwable, Unit]
      _               <- broadcastAll.subscribe(32).unNone.interruptAndIgnoreWhen(closed).through(publishMessage.publish).compile.drain.forkDaemon
      close            = closeQueueIf(closed, output)
    } yield parallelStreams(
        toFrames(ZStream.fromEffect(handshake) ++ Stream.fromQueue(output).unTake),
        in.handleMessages(close)(handler andThen errorHandler) ++ closeStream(closed, output),
        keepaliveStream(closed)).provide(env).catchAllCause {
      cause =>
        ZStream.empty
    }

  private val handler: Message => RIO[SessionEnv with PublishMessage with NotebookManager, Option[Message]] = {
    case ListNotebooks(_) =>
      NotebookManager.list().map {
        notebooks => Some(ListNotebooks(notebooks.map(ShortString.apply)))
      }

    case CreateNotebook(path, maybeContent) =>
      NotebookManager.assertValidPath(path) *>
        checkPermission(Permission.CreateNotebook(path)) *> NotebookManager.create(path, maybeContent).as(None)

    case RenameNotebook(path, newPath) =>
      (NotebookManager.assertValidPath(path) &> NotebookManager.assertValidPath(newPath)) *>
        checkPermission(Permission.CreateNotebook(newPath)) *> checkPermission(Permission.DeleteNotebook(path)) *>
        NotebookManager.rename(path, newPath).as(None)

    case CopyNotebook(path, newPath) =>
      (NotebookManager.assertValidPath(path) &> NotebookManager.assertValidPath(newPath)) *>
        checkPermission(Permission.CreateNotebook(newPath)) *>
        NotebookManager.copy(path, newPath).as(None)

    case DeleteNotebook(path) =>
      NotebookManager.assertValidPath(path) *>
        checkPermission(Permission.DeleteNotebook(path)) *> NotebookManager.delete(path).as(None)

    case RunningKernels(_) => for {
      paths          <- NotebookManager.listRunning()
      statuses       <- ZIO.collectAllPar(paths.map(NotebookManager.status))
      kernelStatuses  = paths.zip(statuses).map { case (p, s) => ShortString(p) -> s }
    } yield Some(RunningKernels(kernelStatuses))

    case other =>
      ZIO.succeed(None)
  }

  val errorHandler: RIO[SessionEnv with PublishMessage with NotebookManager, Option[Message]] => RIO[SessionEnv with PublishMessage with NotebookManager, Option[Message]] =
    _.catchAll {
      err => Logging.error(err).as(Some(Error(0, err)))
    }

  def handshake: RIO[SessionEnv, ServerHandshake] =
    for {
      factories <- Interpreter.Factories.access
      identity  <- UserIdentity.access
      config    <- Config.access
    } yield ServerHandshake(
      (SortedMap.empty[String, String] ++ factories.mapValues(_.head.languageName)).asInstanceOf[TinyMap[TinyString, TinyString]],
      serverVersion = BuildInfo.version,
      serverCommit = BuildInfo.commit,
      identity = identity.map(i => Identity(i.name, i.avatar.map(ShortString))),
      sparkTemplates = config.spark.flatMap(_.propertySets).getOrElse(Nil)
    )
} 
Example 5
Source File: SubscriberToSinkSpec.scala    From interop-reactive-streams   with Apache License 2.0
package zio.interop.reactivestreams

import org.reactivestreams.tck.TestEnvironment
import org.reactivestreams.tck.TestEnvironment.ManualSubscriberWithSubscriptionSupport
import scala.jdk.CollectionConverters._
import zio.{ Task, UIO, ZIO }
import zio.blocking._
import zio.stream.Stream
import zio.test._
import zio.test.Assertion._

object SubscriberToSinkSpec extends DefaultRunnableSpec {
  override def spec =
    suite("Converting a `Subscriber` to a `Sink`")(
      testM("works on the happy path") {
        for {
          probe         <- makeSubscriber
          errorSink     <- probe.underlying.toSink[Throwable]
          (error, sink) = errorSink
          fiber         <- Stream.fromIterable(seq).run(sink).fork
          _             <- probe.request(length + 1)
          elements      <- probe.nextElements(length).run
          completion    <- probe.expectCompletion.run
          _             <- fiber.join
        } yield assert(elements)(succeeds(equalTo(seq))) && assert(completion)(succeeds(isUnit))
      },
      testM("transports errors") {
        for {
          probe         <- makeSubscriber
          errorSink     <- probe.underlying.toSink[Throwable]
          (error, sink) = errorSink
          fiber <- (Stream.fromIterable(seq) ++
                    Stream.fail(e)).run(sink).catchAll(t => error.fail(t)).fork
          _        <- probe.request(length + 1)
          elements <- probe.nextElements(length).run
          err      <- probe.expectError.run
          _        <- fiber.join
        } yield assert(elements)(succeeds(equalTo(seq))) && assert(err)(succeeds(equalTo(e)))
      }
    )

  val seq: List[Int] = List.range(0, 31)
  val length: Long   = seq.length.toLong
  val e: Throwable   = new RuntimeException("boom")

  case class Probe[T](underlying: ManualSubscriberWithSubscriptionSupport[T]) {
    def request(n: Long): UIO[Unit] =
      UIO(underlying.request(n))
    def nextElements(n: Long): ZIO[Blocking, Throwable, List[T]] =
      blocking(Task(underlying.nextElements(n.toLong).asScala.toList))
    def expectError: ZIO[Blocking, Throwable, Throwable] =
      blocking(Task(underlying.expectError(classOf[Throwable])))
    def expectCompletion: ZIO[Blocking, Throwable, Unit] =
      blocking(Task(underlying.expectCompletion()))
  }

  val makeSubscriber = UIO(new ManualSubscriberWithSubscriptionSupport[Int](new TestEnvironment(2000))).map(Probe.apply)

} 
Example 6
Source File: StreamToPublisherSpec.scala    From interop-reactive-streams   with Apache License 2.0
package zio.interop.reactivestreams

import java.lang.reflect.InvocationTargetException
import org.reactivestreams.Publisher
import org.reactivestreams.tck.{ PublisherVerification, TestEnvironment }
import org.testng.annotations.Test
import zio.Task
import zio.UIO
import zio.ZIO
import zio.blocking._
import zio.stream.Stream
import zio.test._
import zio.test.Assertion._

object StreamToPublisherSpec extends DefaultRunnableSpec {
  override def spec =
    suite("Converting a `Stream` to a `Publisher`")(
      suite("passes all required and optional TCK tests")(tests: _*)
    )

  def makePV(runtime: zio.Runtime[Any]) =
    new PublisherVerification[Int](new TestEnvironment(2000, 500), 2000L) {

      def createPublisher(elements: Long): Publisher[Int] =
        runtime.unsafeRun(
          Stream
            .unfold(elements)(n => if (n > 0) Some((1, n - 1)) else None)
            .toPublisher
        )

      override def createFailedPublisher(): Publisher[Int] =
        runtime.unsafeRun(
          Stream
            .fail(new RuntimeException("boom!"))
            .map(_.asInstanceOf[Int])
            .toPublisher
        )
    }

  val tests =
    classOf[PublisherVerification[Int]]
      .getMethods()
      .toList
      .filter { method =>
        method
          .getAnnotations()
          .exists(annotation => classOf[Test].isAssignableFrom(annotation.annotationType()))
      }
      .collect {
        case method if method.getName().startsWith("untested") =>
          test(method.getName())(assert(())(anything)) @@ TestAspect.ignore
        case method =>
          testM(method.getName())(
            for {
              runtime <- ZIO.runtime[Any]
              pv      = makePV(runtime)
              _       <- UIO(pv.setUp())
              r <- blocking(Task(method.invoke(pv))).unit.mapError {
                    case e: InvocationTargetException => e.getTargetException()
                  }.run
            } yield assert(r)(succeeds(isUnit))
          )
      }
} 
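Outside the TCK harness, the conversion itself is small. A minimal sketch, assuming the extension method is brought in by import zio.interop.reactivestreams._ (the package this spec lives in) and that toPublisher returns the Publisher wrapped in an effect, as the unsafeRun calls above suggest:

import org.reactivestreams.Publisher
import zio.UIO
import zio.stream.Stream
import zio.interop.reactivestreams._

object ToPublisherUsage {

  // Wrap a finite ZIO stream as a Reactive Streams Publisher. The effect only
  // constructs the Publisher; elements are produced once a Subscriber subscribes.
  val publisher: UIO[Publisher[Int]] =
    Stream.fromIterable(1 to 10).toPublisher
}
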
Example 7
Source File: CharsetSpec.scala    From zio-nio   with Apache License 2.0
package zio
package nio.core
package charset

import java.nio.charset.{ MalformedInputException, UnmappableCharacterException }

import zio.test._
import zio.test.Assertion._
import zio.stream.Stream

object CharsetSpec extends DefaultRunnableSpec {

  override def spec = suite("CharsetSpec")(
    chunkEncodeDecode(Charset.Standard.utf8),
    chunkEncodeDecode(Charset.Standard.utf16),
    bufferEncodeDecode(Charset.Standard.utf8),
    bufferEncodeDecode(Charset.Standard.utf16),
    testM("utf8 encode") {
      Charset.Standard.utf8.encodeChunk(arabicChunk).map {
        assert(_)(equalTo(arabicUtf8))
      }
    },
    streamEncodeDecode(Charset.Standard.utf8),
    streamEncodeDecode(Charset.Standard.utf16Be),
    testM("stream decode across chunk boundaries") {
      val byteStream = Stream.fromChunks(arabicUtf8.map(Chunk.single): _*)
      for {
        chars <- byteStream.transduce(Charset.Standard.utf8.newDecoder.transducer()).runCollect
      } yield assert(chars)(equalTo(arabicChunk))
    },
    testM("minimum buffer size for encoding") {
      val in = Stream.fromChunk(arabicChunk)
      val t  = Charset.Standard.utf8.newEncoder.transducer(49)
      assertM(in.transduce(t).runDrain.run)(dies(isSubtype[IllegalArgumentException](anything)))
    },
    testM("minimum buffer size for decoding") {
      val in = Stream.fromChunk(arabicUtf8)
      val t  = Charset.Standard.utf8.newDecoder.transducer(49)
      assertM(in.transduce(t).runDrain.run)(dies(isSubtype[IllegalArgumentException](anything)))
    },
    testM("handles encoding errors") {
      val in = Stream.fromChunk(arabicChunk)
      val t  = Charset.Standard.iso8859_1.newEncoder.transducer()
      assertM(in.transduce(t).runDrain.run)(fails(isSubtype[UnmappableCharacterException](anything)))
    },
    testM("handles decoding errors") {
      val in = Stream(0xd8, 0x00, 0xa5, 0xd8).map(_.toByte)
      val t  = Charset.Standard.utf16Le.newDecoder.transducer()
      assertM(in.transduce(t).runDrain.run)(fails(isSubtype[MalformedInputException](anything)))
    }
  )

  val arabic = "إزَّي حضرتك؟"

  val arabicChunk = Chunk.fromIterable(arabic)

  val arabicUtf8 = Chunk(0xd8, 0xa5, 0xd8, 0xb2, 0xd9, 0x91, 0xd9, 0x8e, 0xd9, 0x8a, 0x20, 0xd8, 0xad, 0xd8, 0xb6, 0xd8,
    0xb1, 0xd8, 0xaa, 0xd9, 0x83, 0xd8, 0x9f)
    .map(_.toByte)

  def chunkEncodeDecode(charset: Charset) = testM(s"chunk encode/decode ${charset.displayName}") {
    for {
      encoded <- charset.encodeChunk(arabicChunk)
      decoded <- charset.decodeChunk(encoded)
    } yield assert(decoded)(equalTo(arabicChunk))
  }

  def bufferEncodeDecode(charset: Charset) = testM(s"buffer encode/decode ${charset.displayName}") {
    for {
      chars             <- Buffer.char(100).orDie
      _                 <- chars.putChunk(arabicChunk).orDie
      _                 <- chars.flip
      bytes             <- charset.encode(chars)
      charsHasRemaining <- chars.hasRemaining
      decoded           <- charset.decode(bytes)
      chunk             <- decoded.getChunk().orDie
    } yield assert(charsHasRemaining)(isFalse) && assert(chunk)(equalTo(arabicChunk))
  }

  def streamEncodeDecode(charset: Charset) = testM(s"stream encode/decode ${charset.displayName}") {
    val charStream = Stream.fromIterable(arabic)
    for {
      byteChunks <- charStream.transduce(charset.newEncoder.transducer()).runCollect
      byteStream = Stream.fromIterable(byteChunks)
      chars      <- byteStream.transduce(charset.newDecoder.transducer()).runCollect
    } yield assert(chars)(equalTo(arabicChunk))
  }
} 
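A smaller, stand-alone variant of the stream round-trip tested above. It assumes only the zio-nio core Charset API exercised by this spec (encodeChunk, newDecoder.transducer()) plus core zio-stream calls:

import zio.Chunk
import zio.stream.Stream
import zio.nio.core.charset.Charset

object CharsetRoundTrip {

  val greeting = Chunk.fromIterable("hello, stream")

  // Encode the characters to UTF-8 bytes, then decode the bytes back to characters
  // through the decoder transducer; the collected output should hold the same characters.
  val roundTrip =
    for {
      bytes <- Charset.Standard.utf8.encodeChunk(greeting)
      chars <- Stream.fromChunk(bytes)
                 .transduce(Charset.Standard.utf8.newDecoder.transducer())
                 .runCollect
    } yield chars
}
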
Example 8
Source File: CoreSummonSpec.scala    From interop-cats   with Apache License 2.0
package zio.interop.test

import cats.data.NonEmptyList
import cats.{ Bifunctor, Monad, MonadError, SemigroupK }
import zio._
import zio.interop.catz.core._
import zio.stream.interop.catz.core._
import zio.stream.{ Stream, ZStream }
import zio.test.Assertion._
import zio.test.{ DefaultRunnableSpec, test, _ }

object CoreSummonSpec extends DefaultRunnableSpec {
  override def spec =
    suite("summons from catz.core work with only a cats-core dependency")(
      test("ZIO instances") {
        val monad      = implicitly[Monad[UIO]]
        val monadError = implicitly[MonadError[Task, Throwable]]
        val semigroupK = implicitly[SemigroupK[IO[NonEmptyList[Unit], ?]]]
        val bifunctor  = implicitly[Bifunctor[IO]]

        monad.map(ZIO.unit)(identity)
        monadError.map(ZIO.unit)(identity)
        semigroupK.combineK(ZIO.unit, ZIO.unit)
        bifunctor.leftMap(ZIO.fromOption(None))(identity)

        assert(())(anything)
      },
      test("ZManaged instances") {
        val monad      = implicitly[Monad[ZManaged[Any, Nothing, ?]]]
        val monadError = implicitly[MonadError[Managed[Throwable, ?], Throwable]]
        val semigroupK = implicitly[SemigroupK[Managed[Nothing, ?]]]
        val bifunctor  = implicitly[Bifunctor[Managed]]

        monad.map(ZManaged.unit)(identity)
        monadError.map(ZManaged.unit)(identity)
        semigroupK.combineK(ZManaged.unit, ZManaged.unit)
        bifunctor.leftMap(ZManaged.fail(()))(identity)

        assert(())(anything)
      },
      test("ZStream instances") {
        val monad      = implicitly[Monad[ZStream[Any, Nothing, ?]]]
        val monadError = implicitly[MonadError[Stream[Throwable, ?], Throwable]]
        val semigroupK = implicitly[SemigroupK[Stream[Nothing, ?]]]
        val bifunctor  = implicitly[Bifunctor[Stream]]

        monad.map(ZStream.unit)(identity)
        monadError.map(ZStream.unit)(identity)
        semigroupK.combineK(ZStream.unit, ZStream.unit)
        bifunctor.leftMap(ZStream.fail(()))(identity)

        assert(())(anything)
      }
    )

}