akka.stream.scaladsl.RunnableGraph Scala Examples
The following examples show how to use akka.stream.scaladsl.RunnableGraph.
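Before the project sources below, here is a minimal, self-contained sketch of the pattern every example builds on: wiring a Source to a Sink produces a RunnableGraph, nothing runs until run() materializes the stream, and run() returns the graph's materialized value. The object name and the summing stream are illustrative only, not taken from any of the listed projects.

import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.{Keep, RunnableGraph, Sink, Source}

import scala.concurrent.Future

object MinimalRunnableGraph extends App {
  implicit val system = ActorSystem("minimal")
  implicit val mat = ActorMaterializer()
  import system.dispatcher

  // Joining a Source and a Sink with toMat yields a RunnableGraph; Keep.right
  // keeps the Sink's materialized value (here a Future holding the fold result).
  val graph: RunnableGraph[Future[Int]] =
    Source(1 to 10).toMat(Sink.fold(0)(_ + _))(Keep.right)

  // The graph is only a blueprint; run() materializes it and returns the Future.
  graph.run().foreach { sum =>
    println(s"sum: $sum") // prints 55
    system.terminate()
  }
}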
Example 1
Source File: PersistentBufferCommitOrderSpec.scala From squbs with Apache License 2.0
package org.squbs.pattern.stream

import akka.actor.ActorSystem
import akka.stream.scaladsl.{GraphDSL, RunnableGraph}
import akka.stream.{ActorMaterializer, ClosedShape}
import com.typesafe.config.ConfigFactory
import org.scalatest.concurrent.Eventually
import org.scalatest.{BeforeAndAfterAll, FlatSpec, Matchers}
import org.squbs.testkit.Timeouts._

import scala.concurrent.Await

class PersistentBufferCommitOrderSpec extends FlatSpec with Matchers with BeforeAndAfterAll with Eventually {

  implicit val system = ActorSystem("PersistentBufferCommitOrderSpec", PersistentBufferSpec.testConfig)
  implicit val mat = ActorMaterializer()
  implicit val serializer = QueueSerializer[Int]()
  import StreamSpecUtil._

  override def afterAll = {
    Await.ready(system.terminate(), awaitMax)
  }

  it should "fail when an out of order commit is attempted and commit-order-policy = strict" in {
    val util = new StreamSpecUtil[Int, Event[Int]]
    import util._
    val buffer = PersistentBufferAtLeastOnce[Int](ConfigFactory.parseString("commit-order-policy = strict").withFallback(config))
    val commit = buffer.commit[Int]

    val streamGraph = RunnableGraph.fromGraph(GraphDSL.create(flowCounter) { implicit builder =>
      sink =>
        import GraphDSL.Implicits._
        in ~> buffer.async ~> filterARandomElement ~> commit ~> sink
        ClosedShape
    })
    val sinkF = streamGraph.run()
    Await.result(sinkF.failed, awaitMax) shouldBe an[CommitOrderException]
    clean()
  }

  it should "not fail when an out of order commit is attempted and commit-order-policy = lenient" in {
    val util = new StreamSpecUtil[Int, Event[Int]]
    import util._
    val buffer = PersistentBufferAtLeastOnce[Int](ConfigFactory.parseString("commit-order-policy = lenient").withFallback(config))
    val commit = buffer.commit[Int]

    val streamGraph = RunnableGraph.fromGraph(GraphDSL.create(flowCounter) { implicit builder =>
      sink =>
        import GraphDSL.Implicits._
        in ~> buffer.async ~> filterARandomElement ~> commit ~> sink
        ClosedShape
    })
    val countFuture = streamGraph.run()
    val count = Await.result(countFuture, awaitMax)
    count shouldBe elementCount - 1
    eventually { buffer.queue shouldBe 'closed }
    clean()
  }
}
Example 2
Source File: EventRegistration.scala From AckCord with MIT License
package ackcord

import scala.concurrent.Future

import akka.Done
import akka.stream.{KillSwitches, UniqueKillSwitch}
import akka.stream.scaladsl.{Keep, RunnableGraph, Sink, Source}

case class EventRegistration[Mat](materialized: Mat, onDone: Future[Done], killSwitch: UniqueKillSwitch) {
  def stop(): Unit = killSwitch.shutdown()
}

object EventRegistration {
  def toSink[A, M](source: Source[A, M]): RunnableGraph[EventRegistration[M]] =
    source.viaMat(KillSwitches.single)(Keep.both).toMat(Sink.ignore) {
      case ((m, killSwitch), done) => EventRegistration(m, done, killSwitch)
    }

  def withRegistration[A, M](source: Source[A, M]): Source[A, EventRegistration[M]] =
    source.viaMat(KillSwitches.single)(Keep.both).watchTermination() {
      case ((m, killSwitch), done) => EventRegistration(m, done, killSwitch)
    }
}
Example 3
Source File: CommandRegistration.scala From AckCord with MIT License
package ackcord.commands

import scala.concurrent.Future

import akka.Done
import akka.stream.scaladsl.{Keep, RunnableGraph, Sink, Source}
import akka.stream.{KillSwitches, UniqueKillSwitch}

case class CommandRegistration[Mat](materialized: Mat, onDone: Future[Done], killSwitch: UniqueKillSwitch) {
  def stop(): Unit = killSwitch.shutdown()
}

object CommandRegistration {
  def toSink[A, M](source: Source[A, M]): RunnableGraph[CommandRegistration[M]] =
    source.viaMat(KillSwitches.single)(Keep.both).toMat(Sink.ignore) {
      case ((m, killSwitch), done) => CommandRegistration(m, done, killSwitch)
    }

  def withRegistration[A, M](source: Source[A, M]): Source[A, CommandRegistration[M]] =
    source.viaMat(KillSwitches.single)(Keep.both).watchTermination() {
      case ((m, killSwitch), done) => CommandRegistration(m, done, killSwitch)
    }
}
Example 4
Source File: MaterializeValue.scala From fusion-data with Apache License 2.0
package example.akkastream.graph

import akka.NotUsed
import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.{Flow, Keep, RunnableGraph, Sink, Source, Tcp}
import akka.util.ByteString

import scala.concurrent.{Future, Promise}

object MaterializeValue {
  implicit val system = ActorSystem()
  implicit val mat = ActorMaterializer()
  import system.dispatcher

  case class MyClass(private val p: Promise[Option[Int]], conn: Tcp.OutgoingConnection) extends AutoCloseable {
    override def close(): Unit = p.trySuccess(None)
  }

  // Materializes to Promise[Option[Int]]
  val source: Source[Int, Promise[Option[Int]]] = Source.maybe[Int]

  // Materializes to NotUsed
  val flow1: Flow[Int, Int, NotUsed] = Flow[Int].take(100)

  // Materializes to Promise[Option[Int]]
  val nestedSource: Source[Int, Promise[Option[Int]]] =
    source.viaMat(flow1)(Keep.left).named("nestedSource")
  // viaMat === via()(Keep.left)
  // val nestedSource2: Source[Int, NotUsed] = source.viaMat(flow1)(Keep.right)

  // Materializes to NotUsed
  val flow2: Flow[Int, ByteString, NotUsed] = Flow[Int].map(i => ByteString(i.toString))

  // Materializes to Future[Tcp.OutgoingConnection] (Keep.right)
  val flow3: Flow[ByteString, ByteString, Future[Tcp.OutgoingConnection]] =
    Tcp().outgoingConnection("localhost", 8080)

  val nestedFlow: Flow[Int, ByteString, Future[Tcp.OutgoingConnection]] =
    flow2.viaMat(flow3)(Keep.right)
  val nestedFlow2: Flow[Int, ByteString, NotUsed] =
    flow2.viaMat(flow3)(Keep.left) // flow2.via(flow3)
  val nestedFlow3: Flow[Int, ByteString, (NotUsed, Future[Tcp.OutgoingConnection])] =
    flow2.viaMat(flow3)(Keep.both)

  // Materializes to Future[String] (Keep.right)
  val sink: Sink[ByteString, Future[String]] =
    Sink.fold[String, ByteString]("")(_ + _.utf8String)

  val nestedSink: Sink[Int, (Future[Tcp.OutgoingConnection], Future[String])] =
    nestedFlow.toMat(sink)(Keep.both)

  def f(p: Promise[Option[Int]],
        rest: (Future[Tcp.OutgoingConnection], Future[String])): Future[MyClass] = {
    val connFuture = rest._1
    connFuture.map(outConn => MyClass(p, outConn))
  }

  // Materializes to Future[MyClass]
  val runnableGraph: RunnableGraph[Future[MyClass]] = nestedSource.toMat(nestedSink)(f)

  val r: RunnableGraph[Promise[Option[Int]]] = nestedSource.toMat(nestedSink)(Keep.left)
  val r2: RunnableGraph[(Future[Tcp.OutgoingConnection], Future[String])] =
    nestedSource.toMat(nestedSink)(Keep.right)
}
Example 5
Source File: PartialGraph.scala From fusion-data with Apache License 2.0
package example.akkastream.graph

import akka.actor.ActorSystem
import akka.stream.scaladsl.{Balance, Broadcast, Flow, GraphDSL, Keep, Merge, RunnableGraph, Sink, Source}
import akka.stream.{ActorMaterializer, FlowShape, SourceShape}

import scala.concurrent.Future
import scala.io.StdIn

object PartialGraph extends App {
  implicit val system = ActorSystem()
  implicit val mat = ActorMaterializer()
  import system.dispatcher

  def partial =
    GraphDSL
      .create() { implicit b =>
        import GraphDSL.Implicits._

        val B = b.add(Broadcast[Int](2))
        val C = b.add(Merge[Int](2))
        val D = Flow[Int].map(_ + 1)
        val E = b.add(Balance[Int](2))
        val F = b.add(Merge[Int](2))

        C <~ F
        B ~> C ~> F
        B ~> D ~> E ~> F

        FlowShape(B.in, E.out(1))
      }
      .named("partial")

  // Convert partial from a FlowShape into a Flow to gain access to the
  // fluent flow DSL (e.g. the .filter() method).
  val flow = Flow.fromGraph(partial)

  val source = Source.fromGraph(GraphDSL.create() { implicit b =>
    import GraphDSL.Implicits._
    val merge = b.add(Merge[Int](2))
    Source.single(0) ~> merge
    Source(List(2, 3, 4)) ~> merge
    SourceShape(merge.out)
  })

  val sink: Sink[Int, Future[Int]] =
    Flow[Int].map(_ * 2).drop(10).named("nestedFlow").toMat(Sink.head)(Keep.right)

  val closed: RunnableGraph[Future[Int]] =
    source.via(flow.filter(_ > 1)).toMat(sink)(Keep.right)

  closed.run().foreach(println)

  StdIn.readLine()
  system.terminate()
}
Example 6
Source File: Graph1.scala From fusion-data with Apache License 2.0
package example.akkastream.basic

import akka.NotUsed
import akka.actor.ActorSystem
import akka.stream.{ActorMaterializer, ClosedShape}
import akka.stream.scaladsl.{Broadcast, Flow, GraphDSL, Merge, RunnableGraph, Sink, Source}

import scala.collection.immutable
import scala.io.StdIn

object Graph1 extends App {
  implicit val system = ActorSystem()
  implicit val mat = ActorMaterializer()

  val graph = g(1 to 2)
  graph.run()

  StdIn.readLine()
  system.terminate()

  def g(data: immutable.Iterable[Int]) =
    RunnableGraph.fromGraph(GraphDSL.create() { implicit b: GraphDSL.Builder[NotUsed] =>
      import GraphDSL.Implicits._
      val in = Source(data)
      val out = Sink.foreach(println)

      val bcast = b.add(Broadcast[Int](2))
      val merge = b.add(Merge[Int](2))

      val f1, f2, f3, f4 = Flow[Int].map(_ + 10)

      in ~> f1 ~> bcast ~> f2 ~> merge ~> f3 ~> out
      bcast ~> f4 ~> merge

      ClosedShape
    })
}
Example 7
Source File: GraphComponent.scala From fusion-data with Apache License 2.0
package example.akkastream.basic

import akka.actor.ActorSystem
import akka.stream.FanInShape.{Init, Name}
import akka.stream._
import akka.stream.scaladsl.{Balance, Flow, GraphDSL, Merge, MergePreferred, RunnableGraph, Sink, Source}

import scala.collection.immutable
import scala.io.StdIn

case class PriorityWorkerPoolShape[In, Out](jobsIn: Inlet[In], priorityJobsIn: Inlet[In], resultsOut: Outlet[Out])
    extends Shape {
  override def inlets: immutable.Seq[Inlet[_]] = jobsIn :: priorityJobsIn :: Nil
  override def outlets: immutable.Seq[Outlet[_]] = resultsOut :: Nil

  override def deepCopy(): Shape =
    PriorityWorkerPoolShape(jobsIn.carbonCopy(), priorityJobsIn.carbonCopy(), resultsOut.carbonCopy())
}

case class PriorityWorkerPoolShape2[In, Out](_init: Init[Out] = Name("PriorityWorkerPoolShape2"))
    extends FanInShape[Out](_init) {
  override protected def construct(init: Init[Out]): FanInShape[Out] = PriorityWorkerPoolShape2(init)

  val jobsIn: Inlet[In] = newInlet[In]("jobsIn")
  val priorityJobsIn: Inlet[In] = newInlet[In]("priorityJobsIn")
  // An Outlet[Out] named "out" is created automatically.
}

object PriorityWorkerPool {
  def apply[In, Out](worker: Flow[In, Out, Any], workerCount: Int) =
    GraphDSL.create() { implicit b =>
      import GraphDSL.Implicits._

      val priorityMerge = b.add(MergePreferred[In](1))
      val balance = b.add(Balance[In](workerCount))
      val resultsMerge = b.add(Merge[Out](workerCount))

      for (i <- 0 until workerCount)
        balance.out(i) ~> worker ~> resultsMerge.in(i)

      // After merging priority and ordinary jobs, send them on to the balancer.
      priorityMerge ~> balance

      PriorityWorkerPoolShape(priorityMerge.in(0), priorityMerge.preferred, resultsMerge.out)
    }
}

object GraphComponent extends App {
  implicit val system = ActorSystem()
  implicit val mat = ActorMaterializer()
  import system.dispatcher

  val worker1 = Flow[String].map("step 1 " + _)
  val worker2 = Flow[String].map("step 2 " + _)

  val g = RunnableGraph.fromGraph(GraphDSL.create() { implicit b =>
    import GraphDSL.Implicits._

    val priorityPool1 = b.add(PriorityWorkerPool(worker1, 4))
    val priorityPool2 = b.add(PriorityWorkerPool(worker2, 2))

    Source(1 to 10).map("job: " + _) ~> priorityPool1.jobsIn
    Source(1 to 10).map("priority job: " + _) ~> priorityPool1.priorityJobsIn

    priorityPool1.resultsOut ~> priorityPool2.jobsIn
    Source(1 to 10).map("one-step, priority " + _) ~> priorityPool2.priorityJobsIn

    priorityPool2.resultsOut ~> Sink.foreach(println)
    ClosedShape
  })

  g.run()

  StdIn.readLine()
  system.terminate()
}
Example 8
Source File: Graph2.scala From fusion-data with Apache License 2.0
package example.akkastream.basic

import akka.actor.ActorSystem
import akka.stream.{ActorMaterializer, ClosedShape}
import akka.stream.scaladsl.{Broadcast, Flow, GraphDSL, RunnableGraph, Sink, Source}

import scala.io.StdIn

object Graph2 extends App {
  implicit val system = ActorSystem()
  implicit val mat = ActorMaterializer()
  import system.dispatcher

  val topHeadSink = Sink.head[Int]
  val bottomHeadSink = Sink.head[Int]
  val sharedDoubler = Flow[Int].map(_ * 2)

  val g = RunnableGraph.fromGraph(GraphDSL.create(topHeadSink, bottomHeadSink)((_, _)) {
    implicit builder => (topHS, bottomHS) =>
      import GraphDSL.Implicits._

      val broadcast = builder.add(Broadcast[Int](2))
      Source.single(1) ~> broadcast.in

      broadcast ~> sharedDoubler ~> topHS.in
      broadcast ~> sharedDoubler ~> bottomHS.in

      ClosedShape
  })

  val (topF, bottomF) = g.run()
  topF.foreach(v => println(s"top is $v"))
  bottomF.foreach(v => println(s"bottom is $v"))

  StdIn.readLine()
  system.terminate()
}
Example 9
Source File: PartialGraph.scala From fusion-data with Apache License 2.0
package example.akkastream.basic

import akka.actor.ActorSystem
import akka.stream.scaladsl.{GraphDSL, RunnableGraph, Sink, Source, ZipWith}
import akka.stream.{ActorMaterializer, ClosedShape, UniformFanInShape}

import scala.concurrent.Await
import scala.concurrent.duration._
import scala.io.StdIn

object PartialGraph extends App {
  implicit val system = ActorSystem()
  implicit val mat = ActorMaterializer()

  val pickMaxOfThree = GraphDSL.create() { implicit b =>
    import GraphDSL.Implicits._

    // The last type parameter of ZipWith is the output type.
    val zip1 = b.add(ZipWith[Int, Int, Int](math.max))
    val zip2 = b.add(ZipWith[Int, Int, Int](math.max))

    zip1.out ~> zip2.in0

    UniformFanInShape(zip2.out, zip1.in0, zip1.in1, zip2.in1)
  }

  val resultSink = Sink.head[Int]

  val g = RunnableGraph.fromGraph(GraphDSL.create(resultSink) { implicit b => sink =>
    import GraphDSL.Implicits._

    val pm3 = b.add(pickMaxOfThree)

    Source.single(4) ~> pm3.in(0)
    Source.single(2) ~> pm3.in(1)
    Source.single(3) ~> pm3.in(2)

    pm3.out ~> sink.in
    ClosedShape
  })

  val result = Await.result(g.run, 300.millis)
  println(s"result: $result")

  StdIn.readLine()
  system.terminate()
}
Example 10
Source File: BufferProblem.scala From fusion-data with Apache License 2.0
package example.akkastream.buffer

import akka.actor.ActorSystem
import akka.stream.scaladsl.{GraphDSL, RunnableGraph, Sink, Source, ZipWith}
import akka.stream.{ActorMaterializer, Attributes, ClosedShape}

import scala.concurrent.duration._
import scala.io.StdIn

object BufferProblem extends App {
  implicit val system = ActorSystem()
  implicit val mat = ActorMaterializer()

  case class Tick()

  val g = RunnableGraph.fromGraph(GraphDSL.create() { implicit b =>
    import akka.stream.scaladsl.GraphDSL.Implicits._

    // This is the asynchronous stage in this graph.
    val zipper =
      b.add(ZipWith[Tick, Int, Int]((tick, count) => count).async.addAttributes(Attributes.inputBuffer(1, 1)))
    // With the default buffer settings this would only print 1:
    // val zipper = b.add(ZipWith[Tick, Int, Int]((tick, count) => count).async)

    Source.tick(initialDelay = 3.second, interval = 3.second, Tick()) ~> zipper.in0

    Source
      .tick(initialDelay = 1.second, interval = 1.second, "message!")
      .conflateWithSeed(seed = (_) => 1)((count, _) => count + 1) ~> zipper.in1

    zipper.out ~> Sink.foreach(println)
    ClosedShape
  })

  g.run()

  StdIn.readLine()
  system.terminate()
}
Example 11
Source File: MergeHubDemo.scala From fusion-data with Apache License 2.0
package example.akkastream.dynamichub

import akka.NotUsed
import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.{MergeHub, RunnableGraph, Sink, Source}
import com.typesafe.scalalogging.StrictLogging

import scala.io.StdIn

object MergeHubDemo extends App with StrictLogging {
  implicit val system = ActorSystem()
  implicit val mat = ActorMaterializer()

  // A simple consumer that will print to the console for now
  val consumer = Sink.foreach[String](v => logger.info(s"consumer: $v"))

  // Attach a MergeHub Source to the consumer. This will materialize to a
  // corresponding Sink.
  val runnableGraph: RunnableGraph[Sink[String, NotUsed]] =
    MergeHub.source[String](perProducerBufferSize = 16).to(consumer)

  // By running/materializing the consumer we get back a Sink, and hence
  // now have access to feed elements into it. This Sink can be materialized
  // any number of times, and every element that enters the Sink will
  // be consumed by our consumer.
  val toConsumer: Sink[String, NotUsed] = runnableGraph.run()

  // Feeding two independent sources into the hub.
  Source.single("Hello!").runWith(toConsumer)
  Source.single("Hub!").runWith(toConsumer)

  StdIn.readLine()
  system.terminate()
}
Example 12
Source File: KillSwitchMatStream.scala From squbs with Apache License 2.0
package org.squbs.stream

import java.util.concurrent.atomic.AtomicLong

import akka.stream.{ClosedShape, KillSwitch, KillSwitches}
import akka.stream.ThrottleMode.Shaping
import akka.stream.scaladsl.{Flow, GraphDSL, Keep, RunnableGraph, Sink, Source}

import scala.concurrent.Future
import scala.concurrent.duration._
import scala.language.postfixOps

object KillSwitchMatStream {
  val genCount = new AtomicLong(0L)
}

class KillSwitchMatStream extends PerpetualStream[(KillSwitch, Future[Long])] {
  import KillSwitchMatStream._
  import org.squbs.unicomplex.Timeouts._

  override def stopTimeout = awaitMax

  def generator = Iterator.iterate(0) { p =>
    if (p == Int.MaxValue) 0 else p + 1
  } map { v =>
    genCount.incrementAndGet()
    v
  }

  val source = Source.fromIterator(generator _)

  val throttle = Flow[Int].throttle(5000, 1 second, 1000, Shaping)

  val counter = Flow[Int].map { _ => 1L }.reduce { _ + _ }.toMat(Sink.head)(Keep.right)

  override def streamGraph = RunnableGraph.fromGraph(
    GraphDSL.create(KillSwitches.single[Int], counter)((_, _)) { implicit builder => (kill, sink) =>
      import GraphDSL.Implicits._
      source ~> kill ~> throttle ~> sink
      ClosedShape
    })

  override def receive = {
    case NotifyWhenDone =>
      // Send back the future directly here, don't map the future. The map will likely happen after ActorSystem
      // shutdown so we cannot use context.dispatcher as execution context for the map as it won't be there when
      // the map is supposed to happen.
      sender() ! matValue._2
  }
}
Example 13
Source File: KillSwitchWithChildActorStream.scala From squbs with Apache License 2.0
package org.squbs.stream

import java.util.concurrent.atomic.AtomicLong

import akka.actor.{Actor, Props}
import akka.stream.ClosedShape
import akka.stream.ThrottleMode.Shaping
import akka.stream.scaladsl.{Flow, GraphDSL, Keep, RunnableGraph, Sink, Source}

import scala.concurrent.Future
import scala.concurrent.duration._
import scala.language.postfixOps

object KillSwitchWithChildActorStream {
  val genCount = new AtomicLong(0L)
}

class DummyChildActor extends Actor {
  def receive = PartialFunction.empty
}

class KillSwitchWithChildActorStream extends PerpetualStream[Future[Long]] {
  import KillSwitchWithChildActorStream._
  import org.squbs.unicomplex.Timeouts._

  val dummyChildActor = context.actorOf(Props[DummyChildActor])

  override def stopTimeout = awaitMax

  def generator = Iterator.iterate(0) { p =>
    if (p == Int.MaxValue) 0 else p + 1
  } map { v =>
    genCount.incrementAndGet()
    v
  }

  val source = Source.fromIterator(generator _)

  val throttle = Flow[Int].throttle(5000, 1 second, 1000, Shaping)

  val counter = Flow[Int].map { _ => 1L }.reduce { _ + _ }.toMat(Sink.head)(Keep.right)

  override def streamGraph = RunnableGraph.fromGraph(GraphDSL.create(counter) { implicit builder => sink =>
    import GraphDSL.Implicits._
    source ~> killSwitch.flow[Int] ~> throttle ~> sink
    ClosedShape
  })

  override def receive = {
    case NotifyWhenDone =>
      // Send back the future directly here, don't map the future. The map will likely happen after ActorSystem
      // shutdown so we cannot use context.dispatcher as execution context for the map as it won't be there when
      // the map is supposed to happen.
      sender() ! matValue
  }

  override def shutdown() = {
    val f = super.shutdown()
    defaultMidActorStop(Seq(dummyChildActor))
    f
  }
}
Example 14
Source File: KillSwitchStream.scala From squbs with Apache License 2.0
package org.squbs.stream

import java.util.concurrent.atomic.AtomicLong

import akka.stream.ClosedShape
import akka.stream.ThrottleMode.Shaping
import akka.stream.scaladsl.{Flow, GraphDSL, Keep, RunnableGraph, Sink, Source}

import scala.concurrent.Future
import scala.concurrent.duration._
import scala.language.postfixOps

object KillSwitchStream {
  val genCount = new AtomicLong(0L)
}

class KillSwitchStream extends PerpetualStream[Future[Long]] {
  import KillSwitchStream._
  import org.squbs.unicomplex.Timeouts._

  override def stopTimeout = awaitMax

  def generator = Iterator.iterate(0) { p =>
    if (p == Int.MaxValue) 0 else p + 1
  } map { v =>
    genCount.incrementAndGet()
    v
  }

  val source = Source.fromIterator(generator _)

  val throttle = Flow[Int].throttle(5000, 1 second, 1000, Shaping)

  val counter = Flow[Int].map { _ => 1L }.reduce { _ + _ }.toMat(Sink.head)(Keep.right)

  override def streamGraph = RunnableGraph.fromGraph(GraphDSL.create(counter) { implicit builder => sink =>
    import GraphDSL.Implicits._
    source ~> killSwitch.flow[Int] ~> throttle ~> sink
    ClosedShape
  })

  override def receive = {
    case NotifyWhenDone =>
      // Send back the future directly here, don't map the future. The map will likely happen after ActorSystem
      // shutdown so we cannot use context.dispatcher as execution context for the map as it won't be there when
      // the map is supposed to happen.
      sender() ! matValue
  }
}
Example 15
Source File: ProperShutdownStream.scala From squbs with Apache License 2.0
package org.squbs.stream

import java.util.concurrent.atomic.AtomicLong

import akka.Done
import akka.actor.ActorRef
import akka.stream.ClosedShape
import akka.stream.ThrottleMode.Shaping
import akka.stream.scaladsl.{Flow, GraphDSL, Keep, RunnableGraph, Sink, Source}

import scala.concurrent.Future
import scala.concurrent.duration._
import scala.language.postfixOps

object ProperShutdownStream {
  val genCount = new AtomicLong(0L)
}

class ProperShutdownStream extends PerpetualStream[(ActorRef, Future[Long])] {
  import ProperShutdownStream._
  import org.squbs.unicomplex.Timeouts._

  override def stopTimeout = awaitMax

  def generator = Iterator.iterate(0) { p =>
    if (p == Int.MaxValue) 0 else p + 1
  } map { v =>
    genCount.incrementAndGet()
    v
  }

  val managedSource = LifecycleManaged().source(Source fromIterator generator _)

  val throttle = Flow[Int].throttle(5000, 1 second, 1000, Shaping)

  val counter = Flow[Int].map { _ => 1L }.reduce { _ + _ }.toMat(Sink.head)(Keep.right)

  override def streamGraph = RunnableGraph.fromGraph(
    GraphDSL.create(managedSource, counter)((a, b) => (a._2, b)) { implicit builder => (source, sink) =>
      import GraphDSL.Implicits._
      source ~> throttle ~> sink
      ClosedShape
    })

  override def receive = {
    case NotifyWhenDone =>
      val (_, fCount) = matValue
      // Send back the future directly here, don't map the future. The map will likely happen after ActorSystem
      // shutdown so we cannot use context.dispatcher as execution context for the map as it won't be there when
      // the map is supposed to happen.
      sender() ! fCount
  }

  override def shutdown() = {
    super.shutdown()
    import context.dispatcher
    val (actorRef, fCount) = matValue
    val fStopped = gracefulStop(actorRef, awaitMax)
    for { _ <- fCount; _ <- fStopped } yield Done
  }
}
Example 16
Source File: StaticTime.scala From daml with Apache License 2.0
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.ledger.client.services.testing.time

import java.time.Instant
import java.util.concurrent.atomic.AtomicReference

import akka.stream.scaladsl.{Broadcast, Flow, GraphDSL, RunnableGraph, Sink}
import akka.stream.{ClosedShape, KillSwitches, Materializer, UniqueKillSwitch}
import com.daml.api.util.{TimeProvider, TimestampConversion}
import com.daml.api.util.TimestampConversion._
import com.daml.grpc.adapter.ExecutionSequencerFactory
import com.daml.grpc.adapter.client.akka.ClientAdapter
import com.daml.dec.DirectExecutionContext
import com.daml.ledger.api.v1.testing.time_service.{GetTimeRequest, SetTimeRequest}
import com.daml.ledger.api.v1.testing.time_service.TimeServiceGrpc.{TimeService, TimeServiceStub}
import com.daml.ledger.client.LedgerClient

import scala.concurrent.{ExecutionContext, Future}

class StaticTime(
    timeService: TimeService,
    clock: AtomicReference[Instant],
    killSwitch: UniqueKillSwitch,
    ledgerId: String)
    extends TimeProvider
    with AutoCloseable {

  def getCurrentTime: Instant = clock.get

  def timeRequest(instant: Instant) =
    SetTimeRequest(
      ledgerId,
      Some(TimestampConversion.fromInstant(getCurrentTime)),
      Some(TimestampConversion.fromInstant(instant)))

  def setTime(instant: Instant)(implicit ec: ExecutionContext): Future[Unit] = {
    timeService.setTime(timeRequest(instant)).map { _ =>
      val _ = StaticTime.advanceClock(clock, instant)
    }
  }

  override def close(): Unit = killSwitch.shutdown()
}

object StaticTime {

  def advanceClock(clock: AtomicReference[Instant], instant: Instant): Instant = {
    clock.updateAndGet {
      case current if instant isAfter current => instant
      case current => current
    }
  }

  def updatedVia(timeService: TimeServiceStub, ledgerId: String, token: Option[String] = None)(
      implicit m: Materializer,
      esf: ExecutionSequencerFactory): Future[StaticTime] = {

    val clockRef = new AtomicReference[Instant](Instant.EPOCH)
    val killSwitchExternal = KillSwitches.single[Instant]
    val sinkExternal = Sink.head[Instant]

    RunnableGraph
      .fromGraph {
        GraphDSL.create(killSwitchExternal, sinkExternal) {
          case (killSwitch, futureOfFirstElem) =>
            // We serve this in a future which completes when the first element has passed through.
            // Thus we make sure that the object we serve already received time data from the ledger.
            futureOfFirstElem.map(_ => new StaticTime(timeService, clockRef, killSwitch, ledgerId))(
              DirectExecutionContext)
        } { implicit b => (killSwitch, sinkHead) =>
          import GraphDSL.Implicits._

          val instantSource = b.add(
            ClientAdapter
              .serverStreaming(
                GetTimeRequest(ledgerId),
                LedgerClient.stub(timeService, token).getTime)
              .map(r => toInstant(r.getCurrentTime)))

          val updateClock = b.add(Flow[Instant].map { i =>
            advanceClock(clockRef, i)
            i
          })

          val broadcastTimes = b.add(Broadcast[Instant](2))

          val ignore = b.add(Sink.ignore)

          // format: OFF
          instantSource ~> killSwitch ~> updateClock ~> broadcastTimes.in
                                                        broadcastTimes.out(0) ~> sinkHead
                                                        broadcastTimes.out(1) ~> ignore
          // format: ON

          ClosedShape
        }
      }
      .run()
  }
}
Example 17
Source File: BroadcastBufferCommitOrderSpec.scala From squbs with Apache License 2.0
package org.squbs.pattern.stream

import akka.actor.ActorSystem
import akka.stream.{ClosedShape, ActorMaterializer}
import akka.stream.scaladsl.{GraphDSL, RunnableGraph}
import com.typesafe.config.ConfigFactory
import org.scalatest.concurrent.Eventually
import org.scalatest.{BeforeAndAfterAll, Matchers, FlatSpec}
import org.squbs.testkit.Timeouts._

import scala.concurrent.Await

class BroadcastBufferCommitOrderSpec extends FlatSpec with Matchers with BeforeAndAfterAll with Eventually {

  implicit val system = ActorSystem("BroadcastBufferCommitOrderSpec", PersistentBufferSpec.testConfig)
  implicit val mat = ActorMaterializer()
  implicit val serializer = QueueSerializer[Int]()
  import StreamSpecUtil._

  override def afterAll = {
    Await.ready(system.terminate(), awaitMax)
  }

  it should "fail when an out of order commit is attempted and commit-order-policy = strict" in {
    val util = new StreamSpecUtil[Int, Event[Int]](2)
    import util._
    val buffer = BroadcastBufferAtLeastOnce[Int](ConfigFactory.parseString("commit-order-policy = strict").withFallback(config))
    val streamGraph = RunnableGraph.fromGraph(GraphDSL.create(flowCounter) { implicit builder =>
      sink =>
        import GraphDSL.Implicits._
        val commit = buffer.commit[Int]
        val bcBuffer = builder.add(buffer.async)
        val mr = builder.add(merge)
        in ~> bcBuffer ~> filterARandomElement ~> commit ~> mr ~> sink
              bcBuffer ~> commit ~> mr
        ClosedShape
    })

    val sinkF = streamGraph.run()
    Await.result(sinkF.failed, awaitMax) shouldBe an[CommitOrderException]
    clean()
  }

  it should "not fail when an out of order commit is attempted and commit-order-policy = lenient" in {
    val util = new StreamSpecUtil[Int, Event[Int]](2)
    import util._
    val buffer = BroadcastBufferAtLeastOnce[Int](ConfigFactory.parseString("commit-order-policy = lenient").withFallback(config))
    val streamGraph = RunnableGraph.fromGraph(GraphDSL.create(flowCounter) { implicit builder =>
      sink =>
        import GraphDSL.Implicits._
        val commit = buffer.commit[Int]
        val bcBuffer = builder.add(buffer.async)
        val mr = builder.add(merge)
        in ~> bcBuffer ~> filterARandomElement ~> commit ~> mr ~> sink
              bcBuffer ~> commit ~> mr
        ClosedShape
    })

    val countFuture = streamGraph.run()
    val count = Await.result(countFuture, awaitMax)
    eventually { buffer.queue shouldBe 'closed }
    count shouldBe (elementCount * outputPorts - 1)
    println(s"Total records processed $count")
    clean()
  }
}
Example 18
Source File: ReloadableSchemaProvider.scala From graphql-gateway with Apache License 2.0
package sangria.gateway.schema

import java.util.concurrent.atomic.AtomicReference

import akka.actor.ActorSystem
import akka.stream.{Materializer, OverflowStrategy}
import akka.stream.scaladsl.{BroadcastHub, Keep, RunnableGraph, Source}
import better.files.File
import sangria.gateway.AppConfig
import sangria.gateway.file.FileMonitorActor
import sangria.gateway.http.client.HttpClient
import sangria.gateway.schema.materializer.{GatewayContext, GatewayMaterializer}
import sangria.gateway.util.Logging

import scala.concurrent.{ExecutionContext, Future}
import scala.util.{Failure, Success}

// TODO: on a timer reload all external schemas and check for changes
class ReloadableSchemaProvider(config: AppConfig, client: HttpClient, mat: GatewayMaterializer)(
    implicit system: ActorSystem, ec: ExecutionContext, amat: Materializer)
    extends SchemaProvider[GatewayContext, Any] with Logging {

  val loader = new SchemaLoader(config, client, mat)
  val schemaRef = new AtomicReference[Option[SchemaInfo[GatewayContext, Any]]](None)

  system.actorOf(FileMonitorActor.props(config.watch.allFiles, config.watch.threshold, config.watch.allGlobs, reloadSchema))

  private val producer = Source.actorRef[Boolean](100, OverflowStrategy.dropTail)
  private val runnableGraph = producer.toMat(BroadcastHub.sink(bufferSize = 256))(Keep.both)
  private val (changesPublisher, changesSource) = runnableGraph.run()

  val schemaChanges = Some(changesSource)

  def schemaInfo =
    schemaRef.get() match {
      case v @ Some(_) ⇒ Future.successful(v)
      case None ⇒ reloadSchema
    }

  def reloadSchema(files: Vector[File]): Unit = {
    logger.info(s"Schema files are changed: ${files mkString ", "}. Reloading schema")

    reloadSchema
  }

  def reloadSchema: Future[Option[SchemaInfo[GatewayContext, Any]]] =
    loader.loadSchema.andThen {
      case Success(Some(newSchema)) ⇒
        schemaRef.get() match {
          case Some(currentSchema) ⇒
            val changes = newSchema.schema.compare(currentSchema.schema)
            val renderedChanges =
              if (changes.nonEmpty)
                " with following changes:\n" + changes.map(c ⇒ "  * " + c.description + (if (c.breakingChange) " (breaking)" else "")).mkString("\n")
              else
                " without any changes."

            changesPublisher ! true
            logger.info(s"Schema successfully reloaded$renderedChanges")
          case None ⇒
            logger.info(s"Schema successfully loaded from files:\n${newSchema.files.map(f ⇒ "  * " + f).mkString("\n")}")
        }

        schemaRef.set(Some(newSchema))
      case Failure(error) ⇒
        logger.error("Failed to load the schema", error)
    }
}
Example 19
Source File: MapInitAndLastTests.scala From CM-Well with Apache License 2.0
package cmwell.util.streams.test

import akka.stream._
import akka.stream.scaladsl.{GraphDSL, RunnableGraph, Source}
import akka.stream.testkit.scaladsl.{TestSink, TestSource}
import akka.stream.testkit.TestPublisher.{Probe => SrcProbe}
import akka.stream.testkit.TestSubscriber.{Probe => SnkProbe}
import cmwell.util.stream.MapInitAndLast

import scala.concurrent.duration.DurationInt

class MapInitAndLastTests extends StreamSpec {

  def generateGraph[In](): (SrcProbe[In], SnkProbe[(In, Boolean)]) = {
    val src = TestSource.probe[In]
    val snk = TestSink.probe[(In, Boolean)]
    RunnableGraph.fromGraph(GraphDSL.create(src, snk)((a, b) => (a, b)) { implicit b => (s1, s2) =>
      import GraphDSL.Implicits._

      val mial = b.add(new MapInitAndLast[In, (In, Boolean)](_ -> false, _ -> true))

      s1 ~> mial ~> s2

      ClosedShape
    }).run()
  }

  describe("MapInitAndLast Stage") {
    it("should buffer a single element") {
      val (src, snk) = generateGraph[Int]()
      snk.request(99)
      src.sendNext(1)
      snk.expectNoMessage(300.millis)
      src.sendComplete()
      snk.expectNext((1, true))
      snk.expectComplete()
    }

    it("should treat last element differently") {
      val (src, snk) = generateGraph[Int]()
      snk.request(99)
      src.sendNext(1)
      snk.expectNoMessage(300.millis)
      src.sendNext(2)
      snk.expectNext((1, false))
      src.sendNext(3)
      snk.expectNext((2, false))
      src.sendComplete()
      snk.expectNext((3, true))
      snk.expectComplete()
    }

    it("should propagate back-pressure") {
      val (src, snk) = generateGraph[Int]()
      snk.ensureSubscription()
      src.sendNext(1)
      snk.expectNoMessage(300.millis)
      src.sendNext(1)
      snk.expectNoMessage(300.millis)
      src.sendComplete()
      snk.expectNoMessage(300.millis)
      snk.request(1)
      snk.expectNext((1, false))
      snk.request(1)
      snk.expectNext((1, true))
      snk.expectComplete()
    }
  }
}
Example 20
Source File: AkkaStreamsHelloWorldApp3.scala From Scala-Reactive-Programming with MIT License
package com.packt.publishing.akka.streams.hello

import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.{Flow, Keep, RunnableGraph, Sink, Source}
import akka.{Done, NotUsed}

import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.Future
import scala.util.{Failure, Success}

object AkkaStreamsHelloWorldApp3 extends App {

  implicit val actorSystem = ActorSystem("HelloWorldSystem")
  implicit val materializer = ActorMaterializer()

  val helloWorldSource: Source[String, NotUsed] = Source.single("Akka Streams Hello World")
  val helloWorldSink: Sink[String, Future[Done]] = Sink.foreach(println)
  val helloWorldFlow: Flow[String, String, NotUsed] = Flow[String].map(str => str.toUpperCase)

  val helloWorldGraph: RunnableGraph[NotUsed] =
    helloWorldSource
      .via(helloWorldFlow)
      .to(helloWorldSink)

  val helloWorldGraph2: RunnableGraph[Future[Done]] =
    helloWorldSource
      .via(helloWorldFlow)
      .toMat(helloWorldSink)(Keep.right)

  helloWorldGraph.run

  val helloWorldMaterializedValue: Future[Done] = helloWorldGraph2.run
  helloWorldMaterializedValue.onComplete {
    case Success(Done) =>
      println("HelloWorld Stream ran successfully.")
    case Failure(exception) =>
      println(s"HelloWorld Stream ran into an issue: ${exception}.")
  }

  actorSystem.terminate
}
Example 21
Source File: AkkaStreamsMergeHubApp.scala From Scala-Reactive-Programming with MIT License
package com.packt.publishing.dynamic.akka.streams

import akka.NotUsed
import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.{MergeHub, RunnableGraph, Sink, Source}

object AkkaStreamsMergeHubApp extends App {

  implicit val actorSystem = ActorSystem("MergeHubSystem")
  implicit val materializer = ActorMaterializer()

  val consumer = Sink.foreach(println)
  val mergeHub = MergeHub.source[String](perProducerBufferSize = 16)
  val runnableGraph: RunnableGraph[Sink[String, NotUsed]] = mergeHub.to(consumer)

  val toConsumer: Sink[String, NotUsed] = runnableGraph.run()

  Source.single("Hello!").runWith(toConsumer)
  Source.single("MergeHub!").runWith(toConsumer)
  Source.single("World!").runWith(toConsumer)

  Thread.sleep(500)
  actorSystem.terminate
}
Example 22
Source File: AkkaStreamsPartitionHubApp.scala From Scala-Reactive-Programming with MIT License
package com.packt.publishing.dynamic.akka.streams

import akka.NotUsed
import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.{Keep, PartitionHub, RunnableGraph, Source}

import scala.concurrent.duration._

object AkkaStreamsPartitionHubApp extends App {

  implicit val actorSystem = ActorSystem("PartitionHubSystem")
  implicit val materializer = ActorMaterializer()

  val producer = Source.tick(1.second, 1.second, "message")
    .zipWith(Source(1 to 10))((a, b) ⇒ s"$a-$b")

  val runnableGraph: RunnableGraph[Source[String, NotUsed]] =
    producer.toMat(PartitionHub.sink(
      (size, elem) ⇒ math.abs(elem.hashCode) % size,
      startAfterNrOfConsumers = 2,
      bufferSize = 256))(Keep.right)

  val fromProducer: Source[String, NotUsed] = runnableGraph.run()

  fromProducer.runForeach(msg ⇒ println("consumer1: " + msg))
  fromProducer.runForeach(msg ⇒ println("consumer2: " + msg))

  Thread.sleep(5000)
  actorSystem.terminate
}
Example 23
Source File: Main.scala From kinesis-stream with MIT License
import akka.Done
import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.{Keep, RunnableGraph, Sink}
import px.kinesis.stream.consumer

import scala.concurrent.Future

object Main extends App {

  implicit val system = ActorSystem("kinesis-source")
  implicit val ec = system.dispatcher
  implicit val mat = ActorMaterializer()

  // A simple consumer that will print to the console for now
  val console = Sink.foreach[String](println)

  val runnableGraph: RunnableGraph[Future[Done]] = consumer
    .source("test-stream", "test-app")
    .via(consumer.commitFlow(parallelism = 2))
    .map(r => r.data.utf8String)
    .toMat(console)(Keep.left)

  val done = runnableGraph.run()
  done.onComplete(_ => {
    println("Shutdown completed")
    system.terminate()
  })
}
Example 24
Source File: LoadTest.scala From ws_to_kafka with MIT License
package com.pkinsky

import java.util.concurrent.atomic.AtomicInteger

import akka.http.scaladsl.model.ws.{InvalidUpgradeResponse, WebsocketUpgradeResponse, WebsocketRequest, TextMessage}
import akka.http.scaladsl.Http
import akka.http.scaladsl.model.Uri
import akka.stream.ThrottleMode
import akka.stream.scaladsl.{Keep, Sink, RunnableGraph, Source}
import play.api.libs.json.Json

import scala.concurrent.{Future, Await}
import scala.concurrent.duration._
import scala.language.postfixOps

object LoadTest extends App with AppContext {
  val clients = 256
  val eventsPerClient = 256

  val eventsSent = new AtomicInteger(0)

  def testData(clientId: String): Source[Event, Unit] =
    Source.unfoldInf(1) { n =>
      val event = Event(s"msg number $n", clientId, System.currentTimeMillis())
      (n + 1, event)
    }.take(eventsPerClient).throttle(1, 100 millis, 1, ThrottleMode.Shaping)

  def wsClient(clientId: String): RunnableGraph[Future[WebsocketUpgradeResponse]] =
    testData(clientId).map(e => TextMessage.Strict(Json.toJson(e).toString))
      .map { x => eventsSent.incrementAndGet(); x }
      .viaMat(Http().websocketClientFlow(WebsocketRequest(Uri(s"ws://localhost:$port/ws"))))(Keep.right)
      .to(Sink.ignore)

  // set up websocket connections
  (1 to clients).foreach { id =>
    wsClient(s"client $id").run()
  }

  // watch kafka for messages sent via websocket
  val kafkaConsumerGraph: RunnableGraph[Future[Seq[Event]]] =
    kafka.consume[Event](eventTopic, "group_new")
      .take(clients * eventsPerClient).takeWithin(2 minutes)
      .toMat(Sink.seq)(Keep.right)

  val res = Await.result(kafkaConsumerGraph.run, 5 minutes)
  println(s"sent ${eventsSent.get()} events total")
  println(s"res size: ${res.length}")
}
Example 25
Source File: ProcessingKafkaApplication.scala From Akka-Cookbook with MIT License
package com.packt.chapter8

import akka.actor.ActorSystem
import akka.kafka.scaladsl.{Consumer, Producer}
import akka.kafka.{ConsumerSettings, ProducerSettings, Subscriptions}
import akka.stream.{ActorMaterializer, ClosedShape}
import akka.stream.scaladsl.{Flow, GraphDSL, RunnableGraph, Sink, Source}
import org.apache.kafka.clients.consumer.{ConsumerConfig, ConsumerRecord}
import org.apache.kafka.clients.producer.ProducerRecord
import org.apache.kafka.common.TopicPartition
import org.apache.kafka.common.serialization.{ByteArrayDeserializer, ByteArraySerializer, StringDeserializer, StringSerializer}

import scala.concurrent.duration._

object ProcessingKafkaApplication extends App {
  implicit val actorSystem = ActorSystem("SimpleStream")
  implicit val actorMaterializer = ActorMaterializer()

  val bootstrapServers = "localhost:9092"
  val kafkaTopic = "akka_streams_topic"
  val partition = 0
  val subscription = Subscriptions.assignment(new TopicPartition(kafkaTopic, partition))

  val consumerSettings = ConsumerSettings(actorSystem, new ByteArrayDeserializer, new StringDeserializer)
    .withBootstrapServers(bootstrapServers)
    .withGroupId("akka_streams_group")
    .withProperty(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest")

  val producerSettings = ProducerSettings(actorSystem, new ByteArraySerializer, new StringSerializer)
    .withBootstrapServers(bootstrapServers)

  val runnableGraph = RunnableGraph.fromGraph(GraphDSL.create() { implicit builder =>
    import GraphDSL.Implicits._

    val tickSource = Source.tick(0 seconds, 5 seconds, "Hello from Akka Streams using Kafka!")
    val kafkaSource = Consumer.plainSource(consumerSettings, subscription)
    val kafkaSink = Producer.plainSink(producerSettings)
    val printlnSink = Sink.foreach(println)

    val mapToProducerRecord = Flow[String].map(elem => new ProducerRecord[Array[Byte], String](kafkaTopic, elem))
    val mapFromConsumerRecord = Flow[ConsumerRecord[Array[Byte], String]].map(record => record.value())

    tickSource ~> mapToProducerRecord ~> kafkaSink
    kafkaSource ~> mapFromConsumerRecord ~> printlnSink

    ClosedShape
  })

  runnableGraph.run()
}
Example 26
Source File: WorkingWithGraphsApplication.scala From Akka-Cookbook with MIT License
package com.packt.chapter8

import akka.actor.ActorSystem
import akka.stream._
import akka.stream.scaladsl.{Balance, Broadcast, Flow, GraphDSL, Merge, RunnableGraph, Sink, Source}

import scala.concurrent.duration._
import scala.util.Random

object WorkingWithGraphsApplication extends App {

  implicit val actorSystem = ActorSystem("WorkingWithGraphs")
  implicit val actorMaterializer = ActorMaterializer()

  trait MobileMsg {
    def id = Random.nextInt(1000)
    def toGenMsg(origin: String) = GenericMsg(id, origin)
  }
  class AndroidMsg extends MobileMsg
  class IosMsg extends MobileMsg
  case class GenericMsg(id: Int, origin: String)

  val graph = RunnableGraph.fromGraph(GraphDSL.create() { implicit builder =>
    import GraphDSL.Implicits._

    // Sources
    val androidNotification = Source.tick(2 seconds, 500 millis, new AndroidMsg)
    val iOSNotification = Source.tick(700 millis, 600 millis, new IosMsg)

    // Flows
    val groupAndroid = Flow[AndroidMsg].map(_.toGenMsg("ANDROID")).groupedWithin(5, 5 seconds).async
    val groupIos = Flow[IosMsg].map(_.toGenMsg("IOS")).groupedWithin(5, 5 seconds).async
    def counter = Flow[Seq[GenericMsg]].via(new StatefulCounterFlow())
    def mapper = Flow[Seq[GenericMsg]].mapConcat(_.toList)

    // Junctions
    val aBroadcast = builder.add(Broadcast[Seq[GenericMsg]](2))
    val iBroadcast = builder.add(Broadcast[Seq[GenericMsg]](2))
    val balancer = builder.add(Balance[Seq[GenericMsg]](2))
    val notificationMerge = builder.add(Merge[Seq[GenericMsg]](2))
    val genericNotificationMerge = builder.add(Merge[GenericMsg](2))

    def counterSink(s: String) = Sink.foreach[Int](x => println(s"$s: [$x]"))

    // Graph
    androidNotification ~> groupAndroid ~> aBroadcast ~> counter ~> counterSink("Android")
                                           aBroadcast ~> notificationMerge
                                           iBroadcast ~> notificationMerge
    iOSNotification ~> groupIos ~> iBroadcast ~> counter ~> counterSink("Ios")

    notificationMerge ~> balancer ~> mapper.async ~> genericNotificationMerge
                         balancer ~> mapper.async ~> genericNotificationMerge

    genericNotificationMerge ~> Sink.foreach(println)

    ClosedShape
  })

  graph.run()
}
Example 27
Source File: Test15.scala From incubator-retired-gearpump with Apache License 2.0
package org.apache.gearpump.akkastream.example

import akka.actor.ActorSystem
import akka.stream._
import akka.stream.scaladsl.{Balance, Broadcast, Flow, GraphDSL, Merge, RunnableGraph, Sink, Source}
import org.apache.gearpump.akkastream.GearpumpMaterializer
import org.apache.gearpump.cluster.main.{ArgumentsParser, CLIOption}
import org.apache.gearpump.util.AkkaApp

import scala.concurrent.Await
import scala.concurrent.duration._

object Test15 extends AkkaApp with ArgumentsParser {
  // scalastyle:off println
  override val options: Array[(String, CLIOption[Any])] = Array(
    "gearpump" -> CLIOption[Boolean]("<boolean>", required = false, defaultValue = Some(false))
  )

  override def main(akkaConf: Config, args: Array[String]): Unit = {
    val config = parse(args)
    implicit val system = ActorSystem("Test15", akkaConf)
    implicit val materializer: ActorMaterializer = config.getBoolean("gearpump") match {
      case true =>
        GearpumpMaterializer()
      case false =>
        ActorMaterializer(
          ActorMaterializerSettings(system).withAutoFusing(false)
        )
    }
    import akka.stream.scaladsl.GraphDSL.Implicits._

    RunnableGraph.fromGraph(GraphDSL.create() { implicit builder =>
      val A = builder.add(Source.single(0)).out
      val B = builder.add(Broadcast[Int](2))
      val C = builder.add(Merge[Int](2).named("C"))
      val D = builder.add(Flow[Int].map(_ + 1).named("D"))
      val E = builder.add(Balance[Int](2).named("E"))
      val F = builder.add(Merge[Int](2).named("F"))
      val G = builder.add(Sink.foreach(println).named("G")).in

      C <~ F
      A ~> B ~> C ~> F
           B ~> D ~> E ~> F
                     E ~> G

      ClosedShape
    }).run()

    Await.result(system.whenTerminated, 60.minutes)
  }
  // scalastyle:on println
}
Example 28
Source File: QueryProgress.scala From clickhouse-scala-client with GNU Lesser General Public License v3.0
package com.crobox.clickhouse.internal.progress

import akka.NotUsed
import akka.stream.scaladsl.{BroadcastHub, Keep, RunnableGraph, Source, SourceQueueWithComplete}
import akka.stream.{ActorAttributes, OverflowStrategy, Supervision}
import com.typesafe.scalalogging.LazyLogging
import spray.json._
import spray.json.DefaultJsonProtocol._

import scala.util.{Failure, Success, Try}

object QueryProgress extends LazyLogging {

  sealed trait QueryProgress
  case object QueryAccepted extends QueryProgress
  case object QueryFinished extends QueryProgress
  case object QueryRejected extends QueryProgress
  case class QueryFailed(cause: Throwable) extends QueryProgress
  case class QueryRetry(cause: Throwable, retryNumber: Int) extends QueryProgress

  case class ClickhouseQueryProgress(identifier: String, progress: QueryProgress)
  case class Progress(rowsRead: Long, bytesRead: Long, rowsWritten: Long, bytesWritten: Long, totalRows: Long) extends QueryProgress

  def queryProgressStream: RunnableGraph[(SourceQueueWithComplete[String], Source[ClickhouseQueryProgress, NotUsed])] =
    Source
      .queue[String](1000, OverflowStrategy.dropHead)
      .map[Option[ClickhouseQueryProgress]](queryAndProgress => {
        queryAndProgress.split("\n", 2).toList match {
          case queryId :: ProgressHeadersAsEventsStage.AcceptedMark :: Nil =>
            Some(ClickhouseQueryProgress(queryId, QueryAccepted))
          case queryId :: progressJson :: Nil =>
            Try {
              progressJson.parseJson match {
                case JsObject(fields) if fields.size == 3 =>
                  ClickhouseQueryProgress(
                    queryId,
                    Progress(
                      fields("read_rows").convertTo[String].toLong,
                      fields("read_bytes").convertTo[String].toLong,
                      0,
                      0,
                      fields("total_rows").convertTo[String].toLong
                    )
                  )
                case JsObject(fields) if fields.size == 5 =>
                  ClickhouseQueryProgress(
                    queryId,
                    Progress(
                      fields("read_rows").convertTo[String].toLong,
                      fields("read_bytes").convertTo[String].toLong,
                      fields("written_rows").convertTo[String].toLong,
                      fields("written_bytes").convertTo[String].toLong,
                      fields("total_rows_to_read").convertTo[String].toLong
                    )
                  )
                case _ => throw new IllegalArgumentException(s"Cannot extract progress from $progressJson")
              }
            } match {
              case Success(value) => Some(value)
              case Failure(exception) =>
                logger.warn(s"Failed to parse json $progressJson", exception)
                None
            }
          case other @ _ =>
            logger.warn(s"Could not get progress from $other")
            None
        }
      })
      .collect {
        case Some(progress) => progress
      }
      .withAttributes(ActorAttributes.supervisionStrategy({
        case ex @ _ =>
          logger.warn("Detected failure in the query progress stream, resuming operation.", ex)
          Supervision.Resume
      }))
      .toMat(BroadcastHub.sink)(Keep.both)
}
Example 29
Source File: PartitionHubWithDynamicSinks.scala From akka_streams_tutorial with MIT License
package sample.stream

import akka.NotUsed
import akka.actor.ActorSystem
import akka.stream.ThrottleMode
import akka.stream.scaladsl.{Keep, PartitionHub, RunnableGraph, Source}

import scala.concurrent.duration._

object PartitionHubWithDynamicSinks {
  implicit val system = ActorSystem()
  implicit val ec = system.dispatcher

  def main(args: Array[String]): Unit = {
    val producer = Source.tick(1.second, 100.millis, "message").zipWith(Source(1 to 100))((a, b) => s"$a-$b")

    // A new instance of the partitioner functions and its state is created
    // for each materialization of the PartitionHub
    def partitionRoundRobin(): (PartitionHub.ConsumerInfo, String) => Long = {
      var i = -1L

      (info, elem) => {
        i += 1
        info.consumerIdByIdx((i % info.size).toInt)
      }
    }

    def partitionToFastestConsumer(): (PartitionHub.ConsumerInfo, String) => Long = {
      (info: PartitionHub.ConsumerInfo, each: String) => info.consumerIds.minBy(id => info.queueSize(id))
    }

    // Attach a PartitionHub Sink to the producer. This will materialize to a corresponding Source
    // We need to use toMat and Keep.right since by default the materialized value to the left is used
    val runnableGraph: RunnableGraph[Source[String, NotUsed]] =
      producer.toMat(PartitionHub.statefulSink(
        // Switch the partitioning function
        () => partitionRoundRobin(),
        // () => partitionToFastestConsumer(),
        startAfterNrOfConsumers = 1, bufferSize = 1))(Keep.right)

    // By running/materializing the producer, we get back a Source, which
    // gives us access to the elements published by the producer.
    val fromProducer: Source[String, NotUsed] = runnableGraph.run()

    // Attach three dynamic fan-out sinks to the PartitionHub
    fromProducer.runForeach(msg => println("fast consumer1 received: " + msg))
    fromProducer.throttle(100, 1.millis, 10, ThrottleMode.Shaping)
      .runForeach(msg => println("slow consumer2 received: " + msg))
    fromProducer.throttle(100, 2.millis, 10, ThrottleMode.Shaping)
      .runForeach(msg => println("really slow consumer3 received: " + msg))
  }
}
Example 30
Source File: MergeHubWithDynamicSources.scala From akka_streams_tutorial with MIT License
package sample.stream

import akka.NotUsed
import akka.actor.ActorSystem
import akka.stream.DelayOverflowStrategy
import akka.stream.scaladsl.{Flow, MergeHub, RunnableGraph, Sink, Source}

import scala.concurrent.duration._

object MergeHubWithDynamicSources {
  implicit val system = ActorSystem("MergeHubWithDynamicSources")
  implicit val ec = system.dispatcher

  def main(args: Array[String]): Unit = {
    val slowSink: Sink[Seq[String], NotUsed] =
      Flow[Seq[String]]
        .delay(1.seconds, DelayOverflowStrategy.backpressure)
        .to(Sink.foreach(e => println(s"Reached Sink: $e")))

    // Attach a MergeHub Source to the consumer. This will materialize to a corresponding Sink
    val runnableGraph: RunnableGraph[Sink[String, NotUsed]] =
      MergeHub.source[String](perProducerBufferSize = 16)
        .groupedWithin(10, 2.seconds)
        .to(slowSink)

    // By running/materializing the graph we get back a Sink, and hence now have access to feed elements into it
    // This Sink can then be materialized any number of times, and every element that enters the Sink will be consumed by our consumer
    val toConsumer: Sink[String, NotUsed] = runnableGraph.run()

    def fastSource(sourceId: Int, toConsumer: Sink[String, NotUsed]) = {
      Source(1 to 10)
        .map { each => println(s"Produced: $sourceId.$each"); s"$sourceId.$each" }
        .runWith(toConsumer)
    }

    // Add dynamic producer sources. If the consumer cannot keep up, then ALL of the producers are backpressured
    (1 to 10).par.foreach(each => fastSource(each, toConsumer))
  }
}