akka.stream.scaladsl.SourceQueueWithComplete Scala Examples
The following examples show how to use akka.stream.scaladsl.SourceQueueWithComplete.
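Before working through the examples, it may help to see the core pattern they all build on in one minimal, self-contained sketch: Source.queue materializes a SourceQueueWithComplete, whose offer, complete, and watchCompletion methods let code running outside the stream push elements in and shut the stream down. This is an illustrative sketch, not code from any of the projects below; the object name, buffer size, and overflow strategy are arbitrary choices, and it assumes Akka 2.6+, where an implicit ActorSystem is enough to run a stream.

import akka.actor.ActorSystem
import akka.stream.OverflowStrategy
import akka.stream.scaladsl.{Keep, Sink, Source, SourceQueueWithComplete}

import scala.concurrent.Future

object QueueBasics extends App {
  implicit val system: ActorSystem = ActorSystem("QueueBasics")
  import system.dispatcher

  // Materialize the queue together with a sink that collects everything pushed through it.
  val (queue: SourceQueueWithComplete[String], elements: Future[Seq[String]]) =
    Source
      .queue[String](bufferSize = 10, OverflowStrategy.backpressure)
      .toMat(Sink.seq)(Keep.both)
      .run()

  for {
    result <- queue.offer("hello")        // offer() is asynchronous; the Future reports acceptance
    _ = println(s"offer result: $result") // QueueOfferResult.Enqueued on success
    _ = queue.complete()                  // close the queue; the stream then completes
    _ <- queue.watchCompletion()          // resolves once the stream has terminated
    xs <- elements
  } yield {
    println(s"stream saw: $xs")           // stream saw: List(hello)
    system.terminate()
  }
}

Note that offer is asynchronous: its Future must be inspected to learn whether the element was Enqueued, Dropped, or rejected because the queue closed, a point that Examples 4, 16, and 19 below handle explicitly.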
Example 1
Source File: DataProducer.scala From reactive-kafka-microservice-template with Apache License 2.0
package com.omearac.producers

import akka.actor._
import akka.event.Logging
import akka.stream.scaladsl.SourceQueueWithComplete
import com.omearac.producers.DataProducer.PublishMessages
import com.omearac.shared.EventMessages.{ActivatedProducerStream, MessagesPublished}
import com.omearac.shared.EventSourcing
import com.omearac.shared.KafkaMessages.KafkaMessage

object DataProducer {
  //Command Messages
  case class PublishMessages(numberOfMessages: Int)

  def props: Props = Props(new DataProducer)
}

class DataProducer extends Actor with EventSourcing {
  import context._
  implicit val system = context.system
  val log = Logging(system, this.getClass.getName)
  var producerStream: SourceQueueWithComplete[Any] = null

  def receive: Receive = {
    case ActivatedProducerStream(streamRef, kafkaTopic) =>
      producerStream = streamRef
      become(publishData)
    case msg: PublishMessages =>
      if (producerStream == null) self ! msg
    case other => log.error("DataProducer got the unknown message while in idle: " + other)
  }

  def publishData: Receive = {
    case PublishMessages(numberOfMessages) =>
      for (i <- 1 to numberOfMessages) {
        val myPublishableMessage = KafkaMessage(timetag, " send me to kafka, yo!", i)
        producerStream.offer(myPublishableMessage)
      }

      //Tell the akka-http front end that messages were sent
      sender() ! MessagesPublished(numberOfMessages)
      publishLocalEvent(MessagesPublished(numberOfMessages))
    case other => log.error("DataProducer got the unknown message while producing: " + other)
  }
}
Example 2
Source File: BasicEventStore.scala From izanami with Apache License 2.0
package domains.events.impl

import akka.actor.{Actor, ActorSystem, Props}
import akka.stream.scaladsl.{Source, SourceQueueWithComplete}
import akka.stream.{ActorMaterializer, Materializer}
import akka.{Done, NotUsed}
import domains.Domain.Domain
import domains.configuration.PlayModule
import domains.events.EventLogger._
import domains.events.EventStore
import domains.events.Events.IzanamiEvent
import libs.streams.CacheableQueue
import domains.errors.IzanamiErrors
import store.datastore.DataStoreLayerContext
import zio.{IO, Task, ZLayer}

import scala.util.Try

object BasicEventStore {
  val live: ZLayer[DataStoreLayerContext, Throwable, EventStore] = ZLayer.fromFunction { mix =>
    implicit val system: ActorSystem = mix.get[PlayModule.Service].system
    new BasicEventStore
  }
}

class BasicEventStore(implicit system: ActorSystem) extends EventStore.Service {
  logger.info("Starting default event store")

  private val queue = CacheableQueue[IzanamiEvent](500, queueBufferSize = 500)
  system.actorOf(EventStreamActor.props(queue))

  override def publish(event: IzanamiEvent): IO[IzanamiErrors, Done] =
    //Already published
    Task {
      system.eventStream.publish(event)
      Done
    }.orDie

  override def events(domains: Seq[Domain],
                      patterns: Seq[String],
                      lastEventId: Option[Long]): Source[IzanamiEvent, NotUsed] =
    lastEventId match {
      case Some(_) =>
        queue.sourceWithCache
          .via(dropUntilLastId(lastEventId))
          .filter(eventMatch(patterns, domains))
      case None =>
        queue.rawSource
          .filter(eventMatch(patterns, domains))
    }

  override def check(): Task[Unit] = IO.succeed(())
}

private[events] object EventStreamActor {
  def props(queue: SourceQueueWithComplete[IzanamiEvent]) = Props(new EventStreamActor(queue))
}

private[events] class EventStreamActor(queue: SourceQueueWithComplete[IzanamiEvent]) extends Actor {
  import context.dispatcher

  override def receive = {
    case e: IzanamiEvent =>
      logger.debug(s"New event : $e")
      queue.offer(e)
  }

  override def preStart(): Unit = {
    queue
      .watchCompletion()
      .onComplete(_ => Try(context.system.eventStream.unsubscribe(self)))
    context.system.eventStream.subscribe(self, classOf[IzanamiEvent])
  }

  override def postStop(): Unit = {
    context.system.eventStream.unsubscribe(self)
    queue.complete()
  }
}
Example 3
Source File: VoiceUDPHandler.scala From AckCord with MIT License
package ackcord.voice

import java.net.InetSocketAddress

import scala.concurrent.duration._
import scala.util.{Failure, Success}

import ackcord.data.{RawSnowflake, UserId}
import akka.NotUsed
import akka.actor.typed._
import akka.actor.typed.scaladsl._
import akka.stream.OverflowStrategy
import akka.stream.scaladsl.{Keep, Sink, Source, SourceQueueWithComplete}
import akka.util.ByteString
import org.slf4j.Logger

object VoiceUDPHandler {

  def apply(
      address: String,
      port: Int,
      ssrc: Int,
      serverId: RawSnowflake,
      userId: UserId,
      soundProducer: Source[ByteString, NotUsed],
      soundConsumer: Sink[AudioAPIMessage, NotUsed],
      parent: ActorRef[VoiceHandler.Command]
  ): Behavior[Command] =
    Behaviors
      .supervise(
        Behaviors.setup[Command] { ctx =>
          implicit val system: ActorSystem[Nothing] = ctx.system

          val ((queue, futIp), watchDone) = soundProducer
            .viaMat(
              VoiceUDPFlow
                .flow(
                  new InetSocketAddress(address, port),
                  ssrc,
                  serverId,
                  userId,
                  Source.queue[Option[ByteString]](0, OverflowStrategy.dropBuffer)
                )
                .watchTermination()(Keep.both)
            )(Keep.right)
            .to(soundConsumer)
            .run()

          ctx.pipeToSelf(futIp) {
            case Success(value) => IPDiscoveryResult(value)
            case Failure(e)     => SendExeption(e)
          }
          ctx.pipeToSelf(watchDone)(_ => ConnectionDied)

          handle(ctx, ctx.log, ssrc, queue, parent)
        }
      )
      .onFailure(
        SupervisorStrategy
          .restartWithBackoff(100.millis, 5.seconds, 1D)
          .withResetBackoffAfter(10.seconds)
          .withMaxRestarts(5)
      )

  def handle(
      ctx: ActorContext[Command],
      log: Logger,
      ssrc: Int,
      queue: SourceQueueWithComplete[Option[ByteString]],
      parent: ActorRef[VoiceHandler.Command]
  ): Behavior[Command] = Behaviors.receiveMessage {
    case SendExeption(e) => throw e
    case ConnectionDied  => Behaviors.stopped
    case Shutdown =>
      queue.complete()
      Behaviors.same
    case IPDiscoveryResult(VoiceUDPFlow.FoundIP(localAddress, localPort)) =>
      parent ! VoiceHandler.GotLocalIP(localAddress, localPort)
      Behaviors.same
    case SetSecretKey(key) =>
      queue.offer(key)
      Behaviors.same
  }

  sealed trait Command

  case object Shutdown extends Command

  private case class SendExeption(e: Throwable) extends Command
  private case object ConnectionDied extends Command
  private case class IPDiscoveryResult(foundIP: VoiceUDPFlow.FoundIP) extends Command
  private[voice] case class SetSecretKey(key: Option[ByteString]) extends Command
}
Example 4
Source File: UseCaseSupport.scala From akka-ddd-cqrs-es-example with MIT License
package com.github.j5ik2o.bank.useCase

import akka.{ Done, NotUsed }
import akka.stream.{ FlowShape, QueueOfferResult }
import akka.stream.scaladsl.{ Flow, GraphDSL, Sink, SourceQueueWithComplete, Unzip, Zip }

import scala.concurrent.{ ExecutionContext, Future, Promise }

object UseCaseSupport {

  implicit class FlowOps[A, B](val self: Flow[A, B, NotUsed]) extends AnyVal {
    def zipPromise: Flow[(A, Promise[B]), (B, Promise[B]), NotUsed] =
      Flow
        .fromGraph(GraphDSL.create() { implicit b =>
          import GraphDSL.Implicits._
          val unzip = b.add(Unzip[A, Promise[B]])
          val zip   = b.add(Zip[B, Promise[B]])
          unzip.out0 ~> self ~> zip.in0
          unzip.out1 ~> zip.in1
          FlowShape(unzip.in, zip.out)
        })
  }

}

trait UseCaseSupport {

  protected def offerToQueue[A, B](
      sourceQueue: SourceQueueWithComplete[(A, Promise[B])]
  )(request: A, promise: Promise[B])(implicit ec: ExecutionContext): Future[B] = {
    sourceQueue.offer((request, promise)).flatMap {
      case QueueOfferResult.Enqueued =>
        promise.future
      case QueueOfferResult.Failure(t) =>
        Future.failed(new Exception("Failed to offer request", t))
      case QueueOfferResult.Dropped =>
        Future.failed(
          new Exception(
            s"Failed to enqueue resolve request, the queue buffer was full, please check the bank.interface.buffer-size setting"
          )
        )
      case QueueOfferResult.QueueClosed =>
        Future.failed(new Exception("Failed to enqueue request batch write, the queue was closed"))
    }
  }

  protected def completePromiseSink[T]: Sink[(T, Promise[T]), Future[Done]] = Sink.foreach {
    case (response, promise) =>
      promise.success(response)
  }
}
Example 5
Source File: BankAccountReadModelUseCase.scala From akka-ddd-cqrs-es-example with MIT License
package com.github.j5ik2o.bank.useCase

import akka.actor.ActorSystem
import akka.stream.scaladsl.{ Flow, Keep, Sink, Source, SourceQueueWithComplete }
import akka.stream.{ ActorMaterializer, OverflowStrategy }
import akka.{ Done, NotUsed }
import com.github.j5ik2o.bank.domain.model._
import com.github.j5ik2o.bank.useCase.BankAccountAggregateUseCase.Protocol.{
  ResolveBankAccountEventsRequest,
  ResolveBankAccountEventsResponse
}
import com.github.j5ik2o.bank.useCase.port.{ BankAccountReadModelFlows, JournalReader }
import pureconfig._

import scala.concurrent.{ ExecutionContext, ExecutionContextExecutor, Future, Promise }

class BankAccountReadModelUseCase(bankAccountReadModelFlows: BankAccountReadModelFlows, journalReader: JournalReader)(
    implicit val system: ActorSystem
) extends UseCaseSupport {

  import UseCaseSupport._

  private val config = loadConfigOrThrow[BankAccountAggregateUseCaseConfig]("bank.use-case.bank-account-use-case")

  private val bufferSize: Int = config.bufferSize

  private implicit val mat: ActorMaterializer = ActorMaterializer()
  private implicit val ec: ExecutionContextExecutor = system.dispatcher

  def resolveBankAccountEventsById(
      request: ResolveBankAccountEventsRequest
  )(implicit ec: ExecutionContext): Future[ResolveBankAccountEventsResponse] =
    offerToQueue(resolveBankAccountEventQueue)(request, Promise())

  private lazy val resolveBankAccountEventQueue
    : SourceQueueWithComplete[(ResolveBankAccountEventsRequest, Promise[ResolveBankAccountEventsResponse])] =
    Source
      .queue[(ResolveBankAccountEventsRequest, Promise[ResolveBankAccountEventsResponse])](bufferSize,
                                                                                           OverflowStrategy.dropNew)
      .via(bankAccountReadModelFlows.resolveBankAccountEventByIdFlow.zipPromise)
      .toMat(completePromiseSink)(Keep.left)
      .run()

  private val projectionFlow: Flow[(BankAccountEvent, Long), Int, NotUsed] =
    Flow[(BankAccountEvent, Long)].flatMapConcat {
      case (event: BankAccountOpened, sequenceNr: Long) =>
        Source
          .single((event.bankAccountId, event.name.value, sequenceNr, event.occurredAt))
          .via(bankAccountReadModelFlows.openBankAccountFlow)
      case (event: BankAccountEventUpdated, sequenceNr: Long) =>
        Source
          .single((event.bankAccountId, event.name.value, sequenceNr, event.occurredAt))
          .via(bankAccountReadModelFlows.updateAccountFlow)
      case (event: BankAccountDeposited, sequenceNr: Long) =>
        Source
          .single((event.bankAccountId, event.deposit, sequenceNr, event.occurredAt))
          .via(bankAccountReadModelFlows.depositBankAccountFlow)
      case (event: BankAccountWithdrawn, sequenceNr: Long) =>
        Source
          .single((event.bankAccountId, event.withdraw, sequenceNr, event.occurredAt))
          .via(bankAccountReadModelFlows.withdrawBankAccountFlow)
      case (event: BankAccountClosed, sequenceNr: Long) =>
        Source
          .single((event.bankAccountId, sequenceNr, event.occurredAt))
          .via(bankAccountReadModelFlows.closeBankAccountFlow)
    }

  def execute(): Future[Done] = {
    bankAccountReadModelFlows.resolveLastSeqNrSource
      .flatMapConcat { lastSeqNr =>
        journalReader.eventsByTagSource(classOf[BankAccountEvent].getName, lastSeqNr + 1)
      }
      .map { eventBody =>
        (eventBody.event.asInstanceOf[BankAccountEvent], eventBody.sequenceNr)
      }
      .via(projectionFlow)
      .toMat(Sink.ignore)(Keep.right)
      .run()
  }
}
Example 6
Source File: SourceActor.scala From Akka-Cookbook with MIT License
package com.packt.chapter8

import akka.actor.{Actor, Props}
import akka.stream.scaladsl.SourceQueueWithComplete

import scala.concurrent.duration._

object SourceActor {
  case object Tick
  def props(sourceQueue: SourceQueueWithComplete[String]) = Props(new SourceActor(sourceQueue))
}

class SourceActor(sourceQueue: SourceQueueWithComplete[String]) extends Actor {
  import SourceActor._
  import context.dispatcher

  override def preStart() = {
    context.system.scheduler.schedule(0 seconds, 5 seconds, self, Tick)
  }

  def receive = {
    case Tick =>
      println(s"Offering element from SourceActor")
      sourceQueue.offer("Integrating!!### Akka$$$ Actors? with}{ Akka** Streams")
  }
}
Example 7
Source File: ProducerStreamManagerSpec.scala From reactive-kafka-microservice-template with Apache License 2.0
package akka.kafka

import akka.actor.ActorSystem
import akka.stream.scaladsl.SourceQueueWithComplete
import akka.testkit.{DefaultTimeout, ImplicitSender, TestActorRef, TestKit, TestProbe}
import com.omearac.producers.ProducerStreamManager
import com.omearac.producers.ProducerStreamManager.InitializeProducerStream
import com.omearac.shared.AkkaStreams
import com.omearac.shared.EventMessages.ActivatedProducerStream
import com.omearac.shared.KafkaMessages.{ExampleAppEvent, KafkaMessage}
import org.scalatest.{BeforeAndAfterAll, Matchers, WordSpecLike}

class ProducerStreamManagerSpec extends TestKit(ActorSystem("ProducerStreamManagerSpec"))
  with DefaultTimeout with ImplicitSender
  with WordSpecLike with Matchers with BeforeAndAfterAll
  with AkkaStreams {

  val testProducerStreamManager = TestActorRef(new ProducerStreamManager)
  val producerStreamManagerActor = testProducerStreamManager.underlyingActor

  //Create a test event listener for the local message bus
  val testEventListener = TestProbe()
  system.eventStream.subscribe(testEventListener.ref, classOf[ExampleAppEvent])

  override def afterAll: Unit = {
    shutdown()
  }

  "Sending InitializeProducerStream(self, KafkaMessage) to ProducerStreamManager" should {
    "initialize the stream for that particular message type, return ActivatedProducerStream(streamRef, \"TempChannel1\") and produce local event " in {
      testProducerStreamManager ! InitializeProducerStream(self, KafkaMessage)
      Thread.sleep(500)
      var streamRef: SourceQueueWithComplete[Any] = null
      expectMsgPF() {
        case ActivatedProducerStream(sr, kt) =>
          if (kt == "TempChannel1") { streamRef = sr; () } else fail()
      }

      Thread.sleep(500)
      val resultMessage = ActivatedProducerStream(streamRef, "TempChannel1")
      testEventListener.expectMsgPF() {
        case ExampleAppEvent(_, _, m) => if (m == resultMessage.toString) () else fail()
      }
    }
  }

  "Sending InitializeProducerStream(self, ExampleAppEvent) to ProducerStreamManager" should {
    "initialize the stream for that particular message type, return ActivatedProducerStream(streamRef, \"TempChannel2\") and produce local event " in {
      testProducerStreamManager ! InitializeProducerStream(self, ExampleAppEvent)
      Thread.sleep(500)
      var streamRef: SourceQueueWithComplete[Any] = null
      expectMsgPF() {
        case ActivatedProducerStream(sr, kt) =>
          if (kt == "TempChannel2") { streamRef = sr; () } else fail()
      }

      Thread.sleep(500)
      val resultMessage = ActivatedProducerStream(streamRef, "TempChannel2")
      testEventListener.expectMsgPF() {
        case ExampleAppEvent(_, _, m) => if (m == resultMessage.toString) () else fail()
      }
    }
  }
}
Example 8
Source File: DataProducerSpec.scala From reactive-kafka-microservice-template with Apache License 2.0
package akka.kafka

import akka.Done
import akka.actor.ActorSystem
import akka.stream.QueueOfferResult
import akka.stream.QueueOfferResult.Enqueued
import akka.stream.scaladsl.SourceQueueWithComplete
import akka.testkit.{DefaultTimeout, EventFilter, ImplicitSender, TestActorRef, TestKit, TestProbe}
import com.omearac.producers.DataProducer
import com.omearac.producers.DataProducer.PublishMessages
import com.omearac.shared.AkkaStreams
import com.omearac.shared.EventMessages.{ActivatedProducerStream, MessagesPublished}
import com.omearac.shared.KafkaMessages.ExampleAppEvent
import com.typesafe.config.ConfigFactory
import org.scalatest.{BeforeAndAfterAll, Matchers, WordSpecLike}

import scala.concurrent.Future

class DataProducerSpec extends TestKit(ActorSystem("DataProducerSpec",
  ConfigFactory.parseString("""
    akka.loggers = ["akka.testkit.TestEventListener"]
    """)))
  with DefaultTimeout with ImplicitSender
  with WordSpecLike with Matchers with BeforeAndAfterAll
  with AkkaStreams {

  val testProducer = TestActorRef(new DataProducer)
  val producerActor = testProducer.underlyingActor

  val mockProducerStream: SourceQueueWithComplete[Any] = new SourceQueueWithComplete[Any] {
    override def complete(): Unit = println("complete")

    override def fail(ex: Throwable): Unit = println("fail")

    override def offer(elem: Any): Future[QueueOfferResult] = Future { Enqueued }

    override def watchCompletion(): Future[Done] = Future { Done }
  }

  override def afterAll: Unit = {
    shutdown()
  }

  //Create a test event listener for the local message bus
  val testEventListener = TestProbe()
  system.eventStream.subscribe(testEventListener.ref, classOf[ExampleAppEvent])

  "Sending ActivatedProducerStream to DataProducer in receive state" should {
    "save the stream ref and change state to producing " in {
      testProducer ! ActivatedProducerStream(mockProducerStream, "TestTopic")
      Thread.sleep(500)
      producerActor.producerStream should be(mockProducerStream)
      EventFilter.error(message = "DataProducer got the unknown message while producing: testMessage", occurrences = 1) intercept {
        testProducer ! "testMessage"
      }
    }
  }

  "Sending PublishMessages(number: Int) to DataProducer in publishData state" should {
    "return MessagesPublished(number: Int) and publish the local event " in {
      val producing = producerActor.publishData
      producerActor.context.become(producing)
      producerActor.producerStream = mockProducerStream
      val resultMessage = MessagesPublished(5)
      testProducer ! PublishMessages(5)

      expectMsg(resultMessage)
      testEventListener.expectMsgPF() {
        case ExampleAppEvent(_, _, m) => if (m == resultMessage.toString) () else fail()
      }
    }
  }
}
Example 9
Source File: EventProducerSpec.scala From reactive-kafka-microservice-template with Apache License 2.0
package akka.kafka

import java.util.Date

import akka.Done
import akka.actor.ActorSystem
import akka.serialization.Serialization
import akka.stream.QueueOfferResult
import akka.stream.QueueOfferResult.Enqueued
import akka.stream.scaladsl.SourceQueueWithComplete
import akka.testkit.{DefaultTimeout, EventFilter, ImplicitSender, TestActorRef, TestKit, TestProbe}
import com.omearac.producers.EventProducer
import com.omearac.shared.AkkaStreams
import com.omearac.shared.EventMessages.{ActivatedProducerStream, MessagesPublished}
import com.omearac.shared.KafkaMessages.ExampleAppEvent
import com.typesafe.config.ConfigFactory
import org.scalatest.{BeforeAndAfterAll, Matchers, WordSpecLike}

import scala.concurrent.Future

class EventProducerSpec extends TestKit(ActorSystem("EventProducerSpec",
  ConfigFactory.parseString("""
    akka.loggers = ["akka.testkit.TestEventListener"]
    """)))
  with DefaultTimeout with ImplicitSender
  with WordSpecLike with Matchers with BeforeAndAfterAll
  with AkkaStreams {

  val testProducer = TestActorRef(new EventProducer)
  val producerActor = testProducer.underlyingActor

  val mockProducerStream: SourceQueueWithComplete[Any] = new SourceQueueWithComplete[Any] {
    override def complete(): Unit = println("complete")

    override def fail(ex: Throwable): Unit = println("fail")

    override def offer(elem: Any): Future[QueueOfferResult] = Future { Enqueued }

    override def watchCompletion(): Future[Done] = Future { Done }
  }

  override def afterAll: Unit = {
    shutdown()
  }

  //Create a test event listener for the local message bus
  val testEventListener = TestProbe()
  system.eventStream.subscribe(testEventListener.ref, classOf[ExampleAppEvent])

  "Sending ActivatedProducerStream to EventProducer in receive state" should {
    "save the stream ref and change state to producing " in {
      testProducer ! ActivatedProducerStream(mockProducerStream, "TestTopic")
      Thread.sleep(500)
      producerActor.producerStream should be(mockProducerStream)
      EventFilter.error(message = "EventProducer got the unknown message while producing: testMessage", occurrences = 1) intercept {
        testProducer ! "testMessage"
      }
    }
  }

  "Sending ExampleAppEvent to system bus while EventProducer is in publishEvent state" should {
    "offer the ExampleAppEvent to the stream " in {
      val producingState = producerActor.publishEvent
      producerActor.context.become(producingState)
      producerActor.producerStream = mockProducerStream
      val dateFormat = new java.text.SimpleDateFormat("dd:MM:yy:HH:mm:ss.SSS")
      lazy val timetag = dateFormat.format(new Date(System.currentTimeMillis()))
      val eventMsg = MessagesPublished(5)
      val testMessage = ExampleAppEvent(timetag, Serialization.serializedActorPath(self), eventMsg.toString)
      system.eventStream.publish(testMessage)
      testEventListener.expectMsgPF() {
        case ExampleAppEvent(_, _, m) => if (m == eventMsg.toString) () else fail()
      }
    }
  }
}
Example 10
Source File: HTTPInterfaceSpec.scala From reactive-kafka-microservice-template with Apache License 2.0
package akka

import akka.event.Logging
import akka.http.scaladsl.testkit.ScalatestRouteTest
import akka.stream.QueueOfferResult
import akka.stream.QueueOfferResult.Enqueued
import akka.stream.scaladsl.SourceQueueWithComplete
import akka.testkit.{TestActorRef, TestProbe}
import com.omearac.consumers.{DataConsumer, EventConsumer}
import com.omearac.http.routes.{ConsumerCommands, ProducerCommands}
import com.omearac.producers.DataProducer
import org.scalatest.{Matchers, WordSpec}

import scala.concurrent.Future

class HTTPInterfaceSpec extends WordSpec
  with Matchers with ScalatestRouteTest
  with ConsumerCommands with ProducerCommands {

  val log = Logging(system, this.getClass.getName)

  //Mocks for DataConsumer Tests
  val dataConsumer = TestActorRef(new DataConsumer)
  val manager = TestProbe()
  dataConsumer.underlyingActor.consumerStreamManager = manager.ref

  //Mocks for EventConsumer Tests
  val eventConsumer = TestActorRef(new EventConsumer)
  eventConsumer.underlyingActor.consumerStreamManager = manager.ref

  //Mocks for DataProducer Tests
  val dataProducer = TestActorRef(new DataProducer)
  val mockProducerStream: SourceQueueWithComplete[Any] = new SourceQueueWithComplete[Any] {
    override def complete(): Unit = println("complete")

    override def fail(ex: Throwable): Unit = println("fail")

    override def offer(elem: Any): Future[QueueOfferResult] = Future { Enqueued }

    override def watchCompletion(): Future[Done] = Future { Done }
  }

  "The HTTP interface to control the DataConsumerStream" should {
    "return a Already Stopped message for GET requests to /data_consumer/stop" in {
      Get("/data_consumer/stop") ~> dataConsumerHttpCommands ~> check {
        responseAs[String] shouldEqual "Data Consumer Stream Already Stopped"
      }
    }

    "return a Stream Started response for GET requests to /data_consumer/start" in {
      Get("/data_consumer/start") ~> dataConsumerHttpCommands ~> check {
        responseAs[String] shouldEqual "Data Consumer Stream Started"
      }
    }
  }

  "The HTTP interface to control the EventConsumerStream" should {
    "return a Already Stopped message for GET requests to /event_consumer/stop" in {
      Get("/event_consumer/stop") ~> eventConsumerHttpCommands ~> check {
        responseAs[String] shouldEqual "Event Consumer Stream Already Stopped"
      }
    }

    "return a Stream Started response for GET requests to /event_consumer/start" in {
      Get("/event_consumer/start") ~> eventConsumerHttpCommands ~> check {
        responseAs[String] shouldEqual "Event Consumer Stream Started"
      }
    }
  }

  "The HTTP interface to tell the DataProducer Actor to publish messages to Kafka" should {
    "return a Messages Produced message for GET requests to /data_producer/produce/10" in {
      dataProducer.underlyingActor.producerStream = mockProducerStream
      val producing = dataProducer.underlyingActor.publishData
      dataProducer.underlyingActor.context.become(producing)

      Get("/data_producer/produce/10") ~> producerHttpCommands ~> check {
        responseAs[String] shouldEqual "10 messages Produced as Ordered, Boss!"
      }
    }
  }
}
Example 11
Source File: TrackerImplTest.scala From daml with Apache License 2.0
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.platform.apiserver.services.tracking

import akka.NotUsed
import akka.stream.OverflowStrategy
import akka.stream.scaladsl.{Keep, Source, SourceQueueWithComplete}
import akka.stream.testkit.TestSubscriber
import akka.stream.testkit.scaladsl.TestSink
import com.daml.ledger.api.testing.utils.{
  AkkaBeforeAndAfterAll,
  IsStatusException,
  TestingException
}
import com.daml.ledger.api.v1.command_service.SubmitAndWaitRequest
import com.daml.ledger.api.v1.commands.Commands
import com.daml.ledger.api.v1.completion.Completion
import com.daml.dec.DirectExecutionContext
import com.google.rpc.status.{Status => RpcStatus}
import io.grpc.Status
import org.scalatest.concurrent.ScalaFutures
import org.scalatest.{BeforeAndAfterEach, Matchers, Succeeded, WordSpec}

import scala.concurrent.ExecutionContext.Implicits.global

class TrackerImplTest
    extends WordSpec
    with Matchers
    with BeforeAndAfterEach
    with ScalaFutures
    with AkkaBeforeAndAfterAll {

  private var sut: Tracker = _
  private var consumer: TestSubscriber.Probe[NotUsed] = _
  private var queue: SourceQueueWithComplete[TrackerImpl.QueueInput] = _

  private def input(cid: Int) = SubmitAndWaitRequest(Some(Commands(commandId = cid.toString)))

  override protected def beforeEach(): Unit = {
    val (q, sink) = Source
      .queue[TrackerImpl.QueueInput](1, OverflowStrategy.dropNew)
      .map { in =>
        in.context.success(Completion(in.value.getCommands.commandId, Some(RpcStatus())))
        NotUsed
      }
      .toMat(TestSink.probe[NotUsed])(Keep.both)
      .run()
    queue = q
    sut = new TrackerImpl(q)
    consumer = sink
  }

  override protected def afterEach(): Unit = {
    consumer.cancel()
    queue.complete()
  }

  "Tracker Implementation" when {

    "input is submitted, and the queue is available" should {

      "work successfully" in {
        val resultF1 = sut.track(input(1))
        consumer.requestNext()
        val resultF = resultF1.flatMap(_ => sut.track(input(2)))(DirectExecutionContext)
        consumer.requestNext()
        whenReady(resultF)(_ => Succeeded)
      }
    }

    "input is submitted, and the queue is backpressuring" should {

      "return a RESOURCE_EXHAUSTED error" in {
        sut.track(input(1))
        whenReady(sut.track(input(2)).failed)(IsStatusException(Status.RESOURCE_EXHAUSTED))
      }
    }

    "input is submitted, and the queue has been completed" should {

      "return an ABORTED error" in {
        queue.complete()
        whenReady(sut.track(input(2)).failed)(IsStatusException(Status.ABORTED))
      }
    }

    "input is submitted, and the queue has failed" should {

      "return an ABORTED error" in {
        queue.fail(TestingException("The queue fails with this error."))
        whenReady(sut.track(input(2)).failed)(IsStatusException(Status.ABORTED))
      }
    }
  }
}
Example 12
Source File: EventProducer.scala From reactive-kafka-microservice-template with Apache License 2.0
package com.omearac.producers

import akka.actor.{Actor, Props}
import akka.event.Logging
import akka.stream.scaladsl.SourceQueueWithComplete
import com.omearac.shared.EventMessages.ActivatedProducerStream
import com.omearac.shared.EventSourcing
import com.omearac.shared.KafkaMessages.ExampleAppEvent

object EventProducer {
  def props: Props = Props(new EventProducer)
}

class EventProducer extends Actor with EventSourcing {
  import context._
  implicit val system = context.system
  val log = Logging(system, this.getClass.getName)
  var producerStream: SourceQueueWithComplete[Any] = null

  val subscribedMessageTypes = Seq(classOf[ExampleAppEvent])

  override def preStart(): Unit = {
    super.preStart()
    subscribedMessageTypes.foreach(system.eventStream.subscribe(self, _))
  }

  override def postStop(): Unit = {
    subscribedMessageTypes.foreach(system.eventStream.unsubscribe(self, _))
    super.postStop()
  }

  def receive: Receive = {
    case ActivatedProducerStream(streamRef, _) =>
      producerStream = streamRef
      become(publishEvent)
    case msg: ExampleAppEvent =>
      if (producerStream == null) self ! msg else producerStream.offer(msg)
    case other => log.error("EventProducer got the unknown message while in idle: " + other)
  }

  def publishEvent: Receive = {
    case msg: ExampleAppEvent => producerStream.offer(msg)
    case other => log.error("EventProducer got the unknown message while producing: " + other)
  }
}
Example 13
Source File: Messages.scala From reactive-kafka-microservice-template with Apache License 2.0
package com.omearac.shared

import akka.stream.scaladsl.SourceQueueWithComplete

object EventMessages {
  abstract class EventMessage

  case class ActivatedConsumerStream(kafkaTopic: String) extends EventMessage
  case class TerminatedConsumerStream(kafkaTopic: String) extends EventMessage
  case class ActivatedProducerStream[msgType](producerStream: SourceQueueWithComplete[msgType], kafkaTopic: String) extends EventMessage
  case class MessagesPublished(numberOfMessages: Int) extends EventMessage
  case class FailedMessageConversion(kafkaTopic: String, msg: String, msgType: String) extends EventMessage
}

object KafkaMessages {
  case class KafkaMessage(time: String, subject: String, item: Int)
  case class ExampleAppEvent(time: String, senderID: String, eventType: String)
}
Example 14
Source File: QueryProgress.scala From clickhouse-scala-client with GNU Lesser General Public License v3.0
package com.crobox.clickhouse.internal.progress

import akka.NotUsed
import akka.stream.scaladsl.{BroadcastHub, Keep, RunnableGraph, Source, SourceQueueWithComplete}
import akka.stream.{ActorAttributes, OverflowStrategy, Supervision}
import com.typesafe.scalalogging.LazyLogging
import spray.json._
import spray.json.DefaultJsonProtocol._

import scala.util.{Failure, Success, Try}

object QueryProgress extends LazyLogging {

  sealed trait QueryProgress
  case object QueryAccepted extends QueryProgress
  case object QueryFinished extends QueryProgress
  case object QueryRejected extends QueryProgress
  case class QueryFailed(cause: Throwable) extends QueryProgress
  case class QueryRetry(cause: Throwable, retryNumber: Int) extends QueryProgress

  case class ClickhouseQueryProgress(identifier: String, progress: QueryProgress)
  case class Progress(rowsRead: Long, bytesRead: Long, rowsWritten: Long, bytesWritten: Long, totalRows: Long) extends QueryProgress

  def queryProgressStream: RunnableGraph[(SourceQueueWithComplete[String], Source[ClickhouseQueryProgress, NotUsed])] =
    Source
      .queue[String](1000, OverflowStrategy.dropHead)
      .map[Option[ClickhouseQueryProgress]](queryAndProgress => {
        queryAndProgress.split("\n", 2).toList match {
          case queryId :: ProgressHeadersAsEventsStage.AcceptedMark :: Nil =>
            Some(ClickhouseQueryProgress(queryId, QueryAccepted))
          case queryId :: progressJson :: Nil =>
            Try {
              progressJson.parseJson match {
                case JsObject(fields) if fields.size == 3 =>
                  ClickhouseQueryProgress(
                    queryId,
                    Progress(
                      fields("read_rows").convertTo[String].toLong,
                      fields("read_bytes").convertTo[String].toLong,
                      0,
                      0,
                      fields("total_rows").convertTo[String].toLong
                    )
                  )
                case JsObject(fields) if fields.size == 5 =>
                  ClickhouseQueryProgress(
                    queryId,
                    Progress(
                      fields("read_rows").convertTo[String].toLong,
                      fields("read_bytes").convertTo[String].toLong,
                      fields("written_rows").convertTo[String].toLong,
                      fields("written_bytes").convertTo[String].toLong,
                      fields("total_rows_to_read").convertTo[String].toLong
                    )
                  )
                case _ => throw new IllegalArgumentException(s"Cannot extract progress from $progressJson")
              }
            } match {
              case Success(value) => Some(value)
              case Failure(exception) =>
                logger.warn(s"Failed to parse json $progressJson", exception)
                None
            }
          case other @ _ =>
            logger.warn(s"Could not get progress from $other")
            None
        }
      })
      .collect {
        case Some(progress) => progress
      }
      .withAttributes(ActorAttributes.supervisionStrategy({
        case ex @ _ =>
          logger.warn("Detected failure in the query progress stream, resuming operation.", ex)
          Supervision.Resume
      }))
      .toMat(BroadcastHub.sink)(Keep.both)
}
Example 15
Source File: ParametrizedFlow.scala From akka_streams_tutorial with MIT License
package sample.stream_shared_state

import akka.Done
import akka.actor.{ActorSystem, Cancellable}
import akka.stream.scaladsl.{Flow, GraphDSL, Keep, Sink, Source, SourceQueueWithComplete, Zip}
import akka.stream.{FlowShape, OverflowStrategy}

import scala.collection.immutable
import scala.concurrent.Future
import scala.concurrent.duration._
import scala.util.{Failure, Success}

object ParametrizedFlow extends App {
  val service = ParameterizedFlowService

  Thread.sleep(5000)
  service.update(1.0)

  Thread.sleep(2000)
  service.update(1.5)
  Thread.sleep(2000)
  service.cancel()
  Thread.sleep(2000)

  println(service.result())
}

object ParameterizedFlowService {
  implicit val system = ActorSystem("ParameterizedFlowService")
  implicit val executionContext = system.dispatcher

  def update(element: Double): Unit = flow._1._2.offer(element)

  def cancel(): Boolean = flow._1._1.cancel()

  def result(): Future[Seq[Double]] = flow._2

  val fun = (flowValue: Int, paramValue: Double) => flowValue * paramValue
  val flow: ((Cancellable, SourceQueueWithComplete[Double]), Future[immutable.Seq[Double]]) =
    Source.tick(0.seconds, 500.millis, 10)
      .viaMat(createParamFlow(1, OverflowStrategy.dropBuffer, 0.5)(fun))(Keep.both)
      .wireTap(x => println(x))
      .toMat(Sink.seq)(Keep.both)
      .run()

  val done: Future[Done] = flow._1._2.watchCompletion()
  terminateWhen(done)

  private def createParamFlow[A, P, O](bufferSize: Int, overflowStrategy: OverflowStrategy, initialParam: P)(fun: (A, P) => O) =
    Flow.fromGraph(GraphDSL.create(Source.queue[P](bufferSize, overflowStrategy)) { implicit builder => queue =>
      import GraphDSL.Implicits._
      val zip = builder.add(Zip[A, P]())
      //Interesting use of the extrapolate operator
      //based on https://doc.akka.io/docs/akka/current/stream/stream-rate.html#understanding-extrapolate-and-expand
      val extra = builder.add(Flow[P].extrapolate(Iterator.continually(_), Some(initialParam)))
      val map = builder.add(Flow[(A, P)].map(r => fun(r._1, r._2)))

      queue ~> extra ~> zip.in1
      zip.out ~> map
      FlowShape(zip.in0, map.out)
    })

  private def terminateWhen(done: Future[_]) = {
    done.onComplete {
      case Success(_) =>
        println("Flow Success. About to terminate...")
        system.terminate()
      case Failure(e) =>
        println(s"Flow Failure: $e. About to terminate...")
        system.terminate()
    }
  }
}
Example 16
Source File: PublishToSourceQueueFromMultipleThreads.scala From akka_streams_tutorial with MIT License
package sample.stream

import akka.actor.ActorSystem
import akka.stream.Supervision.Decider
import akka.stream._
import akka.stream.scaladsl.{Flow, Sink, Source, SourceQueueWithComplete}
import akka.{Done, NotUsed}
import org.slf4j.{Logger, LoggerFactory}

import scala.concurrent.Future
import scala.concurrent.duration._
import scala.util.{Failure, Success}

object PublishToSourceQueueFromMultipleThreads extends App {
  val logger: Logger = LoggerFactory.getLogger(this.getClass)
  implicit val system = ActorSystem("PublishToSourceQueueFromMultipleThreads")
  implicit val ec = system.dispatcher

  val bufferSize = 100
  // As of akka 2.6.x there is a thread safe implementation for SourceQueue
  val maxConcurrentOffers = 1000
  val numberOfPublishingClients = 1000

  val slowSink: Sink[Seq[Int], NotUsed] =
    Flow[Seq[Int]]
      .delay(2.seconds, DelayOverflowStrategy.backpressure)
      .to(Sink.foreach(e => logger.info(s"Reached sink: $e")))

  val sourceQueue: SourceQueueWithComplete[Int] =
    Source
      .queue[Int](bufferSize, OverflowStrategy.backpressure, maxConcurrentOffers)
      .groupedWithin(10, 1.seconds)
      .to(slowSink)
      .run

  val doneConsuming: Future[Done] = sourceQueue.watchCompletion()
  signalWhen(doneConsuming, "consuming") //never completes

  simulatePublishingFromMultipleThreads()

  // Before 2.6.x a stream had to be used to throttle and control the backpressure
  //simulatePublishingClientsFromStream()

  // Decide on the stream level, because the OverflowStrategy.backpressure
  // on the sourceQueue causes an IllegalStateException
  // Handling this on the stream level allows to restart the stream
  private def simulatePublishingClientsFromStream() = {

    val decider: Decider = {
      case _: IllegalStateException => println("Got backpressure signal for offered element, restart..."); Supervision.Restart
      case _ => Supervision.Stop
    }

    val donePublishing: Future[Done] = Source(1 to numberOfPublishingClients)
      .mapAsync(10)(offerToSourceQueue) //throttle
      .withAttributes(ActorAttributes.supervisionStrategy(decider))
      .runWith(Sink.ignore)
    signalWhen(donePublishing, "publishing")
  }

  private def simulatePublishingFromMultipleThreads() =
    (1 to numberOfPublishingClients).par.foreach(offerToSourceQueue)

  private def offerToSourceQueue(each: Int) = {
    sourceQueue.offer(each).map {
      case QueueOfferResult.Enqueued => logger.info(s"enqueued $each")
      case QueueOfferResult.Dropped => logger.info(s"dropped $each")
      case QueueOfferResult.Failure(ex) => logger.info(s"Offer failed: $ex")
      case QueueOfferResult.QueueClosed => logger.info("Source Queue closed")
    }
  }

  private def signalWhen(done: Future[Done], operation: String) = {
    done.onComplete {
      case Success(b) => logger.info(s"Finished: $operation")
      case Failure(e) =>
        logger.info(s"Failure: $e About to terminate...")
        system.terminate()
    }
  }
}
Example 17
Source File: FailedSource.scala From intro-to-akka-streams with Apache License 2.0
package com.github.dnvriend.streams.source

import akka.Done
import akka.stream.OverflowStrategy
import akka.stream.scaladsl.{ Keep, Merge, Sink, Source, SourceQueueWithComplete }
import com.github.dnvriend.streams.TestSpec

import scala.concurrent.Future
import scala.concurrent.duration._
import scala.collection.immutable._

class FailedSource extends TestSpec {
  it should "fail the stream" in {
    Source.failed[Int](new RuntimeException("test error")).testProbe { tp ⇒
      tp.request(Long.MaxValue)
      tp.expectError()
    }
  }

  it should "complete a stream" in {
    val (queue: SourceQueueWithComplete[Int], done: Future[Done]) =
      Source.queue[Int](1, OverflowStrategy.dropNew)
        .toMat(Sink.ignore)(Keep.both).run
    queue.complete()
    done.toTry should be a 'success
  }

  it should "complete a stream normally" in {
    val (queue: SourceQueueWithComplete[String], done: Future[Seq[String]]) =
      Source.queue[String](1, OverflowStrategy.dropNew).flatMapConcat {
        case "stop" ⇒ Source.failed(new RuntimeException("test error"))
        case str    ⇒ Source.single(str)
      }.toMat(Sink.seq)(Keep.both).run

    Thread.sleep(3000)
    queue.offer("foo").futureValue
    queue.offer("bar").futureValue
    queue.complete()
    done.futureValue shouldBe List("foo", "bar")
  }

  it should "force stop a stream with an error" in {
    val (queue: SourceQueueWithComplete[String], done: Future[Seq[String]]) =
      Source.queue[String](1, OverflowStrategy.dropNew).flatMapConcat {
        case "stop" ⇒ Source.failed(new RuntimeException("test error"))
        case str    ⇒ Source.single(str)
      }.toMat(Sink.seq)(Keep.both).run

    Thread.sleep(3000)
    queue.offer("stop").futureValue
    done.toTry should be a 'failure
  }
}
Example 18
Source File: QueueSourceTest.scala From intro-to-akka-streams with Apache License 2.0
package com.github.dnvriend.streams.source

import akka.stream.OverflowStrategy
import akka.stream.scaladsl.{ Keep, Sink, Source, SourceQueueWithComplete }
import com.github.dnvriend.streams.TestSpec

import scala.collection.immutable._
import scala.concurrent.Future

class QueueSourceTest extends TestSpec {
  it should "queue a b and c and return Seq(a, b, c)" in {
    val (queue: SourceQueueWithComplete[String], xs: Future[Seq[String]]) =
      Source.queue[String](Int.MaxValue, OverflowStrategy.backpressure).toMat(Sink.seq)(Keep.both).run()

    queue.offer("a").toTry should be a 'success // offer 'a' to stream
    queue.offer("b").toTry should be a 'success // b
    queue.offer("c").toTry should be a 'success // and c

    // complete the queue
    queue.complete()
    queue.watchCompletion().toTry should be a 'success

    // get the results of the stream
    xs.futureValue shouldEqual Seq("a", "b", "c")
    xs.futureValue should not equal Seq("c", "b", "a")
  }
}
Example 19
Source File: BatchingQueue.scala From daml with Apache License 2.0
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.ledger.participant.state.kvutils.api

import java.io.Closeable
import java.util.concurrent.atomic.AtomicBoolean

import akka.stream.scaladsl.{Sink, Source, SourceQueueWithComplete}
import akka.stream.{Materializer, OverflowStrategy, QueueOfferResult}
import com.daml.ledger.participant.state.kvutils.DamlKvutils.DamlSubmissionBatch
import com.daml.ledger.participant.state.v1.SubmissionResult

import scala.concurrent.Future
import scala.concurrent.duration._

object BatchingQueue {
  type CommitBatchFunction =
    Seq[DamlSubmissionBatch.CorrelatedSubmission] => Future[Unit]
}

case class DefaultBatchingQueue(
    maxQueueSize: Int,
    maxBatchSizeBytes: Long,
    maxWaitDuration: FiniteDuration,
    maxConcurrentCommits: Int
) extends BatchingQueue {

  private val queue: Source[
    Seq[DamlSubmissionBatch.CorrelatedSubmission],
    SourceQueueWithComplete[DamlSubmissionBatch.CorrelatedSubmission]] =
    Source
      .queue(maxQueueSize, OverflowStrategy.dropNew)
      .groupedWeightedWithin(maxBatchSizeBytes, maxWaitDuration)(
        (cs: DamlSubmissionBatch.CorrelatedSubmission) => cs.getSubmission.size.toLong)

  def run(commitBatch: Seq[DamlSubmissionBatch.CorrelatedSubmission] => Future[Unit])(
      implicit materializer: Materializer): RunningBatchingQueueHandle = {
    val materializedQueue = queue
      .mapAsync(maxConcurrentCommits)(commitBatch)
      .to(Sink.ignore)
      .run()

    val queueAlive = new AtomicBoolean(true)
    materializedQueue.watchCompletion.foreach { _ =>
      queueAlive.set(false)
    }(materializer.executionContext)

    new RunningBatchingQueueHandle {
      override def alive: Boolean = queueAlive.get()

      override def offer(
          submission: DamlSubmissionBatch.CorrelatedSubmission): Future[SubmissionResult] = {
        materializedQueue
          .offer(submission)
          .map {
            case QueueOfferResult.Enqueued => SubmissionResult.Acknowledged
            case QueueOfferResult.Dropped => SubmissionResult.Overloaded
            case f: QueueOfferResult.Failure => SubmissionResult.InternalError(f.toString)
            case QueueOfferResult.QueueClosed =>
              SubmissionResult.InternalError("DefaultBatchingQueue.queue is closed")
          }(materializer.executionContext)
      }

      override def close(): Unit = {
        materializedQueue.complete()
      }
    }
  }
}