akka.stream.OverflowStrategy Scala Examples
The following examples show how to use akka.stream.OverflowStrategy.
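Every example below answers the same underlying question: what should a bounded buffer do when a new element arrives while the buffer is already full? akka.stream.OverflowStrategy names the possible answers: dropHead and dropTail evict the oldest or newest buffered element, dropNew discards the arriving element, dropBuffer clears the whole buffer, backpressure slows the upstream down, and fail aborts the stream. The minimal sketch below (illustrative only; the object name and the chosen sizes are ours, not taken from any of the projects listed here) shows the two places a strategy is typically passed: a Source.queue and an explicit buffer stage.

import akka.actor.ActorSystem
import akka.stream.{ActorMaterializer, OverflowStrategy}
import akka.stream.scaladsl.{Sink, Source}

object OverflowStrategyDemo extends App {
  implicit val system = ActorSystem("overflow-demo")
  implicit val mat = ActorMaterializer()

  // A Source.queue with a bounded buffer: once 16 elements are waiting,
  // dropHead evicts the oldest buffered element to make room for new ones.
  val queue = Source
    .queue[Int](bufferSize = 16, overflowStrategy = OverflowStrategy.dropHead)
    .to(Sink.foreach(n => println(s"consumed $n")))
    .run()

  (1 to 100).foreach(queue.offer(_))

  // An explicit buffer stage accepts the same strategies; backpressure
  // slows the producer down instead of dropping elements.
  Source(1 to 1000)
    .buffer(64, OverflowStrategy.backpressure)
    .runWith(Sink.ignore)
}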
Example 1
Source File: CiApi.scala from asura (MIT License)
package asura.app.api.ci

import akka.actor.ActorSystem
import akka.stream.scaladsl.Source
import akka.stream.{Materializer, OverflowStrategy}
import asura.common.actor.SenderMessage
import asura.core.ci.{CiManager, CiTriggerEventMessage}
import asura.core.job.actor.JobCiActor
import asura.play.api.BaseApi
import javax.inject.{Inject, Singleton}
import org.pac4j.play.scala.SecurityComponents
import play.api.http.ContentTypes
import play.api.libs.EventSource
import play.api.libs.streams.ActorFlow
import play.api.mvc.{Codec, WebSocket}

import scala.concurrent.{ExecutionContext, Future}

@Singleton
class CiApi @Inject()(
    implicit val system: ActorSystem,
    implicit val exec: ExecutionContext,
    implicit val mat: Materializer,
    val controllerComponents: SecurityComponents
) extends BaseApi {

  implicit val codec = Codec.utf_8

  def home() = Action {
    Ok("CI")
  }

  def trigger() = Action(parse.byteString).async { implicit req =>
    val msg = req.bodyAs(classOf[CiTriggerEventMessage])
    CiManager.eventSource(msg)
    Future.successful(Ok("OK"))
  }

  def jobWS(id: String) = WebSocket.accept[String, String] { implicit req =>
    ActorFlow.actorRef(out => JobCiActor.props(id, out))
  }

  def jobSSE(id: String) = Action {
    val ciActor = system.actorOf(JobCiActor.props(id))
    val source = Source.actorRef[String](BaseApi.DEFAULT_SOURCE_BUFFER_SIZE, OverflowStrategy.dropHead)
      .mapMaterializedValue(ref => ciActor ! SenderMessage(ref))
    Ok.chunked(source via EventSource.flow)
      .as(ContentTypes.EVENT_STREAM)
      .withHeaders(BaseApi.responseNoCacheHeaders: _*)
  }
}
Example 2
Source File: HatDataEventRouter.scala from HAT2.0 (GNU Affero General Public License v3.0)
package org.hatdex.hat.api.service.monitoring

import javax.inject.{ Inject, Named }

import akka.{ Done, NotUsed }
import akka.actor.{ ActorRef, ActorSystem }
import akka.stream.scaladsl.{ Sink, Source }
import akka.stream.{ ActorMaterializer, OverflowStrategy }

import scala.concurrent.duration._

trait HatDataEventRouter {
  def init(): Done
}

class HatDataEventRouterImpl @Inject() (
    dataEventBus: HatDataEventBus,
    @Named("hatDataStatsProcessor") statsProcessor: ActorRef,
    implicit val actorSystem: ActorSystem) extends HatDataEventRouter {

  private implicit val materializer = ActorMaterializer()

  init()

  def init(): Done = {
    // Inbound/outbound data stats are reported via a buffering stage to control load and network traffic
    dataEventBus.subscribe(buffer(statsProcessor), classOf[HatDataEventBus.DataCreatedEvent])
    dataEventBus.subscribe(buffer(statsProcessor), classOf[HatDataEventBus.RichDataRetrievedEvent])
    // Data Debit Events are dispatched without buffering
    dataEventBus.subscribe(statsProcessor, classOf[HatDataEventBus.RichDataDebitEvent])
    dataEventBus.subscribe(statsProcessor, classOf[HatDataEventBus.DataDebitEvent])
    Done
  }

  private def buffer(target: ActorRef, batch: Int = 100, period: FiniteDuration = 60.seconds): ActorRef =
    Source.actorRef(bufferSize = 1000, OverflowStrategy.dropNew)
      .groupedWithin(batch, period)
      .to(Sink.actorRef(target, NotUsed))
      .run()
}
Example 3
Source File: BuildTagViewForPersistenceId.scala from akka-persistence-cassandra (Apache License 2.0)
package akka.persistence.cassandra.reconciler

import akka.actor.ActorSystem
import akka.persistence.cassandra.PluginSettings
import akka.Done
import akka.persistence.cassandra.journal.TagWriter._
import scala.concurrent.duration._
import scala.concurrent.Future
import akka.stream.scaladsl.Source
import akka.actor.ExtendedActorSystem
import akka.persistence.query.PersistenceQuery
import akka.persistence.cassandra.query.scaladsl.CassandraReadJournal
import akka.event.Logging
import akka.persistence.cassandra.journal.CassandraTagRecovery
import akka.persistence.cassandra.Extractors
import akka.util.Timeout
import akka.stream.OverflowStrategy
import akka.stream.scaladsl.Sink
import akka.annotation.InternalApi
import akka.serialization.SerializationExtension

@InternalApi
private[akka] final class BuildTagViewForPersistenceId(
    persistenceId: String,
    system: ActorSystem,
    recovery: CassandraTagRecovery,
    settings: PluginSettings) {

  import system.dispatcher

  private implicit val sys = system
  private val log = Logging(system, classOf[BuildTagViewForPersistenceId])
  private val serialization = SerializationExtension(system)

  private val queries: CassandraReadJournal =
    PersistenceQuery(system.asInstanceOf[ExtendedActorSystem])
      .readJournalFor[CassandraReadJournal]("akka.persistence.cassandra.query")

  private implicit val flushTimeout = Timeout(30.seconds)

  def reconcile(flushEvery: Int = 1000): Future[Done] = {
    val recoveryPrep = for {
      tp <- recovery.lookupTagProgress(persistenceId)
      _ <- recovery.setTagProgress(persistenceId, tp)
    } yield tp

    Source
      .futureSource(recoveryPrep.map((tp: Map[String, TagProgress]) => {
        log.debug("[{}] Rebuilding tag view table from: [{}]", persistenceId, tp)
        queries
          .eventsByPersistenceId(
            persistenceId,
            0,
            Long.MaxValue,
            Long.MaxValue,
            None,
            settings.journalSettings.readProfile,
            "BuildTagViewForPersistenceId",
            extractor = Extractors.rawEvent(settings.eventsByTagSettings.bucketSize, serialization, system))
          .map(recovery.sendMissingTagWriteRaw(tp, actorRunning = false))
          .buffer(flushEvery, OverflowStrategy.backpressure)
          .mapAsync(1)(_ => recovery.flush(flushTimeout))
      }))
      .runWith(Sink.ignore)
  }
}
Example 4
Source File: LagSim.scala from kafka-lag-exporter (Apache License 2.0)
package com.lightbend.kafkalagexporter.integration

import akka.actor.Cancellable
import akka.actor.typed.scaladsl.Behaviors
import akka.actor.typed.{Behavior, PostStop}
import akka.kafka.{CommitterSettings, Subscriptions}
import akka.kafka.scaladsl.{Committer, Consumer}
import akka.kafka.testkit.scaladsl.KafkaSpec
import akka.stream.OverflowStrategy
import akka.stream.scaladsl.Keep
import akka.stream.testkit.scaladsl.TestSink
import org.scalatest.concurrent.ScalaFutures

import scala.concurrent.Await
import scala.concurrent.duration._

trait LagSim extends KafkaSpec with ScalaFutures {
  private implicit val patience: PatienceConfig = PatienceConfig(30.seconds, 1.second)

  class LagSimulator(topic: String, group: String) {
    private var offset: Int = 0
    private val committerSettings = CommitterSettings(system).withMaxBatch(1).withParallelism(1)

    private lazy val (consumerControl, consumerProbe) = Consumer
      .committableSource(consumerDefaults.withGroupId(group), Subscriptions.topics(topic))
      .buffer(size = 1, OverflowStrategy.backpressure)
      .map { elem =>
        log.debug("Committing elem with offset: {}", elem.committableOffset.partitionOffset)
        elem.committableOffset.commitScaladsl()
      }
      .toMat(TestSink.probe)(Keep.both)
      .run()

    def produceElements(num: Int): Unit = {
      Await.result(produce(topic, offset to (offset + num)), remainingOrDefault)
      offset += num + 1
    }

    // TODO: Replace this with regular Kafka Consumer for more fine-grained control over committing
    def consumeElements(num: Int): Unit = {
      consumerProbe
        .request(num)
        .expectNextN(num)
    }

    def shutdown(): Unit = {
      consumerControl.shutdown().futureValue
      consumerProbe.cancel()
    }
  }

  sealed trait Simulator
  case class Tick(produce: Int, consume: Int) extends Simulator

  def lagSimActor(simulator: LagSimulator, scheduledTick: Cancellable = Cancellable.alreadyCancelled): Behavior[Simulator] =
    Behaviors.receive[Simulator] {
      case (context, tick @ Tick(produce, consume)) =>
        simulator.produceElements(produce)
        simulator.consumeElements(consume)
        lagSimActor(simulator, context.scheduleOnce(1 second, context.self, tick))
    } receiveSignal {
      case (_, PostStop) =>
        simulator.shutdown()
        scheduledTick.cancel()
        Behaviors.same
    }
}
Example 5
Source File: Webservice.scala from akka-viz (MIT License)
package akkaviz.server

import akka.actor.{ActorRef, ActorSystem, Kill, PoisonPill}
import akka.http.scaladsl.coding.Gzip
import akka.http.scaladsl.model._
import akka.http.scaladsl.model.ws.{BinaryMessage, Message}
import akka.http.scaladsl.server.Directives
import akka.stream.scaladsl._
import akka.stream.{Materializer, OverflowStrategy}
import akkaviz.config.Config
import akkaviz.events._
import akkaviz.events.types._
import akkaviz.persistence.{PersistenceSources, ReceivedRecord}
import akkaviz.protocol

import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.duration._

class Webservice(implicit materializer: Materializer, system: ActorSystem)
    extends Directives
    with SubscriptionSession
    with ReplSupport
    with AkkaHttpHelpers
    with ArchiveSupport
    with FrontendResourcesSupport
    with ProtocolSerializationSupport
    with BackendEventsMarshalling {

  def route: Flow[HttpRequest, HttpResponse, Any] = encodeResponseWith(Gzip) {
    get {
      path("stream") {
        handleWebSocketMessages(tracingEventsFlow.mapMaterializedValue(EventSystem.subscribe))
      }
    } ~
      archiveRouting ~
      replRouting ~
      frontendResourcesRouting
  }

  def tracingEventsFlow: Flow[Message, Message, ActorRef] = {
    val eventSrc = Source.actorRef[BackendEvent](Config.bufferSize, OverflowStrategy.dropNew)

    val wsIn = Flow[Message]
      .via(websocketMessageToClientMessage)
      .via(handleUserCommand)
      .scan(defaultSettings)(updateSettings)
      .expand(r => Iterator.continually(r))

    val out = wsIn.zipMat(eventSrc)((_, m) => m)
      .collect {
        case (settings, r: BackendEvent) if settings.eventAllowed(r) => r
      }.via(backendEventToProtocolFlow)
      .keepAlive(10.seconds, () => protocol.Ping)
      .via(protocolServerMessageToByteString)
      .map(BinaryMessage.Strict(_))

    out
  }

  private[this] val handleUserCommand: Flow[protocol.ApiClientMessage, ChangeSubscriptionSettings, _] =
    Flow[protocol.ApiClientMessage].mapConcat {
      case protocol.SetAllowedMessages(classNames) =>
        system.log.debug(s"Set allowed messages to $classNames")
        List(SetAllowedClasses(classNames))
      case protocol.ObserveActors(actors) =>
        system.log.debug(s"Set observed actors to $actors")
        List(SetActorEventFilter(actors))
      case protocol.SetReceiveDelay(duration) =>
        system.log.debug(s"Setting receive delay to $duration")
        EventSystem.setReceiveDelay(duration)
        Nil
      case protocol.SetEnabled(isEnabled) =>
        system.log.info(s"Setting EventSystem.setEnabled($isEnabled)")
        EventSystem.setEnabled(isEnabled)
        Nil
      case protocol.RefreshInternalState(actor) =>
        ActorSystems.refreshActorState(actor)
        Nil
      case protocol.PoisonPillActor(actor) =>
        ActorSystems.tell(actor, PoisonPill)
        Nil
      case protocol.KillActor(actor) =>
        ActorSystems.tell(actor, Kill)
        Nil
    }

  override def receivedOf(ref: String): Source[ReceivedRecord, _] =
    PersistenceSources.of(ref)

  override def receivedBetween(ref: String, ref2: String): Source[ReceivedRecord, _] =
    PersistenceSources.between(ref, ref2)

  override def isArchiveEnabled: Boolean = Config.enableArchive
}
Example 6
Source File: GlobalSettingsActor.scala from akka-viz (MIT License)
package akkaviz.events

import akka.actor._
import akka.stream.{ActorMaterializer, OverflowStrategy}
import akka.stream.scaladsl.{Sink, Source}
import akkaviz.config.Config
import akkaviz.events.GlobalSettingsActor.{DisableThroughput, EnableThroughput, GetDelay}
import akkaviz.events.types.{ThroughputMeasurement, BackendEvent, ReceiveDelaySet}

import scala.concurrent.duration._

class GlobalSettingsActor extends Actor with ActorLogging {
  private[this] var eventPublisher: Option[ActorRef] = None
  private[this] var throughputSrcRef: Option[ActorRef] = None

  implicit val mat = ActorMaterializer()

  override def receive: Receive = {
    case publisher: ActorRef =>
      eventPublisher = Some(publisher)
      self ! DisableThroughput // todo get from config (could be on by default)

    case EnableThroughput =>
      val src = Source.actorRef[BackendEvent](Config.bufferSize, OverflowStrategy.dropHead)
        .mapMaterializedValue(EventSystem.subscribe)
      val sink = Sink.foreach[ThroughputMeasurement](ev => EventSystem.report(ev))
      val flow = src.via(ThroughputMeasurementFlow.apply(1.second)).to(sink).run()

    case DisableThroughput =>
      throughputSrcRef.foreach { ref =>
        ref ! PoisonPill
        throughputSrcRef = None
      }
  }
}

object GlobalSettingsActor {
  case object GetDelay
  case object EnableThroughput
  case object DisableThroughput
}
Example 7
Source File: ReloadableSchemaProvider.scala from graphql-gateway (Apache License 2.0)
package sangria.gateway.schema

import java.util.concurrent.atomic.AtomicReference

import akka.actor.ActorSystem
import akka.stream.{Materializer, OverflowStrategy}
import akka.stream.scaladsl.{BroadcastHub, Keep, RunnableGraph, Source}
import better.files.File
import sangria.gateway.AppConfig
import sangria.gateway.file.FileMonitorActor
import sangria.gateway.http.client.HttpClient
import sangria.gateway.schema.materializer.{GatewayContext, GatewayMaterializer}
import sangria.gateway.util.Logging

import scala.concurrent.{ExecutionContext, Future}
import scala.util.{Failure, Success}

// TODO: on a timer reload all external schemas and check for changes
class ReloadableSchemaProvider(config: AppConfig, client: HttpClient, mat: GatewayMaterializer)(implicit system: ActorSystem, ec: ExecutionContext, amat: Materializer)
    extends SchemaProvider[GatewayContext, Any] with Logging {

  val loader = new SchemaLoader(config, client, mat)
  val schemaRef = new AtomicReference[Option[SchemaInfo[GatewayContext, Any]]](None)

  system.actorOf(FileMonitorActor.props(config.watch.allFiles, config.watch.threshold, config.watch.allGlobs, reloadSchema))

  private val producer = Source.actorRef[Boolean](100, OverflowStrategy.dropTail)
  private val runnableGraph = producer.toMat(BroadcastHub.sink(bufferSize = 256))(Keep.both)
  private val (changesPublisher, changesSource) = runnableGraph.run()

  val schemaChanges = Some(changesSource)

  def schemaInfo = schemaRef.get() match {
    case v @ Some(_) ⇒ Future.successful(v)
    case None ⇒ reloadSchema
  }

  def reloadSchema(files: Vector[File]): Unit = {
    logger.info(s"Schema files are changed: ${files mkString ", "}. Reloading schema")
    reloadSchema
  }

  def reloadSchema: Future[Option[SchemaInfo[GatewayContext, Any]]] =
    loader.loadSchema.andThen {
      case Success(Some(newSchema)) ⇒
        schemaRef.get() match {
          case Some(currentSchema) ⇒
            val changes = newSchema.schema.compare(currentSchema.schema)
            val renderedChanges =
              if (changes.nonEmpty)
                " with following changes:\n" + changes.map(c ⇒ " * " + c.description + (if (c.breakingChange) " (breaking)" else "")).mkString("\n")
              else
                " without any changes."

            changesPublisher ! true
            logger.info(s"Schema successfully reloaded$renderedChanges")
          case None ⇒
            logger.info(s"Schema successfully loaded from files:\n${newSchema.files.map(f ⇒ " * " + f).mkString("\n")}")
        }

        schemaRef.set(Some(newSchema))
      case Failure(error) ⇒
        logger.error("Failed to load the schema", error)
    }
}
Example 8
Source File: KinesisNonBlockingStreamSource.scala from gfc-aws-kinesis (Apache License 2.0)
package com.gilt.gfc.aws.kinesis.akka

import akka.stream.OverflowStrategy
import akka.stream.scaladsl.{Source, SourceQueue}
import com.amazonaws.services.kinesis.clientlibrary.interfaces.IRecordProcessorCheckpointer
import com.gilt.gfc.aws.kinesis.client.KinesisRecordReader

import scala.concurrent.Await
import scala.concurrent.Future
import scala.concurrent.ExecutionContext
import scala.concurrent.duration.Duration

@deprecated("This API is deprecated, use KinesisStreamSource instead", "0.17.0")
object KinesisNonBlockingStreamSource {

  @deprecated("This API is deprecated, use KinesisStreamSource instead", "0.17.0")
  def apply[T](
    streamConfig: KinesisStreamConsumerConfig[T],
    pumpingTimeoutDuration: Duration = Duration.Inf,
    bufferSize: Int = 0,
    overflowStrategy: OverflowStrategy = OverflowStrategy.backpressure
  )(
    implicit evReader: KinesisRecordReader[T],
    ec: ExecutionContext
  ) = {
    KinesisStreamSource(streamConfig, pumpingTimeoutDuration, bufferSize, overflowStrategy)
  }
}
Example 9
Source File: WebSocketClient.scala from NSDb (Apache License 2.0)
package io.radicalbit.nsdb.minicluster.ws

import akka.actor.{ActorRef, ActorSystem}
import akka.http.scaladsl.Http
import akka.http.scaladsl.model._
import akka.http.scaladsl.model.ws._
import akka.stream.scaladsl._
import akka.stream.{ActorMaterializer, OverflowStrategy}
import akka.{Done, NotUsed}
import com.typesafe.scalalogging.LazyLogging

import scala.collection.mutable.ListBuffer
import scala.concurrent.Future

class WebSocketClient(host: String, port: Int) extends LazyLogging with SynchronizedBuffer[Message] {

  implicit val system = ActorSystem()
  implicit val materializer = ActorMaterializer()

  import system.dispatcher

  val req = WebSocketRequest(uri = s"ws://$host:$port/ws-stream")
  val webSocketFlow = Http().webSocketClientFlow(req)

  val messageSource: Source[Message, ActorRef] =
    Source.actorRef[TextMessage.Strict](bufferSize = 10, OverflowStrategy.fail)

  val messageSink: Sink[Message, NotUsed] =
    Flow[Message]
      .map { message =>
        logger.debug(s"Received text message: [$message]")
        accumulate(message)
      }
      .to(Sink.ignore)

  val ((ws, upgradeResponse), closed) =
    messageSource
      .viaMat(webSocketFlow)(Keep.both)
      .toMat(messageSink)(Keep.both)
      .run()

  val connected = upgradeResponse.flatMap { upgrade =>
    if (upgrade.response.status == StatusCodes.SwitchingProtocols) {
      Future.successful(Done)
    } else {
      throw new RuntimeException(s"Connection failed: ${upgrade.response.status}")
    }
  }

  def send(msg: String): Unit = {
    ws ! TextMessage.Strict(msg)
  }

  def receivedBuffer(): ListBuffer[Message] = buffer

  def subscribe(db: String, namespace: String, metric: String): Unit =
    ws ! TextMessage.Strict(
      s"""{"db":"$db","namespace":"$namespace","metric":"$metric","queryString":"select * from $metric limit 1"}""")
}
Example 10
Source File: RecordProcessorFactoryImpl.scala from kinesis-stream (MIT License)
package px.kinesis.stream.consumer

import akka.NotUsed
import akka.event.LoggingAdapter
import akka.stream.scaladsl.{Keep, Sink, Source}
import akka.stream.{KillSwitch, Materializer, OverflowStrategy}
import px.kinesis.stream.consumer.checkpoint.CheckpointTracker
import software.amazon.kinesis.processor.{ShardRecordProcessor, ShardRecordProcessorFactory}

import scala.collection.immutable.Seq
import scala.concurrent.ExecutionContext

class RecordProcessorFactoryImpl(
  sink: Sink[Record, NotUsed],
  workerId: String,
  checkpointTracker: CheckpointTracker,
  killSwitch: KillSwitch
)(implicit am: Materializer, ec: ExecutionContext, logging: LoggingAdapter)
    extends ShardRecordProcessorFactory {

  override def shardRecordProcessor(): ShardRecordProcessor = {
    val queue = Source
      .queue[Seq[Record]](0, OverflowStrategy.backpressure)
      .mapConcat(identity)
      .toMat(sink)(Keep.left)
      .run()

    new RecordProcessorImpl(queue, checkpointTracker, killSwitch, workerId)
  }
}
Example 11
Source File: WebSocketMessageHandler.scala from asura (MIT License)
package asura.core.actor.flow

import akka.NotUsed
import akka.actor.{ActorRef, PoisonPill}
import akka.http.scaladsl.model.ws.{Message, TextMessage}
import akka.stream.OverflowStrategy
import akka.stream.scaladsl.{Flow, Sink, Source}
import asura.common.actor.{ActorEvent, SenderMessage}
import asura.common.exceptions.InvalidStatusException
import asura.core.CoreConfig
import asura.core.util.JacksonSupport

import scala.concurrent.duration._

object WebSocketMessageHandler {

  val DEFAULT_BUFFER_SIZE = CoreConfig.DEFAULT_WS_ACTOR_BUFFER_SIZE
  val KEEP_ALIVE_INTERVAL = 2

  def newHandleFlow[T <: AnyRef](workActor: ActorRef, msgClass: Class[T]): Flow[Message, Message, NotUsed] = {
    val incomingMessages: Sink[Message, NotUsed] =
      Flow[Message].map {
        case TextMessage.Strict(text) => JacksonSupport.parse(text, msgClass)
        case _ => throw InvalidStatusException("Unsupported message type")
      }.to(Sink.actorRef[T](workActor, PoisonPill))
    val outgoingMessages: Source[Message, NotUsed] =
      Source.actorRef[ActorEvent](DEFAULT_BUFFER_SIZE, OverflowStrategy.dropHead)
        .mapMaterializedValue { outActor =>
          workActor ! SenderMessage(outActor)
          NotUsed
        }
        .map(result => TextMessage(JacksonSupport.stringify(result)))
        .keepAlive(KEEP_ALIVE_INTERVAL.seconds, () => TextMessage.Strict(""))
    Flow.fromSinkAndSource(incomingMessages, outgoingMessages)
  }

  def newHandleStringFlow[T <: AnyRef](workActor: ActorRef, msgClass: Class[T]): Flow[Message, Message, NotUsed] = {
    val incomingMessages: Sink[Message, NotUsed] =
      Flow[Message].map {
        case TextMessage.Strict(text) => JacksonSupport.parse(text, msgClass)
        case _ => throw InvalidStatusException("Unsupported message type")
      }.to(Sink.actorRef[T](workActor, PoisonPill))
    val outgoingMessages: Source[Message, NotUsed] =
      Source.actorRef[String](DEFAULT_BUFFER_SIZE, OverflowStrategy.dropHead)
        .mapMaterializedValue { outActor =>
          workActor ! SenderMessage(outActor)
          NotUsed
        }
        .map(result => TextMessage(result))
        .keepAlive(KEEP_ALIVE_INTERVAL.seconds, () => TextMessage.Strict(""))
    Flow.fromSinkAndSource(incomingMessages, outgoingMessages)
  }

  def stringToActorEventFlow[T <: AnyRef](workActor: ActorRef, msgClass: Class[T]): Flow[String, String, NotUsed] = {
    val incomingMessages: Sink[String, NotUsed] =
      Flow[String].map {
        case text: String => JacksonSupport.parse(text, msgClass)
      }.to(Sink.actorRef[T](workActor, PoisonPill))
    val outgoingMessages: Source[String, NotUsed] =
      Source.actorRef[ActorEvent](DEFAULT_BUFFER_SIZE, OverflowStrategy.dropHead)
        .mapMaterializedValue { outActor =>
          workActor ! SenderMessage(outActor)
          NotUsed
        }
        .map(result => JacksonSupport.stringify(result))
        .keepAlive(KEEP_ALIVE_INTERVAL.seconds, () => "")
    Flow.fromSinkAndSource(incomingMessages, outgoingMessages)
  }

  def stringToActorEventFlow[T <: AnyRef](workActor: ActorRef): Flow[String, String, NotUsed] = {
    val incomingMessages: Sink[String, NotUsed] =
      Flow[String].to(Sink.actorRef[String](workActor, PoisonPill))
    val outgoingMessages: Source[String, NotUsed] =
      Source.actorRef[ActorEvent](DEFAULT_BUFFER_SIZE, OverflowStrategy.dropHead)
        .mapMaterializedValue { outActor =>
          workActor ! SenderMessage(outActor)
          NotUsed
        }
        .map(result => JacksonSupport.stringify(result))
        .keepAlive(KEEP_ALIVE_INTERVAL.seconds, () => "")
    Flow.fromSinkAndSource(incomingMessages, outgoingMessages)
  }
}
Example 12
Source File: SttpBackendStubAkkaTests.scala from sttp (Apache License 2.0)
package sttp.client.akkahttp

import akka.actor.ActorSystem
import akka.http.scaladsl.model.ws.{Message, TextMessage}
import akka.stream.OverflowStrategy
import akka.stream.scaladsl.{Flow, Keep, Sink, Source}
import org.scalatest.BeforeAndAfterAll
import org.scalatest.concurrent.ScalaFutures
import org.scalatest.flatspec.AnyFlatSpec
import org.scalatest.matchers.should.Matchers
import sttp.client._
import sttp.model.Headers

import scala.concurrent.{Await, Future}
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.duration._

class SttpBackendStubAkkaTests extends AnyFlatSpec with Matchers with ScalaFutures with BeforeAndAfterAll {

  implicit val system: ActorSystem = ActorSystem()

  override protected def afterAll(): Unit = {
    Await.result(system.terminate().map(_ => ()), 5.seconds)
  }

  "backend stub" should "cycle through responses using a single sent request" in {
    // given
    implicit val backend = AkkaHttpBackend.stub
      .whenRequestMatches(_ => true)
      .thenRespondCyclic("a", "b", "c")

    // when
    def r = basicRequest.get(uri"http://example.org/a/b/c").send().futureValue

    // then
    r.body shouldBe Right("a")
    r.body shouldBe Right("b")
    r.body shouldBe Right("c")
    r.body shouldBe Right("a")
  }

  it should "use given flow as web socket handler" in {
    // This test is an example of how we can test a client flow.
    // We check the behavior of the client when connected to an echo server.
    // The client's responsibility is to send two messages to the server and collect the received messages.
    val useHandler: Flow[Message, Message, Future[Seq[Message]]] => Future[Seq[Message]] = clientFlow => {
      val ((outQueue, clientReceivedMessages), inQueue) = Source
        .queue(1, OverflowStrategy.fail)
        .viaMat(clientFlow)(Keep.both)
        .toMat(Sink.queue())(Keep.both)
        .run()

      def echoMsg(): Future[Unit] =
        inQueue.pull().flatMap {
          case None => echoMsg()
          case Some(msg) =>
            outQueue.offer(TextMessage(s"echo: " + msg.asTextMessage.getStrictText)).map(_ => ())
        }

      (for {
        _ <- outQueue.offer(TextMessage("Hi!"))
        _ <- echoMsg()
        _ <- echoMsg()
        _ = outQueue.complete()
        _ <- outQueue.watchCompletion()
      } yield ()).flatMap(_ => clientReceivedMessages)
    }

    val clientFlow: Flow[Message, Message, Future[Seq[Message]]] = {
      Flow.fromSinkAndSourceMat(
        Sink.seq[Message],
        Source((1 to 2).map(i => TextMessage(s"test$i")))
      )(Keep.left)
    }

    implicit val b = AkkaHttpBackend.stub
      .whenRequestMatches(_ => true)
      .thenHandleOpenWebSocket(Headers(List.empty), useHandler)

    val receivedMessages = basicRequest
      .get(uri"wss://echo.websocket.org")
      .openWebsocket(clientFlow)
      .flatMap(_.result)
      .futureValue
      .toList

    receivedMessages shouldBe List("Hi!", "echo: test1", "echo: test2").map(TextMessage(_))
  }
}
Example 13
Source File: BankAccountReadModelUseCase.scala from akka-ddd-cqrs-es-example (MIT License)
package com.github.j5ik2o.bank.useCase

import akka.actor.ActorSystem
import akka.stream.scaladsl.{ Flow, Keep, Sink, Source, SourceQueueWithComplete }
import akka.stream.{ ActorMaterializer, OverflowStrategy }
import akka.{ Done, NotUsed }
import com.github.j5ik2o.bank.domain.model._
import com.github.j5ik2o.bank.useCase.BankAccountAggregateUseCase.Protocol.{
  ResolveBankAccountEventsRequest,
  ResolveBankAccountEventsResponse
}
import com.github.j5ik2o.bank.useCase.port.{ BankAccountReadModelFlows, JournalReader }
import pureconfig._

import scala.concurrent.{ ExecutionContext, ExecutionContextExecutor, Future, Promise }

class BankAccountReadModelUseCase(bankAccountReadModelFlows: BankAccountReadModelFlows, journalReader: JournalReader)(
    implicit val system: ActorSystem
) extends UseCaseSupport {

  import UseCaseSupport._

  private val config = loadConfigOrThrow[BankAccountAggregateUseCaseConfig]("bank.use-case.bank-account-use-case")

  private val bufferSize: Int = config.bufferSize

  private implicit val mat: ActorMaterializer = ActorMaterializer()
  private implicit val ec: ExecutionContextExecutor = system.dispatcher

  def resolveBankAccountEventsById(
      request: ResolveBankAccountEventsRequest
  )(implicit ec: ExecutionContext): Future[ResolveBankAccountEventsResponse] =
    offerToQueue(resolveBankAccountEventQueue)(request, Promise())

  private lazy val resolveBankAccountEventQueue
    : SourceQueueWithComplete[(ResolveBankAccountEventsRequest, Promise[ResolveBankAccountEventsResponse])] =
    Source
      .queue[(ResolveBankAccountEventsRequest, Promise[ResolveBankAccountEventsResponse])](bufferSize, OverflowStrategy.dropNew)
      .via(bankAccountReadModelFlows.resolveBankAccountEventByIdFlow.zipPromise)
      .toMat(completePromiseSink)(Keep.left)
      .run()

  private val projectionFlow: Flow[(BankAccountEvent, Long), Int, NotUsed] =
    Flow[(BankAccountEvent, Long)].flatMapConcat {
      case (event: BankAccountOpened, sequenceNr: Long) =>
        Source
          .single((event.bankAccountId, event.name.value, sequenceNr, event.occurredAt))
          .via(bankAccountReadModelFlows.openBankAccountFlow)
      case (event: BankAccountEventUpdated, sequenceNr: Long) =>
        Source
          .single((event.bankAccountId, event.name.value, sequenceNr, event.occurredAt))
          .via(bankAccountReadModelFlows.updateAccountFlow)
      case (event: BankAccountDeposited, sequenceNr: Long) =>
        Source
          .single((event.bankAccountId, event.deposit, sequenceNr, event.occurredAt))
          .via(bankAccountReadModelFlows.depositBankAccountFlow)
      case (event: BankAccountWithdrawn, sequenceNr: Long) =>
        Source
          .single((event.bankAccountId, event.withdraw, sequenceNr, event.occurredAt))
          .via(bankAccountReadModelFlows.withdrawBankAccountFlow)
      case (event: BankAccountClosed, sequenceNr: Long) =>
        Source
          .single((event.bankAccountId, sequenceNr, event.occurredAt))
          .via(bankAccountReadModelFlows.closeBankAccountFlow)
    }

  def execute(): Future[Done] = {
    bankAccountReadModelFlows.resolveLastSeqNrSource
      .flatMapConcat { lastSeqNr =>
        journalReader.eventsByTagSource(classOf[BankAccountEvent].getName, lastSeqNr + 1)
      }
      .map { eventBody =>
        (eventBody.event.asInstanceOf[BankAccountEvent], eventBody.sequenceNr)
      }
      .via(projectionFlow)
      .toMat(Sink.ignore)(Keep.right)
      .run()
  }
}
Example 14
Source File: Deadlocks.scala from fusion-data (Apache License 2.0)
package example.akkastream.graph

import akka.actor.ActorSystem
import akka.stream.{ ActorMaterializer, ClosedShape, OverflowStrategy }
import akka.stream.scaladsl.{ Broadcast, Concat, Flow, GraphDSL, Merge, MergePreferred, RunnableGraph, Sink, Source, ZipWith }

import scala.io.StdIn

object Deadlocks extends App {
  implicit val system = ActorSystem()
  implicit val mat = ActorMaterializer()

  val source = Source(1 to 10)

      concat <~ bcast
      ClosedShape
    })
    .run()

  StdIn.readLine()
  system.terminate()
}
Example 15
Source File: ExtensionExample.scala from korolev (Apache License 2.0)
import akka.stream.OverflowStrategy
import akka.stream.scaladsl.{Sink, Source}
import korolev._
import korolev.akka._
import korolev.server._
import korolev.state.javaSerialization._

import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.Future

object ExtensionExample extends SimpleAkkaHttpKorolevApp {

  private val ctx = Context[Future, List[String], String]

  import ctx._

  private val (queue, queueSource) = Source
    .queue[String](10, OverflowStrategy.fail)
    .preMaterialize()

  private val topicListener = Extension.pure[Future, List[String], String] { access =>
    val queueSink = queueSource.runWith(Sink.queue[String])
    def aux(): Future[Unit] = queueSink.pull() flatMap {
      case Some(message) =>
        access
          .transition(_ :+ message)
          .flatMap(_ => aux())
      case None =>
        Future.unit
    }
    aux()
    Extension.Handlers[Future, List[String], String](
      onMessage = message => queue.offer(message).map(_ => ()),
      onDestroy = () => Future.successful(queueSink.cancel())
    )
  }

  private def onSubmit(access: Access) = {
    for {
      sessionId <- access.sessionId
      name <- access.valueOf(nameElement)
      text <- access.valueOf(textElement)
      userName = if (name.trim.isEmpty) s"Anonymous #${sessionId.hashCode().toHexString}" else name
      _ <- if (text.trim.isEmpty) Future.unit else access.publish(s"$userName: $text")
      _ <- access.property(textElement).set("value", "")
    } yield ()
  }

  private val nameElement = elementId()
  private val textElement = elementId()

  private val config = KorolevServiceConfig[Future, List[String], String](
    stateLoader = StateLoader.default(Nil),
    extensions = List(topicListener),
    document = { message =>
      import levsha.dsl._
      import html._
      optimize {
        Html(
          body(
            div(
              backgroundColor @= "yellow",
              padding @= "10px",
              border @= "1px solid black",
              "This is a chat. Open this app in few browser tabs or on few different computers"
            ),
            div(
              marginTop @= "10px",
              padding @= "10px",
              height @= "250px",
              backgroundColor @= "#eeeeee",
              message map { x =>
                div(x)
              }
            ),
            form(
              marginTop @= "10px",
              input(`type` := "text", placeholder := "Name", nameElement),
              input(`type` := "text", placeholder := "Message", textElement),
              button("Sent"),
              event("submit")(onSubmit)
            )
          )
        )
      }
    }
  )

  val service: AkkaHttpService = akkaHttpService(config)
}
Example 16
Source File: Converters.scala from korolev (Apache License 2.0)
package korolev.akka

import akka.NotUsed
import akka.stream.OverflowStrategy
import akka.stream.scaladsl.{Sink, Source}
import korolev.akka.util.{KorolevStreamPublisher, KorolevStreamSubscriber}
import korolev.effect.{Effect, Stream}
import org.reactivestreams.Publisher

object Converters {

  implicit final class SinkCompanionOps(value: Sink.type) {
    def korolevStream[F[_]: Effect, T]: Sink[T, Stream[F, T]] = {
      val subscriber = new KorolevStreamSubscriber[F, T]()
      Sink
        .fromSubscriber(subscriber)
        .mapMaterializedValue(_ => subscriber)
    }
  }

  implicit final class StreamCompanionOps(value: Stream.type) {
    def fromPublisher[F[_]: Effect, T](publisher: Publisher[T]): Stream[F, T] = {
      val result = new KorolevStreamSubscriber[F, T]()
      publisher.subscribe(result)
      result
    }
  }

  implicit final class KorolevStreamsOps[F[_]: Effect, T](stream: Stream[F, T]) {
    def asPublisher(fanout: Boolean = false): Publisher[T] =
      new KorolevStreamPublisher(stream, fanout)

    def asAkkaSource: Source[T, NotUsed] = {
      val publisher = new KorolevStreamPublisher(stream, fanout = false)
      Source
        .fromPublisher(publisher)
        .buffer(10, OverflowStrategy.backpressure) // FIXME should work without this line. Looks like bug in akka-streams
    }
  }
}
Example 17
Source File: VoiceUDPHandler.scala from AckCord (MIT License)
package ackcord.voice

import java.net.InetSocketAddress

import scala.concurrent.duration._
import scala.util.{Failure, Success}

import ackcord.data.{RawSnowflake, UserId}
import akka.NotUsed
import akka.actor.typed._
import akka.actor.typed.scaladsl._
import akka.stream.OverflowStrategy
import akka.stream.scaladsl.{Keep, Sink, Source, SourceQueueWithComplete}
import akka.util.ByteString
import org.slf4j.Logger

object VoiceUDPHandler {

  def apply(
      address: String,
      port: Int,
      ssrc: Int,
      serverId: RawSnowflake,
      userId: UserId,
      soundProducer: Source[ByteString, NotUsed],
      soundConsumer: Sink[AudioAPIMessage, NotUsed],
      parent: ActorRef[VoiceHandler.Command]
  ): Behavior[Command] =
    Behaviors
      .supervise(
        Behaviors.setup[Command] { ctx =>
          implicit val system: ActorSystem[Nothing] = ctx.system

          val ((queue, futIp), watchDone) = soundProducer
            .viaMat(
              VoiceUDPFlow
                .flow(
                  new InetSocketAddress(address, port),
                  ssrc,
                  serverId,
                  userId,
                  Source.queue[Option[ByteString]](0, OverflowStrategy.dropBuffer)
                )
                .watchTermination()(Keep.both)
            )(Keep.right)
            .to(soundConsumer)
            .run()

          ctx.pipeToSelf(futIp) {
            case Success(value) => IPDiscoveryResult(value)
            case Failure(e)     => SendException(e)
          }
          ctx.pipeToSelf(watchDone)(_ => ConnectionDied)

          handle(ctx, ctx.log, ssrc, queue, parent)
        }
      )
      .onFailure(
        SupervisorStrategy
          .restartWithBackoff(100.millis, 5.seconds, 1D)
          .withResetBackoffAfter(10.seconds)
          .withMaxRestarts(5)
      )

  def handle(
      ctx: ActorContext[Command],
      log: Logger,
      ssrc: Int,
      queue: SourceQueueWithComplete[Option[ByteString]],
      parent: ActorRef[VoiceHandler.Command]
  ): Behavior[Command] =
    Behaviors.receiveMessage {
      case SendException(e) => throw e
      case ConnectionDied   => Behaviors.stopped
      case Shutdown =>
        queue.complete()
        Behaviors.same
      case IPDiscoveryResult(VoiceUDPFlow.FoundIP(localAddress, localPort)) =>
        parent ! VoiceHandler.GotLocalIP(localAddress, localPort)
        Behaviors.same
      case SetSecretKey(key) =>
        queue.offer(key)
        Behaviors.same
    }

  sealed trait Command

  case object Shutdown extends Command

  private case class SendException(e: Throwable) extends Command
  private case object ConnectionDied extends Command
  private case class IPDiscoveryResult(foundIP: VoiceUDPFlow.FoundIP) extends Command
  private[voice] case class SetSecretKey(key: Option[ByteString]) extends Command
}
Example 18
Source File: VoiceUDPFlow.scala from AckCord (MIT License)
package ackcord.voice

import java.net.InetSocketAddress
import java.nio.ByteOrder

import scala.concurrent.{Future, Promise}

import ackcord.data.{RawSnowflake, UserId}
import ackcord.util.UdpConnectedFlow
import akka.NotUsed
import akka.actor.typed.ActorSystem
import akka.stream.scaladsl.{BidiFlow, Concat, Flow, GraphDSL, Keep, Source}
import akka.stream.{BidiShape, OverflowStrategy}
import akka.util.ByteString

object VoiceUDPFlow {

  val silence = ByteString(0xF8, 0xFF, 0xFE)

  val SampleRate = 48000
  val FrameSize = 960
  val FrameTime = 20

  def flow[Mat](
      remoteAddress: InetSocketAddress,
      ssrc: Int,
      serverId: RawSnowflake,
      userId: UserId,
      secretKeys: Source[Option[ByteString], Mat]
  )(implicit system: ActorSystem[Nothing]): Flow[ByteString, AudioAPIMessage.ReceivedData, (Mat, Future[FoundIP])] =
    NaclBidiFlow
      .bidiFlow(ssrc, serverId, userId, secretKeys)
      .atopMat(voiceBidi(ssrc).reversed)(Keep.both)
      .async
      .join(Flow[ByteString].buffer(32, OverflowStrategy.backpressure).via(UdpConnectedFlow.flow(remoteAddress)))

  def voiceBidi(ssrc: Int): BidiFlow[ByteString, ByteString, ByteString, ByteString, Future[FoundIP]] = {
    implicit val byteOrder: ByteOrder = ByteOrder.BIG_ENDIAN
    val ipDiscoveryPacket = {
      val byteBuilder = ByteString.createBuilder
      byteBuilder.sizeHint(74)
      byteBuilder.putShort(0x1).putShort(70).putInt(ssrc)
      byteBuilder.putBytes(new Array[Byte](66))
      byteBuilder.result()
    }

    val valvePromise = Promise[Unit]
    val valve = Source.future(valvePromise.future).drop(1).asInstanceOf[Source[ByteString, NotUsed]]

    val ipDiscoveryFlow = Flow[ByteString]
      .viaMat(new IPDiscoveryFlow(() => valvePromise.success(())))(Keep.right)

    BidiFlow
      .fromGraph(GraphDSL.create(ipDiscoveryFlow) { implicit b => ipDiscovery =>
        import GraphDSL.Implicits._

        val voiceIn = b.add(Flow[ByteString])

        val ipDiscoverySource = b.add(Source.single(ipDiscoveryPacket) ++ valve)
        val ipDiscoveryAndThenVoiceData = b.add(Concat[ByteString]())

        ipDiscoverySource ~> ipDiscoveryAndThenVoiceData
        voiceIn ~> ipDiscoveryAndThenVoiceData

        BidiShape(
          ipDiscovery.in,
          ipDiscovery.out,
          voiceIn.in,
          ipDiscoveryAndThenVoiceData.out
        )
      })
  }

  case class FoundIP(address: String, port: Int)
}
Example 19
Source File: IngestSocketFactory.scala from hydra (Apache License 2.0)
package hydra.ingest.services

import akka.NotUsed
import akka.actor.{ActorRef, ActorRefFactory, Props}
import akka.stream.OverflowStrategy
import akka.stream.scaladsl.{Flow, Sink, Source}
import hydra.core.ingest.IngestionReport

trait IngestSocketFactory {
  def ingestFlow(): Flow[String, OutgoingMessage, NotUsed]
}

object IngestSocketFactory {

  def createSocket(fact: ActorRefFactory): IngestSocketFactory = { () =>
    {
      val socketActor = fact.actorOf(Props[IngestionSocketActor])

      def actorSink =
        Sink.actorRefWithBackpressure(
          socketActor,
          onInitMessage = SocketInit,
          ackMessage = SocketAck,
          onCompleteMessage = SocketEnded,
          onFailureMessage = SocketFailed.apply
        )

      val in = Flow[String]
        .map(IncomingMessage)
        .to(actorSink)

      val out = Source
        .actorRefWithBackpressure[OutgoingMessage](
          SocketAck,
          PartialFunction.empty,
          PartialFunction.empty
        )
        .mapMaterializedValue(socketActor ! SocketStarted(_))

      Flow.fromSinkAndSourceCoupled(in, out)
    }
  }
}

sealed trait SocketEvent

case object SocketInit extends SocketEvent
case class SocketStarted(ref: ActorRef) extends SocketEvent
case object SocketEnded extends SocketEvent
case object SocketAck extends SocketEvent
case class IncomingMessage(message: String) extends SocketEvent
case class SocketFailed(ex: Throwable)

sealed trait OutgoingMessage extends SocketEvent

case class SimpleOutgoingMessage(status: Int, message: String) extends OutgoingMessage
case class IngestionOutgoingMessage(report: IngestionReport) extends OutgoingMessage
Example 20
Source File: BatchingQueue.scala from daml (Apache License 2.0)
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.ledger.participant.state.kvutils.api

import java.io.Closeable
import java.util.concurrent.atomic.AtomicBoolean

import akka.stream.scaladsl.{Sink, Source, SourceQueueWithComplete}
import akka.stream.{Materializer, OverflowStrategy, QueueOfferResult}
import com.daml.ledger.participant.state.kvutils.DamlKvutils.DamlSubmissionBatch
import com.daml.ledger.participant.state.v1.SubmissionResult

import scala.concurrent.Future
import scala.concurrent.duration._

object BatchingQueue {
  type CommitBatchFunction =
    Seq[DamlSubmissionBatch.CorrelatedSubmission] => Future[Unit]
}

case class DefaultBatchingQueue(
    maxQueueSize: Int,
    maxBatchSizeBytes: Long,
    maxWaitDuration: FiniteDuration,
    maxConcurrentCommits: Int
) extends BatchingQueue {

  private val queue: Source[
    Seq[DamlSubmissionBatch.CorrelatedSubmission],
    SourceQueueWithComplete[DamlSubmissionBatch.CorrelatedSubmission]] =
    Source
      .queue(maxQueueSize, OverflowStrategy.dropNew)
      .groupedWeightedWithin(maxBatchSizeBytes, maxWaitDuration)(
        (cs: DamlSubmissionBatch.CorrelatedSubmission) => cs.getSubmission.size.toLong)

  def run(commitBatch: Seq[DamlSubmissionBatch.CorrelatedSubmission] => Future[Unit])(
      implicit materializer: Materializer): RunningBatchingQueueHandle = {
    val materializedQueue = queue
      .mapAsync(maxConcurrentCommits)(commitBatch)
      .to(Sink.ignore)
      .run()

    val queueAlive = new AtomicBoolean(true)
    materializedQueue.watchCompletion.foreach { _ =>
      queueAlive.set(false)
    }(materializer.executionContext)

    new RunningBatchingQueueHandle {
      override def alive: Boolean = queueAlive.get()

      override def offer(
          submission: DamlSubmissionBatch.CorrelatedSubmission): Future[SubmissionResult] = {
        materializedQueue
          .offer(submission)
          .map {
            case QueueOfferResult.Enqueued => SubmissionResult.Acknowledged
            case QueueOfferResult.Dropped => SubmissionResult.Overloaded
            case f: QueueOfferResult.Failure => SubmissionResult.InternalError(f.toString)
            case QueueOfferResult.QueueClosed =>
              SubmissionResult.InternalError("DefaultBatchingQueue.queue is closed")
          }(materializer.executionContext)
      }

      override def close(): Unit = {
        materializedQueue.complete()
      }
    }
  }
}
Example 21
Source File: WebsocketController.scala from gbf-raidfinder (MIT License)
package walfie.gbf.raidfinder.server.controller

import akka.actor._
import akka.stream.scaladsl.Flow
import akka.stream.{Materializer, OverflowStrategy}
import monix.execution.Scheduler
import play.api.http.websocket.Message
import play.api.libs.streams._
import play.api.mvc._
import play.api.mvc.WebSocket.MessageFlowTransformer

import scala.concurrent.duration.FiniteDuration
import scala.concurrent.Future

import walfie.gbf.raidfinder.domain._
import walfie.gbf.raidfinder.protocol._
import walfie.gbf.raidfinder.RaidFinder
import walfie.gbf.raidfinder.server.actor.WebsocketRaidsHandler
import walfie.gbf.raidfinder.server.util.MessageFlowTransformerUtil
import walfie.gbf.raidfinder.server.{BossNameTranslator, MetricsCollector}

class WebsocketController(
  raidFinder: RaidFinder[BinaryProtobuf],
  translator: BossNameTranslator,
  keepAliveInterval: FiniteDuration,
  metricsCollector: MetricsCollector
)(implicit system: ActorSystem, materializer: Materializer, scheduler: Scheduler) extends Controller {
  private val jsonTransformer = MessageFlowTransformerUtil.protobufJsonMessageFlowTransformer
  private val binaryTransformer = MessageFlowTransformerUtil.protobufBinaryMessageFlowTransformer
  private val defaultTransformer = jsonTransformer

        val flow = ActorFlow.actorRef(props = props)
        transformer.transform(flow)
      }
      case None => Left {
        val unsupportedProtocols = requestedProtocols.mkString("[", ", ", "]")
        Results.BadRequest("Unsupported websocket subprotocols " + unsupportedProtocols)
      }
    }

    Future.successful(result)
  }
}
Example 22
Source File: WsConnection.scala from matcher (MIT License)
package com.wavesplatform.dex.api.ws.connection

import java.util.concurrent.ConcurrentLinkedQueue

import akka.Done
import akka.actor.{ActorRef, ActorSystem, Status}
import akka.http.scaladsl.Http
import akka.http.scaladsl.model.ws.{BinaryMessage, Message, TextMessage, WebSocketRequest}
import akka.stream.scaladsl.{Flow, Sink, Source}
import akka.stream.{CompletionStrategy, Materializer, OverflowStrategy}
import com.wavesplatform.dex.api.ws.protocol.{WsClientMessage, WsMessage, WsPingOrPong, WsServerMessage}
import com.wavesplatform.dex.domain.utils.ScorexLogging
import play.api.libs.json.Json

import scala.collection.JavaConverters._
import scala.concurrent.Future
import scala.concurrent.duration._
import scala.util.{Failure, Success, Try}

class WsConnection(uri: String, keepAlive: Boolean = true)(implicit system: ActorSystem, materializer: Materializer) extends ScorexLogging {

  log.info(s"""Connecting to Matcher WS API:
              | URI = $uri
              | Keep alive = $keepAlive""".stripMargin)

  import materializer.executionContext

  private val wsHandlerRef = system.actorOf(TestWsHandlerActor props keepAlive)

  protected def stringifyClientMessage(cm: WsClientMessage): TextMessage.Strict =
    WsMessage.toStrictTextMessage(cm)(WsClientMessage.wsClientMessageWrites)

  // From test to server
  private val source: Source[TextMessage.Strict, ActorRef] = {
    val completionMatcher: PartialFunction[Any, CompletionStrategy] = { case akka.actor.Status.Success(_) => CompletionStrategy.draining }
    val failureMatcher: PartialFunction[Any, Throwable] = { case Status.Failure(cause) => cause }

    Source
      .actorRef[WsClientMessage](completionMatcher, failureMatcher, 10, OverflowStrategy.fail)
      .map(stringifyClientMessage)
      .mapMaterializedValue { source =>
        wsHandlerRef.tell(TestWsHandlerActor.AssignSourceRef, source)
        source
      }
  }

  private val messagesBuffer: ConcurrentLinkedQueue[WsServerMessage] = new ConcurrentLinkedQueue[WsServerMessage]()

  // From server to test
  private val sink: Sink[Message, Future[Done]] = Sink.foreach {
    case tm: TextMessage =>
      for {
        strictText <- tm.toStrict(1.second).map(_.getStrictText)
        clientMessage <- {
          log.trace(s"Got $strictText")
          Try { Json.parse(strictText).as[WsServerMessage] } match {
            case Failure(exception) => Future.failed(exception)
            case Success(x) => {
              messagesBuffer.add(x)
              if (keepAlive) x match {
                case value: WsPingOrPong => wsHandlerRef ! value
                case _                   =>
              }
              Future.successful(x)
            }
          }
        }
      } yield clientMessage

    case bm: BinaryMessage =>
      bm.dataStream.runWith(Sink.ignore)
      Future.failed { new IllegalArgumentException("Binary messages are not supported") }
  }

  private val flow: Flow[Message, TextMessage.Strict, Future[Done]] =
    Flow.fromSinkAndSourceCoupled(sink, source).watchTermination() {
      case (_, f) =>
        f.onComplete {
          case Success(_) => log.info(s"WebSocket connection to $uri successfully closed")
          case Failure(e) => log.error(s"WebSocket connection to $uri closed with an error", e)
        }(materializer.executionContext)
        f
    }

  val (connectionResponse, closed) = Http().singleWebSocketRequest(WebSocketRequest(uri), flow)

  val connectionOpenedTs: Long = System.currentTimeMillis
  val connectionClosedTs: Future[Long] = closed.map(_ => System.currentTimeMillis)
  val connectionLifetime: Future[FiniteDuration] = connectionClosedTs.map(cc => FiniteDuration(cc - connectionOpenedTs, MILLISECONDS))

  def messages: List[WsServerMessage] = messagesBuffer.iterator().asScala.toList
  def clearMessages(): Unit = messagesBuffer.clear()

  def send(message: WsClientMessage): Unit = wsHandlerRef ! TestWsHandlerActor.SendToServer(message)

  def close(): Unit = if (!isClosed) wsHandlerRef ! TestWsHandlerActor.CloseConnection
  def isClosed: Boolean = closed.isCompleted
}
Example 23
Source File: WsConnection.scala from matcher (MIT License)
package com.wavesplatform.dex.load.ws

import akka.Done
import akka.actor.{ActorRef, ActorSystem, Status}
import akka.http.scaladsl.Http
import akka.http.scaladsl.model.ws.{BinaryMessage, Message, TextMessage, WebSocketRequest}
import akka.stream.scaladsl.{Flow, Sink, Source}
import akka.stream.{CompletionStrategy, Materializer, OverflowStrategy}
import com.wavesplatform.dex.api.ws.connection.TestWsHandlerActor
import com.wavesplatform.dex.api.ws.protocol.{WsClientMessage, WsMessage, WsServerMessage}
import com.wavesplatform.dex.domain.utils.ScorexLogging
import play.api.libs.json.Json

import scala.concurrent.Future
import scala.concurrent.duration.DurationInt
import scala.util.{Failure, Success, Try}

class WsConnection(uri: String, receive: WsServerMessage => Option[WsClientMessage])(implicit system: ActorSystem) extends ScorexLogging {

  import system.dispatcher

  private implicit val materializer = Materializer(system)
  private val wsHandlerRef = system.actorOf(TestWsHandlerActor.props(keepAlive = true))

  log.info(s"Connecting to Matcher WS API: $uri")

  protected def stringifyClientMessage(cm: WsClientMessage): TextMessage.Strict =
    WsMessage.toStrictTextMessage(cm)(WsClientMessage.wsClientMessageWrites)

  // To server
  private val source: Source[TextMessage.Strict, ActorRef] = {
    val completionMatcher: PartialFunction[Any, CompletionStrategy] = { case akka.actor.Status.Success(_) => CompletionStrategy.draining }
    val failureMatcher: PartialFunction[Any, Throwable] = { case Status.Failure(cause) => cause }

    Source
      .actorRef[WsClientMessage](completionMatcher, failureMatcher, 10, OverflowStrategy.fail)
      .map(stringifyClientMessage)
      .mapMaterializedValue { source =>
        wsHandlerRef.tell(TestWsHandlerActor.AssignSourceRef, source)
        source
      }
  }

  // To client
  private val sink: Sink[Message, Future[Done]] = Sink.foreach {
    case tm: TextMessage => // TODO move to tests
      for {
        strictText <- tm.toStrict(1.second).map(_.getStrictText)
        clientMessage <- {
          log.trace(s"Got $strictText")
          Try { Json.parse(strictText).as[WsServerMessage] } match {
            case Failure(exception) => Future.failed(exception)
            case Success(x)         => Future.successful { receive(x).foreach(wsHandlerRef ! _) }
          }
        }
      } yield clientMessage

    case bm: BinaryMessage =>
      bm.dataStream.runWith(Sink.ignore)
      Future.failed { new IllegalArgumentException("Binary messages are not supported") }
  }

  private val flow: Flow[Message, TextMessage.Strict, Future[Done]] =
    Flow.fromSinkAndSourceCoupled(sink, source).watchTermination() {
      case (_, f) =>
        f.onComplete {
          case Success(_) => log.info(s"WebSocket connection to $uri successfully closed")
          case Failure(e) => log.error(s"WebSocket connection to $uri closed with an error", e)
        }(materializer.executionContext)
        f
    }

  val (connectionResponse, closed) = Http().singleWebSocketRequest(WebSocketRequest(uri), flow)

  def send(message: WsClientMessage): Unit = wsHandlerRef ! TestWsHandlerActor.SendToServer(message)

  def isClosed: Boolean = closed.isCompleted

  def close(): Future[Done] = {
    if (!isClosed) wsHandlerRef ! TestWsHandlerActor.CloseConnection
    closed
  }
}
Example 24
Source File: QueueSourceTest.scala from intro-to-akka-streams (Apache License 2.0)
package com.github.dnvriend.streams.source

import akka.stream.OverflowStrategy
import akka.stream.scaladsl.{ Keep, Sink, Source, SourceQueueWithComplete }
import com.github.dnvriend.streams.TestSpec

import scala.collection.immutable._
import scala.concurrent.Future

class QueueSourceTest extends TestSpec {
  it should "queue a b and c and return Seq(a, b, c)" in {
    val (queue: SourceQueueWithComplete[String], xs: Future[Seq[String]]) =
      Source.queue[String](Int.MaxValue, OverflowStrategy.backpressure).toMat(Sink.seq)(Keep.both).run()

    queue.offer("a").toTry should be a 'success // offer 'a' to stream
    queue.offer("b").toTry should be a 'success // b
    queue.offer("c").toTry should be a 'success // and c

    // complete the queue
    queue.complete()
    queue.watchCompletion().toTry should be a 'success

    // get the results of the stream
    xs.futureValue shouldEqual Seq("a", "b", "c")
    xs.futureValue should not equal Seq("c", "b", "a")
  }
}
Example 25
Source File: FailedSource.scala from intro-to-akka-streams (Apache License 2.0)
package com.github.dnvriend.streams.source

import akka.Done
import akka.stream.OverflowStrategy
import akka.stream.scaladsl.{ Keep, Merge, Sink, Source, SourceQueueWithComplete }
import com.github.dnvriend.streams.TestSpec

import scala.concurrent.Future
import scala.concurrent.duration._
import scala.collection.immutable._

class FailedSource extends TestSpec {

  it should "fail the stream" in {
    Source.failed[Int](new RuntimeException("test error")).testProbe { tp ⇒
      tp.request(Long.MaxValue)
      tp.expectError()
    }
  }

  it should "complete a stream" in {
    val (queue: SourceQueueWithComplete[Int], done: Future[Done]) =
      Source.queue[Int](1, OverflowStrategy.dropNew)
        .toMat(Sink.ignore)(Keep.both).run
    queue.complete()
    done.toTry should be a 'success
  }

  it should "complete a stream normally" in {
    val (queue: SourceQueueWithComplete[String], done: Future[Done]) =
      Source.queue[String](1, OverflowStrategy.dropNew).flatMapConcat {
        case "stop" ⇒ Source.failed(new RuntimeException("test error"))
        case str ⇒ Source.single(str)
      }.toMat(Sink.seq)(Keep.both).run

    Thread.sleep(3000)
    queue.offer("foo").futureValue
    queue.offer("bar").futureValue
    queue.complete()
    done.futureValue shouldBe List("foo", "bar")
  }

  it should "force stop a stream with an error" in {
    val (queue: SourceQueueWithComplete[String], done: Future[Done]) =
      Source.queue[String](1, OverflowStrategy.dropNew).flatMapConcat {
        case "stop" ⇒ Source.failed(new RuntimeException("test error"))
        case str ⇒ Source.single(str)
      }.toMat(Sink.seq)(Keep.both).run

    Thread.sleep(3000)
    queue.offer("stop").futureValue
    done.toTry should be a 'failure
  }
}
Example 26
Source File: Ctx.scala from sangria-subscriptions-example (Apache License 2.0)
import akka.NotUsed
import akka.util.Timeout
import schema.MutationError
import akka.actor.ActorRef
import generic.Event
import generic.MemoryEventStore._
import generic.View.{Get, GetMany}
import akka.pattern.ask
import akka.stream.OverflowStrategy
import akka.stream.scaladsl.Source
import org.reactivestreams.Publisher

import scala.concurrent.{ExecutionContext, Future}

case class Ctx(
  authors: ActorRef,
  articles: ActorRef,
  eventStore: ActorRef,
  eventStorePublisher: Publisher[Event],
  ec: ExecutionContext,
  to: Timeout
) extends Mutation {
  implicit def executionContext = ec
  implicit def timeout = to

  lazy val eventStream: Source[Event, NotUsed] =
    Source.fromPublisher(eventStorePublisher).buffer(100, OverflowStrategy.fail)

  def addEvent[T](view: ActorRef, event: Event) =
    (eventStore ? AddEvent(event)).flatMap {
      case EventAdded(_) ⇒
        (view ? Get(event.id, Some(event.version))).mapTo[Option[T]]
      case OverCapacity(_) ⇒
        throw MutationError("Service is overloaded.")
      case ConcurrentModification(_, latestVersion) ⇒
        throw MutationError(s"Concurrent Modification error for entity '${event.id}'. Latest entity version is '$latestVersion'.")
    }

  def addDeleteEvent(event: Event) =
    (eventStore ? AddEvent(event)).map {
      case EventAdded(e) ⇒ e
      case OverCapacity(_) ⇒
        throw MutationError("Service is overloaded.")
      case ConcurrentModification(_, latestVersion) ⇒
        throw MutationError(s"Concurrent Modification error for entity '${event.id}'. Latest entity version is '$latestVersion'.")
    }

  def loadLatestVersion(id: String, version: Long): Future[Long] =
    (eventStore ? LatestEventVersion(id)) map {
      case Some(latestVersion: Long) if version != latestVersion ⇒
        throw MutationError(s"Concurrent Modification error for entity '$id'. Latest entity version is '$latestVersion'.")
      case Some(version: Long) ⇒ version + 1
      case _ ⇒ throw MutationError(s"Entity with ID '$id' does not exist.")
    }

  def loadAuthors(ids: Seq[String]) = (authors ? GetMany(ids)).mapTo[Seq[Author]]
}
Example 27
Source File: ParametrizedFlow.scala from akka_streams_tutorial (MIT License)
package sample.stream_shared_state

import akka.Done
import akka.actor.{ActorSystem, Cancellable}
import akka.stream.scaladsl.{Flow, GraphDSL, Keep, Sink, Source, SourceQueueWithComplete, Zip}
import akka.stream.{FlowShape, OverflowStrategy}

import scala.collection.immutable
import scala.concurrent.Future
import scala.concurrent.duration._
import scala.util.{Failure, Success}

object ParametrizedFlow extends App {
  val service = ParameterizedFlowService

  Thread.sleep(5000)
  service.update(1.0)

  Thread.sleep(2000)
  service.update(1.5)
  Thread.sleep(2000)
  service.cancel()
  Thread.sleep(2000)

  println(service.result())
}

object ParameterizedFlowService {
  implicit val system = ActorSystem("ParameterizedFlowService")
  implicit val executionContext = system.dispatcher

  def update(element: Double): Unit = flow._1._2.offer(element)

  def cancel(): Boolean = flow._1._1.cancel()

  def result(): Future[Seq[Double]] = flow._2

  val fun = (flowValue: Int, paramValue: Double) => flowValue * paramValue

  val flow: ((Cancellable, SourceQueueWithComplete[Double]), Future[immutable.Seq[Double]]) =
    Source.tick(0.seconds, 500.millis, 10)
      .viaMat(createParamFlow(1, OverflowStrategy.dropBuffer, 0.5)(fun))(Keep.both)
      .wireTap(x => println(x))
      .toMat(Sink.seq)(Keep.both)
      .run()

  val done: Future[Done] = flow._1._2.watchCompletion()
  terminateWhen(done)

  private def createParamFlow[A, P, O](bufferSize: Int, overflowStrategy: OverflowStrategy, initialParam: P)(fun: (A, P) => O) =
    Flow.fromGraph(GraphDSL.create(Source.queue[P](bufferSize, overflowStrategy)) { implicit builder => queue =>
      import GraphDSL.Implicits._
      val zip = builder.add(Zip[A, P]())
      // Interesting use of the extrapolate operator
      // based on https://doc.akka.io/docs/akka/current/stream/stream-rate.html#understanding-extrapolate-and-expand
      val extra = builder.add(Flow[P].extrapolate(Iterator.continually(_), Some(initialParam)))
      val map = builder.add(Flow[(A, P)].map(r => fun(r._1, r._2)))

      queue ~> extra ~> zip.in1
      zip.out ~> map
      FlowShape(zip.in0, map.out)
    })

  private def terminateWhen(done: Future[_]) = {
    done.onComplete {
      case Success(_) =>
        println("Flow Success. About to terminate...")
        system.terminate()
      case Failure(e) =>
        println(s"Flow Failure: $e. About to terminate...")
        system.terminate()
    }
  }
}
Example 28
Source File: TrackerImplTest.scala from daml (Apache License 2.0)
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.platform.apiserver.services.tracking

import akka.NotUsed
import akka.stream.OverflowStrategy
import akka.stream.scaladsl.{Keep, Source, SourceQueueWithComplete}
import akka.stream.testkit.TestSubscriber
import akka.stream.testkit.scaladsl.TestSink
import com.daml.ledger.api.testing.utils.{AkkaBeforeAndAfterAll, IsStatusException, TestingException}
import com.daml.ledger.api.v1.command_service.SubmitAndWaitRequest
import com.daml.ledger.api.v1.commands.Commands
import com.daml.ledger.api.v1.completion.Completion
import com.daml.dec.DirectExecutionContext
import com.google.rpc.status.{Status => RpcStatus}
import io.grpc.Status
import org.scalatest.concurrent.ScalaFutures
import org.scalatest.{BeforeAndAfterEach, Matchers, Succeeded, WordSpec}

import scala.concurrent.ExecutionContext.Implicits.global

class TrackerImplTest
    extends WordSpec
    with Matchers
    with BeforeAndAfterEach
    with ScalaFutures
    with AkkaBeforeAndAfterAll {

  private var sut: Tracker = _
  private var consumer: TestSubscriber.Probe[NotUsed] = _
  private var queue: SourceQueueWithComplete[TrackerImpl.QueueInput] = _

  private def input(cid: Int) = SubmitAndWaitRequest(Some(Commands(commandId = cid.toString)))

  override protected def beforeEach(): Unit = {
    val (q, sink) = Source
      .queue[TrackerImpl.QueueInput](1, OverflowStrategy.dropNew)
      .map { in =>
        in.context.success(Completion(in.value.getCommands.commandId, Some(RpcStatus())))
        NotUsed
      }
      .toMat(TestSink.probe[NotUsed])(Keep.both)
      .run()
    queue = q
    sut = new TrackerImpl(q)
    consumer = sink
  }

  override protected def afterEach(): Unit = {
    consumer.cancel()
    queue.complete()
  }

  "Tracker Implementation" when {
    "input is submitted, and the queue is available" should {
      "work successfully" in {
        val resultF1 = sut.track(input(1))
        consumer.requestNext()
        val resultF = resultF1.flatMap(_ => sut.track(input(2)))(DirectExecutionContext)
        consumer.requestNext()
        whenReady(resultF)(_ => Succeeded)
      }
    }

    "input is submitted, and the queue is backpressuring" should {
      "return a RESOURCE_EXHAUSTED error" in {
        sut.track(input(1))
        whenReady(sut.track(input(2)).failed)(IsStatusException(Status.RESOURCE_EXHAUSTED))
      }
    }

    "input is submitted, and the queue has been completed" should {
      "return an ABORTED error" in {
        queue.complete()
        whenReady(sut.track(input(2)).failed)(IsStatusException(Status.ABORTED))
      }
    }

    "input is submitted, and the queue has failed" should {
      "return an ABORTED error" in {
        queue.fail(TestingException("The queue fails with this error."))
        whenReady(sut.track(input(2)).failed)(IsStatusException(Status.ABORTED))
      }
    }
  }
}
Example 29
Source File: QueryProgress.scala From clickhouse-scala-client with GNU Lesser General Public License v3.0 | 5 votes |
package com.crobox.clickhouse.internal.progress

import akka.NotUsed
import akka.stream.scaladsl.{BroadcastHub, Keep, RunnableGraph, Source, SourceQueueWithComplete}
import akka.stream.{ActorAttributes, OverflowStrategy, Supervision}
import com.typesafe.scalalogging.LazyLogging
import spray.json._
import spray.json.DefaultJsonProtocol._

import scala.util.{Failure, Success, Try}

object QueryProgress extends LazyLogging {

  sealed trait QueryProgress
  case object QueryAccepted extends QueryProgress
  case object QueryFinished extends QueryProgress
  case object QueryRejected extends QueryProgress
  case class QueryFailed(cause: Throwable) extends QueryProgress
  case class QueryRetry(cause: Throwable, retryNumber: Int) extends QueryProgress

  case class ClickhouseQueryProgress(identifier: String, progress: QueryProgress)
  case class Progress(rowsRead: Long, bytesRead: Long, rowsWritten: Long, bytesWritten: Long, totalRows: Long) extends QueryProgress

  def queryProgressStream: RunnableGraph[(SourceQueueWithComplete[String], Source[ClickhouseQueryProgress, NotUsed])] =
    Source
      .queue[String](1000, OverflowStrategy.dropHead)
      .map[Option[ClickhouseQueryProgress]](queryAndProgress => {
        queryAndProgress.split("\n", 2).toList match {
          case queryId :: ProgressHeadersAsEventsStage.AcceptedMark :: Nil =>
            Some(ClickhouseQueryProgress(queryId, QueryAccepted))
          case queryId :: progressJson :: Nil =>
            Try {
              progressJson.parseJson match {
                case JsObject(fields) if fields.size == 3 =>
                  ClickhouseQueryProgress(
                    queryId,
                    Progress(
                      fields("read_rows").convertTo[String].toLong,
                      fields("read_bytes").convertTo[String].toLong,
                      0,
                      0,
                      fields("total_rows").convertTo[String].toLong
                    )
                  )
                case JsObject(fields) if fields.size == 5 =>
                  ClickhouseQueryProgress(
                    queryId,
                    Progress(
                      fields("read_rows").convertTo[String].toLong,
                      fields("read_bytes").convertTo[String].toLong,
                      fields("written_rows").convertTo[String].toLong,
                      fields("written_bytes").convertTo[String].toLong,
                      fields("total_rows_to_read").convertTo[String].toLong
                    )
                  )
                case _ => throw new IllegalArgumentException(s"Cannot extract progress from $progressJson")
              }
            } match {
              case Success(value) => Some(value)
              case Failure(exception) =>
                logger.warn(s"Failed to parse json $progressJson", exception)
                None
            }
          case other @ _ =>
            logger.warn(s"Could not get progress from $other")
            None
        }
      })
      .collect {
        case Some(progress) => progress
      }
      .withAttributes(ActorAttributes.supervisionStrategy({
        case ex @ _ =>
          logger.warn("Detected failure in the query progress stream, resuming operation.", ex)
          Supervision.Resume
      }))
      .toMat(BroadcastHub.sink)(Keep.both)
}
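Because the graph ends in a BroadcastHub sink, a single materialized stream can feed any number of late-arriving consumers. A hedged sketch; the query id and progress JSON are made-up values:

import akka.actor.ActorSystem
import akka.stream.ActorMaterializer

object QueryProgressSketch extends App {
  implicit val system = ActorSystem("QueryProgressSketch")
  implicit val materializer = ActorMaterializer()

  val (queue, progressSource) = QueryProgress.queryProgressStream.run()

  // Each runForeach attaches an independent consumer to the BroadcastHub.
  progressSource.runForeach(p => println(s"consumer A: $p"))
  progressSource.runForeach(p => println(s"consumer B: $p"))

  // First line is the query id, second the progress JSON, as split("\n", 2) expects.
  queue.offer("query-1\n" + """{"read_rows":"1","read_bytes":"10","total_rows":"5"}""")
}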
Example 30
Source File: CarbonClient.scala From akka-http-metrics with Apache License 2.0 | 5 votes |
package fr.davit.akka.http.metrics.graphite

import java.time.{Clock, Instant}

import akka.NotUsed
import akka.actor.ActorSystem
import akka.event.Logging
import akka.stream.scaladsl.{Flow, Keep, RestartFlow, Sink, Source, Tcp}
import akka.stream.{OverflowStrategy, QueueOfferResult}
import akka.util.ByteString
import fr.davit.akka.http.metrics.core.Dimension

import scala.concurrent.Await
import scala.concurrent.duration.{Duration, _}

object CarbonClient {
  def apply(host: String, port: Int)(implicit system: ActorSystem): CarbonClient = new CarbonClient(host, port)
}

class CarbonClient(host: String, port: Int)(implicit system: ActorSystem) extends AutoCloseable {

  private val logger = Logging(system.eventStream, classOf[CarbonClient])
  protected val clock: Clock = Clock.systemUTC()

  private def serialize[T](name: String, value: T, dimensions: Seq[Dimension], ts: Instant): ByteString = {
    val tags = dimensions.map(d => d.key + "=" + d.value).toList
    val taggedMetric = (name :: tags).mkString(";")
    ByteString(s"$taggedMetric $value ${ts.getEpochSecond}\n")
  }

  // TODO read backoff from config
  private def connection: Flow[ByteString, ByteString, NotUsed] =
    RestartFlow.withBackoff(
      minBackoff = 3.seconds,
      maxBackoff = 30.seconds,
      randomFactor = 0.2, // adds 20% "noise" to vary the intervals slightly
      maxRestarts = -1    // keep retrying forever
    )(() => Tcp().outgoingConnection(host, port))

  private val queue = Source
    .queue[ByteString](19, OverflowStrategy.dropHead)
    .via(connection)
    .toMat(Sink.ignore)(Keep.left)
    .run()

  def publish[T](
      name: String,
      value: T,
      dimensions: Seq[Dimension] = Seq.empty,
      ts: Instant = Instant.now(clock)): Unit = {
    // it's reasonable to block until the message is enqueued
    Await.result(queue.offer(serialize(name, value, dimensions, ts)), Duration.Inf) match {
      case QueueOfferResult.Enqueued    => logger.debug("Metric {} enqueued", name)
      case QueueOfferResult.Dropped     => logger.debug("Metric {} dropped", name)
      case QueueOfferResult.Failure(e)  => logger.error(e, s"Failed publishing metric $name")
      case QueueOfferResult.QueueClosed => throw new Exception("Failed publishing metric to closed carbon client")
    }
  }

  override def close(): Unit = {
    queue.complete()
    Await.result(queue.watchCompletion(), Duration.Inf)
  }
}
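A hedged usage sketch; the host and port are assumptions (2003 is the conventional Carbon plaintext port and is not defined by this file):

import akka.actor.ActorSystem

object CarbonClientSketch extends App {
  implicit val system = ActorSystem("CarbonClientSketch")

  val client = CarbonClient("graphite.example.com", 2003)
  client.publish("http.requests.count", 42) // blocks briefly until enqueued
  client.close()                            // completes the queue and awaits the TCP stream
}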
Example 31
Source File: ProducerStream.scala From reactive-kafka-microservice-template with Apache License 2.0 | 5 votes |
package com.omearac.producers

import akka.actor.{ActorRef, ActorSystem}
import akka.kafka.ProducerSettings
import akka.kafka.scaladsl.Producer
import akka.stream.OverflowStrategy
import akka.stream.scaladsl.{Flow, Source}
import com.omearac.shared.JsonMessageConversion.Conversion
import com.omearac.shared.{AkkaStreams, EventSourcing}
import org.apache.kafka.clients.producer.ProducerRecord
import org.apache.kafka.common.serialization.{ByteArraySerializer, StringSerializer}

trait ProducerStream extends AkkaStreams with EventSourcing {
  implicit val system: ActorSystem
  def self: ActorRef

  def createStreamSource[msgType] = {
    Source.queue[msgType](Int.MaxValue, OverflowStrategy.backpressure)
  }

  def createStreamSink(producerProperties: Map[String, String]) = {
    val kafkaMBAddress = producerProperties("bootstrap-servers")
    val producerSettings = ProducerSettings(system, new ByteArraySerializer, new StringSerializer)
      .withBootstrapServers(kafkaMBAddress)
    Producer.plainSink(producerSettings)
  }

  def createStreamFlow[msgType: Conversion](producerProperties: Map[String, String]) = {
    val numberOfPartitions = producerProperties("num.partitions").toInt - 1
    val topicToPublish = producerProperties("publish-topic")
    val rand = new scala.util.Random
    val range = 0 to numberOfPartitions

    Flow[msgType].map { msg =>
      val partition = range(rand.nextInt(range.length))
      val stringJSONMessage = Conversion[msgType].convertToJson(msg)
      new ProducerRecord[Array[Byte], String](topicToPublish, partition, null, stringJSONMessage)
    }
  }
}
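A hedged wiring sketch of how the three builders compose. MyMessage, its Conversion instance, and all property values are hypothetical, and the code assumes it runs inside something that mixes in ProducerStream:

val producerProperties = Map(
  "bootstrap-servers" -> "localhost:9092", // hypothetical broker address
  "num.partitions"    -> "3",
  "publish-topic"     -> "events"
)

val queue = createStreamSource[MyMessage]               // SourceQueueWithComplete[MyMessage]
  .via(createStreamFlow[MyMessage](producerProperties)) // MyMessage => ProducerRecord, random partition
  .to(createStreamSink(producerProperties))             // alpakka-kafka producer sink
  .run()                                                // offer MyMessage values to `queue`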
Example 32
Source File: PoolingRestClient.scala From openwhisk with Apache License 2.0 | 5 votes |
package org.apache.openwhisk.http

import akka.actor.ActorSystem
import akka.http.scaladsl.Http
import akka.http.scaladsl.marshallers.sprayjson.SprayJsonSupport._
import akka.http.scaladsl.marshalling._
import akka.http.scaladsl.model._
import akka.http.scaladsl.settings.ConnectionPoolSettings
import akka.http.scaladsl.unmarshalling._
import akka.stream.{ActorMaterializer, OverflowStrategy, QueueOfferResult}
import akka.stream.scaladsl.{Flow, _}
import spray.json._

import scala.concurrent.{ExecutionContext, Future, Promise}
import scala.concurrent.duration._
import scala.util.{Failure, Success, Try}

  // Excerpt: the enclosing `class PoolingRestClient(...)` declaration and its
  // `request` method are elided in this snippet; `requestJson` delegates to them.
  def requestJson[T: RootJsonReader](futureRequest: Future[HttpRequest]): Future[Either[StatusCode, T]] =
    request(futureRequest).flatMap { response =>
      if (response.status.isSuccess) {
        Unmarshal(response.entity.withoutSizeLimit).to[T].map(Right.apply)
      } else {
        Unmarshal(response.entity).to[String].flatMap { body =>
          val statusCode = response.status
          val reason = if (body.nonEmpty) s"${statusCode.reason} (details: $body)" else statusCode.reason
          val customStatusCode = StatusCodes
            .custom(intValue = statusCode.intValue, reason = reason, defaultMessage = statusCode.defaultMessage)
          // This is important, as it drains the entity stream.
          // Otherwise the connection stays open and the pool dries up.
          response.discardEntityBytes().future.map(_ => Left(customStatusCode))
        }
      }
    }

  def shutdown(): Future[Unit] = Future.successful(materializer.shutdown())
}

object PoolingRestClient {

  def mkRequest(method: HttpMethod,
                uri: Uri,
                body: Future[MessageEntity] = Future.successful(HttpEntity.Empty),
                headers: List[HttpHeader] = List.empty)(implicit ec: ExecutionContext): Future[HttpRequest] = {
    body.map { b =>
      HttpRequest(method, uri, headers, b)
    }
  }

  def mkJsonRequest(method: HttpMethod, uri: Uri, body: JsValue, headers: List[HttpHeader] = List.empty)(
    implicit ec: ExecutionContext): Future[HttpRequest] = {
    val b = Marshal(body).to[MessageEntity]
    mkRequest(method, uri, b, headers)
  }
}
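The companion helpers build requests asynchronously so entity marshalling never blocks. A hedged sketch of calling them; the URI and JSON payload are made-up values:

import akka.http.scaladsl.model.{HttpMethods, Uri}
import org.apache.openwhisk.http.PoolingRestClient._
import spray.json._

import scala.concurrent.ExecutionContext.Implicits.global

val requestF = mkJsonRequest(HttpMethods.POST, Uri("/whisks"), JsObject("name" -> JsString("demo")))
// requestF: Future[HttpRequest]; a PoolingRestClient instance could then call
// client.requestJson[JsObject](requestF) for a Future[Either[StatusCode, JsObject]]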
Example 33
Source File: KafkaEventProducer.scala From openwhisk with Apache License 2.0 | 5 votes |
package org.apache.openwhisk.core.database.cosmosdb.cache

import akka.Done
import akka.actor.ActorSystem
import akka.kafka.scaladsl.Producer
import akka.kafka.{ProducerMessage, ProducerSettings}
import akka.stream.scaladsl.{Keep, Sink, Source}
import akka.stream.{ActorMaterializer, OverflowStrategy, QueueOfferResult}
import org.apache.kafka.clients.consumer.ConsumerConfig
import org.apache.kafka.clients.producer.ProducerRecord
import org.apache.openwhisk.connector.kafka.KamonMetricsReporter

import scala.collection.immutable.Seq
import scala.concurrent.{ExecutionContext, Future, Promise}

case class KafkaEventProducer(
  settings: ProducerSettings[String, String],
  topic: String,
  eventProducerConfig: EventProducerConfig)(implicit system: ActorSystem, materializer: ActorMaterializer)
    extends EventProducer {
  private implicit val executionContext: ExecutionContext = system.dispatcher

  private val queue = Source
    .queue[(Seq[String], Promise[Done])](eventProducerConfig.bufferSize, OverflowStrategy.dropNew) // TODO use backpressure
    .map {
      case (msgs, p) =>
        ProducerMessage.multi(msgs.map(newRecord), p)
    }
    .via(Producer.flexiFlow(producerSettings))
    .map {
      case ProducerMessage.MultiResult(_, passThrough) =>
        passThrough.success(Done)
      case _ => // only multi mode is used above, so other result types need no handling
    }
    .toMat(Sink.ignore)(Keep.left)
    .run

  override def send(msg: Seq[String]): Future[Done] = {
    val promise = Promise[Done]
    queue.offer(msg -> promise).flatMap {
      case QueueOfferResult.Enqueued    => promise.future
      case QueueOfferResult.Dropped     => Future.failed(new Exception("Kafka request queue is full."))
      case QueueOfferResult.QueueClosed => Future.failed(new Exception("Kafka request queue was closed."))
      case QueueOfferResult.Failure(f)  => Future.failed(f)
    }
  }

  def close(): Future[Done] = {
    queue.complete()
    queue.watchCompletion()
  }

  private def newRecord(msg: String) = new ProducerRecord[String, String](topic, "messages", msg)

  private def producerSettings =
    settings.withProperty(ConsumerConfig.METRIC_REPORTER_CLASSES_CONFIG, KamonMetricsReporter.name)
}
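A hedged sketch of driving send(); the bootstrap address and topic are assumptions, and EventProducerConfig's constructor is not shown in this snippet, so it is left as a placeholder:

import akka.actor.ActorSystem
import akka.kafka.ProducerSettings
import akka.stream.ActorMaterializer
import org.apache.kafka.common.serialization.StringSerializer

import scala.collection.immutable.Seq

implicit val system = ActorSystem("KafkaEventProducerSketch")
implicit val materializer = ActorMaterializer()
import system.dispatcher

val producerSettings = ProducerSettings(system, new StringSerializer, new StringSerializer)
  .withBootstrapServers("localhost:9092")

val config: EventProducerConfig = ??? // fields not shown here; it must at least provide bufferSize
val producer = KafkaEventProducer(producerSettings, "cache-invalidation", config)

producer.send(Seq("""{"id":"doc-1"}""")).onComplete(r => println(s"send completed: $r"))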
Example 34
Source File: IntegratingWithActorsApplication.scala From Akka-Cookbook with MIT License | 5 votes |
package com.packt.chapter8

import akka.actor.{ActorSystem, Props}
import akka.stream.{ActorMaterializer, OverflowStrategy}
import akka.stream.scaladsl._
import akka.pattern.ask
import akka.util.Timeout
import com.packt.chapter8.SinkActor.{AckSinkActor, CompletedSinkActor, InitSinkActor}

import scala.concurrent.duration._

object IntegratingWithActorsApplication extends App {
  implicit val actorSystem = ActorSystem("IntegratingWithActors")
  implicit val actorMaterializer = ActorMaterializer()
  implicit val askTimeout = Timeout(5 seconds)

  val stringCleaner = actorSystem.actorOf(Props[StringCleanerActor])
  val sinkActor = actorSystem.actorOf(Props[SinkActor])

  val source = Source.queue[String](100, OverflowStrategy.backpressure)
  val sink = Sink.actorRefWithAck[String](sinkActor, InitSinkActor, AckSinkActor, CompletedSinkActor)

  val queue = source
    .mapAsync(parallelism = 5)(elem => (stringCleaner ? elem).mapTo[String])
    .to(sink)
    .run()

  actorSystem.actorOf(SourceActor.props(queue))
}
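SinkActor's implementation is not shown here; the sketch below is a plausible, assumed shape of the ack protocol that Sink.actorRefWithAck expects, using the message objects imported above:

import akka.actor.Actor
import com.packt.chapter8.SinkActor.{AckSinkActor, CompletedSinkActor, InitSinkActor}

class AckingSinkActor extends Actor {
  def receive: Receive = {
    case InitSinkActor =>
      sender() ! AckSinkActor // signal demand for the first element
    case s: String =>
      println(s"received $s")
      sender() ! AckSinkActor // ack each element to receive the next one
    case CompletedSinkActor =>
      context.stop(self) // the stream has completed
  }
}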
Example 35
Source File: InternalSubscriberStub.scala From lagom with Apache License 2.0 | 5 votes |
package com.lightbend.lagom.internal.testkit

import akka.Done
import akka.actor.ActorRef
import akka.stream.Materializer
import akka.stream.OverflowStrategy
import akka.stream.scaladsl.Flow
import akka.stream.scaladsl.Keep
import akka.stream.scaladsl.Sink
import akka.stream.scaladsl.Source

import scala.concurrent.Future
import scala.language.higherKinds

private[lagom] class InternalSubscriberStub[Payload, Message[_]](
    groupId: String,
    topicBuffer: ActorRef
)(implicit materializer: Materializer) {

  def mostOnceSource: Source[Message[Payload], _] = {
    Source
      .actorRef[Message[Payload]](1024, OverflowStrategy.fail)
      .prependMat(Source.empty)(subscribeToBuffer)
  }

  def leastOnce(flow: Flow[Message[Payload], Done, _]): Future[Done] = {
    mostOnceSource
      .via(flow)
      .toMat(Sink.ignore)(Keep.right[Any, Future[Done]])
      .run()
  }

  private def subscribeToBuffer[R](ref: ActorRef, t: R) = {
    topicBuffer.tell(TopicBufferActor.SubscribeToBuffer(groupId, ref), ActorRef.noSender)
    t
  }
}
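A minimal sketch of the Source.actorRef pattern that mostOnceSource relies on: the materialized ActorRef feeds the stream, and with OverflowStrategy.fail an overflow of the internal buffer fails the stream instead of silently dropping messages. Names here are illustrative:

import akka.actor.ActorSystem
import akka.stream.{ActorMaterializer, OverflowStrategy}
import akka.stream.scaladsl.{Sink, Source}

object ActorRefSourceSketch extends App {
  implicit val system = ActorSystem("ActorRefSourceSketch")
  implicit val materializer = ActorMaterializer()

  val ref = Source
    .actorRef[String](bufferSize = 16, overflowStrategy = OverflowStrategy.fail)
    .to(Sink.foreach(println))
    .run()

  ref ! "hello" // delivered downstream; exceeding the buffer fails the stream
}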
Example 36
Source File: Demo.scala From chordial with BSD 3-Clause "New" or "Revised" License | 5 votes |
package com.tristanpenman.chordial.demo

import akka.actor.ActorSystem
import akka.http.scaladsl.Http
import akka.http.scaladsl.model.ws.TextMessage
import akka.stream.scaladsl._
import akka.stream.{ActorAttributes, ActorMaterializer, OverflowStrategy, Supervision}
import akka.util.Timeout
import com.tristanpenman.chordial.core.Event
import com.tristanpenman.chordial.core.Event._

import scala.concurrent.Await
import scala.concurrent.duration._

object Demo extends App {
  implicit val system = ActorSystem("chordial-demo")
  implicit val mat = ActorMaterializer()
  implicit val ec = system.dispatcher
  implicit val timeout: Timeout = 3.seconds

  // Generate IDs ranging from 0 to 63 (inclusive) so that when visualising the network,
  // each node represents a ~5.625 degree arc on the ring
  private val keyspaceBits = 6

  // Create an actor that is responsible for creating and terminating nodes, while ensuring
  // that nodes are assigned unique IDs in the Chord key-space
  private val governor = system.actorOf(Governor.props(keyspaceBits), "Governor")

  // Create an actor that will log events published by nodes
  private val eventWriter = system.actorOf(EventWriter.props, "EventWriter")

  // Subscribe the EventWriter actor to events published by nodes
  system.eventStream.subscribe(eventWriter, classOf[Event])

  val (listener, eventsSource) = Source
    .actorRef[Event](Int.MaxValue, OverflowStrategy.fail)
    .map {
      case FingerReset(nodeId: Long, index: Int) =>
        s"""{ "type": "FingerReset", "nodeId": $nodeId, "index": $index }"""
      case FingerUpdated(nodeId: Long, index: Int, fingerId: Long) =>
        s"""{ "type": "FingerUpdated", "nodeId": $nodeId, "index": $index, "fingerId": $fingerId }"""
      case NodeCreated(nodeId, successorId) =>
        s"""{ "type": "NodeCreated", "nodeId": $nodeId, "successorId": $successorId }"""
      case NodeShuttingDown(nodeId) =>
        s"""{ "type": "NodeDeleted", "nodeId": $nodeId }"""
      case PredecessorReset(nodeId) =>
        s"""{ "type": "PredecessorReset", "nodeId": $nodeId }"""
      case PredecessorUpdated(nodeId, predecessorId) =>
        s"""{ "type": "PredecessorUpdated", "nodeId": $nodeId, "predecessorId": $predecessorId }"""
      case SuccessorListUpdated(nodeId, primarySuccessorId, _) =>
        s"""{ "type": "SuccessorUpdated", "nodeId": $nodeId, "successorId": $primarySuccessorId }"""
    }
    .map(TextMessage(_))
    .withAttributes(ActorAttributes.supervisionStrategy(Supervision.resumingDecider))
    .toMat(BroadcastHub.sink[TextMessage](bufferSize = 16))(Keep.both)
    .run()

  system.eventStream.subscribe(listener, classOf[Event])

  Http().bindAndHandle(WebSocketWorker(governor, eventsSource), "0.0.0.0", 4567)

  Await.result(system.whenTerminated, Duration.Inf)
}
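Since listener is subscribed to the system event stream, any actor can publish an Event and every connected websocket client receives the corresponding JSON frame via the BroadcastHub. A hedged one-liner with made-up IDs:

system.eventStream.publish(NodeCreated(3L, 42L)) // pushes a "NodeCreated" TextMessage to all clients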