akka.stream.ActorAttributes Scala Examples
The following examples show how to use akka.stream.ActorAttributes.
Each example is drawn from an open-source project; the source file, project, and license are noted above the code.
Example 1
Source File: DispatcherUtils.scala from akka-persistence-dynamodb with Apache License 2.0
package com.github.j5ik2o.akka.persistence.dynamodb.utils

import akka.NotUsed
import akka.stream.ActorAttributes
import akka.stream.scaladsl.Flow
import com.github.j5ik2o.akka.persistence.dynamodb.config.PluginConfig
import com.github.j5ik2o.akka.persistence.dynamodb.config.client.ClientVersion

object DispatcherUtils extends LoggingSupport {

  def applyV1Dispatcher[A, B](pluginConfig: PluginConfig, flow: Flow[A, B, NotUsed]): Flow[A, B, NotUsed] = {
    (if (pluginConfig.clientConfig.clientVersion == ClientVersion.V1)
       pluginConfig.clientConfig.v1ClientConfig.dispatcherName
     else
       pluginConfig.clientConfig.v1DaxClientConfig.dispatcherName)
      .fold(flow) { name => flow.withAttributes(ActorAttributes.dispatcher(name)) }
  }

  def applyV2Dispatcher[A, B](pluginConfig: PluginConfig, flow: Flow[A, B, NotUsed]): Flow[A, B, NotUsed] = {
    pluginConfig.clientConfig.v2ClientConfig.dispatcherName.fold(flow) { name =>
      flow.withAttributes(ActorAttributes.dispatcher(name))
    }
  }
}
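Plugin-specific plumbing aside, the technique here is simply flow.withAttributes(ActorAttributes.dispatcher(name)). A minimal self-contained sketch; the dispatcher name and element type are illustrative, not something the plugin defines:

import akka.NotUsed
import akka.stream.ActorAttributes
import akka.stream.scaladsl.Flow

object DispatcherAttributeSketch {
  // Runs this flow's stages on the dispatcher configured under
  // "my-app.io-dispatcher" instead of the stream's default dispatcher.
  val flowOnDedicatedDispatcher: Flow[Int, Int, NotUsed] =
    Flow[Int]
      .map(_ * 2)
      .withAttributes(ActorAttributes.dispatcher("my-app.io-dispatcher"))
}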
Example 2
Source File: AsyncExecution.scala from akka_streams_tutorial with MIT License
package sample.stream

import akka.Done
import akka.actor.ActorSystem
import akka.stream.ActorAttributes
import akka.stream.scaladsl.{Flow, Sink, Source}
import org.slf4j.{Logger, LoggerFactory}

import scala.concurrent.Future
import scala.util.{Failure, Success}

object AsyncExecution extends App {
  val logger: Logger = LoggerFactory.getLogger(this.getClass)
  implicit val system = ActorSystem("AsyncExecution")
  implicit val ec = system.dispatcher

  def stage(name: String) =
    Flow[Int]
      .wireTap(index => logger.info(s"Stage $name processing element $index by ${Thread.currentThread().getName}"))

  def stageBlocking(name: String) =
    Flow[Int]
      .wireTap(index => logger.info(s"Stage $name processing element $index by ${Thread.currentThread().getName}"))
      .wireTap(_ => Thread.sleep(5000))
      .withAttributes(ActorAttributes.dispatcher("custom-dispatcher-for-blocking"))

  def sinkBlocking: Sink[Int, Future[Done]] =
    Sink
      .foreach { index: Int =>
        Thread.sleep(2000)
        logger.info(s"Slow sink processing element $index by ${Thread.currentThread().getName}")
      }
      // Adding a custom dispatcher creates an async boundary; see the discussion in:
      // https://discuss.lightbend.com/t/how-can-i-make-sure-that-fileio-frompath-is-picking-up-my-dispatcher/6528/4
      .withAttributes(ActorAttributes.dispatcher("custom-dispatcher-for-blocking"))

  val done = Source(1 to 10)
    .via(stage("A")).async
    // When activated instead of alsoTo(sinkBlocking): elements for stage C are held up by stage B
    //.via(stageBlocking("B")).async
    .alsoTo(sinkBlocking).async
    .via(stage("C")).async
    .runWith(Sink.ignore)

  // With alsoTo(sinkBlocking) the stages A and C signal "done" too early and thus would terminate the whole stream.
  // The reason for this is the custom dispatcher in sinkBlocking.
  //terminateWhen(done)

  def terminateWhen(done: Future[_]) = {
    done.onComplete {
      case Success(_) =>
        println("Flow Success. About to terminate...")
        system.terminate()
      case Failure(e) =>
        println(s"Flow Failure: $e. About to terminate...")
        system.terminate()
    }
  }
}
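stageBlocking and sinkBlocking reference a dispatcher named custom-dispatcher-for-blocking, which must exist in configuration. One plausible definition, inlined here with ConfigFactory.parseString — an assumption for illustration, the tutorial's actual application.conf may differ:

import akka.actor.ActorSystem
import com.typesafe.config.ConfigFactory

object BlockingDispatcherConfig extends App {
  // A fixed-size thread pool isolates blocking calls from the default dispatcher.
  val config = ConfigFactory.parseString(
    """
      |custom-dispatcher-for-blocking {
      |  type = Dispatcher
      |  executor = "thread-pool-executor"
      |  thread-pool-executor {
      |    fixed-pool-size = 16
      |  }
      |  throughput = 1
      |}
      |""".stripMargin
  ).withFallback(ConfigFactory.load())

  val system = ActorSystem("AsyncExecution", config)
  // Streams run in this system can now use
  // ActorAttributes.dispatcher("custom-dispatcher-for-blocking").
  system.terminate()
}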
Example 3
Source File: ClusterAwareHostBalancer.scala from clickhouse-scala-client with GNU Lesser General Public License v3.0
package com.crobox.clickhouse.balancing

import akka.actor.{ActorRef, ActorSystem}
import akka.http.scaladsl.model.Uri
import akka.pattern.ask
import akka.stream.scaladsl.Sink
import akka.stream.{ActorAttributes, Materializer, Supervision}
import akka.util.Timeout
import com.crobox.clickhouse.balancing.discovery.ConnectionManagerActor.{GetConnection, LogDeadConnections}
import com.crobox.clickhouse.balancing.discovery.cluster.ClusterConnectionFlow

import scala.concurrent.duration._
import scala.concurrent.{ExecutionContext, Future}

case class ClusterAwareHostBalancer(host: Uri,
                                    cluster: String = "cluster",
                                    manager: ActorRef,
                                    scanningInterval: FiniteDuration)(
    implicit system: ActorSystem,
    connectionRetrievalTimeout: Timeout,
    ec: ExecutionContext,
    materializer: Materializer
) extends HostBalancer {

  ClusterConnectionFlow
    .clusterConnectionsFlow(Future.successful(host), scanningInterval, cluster)
    .withAttributes(
      ActorAttributes.supervisionStrategy({
        case ex: IllegalArgumentException =>
          logger.error("Failed resolving hosts for cluster, stopping the flow.", ex)
          Supervision.Stop
        case ex =>
          logger.error("Failed resolving hosts for cluster, resuming.", ex)
          Supervision.Resume
      })
    )
    .runWith(Sink.actorRef(manager, LogDeadConnections))

  override def nextHost: Future[Uri] = (manager ? GetConnection()).mapTo[Uri]
}
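The supervision attribute used above is easy to try in isolation. A self-contained sketch — the failing division and printed values are illustrative, not part of the ClickHouse client:

import akka.actor.ActorSystem
import akka.stream.{ActorAttributes, Supervision}
import akka.stream.scaladsl.{Sink, Source}

object SupervisionSketch extends App {
  implicit val system: ActorSystem = ActorSystem("supervision-sketch")

  val decider: Supervision.Decider = {
    case _: ArithmeticException => Supervision.Resume // drop the failing element
    case _                      => Supervision.Stop   // anything else fails the stream
  }

  Source(-2 to 2)
    .map(100 / _) // throws ArithmeticException on 0
    .withAttributes(ActorAttributes.supervisionStrategy(decider))
    .runWith(Sink.foreach(println)) // prints -50, -100, 100, 50
    .onComplete(_ => system.terminate())(system.dispatcher)
}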
Example 4
Source File: QueryProgress.scala from clickhouse-scala-client with GNU Lesser General Public License v3.0
package com.crobox.clickhouse.internal.progress

import akka.NotUsed
import akka.stream.scaladsl.{BroadcastHub, Keep, RunnableGraph, Source, SourceQueueWithComplete}
import akka.stream.{ActorAttributes, OverflowStrategy, Supervision}
import com.typesafe.scalalogging.LazyLogging
import spray.json._
import spray.json.DefaultJsonProtocol._

import scala.util.{Failure, Success, Try}

object QueryProgress extends LazyLogging {

  sealed trait QueryProgress
  case object QueryAccepted extends QueryProgress
  case object QueryFinished extends QueryProgress
  case object QueryRejected extends QueryProgress
  case class QueryFailed(cause: Throwable) extends QueryProgress
  case class QueryRetry(cause: Throwable, retryNumber: Int) extends QueryProgress

  case class ClickhouseQueryProgress(identifier: String, progress: QueryProgress)
  case class Progress(rowsRead: Long, bytesRead: Long, rowsWritten: Long, bytesWritten: Long, totalRows: Long)
      extends QueryProgress

  def queryProgressStream: RunnableGraph[(SourceQueueWithComplete[String], Source[ClickhouseQueryProgress, NotUsed])] =
    Source
      .queue[String](1000, OverflowStrategy.dropHead)
      .map[Option[ClickhouseQueryProgress]](queryAndProgress => {
        queryAndProgress.split("\n", 2).toList match {
          case queryId :: ProgressHeadersAsEventsStage.AcceptedMark :: Nil =>
            Some(ClickhouseQueryProgress(queryId, QueryAccepted))
          case queryId :: progressJson :: Nil =>
            Try {
              progressJson.parseJson match {
                case JsObject(fields) if fields.size == 3 =>
                  ClickhouseQueryProgress(
                    queryId,
                    Progress(
                      fields("read_rows").convertTo[String].toLong,
                      fields("read_bytes").convertTo[String].toLong,
                      0,
                      0,
                      fields("total_rows").convertTo[String].toLong
                    )
                  )
                case JsObject(fields) if fields.size == 5 =>
                  ClickhouseQueryProgress(
                    queryId,
                    Progress(
                      fields("read_rows").convertTo[String].toLong,
                      fields("read_bytes").convertTo[String].toLong,
                      fields("written_rows").convertTo[String].toLong,
                      fields("written_bytes").convertTo[String].toLong,
                      fields("total_rows_to_read").convertTo[String].toLong
                    )
                  )
                case _ => throw new IllegalArgumentException(s"Cannot extract progress from $progressJson")
              }
            } match {
              case Success(value) => Some(value)
              case Failure(exception) =>
                logger.warn(s"Failed to parse json $progressJson", exception)
                None
            }
          case other @ _ =>
            logger.warn(s"Could not get progress from $other")
            None
        }
      })
      .collect { case Some(progress) => progress }
      .withAttributes(ActorAttributes.supervisionStrategy({
        case ex @ _ =>
          logger.warn("Detected failure in the query progress stream, resuming operation.", ex)
          Supervision.Resume
      }))
      .toMat(BroadcastHub.sink)(Keep.both)
}
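Besides supervision, this example uses the queue-plus-BroadcastHub pattern: materializing the graph yields both a SourceQueue to push raw progress lines into and a Source that any number of consumers can attach to. A stripped-down sketch of that pattern — element type and buffer sizes are illustrative:

import akka.actor.ActorSystem
import akka.stream.OverflowStrategy
import akka.stream.scaladsl.{BroadcastHub, Keep, Sink, Source}

object QueueHubSketch extends App {
  implicit val system: ActorSystem = ActorSystem("queue-hub-sketch")

  // Materializing yields both ends: a queue to offer elements to,
  // and a hub source that fans elements out to all attached consumers.
  val (queue, hub) = Source
    .queue[String](bufferSize = 16, OverflowStrategy.dropHead)
    .toMat(BroadcastHub.sink(bufferSize = 8))(Keep.both)
    .run()

  // Any number of consumers can now attach to `hub`.
  hub.runWith(Sink.foreach(msg => println(s"consumer saw: $msg")))

  queue.offer("progress-event")
}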
Example 5
Source File: ResumingEventFilter.scala from 006877 with MIT License
package aia.stream

import java.nio.file.{ Path, Paths }
import java.nio.file.StandardOpenOption
import java.nio.file.StandardOpenOption._

import scala.concurrent.Future

import akka.NotUsed
import akka.actor.ActorSystem
import akka.stream.{ ActorMaterializer, IOResult }
import akka.util.ByteString
import spray.json._
import com.typesafe.config.{ Config, ConfigFactory }

object ResumingEventFilter extends App with EventMarshalling {
  val config = ConfigFactory.load()
  val maxLine = config.getInt("log-stream-processor.max-line")

  if (args.length != 3) {
    System.err.println("Provide args: input-file output-file state")
    System.exit(1)
  }

  val inputFile = FileArg.shellExpanded(args(0))
  val outputFile = FileArg.shellExpanded(args(1))
  val filterState = args(2) match {
    case State(state) => state
    case unknown =>
      System.err.println(s"Unknown state $unknown, exiting.")
      System.exit(1)
  }

  import akka.stream.scaladsl._

  val source: Source[ByteString, Future[IOResult]] =
    FileIO.fromPath(inputFile)

  val sink: Sink[ByteString, Future[IOResult]] =
    FileIO.toPath(outputFile, Set(CREATE, WRITE, APPEND))

  val frame: Flow[ByteString, String, NotUsed] =
    Framing.delimiter(ByteString("\n"), maxLine)
      .map(_.decodeString("UTF8"))

  import akka.stream.ActorAttributes
  import akka.stream.Supervision
  import LogStreamProcessor.LogParseException

  val decider: Supervision.Decider = {
    case _: LogParseException => Supervision.Resume
    case _                    => Supervision.Stop
  }

  val parse: Flow[String, Event, NotUsed] =
    Flow[String].map(LogStreamProcessor.parseLineEx)
      .collect { case Some(e) => e }
      .withAttributes(ActorAttributes.supervisionStrategy(decider))

  val filter: Flow[Event, Event, NotUsed] =
    Flow[Event].filter(_.state == filterState)

  val serialize: Flow[Event, ByteString, NotUsed] =
    Flow[Event].map(event => ByteString(event.toJson.compactPrint))

  implicit val system = ActorSystem()
  implicit val ec = system.dispatcher

  val graphDecider: Supervision.Decider = {
    case _: LogParseException => Supervision.Resume
    case _                    => Supervision.Stop
  }

  import akka.stream.ActorMaterializerSettings
  implicit val materializer = ActorMaterializer(
    ActorMaterializerSettings(system)
      .withSupervisionStrategy(graphDecider)
  )

  val composedFlow: Flow[ByteString, ByteString, NotUsed] =
    frame.via(parse)
      .via(filter)
      .via(serialize)

  val runnableGraph: RunnableGraph[Future[IOResult]] =
    source.via(composedFlow).toMat(sink)(Keep.right)

  runnableGraph.run().foreach { result =>
    println(s"Wrote ${result.count} bytes to '$outputFile'.")
    system.terminate()
  }
}
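This example sets the same decider in two places: per stage (on parse, via ActorAttributes) and stream-wide (via ActorMaterializerSettings). A minimal sketch of the stream-wide variant, using the same classic ActorMaterializer API as the book — note this API is deprecated in Akka 2.6+, where per-stream attributes are preferred:

import akka.actor.ActorSystem
import akka.stream.{ActorMaterializer, ActorMaterializerSettings, Supervision}
import akka.stream.scaladsl.{Sink, Source}

object MaterializerSupervisionSketch extends App {
  implicit val system: ActorSystem = ActorSystem("mat-supervision")

  val settings = ActorMaterializerSettings(system)
    .withSupervisionStrategy((_: Throwable) => Supervision.Resume)

  // Every stream materialized with this materializer resumes on failure.
  implicit val materializer: ActorMaterializer = ActorMaterializer(settings)

  Source(List("1", "oops", "2"))
    .map(_.toInt) // the NumberFormatException on "oops" is skipped
    .runWith(Sink.foreach(println)) // prints 1, then 2
}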
Example 6
Source File: LogJson.scala from 006877 with MIT License
package aia.stream

import java.nio.file.{ Files, Path }
import java.io.File
import java.time.ZonedDateTime

import scala.concurrent.duration._
import scala.concurrent.ExecutionContext
import scala.concurrent.Future
import scala.util.{ Success, Failure }

import akka.Done
import akka.actor._
import akka.util.ByteString
import akka.stream.{ ActorAttributes, ActorMaterializer, IOResult }
import akka.stream.scaladsl.JsonFraming
import akka.stream.scaladsl.{ FileIO, BidiFlow, Flow, Framing, Keep, Sink, Source }
import akka.http.scaladsl.marshallers.sprayjson.SprayJsonSupport._
import akka.http.scaladsl.marshalling.Marshal
import akka.http.scaladsl.model._
import akka.http.scaladsl.server.Directives._
import akka.http.scaladsl.server._
import spray.json._

object LogJson extends EventMarshalling with NotificationMarshalling with MetricMarshalling {

  def textInFlow(maxLine: Int) = {
    Framing.delimiter(ByteString("\n"), maxLine)
      .map(_.decodeString("UTF8"))
      .map(LogStreamProcessor.parseLineEx)
      .collect { case Some(e) => e }
  }

  def jsonInFlow(maxJsonObject: Int) = {
    JsonFraming.objectScanner(maxJsonObject)
      .map(_.decodeString("UTF8").parseJson.convertTo[Event])
  }

  def jsonFramed(maxJsonObject: Int) =
    JsonFraming.objectScanner(maxJsonObject)

  val jsonOutFlow = Flow[Event].map { event =>
    ByteString(event.toJson.compactPrint)
  }

  val notifyOutFlow = Flow[Summary].map { ws =>
    ByteString(ws.toJson.compactPrint)
  }

  val metricOutFlow = Flow[Metric].map { m =>
    ByteString(m.toJson.compactPrint)
  }

  val textOutFlow = Flow[Event].map { event =>
    ByteString(LogStreamProcessor.logLine(event))
  }

  def logToJson(maxLine: Int) = {
    BidiFlow.fromFlows(textInFlow(maxLine), jsonOutFlow)
  }

  def jsonToLog(maxJsonObject: Int) = {
    BidiFlow.fromFlows(jsonInFlow(maxJsonObject), textOutFlow)
  }

  def logToJsonFlow(maxLine: Int) = {
    logToJson(maxLine).join(Flow[Event])
  }

  def jsonToLogFlow(maxJsonObject: Int) = {
    jsonToLog(maxJsonObject).join(Flow[Event])
  }
}
Example 7
Source File: HandlingErrorsApplication.scala from Akka-Cookbook with MIT License
package com.packt.chapter8

import akka.actor.ActorSystem
import akka.stream.{ActorAttributes, ActorMaterializer, ActorMaterializerSettings, Supervision}
import akka.stream.scaladsl._

object HandlingErrorsApplication extends App {
  implicit val actorSystem = ActorSystem("HandlingErrors")

  val streamDecider: Supervision.Decider = {
    case e: IndexOutOfBoundsException =>
      println("Dropping element because of IndexOutOfBoundsException. Resuming.")
      Supervision.Resume
    case _ => Supervision.Stop
  }

  val flowDecider: Supervision.Decider = {
    case e: IllegalArgumentException =>
      println("Dropping element because of IllegalArgumentException. Restarting.")
      Supervision.Restart
    case _ => Supervision.Stop
  }

  val actorMaterializerSettings =
    ActorMaterializerSettings(actorSystem).withSupervisionStrategy(streamDecider)
  implicit val actorMaterializer = ActorMaterializer(actorMaterializerSettings)

  val words = List("Handling", "Errors", "In", "Akka", "Streams", "")

  val flow = Flow[String].map(word => {
    if (word.length == 0) throw new IllegalArgumentException("Empty words are not allowed")
    word
  }).withAttributes(ActorAttributes.supervisionStrategy(flowDecider))

  Source(words).via(flow).map(array => array(2)).to(Sink.foreach(println)).run()
}
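Restart differs from Resume in that the failing stage's accumulated state is also discarded. The Akka documentation illustrates this with scan, which is stateful; a sketch along those lines, with illustrative values:

import akka.actor.ActorSystem
import akka.stream.{ActorAttributes, Supervision}
import akka.stream.scaladsl.{Sink, Source}

object RestartVsResumeSketch extends App {
  implicit val system: ActorSystem = ActorSystem("restart-sketch")

  Source(List(1, 2, -1, 3))
    .scan(0) { (acc, n) =>
      if (n < 0) throw new IllegalArgumentException("negative not allowed")
      acc + n
    }
    // Restart drops the running total; Resume would have kept it.
    .withAttributes(ActorAttributes.supervisionStrategy(Supervision.restartingDecider))
    .runWith(Sink.foreach(println)) // 0, 1, 3, then the scan restarts: 0, 3
}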
Example 8
Source File: CassandraReadSideHandler.scala from lagom with Apache License 2.0
package com.lightbend.lagom.internal.scaladsl.persistence.cassandra

import akka.persistence.query.Offset
import akka.stream.ActorAttributes
import akka.stream.scaladsl.Flow
import akka.Done
import akka.NotUsed
import com.datastax.driver.core.BatchStatement
import com.datastax.driver.core.BoundStatement
import com.lightbend.lagom.internal.persistence.cassandra.CassandraOffsetDao
import com.lightbend.lagom.internal.persistence.cassandra.CassandraOffsetStore
import com.lightbend.lagom.scaladsl.persistence.ReadSideProcessor.ReadSideHandler
import com.lightbend.lagom.scaladsl.persistence._
import com.lightbend.lagom.scaladsl.persistence.cassandra.CassandraSession
import org.slf4j.LoggerFactory

import scala.collection.immutable
import scala.concurrent.ExecutionContext
import scala.concurrent.Future
import scala.collection.JavaConverters._

private[cassandra] final class CassandraAutoReadSideHandler[Event <: AggregateEvent[Event]](
    session: CassandraSession,
    offsetStore: CassandraOffsetStore,
    handlers: Map[Class[_ <: Event], CassandraAutoReadSideHandler.Handler[Event]],
    globalPrepareCallback: () => Future[Done],
    prepareCallback: AggregateEventTag[Event] => Future[Done],
    readProcessorId: String,
    dispatcher: String
)(implicit ec: ExecutionContext)
    extends CassandraReadSideHandler[Event, CassandraAutoReadSideHandler.Handler[Event]](
      session,
      handlers,
      dispatcher
    ) {

  import CassandraAutoReadSideHandler.Handler

  @volatile
  private var offsetDao: CassandraOffsetDao = _

  protected override def invoke(
      handler: Handler[Event],
      element: EventStreamElement[Event]
  ): Future[immutable.Seq[BoundStatement]] = {
    for {
      statements <- handler
        .asInstanceOf[EventStreamElement[Event] => Future[immutable.Seq[BoundStatement]]]
        .apply(element)
    } yield statements :+ offsetDao.bindSaveOffset(element.offset)
  }

  protected def offsetStatement(offset: Offset): immutable.Seq[BoundStatement] =
    immutable.Seq(offsetDao.bindSaveOffset(offset))

  override def globalPrepare(): Future[Done] = {
    globalPrepareCallback.apply()
  }

  override def prepare(tag: AggregateEventTag[Event]): Future[Offset] = {
    for {
      _   <- prepareCallback.apply(tag)
      dao <- offsetStore.prepare(readProcessorId, tag.tag)
    } yield {
      offsetDao = dao
      dao.loadedOffset
    }
  }
}
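This handler and the two Couchbase handlers below (Examples 10 and 11) share one shape: process events with mapAsync(parallelism = 1) so the stored offset can never run ahead of a still-executing handler, and pin the whole flow to a plugin-configured dispatcher. A stripped-down sketch of that shape — the names are illustrative, not Lagom's API:

import akka.Done
import akka.NotUsed
import akka.stream.ActorAttributes
import akka.stream.scaladsl.Flow
import scala.concurrent.Future

object ReadSideHandlerPattern {
  // Processes events strictly one at a time, preserving event order,
  // and runs the stage on a dedicated dispatcher for database-bound work.
  def handlerFlow[Event](
      handle: Event => Future[Done],
      dispatcher: String
  ): Flow[Event, Done, NotUsed] =
    Flow[Event]
      .mapAsync(parallelism = 1)(handle)
      .withAttributes(ActorAttributes.dispatcher(dispatcher))
}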
Example 9
Source File: Demo.scala from chordial with BSD 3-Clause "New" or "Revised" License
package com.tristanpenman.chordial.demo

import akka.actor.ActorSystem
import akka.http.scaladsl.Http
import akka.http.scaladsl.model.ws.TextMessage
import akka.stream.scaladsl._
import akka.stream.{ActorAttributes, ActorMaterializer, OverflowStrategy, Supervision}
import akka.util.Timeout
import com.tristanpenman.chordial.core.Event
import com.tristanpenman.chordial.core.Event._

import scala.concurrent.Await
import scala.concurrent.duration._

object Demo extends App {
  implicit val system = ActorSystem("chordial-demo")
  implicit val mat = ActorMaterializer()
  implicit val ec = system.dispatcher

  implicit val timeout: Timeout = 3.seconds

  // Generate IDs ranging from 0 to 63 (inclusive) so that when visualising the network,
  // each node represents a ~5.625 degree arc on the ring
  private val keyspaceBits = 6

  // Create an actor that is responsible for creating and terminating nodes, while ensuring
  // that nodes are assigned unique IDs in the Chord key-space
  private val governor = system.actorOf(Governor.props(keyspaceBits), "Governor")

  // Create an actor that will log events published by nodes
  private val eventWriter = system.actorOf(EventWriter.props, "EventWriter")

  // Subscribe the EventWriter actor to events published by nodes
  system.eventStream.subscribe(eventWriter, classOf[Event])

  val (listener, eventsSource) = Source
    .actorRef[Event](Int.MaxValue, OverflowStrategy.fail)
    .map {
      case FingerReset(nodeId: Long, index: Int) =>
        s"""{ "type": "FingerReset", "nodeId": $nodeId, "index": $index }"""
      case FingerUpdated(nodeId: Long, index: Int, fingerId: Long) =>
        s"""{ "type": "FingerUpdated", "nodeId": $nodeId, "index": $index, "fingerId": $fingerId }"""
      case NodeCreated(nodeId, successorId) =>
        s"""{ "type": "NodeCreated", "nodeId": $nodeId, "successorId": $successorId }"""
      case NodeShuttingDown(nodeId) =>
        s"""{ "type": "NodeDeleted", "nodeId": $nodeId }"""
      case PredecessorReset(nodeId) =>
        s"""{ "type": "PredecessorReset", "nodeId": $nodeId }"""
      case PredecessorUpdated(nodeId, predecessorId) =>
        s"""{ "type": "PredecessorUpdated", "nodeId": $nodeId, "predecessorId": $predecessorId }"""
      case SuccessorListUpdated(nodeId, primarySuccessorId, _) =>
        s"""{ "type": "SuccessorUpdated", "nodeId": $nodeId, "successorId": $primarySuccessorId }"""
    }
    .map(TextMessage(_))
    .withAttributes(ActorAttributes.supervisionStrategy(Supervision.resumingDecider))
    .toMat(BroadcastHub.sink[TextMessage](bufferSize = 16))(Keep.both)
    .run()

  system.eventStream.subscribe(listener, classOf[Event])

  Http().bindAndHandle(WebSocketWorker(governor, eventsSource), "0.0.0.0", 4567)

  Await.result(system.whenTerminated, Duration.Inf)
}
Example 10
Source File: CouchbaseReadSideHandler.scala from akka-persistence-couchbase with Apache License 2.0
package com.lightbend.lagom.internal.javadsl.persistence.couchbase

import java.util.concurrent.CompletionStage

import akka.Done
import akka.japi.Pair
import akka.stream.ActorAttributes
import akka.stream.alpakka.couchbase.javadsl.CouchbaseSession
import akka.stream.javadsl.Flow
import com.lightbend.lagom.internal.javadsl.persistence.OffsetAdapter
import com.lightbend.lagom.internal.persistence.couchbase.{CouchbaseOffsetDao, CouchbaseOffsetStore}
import com.lightbend.lagom.javadsl.persistence.ReadSideProcessor.ReadSideHandler
import com.lightbend.lagom.javadsl.persistence.{AggregateEvent, AggregateEventTag, Offset}
import org.slf4j.LoggerFactory

import scala.compat.java8.FutureConverters._
import scala.concurrent.{ExecutionContext, Future}

private[couchbase] final class CouchbaseReadSideHandler[Event <: AggregateEvent[Event]](
    couchbaseSession: CouchbaseSession,
    offsetStore: CouchbaseOffsetStore,
    handlers: Map[Class[_ <: Event], Handler[Event]],
    globalPrepareCallback: CouchbaseSession => CompletionStage[Done],
    prepareCallback: (CouchbaseSession, AggregateEventTag[Event]) => CompletionStage[Done],
    readProcessorId: String,
    dispatcher: String
)(implicit ec: ExecutionContext)
    extends ReadSideHandler[Event] {

  private val log = LoggerFactory.getLogger(this.getClass)

  @volatile
  private var offsetDao: CouchbaseOffsetDao = _

  protected def invoke(handler: Handler[Event], event: Event, offset: Offset): CompletionStage[Done] =
    handler
      .asInstanceOf[(CouchbaseSession, Event, Offset) => CompletionStage[Done]]
      .apply(couchbaseSession, event, offset)
      .toScala
      .flatMap { _ =>
        val akkaOffset = OffsetAdapter.dslOffsetToOffset(offset)
        offsetDao.bindSaveOffset(akkaOffset).execute(couchbaseSession.asScala, ec)
      }
      .toJava

  override def globalPrepare(): CompletionStage[Done] =
    globalPrepareCallback.apply(couchbaseSession)

  override def prepare(tag: AggregateEventTag[Event]): CompletionStage[Offset] =
    (for {
      _   <- prepareCallback.apply(couchbaseSession, tag).toScala
      dao <- offsetStore.prepare(readProcessorId, tag.tag)
    } yield {
      offsetDao = dao
      OffsetAdapter.offsetToDslOffset(dao.loadedOffset)
    }).toJava

  override def handle(): Flow[Pair[Event, Offset], Done, _] =
    akka.stream.scaladsl
      .Flow[Pair[Event, Offset]]
      .mapAsync(parallelism = 1) { pair =>
        val Pair(event, offset) = pair
        val eventClass = event.getClass

        val handler = handlers.getOrElse(
          // lookup handler
          eventClass,
          // fallback to empty handler if none
          {
            if (log.isDebugEnabled()) log.debug("Unhandled event [{}]", eventClass.getName)
            CouchbaseReadSideHandler.emptyHandler
          }
        )

        invoke(handler, event, offset).toScala
      }
      .withAttributes(ActorAttributes.dispatcher(dispatcher))
      .asJava
}
Example 11
Source File: CouchbaseReadSideHandler.scala from akka-persistence-couchbase with Apache License 2.0
package com.lightbend.lagom.internal.scaladsl.persistence.couchbase

import akka.persistence.query.Offset
import akka.stream.ActorAttributes
import akka.stream.alpakka.couchbase.scaladsl.CouchbaseSession
import akka.stream.scaladsl.Flow
import akka.{Done, NotUsed}
import com.lightbend.lagom.internal.persistence.couchbase.{CouchbaseOffsetDao, CouchbaseOffsetStore}
import com.lightbend.lagom.scaladsl.persistence.ReadSideProcessor.ReadSideHandler
import com.lightbend.lagom.scaladsl.persistence._
import org.slf4j.LoggerFactory

import scala.concurrent.{ExecutionContext, Future}

private[couchbase] final class CouchbaseReadSideHandler[Event <: AggregateEvent[Event]](
    couchbase: CouchbaseSession,
    offsetStore: CouchbaseOffsetStore,
    handlers: Map[Class[_ <: Event], CouchbaseReadSideHandler.Handler[Event]],
    globalPrepareCallback: CouchbaseSession => Future[Done],
    prepareCallback: (CouchbaseSession, AggregateEventTag[Event]) => Future[Done],
    readProcessorId: String,
    dispatcher: String
)(implicit ec: ExecutionContext)
    extends ReadSideHandler[Event] {

  import CouchbaseReadSideHandler.Handler

  private val log = LoggerFactory.getLogger(this.getClass)

  @volatile
  private var offsetDao: CouchbaseOffsetDao = _

  protected def invoke(handler: Handler[Event], element: EventStreamElement[Event]): Future[Done] =
    handler
      .apply(couchbase, element)
      .flatMap(_ => offsetDao.bindSaveOffset(element.offset).execute(couchbase, ec))

  override def globalPrepare(): Future[Done] = globalPrepareCallback(couchbase)

  override def prepare(tag: AggregateEventTag[Event]): Future[Offset] =
    for {
      _   <- prepareCallback.apply(couchbase, tag)
      dao <- offsetStore.prepare(readProcessorId, tag.tag)
    } yield {
      offsetDao = dao
      dao.loadedOffset
    }

  override def handle(): Flow[EventStreamElement[Event], Done, NotUsed] =
    Flow[EventStreamElement[Event]]
      .mapAsync(parallelism = 1) { elem =>
        val eventClass = elem.event.getClass

        val handler = handlers.getOrElse(
          // lookup handler
          eventClass,
          // fallback to empty handler if none
          {
            if (log.isDebugEnabled()) log.debug("Unhandled event [{}]", eventClass.getName)
            CouchbaseReadSideHandler.emptyHandler.asInstanceOf[Handler[Event]]
          }
        )

        invoke(handler, elem)
      }
      .withAttributes(ActorAttributes.dispatcher(dispatcher))
}
Example 12
Source File: WSApi.scala from mist with Apache License 2.0
package io.hydrosphere.mist.master.interfaces.http

import akka.http.scaladsl.model.ws._
import akka.http.scaladsl.server.{Directives, Route}
import akka.stream.ActorAttributes.supervisionStrategy
import akka.stream.Supervision.resumingDecider
import akka.stream.{ActorAttributes, Supervision}
import akka.stream.scaladsl.{Flow, Sink}
import io.hydrosphere.mist.master.EventsStreamer
import io.hydrosphere.mist.master.Messages.StatusMessages._
import io.hydrosphere.mist.master.interfaces.JsonCodecs

import scala.concurrent.duration._
import spray.json._

import scala.language.postfixOps

class WSApi(streamer: EventsStreamer)(implicit val keepAliveTimeout: FiniteDuration) {

  import Directives._
  import JsonCodecs._

  val route: Route = {
    pathPrefix("v2" / "api" / "ws") {
      parameter('withLogs ? false) { withLogs =>
        path("all") {
          get {
            handleWebSocketMessagesWithKeepAlive(allEventsWsFlow(withLogs))
          }
        } ~
        path("jobs" / Segment) { jobId =>
          get {
            handleWebSocketMessagesWithKeepAlive(jobWsFlow(jobId, withLogs))
          }
        }
      }
    }
  }

  private def handleWebSocketMessagesWithKeepAlive(handler: Flow[Message, Message, akka.NotUsed]): Route =
    handleWebSocketMessages(
      handler
        .withAttributes(supervisionStrategy(resumingDecider))
        .keepAlive(
          keepAliveTimeout,
          () => TextMessage.Strict(KeepAliveEvent.asInstanceOf[SystemEvent].toJson.toString())
        )
    )

  private def jobWsFlow(id: String, withLogs: Boolean): Flow[Message, Message, akka.NotUsed] = {
    val source = streamer.eventsSource()
      .filter({
        case e: UpdateStatusEvent => e.id == id
        case e: ReceivedLogs if withLogs => e.id == id
        case _ => false
      })
      .map(toWsMessage)

    val sink = Sink.ignore
    Flow.fromSinkAndSource(sink, source)
  }

  private def allEventsWsFlow(withLogs: Boolean): Flow[Message, Message, akka.NotUsed] = {
    val source = streamer.eventsSource()
      .filter({
        case _: ReceivedLogs => withLogs
        case _ => true
      })
      .map(toWsMessage)

    val sink = Sink.ignore
    Flow.fromSinkAndSource(sink, source)
  }

  private def toWsMessage(e: SystemEvent): Message =
    TextMessage.Strict(e.toJson.toString())
}
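The combination above — a resuming supervision strategy plus keepAlive — keeps a WebSocket flow alive across bad messages and idle periods. A minimal sketch of the keepAlive operator on its own; the tick interval and payloads are made up for illustration:

import akka.stream.ActorAttributes.supervisionStrategy
import akka.stream.Supervision.resumingDecider
import akka.stream.scaladsl.Source
import scala.concurrent.duration._

object KeepAliveSketch {
  // Emits "event" every 5 seconds; whenever the stream has been idle for more
  // than 1 second, a "ping" is injected instead of letting the connection go quiet.
  val events: Source[String, _] =
    Source.tick(0.seconds, 5.seconds, "event")
      .withAttributes(supervisionStrategy(resumingDecider))
      .keepAlive(1.second, () => "ping")
}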
Example 13
Source File: StreamLoader.scala from recogito2 with Apache License 2.0
package controllers.admin.authorities

import akka.stream.{ActorAttributes, ClosedShape, Materializer, Supervision}
import akka.stream.scaladsl._
import akka.util.ByteString
import java.io.InputStream
import services.entity.EntityRecord
import services.entity.builtin.importer.EntityImporter
import play.api.Logger
import play.api.libs.json.Json
import scala.concurrent.{Await, ExecutionContext}
import scala.concurrent.duration._

class StreamLoader(implicit materializer: Materializer) {

  private val BATCH_SIZE = 100

  private val decider: Supervision.Decider = {
    case t: Throwable =>
      t.printStackTrace()
      Supervision.Stop
  }

  def importPlaces(is: InputStream, crosswalk: String => Option[EntityRecord], importer: EntityImporter)(
      implicit ctx: ExecutionContext) = {

    val source = StreamConverters.fromInputStream(() => is, 1024)
      .via(Framing.delimiter(ByteString("\n"), maximumFrameLength = Int.MaxValue, allowTruncation = false))
      .map(_.utf8String)

    val parser = Flow.fromFunction[String, Option[EntityRecord]](crosswalk)
      .withAttributes(ActorAttributes.supervisionStrategy(decider))
      .grouped(BATCH_SIZE)

    val sink = Sink.foreach[Seq[Option[EntityRecord]]] { records =>
      val toImport = records.flatten
      Await.result(importer.importRecords(toImport), 60.minutes)
    }

    val graph = RunnableGraph.fromGraph(GraphDSL.create() { implicit builder =>
      import GraphDSL.Implicits._
      source ~> parser ~> sink
      ClosedShape
    }).withAttributes(ActorAttributes.supervisionStrategy(decider))

    graph.run()
  }
}
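Note that the supervision strategy is attached twice: on the parser stage and on the whole RunnableGraph. Attributes set on an enclosing graph act as defaults for every stage inside it, while attributes set directly on a stage take precedence. A minimal sketch of the graph-wide form, with illustrative values:

import akka.Done
import akka.actor.ActorSystem
import akka.stream.{ActorAttributes, Supervision}
import akka.stream.scaladsl.{Keep, RunnableGraph, Sink, Source}
import scala.concurrent.Future

object GraphWideAttributesSketch extends App {
  implicit val system: ActorSystem = ActorSystem("graph-attrs")

  val graph: RunnableGraph[Future[Done]] =
    Source(List("10", "x", "20"))
      .map(_.toInt) // the NumberFormatException on "x" is handled by the graph-wide strategy
      .toMat(Sink.foreach(println))(Keep.right)
      .withAttributes(ActorAttributes.supervisionStrategy(Supervision.resumingDecider))

  graph.run() // prints 10 and 20; "x" is dropped
}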
Example 14
Source File: CmdStreams.scala from AckCord with MIT License
package ackcord.oldcommands

import ackcord._
import akka.NotUsed
import akka.actor.typed.ActorSystem
import akka.stream.scaladsl.{BroadcastHub, Keep, Source}
import akka.stream.{ActorAttributes, Supervision}

object CmdStreams {

  def cmdStreams[A](
      settings: AbstractCommandSettings,
      apiMessages: Source[APIMessage, A]
  )(implicit system: ActorSystem[Nothing]): (A, Source[RawCmdMessage, NotUsed]) = {
    apiMessages
      .collect {
        case APIMessage.MessageCreate(msg, c) =>
          implicit val cache: MemoryCacheSnapshot = c.current

          CmdHelper.isValidCommand(settings.needMention(msg), msg).map { args =>
            if (args == Nil) NoCmd(msg, c.current)
            else {
              settings
                .getPrefix(args, msg)
                .fold[RawCmdMessage](NoCmdPrefix(msg, args.head, args.tail, cache)) {
                  case (prefix, remaining) =>
                    RawCmd(msg, prefix, remaining.head, remaining.tail.toList, c.current)
                }
            }
          }
      }
      .mapConcat(_.toList)
      .toMat(BroadcastHub.sink(bufferSize = 256))(Keep.both)
      .addAttributes(ActorAttributes.supervisionStrategy(Supervision.resumingDecider))
      .run()
  }
}
Example 15
Source File: SupervisionStreams.scala from AckCord with MIT License
package ackcord.requests

import akka.actor.typed.ActorSystem
import akka.stream.javadsl.RunnableGraph
import akka.stream.scaladsl.{Flow, Sink, Source}
import akka.stream.{ActorAttributes, Attributes, Supervision}

object SupervisionStreams {

  def addLogAndContinueFunction[G](addAttributes: Attributes => G)(implicit system: ActorSystem[Nothing]): G =
    addAttributes(ActorAttributes.supervisionStrategy {
      case _: RetryFailedRequestException[_] => Supervision.Stop
      case e =>
        system.log.error("Unhandled exception in stream", e)
        Supervision.Resume
    })

  def logAndContinue[M](graph: RunnableGraph[M])(implicit system: ActorSystem[Nothing]): RunnableGraph[M] =
    addLogAndContinueFunction(graph.addAttributes)

  def logAndContinue[Out, Mat](source: Source[Out, Mat])(implicit system: ActorSystem[Nothing]): Source[Out, Mat] =
    addLogAndContinueFunction(source.addAttributes)

  def logAndContinue[In, Out, Mat](
      flow: Flow[In, Out, Mat]
  )(implicit system: ActorSystem[Nothing]): Flow[In, Out, Mat] =
    addLogAndContinueFunction(flow.addAttributes)

  def logAndContinue[In, Mat](sink: Sink[In, Mat])(implicit system: ActorSystem[Nothing]): Sink[In, Mat] =
    addLogAndContinueFunction(sink.addAttributes)
}
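A hypothetical usage sketch of these helpers — the stream and its elements are invented for illustration, while RetryFailedRequestException is AckCord's own exception type handled above:

import akka.actor.typed.ActorSystem
import akka.actor.typed.scaladsl.Behaviors
import akka.stream.scaladsl.{Sink, Source}

object SupervisionStreamsUsage extends App {
  implicit val system: ActorSystem[Nothing] = ActorSystem(Behaviors.empty, "usage")

  // The NumberFormatException thrown on "two" is logged by the decider and the
  // element dropped; the stream continues with the remaining elements.
  SupervisionStreams
    .logAndContinue(Source(List("1", "two", "3")).map(_.toInt))
    .runWith(Sink.foreach(println)) // prints 1, then 3
}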