akka.stream.Supervision Scala Examples

The following examples show how to use akka.stream.Supervision. They are drawn from open-source projects; each example lists its source file, the project it comes from, and that project's license.
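Most of the examples below follow the same two-step pattern: define a Supervision.Decider, a function from Throwable to a Supervision.Directive (Resume, Restart, or Stop), and attach it either to a single stage with ActorAttributes.supervisionStrategy or to the whole stream through the materializer settings. A minimal, self-contained sketch of the per-stage variant (illustrative code, assuming the classic pre-2.6 Akka Streams API used throughout these examples):

import akka.actor.ActorSystem
import akka.stream.{ActorAttributes, ActorMaterializer, Supervision}
import akka.stream.scaladsl.{Sink, Source}

object MinimalSupervision extends App {
  implicit val system = ActorSystem("supervision-demo")
  implicit val materializer = ActorMaterializer()

  // Resume drops the failing element, Stop fails the stream, and Restart
  // additionally clears any accumulated state of the failing stage.
  val decider: Supervision.Decider = {
    case _: ArithmeticException => Supervision.Resume
    case _                      => Supervision.Stop
  }

  Source(List(1, 2, 0, 4))
    .map(100 / _) // throws ArithmeticException on 0
    .withAttributes(ActorAttributes.supervisionStrategy(decider)) // this stage only
    .runWith(Sink.foreach(println)) // prints 100, 50, 25
}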
Example 1
Source File: ClusterAwareHostBalancer.scala (from clickhouse-scala-client, GNU Lesser General Public License v3.0)
package com.crobox.clickhouse.balancing

import akka.actor.{ActorRef, ActorSystem}
import akka.http.scaladsl.model.Uri
import akka.pattern.ask
import akka.stream.scaladsl.Sink
import akka.stream.{ActorAttributes, Materializer, Supervision}
import akka.util.Timeout
import com.crobox.clickhouse.balancing.discovery.ConnectionManagerActor.{GetConnection, LogDeadConnections}
import com.crobox.clickhouse.balancing.discovery.cluster.ClusterConnectionFlow

import scala.concurrent.duration._
import scala.concurrent.{ExecutionContext, Future}


case class ClusterAwareHostBalancer(host: Uri,
                                    cluster: String = "cluster",
                                    manager: ActorRef,
                                    scanningInterval: FiniteDuration)(
    implicit system: ActorSystem,
    connectionRetrievalTimeout: Timeout,
    ec: ExecutionContext,
    materializer: Materializer
) extends HostBalancer {

  ClusterConnectionFlow
    .clusterConnectionsFlow(Future.successful(host), scanningInterval, cluster)
    .withAttributes(
      ActorAttributes.supervisionStrategy({
        case ex: IllegalArgumentException =>
          logger.error("Failed resolving hosts for cluster, stopping the flow.", ex)
          Supervision.Stop
        case ex =>
          logger.error("Failed resolving hosts for cluster, resuming.", ex)
          Supervision.Resume
      })
    )
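    // Sink.actorRef forwards every element emitted by the cluster connections flow
    // to the manager actor, and sends it LogDeadConnections when the stream completes.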
    .runWith(Sink.actorRef(manager, LogDeadConnections))

  override def nextHost: Future[Uri] =
    (manager ? GetConnection()).mapTo[Uri]
} 
Example 2
Source File: QueryProgress.scala (from clickhouse-scala-client, GNU Lesser General Public License v3.0)
package com.crobox.clickhouse.internal.progress
import akka.NotUsed
import akka.stream.scaladsl.{BroadcastHub, Keep, RunnableGraph, Source, SourceQueueWithComplete}
import akka.stream.{ActorAttributes, OverflowStrategy, Supervision}
import com.typesafe.scalalogging.LazyLogging
import spray.json._
import spray.json.DefaultJsonProtocol._
import scala.util.{Failure, Success, Try}

object QueryProgress extends LazyLogging {

  sealed trait QueryProgress
  case object QueryAccepted                                 extends QueryProgress
  case object QueryFinished                                 extends QueryProgress
  case object QueryRejected                                 extends QueryProgress
  case class QueryFailed(cause: Throwable)                  extends QueryProgress
  case class QueryRetry(cause: Throwable, retryNumber: Int) extends QueryProgress

  case class ClickhouseQueryProgress(identifier: String, progress: QueryProgress)
  case class Progress(rowsRead: Long, bytesRead: Long, rowsWritten: Long, bytesWritten: Long, totalRows: Long) extends QueryProgress

  def queryProgressStream: RunnableGraph[(SourceQueueWithComplete[String], Source[ClickhouseQueryProgress, NotUsed])] =
    Source
      .queue[String](1000, OverflowStrategy.dropHead)
      .map[Option[ClickhouseQueryProgress]](queryAndProgress => {
        queryAndProgress.split("\n", 2).toList match {
          case queryId :: ProgressHeadersAsEventsStage.AcceptedMark :: Nil =>
            Some(ClickhouseQueryProgress(queryId, QueryAccepted))
          case queryId :: progressJson :: Nil =>
            Try {
              progressJson.parseJson match {
                case JsObject(fields) if fields.size == 3 =>
                  ClickhouseQueryProgress(
                    queryId,
                    Progress(
                        fields("read_rows").convertTo[String].toLong,
                        fields("read_bytes").convertTo[String].toLong,
                        0,
                        0,
                        fields("total_rows").convertTo[String].toLong
                    )
                  )
                case JsObject(fields) if fields.size == 5 =>
                  ClickhouseQueryProgress(
                    queryId,
                    Progress(
                      fields("read_rows").convertTo[String].toLong,
                      fields("read_bytes").convertTo[String].toLong,
                      fields("written_rows").convertTo[String].toLong,
                      fields("written_bytes").convertTo[String].toLong,
                      fields("total_rows_to_read").convertTo[String].toLong
                    )
                  )
                case _ => throw new IllegalArgumentException(s"Cannot extract progress from $progressJson")
              }
            } match {
              case Success(value) => Some(value)
              case Failure(exception) =>
                logger.warn(s"Failed to parse json $progressJson", exception)
                None
            }
          case other =>
            logger.warn(s"Could not get progress from $other")
            None
        }
      })
      .collect {
        case Some(progress) => progress
      }
      .withAttributes(ActorAttributes.supervisionStrategy({
        case ex =>
          logger.warn("Detected failure in the query progress stream, resuming operation.", ex)
          Supervision.Resume
      }))
      .toMat(BroadcastHub.sink)(Keep.both)
} 
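The materialized pair gives callers a queue for feeding raw progress output and a broadcast source of parsed events. A hypothetical usage sketch (QueryProgressUsage and the sample JSON line are illustrative, not part of the project):

import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.Sink
import com.crobox.clickhouse.internal.progress.QueryProgress._

object QueryProgressUsage extends App {
  implicit val system = ActorSystem()
  implicit val materializer = ActorMaterializer()

  // Materializing the graph yields the input queue and the broadcast source.
  val (queue, progressSource) = queryProgressStream.run()

  // Each subscriber to the BroadcastHub sees every parsed progress event.
  progressSource.runWith(Sink.foreach(println))

  // A three-field JSON payload matches the first parsing branch above; malformed
  // lines are logged and dropped thanks to the resuming supervision strategy.
  queue.offer("query-1\n" + """{"read_rows":"10","read_bytes":"1024","total_rows":"100"}""")
}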
Example 3
Source File: ResumingEventFilter.scala (from 006877, MIT License)
package aia.stream

import java.nio.file.{ Path, Paths }
import java.nio.file.StandardOpenOption
import java.nio.file.StandardOpenOption._

import scala.concurrent.Future

import akka.NotUsed
import akka.actor.ActorSystem
import akka.stream.{ ActorMaterializer, IOResult }
import akka.util.ByteString

import spray.json._
import com.typesafe.config.{ Config, ConfigFactory }

object ResumingEventFilter extends App with EventMarshalling {
  val config = ConfigFactory.load() 
  val maxLine = config.getInt("log-stream-processor.max-line")
  
  if(args.length != 3) {
    System.err.println("Provide args: input-file output-file state")
    System.exit(1)
  }

  val inputFile = FileArg.shellExpanded(args(0))
  val outputFile = FileArg.shellExpanded(args(1))

  val filterState = args(2) match {
    case State(state) => state
    case unknown => 
      System.err.println(s"Unknown state $unknown, exiting.") 
      System.exit(1)
  }
  import akka.stream.scaladsl._

  val source: Source[ByteString, Future[IOResult]] = 
    FileIO.fromPath(inputFile)

  val sink: Sink[ByteString, Future[IOResult]] = 
    FileIO.toPath(outputFile, Set(CREATE, WRITE, APPEND))

  val frame: Flow[ByteString, String, NotUsed] =  
    Framing.delimiter(ByteString("\n"), maxLine) 
      .map(_.decodeString("UTF8"))


  import akka.stream.ActorAttributes
  import akka.stream.Supervision

  import LogStreamProcessor.LogParseException

  val decider : Supervision.Decider = {
    case _: LogParseException => Supervision.Resume
    case _                    => Supervision.Stop
  }

  val parse: Flow[String, Event, NotUsed] = 
    Flow[String].map(LogStreamProcessor.parseLineEx) 
      .collect { case Some(e) => e }
      .withAttributes(ActorAttributes.supervisionStrategy(decider))


  val filter: Flow[Event, Event, NotUsed] =   
    Flow[Event].filter(_.state == filterState)
  
  val serialize: Flow[Event, ByteString, NotUsed] =  
    Flow[Event].map(event => ByteString(event.toJson.compactPrint))

  implicit val system = ActorSystem() 
  implicit val ec = system.dispatcher


  val graphDecider : Supervision.Decider = { 
    case _: LogParseException => Supervision.Resume
    case _                    => Supervision.Stop
  }

  import akka.stream.ActorMaterializerSettings
  implicit val materializer = ActorMaterializer(
    ActorMaterializerSettings(system)
      .withSupervisionStrategy(graphDecider)
  )

  val composedFlow: Flow[ByteString, ByteString, NotUsed] =  
    frame.via(parse)
      .via(filter)
      .via(serialize)

  val runnableGraph: RunnableGraph[Future[IOResult]] = 
    source.via(composedFlow).toMat(sink)(Keep.right)

  runnableGraph.run().foreach { result =>
    println(s"Wrote ${result.count} bytes to '$outputFile'.")
    system.terminate()
  }  

} 
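The book wires the same decider in twice: once as a per-flow attribute on the parse flow, and once as the materializer-wide default. The practical difference between Resume and Restart is what happens to a stage's accumulated state, which a scan makes visible. An illustrative sketch, not from the book; the restart behavior follows the example in the Akka documentation (the two runs print concurrently, so their output may interleave):

import akka.actor.ActorSystem
import akka.stream.{ActorAttributes, ActorMaterializer, Supervision}
import akka.stream.scaladsl.{Sink, Source}

object ResumeVsRestart extends App {
  implicit val system = ActorSystem()
  implicit val materializer = ActorMaterializer()

  def runningSum(decider: Supervision.Decider) =
    Source(List(1, 2, -1, 3))
      .scan(0) { (acc, x) =>
        if (x < 0) throw new IllegalArgumentException("negative") else acc + x
      }
      .withAttributes(ActorAttributes.supervisionStrategy(decider))

  // Resume drops the bad element but keeps the accumulator: 0, 1, 3, 6
  runningSum(Supervision.resumingDecider).runWith(Sink.foreach(println))

  // Restart resets the scan to its zero state after the failure: 0, 1, 3, 0, 3
  runningSum(Supervision.restartingDecider).runWith(Sink.foreach(println))
}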
Example 4
Source File: FanLogsApp.scala (from 006877, MIT License)
package aia.stream

import java.nio.file.{ Files, FileSystems, Path }
import scala.concurrent.Future
import scala.concurrent.duration._

import akka.NotUsed
import akka.actor.{ ActorSystem , Actor, Props }
import akka.event.Logging

import akka.stream.{ ActorMaterializer, ActorMaterializerSettings, Supervision }

import akka.http.scaladsl.Http
import akka.http.scaladsl.Http.ServerBinding
import akka.http.scaladsl.server.Directives._

import com.typesafe.config.{ Config, ConfigFactory }

object FanLogsApp extends App {

  val config = ConfigFactory.load() 
  val host = config.getString("http.host")
  val port = config.getInt("http.port")

  val logsDir = {
    val dir = config.getString("log-stream-processor.logs-dir")
    Files.createDirectories(FileSystems.getDefault.getPath(dir))
  }
  val maxLine = config.getInt("log-stream-processor.max-line")
  val maxJsObject = config.getInt("log-stream-processor.max-json-object")

  implicit val system = ActorSystem() 
  implicit val ec = system.dispatcher
  
  val decider: Supervision.Decider = {
    case _: LogStreamProcessor.LogParseException => Supervision.Resume
    case _                                       => Supervision.Stop
  }
  
  implicit val materializer = ActorMaterializer(
   ActorMaterializerSettings(system)
     .withSupervisionStrategy(decider)
  )
  
  val api = new FanLogsApi(logsDir, maxLine, maxJsObject).routes
 
  val bindingFuture: Future[ServerBinding] =
    Http().bindAndHandle(api, host, port)
 
  val log =  Logging(system.eventStream, "fan-logs")
  bindingFuture.map { serverBinding =>
    log.info(s"Bound to ${serverBinding.localAddress} ")
  }.onFailure { 
    case ex: Exception =>
      log.error(ex, "Failed to bind to {}:{}!", host, port)
      system.terminate()
  }
} 
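FanLogsApp installs its decider once, at the materializer level, so it becomes the default for every stream the app runs. A per-stage attribute still overrides that default; a small illustrative sketch:

import akka.actor.ActorSystem
import akka.stream.{ActorAttributes, ActorMaterializer, ActorMaterializerSettings, Supervision}
import akka.stream.scaladsl.{Sink, Source}

object PerStageOverride extends App {
  implicit val system = ActorSystem()

  // Stream-wide default: stop on any failure.
  implicit val materializer = ActorMaterializer(
    ActorMaterializerSettings(system).withSupervisionStrategy(Supervision.stoppingDecider))

  Source(List("1", "oops", "2"))
    .map(_.toInt) // "oops" throws NumberFormatException
    .withAttributes(ActorAttributes.supervisionStrategy(Supervision.resumingDecider))
    .runWith(Sink.foreach(println)) // prints 1 and 2: the stage attribute wins
}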
Example 5
Source File: LogsApp.scala (from 006877, MIT License)
package aia.stream

import java.nio.file.{ Files, FileSystems, Path }
import scala.concurrent.Future
import scala.concurrent.duration._

import akka.NotUsed
import akka.actor.{ ActorSystem , Actor, Props }
import akka.event.Logging

import akka.stream.{ ActorMaterializer, ActorMaterializerSettings, Supervision }

import akka.http.scaladsl.Http
import akka.http.scaladsl.Http.ServerBinding
import akka.http.scaladsl.server.Directives._

import com.typesafe.config.{ Config, ConfigFactory }

object LogsApp extends App {

  val config = ConfigFactory.load() 
  val host = config.getString("http.host")
  val port = config.getInt("http.port")

  val logsDir = {
    val dir = config.getString("log-stream-processor.logs-dir")
    Files.createDirectories(FileSystems.getDefault.getPath(dir))
  }
  val maxLine = config.getInt("log-stream-processor.max-line")

  implicit val system = ActorSystem() 
  implicit val ec = system.dispatcher
  
  val decider: Supervision.Decider = {
    // Both branches stop the stream, making this equivalent to
    // Supervision.stoppingDecider: LogsApp fails fast on the first parse error.
    case _: LogStreamProcessor.LogParseException => Supervision.Stop
    case _                                       => Supervision.Stop
  }
  
  implicit val materializer = ActorMaterializer(
   ActorMaterializerSettings(system)
     .withSupervisionStrategy(decider)
  )
  
  val api = new LogsApi(logsDir, maxLine).routes
 
  val bindingFuture: Future[ServerBinding] =
    Http().bindAndHandle(api, host, port)
 
  val log =  Logging(system.eventStream, "logs")
  bindingFuture.map { serverBinding =>
    log.info(s"Bound to ${serverBinding.localAddress} ")
  }.onFailure { 
    case ex: Exception =>
      log.error(ex, "Failed to bind to {}:{}!", host, port)
      system.terminate()
  }
} 
Example 6
Source File: LogStreamProcessorApp.scala (from 006877, MIT License)
package aia.stream

import java.nio.file.{ Files, FileSystems, Path }
import scala.concurrent.Future
import scala.concurrent.duration._

import akka.NotUsed
import akka.actor.{ ActorSystem , Actor, Props }
import akka.event.Logging

import akka.stream.{ ActorMaterializer, ActorMaterializerSettings, Supervision }

import akka.http.scaladsl.Http
import akka.http.scaladsl.Http.ServerBinding
import akka.http.scaladsl.server.Directives._

import com.typesafe.config.{ Config, ConfigFactory }

object LogStreamProcessorApp extends App {

  val config = ConfigFactory.load() 
  val host = config.getString("http.host")
  val port = config.getInt("http.port")

  val logsDir = {
    val dir = config.getString("log-stream-processor.logs-dir")
    Files.createDirectories(FileSystems.getDefault.getPath(dir))
  }

  val notificationsDir = {
    val dir = config.getString("log-stream-processor.notifications-dir")
    Files.createDirectories(FileSystems.getDefault.getPath(dir))
  }

  val metricsDir = {
    val dir = config.getString("log-stream-processor.metrics-dir")
    Files.createDirectories(FileSystems.getDefault.getPath(dir))
  }

  val maxLine = config.getInt("log-stream-processor.max-line")
  val maxJsObject = config.getInt("log-stream-processor.max-json-object")

  implicit val system = ActorSystem() 
  implicit val ec = system.dispatcher
  
  val decider: Supervision.Decider = {
    case _: LogStreamProcessor.LogParseException => Supervision.Resume
    case _                                       => Supervision.Stop
  }
  
  implicit val materializer = ActorMaterializer(
   ActorMaterializerSettings(system)
     .withSupervisionStrategy(decider)
  )
  
  val api = new LogStreamProcessorApi(logsDir, notificationsDir, metricsDir, maxLine, maxJsObject).routes
 
  val bindingFuture: Future[ServerBinding] =
    Http().bindAndHandle(api, host, port)
 
  val log =  Logging(system.eventStream, "processor")
  bindingFuture.map { serverBinding =>
    log.info(s"Bound to ${serverBinding.localAddress} ")
  }.onFailure { 
    case ex: Exception =>
      log.error(ex, "Failed to bind to {}:{}!", host, port)
      system.terminate()
  }
} 
Example 7
Source File: ContentNegLogsApp.scala (from 006877, MIT License)
package aia.stream

import java.nio.file.{ Files, FileSystems, Path }
import scala.concurrent.Future
import scala.concurrent.duration._

import akka.NotUsed
import akka.actor.{ ActorSystem , Actor, Props }
import akka.event.Logging

import akka.stream.{ ActorMaterializer, ActorMaterializerSettings, Supervision }

import akka.http.scaladsl.Http
import akka.http.scaladsl.Http.ServerBinding
import akka.http.scaladsl.server.Directives._

import com.typesafe.config.{ Config, ConfigFactory }

object ContentNegLogsApp extends App {

  val config = ConfigFactory.load() 
  val host = config.getString("http.host")
  val port = config.getInt("http.port")

  val logsDir = {
    val dir = config.getString("log-stream-processor.logs-dir")
    Files.createDirectories(FileSystems.getDefault.getPath(dir))
  }
  val maxLine = config.getInt("log-stream-processor.max-line")
  val maxJsObject = config.getInt("log-stream-processor.max-json-object")

  implicit val system = ActorSystem() 
  implicit val ec = system.dispatcher
  
  val decider: Supervision.Decider = {
    // Equivalent to Supervision.stoppingDecider: any failure stops the stream.
    case _: LogStreamProcessor.LogParseException => Supervision.Stop
    case _                                       => Supervision.Stop
  }
  
  implicit val materializer = ActorMaterializer(
   ActorMaterializerSettings(system)
     .withSupervisionStrategy(decider)
  )
  
  val api = new ContentNegLogsApi(logsDir, maxLine, maxJsObject).routes
 
  val bindingFuture: Future[ServerBinding] =
    Http().bindAndHandle(api, host, port)
 
  val log =  Logging(system.eventStream, "content-neg-logs")
  bindingFuture.map { serverBinding =>
    log.info(s"Bound to ${serverBinding.localAddress} ")
  }.onFailure { 
    case ex: Exception =>
      log.error(ex, "Failed to bind to {}:{}!", host, port)
      system.terminate()
  }
} 
Example 8
Source File: HandlingErrorsApplication.scala (from Akka-Cookbook, MIT License)
package com.packt.chapter8

import akka.actor.ActorSystem
import akka.stream.{ActorAttributes, ActorMaterializer, ActorMaterializerSettings, Supervision}
import akka.stream.scaladsl._

object HandlingErrorsApplication extends App {

  implicit val actorSystem = ActorSystem("HandlingErrors")

  val streamDecider: Supervision.Decider = {
    case e: IndexOutOfBoundsException =>
      println("Dropping element because of IndexOufOfBoundException. Resuming.")
      Supervision.Resume
    case _ => Supervision.Stop
  }

  val flowDecider: Supervision.Decider = {
    case e: IllegalArgumentException =>
      println("Dropping element because of IllegalArgumentException. Restarting.")
      Supervision.Restart
    case _ => Supervision.Stop
  }

  val actorMaterializerSettings = ActorMaterializerSettings(actorSystem).withSupervisionStrategy(streamDecider)
  implicit val actorMaterializer = ActorMaterializer(actorMaterializerSettings)

  val words = List("Handling", "Errors", "In", "Akka", "Streams", "")

  val flow = Flow[String].map(word => {
    if(word.length == 0) throw new IllegalArgumentException("Empty words are not allowed")
    word
  }).withAttributes(ActorAttributes.supervisionStrategy(flowDecider))

  // "" fails the flow's empty-word check (restarted by flowDecider, element dropped), and
  // word(2) on the two-letter "In" throws IndexOutOfBoundsException (resumed by streamDecider).
  Source(words).via(flow).map(word => word(2)).to(Sink.foreach(println)).run()
} 
Example 9
Source File: Demo.scala (from chordial, BSD 3-Clause "New" or "Revised" License)
package com.tristanpenman.chordial.demo

import akka.actor.ActorSystem
import akka.http.scaladsl.Http
import akka.http.scaladsl.model.ws.TextMessage
import akka.stream.scaladsl._
import akka.stream.{ActorAttributes, ActorMaterializer, OverflowStrategy, Supervision}
import akka.util.Timeout
import com.tristanpenman.chordial.core.Event
import com.tristanpenman.chordial.core.Event._

import scala.concurrent.Await
import scala.concurrent.duration._

object Demo extends App {
  implicit val system = ActorSystem("chordial-demo")
  implicit val mat = ActorMaterializer()
  implicit val ec = system.dispatcher

  implicit val timeout: Timeout = 3.seconds

  // Generate IDs ranging from 0 to 63 (inclusive) so that when visualising the network,
  // each node represents a ~5.625 degree arc on the ring
  private val keyspaceBits = 6

  // Create an actor that is responsible for creating and terminating nodes, while ensuring
  // that nodes are assigned unique IDs in the Chord key-space
  private val governor =
    system.actorOf(Governor.props(keyspaceBits), "Governor")

  // Create an actor that will log events published by nodes
  private val eventWriter = system.actorOf(EventWriter.props, "EventWriter")

  // Subscribe the EventWriter actor to events published by nodes
  system.eventStream.subscribe(eventWriter, classOf[Event])

  val (listener, eventsSource) =
    Source
      .actorRef[Event](Int.MaxValue, OverflowStrategy.fail)
      .map {
        case FingerReset(nodeId: Long, index: Int) =>
          s"""{ "type": "FingerReset", "nodeId": $nodeId, "index": $index }"""
        case FingerUpdated(nodeId: Long, index: Int, fingerId: Long) =>
          s"""{ "type": "FingerUpdated", "nodeId": $nodeId, "index": $index, "fingerId": $fingerId }"""
        case NodeCreated(nodeId, successorId) =>
          s"""{ "type": "NodeCreated", "nodeId": $nodeId, "successorId": $successorId }"""
        case NodeShuttingDown(nodeId) =>
          s"""{ "type": "NodeDeleted", "nodeId": $nodeId }"""
        case PredecessorReset(nodeId) =>
          s"""{ "type": "PredecessorReset", "nodeId": $nodeId }"""
        case PredecessorUpdated(nodeId, predecessorId) =>
          s"""{ "type": "PredecessorUpdated", "nodeId": $nodeId, "predecessorId": $predecessorId }"""
        case SuccessorListUpdated(nodeId, primarySuccessorId, _) =>
          s"""{ "type": "SuccessorUpdated", "nodeId": $nodeId, "successorId": $primarySuccessorId }"""
      }
      .map(TextMessage(_))
      .withAttributes(ActorAttributes.supervisionStrategy(Supervision.resumingDecider))
      .toMat(BroadcastHub.sink[TextMessage](bufferSize = 16))(Keep.both)
      .run()

  system.eventStream.subscribe(listener, classOf[Event])

  Http().bindAndHandle(WebSocketWorker(governor, eventsSource), "0.0.0.0", 4567)

  Await.result(system.whenTerminated, Duration.Inf)
} 
Example 10
Source File: OnCompleteStage.scala (from graphcool-framework, Apache License 2.0)
package cool.graph.akkautil.stream

import akka.stream.ActorAttributes.SupervisionStrategy
import akka.stream.impl.fusing.GraphStages.SimpleLinearGraphStage
import akka.stream.stage.{GraphStageLogic, InHandler, OutHandler}
import akka.stream.{Attributes, Supervision}

case class OnCompleteStage[T](op: () ⇒ Unit) extends SimpleLinearGraphStage[T] {
  override def toString: String = "OnComplete"

  override def createLogic(inheritedAttributes: Attributes): GraphStageLogic =
    new GraphStageLogic(shape) with OutHandler with InHandler {
      def decider =
        inheritedAttributes
          .get[SupervisionStrategy]
          .map(_.decider)
          .getOrElse(Supervision.stoppingDecider)

      override def onPush(): Unit = {
        push(out, grab(in))
      }

      override def onPull(): Unit = pull(in)

      override def onDownstreamFinish() = {
        op()
        super.onDownstreamFinish()
      }

      override def onUpstreamFinish() = {
        op()
        super.onUpstreamFinish()
      }

      setHandlers(in, out, this)
    }
} 
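OnCompleteStage looks the decider up from its inherited attributes but never needs to apply it, since its handlers only forward elements. A stage that transforms elements would consult the decider when the user function throws. A sketch modeled on the pattern Akka's own map stage uses (SafeMap is illustrative, not part of the project):

import akka.stream.ActorAttributes.SupervisionStrategy
import akka.stream.stage.{GraphStage, GraphStageLogic, InHandler, OutHandler}
import akka.stream.{Attributes, FlowShape, Inlet, Outlet, Supervision}

import scala.util.control.NonFatal

final class SafeMap[A, B](f: A => B) extends GraphStage[FlowShape[A, B]] {
  val in = Inlet[A]("SafeMap.in")
  val out = Outlet[B]("SafeMap.out")
  override val shape = FlowShape(in, out)

  override def createLogic(inheritedAttributes: Attributes): GraphStageLogic =
    new GraphStageLogic(shape) with InHandler with OutHandler {
      // Same lookup as OnCompleteStage above: fall back to stopping if no attribute is set.
      private def decider =
        inheritedAttributes
          .get[SupervisionStrategy]
          .map(_.decider)
          .getOrElse(Supervision.stoppingDecider)

      override def onPush(): Unit =
        try push(out, f(grab(in)))
        catch {
          case NonFatal(ex) =>
            if (decider(ex) == Supervision.Stop) failStage(ex)
            else pull(in) // Resume or Restart: drop the element, request the next
        }

      override def onPull(): Unit = pull(in)

      setHandlers(in, out, this)
    }
}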
Example 11
Source File: RemoraApp.scala (from remora, MIT License)
import java.io.IOException
import java.net.ConnectException
import java.util.concurrent.{TimeUnit, TimeoutException}

import akka.actor.ActorSystem
import akka.stream.{ActorMaterializer, ActorMaterializerSettings, Supervision}
import com.amazonaws.services.cloudwatch.{AmazonCloudWatchAsync, AmazonCloudWatchAsyncClientBuilder}
import com.blacklocus.metrics.CloudWatchReporterBuilder
import com.codahale.metrics.jvm.{GarbageCollectorMetricSet, MemoryUsageGaugeSet, ThreadStatesGaugeSet}
import com.typesafe.scalalogging.LazyLogging
import config.{KafkaSettings, MetricsSettings}
import kafka.admin.RemoraKafkaConsumerGroupService
import reporter.RemoraDatadogReporter

import scala.concurrent.duration._
import scala.util.control.NonFatal

object RemoraApp extends App with nl.grons.metrics.scala.DefaultInstrumented with LazyLogging {

  private val actorSystemName: String = "remora"
  implicit val actorSystem = ActorSystem(actorSystemName)

  metricRegistry.registerAll(new GarbageCollectorMetricSet)
  metricRegistry.registerAll(new MemoryUsageGaugeSet)
  metricRegistry.registerAll(new ThreadStatesGaugeSet)

  lazy val decider: Supervision.Decider = {
    case _: IOException | _: ConnectException | _: TimeoutException => Supervision.Restart
    case NonFatal(err: Throwable) =>
      actorSystem.log.error(err, "Unhandled Exception in Stream: {}", err.getMessage)
      Supervision.Stop
  }

  implicit val materializer = ActorMaterializer(
    ActorMaterializerSettings(actorSystem).withSupervisionStrategy(decider))(actorSystem)

  implicit val executionContext = actorSystem.dispatchers.lookup("kafka-consumer-dispatcher")
  val kafkaSettings = KafkaSettings(actorSystem.settings.config)
  val consumer = new RemoraKafkaConsumerGroupService(kafkaSettings)
  val kafkaClientActor = actorSystem.actorOf(KafkaClientActor.props(consumer), name = "kafka-client-actor")

  Api(kafkaClientActor).start()

  val metricsSettings = MetricsSettings(actorSystem.settings.config)

  if (metricsSettings.registryOptions.enabled) {
    val exportConsumerMetricsToRegistryActor =
      actorSystem.actorOf(ExportConsumerMetricsToRegistryActor.props(kafkaClientActor),
        name = "export-consumer-metrics-actor")
    actorSystem.scheduler.schedule(0 second, metricsSettings.registryOptions.intervalSeconds second, exportConsumerMetricsToRegistryActor, "export")
  }

  if (metricsSettings.cloudWatch.enabled) {
    logger.info("Reporting metricsRegistry to Cloudwatch")
    val amazonCloudWatchAsync: AmazonCloudWatchAsync = AmazonCloudWatchAsyncClientBuilder.defaultClient

    new CloudWatchReporterBuilder()
      .withNamespace(metricsSettings.cloudWatch.name)
      .withRegistry(metricRegistry)
      .withClient(amazonCloudWatchAsync)
      .build()
      .start(metricsSettings.cloudWatch.intervalMinutes, TimeUnit.MINUTES)
  }

  if (metricsSettings.dataDog.enabled) {
    logger.info(s"Reporting metricsRegistry to Datadog at ${metricsSettings.dataDog.agentHost}:${metricsSettings.dataDog.agentPort}")
    val datadogReporter = new RemoraDatadogReporter(metricRegistry, metricsSettings.dataDog)
    datadogReporter.startReporter()
  }

} 
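Restarting on IOException, ConnectException, and TimeoutException treats network trouble as transient: the failing stage is rebuilt and the stream keeps running, while anything unexpected is logged and stops it. A sketch of the same decider shape on a polling stream (poll and its failure rate are hypothetical):

import java.io.IOException

import akka.actor.ActorSystem
import akka.stream.{ActorMaterializer, ActorMaterializerSettings, Supervision}
import akka.stream.scaladsl.{Sink, Source}

import scala.concurrent.duration._
import scala.util.control.NonFatal

object PollingSupervision extends App {
  implicit val system = ActorSystem()

  val decider: Supervision.Decider = {
    case _: IOException => Supervision.Restart // transient: rebuild the stage, keep polling
    case NonFatal(err) =>
      system.log.error(err, "Unhandled exception in stream: {}", err.getMessage)
      Supervision.Stop
  }

  implicit val materializer = ActorMaterializer(
    ActorMaterializerSettings(system).withSupervisionStrategy(decider))

  def poll(): String = // hypothetical stand-in for a flaky network call
    if (scala.util.Random.nextInt(5) == 0) throw new IOException("flaky endpoint") else "ok"

  Source.tick(0.seconds, 1.second, ()).map(_ => poll()).runWith(Sink.foreach(println))
}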
Example 12
Source File: WSApi.scala (from mist, Apache License 2.0)
package io.hydrosphere.mist.master.interfaces.http

import akka.http.scaladsl.model.ws._
import akka.http.scaladsl.server.{Directives, Route}
import akka.stream.ActorAttributes.supervisionStrategy
import akka.stream.Supervision.resumingDecider
import akka.stream.{ActorAttributes, Supervision}
import akka.stream.scaladsl.{Flow, Sink}
import io.hydrosphere.mist.master.EventsStreamer
import io.hydrosphere.mist.master.Messages.StatusMessages._
import io.hydrosphere.mist.master.interfaces.JsonCodecs

import scala.concurrent.duration._
import spray.json._

import scala.language.postfixOps


class WSApi(streamer: EventsStreamer)(implicit val keepAliveTimeout: FiniteDuration) {

  import Directives._
  import JsonCodecs._

  val route: Route = {
    pathPrefix("v2" / "api"/ "ws" ) { parameter('withLogs ? false)  { withLogs =>
      path("all") {
        get {
          handleWebSocketMessagesWithKeepAlive(allEventsWsFlow(withLogs))
        }
      } ~
      path("jobs" / Segment) { jobId =>
        get {
          handleWebSocketMessagesWithKeepAlive(jobWsFlow(jobId, withLogs))
        }
      }
    }}
  }

  private def handleWebSocketMessagesWithKeepAlive(handler: Flow[Message, Message, akka.NotUsed]): Route =
    handleWebSocketMessages(handler
      .withAttributes(supervisionStrategy(resumingDecider))
      .keepAlive(
        keepAliveTimeout,
        () => TextMessage.Strict(KeepAliveEvent.asInstanceOf[SystemEvent].toJson.toString())
      ))


  private def jobWsFlow(id: String, withLogs: Boolean): Flow[Message, Message, akka.NotUsed] = {
    val source = streamer.eventsSource()
      .filter({
        case e: UpdateStatusEvent => e.id == id
        case e: ReceivedLogs if withLogs => e.id == id
        case _ => false
      })
      .map(toWsMessage)

    // Incoming messages from the client are ignored; only events flow out to the socket.
    val sink = Sink.ignore
    Flow.fromSinkAndSource(sink, source)
  }

  private def allEventsWsFlow(withLogs: Boolean): Flow[Message, Message, akka.NotUsed] = {
    val source = streamer.eventsSource()
      .filter({
        case _: ReceivedLogs => withLogs
        case _ => true
      })
      .map(toWsMessage)

    val sink = Sink.ignore
    Flow.fromSinkAndSource(sink, source)
  }

  private def toWsMessage(e: SystemEvent): Message = TextMessage.Strict(e.toJson.toString())
} 
Example 13
Source File: StreamLoader.scala (from recogito2, Apache License 2.0)
package controllers.admin.authorities

import akka.stream.{ActorAttributes, ClosedShape, Materializer, Supervision}
import akka.stream.scaladsl._
import akka.util.ByteString
import java.io.InputStream
import services.entity.EntityRecord
import services.entity.builtin.importer.EntityImporter
import play.api.Logger
import play.api.libs.json.Json
import scala.concurrent.{Await, ExecutionContext}
import scala.concurrent.duration._

class StreamLoader(implicit materializer: Materializer) {
  
  private val BATCH_SIZE = 100
  
  private val decider: Supervision.Decider = {    
    case t: Throwable => 
      t.printStackTrace()
      Supervision.Stop    
  }
  
  def importPlaces(is: InputStream, crosswalk: String => Option[EntityRecord], importer: EntityImporter)(implicit ctx: ExecutionContext) = {
    
    val source = StreamConverters.fromInputStream(() => is, 1024)
      .via(Framing.delimiter(ByteString("\n"), maximumFrameLength = Int.MaxValue, allowTruncation = false))
      .map(_.utf8String)
      
    val parser = Flow.fromFunction[String, Option[EntityRecord]](crosswalk)
      .withAttributes(ActorAttributes.supervisionStrategy(decider))
      .grouped(BATCH_SIZE)
      
    val sink = Sink.foreach[Seq[Option[EntityRecord]]] { records =>
      val toImport = records.flatten
      Await.result(importer.importRecords(toImport), 60.minutes)
    }
    
    val graph = RunnableGraph.fromGraph(GraphDSL.create() { implicit builder =>
      
      import GraphDSL.Implicits._
      
      source ~> parser ~> sink
      
      ClosedShape
    }).withAttributes(ActorAttributes.supervisionStrategy(decider))
        
    graph.run()
  }
  
} 
Example 14
Source File: CmdStreams.scala (from AckCord, MIT License)
package ackcord.oldcommands

import ackcord._
import akka.NotUsed
import akka.actor.typed.ActorSystem
import akka.stream.scaladsl.{BroadcastHub, Keep, Source}
import akka.stream.{ActorAttributes, Supervision}

object CmdStreams {

  
  def cmdStreams[A](
      settings: AbstractCommandSettings,
      apiMessages: Source[APIMessage, A]
  )(implicit system: ActorSystem[Nothing]): (A, Source[RawCmdMessage, NotUsed]) = {
    apiMessages
      .collect {
        case APIMessage.MessageCreate(msg, c) =>
          implicit val cache: MemoryCacheSnapshot = c.current

          CmdHelper.isValidCommand(settings.needMention(msg), msg).map { args =>
            if (args == Nil) NoCmd(msg, c.current)
            else {
              settings
                .getPrefix(args, msg)
                .fold[RawCmdMessage](NoCmdPrefix(msg, args.head, args.tail, cache)) {
                  case (prefix, remaining) => RawCmd(msg, prefix, remaining.head, remaining.tail.toList, c.current)
                }
            }
          }
      }
      .mapConcat(_.toList)
      .toMat(BroadcastHub.sink(bufferSize = 256))(Keep.both)
      .addAttributes(ActorAttributes.supervisionStrategy(Supervision.resumingDecider))
      .run()
  }

} 
Example 15
Source File: SupervisionStreams.scala (from AckCord, MIT License)
package ackcord.requests

import akka.actor.typed.ActorSystem
import akka.stream.javadsl.RunnableGraph
import akka.stream.scaladsl.{Flow, Sink, Source}
import akka.stream.{ActorAttributes, Attributes, Supervision}

object SupervisionStreams {

  def addLogAndContinueFunction[G](addAttributes: Attributes => G)(implicit system: ActorSystem[Nothing]): G =
    addAttributes(ActorAttributes.supervisionStrategy {
      case _: RetryFailedRequestException[_] => Supervision.Stop
      case e =>
        system.log.error("Unhandled exception in stream", e)
        Supervision.Resume
    })

  def logAndContinue[M](graph: RunnableGraph[M])(implicit system: ActorSystem[Nothing]): RunnableGraph[M] =
    addLogAndContinueFunction(graph.addAttributes)

  def logAndContinue[Out, Mat](source: Source[Out, Mat])(implicit system: ActorSystem[Nothing]): Source[Out, Mat] =
    addLogAndContinueFunction(source.addAttributes)

  def logAndContinue[In, Out, Mat](
      flow: Flow[In, Out, Mat]
  )(implicit system: ActorSystem[Nothing]): Flow[In, Out, Mat] =
    addLogAndContinueFunction(flow.addAttributes)

  def logAndContinue[In, Mat](sink: Sink[In, Mat])(implicit system: ActorSystem[Nothing]): Sink[In, Mat] =
    addLogAndContinueFunction(sink.addAttributes)
}
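
A hypothetical caller of these helpers (LogAndContinueUsage is illustrative; the helpers themselves come from the example above):

import ackcord.requests.SupervisionStreams
import akka.actor.typed.ActorSystem
import akka.actor.typed.scaladsl.Behaviors
import akka.stream.scaladsl.{Sink, Source}

object LogAndContinueUsage extends App {
  implicit val system: ActorSystem[Nothing] = ActorSystem(Behaviors.empty, "demo")

  // Any failure other than RetryFailedRequestException is logged and skipped
  // instead of failing the stream.
  SupervisionStreams
    .logAndContinue(Source(List("1", "oops", "2")).map(_.toInt))
    .runWith(Sink.foreach(println)) // prints 1 and 2
}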