akka.actor.Status Scala Examples
The following examples show how to use akka.actor.Status. Each example notes the project and source file it was taken from.
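As a quick orientation before the project examples: akka.actor.Status.Success and akka.actor.Status.Failure are the standard envelope messages for completing an ask or a pipeTo with an explicit outcome. The minimal sketch below is not taken from any of the projects listed here; the actor and message names are made up for illustration. It shows the common pattern of replying with Status.Failure so that the asking side sees a failed Future instead of a timeout.

import akka.actor.{Actor, ActorSystem, Props, Status}
import akka.pattern.pipe
import scala.concurrent.Future

// Hypothetical actor for illustration only.
class EchoActor extends Actor {
  import context.dispatcher

  def receive: Receive = {
    case "ok"    => sender() ! Status.Success("done")                            // explicit success envelope
    case "boom"  => sender() ! Status.Failure(new IllegalStateException("boom")) // ask() callers get a failed Future
    case "async" => Future("result") pipeTo sender()                             // pipeTo wraps failures in Status.Failure
  }
}

object EchoMain extends App {
  val system = ActorSystem("status-demo")
  system.actorOf(Props(new EchoActor), "echo")
}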
Example 1
Source File: TelnetClientActor.scala From asura with MIT License
package asura.dubbo.actor

import java.net.InetSocketAddress

import akka.actor.{ActorRef, Props, Status}
import akka.io.{IO, Tcp}
import akka.util.ByteString
import asura.common.actor.BaseActor
import asura.common.util.LogUtils
import asura.dubbo.DubboConfig

class TelnetClientActor(remote: InetSocketAddress, listener: ActorRef) extends BaseActor {

  import Tcp._
  import context.system

  IO(Tcp) ! Connect(remote)

  override def receive: Receive = {
    case CommandFailed(_: Connect) =>
      listener ! ByteString(s"${TelnetClientActor.MSG_CONNECT_TO} ${remote.getAddress.getHostAddress}:${remote.getPort} ${TelnetClientActor.MSG_FAIL}\r\n")
      context stop self
    case Connected(remote, local) =>
      log.debug(s"local address: ${local}, remote address: ${remote}")
      listener ! ByteString(s"${TelnetClientActor.MSG_CONNECT_TO} ${remote.getAddress.getHostAddress}:${remote.getPort} ${TelnetClientActor.MSG_SUCCESS}\r\n")
      val remoteConnection = sender()
      remoteConnection ! Register(self)
      context.become {
        case data: ByteString =>
          remoteConnection ! Write(data)
        case CommandFailed(_: Write) =>
          listener ! ByteString("write failed\r\n")
        case Received(data) =>
          listener ! data
        case TelnetClientActor.CMD_CLOSE =>
          remoteConnection ! Close
        case _: ConnectionClosed =>
          listener ! ByteString(s"connection to ${remote.getAddress.getHostAddress}:${remote.getPort} closed\r\n")
          context stop self
      }
    case Status.Failure(t) =>
      val stackTrace = LogUtils.stackTraceToString(t)
      log.warning(stackTrace)
      listener ! t.getMessage
      context stop self
  }

  override def postStop(): Unit = log.debug(s"${self.path} stopped")
}

object TelnetClientActor {

  val CMD_CLOSE = "close"
  val MSG_CONNECT_TO = "connect to"
  val MSG_SUCCESS = "success"
  val MSG_FAIL = "fail"

  def props(remote: InetSocketAddress, replies: ActorRef) = {
    Props(new TelnetClientActor(remote, replies))
  }

  def props(address: String, port: Int, replies: ActorRef) = {
    Props(
      new TelnetClientActor(
        new InetSocketAddress(address, if (port > 0) port else DubboConfig.DEFAULT_PORT),
        replies
      )
    )
  }
}
Example 2
Source File: DistributedProcessingWorker.scala From aecor with MIT License
package aecor.distributedprocessing

import aecor.distributedprocessing.DistributedProcessing._
import aecor.distributedprocessing.DistributedProcessingWorker.KeepRunning
import aecor.distributedprocessing.serialization.Message
import cats.effect.syntax.effect._
import akka.actor.{ Actor, ActorLogging, Props, Status }
import akka.pattern._
import cats.effect.Effect
import cats.implicits._

private[aecor] object DistributedProcessingWorker {
  def props[F[_]: Effect](processWithId: Int => Process[F], processName: String): Props =
    Props(new DistributedProcessingWorker[F](processWithId, processName))

  final case class KeepRunning(workerId: Int) extends Message
}

private[aecor] final class DistributedProcessingWorker[F[_]: Effect](
  processFor: Int => Process[F],
  processName: String
) extends Actor
    with ActorLogging {
  import context.dispatcher

  case class ProcessStarted(process: RunningProcess[F])
  case object ProcessTerminated

  var killSwitch: Option[F[Unit]] = None

  override def postStop: Unit =
    killSwitch.foreach(_.toIO.unsafeRunSync())

  def receive: Receive = {
    case KeepRunning(workerId) =>
      log.info("[{}] Starting process {}", workerId, processName)
      processFor(workerId).run
        .map(ProcessStarted)
        .toIO
        .unsafeToFuture() pipeTo self
      context.become {
        case ProcessStarted(RunningProcess(watchTermination, terminate)) =>
          log.info("[{}] Process started {}", workerId, processName)
          killSwitch = Some(terminate)
          watchTermination.toIO.map(_ => ProcessTerminated).unsafeToFuture() pipeTo self
          context.become {
            case Status.Failure(e) =>
              log.error(e, "Process failed {}", processName)
              throw e
            case ProcessTerminated =>
              log.error("Process terminated {}", processName)
              throw new IllegalStateException(s"Process terminated $processName")
          }
        case Status.Failure(e) =>
          log.error(e, "Process failed to start {}", processName)
          throw e
        case KeepRunning(_) => ()
      }
  }
}
Example 3
Source File: FutureRetryUtility.scala From NSDb with Apache License 2.0
package io.radicalbit.nsdb.util

import akka.actor.{Actor, ActorRef, Scheduler, Status}
import akka.event.LoggingAdapter
import akka.pattern.after

import scala.concurrent.duration.FiniteDuration
import scala.concurrent.{ExecutionContext, Future}
import scala.util.{Failure, Success}

trait FutureRetryUtility {

  implicit class FutureRetry[T](f: => Future[T]) {
    def retry(delay: FiniteDuration, retries: Int)(wasSuccessful: T => Boolean)(
        implicit ec: ExecutionContext,
        s: Scheduler,
        log: LoggingAdapter): Future[T] =
      (for {
        a <- f
        result <- if (wasSuccessful(a) || retries < 1) Future(a)
        else {
          log.warning("{}. Retrying...", a); after(delay, s)(retry(delay, retries - 1)(wasSuccessful))
        }
      } yield result) recoverWith {
        case t if retries > 0 =>
          log.warning("{}. Retrying...", t); after(delay, s)(retry(delay, retries - 1)(wasSuccessful))
      }
  }

  implicit class PipeToFutureRetry[T](f: => Future[T]) {
    def pipeTo(delay: FiniteDuration, retries: Int, recipient: ActorRef)(wasSuccessful: T => Boolean = _ => true)(
        implicit ec: ExecutionContext,
        s: Scheduler,
        log: LoggingAdapter,
        sender: ActorRef = Actor.noSender) =
      f.retry(delay, retries)(wasSuccessful) andThen {
        case Success(r) ⇒ recipient ! r
        case Failure(f) ⇒ recipient ! Status.Failure(f)
      }
  }
}
Example 4
Source File: PipeableFutureWithSideEffect.scala From NSDb with Apache License 2.0
package io.radicalbit.nsdb.util

import akka.actor.{Actor, ActorRef, Status}

import scala.concurrent.{ExecutionContext, Future}
import scala.util.{Failure, Success}

final class PipeableFutureWithSideEffect[T](val future: Future[T])(implicit executionContext: ExecutionContext) {

  def pipeTo(recipient: ActorRef)(implicit sender: ActorRef = Actor.noSender): Future[T] = {
    future andThen {
      case Success(r) ⇒ recipient ! r
      case Failure(f) ⇒ recipient ! Status.Failure(f)
    }
  }

  def pipeToWithEffect(recipient: ActorRef)(effect: T => Unit)(
      implicit sender: ActorRef = Actor.noSender): Future[T] = {
    future andThen {
      case Success(r) ⇒
        effect(r)
        recipient ! r
      case Failure(f) ⇒ recipient ! Status.Failure(f)
    }
  }
}

object PipeableFutureWithSideEffect {
  implicit def pipe[T](future: Future[T])(
      implicit executionContext: ExecutionContext): PipeableFutureWithSideEffect[T] =
    new PipeableFutureWithSideEffect(future)
}
Example 5
Source File: FutureRetryUtilitySpec.scala From NSDb with Apache License 2.0
package io.radicalbit.nsdb.util

import akka.actor.{ActorSystem, Scheduler, Status}
import akka.event.{Logging, LoggingAdapter}
import akka.testkit.{TestKit, TestProbe}
import org.scalatest.{Matchers, WordSpecLike}

import scala.collection.mutable
import scala.concurrent.duration._
import scala.concurrent.{Await, Future}
import scala.concurrent.ExecutionContext.Implicits.global

class FutureRetryUtilitySpec
    extends TestKit(ActorSystem("MySpec"))
    with WordSpecLike
    with Matchers
    with FutureRetryUtility {

  implicit val schedule: Scheduler = system.scheduler
  implicit val logger: LoggingAdapter = Logging.getLogger(system, this)

  private final val delay: FiniteDuration = 2.seconds
  private final val retries: Int = 3

  private def future(flag: Boolean) =
    if (flag) Future.successful(3) else Future.failed(new RuntimeException("Failure"))

  "retry function in FutureRetryUtility" must {

    "successfully returns whether, after retries, the future is eventually successful" in {
      Await.result(future(true).retry(delay, retries)(_ > 2), Duration.Inf) shouldBe 3
    }

    "thrown an Exception whether, after retries, the future eventually returns an Exception" in {
      an[RuntimeException] shouldBe thrownBy(Await.result(future(false).retry(delay, retries)(_ => true), Duration.Inf))
    }

    "consider the number of retries" in {
      val q = mutable.Queue(0)
      def future = {
        val nRetries = q.dequeue()
        if (nRetries < 2) { q.enqueue(nRetries + 1); Future.failed(new RuntimeException) }
        else { q.enqueue(nRetries + 1); Future.successful(nRetries) }
      }
      Await.result(future.retry(delay, retries)(_ > 2), Duration.Inf) shouldBe 3
    }
  }

  "pipeTo function in FutureRetryUtility" must {

    "returns a successful future and send the content of it through pipe" in {
      val testProbe = TestProbe("actor-test")
      future(true).pipeTo(delay, retries, testProbe.testActor)()
      testProbe.expectMsg(3)
    }

    "return a failed future and send a status failure through pipe" in {
      val testProbe = TestProbe("actor-test")
      future(false).pipeTo(delay, retries, testProbe.testActor)()
      testProbe.expectMsgAllClassOf(classOf[Status.Failure])
    }
  }
}
Example 6
Source File: SequentialFutureProcessing.scala From NSDb with Apache License 2.0
package io.radicalbit.nsdb.cluster.actor

import akka.actor.{Actor, ActorRef, Stash, Status}
import io.radicalbit.nsdb.cluster.actor.SequentialFutureProcessing.{Continue, PipeableFutureWithContinue}
import io.radicalbit.nsdb.common.protocol.NSDbSerializable

import scala.concurrent.{ExecutionContext, Future}
import scala.util.{Failure, Success}

// Note: this listing is an excerpt; the enclosing class/object declarations (and the `future`
// value the method refers to) were omitted in the original snippet.

  def pipeToWithEffect(recipient: ActorRef)(effect: T => Unit)(
      implicit sender: ActorRef = Actor.noSender): Future[T] = {
    future andThen {
      case Success(r) =>
        effect(r)
        recipient ! r
        sender ! Continue
      case Failure(f) =>
        recipient ! Status.Failure(f)
        sender ! Continue
    }
    future
  }
  }
}
Example 7
Source File: GenericServiceInvokerActor.scala From asura with MIT License
package asura.dubbo.actor

import akka.actor.{Props, Status}
import akka.pattern.{ask, pipe}
import akka.util.Timeout
import asura.common.actor.BaseActor
import asura.common.util.LogUtils
import asura.dubbo.actor.GenericServiceInvokerActor.{GetInterfaceMethodParams, GetInterfacesMessage, GetProvidersMessage}
import asura.dubbo.{DubboConfig, GenericRequest}

import scala.concurrent.{ExecutionContext, Future}

class GenericServiceInvokerActor extends BaseActor {

  implicit val ec: ExecutionContext = context.dispatcher
  implicit val timeout: Timeout = DubboConfig.DEFAULT_ACTOR_ASK_TIMEOUT

  val curatorClientCacheActor = context.actorOf(CuratorClientCacheActor.props())
  val referenceActor = context.actorOf(DubboReferenceCacheActor.props())

  override def receive: Receive = {
    case msg: GetInterfacesMessage =>
      curatorClientCacheActor ? msg pipeTo sender()
    case msg: GetProvidersMessage =>
      curatorClientCacheActor ? msg pipeTo sender()
    case msg: GenericRequest =>
      referenceActor ? msg pipeTo sender()
    case msg: GetInterfaceMethodParams =>
      context.actorOf(InterfaceMethodParamsActor.props(sender(), msg))
    case Status.Failure(t) =>
      log.warning(LogUtils.stackTraceToString(t))
      Future.failed(t) pipeTo sender()
    case _ =>
      Future.failed(new RuntimeException("Unknown message type")) pipeTo sender()
  }

  override def postStop(): Unit = {
    DubboConfig.DUBBO_EC.shutdown()
  }
}

object GenericServiceInvokerActor {

  def props() = Props(new GenericServiceInvokerActor())

  case class GetInterfacesMessage(
                                   zkConnectString: String,
                                   path: String,
                                   zkUsername: String = null,
                                   zkPassword: String = null,
                                 )

  case class GetProvidersMessage(
                                  zkConnectString: String,
                                  path: String,
                                  ref: String,
                                  zkUsername: String = null,
                                  zkPassword: String = null,
                                )

  case class GetInterfaceMethodParams(address: String, port: Int, ref: String)
}
Example 8
Source File: TelnetDubboProviderActor.scala From asura with MIT License
package asura.dubbo.actor

import akka.actor.{ActorRef, Props, Status, Terminated}
import akka.util.ByteString
import asura.common.actor.{BaseActor, ErrorActorEvent, NotifyActorEvent, SenderMessage}
import asura.common.util.LogUtils

class TelnetDubboProviderActor(address: String, port: Int) extends BaseActor {

  override def receive: Receive = {
    case SenderMessage(sender) =>
      val providerActor = context.actorOf(TelnetClientActor.props(address, port, self))
      context.watch(providerActor)
      context.become(handleRequest(sender, providerActor))
  }

  def handleRequest(wsActor: ActorRef, providerActor: ActorRef): Receive = {
    case cmd: String =>
      if (cmd == TelnetDubboProviderActor.CMD_EXIT || cmd == TelnetDubboProviderActor.CMD_QUIT) {
        providerActor ! ByteString(TelnetClientActor.CMD_CLOSE)
        wsActor ! NotifyActorEvent(TelnetDubboProviderActor.MSG_BYE)
      } else {
        providerActor ! ByteString(cmd)
      }
    case data: ByteString =>
      wsActor ! NotifyActorEvent(data.utf8String)
    case Terminated(_) =>
      wsActor ! Status.Success
    case Status.Failure(t) =>
      val stackTrace = LogUtils.stackTraceToString(t)
      log.warning(stackTrace)
      wsActor ! ErrorActorEvent(t.getMessage)
      providerActor ! ByteString(TelnetClientActor.CMD_CLOSE)
      wsActor ! Status.Success
  }

  override def postStop(): Unit = {
    log.debug(s"${address}:${port} stopped")
  }
}

object TelnetDubboProviderActor {

  val CMD_QUIT = "quit"
  val CMD_EXIT = "exit"
  val MSG_BYE = "Bye!"

  def props(address: String, port: Int) = Props(new TelnetDubboProviderActor(address, port))
}
Example 9
Source File: InterfaceMethodParamsActor.scala From asura with MIT License
package asura.dubbo.actor

import akka.actor.{ActorRef, Props, Status}
import akka.pattern.pipe
import akka.util.ByteString
import asura.common.actor.BaseActor
import asura.common.util.LogUtils
import asura.dubbo.DubboConfig
import asura.dubbo.actor.GenericServiceInvokerActor.GetInterfaceMethodParams
import asura.dubbo.model.InterfaceMethodParams
import asura.dubbo.model.InterfaceMethodParams.MethodSignature

import scala.collection.mutable.ArrayBuffer
import scala.concurrent.{ExecutionContext, Future}

class InterfaceMethodParamsActor(invoker: ActorRef, msg: GetInterfaceMethodParams) extends BaseActor {

  implicit val ec: ExecutionContext = context.dispatcher
  private val telnet: ActorRef = context.actorOf(
    TelnetClientActor.props(msg.address, if (msg.port > 0) msg.port else DubboConfig.DEFAULT_PORT, self))

  override def receive: Receive = {
    case telnetData: ByteString =>
      val utf8String = telnetData.utf8String
      if (utf8String.contains(TelnetClientActor.MSG_CONNECT_TO)) {
        log.debug(utf8String)
        if (utf8String.contains(TelnetClientActor.MSG_SUCCESS)) {
          telnet ! ByteString(s"ls -l ${msg.ref}\r\n")
        } else if (utf8String.contains(TelnetClientActor.MSG_FAIL)) {
          Future.failed(new RuntimeException(s"Remote connection to ${msg.address}:${msg.port} failed")) pipeTo invoker
          telnet ! TelnetClientActor.CMD_CLOSE
          context stop self
        } else {
          Future.failed(new RuntimeException(s"Unknown response ${utf8String}")) pipeTo invoker
          telnet ! TelnetClientActor.CMD_CLOSE
          context stop self
        }
      } else if (utf8String.contains("(") && utf8String.contains(")")) {
        getInterfaceMethodParams(msg.ref, utf8String) pipeTo invoker
        telnet ! TelnetClientActor.CMD_CLOSE
      } else {
        Future.failed(new RuntimeException(s"Unknown response: ${utf8String}")) pipeTo invoker
        telnet ! TelnetClientActor.CMD_CLOSE
        context stop self
      }
    case Status.Failure(t) =>
      val stackTrace = LogUtils.stackTraceToString(t)
      log.warning(stackTrace)
      context stop self
  }

  def getInterfaceMethodParams(ref: String, content: String): Future[InterfaceMethodParams] = {
    Future.successful {
      val methods = ArrayBuffer[MethodSignature]()
      content.split("\r\n")
        .filter(!_.startsWith(DubboConfig.DEFAULT_PROMPT))
        .map(signature => {
          val splits = signature.split(" ")
          if (splits.length == 2) {
            val ret = splits(0)
            val secondPart = splits(1)
            val idx = secondPart.indexOf("(")
            val method = secondPart.substring(0, idx)
            val params = secondPart.substring(idx + 1, secondPart.length - 1).split(",")
            methods += (MethodSignature(ret, method, params))
          }
        })
      InterfaceMethodParams(ref, methods)
    }
  }

  override def postStop(): Unit = log.debug(s"${self.path} stopped")
}

object InterfaceMethodParamsActor {
  def props(invoker: ActorRef, msg: GetInterfaceMethodParams) = {
    Props(new InterfaceMethodParamsActor(invoker, msg))
  }
}
Example 10
Source File: RetryActor.scala From seahorse with Apache License 2.0
package ai.deepsense.commons.utils

import scala.concurrent.{ExecutionContext, Future}
import scala.concurrent.duration.FiniteDuration
import scala.util.{Failure, Success, Try}

import akka.actor.{Actor, ActorRef, Status}

class RetryActor[T](
    retryInterval: FiniteDuration,
    retryCountLimit: Int,
    workCode: => Future[T],
    workDescription: Option[String]) extends Actor with Logging {

  import RetryActor._

  private implicit val ec: ExecutionContext = context.system.dispatcher

  override def receive: Receive = {
    case Trigger => doWork(sender, 0)
    case Retry(initialSender, retryCount) => doWork(initialSender, retryCount)
  }

  val workDescriptionForLogs: String = workDescription.map(" " + _).getOrElse(" some work")

  private def doWork(initialSender: ActorRef, retryCount: Int): Unit = {
    workCode.onComplete {
      case Success(t) =>
        initialSender ! t
      case Failure(RetriableException(msg, cause)) if retryCount < retryCountLimit =>
        logFailure(msg, cause)
        logger.info(s"Will retry$workDescriptionForLogs in $retryInterval.")
        context.system.scheduler.scheduleOnce(retryInterval, self, Retry(initialSender, retryCount + 1))
      case Failure(RetriableException(msg, cause)) if retryCount >= retryCountLimit =>
        logFailure(msg, cause)
        val retryLimitReachedException =
          RetryLimitReachedException(s"Retry limit of $retryCountLimit reached, last error was $cause", cause)
        logger.error(s"Retry limit reached for$workDescriptionForLogs.", retryLimitReachedException)
        initialSender ! Status.Failure(retryLimitReachedException)
      case Failure(f) =>
        logFailure(f.getMessage, Some(f))
        logger.error(s"Unexpected exception when performing$workDescriptionForLogs.", f)
        initialSender ! Status.Failure(f)
    }
  }

  private def logFailure(msg: String, tOpt: Option[Throwable]): Unit = {
    val msgText = s"Exception when performing$workDescriptionForLogs. The message was: $msg"
    tOpt match {
      case Some(t) => logger.info(msgText, t)
      case None => logger.info(msgText)
    }
  }
}

object RetryActor {
  sealed trait Message
  case object Trigger extends Message
  case class Retry(initialSender: ActorRef, retryCount: Int) extends Message

  case class RetryLimitReachedException(msg: String, lastError: Option[Throwable]) extends Exception(msg)
  case class RetriableException(msg: String, cause: Option[Throwable]) extends Exception(msg, cause.orNull)
}
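A hedged usage sketch for the actor above: the caller sends Trigger and eventually receives either the successful work result or a Status.Failure. The system value and the trivial workCode future below are assumptions made purely for illustration.

import akka.actor.{ActorSystem, Props}
import scala.concurrent.Future
import scala.concurrent.duration._

val system = ActorSystem("retry-demo") // assumed to exist in the surrounding application

val retryRef = system.actorOf(Props(new RetryActor[String](
  retryInterval = 1.second,
  retryCountLimit = 3,
  workCode = Future.successful("done"), // hypothetical work; a real caller passes a by-name Future
  workDescription = Some("demo work"))))

// The reply to the sender is either "done", or Status.Failure(...) once the retry limit is exhausted.
retryRef ! RetryActor.Trigger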
Example 11
Source File: JobReportDataItemSaveActor.scala From asura with MIT License
package asura.core.job.actor

import akka.actor.{Props, Status}
import asura.common.actor.BaseActor
import asura.common.util.LogUtils
import asura.core.actor.messages.Flush
import asura.core.es.model.JobReportDataItem
import asura.core.es.service.JobReportDataItemService
import asura.core.job.actor.JobReportDataItemSaveActor.SaveReportDataHttpItemMessage

import scala.collection.mutable.ArrayBuffer
import scala.concurrent.duration._

class JobReportDataItemSaveActor(dayIndexSuffix: String) extends BaseActor {

  val messages = ArrayBuffer[SaveReportDataHttpItemMessage]()

  override def receive: Receive = {
    case m: SaveReportDataHttpItemMessage =>
      messages += m
      if (messages.length >= 10) {
        insert()
      }
      context.system.scheduler.scheduleOnce(2 seconds) {
        self ! Flush
      }(context.system.dispatcher)
    case Flush =>
      insert()
    case Status.Failure(t) =>
      log.warning(LogUtils.stackTraceToString(t))
  }

  override def preStart(): Unit = {
  }

  override def postStop(): Unit = {
    insert()
    log.debug(s"${self.path} is stopped")
  }

  private def insert(): Unit = {
    if (messages.length > 0) {
      log.debug(s"${messages.length} items is saving...")
      JobReportDataItemService.index(messages, dayIndexSuffix)
      messages.clear()
    }
  }
}

object JobReportDataItemSaveActor {

  def props(dayIndexSuffix: String) = Props(new JobReportDataItemSaveActor(dayIndexSuffix))

  case class SaveReportDataHttpItemMessage(id: String, dataItem: JobReportDataItem)
}
Example 12
Source File: JobManualActor.scala From asura with MIT License
package asura.core.job.actor

import akka.actor.{ActorRef, PoisonPill, Props, Status}
import akka.pattern.pipe
import asura.common.actor.{BaseActor, SenderMessage}
import asura.common.util.{LogUtils, StringUtils}
import asura.core.CoreConfig
import asura.core.es.model.{Job, JobReport}
import asura.core.es.service.{JobReportService, JobService}
import asura.core.job.{JobCenter, JobExecDesc}

class JobManualActor(jobId: String, user: String, out: ActorRef) extends BaseActor {

  implicit val executionContext = context.dispatcher

  if (null != out) self ! SenderMessage(out)

  override def receive: Receive = {
    case SenderMessage(sender) =>
      context.become(handleRequest(sender))
      self ! jobId
  }

  def handleRequest(wsActor: ActorRef): Receive = {
    case job: Job =>
      val jobImplOpt = JobCenter.classAliasJobMap.get(job.classAlias)
      if (jobImplOpt.isEmpty) {
        wsActor ! s"Can't find job implementation of ${job.classAlias}"
        wsActor ! JobExecDesc.STATUS_FAIL
        wsActor ! Status.Success
      } else {
        val jobImpl = jobImplOpt.get
        val (isOk, errMsg) = jobImpl.checkJobData(job.jobData)
        if (isOk) {
          JobExecDesc.from(jobId, job, JobReport.TYPE_MANUAL, null, user).map(jobExecDesc => {
            jobImpl.doTestAsync(jobExecDesc, logMsg => {
              wsActor ! logMsg
            }).pipeTo(self)
          }).recover {
            case t: Throwable => self ! Status.Failure(t)
          }
        } else {
          wsActor ! errMsg
          wsActor ! Status.Success
        }
      }
    case jobId: String =>
      if (StringUtils.isNotEmpty(jobId)) {
        JobService.getJobById(jobId).pipeTo(self)
      } else {
        wsActor ! s"jobId is empty."
        wsActor ! Status.Success
      }
    case execDesc: JobExecDesc =>
      execDesc.prepareEnd()
      val report = execDesc.report
      JobReportService.indexReport(execDesc.reportId, report).map { _ =>
        val reportUrl = s"view report: ${CoreConfig.reportBaseUrl}/${execDesc.reportId}"
        wsActor ! reportUrl
        wsActor ! execDesc.report.result
        wsActor ! Status.Success
      }.recover {
        case t: Throwable => self ! Status.Failure(t)
      }
    case Status.Failure(t) =>
      val stackTrace = LogUtils.stackTraceToString(t)
      log.warning(stackTrace)
      wsActor ! t.getMessage
      wsActor ! JobExecDesc.STATUS_FAIL
      wsActor ! Status.Success
  }

  override def postStop(): Unit = {
    log.debug(s"${self.path} is stopped")
  }
}

object JobManualActor {
  def props(jobId: String, user: String, out: ActorRef = null) =
    Props(new JobManualActor(jobId, user, out))
}
Example 13
Source File: TriggerEventsSaveActor.scala From asura with MIT License
package asura.core.es.actor

import akka.actor.{Props, Status}
import asura.common.actor.BaseActor
import asura.common.util.LogUtils
import asura.core.actor.messages.Flush
import asura.core.es.model.TriggerEventLog
import asura.core.es.service.TriggerEventLogService

import scala.collection.mutable.ArrayBuffer
import scala.concurrent.duration._

class TriggerEventsSaveActor extends BaseActor {

  val logs = ArrayBuffer[TriggerEventLog]()

  override def receive: Receive = {
    case m: TriggerEventLog =>
      logs += m
      if (logs.length >= 20) {
        insert()
      }
      context.system.scheduler.scheduleOnce(2 seconds) {
        self ! Flush
      }(context.system.dispatcher)
    case Flush =>
      insert()
    case Status.Failure(t) =>
      log.warning(LogUtils.stackTraceToString(t))
  }

  override def preStart(): Unit = {
  }

  override def postStop(): Unit = {
    insert()
    log.debug(s"${self.path} is stopped")
  }

  private def insert(): Unit = {
    if (logs.length > 0) {
      log.debug(s"${logs.length} trigger events is saving...")
      TriggerEventLogService.index(logs)
      logs.clear()
    }
  }
}

object TriggerEventsSaveActor {
  def props() = Props(new TriggerEventsSaveActor())
}
Example 14
Source File: ActivitySaveActor.scala From asura with MIT License
package asura.core.es.actor

import akka.actor.{Props, Status}
import asura.common.actor.BaseActor
import asura.common.util.LogUtils
import asura.core.actor.messages.Flush
import asura.core.es.model.Activity
import asura.core.es.service.ActivityService

import scala.collection.mutable.ArrayBuffer
import scala.concurrent.duration._

class ActivitySaveActor extends BaseActor {

  val activities = ArrayBuffer[Activity]()

  override def receive: Receive = {
    case m: Activity =>
      activities += m
      if (activities.length >= 20) {
        insert()
      }
      context.system.scheduler.scheduleOnce(2 seconds) {
        self ! Flush
      }(context.system.dispatcher)
    case Flush =>
      insert()
    case Status.Failure(t) =>
      log.warning(LogUtils.stackTraceToString(t))
  }

  override def preStart(): Unit = {
  }

  override def postStop(): Unit = {
    insert()
    log.debug(s"${self.path} is stopped")
  }

  private def insert(): Unit = {
    if (activities.length > 0) {
      log.debug(s"${activities.length} activities is saving...")
      ActivityService.index(activities)
      activities.clear()
    }
  }
}

object ActivitySaveActor {
  def props() = Props(new ActivitySaveActor())
}
Example 15
Source File: OnlineAction.scala From marvin-engine-executor with Apache License 2.0
package org.marvin.executor.actions

import akka.Done
import akka.actor.SupervisorStrategy._
import akka.actor.{Actor, ActorLogging, ActorRef, OneForOneStrategy, Props, Status}
import akka.pattern.{ask, pipe}
import akka.util.Timeout
import io.grpc.StatusRuntimeException
import org.marvin.artifact.manager.ArtifactSaver
import org.marvin.executor.actions.OnlineAction.{OnlineExecute, OnlineHealthCheck, OnlineReload}
import org.marvin.executor.proxies.EngineProxy.{ExecuteOnline, HealthCheck, Reload}
import org.marvin.executor.proxies.OnlineActionProxy
import org.marvin.artifact.manager.ArtifactSaver.SaveToLocal
import org.marvin.model.{EngineActionMetadata, EngineMetadata}
import org.marvin.util.ProtocolUtil

import scala.collection.mutable.ListBuffer
import scala.concurrent.Future
import scala.concurrent.duration._
import scala.util.{Failure, Success}

object OnlineAction {
  case class OnlineExecute(message: String, params: String)
  case class OnlineReload(protocol: String)
  case class OnlineHealthCheck()
}

class OnlineAction(actionName: String, metadata: EngineMetadata) extends Actor with ActorLogging {

  var onlineActionProxy: ActorRef = _
  var artifactSaver: ActorRef = _
  var engineActionMetadata: EngineActionMetadata = _
  var artifactsToLoad: String = _
  implicit val ec = context.dispatcher

  override def preStart() = {
    engineActionMetadata = metadata.actionsMap(actionName)
    artifactsToLoad = engineActionMetadata.artifactsToLoad.mkString(",")
    onlineActionProxy = context.actorOf(Props(new OnlineActionProxy(engineActionMetadata)), name = "onlineActionProxy")
    artifactSaver = context.actorOf(ArtifactSaver.build(metadata), name = "artifactSaver")
  }

  override val supervisorStrategy =
    OneForOneStrategy(maxNrOfRetries = 10, withinTimeRange = metadata.onlineActionTimeout milliseconds) {
      case _: StatusRuntimeException => Restart
      case _: Exception => Escalate
    }

  override def receive = {
    case OnlineExecute(message, params) =>
      implicit val futureTimeout = Timeout(metadata.onlineActionTimeout milliseconds)

      log.info(s"Starting to process execute to $actionName. Message: [$message] and params: [$params].")

      val originalSender = sender
      ask(onlineActionProxy, ExecuteOnline(message, params)) pipeTo originalSender

    case OnlineReload(protocol) =>
      implicit val futureTimeout = Timeout(metadata.reloadTimeout milliseconds)

      log.info(s"Starting to process reload to $actionName. Protocol: [$protocol].")

      if(protocol == null || protocol.isEmpty){
        onlineActionProxy forward Reload()
      }else{
        val splitedProtocols = ProtocolUtil.splitProtocol(protocol, metadata)

        val futures:ListBuffer[Future[Any]] = ListBuffer[Future[Any]]()
        for(artifactName <- engineActionMetadata.artifactsToLoad) {
          futures += (artifactSaver ? SaveToLocal(artifactName, splitedProtocols(artifactName)))
        }

        val origSender = sender()
        Future.sequence(futures).onComplete{
          case Success(_) =>
            onlineActionProxy.ask(Reload(protocol)) pipeTo origSender

          case Failure(e) => {
            log.error(s"Failure to reload artifacts using protocol $protocol.")
            origSender ! Status.Failure(e)
          }
        }
      }

    case OnlineHealthCheck =>
      implicit val futureTimeout = Timeout(metadata.healthCheckTimeout milliseconds)
      log.info(s"Starting to process health to $actionName.")

      val originalSender = sender
      ask(onlineActionProxy, HealthCheck) pipeTo originalSender

    case Done =>
      log.info("Work Done!")

    case _ =>
      log.warning(s"Not valid message !!")
  }
}
Example 16
Source File: Checker.scala From cave with MIT License
package worker

import java.util.concurrent.Executor

import akka.actor.{Actor, ActorLogging, Status}
import akka.pattern.pipe
import com.cave.metrics.data._
import com.cave.metrics.data.evaluator.{CheckEvaluator, DataFetcher}
import init.Init

import scala.concurrent.{ExecutionContext, Future}
import scala.util.Try

object Checker {
  type Result = Try[Boolean]

  case class Done(alarm: Result)
  case class Aborted(reason: String)
}

class Checker(check: Check) extends Actor with ActorLogging {

  implicit val exec = context.dispatcher.asInstanceOf[Executor with ExecutionContext]
  val evaluator = new CheckEvaluator(check)
  def fetcher = new DataFetcher(Init.influxClientFactory)

  this run check pipeTo self

  def receive = {
    case alarm: Checker.Result =>
      context.parent ! Checker.Done(alarm)
      stop()
    case x: Status.Failure =>
      context.parent ! Checker.Aborted(x.cause.getMessage)
      stop()
  }

  def stop(): Unit = {
    context stop self
  }

  private[worker] def run(check: Check)(implicit ec: ExecutionContext): Future[Try[Boolean]] = {
    val result = evaluator.evaluate(fetcher)
    result map { v =>
      log.warning("Result of evaluation: " + v)
    }
    result
  }
}
Example 17
Source File: RetryActor.scala From seahorse-workflow-executor with Apache License 2.0
package io.deepsense.commons.utils

import scala.concurrent.{ExecutionContext, Future}
import scala.concurrent.duration.FiniteDuration
import scala.util.{Failure, Success, Try}

import akka.actor.{Actor, ActorRef, Status}

class RetryActor[T](
    retryInterval: FiniteDuration,
    retryCountLimit: Int,
    workCode: => Future[T],
    workDescription: Option[String]) extends Actor with Logging {

  import RetryActor._

  private implicit val ec: ExecutionContext = context.system.dispatcher

  override def receive: Receive = {
    case Trigger => doWork(sender, 0)
    case Retry(initialSender, retryCount) => doWork(initialSender, retryCount)
  }

  val workDescriptionForLogs: String = workDescription.map(" " + _).getOrElse(" some work")

  private def doWork(initialSender: ActorRef, retryCount: Int): Unit = {
    workCode.onComplete {
      case Success(t) =>
        initialSender ! t
      case Failure(RetriableException(msg, cause)) if retryCount < retryCountLimit =>
        logFailure(msg, cause)
        logger.info(s"Will retry$workDescriptionForLogs in $retryInterval.")
        context.system.scheduler.scheduleOnce(retryInterval, self, Retry(initialSender, retryCount + 1))
      case Failure(RetriableException(msg, cause)) if retryCount >= retryCountLimit =>
        logFailure(msg, cause)
        val retryLimitReachedException =
          RetryLimitReachedException(s"Retry limit of $retryCountLimit reached, last error was $cause", cause)
        logger.error(s"Retry limit reached for$workDescriptionForLogs.", retryLimitReachedException)
        initialSender ! Status.Failure(retryLimitReachedException)
      case Failure(f) =>
        logFailure(f.getMessage, Some(f))
        logger.error(s"Unexpected exception when performing$workDescriptionForLogs.", f)
        initialSender ! Status.Failure(f)
    }
  }

  private def logFailure(msg: String, tOpt: Option[Throwable]): Unit = {
    val msgText = s"Exception when performing$workDescriptionForLogs. The message was: $msg"
    tOpt match {
      case Some(t) => logger.info(msgText, t)
      case None => logger.info(msgText)
    }
  }
}

object RetryActor {
  sealed trait Message
  case object Trigger extends Message
  case class Retry(initialSender: ActorRef, retryCount: Int) extends Message

  case class RetryLimitReachedException(msg: String, lastError: Option[Throwable]) extends Exception(msg)
  case class RetriableException(msg: String, cause: Option[Throwable]) extends Exception(msg, cause.orNull)
}
Example 18
Source File: AskActor.scala From matcher with MIT License
package com.wavesplatform.dex.actors

import akka.actor.{Actor, ActorRef, ActorSystem, Props, Status}

import scala.concurrent.duration.FiniteDuration
import scala.concurrent.{Future, Promise, TimeoutException}
import scala.reflect.ClassTag

class AskActor[T](p: Promise[T], timeout: FiniteDuration)(implicit ct: ClassTag[T]) extends Actor {
  import context.dispatcher
  private val timeoutCancelable = context.system.scheduler.scheduleOnce(timeout, self, AskActor.timeoutMessage)

  override val receive: Receive = {
    case x => // Fix in Scala 2.13
      timeoutCancelable.cancel()
      context.stop(self)

      x match {
        case x: T if x.getClass == ct.runtimeClass => p.trySuccess(x)
        case e: Status.Failure => p.tryFailure(e.cause)
        case _ => p.tryFailure(new IllegalArgumentException(s"Expected ${ct.runtimeClass.getName}, but got $x"))
      }
  }
}

object AskActor {
  private val timeoutMessage = {
    val reason = new TimeoutException("Typed ask is timed out!")
    reason.setStackTrace(Array.empty)
    Status.Failure(reason)
  }

  def props[T](p: Promise[T], timeout: FiniteDuration)(implicit ct: ClassTag[T]) = Props(new AskActor(p, timeout))

  def mk[T](timeout: FiniteDuration)(implicit ct: ClassTag[T], system: ActorSystem): (ActorRef, Future[T]) = {
    val p = Promise[T]()
    val ref = system.actorOf(props(p, timeout))
    (ref, p.future)
  }
}
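A possible way to use the helper above (hedged; targetActor is a placeholder name, not part of the original source): AskActor.mk materializes a temporary actor plus a typed Future, and the target actor's reply, a Status.Failure, or the internal timeout completes it.

import akka.actor.{ActorRef, ActorSystem}
import scala.concurrent.Future
import scala.concurrent.duration._

implicit val system: ActorSystem = ActorSystem("ask-demo")

def askTyped(targetActor: ActorRef): Future[String] = {
  val (proxyRef, answer) = AskActor.mk[String](5.seconds)
  targetActor.tell("ping", proxyRef) // the target replies to proxyRef
  answer                             // completes with the String reply, a Status.Failure cause, or a TimeoutException
}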
Example 19
Source File: LeagueProjection.scala From eventsourcing-intro with Apache License 2.0
package eu.reactivesystems.league.impl

import akka.actor.{Actor, ActorLogging, Props, Status}
import akka.pattern.pipe
import akka.persistence.cassandra.query.scaladsl.CassandraReadJournal
import akka.persistence.query.{EventEnvelope2, PersistenceQuery}
import akka.stream.ActorMaterializer
import akka.stream.scaladsl._
import com.lightbend.lagom.scaladsl.persistence.jdbc.JdbcSession

class LeagueProjection(jdbcSession: JdbcSession) extends Actor with ActorLogging {

  import DBOperations._

  override def receive: Receive = {
    case Status.Failure(ex) =>
      log.error(ex, "read side generation terminated")
      context.stop(self)
  }

  override def preStart(): Unit = {
    val materializer = ActorMaterializer.create(context.system)
    val readJournal = PersistenceQuery
      .get(context.system)
      .readJournalFor[CassandraReadJournal](CassandraReadJournal.Identifier)

    import context.dispatcher

    val result = getOffset(jdbcSession)
      .flatMap(
        offset =>
          readJournal
            .eventsByTag(LeagueEvent.Tag.tag, offset)
            .mapAsync(1)(e => projectEvent(e))
            .runWith(Sink.ignore)(materializer))

    result pipeTo self
    ()
  }

  private def projectEvent(event: EventEnvelope2) =
    event.event match {
      case ClubRegistered(club) => addClub(jdbcSession, event.offset, club)
      case GamePlayed(game) => addGame(jdbcSession, event.offset, game)
      case ResultRevoked(game) => revokeResult(jdbcSession, event.offset, game)
    }
}

object LeagueProjection {
  val readSideId = "leagueProjection"

  def props(jdbcSession: JdbcSession) = Props(new LeagueProjection(jdbcSession))
}
Example 20
Source File: BatchingClient.scala From eclair with Apache License 2.0
package fr.acinq.eclair.blockchain.bitcoind.rpc

import akka.actor.{Actor, ActorLogging, ActorRef, Status}
import akka.pattern.pipe
import fr.acinq.eclair.blockchain.bitcoind.rpc.BatchingClient.Pending

import scala.collection.immutable.Queue

class BatchingClient(rpcClient: BasicBitcoinJsonRPCClient) extends Actor with ActorLogging {

  import scala.concurrent.ExecutionContext.Implicits.global

  override def receive: Receive = {
    case request: JsonRPCRequest =>
      // immediately process isolated request
      process(queue = Queue(Pending(request, sender)))
  }

  def waiting(queue: Queue[Pending], processing: Seq[Pending]): Receive = {
    case request: JsonRPCRequest =>
      // there is already a batch in flight, just add this request to the queue
      context become waiting(queue :+ Pending(request, sender), processing)

    case responses: Seq[JsonRPCResponse]@unchecked =>
      log.debug("got {} responses", responses.size)
      // let's send back answers to the requestors
      require(responses.size == processing.size, s"responses=${responses.size} != processing=${processing.size}")
      responses.zip(processing).foreach {
        case (JsonRPCResponse(result, None, _), Pending(_, requestor)) => requestor ! result
        case (JsonRPCResponse(_, Some(error), _), Pending(_, requestor)) => requestor ! Status.Failure(JsonRPCError(error))
      }
      process(queue)

    case s@Status.Failure(t) =>
      log.error(t, s"got exception for batch of ${processing.size} requests")
      // let's fail all requests
      processing.foreach { case Pending(_, requestor) => requestor ! s }
      process(queue)
  }

  def process(queue: Queue[Pending]) = {
    // do we have queued requests?
    if (queue.isEmpty) {
      log.debug("no more requests, going back to idle")
      context become receive
    } else {
      val (batch, rest) = queue.splitAt(BatchingClient.BATCH_SIZE)
      log.debug(s"sending {} request(s): {} (queue={})", batch.size, batch.groupBy(_.request.method).map(e => e._1 + "=" + e._2.size).mkString(" "), queue.size)
      rpcClient.invoke(batch.map(_.request)) pipeTo self
      context become waiting(rest, batch)
    }
  }
}

object BatchingClient {

  val BATCH_SIZE = 50

  case class Pending(request: JsonRPCRequest, requestor: ActorRef)
}
Example 21
Source File: UnfoldPullerAsync.scala From akka-stream-extensions with Apache License 2.0
package com.mfglabs.stream.internals.source

import akka.pattern.pipe
import akka.actor.{Props, Status, ActorLogging}
import akka.stream.actor.ActorPublisher

import scala.concurrent.Future

class UnfoldPullerAsync[A, B](zero: => B)(f: B => Future[(Option[A], Option[B])])
  extends ActorPublisher[A] with ActorLogging {

  import akka.stream.actor.ActorPublisherMessage._

  implicit val ec = context.dispatcher

  def receive = waitingForDownstreamReq(zero)

  case object Pull

  def waitingForDownstreamReq(s: B): Receive = {
    case Request(_) | Pull =>
      if (totalDemand > 0 && isActive) {
        f(s).pipeTo(self)
        context.become(waitingForFut(s))
      }

    case Cancel => context.stop(self)
  }

  def waitingForFut(s: B): Receive = {
    case (maybeA: Option[A], maybeB: Option[B]) =>
      maybeA.foreach(onNext)
      maybeB match {
        case Some(b) =>
          if (totalDemand > 0) self ! Pull
          context.become(waitingForDownstreamReq(b))
        case None =>
          onComplete()
      }

    case Request(_) | Pull => // ignoring until we receive the future response

    case Status.Failure(err) =>
      context.become(waitingForDownstreamReq(s))
      onError(err)

    case Cancel => context.stop(self)
  }
}

object UnfoldPullerAsync {
  def props[A, B](zero: => B)(f: B => Future[(Option[A], Option[B])]) =
    Props(new UnfoldPullerAsync[A, B](zero)(f))
}
Example 22
Source File: PipeToSupport.scala From perf_tester with Apache License 2.0
package akka.pattern

import language.implicitConversions

import scala.concurrent.{ Future, ExecutionContext }
import scala.util.{ Failure, Success }
import akka.actor.{ Status, ActorRef, Actor }
import akka.actor.ActorSelection
import java.util.concurrent.CompletionStage
import java.util.function.BiConsumer

trait PipeToSupport {

  final class PipeableFuture[T](val future: Future[T])(implicit executionContext: ExecutionContext) {
    def pipeTo(recipient: ActorRef)(implicit sender: ActorRef = Actor.noSender): Future[T] = {
      future andThen {
        case Success(r) ⇒ recipient ! r
        case Failure(f) ⇒ recipient ! Status.Failure(f)
      }
    }
    def pipeToSelection(recipient: ActorSelection)(implicit sender: ActorRef = Actor.noSender): Future[T] = {
      future andThen {
        case Success(r) ⇒ recipient ! r
        case Failure(f) ⇒ recipient ! Status.Failure(f)
      }
    }
    def to(recipient: ActorRef): PipeableFuture[T] = to(recipient, Actor.noSender)
    def to(recipient: ActorRef, sender: ActorRef): PipeableFuture[T] = {
      pipeTo(recipient)(sender)
      this
    }
    def to(recipient: ActorSelection): PipeableFuture[T] = to(recipient, Actor.noSender)
    def to(recipient: ActorSelection, sender: ActorRef): PipeableFuture[T] = {
      pipeToSelection(recipient)(sender)
      this
    }
  }

  final class PipeableCompletionStage[T](val future: CompletionStage[T])(implicit executionContext: ExecutionContext) {
    def pipeTo(recipient: ActorRef)(implicit sender: ActorRef = Actor.noSender): CompletionStage[T] = {
      future whenComplete new BiConsumer[T, Throwable] {
        override def accept(t: T, ex: Throwable) {
          if (t != null) recipient ! t
          if (ex != null) recipient ! Status.Failure(ex)
        }
      }
    }
    def pipeToSelection(recipient: ActorSelection)(implicit sender: ActorRef = Actor.noSender): CompletionStage[T] = {
      future whenComplete new BiConsumer[T, Throwable] {
        override def accept(t: T, ex: Throwable) {
          if (t != null) recipient ! t
          if (ex != null) recipient ! Status.Failure(ex)
        }
      }
    }
    def to(recipient: ActorRef): PipeableCompletionStage[T] = to(recipient, Actor.noSender)
    def to(recipient: ActorRef, sender: ActorRef): PipeableCompletionStage[T] = {
      pipeTo(recipient)(sender)
      this
    }
    def to(recipient: ActorSelection): PipeableCompletionStage[T] = to(recipient, Actor.noSender)
    def to(recipient: ActorSelection, sender: ActorRef): PipeableCompletionStage[T] = {
      pipeToSelection(recipient)(sender)
      this
    }
  }

  implicit def pipeCompletionStage[T](future: CompletionStage[T])(implicit executionContext: ExecutionContext): PipeableCompletionStage[T] =
    new PipeableCompletionStage(future)
}
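For context, a small sketch (with an assumed actor name and computation; it is not part of the file above) of how the pipeTo pattern defined above is normally used from inside an actor: the future's success value is delivered as a plain message, and its failure is wrapped in Status.Failure.

import akka.actor.Actor
import akka.pattern.pipe
import scala.concurrent.Future

// Hypothetical worker actor for illustration.
class SquareWorker extends Actor {
  import context.dispatcher

  def receive: Receive = {
    case n: Int =>
      val result: Future[Int] = Future(n * n) // some asynchronous computation
      result pipeTo sender()                  // Int on success, Status.Failure on failure
  }
}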
Example 23
Source File: LocalTransformServiceActor.scala From mleap with Apache License 2.0
package ml.combust.mleap.executor.service

import akka.actor.{Actor, ActorRef, Props, Status, Terminated}
import akka.stream.{ActorMaterializer, Materializer}
import ml.combust.mleap.executor.repository.RepositoryBundleLoader
import ml.combust.mleap.executor._
import ml.combust.mleap.executor.error.NotFoundException

import scala.util.{Failure, Success, Try}

object LocalTransformServiceActor {
  def props(loader: RepositoryBundleLoader,
            config: ExecutorConfig): Props = {
    Props(new LocalTransformServiceActor(loader, config))
  }

  object Messages {
    case object Close
  }
}

class LocalTransformServiceActor(loader: RepositoryBundleLoader,
                                 config: ExecutorConfig) extends Actor {

  import LocalTransformServiceActor.Messages

  private implicit val materializer: Materializer = ActorMaterializer()(context.system)

  private var lookup: Map[String, ActorRef] = Map()
  private var modelNameLookup: Map[ActorRef, String] = Map()

  override def postStop(): Unit = {
    for (child <- context.children) {
      context.unwatch(child)
      context.stop(child)
    }
  }

  override def receive: Receive = {
    case request: TransformFrameRequest => handleModelRequest(request)
    case request: GetBundleMetaRequest => handleModelRequest(request)
    case request: GetModelRequest => handleModelRequest(request)
    case request: CreateFrameStreamRequest => handleModelRequest(request)
    case request: CreateRowStreamRequest => handleModelRequest(request)
    case request: GetRowStreamRequest => handleModelRequest(request)
    case request: CreateFrameFlowRequest => handleModelRequest(request)
    case request: GetFrameStreamRequest => handleModelRequest(request)
    case request: CreateRowFlowRequest => handleModelRequest(request)
    case request: UnloadModelRequest => handleModelRequest(request)
    case request: LoadModelRequest => loadModel(request)
    case Messages.Close => context.stop(self)
    case Terminated(actor) => terminated(actor)
  }

  def handleModelRequest(request: ModelRequest): Unit = {
    lookup.get(request.modelName) match {
      case Some(actor) => actor.tell(request, sender)
      case None => sender ! Status.Failure(new NotFoundException(s"no model with name ${request.modelName}"))
    }
  }

  def loadModel(request: LoadModelRequest): Unit = {
    Try(context.actorOf(BundleActor.props(request, loader, config), request.modelName)) match {
      case Success(actor) =>
        lookup += (request.modelName -> actor)
        modelNameLookup += (actor -> request.modelName)
        context.watch(actor)
        actor.tell(request, sender)
      case Failure(err) => sender ! Status.Failure(err)
    }
  }

  private def terminated(ref: ActorRef): Unit = {
    val uri = modelNameLookup(ref)
    modelNameLookup -= ref
    lookup -= uri
  }
}
Example 24
Source File: ActorStorageProvider.scala From eventuate with Apache License 2.0
package com.rbmhtechnology.eventuate.adapter.vertx

import akka.actor.{ ActorSystem, Status }
import akka.pattern.ask
import akka.testkit.TestProbe
import akka.util.Timeout
import com.rbmhtechnology.eventuate.adapter.vertx.api.StorageProvider

import scala.concurrent.duration._
import scala.concurrent.{ ExecutionContext, Future }

class ActorStorageProvider(defaultId: String)(implicit system: ActorSystem) extends StorageProvider {
  implicit val timeout = Timeout(20.seconds)

  val probe = TestProbe()

  override def readProgress(id: String)(implicit executionContext: ExecutionContext): Future[Long] =
    probe.ref.ask(read(id)).mapTo[Long]

  override def writeProgress(id: String, sequenceNr: Long)(implicit executionContext: ExecutionContext): Future[Long] =
    probe.ref.ask(write(id, sequenceNr)).mapTo[Long]

  def expectRead(replySequenceNr: Long, id: String = defaultId): Unit = {
    probe.expectMsg(read(id))
    probe.reply(replySequenceNr)
  }

  def expectWrite(sequenceNr: Long, id: String = defaultId): Unit = {
    probe.expectMsg(write(id, sequenceNr))
    probe.reply(sequenceNr)
  }

  def expectWriteAndFail(sequenceNr: Long, failure: Throwable, id: String = defaultId): Unit = {
    probe.expectMsg(write(id, sequenceNr))
    probe.reply(Status.Failure(failure))
  }

  def expectWriteAnyOf(sequenceNrs: Seq[Long], id: String = defaultId): Unit = {
    probe.expectMsgAnyOf(sequenceNrs.map(write(id, _)): _*)
    probe.reply(sequenceNrs.max)
  }

  def expectNoMsg(duration: FiniteDuration): Unit = {
    probe.expectNoMsg(duration)
  }

  private def read(id: String): String =
    s"read[$id]"

  private def write(id: String, sequenceNr: Long): String =
    s"write[$id]-$sequenceNr"
}
Example 25
Source File: Component.scala From wookiee with Apache License 2.0
package com.webtrends.harness.component

import akka.actor.{ActorRef, Status}
import akka.pattern.ask
import akka.util.Timeout
import com.webtrends.harness.HarnessConstants
import com.webtrends.harness.app.HActor
import com.webtrends.harness.app.HarnessActor.{ConfigChange, PrepareForShutdown, SystemReady}

import scala.concurrent.duration._
import scala.util.{Failure, Success}

sealed class ComponentMessages()

case class StartComponent() extends ComponentMessages
case class StopComponent() extends ComponentMessages
case class ComponentRequest[T](msg: T, name: Option[String] = None, timeout: Timeout = 5 seconds) extends ComponentMessages
case class ComponentMessage[T](msg: T, name: Option[String] = None) extends ComponentMessages

case class ComponentResponse[T](resp: T)

// Note: this listing is an excerpt; the body of the Component class itself was omitted in the
// original snippet, leaving only the method below and its closing brace.
  def prepareForShutdown() = {}
}

object Component {
  def getActorPath(): String = {
    s"${HarnessConstants.ComponentName}/"
  }
}
Example 26
Source File: HealthCheckActor.scala From wookiee with Apache License 2.0
package com.webtrends.harness.health

import akka.actor.{Props, Status}
import com.webtrends.harness.HarnessConstants
import com.webtrends.harness.app.HActor
import com.webtrends.harness.utils.ConfigUtil

import scala.util.{Failure, Success}

object HealthCheckActor {
  def props: Props = Props[HealthCheckActor]

  // These objects will be temporary enough, favoring time complexity concerns over space concerns
  protected[health] def collectHealthStates(health: ApplicationHealth): collection.mutable.Map[Seq[String], ComponentState.ComponentState] = {
    val checks = collection.mutable.Map.empty[Seq[String], ComponentState.ComponentState]

    def drillDown(parentPath: Seq[String], check: HealthComponent): Unit = {
      checks.+=((parentPath :+ check.name, check.state))
      check.components.foreach(c => drillDown(parentPath :+ check.name, c))
    }

    checks.+=((Seq(health.applicationName), health.state))
    health.components.foreach(c => drillDown(Seq(health.applicationName), c))
    checks
  }

  protected[health] def healthChecksDiffer(previous: ApplicationHealth, current: ApplicationHealth): Boolean = {
    val previousStates = collectHealthStates(previous)
    var foundDiff = false

    def drillDown(parentPath: Seq[String], check: HealthComponent): Unit =
      if (!foundDiff) {
        val previous = previousStates.get(parentPath :+ check.name)
        if (!previous.contains(check.state))
          foundDiff = true
        else
          check.components.foreach(c => drillDown(parentPath :+ check.name, c))
      }

    current.components.foreach(c => drillDown(Seq(current.applicationName), c))

    previous.state != current.state || foundDiff
  }
}

class HealthCheckActor extends HActor with HealthCheckProvider {
  private var previousCheck: Option[ApplicationHealth] = None

  override def preStart() {
    log.info("Health Manager started: {}", context.self.path)
  }

  override def postStop(): Unit = {
    log.info("Health Manager stopped: {}", context.self.path)
  }

  override def receive = health orElse {
    case HealthRequest(typ) =>
      val caller = sender
      log.debug("Fetching the system health")
      import context.dispatcher
      runChecks onComplete {
        case Success(s) =>
          comparePreviousCheck(s)
          val res = typ match {
            case HealthResponseType.NAGIOS => "%s|%s".format(s.state.toString.toUpperCase, s.details)
            case HealthResponseType.LB => if (s.state == ComponentState.CRITICAL) "DOWN" else "UP"
            case _ => s
          }
          caller ! res
        case Failure(f) => caller ! Status.Failure(f)
      }
  }

  private def comparePreviousCheck(health: ApplicationHealth): Unit =
    if (ConfigUtil.getDefaultValue(HarnessConstants.LogHealthCheckDiffs, config.getBoolean, false)) {
      previousCheck match {
        case Some(c) =>
          if (HealthCheckActor.healthChecksDiffer(c, health))
            log.info(s"Health check status changed. Old: ${c.toJson()} New: ${health.toJson()}")
        case None => // Not much use checking against nothing
      }
      previousCheck = Some(health)
    }
}
Example 27
Source File: MessageDispatcherActor.scala From reliable-http-client with Apache License 2.0
package rhttpc.client.subscription

import akka.actor.{Actor, ActorLogging, ActorRef, Status}
import rhttpc.client.protocol.{Correlated, Exchange}

import scala.util.{Failure, Success}

private[subscription] class MessageDispatcherActor extends Actor with ActorLogging {

  private var promisesOnPending: Map[SubscriptionOnResponse, Option[PendingMessage]] = Map.empty

  private var subscriptions: Map[SubscriptionOnResponse, ActorRef] = Map.empty

  override def receive: Actor.Receive = {
    case RegisterSubscriptionPromise(sub) =>
      log.debug(s"Registering subscription promise: $sub")
      promisesOnPending += sub -> None

    case ConfirmOrRegisterSubscription(sub, consumer) =>
      promisesOnPending.get(sub).foreach { pending =>
        if (pending.nonEmpty) {
          log.debug(s"Confirming subscription: $sub. Sending outstanding messages: ${pending.size}.")
          pending.foreach { pending =>
            consumer.tell(MessageFromSubscription(pending.msg, sub), pending.sender)
          }
        } else {
          log.debug(s"Confirming subscription: $sub")
        }
        promisesOnPending -= sub
      }
      subscriptions += sub -> consumer

    case AbortSubscription(sub) =>
      promisesOnPending.get(sub) match {
        case Some(pending) if pending.isEmpty =>
          log.debug(s"Aborted subscription: $sub.")
          promisesOnPending -= sub
        case Some(pending) =>
          log.error(s"Aborted subscription: $sub. There were pending messages: ${pending.size}.")
          promisesOnPending -= sub
        case None =>
          log.warning(s"Confirmed subscription promise: $sub was missing")
      }

    case Correlated(msg: Exchange[_, _], correlationId) =>
      val sub = SubscriptionOnResponse(correlationId)
      val underlyingOrFailure = msg.tryResponse match {
        case Success(underlying) => underlying
        case Failure(ex) => Status.Failure(ex)
      }
      (subscriptions.get(sub), promisesOnPending.get(sub)) match {
        case (Some(consumer), optionalPending) =>
          optionalPending.foreach { pending =>
            log.error(s"There were both registered subscription and subscription promise with pending messages: ${pending.size}.")
          }
          log.debug(s"Consuming message: $correlationId")
          subscriptions -= sub
          consumer forward MessageFromSubscription(underlyingOrFailure, sub) // consumer should ack
        case (None, Some(None)) =>
          log.debug(s"Adding pending message: $correlationId")
          promisesOnPending = promisesOnPending.updated(sub, Some(PendingMessage(underlyingOrFailure)))
        case (None, Some(Some(pending))) =>
          log.error(s"There already was pending message: $pending for subscription. Overriding it.")
          pending.ack()
          promisesOnPending = promisesOnPending.updated(sub, Some(PendingMessage(underlyingOrFailure)))
        case (None, None) =>
          log.error(s"No subscription (promise) registered for $correlationId. Will be skipped.") // TODO: DLQ
          sender() ! Unit // ack
      }
  }

  class PendingMessage private (val msg: Any, val sender: ActorRef) {
    def ack() = sender ! Unit
  }

  object PendingMessage {
    def apply(msg: Any): PendingMessage = new PendingMessage(msg, sender())
  }
}

private[subscription] case class RegisterSubscriptionPromise(sub: SubscriptionOnResponse)

private[subscription] case class ConfirmOrRegisterSubscription(sub: SubscriptionOnResponse, consumer: ActorRef)

private[subscription] case class AbortSubscription(sub: SubscriptionOnResponse)
Example 28
Source File: PromiseSubscriptionCommandsListener.scala From reliable-http-client with Apache License 2.0
package rhttpc.client.subscription

import akka.actor.{Actor, Props, Status}

import scala.concurrent.Promise

private class PromiseSubscriptionCommandsListener(pubPromise: ReplyFuture, replyPromise: Promise[Any])
                                                 (subscriptionManager: SubscriptionManager) extends PublicationListener {
  import context.dispatcher

  override def subscriptionPromiseRegistered(sub: SubscriptionOnResponse): Unit = {}

  override def receive: Actor.Receive = {
    case RequestPublished(sub) =>
      subscriptionManager.confirmOrRegister(sub, self)
      context.become(waitForMessage)
    case RequestAborted(sub, cause) =>
      replyPromise.failure(cause)
      context.stop(self)
  }

  private val waitForMessage: Receive = {
    case MessageFromSubscription(Status.Failure(ex), sub) =>
      replyPromise.failure(ex)
      context.stop(self)
    case MessageFromSubscription(msg, sub) =>
      replyPromise.success(msg)
      context.stop(self)
  }

  pubPromise.pipeTo(this)
}

private[subscription] object PromiseSubscriptionCommandsListener {
  def props(pubPromise: ReplyFuture, replyPromise: Promise[Any])
           (subscriptionManager: SubscriptionManager): Props =
    Props(new PromiseSubscriptionCommandsListener(pubPromise, replyPromise)(subscriptionManager))
}
Example 29
Source File: InMemTransportSpec.scala From reliable-http-client with Apache License 2.0
package rhttpc.transport.inmem

import akka.actor.{ActorSystem, Status}
import akka.testkit.{TestKit, TestProbe}
import org.scalatest._
import rhttpc.transport.PubSubTransport

import scala.concurrent.Await
import scala.concurrent.duration._

class InMemTransportSpec extends TestKit(ActorSystem("InMemTransportSpec"))
  with fixture.FlatSpecLike
  with BeforeAndAfterAll {

  import rhttpc.transport.dumb._

  val someQueueName = "fooQueue"
  val someMessage = "fooMessage"
  val someMessage2 = "fooMessage2"

  it should "delivery message to consumer subscribed before publishing" in { transport =>
    val probe = TestProbe()
    val subscriber = transport.subscriber[String](someQueueName, probe.testActor)
    subscriber.start()
    val publisher = transport.publisher[String](someQueueName)
    publisher.publish(someMessage)
    probe.expectMsg(someMessage)
    probe.reply(Unit)
  }

  it should "delivery message to consumer subscribed after publishing" in { transport =>
    val probe = TestProbe()
    val publisher = transport.publisher[String](someQueueName)
    val subscriber = transport.subscriber[String](someQueueName, probe.testActor)
    subscriber.start()
    publisher.publish(someMessage)
    probe.expectMsg(someMessage)
    probe.reply(Unit)
  }

  it should "delivery message to consumer started after publishing" in { transport =>
    val probe = TestProbe()
    val publisher = transport.publisher[String](someQueueName)
    val subscriber = transport.subscriber[String](someQueueName, probe.testActor)
    publisher.publish(someMessage)
    subscriber.start()
    probe.expectMsg(someMessage)
    probe.reply(Unit)
  }

  it should "delivery message to multiple consumers" in { transport =>
    val probe1 = TestProbe()
    val subscriber = transport.subscriber[String](someQueueName, probe1.testActor)
    subscriber.start()
    val probe2 = TestProbe()
    val subscriber2 = transport.subscriber[String](someQueueName, probe2.testActor)
    subscriber2.start()
    val publisher = transport.publisher[String](someQueueName)
    publisher.publish(someMessage)
    publisher.publish(someMessage2)
    probe1.expectMsg(someMessage)
    probe1.reply(Unit)
    probe2.expectMsg(someMessage2)
    probe2.reply(Unit)
  }

  it should "retry message if failure" in { transport =>
    val probe = TestProbe()
    val subscriber = transport.subscriber[String](someQueueName, probe.testActor)
    subscriber.start()
    val publisher = transport.publisher[String](someQueueName)
    publisher.publish(someMessage)
    probe.expectMsg(someMessage)
    probe.reply(Status.Failure(new Exception("failure")))
    probe.expectMsg(someMessage)
    probe.reply(Unit)
  }

  override type FixtureParam = PubSubTransport

  override protected def withFixture(test: OneArgTest): Outcome = {
    val transport = InMemTransport(retryDelay = 0.seconds)
    try {
      test(transport)
    } finally {
      Await.result(transport.stop(), InMemDefaults.stopTimeout)
    }
  }

  override protected def afterAll(): Unit = {
    shutdown()
  }
}
Example 30
Source File: TransportActor.scala From reliable-http-client with Apache License 2.0
package rhttpc.transport.inmem

import akka.actor.{Actor, Props, Status}

import scala.util.control.NonFatal

private class TransportActor(queueActorProps: => Props) extends Actor {
  override def receive: Receive = {
    case GetOrCreateQueue(name) =>
      try {
        val ref = context.child(name).getOrElse(context.actorOf(queueActorProps, name))
        sender() ! ref
      } catch {
        case NonFatal(ex) =>
          sender() ! Status.Failure(ex)
      }
  }
}

object TransportActor {
  def props(queueActorProps: => Props): Props =
    Props(new TransportActor(queueActorProps))
}

private[inmem] case class GetOrCreateQueue(name: String)
Example 31
Source File: DynamoActor.scala From scala-spark-cab-rides-predictions with MIT License | 5 votes |
package actors

import akka.actor.{Actor, ActorLogging, Status}
import com.amazonaws.services.dynamodbv2.model.BatchWriteItemResult
import dynamodb.{CabImpl, WeatherImp}
import models.{CabPriceBatch, WeatherBatch}

import scala.concurrent.Future
import scala.util.{Failure, Success}

class DynamoActor extends Actor with ActorLogging {

  import context.dispatcher // ExecutionContext for the Future callbacks below

  // Note: the actor's receive block and the weather-batch handler are not included in this excerpt.
  def putCabPrices(cabPriceBatch: CabPriceBatch): Unit = {
    val cabPrices = cabPriceBatch.cabPrices.toSeq
    log.info("received " + cabPrices.size + " cab price records")
    val result: Future[Seq[BatchWriteItemResult]] = CabImpl.put(cabPrices)
    result onComplete {
      case Success(_)         => log.info("Cab Prices Batch processed on DynamoDB")
      case Failure(exception) => log.error(exception, "error processing Cab Prices batch on DynamoDB")
    }
  }
}
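This handler consumes the DynamoDB future with onComplete inside the actor. An alternative that ties the example back to akka.actor.Status is to pipe the future to the actor itself, in which case a failed write arrives as a Status.Failure message. A hedged sketch of that variant (PipingDynamoActor is a hypothetical name, reusing CabImpl and CabPriceBatch from the example above):

package actors

import akka.actor.{Actor, ActorLogging, Status}
import akka.pattern.pipe
import dynamodb.CabImpl
import models.CabPriceBatch

class PipingDynamoActor extends Actor with ActorLogging {
  import context.dispatcher

  override def receive: Receive = {
    case cabPriceBatch: CabPriceBatch =>
      // akka.pattern.pipe delivers a failed Future to the target as Status.Failure
      CabImpl.put(cabPriceBatch.cabPrices.toSeq) pipeTo self
    case _: Seq[_] =>
      log.info("Cab Prices Batch processed on DynamoDB")
    case Status.Failure(ex) =>
      log.error(ex, "error processing Cab Prices batch on DynamoDB")
  }
}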
Example 32
Source File: WsConnection.scala From matcher with MIT License | 5 votes |
package com.wavesplatform.dex.load.ws

import akka.Done
import akka.actor.{ActorRef, ActorSystem, Status}
import akka.http.scaladsl.Http
import akka.http.scaladsl.model.ws.{BinaryMessage, Message, TextMessage, WebSocketRequest}
import akka.stream.scaladsl.{Flow, Sink, Source}
import akka.stream.{CompletionStrategy, Materializer, OverflowStrategy}
import com.wavesplatform.dex.api.ws.connection.TestWsHandlerActor
import com.wavesplatform.dex.api.ws.protocol.{WsClientMessage, WsMessage, WsServerMessage}
import com.wavesplatform.dex.domain.utils.ScorexLogging
import play.api.libs.json.Json

import scala.concurrent.Future
import scala.concurrent.duration.DurationInt
import scala.util.{Failure, Success, Try}

class WsConnection(uri: String, receive: WsServerMessage => Option[WsClientMessage])(implicit system: ActorSystem) extends ScorexLogging {

  import system.dispatcher

  private implicit val materializer = Materializer(system)
  private val wsHandlerRef = system.actorOf(TestWsHandlerActor.props(keepAlive = true))

  log.info(s"Connecting to Matcher WS API: $uri")

  protected def stringifyClientMessage(cm: WsClientMessage): TextMessage.Strict =
    WsMessage.toStrictTextMessage(cm)(WsClientMessage.wsClientMessageWrites)

  // To server
  private val source: Source[TextMessage.Strict, ActorRef] = {
    val completionMatcher: PartialFunction[Any, CompletionStrategy] = { case akka.actor.Status.Success(_) => CompletionStrategy.draining }
    val failureMatcher: PartialFunction[Any, Throwable] = { case Status.Failure(cause) => cause }

    Source
      .actorRef[WsClientMessage](completionMatcher, failureMatcher, 10, OverflowStrategy.fail)
      .map(stringifyClientMessage)
      .mapMaterializedValue { source =>
        wsHandlerRef.tell(TestWsHandlerActor.AssignSourceRef, source)
        source
      }
  }

  // To client
  private val sink: Sink[Message, Future[Done]] = Sink.foreach {
    case tm: TextMessage => // TODO move to tests
      for {
        strictText <- tm.toStrict(1.second).map(_.getStrictText)
        clientMessage <- {
          log.trace(s"Got $strictText")
          Try { Json.parse(strictText).as[WsServerMessage] } match {
            case Failure(exception) => Future.failed(exception)
            case Success(x)         => Future.successful { receive(x).foreach(wsHandlerRef ! _) }
          }
        }
      } yield clientMessage

    case bm: BinaryMessage =>
      bm.dataStream.runWith(Sink.ignore)
      Future.failed { new IllegalArgumentException("Binary messages are not supported") }
  }

  private val flow: Flow[Message, TextMessage.Strict, Future[Done]] =
    Flow.fromSinkAndSourceCoupled(sink, source).watchTermination() {
      case (_, f) =>
        f.onComplete {
          case Success(_) => log.info(s"WebSocket connection to $uri successfully closed")
          case Failure(e) => log.error(s"WebSocket connection to $uri closed with an error", e)
        }(materializer.executionContext)
        f
    }

  val (connectionResponse, closed) = Http().singleWebSocketRequest(WebSocketRequest(uri), flow)

  def send(message: WsClientMessage): Unit = wsHandlerRef ! TestWsHandlerActor.SendToServer(message)

  def isClosed: Boolean = closed.isCompleted

  def close(): Future[Done] = {
    if (!isClosed) wsHandlerRef ! TestWsHandlerActor.CloseConnection
    closed
  }
}
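Both WsConnection variants in this project drive the outgoing stream through Source.actorRef, whose materialized actor is controlled with the Status protocol: sending akka.actor.Status.Success completes the stream (draining buffered elements, per the completion matcher above), while Status.Failure aborts it with the given exception. A self-contained sketch of that contract, independent of the matcher codebase (the object name and messages are illustrative):

import akka.Done
import akka.actor.{ActorRef, ActorSystem, Status}
import akka.stream.{CompletionStrategy, OverflowStrategy}
import akka.stream.scaladsl.{Sink, Source}

object SourceActorRefStatusExample extends App {
  implicit val system: ActorSystem = ActorSystem("status-example")

  val completionMatcher: PartialFunction[Any, CompletionStrategy] = { case Status.Success(_) => CompletionStrategy.draining }
  val failureMatcher: PartialFunction[Any, Throwable] = { case Status.Failure(cause) => cause }

  val ref: ActorRef = Source
    .actorRef[String](completionMatcher, failureMatcher, 10, OverflowStrategy.fail)
    .to(Sink.foreach(println))
    .run()

  ref ! "hello"              // emitted downstream
  ref ! Status.Success(Done) // drains the buffer, then completes the stream
  // ref ! Status.Failure(new RuntimeException("boom")) // would instead fail the stream
}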
Example 33
Source File: WsConnection.scala From matcher with MIT License | 5 votes |
package com.wavesplatform.dex.api.ws.connection

import java.util.concurrent.ConcurrentLinkedQueue

import akka.Done
import akka.actor.{ActorRef, ActorSystem, Status}
import akka.http.scaladsl.Http
import akka.http.scaladsl.model.ws.{BinaryMessage, Message, TextMessage, WebSocketRequest}
import akka.stream.scaladsl.{Flow, Sink, Source}
import akka.stream.{CompletionStrategy, Materializer, OverflowStrategy}
import com.wavesplatform.dex.api.ws.protocol.{WsClientMessage, WsMessage, WsPingOrPong, WsServerMessage}
import com.wavesplatform.dex.domain.utils.ScorexLogging
import play.api.libs.json.Json

import scala.collection.JavaConverters._
import scala.concurrent.Future
import scala.concurrent.duration._
import scala.util.{Failure, Success, Try}

class WsConnection(uri: String, keepAlive: Boolean = true)(implicit system: ActorSystem, materializer: Materializer) extends ScorexLogging {

  log.info(s"""Connecting to Matcher WS API:
              | URI = $uri
              | Keep alive = $keepAlive""".stripMargin)

  import materializer.executionContext

  private val wsHandlerRef = system.actorOf(TestWsHandlerActor props keepAlive)

  protected def stringifyClientMessage(cm: WsClientMessage): TextMessage.Strict =
    WsMessage.toStrictTextMessage(cm)(WsClientMessage.wsClientMessageWrites)

  // From test to server
  private val source: Source[TextMessage.Strict, ActorRef] = {
    val completionMatcher: PartialFunction[Any, CompletionStrategy] = { case akka.actor.Status.Success(_) => CompletionStrategy.draining }
    val failureMatcher: PartialFunction[Any, Throwable] = { case Status.Failure(cause) => cause }

    Source
      .actorRef[WsClientMessage](completionMatcher, failureMatcher, 10, OverflowStrategy.fail)
      .map(stringifyClientMessage)
      .mapMaterializedValue { source =>
        wsHandlerRef.tell(TestWsHandlerActor.AssignSourceRef, source)
        source
      }
  }

  private val messagesBuffer: ConcurrentLinkedQueue[WsServerMessage] = new ConcurrentLinkedQueue[WsServerMessage]()

  // From server to test
  private val sink: Sink[Message, Future[Done]] = Sink.foreach {
    case tm: TextMessage =>
      for {
        strictText <- tm.toStrict(1.second).map(_.getStrictText)
        clientMessage <- {
          log.trace(s"Got $strictText")
          Try { Json.parse(strictText).as[WsServerMessage] } match {
            case Failure(exception) => Future.failed(exception)
            case Success(x) =>
              messagesBuffer.add(x)
              if (keepAlive) x match {
                case value: WsPingOrPong => wsHandlerRef ! value
                case _                   =>
              }
              Future.successful(x)
          }
        }
      } yield clientMessage

    case bm: BinaryMessage =>
      bm.dataStream.runWith(Sink.ignore)
      Future.failed { new IllegalArgumentException("Binary messages are not supported") }
  }

  private val flow: Flow[Message, TextMessage.Strict, Future[Done]] =
    Flow.fromSinkAndSourceCoupled(sink, source).watchTermination() {
      case (_, f) =>
        f.onComplete {
          case Success(_) => log.info(s"WebSocket connection to $uri successfully closed")
          case Failure(e) => log.error(s"WebSocket connection to $uri closed with an error", e)
        }(materializer.executionContext)
        f
    }

  val (connectionResponse, closed) = Http().singleWebSocketRequest(WebSocketRequest(uri), flow)

  val connectionOpenedTs: Long                   = System.currentTimeMillis
  val connectionClosedTs: Future[Long]           = closed.map(_ => System.currentTimeMillis)
  val connectionLifetime: Future[FiniteDuration] = connectionClosedTs.map(cc => FiniteDuration(cc - connectionOpenedTs, MILLISECONDS))

  def messages: List[WsServerMessage] = messagesBuffer.iterator().asScala.toList

  def clearMessages(): Unit = messagesBuffer.clear()

  def send(message: WsClientMessage): Unit = wsHandlerRef ! TestWsHandlerActor.SendToServer(message)

  def close(): Unit = if (!isClosed) wsHandlerRef ! TestWsHandlerActor.CloseConnection

  def isClosed: Boolean = closed.isCompleted
}