akka.actor.SupervisorStrategy.Stop Scala Examples
The following examples show how to use akka.actor.SupervisorStrategy.Stop.
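All of the examples share the same basic shape: a parent actor overrides supervisorStrategy with a OneForOneStrategy whose decider maps certain child failures to Stop. The minimal sketch below illustrates that shape; the actor and exception choices are illustrative and do not come from any of the projects listed here.

import akka.actor.SupervisorStrategy.{ Restart, Stop }
import akka.actor.{ Actor, OneForOneStrategy, Props, SupervisorStrategy }

// Illustrative parent actor: it stops its child on unrecoverable errors
// and restarts it on everything else.
class ParentActor(childProps: Props) extends Actor {

  private val child = context.actorOf(childProps, "child")

  override val supervisorStrategy: SupervisorStrategy =
    OneForOneStrategy() {
      case _: IllegalStateException => Stop    // unrecoverable: terminate the child
      case _: Exception             => Restart // anything else: restart it
    }

  override def receive: Receive = {
    case msg => child forward msg // delegate all work to the supervised child
  }
}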
Example 1
Source File: IngestionHandlerGateway.scala From hydra with Apache License 2.0
package hydra.ingest.services

import akka.actor.SupervisorStrategy.Stop
import akka.actor.{OneForOneStrategy, _}
import akka.util.Timeout
import hydra.core.protocol.{InitiateHttpRequest, InitiateRequest}

import scala.concurrent.duration._

class IngestionHandlerGateway(registryPath: String) extends Actor with ActorLogging {

  private lazy val registry =
    context.actorSelection(registryPath).resolveOne()(Timeout(10.seconds))

  private implicit val ec = context.dispatcher

  override def receive = {
    case InitiateRequest(request, timeout, requestorOpt) =>
      val requestor = requestorOpt getOrElse sender
      ingest(
        registryRef =>
          DefaultIngestionHandler.props(request, registryRef, requestor, timeout),
        requestor
      )

    case InitiateHttpRequest(request, timeout, ctx) =>
      val requestor = sender
      ingest(
        registryRef => HttpIngestionHandler.props(request, timeout, ctx, registryRef),
        requestor
      )
  }

  private def ingest(props: ActorRef => Props, requestor: ActorRef) = {
    registry
      .map(registryRef => context.actorOf(props(registryRef)))
      .recover { case e: Exception => requestor ! e }
  }

  override val supervisorStrategy = OneForOneStrategy() {
    case _ => Stop // always stop the IngestionRequestHandler when it fails
  }
}

object IngestionHandlerGateway {

  val GroupName = "ingestion-handlers"

  def props(registryPath: String) =
    Props(classOf[IngestionHandlerGateway], registryPath)
}
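The gateway is created from its companion's props. A minimal wiring sketch, assuming an ActorSystem and a registry actor path; the system name and path below are placeholders, not values from the hydra project.

import akka.actor.ActorSystem
import hydra.ingest.services.IngestionHandlerGateway

object GatewayMain extends App {
  // Placeholder system name and registry path; substitute real values for your deployment.
  val system = ActorSystem("hydra-ingest")
  val gateway = system.actorOf(
    IngestionHandlerGateway.props("/user/service/ingestor_registry"),
    IngestionHandlerGateway.GroupName
  )
}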
Example 2
Source File: IngestionHandler.scala From hydra with Apache License 2.0
package hydra.ingest.services

import akka.actor.SupervisorStrategy.Stop
import akka.actor.{Actor, ActorRef, OneForOneStrategy, ReceiveTimeout}
import akka.http.scaladsl.model.{StatusCode, StatusCodes}
import hydra.core.ingest.{HydraRequest, IngestionReport, RequestParams}
import hydra.ingest.services.IngestorRegistry.{FindAll, FindByName, LookupResult}

import scala.concurrent.duration.FiniteDuration

trait IngestionHandler {
  this: Actor =>

  def timeout: FiniteDuration

  def request: HydraRequest

  // we require an ActorRef here for performance reasons
  def registry: ActorRef

  private val targetIngestor =
    request.metadataValue(RequestParams.HYDRA_INGESTOR_PARAM)

  targetIngestor match {
    case Some(ingestor) => registry ! FindByName(ingestor)
    case None           => registry ! FindAll
  }

  override def receive: Receive = {
    case LookupResult(Nil) =>
      val errorCode = targetIngestor
        .map(i =>
          StatusCodes.custom(404, s"No ingestor named $i was found in the registry.")
        )
        .getOrElse(StatusCodes.BadRequest)
      complete(errorWith(errorCode))

    case LookupResult(ingestors) =>
      context.actorOf(
        IngestionSupervisor.props(request, self, ingestors, timeout)
      )

    case report: IngestionReport => complete(report)
  }

  override val supervisorStrategy = OneForOneStrategy() {
    case e: Exception =>
      fail(e)
      Stop
  }

  private def errorWith(statusCode: StatusCode) = {
    IngestionReport(request.correlationId, Map.empty, statusCode.intValue())
  }

  def complete(report: IngestionReport)

  def fail(e: Throwable)
}
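The trait leaves timeout, request, registry, complete and fail abstract. The sketch below shows one hypothetical way to mix it into a concrete actor; it is not the project's DefaultIngestionHandler, just an illustration of implementing the abstract members (the requestor parameter and class name are assumptions).

import akka.actor.{ Actor, ActorLogging, ActorRef }
import hydra.core.ingest.{ HydraRequest, IngestionReport }
import hydra.ingest.services.IngestionHandler

import scala.concurrent.duration.FiniteDuration

// Hypothetical handler: replies to the original requestor and stops itself
// once ingestion completes or fails.
class SimpleIngestionHandler(
    val request: HydraRequest,
    val registry: ActorRef,
    requestor: ActorRef,
    val timeout: FiniteDuration
) extends Actor
    with ActorLogging
    with IngestionHandler {

  override def complete(report: IngestionReport): Unit = {
    requestor ! report
    context.stop(self)
  }

  override def fail(e: Throwable): Unit = {
    requestor ! e
    context.stop(self)
  }
}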
Example 3
Source File: SyncDaemon.scala From estuary with Apache License 2.0
package com.neighborhood.aka.laplace.estuary.core.akkaUtil

import akka.actor.SupervisorStrategy.{Restart, Stop}
import akka.actor.{Actor, ActorLogging, ActorRef, InvalidActorNameException, OneForOneStrategy, Props}
import com.neighborhood.aka.laplace.estuary.bean.exception.other.WorkerInitialFailureException
import com.neighborhood.aka.laplace.estuary.core.akkaUtil.SyncDaemonCommand._
import com.neighborhood.aka.laplace.estuary.core.task.Mysql2MysqlSyncTask

import scala.util.Try

// Enclosing actor: its declaration and remaining members are omitted in the original excerpt.
class SyncDaemon extends Actor with ActorLogging {

  private def startNewTask(prop: Props, name: String): (ActorRef, String) = {
    context.child(name).fold {
      val (actor, result) = (context.actorOf(prop, name), s"syncTaskId:$name create success")
      actor ! ExternalStartCommand // send the start command
      log.info(s"start task,id:$name,time:${System.currentTimeMillis}")
      (actor, result)
    } { actorRef =>
      log.warning(s"syncTaskId:$name has already exists")
      (actorRef, s"syncTaskId:$name has already exists")
    }
  }

  override def supervisorStrategy = {
    OneForOneStrategy() {
      case InvalidActorNameException(_) => Stop
      case _: WorkerInitialFailureException =>
        Stop // the startup info does not match the actually loaded class, so the task should be stopped
      case _ => Restart
    }
  }
}

object SyncDaemon {
  def props = Props(new SyncDaemon)
}
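Because the decider is a partial function, its cases are tried in order: name collisions and initialization failures map to Stop, and only the remaining failures fall through to Restart. Creating the daemon itself is straightforward; a minimal sketch, with an illustrative system and actor name:

import akka.actor.ActorSystem
import com.neighborhood.aka.laplace.estuary.core.akkaUtil.SyncDaemon

object SyncDaemonMain extends App {
  // Illustrative names; the real application wires this up differently.
  val system = ActorSystem("estuary")
  val daemon = system.actorOf(SyncDaemon.props, "sync-daemon")
}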
Example 4
Source File: Worker.scala From akka-iot-mqtt-v2 with GNU Lesser General Public License v3.0
package akkaiot

import java.util.UUID

import scala.concurrent.duration._

import akka.actor.{ Props, ActorRef, Actor, ActorLogging, ReceiveTimeout, Terminated }
import akka.cluster.client.ClusterClient.SendToAll
import akka.actor.OneForOneStrategy
import akka.actor.SupervisorStrategy.Stop
import akka.actor.SupervisorStrategy.Restart
import akka.actor.ActorInitializationException
import akka.actor.DeathPactException

object Worker {

  def props(clusterClient: ActorRef, workProcessorProps: Props, registerInterval: FiniteDuration = 10.seconds): Props =
    Props(classOf[Worker], clusterClient, workProcessorProps, registerInterval)

  case class WorkProcessed(result: WorkResult)
}

class Worker(clusterClient: ActorRef, workProcessorProps: Props, registerInterval: FiniteDuration)
  extends Actor with ActorLogging {
  import Worker._
  import MasterWorkerProtocol._

  val workerId = UUID.randomUUID().toString

  import context.dispatcher
  val registerTask = context.system.scheduler.schedule(0.seconds, registerInterval, clusterClient,
    SendToAll("/user/master/singleton", RegisterWorker(workerId)))

  val workProcessor = context.watch(context.actorOf(workProcessorProps, "work-processor"))

  var currentWorkId: Option[String] = None
  def workId: String = currentWorkId match {
    case Some(workId) => workId
    case None         => throw new IllegalStateException("Not working")
  }

  override def supervisorStrategy = OneForOneStrategy() {
    case _: ActorInitializationException => Stop
    case _: DeathPactException => Stop
    case _: Exception =>
      currentWorkId foreach { workId => sendToMaster(WorkFailed(workerId, workId)) }
      context.become(idle)
      Restart
  }

  override def postStop(): Unit = registerTask.cancel()

  def receive = idle

  def idle: Receive = {
    case WorkIsReady =>
      sendToMaster(WorkerRequestsWork(workerId))

    case work @ Work(workId, deviceType, deviceId, state, setting) =>
      log.info("Worker -> Received work request from {}-{} | State {} | Setting {}", deviceType, deviceId, state, setting)
      currentWorkId = Some(workId)
      workProcessor ! work
      context.become(working)
  }

  def working: Receive = {
    case WorkProcessed(result: WorkResult) =>
      log.info("Worker -> Processed work: {}-{} | Work Id {}", result.deviceType, result.deviceId, workId)
      sendToMaster(WorkIsDone(workerId, workId, result))
      context.setReceiveTimeout(5.seconds)
      context.become(waitForWorkIsDoneAck(result))

    case work: Work =>
      log.info("Worker -> ALERT: Worker Id {} NOT AVAILABLE for Work Id {}", workerId, work.workId)
  }

  def waitForWorkIsDoneAck(result: WorkResult): Receive = {
    case Ack(id) if id == workId =>
      sendToMaster(WorkerRequestsWork(workerId))
      context.setReceiveTimeout(Duration.Undefined)
      context.become(idle)

    case ReceiveTimeout =>
      log.info("Worker -> ALERT: NO ACK from cluster master, retrying ... ")
      sendToMaster(WorkIsDone(workerId, workId, result))
  }

  override def unhandled(message: Any): Unit = message match {
    case Terminated(`workProcessor`) => context.stop(self)
    case WorkIsReady                 =>
    case _                           => super.unhandled(message)
  }

  def sendToMaster(msg: Any): Unit = {
    clusterClient ! SendToAll("/user/master/singleton", msg)
  }
}
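Note how the strategy distinguishes failures of the supervised work-processor child: initialization and death-pact failures stop it, while any other exception reports WorkFailed for the in-flight work, returns the worker to idle and restarts the child. A minimal creation sketch, assuming a ClusterClient ActorRef and work-processor Props obtained elsewhere (both are placeholders here):

import akka.actor.{ ActorRef, ActorSystem, Props }
import akkaiot.Worker

object WorkerWiring {
  // `clusterClient` is assumed to be a ClusterClient actor created elsewhere, and
  // `workProcessorProps` the Props of a concrete work-processing actor.
  def startWorker(system: ActorSystem, clusterClient: ActorRef, workProcessorProps: Props): ActorRef =
    system.actorOf(Worker.props(clusterClient, workProcessorProps), "worker")
}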
Example 5
Source File: CacheDataActor.scala From distributed-cache-on-k8s-poc with MIT License
package cluster

import java.util.UUID

import akka.actor.SupervisorStrategy.Stop
import akka.actor.{ Actor, ActorLogging, Props, ReceiveTimeout }
import akka.cluster.sharding.ShardRegion
import akka.cluster.sharding.ShardRegion.Passivate
import cluster.CacheDataActor.Get

class CacheDataActor extends Actor with ActorLogging {

  override def receive: Receive = {
    case Get(id) => sender ! s"cached data for id: $id"

    case ReceiveTimeout =>
      log.info(s"sending Passivate to metadata parent: {${context.parent.path.name}} for ${self.path.name}")
      context.parent ! Passivate(stopMessage = Stop)

    case Stop =>
      context.stop(self)
      log.info(s"Passivating metadata actor for ${self.path.name}")
  }
}

object CacheDataActor {

  final val numOfShards = 50 // Planned num of cluster nodes

  val extractEntityId: ShardRegion.ExtractEntityId = {
    case msg @ Get(id) => (id.toString, msg)
  }

  val extractShardId: ShardRegion.ExtractShardId = {
    case Get(id) => (id.hashCode() % numOfShards).toString
  }

  case class Get(id: UUID)

  def props: Props = Props(new CacheDataActor())
}
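Here the passivation handshake reuses SupervisorStrategy.Stop as the stopMessage: the parent shard sends it back and the entity stops itself. A sketch of how such an entity actor is typically wired into Cluster Sharding; the system and region names below are illustrative, not taken from the project.

import akka.actor.ActorSystem
import akka.cluster.sharding.{ ClusterSharding, ClusterShardingSettings }
import cluster.CacheDataActor

object CacheShardingMain extends App {
  // Illustrative names; a real deployment also needs cluster configuration.
  val system = ActorSystem("cache-cluster")

  val cacheRegion = ClusterSharding(system).start(
    typeName = "CacheData",
    entityProps = CacheDataActor.props,
    settings = ClusterShardingSettings(system),
    extractEntityId = CacheDataActor.extractEntityId,
    extractShardId = CacheDataActor.extractShardId
  )

  // Messages go to the shard region, which routes them to the right entity actor.
  cacheRegion ! CacheDataActor.Get(java.util.UUID.randomUUID())
}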