akka.actor.Terminated Scala Examples

The following examples show how to use akka.actor.Terminated. Each example lists the source file it was taken from, the project it belongs to, and that project's license.
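Most of the examples below follow the same DeathWatch pattern: an actor registers interest with context.watch and reacts to the Terminated message that is delivered when the watched actor stops. A minimal sketch of that pattern (the Watcher class is illustrative and not taken from any of the projects below):

import akka.actor.{ Actor, ActorLogging, ActorRef, Props, Terminated }

// Watches a single child and reacts to its termination.
class Watcher(childProps: Props) extends Actor with ActorLogging {
  // context.watch registers DeathWatch and returns the watched ActorRef
  private val child: ActorRef = context.watch(context.actorOf(childProps, "child"))

  override def receive: Receive = {
    case Terminated(`child`) =>
      // the backquoted pattern matches only the watched reference
      log.info("Child {} terminated, stopping watcher", child.path)
      context.stop(self)
    case msg =>
      child.forward(msg)
  }
}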
Example 1
Source File: DistributedProcessingSupervisor.scala    From aecor   with MIT License
package aecor.distributedprocessing

import aecor.distributedprocessing.DistributedProcessingSupervisor.{
  GracefulShutdown,
  ShutdownCompleted,
  Tick
}
import aecor.distributedprocessing.DistributedProcessingWorker.KeepRunning
import akka.actor.{ Actor, ActorLogging, ActorRef, Props, Terminated }
import akka.cluster.sharding.ShardRegion

import scala.concurrent.duration.{ FiniteDuration, _ }

object DistributedProcessingSupervisor {
  private final case object Tick
  final case object GracefulShutdown
  final case object ShutdownCompleted

  def props(processCount: Int, shardRegion: ActorRef, heartbeatInterval: FiniteDuration): Props =
    Props(new DistributedProcessingSupervisor(processCount, shardRegion, heartbeatInterval))
}

final class DistributedProcessingSupervisor(processCount: Int,
                                            shardRegion: ActorRef,
                                            heartbeatInterval: FiniteDuration)
    extends Actor
    with ActorLogging {

  import context.dispatcher

  private val heartbeat =
    context.system.scheduler.schedule(0.seconds, heartbeatInterval, self, Tick)

  context.watch(shardRegion)

  override def postStop(): Unit = {
    heartbeat.cancel()
    ()
  }

  override def receive: Receive = {
    case Tick =>
      (0 until processCount).foreach { processId =>
        shardRegion ! KeepRunning(processId)
      }
    case Terminated(`shardRegion`) =>
      context.stop(self)
    case GracefulShutdown =>
      log.info(s"Performing graceful shutdown of [$shardRegion]")
      shardRegion ! ShardRegion.GracefulShutdown
      val replyTo = sender()
      context.become {
        case Terminated(`shardRegion`) =>
          log.info(s"Graceful shutdown completed for [$shardRegion]")
          context.stop(self)
          replyTo ! ShutdownCompleted
      }

  }
} 
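A hedged sketch of how a caller might drive this supervisor: ask it for a GracefulShutdown and wait for the ShutdownCompleted reply that is sent once Terminated(shardRegion) has been observed (the shutdownProcessing helper is hypothetical, not part of aecor):

import aecor.distributedprocessing.DistributedProcessingSupervisor.{ GracefulShutdown, ShutdownCompleted }
import akka.actor.ActorRef
import akka.pattern.ask
import akka.util.Timeout
import scala.concurrent.Future

// Hypothetical caller-side helper; the future completes once the supervisor
// has seen Terminated(shardRegion) and replied with ShutdownCompleted.
def shutdownProcessing(supervisor: ActorRef)(implicit timeout: Timeout): Future[ShutdownCompleted.type] =
  (supervisor ? GracefulShutdown).mapTo[ShutdownCompleted.type]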
Example 2
Source File: Warmup.scala    From cloudstate   with Apache License 2.0
package io.cloudstate.proxy

import akka.actor.{Actor, ActorLogging, ActorRef, Props, SupervisorStrategy, Terminated}
import com.google.protobuf.ByteString
import io.cloudstate.proxy.eventsourced.EventSourcedEntity.{Configuration, Stop}
import Warmup.Ready
import io.cloudstate.protocol.entity.{ClientAction, Reply}
import io.cloudstate.protocol.event_sourced.{EventSourcedReply, EventSourcedStreamIn, EventSourcedStreamOut}
import io.cloudstate.proxy.entity.{EntityCommand, UserFunctionReply}
import io.cloudstate.proxy.eventsourced.EventSourcedEntity

import scala.concurrent.duration._

object Warmup {
  def props(needsWarmup: Boolean): Props = Props(new Warmup(needsWarmup))

  case object Ready
}


class Warmup(needsWarmup: Boolean) extends Actor with ActorLogging {

  if (needsWarmup) {
    log.debug("Starting warmup...")

    val stateManager = context.watch(
      context.actorOf(EventSourcedEntity.props(
                        Configuration("warmup.Service", "###warmup", 30.seconds, 100),
                        "###warmup-entity",
                        self,
                        self,
                        self
                      ),
                      "entity")
    )

    stateManager ! EntityCommand(
      entityId = "###warmup-entity",
      name = "foo",
      payload = Some(com.google.protobuf.any.Any("url", ByteString.EMPTY))
    )

    context become warmingUp(stateManager)
  }

  // Default will be overridden above if we need to warm up
  override def receive = warm

  private def warmingUp(eventSourcedEntityManager: ActorRef): Receive = {
    case Ready => sender ! false
    case ConcurrencyEnforcer.Action(_, start) =>
      log.debug("Warmup received action, starting it.")
      start()
    case EventSourcedStreamIn(EventSourcedStreamIn.Message.Event(_), _) =>
    // Ignore
    case EventSourcedStreamIn(EventSourcedStreamIn.Message.Init(_), _) =>
      log.debug("Warmup got init.")
    // Ignore
    case EventSourcedStreamIn(EventSourcedStreamIn.Message.Command(cmd), _) =>
      log.debug("Warmup got forwarded command")
      // It's forwarded us our command, send it a reply
      eventSourcedEntityManager ! EventSourcedStreamOut(
        EventSourcedStreamOut.Message.Reply(
          EventSourcedReply(
            commandId = cmd.id,
            clientAction = Some(
              ClientAction(ClientAction.Action.Reply(Reply(Some(com.google.protobuf.any.Any("url", ByteString.EMPTY)))))
            )
          )
        )
      )
    case _: UserFunctionReply =>
      log.debug("Warmup got forwarded reply")
      // It's forwarded the reply, now stop it
      eventSourcedEntityManager ! Stop
    case Terminated(_) =>
      log.info("Warmup complete")
      context.become(warm)
    case other =>
      // There are a few other messages we'll receive that we don't care about
      log.debug("Warmup received {}", other.getClass)
  }

  private def warm: Receive = {
    case Ready => sender ! true
  }

  override def supervisorStrategy: SupervisorStrategy = SupervisorStrategy.stoppingStrategy
} 
Example 3
Source File: SubscriptionSessionManager.scala    From graphcool-framework   with Apache License 2.0
package cool.graph.subscriptions.protocol

import akka.actor.{Actor, ActorRef, PoisonPill, Props, Terminated}
import cool.graph.akkautil.{LogUnhandled, LogUnhandledExceptions}
import cool.graph.bugsnag.BugSnagger
import cool.graph.messagebus.PubSubPublisher
import cool.graph.subscriptions.protocol.SubscriptionProtocolV05.Requests.{InitConnection, SubscriptionSessionRequestV05}
import cool.graph.subscriptions.protocol.SubscriptionProtocolV05.Responses.SubscriptionSessionResponseV05
import cool.graph.subscriptions.protocol.SubscriptionProtocolV07.Requests.{GqlConnectionInit, SubscriptionSessionRequest}
import cool.graph.subscriptions.protocol.SubscriptionProtocolV07.Responses.SubscriptionSessionResponse
import cool.graph.subscriptions.protocol.SubscriptionSessionManager.Requests.{EnrichedSubscriptionRequest, EnrichedSubscriptionRequestV05, StopSession}

import scala.collection.mutable

object SubscriptionSessionManager {
  object Requests {
    trait SubscriptionSessionManagerRequest

    case class EnrichedSubscriptionRequestV05(
        sessionId: String,
        projectId: String,
        request: SubscriptionSessionRequestV05
    ) extends SubscriptionSessionManagerRequest

    case class EnrichedSubscriptionRequest(
        sessionId: String,
        projectId: String,
        request: SubscriptionSessionRequest
    ) extends SubscriptionSessionManagerRequest

    case class StopSession(sessionId: String) extends SubscriptionSessionManagerRequest
  }
}

case class SubscriptionSessionManager(subscriptionsManager: ActorRef, bugsnag: BugSnagger)(
    implicit responsePublisher05: PubSubPublisher[SubscriptionSessionResponseV05],
    responsePublisher07: PubSubPublisher[SubscriptionSessionResponse]
) extends Actor
    with LogUnhandledExceptions
    with LogUnhandled {

  val sessions: mutable.Map[String, ActorRef] = mutable.Map.empty

  override def receive: Receive = logUnhandled {
    case EnrichedSubscriptionRequest(sessionId, projectId, request: GqlConnectionInit) =>
      val session = startSessionActorForCurrentProtocolVersion(sessionId, projectId)
      session ! request

    case EnrichedSubscriptionRequest(sessionId, _, request: SubscriptionSessionRequest) =>
      // we might receive session requests that are not meant for this box. So we might not find an actor for this session.
      sessions.get(sessionId).foreach { session =>
        session ! request
      }

    case EnrichedSubscriptionRequestV05(sessionId, projectId, request: InitConnection) =>
      val session = startSessionActorForProtocolVersionV05(sessionId, projectId)
      session ! request

    case EnrichedSubscriptionRequestV05(sessionId, _, request) =>
      // we might receive session requests that are not meant for this box. So we might not find an actor for this session.
      sessions.get(sessionId).foreach { session =>
        session ! request
      }

    case StopSession(sessionId) =>
      sessions.get(sessionId).foreach { session =>
        session ! PoisonPill
        sessions.remove(sessionId)
      }

    case Terminated(terminatedActor) =>
      sessions.find { _._2 == terminatedActor } match {
        case Some((sessionId, _)) => sessions.remove(sessionId)
        case None                 => // nothing to do; should not happen though
      }
  }

  private def startSessionActorForProtocolVersionV05(sessionId: String, projectId: String): ActorRef = {
    val props = Props(SubscriptionSessionActorV05(sessionId, projectId, subscriptionsManager, bugsnag, responsePublisher05))
    startSessionActor(sessionId, props)
  }

  private def startSessionActorForCurrentProtocolVersion(sessionId: String, projectId: String): ActorRef = {
    val props = Props(SubscriptionSessionActor(sessionId, projectId, subscriptionsManager, bugsnag, responsePublisher07))
    startSessionActor(sessionId, props)
  }

  private def startSessionActor(sessionId: String, props: Props): ActorRef = {
    sessions.get(sessionId) match {
      case None =>
        val ref = context.actorOf(props, sessionId)
        sessions += sessionId -> ref
        context.watch(ref)

      case Some(ref) =>
        ref
    }
  }
} 
Example 4
Source File: SubscriptionsManager.scala    From graphcool-framework   with Apache License 2.0
package cool.graph.subscriptions.resolving

import java.util.concurrent.TimeUnit

import akka.actor.{Actor, ActorRef, Props, Terminated}
import akka.util.Timeout
import cool.graph.akkautil.{LogUnhandled, LogUnhandledExceptions}
import cool.graph.bugsnag.BugSnagger
import cool.graph.messagebus.pubsub.Only
import cool.graph.shared.models.ModelMutationType.ModelMutationType
import cool.graph.subscriptions.SimpleSubscriptionInjector
import cool.graph.subscriptions.protocol.StringOrInt
import cool.graph.subscriptions.resolving.SubscriptionsManager.Requests.CreateSubscription
import play.api.libs.json._

import scala.collection.mutable

object SubscriptionsManager {
  object Requests {
    sealed trait SubscriptionsManagerRequest

    case class CreateSubscription(
        id: StringOrInt,
        projectId: String,
        sessionId: String,
        query: sangria.ast.Document,
        variables: Option[JsObject],
        authHeader: Option[String],
        operationName: Option[String]
    ) extends SubscriptionsManagerRequest

    case class EndSubscription(
        id: StringOrInt,
        sessionId: String,
        projectId: String
    ) extends SubscriptionsManagerRequest
  }

  object Responses {
    sealed trait CreateSubscriptionResponse

    case class CreateSubscriptionSucceeded(request: CreateSubscription)                      extends CreateSubscriptionResponse
    case class CreateSubscriptionFailed(request: CreateSubscription, errors: Seq[Exception]) extends CreateSubscriptionResponse
    case class SubscriptionEvent(subscriptionId: StringOrInt, payload: JsValue)
    case class ProjectSchemaChanged(subscriptionId: StringOrInt)
  }

  object Internal {
    case class ResolverType(modelId: String, mutation: ModelMutationType)
  }
}

case class SubscriptionsManager(bugsnag: BugSnagger)(implicit injector: SimpleSubscriptionInjector)
    extends Actor
    with LogUnhandled
    with LogUnhandledExceptions {

  import SubscriptionsManager.Requests._

  val invalidationSubscriber  = injector.invalidationSubscriber
  implicit val timeout        = Timeout(10, TimeUnit.SECONDS)
  private val projectManagers = mutable.HashMap.empty[String, ActorRef]

  override def receive: Receive = logUnhandled {
    case create: CreateSubscription => projectActorFor(create.projectId).forward(create)
    case end: EndSubscription       => projectActorFor(end.projectId).forward(end)
    case Terminated(ref)            => projectManagers.retain { case (_, projectActor) => projectActor != ref }
  }

  private def projectActorFor(projectId: String): ActorRef = {
    projectManagers.getOrElseUpdate(
      projectId, {
        val ref = context.actorOf(Props(SubscriptionsManagerForProject(projectId, bugsnag)), projectId)
        invalidationSubscriber.subscribe(Only(projectId), ref)
        context.watch(ref)
      }
    )
  }
} 
Example 5
Source File: PubSubRouter.scala    From graphcool-framework   with Apache License 2.0
package cool.graph.messagebus.pubsub

import akka.actor.{Actor, ActorRef, Terminated}
import akka.routing._
import cool.graph.messagebus.pubsub.PubSubProtocol.{Envelope, Publish, Subscribe, Unsubscribe}

import scala.collection.{immutable, mutable}

object PubSubProtocol {
  case class Subscribe(topic: String, actorRef: ActorRef)
  case class Publish(topic: String, message: Any)
  case class Unsubscribe(topic: String, ref: ActorRef)
  case class Envelope(actualTopic: String, message: Any)
}

case class PubSubRouterAlt() extends Actor {
  val pubSubLogic = PubSubRoutingLogic()
  var router      = Router(pubSubLogic, Vector.empty)

  override def receive: Receive = {
    case Subscribe(topic, ref) =>
      context.watch(ref)
      router = router.addRoutee(PubSubRoutee(topic, ref))

    case Publish(topic, message) =>
      router.route(Envelope(topic, message), sender())

    case Unsubscribe(topic, ref) =>
      router = router.removeRoutee(PubSubRoutee(topic, ref))

    case Terminated(a) =>
      router = router.withRoutees(router.routees.collect {
        case routee @ PubSubRoutee(_, ref) if ref != a => routee
      })
  }
}

case class PubSubRouter() extends Actor {
  val subscribers = mutable.HashMap[String, mutable.Set[ActorRef]]()

  override def receive: Receive = {
    case Subscribe(topic, ref) =>
      context.watch(ref)
      subscribers.getOrElseUpdate(topic, mutable.Set.empty) += ref

    case Publish(topic, message) =>
      subscribers.getOrElse(topic, mutable.Set.empty).foreach(_.tell(message, sender()))

    case Unsubscribe(topic, ref) =>
      subscribers.getOrElse(topic, mutable.Set.empty).remove(ref)

    case Terminated(a) =>
      subscribers.values.foreach(_.remove(a))
  }
}

case class PubSubRoutee(topic: String, ref: ActorRef) extends Routee {
  override def send(message: Any, sender: ActorRef): Unit = {
    message match {
      case Envelope(_, payload) => ref.tell(payload, sender)
      case _                    =>
    }
  }
}

case class PubSubRoutingLogic() extends RoutingLogic {
  def select(message: Any, routees: immutable.IndexedSeq[Routee]): Routee = {
    val pubSubRoutees = routees.collect {
      case pubSubRoutee: PubSubRoutee => pubSubRoutee
    }

    message match {
      case Envelope(topic, _) =>
        val targets = pubSubRoutees.filter(_.topic == topic)
        SeveralRoutees(targets.asInstanceOf[immutable.IndexedSeq[Routee]])

      case _ =>
        NoRoutee
    }
  }
} 
Example 6
Source File: Actors.scala    From graphcool-framework   with Apache License 2.0
package cool.graph.messagebus.pubsub

import akka.actor.{Actor, ActorRef, Terminated}
import cool.graph.messagebus.Conversions.Converter
import cool.graph.messagebus.pubsub.PubSubProtocol.{Subscribe, Unsubscribe}


case class IntermediateCallbackActor[T, U](topic: String, mediator: ActorRef, callback: Message[U] => Unit)(implicit converter: Converter[T, U]) extends Actor {
  mediator ! Subscribe(topic, self)

  override def receive: Receive = {
    case Message(t, msg) =>
      callback(Message(t, converter(msg.asInstanceOf[T])))

    case Unsubscribe =>
      mediator ! Unsubscribe(topic, self)
      context.stop(self)
  }
} 
Example 7
Source File: Actors.scala    From graphcool-framework   with Apache License 2.0
package cool.graph.messagebus.queue.inmemory

import akka.actor.{Actor, ActorRef, Terminated}
import akka.routing.{ActorRefRoutee, RoundRobinRoutingLogic, Router}
import cool.graph.messagebus.QueueConsumer.ConsumeFn
import cool.graph.messagebus.queue.BackoffStrategy
import cool.graph.messagebus.queue.inmemory.InMemoryQueueingMessages._


object InMemoryQueueingMessages {
  case class AddWorker(ref: ActorRef)
  object StopWork

  case class Delivery[T](payload: T, tries: Int = 0) {
    def nextTry: Delivery[T] = copy(tries = tries + 1)
  }

  case class DeferredDelivery[T](item: Delivery[T])
}

case class RouterActor[T](backoff: BackoffStrategy) extends Actor {
  import context.dispatcher

  var router = Router(RoundRobinRoutingLogic(), Vector.empty)

  override def receive = {
    case AddWorker(ref: ActorRef) =>
      context watch ref
      router = router.addRoutee(ActorRefRoutee(ref))

    case item: Delivery[T] =>
      router.route(item, sender())

    case deferred: DeferredDelivery[T] =>
      val dur = BackoffStrategy.backoffDurationFor(deferred.item.tries, backoff)
      BackoffStrategy.backoff(dur).map(_ => router.route(deferred.item, sender()))

    case Terminated(a) =>
      // todo: Restart worker actor if terminated abnormally?
      router = router.removeRoutee(a)
  }
}

case class WorkerActor[T](router: ActorRef, fn: ConsumeFn[T]) extends Actor {
  import context.dispatcher

  override def receive = {
    case i: Delivery[T] =>
      if (i.tries < 5) {
        fn(i.payload).onFailure {
          case _ => router ! DeferredDelivery(i.nextTry)
        }
      } else {
        println(s"Discarding message, tries exceeded: $i")
      }

    case StopWork =>
      context.stop(self)
  }
} 
Example 8
Source File: WebsocketSession.scala    From graphcool-framework   with Apache License 2.0
package cool.graph.websockets

import java.util.concurrent.TimeUnit

import akka.actor.{Actor, ActorRef, PoisonPill, Props, ReceiveTimeout, Stash, Terminated}
import cool.graph.akkautil.{LogUnhandled, LogUnhandledExceptions}
import cool.graph.bugsnag.BugSnagger
import cool.graph.messagebus.QueuePublisher
import cool.graph.websockets.protocol.Request
import scala.collection.mutable
import scala.concurrent.duration._

object WebsocketSessionManager {
  object Requests {
    case class OpenWebsocketSession(projectId: String, sessionId: String, outgoing: ActorRef)
    case class CloseWebsocketSession(sessionId: String)

    case class IncomingWebsocketMessage(projectId: String, sessionId: String, body: String)
    case class IncomingQueueMessage(sessionId: String, body: String)
  }

  object Responses {
    case class OutgoingMessage(text: String)
  }
}

case class WebsocketSessionManager(
    requestsPublisher: QueuePublisher[Request],
    bugsnag: BugSnagger
) extends Actor
    with LogUnhandled
    with LogUnhandledExceptions {
  import WebsocketSessionManager.Requests._

  val websocketSessions = mutable.Map.empty[String, ActorRef]

  override def receive: Receive = logUnhandled {
    case OpenWebsocketSession(projectId, sessionId, outgoing) =>
      val ref = context.actorOf(Props(WebsocketSession(projectId, sessionId, outgoing, requestsPublisher, bugsnag)))
      context.watch(ref)
      websocketSessions += sessionId -> ref

    case CloseWebsocketSession(sessionId) =>
      websocketSessions.get(sessionId).foreach(context.stop)

    case req: IncomingWebsocketMessage =>
      websocketSessions.get(req.sessionId) match {
        case Some(session) => session ! req
        case None =>
          println(s"No session actor found for ${req.sessionId} | ${req.projectId} when processing websocket message. This should only happen very rarely.")
      }

    case req: IncomingQueueMessage =>
      websocketSessions.get(req.sessionId) match {
        case Some(session) => session ! req
        case None          => // Session already closed
      }

    case Terminated(terminatedActor) =>
      websocketSessions.retain {
        case (_, sessionActor) => sessionActor != terminatedActor
      }
  }
}

case class WebsocketSession(
    projectId: String,
    sessionId: String,
    outgoing: ActorRef,
    requestsPublisher: QueuePublisher[Request],
    bugsnag: BugSnagger
) extends Actor
    with LogUnhandled
    with LogUnhandledExceptions
    with Stash {
  import WebsocketSessionManager.Requests._
  import WebsocketSessionManager.Responses._
  import metrics.SubscriptionWebsocketMetrics._

  activeWsConnections.inc
  context.setReceiveTimeout(FiniteDuration(60, TimeUnit.MINUTES))

  def receive: Receive = logUnhandled {
    case IncomingWebsocketMessage(_, _, body) => requestsPublisher.publish(Request(sessionId, projectId, body))
    case IncomingQueueMessage(_, body)        => outgoing ! OutgoingMessage(body)
    case ReceiveTimeout                       => context.stop(self)
  }

  override def postStop = {
    activeWsConnections.dec
    outgoing ! PoisonPill
    requestsPublisher.publish(Request(sessionId, projectId, "STOP"))
  }
} 
Example 9
Source File: Reader.scala    From Raphtory   with Apache License 2.0
package com.raphtory.core.components.PartitionManager

import akka.actor.Actor
import akka.actor.ActorLogging
import akka.actor.ActorRef
import akka.actor.Props
import akka.actor.Terminated
import akka.cluster.pubsub.DistributedPubSubMediator.SubscribeAck
import akka.cluster.pubsub.DistributedPubSub
import akka.cluster.pubsub.DistributedPubSubMediator
import com.raphtory.core.analysis.API.Analyser
import com.raphtory.core.components.PartitionManager.Workers.ReaderWorker
import com.raphtory.core.model.communication._
import com.raphtory.core.storage.EntityStorage
import com.raphtory.core.utils.Utils
import com.twitter.util.Eval

import scala.collection.parallel.mutable.ParTrieMap
import scala.util.Try

class Reader(
    id: Int,
    test: Boolean,
    managerCountVal: Int,
    storage: ParTrieMap[Int, EntityStorage],
    workerCount: Int = 10
) extends Actor
        with ActorLogging {

  implicit var managerCount: Int = managerCountVal

  // Id which refers to the partitions position in the graph manager map
  val managerId: Int = id

  val mediator: ActorRef = DistributedPubSub(context.system).mediator

  mediator ! DistributedPubSubMediator.Put(self)
  mediator ! DistributedPubSubMediator.Subscribe(Utils.readersTopic, self)

  var readers: ParTrieMap[Int, ActorRef] = new ParTrieMap[Int, ActorRef]()

  for (i <- 0 until workerCount) {
    log.debug("Initialising [{}] worker children for Reader [{}}.", workerCount, managerId)

    // create threads for writing
    val child = context.system.actorOf(
            Props(new ReaderWorker(managerCount, managerId, i, storage(i))).withDispatcher("reader-dispatcher"),
            s"Manager_${id}_reader_$i"
    )

    context.watch(child)
    readers.put(i, child)
  }

  override def preStart(): Unit =
    log.debug("Reader [{}] is being started.", managerId)

  override def receive: Receive = {
    case ReaderWorkersOnline()     => sender ! ReaderWorkersACK()
    case req: AnalyserPresentCheck => processAnalyserPresentCheckRequest(req)
    case req: UpdatedCounter       => processUpdatedCounterRequest(req)
    case SubscribeAck              =>
    case Terminated(child) =>
      log.warning(s"ReaderWorker with path [{}] belonging to Reader [{}] has died.", child.path, managerId)
    case x => log.warning(s"Reader [{}] received unknown [{}] message.", managerId, x)
  }

  def processAnalyserPresentCheckRequest(req: AnalyserPresentCheck): Unit = {
    log.debug(s"Reader [{}] received [{}] request.", managerId, req)

    val className   = req.className
    val classExists = Try(Class.forName(className))

    classExists.toEither.fold(
            { _: Throwable =>
              log.debug("Class [{}] was not found within this image.", className)

              sender ! ClassMissing()
            }, { _: Class[_] =>
              log.debug(s"Class [{}] exists. Proceeding.", className)

              sender ! AnalyserPresent()
            }
    )
  }

  def processUpdatedCounterRequest(req: UpdatedCounter): Unit = {
    log.debug("Reader [{}] received [{}] request.", managerId, req)

    managerCount = req.newValue
    readers.foreach(x => x._2 ! UpdatedCounter(req.newValue))
  }
} 
Example 10
Source File: BulkIndexerActorTest.scala    From elasticsearch-client   with Apache License 2.0
package com.sumologic.elasticsearch.akkahelpers

import java.util.concurrent.TimeUnit

import akka.actor.{ActorSystem, Terminated}
import akka.testkit.{ImplicitSender, TestActorRef, TestKit}
import com.sumologic.elasticsearch.akkahelpers.BulkIndexerActor.{BulkSession, CreateRequest, DocumentIndexed, ForceFlush}
import com.sumologic.elasticsearch.restlastic.{RestlasticSearchClient, RestlasticSearchClient6}
import com.sumologic.elasticsearch.restlastic.RestlasticSearchClient.ReturnTypes.BulkItem
import com.sumologic.elasticsearch.restlastic.dsl.Dsl._
import org.junit.runner.RunWith
import org.mockito.ArgumentMatchers._
import org.mockito.Matchers._
import org.mockito.Mockito._
import org.scalatest._
import org.scalatest.concurrent.Eventually
import org.scalatest.mock.MockitoSugar
import org.scalatestplus.junit.JUnitRunner

import scala.concurrent.duration._
import scala.concurrent.{Await, Future}
import scala.concurrent.duration.Duration

@RunWith(classOf[JUnitRunner])
class BulkIndexerActorTest extends TestKit(ActorSystem("TestSystem")) with WordSpecLike with Matchers
with BeforeAndAfterAll with BeforeAndAfterEach with MockitoSugar with ImplicitSender with Eventually {

  val executionContext = scala.concurrent.ExecutionContext.Implicits.global
  var indexerActor: TestActorRef[BulkIndexerActor] = _
  var mockEs = mock[RestlasticSearchClient]
  var flushTimeoutMs = 100000L
  var maxMessages = 100000

  override def beforeEach(): Unit = {
    mockEs = mock[RestlasticSearchClient]
    when(mockEs.indexExecutionCtx).thenReturn(executionContext)
    def timeout() = Duration(flushTimeoutMs, TimeUnit.MILLISECONDS)
    def max() = maxMessages
    val config = BulkConfig(timeout, max)
    indexerActor = TestActorRef[BulkIndexerActor](BulkIndexerActor.props(mockEs, config))

  }

  override def afterAll(): Unit = {
    val terminationFuture: Future[Terminated] = system.terminate()
    Await.result(terminationFuture, 5.seconds)
  }

  "BulkIndexerActor" should {
    "flush every message when set to 1" in {
      maxMessages = 1
      when(mockEs.bulkIndex(any())).thenReturn(Future.successful(Seq(BulkItem("index","type", "_id", 201, None))))
      val sess = BulkSession.create()
      indexerActor ! CreateRequest(sess, Index("i"), Type("tpe"), Document("id", Map("k" -> "v")))
      eventually {
        mockEs.bulkIndex(any())
      }
      val msg = expectMsgType[DocumentIndexed]
      msg.sessionId should be(sess)
    }

    "not flush when set to 2" in {
      maxMessages = 2
      indexerActor ! CreateRequest(BulkSession.create(), Index("i"), Type("tpe"), Document("id", Map("k" -> "v")))
      verify(mockEs, times(0)).bulkIndex(any())
    }

    "not flush when there are no messages" in {
      indexerActor ! ForceFlush
      verify(mockEs, times(0)).bulkIndex(any())
    }
  }


} 
Example 11
Source File: CapturingDomainFactory.scala    From ncdbg   with BSD 3-Clause "New" or "Revised" License
package com.programmaticallyspeaking.ncd.chrome.net

import akka.actor.{Actor, ActorRef, ActorRefFactory, ActorSystem, Inbox, Props, Terminated}
import com.programmaticallyspeaking.ncd.chrome.domains.{DefaultDomainFactory, DomainFactory}
import com.programmaticallyspeaking.ncd.ioc.Container

import scala.concurrent.duration._

class CapturingDomainFactory(implicit container: Container, system: ActorSystem) extends DomainFactory {
  import CapturingDomainFactory._
  private val defaultFactory = new DefaultDomainFactory(container)

  private var actorMustNotExist = false
  private val watcher = system.actorOf(Props(new ActorWatcher))
  private val inbox = Inbox.create(system)

  private def sendAndWait(msg: AnyRef): Any = {
    inbox.send(watcher, msg)
    inbox.receive(2.seconds)
  }

  def actorByName(name: String): Option[ActorRef] = {
    sendAndWait(FindActor(name)) match {
      case FindActorResponse(maybeRef) => maybeRef
      case other => throw new UnsupportedOperationException("Unexpected FindActor response: " + other)
    }
  }

  def requireNoOldActor(): Unit = {
    actorMustNotExist = true
  }

  override def create(domain: String)(implicit factory: ActorRefFactory): ActorRef = {
    actorByName(domain) match {
      case Some(ar) if actorMustNotExist =>
        throw new IllegalStateException("Found an old domain actor: " + ar)
      case _ => // noop
    }

    val actor = defaultFactory.create(domain)
    sendAndWait(DomainActor(actor, domain))
    actor
  }
}

object CapturingDomainFactory {

  case class DomainActor(actor: ActorRef, domain: String)
  case class FindActor(domain: String)
  case class FindActorResponse(actor: Option[ActorRef])

  class ActorWatcher extends Actor {

    private var actors = Map[String, ActorRef]()

    override def receive: Receive = {
      case DomainActor(actorRef, domain) =>
        actors += domain -> actorRef
        context.watch(actorRef)
        sender ! "ok"

      case Terminated(actorRef) =>
        actors.find(_._2 == actorRef).map(_._1).foreach(actors -= _)

      case FindActor(domain) =>
        sender ! FindActorResponse(actors.get(domain))
    }
  }
} 
Example 12
Source File: EntitySupport.scala    From akka-cqrs   with Apache License 2.0
package com.productfoundry.akka.cqrs

import akka.actor.{ActorRef, ActorSystem, PoisonPill, Terminated}
import akka.testkit.{ImplicitSender, TestKit}
import akka.util.Timeout
import org.scalatest.concurrent.Eventually
import org.scalatest.time.{Millis, Second, Span}
import org.scalatest.{BeforeAndAfter, BeforeAndAfterAll, Matchers, WordSpecLike}

import scala.concurrent.duration._

abstract class EntitySupport(_system: ActorSystem)
  extends TestKit(_system)
  with ImplicitSender
  with WordSpecLike
  with Matchers
  with BeforeAndAfterAll
  with BeforeAndAfter
  with Eventually {

  
  override def afterAll(): Unit = {
    TestKit.shutdownActorSystem(system)
  }
} 
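Only the shutdown hook of this test-support class is shown above; a spec built on it can use the TestKit's watch and expectTerminated helpers to assert on Terminated messages. A minimal sketch, with an illustrative Echo actor that is not part of the project:

import akka.actor.{ Actor, ActorSystem, PoisonPill, Props }

// Illustrative spec: register DeathWatch via the TestKit, stop the actor,
// and expect the resulting Terminated message.
class TerminationSpec extends EntitySupport(ActorSystem("termination-spec")) {

  class Echo extends Actor {
    override def receive: Receive = { case msg => sender() ! msg }
  }

  "A watched actor" should {
    "be reported as terminated after a PoisonPill" in {
      val ref = system.actorOf(Props(new Echo), "echo")
      watch(ref)
      ref ! PoisonPill
      expectTerminated(ref)
    }
  }
}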
Example 13
Source File: Worker.scala    From akka-iot-mqtt-v2   with GNU Lesser General Public License v3.0
package akkaiot

import java.util.UUID
import scala.concurrent.duration._
import akka.actor.{ Props, ActorRef, Actor, ActorLogging, ReceiveTimeout, Terminated }
import akka.cluster.client.ClusterClient.SendToAll
import akka.actor.OneForOneStrategy
import akka.actor.SupervisorStrategy.Stop
import akka.actor.SupervisorStrategy.Restart
import akka.actor.ActorInitializationException
import akka.actor.DeathPactException

object Worker {

  def props(clusterClient: ActorRef, workProcessorProps: Props, registerInterval: FiniteDuration = 10.seconds): Props =
    Props(classOf[Worker], clusterClient, workProcessorProps, registerInterval)

  case class WorkProcessed(result: WorkResult)
}

class Worker(clusterClient: ActorRef, workProcessorProps: Props, registerInterval: FiniteDuration)
  extends Actor with ActorLogging {
  import Worker._
  import MasterWorkerProtocol._

  val workerId = UUID.randomUUID().toString

  import context.dispatcher
  val registerTask = context.system.scheduler.schedule(0.seconds, registerInterval, clusterClient,
    SendToAll("/user/master/singleton", RegisterWorker(workerId)))

  val workProcessor = context.watch(context.actorOf(workProcessorProps, "work-processor"))

  var currentWorkId: Option[String] = None
  def workId: String = currentWorkId match {
    case Some(workId) => workId
    case None => throw new IllegalStateException("Not working")
  }

  override def supervisorStrategy = OneForOneStrategy() {
    case _: ActorInitializationException => Stop
    case _: DeathPactException => Stop
    case _: Exception =>
      currentWorkId foreach { workId => sendToMaster(WorkFailed(workerId, workId)) }
      context.become(idle)
      Restart
  }

  override def postStop(): Unit = registerTask.cancel()

  def receive = idle

  def idle: Receive = {
    case WorkIsReady =>
      sendToMaster(WorkerRequestsWork(workerId))

    case work @ Work(workId, deviceType, deviceId, state, setting) =>
      log.info("Worker -> Received work request from {}-{} | State {} | Setting {}", deviceType, deviceId, state, setting)
      currentWorkId = Some(workId)
      workProcessor ! work
      context.become(working)
  }

  def working: Receive = {
    case WorkProcessed(result: WorkResult) =>
      log.info("Worker -> Processed work: {}-{} | Work Id {}", result.deviceType, result.deviceId, workId)
      sendToMaster(WorkIsDone(workerId, workId, result))
      context.setReceiveTimeout(5.seconds)
      context.become(waitForWorkIsDoneAck(result))

    case work: Work =>
      log.info("Worker -> ALERT: Worker Id {} NOT AVAILABLE for Work Id {}", workerId, work.workId)
  }

  def waitForWorkIsDoneAck(result: WorkResult): Receive = {
    case Ack(id) if id == workId =>
      sendToMaster(WorkerRequestsWork(workerId))
      context.setReceiveTimeout(Duration.Undefined)
      context.become(idle)
    case ReceiveTimeout =>
      log.info("Worker -> ALERT: NO ACK from cluster master, retrying ... ")
      sendToMaster(WorkIsDone(workerId, workId, result))
  }

  override def unhandled(message: Any): Unit = message match {
    case Terminated(`workProcessor`) => context.stop(self)
    case WorkIsReady =>
    case _ => super.unhandled(message)
  }

  def sendToMaster(msg: Any): Unit = {
    clusterClient ! SendToAll("/user/master/singleton", msg)
  }
} 
Example 14
Source File: IotManager.scala    From akka-iot-mqtt-v2   with GNU Lesser General Public License v3.0
package akkaiot

import java.util.concurrent.ThreadLocalRandom
import scala.concurrent.duration._
import akka.actor.{ Props, ActorRef, Actor, ActorLogging, Terminated }
import akka.pattern._
import akka.util.Timeout
import akka.cluster.client.ClusterClient.SendToAll

import com.sandinh.paho.akka._
import com.sandinh.paho.akka.MqttPubSub._

object IotManager {
  def props(clusterClient: ActorRef, numOfDevices: Int, mqttPubSub: ActorRef): Props = Props(
    new IotManager(clusterClient, numOfDevices, mqttPubSub)
  )

  case class Ok(work: Work)
  case class NotOk(work: Work)
}

class IotManager(clusterClient: ActorRef, numOfDevices: Int, mqttPubSub: ActorRef) extends Actor with ActorLogging {
  import IotManager._
  import context.dispatcher

  private var idToActorMap = Map.empty[String, ActorRef]
  private var actorToIdMap = Map.empty[ActorRef, String]

  val deviceTypes = List("thermostat", "lamp", "security-alarm")
  def random = ThreadLocalRandom.current

  mqttPubSub ! Subscribe(MqttConfig.topic, self)

  override def preStart(): Unit = {
    log.info("IoT Manager -> Creating devices ...")

    (1 to numOfDevices).foreach { n =>
      val deviceType = deviceTypes(random.nextInt(0, deviceTypes.size))
      val deviceId = (1000 + n).toString
      val deviceActor = context.actorOf(Device.props(deviceType, deviceId, mqttPubSub), s"$deviceType-$deviceId")
      context.watch(deviceActor)
      actorToIdMap += deviceActor -> deviceId
      idToActorMap += deviceId -> deviceActor
    }

    log.info("IoT Manager -> Created {} devices!", numOfDevices)
  }

  override def postStop(): Unit = log.info("IoT Manager -> Stopped")

  override def receive: Receive = {
    case SubscribeAck(Subscribe(MqttConfig.topic, `self`, _)) => {
      log.info("IoT Manager -> MQTT subscription to {} acknowledged", MqttConfig.topic)
      context.become(ready)
    }

    case x =>
      log.info("IoT Manager -> ALERT: Problem receiving message ... {}", x)
  }

  def ready: Receive = {
    case msg: Message => {
      val work = MqttConfig.readFromByteArray[Work](msg.payload)
      log.info("IoT Agent -> Received MQTT message: {}-{} | State {} | Setting {}", work.deviceType, work.deviceId, work.currState, work.currSetting)

      log.info("IoT Manager -> Sending work to cluster master")
      implicit val timeout = Timeout(5.seconds)
      (clusterClient ? SendToAll("/user/master/singleton", work)) map {
        case Master.Ack(_) => Ok(work)
      } recover { case _ => NotOk(work)
      } pipeTo {
        idToActorMap.getOrElse(work.deviceId, `self`)
      }
    }

    case result @ WorkResult(workId, deviceType, deviceId, nextState, nextSetting) =>
      idToActorMap.get(deviceId) match {
        case Some(deviceActor) =>
          deviceActor forward result
          log.info("IoT Manager -> Work result forwarded to {}-{} ", deviceType, deviceId)
        case None =>
          log.info("IoT Manager -> ALERT: {}-{} NOT in registry!", deviceType, deviceId)
      }

    case Terminated(deviceActor) =>
      val deviceId = actorToIdMap(deviceActor)
      log.info("IoT Manager -> ALERT: Device actor terminated! Device Id {} will be removed.", deviceId)
      actorToIdMap -= deviceActor
      idToActorMap -= deviceId

    case Ok(work) =>
      log.info("IoT Manager -> ALERT: Receive ack from Master but Device Id of {}-{} NOT in registry!", work.deviceType, work.deviceId)

    case NotOk(work) =>
      log.info("IoT Manager -> ALERT: Did not receive ack from Master and Device Id of {}-{} NOT in registry!", work.deviceType, work.deviceId)

    case x =>
      log.info("IoT Manager -> ALERT: Problem with received message ... {}", x)
  }
} 
Example 15
Source File: LogStationColorizer.scala    From logstation   with Apache License 2.0
package com.jdrews.logstation.utils

import akka.actor.{Actor, ActorLogging, Terminated}
import com.jdrews.logstation.config.BridgeController
import com.jdrews.logstation.service.ServiceShutdown
import com.jdrews.logstation.webserver.LogMessage

import scala.util.control.Breaks
import scala.util.matching.Regex


class LogStationColorizer extends Actor with ActorLogging {
    // contains a map of syntaxName to regular expression.
    var syntaxList = scala.collection.mutable.Map[String, Regex]()
    private val bridge = BridgeController.getBridgeActor
    def receive = {
        case syntax: scala.collection.mutable.Map[String, Regex] =>
            log.debug(s"Got config $syntax}")
            // load up the syntaxes
            syntaxList = syntax

        case lm: LogMessage =>
            var msg = lm.logMessage
            // colorize it!
            val loop = new Breaks
            loop.breakable {
                // for each syntax in list
                syntaxList.foreach(syntax =>
                    // get the first syntax regex, and find the first one to match the log message
                    if (syntax._2.findFirstIn(lm.logMessage).isDefined) {
                        // log.debug(s"got a match! ${syntax._1}")
                        // wrap log message in new colors
                        msg = s"<span style='color:${syntax._1}'>${xml.Utility.escape(lm.logMessage)}</span>"
                        loop.break
                    }
                )
            }

            // send it to bridge actor
            bridge ! LogMessage(msg, lm.logFile)

        case ServiceShutdown =>
            context stop self
        case actTerminated: Terminated => log.info(actTerminated.toString)
        case something => log.warning(s"huh? $something")
    }
} 
Example 16
Source File: RestartSupervisor.scala    From mist   with Apache License 2.0
package io.hydrosphere.mist.utils.akka

import akka.pattern.pipe
import akka.actor.{Actor, ActorLogging, ActorRef, ActorRefFactory, Props, ReceiveTimeout, SupervisorStrategy, Terminated, Timers}
import io.hydrosphere.mist.utils.Logger

import scala.concurrent.{Future, Promise}
import scala.concurrent.duration._

class RestartSupervisor(
  name: String,
  start: () => Future[ActorRef],
  timeout: FiniteDuration,
  maxRetry: Int
) extends Actor with ActorLogging with Timers {

  override def receive: Receive = init

  import context._
  import RestartSupervisor._

  private def init: Receive = {
    case Event.Start(req) =>
      start().map(Event.Started) pipeTo self
      context become await(Some(req), 0)
  }

  private def await(req: Option[Promise[ActorRef]], attempts: Int): Receive = {
    case Event.Started(ref) =>
      req.foreach(_.success(self))
      context watch ref
      context become proxy(ref)

    case akka.actor.Status.Failure(e)  if maxRetry == attempts + 1 =>
      req.foreach(_.failure(e))
      log.error(e, "Starting child for {} failed, maxRetry reached", name)
      context stop self

    case akka.actor.Status.Failure(e) =>
      log.error(e, "Starting child for {} failed", name)
      timers.startSingleTimer("timeout", Event.Timeout, timeout)
      context become restartTimeout(req, attempts)
  }

  private def proxy(ref: ActorRef): Receive = {
    case Terminated(_) =>
      log.error(s"Reference for {} was terminated. Restarting", name)
      timers.startSingleTimer("timeout", Event.Timeout, timeout)
      context become restartTimeout(None, 0)

    case x => ref.forward(x)
  }

  private def restartTimeout(req: Option[Promise[ActorRef]], attempts: Int): Receive = {
    case Event.Timeout =>
      start().map(Event.Started) pipeTo self
      context become await(req, attempts + 1)
  }
}

object RestartSupervisor {

  sealed trait Event
  object Event {
    final case class Start(req: Promise[ActorRef]) extends Event
    case object Restart extends Event
    final case class Started(ref: ActorRef) extends Event
    case object Timeout extends Event
  }


  def props(
    name: String,
    start: () => Future[ActorRef],
    timeout: FiniteDuration,
    maxRetry: Int
  ): Props = {
    Props(classOf[RestartSupervisor], name, start, timeout, maxRetry)
  }

  def wrap(
    name: String,
    start: () => Future[ActorRef],
    timeout: FiniteDuration,
    maxRetry: Int
  )(implicit af: ActorRefFactory): Future[ActorRef] = {

    val ref = af.actorOf(props(name, start, timeout, maxRetry))
    val promise = Promise[ActorRef]
    ref ! Event.Start(promise)
    promise.future
  }

  def wrap(
    name: String,
    maxRetry: Int,
    start: () => Future[ActorRef]
  )(implicit af: ActorRefFactory): Future[ActorRef] = wrap(name, start, 5 seconds, maxRetry)(af)

} 
Example 17
Source File: TelnetDubboProviderActor.scala    From asura   with MIT License
package asura.dubbo.actor

import akka.actor.{ActorRef, Props, Status, Terminated}
import akka.util.ByteString
import asura.common.actor.{BaseActor, ErrorActorEvent, NotifyActorEvent, SenderMessage}
import asura.common.util.LogUtils

class TelnetDubboProviderActor(address: String, port: Int) extends BaseActor {

  override def receive: Receive = {
    case SenderMessage(sender) =>
      val providerActor = context.actorOf(TelnetClientActor.props(address, port, self))
      context.watch(providerActor)
      context.become(handleRequest(sender, providerActor))
  }

  def handleRequest(wsActor: ActorRef, providerActor: ActorRef): Receive = {
    case cmd: String =>
      if (cmd == TelnetDubboProviderActor.CMD_EXIT || cmd == TelnetDubboProviderActor.CMD_QUIT) {
        providerActor ! ByteString(TelnetClientActor.CMD_CLOSE)
        wsActor ! NotifyActorEvent(TelnetDubboProviderActor.MSG_BYE)
      } else {
        providerActor ! ByteString(cmd)
      }
    case data: ByteString =>
      wsActor ! NotifyActorEvent(data.utf8String)
    case Terminated(_) =>
      wsActor ! Status.Success
    case Status.Failure(t) =>
      val stackTrace = LogUtils.stackTraceToString(t)
      log.warning(stackTrace)
      wsActor ! ErrorActorEvent(t.getMessage)
      providerActor ! ByteString(TelnetClientActor.CMD_CLOSE)
      wsActor ! Status.Success
  }

  override def postStop(): Unit = {
    log.debug(s"${address}:${port} stopped")
  }
}

object TelnetDubboProviderActor {

  val CMD_QUIT = "quit"
  val CMD_EXIT = "exit"
  val MSG_BYE = "Bye!"

  def props(address: String, port: Int) = Props(new TelnetDubboProviderActor(address, port))
} 
Example 18
Source File: ContainerServiceSpec.scala    From service-container   with Apache License 2.0
package com.github.vonnagy.service.container.service

import akka.actor.{ActorSystem, Terminated}
import akka.http.scaladsl.Http
import akka.http.scaladsl.model.{HttpRequest, StatusCodes}
import com.github.vonnagy.service.container.{AkkaTestkitSpecs2Support, TestUtils}
import com.typesafe.config.ConfigFactory
import org.specs2.concurrent.ExecutionEnv
import org.specs2.matcher.FutureMatchers
import org.specs2.mutable.SpecificationLike

class ContainerServiceSpec extends AkkaTestkitSpecs2Support(ActorSystem("test", {
  val http = TestUtils.temporaryServerHostnameAndPort()

  ConfigFactory.parseString(
  s"""
      container.http.interface="${http._2}"
      container.http.port=${http._3}
    """)})) with SpecificationLike with FutureMatchers {

  sequential
  val cont = new ContainerService(Nil, Nil, name = "test")

  "The ContainerService" should {

    "create the appropriate parts during construction" in {
      cont.registeredHealthChecks must be equalTo Nil
      cont.registeredRoutes must be equalTo Nil
      cont.started must beFalse
    }

    "start properly and respond to a `/ping` request" in {
      cont.start()
      cont.started must beTrue

      val host = system.settings.config.getString("container.http.interface")
      val port = system.settings.config.getInt("container.http.port")

      val resp = Http().singleRequest(HttpRequest(uri = s"http://$host:$port/ping"))
      resp.value.get.get.status must eventually(be_==(StatusCodes.OK))

    }

    "shut down properly when asked" in {
      cont.shutdown
      implicit val ec = ExecutionEnv.fromExecutionContext(system.dispatcher)
      cont.system.whenTerminated must beAnInstanceOf[Terminated].await
    }
  }
} 
Example 19
Source File: SystemShutdownSpec.scala    From service-container   with Apache License 2.0
package com.github.vonnagy.service.container.core

import akka.actor.{ActorSystem, Terminated}
import org.specs2.concurrent.ExecutionEnv
import org.specs2.mutable.Specification


class SystemShutdownSpec extends Specification {

  "SystemShutdown" should {

    "allow the ActorSystem to be shutdown" in {
      val sys = ActorSystem()
      val shut = new SystemShutdown {
        val system = sys
      }

      shut.shutdownActorSystem(false) {}
      implicit val ec = ExecutionEnv.fromExecutionContext(sys.dispatcher)
      shut.system.whenTerminated must beAnInstanceOf[Terminated].await

      sys.whenTerminated.isCompleted must beTrue
      success
    }
  }
} 
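Both specs above lean on the fact that shutting down an ActorSystem is itself reported as a Terminated value: system.terminate() and system.whenTerminated are futures of Terminated. A minimal standalone sketch:

import akka.actor.{ ActorSystem, Terminated }
import scala.concurrent.Await
import scala.concurrent.duration._

// Shutting down an ActorSystem completes its termination future with the
// Terminated message of the system guardian actor.
object ShutdownExample extends App {
  val system = ActorSystem("shutdown-example")
  val terminated: Terminated = Await.result(system.terminate(), 10.seconds)
  println(s"System stopped, guardian was: ${terminated.actor.path}")
}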
Example 20
Source File: TransformationFrontend.scala    From fusion-data   with Apache License 2.0
package sample.cluster.transformation

import java.util.concurrent.TimeUnit
import java.util.concurrent.atomic.AtomicInteger

import akka.actor.{ Actor, ActorRef, ActorSystem, Props, Terminated }
import akka.pattern.ask
import akka.util.Timeout
import com.typesafe.config.ConfigFactory

import scala.concurrent.ExecutionContext.Implicits
import scala.concurrent.Future
import scala.concurrent.duration._
import scala.language.postfixOps

//#frontend
class TransformationFrontend extends Actor {
  var backends = IndexedSeq.empty[ActorRef]
  var jobCounter = 0

  def receive = {
    case job: TransformationJob if backends.isEmpty =>
      sender() ! JobFailed("Service unavailable, try again later", job)

    case job: TransformationJob =>
      jobCounter += 1
      backends(jobCounter % backends.size) forward job

    case BackendRegistration if !backends.contains(sender()) =>
      context watch sender()
      backends = backends :+ sender()

    case Terminated(a) =>
      backends = backends.filterNot(_ == a)
  }
}
//#frontend

object TransformationFrontend {
  def main(args: Array[String]): Unit = {
    // Override the configuration of the port when specified as program argument
    val port = if (args.isEmpty) "0" else args(0)
    val config = ConfigFactory
      .parseString(s"""
        akka.remote.netty.tcp.port=$port
        akka.remote.artery.canonical.port=$port
        """)
      .withFallback(ConfigFactory.parseString("akka.cluster.roles = [frontend]"))
      .withFallback(ConfigFactory.load("simple-cluster"))

    val system = ActorSystem("ClusterSystem", config)
    val frontend =
      system.actorOf(Props[TransformationFrontend], name = "frontend")

    val counter = new AtomicInteger
    import system.dispatcher
    system.scheduler.schedule(2.seconds, 2.seconds) {
      implicit val timeout = Timeout(5 seconds)
      (frontend ? TransformationJob("hello-" + counter.incrementAndGet())) foreach {
        case result => println(result)
      }
    }
    Future {
      TimeUnit.SECONDS.sleep(80)
      system.terminate()
    }(Implicits.global)
  }
} 
Example 21
Source File: Sender.scala    From fusion-data   with Apache License 2.0
package sample.remote.benchmark

import akka.actor.{ Actor, ActorIdentity, ActorRef, ActorSystem, Identify, Props, ReceiveTimeout, Terminated }
import com.typesafe.config.ConfigFactory

import scala.concurrent.duration._

class Sender(path: String, totalMessages: Int, burstSize: Int, payloadSize: Int) extends Actor {
  import Sender._
  val payload: Array[Byte] = Vector.fill(payloadSize)("a").mkString.getBytes
  println(s"payload bytes: ${payload.length}")
  var startTime = 0L
  var maxRoundTripMillis = 0L

  context.setReceiveTimeout(3.seconds) // set the receive timeout for this actor
  sendIdentifyRequest() // send a request to confirm that the remote actor path is valid

  override def receive: Receive = identifying

  def identifying: Receive = {
    case ActorIdentity(`path`, Some(actor)) =>
      context.watch(actor)
      context.become(active(actor))
      context.setReceiveTimeout(Duration.Undefined) // reset the receive timeout
      self ! Warmup

    case ActorIdentity(`path`, None) =>
      println(s"远程actor无效:$path")

    case ReceiveTimeout =>
      sendIdentifyRequest() // timed out, check again whether the remote actor is available
  }

  def active(actor: ActorRef): Receive = {
    case Warmup => // warm-up run, not counted in the statistics
      sendBatch(actor, burstSize)
      actor ! Start

    case Start =>
      println(s"启动基准测试一共 $totalMessages 消息,分帧大小 $burstSize,有效负载 $payloadSize")
      startTime = System.nanoTime()
      val remaining = sendBatch(actor, totalMessages)
      if (remaining == 0)
        actor ! Done
      else
        actor ! Continue(remaining, startTime, startTime, burstSize)

    case c @ Continue(remaining, t0, t1, n) =>
      val now = System.nanoTime()
      val duration = (now - t0).nanos.toMillis // time elapsed from sending Continue until its reply arrived
      val roundTripMillis = (now - t1).nanos.toMillis
      maxRoundTripMillis = math.max(maxRoundTripMillis, roundTripMillis)
      if (duration >= 500) { // report statistics in 500 ms intervals
        val throughtput = (n * 1000.0 / duration).toInt
        println(s"Sent $n messages in ${duration}ms, throughput ${throughtput}msg/s")
      }

      val nextRemaining = sendBatch(actor, remaining)
      if (nextRemaining == 0)
        actor ! Done
      else if (duration >= 500) // a full batch has been sent
        actor ! Continue(nextRemaining, now, now, burstSize)
      else // less than 500 ms have elapsed: update the remaining count, burst start time and burst message count
        actor ! c.copy(remaining = nextRemaining, burstStartTime = now, n = n + burstSize)

    case Done =>
      val took = (System.nanoTime - startTime).nanos.toMillis
      val throughtput = (totalMessages * 1000.0 / took).toInt
      println(
        s"一共花费 ${took}ms 发送了 ${totalMessages}消息, 吞吐量 ${throughtput}msg/s, " +
        s"最大往返时间 ${maxRoundTripMillis}ms, 分帧数据大小 $burstSize, " +
        s"有效负载 $payloadSize")
      actor ! Shutdown

    case Terminated(`actor`) =>
      println("Receiver terminated")
      context.system.terminate()
  }

  
  case class Continue(remaining: Int, startTime: Long, burstStartTime: Long, n: Int) extends Echo

  def main(args: Array[String]): Unit = {
    val system = ActorSystem("Sys", ConfigFactory.load("calculator"))
    val remoteHostPort = if (args.nonEmpty) args(0) else "127.0.0.1:2553"
    val remotePath = s"akka.tcp://Sys@$remoteHostPort/user/rcv"
    val totalMessages = if (args.length >= 2) args(1).toInt else 500000
    val burstSize = if (args.length >= 3) args(2).toInt else 5000
    val payloadSize = if (args.length >= 4) args(3).toInt else 100

    system.actorOf(Sender.props(remotePath, totalMessages, burstSize, payloadSize), "snd")
  }

  def props(path: String, totalMessages: Int, burstSize: Int, payloadSize: Int) =
    Props(new Sender(path, totalMessages, burstSize, payloadSize))
} 
Example 22
Source File: ChatApp.scala    From gabbler   with Apache License 2.0
package de.heikoseeberger.gabbler.chat

import akka.NotUsed
import akka.actor.{ Actor, ActorLogging, ActorSystem, Props, SupervisorStrategy, Terminated }
import akka.cluster.Cluster
import akka.cluster.singleton.{
  ClusterSingletonManager,
  ClusterSingletonManagerSettings,
  ClusterSingletonProxy,
  ClusterSingletonProxySettings
}
import akka.http.scaladsl.model.Uri
import scala.concurrent.Await
import scala.concurrent.duration.Duration

object ChatApp {

  private final class Root extends Actor with ActorLogging {

    override val supervisorStrategy = SupervisorStrategy.stoppingStrategy

    private val userRepository = {
      val userEvents =
        Uri(context.system.settings.config.getString("gabbler-chat.user-repository.user-events"))
      val userRepository =
        context.actorOf(
          ClusterSingletonManager.props(UserRepository(userEvents),
                                        NotUsed,
                                        ClusterSingletonManagerSettings(context.system)),
          UserRepository.Name
        )
      context.actorOf(
        ClusterSingletonProxy.props(userRepository.path.elements.mkString("/", "/", ""),
                                    ClusterSingletonProxySettings(context.system)),
        s"${UserRepository.Name}-proxy"
      )
    }

    context.watch(userRepository)
    log.info("gabbler-chat up and running")

    override def receive = {
      case Terminated(actor) =>
        log.error("Terminating the system because {} terminated!", actor.path)
        context.system.terminate()
    }
  }

  def main(args: Array[String]): Unit = {
    val system = ActorSystem("gabbler-chat")
    Cluster(system).registerOnMemberUp(system.actorOf(Props(new Root), "root"))
  }
} 
Example 23
Source File: UserApp.scala    From gabbler   with Apache License 2.0
package de.heikoseeberger.gabbler.user

import akka.NotUsed
import akka.actor.{ Actor, ActorLogging, ActorSystem, Props, SupervisorStrategy, Terminated }
import akka.cluster.Cluster
import akka.cluster.singleton.{
  ClusterSingletonManager,
  ClusterSingletonManagerSettings,
  ClusterSingletonProxy,
  ClusterSingletonProxySettings
}
import akka.persistence.cassandra.query.scaladsl.CassandraReadJournal
import akka.persistence.query.PersistenceQuery

object UserApp {

  final class Root extends Actor with ActorLogging {

    override val supervisorStrategy = SupervisorStrategy.stoppingStrategy

    private val userRepository = {
      val readJournal =
        PersistenceQuery(context.system)
          .readJournalFor[CassandraReadJournal](CassandraReadJournal.Identifier)
      val userRepository =
        context.actorOf(
          ClusterSingletonManager.props(UserRepository(readJournal),
                                        NotUsed,
                                        ClusterSingletonManagerSettings(context.system)),
          UserRepository.Name
        )
      context.actorOf(
        ClusterSingletonProxy.props(userRepository.path.elements.mkString("/", "/", ""),
                                    ClusterSingletonProxySettings(context.system)),
        s"${UserRepository.Name}-proxy"
      )
    }

    private val userApi = {
      val config  = context.system.settings.config
      val address = config.getString("gabbler-user.user-api.address")
      val port    = config.getInt("gabbler-user.user-api.port")
      val timeout = config.getDuration("gabbler-user.user-api.user-repository-timeout").asScala
      context.actorOf(UserApi(address, port, userRepository, timeout), UserApi.Name)
    }

    context.watch(userRepository)
    context.watch(userApi)
    log.info("gabbler-user up and running")

    override def receive = {
      case Terminated(actor) =>
        log.error("Terminating the system because {} terminated!", actor.path)
        context.system.terminate()
    }
  }

  def main(args: Array[String]): Unit = {
    val system = ActorSystem("gabbler-user")
    Cluster(system).registerOnMemberUp(system.actorOf(Props(new Root), "root"))
  }
} 
Example 24
Source File: Constructr.scala    From constructr   with Apache License 2.0 5 votes vote down vote up
package de.heikoseeberger.constructr

import akka.actor.{ Actor, ActorLogging, ActorRef, Props, SupervisorStrategy, Terminated }
import akka.cluster.{ Cluster, Member }
import akka.cluster.ClusterEvent.{ InitialStateAsEvents, MemberExited, MemberLeft, MemberRemoved }
import akka.cluster.MemberStatus.Up
import de.heikoseeberger.constructr.coordination.Coordination
import scala.concurrent.duration.{ FiniteDuration, NANOSECONDS }

object Constructr {

  final val Name = "constructr"

  def props: Props =
    Props(new Constructr)
}

final class Constructr private extends Actor with ActorLogging {

  override val supervisorStrategy = SupervisorStrategy.stoppingStrategy

  private val cluster = Cluster(context.system)

  if (cluster.settings.SeedNodes.isEmpty) {
    log.info("Creating constructr-machine, because no seed-nodes defined")
    cluster.subscribe(self,
                      InitialStateAsEvents,
                      classOf[MemberLeft],
                      classOf[MemberExited],
                      classOf[MemberRemoved])
    context.become(active(context.watch(createConstructrMachine())))
  } else {
    log.info("Stopping self, because seed-nodes defined")
    context.stop(self)
  }

  override def receive = Actor.emptyBehavior

  private def active(machine: ActorRef): Receive = {
    case Terminated(`machine`) =>
      val selfAddress = cluster.selfAddress
      def isSelfAndUp(member: Member) =
        member.address == selfAddress && member.status == Up
      if (cluster.state.members.exists(isSelfAndUp)) {
        log.error("Leaving, because constructr-machine terminated!")
        cluster.leave(selfAddress)
      } else {
        log.error("Terminating system, because constructr-machine terminated!")
        context.system.terminate()
      }

    case MemberRemoved(member, _) if member.address == cluster.selfAddress =>
      log.error("Terminating system, because member has been removed!")
      context.system.terminate()
  }

  private def createConstructrMachine() = {
    val config = context.system.settings.config
    def getDuration(key: String) =
      FiniteDuration(config.getDuration(key).toNanos, NANOSECONDS)

    val coordinationTimeout   = getDuration("constructr.coordination-timeout")
    val nrOfRetries           = config.getInt("constructr.nr-of-retries")
    val retryDelay            = getDuration("constructr.retry-delay")
    val refreshInterval       = getDuration("constructr.refresh-interval")
    val ttlFactor             = config.getDouble("constructr.ttl-factor")
    val maxNrOfSeedNodes      = config.getInt("constructr.max-nr-of-seed-nodes")
    val joinTimeout           = getDuration("constructr.join-timeout")
    val abortOnJoinTimeout    = config.getBoolean("constructr.abort-on-join-timeout")
    val ignoreRefreshFailures = config.getBoolean("constructr.ignore-refresh-failures")

    context.actorOf(
      ConstructrMachine.props(
        cluster.selfAddress,
        Coordination(context.system.name, context.system),
        coordinationTimeout,
        nrOfRetries,
        retryDelay,
        refreshInterval,
        ttlFactor,
        if (maxNrOfSeedNodes <= 0) Int.MaxValue else maxNrOfSeedNodes,
        joinTimeout,
        abortOnJoinTimeout,
        ignoreRefreshFailures
      ),
      ConstructrMachine.Name
    )
  }
} 
Example 25
Source File: CreateZipcodes.scala    From apache-spark-test   with Apache License 2.0 5 votes vote down vote up
package com.github.dnvriend

import java.nio.file.Paths
import java.nio.file.StandardOpenOption._

import akka.NotUsed
import akka.actor.{ ActorSystem, Terminated }
import akka.stream.scaladsl.{ FileIO, Source }
import akka.stream.{ ActorMaterializer, Materializer }
import akka.util.ByteString
import play.api.libs.json.Json

import scala.concurrent.{ ExecutionContext, Future }

object CreateZipcodes extends App {
  implicit val system: ActorSystem = ActorSystem()
  implicit val mat: Materializer = ActorMaterializer()
  implicit val ec: ExecutionContext = system.dispatcher

  sys.addShutdownHook {
    terminate
  }

  object Zipcode {
    implicit val format = Json.format[Zipcode]
  }
  final case class Zipcode(value: String)

  val numZips = 50000000

  def zips(range: Range): Source[ByteString, NotUsed] =
    Source(range).flatMapConcat { district =>
      Source('A' to 'Z').flatMapConcat { l1 =>
        Source('A' to 'Z').flatMapConcat { l2 =>
          Source(1 to 399).map(num => f"$district$l1$l2-$num%03d")
        }
      }
    }.map(Zipcode.apply).map(Json.toJson(_).toString).map(json => ByteString(json + "\n"))

  zips(1000 until 2000)
    .merge(zips(2000 until 3000))
    .merge(zips(3000 until 4000))
    .merge(zips(4000 until 5000))
    .merge(zips(5000 until 6000))
    .merge(zips(6000 until 7000))
    .merge(zips(7000 until 8000))
    .merge(zips(8000 until 9000))
    .take(numZips)
    .via(LogProgress.flow(each = 250000))
    .runWith(FileIO.toPath(Paths.get("/tmp/zips.json"), Set(WRITE, TRUNCATE_EXISTING, CREATE)))
    .flatMap { done =>
      println(done)
      terminate
    }.recoverWith {
      case cause: Throwable =>
        cause.printStackTrace()
        terminate
    }

  def terminate: Future[Terminated] =
    system.terminate()
} 
Example 26
Source File: CreatePosts.scala    From apache-spark-test   with Apache License 2.0 5 votes vote down vote up
package com.github.dnvriend

import java.nio.file.Paths
import java.nio.file.StandardOpenOption._
import java.text.SimpleDateFormat
import java.util.Date

import akka.actor.{ ActorSystem, Terminated }
import akka.stream.scaladsl.{ FileIO, Source }
import akka.stream.{ ActorMaterializer, Materializer }
import akka.util.ByteString
import play.api.libs.json.Json

import scala.concurrent.{ ExecutionContext, Future }
import scala.util.Random

object CreatePosts extends App {
  implicit val system: ActorSystem = ActorSystem()
  implicit val mat: Materializer = ActorMaterializer()
  implicit val ec: ExecutionContext = system.dispatcher

  def terminate: Future[Terminated] =
    system.terminate()

  sys.addShutdownHook {
    terminate
  }

  object Post {
    implicit val format = Json.format[Post]
  }

  final case class Post(
    commentCount: Int,
    lastActivityDate: String,
    ownerUserId: Long,
    body: String,
    score: Int,
    creationDate: String,
    viewCount: Int,
    title: String,
    tags: String,
    answerCount: Int,
    acceptedAnswerId: Long,
    postTypeId: Long,
    id: Long
  )

  def rng = Random.nextInt(20000)

  def now: String = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss.SSSXXX").format(new Date())

  val lorem = "Lorem ipsum dolor sit amet, consectetur adipiscing elit. Nam fringilla magna et pharetra vestibulum."
  val title = " Ut id placerat sapien. Aliquam vel metus orci."
  Source.fromIterator(() => Iterator from 0).map { id =>
    Post(rng, now, rng, List.fill(Random.nextInt(5))(lorem).mkString("\n"), rng, now, rng, s"$rng - $title", title, rng, rng, rng, id)
  }.map(Json.toJson(_).toString)
    .map(json => ByteString(json + "\n"))
    .take(1000000)
    .via(LogProgress.flow())
    .runWith(FileIO.toPath(Paths.get("/tmp/posts.json"), Set(WRITE, TRUNCATE_EXISTING, CREATE)))
    .flatMap { done =>
      println(done)
      terminate
    }.recoverWith {
      case cause: Throwable =>
        cause.printStackTrace()
        terminate
    }

} 
Example 27
Source File: Main.scala    From whirlwind-tour-akka-typed   with Apache License 2.0 5 votes vote down vote up
package de.heikoseeberger.wtat

import akka.actor.{ Actor, ActorLogging, ActorSystem, Props, Terminated }
import akka.actor.typed.SupervisorStrategy.restartWithBackoff
import akka.actor.typed.scaladsl.Actor.supervise
import akka.cluster.Cluster
import akka.cluster.bootstrap.ClusterBootstrap
import akka.cluster.http.management.ClusterHttpManagement
import akka.cluster.typed.{ ClusterSingleton, ClusterSingletonSettings }
import akka.persistence.cassandra.query.scaladsl.CassandraReadJournal
import akka.persistence.query.PersistenceQuery
import akka.stream.{ ActorMaterializer, Materializer }
import pureconfig.loadConfigOrThrow

object Main {
  import akka.actor.typed.scaladsl.adapter._

  final class Root(config: Config) extends Actor with ActorLogging {

    private implicit val mat: Materializer = ActorMaterializer()

    private val clusterSingletonSettings = ClusterSingletonSettings(context.system.toTyped)

    private val userRepository =
      ClusterSingleton(context.system.toTyped).spawn(UserRepository(),
                                                     UserRepository.Name,
                                                     akka.actor.typed.Props.empty,
                                                     clusterSingletonSettings,
                                                     UserRepository.Stop)

    private val userView = context.spawn(UserView(), UserView.Name)

    private val userProjection = {
      import config.userProjection._
      val readJournal =
        PersistenceQuery(context.system)
          .readJournalFor[CassandraReadJournal](CassandraReadJournal.Identifier)
      val userProjection =
        supervise(UserProjection(readJournal, userView, askTimeout))
          .onFailure[UserProjection.EventStreamCompleteException](
            restartWithBackoff(minBackoff, maxBackoff, 0)
          )
      ClusterSingleton(context.system.toTyped).spawn(userProjection,
                                                     UserProjection.Name,
                                                     akka.actor.typed.Props.empty,
                                                     clusterSingletonSettings,
                                                     UserProjection.Stop)
    }

    private val api = {
      import config.api._
      context.spawn(Api(address, port, userRepository, userView, askTimeout), Api.Name)
    }

    context.watch(userRepository)
    context.watch(userView)
    context.watch(userProjection)
    context.watch(api)
    log.info(s"${context.system.name} up and running")

    override def receive = {
      case Terminated(actor) =>
        log.error(s"Shutting down, because actor ${actor.path} terminated!")
        context.system.terminate()
    }
  }

  def main(args: Array[String]): Unit = {
    sys.props += "log4j2.contextSelector" -> "org.apache.logging.log4j.core.async.AsyncLoggerContextSelector"

    val config  = loadConfigOrThrow[Config]("wtat")
    val system  = ActorSystem("wtat")
    val cluster = Cluster(system)

    if (config.useClusterBootstrap) {
      ClusterHttpManagement(cluster).start()
      ClusterBootstrap(system).start()
    }

    cluster.registerOnMemberUp(system.actorOf(Props(new Root(config)), "root"))
  }
} 
Example 28
Source File: Main.scala    From perf_tester   with Apache License 2.0 5 votes vote down vote up
package akka

import akka.actor.ActorSystem
import akka.actor.ExtendedActorSystem
import akka.actor.Actor
import akka.actor.Terminated
import akka.actor.ActorLogging
import akka.actor.Props
import akka.actor.ActorRef
import scala.util.control.NonFatal


object Main {

  def main(args: Array[String]): Unit = {
    if (args.length != 1) {
      println("you need to provide exactly one argument: the class of the application supervisor actor")
    } else {
      val system = ActorSystem("Main")
      try {
        val appClass = system.asInstanceOf[ExtendedActorSystem].dynamicAccess.getClassFor[Actor](args(0)).get
        val app = system.actorOf(Props(appClass), "app")
        val terminator = system.actorOf(Props(classOf[Terminator], app), "app-terminator")
      } catch {
        case NonFatal(e) ⇒ system.terminate(); throw e
      }
    }
  }

  class Terminator(app: ActorRef) extends Actor with ActorLogging {
    context watch app
    def receive = {
      case Terminated(_) ⇒
        log.info("application supervisor has terminated, shutting down")
        context.system.terminate()
    }
  }

} 
Example 29
Source File: AddressDirectoryActor.scala    From matcher   with MIT License 5 votes vote down vote up
package com.wavesplatform.dex.actors.address

import akka.actor.{Actor, ActorRef, Props, SupervisorStrategy, Terminated}
import com.wavesplatform.dex.db.OrderDB
import com.wavesplatform.dex.domain.account.Address
import com.wavesplatform.dex.domain.utils.{EitherExt2, ScorexLogging}
import com.wavesplatform.dex.history.HistoryRouter._
import com.wavesplatform.dex.model.Events
import com.wavesplatform.dex.model.Events.OrderCancelFailed

import scala.collection.mutable

class AddressDirectoryActor(orderDB: OrderDB, addressActorProps: (Address, Boolean) => Props, historyRouter: Option[ActorRef])
    extends Actor
    with ScorexLogging {

  import AddressDirectoryActor._
  import context._

  private var startSchedules: Boolean = false
  private[this] val children          = mutable.AnyRefMap.empty[Address, ActorRef]

  override def supervisorStrategy: SupervisorStrategy = SupervisorStrategy.stoppingStrategy

  private def createAddressActor(address: Address): ActorRef = {
    log.debug(s"Creating address actor for $address")
    watch(actorOf(addressActorProps(address, startSchedules), address.toString))
  }

  private def forward(address: Address, msg: Any): Unit = (children get address, msg) match {
    case (None, _: AddressActor.Message.BalanceChanged) =>
    case _                                              => children getOrElseUpdate (address, createAddressActor(address)) forward msg
  }

  override def receive: Receive = {
    case Envelope(address, cmd) => forward(address, cmd)

    case e @ Events.OrderAdded(lo, timestamp) =>
      forward(lo.order.sender, e)
      historyRouter foreach { _ ! SaveOrder(lo, timestamp) }

    case e: Events.OrderExecuted =>
      import e.{counter, submitted}
      forward(submitted.order.sender, e)
      if (counter.order.sender != submitted.order.sender) forward(counter.order.sender, e)
      historyRouter foreach { _ ! SaveEvent(e) }

    case e: Events.OrderCanceled =>
      forward(e.acceptedOrder.order.sender, e)
      historyRouter foreach { _ ! SaveEvent(e) }

    case e: OrderCancelFailed =>
      orderDB.get(e.id) match {
        case Some(order) => forward(order.sender.toAddress, e)
        case None        => log.warn(s"The order '${e.id}' not found")
      }

    case StartSchedules =>
      if (!startSchedules) {
        startSchedules = true
        context.children.foreach(_ ! StartSchedules)
      }

    case Terminated(child) =>
      val addressString = child.path.name
      val address       = Address.fromString(addressString).explicitGet()
      children.remove(address)
      log.warn(s"Address handler for $addressString terminated")
  }
}

object AddressDirectoryActor {
  case class Envelope(address: Address, cmd: AddressActor.Message)
  case object StartSchedules
} 
Example 30
Source File: Master.scala    From asyspark   with MIT License 5 votes vote down vote up
package org.apache.spark.asyspark.core

import java.util.concurrent.TimeUnit

import akka.actor.{Actor, ActorLogging, ActorRef, ActorSystem, Address, Props, Terminated}
import akka.util.Timeout
import com.typesafe.config.Config
import com.typesafe.scalalogging.slf4j.StrictLogging
import org.apache.spark.asyspark.core.messages.master.{ClientList, RegisterClient, RegisterServer, ServerList}

import scala.concurrent.duration._
import scala.concurrent.{ExecutionContext, Future}



class Master extends Actor with ActorLogging {

  var servers = Set.empty[ActorRef]
  var clients = Set.empty[ActorRef]

  override def receive: Receive = {
    case RegisterServer(server) =>
      log.info(s"Registering server ${server.path.toString}")
      println("register server")
      servers += server
      context.watch(server)
      sender ! true

    case RegisterClient(client)  =>
      log.info(s"Registering client ${sender.path.toString}")
      clients += client
      context.watch(client)
      sender ! true

    case ServerList() =>
      log.info(s"Sending current server list to ${sender.path.toString}")
      sender ! servers.toArray

    case ClientList() =>
      log.info(s"Sending current client list to ${sender.path.toString}")
      sender ! clients.toArray


    case Terminated(actor) =>
      actor match {
        case server: ActorRef if servers contains server =>
          log.info(s"Removing server ${server.path.toString}")
          servers -= server
        case client: ActorRef if clients contains client =>
          log.info(s"Removing client ${client.path.toString}")
          clients -= client
        case actor: ActorRef =>
          log.warning(s"Actor ${actor.path.toString} will be terminated for some unknown reason")
      }
  }

}

object Master extends StrictLogging {
  def run(config: Config): Future[(ActorSystem, ActorRef)] = {
    logger.debug("Starting master actor system")
    val system = ActorSystem(config.getString("asyspark.master.system"), config.getConfig("asyspark.master"))
    logger.debug("Starting master")
    val master = system.actorOf(Props[Master], config.getString("asyspark.master.name"))
    implicit val timeout = Timeout(config.getDuration("asyspark.master.startup-timeout", TimeUnit.MILLISECONDS) milliseconds)
    implicit val ec = ExecutionContext.Implicits.global
    val address = Address("akka.tcp", config.getString("asyspark.master.system"), config.getString("asyspark.master.host"),
    config.getString("asyspark.master.port").toInt)
    system.actorSelection(master.path.toSerializationFormat).resolveOne().map {
      case actor: ActorRef =>
        logger.debug("Master successfully started")
        (system, master)

    }
  }

} 
Example 31
Source File: Replica.scala    From Principles-of-Reactive-Programming   with GNU General Public License v3.0 5 votes vote down vote up
package kvstore

import akka.actor.{ OneForOneStrategy, Props, ActorRef, Actor }
import kvstore.Arbiter._
import scala.collection.immutable.Queue
import akka.actor.SupervisorStrategy.Restart
import scala.annotation.tailrec
import akka.pattern.{ ask, pipe }
import akka.actor.Terminated
import scala.concurrent.duration._
import akka.actor.PoisonPill
import akka.actor.OneForOneStrategy
import akka.actor.SupervisorStrategy
import akka.util.Timeout

object Replica {
  sealed trait Operation {
    def key: String
    def id: Long
  }
  case class Insert(key: String, value: String, id: Long) extends Operation
  case class Remove(key: String, id: Long) extends Operation
  case class Get(key: String, id: Long) extends Operation

  sealed trait OperationReply
  case class OperationAck(id: Long) extends OperationReply
  case class OperationFailed(id: Long) extends OperationReply
  case class GetResult(key: String, valueOption: Option[String], id: Long) extends OperationReply

  def props(arbiter: ActorRef, persistenceProps: Props): Props = Props(new Replica(arbiter, persistenceProps))
}

class Replica(val arbiter: ActorRef, persistenceProps: Props) extends Actor {
  import Replica._
  import Replicator._
  import Persistence._
  import context.dispatcher

  
  def receive: Receive = {
    case JoinedPrimary   => context.become(leader)
    case JoinedSecondary => context.become(replica)
  }

  val leader: Receive = {
    case _ =>
  }

  val replica: Receive = {
    case _ =>
  }

} 
Example 32
Source File: GlobalPerformer.scala    From incubator-retired-iota   with Apache License 2.0 5 votes vote down vote up
package org.apache.iota.fey

import akka.actor.SupervisorStrategy.Restart
import akka.actor.{Actor, ActorLogging, ActorRef, OneForOneStrategy, Props, Terminated}
import akka.routing._
import play.api.libs.json.JsObject

import scala.collection.mutable.HashMap
import scala.concurrent.duration._

protected class GlobalPerformer(val orchestrationID: String,
                                val orchestrationName: String,
                                val globalPerformers: List[JsObject],
                                val ensemblesSpec :  List[JsObject]) extends Actor with ActorLogging{

  val monitoring_actor = FEY_MONITOR.actorRef
  var global_metadata: Map[String, Performer] = Map.empty[String, Performer]

  override def receive: Receive = {

    case GlobalPerformer.PRINT_GLOBAL =>
      context.actorSelection(s"*") ! FeyGenericActor.PRINT_PATH

    case Terminated(actor) =>
      monitoring_actor  ! Monitor.TERMINATE(actor.path.toString, Utils.getTimestamp)
      log.error(s"DEAD Global Performers ${actor.path.name}")
      context.children.foreach{ child =>
        context.unwatch(child)
        context.stop(child)
      }
      throw new RestartGlobalPerformers(s"DEAD Global Performer ${actor.path.name}")

    case GetRoutees => //Discard

    case x => log.warning(s"Message $x not treated by Global Performers")
  }

  
  private def loadClazzFromJar(classPath: String, jarLocation: String, jarName: String):Class[FeyGenericActor] = {
    try {
      Utils.loadActorClassFromJar(jarLocation,classPath,jarName)
    }catch {
      case e: Exception =>
        log.error(e,s"Could not load class $classPath from jar $jarLocation. Please, check the Jar repository path as well the jar name")
        throw e
    }
  }

}

object GlobalPerformer{

  val activeGlobalPerformers:HashMap[String, Map[String, ActorRef]] = HashMap.empty[String, Map[String, ActorRef]]

  case object PRINT_GLOBAL
} 
Example 33
Source File: TestHttpProxy.scala    From akka-stream-sqs   with Apache License 2.0 5 votes vote down vote up
package me.snov.akka.sqs

import akka.actor.{ActorSystem, Terminated}
import akka.http.scaladsl.Http
import akka.http.scaladsl.server.{RequestContext, Route}
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.{Sink, Source}

import scala.concurrent.{Await, Future}
import scala.concurrent.duration._

class TestHttpProxy(
                     interface: String = "localhost",
                     port: Int,
                     remoteHost: String = "localhost",
                     remotePort: Int = 9324
                   ) {

  implicit var system: ActorSystem = createActorSystem()

  private def createActorSystem() = ActorSystem("test-http-server")

  def start(): Unit = {
    implicit val materializer: ActorMaterializer = ActorMaterializer()
    implicit val executionContext = system.dispatcher

    val proxy = Route { context: RequestContext =>
      context.log.debug("Opening connection to %s:%d".format(remoteHost, remotePort))
      Source.single(context.request)
        .via(Http(system).outgoingConnection(remoteHost, remotePort))
        .runWith(Sink.head)
        .flatMap(context.complete(_))
    }

    Http().bindAndHandle(handler = proxy, interface = interface, port = port)
  }

  def stop(): Unit = {
    Await.ready(system.terminate(), 1.second)
  }

  def asyncStartAfter(d: FiniteDuration) = {
    system = createActorSystem()
    system.scheduler.scheduleOnce(d, new Runnable {
      override def run(): Unit = start()
    })(system.dispatcher)
  }
} 
Example 34
Source File: SharedFlowManager.scala    From trucking-iot   with Apache License 2.0 5 votes vote down vote up
package com.orendainx.trucking.simulator.flows

import akka.actor.{ActorRef, PoisonPill, Props, Terminated}
import com.orendainx.trucking.simulator.flows.FlowManager.ShutdownFlow
import com.orendainx.trucking.simulator.transmitters.DataTransmitter.Transmit


object SharedFlowManager {
  def props(transmitter: ActorRef) =
    Props(new SharedFlowManager(transmitter))
}
class SharedFlowManager(transmitter: ActorRef) extends FlowManager {

  def receive = {
    case msg: Transmit => transmitter ! msg

    case ShutdownFlow =>
      transmitter ! PoisonPill
      context watch transmitter

    case Terminated(`transmitter`) =>
      context stop self
  }
} 
Example 35
Source File: TruckAndTrafficFlowManager.scala    From trucking-iot   with Apache License 2.0 5 votes vote down vote up
package com.orendainx.trucking.simulator.flows

import akka.actor.{ActorRef, PoisonPill, Props, Terminated}
import com.orendainx.trucking.commons.models.{TrafficData, TruckData}
import com.orendainx.trucking.simulator.flows.FlowManager.ShutdownFlow
import com.orendainx.trucking.simulator.transmitters.DataTransmitter.Transmit


object TruckAndTrafficFlowManager {

  def props(truckTransmitter: ActorRef, trafficTransmitter: ActorRef) =
    Props(new TruckAndTrafficFlowManager(truckTransmitter, trafficTransmitter))
}

class TruckAndTrafficFlowManager(truckTransmitter: ActorRef, trafficTransmitter: ActorRef) extends FlowManager {

  var transmittersTerminated = 0

  def receive = {
    case Transmit(data: TruckData) => truckTransmitter ! Transmit(data)
    case Transmit(data: TrafficData) => trafficTransmitter ! Transmit(data)

    case ShutdownFlow =>
      truckTransmitter ! PoisonPill
      trafficTransmitter ! PoisonPill
      context watch truckTransmitter
      context watch trafficTransmitter

    case Terminated(_) =>
      transmittersTerminated += 1
      if (transmittersTerminated == 2) context stop self
  }
} 
Example 36
Source File: AutomaticCoordinator.scala    From trucking-iot   with Apache License 2.0 5 votes vote down vote up
package com.orendainx.trucking.simulator.coordinators

import akka.actor.{ActorLogging, ActorRef, PoisonPill, Props, Terminated}
import com.orendainx.trucking.simulator.coordinators.AutomaticCoordinator.TickGenerator
import com.orendainx.trucking.simulator.coordinators.GeneratorCoordinator.AcknowledgeTick
import com.orendainx.trucking.simulator.flows.FlowManager
import com.orendainx.trucking.simulator.generators.DataGenerator
import com.typesafe.config.Config

import scala.collection.mutable
import scala.concurrent.duration._
import scala.util.Random


object AutomaticCoordinator {

  case class TickGenerator(generator: ActorRef)

  def props(eventCount: Int, generators: Seq[ActorRef], flowManager: ActorRef)(implicit config: Config) =
    Props(new AutomaticCoordinator(eventCount, generators, flowManager))
}

class AutomaticCoordinator(eventCount: Int, generators: Seq[ActorRef], flowManager: ActorRef)(implicit config: Config) extends GeneratorCoordinator with ActorLogging {

  // For receive messages and an execution context
  import context.dispatcher

  // Event delay settings, and initialize a counter for each data generator
  val eventDelay = config.getInt("generator.event-delay")
  val eventDelayJitter = config.getInt("generator.event-delay-jitter")
  val generateCounters = mutable.Map(generators.map((_, 0)): _*)

  // Insert each new generator into the simulation (at a random scheduled point) and begin "ticking"
  generators.foreach { generator =>
    context.system.scheduler.scheduleOnce(Random.nextInt(eventDelay + eventDelayJitter).milliseconds, self, TickGenerator(generator))
  }

  def receive = {
    case AcknowledgeTick(generator) =>
      self ! TickGenerator(generator) // Each ack triggers another tick

    case TickGenerator(generator) =>
      generateCounters.update(generator, generateCounters(generator)+1)

      if (generateCounters(generator) <= eventCount) {
        context.system.scheduler.scheduleOnce((eventDelay + Random.nextInt(eventDelayJitter)).milliseconds, generator, DataGenerator.GenerateData)
      } else {
        // Kill the individual generator, since we are done with it.
        generator ! PoisonPill

        // If all other generators have met their count, tell flow manager to shutdown
        if (!generateCounters.values.exists(_ <= eventCount)) {
          flowManager ! FlowManager.ShutdownFlow
          context watch flowManager
        }
      }

    // Once the flow manager and its transmitters terminate, shut it all down
    case Terminated(`flowManager`) =>
      context.system.terminate()
  }
} 
Example 37
Source File: StorageNodeActor.scala    From JustinDB   with Apache License 2.0 5 votes vote down vote up
package justin.db.actors

import akka.actor.{Actor, ActorRef, Props, RootActorPath, Terminated}
import akka.cluster.ClusterEvent.{CurrentClusterState, MemberUp}
import akka.cluster.{Cluster, Member, MemberStatus}
import com.typesafe.scalalogging.StrictLogging
import justin.db.actors.protocol.{RegisterNode, _}
import justin.db.cluster.ClusterMembers
import justin.db.cluster.datacenter.Datacenter
import justin.db.consistenthashing.{NodeId, Ring}
import justin.db.replica._
import justin.db.replica.read.{ReplicaLocalReader, ReplicaReadCoordinator, ReplicaRemoteReader}
import justin.db.replica.write.{ReplicaLocalWriter, ReplicaRemoteWriter, ReplicaWriteCoordinator}
import justin.db.storage.PluggableStorageProtocol

import scala.concurrent.ExecutionContext

class StorageNodeActor(nodeId: NodeId, datacenter: Datacenter, storage: PluggableStorageProtocol, ring: Ring, n: N) extends Actor with StrictLogging {

  private[this] implicit val ec: ExecutionContext = context.dispatcher
  private[this] val cluster = Cluster(context.system)

  private[this] var clusterMembers   = ClusterMembers.empty
  private[this] val readCoordinator  = new ReplicaReadCoordinator(nodeId, ring, n, new ReplicaLocalReader(storage), new ReplicaRemoteReader)
  private[this] val writeCoordinator = new ReplicaWriteCoordinator(nodeId, ring, n, new ReplicaLocalWriter(storage), new ReplicaRemoteWriter)

  private[this] val coordinatorRouter = context.actorOf(
    props = RoundRobinCoordinatorRouter.props(readCoordinator, writeCoordinator),
    name  = RoundRobinCoordinatorRouter.routerName
  )

  private[this] val name = self.path.name

  override def preStart(): Unit = cluster.subscribe(this.self, classOf[MemberUp])
  override def postStop(): Unit = cluster.unsubscribe(this.self)

  def receive: Receive = {
    receiveDataPF orElse receiveClusterDataPF orElse receiveRegisterNodePR orElse notHandledPF
  }

  private[this] def receiveDataPF: Receive = {
    case readReq: StorageNodeReadRequest              =>
      coordinatorRouter ! ReadData(sender(), clusterMembers, readReq)
    case writeLocalDataReq: StorageNodeWriteDataLocal =>
      coordinatorRouter ! WriteData(sender(), clusterMembers, writeLocalDataReq)
    case writeClientReplicaReq: Internal.WriteReplica =>
      coordinatorRouter ! WriteData(sender(), clusterMembers, writeClientReplicaReq)
  }

  private[this] def receiveClusterDataPF: Receive = {
    case "members"                  => sender() ! clusterMembers
    case MemberUp(member)           => register(nodeId, ring, member)
    case state: CurrentClusterState => state.members.filter(_.status == MemberStatus.Up).foreach(member => register(nodeId, ring, member))
    case Terminated(actorRef)       => clusterMembers = clusterMembers.removeByRef(StorageNodeActorRef(actorRef))
  }

  private[this] def receiveRegisterNodePR: Receive = {
    case RegisterNode(senderNodeId) if clusterMembers.notContains(senderNodeId) =>
      val senderRef = sender()
      context.watch(senderRef)
      clusterMembers = clusterMembers.add(senderNodeId, StorageNodeActorRef(senderRef))
      senderRef ! RegisterNode(nodeId)
      logger.info(s"Actor[$name]: Successfully registered node [id-${senderNodeId.id}]")
    case RegisterNode(senderNodeId) =>
      logger.info(s"Actor[$name]: Node [id-${senderNodeId.id}] is already registered")
  }

  private[this] def register(nodeId: NodeId, ring: Ring, member: Member) = {
    (member.hasRole(StorageNodeActor.role), datacenter.name == member.dataCenter) match {
      case (true, true) => register()
      case (_,   false) => logger.info(s"Actor[$name]: $member doesn't belong to datacenter [${datacenter.name}]")
      case (false,   _) => logger.info(s"Actor[$name]: $member doesn't have [${StorageNodeActor.role}] role (it has roles ${member.roles})")
    }

    def register() = for {
      ringNodeId    <- ring.nodesId
      nodeName       = StorageNodeActor.name(ringNodeId, Datacenter(member.dataCenter))
      nodeRef        = context.actorSelection(RootActorPath(member.address) / "user" / nodeName)
    } yield nodeRef ! RegisterNode(nodeId)
  }

  private[this] def notHandledPF: Receive = {
    case t => logger.warn(s"Actor[$name]: Not handled message [$t]")
  }
}

object StorageNodeActor {
  def role: String = "storagenode"
  def name(nodeId: NodeId, datacenter: Datacenter): String = s"${datacenter.name}-id-${nodeId.id}"
  def props(nodeId: NodeId, datacenter: Datacenter, storage: PluggableStorageProtocol, ring: Ring, n: N): Props = {
    Props(new StorageNodeActor(nodeId, datacenter, storage, ring, n))
  }
}

case class StorageNodeActorRef(ref: ActorRef) extends AnyVal 
Example 38
Source File: CodebaseAnalyzeAggregatorActor.scala    From CodeAnalyzerTutorial   with Apache License 2.0 5 votes vote down vote up
package tutor

import java.util.Date

import akka.actor.{Actor, ActorLogging, ActorRef, Cancellable, Props, Terminated}
import akka.routing.{ActorRefRoutee, RoundRobinRoutingLogic, Router}
import tutor.CodebaseAnalyzeAggregatorActor.{AnalyzeDirectory, Complete, Report, Timeout}
import tutor.SourceCodeAnalyzerActor.NewFile
import tutor.utils.BenchmarkUtil

import scala.concurrent.duration._
import scala.util.{Failure, Success, Try}

object CodebaseAnalyzeAggregatorActor {
  def props(): Props = Props(new CodebaseAnalyzeAggregatorActor)

  final case class AnalyzeDirectory(path: String)

  final case class Complete(result: Try[SourceCodeInfo])

  final case object Timeout

  final case class Report(codebaseInfo: CodebaseInfo)

}

class CodebaseAnalyzeAggregatorActor extends Actor with ActorLogging with DirectoryScanner with ReportFormatter {
  var controller: ActorRef = _
  var currentPath: String = _
  var beginTime: Date = _
  var fileCount = 0
  var completeCount = 0
  var failCount = 0
  var result: CodebaseInfo = CodebaseInfo.empty
  var timeoutTimer: Cancellable = _

  var router: Router = {
    val routees = Vector.fill(8) {
      val r = context.actorOf(SourceCodeAnalyzerActor.props())
      context watch r
      ActorRefRoutee(r)
    }
    Router(RoundRobinRoutingLogic(), routees)
  }

  override def receive: Receive = {
    case AnalyzeDirectory(path) => {
      controller = sender()
      currentPath = path
      beginTime = BenchmarkUtil.recordStart(s"analyze folder $currentPath")
      foreachFile(path, PresetFilters.knownFileTypes, PresetFilters.ignoreFolders) { file =>
        fileCount += 1
        router.route(NewFile(file.getAbsolutePath), context.self)
      }
      import context.dispatcher
      timeoutTimer = context.system.scheduler.scheduleOnce((fileCount / 1000).seconds, context.self, Timeout)
    }
    case Complete(Success(sourceCodeInfo: SourceCodeInfo)) => {
      completeCount += 1
      result = result + sourceCodeInfo
      finishIfAllComplete()
    }
    case Complete(Failure(exception)) => {
      completeCount += 1
      failCount += 1
      log.warning("processing file failed {}", exception)
      finishIfAllComplete()
    }
    case Timeout => {
      println(s"${result.totalFileNums} of $fileCount files processed before timeout")
      controller ! Report(result)
      BenchmarkUtil.recordElapse(s"analyze folder $currentPath", beginTime)
    }
    case Terminated(a) =>
      router = router.removeRoutee(a)
      val r = context.actorOf(Props[SourceCodeAnalyzerActor])
      context watch r
      router = router.addRoutee(r)
    case x@_ => log.error(s"receive unknown message $x")
  }

  def finishIfAllComplete(): Unit = {
    if (completeCount == fileCount) {
      timeoutTimer.cancel()
      controller ! Report(result)
      BenchmarkUtil.recordElapse(s"analyze folder $currentPath", beginTime)
      context.stop(self)
    }
  }
} 
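A small, hedged usage sketch for the aggregator above. It assumes the rest of the tutorial project (SourceCodeAnalyzerActor, DirectoryScanner, CodebaseInfo) is on the classpath; the object name, actor system name and directory path are illustrative only.

import akka.actor.ActorSystem
import akka.pattern.ask
import akka.util.Timeout
import scala.concurrent.duration._
import tutor.CodebaseAnalyzeAggregatorActor
import tutor.CodebaseAnalyzeAggregatorActor.{AnalyzeDirectory, Report}

object AnalyzeCodebaseExample extends App {
  implicit val timeout: Timeout = Timeout(10.minutes)
  val system = ActorSystem("code-analyzer")
  import system.dispatcher

  val aggregator = system.actorOf(CodebaseAnalyzeAggregatorActor.props(), "aggregator")

  // The aggregator replies to its sender with Report(codebaseInfo), so the ask pattern works here.
  (aggregator ? AnalyzeDirectory("/path/to/codebase")).mapTo[Report].foreach { report =>
    println(report.codebaseInfo)
    system.terminate()
  }
}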
Example 39
Source File: LocalTransformServiceActor.scala    From mleap   with Apache License 2.0 5 votes vote down vote up
package ml.combust.mleap.executor.service

import akka.actor.{Actor, ActorRef, Props, Status, Terminated}
import akka.stream.{ActorMaterializer, Materializer}
import ml.combust.mleap.executor.repository.RepositoryBundleLoader
import ml.combust.mleap.executor._
import ml.combust.mleap.executor.error.NotFoundException

import scala.util.{Failure, Success, Try}

object LocalTransformServiceActor {
  def props(loader: RepositoryBundleLoader,
            config: ExecutorConfig): Props = {
    Props(new LocalTransformServiceActor(loader, config))
  }

  object Messages {
    case object Close
  }
}

class LocalTransformServiceActor(loader: RepositoryBundleLoader,
                                 config: ExecutorConfig) extends Actor {
  import LocalTransformServiceActor.Messages

  private implicit val materializer: Materializer = ActorMaterializer()(context.system)

  private var lookup: Map[String, ActorRef] = Map()
  private var modelNameLookup: Map[ActorRef, String] = Map()

  override def postStop(): Unit = {
    for (child <- context.children) {
      context.unwatch(child)
      context.stop(child)
    }
  }

  override def receive: Receive = {
    case request: TransformFrameRequest => handleModelRequest(request)
    case request: GetBundleMetaRequest => handleModelRequest(request)
    case request: GetModelRequest => handleModelRequest(request)
    case request: CreateFrameStreamRequest => handleModelRequest(request)
    case request: CreateRowStreamRequest => handleModelRequest(request)
    case request: GetRowStreamRequest => handleModelRequest(request)
    case request: CreateFrameFlowRequest => handleModelRequest(request)
    case request: GetFrameStreamRequest => handleModelRequest(request)
    case request: CreateRowFlowRequest => handleModelRequest(request)
    case request: UnloadModelRequest => handleModelRequest(request)
    case request: LoadModelRequest => loadModel(request)
    case Messages.Close => context.stop(self)

    case Terminated(actor) => terminated(actor)
  }

  def handleModelRequest(request: ModelRequest): Unit = {
    lookup.get(request.modelName) match {
      case Some(actor) => actor.tell(request, sender)
      case None => sender ! Status.Failure(new NotFoundException(s"no model with name ${request.modelName}"))
    }
  }

  def loadModel(request: LoadModelRequest): Unit = {
    Try(context.actorOf(BundleActor.props(request, loader, config), request.modelName)) match {
      case Success(actor) =>
        lookup += (request.modelName -> actor)
        modelNameLookup += (actor -> request.modelName)
        context.watch(actor)
        actor.tell(request, sender)
      case Failure(err) => sender ! Status.Failure(err)
    }
  }

  private def terminated(ref: ActorRef): Unit = {
    val uri = modelNameLookup(ref)
    modelNameLookup -= ref
    lookup -= uri
  }
} 
Example 40
Source File: MetricsRouter.scala    From akka-visualmailbox   with Apache License 2.0 5 votes vote down vote up
package de.aktey.akka.visualmailbox

import akka.actor.{Actor, ActorLogging, ActorRef, Props, Terminated}

class MetricsRouter extends Actor with ActorLogging {

  import context._

  var routees: Set[ActorRef] = Set.empty

  override def postStop() {
    routees foreach unwatch
  }

  def receive = {
    case registrar: ActorRef =>
      watch(registrar)
      routees += registrar
      if (log.isDebugEnabled) log.debug(s"""{"type":"registered","registered":"$registrar","routees":${routees.size}}""")
    case Terminated(ref) =>
      unwatch(ref)
      routees -= ref
      if (log.isDebugEnabled) log.debug(s"""{"type":"unregistered","terminated":"$ref","routees":${routees.size}}""")
    case msg =>
      routees foreach (_ forward msg)
  }
}

object MetricsRouter {
  def props() = Props(new MetricsRouter)
} 
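A minimal, hypothetical wiring of the router above: plain ActorRef messages register routees, and anything else is forwarded to all of them. The routee class and actor names are invented for illustration.

import akka.actor.{Actor, ActorSystem, Props}
import de.aktey.akka.visualmailbox.MetricsRouter

class PrintingRoutee extends Actor {
  def receive = {
    case msg => println(s"${self.path.name} received: $msg")
  }
}

object MetricsRouterExample extends App {
  val system = ActorSystem("metrics-example")
  val router = system.actorOf(MetricsRouter.props(), "metrics-router")

  val routee1 = system.actorOf(Props[PrintingRoutee], "routee-1")
  val routee2 = system.actorOf(Props[PrintingRoutee], "routee-2")

  router ! routee1 // an ActorRef message registers (and watches) a routee
  router ! routee2
  router ! "some-metric" // any other message is forwarded to every registered routee
}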
Example 41
Source File: WatchDistributedCompletionActor.scala    From matcher   with MIT License 5 votes vote down vote up
package com.wavesplatform.dex.actors

import akka.actor.{Actor, ActorRef, Cancellable, Props, Terminated}
import com.wavesplatform.dex.domain.utils.ScorexLogging

import scala.concurrent.duration.FiniteDuration

class WatchDistributedCompletionActor(workers: Set[ActorRef],
                                      completionReceiver: ActorRef,
                                      startWorkCommand: Any,
                                      workCompleted: Any,
                                      timeout: FiniteDuration)
    extends Actor
    with ScorexLogging {

  import context.dispatcher

  if (workers.isEmpty) stop(Cancellable.alreadyCancelled)
  else
    workers.foreach { x =>
      context.watch(x)
      x ! startWorkCommand
    }

  override def receive: Receive = state(workers, context.system.scheduler.scheduleOnce(timeout, self, TimedOut))

  private def state(rest: Set[ActorRef], timer: Cancellable): Receive = {
    case `workCompleted` =>
      switchTo(rest - sender(), timer)
      context.unwatch(sender())

    case Terminated(ref) =>
      switchTo(rest - ref, timer)

    case TimedOut =>
      val workerPairs = workers.iterator.map(_.path.name).mkString(", ")
      log.error(s"$startWorkCommand is timed out! Workers those didn't respond: $workerPairs")
      stop(timer)
  }

  private def switchTo(updatedRest: Set[ActorRef], timer: Cancellable): Unit =
    if (updatedRest.isEmpty) stop(timer) else context.become(state(updatedRest, timer))

  private def stop(timer: Cancellable): Unit = {
    timer.cancel()
    completionReceiver ! workCompleted
    context.stop(self)
  }
}

object WatchDistributedCompletionActor {
  def props(workers: Set[ActorRef], completionReceiver: ActorRef, startWorkCommand: Any, workCompleted: Any, timeout: FiniteDuration): Props =
    Props(new WatchDistributedCompletionActor(workers, completionReceiver, startWorkCommand, workCompleted, timeout))
} 
Example 42
Source File: TestProbeUtil.scala    From incubator-retired-gearpump   with Apache License 2.0 5 votes vote down vote up
package org.apache.gearpump

import scala.language.implicitConversions

import akka.actor.{Actor, Props, Terminated}
import akka.testkit.TestProbe

object TestProbeUtil {
  implicit def toProps(probe: TestProbe): Props = {
    Props(new Actor {
      val probeRef = probe.ref
      context.watch(probeRef)
      def receive: Receive = {
        case Terminated(probeRef) => context.stop(self)
        case x => probeRef.forward(x)
      }
    })
  }
} 
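A hedged sketch of how this implicit conversion might be used: with TestProbeUtil._ in scope, a TestProbe can be passed wherever Props are expected, and the spawned proxy forwards every message to the probe. The object and actor names below are arbitrary.

import akka.actor.ActorSystem
import akka.testkit.TestProbe
import org.apache.gearpump.TestProbeUtil._

object TestProbeUtilExample extends App {
  implicit val system: ActorSystem = ActorSystem("test-probe-example")

  val probe = TestProbe()
  // The implicit toProps conversion turns the probe into Props for a forwarding proxy actor.
  val proxy = system.actorOf(probe, "probe-proxy")

  proxy ! "ping"
  probe.expectMsg("ping")

  system.terminate()
}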
Example 43
Source File: JobReceptionist.scala    From 006877   with MIT License 5 votes vote down vote up
package aia.cluster
package words

import java.net.URLEncoder

import akka.actor._
import akka.actor.Terminated


object JobReceptionist {
  def props = Props(new JobReceptionist)
  def name = "receptionist"

  case class JobRequest(name: String, text: List[String])

  sealed trait Response
  case class JobSuccess(name: String, map: Map[String, Int]) extends Response
  case class JobFailure(name: String) extends Response

  case class WordCount(name: String, map: Map[String, Int])

  case class Job(name: String, text: List[String], respondTo: ActorRef, jobMaster: ActorRef)
}

class JobReceptionist extends Actor
                         with ActorLogging
                         with CreateMaster {
  import JobReceptionist._
  import JobMaster.StartJob
  import context._

  override def supervisorStrategy: SupervisorStrategy =
    SupervisorStrategy.stoppingStrategy

  var jobs = Set[Job]()
  var retries = Map[String, Int]()
  val maxRetries = 3


  def receive = {
    case JobRequest(name, text) =>
      log.info(s"Received job $name")

      val masterName = "master-"+URLEncoder.encode(name, "UTF8")
      val jobMaster = createMaster(masterName)

      val job = Job(name, text, sender, jobMaster)
      jobs = jobs + job

      jobMaster ! StartJob(name, text)
      watch(jobMaster)

    case WordCount(jobName, map) =>
      log.info(s"Job $jobName complete.")
      log.info(s"result:${map}")
      jobs.find(_.name == jobName).foreach { job =>
        job.respondTo ! JobSuccess(jobName, map)
        stop(job.jobMaster)
        jobs = jobs - job
      }

    case Terminated(jobMaster) =>
      jobs.find(_.jobMaster == jobMaster).foreach { failedJob =>
        log.error(s"Job Master $jobMaster terminated before finishing job.")

        val name = failedJob.name
        log.error(s"Job ${name} failed.")
        val nrOfRetries = retries.getOrElse(name, 0)

        if(maxRetries > nrOfRetries) {
          if(nrOfRetries == maxRetries -1) {
            // Simulating that the Job worker will work just before max retries
            val text = failedJob.text.filterNot(_.contains("FAIL"))
            self.tell(JobRequest(name, text), failedJob.respondTo)
          } else self.tell(JobRequest(name, failedJob.text), failedJob.respondTo)

          retries = retries + retries.get(name).map(r=> name -> (r + 1)).getOrElse(name -> 1)
        }
      }
  }
}

trait CreateMaster {
  def context: ActorContext
  def createMaster(name: String) = context.actorOf(JobMaster.props, name)
} 
Example 44
Source File: OrderedKiller.scala    From Akka-Cookbook   with MIT License 5 votes vote down vote up
package com.packt.chapter10

import akka.actor.{Actor, ActorLogging, ActorRef, Terminated}
import akka.pattern._

import scala.concurrent.duration._
import scala.concurrent.Future

object OrderedKiller {
  case object AllChildrenStopped
  case class GetChildren(parentActor: ActorRef)
  case class Children(children: Iterable[ActorRef])
}

abstract class OrderedKiller extends Actor with ActorLogging {
  import OrderedKiller._
  import context._

  def killChildrenOrderly(orderedChildren: List[ActorRef]): Future[Any] = {
    orderedChildren.foldLeft(Future(AllChildrenStopped))(
      (p, child) => p.flatMap(_ => gracefulStop(child, 2 seconds).map(_ => AllChildrenStopped))
    )
  }

  def noChildrenRegistered: Receive = {
    case GetChildren(parentActor) =>
      watch(parentActor)
      parentActor ! Children(children)
      become(childrenRegistered(parentActor))
  }

  def childrenRegistered(to: ActorRef): Receive = {
    case GetChildren(parentActor) if sender == to =>
      parentActor ! Children(children)
    case Terminated(`to`) =>
      killChildrenOrderly(orderChildren(children)) pipeTo self
    case AllChildrenStopped =>
      stop(self)
  }

  def orderChildren(unorderedChildren: Iterable[ActorRef]) : List[ActorRef]

  def receive = noChildrenRegistered
} 
Example 45
Source File: Reaper.scala    From Akka-Cookbook   with MIT License 5 votes vote down vote up
package com.packt.chapter10

import akka.actor.{Actor, ActorLogging, ActorRef, Terminated}

import scala.collection.mutable

object Reaper {
  case class WatchMe(ref: ActorRef)
}

class Reaper extends Actor with ActorLogging {
  import Reaper._

  val watched = mutable.Set.empty[ActorRef]

  def allActorsTerminated() = {
    log.info("All actors terminated. Proceeding to shutdown system.")
    context.system.terminate()
  }

  def receive = {
    case WatchMe(ref) =>
      log.info(s"Registering ${ref.path.name}.")
      context.watch(ref)
      watched += ref
    case Terminated(ref) =>
      log.info(s"Terminated ${ref.path.name}")
      watched -= ref
      if (watched.isEmpty) allActorsTerminated()
  }

  override def preStart() = log.info(s"${self.path.name} is running")
} 
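A small, hypothetical end-to-end use of the Reaper above: every actor that matters is registered with WatchMe, and once the last watched actor stops the reaper shuts the system down. The worker actor here is only a stand-in.

import akka.actor.{Actor, ActorSystem, PoisonPill, Props}
import com.packt.chapter10.Reaper
import com.packt.chapter10.Reaper.WatchMe

class NoopWorker extends Actor {
  def receive = Actor.emptyBehavior
}

object ReaperExample extends App {
  val system = ActorSystem("reaper-example")
  val reaper = system.actorOf(Props[Reaper], "reaper")

  val worker1 = system.actorOf(Props[NoopWorker], "worker-1")
  val worker2 = system.actorOf(Props[NoopWorker], "worker-2")
  reaper ! WatchMe(worker1)
  reaper ! WatchMe(worker2)

  // When the last watched actor terminates, the reaper calls system.terminate().
  worker1 ! PoisonPill
  worker2 ! PoisonPill
}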
Example 46
Source File: MasterWorkPulling.scala    From Akka-Cookbook   with MIT License 5 votes vote down vote up
package com.packt.chapter10

import akka.actor.{Actor, ActorLogging, ActorRef, Terminated}
import com.packt.chapter10.WorkerWorkPulling._

import scala.collection.mutable

object MasterWorkPulling {
  case object JoinWorker
  case object DeregisterWorker
}

class MasterWorkPulling(maxQueueSize: Int) extends Actor with ActorLogging {
  import MasterWorkPulling._

  val workers = mutable.Map.empty[ActorRef, WorkerState]
  val pendingWork = mutable.Queue.empty[Work]

  def receive = {
    case JoinWorker =>
      workers += sender -> Idle
      context.watch(sender)
      log.info(s"New worker registered [$sender]")
    case Terminated(actorRef) =>
      workers -= actorRef
      log.info(s"Worker terminated [$actorRef]")
    case DeregisterWorker =>
      workers -= sender
      log.info(s"Worker deregistered [$sender]")
    case PullWork if pendingWork.nonEmpty =>
      log.info(s"Idle worker asking for work. Setting [$sender] to [Working] state")
      sender ! pendingWork.dequeue
      workers += sender -> Working
    case PullWork =>
      log.info(s"Idle worker asking for work but no work available. Setting [$sender] to [Idle] state")
      workers += sender -> Idle
    case work : Work if pendingWork.size > maxQueueSize =>
      log.info(s"Work received but max pending work tasks reached. Rejecting [$work]")
      sender ! RejectWork(work)
    case work : Work =>
      pendingWork.enqueue(work)
      workers.find(_._2 == Idle) match {
        case Some((worker, _)) =>
          val nextWork = pendingWork.dequeue
          worker ! nextWork
          workers += worker -> Working
          log.info(s"Work received and found idle worker. Submitting [$nextWork] to [$worker]")
        case None =>
          log.info(s"Work received and no idle worker found. Adding to pending work tasks queue.")
      }
  }
} 
Example 47
Source File: MasterActor.scala    From Akka-Cookbook   with MIT License 5 votes vote down vote up
package com.packt.chapter7

import akka.actor.{Actor, ActorRef, Terminated}
import scala.util.Random

case class RegisterWorker(workerActor: ActorRef)

class MasterActor extends Actor {
  var workers = List.empty[ActorRef]

  def receive = {
    case RegisterWorker(workerActor) =>
      context.watch(workerActor)
      workers =  workerActor :: workers
    case Terminated(actorRef) =>
      println(s"Actor ${actorRef.path.address} has been terminated. Removing from available workers.")
      workers = workers.filterNot(_ == actorRef)
    case work: Work if workers.isEmpty =>
      println("We cannot process your work since there is no workers.")
    case work: Work =>
      workers(Random.nextInt(workers.size)) ! work
    case WorkDone(workId) =>
      println(s"Work with id $workId is done.")
  }
} 
Example 48
Source File: ChatServer.scala    From Akka-Cookbook   with MIT License 5 votes vote down vote up
package com.packt.chapter7

import akka.actor.{Actor, ActorRef, Props, Terminated}

object ChatServer {
  case object Connect
  case object Disconnect
  case object Disconnected
  case class Message(author: ActorRef, body: String, creationTimestamp : Long = System.currentTimeMillis())

  def props = Props(new ChatServer())
}

class ChatServer extends Actor {
  import ChatServer._

  var onlineClients = Set.empty[ActorRef]

  def receive = {
    case Connect =>
      onlineClients += sender
      context.watch(sender)
    case Disconnect =>
      onlineClients -= sender
      context.unwatch(sender)
      sender ! Disconnected
    case Terminated(ref) =>
      onlineClients -= ref
    case msg: Message =>
      onlineClients.filter(_ != sender).foreach(_ ! msg)
  }
} 
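A hedged companion sketch for the server above: a throwaway client actor that connects on start and prints whatever it receives. The client class, object and actor names are invented for the example.

import akka.actor.{Actor, ActorRef, ActorSystem, Props}
import com.packt.chapter7.ChatServer
import com.packt.chapter7.ChatServer._

class ChatClient(server: ActorRef) extends Actor {
  override def preStart(): Unit = server ! Connect

  def receive = {
    case Message(author, body, _) => println(s"${author.path.name}: $body")
    case Disconnected             => context.stop(self)
  }
}

object ChatServerExample extends App {
  val system = ActorSystem("chat-example")
  val server = system.actorOf(ChatServer.props, "chat-server")

  val alice = system.actorOf(Props(new ChatClient(server)), "alice")
  val bob   = system.actorOf(Props(new ChatClient(server)), "bob")

  // Messages are fanned out to every online client except the sender.
  server.tell(Message(alice, "Hello, everyone!"), alice)
}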
Example 49
Source File: TopicBufferActor.scala    From lagom   with Apache License 2.0 5 votes vote down vote up
package com.lightbend.lagom.internal.testkit

import akka.actor.Actor
import akka.actor.ActorLogging
import akka.actor.ActorRef
import akka.actor.Props
import akka.actor.Terminated

import scala.collection.mutable

private[lagom] object TopicBufferActor {
  def props(): Props = Props(new TopicBufferActor())

  case class SubscribeToBuffer(groupId: String, actorRef: ActorRef)
}

private[lagom] class TopicBufferActor extends Actor with ActorLogging {
  import TopicBufferActor._

  var downstreams                    = Map.empty[String, ActorRef]
  val buffer: mutable.Buffer[AnyRef] = mutable.Buffer.empty[AnyRef]

  override def receive: Receive = {
    case SubscribeToBuffer(groupId, ref) => {
      downstreams = downstreams + (groupId -> ref)
      buffer.foreach(msg => ref.tell(msg, ActorRef.noSender))
      context.watch(ref)
    }

    case Terminated(deadWatch) =>
      log.warning(
        "Downstream actor {} terminated. This could mean that code consuming from the topic ended prematurely.",
        deadWatch
      )

    case message: AnyRef => {
      downstreams.values.foreach(ref => ref ! message)
      buffer.append(message)
    }
  }
} 
Example 50
Source File: PerChatRequestsBot.scala    From telegram   with Apache License 2.0 5 votes vote down vote up
import akka.actor.{Actor, ActorRef, Props, Terminated}
import cats.syntax.functor._
import cats.instances.future._
import com.bot4s.telegram.api.declarative.Commands
import com.bot4s.telegram.api.{ActorBroker, AkkaDefaults}
import com.bot4s.telegram.future.Polling
import com.bot4s.telegram.methods.SendMessage
import com.bot4s.telegram.models.{Message, Update}

import scala.concurrent.Future

trait PerChatRequests extends ActorBroker with AkkaDefaults {

  override val broker = Some(system.actorOf(Props(new Broker), "broker"))

  class Broker extends Actor {
    val chatActors = collection.mutable.Map[Long, ActorRef]()

    def receive = {
      case u: Update =>
        u.message.foreach { m =>
          val id = m.chat.id
          val handler = chatActors.getOrElseUpdate(m.chat.id, {
            val worker = system.actorOf(Props(new Worker), s"worker_$id")
            context.watch(worker)
            worker
          })
          handler ! m
        }

      case Terminated(worker) =>
        // Linear scan over chatActors; a reverse index from worker to chat id would be faster.
        chatActors.find(_._2 == worker).foreach {
          case (k, _) => chatActors.remove(k)
        }

      case _ =>
    }
  }

  // For every chat a new worker actor will be spawned.
  // All requests will be routed through this worker actor, allowing it to maintain per-chat state.
  class Worker extends Actor {
    def receive = {
      case m: Message =>
        request(SendMessage(m.source, self.toString))

      case _ =>
    }
  }
}

class PerChatRequestsBot(token: String) extends ExampleBot(token)
  with Polling
  with Commands[Future]
  with PerChatRequests {

  // Commands work as usual.
  onCommand("/hello") { implicit msg =>
    reply("Hello World!").void
  }
} 
Example 51
Source File: MongoClient.scala    From tepkin   with Apache License 2.0 5 votes vote down vote up
package net.fehmicansaglam.tepkin


import akka.actor.ActorDSL._
import akka.actor.{ActorRef, ActorRefFactory, ActorSystem, Terminated}
import net.fehmicansaglam.tepkin.TepkinMessage.ShutDown
import net.fehmicansaglam.tepkin.protocol.ReadPreference

import scala.concurrent.ExecutionContext

class MongoClient(_context: ActorRefFactory, uri: MongoClientUri, nConnectionsPerNode: Int) {
  val poolManager = _context.actorOf(
    MongoPoolManager
      .props(uri, nConnectionsPerNode, uri.option("readPreference").map(ReadPreference.apply))
      .withMailbox("tepkin-mailbox"),
    name = "tepkin-pool")


  implicit def context: ActorRefFactory = _context

  implicit def ec: ExecutionContext = _context.dispatcher

  def apply(databaseName: String): MongoDatabase = {
    require(databaseName != null && databaseName.getBytes("UTF-8").size < 123,
      "Database name must be shorter than 123 bytes")
    new MongoDatabase(poolManager, databaseName)
  }

  def db(databaseName: String): MongoDatabase = {
    apply(databaseName)
  }

  def shutdown(): Unit = {
    poolManager ! ShutDown
  }

  
  def shutdown(ref: ActorRef, refs: ActorRef*): Unit = {
    val allRefs = refs :+ ref

    actor(new Act {
      var remaining = allRefs.length

      whenStarting {
        allRefs.foreach(context.watch)
      }

      become {
        case _: Terminated =>
          remaining -= 1
          if (remaining == 0) {
            poolManager ! ShutDown
            context.stop(self)
          }
      }
    })

    ()
  }
}

object MongoClient {

  def apply(uri: String,
            nConnectionsPerNode: Int = 10,
            context: ActorRefFactory = ActorSystem("tepkin-system")): MongoClient = {
    new MongoClient(context, MongoClientUri(uri), nConnectionsPerNode)
  }

} 
Example 52
Source File: Register.scala    From eclair   with Apache License 2.0 5 votes vote down vote up
package fr.acinq.eclair.channel

import akka.actor.Status.Failure
import akka.actor.{Actor, ActorLogging, ActorRef, Terminated}
import fr.acinq.bitcoin.ByteVector32
import fr.acinq.bitcoin.Crypto.PublicKey
import fr.acinq.eclair.ShortChannelId
import fr.acinq.eclair.channel.Register._



class Register extends Actor with ActorLogging {

  context.system.eventStream.subscribe(self, classOf[ChannelCreated])
  context.system.eventStream.subscribe(self, classOf[ChannelRestored])
  context.system.eventStream.subscribe(self, classOf[ChannelIdAssigned])
  context.system.eventStream.subscribe(self, classOf[ShortChannelIdAssigned])

  override def receive: Receive = main(Map.empty, Map.empty, Map.empty)

  def main(channels: Map[ByteVector32, ActorRef], shortIds: Map[ShortChannelId, ByteVector32], channelsTo: Map[ByteVector32, PublicKey]): Receive = {
    case ChannelCreated(channel, _, remoteNodeId, _, temporaryChannelId, _, _) =>
      context.watch(channel)
      context become main(channels + (temporaryChannelId -> channel), shortIds, channelsTo + (temporaryChannelId -> remoteNodeId))

    case ChannelRestored(channel, _, remoteNodeId, _, channelId, _) =>
      context.watch(channel)
      context become main(channels + (channelId -> channel), shortIds, channelsTo + (channelId -> remoteNodeId))

    case ChannelIdAssigned(channel, remoteNodeId, temporaryChannelId, channelId) =>
      context become main(channels + (channelId -> channel) - temporaryChannelId, shortIds, channelsTo + (channelId -> remoteNodeId) - temporaryChannelId)

    case ShortChannelIdAssigned(_, channelId, shortChannelId, _) =>
      context become main(channels, shortIds + (shortChannelId -> channelId), channelsTo)

    case Terminated(actor) if channels.values.toSet.contains(actor) =>
      val channelId = channels.find(_._2 == actor).get._1
      val shortChannelId = shortIds.find(_._2 == channelId).map(_._1).getOrElse(ShortChannelId(0L))
      context become main(channels - channelId, shortIds - shortChannelId, channelsTo - channelId)

    case Symbol("channels") => sender ! channels

    case Symbol("shortIds") => sender ! shortIds

    case Symbol("channelsTo") => sender ! channelsTo

    case fwd@Forward(channelId, msg) =>
      channels.get(channelId) match {
        case Some(channel) => channel forward msg
        case None => sender ! Failure(ForwardFailure(fwd))
      }

    case fwd@ForwardShortId(shortChannelId, msg) =>
      shortIds.get(shortChannelId).flatMap(channels.get) match {
        case Some(channel) => channel forward msg
        case None => sender ! Failure(ForwardShortIdFailure(fwd))
      }
  }
}

object Register {

  // @formatter:off
  case class Forward[T](channelId: ByteVector32, message: T)
  case class ForwardShortId[T](shortChannelId: ShortChannelId, message: T)

  case class ForwardFailure[T](fwd: Forward[T]) extends RuntimeException(s"channel ${fwd.channelId} not found")
  case class ForwardShortIdFailure[T](fwd: ForwardShortId[T]) extends RuntimeException(s"channel ${fwd.shortChannelId} not found")
  // @formatter:on
} 
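To illustrate how the Forward messages are intended to be used, here is a small sketch based on the ask pattern; the helper name and the Any-typed payload are assumptions, not part of the eclair source above.

import akka.actor.ActorRef
import akka.pattern.ask
import akka.util.Timeout
import fr.acinq.bitcoin.ByteVector32
import fr.acinq.eclair.channel.Register

import scala.concurrent.Future
import scala.concurrent.duration._

object RegisterUsage {
  implicit val timeout: Timeout = Timeout(30.seconds)

  // Wraps the payload in Register.Forward so the Register can look up the
  // channel actor and forward the message; if the channel id is unknown the
  // reply is a Status.Failure(ForwardFailure(...)) instead.
  def sendToChannel(register: ActorRef, channelId: ByteVector32, payload: Any): Future[Any] =
    register ? Register.Forward(channelId, payload)
}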
Example 53
Source File: Controller.scala    From eclair   with Apache License 2.0
package fr.acinq.eclair.tor

import java.net.InetSocketAddress

import akka.actor.{Actor, ActorLogging, OneForOneStrategy, Props, SupervisorStrategy, Terminated}
import akka.io.{IO, Tcp}
import akka.util.ByteString

import scala.concurrent.ExecutionContext


/**
 * Connects to the Tor control port and relays traffic between the TCP
 * connection and the protocol handler actor, stopping itself when the
 * connection dies.
 */
class Controller(address: InetSocketAddress, protocolHandlerProps: Props)
                (implicit ec: ExecutionContext = ExecutionContext.global) extends Actor with ActorLogging {

  import Controller._
  import Tcp._
  import context.system

  IO(Tcp) ! Connect(address)

  def receive = {
    case e@CommandFailed(_: Connect) =>
      e.cause match {
        case Some(ex) => log.error(ex, "Cannot connect")
        case _ => log.error("Cannot connect")
      }
      context stop self
    case c: Connected =>
      val protocolHandler = context actorOf protocolHandlerProps
      protocolHandler ! c
      val connection = sender()
      connection ! Register(self)
      context watch connection
      context become {
        case data: ByteString =>
          connection ! Write(data)
        case CommandFailed(w: Write) =>
          // O/S buffer was full
          protocolHandler ! SendFailed
          log.error("Tor command failed")
        case Received(data) =>
          protocolHandler ! data
        case _: ConnectionClosed =>
          context stop self
        case Terminated(actor) if actor == connection =>
          context stop self
      }
  }

  // we should not restart a failing tor session
  override val supervisorStrategy = OneForOneStrategy(loggingEnabled = true) { case _ => SupervisorStrategy.Escalate }

}

object Controller {
  def props(address: InetSocketAddress, protocolHandlerProps: Props)(implicit ec: ExecutionContext = ExecutionContext.global) =
    Props(new Controller(address, protocolHandlerProps))

  case object SendFailed

}
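Finally, an illustrative way to wire up the Controller above; the actor system name, the control-port address and the logging protocol handler are all assumptions made for the sake of the sketch.

import java.net.InetSocketAddress

import akka.actor.{Actor, ActorSystem, Props}
import fr.acinq.eclair.tor.Controller

// A stand-in protocol handler that simply logs whatever the Controller relays.
class LoggingHandler extends Actor {
  def receive = { case msg => println(s"from tor controller: $msg") }
}

object ControllerUsage extends App {
  val system = ActorSystem("tor-demo")

  // The Controller connects to the given address (9051 is Tor's default control
  // port), registers itself for the TCP connection and stops when the watched
  // connection actor terminates.
  system.actorOf(Controller.props(new InetSocketAddress("127.0.0.1", 9051), Props(new LoggingHandler)), "tor-controller")
}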