akka.actor.ActorLogging Scala Examples

The following examples show how to use akka.actor.ActorLogging. Each example notes its source file, the project it comes from, and that project's license.
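As a quick orientation before the project-sourced examples, here is a minimal, self-contained sketch of mixing ActorLogging into a classic actor to obtain the `log` member (the class and message names are illustrative, not from any project below):

import akka.actor.{ Actor, ActorLogging, ActorSystem, Props }

class Greeter extends Actor with ActorLogging {
  // `log` is provided by ActorLogging; the {} placeholders are filled in by the logging backend
  override def receive: Receive = {
    case name: String => log.info("Hello, {}", name)
    case other        => log.warning("Unexpected message: {}", other)
  }
}

object GreeterApp extends App {
  val system = ActorSystem("demo")
  val greeter = system.actorOf(Props[Greeter], "greeter")
  greeter ! "world"
  Thread.sleep(500)
  system.terminate()
}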
Example 1
Source File: Main.scala    From whirlwind-tour-akka-typed   with Apache License 2.0
package de.heikoseeberger.wtat

import akka.actor.{ Actor, ActorLogging, ActorSystem, Props, Terminated }
import akka.actor.typed.SupervisorStrategy.restartWithBackoff
import akka.actor.typed.scaladsl.Actor.supervise
import akka.cluster.Cluster
import akka.cluster.bootstrap.ClusterBootstrap
import akka.cluster.http.management.ClusterHttpManagement
import akka.cluster.typed.{ ClusterSingleton, ClusterSingletonSettings }
import akka.persistence.cassandra.query.scaladsl.CassandraReadJournal
import akka.persistence.query.PersistenceQuery
import akka.stream.{ ActorMaterializer, Materializer }
import pureconfig.loadConfigOrThrow

object Main {
  import akka.actor.typed.scaladsl.adapter._

  final class Root(config: Config) extends Actor with ActorLogging {

    private implicit val mat: Materializer = ActorMaterializer()

    private val clusterSingletonSettings = ClusterSingletonSettings(context.system.toTyped)

    private val userRepository =
      ClusterSingleton(context.system.toTyped).spawn(UserRepository(),
                                                     UserRepository.Name,
                                                     akka.actor.typed.Props.empty,
                                                     clusterSingletonSettings,
                                                     UserRepository.Stop)

    private val userView = context.spawn(UserView(), UserView.Name)

    private val userProjection = {
      import config.userProjection._
      val readJournal =
        PersistenceQuery(context.system)
          .readJournalFor[CassandraReadJournal](CassandraReadJournal.Identifier)
      val userProjection =
        supervise(UserProjection(readJournal, userView, askTimeout))
          .onFailure[UserProjection.EventStreamCompleteException](
            restartWithBackoff(minBackoff, maxBackoff, 0)
          )
      ClusterSingleton(context.system.toTyped).spawn(userProjection,
                                                     UserProjection.Name,
                                                     akka.actor.typed.Props.empty,
                                                     clusterSingletonSettings,
                                                     UserProjection.Stop)
    }

    private val api = {
      import config.api._
      context.spawn(Api(address, port, userRepository, userView, askTimeout), Api.Name)
    }

    context.watch(userRepository)
    context.watch(userView)
    context.watch(userProjection)
    context.watch(api)
    log.info(s"${context.system.name} up and running")

    override def receive = {
      case Terminated(actor) =>
        log.error(s"Shutting down, because actor ${actor.path} terminated!")
        context.system.terminate()
    }
  }

  def main(args: Array[String]): Unit = {
    sys.props += "log4j2.contextSelector" -> "org.apache.logging.log4j.core.async.AsyncLoggerContextSelector"

    val config  = loadConfigOrThrow[Config]("wtat")
    val system  = ActorSystem("wtat")
    val cluster = Cluster(system)

    if (config.useClusterBootstrap) {
      ClusterHttpManagement(cluster).start()
      ClusterBootstrap(system).start()
    }

    cluster.registerOnMemberUp(system.actorOf(Props(new Root(config)), "root"))
  }
} 
Example 2
Source File: PredictionLogger.scala    From ForestFlow   with Apache License 2.0
package ai.forestflow.event.subscribers

import java.nio.ByteOrder

import ai.forestflow.domain.{PredictionEvent, PredictionEventGP}
import ai.forestflow.serving.config.ApplicationEnvironment
import akka.actor.{Actor, ActorLogging, Props}
import akka.kafka.ProducerSettings
import graphpipe.InferRequest
import org.apache.kafka.clients.producer.ProducerRecord
import org.apache.kafka.common.serialization.{ByteArraySerializer, StringSerializer}
//import scalapb.json4s.JsonFormat

import scala.util.{Success, Try}

// Assumption: in the full source, PredictionLogger is declared as an Actor class
// with optional Kafka topics and a `producerConfig`; those declarations are
// elided from this listing, so the signature below is inferred from usage.
class PredictionLogger(basic_topic: Option[String], gp_topic: Option[String])
  extends Actor with ActorLogging {

  private lazy val binaryProducerSettings =
    ProducerSettings(producerConfig, new StringSerializer, new ByteArraySerializer)
  private lazy val binaryProducer = binaryProducerSettings.createKafkaProducer()

  override def preStart(): Unit = {
    if (basic_topic.isDefined)
      context.system.eventStream.subscribe(self, classOf[PredictionEvent])

    if (gp_topic.isDefined)
      context.system.eventStream.subscribe(self, classOf[PredictionEventGP])
    super.preStart()
  }
  override def receive: Receive = {
    case event@PredictionEvent(prediction, servedRequest, inferenceRequest, loggingSettings) =>

      val key = loggingSettings
        .keyFeatures
        .flatMap(inferenceRequest.configs.get)
        .mkString(loggingSettings.getKeyFeaturesSeparator)

      if (key.nonEmpty)
        binaryProducer.send(new ProducerRecord(basic_topic.get, key, event.toByteArray))
      else
        binaryProducer.send(new ProducerRecord(basic_topic.get, event.toByteArray))

    case event@PredictionEventGP(prediction, servedRequest, inferBytes, loggingSettings) =>
      Try {
        val req = graphpipe.Request.getRootAsRequest(inferBytes.asReadOnlyByteBuffer().order(ByteOrder.LITTLE_ENDIAN))
        val inferRequest = req.req(new InferRequest()).asInstanceOf[InferRequest]
        val inferConfigs = inferRequest.config()
          .split(",")
          .map(_.split(":"))
          .flatMap {
            case Array(k, v) => Some((k, v))
            case _           => None
          }
          .toMap

        loggingSettings
          .keyFeatures
          .flatMap(inferConfigs.get)
          .mkString(loggingSettings.getKeyFeaturesSeparator)

      } match {
        case Success(key) =>
          binaryProducer.send(new ProducerRecord(gp_topic.get, key, event.toByteArray))
        case _ =>
          binaryProducer.send(new ProducerRecord(gp_topic.get, event.toByteArray))
      }

    case _ => // ignore
  }
} 
Example 3
Source File: HasPersistence.scala    From ForestFlow   with Apache License 2.0
package ai.forestflow.serving.cluster

import akka.actor.{Actor, ActorLogging}
import akka.persistence.PersistentActor

trait HasPersistence extends ActorLogging {
  this: PersistentActor =>

  def persistencePrefix: String

  override def persistenceId: String = {
    /*log.info(s"Getting persistenceId: akka.serialization.Serialization.serializedActorPath(self) = ${akka.serialization.Serialization.serializedActorPath(self)}")
    log.info(s"self.path.address = ${self.path.address}")
    log.info(s"self.path.elements.toList.mkString('-') = ${self.path.elements.toList.mkString("-")}")
    log.info(s"self.path.elements.toString() = ${self.path.elements.toString()}")
    log.info(s"self.path.toStringWithAddress(self.path.address) = ${self.path.toStringWithAddress(self.path.address)}")
    log.info(s"self.path.toString = ${self.path.toString}")*/
    s"$persistencePrefix-${context.parent.path.name}-${self.path.name}"
  }
} 
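A short usage sketch may help: any PersistentActor can mix in HasPersistence and supply only the prefix, and the trait derives the persistenceId from the actor's place in the hierarchy. The actor name here is illustrative, not part of ForestFlow:

import akka.persistence.PersistentActor

class ShardedEntity extends PersistentActor with HasPersistence {
  override def persistencePrefix: String = "entity"

  override def receiveRecover: Receive = {
    case _ => // replay handling elided for brevity
  }

  override def receiveCommand: Receive = {
    case cmd =>
      // persistenceId resolves to "entity-<parent name>-<actor name>" via HasPersistence
      persist(cmd)(evt => log.debug("Persisted {}", evt))
  }
}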
Example 4
Source File: ClusterEventSubscription.scala    From ForestFlow   with Apache License 2.0
package ai.forestflow.serving.cluster

import akka.actor.{Actor, ActorLogging}
import akka.cluster.ClusterEvent.{MemberEvent, MemberRemoved, MemberUp, UnreachableMember}
import akka.cluster.{Cluster, ClusterEvent}

trait ClusterEventSubscription {
  this: Actor with ActorLogging =>

  implicit val cluster: Cluster = Cluster(context.system)

  // subscribe to cluster changes, re-subscribe when restart
  override def preStart(): Unit = {
    cluster.subscribe(self, ClusterEvent.InitialStateAsEvents, classOf[MemberEvent], classOf[UnreachableMember])
  }
  override def postStop(): Unit = cluster.unsubscribe(self)

  def clusterEvents : Receive = {
    case MemberUp(member) =>
      log.info("Member is Up: {}", member.address)
    case UnreachableMember(member) =>
      log.info("Member detected as unreachable: {}", member)
    case MemberRemoved(member, previousStatus) =>
      log.info("Member is Removed: {} after {}",
        member.address, previousStatus)
    case _: MemberEvent => // ignore
  }
} 
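Because `clusterEvents` is a `Receive` partial function, an actor that mixes in ClusterEventSubscription typically composes it with its own handler via `orElse`; a minimal sketch (the actor and message handling are illustrative):

import akka.actor.{ Actor, ActorLogging }

class MemberAwareWorker extends Actor with ActorLogging with ClusterEventSubscription {
  private def working: Receive = {
    case job: String => log.info("Processing job {}", job)
  }

  // Try the cluster-membership handler first, then fall through to our own messages
  override def receive: Receive = clusterEvents orElse working
}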
Example 5
Source File: NodeActor.scala    From ForestFlow   with Apache License 2.0
package ai.forestflow.serving.cluster

import java.io.File

import akka.actor.{Actor, ActorLogging, ActorRef, Props, Timers}
import akka.cluster.Cluster
import akka.cluster.pubsub.DistributedPubSub
import akka.cluster.pubsub.DistributedPubSubMediator.Subscribe
import ai.forestflow.domain.CleanupLocalStorage
import org.apache.commons.io.FileUtils
import com.typesafe.scalalogging.LazyLogging
import ai.forestflow.utils.ThrowableImplicits._

import scala.util.{Failure, Success, Try}

/***
 * This actor is responsible for node-level (host-level) stuff that should be done on a per-node basis.
 * A good example of this is file system cleanup tasks.
 */
object NodeActor extends LazyLogging {
  
  def props(): Props =
    Props(new NodeActor)
      .withDispatcher("blocking-io-dispatcher")

  def cleanupLocalStorage(path: String): Unit = {
    val localDir = new File(path)
    val localDirExists = localDir.exists()
    logger.info(s"Cleaning up local storage: Local Directory: $localDir , exists? $localDirExists")
    if (localDirExists)
      Try(FileUtils.deleteDirectory(localDir)) match {
        case Success(_) => logger.info(s"Local Directory $localDir cleaned up successfully")
        case Failure(ex) => logger.error(s"Local Directory $localDir cleanup failed! Reason: ${ex.printableStackTrace}")
      }
  }
}

class NodeActor extends Actor
  with ActorLogging
  with Timers {

  
  implicit val cluster: Cluster = Cluster(context.system)
  val mediator: ActorRef = DistributedPubSub(context.system).mediator

  mediator ! Subscribe(classOf[CleanupLocalStorage].getSimpleName, self)

  override def receive: Receive = {
    case CleanupLocalStorage(path) =>
      NodeActor.cleanupLocalStorage(path)
  }
} 
Example 6
Source File: Reader.scala    From Raphtory   with Apache License 2.0
package com.raphtory.core.components.PartitionManager

import akka.actor.Actor
import akka.actor.ActorLogging
import akka.actor.ActorRef
import akka.actor.Props
import akka.actor.Terminated
import akka.cluster.pubsub.DistributedPubSubMediator.SubscribeAck
import akka.cluster.pubsub.DistributedPubSub
import akka.cluster.pubsub.DistributedPubSubMediator
import com.raphtory.core.analysis.API.Analyser
import com.raphtory.core.components.PartitionManager.Workers.ReaderWorker
import com.raphtory.core.model.communication._
import com.raphtory.core.storage.EntityStorage
import com.raphtory.core.utils.Utils
import com.twitter.util.Eval

import scala.collection.parallel.mutable.ParTrieMap
import scala.util.Try

class Reader(
    id: Int,
    test: Boolean,
    managerCountVal: Int,
    storage: ParTrieMap[Int, EntityStorage],
    workerCount: Int = 10
) extends Actor
        with ActorLogging {

  implicit var managerCount: Int = managerCountVal

  // Id which refers to the partitions position in the graph manager map
  val managerId: Int = id

  val mediator: ActorRef = DistributedPubSub(context.system).mediator

  mediator ! DistributedPubSubMediator.Put(self)
  mediator ! DistributedPubSubMediator.Subscribe(Utils.readersTopic, self)

  var readers: ParTrieMap[Int, ActorRef] = new ParTrieMap[Int, ActorRef]()

  for (i <- 0 until workerCount) {
    log.debug("Initialising [{}] worker children for Reader [{}}.", workerCount, managerId)

    // create threads for writing
    val child = context.system.actorOf(
            Props(new ReaderWorker(managerCount, managerId, i, storage(i))).withDispatcher("reader-dispatcher"),
            s"Manager_${id}_reader_$i"
    )

    context.watch(child)
    readers.put(i, child)
  }

  override def preStart(): Unit =
    log.debug("Reader [{}] is being started.", managerId)

  override def receive: Receive = {
    case ReaderWorkersOnline()     => sender ! ReaderWorkersACK()
    case req: AnalyserPresentCheck => processAnalyserPresentCheckRequest(req)
    case req: UpdatedCounter       => processUpdatedCounterRequest(req)
    case _: SubscribeAck           =>
    case Terminated(child) =>
      log.warning(s"ReaderWorker with path [{}] belonging to Reader [{}] has died.", child.path, managerId)
    case x => log.warning(s"Reader [{}] received unknown [{}] message.", managerId, x)
  }

  def processAnalyserPresentCheckRequest(req: AnalyserPresentCheck): Unit = {
    log.debug(s"Reader [{}] received [{}] request.", managerId, req)

    val className   = req.className
    val classExists = Try(Class.forName(className))

    classExists.toEither.fold(
            { _: Throwable =>
              log.debug("Class [{}] was not found within this image.", className)

              sender ! ClassMissing()
            }, { _: Class[_] =>
              log.debug(s"Class [{}] exists. Proceeding.", className)

              sender ! AnalyserPresent()
            }
    )
  }

  def processUpdatedCounterRequest(req: UpdatedCounter): Unit = {
    log.debug("Reader [{}] received [{}] request.", managerId, req)

    managerCount = req.newValue
    readers.foreach(x => x._2 ! UpdatedCounter(req.newValue))
  }
} 
Example 7
Source File: SeedActor.scala    From Raphtory   with Apache License 2.0
package com.raphtory.core.components.ClusterManagement

import akka.actor.Actor
import akka.actor.ActorLogging
import akka.cluster.Cluster
import akka.cluster.ClusterEvent._
import com.raphtory.core.clustersetup.DocSvr

class SeedActor(svr: DocSvr) extends Actor with ActorLogging {
  val cluster: Cluster = Cluster(context.system)

  override def preStart(): Unit = {
    log.debug("SeedActor is being started.")

    cluster.subscribe(self, initialStateMode = InitialStateAsEvents, classOf[MemberEvent], classOf[UnreachableMember])
  }

  override def postStop(): Unit = cluster.unsubscribe(self)

  def receive: Receive = {

    case evt: MemberUp          => processMemberUpEvent(evt)
    case evt: MemberRemoved     => processMemberRemovedEvent(evt)
    case evt: UnreachableMember => processUnreachableMemberEvent(evt)
    case evt: MemberExited      => processMemberExitedEvent(evt)
    case x                      => log.warning("SeedActor received unknown [{}] message.", x)
  }

  private def processMemberUpEvent(evt: MemberUp): Unit = {
    log.debug(s"SeedActor received [{}] event.", evt)

    svr.nodes.synchronized {
      svr.nodes += evt.member
    }
  }

  private def processMemberRemovedEvent(evt: MemberRemoved): Unit = {
    log.debug(s"SeedActor received [{}] event.", evt)

    svr.nodes.synchronized {
      svr.nodes -= evt.member
    }
  }
  private def processMemberExitedEvent(evt: MemberExited): Unit = {
    log.debug(s"SeedActor received [{}] event.", evt)

    svr.nodes.synchronized {
      svr.nodes -= evt.member
    }
  }

  private def processUnreachableMemberEvent(evt: UnreachableMember): Unit = {
    log.debug(s"SeedActor received [{}] event.", evt)

    log.warning("processUnreachableMemberEvent in SeedActor has not been implemented. Ignoring request.")
  }
} 
Example 8
Source File: WatermarkManager.scala    From Raphtory   with Apache License 2.0
package com.raphtory.core.components.ClusterManagement

import akka.actor.{Actor, ActorLogging, ActorRef}
import akka.cluster.pubsub.{DistributedPubSub, DistributedPubSubMediator}
import com.raphtory.core.model.communication.{UpdateArrivalTime, WatermarkTime}
import kamon.Kamon

import scala.collection.mutable
import scala.collection.parallel.mutable.ParTrieMap

case class queueItem(wallclock: Long, timestamp: Long) extends Ordered[queueItem] {
  def compare(that: queueItem): Int = (that.timestamp - this.timestamp).toInt
}

class WatermarkManager(managerCount: Int) extends Actor with ActorLogging {

  val spoutWallClock = Kamon.histogram("Raphtory_Wall_Clock").withTag("Actor","Watchdog")
  val safeTime       = Kamon.gauge("Raphtory_Safe_Time").withTag("actor",s"WatermarkManager")

  val watermarkqueue = mutable.PriorityQueue[queueItem]()
  private val safeMessageMap = ParTrieMap[String, Long]()
  var counter = 0
  val mediator: ActorRef = DistributedPubSub(context.system).mediator
  mediator ! DistributedPubSubMediator.Put(self)
  override def receive: Receive = {
    case u: UpdateArrivalTime => processUpdateArrivalTime(u)
    case u: WatermarkTime     => processWatermarkTime(u)
  }

  def processUpdateArrivalTime(u: UpdateArrivalTime): Unit =
    watermarkqueue += queueItem(u.wallClock, u.time)

  def processWatermarkTime(u: WatermarkTime): Unit = {
    val currentTime = System.currentTimeMillis()
    safeMessageMap.put(sender().toString(), u.time)
    counter += 1
    if (counter % (10 * managerCount) == 0) {
      val watermark = safeMessageMap.map(_._2).min
      safeTime.update(watermark)
      while (watermarkqueue.nonEmpty && watermarkqueue.head.timestamp <= watermark) {
        spoutWallClock.record(currentTime - watermarkqueue.dequeue().wallclock)
      }
    }
  }
} 
Example 9
Source File: KafkaClientActor.scala    From remora   with MIT License
import KafkaClientActor.{DescribeKafkaCluster, DescribeKafkaConsumerGroup, ListConsumers}
import akka.actor.{Actor, ActorLogging, Props}
import akka.pattern.pipe
import kafka.admin.RemoraKafkaConsumerGroupService
import nl.grons.metrics.scala.{ActorInstrumentedLifeCycle, ReceiveCounterActor, ReceiveExceptionMeterActor, ReceiveTimerActor}

object KafkaClientActor {

  sealed trait Command

  case class DescribeKafkaConsumerGroup(consumerGroupName: String) extends Command
  object DescribeKafkaCluster extends Command
  object ListConsumers extends Command

  def props(kafkaConsumerGroupService: RemoraKafkaConsumerGroupService) = Props(classOf[KafkaClientActor], kafkaConsumerGroupService)

}

class BaseKafkaClientActor(kafkaConsumerGroupService: RemoraKafkaConsumerGroupService) extends Actor with ActorLogging
  with nl.grons.metrics.scala.DefaultInstrumented with ActorInstrumentedLifeCycle {

  import context.dispatcher

  def receive: Receive = {
    case DescribeKafkaCluster =>
      log.info(s"Received request for cluster description")
      kafkaConsumerGroupService.describeCluster() pipeTo sender
    case DescribeKafkaConsumerGroup(consumerGroupName) =>
      log.info(s"Received request for $consumerGroupName")
      kafkaConsumerGroupService.describeConsumerGroup(consumerGroupName) pipeTo sender
    case ListConsumers =>
      log.info(s"Received request for consumer list")
      kafkaConsumerGroupService.list() pipeTo sender
  }
}

class KafkaClientActor(kafkaConsumerGroupService: RemoraKafkaConsumerGroupService)
  extends BaseKafkaClientActor(kafkaConsumerGroupService) with ReceiveCounterActor with ReceiveTimerActor with ReceiveExceptionMeterActor 
Example 10
Source File: TestTaggingActor.scala    From akka-persistence-cassandra   with Apache License 2.0
package akka.persistence.cassandra

import akka.actor.{ ActorLogging, ActorRef, Props }
import akka.persistence.cassandra.journal.TagWriterSpec.TestEx
import akka.persistence.{ PersistentActor, RecoveryCompleted, SaveSnapshotSuccess }
import akka.persistence.journal.Tagged

object TestTaggingActor {
  case object Ack
  case object Crash
  case object DoASnapshotPlease
  case object SnapShotAck
  case object Stop

  def props(pId: String, tags: Set[String] = Set(), probe: Option[ActorRef] = None): Props =
    Props(new TestTaggingActor(pId, tags, probe))
}

class TestTaggingActor(val persistenceId: String, tags: Set[String], probe: Option[ActorRef])
    extends PersistentActor
    with ActorLogging {
  import TestTaggingActor._

  def receiveRecover: Receive = {
    case RecoveryCompleted =>
      probe.foreach(_ ! RecoveryCompleted)
    case _ =>
  }

  def receiveCommand: Receive = normal

  def normal: Receive = {
    case event: String =>
      log.debug("Persisting {}", event)
      persist(Tagged(event, tags)) { e =>
        processEvent(e)
        sender() ! Ack
      }
    case Crash =>
      throw TestEx("oh dear")
    case DoASnapshotPlease =>
      saveSnapshot("i don't have any state :-/")
      context.become(waitingForSnapshot(sender()))
    case Stop =>
      context.stop(self)

  }

  def waitingForSnapshot(who: ActorRef): Receive = {
    case SaveSnapshotSuccess(_) =>
      who ! SnapShotAck
      context.become(normal)
  }

  def processEvent: Receive = {
    case _ =>
  }
} 
Example 11
Source File: ClusterShardingQuickTerminationSpec.scala    From akka-persistence-cassandra   with Apache License 2.0
package akka.persistence.cassandra.sharding

import akka.actor.{ ActorLogging, ActorRef, Props, ReceiveTimeout }
import akka.cluster.{ Cluster, MemberStatus }
import akka.cluster.sharding.{ ClusterSharding, ClusterShardingSettings, ShardRegion }
import akka.persistence.PersistentActor
import akka.persistence.cassandra.CassandraSpec
import akka.testkit.TestProbe

import scala.concurrent.duration._

object ClusterShardingQuickTerminationSpec {

  case object Increment
  case object Decrement
  final case class Get(counterId: Long)
  final case class EntityEnvelope(id: Long, payload: Any)
  case object Ack

  case object Stop
  final case class CounterChanged(delta: Int)

  class Counter extends PersistentActor with ActorLogging {
    import ShardRegion.Passivate

    context.setReceiveTimeout(5.seconds)

    // self.path.name is the entity identifier (utf-8 URL-encoded)
    override def persistenceId: String = "Counter-" + self.path.name

    var count = 0

    def updateState(event: CounterChanged): Unit =
      count += event.delta

    override def receiveRecover: Receive = {
      case evt: CounterChanged => updateState(evt)
      case other               => log.debug("Other: {}", other)
    }

    override def receiveCommand: Receive = {
      case Increment      => persist(CounterChanged(+1))(updateState)
      case Decrement      => persist(CounterChanged(-1))(updateState)
      case Get(_)         => sender() ! count
      case ReceiveTimeout => context.parent ! Passivate(stopMessage = Stop)
      case Stop =>
        sender() ! Ack
        context.stop(self)
    }
  }
  val extractEntityId: ShardRegion.ExtractEntityId = {
    case EntityEnvelope(id, payload) => (id.toString, payload)
    case msg @ Get(id)               => (id.toString, msg)
  }

  val numberOfShards = 100

  val extractShardId: ShardRegion.ExtractShardId = {
    case EntityEnvelope(id, _) => (id % numberOfShards).toString
    case Get(id)               => (id % numberOfShards).toString
  }

}

class ClusterShardingQuickTerminationSpec extends CassandraSpec("""
    akka.actor.provider = cluster
  """.stripMargin) {

  import ClusterShardingQuickTerminationSpec._

  "Cassandra Plugin with Cluster Sharding" must {
    "clear state if persistent actor shuts down" in {
      Cluster(system).join(Cluster(system).selfMember.address)
      awaitAssert {
        Cluster(system).selfMember.status shouldEqual MemberStatus.Up
      }
      ClusterSharding(system).start(
        typeName = "tagging",
        entityProps = Props[Counter],
        settings = ClusterShardingSettings(system),
        extractEntityId = extractEntityId,
        extractShardId = extractShardId)

      (0 to 100).foreach { i =>
        val counterRegion: ActorRef = ClusterSharding(system).shardRegion("tagging")
        awaitAssert {
          val sender = TestProbe()
          counterRegion.tell(Get(123), sender.ref)
          sender.expectMsg(500.millis, i)
        }

        counterRegion ! EntityEnvelope(123, Increment)
        counterRegion ! Get(123)
        expectMsg(i + 1)

        counterRegion ! EntityEnvelope(123, Stop)
        expectMsg(Ack)
      }
    }
  }
} 
Example 12
Source File: Blackboard.scala    From CSYE7200_Old   with MIT License
package edu.neu.coe.csye7200.hedge_fund.actors

import akka.actor.{ Actor, Props, ActorLogging, ActorRef }


class Blackboard(forwardMap: Map[Class[_ <: Any], String], actors: Map[String, Class[_ <: BlackboardActor]]) extends Actor with ActorLogging {

  val actorMap: Map[String, ActorRef] = actors map {
    case (k, v) => k -> context.actorOf(Props.create(v, self), k)
  }

  // To encode specific, non-forwarding behavior, override this method
  override def receive: PartialFunction[Any, Unit] = {
    case message =>
      forwardMap.get(message.getClass) match {
        case Some(s) => actorMap.get(s) match {
          case Some(k) => k forward message
          case _ => log.warning(s"no actor established for key $s")
        }
        case _ => log.warning(s"no forward mapping established for message class ${message.getClass}")
      }
  }
} 
Example 13
Source File: Blackboard.scala    From CSYE7200_Old   with MIT License
package edu.neu.coe.csye7200.actors

import akka.actor.{Actor, ActorLogging, ActorRef, Props}


class Blackboard(forwardMap: Map[Class[_ <: Any], String], actors: Map[String, Class[_ <: BlackboardActor]]) extends Actor with ActorLogging {

  val actorMap: Map[String, ActorRef] = actors map {
    case (k, v) => k -> context.actorOf(Props.create(v, self), k)
  }

  // To encode specific, non-forwarding behavior, override this method
  override def receive: PartialFunction[Any, Unit] = {
    case message =>
      forwardMap.get(message.getClass) match {
        case Some(s) => actorMap.get(s) match {
          case Some(k) => k forward message
          case _ => log.warning(s"no actor established for key $s")
        }
        case _ => log.warning(s"no forward mapping established for message class ${message.getClass}")
      }
  }
} 
Example 14
Source File: Mapper.scala    From CSYE7200_Old   with MIT License
package edu.neu.coe.csye7200.mapreduce

import akka.actor.{Actor, ActorLogging, ActorRef}

import scala.collection.mutable
import scala.collection.mutable.HashMap
import scala.util._


class Mapper_Forgiving[K1,V1,K2,V2](f: (K1,V1)=>(K2,V2)) extends Mapper[K1,V1,K2,V2](f) {

  override def prepareReply(v2k2ts: Seq[Try[(K2,V2)]]) = {
    val v2sK2m = mutable.HashMap[K2, Seq[V2]]() // mutable map of key -> values
    val xs = mutable.ListBuffer[Throwable]() // mutable buffer: an immutable Seq with :+ would silently discard failures
    for (v2k2t <- v2k2ts; v2k2e = Master.sequence(v2k2t))
      v2k2e match {
        case Right((k2, v2)) => v2sK2m.put(k2, v2 +: v2sK2m.getOrElse(k2, Nil))
        case Left(x)         => xs += x
      }
    (v2sK2m.toMap, xs)
  }
}

case class Incoming[K, V](m: Seq[(K,V)]) {
  override def toString = s"Incoming: with ${m.size} elements"
}

object Incoming {
  def sequence[K,V](vs: Seq[V]): Incoming[K,V] = Incoming((vs zip Stream.continually(null.asInstanceOf[K])).map{_.swap})
  def map[K, V](vKm: Map[K,V]): Incoming[K,V] = Incoming(vKm.toSeq)
}

object Mapper {
} 
Example 15
Source File: Reducer.scala    From CSYE7200_Old   with MIT License
package edu.neu.coe.csye7200.mapreduce

import akka.actor.{ Actor, ActorLogging, ActorRef }
//import scala.collection.mutable.HashMap
import scala.util._


class Reducer_Fold[K2,V2,V3](g: (V3,V2)=>V3, z: =>V3) extends ReducerBase[K2,V2,V3] {  
  def getValue(vs: Seq[V2]): V3 = vs.foldLeft(z)(g)
}

abstract class ReducerBase[K2,V2,V3] extends Actor with ActorLogging {
  
  override def receive = {
    case i: Intermediate[K2,V2] =>
      log.info(s"received $i")
      log.debug(s"with elements ${i.vs}")
      sender ! (i.k, Master.sequence(Try(getValue(i.vs))))
    case q =>
      log.warning(s"received unknown message type: $q")
  }
  
  override def postStop = {
    log.debug("has shut down")
  }
  
  def getValue(vs: Seq[V2]): V3
}


case class Intermediate[K2, V2](k: K2, vs: Seq[V2]) {
  override def toString = s"Intermediate: with k=$k and ${vs.size} elements"
} 
Example 16
Source File: UserHandler.scala    From reactive-microservices   with MIT License
import akka.actor.{ActorLogging, ActorRef, PoisonPill, Props}
import akka.persistence.PersistentActor
import akka.routing.{RemoveRoutee, ActorRefRoutee, AddRoutee}
import btc.common.UserHandlerMessages._
import btc.common.WebSocketHandlerMessages.{OperationSuccessful, Alarm, AllSubscriptions}
import scala.collection.mutable
import scala.concurrent.duration._
import UserHandler._

object UserHandler {
  case object KeepAlive

  case class Ticker(max: BigDecimal, min: BigDecimal, last: BigDecimal, bid: BigDecimal, ask: BigDecimal, vwap: BigDecimal, average: BigDecimal, volume: BigDecimal)

  def props(userId: Long, wsActor: ActorRef, broadcaster: ActorRef, keepAliveTimeout: FiniteDuration) = {
    Props(new UserHandler(userId, wsActor, broadcaster, keepAliveTimeout))
  }
}

class UserHandler(userId: Long, wsActor: ActorRef, broadcaster: ActorRef, keepAliveTimeout: FiniteDuration) extends PersistentActor with ActorLogging {
  override val persistenceId: String = userId.toString

  override def preStart(): Unit = {
    super.preStart()
    broadcaster ! AddRoutee(ActorRefRoutee(self))
  }

  override def postStop(): Unit = {
    super.postStop()
    broadcaster ! RemoveRoutee(ActorRefRoutee(self))
  }

  override def receiveRecover: Receive = {
    case subscribe: Subscribe => updateState(subscribe)
    case unsubscribe: Unsubscribe => updateState(unsubscribe)
  }

  override def receiveCommand: Receive = {
    case KeepAlive if System.currentTimeMillis() - lastHeartBeatTime > keepAliveTimeout.toMillis =>
      log.info(s"Timeout while waiting for heartbeat for user $userId, stopping")
      self ! PoisonPill
    case Heartbeat =>
      log.debug(s"Got heartbeat for user $userId")
      lastHeartBeatTime = System.currentTimeMillis()
      sender() ! Heartbeat
    case QuerySubscriptions =>
      log.info(s"Got request for subscriptions for user $userId")
      wsActor ! AllSubscriptions(subscriptions.values.toList)
    case ticker: Ticker =>
      val alarms = getAlarmsForTicker(ticker)
      log.debug(s"Got ticker and sending alarms $alarms for user $userId")
      alarms.foreach(wsActor ! _)
    case subscribe: Subscribe =>
      log.debug(s"Got subscribe request $subscribe for user $userId")
      persist(subscribe) { e =>
        updateState(e)
        wsActor ! OperationSuccessful(e.id)
      }
    case unsubscribe: Unsubscribe =>
      log.debug(s"Got unsubscribe request $unsubscribe for user $userId")
      persist(unsubscribe) { e =>
        updateState(e)
        wsActor ! OperationSuccessful(e.id)
      }
  }

  private def updateState(subscribe: Subscribe) = subscriptions.put(subscribe.id, subscribe)

  private def updateState(unsubscribe: Unsubscribe) = subscriptions.remove(unsubscribe.id)

  private def getAlarmsForTicker(ticker: Ticker): List[Alarm] = {
    subscriptions.values.map {
      case SubscribeRateChange(id) => Option(Alarm(id, ticker.average))
      case SubscribeBidOver(id, threshold) => if (ticker.bid > threshold) Option(Alarm(id, ticker.bid)) else None
      case SubscribeAskBelow(id, threshold) => if (ticker.ask < threshold) Option(Alarm(id, ticker.ask)) else None
      case SubscribeVolumeOver(id, threshold) => if (ticker.volume > threshold) Option(Alarm(id, ticker.volume)) else None
      case SubscribeVolumeBelow(id, threshold) => if (ticker.volume < threshold) Option(Alarm(id, ticker.volume)) else None
    }.toList.flatten
  }

  private val subscriptions = mutable.Map.empty[Long, Subscribe]
  private var lastHeartBeatTime = System.currentTimeMillis()
} 
Example 17
Source File: DataFetcher.scala    From reactive-microservices   with MIT License
import akka.actor.{ActorLogging, Props, Actor, ActorRef}
import com.ning.http.client.AsyncHttpClientConfig.Builder
import play.api.libs.json.Json
import play.api.libs.ws.ning.NingWSClient
import UserHandler.Ticker

class DataFetcher(broadcaster: ActorRef) extends Actor with ActorLogging {
  override def receive: Receive = {
    case DataFetcher.Tick =>
      client.url(url).get().map { response =>
        if (response.status == 200) {
          val ticker = Json.parse(response.body).as[Ticker]
          log.debug(s"Broadcasting ticker $ticker")
          broadcaster ! ticker
        }
      }.onFailure { case t => log.warning(s"Requesting ticker failed because ${t.getMessage}") }
  }

  private implicit val tickerFormat = Json.format[Ticker]
  private implicit val dispatcher = context.dispatcher
  private val url = "https://bitbay.net/API/Public/BTCUSD/ticker.json"
  private val client = new NingWSClient(new Builder().build())
}

object DataFetcher {
  case object Tick

  def props(broadcaster: ActorRef): Props = Props(new DataFetcher(broadcaster))
} 
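DataFetcher only reacts to Tick, so something must schedule the ticks. A sketch of a plausible driver, assuming an ActorSystem `system` and a `broadcaster` ActorRef are already in scope (neither is shown in the example):

import scala.concurrent.duration._

val fetcher = system.actorOf(DataFetcher.props(broadcaster), "data-fetcher")

// Poll the ticker every 10 seconds using the classic scheduler API
import system.dispatcher
system.scheduler.schedule(0.seconds, 10.seconds, fetcher, DataFetcher.Tick)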
Example 18
Source File: NakadiClientImpl.scala    From reactive-nakadi   with MIT License
package org.zalando.react.nakadi.client

import akka.actor.{Actor, ActorLogging, ActorRef, Props}
import akka.stream.{ActorMaterializer, ActorMaterializerSettings}
import org.zalando.react.nakadi.NakadiMessages.{EventTypeMessage, ProducerMessage}
import org.zalando.react.nakadi.client.providers._
import org.zalando.react.nakadi.properties.{ConsumerProperties, ProducerProperties, ServerProperties}
import akka.pattern._
import scala.concurrent.Future


case class Properties(
  serverProperties: ServerProperties,
  tokenProvider: Option[() => String],
  consumerProperties: Option[ConsumerProperties] = None,
  producerProperties: Option[ProducerProperties] = None
)

object NakadiClientImpl {

  def props(consumerProperties: ConsumerProperties) = {
    val p = Properties(
      serverProperties = consumerProperties.serverProperties,
      tokenProvider = consumerProperties.tokenProvider,
      consumerProperties = Option(consumerProperties)
    )
    Props(new NakadiClientImpl(p))
  }

  def props(producerProperties: ProducerProperties) = {
    val p = Properties(
      serverProperties = producerProperties.serverProperties,
      tokenProvider = producerProperties.tokenProvider,
      producerProperties = Option(producerProperties)
    )
    Props(new NakadiClientImpl(p))
  }

  case object MessagePublished
}


class NakadiClientImpl(val properties: Properties) extends Actor
  with ActorLogging
  with NakadiClient {

  import NakadiClientImpl.MessagePublished

  final implicit val materializer = ActorMaterializer(ActorMaterializerSettings(context.system))

  implicit val ec = context.dispatcher

  val clientProvider = new HttpClientProvider(
    actorContext = context,
    server = properties.serverProperties.host,
    port = properties.serverProperties.port,
    isConnectionSSL = properties.serverProperties.isConnectionSSL,
    acceptAnyCertificate = properties.serverProperties.acceptAnyCertificate,
    connectionTimeout = properties.serverProperties.connectionTimeout
  )

  override def postStop() = clientProvider.http.shutdownAllConnectionPools()

  override def receive: Receive = {
    case ConsumeCommand.Start => listenForEvents(sender())
    case producerMessage: ProducerMessage => publishEvent(producerMessage).map(_ => MessagePublished) pipeTo sender()
    case eventTypeMessage: EventTypeMessage => postEventType(eventTypeMessage: EventTypeMessage)
  }

  override def postEventType(eventTypeMessage: EventTypeMessage): Future[Unit] = {
    val postEvents = new PostEventType(properties, context, log, clientProvider)
    postEvents.post(eventTypeMessage)
  }

  override def publishEvent(producerMessage: ProducerMessage): Future[Unit] = {
    val p = properties.producerProperties.getOrElse(sys.error("Producer Properties cannot be None"))
    val produceEvents = new ProduceEvents(p, context, log, clientProvider)
    produceEvents.publish(producerMessage)
  }

  override def listenForEvents(receiverActorRef: ActorRef): Unit = {
    val p = properties.consumerProperties.getOrElse(sys.error("Consumer Properties cannot be None"))
    val consumeEvents = new ConsumeEvents(p, context, log, clientProvider)
    consumeEvents.stream(receiverActorRef)
  }
} 
Example 19
Source File: NakadiActorSubscriber.scala    From reactive-nakadi   with MIT License
package org.zalando.react.nakadi

import akka.actor.{ActorLogging, ActorRef, Props}
import akka.stream.actor.{ActorSubscriber, ActorSubscriberMessage, RequestStrategy}
import org.zalando.react.nakadi.NakadiMessages._


object NakadiActorSubscriber {

  def props(consumerAndProps: ReactiveNakadiProducer, requestStrategyProvider: () => RequestStrategy) = {
    Props(new NakadiActorSubscriber(consumerAndProps, requestStrategyProvider))
  }
}

class NakadiActorSubscriber(producerAndProps: ReactiveNakadiProducer, requestStrategyProvider: () => RequestStrategy)
  extends ActorSubscriber
  with ActorLogging {

  override protected val requestStrategy = requestStrategyProvider()
  private val client: ActorRef = producerAndProps.nakadiClient

  override def receive: Receive = {
    case ActorSubscriberMessage.OnNext(element)   => processElement(element.asInstanceOf[StringProducerMessage])
    case ActorSubscriberMessage.OnError(ex)       => handleError(ex)
    case ActorSubscriberMessage.OnComplete        => stop()
  }

  private def processElement(message: StringProducerMessage) = client ! message

  private def handleError(ex: Throwable) = {
    log.error(ex, "Stopping Nakadi subscriber due to fatal error.")
    stop()
  }

  def stop() = {
    context.stop(self)
  }
} 
Example 20
Source File: LeaseManagerActor.scala    From reactive-nakadi   with MIT License
package org.zalando.react.nakadi

import akka.actor.{Actor, ActorLogging, ActorRef, Props}
import org.zalando.react.nakadi.commit.OffsetMap

import scala.util.{Failure, Success}


object LeaseManagerActor {

  case object LeaseAvailable
  case object LeaseUnavailable
  case class RequestLease(groupId: String, eventType: String, partitionId: String)
  case class ReleaseLease(groupId: String, eventType: String, partitionId: String)
  case class Flush(groupId: String, eventType: String, partitionId: String, offsetMap: OffsetMap)

  def props(leaseManager: LeaseManager) = {
    Props(new LeaseManagerActor(leaseManager))
  }
}

class LeaseManagerActor(leaseManager: LeaseManager) extends Actor with ActorLogging {
  import context.dispatcher  // TODO setup different execution contexts for aws access
  import LeaseManagerActor._

  override def receive: Receive = {
    case msg: Flush => flush(msg)
    case msg: RequestLease  => requestLease(msg)
    case msg: ReleaseLease  => releaseLease(msg)
  }

  private def flush(msg: Flush) = {
    val senderRef = sender
    leaseManager.flush(msg.groupId, msg.eventType, msg.partitionId, msg.offsetMap).onComplete {
      case Failure(err) => log.error(err, "Lease Management error when flushing:")
      case Success(status) if status => senderRef ! LeaseAvailable
      case Success(status) if !status => {
        log.error(s"Lease is not usable for event-type '${msg.eventType}' partition '${msg.partitionId}' group '${msg.groupId}'")
        senderRef ! LeaseUnavailable
      }
    }
  }

  private def requestLease(msg: RequestLease): Unit = {
    val senderRef = sender
    leaseManager.requestLease(msg.groupId, msg.eventType, msg.partitionId).onComplete {
      case Failure(err) => log.error(err, "Lease Management error when requesting lease:")
      case Success(status) if status => senderRef ! LeaseAvailable
      case Success(status) if !status => {
        log.error(s"Lease is not usable for event-type '${msg.eventType}' partition '${msg.partitionId}' group '${msg.groupId}'")
        senderRef ! LeaseUnavailable
      }
    }
  }

  private def releaseLease(msg: ReleaseLease): Unit = {
    val senderRef = sender
    leaseManager.releaseLease(msg.groupId, msg.eventType, msg.partitionId).onComplete {
      case Failure(err) => log.error(err, "Lease Management error when releasing lease:")
      case Success(status) => senderRef ! LeaseUnavailable
    }
  }

  private def sendLeaseUnavailable(senderRef: ActorRef) = {
    senderRef ! LeaseUnavailable
  }
} 
Example 21
Source File: Logging.scala    From akka-dddd-template   with Apache License 2.0
package com.boldradius.util

import akka.actor.{Actor, ActorLogging}
import com.typesafe.scalalogging.LazyLogging
import scala.language.implicitConversions

trait ALogging extends ActorLogging{  this: Actor =>

  implicit def toLogging[V](v: V) : FLog[V] = FLog(v)

  case class FLog[V](v : V)  {
    def logInfo(f: V => String): V = {log.info(f(v)); v}
    def logDebug(f: V => String): V = {log.debug(f(v)); v}
    def logError(f: V => String): V = {log.error(f(v)); v}
    def logWarn(f: V => String): V = {log.warning(f(v)); v}
    def logTest(f: V => String): V = {println(f(v)); v}
  }
}
trait LLogging extends LazyLogging{

  implicit def toLogging[V](v: V) : FLog[V] = FLog(v)

  case class FLog[V](v : V)  {
    def logInfo(f: V => String): V = {logger.info(f(v)); v}
    def logDebug(f: V => String): V = {logger.debug(f(v)); v}
    def logError(f: V => String): V = {logger.error(f(v)); v}
    def logWarn(f: V => String): V = {logger.warn(f(v)); v}
    def logTest(f: V => String): V = {println(f(v)); v}
  }
} 
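Since `toLogging` is an implicit conversion, any intermediate value can be logged inline and passed through unchanged, which suits expression-oriented code; a small sketch (the actor is illustrative):

import akka.actor.Actor

class PricingActor extends Actor with ALogging {
  override def receive: Receive = {
    case n: Int =>
      // logInfo logs the value and returns it, so it can sit in the middle of an expression
      sender() ! (n * 2).logInfo(v => s"doubled input to $v")
  }
}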
Example 22
Source File: CircuitBreakerExample.scala    From Scala-Reactive-Programming   with MIT License
package com.packt.publishing.reactive.patterns.circuitbreaker

import scala.concurrent.duration._
import akka.pattern.CircuitBreaker
import akka.pattern.pipe
import akka.actor.{Actor, ActorLogging, ActorSystem, Props}
import scala.concurrent.Future
import akka.util.Timeout
import akka.pattern.ask

case class MessageOne(msg:String = "is my middle name")
case class MessageTwo(msg:String = "block for me")

class DangerousActor extends Actor with ActorLogging {

  import context.dispatcher

  val breaker =
    new CircuitBreaker(
      context.system.scheduler,
      maxFailures = 5,
      callTimeout = 10.seconds,
      resetTimeout = 1.minute).onOpen(notifyMeOnOpen())

  def notifyMeOnOpen(): Unit =
    log.warning("My CircuitBreaker is now open, and will not close for one minute")

  def dangerousCall: String = "This really isn't that dangerous of a call after all"

  def receive = {
    case MessageOne ⇒
      breaker.withCircuitBreaker(Future(dangerousCall)) pipeTo sender()
    case MessageTwo ⇒
      sender() ! breaker.withSyncCircuitBreaker(dangerousCall)
  }

}

object CircuitBreakerExample extends App {
  implicit val timeout: Timeout = 2.seconds
  val system = ActorSystem("WFSystem")
  val wfTeller = system.actorOf(Props[DangerousActor], "DangerousActor")

  val future1 = wfTeller ? MessageTwo
  val future2 = wfTeller ? MessageTwo
  val future3 = wfTeller ? MessageTwo
  val future4 = wfTeller ? MessageTwo
  val future5 = wfTeller ? MessageTwo
  val future6 = wfTeller ? MessageTwo

  import scala.concurrent.ExecutionContext.Implicits.global
  future1.onComplete { value =>
    println("************ value1 = "  + value)
  }
  future2.onComplete { value =>
    println("************ value2 = "  + value)
  }
  future3.onComplete { value =>
    println("************ value3 = "  + value)
  }
  future4.onComplete { value =>
    println("************ value4 = "  + value)
  }
  future5.onComplete { value =>
    println("************ value5 = "  + value)
  }
  future6.onComplete { value =>
    println("************ value6 = "  + value)
  }
  Thread.sleep(10000)

  system.terminate()
} 
Example 23
Source File: AskPatternApp.scala    From Scala-Reactive-Programming   with MIT License
package com.packt.publishing.reactive.patterns.ask

import akka.actor.{Actor, ActorLogging, ActorRef, ActorSystem, Props}
import akka.util.Timeout
import akka.pattern.ask
import scala.concurrent.duration._
import scala.concurrent.ExecutionContext.Implicits.global

case object GetWeather
case class WeatherForecast(city:String, temperature:String)

class WFClient(wfTeller: ActorRef) extends Actor with ActorLogging {
  implicit val timeout: Timeout = 2.seconds

  def receive = {
    case GetWeather =>
      (wfTeller ? GetWeather).onComplete { wfValue =>
        log.info("Weather Forecast = " + wfValue)
      }
  }
}

class WFTeller extends Actor {
  def receive = {
    case GetWeather => sender() ! WeatherForecast("London", "12")
  }
}

object AskPatternApp extends App{
  val system = ActorSystem("WFSystem")
  val wfTeller = system.actorOf(Props[WFTeller], "WFTeller")
  val clientActor = system.actorOf(Props(new WFClient(wfTeller)), "WFClient")

  clientActor ! GetWeather
  Thread.sleep(1000)
  system.terminate()
} 
Example 24
Source File: WFActor.scala    From Scala-Reactive-Programming   with MIT License
package com.packt.publishing.cassandra.actor

import akka.actor.{ActorLogging, Props}
import akka.persistence.{PersistentActor, Recovery, RecoveryCompleted, SnapshotOffer}
import com.packt.publishing.cassandra.commands.{AddWF, PrintWF, RemoveWF, SnapshotWF}
import com.packt.publishing.cassandra.events.{WFAdded, WFEvent, WFRemoved}
import com.packt.publishing.cassandra.state.WFState

object WFActor {
  def props(id: String, recovery: Recovery) = Props(new WFActor(id, recovery))
}

class WFActor(id: String, rec: Recovery) extends PersistentActor with ActorLogging {

  override val persistenceId = id
  override val recovery = rec

  var state = WFState()

  def updateState(event: WFEvent) = state = state.update(event)

  val receiveRecover: Receive = {
    case evt: WFEvent =>
      log.info(s"Replaying event: $evt")
      updateState(evt)
    case SnapshotOffer(_, recoveredState: WFState) =>
      log.info(s"Snapshot offered: $recoveredState")
      state = recoveredState
    case RecoveryCompleted => log.info(s"Recovery completed. Current state: $state")
  }

  val receiveCommand: Receive = {
    case AddWF(wf)    => persist(WFAdded(wf))(updateState)
    case RemoveWF(wf) => persist(WFRemoved(wf))(updateState)
    case SnapshotWF   => saveSnapshot(state)
    case PrintWF      => log.info(s"Current state: $state")
  }
} 
Example 25
Source File: TaskActor.scala    From CM-Well   with Apache License 2.0
package k.grid

import akka.actor.Actor.Receive
import akka.actor.{Actor, ActorLogging, Cancellable}
import scala.concurrent.duration._
import scala.concurrent.ExecutionContext.Implicits.global
import scala.language.postfixOps


case object PauseTask
case object ResumeTask
class TaskActor(start: Long, interval: Long, block: () => Unit) extends Actor with ActorLogging {

  var s: Cancellable = _

  def startTask: Cancellable = {
    Grid.system.scheduler.schedule(start milli, interval milli) {
      block()
    }
  }

  s = startTask

  override def receive: Receive = {
    case PauseTask  => if (!s.isCancelled) s.cancel()
    case ResumeTask => if (s.isCancelled) s = startTask
  }
} 
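A usage sketch, assuming the surrounding Grid runtime is up (TaskActor schedules through Grid.system internally) and an ActorSystem `system` for creating the actor; the names here are illustrative:

import akka.actor.Props

val heartbeat = system.actorOf(Props(new TaskActor(
  start = 0L,       // initial delay in milliseconds
  interval = 1000L, // repeat period in milliseconds
  block = () => println("tick")
)))

heartbeat ! PauseTask  // cancels the underlying schedule
heartbeat ! ResumeTask // starts a fresh schedule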
Example 26
Source File: CommandActor.scala    From CM-Well   with Apache License 2.0
package cmwell.ctrl.server

import akka.actor.{Actor, ActorLogging, ActorRef, ActorSelection}
import akka.pattern.pipe
import cmwell.ctrl.checkers._
import cmwell.ctrl.commands.{ControlCommand, StartElasticsearchMaster}
import cmwell.ctrl.config.{Config, Jvms}
import cmwell.ctrl.utils.ProcUtil
import k.grid.{Grid, GridJvm}
import scala.concurrent.Future
import scala.sys.process._
import Config._

import scala.util.{Failure, Success, Try}
import scala.concurrent.ExecutionContext.Implicits.global


case class BashCommand(com: String)

object CommandActor {
  def select(host: String): ActorSelection = {
    Grid.selectActor(commandActorName, GridJvm(host, Some(Jvms.node)))
  }

  def all: Set[ActorSelection] = {
    Grid.availableMachines.map(host => Grid.selectActor(commandActorName, GridJvm(host, Some(Jvms.node))))
  }
}

class CommandActor extends Actor with ActorLogging {

  override def receive: Receive = {
    case BashCommand(com)   => sender ! ProcUtil.executeCommand(com)
    case CheckWeb           => WebChecker.check.pipeTo(sender())
    case CheckElasticsearch => ElasticsearchChecker.check.pipeTo(sender())
    case CheckCassandra     => CassandraChecker.check.pipeTo(sender())
    case cc: ControlCommand => cc.execute
  }
} 
Example 27
Source File: ProcessManager.scala    From akka-cqrs   with Apache License 2.0
package com.productfoundry.akka.cqrs.process

import akka.actor.{ActorLogging, ReceiveTimeout}
import akka.persistence.fsm.PersistentFSM
import akka.persistence.fsm.PersistentFSM.FSMState
import akka.productfoundry.contrib.pattern.ReceivePipeline
import com.productfoundry.akka.cqrs.Entity
import com.productfoundry.akka.cqrs.publish.EventPublicationInterceptor


trait FsmProcessManager[S <: FSMState, D, E <: ProcessManagerEvent]
  extends ProcessManager
  with PersistentFSM[S, D, E]
  with ReceivePipeline
  with EventPublicationInterceptor {

  def passivationStateFunction: StateFunction = {
    case Event(ReceiveTimeout, _) => stop()
  }

  whenUnhandled(passivationStateFunction)
} 
Example 28
Source File: WorkProcessor.scala    From akka-iot-mqtt-v2   with GNU Lesser General Public License v3.0
package akkaiot

import akka.actor.{ Props, Actor, ActorLogging }
import java.util.concurrent.ThreadLocalRandom

object WorkProcessor {
  def props(): Props = Props(new WorkProcessor)

  case class DeviceStateSetting(deviceType: String, state: Int, setting: Int)
}

class WorkProcessor extends Actor with ActorLogging {
  import WorkProcessor._
  def random = ThreadLocalRandom.current

  def receive = {
    case work @ Work(workId, deviceType, deviceId, state, setting) => {
      val newStateSetting: DeviceStateSetting = deviceType match {
        case "thermostat" =>
          nextDeviceStateSetting(work, Map(0->"OFF", 1->"HEAT", 2->"COOL"), "temperature", (60, 75), (-2, 2))

        case "lamp" => 
          nextDeviceStateSetting(work, Map(0->"OFF", 1->"ON"), "brightness", (1, 3), (-1, 1))

        case "security-alarm" => 
          nextDeviceStateSetting(work, Map(0->"OFF", 1->"ON"), "level", (1, 5), (-2, 2))

        case _ =>
          // Shouldn't happen (keep state/setting as is)
          log.info("Work Processor -> ALERT: Device type undefined! {}-{}", work.deviceType, work.deviceId)
          DeviceStateSetting(deviceType, state, setting)
      }

      val result = WorkResult(workId, deviceType, deviceId, newStateSetting.state, newStateSetting.setting)
      sender() ! Worker.WorkProcessed(result)
    }

    case _ =>
      log.info("Work Processor -> ALERT: Received unknown message!")
  }

  def nextDeviceStateSetting(
      work: Work, stateMap: Map[Int, String], settingType: String, settingLimit: (Int, Int), changeLimit: (Int, Int)
    ): DeviceStateSetting = {

    val nextState = random.nextInt(0, stateMap.size)

    val nextStateText = if (nextState == work.currState) "Keep state " + stateMap(work.currState) else
      "Switch to " + stateMap(nextState)

    val randomChange = random.nextInt(changeLimit._1, changeLimit._2 + 1)
    val randomSetting = work.currSetting + randomChange

    val nextSettingChange = if (randomChange == 0) 0 else {
      if (randomSetting < settingLimit._1 || randomSetting > settingLimit._2) 0 else randomChange
    }
    val nextSetting = work.currSetting + nextSettingChange

    val nextSettingText = if (nextSettingChange == 0) s"NO $settingType change" else {
      if (nextSettingChange > 0) s"RAISE $settingType by $nextSettingChange" else
        s"LOWER $settingType by $nextSettingChange"
    }

    log.info("Work Processor -> {}-{}: {} | {}", work.deviceType, work.deviceId, nextStateText, nextSettingText)
    DeviceStateSetting(work.deviceType, nextState, nextSetting)
  }
} 
Example 29
Source File: Device.scala    From akka-iot-mqtt-v2   with GNU Lesser General Public License v3.0
package akkaiot

import java.util.UUID
import java.util.concurrent.ThreadLocalRandom
import scala.concurrent.duration._
import akka.actor.{ Props, ActorRef, Actor, ActorLogging }

import com.sandinh.paho.akka._
import com.sandinh.paho.akka.MqttPubSub._

object Device {
  def props(deviceType: String, deviceId: String, mqttPubSub: ActorRef): Props = Props(new Device(deviceType, deviceId, mqttPubSub))

  case object Tick
}

class Device(deviceType: String, deviceId: String, mqttPubSub: ActorRef) extends Actor with ActorLogging {

  import Device._
  import context.dispatcher

  // deviceTypes = List("thermostat", "lamp", "security-alarm")
  private var opState: Int = 0  // 0|1|2 (OFF|HEAT|COOL) for thermostat, 0|1 (OFF|ON) for lamp|security-alarm
  private var setting: Int = 0  // 60-75 for thermostat, 1-3 for lamp, 1-5 for security-alarm

  def scheduler = context.system.scheduler
  def random = ThreadLocalRandom.current
  def nextWorkId(): String = UUID.randomUUID().toString

  override def preStart(): Unit = {
    opState = deviceType match {
      case "thermostat" => random.nextInt(0, 2+1)
      case "lamp" => random.nextInt(0, 1+1)
      case "security-alarm" => random.nextInt(0, 1+1)
      case _ => 0
    }

    setting = deviceType match {
      case "thermostat" => random.nextInt(60, 75+1)
      case "lamp" => random.nextInt(1, 3+1)
      case "security-alarm" => random.nextInt(1, 5+1)
      case _ => 0
    }

    scheduler.scheduleOnce(5.seconds, self, Tick)
    log.info("Device -> {}-{} started", deviceType, deviceId)
  }

  override def postRestart(reason: Throwable): Unit = ()

  override def postStop(): Unit = log.info("Device -> {}-{} stopped.", deviceType, deviceId)

  def receive = {
    case Tick => {
      val workId = nextWorkId()
      val work = Work(workId, deviceType, deviceId, opState, setting)
      log.info("Device -> {}-{} with state {} created work (Id: {}) ", deviceType, deviceId, opState, workId)

      val payload = MqttConfig.writeToByteArray(work)
      log.info("Device -> Publishing MQTT Topic {}: Device {}-{}", MqttConfig.topic, deviceType, deviceId)

      mqttPubSub ! new Publish(MqttConfig.topic, payload)

      context.become(waitAccepted(work, payload), discardOld = false)
    }

    case WorkResult(workId, deviceType, deviceId, nextState, nextSetting) => {
      log.info("Device -> {}-{} received work result with work Id {}.", deviceType, deviceId, workId)

      opState = nextState
      setting = nextSetting

      log.info("Device -> Updated {}-{} with state {} and setting {}.", deviceType, deviceId, opState, setting)
    }
  }

  def waitAccepted(work: Work, payload: Array[Byte]): Receive = {
    case IotManager.Ok(_) =>
      log.info("Device -> Work for {}-{} accepted | Work Id {}", work.deviceType, work.deviceId, work.workId)
      context.unbecome()
      scheduler.scheduleOnce(random.nextInt(3, 10).seconds, self, Tick)

    case IotManager.NotOk(_) =>
      log.info("Device -> ALERT: Work from {}-{} NOT ACCEPTED | Work Id {} | Retrying ... ", work.deviceType, work.deviceId, work.workId)
      scheduler.scheduleOnce(3.seconds, mqttPubSub, new Publish(MqttConfig.topic, payload))
  }
} 
Example 30
Source File: Worker.scala    From akka-iot-mqtt-v2   with GNU Lesser General Public License v3.0
package akkaiot

import java.util.UUID
import scala.concurrent.duration._
import akka.actor.{ Props, ActorRef, Actor, ActorLogging, ReceiveTimeout, Terminated }
import akka.cluster.client.ClusterClient.SendToAll
import akka.actor.OneForOneStrategy
import akka.actor.SupervisorStrategy.Stop
import akka.actor.SupervisorStrategy.Restart
import akka.actor.ActorInitializationException
import akka.actor.DeathPactException

object Worker {

  def props(clusterClient: ActorRef, workProcessorProps: Props, registerInterval: FiniteDuration = 10.seconds): Props =
    Props(classOf[Worker], clusterClient, workProcessorProps, registerInterval)

  case class WorkProcessed(result: WorkResult)
}

class Worker(clusterClient: ActorRef, workProcessorProps: Props, registerInterval: FiniteDuration)
  extends Actor with ActorLogging {
  import Worker._
  import MasterWorkerProtocol._

  val workerId = UUID.randomUUID().toString

  import context.dispatcher
  val registerTask = context.system.scheduler.schedule(0.seconds, registerInterval, clusterClient,
    SendToAll("/user/master/singleton", RegisterWorker(workerId)))

  val workProcessor = context.watch(context.actorOf(workProcessorProps, "work-processor"))

  var currentWorkId: Option[String] = None
  def workId: String = currentWorkId match {
    case Some(workId) => workId
    case None => throw new IllegalStateException("Not working")
  }

  override def supervisorStrategy = OneForOneStrategy() {
    case _: ActorInitializationException => Stop
    case _: DeathPactException => Stop
    case _: Exception =>
      currentWorkId foreach { workId => sendToMaster(WorkFailed(workerId, workId)) }
      context.become(idle)
      Restart
  }

  override def postStop(): Unit = registerTask.cancel()

  def receive = idle

  def idle: Receive = {
    case WorkIsReady =>
      sendToMaster(WorkerRequestsWork(workerId))

    case work @ Work(workId, deviceType, deviceId, state, setting) =>
      log.info("Worker -> Received work request from {}-{} | State {} | Setting {}", deviceType, deviceId, state, setting)
      currentWorkId = Some(workId)
      workProcessor ! work
      context.become(working)
  }

  def working: Receive = {
    case WorkProcessed(result: WorkResult) =>
      log.info("Worker -> Processed work: {}-{} | Work Id {}", result.deviceType, result.deviceId, workId)
      sendToMaster(WorkIsDone(workerId, workId, result))
      context.setReceiveTimeout(5.seconds)
      context.become(waitForWorkIsDoneAck(result))

    case work: Work =>
      log.info("Worker -> ALERT: Worker Id {} NOT AVAILABLE for Work Id {}", workerId, work.workId)
  }

  def waitForWorkIsDoneAck(result: WorkResult): Receive = {
    case Ack(id) if id == workId =>
      sendToMaster(WorkerRequestsWork(workerId))
      context.setReceiveTimeout(Duration.Undefined)
      context.become(idle)
    case ReceiveTimeout =>
      log.info("Worker -> ALERT: NO ACK from cluster master, retrying ... ")
      sendToMaster(WorkIsDone(workerId, workId, result))
  }

  override def unhandled(message: Any): Unit = message match {
    case Terminated(`workProcessor`) => context.stop(self)
    case WorkIsReady =>
    case _ => super.unhandled(message)
  }

  def sendToMaster(msg: Any): Unit = {
    clusterClient ! SendToAll("/user/master/singleton", msg)
  }
} 
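
To wire this Worker up it needs a ClusterClient pointing at the master's receptionist, plus a Props for the actor that does the actual processing. A minimal bootstrap sketch, assuming a master singleton at /user/master/singleton; the WorkProcessor actor, host/port, and system names are placeholders, not part of the project above:

package akkaiot

import akka.actor.{ ActorPath, ActorSystem }
import akka.cluster.client.{ ClusterClient, ClusterClientSettings }

object WorkerMain extends App {
  val system = ActorSystem("worker-system")
  val initialContacts = Set(
    ActorPath.fromString("akka.tcp://ClusterSystem@127.0.0.1:2552/system/receptionist"))
  val clusterClient = system.actorOf(
    ClusterClient.props(ClusterClientSettings(system).withInitialContacts(initialContacts)),
    "clusterClient")
  // WorkProcessor is a hypothetical actor that replies with Worker.WorkProcessed
  system.actorOf(Worker.props(clusterClient, WorkProcessor.props), "worker")
}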
Example 31
Source File: IotManager.scala    From akka-iot-mqtt-v2   with GNU Lesser General Public License v3.0 5 votes vote down vote up
package akkaiot

import java.util.concurrent.ThreadLocalRandom
import scala.concurrent.duration._
import akka.actor.{ Props, ActorRef, Actor, ActorLogging, Terminated }
import akka.pattern._
import akka.util.Timeout
import akka.cluster.client.ClusterClient.SendToAll

import com.sandinh.paho.akka._
import com.sandinh.paho.akka.MqttPubSub._

object IotManager {
  def props(clusterClient: ActorRef, numOfDevices: Int, mqttPubSub: ActorRef): Props = Props(
    new IotManager(clusterClient, numOfDevices, mqttPubSub)
  )

  case class Ok(work: Work)
  case class NotOk(work: Work)
}

class IotManager(clusterClient: ActorRef, numOfDevices: Int, mqttPubSub: ActorRef) extends Actor with ActorLogging {
  import IotManager._
  import context.dispatcher

  private var idToActorMap = Map.empty[String, ActorRef]
  private var actorToIdMap = Map.empty[ActorRef, String]

  val deviceTypes = List("thermostat", "lamp", "security-alarm")
  def random = ThreadLocalRandom.current

  mqttPubSub ! Subscribe(MqttConfig.topic, self)

  override def preStart(): Unit = {
    log.info("IoT Manager -> Creating devices ...")

    (1 to numOfDevices).foreach { n =>
      val deviceType = deviceTypes(random.nextInt(0, deviceTypes.size))
      val deviceId = (1000 + n).toString
      val deviceActor = context.actorOf(Device.props(deviceType, deviceId, mqttPubSub), s"$deviceType-$deviceId")
      context.watch(deviceActor)
      actorToIdMap += deviceActor -> deviceId
      idToActorMap += deviceId -> deviceActor
    }

    log.info("IoT Manager -> Created {} devices!", numOfDevices)
  }

  override def postStop(): Unit = log.info("IoT Manager -> Stopped")

  override def receive: Receive = {
    case SubscribeAck(Subscribe(MqttConfig.topic, `self`, _)) => {
      log.info("IoT Manager -> MQTT subscription to {} acknowledged", MqttConfig.topic)
      context.become(ready)
    }

    case x =>
      log.info("IoT Manager -> ALERT: Problem receiving message ... {}", x)
  }

  def ready: Receive = {
    case msg: Message => {
      val work = MqttConfig.readFromByteArray[Work](msg.payload)
      log.info("IoT Agent -> Received MQTT message: {}-{} | State {} | Setting {}", work.deviceType, work.deviceId, work.currState, work.currSetting)

      log.info("IoT Manager -> Sending work to cluster master")
      implicit val timeout = Timeout(5.seconds)
      (clusterClient ? SendToAll("/user/master/singleton", work))
        .map { case Master.Ack(_) => Ok(work) }
        .recover { case _ => NotOk(work) }
        .pipeTo(idToActorMap.getOrElse(work.deviceId, self))
    }

    case result @ WorkResult(workId, deviceType, deviceId, nextState, nextSetting) =>
      idToActorMap.get(deviceId) match {
        case Some(deviceActor) =>
          deviceActor forward result
          log.info("IoT Manager -> Work result forwarded to {}-{} ", deviceType, deviceId)
        case None =>
          log.info("IoT Manager -> ALERT: {}-{} NOT in registry!", deviceType, deviceId)
      }

    case Terminated(deviceActor) =>
      val deviceId = actorToIdMap(deviceActor)
      log.info("IoT Manager -> ALERT: Device actor terminated! Device Id {} will be removed.", deviceId)
      actorToIdMap -= deviceActor
      idToActorMap -= deviceId

    case Ok(work) =>
      log.info("IoT Manager -> ALERT: Receive ack from Master but Device Id of {}-{} NOT in registry!", work.deviceType, work.deviceId)

    case NotOk(work) =>
      log.info("IoT Manager -> ALERT: Did not receive ack from Master and Device Id of {}-{} NOT in registry!", work.deviceType, work.deviceId)

    case x =>
      log.info("IoT Manager -> ALERT: Problem with received message ... {}", x)
  }
} 
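
The ready handler above shows the ask-recover-pipeTo idiom: the future from the ask is mapped to a success message, a failure (for example a timeout) is recovered into a failure message, and the result is piped to the target actor so the IotManager itself never blocks. Stripped down to its essentials, with generic message types standing in for the project's own:

import akka.actor.{ Actor, ActorRef }
import akka.pattern.{ ask, pipe }
import akka.util.Timeout
import scala.concurrent.duration._

class Requester(target: ActorRef, replyTo: ActorRef) extends Actor {
  import context.dispatcher
  implicit val timeout: Timeout = Timeout(5.seconds)

  def receive: Receive = {
    case payload: String =>
      (target ? payload)
        .map(_ => "ok")                 // success becomes a domain message
        .recover { case _ => "not-ok" } // timeout/failure becomes another
        .pipeTo(replyTo)                // delivered without blocking this actor
  }
}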
Example 32
Source File: ResultProcessor.scala    From akka-iot-mqtt-v2   with GNU Lesser General Public License v3.0 5 votes vote down vote up
package akkaiot

import akka.actor.{ Props, ActorRef, Actor, ActorLogging, ActorPath }
import akka.cluster.pubsub.DistributedPubSub
import akka.cluster.pubsub.DistributedPubSubMediator

object ResultProcessor {
  def props(iotPath: ActorPath): Props = Props(new ResultProcessor(iotPath))
}

class ResultProcessor(iotPath: ActorPath) extends Actor with ActorLogging {

  val mediator = DistributedPubSub(context.system).mediator
  mediator ! DistributedPubSubMediator.Subscribe(Master.ResultsTopic, self)

  def receive = {
    case _: DistributedPubSubMediator.SubscribeAck =>
    case result: WorkResult =>
      log.info("Result Processor -> Got work result: {}-{} | State {} | Setting {}", result.deviceType, result.deviceId, result.nextState, result.nextSetting)

      context.actorSelection(iotPath) ! result
      log.info("Result Processor -> Sent work result for {}-{} to IoT Manager", result.deviceType, result.deviceId)
  }
} 
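
ResultProcessor is the subscribing half of Akka's distributed pub-sub; the publishing half (presumably the cluster master in this project) hands results to the same mediator. A sketch of that side, reusing the WorkResult and Master.ResultsTopic names from the example:

package akkaiot

import akka.actor.{ Actor, ActorLogging }
import akka.cluster.pubsub.{ DistributedPubSub, DistributedPubSubMediator }

class ResultPublisher extends Actor with ActorLogging {
  private val mediator = DistributedPubSub(context.system).mediator

  def receive: Receive = {
    case result: WorkResult =>
      // fans out to every actor subscribed to the topic, on any node
      mediator ! DistributedPubSubMediator.Publish(Master.ResultsTopic, result)
      log.info("Published result for {}-{}", result.deviceType, result.deviceId)
  }
}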
Example 33
Source File: ClusterApp.scala    From reactive-lib   with Apache License 2.0 5 votes vote down vote up
package foo

import akka.actor.{ Actor, ActorLogging, ActorSystem, PoisonPill, Props }
import akka.cluster.ClusterEvent.ClusterDomainEvent
import akka.cluster.singleton.{ ClusterSingletonManager, ClusterSingletonManagerSettings }
import akka.cluster.{ Cluster, ClusterEvent }
import akka.http.scaladsl.Http
import akka.http.scaladsl.model._
import akka.http.scaladsl.server.Directives._
import akka.management.AkkaManagement
import akka.management.cluster.bootstrap.ClusterBootstrap
import akka.stream.ActorMaterializer

object ClusterApp {

  def main(args: Array[String]): Unit = {

    implicit val system = ActorSystem()
    implicit val materializer = ActorMaterializer()
    implicit val executionContext = system.dispatcher

    val cluster = Cluster(system)
    system.log.info("Starting Akka Management")
    system.log.info("something2")
    // AkkaManagement(system).start()
    // ClusterBootstrap(system).start()

    system.actorOf(
      ClusterSingletonManager.props(
        Props[NoisySingleton],
        PoisonPill,
        ClusterSingletonManagerSettings(system)))
    Cluster(system).subscribe(
      system.actorOf(Props[ClusterWatcher]),
      ClusterEvent.InitialStateAsEvents,
      classOf[ClusterDomainEvent])

    // add real app routes here
    val routes =
      path("hello") {
        get {
          complete(
            HttpEntity(ContentTypes.`text/html(UTF-8)`, "<h1>Hello</h1>"))
        }
      }

    Http().bindAndHandle(routes, "0.0.0.0", 8080)

    system.log.info(
      s"Server online at http://localhost:8080/\nPress RETURN to stop...")

    cluster.registerOnMemberUp {
      system.log.info("Cluster member is up!")
    }
  }

  class ClusterWatcher extends Actor with ActorLogging {
    val cluster = Cluster(context.system)

    override def receive = {
      case msg ⇒ log.info(s"Cluster ${cluster.selfAddress} >>> " + msg)
    }
  }
} 
Example 34
Source File: DemoApp.scala    From reactive-lib   with Apache License 2.0 5 votes vote down vote up
package foo

import akka.actor.{ Actor, ActorLogging, ActorSystem, Props }
import akka.cluster.ClusterEvent.ClusterDomainEvent
import akka.cluster.{ Cluster, ClusterEvent }
import akka.http.scaladsl.Http
import akka.http.scaladsl.model._
import akka.http.scaladsl.server.Directives._
import akka.management.AkkaManagement
import akka.management.cluster.bootstrap.ClusterBootstrap
import akka.stream.ActorMaterializer

object DemoApp extends App {

  implicit val system = ActorSystem("Appka")

  import system.log
  implicit val mat = ActorMaterializer()
  val cluster = Cluster(system)

  log.info(s"Started [$system], cluster.selfAddress = ${cluster.selfAddress}")
  log.info("something2")
  //#start-akka-management
  AkkaManagement(system).start()
  //#start-akka-management
  ClusterBootstrap(system).start()

  cluster.subscribe(
    system.actorOf(Props[ClusterWatcher]),
    ClusterEvent.InitialStateAsEvents,
    classOf[ClusterDomainEvent])

  // add real app routes here
  val routes =
    path("hello") {
      get {
        complete(
          HttpEntity(
            ContentTypes.`text/html(UTF-8)`,
            "<h1>Hello</h1>"))
      }
    }
  Http().bindAndHandle(routes, "0.0.0.0", 8080)

  Cluster(system).registerOnMemberUp({
    log.info("Cluster member is up!")
  })

}

class ClusterWatcher extends Actor with ActorLogging {
  val cluster = Cluster(context.system)

  override def receive = {
    case msg ⇒ log.info(s"Cluster ${cluster.selfAddress} >>> " + msg)
  }
} 
Example 35
Source File: BucketRateLimiter.scala    From shield   with MIT License 5 votes vote down vote up
package shield.actors.middleware

import akka.actor.{Actor, ActorLogging, Props}
import nl.grons.metrics.scala.{Meter, Timer}
import shield.actors._
import shield.config.{ServiceLocation, Settings}
import shield.implicits.FutureUtil
import shield.kvstore.KVStore
import shield.metrics.Instrumented
import spray.http.{HttpResponse, StatusCodes}

object BucketRateLimiter {
  def props(id: String, bypassHeader: String, callsPer: Int, perSeconds: Int, store: KVStore, location: ServiceLocation) : Props = Props(new BucketRateLimiter(id, bypassHeader, callsPer, perSeconds, store, location))
}
class BucketRateLimiter(id: String, bypassHeader: String, callsPer: Int, perSeconds: Int, store: KVStore, location: ServiceLocation) extends Actor with ActorLogging with RestartLogging with Instrumented {
  import context._

  val settings = Settings(context.system)
  val localWork: Timer = metrics.timer("localWork", id)
  val bypassMeter: Meter = metrics.meter("bypass", id)
  val blockMeter: Meter = metrics.meter("block", id)
  val passMeter: Meter = metrics.meter("pass", id)
  val kvWorkTimer = timing("kvWork", id)

  def receive = {
    // todo: x-ratelimit response headers?
    case ur : DownstreamRequest => localWork.time {
      val _sender = sender()
      if (ur.request.headers.exists(_.lowercaseName == bypassHeader)) {
        bypassMeter.mark()
        _sender ! ForwardRequestCmd(ur.stage, ur.request, None)
      } else kvWorkTimer {
        // todo: profiling optimization - can we get this from the connection instead of the header?
        // similarly we spend a fair amount of time stringifying request.uri.  Let's do this once per request and cache it in the request context
        val ip = ur.request.headers.find(_.lowercaseName == "client-address").get.value
        // todo: reasonable timeout
        store.tokenRateLimit(ip, callsPer, perSeconds).andThen(FutureUtil.logFailure("BucketRateLimiter::checkLimit")).recover {
          case _ => true
        }.map {
          if (_) {
            passMeter.mark()
            _sender ! ForwardRequestCmd(ur.stage, ur.request)
          } else {
            blockMeter.mark()
            _sender ! ForwardResponseCmd(ur.stage, ResponseDetails(location, settings.LocalServiceName, ur.destination.template, None, HttpResponse(StatusCodes.TooManyRequests)))
          }
        }
      }
    }
  }
} 
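
The middleware above delegates the actual counting to store.tokenRateLimit, whose implementation lives in the KV store. For illustration only, here is an in-memory approximation of that check using a fixed-window counter (a simpler scheme than the token bucket the class name suggests; the real store is also shared across nodes):

import scala.collection.mutable

class InMemoryRateLimiter(callsPer: Int, perSeconds: Int) {
  // key -> (start of current window in millis, calls seen in that window)
  private val windows = mutable.Map.empty[String, (Long, Int)]

  def allow(key: String, nowMillis: Long = System.currentTimeMillis()): Boolean =
    synchronized {
      val windowStart = nowMillis - nowMillis % (perSeconds * 1000L)
      val count = windows.get(key) match {
        case Some((start, c)) if start == windowStart => c
        case _ => 0 // first call in this window
      }
      windows.update(key, (windowStart, count + 1))
      count < callsPer
    }
}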
Example 36
Source File: ApiKeyAuth.scala    From shield   with MIT License 5 votes vote down vote up
package shield.actors.middleware

import akka.actor.{Actor, ActorLogging, Props}
import shield.actors._
import shield.config.{ServiceLocation, Settings}
import shield.metrics.Instrumented
import spray.http.{HttpResponse, StatusCodes}

object ApiKeyAuth {
  def props(header: String, allowed: Set[String], caseSensitive: Boolean, location: ServiceLocation) : Props = Props(new ApiKeyAuth(header, allowed, caseSensitive, location))
}
class ApiKeyAuth(headerName: String, allowedKeys: Set[String], caseSensitive: Boolean, location: ServiceLocation) extends Actor with ActorLogging with RestartLogging with Instrumented {

  val settings = Settings(context.system)
  val headerNameLower = headerName.toLowerCase
  val allowedValues : Set[String] = if (caseSensitive) allowedKeys else allowedKeys.map(_.toLowerCase)

  val timer = metrics.timer("api-key-auth")
  def receive = {
    case r: DownstreamRequest => timer.time {
      val header = r.request.headers.find(_.lowercaseName == headerNameLower)
      val allowed = header.exists(h => if (caseSensitive) allowedValues.contains(h.value) else allowedValues.contains(h.value.toLowerCase))

      if (allowed) {
        sender ! ForwardRequestCmd(r.stage, r.request, None)
      } else {
        sender ! ForwardResponseCmd(
          r.stage,
          ResponseDetails(
            location,
            settings.LocalServiceName,
            r.destination.template,
            None,
            HttpResponse(if (header.isDefined) StatusCodes.Unauthorized else StatusCodes.Forbidden)
          )
        )
      }
    }
  }
} 
Example 37
Source File: WeightWatcher.scala    From shield   with MIT License 5 votes vote down vote up
package shield.actors.config

import akka.actor.{Actor, ActorLogging, Cancellable, Props}
import shield.config.ServiceLocation

import scala.concurrent.duration._

object WeightWatcherMsgs {
  case class SetTargetWeights(services: Map[ServiceLocation, ServiceDetails])
  case object Tick
  case class SetWeights(weights: Map[ServiceLocation, Int]) {
    require(weights.values.forall(_ >= 0), "Negative weight not allowed")
  }
}

object TransitionDetails {
  def default(details: ServiceDetails): TransitionDetails = {
    TransitionDetails(details.weight, 0, details.weight)
  }
}
case class TransitionDetails(targetWeight: Int, delta: Double, currentWeight: Double) {
  require(targetWeight >= 0, "target weight can't be negative")
  require(currentWeight >= 0, "current weight can't be negative")

  def setTarget(newTarget: Int, stepCount: Int) : TransitionDetails =
    if (newTarget != targetWeight) {
      copy(
        targetWeight = newTarget,
        delta = (newTarget - currentWeight) / stepCount
      )
    } else {
      this
    }

  def advanceStep() : TransitionDetails = {
    val next = currentWeight + delta
    if (delta == 0) {
      this
    } else if ((delta < 0 && next <= targetWeight) || (delta > 0 && next >= targetWeight)) {
      copy(delta=0, currentWeight=targetWeight)
    } else {
      copy(currentWeight=next)
    }
  }
}

// Why do we have one weight watcher for all hosts instead of one weight watcher for each host?
// Having a watcher for each host would cause multiple hosts to update their state per step.
// Having one watcher for all hosts will cause one state update per step.
// The one-for-all approach significantly lowers the number of times that ConfigWatcher will have to rebuild the router
object WeightWatcher {
  def props(stepTime: FiniteDuration, stepCount: Int) : Props = Props(new WeightWatcher(stepTime, stepCount))
}
class WeightWatcher(stepTime: FiniteDuration, stepCount: Int) extends Actor with ActorLogging {
  require(stepCount > 0, "Must have at least one step")
  import WeightWatcherMsgs._
  import context.dispatcher

  var state : Map[ServiceLocation, TransitionDetails] = Map.empty

  var ticker : Cancellable = context.system.scheduler.schedule(stepTime, stepTime, self, Tick)

  override def preRestart(reason: Throwable, message: Option[Any]): Unit = {
    self ! SetWeights(state.map { case (loc, transition) => loc -> transition.targetWeight })
    super.preRestart(reason, message)
  }

  override def postStop() = {
    ticker.cancel()
  }

  def receive = {
    case SetTargetWeights(proxyDetails) =>
      state = proxyDetails.map { case (location, proxyDetail) =>
          location -> state.get(location).map(_.setTarget(proxyDetail.weight, stepCount)).getOrElse(TransitionDetails.default(proxyDetail))
      }

    case Tick =>
      val oldWeights = state.map { case (loc, transition) => loc -> transition.currentWeight.toInt }
      state = state.map { case (loc, transition) => loc -> transition.advanceStep() }
      val newWeights = state.map { case (loc, transition) => loc -> transition.currentWeight.toInt }
      if (oldWeights != newWeights) {
        context.parent ! SetWeights(newWeights)
      }

    case SetWeights(weights) =>
      state = weights.map { case (loc, weight) => loc -> TransitionDetails(weight, 0, weight) }
      context.parent ! SetWeights(weights)
  }
} 
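
A worked example of the stepping math in TransitionDetails: ramping a host from weight 0 to 10 in 5 steps gives delta = (10 - 0) / 5 = 2.0, and advanceStep pins the weight exactly on target once the next step would reach or pass it:

import shield.actors.config.TransitionDetails

object WeightSteps extends App {
  val ramping = TransitionDetails(targetWeight = 0, delta = 0, currentWeight = 0)
    .setTarget(10, stepCount = 5)
  Iterator.iterate(ramping)(_.advanceStep())
    .take(7)
    .foreach(t => println(t.currentWeight)) // 0.0, 2.0, 4.0, 6.0, 8.0, 10.0, 10.0
}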
Example 38
Source File: ConsoleLogBuilder.scala    From shield   with MIT License 5 votes vote down vote up
package shield.actors.config.listener

import akka.actor.{Actor, ActorLogging, Props}
import shield.actors.RestartLogging
import shield.actors.config.ConfigWatcherMsgs
import shield.actors.listeners.{ConsoleLogger, LogCollector}
import shield.config.DomainSettings

class ConsoleLogBuilder(id: String, domain: DomainSettings) extends Actor with ActorLogging with ListenerBuilder with RestartLogging {

  val c = domain.ConfigForListener(id)
  val forwarder = context.actorOf(Props(new ConsoleLogger(id)))
  val collector = context.actorOf(Props(new LogCollector(id, domain, List(forwarder), 1)))

  context.parent ! ConfigWatcherMsgs.ListenerUpdated(id, collector)

  log.info(s"Built console logger $id")

  def receive = {
    case _ =>
  }
} 
Example 39
Source File: AlternateUpstreamBuilder.scala    From shield   with MIT License 5 votes vote down vote up
package shield.actors.config.listener

import akka.actor.{Actor, ActorLogging, Props}
import akka.routing.SmallestMailboxPool
import shield.actors.RestartLogging
import shield.actors.config.ConfigWatcherMsgs
import shield.actors.listeners.AlternateUpstream
import shield.aws.S3DiffUploader
import shield.config.DomainSettings

class AlternateUpstreamBuilder(id: String, domain: DomainSettings) extends Actor with ActorLogging with ListenerBuilder with RestartLogging {
  val c = domain.ConfigForListener(id)

  val hostUri = c.getString("serviceLocation")
  val hostType = c.getString("serviceType")
  val freq = c.getInt("freq")
  val bucket = c.getString("bucket")
  val folder = if (c.hasPath("folder")) c.getString("folder") else "/"

  // since the s3 upload is synchronous, we want a pool of workers
  val uploader = context.actorOf(SmallestMailboxPool(5).props(S3DiffUploader.props(bucket, folder)), "s3UploadRouter")

  context.parent ! ConfigWatcherMsgs.ListenerUpdated(id, context.actorOf(Props(new AlternateUpstream(id, settings.DefaultServiceLocation, hostUri, hostType, freq, uploader))))

  def receive = {
    case _ =>
  }
} 
Example 40
Source File: KibanaBuilder.scala    From shield   with MIT License 5 votes vote down vote up
package shield.actors.config.listener

import akka.actor.{Actor, ActorLogging, Props}
import shield.actors.RestartLogging
import shield.actors.config.ConfigWatcherMsgs
import shield.actors.listeners.{KibanaForwarder, LogCollector}
import shield.aws.AWSSigningConfig
import shield.config.DomainSettings

class KibanaBuilder(id: String, domain: DomainSettings) extends Actor with ActorLogging with ListenerBuilder with RestartLogging {

  val c = domain.ConfigForListener(id)
  val forwarder = context.actorOf(Props(new KibanaForwarder(id, c.getString("host"), c.getString("index-prefix"), c.getString("type"), c.getInt("max-outstanding"), AWSSigningConfig(c))))
  val collector = context.actorOf(Props(new LogCollector(id, domain, List(forwarder), c.getInt("buffer-size"))))

  context.parent ! ConfigWatcherMsgs.ListenerUpdated(id, collector)

  log.info(s"Built kibana listener $id")

  def receive = {
    case _ =>
  }
} 
Example 41
Source File: FluentdHttpBuilder.scala    From shield   with MIT License 5 votes vote down vote up
package shield.actors.config.listener

import akka.actor.{Actor, ActorLogging, Props}
import shield.actors.RestartLogging
import shield.actors.config.ConfigWatcherMsgs
import shield.actors.listeners.{FluentdHttpForwarder, LogCollector}
import shield.config.DomainSettings

class FluentdHttpBuilder(id: String, domain: DomainSettings) extends Actor with ActorLogging with ListenerBuilder with RestartLogging {

  val c = domain.ConfigForListener(id)
  val forwarder = context.actorOf(Props(new FluentdHttpForwarder(id, c.getString("host"), c.getInt("max-outstanding"))))
  val collector = context.actorOf(Props(new LogCollector(id, domain, List(forwarder), c.getInt("buffer-size"))))

  context.parent ! ConfigWatcherMsgs.ListenerUpdated(id, collector)

  log.info(s"Built FluentD listener $id")

  def receive = {
    case _ =>
  }
} 
Example 42
Source File: ApiKeyAuthBuilder.scala    From shield   with MIT License 5 votes vote down vote up
package shield.actors.config.middleware

import akka.actor.{Actor, ActorLogging}
import shield.actors.{Middleware, RestartLogging}
import shield.actors.config.ConfigWatcherMsgs
import shield.actors.middleware.ApiKeyAuth
import shield.config.DomainSettings

import scala.collection.JavaConversions._

class ApiKeyAuthBuilder(id: String, domain: DomainSettings) extends Actor with ActorLogging with MiddlewareBuilder with RestartLogging {
  val c = domain.ConfigForMiddleware(id)

  log.info(s"Building ApiKeyAuth '$id' with config $c")

  domain.MiddlewareChain.find(_.id == id) match {
    case None => log.warning(s"Could not find SLA for middleware $id")
    case Some(mw) =>
      context.parent ! ConfigWatcherMsgs.MiddlewareUpdated(Middleware(
        id,
        mw.sla,
        context.actorOf(ApiKeyAuth.props(
          c.getString("header-name"),
          c.getStringList("allowed").toSet,
          c.getBoolean("case-sensitive"),
          settings.DefaultServiceLocation
        ))
      ))
  }

  def receive = {
    case _ =>
  }
} 
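
The key names read by this builder (header-name, allowed, case-sensitive) imply a middleware config shaped roughly like the sketch below, written here with Typesafe Config for illustration; the actual files in the shield repo may differ:

import com.typesafe.config.ConfigFactory

object ApiKeyAuthConfigExample extends App {
  val c = ConfigFactory.parseString(
    """
      |header-name = "x-api-key"
      |allowed = ["key-one", "key-two"]
      |case-sensitive = false
      |""".stripMargin)
  println(c.getStringList("allowed")) // [key-one, key-two]
}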
Example 43
Source File: BucketRateLimitBuilder.scala    From shield   with MIT License 5 votes vote down vote up
package shield.actors.config.middleware

import akka.actor.{Actor, ActorLogging}
import shield.actors.{Middleware, RestartLogging}
import shield.actors.config.ConfigWatcherMsgs
import shield.actors.middleware.BucketRateLimiter
import shield.config.DomainSettings

class BucketRateLimitBuilder(id: String, domain: DomainSettings) extends Actor with ActorLogging with MiddlewareBuilder with RestartLogging {
  val c = domain.ConfigForMiddleware(id)

  log.info(s"Building BucketRateLimiter '$id' with config $c")

  domain.MiddlewareChain.find(_.id == id) match {
    case None => log.warning(s"Could not find SLA for middleware $id")
    case Some(mw) =>
      context.parent ! ConfigWatcherMsgs.MiddlewareUpdated(Middleware(
        id,
        mw.sla,
        context.actorOf(BucketRateLimiter.props(
          id,
          c.getString("bypass-header"),
          c.getInt("calls-per"),
          c.getInt("per-seconds"),
          domain.KVStores(c.getString("kvstore")),
          settings.DefaultServiceLocation
        ))
      ))
  }


  def receive = {
    case _ =>
  }
} 
Example 44
Source File: ResponseCacheBuilder.scala    From shield   with MIT License 5 votes vote down vote up
package shield.actors.config.middleware

import akka.actor.{Actor, ActorLogging}
import shield.actors.{Middleware, RestartLogging}
import shield.actors.config.ConfigWatcherMsgs
import shield.actors.middleware.ResponseCache
import shield.config.DomainSettings


class ResponseCacheBuilder(id: String, domain: DomainSettings) extends Actor with ActorLogging with MiddlewareBuilder with RestartLogging {
  val c = domain.ConfigForMiddleware(id)

  log.info(s"Building ResponseCache '$id' with config $c")

  domain.MiddlewareChain.find(_.id == id) match {
    case None => log.warning(s"Could not find SLA for middleware $id")
    case Some(mw) =>
      context.parent ! ConfigWatcherMsgs.MiddlewareUpdated(Middleware(
        id,
        mw.sla,
        context.actorOf(ResponseCache.props(
          id,
          domain.KVStores(c.getString("kvstore")),
          settings.DefaultServiceLocation
        ))
      ))
  }

  def receive = {
    case _ =>
  }
} 
Example 45
Source File: S3ObjectWatcher.scala    From shield   with MIT License 5 votes vote down vote up
package shield.actors.config

import akka.actor.{Actor, ActorLogging}
import com.amazonaws.services.s3.AmazonS3Client

sealed trait S3ObjectWatcherMessage
case object Refresh extends S3ObjectWatcherMessage
case class ChangedContents(contents: String) extends S3ObjectWatcherMessage


class S3ObjectWatcher(bucketName: String, configFilename: String) extends Actor with ActorLogging {
  val s3Client = new AmazonS3Client()
  var lastContents = ""

  def receive = {
    case Refresh =>
      val s3Object = s3Client.getObject(bucketName, configFilename)
      val newContents = scala.io.Source.fromInputStream(s3Object.getObjectContent).mkString

      if (newContents != lastContents) {
        log.info("Detected change in s3 file contents")
        log.debug(s"Fetched from s3: $newContents")
        context.parent ! ChangedContents(newContents)
        lastContents = newContents
      }
  }
} 
Example 46
Source File: StaticUpstreamWatcher.scala    From shield   with MIT License 5 votes vote down vote up
package shield.actors.config.upstream

import akka.actor.{Actor, ActorLogging, Props}
import com.typesafe.config.Config
import shield.actors.RestartLogging
import shield.actors.config.{ServiceDetails, UpstreamAggregatorMsgs}
import shield.config.ServiceLocation

import scala.collection.JavaConversions._
import scala.util.Try

object StaticUpstreamWatcher {
  def props(domainConfig: Config): Props = Props(new StaticUpstreamWatcher(domainConfig))
}

class StaticUpstreamWatcher(domainConfig: Config) extends Actor with ActorLogging with UpstreamWatcher with RestartLogging with UpstreamParser {
  val rawServices: List[Try[(ServiceLocation, ServiceDetails)]] =
    if (domainConfig.hasPath("upstreams"))
      domainConfig.getConfigList("upstreams").map { c =>
        Try {
          parseUpstreamEntry(
            c.getString("serviceType"),
            c.getString("serviceLocation"),
            if (c.hasPath("weight")) c.getInt("weight") else 1)
        }
      }.toList
    else
      Nil

  for (attempt <- rawServices.filter(_.isFailure)) {
    log.warning(s"Bad upstream host in the config (${attempt.failed.get.getMessage})")
  }

  context.parent ! UpstreamAggregatorMsgs.DiscoveredUpstreams(rawServices.flatMap(_.toOption).toMap)

  def receive = {
    case _ =>
  }
} 
Example 47
Source File: S3DomainWatcher.scala    From shield   with MIT License 5 votes vote down vote up
package shield.actors.config.domain

import java.util.concurrent.TimeUnit

import akka.actor.{ActorLogging, ActorRef, PoisonPill, Props}
import com.typesafe.config.ConfigFactory
import shield.actors.config._
import shield.actors.{RestartLogging, ShieldActorMsgs}
import shield.config.DomainSettings

import scala.collection.JavaConversions._
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.duration._
import scala.util.{Failure, Success, Try}

class S3DomainWatcher extends DomainWatcher with ActorLogging with RestartLogging {

  import context.system

  var domains = Map[String, DomainSettings]()
  var configWatchers = Map[String, ActorRef]()

  val config = settings.config.getConfig("shield.s3-domain-watcher")
  val s3WatcherService = context.actorOf(Props(
    classOf[S3ObjectWatcher],
    config.getString("bucket-name"),
    config.getString("config-filename")))

  val refreshInterval = Duration(config.getDuration("refresh-interval", TimeUnit.MILLISECONDS), TimeUnit.MILLISECONDS)
  var cancellable = system.scheduler.schedule(
    0.seconds,
    refreshInterval,
    s3WatcherService,
    Refresh)

  override def postStop() = {
    cancellable.cancel()
  }

  def teardownConfigWatcher(configWatcher: ActorRef) = {
    context.system.scheduler.scheduleOnce(60.seconds) {
      configWatcher ! PoisonPill
    }
  }

  def receive: Receive = {
    case ChangedContents(contents) =>
      Try { ConfigFactory.parseString(contents) } match {
        case Success(domainsConfig) =>
          log.debug("new parsed domains config")
          val foundDomains = domainsConfig.getConfigList("domains").map(c => c.getString("domain-name") -> new DomainSettings(c, context.system)).toMap
          val newDomains = foundDomains.keySet.diff(domains.keySet)
          for (d <- newDomains) {
            configWatchers += d -> context.actorOf(ConfigWatcher.props(foundDomains(d), context.parent), "config-watcher-" + d)
          }
          val removedDomains = domains.keySet.diff(foundDomains.keySet)
          for (d <- removedDomains) {
            if (configWatchers.contains(d)){
              teardownConfigWatcher(configWatchers(d))
              configWatchers -= d
            }
          }
          domains = foundDomains

          context.parent ! ShieldActorMsgs.DomainsUpdated(foundDomains)
        case Failure(e) => log.warning(s"Error encountered while parsing domain conf: $e")
    }
  }
} 
Example 48
Source File: StaticDomainWatcher.scala    From shield   with MIT License 5 votes vote down vote up
package shield.actors.config.domain

import akka.actor.{ActorLogging, ActorRef}
import shield.actors.config.ConfigWatcher
import shield.actors.{RestartLogging, ShieldActorMsgs}
import shield.config.DomainSettings
import shield.metrics.Instrumented

import scala.collection.JavaConversions._


class StaticDomainWatcher extends DomainWatcher with ActorLogging with RestartLogging with Instrumented {
  val domains = settings.config.getConfigList("shield.domains").map(c => c.getString("domain-name") -> new DomainSettings(c, context.system)).toMap

  var configWatchers = Map[String, ActorRef]()
  for ((hostname, domain) <- domains) {
    configWatchers += hostname -> context.actorOf(ConfigWatcher.props(domain, context.parent), "config-watcher-" + hostname)
  }

  context.parent ! ShieldActorMsgs.DomainsUpdated(domains)

  def receive = {
    case _ =>
  }
} 
Example 49
Source File: LogCollector.scala    From shield   with MIT License 5 votes vote down vote up
package shield.actors.listeners

import akka.actor.{Actor, ActorLogging, ActorRef}
import akka.event.LoggingAdapter
import nl.grons.metrics.scala.{Meter, Timer}
import shield.actors.{RequestProcessorCompleted, RestartLogging}
import org.joda.time.format.ISODateTimeFormat
import shield.config.{DomainSettings, HttpServiceLocation, Settings}
import shield.metrics.Instrumented
import spray.http.{HttpHeader, HttpResponse}
import spray.json._

import scala.collection.mutable.ArrayBuffer
import scala.concurrent.duration._
import scala.util.{Failure, Success, Try}


case object FlushLogs
case object LogsFlushed
case class AccessLogs(buffer: Seq[JsObject])

object LogCollector {
  def handleResults(self: ActorRef, droppedMeter: Meter, log: LoggingAdapter, logCount: Int) : PartialFunction[Try[HttpResponse], Unit] = {
    case Success(r) =>
      self ! LogsFlushed
      if (r.status.isFailure) {
        droppedMeter.mark(logCount)
        log.warning(s"Error forwarding access logs: ${r.entity.asString}")
      }
    case Failure(f) =>
      self ! LogsFlushed
      droppedMeter.mark(logCount)
      log.warning(s"Error forwarding access logs: $f")
  }
}

class LogCollector(id: String, domain: DomainSettings, forwarders: Seq[ActorRef], maxBufferSize: Int) extends Actor with ActorLogging with RestartLogging with Instrumented {
  import context.dispatcher

  val settings = Settings(context.system)
  val shieldHost = JsString(settings.DefaultServiceLocation.baseUrl.toString)

  var buffer = ArrayBuffer[JsObject]()

  val dateTimeFormat = ISODateTimeFormat.dateTime()
  val logSerializationTimer: Timer = metrics.timer("log-serialization")

  // todo: profiling optimization - 1% of CPU time is spent here while under load
  def logJson(r: RequestProcessorCompleted): JsObject = logSerializationTimer.time {
    JsObject(Map(
      // todo: profiling optimization: use seconds, and cache it per second
      "@timestamp" -> JsString(dateTimeFormat.print(System.currentTimeMillis() - r.overallTiming)),
      "method" -> JsString(r.completion.request.method.toString()),
      // todo: profiling optimization: uri.toString is used in several places - can we cache it?
      "request_headers" -> JsObject(extractHeaders(r.completion.request.headers, domain.loggedRequestHeaders)),
      "response_headers" -> JsObject(extractHeaders(r.completion.details.response.headers, domain.loggedResponseHeaders)),
      "path" -> JsString(r.completion.request.uri.toString()),
      "template" -> JsString(r.completion.details.template.path.toString),
      "responding_service" -> JsString(r.completion.details.serviceName),
      "responding_host" -> JsString(r.completion.details.serviceLocation.locationName),
      "shield_host" -> shieldHost,
      "overall_time" -> JsNumber(r.overallTiming),
      "middleware_time" -> JsObject(r.middlewareTiming.map { case (attr, timing) => attr -> JsNumber(timing) }),
      // todo: cache header name should be config driven
      "cache_status" -> JsString(r.completion.details.response.headers.find(_.lowercaseName == "x-cache").map(_.value).getOrElse("nocache")),
      "response_size" -> JsNumber(r.completion.details.response.entity.data.length),
      "response_status" -> JsNumber(r.completion.details.response.status.intValue)
    ))
  }

  val bufferSizeHistogram = metrics.histogram("bufferSizeOnFlush", id)
  var flushTimer = context.system.scheduler.scheduleOnce(100.millis, self, FlushLogs)
  def flushLogs() = {
    flushTimer.cancel()
    bufferSizeHistogram += buffer.length
    if (buffer.nonEmpty) {
      val msg = AccessLogs(buffer)
      forwarders.foreach {
        _ ! msg
      }
      buffer = ArrayBuffer()
    }
    flushTimer = context.system.scheduler.scheduleOnce(100.millis, self, FlushLogs)
  }

  def receive: Receive = {
    case r: RequestProcessorCompleted =>
      buffer += logJson(r)
      if (buffer.length >= maxBufferSize) {
        flushLogs()
      }

    case FlushLogs =>
      flushLogs()
  }

  def extractHeaders(headers: List[HttpHeader], toExtract: Set[String]): Map[String, JsString] = {
    headers.filter(h => toExtract.contains(h.lowercaseName)).map(h => h.name -> JsString(h.value)).toMap
  }
} 
Example 50
Source File: KibanaForwarder.scala    From shield   with MIT License 5 votes vote down vote up
package shield.actors.listeners

import akka.actor.{Actor, ActorLogging}
import com.amazonaws.auth.{AWSCredentials, DefaultAWSCredentialsProviderChain}
import com.typesafe.config.Config
import shield.actors.RestartLogging
import org.joda.time.format.DateTimeFormat
import org.joda.time.{DateTimeZone, DateTime}
import shield.aws.AWSSigningConfig
import shield.metrics.Instrumented
import spray.client.pipelining._
import spray.http.HttpResponse
import shield.aws.AWSImplicits._
import spray.json.DefaultJsonProtocol._
import spray.json._

// todo: ensure useful mapping on the index
class KibanaForwarder(id: String, host: String, indexPrefix: String, ttype: String, maxOutstanding: Int, signingParams: AWSSigningConfig) extends Actor with ActorLogging with RestartLogging with Instrumented {
  implicit val ctx = context.dispatcher

  // todo: timeout?
  val awsSigningConfig = signingParams
  val pipeline = sendReceive
  val dayFormat = DateTimeFormat.forPattern("yyyy.MM.dd")
  val outstandingCounter = metrics.counter("outstandingPosts", id)
  val droppedMeter = metrics.meter("droppedAccessLogs", id)
  val postTimer = timing("postToKibana", id)

  def receive = {
    case LogsFlushed =>
      outstandingCounter -= 1

    case AccessLogs(buffer) =>
      if (buffer.nonEmpty) {
        if (outstandingCounter.count >= maxOutstanding) {
          droppedMeter.mark(buffer.length)
        } else postTimer {
          outstandingCounter += 1

          val date = dayFormat.print(DateTime.now(DateTimeZone.UTC))
          // todo: CompactPrint is 1% cpu under load tests.  Faster serialization library?
          val orderedCommands = buffer.flatMap { doc =>
            List(
              JsObject(
                "index" -> JsObject(
                  "_index" -> JsString(s"$indexPrefix-$date"),
                  "_type" -> JsString(ttype)
                )
              ).toJson.compactPrint,
              doc.toJson.compactPrint
            )
          }
          val req = Post(s"$host/_bulk", orderedCommands.mkString("\n") + "\n").withAWSSigning(awsSigningConfig)
          pipeline(req) andThen LogCollector.handleResults(self, droppedMeter, log, buffer.length)
        }
      }
  }
} 
Example 51
Source File: FluentdHttpForwarder.scala    From shield   with MIT License 5 votes vote down vote up
package shield.actors.listeners

import akka.actor.{ActorRef, Actor, ActorLogging}
import shield.actors.RestartLogging
import shield.metrics.Instrumented
import spray.client.pipelining._
import spray.http.{HttpResponse, FormData}
import spray.json.DefaultJsonProtocol._
import spray.json._

import scala.concurrent.duration._
import scala.util.{Failure, Success}


class FluentdHttpForwarder(id: String, host: String, maxOutstanding: Int) extends Actor with ActorLogging with RestartLogging with Instrumented {
  implicit val ctx = context.dispatcher

  // todo: timeout?
  val pipeline = sendReceive
  var outstanding = metrics.counter("outstandingPosts", id)
  val droppedMeter = metrics.meter("droppedAccessLogs", id)
  val postTimer = timing("postToFluentd", id)

  def receive = {
    case LogsFlushed =>
      outstanding -= 1

    case AccessLogs(buffer) =>
      if (buffer.nonEmpty) {
        if (outstanding.count >= maxOutstanding) {
          droppedMeter.mark(buffer.length)
        } else postTimer {
          outstanding += 1

          val json = buffer.toJson.compactPrint
          val data = FormData(Map(("json", json)))
          val req = Post(host, data)
          pipeline(req) andThen LogCollector.handleResults(self, droppedMeter, log, buffer.length)
        }
      }
  }
} 
Example 52
Source File: S3DiffUploader.scala    From shield   with MIT License 5 votes vote down vote up
package shield.aws

import java.io.{ByteArrayInputStream, InputStream}
import java.nio.charset.StandardCharsets

import akka.actor.{Actor, ActorLogging, Props}
import com.amazonaws.auth.profile.ProfileCredentialsProvider
import com.amazonaws.services.s3.AmazonS3Client
import com.amazonaws.services.s3.model.ObjectMetadata
import shield.actors.listeners.ComparisonDiffFile

object S3DiffUploader{
  def props(bucket: String, folder: String) : Props = Props(new S3DiffUploader(bucket, folder))
}

class S3DiffUploader(bucket: String, folder: String)  extends Actor with ActorLogging {
  val s3Client = new AmazonS3Client()
  val charset = StandardCharsets.UTF_8
  val stripped = folder.stripPrefix("/").stripSuffix("/")
  val prefix = if (stripped.isEmpty) {
    stripped
  } else {
    stripped + "/"
  }

  def receive = {
    case file: ComparisonDiffFile =>
      val metadata = new ObjectMetadata()
      metadata.setContentLength(file.contents.length)
      s3Client.putObject(bucket, s"$prefix${file.fileName}", new ByteArrayInputStream(file.contents), metadata)
  }
} 
Example 53
Source File: HttpApi.scala    From jwt-akka-http   with MIT License 5 votes vote down vote up
package ba.codecentric

import java.util.concurrent.TimeUnit

import akka.actor.{ Actor, ActorLogging, Props }
import akka.http.scaladsl.Http
import akka.http.scaladsl.Http.ServerBinding
import akka.http.scaladsl.model.StatusCodes
import akka.http.scaladsl.model.headers.RawHeader
import akka.http.scaladsl.server.{ Directive1, Route }
import akka.http.scaladsl.server.Directives._
import akka.stream.ActorMaterializer
import akka.pattern._

import scala.util.Failure

object HttpApi {
  import de.heikoseeberger.akkahttpcirce.FailFastCirceSupport._
  import io.circe.generic.auto._
  import authentikat.jwt._

  final val Name                  = "http-api"
  final val AccessTokenHeaderName = "X-Access-Token"

  final case class LoginRequest(username: String, password: String)

  private val tokenExpiryPeriodInDays = 1
  private val secretKey               = "super_secret_key"
  private val header                  = JwtHeader("HS256")

  private def login: Route = post {
    entity(as[LoginRequest]) {
      case lr @ LoginRequest("admin", "admin") =>
        val claims = setClaims(lr.username, tokenExpiryPeriodInDays)
        respondWithHeader(RawHeader(AccessTokenHeaderName, JsonWebToken(header, claims, secretKey))) {
          complete(StatusCodes.OK)
        }
      case LoginRequest(_, _) => complete(StatusCodes.Unauthorized)
    }
  }

  private def securedContent: Route = get {
    authenticated { claims =>
      complete(s"User: ${claims.getOrElse("user", "")} has accessed a secured content!")
    }
  }

  private def authenticated: Directive1[Map[String, Any]] =
    optionalHeaderValueByName("Authorization").flatMap {
      case Some(jwt) if isTokenExpired(jwt) =>
        complete(StatusCodes.Unauthorized -> "Session expired.")

      case Some(jwt) if JsonWebToken.validate(jwt, secretKey) =>
        provide(getClaims(jwt))

      case _ => complete(StatusCodes.Unauthorized)
    }

  private def setClaims(username: String, expiryPeriodInDays: Long): JwtClaimsSetMap =
    JwtClaimsSet(
      Map("user" -> username,
          "expiredAt" -> (System.currentTimeMillis() + TimeUnit.DAYS
            .toMillis(expiryPeriodInDays)))
    )

  private def getClaims(jwt: String): Map[String, String] = jwt match {
    case JsonWebToken(_, claims, _) => claims.asSimpleMap.getOrElse(Map.empty[String, String])
  }

  private def isTokenExpired(jwt: String): Boolean =
    getClaims(jwt).get("expiredAt").exists(_.toLong < System.currentTimeMillis())

  def routes: Route = login ~ securedContent

  def apply(host: String, port: Int) = Props(new HttpApi(host, port))
}

final class HttpApi(host: String, port: Int) extends Actor with ActorLogging {
  import HttpApi._
  import context.dispatcher

  private implicit val materializer: ActorMaterializer = ActorMaterializer()

  Http(context.system).bindAndHandle(routes, host, port).pipeTo(self)

  override def receive: Receive = {
    case ServerBinding(address) =>
      log.info("Server successfully bound at {}:{}", address.getHostName, address.getPort)
    case Failure(cause) =>
      log.error("Failed to bind server", cause)
      context.system.terminate()
  }
} 
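
Booting the server is a one-liner once an actor system exists; a minimal sketch (host and port are placeholders). After binding, a POST with {"username": "admin", "password": "admin"} returns the JWT in the X-Access-Token response header, and a GET with that token in the Authorization header reaches the secured content:

import akka.actor.ActorSystem
import ba.codecentric.HttpApi

object Main extends App {
  val system = ActorSystem("jwt-akka-http")
  // binds in the HttpApi constructor; ServerBinding or Failure arrives via pipeTo(self)
  system.actorOf(HttpApi("127.0.0.1", 8080), HttpApi.Name)
}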
Example 54
Source File: EventPersistorActor.scala    From akka-viz   with MIT License 5 votes vote down vote up
package akkaviz.persistence

import akka.actor.{ActorRef, Actor, ActorLogging}
import akkaviz.events.EventPublisherActor.Subscribe
import akkaviz.events.{FilteringRule, Helpers}
import akkaviz.events.Helpers.actorRefToString
import akkaviz.events.types._
import akkaviz.serialization.MessageSerialization
import com.datastax.driver.core.utils.UUIDs
import io.getquill._
import io.getquill.naming._

import scala.concurrent.duration._

class EventPersistorActor(publisherRef: ActorRef) extends Actor with ActorLogging {

  import context.dispatcher

  private[this] var queue = List[ReceivedRecord]()

  private[this] val maxItemsInQueue = 20

  override def preStart(): Unit = {
    super.preStart()
    context.system.scheduler.schedule(30.seconds, 30.seconds, self, DoInsert)
    publisherRef ! Subscribe
  }

  override def receive: Receive = {
    case DoInsert =>
      doInsert()

    case r: ReceivedWithId if FilteringRule.isUserActor(r.actorRef) && FilteringRule.isUserActor(r.sender) =>
      val msg = MessageSerialization.render(r.message)
      val id = UUIDs.timeBased()
      val time = System.currentTimeMillis()
      val records = List(
        ReceivedRecord(id, time, actorRefToString(r.sender), To, actorRefToString(r.actorRef), msg),
        ReceivedRecord(id, time, actorRefToString(r.actorRef), From, actorRefToString(r.sender), msg)
      )
      queue ++= records
      if (queue.size >= maxItemsInQueue) {
        doInsert()
      }

    case _ => {}

  }

  private[this] case object DoInsert

  private[this] def doInsert(): Unit = {
    if (queue.nonEmpty) {
      db.run(query[ReceivedRecord].insert)(queue)
      queue = List()
    }
  }

  private[this] val db = source(new CassandraSyncSourceConfig[SnakeCase]("akkaviz.cassandra"))

} 
Example 55
Source File: DistributedProcessingWorker.scala    From aecor   with MIT License 5 votes vote down vote up
package aecor.distributedprocessing

import aecor.distributedprocessing.DistributedProcessing._
import aecor.distributedprocessing.DistributedProcessingWorker.KeepRunning
import aecor.distributedprocessing.serialization.Message
import cats.effect.syntax.effect._
import akka.actor.{ Actor, ActorLogging, Props, Status }
import akka.pattern._
import cats.effect.Effect
import cats.implicits._

private[aecor] object DistributedProcessingWorker {
  def props[F[_]: Effect](processWithId: Int => Process[F], processName: String): Props =
    Props(new DistributedProcessingWorker[F](processWithId, processName))

  final case class KeepRunning(workerId: Int) extends Message
}

private[aecor] final class DistributedProcessingWorker[F[_]: Effect](
  processFor: Int => Process[F],
  processName: String
) extends Actor
    with ActorLogging {
  import context.dispatcher

  case class ProcessStarted(process: RunningProcess[F])
  case object ProcessTerminated

  var killSwitch: Option[F[Unit]] = None

  override def postStop(): Unit =
    killSwitch.foreach(_.toIO.unsafeRunSync())

  def receive: Receive = {
    case KeepRunning(workerId) =>
      log.info("[{}] Starting process {}", workerId, processName)
      processFor(workerId).run
        .map(ProcessStarted)
        .toIO
        .unsafeToFuture() pipeTo self
      context.become {
        case ProcessStarted(RunningProcess(watchTermination, terminate)) =>
          log.info("[{}] Process started {}", workerId, processName)
          killSwitch = Some(terminate)
          watchTermination.toIO.map(_ => ProcessTerminated).unsafeToFuture() pipeTo self
          context.become {
            case Status.Failure(e) =>
              log.error(e, "Process failed {}", processName)
              throw e
            case ProcessTerminated =>
              log.error("Process terminated {}", processName)
              throw new IllegalStateException(s"Process terminated $processName")
          }
        case Status.Failure(e) =>
          log.error(e, "Process failed to start {}", processName)
          throw e
        case KeepRunning(_) => ()
      }
  }
} 
Example 56
Source File: DistributedProcessingSupervisor.scala    From aecor   with MIT License 5 votes vote down vote up
package aecor.distributedprocessing

import aecor.distributedprocessing.DistributedProcessingSupervisor.{
  GracefulShutdown,
  ShutdownCompleted,
  Tick
}
import aecor.distributedprocessing.DistributedProcessingWorker.KeepRunning
import akka.actor.{ Actor, ActorLogging, ActorRef, Props, Terminated }
import akka.cluster.sharding.ShardRegion

import scala.concurrent.duration.{ FiniteDuration, _ }

object DistributedProcessingSupervisor {
  private final case object Tick
  final case object GracefulShutdown
  final case object ShutdownCompleted

  def props(processCount: Int, shardRegion: ActorRef, heartbeatInterval: FiniteDuration): Props =
    Props(new DistributedProcessingSupervisor(processCount, shardRegion, heartbeatInterval))
}

final class DistributedProcessingSupervisor(processCount: Int,
                                            shardRegion: ActorRef,
                                            heartbeatInterval: FiniteDuration)
    extends Actor
    with ActorLogging {

  import context.dispatcher

  private val heartbeat =
    context.system.scheduler.schedule(0.seconds, heartbeatInterval, self, Tick)

  context.watch(shardRegion)

  override def postStop(): Unit = {
    heartbeat.cancel()
    ()
  }

  override def receive: Receive = {
    case Tick =>
      (0 until processCount).foreach { processId =>
        shardRegion ! KeepRunning(processId)
      }
    case Terminated(`shardRegion`) =>
      context.stop(self)
    case GracefulShutdown =>
      log.info(s"Performing graceful shutdown of [$shardRegion]")
      shardRegion ! ShardRegion.GracefulShutdown
      val replyTo = sender()
      context.become {
        case Terminated(`shardRegion`) =>
          log.info(s"Graceful shutdown completed for [$shardRegion]")
          context.stop(self)
          replyTo ! ShutdownCompleted
      }

  }
} 
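
GracefulShutdown above implements an ask-able handoff: the supervisor forwards ShardRegion.GracefulShutdown, waits for the region to terminate, then replies ShutdownCompleted to whoever asked. A small helper sketch for callers:

import akka.actor.ActorRef
import akka.pattern.ask
import akka.util.Timeout
import scala.concurrent.Future
import scala.concurrent.duration._
import aecor.distributedprocessing.DistributedProcessingSupervisor.{ GracefulShutdown, ShutdownCompleted }

object ProcessingShutdown {
  // `supervisor` is the actor created from DistributedProcessingSupervisor.props above
  def shutdown(supervisor: ActorRef): Future[ShutdownCompleted.type] = {
    implicit val timeout: Timeout = Timeout(30.seconds)
    (supervisor ? GracefulShutdown).mapTo[ShutdownCompleted.type]
  }
}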
Example 57
Source File: ClusterStateInformer.scala    From distributed-cache-on-k8s-poc   with MIT License 5 votes vote down vote up
package cluster

import akka.actor.{ Actor, ActorLogging, Props }
import akka.cluster.ClusterEvent._
import akka.cluster.{ Cluster, ClusterEvent }

class ClusterStateInformer extends Actor with ActorLogging {
  val cluster = Cluster(context.system)

  override def preStart(): Unit = {
    cluster.subscribe(
      subscriber = self,
      initialStateMode = ClusterEvent.InitialStateAsEvents,
      to = classOf[MemberEvent], classOf[UnreachableMember]
    )
  }

  override def postStop(): Unit = cluster.unsubscribe(self)

  override def receive: Receive = {
    case MemberJoined(member) => log.info(s"Member ${member.address} Joined")
    case MemberUp(member) => log.info("Member is Up: {}", member.address)
    case UnreachableMember(member) => log.info("Member detected as unreachable: {}", member)
    case MemberRemoved(member, previousStatus) =>
      log.info(
        "Member is Removed: {} after {}",
        member.address, previousStatus)
    case me: MemberEvent ⇒ log.info(s"Received Member event $me for Member: ${me.member.address}")
  }
}

object ClusterStateInformer {
  def props():Props = Props(new ClusterStateInformer)
} 
Example 58
Source File: CacheDataActor.scala    From distributed-cache-on-k8s-poc   with MIT License 5 votes vote down vote up
package cluster

import java.util.UUID

import akka.actor.SupervisorStrategy.Stop
import akka.actor.{ Actor, ActorLogging, Props, ReceiveTimeout }
import akka.cluster.sharding.ShardRegion
import akka.cluster.sharding.ShardRegion.Passivate
import cluster.CacheDataActor.Get

class CacheDataActor extends Actor with ActorLogging {

  override def receive: Receive = {
    case Get(id) => sender ! s"cached data for id: $id"
    case ReceiveTimeout =>
      log.info(s"sending Passivate to metadata parent: {${context.parent.path.name}} for ${self.path.name}")
      context.parent ! Passivate(stopMessage = Stop)
    case Stop =>
      context.stop(self)
      log.info(s"Passivating metadata actor for ${self.path.name}")
  }
}

object CacheDataActor {
  final val numOfShards = 50 // Planned num of cluster nodes
  val extractEntityId: ShardRegion.ExtractEntityId = {
    case msg@Get(id) => (id.toString, msg)
  }
  val extractShardId: ShardRegion.ExtractShardId = {
    case Get(id) => (id.hashCode() % numOfShards).toString
  }

  case class Get(id: UUID)

  def props: Props = Props(new CacheDataActor())
} 
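
The companion's extractors plug straight into cluster sharding. A bootstrap sketch, assuming the node is configured as a cluster member with akka-cluster-sharding on the classpath:

import java.util.UUID
import akka.actor.ActorSystem
import akka.cluster.sharding.{ ClusterSharding, ClusterShardingSettings }
import cluster.CacheDataActor

object CacheShardingBootstrap extends App {
  val system = ActorSystem("cache-cluster")
  val cacheRegion = ClusterSharding(system).start(
    typeName = "CacheData",
    entityProps = CacheDataActor.props,
    settings = ClusterShardingSettings(system),
    extractEntityId = CacheDataActor.extractEntityId,
    extractShardId = CacheDataActor.extractShardId)

  // messages go to the region, which routes them to the right entity
  cacheRegion ! CacheDataActor.Get(UUID.randomUUID())
}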
Example 59
Source File: Librarian.scala    From reactive-application-development-scala   with Apache License 2.0 5 votes vote down vote up
package com.rarebooks.library

import akka.actor.{ Actor, ActorRef, ActorLogging, Props, Stash }
import akka.event.LoggingAdapter
import scala.concurrent.duration.FiniteDuration

object Librarian {

  import Catalog._
  import RareBooksProtocol._

  final case class Done(e: Either[BookNotFound, BookFound], customer: ActorRef)

  def props(findBookDuration: FiniteDuration): Props =
      Props(new Librarian(findBookDuration))

  
  // `log` is passed in by the caller: companion objects have no logger of their
  // own, so the Librarian actor supplies its ActorLogging adapter here.
  private def process(r: Either[BookNotFound, BookFound], sender: ActorRef, log: LoggingAdapter): Unit = {
    r fold (
      f => {
        sender ! f
        log.info(f.toString)
      },
      s => sender ! s)
  }
} 
Example 60
Source File: RareBooks.scala    From reactive-application-development-scala   with Apache License 2.0 5 votes vote down vote up
package com.rarebooks.library

import akka.actor.{ Actor, ActorLogging, OneForOneStrategy, Props, Stash, SupervisorStrategy }
import akka.routing.{ ActorRefRoutee, Router, RoundRobinRoutingLogic }
import scala.concurrent.duration.{ MILLISECONDS => Millis, FiniteDuration, Duration }

object RareBooks {

  case object Close
  case object Open
  case object Report

  def props: Props =
    Props(new RareBooks)
}

class RareBooks extends Actor with ActorLogging with Stash {

  import context.dispatcher
  import RareBooks._
  import RareBooksProtocol._

  override val supervisorStrategy: SupervisorStrategy = {
    val decider: SupervisorStrategy.Decider = {
      case Librarian.ComplainException(complain, customer) =>
        customer ! Credit()
        log.info(s"RareBooks sent customer $customer a credit")
        SupervisorStrategy.Restart
    }
    OneForOneStrategy()(decider orElse super.supervisorStrategy.decider)
  }

  private val openDuration: FiniteDuration =
    Duration(context.system.settings.config.getDuration("rare-books.open-duration", Millis), Millis)

  private val closeDuration: FiniteDuration =
    Duration(context.system.settings.config.getDuration("rare-books.close-duration", Millis), Millis)

  private val nbrOfLibrarians: Int = context.system.settings.config getInt "rare-books.nbr-of-librarians"

  private val findBookDuration: FiniteDuration =
    Duration(context.system.settings.config.getDuration("rare-books.librarian.find-book-duration", Millis), Millis)

  private val maxComplainCount: Int = context.system.settings.config getInt "rare-books.librarian.max-complain-count"

  var requestsToday: Int = 0
  var totalRequests: Int = 0

  var router: Router = createLibrarian()

  context.system.scheduler.scheduleOnce(openDuration, self, Close)

  
  protected def createLibrarian(): Router = {
    var cnt: Int = 0
    val routees: Vector[ActorRefRoutee] = Vector.fill(nbrOfLibrarians) {
      val r = context.actorOf(Librarian.props(findBookDuration, maxComplainCount), s"librarian-$cnt")
      cnt += 1
      ActorRefRoutee(r)
    }
    Router(RoundRobinRoutingLogic(), routees)
  }
} 
Example 61
Source File: RareBooks.scala    From reactive-application-development-scala   with Apache License 2.0 5 votes vote down vote up
package com.rarebooks.library

import akka.actor.{Actor, ActorLogging, ActorPath, Address, OneForOneStrategy, Props, RootActorPath, Stash, SupervisorStrategy}
import akka.routing.{ActorRefRoutee, RoundRobinRoutingLogic, Router}

import scala.concurrent.duration.{Duration, FiniteDuration, MILLISECONDS => Millis}

object RareBooks {

  case object Close
  case object Open
  case object Report

//  val name: String =
//    "rare-books"
//
//  def pathFor(address: Address): ActorPath =
//    RootActorPath(address) / "user" / name
  
  def props: Props =
    Props(new RareBooks)
}

class RareBooks extends Actor with ActorLogging with Stash {

  import context.dispatcher
  import RareBooks._
  import LibraryProtocol._

  override val supervisorStrategy: SupervisorStrategy = {
    val decider: SupervisorStrategy.Decider = {
      case Librarian.ComplainException(complain, customer) =>
        customer ! Credit()
        log.info(s"RareBooks sent customer $customer a credit")
        SupervisorStrategy.Restart
    }
    OneForOneStrategy()(decider orElse super.supervisorStrategy.decider)
  }

  private val openDuration: FiniteDuration =
    Duration(context.system.settings.config.getDuration("rare-books.open-duration", Millis), Millis)

  private val closeDuration: FiniteDuration =
    Duration(context.system.settings.config.getDuration("rare-books.close-duration", Millis), Millis)

  private val nbrOfLibrarians: Int = context.system.settings.config getInt "rare-books.nbr-of-librarians"

  private val findBookDuration: FiniteDuration =
    Duration(context.system.settings.config.getDuration("rare-books.librarian.find-book-duration", Millis), Millis)

  private val maxComplainCount: Int = context.system.settings.config getInt "rare-books.librarian.max-complain-count"

  var requestsToday: Int = 0
  var totalRequests: Int = 0

  var router: Router = createLibrarian()

  context.system.scheduler.scheduleOnce(openDuration, self, Close)

  
  protected def createLibrarian(): Router = {
    var cnt: Int = 0
    val routees: Vector[ActorRefRoutee] = Vector.fill(nbrOfLibrarians) {
      val r = context.actorOf(Librarian.props(findBookDuration, maxComplainCount), s"librarian-$cnt")
      cnt += 1
      ActorRefRoutee(r)
    }
    Router(RoundRobinRoutingLogic(), routees)
  }
} 
Example 62
Source File: Librarian.scala    From reactive-application-development-scala   with Apache License 2.0
package com.rarebooks.library

import akka.actor.{ Actor, ActorRef, ActorLogging, Props, Stash }
import scala.concurrent.duration.FiniteDuration

object Librarian {

  import Catalog._
  import RareBooksProtocol._

  final case class Done(e: Either[BookNotFound, BookFound], customer: ActorRef)

  def props(findBookDuration: FiniteDuration): Props =
    Props(new Librarian(findBookDuration))

  
  // NOTE: in the full source this helper sits inside the Librarian actor, where `log`
  // (from ActorLogging) is in scope; the actor's class body is elided in this excerpt.
  private def process(r: Either[BookNotFound, BookFound], sender: ActorRef): Unit = {
    r fold (
      f => {
        sender ! f
        log.info(f.toString)
      },
      s => sender ! s)
  }
} 
Example 63
Source File: RareBooks.scala    From reactive-application-development-scala   with Apache License 2.0
package com.rarebooks.library

import akka.actor.{ Actor, ActorLogging, Props, Stash }
import akka.routing.{ ActorRefRoutee, Router, RoundRobinRoutingLogic }
import scala.concurrent.duration.{ MILLISECONDS => Millis, FiniteDuration, Duration }

object RareBooks {

  case object Close
  case object Open
  case object Report

  def props: Props =
    Props(new RareBooks)
}

class RareBooks extends Actor with ActorLogging with Stash {

  import context.dispatcher
  import RareBooks._
  import RareBooksProtocol._

  private val openDuration: FiniteDuration =
    Duration(context.system.settings.config.getDuration("rare-books.open-duration", Millis), Millis)

  private val closeDuration: FiniteDuration =
    Duration(context.system.settings.config.getDuration("rare-books.close-duration", Millis), Millis)

  private val nbrOfLibrarians: Int = context.system.settings.config getInt "rare-books.nbr-of-librarians"

  private val findBookDuration: FiniteDuration =
    Duration(context.system.settings.config.getDuration("rare-books.librarian.find-book-duration", Millis), Millis)

  var requestsToday: Int = 0
  var totalRequests: Int = 0

  var router: Router = createLibrarian()

  context.system.scheduler.scheduleOnce(openDuration, self, Close)

  
  protected def createLibrarian(): Router = {
    var cnt: Int = 0
    val routees: Vector[ActorRefRoutee] = Vector.fill(nbrOfLibrarians) {
      val r = context.actorOf(Librarian.props(findBookDuration), s"librarian-$cnt")
      cnt += 1
      ActorRefRoutee(r)
    }
    Router(RoundRobinRoutingLogic(), routees)
  }
} 
Example 64
Source File: ContextsMaster.scala    From mist   with Apache License 2.0
package io.hydrosphere.mist.master.execution

import akka.actor.{Actor, ActorLogging, ActorRef, Props}
import io.hydrosphere.mist.master.models.ContextConfig
import io.hydrosphere.mist.utils.akka.{ActorF, ActorFSyntax}

class ContextsMaster(
  frontendF: ActorF[ContextConfig]
) extends Actor with ActorLogging with ActorFSyntax {

  type State = Map[String, ActorRef]

  override def receive: Receive = process(Map.empty)

  private def process(state: State): Receive = {
    case run: ContextEvent.RunJobCommand =>
      val (next, ref) = getOrCreate(state, run.context)
      ref forward run.request
      context become process(next)

    case c @ ContextEvent.CancelJobCommand(name, req) =>
      state.get(name) match {
        case Some(ref) => ref forward req
        case None => sender() ! akka.actor.Status.Failure(new IllegalStateException("Can't cancel job on stopped/unknown context"))
      }

    case upd @ ContextEvent.UpdateContext(ctx) =>
      state.get(ctx.name) match {
        case Some(ref) => ref forward upd
        case None =>
          val (next, ref) = getOrCreate(state, ctx)
          context become process(next)
      }

    case ContextsMaster.ContextTerminated(name) =>
      val next = state - name
      context become process(next)
  }

  private def getOrCreate(state: State, ctx: ContextConfig): (State, ActorRef) = {
    state.get(ctx.name) match {
      case Some(r) => (state, r)
      case None =>
        val ref = frontendF.create(ctx)
        val next = state + (ctx.name -> ref)
        context.watchWith(ref, ContextsMaster.ContextTerminated(ctx.name))
        (next, ref)
    }
  }

}


object ContextsMaster {

  case class ContextTerminated(name: String)

  def props(contextF: ActorF[ContextConfig]): Props = Props(classOf[ContextsMaster], contextF)

} 
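ContextsMaster relies on context.watchWith, which delivers a custom message instead of the generic Terminated when the watched actor stops. A minimal sketch of the same pattern (Worker, Registry and WorkerTerminated are hypothetical names):

import akka.actor.{ Actor, ActorLogging, ActorRef, Props }

class Worker extends Actor {
  def receive = { case "done" => context.stop(self) }
}

final case class WorkerTerminated(name: String)

class Registry extends Actor with ActorLogging {
  override def receive: Receive = process(Map.empty)

  private def process(state: Map[String, ActorRef]): Receive = {
    case ("spawn", name: String) =>
      val ref = context.actorOf(Props(new Worker), name)
      // Deliver WorkerTerminated(name) instead of Terminated(ref) when the worker stops.
      context.watchWith(ref, WorkerTerminated(name))
      context become process(state + (name -> ref))

    case WorkerTerminated(name) =>
      log.info("worker {} terminated, dropping it from the registry", name)
      context become process(state - name)
  }
}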
Example 65
Source File: RestartSupervisor.scala    From mist   with Apache License 2.0
package io.hydrosphere.mist.utils.akka

import akka.pattern.pipe
import akka.actor.{Actor, ActorLogging, ActorRef, ActorRefFactory, Props, ReceiveTimeout, SupervisorStrategy, Terminated, Timers}
import io.hydrosphere.mist.utils.Logger

import scala.concurrent.{Future, Promise}
import scala.concurrent.duration._

class RestartSupervisor(
  name: String,
  start: () => Future[ActorRef],
  timeout: FiniteDuration,
  maxRetry: Int
) extends Actor with ActorLogging with Timers {

  override def receive: Receive = init

  import context._
  import RestartSupervisor._

  private def init: Receive = {
    case Event.Start(req) =>
      start().map(Event.Started) pipeTo self
      context become await(Some(req), 0)
  }

  private def await(req: Option[Promise[ActorRef]], attempts: Int): Receive = {
    case Event.Started(ref) =>
      req.foreach(_.success(self))
      context watch ref
      context become proxy(ref)

    case akka.actor.Status.Failure(e)  if maxRetry == attempts + 1 =>
      req.foreach(_.failure(e))
      log.error(e, "Starting child for {} failed, maxRetry reached", name)
      context stop self

    case akka.actor.Status.Failure(e) =>
      log.error(e, "Starting child for {} failed", name)
      timers.startSingleTimer("timeout", Event.Timeout, timeout)
      context become restartTimeout(req, attempts)
  }

  private def proxy(ref: ActorRef): Receive = {
    case Terminated(_) =>
      log.error(s"Reference for {} was terminated. Restarting", name)
      timers.startSingleTimer("timeout", Event.Timeout, timeout)
      context become restartTimeout(None, 0)

    case x => ref.forward(x)
  }

  private def restartTimeout(req: Option[Promise[ActorRef]], attempts: Int): Receive = {
    case Event.Timeout =>
      start().map(Event.Started) pipeTo self
      context become await(req, attempts + 1)
  }
}

object RestartSupervisor {

  sealed trait Event
  object Event {
    final case class Start(req: Promise[ActorRef]) extends Event
    case object Restart extends Event
    final case class Started(ref: ActorRef) extends Event
    case object Timeout extends Event
  }


  def props(
    name: String,
    start: () => Future[ActorRef],
    timeout: FiniteDuration,
    maxRetry: Int
  ): Props = {
    Props(classOf[RestartSupervisor], name, start, timeout, maxRetry)
  }

  def wrap(
    name: String,
    start: () => Future[ActorRef],
    timeout: FiniteDuration,
    maxRetry: Int
  )(implicit af: ActorRefFactory): Future[ActorRef] = {

    val ref = af.actorOf(props(name, start, timeout, maxRetry))
    val promise = Promise[ActorRef]
    ref ! Event.Start(promise)
    promise.future
  }

  def wrap(
    name: String,
    maxRetry: Int,
    start: () => Future[ActorRef]
  )(implicit af: ActorRefFactory): Future[ActorRef] = wrap(name, start, 5 seconds, maxRetry)(af)

} 
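A usage sketch for wrap (the ActorSystem and EchoWorker below are assumptions, not part of mist): the returned Future completes with the supervisor's ActorRef once the first child start succeeds, and anything sent to that ref afterwards is forwarded to the live child.

import akka.actor.{ Actor, ActorRef, ActorSystem, Props }
import scala.concurrent.Future

class EchoWorker extends Actor {
  def receive = { case msg => sender() ! msg }
}

object RestartSupervisorUsage extends App {
  implicit val system: ActorSystem = ActorSystem("example")
  import system.dispatcher

  // arguments: name, maxRetry, start function
  val supervised: Future[ActorRef] =
    RestartSupervisor.wrap("echo", 3, () => Future(system.actorOf(Props(new EchoWorker))))

  supervised.foreach(_ ! "ping") // forwarded to the supervised child
}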
Example 66
Source File: ClusterListener.scala    From akka-cluster-playground   with MIT License
package com.elleflorio.cluster.playground.node.cluster

import akka.actor.{Actor, ActorLogging, Props}
import akka.cluster.Cluster
import akka.cluster.ClusterEvent._

object ClusterListener {
  def props(nodeId: String, cluster: Cluster) = Props(new ClusterListener(nodeId, cluster))
}

class ClusterListener(nodeId: String, cluster: Cluster) extends Actor with ActorLogging {

  override def preStart(): Unit = {
    cluster.subscribe(self, initialStateMode = InitialStateAsEvents,
      classOf[MemberEvent], classOf[UnreachableMember])
  }

  override def postStop(): Unit = cluster.unsubscribe(self)

  def receive = {
    case MemberUp(member) =>
      log.info("Node {} - Member is Up: {}", nodeId, member.address)
    case UnreachableMember(member) =>
      log.info(s"Node {} - Member detected as unreachable: {}", nodeId, member)
    case MemberRemoved(member, previousStatus) =>
      log.info(s"Node {} - Member is Removed: {} after {}",
        nodeId, member.address, previousStatus)
    case _: MemberEvent => // ignore
  }
} 
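Wiring the listener up is a one-liner (a sketch, assuming an ActorSystem called system with clustering enabled); subscription happens in preStart and is undone in postStop:

val listener = system.actorOf(ClusterListener.props("node-1", Cluster(system)), "clusterListener")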
Example 67
Source File: ClusterManager.scala    From akka-cluster-playground   with MIT License
package com.elleflorio.cluster.playground.node.cluster

import akka.actor.{Actor, ActorLogging, ActorRef, Props}
import akka.cluster.{Cluster, MemberStatus}
import com.elleflorio.cluster.playground.Server.system
import com.elleflorio.cluster.playground.node.cluster.ClusterManager.GetMembers

object ClusterManager {

  sealed trait ClusterMessage
  case object GetMembers extends ClusterMessage

  def props(nodeId: String) = Props(new ClusterManager(nodeId))
}

class ClusterManager(nodeId: String) extends Actor with ActorLogging {

  val cluster: Cluster = Cluster(context.system)
  val listener: ActorRef = context.actorOf(ClusterListener.props(nodeId, cluster), "clusterListener")

  override def receive: Receive = {
    case GetMembers => {
      sender() ! cluster.state.members.filter(_.status == MemberStatus.up)
        .map(_.address.toString)
        .toList
    }
  }
} 
Example 68
Source File: Mapper.scala    From CSYE7200   with MIT License
package edu.neu.coe.csye7200.mapreduce

import akka.actor.{Actor, ActorLogging, ActorRef}

import scala.collection.mutable
import scala.collection.mutable.HashMap
import scala.util._


class Mapper_Forgiving[K1,V1,K2,V2](f: (K1,V1)=>(K2,V2)) extends Mapper[K1,V1,K2,V2](f) {
  
  override def prepareReply(v2k2ts: Seq[Try[(K2,V2)]]): (Map[K2, Seq[V2]], Seq[Throwable]) = {
    val v2sK2m = mutable.HashMap[K2, Seq[V2]]() // mutable
    val xs = mutable.ListBuffer[Throwable]() // mutable; an immutable Seq with `xs :+ x` would silently drop the failures
    // CONSIDER using traverse
    for (v2k2t <- v2k2ts; v2k2e = Master.sequence(v2k2t))
      v2k2e match {
        case Right((k2, v2)) => v2sK2m put (k2, v2 +: v2sK2m.getOrElse(k2, Nil))
        case Left(x) => xs += x
      }
    (v2sK2m.toMap, xs.toList)
  }
}

case class Incoming[K, V](m: Seq[(K,V)]) {
  override def toString = s"Incoming: with ${m.size} elements"
}

object Incoming {
  def sequence[K,V](vs: Seq[V]): Incoming[K,V] = Incoming((vs zip Stream.continually(null.asInstanceOf[K])).map{_.swap})
  def map[K, V](vKm: Map[K,V]): Incoming[K,V] = Incoming(vKm.toSeq)
}

object Mapper {
} 
Example 69
Source File: Reducer.scala    From CSYE7200   with MIT License
package edu.neu.coe.csye7200.mapreduce

import akka.actor.{ Actor, ActorLogging, ActorRef }
//import scala.collection.mutable.HashMap
import scala.util._


class Reducer_Fold[K2,V2,V3](g: (V3,V2)=>V3, z: =>V3) extends ReducerBase[K2,V2,V3] {  
  def getValue(vs: Seq[V2]): V3 = vs.foldLeft(z)(g)
}

abstract class ReducerBase[K2,V2,V3] extends Actor with ActorLogging {
  
  override def receive = {
    case i: Intermediate[K2,V2] =>
      log.info(s"received $i")
      log.debug(s"with elements ${i.vs}")
      sender ! (i.k, Master.sequence(Try(getValue(i.vs))))
    case q =>
      log.warning(s"received unknown message type: $q")
  }
  
  override def postStop = {
    log.debug("has shut down")
  }
  
  def getValue(vs: Seq[V2]): V3
}


case class Intermediate[K2, V2](k: K2, vs: Seq[V2]) {
  override def toString = s"Intermediate: with k=$k and ${vs.size} elements"
} 
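A usage sketch for Reducer_Fold, assuming an ActorSystem named system and the project's Master.sequence helper: the reducer folds the values of each Intermediate it receives and replies to the sender with the key and the wrapped fold result.

val sum = system.actorOf(Props(new Reducer_Fold[String, Int, Int]((acc, v) => acc + v, 0)), "sum-reducer")
sum ! Intermediate("word", Seq(1, 2, 3)) // replies ("word", Master.sequence(Try(6))) to the sender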
Example 70
Source File: ClusterListenerTestActor.scala    From NSDb   with Apache License 2.0
package io.radicalbit.nsdb.cluster.actor

import akka.actor.{Actor, ActorLogging, Deploy}
import akka.cluster.Cluster
import akka.cluster.ClusterEvent.{InitialStateAsEvents, MemberEvent, MemberUp, UnreachableMember}
import akka.cluster.pubsub.DistributedPubSub
import akka.cluster.pubsub.DistributedPubSubMediator.Subscribe
import akka.remote.RemoteScope
import io.radicalbit.nsdb.cluster.PubSubTopics.NODE_GUARDIANS_TOPIC
import io.radicalbit.nsdb.cluster.actor.ClusterListener.{GetNodeMetrics, NodeMetricsGot}
import io.radicalbit.nsdb.cluster.createNodeName

class ClusterListenerTestActor extends Actor with ActorLogging {

  private lazy val cluster  = Cluster(context.system)
  private lazy val mediator = DistributedPubSub(context.system).mediator

  override def preStart(): Unit = {
    cluster.subscribe(self, initialStateMode = InitialStateAsEvents, classOf[MemberEvent], classOf[UnreachableMember])
  }

  override def receive: Receive = {
    case MemberUp(member) if member == cluster.selfMember =>

      val nodeName = createNodeName(member)

      val nodeActorsGuardian =
        context.system.actorOf(NodeActorsGuardian.props(self).withDeploy(Deploy(scope = RemoteScope(member.address))),
          name = s"guardian_$nodeName")

      mediator ! Subscribe(NODE_GUARDIANS_TOPIC, nodeActorsGuardian)
    case GetNodeMetrics =>
      sender() ! NodeMetricsGot(Set.empty)
  }

} 
Example 71
Source File: MockedCommitLogCoordinator.scala    From NSDb   with Apache License 2.0
package io.radicalbit.nsdb.cluster.coordinator.mockedActors

import akka.actor.{Actor, ActorLogging, ActorRef, Props}
import io.radicalbit.nsdb.cluster.actor.MetricsDataActor.AddRecordToLocation
import io.radicalbit.nsdb.commit_log.CommitLogWriterActor.{
  WriteToCommitLog,
  WriteToCommitLogFailed,
  WriteToCommitLogSucceeded
}
import io.radicalbit.nsdb.protocol.MessageProtocol.Commands.DeleteRecordFromShard
import io.radicalbit.nsdb.protocol.MessageProtocol.Events.{RecordAdded, RecordRejected}

class MockedCommitLogCoordinator(probe: ActorRef) extends Actor with ActorLogging {
  override def receive: Receive = {
    case msg @ WriteToCommitLog(db, namespace, metric, timestamp, _, location)
        if location.node == "node1" && metric != "metric2" =>
      probe ! msg
      sender ! WriteToCommitLogSucceeded(db, namespace, timestamp, metric, location)
    case msg @ WriteToCommitLog(db, namespace, metric, timestamp, _, location)
        if location.node == "node2" && metric != "metric2" =>
      probe ! msg
      sender ! WriteToCommitLogFailed(db, namespace, timestamp, metric, "mock failure reason")
    case msg @ WriteToCommitLog(db, namespace, metric, timestamp, _, location) =>
      probe ! msg
      sender ! WriteToCommitLogSucceeded(db, namespace, timestamp, metric, location)
    case _ =>
      log.error("Not handled")
  }
}

object MockedCommitLogCoordinator {
  def props(probe: ActorRef): Props =
    Props(new MockedCommitLogCoordinator(probe))
}

class MockedMetricsDataActor(probe: ActorRef) extends Actor with ActorLogging {

  override def receive: Receive = {
    case msg @ AddRecordToLocation(db, namespace, bit, location) if location.node == "node1" =>
      probe ! msg
      sender() ! RecordAdded(db, namespace, location.metric, bit, location, System.currentTimeMillis())
    case msg @ AddRecordToLocation(db, namespace, bit, location) if location.node == "node2" =>
      probe ! msg
      sender() ! RecordRejected(db,
                                namespace,
                                location.metric,
                                bit,
                                location,
                                List("errrrros"),
                                System.currentTimeMillis())
    case msg @ DeleteRecordFromShard(_, _, _, _) =>
      probe ! msg
  }
}

object MockedMetricsDataActor {
  def props(probe: ActorRef): Props =
    Props(new MockedMetricsDataActor(probe))
} 
Example 72
Source File: PersistentQueueCloserActor.scala    From squbs   with Apache License 2.0
package org.squbs.pattern.stream

import akka.actor.{ActorLogging, Actor}

class PersistentQueueCloserActor[T](queue: PersistentQueue[T]) extends Actor with ActorLogging {

  val pushIndex = Array.ofDim[Long](queue.totalOutputPorts)
  val commitIndex = Array.ofDim[Long](queue.totalOutputPorts)

  override def receive: Receive = {
    case PushedAndCommitted(outportId, pIndex, cIndex) =>
      pushIndex(outportId) = pIndex
      if(cIndex > commitIndex(outportId)) commitIndex(outportId) = cIndex
    case Committed(outportId, index) => commitIndex(outportId) = index
    case UpstreamFailed => close()
    case UpstreamFinished =>
      if(pushIndex.sameElements(commitIndex)) close()
      else {
        context.become({
          case Committed(outportId, index) =>
            commitIndex(outportId) = index
            if (pushIndex.sameElements(commitIndex)) close()
        })
      }
  }

  private def close(): Unit = {
    queue.close()
    context.stop(self)
  }
}

case class PushedAndCommitted(outportId: Int, pushIndex: Long, commitIndex: Long)
case class Committed(outportId: Int, index: Long)
case object UpstreamFailed
case object UpstreamFinished 
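The closer calls queue.close() only once every output port's commit index has caught up with its push index. A message-level sketch of that handshake (system is an ActorSystem and queue an existing PersistentQueue[T] with a single output port; both are assumptions):

val closer = system.actorOf(Props(new PersistentQueueCloserActor(queue)))
closer ! PushedAndCommitted(0, 10L, 8L) // port 0: pushed up to 10, committed up to 8
closer ! UpstreamFinished               // indexes differ, so the actor parks in the become-block
closer ! Committed(0, 10L)              // commit catches up: queue.close() runs and the actor stops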
Example 73
Source File: InitFailActor.scala    From squbs   with Apache License 2.0
package org.squbs.unicomplex.initfail

import org.squbs.lifecycle.{GracefulStop, GracefulStopHelper}
import org.squbs.unicomplex.Unicomplex._
import scala.util.Try
import org.squbs.unicomplex.Initialized
import akka.actor.{Actor, ActorLogging}

class InitFailActor extends Actor with ActorLogging with GracefulStopHelper {

  // do initialization
  def init: InitReport = {
    log.info("initializing")
    Try {
      // do some tasks
      throw new Exception("Init failed")
    }
  }

  context.parent ! Initialized(init)

  def receive = {
    case GracefulStop => defaultLeafActorStop

    case other => sender ! other
  }

} 
Example 74
Source File: InitBlockActor.scala    From squbs   with Apache License 2.0
package org.squbs.unicomplex.initblock

import org.squbs.unicomplex.Unicomplex._
import scala.util.Try
import org.squbs.lifecycle.{GracefulStopHelper, GracefulStop}
import akka.actor.{Actor, ActorLogging}

class InitBlockActor extends Actor with ActorLogging with GracefulStopHelper{

  // do initialization
  def init: InitReport = {
    log.info("initializing")
    Try {
      // do some tasks
      throw new Exception("Init blocked")
    }
  }

  // never send the report
  //context.parent ! Initialized(init)

  def receive = {
    case GracefulStop => defaultLeafActorStop

    case other => sender ! other
  }

} 
Example 75
Source File: PingPongSvc.scala    From squbs   with Apache License 2.0
package org.squbs.unicomplex.dummycubesvc

import akka.actor.{Actor, ActorLogging, ActorRef, Props}
import akka.http.scaladsl.server.Route
import akka.pattern.ask
import org.squbs.lifecycle.{GracefulStop, GracefulStopHelper}
import org.squbs.unicomplex.Timeouts._
import org.squbs.unicomplex.{Ping, Pong, RouteDefinition}

class PingPongSvc extends RouteDefinition{

  def route: Route = path("ping") {
    get {
      onSuccess((context.actorOf(Props(classOf[PingPongClient])) ? "ping").mapTo[String]) {
        case value => complete(value)
      }
    }
  } ~
  path("pong") {
    get {
      onSuccess((context.actorOf(Props(classOf[PingPongClient])) ? "pong").mapTo[String]) {
        case value => complete(value)
      }
    }
  }

}

private class PingPongClient extends Actor with ActorLogging {

  private val pingPongActor = context.actorSelection("/user/DummyCubeSvc/PingPongPlayer")

  def ping(responder: ActorRef): Receive = {
    case Pong => responder ! Pong.toString
  }

  def pong(responder: ActorRef): Receive = {
    case Ping => responder ! Ping.toString
  }

  def receive: Receive = {
    case "ping" => pingPongActor ! Ping
      context.become(ping(sender()))

    case "pong" => pingPongActor ! Pong
      context.become(pong(sender()))
  }

}

class PingPongActor extends Actor with ActorLogging with GracefulStopHelper{

  def receive = {
    case GracefulStop => defaultLeafActorStop

    case Ping => sender ! Pong

    case Pong => sender ! Ping
  }
} 
Example 76
Source File: DummySvc.scala    From squbs   with Apache License 2.0
package org.squbs.unicomplex.dummysvc

import akka.actor.{Actor, ActorLogging, ActorRef, Props}
import akka.http.scaladsl.server.Route
import akka.pattern.ask
import org.squbs.unicomplex.Timeouts._
import org.squbs.unicomplex._

class DummySvc extends RouteDefinition with WebContext {
  def route: Route =
    get {
      path("msg" / Segment) { param =>
        onSuccess((context.actorOf(Props(classOf[DummyClient])) ? EchoMsg(param)).mapTo[String]) {
          value => complete(value)
        }
      } ~
      path("who") {
        extractClientIP { ip =>
          complete(ip.toString)
        }
      }
    }
}

class Dummy2VersionedSvc extends RouteDefinition with WebContext {
  def route: Route = path("msg" / Segment) {param =>
    get {
      onSuccess((context.actorOf(Props(classOf[DummyClient])) ? EchoMsg(param)).mapTo[String]) {
        case value => complete(value)
      }
    }
  }
}

class Dummy2Svc extends RouteDefinition with WebContext {
  def route: Route = path("msg" / Segment) {param =>
    get {
      onSuccess((context.actorOf(Props(classOf[DummyClient])) ? EchoMsg(param.reverse)).mapTo[String]) {
        case value => complete(value)
      }
    }
  }
}

private class DummyClient extends Actor with ActorLogging {

  private def receiveMsg(responder: ActorRef): Receive = {

    case AppendedMsg(appendedMsg) => context.actorSelection("/user/DummyCube/Prepender") ! EchoMsg(appendedMsg)

    case PrependedMsg(prependedMsg) =>
      responder ! prependedMsg
      context.stop(self)
  }

  def receive: Receive = {
    case msg: EchoMsg =>
      context.actorSelection("/user/DummyCube/Appender") ! msg
      context.become(receiveMsg(sender()))
  }
} 
Example 77
Source File: CubeB.scala    From squbs   with Apache License 2.0
package org.squbs.unicomplex.cubeB

import akka.actor.{Actor, ActorLogging}
import org.squbs.lifecycle.{GracefulStop, GracefulStopHelper}
import org.squbs.unicomplex.Initialized
import org.squbs.unicomplex.Unicomplex.InitReport

import scala.util.Try

class InitCubeActorB extends Actor with ActorLogging with GracefulStopHelper  {

  // do initialization
  def init: InitReport = {
    log.info("initializing")
    Try {
      // do some tasks
      Some("InitCubeActorB")
    }
  }

  context.parent ! Initialized(init)

  def receive = {
    case GracefulStop => defaultLeafActorStop

    case other => sender ! other
  }

} 
Example 78
Source File: LoadContentOnStartup.scala    From metronome   with Apache License 2.0
package dcos.metronome
package repository

import akka.actor.{Actor, ActorLogging, Stash}
import mesosphere.marathon.StoreCommandFailedException
import org.apache.zookeeper.KeeperException.NoNodeException

import scala.concurrent.Future
import scala.util.control.NonFatal
import scala.util.{Failure, Success}

trait LoadContentOnStartup[Id, Model] extends Actor with Stash with ActorLogging {
  import LoadContentOnStartup._

  //TODO: change me to zk ec
  import context.dispatcher

  override def preStart(): Unit = {
    super.preStart()
    context.become(waitForInit)
    loadAll()
  }

  def repo: Repository[Id, Model]
  def initialize(specs: List[Model]): Unit

  def waitForInit: Receive = {
    case init: Init[Model] =>
      initialize(init.result)
      context.become(receive)
      unstashAll()
    case _ => stash()
  }

  def loadAll(): Unit = {
    val loadAllFuture = repo.ids().flatMap { ids =>
      Future.sequence(ids.map(id => getModel(id))).map(_.flatten.toList)
    }
    val me = self
    loadAllFuture.onComplete {
      case Success(result) => me ! Init(result)
      case Failure(ex) =>
        log.error(ex, "Can not load initial data. Give up.")
        System.exit(-1)
    }
  }

  private def getModel(id: Id): Future[Option[Model]] = {
    repo.get(id).recoverWith {
      case ex: StoreCommandFailedException =>
        ex.getCause match {
          case cause: NoNodeException =>
            log.error(
              s"ID $id or job-specs znode missing. Zk will need to be manually repaired.  Exception message: ${cause.getMessage}"
            )
            Future.successful(None)
          case NonFatal(cause) =>
            log.error(s"Unexpected exception occurred in reading zk at startup. Exception message: ${cause.getMessage}")
            // We need crash strategy similar to marathon, for now we can NOT continue with such a zk failure.
            System.exit(-1)
            Future.failed(cause)
        }
    }
  }
}

object LoadContentOnStartup {
  case class Init[T](result: List[T])
} 
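A sketch of mixing the trait into a concrete actor (Repository[String, JobSpec] and JobSpec stand in for the real metronome types): messages arriving before the Init result are stashed by waitForInit and replayed once initialize has run.

class JobSpecLoader(val repo: Repository[String, JobSpec]) extends LoadContentOnStartup[String, JobSpec] {
  private var specs: List[JobSpec] = Nil

  override def initialize(loaded: List[JobSpec]): Unit = specs = loaded

  override def receive: Receive = {
    case "count" => sender() ! specs.size
  }
}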
Example 79
Source File: NoConcurrentRepoChange.scala    From metronome   with Apache License 2.0
package dcos.metronome
package repository

import akka.actor.{Actor, ActorLogging, ActorRef, Stash}

import scala.concurrent.{ExecutionContext, Future}
import scala.util.control.NonFatal
import scala.util.{Failure, Success}

trait NoConcurrentRepoChange[Id, Model, Data] extends Actor with ActorLogging with Stash {
  import NoConcurrentRepoChange._

  final def repoChange(
      change: => Future[Model],
      data: Data,
      onSuccess: (ActorRef, Model, Data) => Change,
      onFailed: (ActorRef, Throwable, Data) => Failed
  )(implicit ec: ExecutionContext): Unit = {
    val from = sender()
    try {
      val changed = change //can throw an exception, so execute before we enter waiting state
      context.become(waitForPersisted, discardOld = false)
      changed.onComplete {
        case Success(result) => self ! onSuccess(from, result, data)
        case Failure(ex) => self ! onFailed(from, ex, data)
      }
    } catch {
      case NonFatal(ex) =>
        log.error(ex, "Could not apply repository change")
        notifySender(from, onFailed(from, ex, data))
    }
  }

  private[this] def waitForPersisted: Receive = {
    case event: Failed =>
      log.error(event.ex, "Repository change failed")
      notifySender(event.sender, event)
    case event: Change =>
      notifySender(event.sender, event)
    case _ => stash()
  }

  private[this] def notifySender(recipient: ActorRef, message: Any): Unit = {
    context.unbecome()
    recipient ! message
    unstashAll()
  }
}

object NoConcurrentRepoChange {

  trait Change {
    def sender: ActorRef
  }

  trait Failed {
    def sender: ActorRef
    def ex: Throwable
  }
} 
Example 80
Source File: ConnectedCarActor.scala    From cloudflow   with Apache License 2.0
package connectedcar.actors

import akka.actor.{ Actor, ActorLogging, ActorRef }
import akka.cluster.sharding.ShardRegion
import connectedcar.data.{ ConnectedCarAgg, ConnectedCarERecord }

import scala.concurrent.ExecutionContext

object ConnectedCarActor {
  val extractEntityId: ShardRegion.ExtractEntityId = {
    case msg: ConnectedCarERecord ⇒ (msg.carId.toString, msg)
  }

  private val numberOfShards = 100

  val extractShardId: ShardRegion.ExtractShardId = {
    case msg: ConnectedCarERecord ⇒ (msg.carId % numberOfShards).toString
  }
}

class ConnectedCarActor extends Actor with ActorLogging {

  val carId: String      = "Car-" + self.path.name
  var driverName: String = null
  var currentSpeed       = 0.0
  var averageSpeed       = 0.0
  var numberOfRecords    = 0

  var treeActor: ActorRef           = null
  implicit val ec: ExecutionContext = context.dispatcher

  override def receive: Receive = {
    case record: ConnectedCarERecord ⇒ {
      if (numberOfRecords == 0) {
        driverName = record.driver
        averageSpeed = record.speed
      } else {
        averageSpeed = ((averageSpeed * numberOfRecords) + record.speed) / (numberOfRecords + 1)
      }

      numberOfRecords += 1
      currentSpeed = record.speed

      log.info("Updated CarId: " + carId + " Driver Name: " + driverName + " CarSpeed: " + currentSpeed + " From Actor:" + sender().path)

      sender() ! ConnectedCarAgg(record.carId, record.driver, averageSpeed, numberOfRecords)
    }
  }
} 
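The companion's extractEntityId and extractShardId exist to be handed to Cluster Sharding when the region is started. A wiring sketch (the ActorSystem and the type name are assumptions):

import akka.actor.Props
import akka.cluster.sharding.{ ClusterSharding, ClusterShardingSettings }

val carRegion = ClusterSharding(system).start(
  typeName = "ConnectedCarActor",
  entityProps = Props(new ConnectedCarActor),
  settings = ClusterShardingSettings(system),
  extractEntityId = ConnectedCarActor.extractEntityId,
  extractShardId = ConnectedCarActor.extractShardId
)
// carRegion ! someConnectedCarERecord  -- the region routes each record to its car entity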
Example 81
Source File: ArtifactS3Saver.scala    From marvin-engine-executor   with Apache License 2.0
package org.marvin.artifact.manager

import java.io.File

import akka.Done
import akka.actor.{Actor, ActorLogging}
import com.amazonaws.services.s3.model.GetObjectRequest
import com.amazonaws.services.s3.{AmazonS3, AmazonS3ClientBuilder}
import org.apache.hadoop.fs.Path
import org.marvin.artifact.manager.ArtifactSaver.{SaveToLocal, SaveToRemote}
import org.marvin.model.EngineMetadata

class ArtifactS3Saver(metadata: EngineMetadata) extends Actor with ActorLogging {
  var s3Client: AmazonS3 = _

  override def preStart() = {
    log.info(s"${this.getClass().getCanonicalName} actor initialized...")

    // Create S3 client with default credential information (from environment variables)
    s3Client = AmazonS3ClientBuilder.standard.withRegion(System.getenv("AWS_DEFAULT_REGION")).build

    log.info("Amazon S3 client initialized...")
  }

  def generatePaths(artifactName: String, protocol: String): Map[String, Path] = {
    var artifactsRemotePath: String = null
    if(metadata.artifactsRemotePath.startsWith("/")){
      artifactsRemotePath = metadata.artifactsRemotePath.substring(1)
    }
    Map(
      "localPath" -> new Path(s"${metadata.artifactsLocalPath}/${metadata.name}/$artifactName"),
      "remotePath" -> new Path(s"${artifactsRemotePath}/${metadata.name}/${metadata.version}/$artifactName/$protocol")
    )
  }

  def validatePath(path: Path, isRemote: Boolean): Boolean = {
    if (isRemote) {
      s3Client.doesObjectExist(metadata.s3BucketName, path.toString)
    } else {
      new java.io.File(path.toString).exists
    }
  }

  override def receive: Receive = {
    case SaveToLocal(artifactName, protocol) =>
      log.info("Receive message and starting to working...")
      val uris = generatePaths(artifactName, protocol)
      val localToSave = new File(uris("localPath").toString)

      // Validate if the protocol is correct
      if (validatePath(uris("remotePath"), true)) {
        log.info(s"Copying files from ${metadata.s3BucketName}: ${uris("remotePath")} to ${uris("localPath")}")
        //Get artifact named "uris("remotePath")" from S3 Bucket and save it to local
        s3Client.getObject(new GetObjectRequest(metadata.s3BucketName, uris("remotePath").toString), localToSave)
        log.info(s"File ${uris("localPath")} saved!")
      }
      else {
        log.error(s"Invalid protocol: ${protocol}, save process canceled!")
      }

      sender ! Done

    case SaveToRemote(artifactName, protocol) =>
      log.info("Receive message and starting to working...")
      val uris = generatePaths(artifactName, protocol)
      val fileToUpload = new File(uris("localPath").toString)

      // Validate if the protocol is correct
      if (validatePath(uris("localPath"), false)) {
        log.info(s"Copying files from ${uris("localPath")} to ${metadata.s3BucketName}: ${uris("remotePath")}")
        //Get local artifact and save to S3 Bucket with name "uris("remotePath")"
        s3Client.putObject(metadata.s3BucketName, uris("remotePath").toString, fileToUpload)
        log.info(s"File ${uris("localPath")} saved!")
      }
      else {
        log.error(s"Invalid protocol: ${protocol}, save process canceled!")
      }

      sender ! Done

    case _ =>
      log.warning("Received a bad format message...")
  }
} 
Example 82
Source File: ArtifactHdfsSaver.scala    From marvin-engine-executor   with Apache License 2.0
package org.marvin.artifact.manager

import java.io.{File, FileInputStream}

import akka.Done
import akka.actor.{Actor, ActorLogging}
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.{FileSystem, Path}
import org.marvin.artifact.manager.ArtifactSaver.{SaveToLocal, SaveToRemote}
import org.marvin.model.EngineMetadata

class ArtifactHdfsSaver(metadata: EngineMetadata) extends Actor with ActorLogging {
  var conf: Configuration = _

  override def preStart() = {
    log.info(s"${this.getClass().getCanonicalName} actor initialized...")
    conf = new Configuration()

    if (sys.env.get("HADOOP_CONF_DIR") != None){
      val confFiles:List[File] = getListOfFiles(sys.env.get("HADOOP_CONF_DIR").mkString)

      for(file <- confFiles){
        log.info(s"Loading ${file.getAbsolutePath} file to hdfs client configuration ..")
        conf.addResource(new FileInputStream(file))
      }
    }

    conf.set("fs.defaultFS", metadata.hdfsHost)
  }

  def generatePaths(artifactName: String, protocol: String): Map[String, Path] = {
    Map(
      "localPath" -> new Path(s"${metadata.artifactsLocalPath}/${metadata.name}/$artifactName"),
      "remotePath" -> new Path(s"${metadata.artifactsRemotePath}/${metadata.name}/${metadata.version}/$artifactName/$protocol")
    )
  }

  def getListOfFiles(path: String): List[File] = {
    val dir = new File(path)
    val extensions = List("xml")
    dir.listFiles.filter(_.isFile).toList.filter { file =>
      extensions.exists(file.getName.endsWith(_))
    }
  }

  def validatePath(path: Path, isRemote: Boolean, fs: FileSystem): Boolean = {
    if (isRemote) {
      fs.exists(path)
    } else {
      new java.io.File(path.toString).exists
    }
  }

  override def receive: Receive = {
    case SaveToLocal(artifactName, protocol) =>
      log.info("Receive message and starting to working...")
      val fs = FileSystem.get(conf)
      val uris = generatePaths(artifactName, protocol)

      if (validatePath(uris("remotePath"), true, fs)) {
        log.info(s"Copying files from ${uris("remotePath")} to ${uris("localPath")}")
        fs.copyToLocalFile(false, uris("remotePath"), uris("localPath"), false)
        fs.close()
        log.info(s"File ${uris("localPath")} saved!")
      }
      else {
        log.error(s"Invalid protocol: ${protocol}, save process canceled!")
      }

      sender ! Done

    case SaveToRemote(artifactName, protocol) =>
      log.info("Receive message and starting to working...")
      val fs = FileSystem.get(conf)
      val uris = generatePaths(artifactName, protocol)

      if (validatePath(uris("localPath"), false, fs)) {
        log.info(s"Copying files from ${uris("localPath")} to ${uris("remotePath")}")
        fs.copyFromLocalFile(uris("localPath"), uris("remotePath"))
        fs.close()
        log.info(s"File ${uris("localPath")} saved!")
      }
      else {
        log.error(s"Invalid protocol: ${protocol}, save process canceled!")
      }

      sender ! Done

    case _ =>
      log.warning("Received a bad format message...")
  }
} 
Example 83
Source File: ArtifactFSSaver.scala    From marvin-engine-executor   with Apache License 2.0
package org.marvin.artifact.manager

import java.nio.file.{Files, Path, Paths, StandardCopyOption}

import akka.Done
import akka.actor.{Actor, ActorLogging}
import org.marvin.artifact.manager.ArtifactSaver.{GetArtifact, SaveToLocal, SaveToRemote}
import org.marvin.model.EngineMetadata

class ArtifactFSSaver(metadata: EngineMetadata) extends Actor with ActorLogging {
  override def preStart() = {
    log.info(s"${this.getClass().getCanonicalName} actor initialized...")
  }

  def generatePaths(artifactName: String, protocol: String): Map[String, Path] = {
    Map(
      "localPath" -> Paths.get(s"${metadata.artifactsLocalPath}/${metadata.name}/$artifactName"),
      "remotePath" -> Paths.get((s"${metadata.artifactsRemotePath}/${metadata.name}/${metadata.version}/$artifactName/$protocol"))
    )
  }

  def copyFile(origin: Path, destination: Path): Unit = {
    if (!destination.getParent.toFile.exists()) destination.getParent.toFile.mkdirs()

    log.info(s"Copying files from ${origin} to ${destination}")

    Files.copy(origin, destination, StandardCopyOption.REPLACE_EXISTING)

    log.info(s"File ${destination} saved!")
  }

  def validatePath(path: Path): Boolean = {
    new java.io.File(path.toString).exists
  }

  override def receive: Receive = {
    case SaveToLocal(artifactName, protocol) =>
      log.info("Receive message and starting to working...")
      val uris = generatePaths(artifactName, protocol)

      // Validate if the protocol is correct
      if (validatePath(uris("remotePath")))
        copyFile(uris("remotePath"), uris("localPath"))
      else
        log.error(s"Invalid protocol: ${protocol}, save process canceled!")

      sender ! Done

    case SaveToRemote(artifactName, protocol) =>
      log.info("Receive message and starting to working...")
      val uris = generatePaths(artifactName, protocol)

      // Validate if the protocol is correct
      if (validatePath(uris("localPath")))
        copyFile(uris("localPath"), uris("remotePath"))
      else
        log.error(s"Invalid protocol: ${protocol}, save process canceled!")

      sender ! Done

    case GetArtifact(artifactName, protocol) =>
      log.info("Receive message and starting to working...")
      val uris = generatePaths(artifactName, protocol)
      var response: String = ""

      // Validate if the protocol is correct
      if (validatePath(uris("localPath")))
        response = scala.io.Source.fromFile(uris("localPath").toString).getLines.mkString
      else
        log.error(s"Invalid protocol: ${protocol}, load process canceled!")

      sender ! response

    case _ =>
      log.warning("Received a bad format message...")
  }
} 
Example 84
Source File: ExecutorManager.scala    From marvin-engine-executor   with Apache License 2.0
package org.marvin.executor.manager

import akka.actor.{Actor, ActorLogging}
import akka.util.Timeout
import org.marvin.executor.api.{GenericAPI, GenericAPIFunctions}
import org.marvin.executor.manager.ExecutorManager.{GetMetadata, StopActor}

import scala.concurrent.ExecutionContext
import scala.concurrent.duration._
import scala.util.{Failure, Success}

object ExecutorManager {
  case class StopActor(actorName: String)
  case class GetMetadata()
}

class ExecutorManager(api: GenericAPIFunctions) extends Actor with ActorLogging {
  implicit val ec = ExecutionContext.global
  implicit val futureTimeout = Timeout(30 seconds)

  override def preStart() = {
    log.info(s"Executor Manager enabled and starting!!!")
    log.info(s"Executor Manager path ${self.path}")
  }

  override def receive  = {
    case StopActor(actorName) =>

      if(api.manageableActors.contains(actorName)){
        val actorRef = api.manageableActors(actorName)

        log.info(s"Actor ${actorRef.path} found. Trying to stop selected actor..")

        context.stop(actorRef)

        log.info(s"Actor ${actorRef.path} successfully stopped!")

        sender ! Success

      }else{
        log.info(s"Actor related with the key ${actorName} is not a valid manageable actor.")
        sender ! Failure
      }

    case GetMetadata =>
      log.info(s"Getting Metadata object from engine ...")
      sender ! api.getMetadata

  }
} 
Example 85
Source File: ExecutorClusterListener.scala    From marvin-engine-executor   with Apache License 2.0
package org.marvin.executor.manager

import akka.cluster.Cluster
import akka.cluster.ClusterEvent._
import akka.actor.{Actor, ActorLogging, Address}

import scala.collection.immutable

class ExecutorClusterListener(seedNodes: immutable.Seq[Address]) extends Actor with ActorLogging {

  var cluster: Cluster = _

  override def preStart(): Unit = {
    cluster = Cluster(context.system)

    log.info(s"Joining to the cluster ${context.system.name} ...")
    cluster.joinSeedNodes(seedNodes)

    log.info(s"Subscribing to the cluster ${context.system.name} ...")
    cluster.subscribe(self, initialStateMode = InitialStateAsEvents, classOf[MemberUp], classOf[MemberEvent], classOf[UnreachableMember])

    log.info(s"Cluster configuration done! :-P")
    log.info(s"Cluster Node Address is ${cluster.selfAddress}")
  }

  override def postStop(): Unit = {
    log.info(s"Leaving cluster ${context.system.name} :-( ...")
    cluster.unsubscribe(self)
    cluster.leave(cluster.selfAddress)
    log.info("Left cluster with success!")
  }

  def receive = {
    case MemberUp(member) =>
      log.info("Member is Up: {}", member.address)

    case UnreachableMember(member) =>
      log.info("Member detected as unreachable: {}", member)

    case MemberRemoved(member, previousStatus) =>
      log.info("Member is Removed: {} after {}", member.address, previousStatus)

    case _: MemberEvent =>
      log.info("Unknown message received ...")
  }
} 
Example 86
Source File: OnlineAction.scala    From marvin-engine-executor   with Apache License 2.0
package org.marvin.executor.actions

import akka.Done
import akka.actor.SupervisorStrategy._
import akka.actor.{Actor, ActorLogging, ActorRef, OneForOneStrategy, Props, Status}
import akka.pattern.{ask, pipe}
import akka.util.Timeout
import io.grpc.StatusRuntimeException
import org.marvin.artifact.manager.ArtifactSaver
import org.marvin.executor.actions.OnlineAction.{OnlineExecute, OnlineHealthCheck, OnlineReload}
import org.marvin.executor.proxies.EngineProxy.{ExecuteOnline, HealthCheck, Reload}
import org.marvin.executor.proxies.OnlineActionProxy
import org.marvin.artifact.manager.ArtifactSaver.SaveToLocal
import org.marvin.model.{EngineActionMetadata, EngineMetadata}
import org.marvin.util.ProtocolUtil

import scala.collection.mutable.ListBuffer
import scala.concurrent.Future
import scala.concurrent.duration._
import scala.util.{Failure, Success}

object OnlineAction {
  case class OnlineExecute(message: String, params: String)
  case class OnlineReload(protocol: String)
  case class OnlineHealthCheck()
}

class OnlineAction(actionName: String, metadata: EngineMetadata) extends Actor with ActorLogging {
  var onlineActionProxy: ActorRef = _
  var artifactSaver: ActorRef = _
  var engineActionMetadata: EngineActionMetadata = _
  var artifactsToLoad: String = _
  implicit val ec = context.dispatcher

  override def preStart() = {
    engineActionMetadata = metadata.actionsMap(actionName)
    artifactsToLoad = engineActionMetadata.artifactsToLoad.mkString(",")
    onlineActionProxy = context.actorOf(Props(new OnlineActionProxy(engineActionMetadata)), name = "onlineActionProxy")
    artifactSaver = context.actorOf(ArtifactSaver.build(metadata), name = "artifactSaver")
  }

  override val supervisorStrategy =
    OneForOneStrategy(maxNrOfRetries = 10, withinTimeRange = metadata.onlineActionTimeout milliseconds) {
      case _: StatusRuntimeException => Restart
      case _: Exception => Escalate
    }

  override def receive  = {
    case OnlineExecute(message, params) =>
      implicit val futureTimeout = Timeout(metadata.onlineActionTimeout milliseconds)

      log.info(s"Starting to process execute to $actionName. Message: [$message] and params: [$params].")

      val originalSender = sender
      ask(onlineActionProxy, ExecuteOnline(message, params)) pipeTo originalSender


    case OnlineReload(protocol) =>
      implicit val futureTimeout = Timeout(metadata.reloadTimeout milliseconds)

      log.info(s"Starting to process reload to $actionName. Protocol: [$protocol].")

      if(protocol == null || protocol.isEmpty){
        onlineActionProxy forward Reload()

      }else{
        val splitedProtocols = ProtocolUtil.splitProtocol(protocol, metadata)

        val futures:ListBuffer[Future[Any]] = ListBuffer[Future[Any]]()
        for(artifactName <- engineActionMetadata.artifactsToLoad) {
          futures += (artifactSaver ? SaveToLocal(artifactName, splitedProtocols(artifactName)))
        }

        val origSender = sender()
        Future.sequence(futures).onComplete{
          case Success(_) => onlineActionProxy.ask(Reload(protocol)) pipeTo origSender
          case Failure(e) => {
            log.error(s"Failure to reload artifacts using protocol $protocol.")
            origSender ! Status.Failure(e)
          }
        }
      }

    case OnlineHealthCheck =>
      implicit val futureTimeout = Timeout(metadata.healthCheckTimeout milliseconds)
      log.info(s"Starting to process health to $actionName.")

      val originalSender = sender
      ask(onlineActionProxy, HealthCheck) pipeTo originalSender

    case Done =>
      log.info("Work Done!")

    case _ =>
      log.warning(s"Not valid message !!")

  }
} 
Example 87
Source File: PipelineAction.scala    From marvin-engine-executor   with Apache License 2.0
package org.marvin.executor.actions

import java.time.LocalDateTime
import java.util.NoSuchElementException

import akka.Done
import akka.actor.{Actor, ActorLogging, ActorRef, Props}
import akka.pattern.ask
import akka.util.Timeout
import org.marvin.artifact.manager.ArtifactSaver
import org.marvin.artifact.manager.ArtifactSaver.SaveToRemote
import org.marvin.exception.MarvinEExecutorException
import org.marvin.executor.actions.PipelineAction.{PipelineExecute, PipelineExecutionStatus}
import org.marvin.executor.proxies.BatchActionProxy
import org.marvin.executor.proxies.EngineProxy.{ExecuteBatch, Reload}
import org.marvin.model._
import org.marvin.util.{JsonUtil, LocalCache}

import scala.collection.mutable.ListBuffer
import scala.concurrent.duration._
import scala.concurrent.{Await, Future}
import scala.util.Success

object PipelineAction {
  case class PipelineExecute(protocol:String, params:String)
  case class PipelineExecutionStatus(protocol:String)
}

class PipelineAction(metadata: EngineMetadata) extends Actor with ActorLogging{
  implicit val ec = context.dispatcher

  var artifactSaver: ActorRef = _
  var cache: LocalCache[BatchExecution] = _

  override def preStart() = {
    artifactSaver = context.actorOf(ArtifactSaver.build(metadata), name = "artifactSaver")
    cache = new LocalCache[BatchExecution](maximumSize = 10000L, defaultTTL = 30.days)
  }

  override def receive  = {
    case PipelineExecute(protocol, params) =>
      implicit val futureTimeout = Timeout(metadata.pipelineTimeout milliseconds)

      log.info(s"Starting to process pipeline process with. Protocol: [$protocol] and Params: [$params].")
      cache.save(protocol, new BatchExecution("pipeline", protocol, LocalDateTime.now, Working))

      try{
        for(actionName <- metadata.pipelineActions){
          val engineActionMetadata = metadata.actionsMap(actionName)
          val _actor: ActorRef = context.actorOf(Props(new BatchActionProxy(engineActionMetadata)), name = actionName.concat("Actor"))
          Await.result((_actor ? Reload(protocol)), futureTimeout.duration)
          Await.result((_actor ? ExecuteBatch(protocol, params)), futureTimeout.duration)
          context stop _actor

          val futures:ListBuffer[Future[Done]] = ListBuffer[Future[Done]]()

          for(artifactName <- engineActionMetadata.artifactsToPersist) {
            futures += (artifactSaver ? SaveToRemote(artifactName, protocol)).mapTo[Done]
          }

          if (!futures.isEmpty) Future.sequence(futures).onComplete{
            case Success(response) =>
              log.info(s"All artifacts from [$actionName] were saved with success!! [$response]")
          }
        }
      }catch {
        case e: Exception =>
          cache.save(protocol, new BatchExecution("pipeline", protocol, LocalDateTime.now, Failed))
          throw e
      }

      cache.save(protocol, new BatchExecution("pipeline", protocol, LocalDateTime.now, Finished))

    case PipelineExecutionStatus(protocol) =>
      log.info(s"Getting pipeline execution status to protocol $protocol.")

      try {
        sender ! JsonUtil.toJson(cache.load(protocol).get)

      }catch {
        case _: NoSuchElementException =>
          sender ! akka.actor.Status.Failure(new MarvinEExecutorException(s"Protocol $protocol not found!"))
      }

    case Done =>
      log.info("Work Done!")

    case _ =>
      log.warning(s"Not valid message !!")

  }
} 
Example 88
Source File: AkkaKubernetes.scala    From akka-kubernetes-tests   with Apache License 2.0
package akka.kubernetes.sample

import akka.actor.{Actor, ActorLogging, ActorSystem, PoisonPill, Props}
import akka.cluster.ClusterEvent.ClusterDomainEvent
import akka.cluster.sharding.{ClusterSharding, ClusterShardingSettings}
import akka.cluster.singleton.{
  ClusterSingletonManager,
  ClusterSingletonManagerSettings,
  ClusterSingletonProxy,
  ClusterSingletonProxySettings
}
import akka.cluster.{Cluster, ClusterEvent}
import akka.http.scaladsl.Http
import akka.http.scaladsl.server.Directives._
import akka.management.cluster.bootstrap.ClusterBootstrap
import akka.management.scaladsl.AkkaManagement
import akka.stream.ActorMaterializer

object DemoApp extends App {

  implicit val system = ActorSystem("KubernetesTest")

  import system.{dispatcher, log}

  implicit val mat = ActorMaterializer()
  implicit val cluster = Cluster(system)

  log.info("Running with [{}]", new Resources())
  log.info(s"Started [$system], cluster.selfAddress = ${cluster.selfAddress}")

  AkkaManagement(system).start()
  ClusterBootstrap(system).start()

  system.actorOf(
    ClusterSingletonManager.props(singletonProps = Props(new AkkaBoss("patriknw")),
                                  terminationMessage = PoisonPill,
                                  settings = ClusterSingletonManagerSettings(system)),
    "boss"
  )

  val bossProxy = system.actorOf(
    ClusterSingletonProxy.props(singletonManagerPath = "/user/boss", settings = ClusterSingletonProxySettings(system)),
    name = "bossProxy"
  )

  val teamMembers = ClusterSharding(system).start(
    "team-member",
    Props(new AkkaMember()),
    ClusterShardingSettings(system),
    AkkaMember.extractEntityId,
    AkkaMember.extractShardId
  )

  cluster.subscribe(system.actorOf(Props[ClusterWatcher]),
                    ClusterEvent.InitialStateAsEvents,
                    classOf[ClusterDomainEvent])

  val talkToTheBoss = new TalkToTheBossRouteRoute(bossProxy)
  val talkToATeamMember = new TalkToATeamMemberRoute(teamMembers)

  Http().bindAndHandle(
    concat(talkToTheBoss.route(),
           talkToATeamMember.route(),
           ClusterStateRoute.routeGetMembers(cluster),
           VersionRoute.versionRoute),
    "0.0.0.0",
    8080
  )

  Cluster(system).registerOnMemberUp({
    log.info("Cluster member is up!")
  })

}

class ClusterWatcher extends Actor with ActorLogging {
  implicit val cluster = Cluster(context.system)

  override def receive = {
    case msg ⇒ log.info(s"Cluster ${cluster.selfAddress} >>> " + msg)
  }
} 
Example 89
Source File: EventProcessor.scala    From akka-kubernetes-tests   with Apache License 2.0
package akka.kubernetes.couchbase

import akka.actor.{Actor, ActorLogging, Props}
import akka.persistence.couchbase.UUIDs
import akka.persistence.couchbase.scaladsl.CouchbaseReadJournal
import akka.persistence.query._
import akka.stream.{ActorMaterializer, KillSwitches, Materializer}
import akka.stream.scaladsl.{RestartSource, Sink}

import scala.concurrent.ExecutionContext
import scala.concurrent.duration._

object EventProcessor {
  def props: Props =
    Props(new EventProcessor)
}

class EventProcessor extends Actor with ActorLogging {

  private val settings = Settings(context.system)
  private val eventProcessorId = settings.eventProcessorSettings.id
  private val tag = self.path.name
  private implicit val ec: ExecutionContext = context.dispatcher
  private implicit val materializer: Materializer = ActorMaterializer()(context.system)
  private val query =
    PersistenceQuery(context.system).readJournalFor[CouchbaseReadJournal](CouchbaseReadJournal.Identifier)
  private val killSwitch = KillSwitches.shared("eventProcessorSwitch")
  override val log = super.log // eager initialization because used from inside stream

  override def preStart(): Unit = {
    super.preStart()
    log.info("Starting event processor for tag: {}", tag)
    runQueryStream()
  }

  override def postStop(): Unit = {
    super.postStop()
    killSwitch.shutdown()
  }

  def receive = {
    case KeepAlive.Ping =>
      sender() ! KeepAlive.Pong
      log.debug(
        s"Event processor(${self.path.name}) @ ${context.system.settings.config
          .getString("akka.remote.artery.canonical.hostname")}:${context.system.settings.config.getInt("akka.remote.artery.canonical.port")}"
      )

    case message =>
      log.error("Got unexpected message: {}", message)
  }

  private def runQueryStream(): Unit =
    RestartSource
      .withBackoff(minBackoff = 500.millis, maxBackoff = 20.seconds, randomFactor = 0.1) { () =>
        // TODO offsets, this just starts from the beginning each time
        query
          .eventsByTag(tag, NoOffset)
          .map { eventEnvelope: EventEnvelope =>
            val now = System.currentTimeMillis()
            val publishTime = eventEnvelope.offset match {
              case t: TimeBasedUUID => UUIDs.timestampFrom(t)
            }
            val delay = now - publishTime
            log.info(s"#Eventprocessor($tag) got $eventEnvelope. Event is {} ms delayed", delay) // You would write to Kafka here
            eventEnvelope.offset
          }
      }
      .via(killSwitch.flow)
      .runWith(Sink.ignore)

} 
Example 90
Source File: ShardedBankAccountAggregates.scala    From akka-ddd-cqrs-es-example   with MIT License
package com.github.j5ik2o.bank.adaptor.aggregate

import akka.actor.{ Actor, ActorLogging, ActorRef, ActorSystem, Props }
import akka.cluster.sharding.{ ClusterSharding, ClusterShardingSettings }
import com.github.j5ik2o.bank.adaptor.aggregate.BankAccountAggregate.Protocol.BankAccountCommandRequest

object ShardedBankAccountAggregates {
  def props: Props = Props(new ShardedBankAccountAggregates())
  def name: String = "sharded-bank-accounts"

  def start(system: ActorSystem): ActorRef = {
    system.log.debug("ShardedBankAccounts#start: start")
    val actorRef = ClusterSharding(system).start(
      ShardedBankAccountAggregate.shardName,
      ShardedBankAccountAggregate.props,
      ClusterShardingSettings(system),
      ShardedBankAccountAggregate.extractEntityId,
      ShardedBankAccountAggregate.extractShardId
    )
    system.log.debug("ShardedBankAccounts#start: finish")
    actorRef
  }

  def shardRegion(system: ActorSystem): ActorRef =
    ClusterSharding(system).shardRegion(ShardedBankAccountAggregate.shardName)

}

class ShardedBankAccountAggregates extends Actor with ActorLogging {

  ShardedBankAccountAggregates.start(context.system)

  override def receive: Receive = {
    case cmd: BankAccountCommandRequest =>
      ShardedBankAccountAggregates.shardRegion(context.system) forward cmd
  }

} 
Example 91
Source File: HelloActor.scala    From swagger-akka-http-sample   with Apache License 2.0
package com.example.akka.hello

import akka.actor.{Actor, ActorLogging}

object HelloActor {
  case object AnonymousHello
  case class Hello(name: String)
  case class Greeting(greeting: String)
}

class HelloActor extends Actor with ActorLogging {
  import HelloActor._

  def receive: Receive = {
    case AnonymousHello => sender ! Greeting("Hello")
    case Hello(name) => sender ! Greeting(s"Hello, $name")
  }
} 
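A usage sketch with the ask pattern (the surrounding ActorSystem and timeout are assumptions):

import akka.actor.{ ActorSystem, Props }
import akka.pattern.ask
import akka.util.Timeout
import scala.concurrent.duration._

object HelloMain extends App {
  implicit val system: ActorSystem = ActorSystem("hello")
  import system.dispatcher
  implicit val timeout: Timeout = Timeout(3.seconds)

  val hello = system.actorOf(Props[HelloActor], "hello")

  (hello ? HelloActor.Hello("Akka"))
    .mapTo[HelloActor.Greeting]
    .foreach(g => println(g.greeting)) // prints "Hello, Akka"
}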
Example 92
Source File: LoopActor.scala    From lemon-schedule   with GNU General Public License v2.0
package com.gabry.job.experiment

import akka.actor.{Actor, ActorLogging, ActorSystem, Props}
import com.typesafe.config.ConfigFactory


case object Loop
class LoopedActor extends Actor with ActorLogging {
  var counter = 0L
  override def aroundReceive(receive: Receive, msg: Any): Unit = {
    counter += 1
    if (counter % 1000 == 0) {
      self ! s"counter=$counter"
    }
    super.aroundReceive(receive, msg)
  }
  override def receive: Receive = {
    case Loop =>
      Thread.sleep(0)
      log.info(s"receive loop message at ${System.currentTimeMillis()}")
      self ! Loop
    case any =>
      log.warning(s"receive any message $any")
  }
}
object LoopActor {
  def main(args: Array[String]): Unit = {
    val system = ActorSystem("testLoop", ConfigFactory.load())
    val loop = system.actorOf(Props[LoopedActor])
    loop ! Loop
  }

} 
Example 93
Source File: BalancingPoolDemo.scala    From fusion-data   with Apache License 2.0 5 votes vote down vote up
package example

import akka.actor.{ Actor, ActorLogging, ActorRef, ActorSystem, Props }
import example.Worker.FibonacciNumber

import scala.annotation.tailrec
import scala.concurrent.duration._
import scala.io.StdIn

object Worker {
  case class FibonacciNumber(nbr: Int, delay: FiniteDuration)

  case class GetResult(nr: Int, source: ActorRef)

  def props: Props = Props(new Worker)
}

class Worker extends Actor with ActorLogging {
  import Worker._
  import context.dispatcher

  override def preStart(): Unit =
    log.info(s"$self started")

  override def postStop(): Unit =
    log.info(s"$self stopped")

  override def receive: Receive = {
    case FibonacciNumber(nr, delay) =>
      context.system.scheduler.scheduleOnce(delay, self, GetResult(nr, sender()))

    case GetResult(nr, source) =>
      val result = fibonacci(nr)
      log.info(s"$nr! = $result")
  }

  private def fibonacci(n: Int): Int = {
    @tailrec
    def fib(n: Int, b: Int, a: Int): Int = n match {
      case 0 => a
      case _ =>
        fib(n - 1, a + b, b)
    }
    fib(n, 1, 0)
  }
}

object BalancingPoolDemo extends App {
  implicit val system = ActorSystem()

  val worker = system.actorOf(Worker.props, "worker")
  worker ! FibonacciNumber(50, 50.millis)
  worker ! FibonacciNumber(33, 50.millis)
  worker ! FibonacciNumber(68, 50.millis)
  worker ! FibonacciNumber(53, 50.millis)
  worker ! FibonacciNumber(45, 50.millis)

  StdIn.readLine()
  system.terminate()
} 
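Note that the Int-based fibonacci above silently overflows for n >= 47, which includes several of the messages sent in this demo (50, 68, 53). A hedged variant using a BigInt accumulator (the fibonacciBig name is ours):

import scala.annotation.tailrec

def fibonacciBig(n: Int): BigInt = {
  @tailrec
  def fib(n: Int, b: BigInt, a: BigInt): BigInt = n match {
    case 0 => a
    case _ => fib(n - 1, a + b, b) // same recurrence, arbitrary-precision arithmetic
  }
  fib(n, 1, 0)
}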
Example 94
Source File: FactorialFrontend.scala    From fusion-data   with Apache License 2.0 5 votes vote down vote up
package sample.cluster.factorial

import akka.actor.{ Actor, ActorLogging, ActorSystem, Props, ReceiveTimeout }
import akka.cluster.Cluster
import akka.routing.FromConfig
import com.typesafe.config.ConfigFactory

import scala.concurrent.Await
import scala.concurrent.duration._
import scala.util.Try

class FactorialFrontend(upToN: Int, repeat: Boolean) extends Actor with ActorLogging {
  val backend =
    context.actorOf(FromConfig.props(), name = "factorialBackendRouter")

  override def preStart(): Unit = {
    sendJobs()
    if (repeat) {
      context.setReceiveTimeout(10.seconds)
    }
  }

  def receive = {
    case (n: Int, factorial: BigInt) =>
      log.info("{}! = {}", n, factorial)
      if (n == upToN) {
        if (repeat) sendJobs()
        else context.stop(self)
      }
    case ReceiveTimeout =>
      log.info("Timeout")
      sendJobs()
  }

  def sendJobs(): Unit =
    //    log.info("Starting batch of factorials up to [{}]", upToN)
    1 to upToN foreach { backend ! _ }
}

object FactorialFrontend {
  def main(args: Array[String]): Unit = {
    val upToN = 200

    val config =
      ConfigFactory.parseString("akka.cluster.roles = [frontend]").withFallback(ConfigFactory.load("factorial"))

    val system = ActorSystem("ClusterSystem", config)
    system.log.info("Factorials will start when 2 backend members in the cluster.")

    Cluster(system) registerOnMemberUp {
      system.actorOf(Props(classOf[FactorialFrontend], upToN, false), name = "factorialFrontend")
    }

    Cluster(system).registerOnMemberRemoved {
      // exit JVM when ActorSystem has been terminated
      system.registerOnTermination(System.exit(0))
      // shut down ActorSystem
      system.terminate()

      // In case ActorSystem shutdown takes longer than 10 seconds,
      // exit the JVM forcefully anyway.
      // We must spawn a separate thread to not block current thread,
      // since that would have blocked the shutdown of the ActorSystem.
      new Thread {
        override def run(): Unit =
          if (Try(Await.ready(system.whenTerminated, 10.seconds)).isFailure)
            System.exit(-1)
      }.start()
    }
  }
} 
Example 95
Source File: MetricsListener.scala    From fusion-data   with Apache License 2.0 5 votes vote down vote up
package sample.cluster.factorial

import akka.actor.{ Actor, ActorLogging }
import akka.cluster.Cluster
import akka.cluster.ClusterEvent.CurrentClusterState
import akka.cluster.metrics.{ ClusterMetricsChanged, ClusterMetricsExtension, NodeMetrics }
import akka.cluster.metrics.StandardMetrics.{ Cpu, HeapMemory }

class MetricsListener extends Actor with ActorLogging {
  val selfAddress = Cluster(context.system).selfAddress
  val extension = ClusterMetricsExtension(context.system)

  // Subscribe unto ClusterMetricsEvent events.
  override def preStart(): Unit = extension.subscribe(self)

  // Unsubscribe from ClusterMetricsEvent events.
  override def postStop(): Unit = extension.unsubscribe(self)

  def receive = {
    case ClusterMetricsChanged(clusterMetrics) =>
      clusterMetrics.filter(_.address == selfAddress) foreach { nodeMetrics =>
        logHeap(nodeMetrics)
        logCpu(nodeMetrics)
      }
    case state: CurrentClusterState => // Ignore.
  }

  def logHeap(nodeMetrics: NodeMetrics): Unit = nodeMetrics match {
    case HeapMemory(address, timestamp, used, committed, max) =>
      log.info("Used heap: {} MB", used.doubleValue / 1024 / 1024)
    case _ => // No heap info.
  }

  def logCpu(nodeMetrics: NodeMetrics): Unit = nodeMetrics match {
    case Cpu(address, timestamp, Some(systemLoadAverage), cpuCombined, cpuStolen, processors) =>
      log.info("Load: {} ({} processors)", systemLoadAverage, processors)
    case _ => // No cpu info.
  }
} 
Example 96
Source File: FactorialBackend.scala    From fusion-data   with Apache License 2.0 5 votes vote down vote up
package sample.cluster.factorial

import akka.actor.{ Actor, ActorLogging, ActorSystem, Props }
import akka.pattern.pipe
import com.typesafe.config.ConfigFactory

import scala.annotation.tailrec
import scala.concurrent.Future

class FactorialBackend extends Actor with ActorLogging {
  import context.dispatcher

  def receive = {
    case (n: Int) =>
      Future(factorial(n)) map { result =>
        (n, result)
      } pipeTo sender()
  }

  def factorial(n: Int): BigInt = {
    @tailrec def factorialAcc(acc: BigInt, n: Int): BigInt =
      if (n <= 1) acc
      else factorialAcc(acc * n, n - 1)
    factorialAcc(BigInt(1), n)
  }
}

object FactorialBackend {
  def main(args: Array[String]): Unit = {
    // Override the configuration of the port when specified as program argument
    val port = if (args.isEmpty) "0" else args(0)
    val config = ConfigFactory
      .parseString(s"""
        akka.remote.netty.tcp.port=$port
        akka.remote.artery.canonical.port=$port
        """)
      .withFallback(ConfigFactory.parseString("akka.cluster.roles = [backend]"))
      .withFallback(ConfigFactory.load("factorial"))

    val system = ActorSystem("ClusterSystem", config)
    system.actorOf(Props[FactorialBackend], name = "factorialBackend")

    system.actorOf(Props[MetricsListener], name = "metricsListener")
  }
} 
Example 97
Source File: HostSlaveActor.scala    From speedo   with Apache License 2.0 5 votes vote down vote up
package com.htc.speedo.akka

import akka.actor.{ Actor, ActorIdentity, ActorLogging, Identify }

import com.twitter.scalding.Args


// NOTE: this listing omits the class header; the declaration below is a reconstruction.
class HostSlaveActor(args: Args) extends Actor with ActorLogging {
  // Hypothetical: the original source derives the master's actor path from the args.
  val masterPath = args.required("master")

  val workerCount = args.int("worker", 1)

  // Tries to identify if the master exists
  context.actorSelection(masterPath) ! Identify(masterPath)

  override def receive = {
    // If found master actor, join
    case ActorIdentity(`masterPath`, Some(master)) =>
      // Each join message will create a worker actor
      (1 to workerCount).foreach(_ => master ! Join)
    // If not found master actor, log and exit
    case ActorIdentity(`masterPath`, None) =>
      log.error(s"Cannot found master at $masterPath, stopping!")
      context.system.shutdown
    // stop slave akka system
    case StopAkka => context.system.shutdown
  }
} 
Example 98
Source File: ParameterActor.scala    From speedo   with Apache License 2.0 5 votes vote down vote up
package com.htc.speedo.akka

import akka.actor.{ Actor, ActorLogging }

import com.twitter.scalding.Args

import com.htc.speedo.caffe.CaffeWorker._


// NOTE: this listing omits the class header and fields; the declarations below,
// including the initial values and the placeholder receive, are reconstructions.
class ParameterActor extends Actor with ActorLogging {
  var synchronous: Boolean = false
  var movingRate: Option[Float] = None
  var weightUpdate: Any = FullUpdate // the concrete type lives in CaffeWorker

  // The actor's message handling is not shown in this listing.
  override def receive: Receive = Actor.emptyBehavior

  def updateParameter(args: Args): Unit = {
    synchronous = args.boolean("sync")
    movingRate = args.optional("movingRate").map(_.toFloat)
    weightUpdate = movingRate match {
      case Some(_) if !synchronous => SelfPaceFullUpdate
      case _ => args.boolean("gradientOnly") match {
        case true => GradientOnly
        case false => FullUpdate
      }
    }
  }
} 
Example 99
Source File: UserRepository.scala    From gabbler   with Apache License 2.0 5 votes vote down vote up
package de.heikoseeberger.gabbler.chat

import akka.actor.{ ActorLogging, Props }
import akka.http.scaladsl.Http
import akka.http.scaladsl.model.Uri
import akka.persistence.{ PersistentActor, RecoveryCompleted }
import akka.stream.ActorMaterializer
import akka.stream.alpakka.sse.scaladsl.EventSource
import de.heikoseeberger.akkasse.ServerSentEvent
import io.circe.parser.decode

object UserRepository {

  private sealed trait UserEvent

  final case class FindUserByUsername(username: String)
  final case class UsernameUnknown(username: String)

  private final case class AddUser(id: Long, username: String, nickname: String)
  private final case class UserAdded(eventId: String, user: User)

  private final case class RemoveUser(id: Long)
  private final case class UserRemoved(eventId: String, user: User)

  final case class User(id: Long, username: String, nickname: String)

  final val Name = "user-repository"

  def apply(userEventsEndpoint: Uri): Props =
    Props(new UserRepository(userEventsEndpoint))
}

final class UserRepository(userEventsEndpoint: Uri) extends PersistentActor with ActorLogging {
  import UserRepository._
  import io.circe.generic.auto._

  override val persistenceId = Name

  private implicit val mat = ActorMaterializer()

  private var users = Map.empty[String, User]

  private var lastEventId = Option.empty[String]

  override def receiveCommand = {
    case FindUserByUsername(n)               => handleFindUserByUsername(n)
    case (eventId: String, AddUser(i, u, n)) => handleAddUser(eventId, i, u, n)
    case (eventId: String, RemoveUser(i))    => handleRemoveUser(eventId, i)
  }

  override def receiveRecover = {
    case RecoveryCompleted =>
      userEvents(lastEventId).runForeach(self ! _)

    case UserAdded(eventId, user) =>
      lastEventId = Some(eventId)
      users += user.username -> user
      log.info("Added user with username {}", user.username)

    case UserRemoved(eventId, user) =>
      lastEventId = Some(eventId)
      users -= user.username
      log.info("Removed user with username {}", user.username)
  }

  private def handleFindUserByUsername(username: String) =
    users.get(username) match {
      case Some(user) => sender() ! user
      case None       => sender() ! UsernameUnknown(username)
    }

  private def handleAddUser(eventId: String, id: Long, username: String, nickname: String) =
    persist(UserAdded(eventId, User(id, username, nickname)))(receiveRecover)

  private def handleRemoveUser(eventId: String, id: Long) =
    users.values.find(_.id == id) match {
      case Some(user) => persist(UserRemoved(eventId, user))(receiveRecover)
      case None       => log.warning("User with id {} does not exist!", id)
    }

  private def userEvents(lastEventId: Option[String]) =
    EventSource(userEventsEndpoint, Http(context.system).singleRequest(_), lastEventId)
      .collect {
        case ServerSentEvent(Some(data), Some("user-added"), Some(eventId), _) =>
          eventId -> decode[AddUser](data)
        case ServerSentEvent(Some(data), Some("user-removed"), Some(eventId), _) =>
          eventId -> decode[RemoveUser](data)
      }
      .collect { case (eventId, Right(userEvent)) => eventId -> userEvent }
} 
Example 100
Source File: ChatApp.scala    From gabbler   with Apache License 2.0 5 votes vote down vote up
package de.heikoseeberger.gabbler.chat

import akka.NotUsed
import akka.actor.{ Actor, ActorLogging, ActorSystem, Props, SupervisorStrategy, Terminated }
import akka.cluster.Cluster
import akka.cluster.singleton.{
  ClusterSingletonManager,
  ClusterSingletonManagerSettings,
  ClusterSingletonProxy,
  ClusterSingletonProxySettings
}
import akka.http.scaladsl.model.Uri
import scala.concurrent.Await
import scala.concurrent.duration.Duration

object ChatApp {

  private final class Root extends Actor with ActorLogging {

    override val supervisorStrategy = SupervisorStrategy.stoppingStrategy

    private val userRepository = {
      val userEvents =
        Uri(context.system.settings.config.getString("gabbler-chat.user-repository.user-events"))
      val userRepository =
        context.actorOf(
          ClusterSingletonManager.props(UserRepository(userEvents),
                                        NotUsed,
                                        ClusterSingletonManagerSettings(context.system)),
          UserRepository.Name
        )
      context.actorOf(
        ClusterSingletonProxy.props(userRepository.path.elements.mkString("/", "/", ""),
                                    ClusterSingletonProxySettings(context.system)),
        s"${UserRepository.Name}-proxy"
      )
    }

    context.watch(userRepository)
    log.info("gabbler-chat up and running")

    override def receive = {
      case Terminated(actor) =>
        log.error("Terminating the system because {} terminated!", actor.path)
        context.system.terminate()
    }
  }

  def main(args: Array[String]): Unit = {
    val system = ActorSystem("gabbler-chat")
    Cluster(system).registerOnMemberUp(system.actorOf(Props(new Root), "root"))
  }
} 
Example 101
Source File: UserApp.scala    From gabbler   with Apache License 2.0 5 votes vote down vote up
package de.heikoseeberger.gabbler.user

import akka.NotUsed
import akka.actor.{ Actor, ActorLogging, ActorSystem, Props, SupervisorStrategy, Terminated }
import akka.cluster.Cluster
import akka.cluster.singleton.{
  ClusterSingletonManager,
  ClusterSingletonManagerSettings,
  ClusterSingletonProxy,
  ClusterSingletonProxySettings
}
import akka.persistence.cassandra.query.scaladsl.CassandraReadJournal
import akka.persistence.query.PersistenceQuery

object UserApp {

  final class Root extends Actor with ActorLogging {

    override val supervisorStrategy = SupervisorStrategy.stoppingStrategy

    private val userRepository = {
      val readJournal =
        PersistenceQuery(context.system)
          .readJournalFor[CassandraReadJournal](CassandraReadJournal.Identifier)
      val userRepository =
        context.actorOf(
          ClusterSingletonManager.props(UserRepository(readJournal),
                                        NotUsed,
                                        ClusterSingletonManagerSettings(context.system)),
          UserRepository.Name
        )
      context.actorOf(
        ClusterSingletonProxy.props(userRepository.path.elements.mkString("/", "/", ""),
                                    ClusterSingletonProxySettings(context.system)),
        s"${UserRepository.Name}-proxy"
      )
    }

    private val userApi = {
      val config  = context.system.settings.config
      val address = config.getString("gabbler-user.user-api.address")
      val port    = config.getInt("gabbler-user.user-api.port")
      val timeout = config.getDuration("gabbler-user.user-api.user-repository-timeout").asScala
      context.actorOf(UserApi(address, port, userRepository, timeout), UserApi.Name)
    }

    context.watch(userRepository)
    context.watch(userApi)
    log.info("gabbler-user up and running")

    override def receive = {
      case Terminated(actor) =>
        log.error("Terminating the system because {} terminated!", actor.path)
        context.system.terminate()
    }
  }

  def main(args: Array[String]): Unit = {
    val system = ActorSystem("gabbler-user")
    Cluster(system).registerOnMemberUp(system.actorOf(Props(new Root), "root"))
  }
} 
Example 102
Source File: UserRepository.scala    From gabbler   with Apache License 2.0 5 votes vote down vote up
package de.heikoseeberger.gabbler.user

import akka.NotUsed
import akka.actor.{ ActorLogging, Props }
import akka.persistence.PersistentActor
import akka.persistence.query.EventEnvelope
import akka.persistence.query.scaladsl.EventsByPersistenceIdQuery
import akka.stream.scaladsl.Source

object UserRepository {

  sealed trait UserEvent

  final case object GetUsers
  final case class Users(users: Set[User])

  final case class AddUser(username: String, nickname: String, email: String)
  final case class UserAdded(user: User) extends UserEvent
  final case class UsernameTaken(username: String)

  final case class RemoveUser(id: Long)
  final case class UserRemoved(user: User) extends UserEvent
  final case class IdUnknown(id: Long)

  final case class GetUserEvents(fromSeqNo: Long)
  final case class UserEvents(userEvents: Source[(Long, UserEvent), NotUsed])

  final case class User(id: Long, username: String, nickname: String, email: String)

  final val Name = "user-repository"

  def apply(readJournal: EventsByPersistenceIdQuery): Props =
    Props(new UserRepository(readJournal))
}

final class UserRepository(readJournal: EventsByPersistenceIdQuery)
    extends PersistentActor
    with ActorLogging {
  import UserRepository._

  override val persistenceId = Name

  private var users = Map.empty[String, User]

  override def receiveCommand = {
    case GetUsers                           => sender() ! Users(users.valuesIterator.to[Set])
    case AddUser(username, nickname, email) => handleAddUser(username, nickname, email)
    case RemoveUser(id)                     => handleRemoveUser(id)
    case GetUserEvents(fromSeqNo)           => handleGetUserEvents(fromSeqNo)
  }

  override def receiveRecover = {
    case UserAdded(user)   => users += user.username -> user
    case UserRemoved(user) => users -= user.username
  }

  private def handleAddUser(username: String, nickname: String, email: String) = {
    def add() =
      persist(UserAdded(User(lastSequenceNr, username, nickname, email))) { userAdded =>
        receiveRecover(userAdded)
        log.info("Added user with username {}", username)
        sender() ! userAdded
      }
    if (!users.contains(username)) add() else sender() ! UsernameTaken(username)
  }

  private def handleRemoveUser(id: Long) = {
    def remove(user: User) =
      persist(UserRemoved(user)) { userRemoved =>
        receiveRecover(userRemoved)
        log.info("Removed user with id {} and username {}", id, user.username)
        sender() ! userRemoved
      }
    users.valuesIterator.find(_.id == id) match {
      case Some(user) => remove(user)
      case None       => sender() ! IdUnknown(id)
    }
  }

  private def handleGetUserEvents(fromSeqNo: Long) = {
    val userEvents =
      readJournal
        .eventsByPersistenceId(Name, fromSeqNo, Long.MaxValue)
        .collect { case EventEnvelope(_, _, seqNo, event: UserEvent) => seqNo -> event }
    sender() ! UserEvents(userEvents)
  }
} 
Example 103
Source File: AkkaActorsKafkaConsumer.scala    From kafka-scala-api   with Apache License 2.0 5 votes vote down vote up
package com.example

import akka.actor.{Actor, ActorLogging, ActorRef, ActorSystem, OneForOneStrategy, Props, SupervisorStrategy}
import cakesolutions.kafka.KafkaConsumer
import cakesolutions.kafka.akka.KafkaConsumerActor.{Confirm, Subscribe}
import cakesolutions.kafka.akka.{ConsumerRecords, Extractor, KafkaConsumerActor}

import scala.concurrent.duration._

object AkkaActorsKafkaConsumer extends App {
  ConsumerRecovery()
}

object ConsumerRecovery {
  def apply(): ActorRef = {
    val actorConf = KafkaConsumerActor.Conf(1.seconds, 3.seconds)
    val system = ActorSystem()
    // kafkaConsumerConf is defined elsewhere in the original project (not shown
    // in this listing); see the hypothetical sketch after this example.
    system.actorOf(Props(new ConsumerRecovery(kafkaConsumerConf, actorConf)))
  }
}

class ConsumerRecovery(kafkaConfig: KafkaConsumer.Conf[String, String],
                        actorConfig: KafkaConsumerActor.Conf) extends Actor with ActorLogging {

  override def supervisorStrategy: SupervisorStrategy = OneForOneStrategy(maxNrOfRetries = 10) {
    case _: KafkaConsumerActor.ConsumerException =>
      log.info("Consumer exception caught. Restarting consumer.")
      SupervisorStrategy.Restart
    case _ =>
      SupervisorStrategy.Escalate
  }

  val recordsExt: Extractor[Any, ConsumerRecords[String, String]] = ConsumerRecords.extractor[String, String]

  val consumer: ActorRef = context.actorOf(
    KafkaConsumerActor.props(kafkaConfig, actorConfig, self)
  )

  consumer ! Subscribe.AutoPartition(List(topic)) // topic is likewise defined elsewhere in the project

  override def receive: Receive = {
    // Consume from Kafka
    case recordsExt(records) =>
      processRecords(records.pairs)
      sender() ! Confirm(records.offsets, commit = true)
  }

  private def processRecords(records: Seq[(Option[String], String)]) =
    records.foreach { case (key, value) =>
      log.info(s"Received [$key,$value]")
    }
} 
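The listing above references kafkaConsumerConf and topic without showing their definitions (in the original project they live outside this file). A hypothetical sketch of what they could look like, modeled on the consumer configuration used in the scala-kafka-client examples later in this page:

import cakesolutions.kafka.KafkaConsumer
import com.typesafe.config.ConfigFactory
import org.apache.kafka.clients.consumer.OffsetResetStrategy
import org.apache.kafka.common.serialization.StringDeserializer

val topic = "topic1" // hypothetical topic name

val kafkaConsumerConf: KafkaConsumer.Conf[String, String] = KafkaConsumer.Conf(
  new StringDeserializer,
  new StringDeserializer,
  groupId = "test_group",
  enableAutoCommit = false, // offsets are committed explicitly via Confirm(..., commit = true)
  autoOffsetReset = OffsetResetStrategy.EARLIEST)
  .withConf(ConfigFactory.load().getConfig("consumer")) // supplies bootstrap.servers etc.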
Example 104
Source File: Constructr.scala    From constructr   with Apache License 2.0 5 votes vote down vote up
package de.heikoseeberger.constructr

import akka.actor.{ Actor, ActorLogging, ActorRef, Props, SupervisorStrategy, Terminated }
import akka.cluster.{ Cluster, Member }
import akka.cluster.ClusterEvent.{ InitialStateAsEvents, MemberExited, MemberLeft, MemberRemoved }
import akka.cluster.MemberStatus.Up
import de.heikoseeberger.constructr.coordination.Coordination
import scala.concurrent.duration.{ FiniteDuration, NANOSECONDS }

object Constructr {

  final val Name = "constructr"

  def props: Props =
    Props(new Constructr)
}

final class Constructr private extends Actor with ActorLogging {

  override val supervisorStrategy = SupervisorStrategy.stoppingStrategy

  private val cluster = Cluster(context.system)

  if (cluster.settings.SeedNodes.isEmpty) {
    log.info("Creating constructr-machine, because no seed-nodes defined")
    cluster.subscribe(self,
                      InitialStateAsEvents,
                      classOf[MemberLeft],
                      classOf[MemberExited],
                      classOf[MemberRemoved])
    context.become(active(context.watch(createConstructrMachine())))
  } else {
    log.info("Stopping self, because seed-nodes defined")
    context.stop(self)
  }

  override def receive = Actor.emptyBehavior

  private def active(machine: ActorRef): Receive = {
    case Terminated(`machine`) =>
      val selfAddress = cluster.selfAddress
      def isSelfAndUp(member: Member) =
        member.address == selfAddress && member.status == Up
      if (cluster.state.members.exists(isSelfAndUp)) {
        log.error("Leaving, because constructr-machine terminated!")
        cluster.leave(selfAddress)
      } else {
        log.error("Terminating system, because constructr-machine terminated!")
        context.system.terminate()
      }

    case MemberRemoved(member, _) if member.address == cluster.selfAddress =>
      log.error("Terminating system, because member has been removed!")
      context.system.terminate()
  }

  private def createConstructrMachine() = {
    val config = context.system.settings.config
    def getDuration(key: String) =
      FiniteDuration(config.getDuration(key).toNanos, NANOSECONDS)

    val coordinationTimeout   = getDuration("constructr.coordination-timeout")
    val nrOfRetries           = config.getInt("constructr.nr-of-retries")
    val retryDelay            = getDuration("constructr.retry-delay")
    val refreshInterval       = getDuration("constructr.refresh-interval")
    val ttlFactor             = config.getDouble("constructr.ttl-factor")
    val maxNrOfSeedNodes      = config.getInt("constructr.max-nr-of-seed-nodes")
    val joinTimeout           = getDuration("constructr.join-timeout")
    val abortOnJoinTimeout    = config.getBoolean("constructr.abort-on-join-timeout")
    val ignoreRefreshFailures = config.getBoolean("constructr.ignore-refresh-failures")

    context.actorOf(
      ConstructrMachine.props(
        cluster.selfAddress,
        Coordination(context.system.name, context.system),
        coordinationTimeout,
        nrOfRetries,
        retryDelay,
        refreshInterval,
        ttlFactor,
        if (maxNrOfSeedNodes <= 0) Int.MaxValue else maxNrOfSeedNodes,
        joinTimeout,
        abortOnJoinTimeout,
        ignoreRefreshFailures
      ),
      ConstructrMachine.Name
    )
  }
} 
Example 105
Source File: Leadership.scala    From cave   with MIT License 5 votes vote down vote up
package actors

import akka.actor.{Actor, ActorLogging, Address}
import akka.cluster.ClusterEvent._
import akka.cluster.{Cluster, Member}

object Leadership {
  object IsLeader
}

class Leadership(address: Address) extends Actor with ActorLogging {

  private val cluster = Cluster(context.system)
  private var members = Set.empty[Member]

  private var isLeader = false

  override def preStart(): Unit =
    cluster.subscribe(self, initialStateMode = InitialStateAsEvents,
      classOf[MemberEvent],
      classOf[UnreachableMember],
      classOf[ClusterDomainEvent])

  override def postStop(): Unit = cluster.unsubscribe(self)

  import actors.Leadership._

  def receive = {

    case IsLeader =>
      sender ! isLeader

    case state: CurrentClusterState =>
      log.warning("Initial state: " + state.leader)
      setLeader(state.leader)

    case MemberUp(member) =>
      log.warning(s"Member up($member)")
      members += member

    case MemberRemoved(member, previousStatus) =>
      log.warning(s"Member removed($member)")
      members.find(_.address == member.address) foreach (members -= _)

    case LeaderChanged(member) =>
      log.warning("Leader changed, now: " + member)
      setLeader(member)

    case e: MemberEvent =>
      log.warning(s"Member event($e)")
  }

  private def setLeader(leader: Option[Address]): Unit = {
    isLeader = leader.contains(address)
  }
} 
Example 106
Source File: Scheduler.scala    From cave   with MIT License 5 votes vote down vote up
package actors

import java.util.concurrent.{Executor, TimeUnit}

import akka.actor.{Actor, ActorLogging}
import akka.pattern.ask
import akka.util.Timeout
import com.cave.metrics.data.evaluator.AlertParser
import com.cave.metrics.data.{Check, Schedule}
import init.{AwsWrapper, Init}
import org.joda.time.format.ISODateTimeFormat
import org.joda.time.{Minutes, LocalTime, DateTime, DateTimeZone}

import scala.concurrent.ExecutionContext
import scala.concurrent.duration._

object Scheduler {
  object DoWork
  object Die
  case class NotificationUrlChange(newUrl: String)
}
class Scheduler(schedule: Schedule, awsWrapper: AwsWrapper) extends Actor with ActorLogging with AlertParser {

  private[actors] def leader = Init.leader
  var notificationUrl: String = schedule.notificationUrl
  implicit val timeout = Timeout(2, TimeUnit.SECONDS)

  val (waitTime, period) = getSchedule(schedule.alert.period)

  val Formatter = ISODateTimeFormat.dateTimeNoMillis()

  implicit val executor = context.dispatcher.asInstanceOf[Executor with ExecutionContext]
  private val queueCheckSchedule = context.system.scheduler.schedule(waitTime, period, self, Scheduler.DoWork)

  override def receive = {
    case Scheduler.DoWork =>
      leader ? Leadership.IsLeader onComplete {
        case scala.util.Success(imLeader: Boolean) =>
          if (imLeader) {
            awsWrapper.sendMessage(Check(Schedule(schedule.orgName, schedule.teamName, schedule.clusterName, notificationUrl, schedule.alert), now()))
          }

        case scala.util.Success(e) =>
          log.error("Unexpected result returned by the leader actor: " + e)

        case scala.util.Failure(t) =>
          log.error("Failed to query the leader actor, error was " + t)
      }


    case Scheduler.NotificationUrlChange(url) =>
      log.debug(s"Updating the notification URL, from $notificationUrl to $url.")
      notificationUrl = url

    case Scheduler.Die =>
      context stop self
  }

  override def postStop(): Unit = queueCheckSchedule.cancel()

  
  private[actors] def getSchedule(alertPeriod: String): (FiniteDuration, FiniteDuration) =
    parseAll(duration, alertPeriod) match {
      case Success(p, _) => (0.minutes, p)

      case NoSuccess(_, message) =>
        parseAll(daily, alertPeriod) match {
          case Success(time, _) => (getWait(nowLocal(), time), 1.day)

          case NoSuccess(_, message2) =>
            sys.error(s"Unexpected alert period $alertPeriod. Not a duration ($message) and not a daily scheduler ($message2).")
        }
    }

  private[actors] def getWait(now: LocalTime, until: LocalTime): FiniteDuration = {
    val wait = Minutes.minutesBetween(now, until).getMinutes
    val minutes = if (wait < 0) 1440 + wait else wait
    minutes.minutes
  }
} 
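A worked illustration of the wraparound in getWait, with assumed times: if now is 23:30 and the daily run time is 00:15, minutesBetween is negative, so 1440 minutes (one day) are added:

import org.joda.time.{ LocalTime, Minutes }

val wait = Minutes.minutesBetween(new LocalTime(23, 30), new LocalTime(0, 15)).getMinutes // -1395
val minutes = if (wait < 0) 1440 + wait else wait // 45: the next 00:15 is 45 minutes away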
Example 107
Source File: Checker.scala    From cave   with MIT License 5 votes vote down vote up
package worker

import java.util.concurrent.Executor

import akka.actor.{Actor, ActorLogging, Status}
import akka.pattern.pipe
import com.cave.metrics.data._
import com.cave.metrics.data.evaluator.{CheckEvaluator, DataFetcher}
import init.Init

import scala.concurrent.{ExecutionContext, Future}
import scala.util.Try

object Checker {
  type Result = Try[Boolean]

  case class Done(alarm: Result)
  case class Aborted(reason: String)
}

class Checker(check: Check) extends Actor with ActorLogging {

  implicit val exec = context.dispatcher.asInstanceOf[Executor with ExecutionContext]
  val evaluator = new CheckEvaluator(check)
  def fetcher = new DataFetcher(Init.influxClientFactory)

  this run check pipeTo self

  def receive = {
    case alarm: Checker.Result =>
      context.parent ! Checker.Done(alarm)
      stop()

    case x: Status.Failure =>
      context.parent ! Checker.Aborted(x.cause.getMessage)
      stop()
  }

  def stop(): Unit = {
    context stop self
  }

  private[worker] def run(check: Check)(implicit ec: ExecutionContext): Future[Try[Boolean]] = {
    val result = evaluator.evaluate(fetcher)
    result map { v =>
      log.warning("Result of evaluation: " + v)
    }
    result
  }
} 
Example 108
Source File: LinearRegressionActor.scala    From coral   with Apache License 2.0 5 votes vote down vote up
package io.coral.actors.transform

import akka.actor.{ActorLogging, Props}
import org.json4s._
import org.json4s.JsonDSL._
import org.json4s.jackson.JsonMethods.{render, pretty}
import io.coral.actors.{SimpleEmitTrigger, CoralActor}

object LinearRegressionActor {
	implicit val formats = org.json4s.DefaultFormats

	def getParams(json: JValue) = {
		for {
			intercept <- (json \ "params" \ "intercept").extractOpt[Double]
			weights <- (json \ "params" \ "weights").extractOpt[Map[String, Double]]
		} yield {
			val outcome = (json \ "params" \ "outcome").extractOpt[String]
			(intercept, weights, outcome)
		}
	}

	def apply(json: JValue): Option[Props] = {
		getParams(json).map(_ => Props(classOf[LinearRegressionActor], json))
	}
}

class LinearRegressionActor(json: JObject)
	extends CoralActor(json)
	with ActorLogging
	with SimpleEmitTrigger {
	val (intercept, weights, outcome) = LinearRegressionActor.getParams(json).get

	override def simpleEmitTrigger(json: JObject): Option[JValue] = {
		val inputVector = weights.keys.map(key => {
			(json \ key).extractOpt[Double] match {
				case Some(value) => Some(value)
				case None => None
			}
		}).toVector

		if (inputVector.exists(!_.isDefined)) {
			None
		} else {
			val result = intercept + (inputVector.flatten zip weights.values).map(x => x._1 * x._2).sum
			val name = if (outcome.isDefined) outcome.get else "score"
			Some(render(name -> result) merge json)
		}
	}
} 
Example 109
Source File: GroupByActor.scala    From coral   with Apache License 2.0 5 votes vote down vote up
package io.coral.actors.transform

import akka.actor.{ActorLogging, Props}
import org.json4s._
import org.json4s.JsonDSL._
import org.json4s.jackson.JsonMethods._
import io.coral.actors.{NoEmitTrigger, CoralActor, CoralActorFactory}
import scaldi.Injector
import akka.pattern.pipe

object GroupByActor {
	implicit val formats = org.json4s.DefaultFormats

	def getParams(json: JValue) = {
		for {
			by <- (json \ "group" \ "by").extractOpt[String]
		} yield {
			by
		}
	}

	def apply(json: JValue)(implicit injector: Injector): Option[Props] = {
		getParams(json).map(_ => Props(classOf[GroupByActor], json, injector))
	}
}

class GroupByActor(json: JObject)(implicit injector: Injector)
	extends CoralActor(json)
	with NoEmitTrigger
	with ActorLogging {
	val Diff(_, _, jsonChildrenDef) = json diff JObject(("group", json \ "group"))
	val Diff(_, _, jsonDefinition) = json diff JObject(("timeout", json \ "timeout"))
	val by = GroupByActor.getParams(json).get
	override def jsonDef = jsonDefinition.asInstanceOf[JObject]
	override def state = Map(("actors", render(children)))

	override def noEmitTrigger(json: JObject) = {
		for {
			value <- (json \ by).extractOpt[String]
		} yield {
			val found = children.get(value) flatMap (id => actorRefFactory.child(id.toString))

			found match {
				case Some(actorRef) =>
					// We found the child, send it the original message
					actorRef forward json
				case None =>
					// Not found: create the child under a fresh numeric id so the
					// lookup above (children.get(value) -> actorRefFactory.child(id.toString))
					// can resolve it on subsequent messages
					val props = CoralActorFactory.getProps(jsonChildrenDef)
					props map { p =>
						val id = children.size + 1L
						val actor = actorRefFactory.actorOf(p, id.toString)
						children += (value -> id)
						actor forward json
					}
			}
		}
	}
} 
Example 110
Source File: StatsActor.scala    From coral   with Apache License 2.0 5 votes vote down vote up
package io.coral.actors.transform

import akka.actor.{ActorLogging, Props}
import org.json4s.JsonAST.JValue
import org.json4s.JsonDSL._
import org.json4s._
import org.json4s.jackson.JsonMethods.render
import io.coral.actors.{SimpleTimer, NoEmitTrigger, CoralActor}
import io.coral.lib.SummaryStatistics

import scala.language.implicitConversions

object StatsActor {
	implicit val formats = org.json4s.DefaultFormats

	def getParams(json: JValue) = {
		for {
			field <- (json \ "params" \ "field").extractOpt[String]
		} yield {
			field
		}
	}

	def apply(json: JValue): Option[Props] = {
		getParams(json).map(_ => Props(classOf[StatsActor], json))
	}
}

class StatsActor(json: JObject)
	extends CoralActor(json)
	with ActorLogging
	with NoEmitTrigger
	with SimpleTimer {
	implicit def double2jvalue(x: Double): JValue = if (x.isNaN) JNull else JDouble(x)

	val field = StatsActor.getParams(json).get
	val statistics = SummaryStatistics.mutable

	override def simpleTimer = {
		statistics.reset()
		Some(JNothing)
	}

	override def state = Map(
		("count", render(statistics.count)),
		("avg", render(statistics.average)),
		("sd", render(statistics.populationSd)),
		("min", render(statistics.min)),
		("max", render(statistics.max))
	)

	override def noEmitTrigger(json: JObject) = {
		for {
			value <- (json \ field).extractOpt[Double]
		} yield {
			statistics.append(value)
		}
	}
} 
Example 111
Source File: HttpBroadcastActor.scala    From coral   with Apache License 2.0 5 votes vote down vote up
package io.coral.actors.transform

import akka.actor.{ActorLogging, Props}
import org.json4s._
import io.coral.actors.{SimpleEmitTrigger, CoralActor}

object HttpBroadcastActor {
	implicit val formats = org.json4s.DefaultFormats

	def apply(json: JValue): Option[Props] = {
		Some(Props(classOf[HttpBroadcastActor], json))
	}
}

class HttpBroadcastActor(json: JObject)
	extends CoralActor(json)
	with ActorLogging
	with SimpleEmitTrigger {

	override def simpleEmitTrigger(json: JObject): Option[JValue] = {
		Some(json)
	}
} 
Example 112
Source File: FsmActor.scala    From coral   with Apache License 2.0 5 votes vote down vote up
package io.coral.actors.transform

import akka.actor.{ActorLogging, Props}
import org.json4s._
import io.coral.actors.{NoEmitTrigger, CoralActor}

object FsmActor {
	implicit val formats = org.json4s.DefaultFormats

	def getParams(json: JValue) = {
		for {
			key <- (json \ "params" \ "key").extractOpt[String]
			table <- (json \ "params" \ "table").extractOpt[Map[String, Map[String, String]]]
			s0 <- (json \ "params" \ "s0").extractOpt[String]
			if (table.contains(s0))
		} yield {
			(key, table, s0)
		}
	}

	def apply(json: JValue): Option[Props] = {
		getParams(json).map(_ => Props(classOf[FsmActor], json))
	}
}

class FsmActor(json: JObject)
	extends CoralActor(json)
	with ActorLogging
	with NoEmitTrigger {

	val (key, table, s0) = FsmActor.getParams(json).get
	// fsm state
	var s = s0
	override def state = Map(("s", JString(s)))

	override def noEmitTrigger(json: JObject) = {
		for {
			value <- (json \ key).extractOpt[String]
		} yield {
			// compute (local variables & update state)
			val e = table.getOrElse(s, table(s0))
			s = e.getOrElse(value, s)
		}
	}
} 
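A hypothetical configuration for the actor above, showing how the key/table/s0 params drive transitions (the field and state names are invented):

import org.json4s.jackson.JsonMethods.parse

// With this table, an event {"action": "go"} moves s from "s0" to "s1",
// and {"action": "stop"} moves it back; unknown values leave s unchanged.
val fsmJson = parse("""{
  "params": {
    "key": "action",
    "table": { "s0": { "go": "s1" }, "s1": { "stop": "s0" } },
    "s0": "s0"
  }
}""")
val fsmProps = FsmActor(fsmJson) // Some(Props(...)), since s0 is a key of table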
Example 113
Source File: ThresholdActor.scala    From coral   with Apache License 2.0 5 votes vote down vote up
package io.coral.actors.transform

import akka.actor.{ActorLogging, Props}
import org.json4s._
import org.json4s.JsonDSL._
import org.json4s.jackson.JsonMethods.render
import io.coral.actors.{SimpleEmitTrigger, CoralActor}

object ThresholdActor {
	implicit val formats = org.json4s.DefaultFormats

	def getParams(json: JValue) = {
		for {
			key <- (json \ "params" \ "key").extractOpt[String]
			threshold <- (json \ "params" \ "threshold").extractOpt[Double]
		} yield (key, threshold)
	}

	def apply(json: JValue): Option[Props] = {
		getParams(json).map(_ => Props(classOf[ThresholdActor], json))
	}
}

class ThresholdActor(json: JObject) extends CoralActor(json) with ActorLogging with SimpleEmitTrigger {
	val (key, threshold) = ThresholdActor.getParams(json).get

	override def simpleEmitTrigger(json: JObject): Option[JValue] = {
		for {
			value <- (json \ key).extractOpt[Double]
		} yield {
			value >= threshold match {
				case true => json
				case false => JNothing
			}
		}
	}
} 
Example 114
Source File: LogActor.scala    From coral   with Apache License 2.0 5 votes vote down vote up
package io.coral.actors.connector

import java.io.FileWriter
import akka.actor.{ActorLogging, Props}
import io.coral.actors.CoralActor
import org.json4s.JsonAST.JObject
import org.json4s._
import org.json4s.jackson.JsonMethods._
import scala.concurrent.Future


object LogActor {
	implicit val formats = org.json4s.DefaultFormats

	def getParams(json: JValue) = {
		val file = (json \ "params" \ "file").extractOpt[String]
		val append = (json \ "params" \ "append").extractOpt[Boolean]
		Some((file, append getOrElse false))
	}

	def apply(json: JValue): Option[Props] = {
		getParams(json).map(_ => Props(classOf[LogActor], json))
	}
}

class LogActor(json: JObject) extends CoralActor(json) with ActorLogging {
	val (file, append) = LogActor.getParams(json).get
	var fileWriter: Option[FileWriter] = None

	override def preStart() = {
		if (file.isDefined) {
			fileWriter = Some(new FileWriter(file.get, append))
		}
	}

	override def postStop() = {
		fileWriter match {
			case None =>
			case Some(f) => f.close()

		}
	}

	override def trigger = {
		json => Future {
			fileWriter match {
				case None =>
					log.info(compact(json))
				case Some(f) =>
					f.write(compact(json) + "\n")
					f.flush()
			}

			Some(JNothing)
		}
	}
} 
Example 115
Source File: KafkaProducerActor.scala    From coral   with Apache License 2.0 5 votes vote down vote up
package io.coral.actors.connector

import java.util.Properties

import akka.actor.{Props, ActorLogging}
import io.coral.actors.{NoEmitTrigger, CoralActor}
import io.coral.lib.KafkaJsonProducer.KafkaEncoder
import io.coral.lib.{KafkaJsonProducer, ConfigurationBuilder}
import org.json4s.JsonAST.{JObject, JValue}
import kafka.serializer.Encoder

object KafkaProducerActor {
	implicit val formats = org.json4s.DefaultFormats
	val builder = new ConfigurationBuilder("kafka.producer")

	def getParams(json: JValue) = {
		for {
			kafka <- (json \ "params" \ "kafka").extractOpt[JObject]
			topic <- (json \ "params" \ "topic").extractOpt[String]
		} yield {
			val properties = producerProperties(kafka)
			(properties, topic)
		}
	}

	private def producerProperties(json: JObject): Properties = {
		val properties = builder.properties
		json.values.foreach { case (k: String, v: String) => properties.setProperty(k, v) }
		properties
	}

	def apply(json: JValue): Option[Props] = {
		getParams(json).map(_ => Props(classOf[KafkaProducerActor[KafkaEncoder]], json, KafkaJsonProducer()))
	}

	def apply[T <: KafkaEncoder](json: JValue, encoder: Class[T]): Option[Props] = {
		getParams(json).map(_ => Props(classOf[KafkaProducerActor[T]], json, KafkaJsonProducer(encoder)))
	}
}

class KafkaProducerActor[T <: Encoder[JValue]](json: JObject, connection: KafkaJsonProducer[T])
	extends CoralActor(json)
	with NoEmitTrigger
	with ActorLogging {
	val (properties, topic) = KafkaProducerActor.getParams(json).get
	lazy val kafkaSender = connection.createSender(topic, properties)

	override def noEmitTrigger(json: JObject) = {
		val key = (json \ "key").extractOpt[String]
		val message = (json \ "message").extract[JObject]
		send(key, message)
	}

	private def send(key: Option[String], message: JObject) = {
		try {
			kafkaSender.send(key, message)
		} catch {
			case e: Exception => log.error(e, "failed to send message to Kafka")
		}
	}
} 
Example 116
Source File: KMClusterListener.scala    From Swallow   with Apache License 2.0 5 votes vote down vote up
package examples



import swallow.core._
import akka.cluster.Cluster
import akka.cluster.ClusterEvent._
import akka.actor.ActorLogging
import akka.actor.Actor


object KMClusterListener {
  final case class SuperviseFlow(flow: KMFlow)
}

class KMClusterListener extends Actor with ActorLogging {
  import KMClusterListener._

  val cluster = Cluster(context.system)

  // subscribe to cluster changes, re-subscribe when restart
  override def preStart(): Unit = {
    cluster.subscribe(self, initialStateMode = InitialStateAsEvents,
      classOf[MemberEvent], classOf[UnreachableMember])
  }
  override def postStop(): Unit = cluster.unsubscribe(self)

  def receive = {
    case state: CurrentClusterState =>
      log.info("****** Current members: {} ******", state.members.mkString(", "))
    case MemberUp(member) =>
      log.info("****** Member is Up: {} ******", member.address)
    case UnreachableMember(member) =>
      log.info("****** Member detected as unreachable: {} ******", member)
    case MemberRemoved(member, previousStatus) =>
      log.info("****** Member is Removed: {} after {} ******",
        member.address, previousStatus)
    case _: MemberEvent => // ignore


    case SuperviseFlow(flow: KMFlow) =>
      log.info(s"[KMClusterListener] superviseFlow; [From sender]: $sender")
      log.info(s"[Flow Info] from: ${flow.flowInfo.from}; to: ${flow.flowInfo.to}; content: ${flow.flowInfo.content}")
  }
} 
Example 117
Source File: ConsumerSelfManaged.scala    From scala-kafka-client   with MIT License 5 votes vote down vote up
package cakesolutions.kafka.examples

import akka.actor.{Actor, ActorLogging, ActorRef, ActorSystem, Props}
import cakesolutions.kafka.KafkaConsumer
import cakesolutions.kafka.akka.KafkaConsumerActor.{Confirm, Subscribe}
import cakesolutions.kafka.akka.{ConsumerRecords, Extractor, KafkaConsumerActor, Offsets}
import com.typesafe.config.Config
import org.apache.kafka.clients.consumer.OffsetResetStrategy
import org.apache.kafka.common.TopicPartition
import org.apache.kafka.common.serialization.StringDeserializer

import scala.concurrent.duration._


object ConsumerSelfManaged {
  def apply(config: Config): ActorRef = {
    val consumerConf = KafkaConsumer.Conf(
      new StringDeserializer,
      new StringDeserializer,
      groupId = "groupId",
      enableAutoCommit = false,
      autoOffsetReset = OffsetResetStrategy.EARLIEST)
      .withConf(config)

    val actorConf = KafkaConsumerActor.Conf(1.seconds, 3.seconds)

    val system = ActorSystem()
    system.actorOf(Props(new ConsumerSelfManaged(consumerConf, actorConf)))
  }
}

class ConsumerSelfManaged(
  kafkaConfig: KafkaConsumer.Conf[String, String],
  actorConfig: KafkaConsumerActor.Conf) extends Actor with ActorLogging {

  val recordsExt: Extractor[Any, ConsumerRecords[String, String]] = ConsumerRecords.extractor[String, String]

  val consumer: ActorRef = context.actorOf(
    KafkaConsumerActor.props(kafkaConfig, actorConfig, self)
  )

  consumer ! Subscribe.ManualOffset(Offsets(Map((new TopicPartition("topic1", 0), 1))))

  override def receive: Receive = {

    // Records from Kafka
    case recordsExt(records) =>
      processRecords(records)
      sender() ! Confirm(records.offsets)
  }

  private def processRecords(records: ConsumerRecords[String, String]) = {
    records.pairs.foreach { case (key, value) =>
      log.info(s"Received [$key,$value]")
    }
    log.info(s"Batch complete, offsets: ${records.offsets}")
  }
} 
Example 118
Source File: AutoPartitionConsumer.scala    From scala-kafka-client   with MIT License 5 votes vote down vote up
package cakesolutions.kafka.examples

import akka.actor.{Actor, ActorLogging, ActorRef, ActorSystem, Props}
import cakesolutions.kafka.KafkaConsumer
import cakesolutions.kafka.akka.KafkaConsumerActor.{Confirm, Subscribe}
import cakesolutions.kafka.akka.{ConsumerRecords, KafkaConsumerActor}
import com.typesafe.config.{Config, ConfigFactory}
import org.apache.kafka.clients.consumer.OffsetResetStrategy
import org.apache.kafka.common.serialization.StringDeserializer

import scala.concurrent.duration._


object AutoPartitionConsumer {
  def apply(config: Config): ActorRef = {
    val consumerConf = KafkaConsumer.Conf(
      new StringDeserializer,
      new StringDeserializer,
      groupId = "test_group",
      enableAutoCommit = false,
      autoOffsetReset = OffsetResetStrategy.EARLIEST)
      .withConf(config)

    val actorConf = KafkaConsumerActor.Conf(1.seconds, 3.seconds)

    val system = ActorSystem()
    system.actorOf(Props(new AutoPartitionConsumer(consumerConf, actorConf)))
  }
}

class AutoPartitionConsumer(
  kafkaConfig: KafkaConsumer.Conf[String, String],
  actorConfig: KafkaConsumerActor.Conf) extends Actor with ActorLogging {

  private val recordsExt = ConsumerRecords.extractor[String, String]

  private val consumer = context.actorOf(
    KafkaConsumerActor.props(kafkaConfig, actorConfig, self)
  )
  context.watch(consumer)

  consumer ! Subscribe.AutoPartition(List("topic1"))

  override def receive: Receive = {

    // Records from Kafka
    case recordsExt(records) =>
      processRecords(records.pairs)
      sender() ! Confirm(records.offsets, commit = true)
  }

  private def processRecords(records: Seq[(Option[String], String)]) =
    records.foreach { case (key, value) =>
      log.info(s"Received [$key,$value]")
    }
} 
Example 119
Source File: ConsumerToProducer.scala    From scala-kafka-client   with MIT License 5 votes vote down vote up
package cakesolutions.kafka.examples

import akka.actor.{Actor, ActorLogging, ActorRef, ActorSystem, Props}
import cakesolutions.kafka.akka.KafkaConsumerActor.{Confirm, Subscribe}
import cakesolutions.kafka.akka._
import cakesolutions.kafka.{KafkaConsumer, KafkaProducer}
import com.typesafe.config.{Config, ConfigFactory}
import org.apache.kafka.clients.consumer.OffsetResetStrategy
import org.apache.kafka.common.serialization.{StringDeserializer, StringSerializer}

import scala.concurrent.duration._


object ConsumerToProducer {
  def apply(consumerConfig: Config, producerConfig: Config): ActorRef = {

    // Create KafkaConsumerActor config with bootstrap.servers specified in Typesafe config
    val consumerConf = KafkaConsumer.Conf(
      new StringDeserializer,
      new StringDeserializer,
      groupId = "test_group",
      enableAutoCommit = false,
      autoOffsetReset = OffsetResetStrategy.EARLIEST)
      .withConf(consumerConfig)

    val actorConf = KafkaConsumerActor.Conf(1.seconds, 3.seconds, 5)

    // Create KafkaProducerActor config with defaults and bootstrap.servers specified in Typesafe config
    val producerConf = KafkaProducer.Conf(new StringSerializer, new StringSerializer).withConf(producerConfig)

    val system = ActorSystem()
    system.actorOf(Props(new ConsumerToProducer(consumerConf, actorConf, producerConf)))
  }
}

class ConsumerToProducer(
  kafkaConfig: KafkaConsumer.Conf[String, String],
  actorConfig: KafkaConsumerActor.Conf,
  producerConf: KafkaProducer.Conf[String, String]) extends Actor with ActorLogging {

  private val recordsExt = ConsumerRecords.extractor[String, String]

  // The KafkaConsumerActor
  private val consumer = context.actorOf(
    KafkaConsumerActor.props(kafkaConfig, actorConfig, self)
  )
  context.watch(consumer)

  // The KafkaProducerActor
  private val producer = context.actorOf(KafkaProducerActor.props(producerConf))

  consumer ! Subscribe.AutoPartition(List("topic1"))

  override def receive: Receive = {

    // Records from Kafka
    case recordsExt(records) =>
      processRecords(records)

    // Confirmed Offsets from KafkaProducer
    case o: Offsets =>
      consumer ! Confirm(o, commit = true)
  }

  // Demonstrates some transformation of the messages before forwarding to KafkaProducer
  private def processRecords(records: ConsumerRecords[String, String]) = {
    val transformedRecords = records.pairs.map { case (key, value) =>
      (key, value + ".")
    }

    // Send records to Topic2.  Offsets will be sent back to this actor once confirmed.
    producer ! ProducerRecords.fromKeyValues[String, String]("topic2", transformedRecords, Some(records.offsets), None)

    // Could have sent them like this if we didn't first transform:
    // producer ! ProducerRecords.fromConsumerRecords("topic2", records, None)
  }
} 
Example 120
Source File: ConsumerRecovery.scala    From scala-kafka-client   with MIT License 5 votes vote down vote up
package cakesolutions.kafka.examples

import akka.actor.{Actor, ActorLogging, ActorRef, ActorSystem, OneForOneStrategy, Props, SupervisorStrategy}
import cakesolutions.kafka.KafkaConsumer
import cakesolutions.kafka.akka.KafkaConsumerActor.{Confirm, Subscribe}
import cakesolutions.kafka.akka.{ConsumerRecords, Extractor, KafkaConsumerActor}
import com.typesafe.config.{Config, ConfigFactory}
import org.apache.kafka.clients.consumer.OffsetResetStrategy
import org.apache.kafka.common.serialization.StringDeserializer

import scala.concurrent.duration._


// NOTE: this listing omits the class header; it is reconstructed here to match
// the identical ConsumerRecovery class shown in Example 103.
class ConsumerRecovery(
  kafkaConfig: KafkaConsumer.Conf[String, String],
  actorConfig: KafkaConsumerActor.Conf) extends Actor with ActorLogging {

  override def supervisorStrategy: SupervisorStrategy = OneForOneStrategy(maxNrOfRetries = 10) {
    case _: KafkaConsumerActor.ConsumerException =>
      log.info("Consumer exception caught. Restarting consumer.")
      SupervisorStrategy.Restart
    case _ =>
      SupervisorStrategy.Escalate
  }

  val recordsExt: Extractor[Any, ConsumerRecords[String, String]] = ConsumerRecords.extractor[String, String]

  val consumer: ActorRef = context.actorOf(
    KafkaConsumerActor.props(kafkaConfig, actorConfig, self)
  )

  consumer ! Subscribe.AutoPartition(List("topic1"))

  override def receive: Receive = {

    // Records from Kafka
    case recordsExt(records) =>
      processRecords(records.pairs)
      sender() ! Confirm(records.offsets, commit = true)
  }

  private def processRecords(records: Seq[(Option[String], String)]) =
    records.foreach { case (key, value) =>
      log.info(s"Received [$key,$value]")
    }
} 
Example 121
Source File: AutoPartitionConsumerWithManualOffset.scala    From scala-kafka-client   with MIT License 5 votes vote down vote up
package cakesolutions.kafka.examples

import akka.actor.{Actor, ActorLogging, ActorRef, ActorSystem, Props}
import cakesolutions.kafka.KafkaConsumer
import cakesolutions.kafka.akka.KafkaConsumerActor._
import cakesolutions.kafka.akka.{ConsumerRecords, KafkaConsumerActor, Offsets}
import com.typesafe.config.{Config, ConfigFactory}
import org.apache.kafka.clients.consumer.OffsetResetStrategy
import org.apache.kafka.common.TopicPartition
import org.apache.kafka.common.serialization.StringDeserializer

import scala.concurrent.duration._


object AutoPartitionConsumerWithManualOffset {
  def apply(config: Config): ActorRef = {
    val consumerConf = KafkaConsumer.Conf(
      new StringDeserializer,
      new StringDeserializer,
      groupId = "test_group",
      enableAutoCommit = false,
      autoOffsetReset = OffsetResetStrategy.EARLIEST)
      .withConf(config)

    val actorConf = KafkaConsumerActor.Conf(1.seconds, 3.seconds)

    val system = ActorSystem()
    system.actorOf(Props(new AutoPartitionConsumerWithManualOffset(consumerConf, actorConf)))
  }
}

class AutoPartitionConsumerWithManualOffset(
  kafkaConfig: KafkaConsumer.Conf[String, String],
  actorConfig: KafkaConsumerActor.Conf) extends Actor with ActorLogging {

  private val recordsExt = ConsumerRecords.extractor[String, String]

  private val consumer = context.actorOf(
    KafkaConsumerActor.props(kafkaConfig, actorConfig, self)
  )

  consumer ! Subscribe.AutoPartitionWithManualOffset(List("topic1"), assignedListener, revokedListener)

  override def receive: Receive = {

    // Records from Kafka
    case recordsExt(records) =>
      processRecords(records.pairs)
      sender() ! Confirm(records.offsets)
  }

  private def processRecords(records: Seq[(Option[String], String)]) =
    records.foreach { case (key, value) =>
      log.info(s"Received [$key,$value]")
    }

  private def assignedListener(tps: List[TopicPartition]): Offsets = {
    log.info("Partitions have been assigned" + tps.toString())

    // Should load the offsets from a persistent store and any related state
    val offsetMap = tps.map { tp =>
      tp -> 0L
    }.toMap

    // Return the required offsets for the assigned partitions
    Offsets(offsetMap)
  }

  private def revokedListener(tps: List[TopicPartition]): Unit = {
    log.info("Partitions have been revoked" + tps.toString())
    // Opportunity to clear any state for the revoked partitions
    ()
  }
} 
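The assignedListener above hardcodes offset 0 for every partition; as its comments note, a real deployment would consult a persistent store. A minimal in-memory sketch of that contract (OffsetStore is hypothetical; a real implementation would be backed by a database):

import cakesolutions.kafka.akka.Offsets
import org.apache.kafka.common.TopicPartition

object OffsetStore {
  @volatile private var store = Map.empty[TopicPartition, Long]

  // Called from assignedListener: return the last saved offset per partition.
  def load(tps: List[TopicPartition]): Offsets =
    Offsets(tps.map(tp => tp -> store.getOrElse(tp, 0L)).toMap)

  // Called as records are processed, before partitions are revoked.
  def save(tp: TopicPartition, offset: Long): Unit =
    store += tp -> offset
}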
Example 122
Source File: Blackboard.scala    From Scalaprof   with GNU General Public License v2.0 5 votes vote down vote up
package com.phasmid.hedge_fund.actors

import akka.actor.{ Actor, Props, ActorLogging, ActorRef }


class Blackboard(forwardMap: Map[Class[_ <: Any], String], actors: Map[String, Class[_ <: BlackboardActor]]) extends Actor with ActorLogging {

  val actorMap: Map[String, ActorRef] = actors map {
    case (k, v) => k -> context.actorOf(Props.create(v, self), k)
  }

  // To encode specific, non-forwarding behavior, override this method
  override def receive = {
    case message =>
      forwardMap.get(message.getClass) match {
        case Some(s) => actorMap.get(s) match {
          case Some(k) => k forward message
          case _ => log.warning(s"no actor established for key $s")
        }
        case _ => log.warning(s"no forward mapping established for message class ${message.getClass}")
      }
  }
} 
Example 123
Source File: Calculator.scala    From Scalaprof   with GNU General Public License v2.0 5 votes vote down vote up
package actors

import akka.actor.{ Actor, ActorLogging, ActorRef }
import scala.util._
import models._


class Calculator[A](mill: Mill[A], parser: ExpressionParser[A]) extends Actor with ActorLogging {
  
  override def receive = {
    case View => sender ! mill.toSeq
    case x: String =>
      log.info(s"we got $x")
      try {
        val response = mill.parse(x)(parser)
        log.info(s"response: $response")
        sender ! response
      }
      catch {
        case t: Throwable => println("should never hit this line"); log.error(t, "logic error: should never log this issue")
      }
    case z =>
      log.warning(s"received unknown message type: $z")
  }
}

object View 
Example 124
Source File: Mapper.scala    From Scalaprof   with GNU General Public License v2.0 5 votes vote down vote up
package edu.neu.coe.scala.mapreduce

import akka.actor.{ Actor, ActorLogging, ActorRef }
import scala.collection.mutable.HashMap
import scala.util._


class Mapper_Forgiving[K1,V1,K2,V2](f: (K1,V1)=>(K2,V2)) extends Mapper[K1,V1,K2,V2](f) {
  
  override def prepareReply(v2k2ts: Seq[Try[(K2,V2)]]) = {
    val v2sK2m = HashMap[K2,Seq[V2]]() // mutable map: key -> values accumulated so far
    var xs = Seq[Throwable]() // reassigned below to collect failures
    for (v2k2t <- v2k2ts; v2k2e = Master.sequence(v2k2t))
      v2k2e match {
        case Right((k2,v2)) => v2sK2m put(k2, v2+:(v2sK2m get(k2) getOrElse(Nil)))
        case Left(x) => xs = xs :+ x
      }
    (v2sK2m.toMap, xs)
  }
}

case class Incoming[K, V](m: Seq[(K,V)]) {
  override def toString = s"Incoming: with ${m.size} elements"
}

object Incoming {
  def sequence[K,V](vs: Seq[V]): Incoming[K,V] = Incoming((vs zip Stream.continually(null.asInstanceOf[K])).map{_.swap})
  def map[K, V](vKm: Map[K,V]): Incoming[K,V] = Incoming(vKm.toSeq)
}

object Mapper {
} 
Example 125
Source File: Reducer.scala    From Scalaprof   with GNU General Public License v2.0 5 votes vote down vote up
package edu.neu.coe.scala.mapreduce

import akka.actor.{ Actor, ActorLogging, ActorRef }
import scala.util._


class Reducer_Fold[K2,V2,V3](g: (V3,V2)=>V3, z: =>V3) extends ReducerBase[K2,V2,V3] {  
  def getValue(vs: Seq[V2]): V3 = vs.foldLeft(z)(g)
}

abstract class ReducerBase[K2,V2,V3] extends Actor with ActorLogging {
  
  override def receive = {
    case i: Intermediate[K2,V2] =>
      log.info(s"received $i")
      log.debug(s"with elements ${i.vs}")
      sender ! (i.k, Master.sequence(Try(getValue(i.vs))))
    case q =>
      log.warning(s"received unknown message type: $q")
  }
  
  override def postStop(): Unit = {
    log.debug("has shut down")
  }
  
  def getValue(vs: Seq[V2]): V3
}


case class Intermediate[K2, V2](k: K2, vs: Seq[V2]) {
  override def toString = s"Intermediate: with k=$k and ${vs.size} elements"
} 
Example 126
Source File: ClusterApp.scala    From reactive-cli   with Apache License 2.0 5 votes vote down vote up
package foo

import akka.actor.{ Actor, ActorLogging, ActorSystem, PoisonPill, Props }
import akka.cluster.ClusterEvent.ClusterDomainEvent
import akka.cluster.singleton.{ ClusterSingletonManager, ClusterSingletonManagerSettings }
import akka.cluster.{ Cluster, ClusterEvent }
import akka.http.scaladsl.Http
import akka.http.scaladsl.model._
import akka.http.scaladsl.server.Directives._
import akka.stream.ActorMaterializer

object ClusterApp {

  def main(args: Array[String]): Unit = {

    implicit val system = ActorSystem()
    implicit val materializer = ActorMaterializer()
    implicit val executionContext = system.dispatcher

    val cluster = Cluster(system)
    system.log.info("Starting Akka Management")
    system.log.info("something2")
    // AkkaManagement(system).start()
    // ClusterBootstrap(system).start()

    system.actorOf(
      ClusterSingletonManager.props(
        Props[NoisySingleton],
        PoisonPill,
        ClusterSingletonManagerSettings(system)))
    Cluster(system).subscribe(
      system.actorOf(Props[ClusterWatcher]),
      ClusterEvent.InitialStateAsEvents,
      classOf[ClusterDomainEvent])

    // add real app routes here
    val routes =
      path("hello") {
        get {
          complete(
            HttpEntity(ContentTypes.`text/html(UTF-8)`, "<h1>Hello</h1>"))
        }
      }

    Http().bindAndHandle(routes, "0.0.0.0", 8080)

    system.log.info(
      s"Server online at http://localhost:8080/\nPress RETURN to stop...")

    cluster.registerOnMemberUp {
      system.log.info("Cluster member is up!")
    }
  }

  class ClusterWatcher extends Actor with ActorLogging {
    val cluster = Cluster(context.system)

    override def receive = {
      case msg ⇒ log.info(s"Cluster ${cluster.selfAddress} >>> " + msg)
    }
  }
} 
Example 128
Source File: HttpIncomingReceiver.scala    From sumobot   with Apache License 2.0 5 votes vote down vote up
package com.sumologic.sumobot.http_frontend

import java.time.Instant
import java.util.concurrent.TimeUnit

import akka.actor.{Actor, ActorLogging, ActorRef}
import akka.pattern.pipe
import scala.concurrent.ExecutionContext.Implicits.global
import akka.http.scaladsl.model.ws.TextMessage
import akka.stream.ActorMaterializer
import com.sumologic.sumobot.core.HttpReceptionist
import com.sumologic.sumobot.core.model.{IncomingMessage, UserSender}

import scala.concurrent.duration.Duration

object HttpIncomingReceiver {
  case object StreamEnded
  private val StrictTimeout = Duration.create(5, TimeUnit.SECONDS)
}

class HttpIncomingReceiver(outcomingRef: ActorRef) extends Actor with ActorLogging {
  private implicit val materializer = ActorMaterializer()

  override def receive: Receive = {
    case streamedMsg: TextMessage.Streamed =>
      streamedMsg.toStrict(HttpIncomingReceiver.StrictTimeout).pipeTo(self)(sender())

    case strictMsg: TextMessage.Strict =>
      val contents = strictMsg.getStrictText
      val incomingMessage = IncomingMessage(contents, true, HttpReceptionist.DefaultSumoBotChannel,
        formatDateNow(), None, Seq.empty, UserSender(HttpReceptionist.DefaultClientUser))
      context.system.eventStream.publish(incomingMessage)

    case HttpIncomingReceiver.StreamEnded =>
      context.stop(outcomingRef)
      context.stop(self)
  }

  private def formatDateNow(): String = {
    s"${Instant.now().getEpochSecond}.000000"
  }
} 
Example 129
Source File: HttpOutcomingSender.scala    From sumobot   with Apache License 2.0 5 votes vote down vote up
package com.sumologic.sumobot.http_frontend

import akka.actor.{Actor, ActorLogging, ActorRef}
import akka.http.scaladsl.model.ws.TextMessage
import com.sumologic.sumobot.core.model.OutgoingMessage

class HttpOutcomingSender(publisherRef: ActorRef) extends Actor with ActorLogging {
  override def preStart(): Unit = {
    Seq(classOf[OutgoingMessage]).foreach(context.system.eventStream.subscribe(self, _))
  }

  override def receive: Receive = {
    case OutgoingMessage(_, text, _) =>
      publisherRef ! TextMessage(text)
  }

  override def postStop(): Unit = {
    context.stop(publisherRef)
    context.system.eventStream.unsubscribe(self)
  }
} 
Example 130
Source File: HttpReceptionist.scala    From sumobot   with Apache License 2.0 5 votes vote down vote up
package com.sumologic.sumobot.core

import java.time.Instant

import akka.actor.{Actor, ActorLogging, ActorRef, Props}
import com.sumologic.sumobot.core.model.PublicChannel
import com.sumologic.sumobot.plugins.BotPlugin.{InitializePlugin, PluginAdded, PluginRemoved}
import play.api.libs.json.{JsObject, JsValue}
import slack.api.RtmStartState
import slack.models.{Channel, Group, Im, Team, User}
import slack.rtm.RtmState

object HttpReceptionist {
  private[core] val DefaultChannel = Channel("C0001SUMO", "sumobot", Instant.now().getEpochSecond(),
    Some("U0001SUMO"), Some(false), Some(true), Some(false), Some(false), Some(true), None, Some(false), Some(false), None, None, None, None, None, None, None, None)
  val DefaultSumoBotChannel = PublicChannel(DefaultChannel.id, DefaultChannel.name)

  val DefaultBotUser = User("U0001SUMO", "sumobot-bot", None, None, None, None, None, None, None, None, None, None, None, None, None, None)
  val DefaultClientUser = User("U0002SUMO", "sumobot-client", None, None, None, None, None, None, None, None, None, None, None, None, None, None)

  private[core] val StateUrl = ""
  private[core] val StateTeam = Team("T0001SUMO", "Sumo Bot", "sumobot", "sumologic.com", 30, false, new JsObject(Map.empty), "std")
  private[core] val StateUsers: Seq[User] = Array(DefaultBotUser, DefaultClientUser)
  private[core] val StateChannels: Seq[Channel] = Array(DefaultChannel)
  private[core] val StateGroups: Seq[Group] = Seq.empty
  private[core] val StateIms: Seq[Im] = Seq.empty
  private[core] val StateBots: Seq[JsValue] = Seq.empty

  private[core] val StartState = RtmStartState(StateUrl, DefaultBotUser, StateTeam, StateUsers, StateChannels, StateGroups, StateIms, StateBots)
  private[core] val State = new RtmState(StartState)
}

class HttpReceptionist(brain: ActorRef) extends Actor with ActorLogging {
  private val pluginRegistry = context.system.actorOf(Props(classOf[PluginRegistry]), "plugin-registry")

  override def receive: Receive = {
    case message@PluginAdded(plugin, _) =>
      plugin ! InitializePlugin(HttpReceptionist.State, brain, pluginRegistry)
      pluginRegistry ! message

    case message@PluginRemoved(_) =>
      pluginRegistry ! message
  }
} 
Example 131
Source File: PluginRegistry.scala    From sumobot   with Apache License 2.0 5 votes vote down vote up
package com.sumologic.sumobot.core

import akka.actor.{Actor, ActorLogging, ActorRef}
import com.sumologic.sumobot.core.PluginRegistry.{Plugin, PluginList, RequestPluginList}
import com.sumologic.sumobot.plugins.BotPlugin.{PluginAdded, PluginRemoved}

object PluginRegistry {

  case class Plugin(plugin: ActorRef, help: String)

  case object RequestPluginList
  case class PluginList(plugins: Seq[Plugin])
}

class PluginRegistry extends Actor with ActorLogging {

  private var list = List.empty[Plugin]

  override def receive: Receive = {
    case PluginAdded(plugin, help) =>
      val name = plugin.path.name
      log.info(s"Plugin added: $name")
      if (list.exists(_.plugin.path.name == name)) {
        log.error(s"Attempt to register duplicate plugin: $name")
      } else {
        list +:= Plugin(plugin, help)
      }

    case PluginRemoved(plugin) =>
      val name = plugin.path.name
      list = list.filterNot(_.plugin.path.name == name)
      log.info(s"Plugin removed: $name")

    case RequestPluginList =>
      sender() ! PluginList(list)
  }
} 
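The registry is driven entirely by messages: PluginAdded and PluginRemoved mutate the list, and RequestPluginList reads it via the ask pattern, as the Help plugin below also demonstrates. A minimal standalone query sketch (names are illustrative):

import akka.actor.{ActorRef, ActorSystem}
import akka.pattern.ask
import akka.util.Timeout
import com.sumologic.sumobot.core.PluginRegistry.{PluginList, RequestPluginList}

import scala.concurrent.Future
import scala.concurrent.duration._

object RegistryQuery {
  def pluginNames(registry: ActorRef)(implicit system: ActorSystem): Future[Seq[String]] = {
    import system.dispatcher
    implicit val timeout: Timeout = Timeout(5.seconds)
    (registry ? RequestPluginList).mapTo[PluginList].map(_.plugins.map(_.plugin.path.name))
  }
}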
Example 132
Source File: Help.scala    From sumobot   with Apache License 2.0 5 votes vote down vote up
package com.sumologic.sumobot.plugins.help

import akka.actor.ActorLogging
import akka.pattern.ask
import akka.util.Timeout
import com.sumologic.sumobot.core.PluginRegistry.{PluginList, RequestPluginList}
import com.sumologic.sumobot.core.model.IncomingMessage
import com.sumologic.sumobot.plugins.BotPlugin

import scala.concurrent.duration._
import scala.util.Success

object Help {
  private[help] val ListPlugins = BotPlugin.matchText("(help|\\?)\\W*")
  private[help] val HelpForPlugin = BotPlugin.matchText("(help|\\?) ([\\-\\w]+).*")
}

class Help extends BotPlugin with ActorLogging {
  override protected def help =
    s"""I can help you understand plugins.
       |
       |help - I'll tell you what plugins I've got.
       |help <plugin>. - I'll tell you how <plugin> works.
     """.stripMargin

  import Help._

  override protected def receiveIncomingMessage = {
    case message@IncomingMessage(ListPlugins(_), true, _, _, _, _, _) =>
      val msg = message
      implicit val timeout = Timeout(5.seconds)
      pluginRegistry ? RequestPluginList onComplete {
        case Success(result) => result match {
          case PluginList(plugins) =>
            msg.say(plugins.map(_.plugin.path.name).sorted.mkString("\n"))
        }
        case _ =>
      }

    case message@IncomingMessage(HelpForPlugin(_, pluginName), addressedToUs, _, _, _, _, _) =>
      val msg = message
      implicit val timeout = Timeout(5.seconds)
      pluginRegistry ? RequestPluginList onComplete {
        case Success(result) => result match {
          case PluginList(plugins) =>
            plugins.find(_.plugin.path.name.equalsIgnoreCase(pluginName)) match {
              case Some(plugin) =>
                msg.say(plugin.help)
              case None =>
                if (addressedToUs) {
                  msg.respond(s"Sorry, I don't know $pluginName")
                }
            }
        }
        case _ =>
      }
  }
} 
Example 133
Source File: AWSSupport.scala    From sumobot   with Apache License 2.0 5 votes vote down vote up
package com.sumologic.sumobot.plugins.awssupport

import akka.actor.ActorLogging
import com.amazonaws.auth.{AWSCredentials, AWSStaticCredentialsProvider}
import com.amazonaws.services.support.AWSSupportClientBuilder
import com.amazonaws.services.support.model.{CaseDetails, DescribeCasesRequest}
import com.sumologic.sumobot.core.aws.AWSAccounts
import com.sumologic.sumobot.core.model.IncomingMessage
import com.sumologic.sumobot.plugins.BotPlugin

import scala.collection.JavaConverters._
import scala.concurrent.ExecutionContext.Implicits.global
import scala.util.{Failure, Success, Try}

class AWSSupport
  extends BotPlugin
    with ActorLogging {

  case class CaseInAccount(account: String, caseDetails: CaseDetails)

  private val credentials: Map[String, AWSCredentials] =
    AWSAccounts.load(context.system.settings.config)

  private val clients = credentials.map { case (id, creds) =>
    id -> AWSSupportClientBuilder.standard()
      .withCredentials(new AWSStaticCredentialsProvider(creds))
      .build()
  }

  override protected def help: String =
    s"""
       |I can tell you about AWS support tickets.
       |
       |list aws cases - List all AWS support tickets.
       |show aws case <case> - I'll show you more details about that case.
     """.stripMargin

  private val CaseDetails = matchText("show aws case (\\d+).*")

  private val ListCases = matchText("list aws cases")

  override protected def receiveIncomingMessage: ReceiveIncomingMessage = {

    case message@IncomingMessage(ListCases(), _, _, _, _, _, _) =>
      message.respondInFuture {
        msg =>
          val caseList = getAllCases.map(summary(_) + "\n").mkString("\n")
          msg.message(caseList)
      }

    case message@IncomingMessage(CaseDetails(caseId), _, _, _, _, _, _) =>
      message.respondInFuture {
        msg =>
          log.info(s"Looking for case $caseId")

          Try(getAllCases) match {
            case Success(cases) =>
              cases.find(_.caseDetails.getDisplayId == caseId) match {
                case None =>
                  msg.response("Not a known support case.")
                case Some(cse) =>
                  msg.message(details(cse))
              }
            case Failure(e) if e.getMessage.contains("Invalid case ID:") =>
              msg.response(s"Invalid case ID: $caseId")
          }
      }
  }

  private def getAllCases: Seq[CaseInAccount] = {
    clients.toSeq.par.flatMap {
      tpl =>
        val client = tpl._2
        val unresolved = client.describeCases(new DescribeCasesRequest()).getCases.asScala.toList
        val resolved = client.describeCases(new DescribeCasesRequest().withIncludeResolvedCases(true)).getCases.asScala.toList
        (unresolved ++ resolved).map(CaseInAccount(tpl._1, _))
    }.seq
  }

  private def summary(cia: CaseInAccount): String =
    s"*# ${cia.caseDetails.getDisplayId}:* ${cia.caseDetails.getSubject}\n" +
      s" - account: ${cia.account}, submitted by: ${cia.caseDetails.getSubmittedBy}, status: ${cia.caseDetails.getStatus}"

  private def details(cia: CaseInAccount): String = {
    val latest = cia.caseDetails.getRecentCommunications.getCommunications.asScala.head
    summary(cia) + "\n\n" +
      s"""
         |_${latest.getSubmittedBy} at ${latest.getTimeCreated}_
         |${latest.getBody}
    """.stripMargin
  }
} 
Example 134
Source File: RestInterface.scala    From akka-sharding-example   with MIT License 5 votes vote down vote up
package com.michalplachta.shoesorter.api

import akka.actor.{Actor, ActorLogging, ActorRef}
import akka.io.IO
import akka.pattern.ask
import com.michalplachta.shoesorter.Domain.{Container, Junction}
import com.michalplachta.shoesorter.Messages._
import spray.can.Http
import spray.httpx.SprayJsonSupport._
import spray.routing._

import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.duration._

class RestInterface(decider: ActorRef, exposedPort: Int) extends Actor with HttpServiceBase with ActorLogging {
  val route: Route = {
    path("junctions" / IntNumber / "decisionForContainer" / IntNumber) { (junctionId, containerId) =>
      get {
        complete {
          log.info(s"Request for junction $junctionId and container $containerId")
          val junction = Junction(junctionId)
          val container = Container(containerId)
          decider.ask(WhereShouldIGo(junction, container))(5 seconds).mapTo[Go]
        }
      }
    }
  }

  def receive = runRoute(route)

  implicit val system = context.system
  IO(Http) ! Http.Bind(self, interface = "0.0.0.0", port = exposedPort)
} 
Example 135
Source File: SortingDecider.scala    From akka-sharding-example   with MIT License 5 votes vote down vote up
package com.michalplachta.shoesorter

import akka.actor.{ActorLogging, Actor, Props}
import akka.cluster.sharding.ShardRegion
import akka.cluster.sharding.ShardRegion.{ExtractEntityId, ExtractShardId}
import com.michalplachta.shoesorter.Domain.{Container, Junction}
import com.michalplachta.shoesorter.Messages._

object SortingDecider {
  def name = "sortingDecider"

  def props = Props[SortingDecider]

  def extractShardId: ExtractShardId = {
    case WhereShouldIGo(junction, _) =>
      (junction.id % 2).toString
  }

  def extractEntityId: ExtractEntityId = {
    case msg @ WhereShouldIGo(junction, _) =>
      (junction.id.toString, msg)
  }
}

class SortingDecider extends Actor with ActorLogging {
  def receive = {
    case WhereShouldIGo(junction, container) =>
      val decision = Decisions.whereShouldContainerGo(junction, container)
      log.info("Decision on junction {} for container {}: {}", junction.id, container.id, decision)
      sender ! Go(decision)
  }
} 
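The companion's extractShardId and extractEntityId are the two functions Cluster Sharding needs in order to route WhereShouldIGo messages to the right entity. A sketch of the standard Akka registration call (actor-system setup assumed; this excerpt does not show how the project itself wires it):

import akka.actor.{ActorRef, ActorSystem}
import akka.cluster.sharding.{ClusterSharding, ClusterShardingSettings}
import com.michalplachta.shoesorter.SortingDecider

object ShardingSetup {
  def start(system: ActorSystem): ActorRef =
    ClusterSharding(system).start(
      typeName = SortingDecider.name,
      entityProps = SortingDecider.props,
      settings = ClusterShardingSettings(system),
      extractEntityId = SortingDecider.extractEntityId,
      extractShardId = SortingDecider.extractShardId)
}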
Example 136
Source File: AkkaExecutionSequencer.scala    From daml   with Apache License 2.0 5 votes vote down vote up
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.grpc.adapter

import akka.Done
import akka.actor.{Actor, ActorLogging, ActorRef, ActorSystem, ExtendedActorSystem, Props}
import akka.pattern.{AskTimeoutException, ask}
import akka.util.Timeout
import com.daml.grpc.adapter.RunnableSequencingActor.ShutdownRequest

import scala.concurrent.duration.FiniteDuration
import scala.concurrent.{ExecutionContext, Future}
import scala.util.control.NonFatal
import com.daml.dec.DirectExecutionContext


// Class declaration reconstructed from the companion's apply (elided in this excerpt)
class AkkaExecutionSequencer private (actorRef: ActorRef)(implicit askTimeout: Timeout) {

  def closeAsync(implicit ec: ExecutionContext): Future[Done] =
    (actorRef ? ShutdownRequest).mapTo[Done].recover {
      case askTimeoutException: AskTimeoutException if actorIsTerminated(askTimeoutException) =>
        Done
    }

  private def actorIsTerminated(askTimeoutException: AskTimeoutException) = {
    AkkaExecutionSequencer.actorTerminatedRegex.findFirstIn(askTimeoutException.getMessage).nonEmpty
  }
}

object AkkaExecutionSequencer {
  def apply(name: String, terminationTimeout: FiniteDuration)(
      implicit system: ActorSystem): AkkaExecutionSequencer = {
    system match {
      case extendedSystem: ExtendedActorSystem =>
        new AkkaExecutionSequencer(
          extendedSystem.systemActorOf(Props[RunnableSequencingActor], name))(
          Timeout.durationToTimeout(terminationTimeout))
      case _ =>
        new AkkaExecutionSequencer(system.actorOf(Props[RunnableSequencingActor], name))(
          Timeout.durationToTimeout(terminationTimeout))

    }
  }

  private val actorTerminatedRegex = """Recipient\[.*]\] had already been terminated.""".r
}

private[grpc] class RunnableSequencingActor extends Actor with ActorLogging {
  @SuppressWarnings(Array("org.wartremover.warts.Any"))
  override val receive: Receive = {
    case runnable: Runnable =>
      try {
        runnable.run()
      } catch {
        case NonFatal(t) => log.error("Unexpected exception while executing Runnable", t)
      }
    case ShutdownRequest =>
      context.stop(self) // processing of the current message will continue
      sender() ! Done
  }
}

private[grpc] object RunnableSequencingActor {
  case object ShutdownRequest
} 
Example 137
Source File: ChaosInterface.scala    From eventuate-chaos   with Apache License 2.0 5 votes vote down vote up
package com.rbmhtechnology.eventuate.chaos

import java.net.InetSocketAddress

import akka.actor.Actor
import akka.actor.ActorLogging
import akka.actor.ActorRef
import akka.io.IO
import akka.io.Tcp
import akka.util.ByteString

abstract class ChaosInterface extends Actor with ActorLogging {
  val port = 8080
  val endpoint = new InetSocketAddress(port)
  val command = """(?s)(\w+)\s+(\d+).*""".r

  implicit val ec = context.dispatcher

  IO(Tcp)(context.system) ! Tcp.Bind(self, endpoint)

  println(s"Now listening on port $port")

  def handleCommand: PartialFunction[(String, Option[Int], ActorRef), Unit]

  protected def reply(message: String, receiver: ActorRef) = {
    receiver ! Tcp.Write(ByteString(message))
    receiver ! Tcp.Close
  }

  protected def closeOnError(receiver: ActorRef): PartialFunction[Throwable, Unit] = {
    case err: Throwable =>
      receiver ! Tcp.Close
  }

  def receive: Receive = {
    case Tcp.Connected(remote, _) =>
      sender ! Tcp.Register(self)

    case Tcp.Received(bs) =>
      val content = bs.utf8String

      content match {
        case command(c, value) if handleCommand.isDefinedAt(c, Some(value.toInt), sender) =>
          handleCommand(c, Some(value.toInt), sender)
        case c if c.startsWith("quit") =>
          context.system.terminate()
        case c if handleCommand.isDefinedAt(c, None, sender) =>
          handleCommand(c, None, sender)
        case _ =>
          sender ! Tcp.Close
      }

    case Tcp.Closed =>
    case Tcp.PeerClosed =>
  }
} 
Example 138
Source File: KnownNodesManager.scala    From mantis   with Apache License 2.0 5 votes vote down vote up
package io.iohk.ethereum.network

import java.net.URI

import akka.actor.{Actor, ActorLogging, Props, Scheduler}
import io.iohk.ethereum.db.storage.KnownNodesStorage
import io.iohk.ethereum.network.KnownNodesManager.KnownNodesManagerConfig

import scala.concurrent.duration._
import scala.concurrent.ExecutionContext.Implicits.global

class KnownNodesManager(
    config: KnownNodesManagerConfig,
    knownNodesStorage: KnownNodesStorage,
    externalSchedulerOpt: Option[Scheduler] = None)
  extends Actor with ActorLogging {

  import KnownNodesManager._

  private def scheduler = externalSchedulerOpt getOrElse context.system.scheduler

  var knownNodes: Set[URI] = knownNodesStorage.getKnownNodes()

  var toAdd: Set[URI] = Set.empty

  var toRemove: Set[URI] = Set.empty

  scheduler.schedule(config.persistInterval, config.persistInterval, self, PersistChanges)

  override def receive: Receive = {
    case AddKnownNode(uri) =>
      if (!knownNodes.contains(uri)) {
        knownNodes += uri
        toAdd += uri
        toRemove -= uri
      }

    case RemoveKnownNode(uri) =>
      if (knownNodes.contains(uri)) {
        knownNodes -= uri
        toAdd -= uri
        toRemove += uri
      }

    case GetKnownNodes =>
      sender() ! KnownNodes(knownNodes)

    case PersistChanges =>
      persistChanges()
  }

  private def persistChanges(): Unit = {
    log.debug(s"Persisting ${knownNodes.size} known nodes.")
    if (knownNodes.size > config.maxPersistedNodes) {
      val toAbandon = knownNodes.take(knownNodes.size - config.maxPersistedNodes)
      toRemove ++= toAbandon
      toAdd --= toAbandon
    }
    if (toAdd.nonEmpty || toRemove.nonEmpty) {
      knownNodesStorage.updateKnownNodes(
        toAdd = toAdd,
        toRemove = toRemove)
      toAdd = Set.empty
      toRemove = Set.empty
    }
  }

}

object KnownNodesManager {
  def props(config: KnownNodesManagerConfig, knownNodesStorage: KnownNodesStorage): Props =
    Props(new KnownNodesManager(config, knownNodesStorage))

  case class AddKnownNode(uri: URI)
  case class RemoveKnownNode(uri: URI)
  case object GetKnownNodes
  case class KnownNodes(nodes: Set[URI])

  private case object PersistChanges

  case class KnownNodesManagerConfig(persistInterval: FiniteDuration, maxPersistedNodes: Int)

  object KnownNodesManagerConfig {
    def apply(etcClientConfig: com.typesafe.config.Config): KnownNodesManagerConfig = {
      val knownNodesManagerConfig = etcClientConfig.getConfig("network.known-nodes")
      KnownNodesManagerConfig(
        persistInterval = knownNodesManagerConfig.getDuration("persist-interval").toMillis.millis,
        maxPersistedNodes = knownNodesManagerConfig.getInt("max-persisted-nodes"))
    }
  }
} 
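A minimal wiring sketch; the config values and the KnownNodesStorage instance are assumptions for illustration, not taken from the project:

import java.net.URI

import akka.actor.{ActorRef, ActorSystem}
import io.iohk.ethereum.db.storage.KnownNodesStorage
import io.iohk.ethereum.network.KnownNodesManager
import io.iohk.ethereum.network.KnownNodesManager._

import scala.concurrent.duration._

object KnownNodesWiring {
  def wire(system: ActorSystem, storage: KnownNodesStorage): ActorRef = {
    // Illustrative values: flush buffered changes every 20s, keep at most 200 nodes
    val config = KnownNodesManagerConfig(persistInterval = 20.seconds, maxPersistedNodes = 200)
    val manager = system.actorOf(KnownNodesManager.props(config, storage))
    manager ! AddKnownNode(new URI("enode://nodeid@127.0.0.1:30303"))
    manager
  }
}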
Example 139
Source File: ServerActor.scala    From mantis   with Apache License 2.0 5 votes vote down vote up
package io.iohk.ethereum.network

import java.net.InetSocketAddress

import akka.actor.{Actor, ActorLogging, ActorRef, Props}
import akka.agent.Agent
import akka.io.Tcp.{Bind, Bound, CommandFailed, Connected}
import akka.io.{IO, Tcp}
import io.iohk.ethereum.utils.{NodeStatus, ServerStatus}
import org.spongycastle.util.encoders.Hex

class ServerActor(nodeStatusHolder: Agent[NodeStatus], peerManager: ActorRef) extends Actor with ActorLogging {

  import ServerActor._
  import context.system

  override def receive: Receive = {
    case StartServer(address) =>
      IO(Tcp) ! Bind(self, address)
      context become waitingForBindingResult
  }

  def waitingForBindingResult: Receive = {
    case Bound(localAddress) =>
      val nodeStatus = nodeStatusHolder()
      log.info("Listening on {}", localAddress)
      log.info("Node address: enode://{}@{}:{}",
        Hex.toHexString(nodeStatus.nodeId),
        getHostName(localAddress.getAddress),
        localAddress.getPort)
      nodeStatusHolder.send(_.copy(serverStatus = ServerStatus.Listening(localAddress)))
      context become listening

    case CommandFailed(b: Bind) =>
      log.warning("Binding to {} failed", b.localAddress)
      context stop self
  }

  def listening: Receive = {
    case Connected(remoteAddress, _) =>
      val connection = sender()
      peerManager ! PeerManagerActor.HandlePeerConnection(connection, remoteAddress)
  }
}

object ServerActor {
  def props(nodeStatusHolder: Agent[NodeStatus], peerManager: ActorRef): Props =
    Props(new ServerActor(nodeStatusHolder, peerManager))

  case class StartServer(address: InetSocketAddress)
} 
Example 140
Source File: BlacklistSupport.scala    From mantis   with Apache License 2.0 5 votes vote down vote up
package io.iohk.ethereum.blockchain.sync

import scala.concurrent.duration.FiniteDuration
import akka.actor.{Actor, ActorLogging, Cancellable, Scheduler}
import io.iohk.ethereum.network.PeerId

import scala.concurrent.ExecutionContext.Implicits.global

trait BlacklistSupport {
  selfActor: Actor with ActorLogging =>

  import BlacklistSupport._

  def scheduler: Scheduler

  var blacklistedPeers: Seq[(PeerId, Cancellable)] = Nil

  def blacklist(peerId: PeerId, duration: FiniteDuration, reason: String): Unit = {
    undoBlacklist(peerId)
    log.debug(s"Blacklisting peer ($peerId), $reason")
    val unblacklistCancellable = scheduler.scheduleOnce(duration, self, UnblacklistPeer(peerId))
    blacklistedPeers :+= (peerId, unblacklistCancellable)
  }

  def undoBlacklist(peerId: PeerId): Unit = {
    blacklistedPeers.find(_._1 == peerId).foreach(_._2.cancel())
    blacklistedPeers = blacklistedPeers.filterNot(_._1 == peerId)
  }

  def isBlacklisted(peerId: PeerId): Boolean =
    blacklistedPeers.exists(_._1 == peerId)

  def handleBlacklistMessages: Receive = {
    case UnblacklistPeer(ref) => undoBlacklist(ref)
  }
}

object BlacklistSupport {
  private case class UnblacklistPeer(peerId: PeerId)
} 
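BlacklistSupport is meant to be mixed into a sync actor that already has a scheduler; a minimal mixin sketch (the actor and its failure message are illustrative):

import akka.actor.{Actor, ActorLogging, Scheduler}
import io.iohk.ethereum.blockchain.sync.BlacklistSupport
import io.iohk.ethereum.network.PeerId

import scala.concurrent.duration._

class FailureTrackingActor extends Actor with ActorLogging with BlacklistSupport {
  def scheduler: Scheduler = context.system.scheduler

  // Illustrative protocol: blacklist a peer for 30 seconds after a reported failure;
  // handleBlacklistMessages takes care of the scheduled UnblacklistPeer messages
  def receive: Receive = handleBlacklistMessages orElse {
    case ("peer-failed", peerId: PeerId) =>
      blacklist(peerId, 30.seconds, "repeated failures")
  }
}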
Example 141
Source File: BlockchainHostActor.scala    From mantis   with Apache License 2.0 5 votes vote down vote up
package io.iohk.ethereum.blockchain.sync

import akka.actor.{Actor, ActorLogging, ActorRef, Props}
import akka.util.ByteString
import io.iohk.ethereum.domain.{BlockHeader, Blockchain}
import io.iohk.ethereum.network.PeerEventBusActor.PeerEvent.MessageFromPeer
import io.iohk.ethereum.network.PeerEventBusActor.SubscriptionClassifier.MessageClassifier
import io.iohk.ethereum.network.PeerEventBusActor.{PeerSelector, Subscribe}
import io.iohk.ethereum.network.PeerManagerActor.PeerConfiguration
import io.iohk.ethereum.network.p2p.{Message, MessageSerializable}
import io.iohk.ethereum.network.p2p.messages.PV62.{BlockBodies, BlockHeaders, GetBlockBodies, GetBlockHeaders}
import io.iohk.ethereum.network.p2p.messages.PV63.{GetNodeData, GetReceipts, NodeData, Receipts}
import io.iohk.ethereum.network.p2p.messages.PV63.MptNodeEncoders._
import io.iohk.ethereum.network.EtcPeerManagerActor


// Class declaration reconstructed from the companion's props (elided in this excerpt)
class BlockchainHostActor(blockchain: Blockchain, peerConfiguration: PeerConfiguration,
                          peerEventBusActor: ActorRef, etcPeerManagerActor: ActorRef)
  extends Actor with ActorLogging {

  private def handleBlockFastDownload(message: Message): Option[MessageSerializable] = message match {
    case request: GetReceipts =>
      val receipts = request.blockHashes.take(peerConfiguration.fastSyncHostConfiguration.maxReceiptsPerMessage)
        .flatMap(hash => blockchain.getReceiptsByHash(hash))

      Some(Receipts(receipts))

    case request: GetBlockBodies =>
      val blockBodies = request.hashes.take(peerConfiguration.fastSyncHostConfiguration.maxBlocksBodiesPerMessage)
        .flatMap(hash => blockchain.getBlockBodyByHash(hash))

      Some(BlockBodies(blockBodies))

    case request: GetBlockHeaders =>
      val blockNumber = request.block.fold(a => Some(a), b => blockchain.getBlockHeaderByHash(b).map(_.number))

      blockNumber match {
        case Some(startBlockNumber) if startBlockNumber >= 0 && request.maxHeaders >= 0 && request.skip >= 0 =>

          val headersCount: BigInt = request.maxHeaders min peerConfiguration.fastSyncHostConfiguration.maxBlocksHeadersPerMessage

          val range = if (request.reverse) {
            startBlockNumber to (startBlockNumber - (request.skip + 1) * headersCount + 1) by -(request.skip + 1)
          } else {
            startBlockNumber to (startBlockNumber + (request.skip + 1) * headersCount - 1) by (request.skip + 1)
          }

          val blockHeaders: Seq[BlockHeader] = range.flatMap { a: BigInt => blockchain.getBlockHeaderByNumber(a) }

          Some(BlockHeaders(blockHeaders))

        case _ =>
          log.warning("got request for block headers with invalid block hash/number: {}", request)
          None
      }

    case _ => None

  }

}

object BlockchainHostActor {

  def props(blockchain: Blockchain, peerConfiguration: PeerConfiguration,
            peerEventBusActor: ActorRef, etcPeerManagerActor: ActorRef): Props =
    Props(new BlockchainHostActor(blockchain, peerConfiguration, peerEventBusActor, etcPeerManagerActor))

} 
Example 142
Source File: PeerListSupport.scala    From mantis   with Apache License 2.0 5 votes vote down vote up
package io.iohk.ethereum.blockchain.sync

import akka.actor.{Actor, ActorLogging, ActorRef, Scheduler}
import io.iohk.ethereum.network.{EtcPeerManagerActor, Peer, PeerId}
import io.iohk.ethereum.network.EtcPeerManagerActor.PeerInfo
import io.iohk.ethereum.network.PeerEventBusActor.PeerEvent.PeerDisconnected
import io.iohk.ethereum.network.PeerEventBusActor.SubscriptionClassifier.PeerDisconnectedClassifier
import io.iohk.ethereum.network.PeerEventBusActor.{PeerSelector, Subscribe, Unsubscribe}
import io.iohk.ethereum.utils.Config.SyncConfig

import scala.concurrent.duration._
import scala.concurrent.ExecutionContext.Implicits.global

trait PeerListSupport {
  self: Actor with ActorLogging with BlacklistSupport =>

  def etcPeerManager: ActorRef
  def peerEventBus: ActorRef
  def syncConfig: SyncConfig
  def scheduler: Scheduler

  var handshakedPeers: Map[Peer, PeerInfo] = Map.empty

  scheduler.schedule(0.seconds, syncConfig.peersScanInterval, etcPeerManager, EtcPeerManagerActor.GetHandshakedPeers)(global, context.self)

  def removePeer(peerId: PeerId): Unit = {
    peerEventBus ! Unsubscribe(PeerDisconnectedClassifier(PeerSelector.WithId(peerId)))
    handshakedPeers.find(_._1.id == peerId).foreach { case (peer, _) => undoBlacklist(peer.id) }
    handshakedPeers = handshakedPeers.filterNot(_._1.id == peerId)
  }

  def peersToDownloadFrom: Map[Peer, PeerInfo] =
    handshakedPeers.filterNot { case (p, s) => isBlacklisted(p.id) }

  def handlePeerListMessages: Receive = {
    case EtcPeerManagerActor.HandshakedPeers(peers) =>
      peers.keys.filterNot(handshakedPeers.contains).foreach { peer =>
        peerEventBus ! Subscribe(PeerDisconnectedClassifier(PeerSelector.WithId(peer.id)))
      }
      handshakedPeers = peers

    case PeerDisconnected(peerId) if handshakedPeers.exists(_._1.id == peerId) =>
      removePeer(peerId)
  }
} 
Example 143
Source File: FastSyncStateStorageActor.scala    From mantis   with Apache License 2.0 5 votes vote down vote up
package io.iohk.ethereum.blockchain.sync

import akka.actor.{Actor, ActorLogging}
import akka.pattern.pipe
import io.iohk.ethereum.blockchain.sync.FastSync.SyncState
import io.iohk.ethereum.blockchain.sync.FastSyncStateStorageActor.GetStorage
import io.iohk.ethereum.db.storage.FastSyncStateStorage

import scala.concurrent.Future
import scala.util.{Failure, Success, Try}


class FastSyncStateStorageActor extends Actor with ActorLogging {

  def receive: Receive = {
    // after initialization send a valid Storage reference
    case storage: FastSyncStateStorage => context become idle(storage)
  }

  def idle(storage: FastSyncStateStorage): Receive = {
    // begin saving of the state to the storage and become busy
    case state: SyncState => persistState(storage, state)

    case GetStorage => sender() ! storage.getSyncState()
  }

  def busy(storage: FastSyncStateStorage, stateToPersist: Option[SyncState]): Receive = {
    // update state waiting to be persisted later. we only keep newest state
    case state: SyncState => context become busy(storage, Some(state))
    // exception was thrown during persisting of a state. push
    case Failure(e) => throw e
    // state was saved in the storage. become idle
    case Success(s: FastSyncStateStorage) if stateToPersist.isEmpty => context become idle(s)
    // state was saved in the storage but new state is already waiting to be saved.
    case Success(s: FastSyncStateStorage) if stateToPersist.isDefined => stateToPersist.foreach(persistState(s, _))

    case GetStorage => sender() ! storage.getSyncState()
  }

  private def persistState(storage: FastSyncStateStorage, syncState: SyncState): Unit = {
    import context.dispatcher
    val persistingQueues: Future[Try[FastSyncStateStorage]] = Future {
      // `result` is lazy so the write happens exactly once, on first reference;
      // the debug branch forces it between two timestamps to measure the save
      lazy val result = Try { storage.putSyncState(syncState) }
      if (log.isDebugEnabled) {
        val now = System.currentTimeMillis()
        result // forces the write
        val end = System.currentTimeMillis()
        log.debug(s"Saving snapshot of a fast sync took ${end - now} ms")
        result
      } else {
        result
      }
    }
    persistingQueues pipeTo self
    context become busy(storage, None)
  }

}

object FastSyncStateStorageActor {
  case object GetStorage
} 
Example 144
Source File: Person.scala    From akka-serialization-test   with Apache License 2.0 5 votes vote down vote up
package com.github.dnvriend.domain

import akka.actor.ActorLogging
import akka.event.LoggingReceive
import akka.persistence.{ PersistentActor, RecoveryCompleted }

object Person {
  sealed trait PersonEvent
  final case class NameRegisteredEvent(name: String, surname: String) extends PersonEvent
  final case class NameChangedEvent(name: String) extends PersonEvent
  final case class SurnameChangedEvent(surname: String) extends PersonEvent

  sealed trait PersonCommand
  final case class RegisterNameCommand(name: String, surname: String) extends PersonCommand
  final case class ChangeNameCommand(name: String) extends PersonCommand
  final case class ChangeSurnameCommand(surname: String) extends PersonCommand
}

class Person(val persistenceId: String) extends PersistentActor with ActorLogging {
  import Person._
  var name: String = _
  var surname: String = _

  override def receiveRecover: Receive = LoggingReceive {
    case e: NameRegisteredEvent ⇒ handleEvent(e)
    case e: NameChangedEvent    ⇒ handleEvent(e)
    case e: SurnameChangedEvent ⇒ handleEvent(e)
    case RecoveryCompleted      ⇒ println("==> Recovery completed")
    case e                      ⇒ println("Dropping event: " + e.getClass.getName)
  }

  def handleEvent(event: NameRegisteredEvent): Unit = {
    this.name = event.name
    this.surname = event.surname
    log.debug(s"[NameRegistered]: Person $persistenceId => name: $name, surname: $surname")
  }

  def handleEvent(event: NameChangedEvent): Unit = {
    this.name = event.name
    log.debug(s"[NameChanged]: Person $persistenceId => name: $name, surname: $surname")
  }

  def handleEvent(event: SurnameChangedEvent): Unit = {
    this.surname = event.surname
    log.debug(s"[SurnameChanged]: Person $persistenceId => name: $name, surname: $surname")
  }

  override def receiveCommand: Receive = LoggingReceive {
    case RegisterNameCommand(name, surname) ⇒
      persist(NameRegisteredEvent(name, surname)) { e ⇒
        handleEvent(e)
        sender() ! akka.actor.Status.Success("")
      }
    case ChangeNameCommand(newName) ⇒
      persist(NameChangedEvent(newName)) { e ⇒
        handleEvent(e)
        sender() ! akka.actor.Status.Success("")
      }
    case ChangeSurnameCommand(newSurname) ⇒
      persist(SurnameChangedEvent(newSurname)) { e ⇒
        handleEvent(e)
        sender() ! akka.actor.Status.Success("")
      }
  }

  override def postStop(): Unit = {
    log.debug(s"Stopped $persistenceId")
    super.postStop()
  }
} 
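A usage sketch, assuming a configured journal plugin; the actor name and command sequence are illustrative:

import akka.actor.{ActorSystem, Props}
import com.github.dnvriend.domain.Person
import com.github.dnvriend.domain.Person._

object PersonDemo extends App {
  val system = ActorSystem("persons")
  val person = system.actorOf(Props(new Person("person-1")))

  person ! RegisterNameCommand("John", "Doe")
  person ! ChangeSurnameCommand("Smith")
  // A new incarnation with persistenceId "person-1" replays both events,
  // recovering name = "John" and surname = "Smith"
}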
Example 145
Source File: Music.scala    From akka-serialization-test   with Apache License 2.0 5 votes vote down vote up
package com.github.dnvriend.domain

import java.time.Duration

import akka.actor.ActorLogging
import akka.event.LoggingReceive
import akka.persistence.PersistentActor

object Music {

  type Title = String
  type Year = Int

  final case class Song(title: Title, duration: Duration)

  sealed trait AlbumEvent

  final case class TitleChanged(title: Title) extends AlbumEvent

  final case class YearChanged(year: Year) extends AlbumEvent

  final case class SongAdded(song: Song) extends AlbumEvent

  final case class SongRemoved(song: Song) extends AlbumEvent

  sealed trait AlbumCommand

  final case class ChangeAlbumTitle(title: Title) extends AlbumCommand

  final case class ChangeAlbumYear(year: Year) extends AlbumCommand

  final case class AddSong(song: Song) extends AlbumCommand

  final case class RemoveSong(song: Song) extends AlbumCommand

}

class Album(val persistenceId: String) extends PersistentActor with ActorLogging {

  import Music._

  var title: Title = _
  var year: Year = _
  var songs: Set[Song] = Set[Song]()

  override def receiveRecover: Receive = LoggingReceive {
    case e: TitleChanged ⇒ handleEvent(e)
    case e: YearChanged  ⇒ handleEvent(e)
    case e: SongAdded    ⇒ handleEvent(e)
    case e: SongRemoved  ⇒ handleEvent(e)
  }

  def handleEvent(event: TitleChanged): Unit = {
    this.title = event.title
    log.debug(s"[TitleChanged]: Album $persistenceId => title: $title, year: $year songs: $songs")
  }

  def handleEvent(event: YearChanged): Unit = {
    this.year = event.year
    log.debug(s"[YearChanged]: Album $persistenceId => title: $title, year: $year songs: $songs")
  }

  def handleEvent(event: SongAdded): Unit = {
    this.songs = this.songs + event.song
    log.debug(s"[SongAdded]: Album $persistenceId => title: $title, year: $year songs: $songs")
  }

  def handleEvent(event: SongRemoved): Unit = {
    this.songs = this.songs - event.song
    log.debug(s"[SongRemoved]: Album $persistenceId => title: $title, year: $year songs: $songs")
  }

  override def receiveCommand: Receive = LoggingReceive {
    case ChangeAlbumTitle(newTitle) ⇒
      persistAll(List(TitleChanged(newTitle))) { e ⇒
        handleEvent(e)
        sender() ! akka.actor.Status.Success("")
      }
    case ChangeAlbumYear(newYear) ⇒
      persistAll(List(YearChanged(newYear))) { e ⇒
        handleEvent(e)
        sender() ! akka.actor.Status.Success("")
      }
    case AddSong(newSong) ⇒
      persistAll(List(SongAdded(newSong))) { e ⇒
        handleEvent(e)
        sender() ! akka.actor.Status.Success("")
      }
    case RemoveSong(oldSong) ⇒
      persistAll(List(SongRemoved(oldSong))) { e ⇒
        handleEvent(e)
        sender() ! akka.actor.Status.Success("")
      }
  }

  override def postStop(): Unit = {
    log.debug(s"Stopped $persistenceId")
    super.postStop()
  }
} 
Example 146
Source File: Master.scala    From asyspark   with MIT License 5 votes vote down vote up
package org.apache.spark.asyspark.core

import java.util.concurrent.TimeUnit

import akka.actor.{Actor, ActorLogging, ActorRef, ActorSystem, Address, Props, Terminated}
import akka.util.Timeout
import com.typesafe.config.Config
import com.typesafe.scalalogging.slf4j.StrictLogging
import org.apache.spark.asyspark.core.messages.master.{ClientList, RegisterClient, RegisterServer, ServerList}

import scala.concurrent.duration._
import scala.concurrent.{ExecutionContext, Future}



// Class declaration and `servers` reconstructed from usage below (elided in this excerpt)
class Master extends Actor with ActorLogging {

  var servers = Set.empty[ActorRef]

  var clients = Set.empty[ActorRef]

  override def receive: Receive = {
    case RegisterServer(server) =>
      log.info(s"Registering server ${server.path.toString}")
      println("register server")
      servers += server
      context.watch(server)
      sender ! true

    case RegisterClient(client)  =>
      log.info(s"Registering client ${sender.path.toString}")
      clients += client
      context.watch(client)
      sender ! true

    case ServerList() =>
      log.info(s"Sending current server list to ${sender.path.toString}")
      sender ! servers.toArray

    case ClientList() =>
      log.info(s"Sending current client list to ${sender.path.toString}")
      sender ! clients.toArray


    case Terminated(actor) =>
      actor match {
        case server: ActorRef if servers contains server =>
          log.info(s"Removing server ${server.path.toString}")
          servers -= server
        case client: ActorRef if clients contains client =>
          log.info(s"Removing client ${client.path.toString}")
          clients -= client
        case actor: ActorRef =>
          log.warning(s"Actor ${actor.path.toString} will be terminated for some unknown reason")
      }
  }

}

object Master extends StrictLogging {
  def run(config: Config): Future[(ActorSystem, ActorRef)] = {
    logger.debug("Starting master actor system")
    val system = ActorSystem(config.getString("asyspark.master.system"), config.getConfig("asyspark.master"))
    logger.debug("Starting master")
    val master = system.actorOf(Props[Master], config.getString("asyspark.master.name"))
    implicit val timeout = Timeout(config.getDuration("asyspark.master.startup-timeout", TimeUnit.MILLISECONDS) milliseconds)
    implicit val ec = ExecutionContext.Implicits.global
    val address = Address("akka.tcp", config.getString("asyspark.master.system"), config.getString("asyspark.master.host"),
    config.getString("asyspark.master.port").toInt)
    system.actorSelection(master.path.toSerializationFormat).resolveOne().map {
      case actor: ActorRef =>
        logger.debug("Master successfully started")
        (system, master)

    }
  }

} 
Example 147
Source File: PartialVector.scala    From asyspark   with MIT License 5 votes vote down vote up
package org.apache.spark.asyspark.core.models.server

import akka.actor.{Actor, ActorLogging}
import spire.algebra.Semiring
import spire.implicits._
import org.apache.spark.asyspark.core.partitions.Partition
import scala.reflect.ClassTag


  // TODO: I think this implementation can be optimized
  def update(keys: Array[Long], values: Array[V]): Boolean = {
    var i = 0
    try {
      while (i < keys.length) {
        val key = partition.globalToLocal(keys(i))
        // this is imp with the help of spire.implicits._
        data(key) += values(i)
        i += 1
      }
      true
    } catch {
      case e: Exception => false
    }
  }

  def get(keys: Array[Long]): Array[V] = {
    var i =0
    val a = new Array[V](keys.length)
    while(i < keys.length) {
      val key = partition.globalToLocal(keys(i))
      a(i)  = data(key)
      i += 1
    }
    a
  }

  log.info(s"Constructed PartialVector[${implicitly[ClassTag[V]]}] of size $size (partition id: ${partition.index})")

} 
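The generic data(key) += values(i) above compiles because spire's Semiring supplies + for the element type V once spire.implicits._ is in scope; a minimal sketch of the same pattern:

import spire.algebra.Semiring
import spire.implicits._

object SemiringDemo {
  // Works for any V with a Semiring instance: Int, Long, Double, BigInt, ...
  def accumulate[V: Semiring](values: Seq[V]): V =
    values.foldLeft(Semiring[V].zero)(_ + _)
}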
Example 148
Source File: PersistenceTestActor.scala    From akka-persistence-dynamodb   with Apache License 2.0 5 votes vote down vote up
package com.github.j5ik2o.akka.persistence.dynamodb.query

import akka.actor.Status.Success
import akka.actor.{ ActorLogging, ActorRef }
import akka.event.LoggingReceive
import akka.persistence._
import akka.persistence.journal.Tagged

object PersistenceTestActor {
  final case class DeleteCmd(toSequenceNr: Long = Long.MaxValue) extends Serializable
}

class PersistenceTestActor(id: Int) extends PersistentActor with ActorLogging {
  import PersistenceTestActor._
  val pluginName = context.system.settings.config.getString("akka.persistence.journal.plugin")

  override def persistenceId: String = "my-" + id

  private val label = s"$persistenceId - $pluginName"

  log.debug("==> Created test actor: " + persistenceId)

  var state: Int = 1

  def debug(msg: String): Unit = log.debug(s"$msg in state $label")

  def deleteCmd(ref: ActorRef): Receive = LoggingReceive.withLabel(label) {
    case msg @ DeleteMessagesSuccess(toSequenceNr) =>
      debug(s"Deleted up to: $msg")
      ref ! Success(s"deleted-$toSequenceNr")
      context.become(receiveCommand)
    case msg @ DeleteMessagesFailure(t, toSequenceNr) =>
      debug(s"Failed deleting events: $msg")
      ref ! akka.actor.Status.Failure(t)
      context.become(receiveCommand)
  }

  override def receiveCommand: Receive = LoggingReceive.withLabel(label) {
    case DeleteCmd(toSequenceNr) =>
      deleteMessages(toSequenceNr)
      debug(s"Deleting up to: '$toSequenceNr'")
      context.become(deleteCmd(sender()))

    case event @ Tagged(payload: Any, tags) =>
      persist(event.copy(payload = s"$payload-$state")) { _ =>
        increment()
        sender() ! event
      }

    case event =>
      persist(s"$event-$state") { _ =>
        increment()
        sender() ! event
      }
  }

  def increment(): Unit = state += 1

  override def receiveRecover: Receive = LoggingReceive.withLabel(label) {
    case _: String         => increment()
    case RecoveryCompleted =>
  }

  override protected def onPersistFailure(cause: Throwable, event: Any, seqNr: Long): Unit = {
    sender() ! akka.actor.Status.Failure(cause)
    context.become(receiveCommand)
    super.onPersistFailure(cause, event, seqNr)
  }

  override protected def onPersistRejected(cause: Throwable, event: Any, seqNr: Long): Unit = {
    sender() ! akka.actor.Status.Failure(cause)
    context.become(receiveCommand)
    super.onPersistRejected(cause, event, seqNr)
  }
} 
Example 149
Source File: PhilosopherMessages.scala    From didactic-computing-machine   with GNU Affero General Public License v3.0 5 votes vote down vote up
package DiningPhilosophers

import akka.actor.{ Actor, ActorLogging, ActorRef, ActorSystem, Props }

object PhilosopherMessages {
  case object Eat
  case object Think
}


object ForkMessages {
  case object Take
  case object Put
  case object ForkBeingUsed
  case object ForkTaken
} 
Example 150
Source File: Philosopher.scala    From didactic-computing-machine   with GNU Affero General Public License v3.0 5 votes vote down vote up
package DiningPhilosophers

import DiningPhilosophers.ForkMessages._
import DiningPhilosophers.PhilosopherMessages._
import akka.actor.{Actor, ActorLogging, ActorRef, ActorSystem, Props}
import scala.concurrent.duration.{DurationInt, FiniteDuration}
import scala.concurrent.ExecutionContext.Implicits.global

class Philosopher(val leftFork: ActorRef, val rightFork: ActorRef) extends Actor with ActorLogging {

  def name = self.path.name

  private val eatingTime = 2500.millis
  private val thinkingTime = 5000.millis
  private val retryTime = 10.millis


  def thinkFor(duration: FiniteDuration) = {
    context.system.scheduler.scheduleOnce(duration, self, Eat)
    context.become(thinking)
  }

  def thinking: Receive = {
    case Eat =>
      log.info(s"Philosopher ${self.path.name} wants to eat")
      leftFork ! Take
      rightFork ! Take
      context.become(hungry)
  }

  def hungry: Receive = {
    case ForkBeingUsed => handleForkBeingUsed()
    case ForkTaken =>
      log.info(s"Philosopher ${self.path.name} found one fork to be taken by other philosopher")
      context.become(waitingForOtherFork)
  }

  def waitingForOtherFork: Receive = {
    case ForkBeingUsed => handleForkBeingUsed()
    case ForkTaken =>
      log.info(s"Philosopher ${self.path.name} starts to eat")
      context.system.scheduler.scheduleOnce(eatingTime, self, Think)
      context.become(eating)
  }

  def eating: Receive = {
    case Think =>
      log.info(s"Philosopher ${self.path.name} starts to think")
      leftFork ! Put
      rightFork ! Put
      thinkFor(thinkingTime)
  }

  def handleForkBeingUsed(): Unit = {
    log.info(s"Philosopher ${self.path.name} found one fork to be in use")
    
    leftFork ! Put
    rightFork ! Put
    thinkFor(retryTime)
  }

  def receive = {
    case Think =>
      log.info(s"Philosopher ${self.path.name} started thinking")
      thinkFor(thinkingTime)

  }
} 
Example 151
Source File: Fork.scala    From didactic-computing-machine   with GNU Affero General Public License v3.0 5 votes vote down vote up
package DiningPhilosophers

import DiningPhilosophers.ForkMessages._
import akka.actor.{Actor, ActorLogging, ActorRef, ActorSystem, Props}

class Fork extends Actor with ActorLogging {

  

  def available: Receive = {
    case Take =>
      log.info(s"Fork ${self.path.name} by ${sender.path.name}")
      sender ! ForkTaken
      context.become(inUse(sender))
  }

  def inUse(philosopher: ActorRef): Receive = {
    case Take =>
      log.info(s"Fork ${self.path.name} already being used by ${philosopher.path.name}")
      sender ! ForkBeingUsed
    case Put =>
      log.info(s"Fork ${self.path.name} put down by ${sender.path.name}")
      sender ! Put
      context.become(available)
  }

  def receive = available

} 
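A sketch of seating the table (actor names and the count of five are illustrative):

import akka.actor.{ActorSystem, Props}
import DiningPhilosophers.{Fork, Philosopher}
import DiningPhilosophers.PhilosopherMessages.Think

object DiningTable extends App {
  val system = ActorSystem("dining")
  val forks = (0 until 5).map(i => system.actorOf(Props[Fork], s"fork-$i"))
  val philosophers = (0 until 5).map { i =>
    // Each philosopher shares a fork with each neighbour around the table
    system.actorOf(Props(new Philosopher(forks(i), forks((i + 1) % 5))), s"philosopher-$i")
  }
  philosophers.foreach(_ ! Think) // everyone starts thinking, then tries to eat
}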
Example 152
Source File: PartialVector.scala    From glint   with MIT License 5 votes vote down vote up
package glint.models.server

import akka.actor.{Actor, ActorLogging}
import glint.partitioning.Partition
import spire.algebra.Semiring
import spire.implicits._

import scala.reflect.ClassTag


  def get(keys: Array[Long]): Array[V] = {
    var i = 0
    val a = new Array[V](keys.length)
    while (i < keys.length) {
      val key = partition.globalToLocal(keys(i))
      a(i) = data(key)
      i += 1
    }
    a
  }

  log.info(s"Constructed PartialVector[${implicitly[ClassTag[V]]}] of size $size (partition id: ${partition.index})")

} 
Example 153
Source File: PartialMatrix.scala    From glint   with MIT License 5 votes vote down vote up
package glint.models.server

import akka.actor.{Actor, ActorLogging}
import spire.algebra.Semiring
import spire.implicits._
import glint.partitioning.Partition

import scala.reflect.ClassTag


  def update(rows: Array[Long], cols: Array[Int], values: Array[V]): Boolean = {
    var i = 0
    while (i < rows.length) {
      val row = partition.globalToLocal(rows(i))
      val col = cols(i)
      data(row)(col) += values(i)
      i += 1
    }
    true
  }

  log.info(s"Constructed PartialMatrix[${implicitly[ClassTag[V]]}] with $rows rows and $cols columns (partition id: ${partition.index})")

} 
Example 154
Source File: DynamoActor.scala    From scala-spark-cab-rides-predictions   with MIT License 5 votes vote down vote up
package actors

import akka.actor.{Actor, ActorLogging, Status}
import com.amazonaws.services.dynamodbv2.model.BatchWriteItemResult
import dynamodb.{CabImpl, WeatherImp}
import models.{CabPriceBatch, WeatherBatch}

import scala.concurrent.Future
import scala.util.{Failure, Success}


  def putCabPrices(cabPriceBatch: CabPriceBatch): Unit = {
    val cabPrices = cabPriceBatch.cabPrices.toSeq
    log.info("received " + cabPrices.size + " number of cab price records")
    val result: Future[Seq[BatchWriteItemResult]] = CabImpl.put(cabPrices)
    result onComplete {
      case Success(_) => log.info("Cab Prices Batch processed on DynamoDB")
      case Failure(exception) => log.error("error process Cab Prices batch on dynamoDB :" + exception.getStackTrace)
    }
  }
} 
Example 155
Source File: View.scala    From sangria-subscriptions-example   with Apache License 2.0 5 votes vote down vote up
package generic

import language.postfixOps

import akka.actor.{ActorLogging, ActorRef}
import akka.stream.actor.{OneByOneRequestStrategy, ActorSubscriber}

import akka.stream.actor.ActorSubscriberMessage._

import scala.collection.immutable.ListMap
import scala.concurrent.duration._

abstract class View[Entity <: Versioned, Ev <: Event] extends ActorSubscriber with ActorLogging {
  import View._

  private var entities = ListMap.empty[String, Entity]
  private var waiting = Map.empty[(String, Long), ActorRef]

  import context.dispatcher

  def receive = {
    case OnNext(event: Event) if handleEvent.isDefinedAt(event.asInstanceOf[Ev]) ⇒
      handleEvent(event.asInstanceOf[Ev])

      val waitingKey = event.id → event.version

      waiting.get(waitingKey) foreach { senderRef ⇒
        senderRef ! entities.get(event.id)
        waiting = waiting.filterNot(_._1 == waitingKey)
      }
    case RemoveWaiting(key) ⇒
      waiting.get(key) foreach { senderRef ⇒
        senderRef ! None
        waiting = waiting.filterNot(_._1 == key)
      }
    case List(offset, limit) ⇒
      sender() ! entities.values.slice(offset, offset + limit)
    case Get(id, None) ⇒
      sender() ! entities.get(id)
    case GetMany(ids) ⇒
      sender() ! entities.collect{case (key, value) if ids.contains(key) ⇒ value}.toVector
    case Get(id, Some(version)) ⇒
      entities.get(id) match {
        case Some(entity) if entity.version == version ⇒
          sender() ! entities.get(id)
        case _ ⇒
          waiting = waiting + ((id → version) → sender())
          context.system.scheduler.scheduleOnce(5 seconds, self, RemoveWaiting(id → version))
      }
  }

  def add(entity: Entity) =
    entities = entities + (entity.id → entity)

  def update(event: Ev)(fn: Entity ⇒ Entity) =
    change(event)(entity ⇒ entities = entities.updated(entity.id, fn(entity)))

  def delete(event: Ev) =
    change(event)(entity ⇒ entities = entities.filterNot(_._1 == entity.id))

  private def change(event: Ev)(fn: Entity ⇒ Unit) =
    entities.get(event.id) match {
      case Some(entity) if entity.version != event.version - 1 ⇒
        log.error(s"Entity ${event.id}: version mismatch: expected ${entity.version + 1}, but got ${event.version}")
      case Some(entity) ⇒
        fn(entity)
      case None ⇒
        log.error(s"Entity ${event.id}: not found")
    }

  val requestStrategy = OneByOneRequestStrategy

  def handleEvent: Handler

  type Handler = PartialFunction[Ev, Unit]
}

object View {
  case class List(offset: Int, limit: Int)
  case class Get(id: String, version: Option[Long] = None)
  case class GetMany(ids: Seq[String])

  private case class RemoveWaiting(key: (String, Long))
} 
Example 156
Source File: QueueActor.scala    From reliable-http-client   with Apache License 2.0 5 votes vote down vote up
package rhttpc.transport.inmem

import akka.pattern._
import akka.actor.{Actor, ActorLogging, ActorRef, Props, Stash}
import akka.routing.{RoundRobinRoutingLogic, Routee, Router}
import akka.util.Timeout
import rhttpc.transport.{Message, RejectingMessage}

import scala.concurrent.Future
import scala.concurrent.duration.FiniteDuration
import scala.util.control.NonFatal

private class QueueActor(consumeTimeout: FiniteDuration,
                         retryDelay: FiniteDuration) extends Actor with Stash with ActorLogging {

  import context.dispatcher

  private var consumers = Map.empty[ActorRef, AskingActorRefRouteeWithSpecifiedMessageType]

  private var router = Router(RoundRobinRoutingLogic(), collection.immutable.IndexedSeq.empty)

  override def receive: Receive = {
    case RegisterConsumer(consumer, fullMessage) =>
      val routee = AskingActorRefRouteeWithSpecifiedMessageType(consumer, consumeTimeout, handleResponse, fullMessage)
      consumers += consumer -> routee
      router = router.addRoutee(routee)
      log.debug(s"${self.path.name}: registered consumer, unstashing")
      unstashAll()
    case UnregisterConsumer(consumer) =>
      log.debug(s"${self.path.name}: unregistered consumer")
      consumers.get(consumer).foreach { routee =>
        consumers -= consumer
        router = router.removeRoutee(routee)
      }
      sender() ! ((): Unit)
    case msg: Message[_] =>
      if (consumers.isEmpty) {
        log.debug(s"${self.path.name}: got message when no consumer registered, stashing")
        stash()
        sender() ! ((): Unit)
      } else {
        router.route(msg, sender())
      }
  }

  private def handleResponse(future: Future[Any], msg: Message[_]): Unit =
    future.recover {
      case ex: AskTimeoutException =>
        log.error(ex, s"${self.path.name}: REJECT [${msg.content.getClass.getName}] because of ask timeout")
      case ex: Exception with RejectingMessage =>
        log.error(ex, s"${self.path.name}: REJECT [${msg.content.getClass.getName}] because of rejecting failure")
      case NonFatal(ex) =>
        log.error(ex, s"${self.path.name}: will RETRY [${msg.content.getClass.getName}] after $retryDelay because of failure")
        context.system.scheduler.scheduleOnce(retryDelay, self, msg)
    }

}

object QueueActor {
  def props(consumeTimeout: FiniteDuration,
            retryDelay: FiniteDuration): Props = Props(
    new QueueActor(
      consumeTimeout = consumeTimeout,
      retryDelay = retryDelay))
}

private[inmem] case class AskingActorRefRouteeWithSpecifiedMessageType(ref: ActorRef,
                                                                       askTimeout: FiniteDuration,
                                                                       handleResponse: (Future[Any], Message[_]) => Unit,
                                                                       fullMessage: Boolean)
  extends Routee {

  override def send(message: Any, sender: ActorRef): Unit = {
    val typedMessage = message.asInstanceOf[Message[_]]
    val msgToSend = if (fullMessage) message else typedMessage.content
    handleResponse(ref.ask(msgToSend)(askTimeout, sender), typedMessage)
  }
}

private[inmem] case class RegisterConsumer(consumer: ActorRef, fullMessage: Boolean)

private[inmem] case class UnregisterConsumer(consumer: ActorRef) 
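QueueActor and its protocol messages are private to rhttpc.transport.inmem, so the sketch below only illustrates the message flow from inside that package; system, consumer, and msg are assumed to exist.

import scala.concurrent.duration._

// Create the queue and attach a consumer; messages published before any
// consumer registers are stashed and replayed on registration.
val queue = system.actorOf(QueueActor.props(consumeTimeout = 5.seconds, retryDelay = 10.seconds))
queue ! RegisterConsumer(consumer, fullMessage = false)
queue ! msg // a rhttpc.transport.Message[_], routed round-robin to registered consumers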
Example 157
Source File: PersistentActorWithNotifications.scala    From reliable-http-client   with Apache License 2.0
package rhttpc.akkapersistence.impl

import java.io.{PrintWriter, StringWriter}

import akka.actor.{ActorLogging, ActorRef}
import akka.persistence._

private[akkapersistence] trait PersistentActorWithNotifications { this: AbstractSnapshotter with ActorLogging =>
  override def persistenceId: String = SnapshotsRegistry.persistenceId(persistenceCategory, id)

  protected def persistenceCategory: String

  protected def id: String

  private var listenersForSnapshotSave: Map[Long, RecipientWithMsg] = Map.empty

  protected def deleteSnapshotsLogging(): Unit = {
    deleteSnapshotsLogging(None)
  }

  private def deleteSnapshotsLogging(maxSequenceNr: Option[Long]): Unit = {
    log.debug(s"Deleting all snapshots for $persistenceId until (inclusive): $maxSequenceNr...")
    deleteSnapshots(SnapshotSelectionCriteria(maxSequenceNr = maxSequenceNr.getOrElse(Int.MaxValue)))
  }

  protected def saveSnapshotNotifying(snapshot: Any, sequenceNr: Long, listener: Option[RecipientWithMsg]): Unit = {
    log.debug(s"Saving snapshot for $persistenceId with sequenceNr: $sequenceNr: $snapshot ...")
    listener.foreach { listener =>
      listenersForSnapshotSave += sequenceNr -> listener
    }
    saveSnapshotWithSeqNr(snapshot, sequenceNr)
  }

  protected val handleSnapshotEvents: Receive = {
    case SaveSnapshotSuccess(metadata) =>
      log.debug(s"State saved for: $metadata")
      deleteSnapshotsLogging(Some(metadata.sequenceNr-1))
      replyToListenerForSaveIfWaiting(metadata)
    case DeleteSnapshotsSuccess(criteria) =>
      log.debug(s"Snapshots with criteria: $criteria deleted")
    case SaveSnapshotFailure(metadata, cause) =>
      log.error(cause, s"State save failure for: $metadata")
    case DeleteSnapshotsFailure(criteria, cause) =>
      log.warning(s"Delete snapshots with criteria failure: $criteria.\nError: ${printStackTrace(cause)}")
  }

  private def printStackTrace(cause: Throwable): String = {
    val stringWriter = new StringWriter()
    val printWriter = new PrintWriter(stringWriter)
    cause.printStackTrace(printWriter)
    stringWriter.toString
  }

  private def replyToListenerForSaveIfWaiting(metadata: SnapshotMetadata): Unit = {
    listenersForSnapshotSave.get(metadata.sequenceNr).foreach { listener =>
      listener.reply()
      listenersForSnapshotSave -= metadata.sequenceNr
    }
  }
}

class RecipientWithMsg(recipient: ActorRef, msg: Any) {
  def reply() = recipient ! msg
} 
Example 158
Source File: MessageDispatcherActor.scala    From reliable-http-client   with Apache License 2.0
package rhttpc.client.subscription

import akka.actor.{Actor, ActorLogging, ActorRef, Status}
import rhttpc.client.protocol.{Correlated, Exchange}

import scala.util.{Failure, Success}

private[subscription] class MessageDispatcherActor extends Actor with ActorLogging {

  private var promisesOnPending: Map[SubscriptionOnResponse, Option[PendingMessage]] = Map.empty

  private var subscriptions: Map[SubscriptionOnResponse, ActorRef] = Map.empty

  override def receive: Actor.Receive = {
    case RegisterSubscriptionPromise(sub) =>
      log.debug(s"Registering subscription promise: $sub")
      promisesOnPending += sub -> None
    case ConfirmOrRegisterSubscription(sub, consumer) =>
      promisesOnPending.get(sub).foreach { pendingOpt =>
        if (pendingOpt.nonEmpty) {
          log.debug(s"Confirming subscription: $sub. Sending outstanding messages: ${pendingOpt.size}.")
          pendingOpt.foreach { pending =>
            consumer.tell(MessageFromSubscription(pending.msg, sub), pending.sender)
          }
        } else {
          log.debug(s"Confirming subscription: $sub")
        }
        promisesOnPending -= sub
      }
      subscriptions += sub -> consumer
    case AbortSubscription(sub) =>
      promisesOnPending.get(sub) match {
        case Some(pending) if pending.isEmpty =>
          log.debug(s"Aborted subscription: $sub.")
          promisesOnPending -= sub
        case Some(pending) =>
          log.error(s"Aborted subscription: $sub. There were pending messages: ${pending.size}.")
          promisesOnPending -= sub
        case None =>
          log.warning(s"Subscription promise to abort was missing: $sub")
      }
    case Correlated(msg: Exchange[_, _], correlationId) =>
      val sub = SubscriptionOnResponse(correlationId)
      val underlyingOrFailure = msg.tryResponse match {
        case Success(underlying) => underlying
        case Failure(ex) => Status.Failure(ex)
      }
      (subscriptions.get(sub), promisesOnPending.get(sub)) match {
        case (Some(consumer), optionalPending) =>
          optionalPending.foreach { pending =>
            log.error(s"There were both registered subscription and subscription promise with pending messages: ${pending.size}.")
          }
          log.debug(s"Consuming message: $correlationId")
          subscriptions -= sub
          consumer forward MessageFromSubscription(underlyingOrFailure, sub) // consumer should ack
        case (None, Some(None)) =>
          log.debug(s"Adding pending message: $correlationId")
          promisesOnPending = promisesOnPending.updated(sub, Some(PendingMessage(underlyingOrFailure)))
        case (None, Some(Some(pending))) =>
          log.error(s"There already was pending message: $pending for subscription. Overriding it.")
          pending.ack()
          promisesOnPending = promisesOnPending.updated(sub, Some(PendingMessage(underlyingOrFailure)))
        case (None, None) =>
          log.error(s"No subscription (promise) registered for $correlationId. Will be skipped.")
          // TODO: DLQ
          sender() ! ((): Unit) // ack
      }
  }

  class PendingMessage private (val msg: Any, val sender: ActorRef) {
    def ack() = sender ! ((): Unit)
  }

  object PendingMessage {
    def apply(msg: Any): PendingMessage = new PendingMessage(msg, sender())
  }
}

private[subscription] case class RegisterSubscriptionPromise(sub: SubscriptionOnResponse)

private[subscription] case class ConfirmOrRegisterSubscription(sub: SubscriptionOnResponse, consumer: ActorRef)

private[subscription] case class AbortSubscription(sub: SubscriptionOnResponse) 
Example 159
Source File: ModelTrainer.scala    From recommendersystem   with Apache License 2.0
package com.infosupport.recommendedcontent.core

import akka.actor.{Props, ActorLogging, Actor}
import org.apache.spark.SparkContext
import org.apache.spark.mllib.recommendation.{Rating, ALS, MatrixFactorizationModel}

import com.datastax.spark.connector._


class ModelTrainer(sc: SparkContext) extends Actor with ActorLogging {

  private def trainModel() = {
    val table = context.system.settings.config.getString("cassandra.table")
    val keyspace = context.system.settings.config.getString("cassandra.keyspace")

    // Retrieve the ratings given by users from the database.
    // Map them to the rating structure needed by the Alternate Least Squares algorithm.
    val ratings = sc.cassandraTable(keyspace, table).map(record =>
      Rating(record.get[Int]("user_id"), record.get[Int]("item_id"), record.get[Double]("rating")))

    // These settings control how well the predictions are going
    // to fit the actual observations we loaded from Cassandra.
    // Modify these to optimize the model!
    val rank = 10
    val iterations = 10
    val lambda = 0.01

    val model = ALS.train(ratings, rank, iterations, lambda)
    sender ! TrainingResult(model)

    context.stop(self)
  }
} 
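For context, rank is the number of latent factors, iterations the number of ALS sweeps, and lambda the regularization strength. Once trained, the MatrixFactorizationModel sent back in TrainingResult can score individual user/item pairs, as in this one-line sketch (the IDs are hypothetical):

val predicted: Double = model.predict(42, 7) // predicted rating of item 7 by user 42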
Example 160
Source File: RecommenderSystem.scala    From recommendersystem   with Apache License 2.0
package com.infosupport.recommendedcontent.core

import java.io.Serializable

import akka.actor.{Props, Actor, ActorLogging}
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.Path
import org.apache.spark.SparkContext
import org.apache.spark.mllib.recommendation.MatrixFactorizationModel


class RecommenderSystem(sc: SparkContext) extends Actor with ActorLogging {

  private var model: Option[MatrixFactorizationModel] = None

  private def generateRecommendations(userId: Int, count: Int) = {
    log.info(s"Generating $count recommendations for user with ID $userId")

    // Generate recommendations based on the machine learning model.
    // When there's no trained model return an empty list instead.
    val results = model match {
      case Some(m) => m.recommendProducts(userId,count)
        .map(rating => Recommendation(rating.product,rating.rating))
        .toList

      case None => Nil
    }

    sender ! Recommendations(results)
  }
} 
Example 161
Source File: WindTurbineSimulator.scala    From akka_streams_tutorial   with MIT License
package sample.stream_actor

import akka.actor.{Actor, ActorLogging, Props}
import akka.http.scaladsl.model.StatusCode
import sample.stream_actor.WindTurbineSimulator._

case class WindTurbineSimulatorException(id: String) extends RuntimeException


object WindTurbineSimulator {
  def props(id: String, endpoint: String) =
    Props(new WindTurbineSimulator(id, endpoint))

  final case object Upgraded
  final case object Connected
  final case object Terminated
  final case class ConnectionFailure(ex: Throwable)
  final case class FailedUpgrade(statusCode: StatusCode)
}

class WindTurbineSimulator(id: String, endpoint: String)
  extends Actor with ActorLogging {
  implicit private val system = context.system
  implicit private val executionContext = system.dispatcher

  val webSocketClient = WebSocketClient(id, endpoint, self)

  override def receive: Receive = startup // initial state

  private def startup: Receive = {
    case Upgraded =>
      log.info(s"$id : WebSocket upgraded")
    case FailedUpgrade(statusCode) =>
      log.error(s"$id : Failed to upgrade WebSocket connection: $statusCode")
      throw WindTurbineSimulatorException(id)
    case ConnectionFailure(ex) =>
      log.error(s"$id : Failed to establish WebSocket connection: $ex")
      throw WindTurbineSimulatorException(id)
    case Connected =>
      log.info(s"$id : WebSocket connected")
      context.become(running)
  }

  private def running: Receive = {
    case Terminated =>
      log.error(s"$id : WebSocket connection terminated")
      throw WindTurbineSimulatorException(id)
    case ConnectionFailure(ex) =>
      log.error(s"$id : ConnectionFailure occurred: $ex")
      throw WindTurbineSimulatorException(id)
  }
} 
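A minimal sketch of launching a simulator, assuming an ActorSystem named system and an illustrative endpoint; a WindTurbineSimulatorException thrown here is left to the parent's supervision strategy.

val simulator = system.actorOf(
  WindTurbineSimulator.props("turbine-1", "ws://127.0.0.1:8080/measurements"),
  "windTurbineSimulator")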
Example 162
Source File: WebsocketClientActor.scala    From akka_streams_tutorial   with MIT License
package alpakka.tcp_to_websockets.websockets

import akka.actor.{Actor, ActorLogging, ActorRef, Props}
import akka.http.scaladsl.model.StatusCode
import alpakka.tcp_to_websockets.websockets.WebsocketClientActor._
import org.apache.commons.lang3.exception.ExceptionUtils

import scala.concurrent.duration._


case class ConnectionException(cause: String) extends RuntimeException

object WebsocketClientActor {
  def props(id: String, endpoint: String, websocketConnectionStatusActor: ActorRef) =
    Props(new WebsocketClientActor(id, endpoint, websocketConnectionStatusActor))

  final case object Upgraded
  final case object Connected
  final case object Terminated
  final case class ConnectionFailure(ex: Throwable)
  final case class FailedUpgrade(statusCode: StatusCode)
  final case class SendMessage(msg: String)

}

class WebsocketClientActor(id: String, endpoint: String, websocketConnectionStatusActor: ActorRef)
  extends Actor with ActorLogging {
  implicit private val system = context.system
  implicit private val executionContext = system.dispatcher

  val webSocketClient = WebSocketClient(id, endpoint, self)

  override def receive: Receive = startup // initial state

  private def startup: Receive = {
    case Upgraded =>
      log.info(s"Client $id: WebSocket upgraded")
    case FailedUpgrade(statusCode) =>
      log.error(s"Client $id: failed to upgrade WebSocket connection: $statusCode")
      websocketConnectionStatusActor ! WebsocketConnectionStatusActor.Terminated
      throw ConnectionException(statusCode.toString())
    case ConnectionFailure(ex) =>
      log.error(s"Client $id: failed to establish WebSocket connection: $ex")
      websocketConnectionStatusActor ! WebsocketConnectionStatusActor.Terminated
      throw ConnectionException(ExceptionUtils.getRootCause(ex).getMessage)
    case Connected =>
      log.info(s"Client $id: WebSocket connected")
      websocketConnectionStatusActor ! WebsocketConnectionStatusActor.Connected
      context.become(running)
    case SendMessage(msg) =>
      log.warning(s"In startup state, cannot handle message: $msg. Resending after 2 seconds")
      system.scheduler.scheduleOnce(2.seconds, self, SendMessage(msg))
  }

  private def running: Receive = {
    case SendMessage(msg) =>
      log.info(s"About to send message to WebSocket: $msg")
      webSocketClient.sendToWebsocket(msg)
    case Terminated =>
      log.error(s"Client $id: WebSocket connection terminated")
      websocketConnectionStatusActor ! WebsocketConnectionStatusActor.Terminated
      throw ConnectionException(s"Client $id: WebSocket connection terminated")
    case ConnectionFailure(ex) =>
      log.error(s"Client $id: ConnectionFailure occurred: $ex")
      websocketConnectionStatusActor ! WebsocketConnectionStatusActor.Terminated
      throw ConnectionException(ExceptionUtils.getRootCause(ex).getMessage)
  }

  override def postStop(): Unit = {
    websocketConnectionStatusActor ! WebsocketConnectionStatusActor.Terminated
  }
} 
Example 163
Source File: WebsocketConnectionStatusActor.scala    From akka_streams_tutorial   with MIT License
package alpakka.tcp_to_websockets.websockets

import akka.actor.{Actor, ActorLogging, Props}
import alpakka.tcp_to_websockets.websockets.WebsocketConnectionStatusActor.{Connected, ConnectionStatus, Terminated}


object WebsocketConnectionStatusActor {
  def props(id: String, endpoint: String) =
    Props(new WebsocketConnectionStatusActor(id, endpoint))

  final case object Connected
  final case object Terminated
  final case object ConnectionStatus

}

class WebsocketConnectionStatusActor(id: String, endpoint: String)
  extends Actor with ActorLogging {
  implicit private val system = context.system
  implicit private val executionContext = system.dispatcher

  var isConnected = false

  override def receive: Receive = {
    case Connected =>
      isConnected = true
      log.info(s"Client $id: connected to: $endpoint")

    case Terminated =>
      isConnected = false
      log.info(s"Client $id: terminated from: $endpoint")

    case ConnectionStatus =>
      sender() ! isConnected
  }
} 
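A sketch of querying the status actor with the ask pattern; statusActor is assumed to be a reference to a WebsocketConnectionStatusActor created elsewhere.

import akka.pattern.ask
import akka.util.Timeout
import scala.concurrent.Future
import scala.concurrent.duration._

implicit val timeout: Timeout = Timeout(2.seconds)
val isConnected: Future[Boolean] =
  (statusActor ? WebsocketConnectionStatusActor.ConnectionStatus).mapTo[Boolean]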
Example 164
Source File: DemoApp.scala    From sbt-reactive-app   with Apache License 2.0
package foo

import akka.actor.{ Actor, ActorLogging, ActorSystem, Props }
import akka.cluster.ClusterEvent.ClusterDomainEvent
import akka.cluster.{ Cluster, ClusterEvent }
import akka.http.scaladsl.Http
import akka.http.scaladsl.model._
import akka.http.scaladsl.server.Directives._
import akka.management.AkkaManagement
import akka.management.cluster.bootstrap.ClusterBootstrap
import akka.stream.ActorMaterializer

object DemoApp extends App {

  implicit val system = ActorSystem("Appka")

  import system.log
  implicit val mat = ActorMaterializer()
  val cluster = Cluster(system)

  log.info(s"Started [$system], cluster.selfAddress = ${cluster.selfAddress}")
  log.info("something2")
  //#start-akka-management
  AkkaManagement(system).start()
  //#start-akka-management
  ClusterBootstrap(system).start()

  cluster.subscribe(
    system.actorOf(Props[ClusterWatcher]),
    ClusterEvent.InitialStateAsEvents,
    classOf[ClusterDomainEvent])

  // add real app routes here
  val routes =
    path("hello") {
      get {
        complete(
          HttpEntity(
            ContentTypes.`text/html(UTF-8)`,
            "<h1>Hello</h1>"))
      }
    }
  Http().bindAndHandle(routes, "0.0.0.0", 8080)

  Cluster(system).registerOnMemberUp({
    log.info("Cluster member is up!")
  })

}

class ClusterWatcher extends Actor with ActorLogging {
  val cluster = Cluster(context.system)

  override def receive = {
    case msg ⇒ log.info(s"Cluster ${cluster.selfAddress} >>> $msg")
  }
} 
Example 165
Source File: LocalApplicationMaster.scala    From DataXServer   with Apache License 2.0
package org.tianlangstudio.data.hamal.yarn.local

import java.util.UUID

import akka.actor.{Actor, ActorLogging, Props}
import org.slf4j.LoggerFactory
import org.tianlangstudio.data.hamal.core.{Constants, HamalConf}
import org.tianlangstudio.data.hamal.server.thrift.ThriftServerApp
import org.tianlangstudio.data.hamal.yarn.{ApplyExecutor, TaskScheduler}
import org.tianlangstudio.data.hamal.yarn.thrift.AkkaThriftTaskHandler
import org.tianlangstudio.data.hamal.yarn.util.AkkaUtils

/**
 * Created by zhuhq on 2016/5/3.
 * Acquires execution resources on the local machine and runs tasks in batches
 * using multiple processes.
 */
object LocalApplicationMaster extends App{
  val logging = org.slf4j.LoggerFactory.getLogger(classOf[LocalApplicationMaster])
  val dataxConf = new HamalConf()
  logging.info("creating master actor system")
  val schedulerHost = dataxConf.getString(Constants.DATAX_MASTER_HOST,"127.0.0.1")
  val (schedulerSystem,schedulerPort) = AkkaUtils.createActorSystem(Constants.AKKA_JOB_SCHEDULER_SYSTEM,schedulerHost,0,dataxConf)
  logging.info(s"master actor system created, listening on port $schedulerPort")
  val amActor = schedulerSystem.actorOf(Props(classOf[LocalApplicationMaster],dataxConf),Constants.AKKA_AM_ACTOR)
  val taskSchedulerActor = schedulerSystem.actorOf(Props(classOf[TaskScheduler],dataxConf,amActor),Constants.AKKA_JOB_SCHEDULER_ACTOR)
  taskSchedulerActor ! "start taskSchedulerActor"
  logging.info("starting thrift server")
  val thriftPort = dataxConf.getInt(Constants.THRIFT_SERVER_PORT,9777)
  val thriftHost = dataxConf.getString(Constants.THRIFT_SERVER_HOST,"127.0.0.1")
  val thriftConcurrence = dataxConf.getInt(Constants.THRIFT_SERVER_CONCURRENCE,8)
  val thriftServerHandler = new AkkaThriftTaskHandler(taskSchedulerActor)

  logging.info(s"starting thrift server on $thriftHost:$thriftPort")
  ThriftServerApp.start(thriftHost,thriftPort,thriftServerHandler)

}
class LocalApplicationMaster(dataxConf: HamalConf) extends Actor with ActorLogging {
  private val logger = LoggerFactory.getLogger(getClass)
  val runEnv = dataxConf.getString(Constants.RUN_ENV, Constants.RUN_ENV_PRODUCTION).toLowerCase()
  logger.info("run env:{}", runEnv)
  val containerCmd = if(Constants.RUN_ENV_DEVELOPMENT.equals(runEnv)) {
    s"""
       |java ${System.getProperty("java.class.path")}
       | -Ddatax.home=${dataxConf.getString(Constants.DATAX_HOME)} -Xms512M -Xmx1024M
       |  -XX:PermSize=128M -XX:MaxPermSize=512M com.tianlangstudio.data.datax.Executor
     """.stripMargin
  }else {
    dataxConf.getString(Constants.DATAX_EXECUTOR_CMD, "./startLocalExecutor.sh")
  }


  override def receive: Receive = {
    case msg:String =>
      log.info(s"${self.path} receive msg: $msg")
    case ApplyExecutor(num) =>
      applyExecutor(num)
  }
  private def applyExecutor(num:Int): Unit = {

    log.info(s"applying for $num executors")
    for(i <- 0 until num) {

      sys.process.stringToProcess(
          containerCmd + " " +
          LocalApplicationMaster.schedulerHost + ":" + LocalApplicationMaster.schedulerPort + " " +
          UUID.randomUUID().toString).run()
      log.info(s"apply executor ${i+1}/$num")
    }

  }
} 
Example 166
Source File: CouchbaseStatements.scala    From akka-persistence-couchbase   with Apache License 2.0
package akka.persistence.couchbase.journal

import java.util.concurrent.TimeUnit

import akka.actor.{Actor, ActorLogging}
import akka.persistence.couchbase.CouchbaseJournalConfig
import com.couchbase.client.java.Bucket
import com.couchbase.client.java.document.JsonDocument
import com.couchbase.client.java.document.json.JsonArray
import com.couchbase.client.java.view._
import rx.Observable
import rx.functions.Func1

import scala.collection.immutable.Seq
import scala.concurrent.ExecutionContext
import scala.util.{Failure, Try}

trait CouchbaseStatements extends Actor with ActorLogging {

  def config: CouchbaseJournalConfig

  def bucket: Bucket

  implicit def executionContext: ExecutionContext

  def bySequenceNr(persistenceId: String, from: Long, to: Long): ViewQuery = {
    ViewQuery
      .from("journal", "by_sequenceNr")
      .stale(config.stale)
      .startKey(JsonArray.from(persistenceId, from.asInstanceOf[AnyRef]))
      .endKey(JsonArray.from(persistenceId, to.asInstanceOf[AnyRef]))
  }

  
  def nextKey(name: String): Try[String] = {
    Try {
      val counterKey = s"counter::$name"
      val counter = bucket.counter(counterKey, 1L, 0L).content()
      s"$name-$counter"
    }
  }
} 
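nextKey allocates monotonically increasing document keys by atomically incrementing a Couchbase counter document (delta 1, initial value 0), so successive calls yield name-1, name-2, and so on, even across restarts. A sketch of a caller inside an actor that mixes in this trait:

nextKey("journal-message") match {
  case scala.util.Success(key) => log.debug("Allocated key: {}", key) // e.g. "journal-message-1"
  case scala.util.Failure(e)   => log.error(e, "Failed to allocate key")
}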
Example 167
Source File: CouchbaseSnapshotStore.scala    From akka-persistence-couchbase   with Apache License 2.0
package akka.persistence.couchbase.snapshot

import java.util.concurrent.TimeUnit

import akka.actor.ActorLogging
import akka.persistence.couchbase.{CouchbaseExtension, Message}
import akka.persistence.serialization.Snapshot
import akka.persistence.snapshot.SnapshotStore
import akka.persistence.{SelectedSnapshot, SnapshotMetadata, SnapshotSelectionCriteria}
import akka.serialization.SerializationExtension
import com.couchbase.client.java.view.ViewRow

import scala.collection.JavaConverters._
import scala.concurrent.Future
import scala.util.Try

class CouchbaseSnapshotStore extends SnapshotStore with CouchbaseStatements with ActorLogging {

  implicit val executionContext = context.dispatcher

  val couchbase = CouchbaseExtension(context.system)
  val serialization = SerializationExtension(context.system)

  def config = couchbase.snapshotStoreConfig

  def bucket = couchbase.snapshotStoreBucket

  
  override def saveAsync(metadata: SnapshotMetadata, data: Any): Future[Unit] = {
    Future.fromTry[Unit](
      Try {
        val snapshot = Snapshot(data)
        val message = Message(serialization.findSerializerFor(snapshot).toBinary(snapshot))
        SnapshotMessage.create(metadata, message)
      } flatMap executeSave
    )
  }

  override def deleteAsync(metadata: SnapshotMetadata): Future[Unit] = {
    Future.fromTry[Unit](
      Try {
        bucket.remove(SnapshotMessageKey.fromMetadata(metadata).value)
      }
    )
  }

  override def deleteAsync(persistenceId: String, criteria: SnapshotSelectionCriteria): Future[Unit] = {
    Future.fromTry[Unit](
      Try {
        query(persistenceId, criteria, Integer.MAX_VALUE).foreach { snapshotMessage =>
          bucket.remove(SnapshotMessageKey.fromMetadata(snapshotMessage.metadata).value)
        }
      }
    )
  }
} 
Example 168
Source File: CouchbaseStatements.scala    From akka-persistence-couchbase   with Apache License 2.0
package akka.persistence.couchbase.snapshot

import java.util.concurrent.TimeUnit

import akka.actor.{Actor, ActorLogging}
import akka.persistence.couchbase.CouchbaseSnapshotStoreConfig
import com.couchbase.client.java.Bucket
import com.couchbase.client.java.document.JsonDocument
import com.couchbase.client.java.document.json.JsonArray
import com.couchbase.client.java.view.ViewQuery

import scala.concurrent.ExecutionContext
import scala.util.{Failure, Try}

trait CouchbaseStatements extends Actor with ActorLogging {

  def config: CouchbaseSnapshotStoreConfig

  def bucket: Bucket

  implicit def executionContext: ExecutionContext

  def bySequenceNr(persistenceId: String, maxSequenceNr: Long): ViewQuery = {
    ViewQuery
      .from("snapshots", "by_sequenceNr")
      .stale(config.stale)
      .descending(true)
      .startKey(JsonArray.from(persistenceId, maxSequenceNr.asInstanceOf[AnyRef]))
      .endKey(JsonArray.from(persistenceId, Long.MinValue.asInstanceOf[AnyRef]))
  }

  def byTimestamp(persistenceId: String, maxTimestamp: Long): ViewQuery = {
    ViewQuery
      .from("snapshots", "by_timestamp")
      .stale(config.stale)
      .descending(true)
      .startKey(JsonArray.from(persistenceId, maxTimestamp.asInstanceOf[AnyRef]))
      .endKey(JsonArray.from(persistenceId, Long.MinValue.asInstanceOf[AnyRef]))
  }

  def all(persistenceId: String): ViewQuery = {
    ViewQuery
      .from("snapshots", "all")
      .stale(config.stale)
      .descending(true)
      .key(persistenceId)
  }

  
  def executeSave(snapshotMessage: SnapshotMessage): Try[Unit] = {
    Try(SnapshotMessageKey.fromMetadata(snapshotMessage.metadata).value).flatMap { key =>
      Try {
        val jsonObject = SnapshotMessage.serialize(snapshotMessage)
        val jsonDocument = JsonDocument.create(key, jsonObject)
        bucket.upsert(
          jsonDocument,
          config.persistTo,
          config.replicateTo,
          config.timeout.toSeconds,
          TimeUnit.SECONDS
        )
        log.debug("Wrote snapshot: {}", key)
      } recoverWith {
        case e =>
          log.error(e, "Writing snapshot: {}", key)
          Failure(e)
      }
    }
  }
} 
Example 169
Source File: GlobalWatchService.scala    From incubator-retired-iota   with Apache License 2.0
package org.apache.iota.fey

import java.nio.file.{Files, Path, Paths, WatchEvent}

import akka.actor.{Actor, ActorLogging, ActorRef}
import org.apache.iota.fey.GlobalWatchService.REGISTER_WATCHER_PERFORMER
import org.apache.iota.fey.WatchingDirectories.STOPPED

class GlobalWatchService extends Actor with ActorLogging {

  //WatchService
  var watchThread:Thread = null
  val watchFileTask:GlobalWatchServiceTask = new GlobalWatchServiceTask(self)

  override def preStart(): Unit = {
    startWatcher("PRE-START")
  }

  override def postStop(): Unit = {
    stopWatcher("POST-STOP")
  }

  private def startWatcher(from: String) = {
    log.info(s"Starting Global Watcher from $from")
    watchThread = new Thread(watchFileTask, "FEY_GLOBAL_WATCH_SERVICE_PERFORMERS")
    watchThread.setDaemon(true)
    watchThread.start()
  }

  private def stopWatcher(from: String) = {
    log.info(s"Stopping Global Watcher from $from")
    if(watchThread != null && watchThread.isAlive){
      watchThread.interrupt()
      watchThread = null
    }
  }

  override def receive: Receive = {
    case REGISTER_WATCHER_PERFORMER(path, file_name, actor, events, loadExists) =>
      registerPath(path,file_name,actor,events,loadExists)
    case STOPPED =>
      stopWatcher("STOPPED-THREAD")
      startWatcher("STOPPED-THREAD")
    case x => log.error(s"Unknown message $x")
  }

  private def broadcastMessageIfFileExists(actor: ActorRef, pathWithFile: String) = {
    val filePath = Paths.get(pathWithFile)
    if(Files.exists(filePath)){
      log.info(s"File $pathWithFile exists. Broadcasting message to actor ${actor.path.toString}")
      actor ! GlobalWatchService.ENTRY_CREATED(filePath)
    }
  }

  private def registerPath(dir_path: String, file_name:Option[String], actor: ActorRef, events: Array[WatchEvent.Kind[_]], loadExists: Boolean) = {
    WatchingDirectories.actorsInfo.get((dir_path,file_name)) match {
      case Some(info) =>
        val newInfo:Map[WatchEvent.Kind[_], Array[ActorRef]] = events.map(event => {
          info.get(event) match {
            case Some(actors) => (event, (Array(actor) ++ actors))
            case None => (event, Array(actor))
          }
        }).toMap
        WatchingDirectories.actorsInfo.put((dir_path,file_name), info ++ newInfo)
        watchFileTask.watch(Paths.get(dir_path),actor.path.toString,events)
      case None =>
        val tmpEvents:Map[WatchEvent.Kind[_], Array[ActorRef]] = events.map(event => {(event, Array(actor))}).toMap
        WatchingDirectories.actorsInfo.put((dir_path,file_name), tmpEvents)
        watchFileTask.watch(Paths.get(dir_path),actor.path.toString,events)
    }

    if(file_name.isDefined && loadExists){
      log.info(s"Checking if file $dir_path/${file_name.get} already exist")
      broadcastMessageIfFileExists(actor, s"$dir_path/${file_name.get}")
    }

  }

}

object GlobalWatchService{
  sealed case class ENTRY_CREATED(path:Path)
  sealed case class ENTRY_MODIFIED(path:Path)
  sealed case class ENTRY_DELETED(path:Path)
  sealed case class REGISTER_WATCHER_PERFORMER(dir_path: String, file_name:Option[String],
                                               actor: ActorRef, events: Array[WatchEvent.Kind[_]],
                                               loadIfExists: Boolean)
} 
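A sketch of registering a listener for file-creation events, assuming watchService is the GlobalWatchService reference and listener the interested actor; the directory and file name are illustrative.

import java.nio.file.{StandardWatchEventKinds, WatchEvent}

watchService ! GlobalWatchService.REGISTER_WATCHER_PERFORMER(
  dir_path = "/tmp/fey",
  file_name = Some("orchestration.json"),
  actor = listener,
  events = Array[WatchEvent.Kind[_]](StandardWatchEventKinds.ENTRY_CREATE),
  loadIfExists = true)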
Example 170
Source File: JsonReceiverActor.scala    From incubator-retired-iota   with Apache License 2.0
package org.apache.iota.fey

import java.nio.file.Paths
import java.io.File

import akka.actor.{Actor, ActorLogging, ActorRef, Props}
import play.api.libs.json.{JsValue, Json}

class JsonReceiverActor extends Actor with ActorLogging {

  import JsonReceiverActor._

  val monitoring_actor = FEY_MONITOR.actorRef
  var watchFileTask: WatchServiceReceiver = _
  var watchThread: Thread = _

  override def preStart() {
    prepareDynamicJarRepo()
    processCheckpointFiles()

    watchFileTask = new WatchServiceReceiver(self)
    watchThread = new Thread(watchFileTask, GLOBAL_DEFINITIONS.WATCH_SERVICE_THREAD)

    monitoring_actor  ! Monitor.START(Utils.getTimestamp)
    watchThread.setDaemon(true)
    watchThread.start()

    watchFileTask.watch(Paths.get(CONFIG.JSON_REPOSITORY))
  }

  private def prepareDynamicJarRepo() = {
    val jarDir = new File(CONFIG.DYNAMIC_JAR_REPO)
    if (!jarDir.exists()){
      jarDir.mkdir()
    }else if(CONFIG.DYNAMIC_JAR_FORCE_PULL){
      jarDir.listFiles().foreach(_.delete())
    }
  }


  private def processCheckpointFiles() = {
    if (CONFIG.CHEKPOINT_ENABLED) {
      val checkpoint = new CheckpointProcessor(self)
      checkpoint.run()
    }
  }

  override def postStop() {
    monitoring_actor  ! Monitor.STOP(Utils.getTimestamp)
    watchThread.interrupt()
    watchThread.join()
  }

  override def postRestart(reason: Throwable): Unit = {
    monitoring_actor  ! Monitor.RESTART(reason, Utils.getTimestamp)
    preStart()
  }

  override def receive: Receive = {
    case JSON_RECEIVED(json, file) =>
      log.info(s"JSON RECEIVED => ${Json.stringify(json)}")
      context.parent ! FeyCore.ORCHESTRATION_RECEIVED(json, Some(file))

    case _ =>
  }

}

object JsonReceiverActor {

  case class JSON_RECEIVED(json: JsValue, file: File)

} 
Example 171
Source File: GlobalPerformer.scala    From incubator-retired-iota   with Apache License 2.0
package org.apache.iota.fey

import akka.actor.SupervisorStrategy.Restart
import akka.actor.{Actor, ActorLogging, ActorRef, OneForOneStrategy, Props, Terminated}
import akka.routing._
import play.api.libs.json.JsObject

import scala.collection.mutable.HashMap
import scala.concurrent.duration._

protected class GlobalPerformer(val orchestrationID: String,
                                val orchestrationName: String,
                                val globalPerformers: List[JsObject],
                                val ensemblesSpec: List[JsObject]) extends Actor with ActorLogging {

  val monitoring_actor = FEY_MONITOR.actorRef
  var global_metadata: Map[String, Performer] = Map.empty[String, Performer]

  override def receive: Receive = {

    case GlobalPerformer.PRINT_GLOBAL =>
      context.actorSelection("*") ! FeyGenericActor.PRINT_PATH

    case Terminated(actor) =>
      monitoring_actor  ! Monitor.TERMINATE(actor.path.toString, Utils.getTimestamp)
      log.error(s"DEAD Global Performers ${actor.path.name}")
      context.children.foreach{ child =>
        context.unwatch(child)
        context.stop(child)
      }
      throw new RestartGlobalPerformers(s"DEAD Global Performer ${actor.path.name}")

    case GetRoutees => //Discard

    case x => log.warning(s"Message $x not treated by Global Performers")
  }

  
  private def loadClazzFromJar(classPath: String, jarLocation: String, jarName: String):Class[FeyGenericActor] = {
    try {
      Utils.loadActorClassFromJar(jarLocation,classPath,jarName)
    }catch {
      case e: Exception =>
        log.error(e,s"Could not load class $classPath from jar $jarLocation. Please, check the Jar repository path as well the jar name")
        throw e
    }
  }

}

object GlobalPerformer{

  val activeGlobalPerformers:HashMap[String, Map[String, ActorRef]] = HashMap.empty[String, Map[String, ActorRef]]

  case object PRINT_GLOBAL
} 
Example 172
Source File: IdentifyFeyActors.scala    From incubator-retired-iota   with Apache License 2.0
package org.apache.iota.fey

import akka.actor.{Actor, ActorIdentity, ActorLogging, ActorPath, Identify}
import akka.routing.{ActorRefRoutee, GetRoutees, Routees}
import play.api.libs.json._

import scala.collection.mutable.HashSet

protected class IdentifyFeyActors extends Actor with ActorLogging {

  import IdentifyFeyActors._

  override def receive: Receive = {
    case IDENTIFY_TREE(startPath) =>
      log.info("Current Actors in system:")
      actorsPath = HashSet.empty
      rootPath = startPath
      log.info(startPath)
      self ! ActorPath.fromString(startPath)

    case path: ActorPath =>
      context.actorSelection(path / "*") ! Identify(())
      context.actorSelection(path / "*") ! GetRoutees

    case ActorIdentity(_, Some(ref)) =>
      actorsPath.add(ref.path.toString)
      log.info(ref.path.toString)
      self ! ref.path

    case routees:Routees =>
      routees.routees
        .map(_.asInstanceOf[ActorRefRoutee])
        .foreach(routee => {
          log.info(routee.ref.path.toString)
          actorsPath.add(routee.ref.path.toString)
        })

    case _ =>
  }
}

protected object IdentifyFeyActors {

  case class IDENTIFY_TREE(startPath: String)

  var actorsPath: HashSet[String] = HashSet.empty
  var rootPath: String = _

  def generateTreeJson(): String = {
    val trie = new Trie("FEY-MANAGEMENT-SYSTEM")
    actorsPath.map(_.replace("user/","")).foreach(trie.append(_))

    Json.stringify(trie.print)
  }

  //Static HTML content from d3
  val html = scala.io.Source.fromInputStream(getClass.getResourceAsStream("/d3Tree.html"), "UTF-8")
    .getLines()
    .mkString("\n")

  def getHTMLTree(json: String): String = {
   html.replace("$MYJSONHIERARCHY", json)
  }

} 
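A sketch of triggering a hierarchy walk and rendering it, assuming identifier is a reference to this actor; the start path is illustrative, and because the traversal is asynchronous, generateTreeJson reflects only the replies received so far.

identifier ! IdentifyFeyActors.IDENTIFY_TREE("akka://FEY-MANAGEMENT-SYSTEM/user")
// ...after the Identify replies have arrived:
val page: String = IdentifyFeyActors.getHTMLTree(IdentifyFeyActors.generateTreeJson())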
Example 173
Source File: VertxSingleConfirmationSender.scala    From eventuate   with Apache License 2.0
package com.rbmhtechnology.eventuate.adapter.vertx

import akka.actor.{ ActorLogging, ActorRef, Props }
import akka.pattern.pipe
import com.rbmhtechnology.eventuate.adapter.vertx.api.EndpointRouter
import com.rbmhtechnology.eventuate.{ ConfirmedDelivery, EventsourcedActor }
import io.vertx.core.Vertx

import scala.concurrent.duration.FiniteDuration
import scala.util.{ Failure, Success }

private[vertx] object VertxSingleConfirmationSender {

  case class DeliverEvent(evt: EventEnvelope, deliveryId: String)
  case class Confirm(deliveryId: String)
  case class DeliverFailed(evt: EventEnvelope, deliveryId: String, err: Throwable)
  case object Redeliver

  case class DeliveryConfirmed()

  def props(id: String, eventLog: ActorRef, endpointRouter: EndpointRouter, vertx: Vertx, confirmationTimeout: FiniteDuration): Props =
    Props(new VertxSingleConfirmationSender(id, eventLog, endpointRouter, vertx, confirmationTimeout))
}

private[vertx] class VertxSingleConfirmationSender(val id: String, val eventLog: ActorRef, val endpointRouter: EndpointRouter, val vertx: Vertx, confirmationTimeout: FiniteDuration)
  extends EventsourcedActor with ConfirmedDelivery with VertxSender with ActorLogging {

  import VertxSingleConfirmationSender._
  import context.dispatcher

  context.system.scheduler.schedule(confirmationTimeout, confirmationTimeout, self, Redeliver)

  override def onCommand: Receive = {
    case DeliverEvent(envelope, deliveryId) =>
      send[Any](envelope.address, envelope.evt, confirmationTimeout)
        .map(_ => Confirm(deliveryId))
        .recover {
          case err => DeliverFailed(envelope, deliveryId, err)
        }
        .pipeTo(self)

    case Confirm(deliveryId) if unconfirmed.contains(deliveryId) =>
      persistConfirmation(DeliveryConfirmed(), deliveryId) {
        case Success(_) => // confirmation persisted; nothing further to do
        case Failure(err) => log.error(err, s"Confirmation for delivery with id '$deliveryId' could not be persisted.")
      }

    case Redeliver =>
      redeliverUnconfirmed()

    case DeliverFailed(evt, deliveryId, err) =>
      log.warning(s"Delivery with id '$deliveryId' for event [$evt] failed with $err. The delivery will be retried.")
  }

  override def onEvent: Receive = {
    case DeliveryConfirmed() =>
    // confirmations should not be published
    case ev =>
      endpointRouter.endpoint(ev) match {
        case Some(endpoint) =>
          val deliveryId = lastSequenceNr.toString
          deliver(deliveryId, DeliverEvent(EventEnvelope(endpoint, lastHandledEvent), deliveryId), self.path)
        case None =>
      }
  }
} 
Example 174
Source File: ClusterCustomDowning.scala    From akka-cluster-custom-downing   with Apache License 2.0
package tanukki.akka.cluster.autodown

import akka.actor.{Address, ActorLogging, Scheduler}
import akka.cluster.Cluster
import akka.cluster.ClusterEvent.ClusterDomainEvent
import scala.concurrent.duration._

trait ClusterCustomDowning extends ActorLogging { base: CustomAutoDownBase =>

  val cluster = Cluster(context.system)

  override def selfAddress: Address = cluster.selfAddress

  override def scheduler: Scheduler = {
    if (context.system.scheduler.maxFrequency < 1.second / cluster.settings.SchedulerTickDuration) {
      log.warning("CustomDowning does not use a cluster dedicated scheduler. Cluster will use a dedicated scheduler if configured " +
        "with 'akka.scheduler.tick-duration' [{} ms] >  'akka.cluster.scheduler.tick-duration' [{} ms].",
        (1000 / context.system.scheduler.maxFrequency).toInt, cluster.settings.SchedulerTickDuration.toMillis)
    }
    context.system.scheduler
  }

  override def preStart(): Unit = {
    cluster.subscribe(self, classOf[ClusterDomainEvent])
  }
  override def postStop(): Unit = {
    cluster.unsubscribe(self)
  }
} 
Example 175
Source File: SimpleClusterListener.scala    From constructr-consul   with Apache License 2.0
package com.tecsisa.constructr.coordination
package demo

import akka.actor.{ Actor, ActorLogging, Address, Props }
import akka.cluster.ClusterEvent.{ MemberEvent, MemberJoined, MemberRemoved, MemberUp, UnreachableMember }
import akka.cluster.Cluster

object SimpleClusterListener {

  case object GetMemberNodes

  final val Name = "clusterListener"

  def props: Props = Props(new SimpleClusterListener)
}

class SimpleClusterListener extends Actor with ActorLogging {
  import SimpleClusterListener._

  val cluster = Cluster(context.system)

  private var members = Set.empty[Address]

  override def preStart(): Unit =
    cluster.subscribe(self, classOf[MemberEvent], classOf[UnreachableMember])

  override def postStop(): Unit = cluster.unsubscribe(self)

  override def receive = {
    case GetMemberNodes =>
      sender() ! members
    case MemberJoined(member) =>
      log.info("Member joined: {}", member.address)
      members += member.address
    case MemberUp(member) =>
      log.info("Member up: {}", member.address)
      members += member.address
    case MemberRemoved(member, _) =>
      log.info("Member removed: {}", member.address)
      members -= member.address
  }
} 
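A sketch of querying the listener for the current member set, assuming listener was created from SimpleClusterListener.props.

import akka.actor.Address
import akka.pattern.ask
import akka.util.Timeout
import scala.concurrent.Future
import scala.concurrent.duration._

implicit val timeout: Timeout = Timeout(3.seconds)
val memberNodes: Future[Set[Address]] =
  (listener ? SimpleClusterListener.GetMemberNodes).mapTo[Set[Address]]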
Example 176
Source File: S3SnapshotStore.scala    From akka-persistence-s3   with MIT License
package akka.persistence.s3
package snapshot

import java.io.ByteArrayInputStream
import akka.actor.ActorLogging
import akka.persistence.serialization.Snapshot
import akka.persistence.{ SelectedSnapshot, SnapshotMetadata, SnapshotSelectionCriteria }
import akka.persistence.snapshot.SnapshotStore
import akka.serialization.SerializationExtension
import com.amazonaws.services.s3.model.{ ObjectMetadata, S3ObjectInputStream, ListObjectsRequest }
import com.typesafe.config.Config
import scala.collection.JavaConversions._
import scala.collection.immutable
import scala.concurrent.Future
import scala.util.control.NonFatal

case class SerializationResult(stream: ByteArrayInputStream, size: Int)

class S3SnapshotStore(config: Config) extends SnapshotStore with ActorLogging with SnapshotKeySupport {
  import context.dispatcher

  val settings = new S3SnapshotConfig(config)

  val s3Client: S3Client = new S3Client {
    val s3ClientConfig = new S3ClientConfig(context.system.settings.config.getConfig("s3-client"))
  }

  private val serializationExtension = SerializationExtension(context.system)

  private val s3Dispatcher = context.system.dispatchers.lookup("s3-snapshot-store.s3-client-dispatcher")

  val extensionName = settings.extension

  override def loadAsync(persistenceId: String, criteria: SnapshotSelectionCriteria): Future[Option[SelectedSnapshot]] = {
    snapshotMetadatas(persistenceId, criteria)
      .map(_.sorted.takeRight(settings.maxLoadAttempts))
      .flatMap(load)
  }

  private def load(metadata: immutable.Seq[SnapshotMetadata]): Future[Option[SelectedSnapshot]] = metadata.lastOption match {
    case None => Future.successful(None)
    case Some(md) =>
      s3Client.getObject(settings.bucketName, snapshotKey(md))(s3Dispatcher)
        .map { obj =>
          val snapshot = deserialize(obj.getObjectContent)
          Some(SelectedSnapshot(md, snapshot.data))
        } recoverWith {
          case NonFatal(e) =>
            log.error(e, s"Error loading snapshot [${md}]")
            load(metadata.init) // try older snapshot
        }
  }

  override def saveAsync(metadata: SnapshotMetadata, snapshot: Any): Future[Unit] = {
    val serialized = serialize(Snapshot(snapshot))
    val objectMetadata = new ObjectMetadata()
    objectMetadata.setContentLength(serialized.size)
    s3Client.putObject(
      settings.bucketName,
      snapshotKey(metadata),
      serialized.stream,
      objectMetadata
    )(s3Dispatcher).map(_ => ())
  }

  override def deleteAsync(metadata: SnapshotMetadata): Future[Unit] = {
    if (metadata.timestamp == 0L)
      deleteAsync(metadata.persistenceId, SnapshotSelectionCriteria(metadata.sequenceNr, Long.MaxValue, metadata.sequenceNr, Long.MinValue))
    else
      s3Client.deleteObject(settings.bucketName, snapshotKey(metadata))(s3Dispatcher)
  }

  override def deleteAsync(persistenceId: String, criteria: SnapshotSelectionCriteria): Future[Unit] = {
    // Complete only after every individual snapshot deletion has completed.
    snapshotMetadatas(persistenceId, criteria)
      .flatMap(metadatas => Future.sequence(metadatas.map(deleteAsync)))
      .map(_ => ())
  }

  private def snapshotMetadatas(persistenceId: String, criteria: SnapshotSelectionCriteria): Future[List[SnapshotMetadata]] = {
    s3Client.listObjects(
      new ListObjectsRequest()
        .withBucketName(settings.bucketName)
        .withPrefix(prefixFromPersistenceId(persistenceId))
        .withDelimiter("/")
    )(s3Dispatcher)
      .map(_.getObjectSummaries.toList.map(s => parseKeyToMetadata(s.getKey))
        .filter(m => m.sequenceNr >= criteria.minSequenceNr && m.sequenceNr <= criteria.maxSequenceNr && m.timestamp >= criteria.minTimestamp && m.timestamp <= criteria.maxTimestamp))

  }

  protected def deserialize(inputStream: S3ObjectInputStream): Snapshot =
    serializationExtension.deserialize(akka.persistence.serialization.streamToBytes(inputStream), classOf[Snapshot]).get

  protected def serialize(snapshot: Snapshot): SerializationResult = {
    // Serialize once and reuse the bytes for both the stream and its size.
    val serialized = serializationExtension.findSerializerFor(snapshot).toBinary(snapshot)
    SerializationResult(new ByteArrayInputStream(serialized), serialized.size)
  }
} 
Example 177
Source File: NoSharingDepot.scala    From trucking-iot   with Apache License 2.0
package com.orendainx.trucking.simulator.depots

import akka.actor.{ActorLogging, Props, Stash}
import com.orendainx.trucking.simulator.depots.ResourceDepot.{RequestRoute, RequestTruck, ReturnRoute, ReturnTruck}
import com.orendainx.trucking.simulator.generators.DataGenerator.NewResource
import com.orendainx.trucking.simulator.models._
import com.orendainx.trucking.simulator.services.RouteParser
import com.orendainx.trucking.simulator.models.EmptyRoute
import com.typesafe.config.Config

import scala.util.Random


object NoSharingDepot {

  def props()(implicit config: Config) =
    Props(new NoSharingDepot())
}

class NoSharingDepot(implicit config: Config) extends ResourceDepot with Stash with ActorLogging {

  private val trucksAvailable = Random.shuffle(1 to config.getInt("resource-depot.trucks-available")).toList.map(Truck).toBuffer
  private val routesAvailable = RouteParser(config.getString("resource-depot.route-directory")).routes.toBuffer

  log.info("Trucks and routes initialized and ready for deployment")
  log.info(s"${trucksAvailable.length} trucks available.")
  log.info(s"${routesAvailable.length} routes available.")

  def receive = {
    case RequestTruck(previous) if previous != EmptyTruck =>
      val ind = trucksAvailable.indexWhere(_ != previous)
      if (ind >= 0) sender() ! NewResource(trucksAvailable.remove(ind))
      else stash() // None available, stash request for later

    case RequestTruck(_) =>
      if (trucksAvailable.nonEmpty) sender() ! NewResource(trucksAvailable.remove(0))
      else stash()

    case RequestRoute(previous) if previous != EmptyRoute =>
      val ind = routesAvailable.indexWhere(_ != previous)
      if (ind >= 0) sender() ! NewResource(routesAvailable.remove(ind))
      else stash()

    case RequestRoute(_) =>
      if (routesAvailable.nonEmpty) sender() ! NewResource(routesAvailable.remove(0))
      else stash()

    case ReturnTruck(truck) =>
      trucksAvailable.append(truck)
      unstashAll()

    case ReturnRoute(route) =>
      routesAvailable.append(route)
      unstashAll()
  }
} 
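A sketch of a generator requesting resources from the depot, assuming an implicit Config in scope and that the messages are sent from inside an actor, since the depot replies to sender().

val depot = context.actorOf(NoSharingDepot.props(), "depot")
depot ! ResourceDepot.RequestTruck(EmptyTruck)
depot ! ResourceDepot.RequestRoute(EmptyRoute)
// Replies arrive as DataGenerator.NewResource(resource) messages.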
Example 178
Source File: KafkaTransmitter.scala    From trucking-iot   with Apache License 2.0
package com.orendainx.trucking.simulator.transmitters

import java.util.Properties

import akka.actor.{ActorLogging, Props}
import com.orendainx.trucking.simulator.transmitters.DataTransmitter.Transmit
import org.apache.kafka.clients.CommonClientConfigs
import org.apache.kafka.clients.producer.{KafkaProducer, ProducerConfig, ProducerRecord}

import scala.sys.SystemProperties
import com.typesafe.config.Config


object KafkaTransmitter {
  def props(topic: String)(implicit config: Config) = Props(new KafkaTransmitter(topic))
}

class KafkaTransmitter(topic: String)(implicit config: Config) extends DataTransmitter with ActorLogging {

  private val props = new Properties()
  props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, config.getString("transmitter.kafka.bootstrap-servers"))
  props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, config.getString("transmitter.kafka.key-serializer"))
  props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, config.getString("transmitter.kafka.value-serializer"))

  // Enable settings for a secure environment, if necessary.
  // See: http://docs.hortonworks.com/HDPDocuments/HDP2/HDP-2.3.4/bk_secure-kafka-ambari/content/ch_secure-kafka-produce-events.html
  val systemProperties = new SystemProperties
  if (config.getBoolean("transmitter.kafka.security-enabled")) {
    props.put(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, config.getString("transmitter.kafka.security-protocol"))
    systemProperties.put("java.security.auth.login.config", config.getString("transmitter.kafka.jaas-file"))
  }

  private val producer = new KafkaProducer[String, String](props)

  def receive = {
    case Transmit(data) => producer.send(new ProducerRecord(topic, data.toCSV))
  }

  override def postStop(): Unit = {
    producer.close()
    log.info("KafkaTransmitter closed its producer.")
  }
} 
Example 179
Source File: BufferTransmitter.scala    From trucking-iot   with Apache License 2.0
package com.orendainx.trucking.simulator.transmitters

import akka.actor.{ActorLogging, Props}
import com.orendainx.trucking.commons.models.TruckingData
import com.orendainx.trucking.simulator.transmitters.BufferTransmitter.Fetch
import com.orendainx.trucking.simulator.transmitters.DataTransmitter.Transmit

import scala.collection.mutable


object BufferTransmitter {
  case object Fetch

  def props() = Props(new BufferTransmitter)
}

class BufferTransmitter extends DataTransmitter with ActorLogging {

  val buffer = mutable.ListBuffer.empty[TruckingData]

  def receive = {
    case Transmit(data) =>
      buffer += data
      log.debug(s"Data received: buffered ${buffer.size}")

    case Fetch =>
      sender() ! buffer.toList
      log.debug(s"Sent ${buffer.size} data. ${buffer.toString()}")
      buffer.clear()
  }

  override def postStop(): Unit = {
    log.info(s"BufferTransmitter stopped with ${buffer.length} items unfetched.")
  }
} 
Example 180
Source File: ManualCoordinator.scala    From trucking-iot   with Apache License 2.0
package com.orendainx.trucking.simulator.coordinators

import akka.actor.{ActorLogging, ActorRef, Props}
import com.orendainx.trucking.simulator.coordinators.GeneratorCoordinator.AcknowledgeTick
import com.orendainx.trucking.simulator.coordinators.ManualCoordinator.Tick
import com.orendainx.trucking.simulator.generators.DataGenerator
import com.typesafe.config.Config

import scala.collection.mutable


object ManualCoordinator {

  case object Tick

  def props(generators: Seq[ActorRef])(implicit config: Config) =
    Props(new ManualCoordinator(generators))
}

class ManualCoordinator(generators: Seq[ActorRef])(implicit config: Config) extends GeneratorCoordinator with ActorLogging {

  // Set all generators as ready
  val generatorsReady = mutable.Set(generators: _*)

  def receive = {
    case AcknowledgeTick(generator) =>
      generatorsReady += generator
      log.debug(s"Generator acknowledged tick - total ready: ${generatorsReady.size}")

    case Tick =>
      generatorsReady.foreach(_ ! DataGenerator.GenerateData)
      generatorsReady.clear()
  }

} 
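A sketch of driving generation by hand, assuming coordinator was created from ManualCoordinator.props(generators); each Tick fans GenerateData out to every generator that has acknowledged the previous tick.

coordinator ! ManualCoordinator.Tick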
Example 181
Source File: AutomaticCoordinator.scala    From trucking-iot   with Apache License 2.0
package com.orendainx.trucking.simulator.coordinators

import akka.actor.{ActorLogging, ActorRef, PoisonPill, Props, Terminated}
import com.orendainx.trucking.simulator.coordinators.AutomaticCoordinator.TickGenerator
import com.orendainx.trucking.simulator.coordinators.GeneratorCoordinator.AcknowledgeTick
import com.orendainx.trucking.simulator.flows.FlowManager
import com.orendainx.trucking.simulator.generators.DataGenerator
import com.typesafe.config.Config

import scala.collection.mutable
import scala.concurrent.duration._
import scala.util.Random


object AutomaticCoordinator {

  case class TickGenerator(generator: ActorRef)

  def props(eventCount: Int, generators: Seq[ActorRef], flowManager: ActorRef)(implicit config: Config) =
    Props(new AutomaticCoordinator(eventCount, generators, flowManager))
}

class AutomaticCoordinator(eventCount: Int, generators: Seq[ActorRef], flowManager: ActorRef)(implicit config: Config) extends GeneratorCoordinator with ActorLogging {

  // For receive messages and an execution context
  import context.dispatcher

  // Event delay settings, and initialize a counter for each data generator
  val eventDelay = config.getInt("generator.event-delay")
  val eventDelayJitter = config.getInt("generator.event-delay-jitter")
  val generateCounters = mutable.Map(generators.map((_, 0)): _*)

  // Insert each new generator into the simulation (at a random scheduled point) and begin "ticking"
  generators.foreach { generator =>
    context.system.scheduler.scheduleOnce(Random.nextInt(eventDelay + eventDelayJitter).milliseconds, self, TickGenerator(generator))
  }

  def receive = {
    case AcknowledgeTick(generator) =>
      self ! TickGenerator(generator) // Each ack triggers another tick

    case TickGenerator(generator) =>
      generateCounters.update(generator, generateCounters(generator)+1)

      if (generateCounters(generator) <= eventCount) {
        context.system.scheduler.scheduleOnce((eventDelay + Random.nextInt(eventDelayJitter)).milliseconds, generator, DataGenerator.GenerateData)
      } else {
        // Kill the individual generator, since we are done with it.
        generator ! PoisonPill

        // If all other generators have met their count, tell flow manager to shutdown
        if (!generateCounters.values.exists(_ <= eventCount)) {
          flowManager ! FlowManager.ShutdownFlow
          context watch flowManager
        }
      }

    // Once the flow manager and its transmitters terminate, shut it all down
    case Terminated(`flowManager`) =>
      context.system.terminate()
  }
} 
Example 182
Source File: TrafficGenerator.scala    From trucking-iot   with Apache License 2.0
package com.orendainx.trucking.simulator.generators

import java.time.Instant

import akka.actor.{ActorLogging, ActorRef, Props, Stash}
import com.orendainx.trucking.commons.models._
import com.orendainx.trucking.simulator.coordinators.GeneratorCoordinator
import com.orendainx.trucking.simulator.depots.ResourceDepot.{RequestRoute, ReturnRoute}
import com.orendainx.trucking.simulator.generators.DataGenerator.{GenerateData, NewResource}
import com.orendainx.trucking.simulator.models._
import com.orendainx.trucking.simulator.transmitters.DataTransmitter.Transmit
import com.orendainx.trucking.simulator.models.{EmptyRoute, Route}
import com.typesafe.config.Config

import scala.collection.mutable
import scala.util.Random


object TrafficGenerator {

  def props(depot: ActorRef, flowManager: ActorRef)(implicit config: Config) =
    Props(new TrafficGenerator(depot, flowManager))
}

class TrafficGenerator(depot: ActorRef, flowManager: ActorRef)(implicit config: Config) extends DataGenerator with Stash with ActorLogging {

  // Some settings
  val NumberOfRoutes = config.getInt("generator.routes-to-simulate")
  val CongestionDelta = config.getInt("generator.congestion.delta")

  var congestionLevel = config.getInt("generator.congestion.start")
  var routes = mutable.Buffer.empty[Route]

  // Request NumberOfRoutes routes
  (1 to NumberOfRoutes).foreach(_ => depot ! RequestRoute(EmptyRoute))

  context become waitingOnDepot

  def waitingOnDepot: Receive = {
    case NewResource(newRoute: Route) =>
      routes += newRoute
      unstashAll()
      context become driverActive
      log.info(s"Received new route: ${newRoute.name}")

    case GenerateData =>
      stash()
      log.debug("Received Tick command while waiting on route. Command stashed for later processing.")
  }

  def driverActive: Receive = {
    case GenerateData =>
      routes.foreach { route =>
        // Create traffic data and emit it
        congestionLevel += -CongestionDelta + Random.nextInt(CongestionDelta*2 + 1)
        val traffic = TrafficData(Instant.now().toEpochMilli, route.id, congestionLevel)
        flowManager ! Transmit(traffic)
      }

      // Tell the coordinator we've acknowledged the drive command
      sender() ! GeneratorCoordinator.AcknowledgeTick(self)
  }

  def receive = {
    case _ => log.error("This message should never be seen.")
  }

  // When this actor is stopped, release resources it may still be holding onto
  override def postStop(): Unit =
    routes.foreach(route => depot ! ReturnRoute(route))
} 
Example 183
Source File: LoadBalancerActor.scala    From reactive-consul   with MIT License
package stormlantern.consul.client.loadbalancers

import akka.actor.Status.Failure
import akka.actor.{ Props, Actor, ActorLogging }
import LoadBalancerActor._
import stormlantern.consul.client.discovery.{ ConnectionProvider, ConnectionHolder }
import stormlantern.consul.client.ServiceUnavailableException
import scala.concurrent.ExecutionContext.Implicits.global
import scala.collection.mutable

class LoadBalancerActor(loadBalancer: LoadBalancer, key: String) extends Actor with ActorLogging {

  import akka.pattern.pipe

  // Actor state
  val connectionProviders = mutable.Map.empty[String, ConnectionProvider]

  override def postStop(): Unit = {
    log.debug(s"LoadBalancerActor for $key stopped, destroying all connection providers")
    connectionProviders.values.foreach(_.destroy())
  }

  def receive: PartialFunction[Any, Unit] = {

    case GetConnection ⇒
      selectConnection match {
        case Some((id, connectionProvider)) ⇒ connectionProvider.getConnectionHolder(id, self) pipeTo sender
        case None                           ⇒ sender ! Failure(ServiceUnavailableException(key))
      }
    case ReturnConnection(connection)        ⇒ returnConnection(connection)
    case AddConnectionProvider(id, provider) ⇒ addConnectionProvider(id, provider)
    case RemoveConnectionProvider(id)        ⇒ removeConnectionProvider(id)
    case HasAvailableConnectionProvider      ⇒ sender ! connectionProviders.nonEmpty
  }

  def selectConnection: Option[(String, ConnectionProvider)] =
    loadBalancer.selectConnection.flatMap(id ⇒ connectionProviders.get(id).map(id → _))

  def returnConnection(connection: ConnectionHolder): Unit = {
    connectionProviders.get(connection.id).foreach(_.returnConnection(connection))
    loadBalancer.connectionReturned(connection.id)
  }

  def addConnectionProvider(id: String, provider: ConnectionProvider): Unit = {
    connectionProviders.put(id, provider)
    loadBalancer.connectionProviderAdded(id)
  }

  def removeConnectionProvider(id: String): Unit = {
    connectionProviders.remove(id).foreach(_.destroy())
    loadBalancer.connectionProviderRemoved(id)
  }
}

object LoadBalancerActor {
  // Props
  def props(loadBalancer: LoadBalancer, key: String) = Props(new LoadBalancerActor(loadBalancer, key))
  // Messages
  case object GetConnection
  case class ReturnConnection(connection: ConnectionHolder)
  case class AddConnectionProvider(id: String, provider: ConnectionProvider)
  case class RemoveConnectionProvider(id: String)
  case object HasAvailableConnectionProvider
} 
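
A short protocol sketch. The LoadBalancer implementation is left as a hypothetical placeholder; GetConnection answers with a ConnectionHolder (or Status.Failure) and HasAvailableConnectionProvider with a Boolean, so both pair naturally with ask.

import akka.actor.ActorSystem
import akka.pattern.ask
import akka.util.Timeout
import scala.concurrent.duration._
import stormlantern.consul.client.loadbalancers.{LoadBalancer, LoadBalancerActor}
import stormlantern.consul.client.loadbalancers.LoadBalancerActor._

object LoadBalancerUsage extends App {
  implicit val system: ActorSystem = ActorSystem()
  implicit val timeout: Timeout = Timeout(5.seconds)
  import system.dispatcher

  val myLoadBalancer: LoadBalancer = ??? // hypothetical: any LoadBalancer implementation
  val balancer = system.actorOf(LoadBalancerActor.props(myLoadBalancer, "search-service"))
  (balancer ? HasAvailableConnectionProvider).mapTo[Boolean].foreach(println)
}
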
Example 184
Source File: CodebaseAnalyzeAggregatorActor.scala    From CodeAnalyzerTutorial   with Apache License 2.0
package tutor

import java.util.Date

import akka.actor.{Actor, ActorLogging, ActorRef, Cancellable, Props, Terminated}
import akka.routing.{ActorRefRoutee, RoundRobinRoutingLogic, Router}
import tutor.CodebaseAnalyzeAggregatorActor.{AnalyzeDirectory, Complete, Report, Timeout}
import tutor.SourceCodeAnalyzerActor.NewFile
import tutor.utils.BenchmarkUtil

import scala.concurrent.duration._
import scala.util.{Failure, Success, Try}

object CodebaseAnalyzeAggregatorActor {
  def props(): Props = Props(new CodebaseAnalyzeAggregatorActor)

  final case class AnalyzeDirectory(path: String)

  final case class Complete(result: Try[SourceCodeInfo])

  final case object Timeout

  final case class Report(codebaseInfo: CodebaseInfo)

}

class CodebaseAnalyzeAggregatorActor extends Actor with ActorLogging with DirectoryScanner with ReportFormatter {
  var controller: ActorRef = _
  var currentPath: String = _
  var beginTime: Date = _
  var fileCount = 0
  var completeCount = 0
  var failCount = 0
  var result: CodebaseInfo = CodebaseInfo.empty
  var timeoutTimer: Cancellable = _

  var router: Router = {
    val routees = Vector.fill(8) {
      val r = context.actorOf(SourceCodeAnalyzerActor.props())
      context watch r
      ActorRefRoutee(r)
    }
    Router(RoundRobinRoutingLogic(), routees)
  }

  override def receive: Receive = {
    case AnalyzeDirectory(path) => {
      controller = sender()
      currentPath = path
      beginTime = BenchmarkUtil.recordStart(s"analyze folder $currentPath")
      foreachFile(path, PresetFilters.knownFileTypes, PresetFilters.ignoreFolders) { file =>
        fileCount += 1
        router.route(NewFile(file.getAbsolutePath), context.self)
      }
      import context.dispatcher
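      // Note: integer division, so fewer than 1000 files gives a 0-second
      // timeout and the Timeout message fires almost immediately.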
      timeoutTimer = context.system.scheduler.scheduleOnce((fileCount / 1000).seconds, context.self, Timeout)
    }
    case Complete(Success(sourceCodeInfo: SourceCodeInfo)) => {
      completeCount += 1
      result = result + sourceCodeInfo
      finishIfAllComplete()
    }
    case Complete(Failure(exception)) => {
      completeCount += 1
      failCount += 1
      log.warning("processing file failed {}", exception)
      finishIfAllComplete()
    }
    case Timeout => {
      println(s"${result.totalFileNums} of $fileCount files processed before timeout")
      controller ! Report(result)
      BenchmarkUtil.recordElapse(s"analyze folder $currentPath", beginTime)
    }
    case Terminated(a) =>
      router = router.removeRoutee(a)
      val r = context.actorOf(Props[SourceCodeAnalyzerActor])
      context watch r
      router = router.addRoutee(r)
    case x => log.error(s"received unknown message $x")
  }

  def finishIfAllComplete(): Unit = {
    if (completeCount == fileCount) {
      timeoutTimer.cancel()
      controller ! Report(result)
      BenchmarkUtil.recordElapse(s"analyze folder $currentPath", beginTime)
      context.stop(self)
    }
  }
} 
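
A usage sketch: the aggregator treats the sender of AnalyzeDirectory as the controller and replies to it with a Report, so ask fits; the path below is only an example.

import akka.actor.ActorSystem
import akka.pattern.ask
import akka.util.Timeout
import scala.concurrent.duration._
import tutor.CodebaseAnalyzeAggregatorActor
import tutor.CodebaseAnalyzeAggregatorActor.{AnalyzeDirectory, Report}

object AnalyzeUsage extends App {
  implicit val timeout: Timeout = Timeout(60.seconds)
  val system = ActorSystem("analyzer")
  import system.dispatcher

  val aggregator = system.actorOf(CodebaseAnalyzeAggregatorActor.props())
  (aggregator ? AnalyzeDirectory("src/main/scala")).mapTo[Report]
    .foreach(report => println(report.codebaseInfo))
}
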
Example 185
Source File: SourceCodeAnalyzerActor.scala    From CodeAnalyzerTutorial   with Apache License 2.0
package tutor

import akka.actor.{Actor, ActorLogging, Props}
import tutor.CodebaseAnalyzeAggregatorActor.Complete
import tutor.SourceCodeAnalyzerActor.NewFile


object SourceCodeAnalyzerActor {
  def props(): Props = Props(new SourceCodeAnalyzerActor)

  final case class NewFile(path: String)

}

class SourceCodeAnalyzerActor extends Actor with ActorLogging with SourceCodeAnalyzer {
  override def receive: Receive = {
    case NewFile(path) => {
      val sourceCodeInfo = processFile(path)
      sender() ! Complete(sourceCodeInfo)
    }
  }
} 
Example 186
Source File: CamelActorPublisher.scala    From reactive-activemq   with Apache License 2.0
package akka.stream.integration
package camel

import akka.actor.{ ActorLogging, ActorRef, Props }
import akka.camel.{ CamelMessage, Consumer }
import akka.event.LoggingReceive
import akka.stream.actor.ActorPublisher
import akka.stream.actor.ActorPublisherMessage.Cancel
import akka.stream.scaladsl.Source

class CamelActorPublisher(val endpointUri: String) extends Consumer with ActorPublisher[(ActorRef, CamelMessage)] with ActorLogging {
  override val autoAck: Boolean = false

  override def receive: Receive = LoggingReceive {
    case _: CamelMessage if totalDemand == 0 =>
      sender() ! akka.actor.Status.Failure(new IllegalStateException("No demand for new messages"))

    case msg: CamelMessage => onNext((sender(), msg))

    case Cancel            => context stop self
  }
}

class CamelActorPublisherWithExtractor[A: CamelMessageExtractor](val endpointUri: String) extends Consumer with ActorPublisher[(ActorRef, A)] with ActorLogging {
  override val autoAck: Boolean = false

  override def receive: Receive = LoggingReceive {
    case _: CamelMessage if totalDemand == 0 =>
      sender() ! akka.actor.Status.Failure(new IllegalStateException("No demand for new messages"))

    case msg: CamelMessage =>
      try {
        onNext((sender(), implicitly[CamelMessageExtractor[A]].extract(msg)))
      } catch {
        case t: Throwable =>
          log.error(t, "Removing message from the broker because of error while extracting the message")
          sender() ! akka.camel.Ack
      }

    case Cancel => context stop self
  }
}

object CamelActorPublisher {
  def fromEndpointUri(endpointUri: String): Source[AckRefTup[CamelMessage], ActorRef] =
    Source.actorPublisher[AckRefTup[CamelMessage]](Props(new CamelActorPublisher(endpointUri)))

  def fromEndpointUriWithExtractor[A: CamelMessageExtractor](endpointUri: String): Source[AckRefTup[A], ActorRef] =
    Source.actorPublisher[AckRefTup[A]](Props(new CamelActorPublisherWithExtractor(endpointUri)))
} 
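
A stream-wiring sketch, assuming the project's AckRefTup alias pairs the Camel sender with the message (as onNext above suggests); the endpoint URI is an arbitrary example and needs a matching Camel component on the classpath.

import akka.actor.ActorSystem
import akka.camel.Ack
import akka.stream.ActorMaterializer

object CamelPublisherUsage extends App {
  implicit val system = ActorSystem()
  implicit val mat    = ActorMaterializer()

  // "activemq:queue:test" is only an example endpoint URI.
  CamelActorPublisher.fromEndpointUri("activemq:queue:test")
    .runForeach { case (camelSender, msg) =>
      println(msg.body)
      camelSender ! Ack // manual ack, since autoAck = false
    }
}
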
Example 187
Source File: RouterActor.scala    From BusFloatingData   with Apache License 2.0
package de.nierbeck.floating.data.server.actors.websocket

import akka.actor.{Actor, ActorLogging}
import akka.routing.{AddRoutee, RemoveRoutee, Routee}


class RouterActor extends Actor with ActorLogging {
  var routees = Set[Routee]()

  def receive: Receive = {
    case ar: AddRoutee => {
      log.info(s"add routee ${ar.routee}")
      routees = routees + ar.routee
    }
    case rr: RemoveRoutee => {
      log.info(s"remove routee ${rr.routee}")
      routees = routees - rr.routee
    }
    case msg:Any => {
      routees.foreach(_.send(msg, sender))
    }
  }
} 
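
A wiring sketch: the router fans every non-routing message out to its registered routees; here a trivial printer actor is registered via AddRoutee.

import akka.actor.{Actor, ActorSystem, Props}
import akka.routing.{ActorRefRoutee, AddRoutee}
import de.nierbeck.floating.data.server.actors.websocket.RouterActor

object RouterUsage extends App {
  val system  = ActorSystem()
  val router  = system.actorOf(Props(new RouterActor), "ws-router")
  val printer = system.actorOf(Props(new Actor {
    def receive: Receive = { case msg => println(s"got: $msg") }
  }))

  router ! AddRoutee(ActorRefRoutee(printer))
  router ! "hello" // broadcast to every registered routee
}
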
Example 188
Source File: DataSourceEndpoint.scala    From akka-visualmailbox   with Apache License 2.0
package de.aktey.akka.visualmailbox.data

import akka.actor.{Actor, ActorLogging, ActorRef, Props}
import akka.io.Udp.Received
import de.aktey.akka.visualmailbox.packing.Packing
import de.aktey.akka.visualmailbox.{MetricEnvelope, VisualMailboxMetric}

import scala.util.{Failure, Success}


class DataSourceEndpoint(router: ActorRef) extends Actor with ActorLogging {
  def receive = {
    case Received(datagram, _) => Packing.unpack[MetricEnvelope](datagram.to[Array]) match {
      case Success(MetricEnvelope(1, payload)) =>
        Packing.unpack[List[VisualMailboxMetric]](payload) match {
          case Success(list) => list.foreach(router ! _)
          case Failure(e) => log.error(e, "unmarshal error")
        }
      case Success(MetricEnvelope(version, _)) => log.warning("unknown protocol version: " + version)
      case Failure(e) => log.error(e, "unmarshal error")
    }
  }
}

object DataSourceEndpoint {
  def props(router: ActorRef) = Props(new DataSourceEndpoint(router))
} 
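
A binding sketch: the endpoint only reacts to Udp.Received, so it is meant to be installed as the handler of a UDP socket. The port below is an arbitrary example, and the router is the MetricsRouter shown in the next example.

import java.net.InetSocketAddress
import akka.actor.ActorSystem
import akka.io.{IO, Udp}
import de.aktey.akka.visualmailbox.MetricsRouter
import de.aktey.akka.visualmailbox.data.DataSourceEndpoint

object DataSourceServer extends App {
  implicit val system = ActorSystem("visualmailbox")

  val router   = system.actorOf(MetricsRouter.props())
  val endpoint = system.actorOf(DataSourceEndpoint.props(router))
  IO(Udp) ! Udp.Bind(endpoint, new InetSocketAddress("0.0.0.0", 60009)) // example port
}
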
Example 189
Source File: MetricsRouter.scala    From akka-visualmailbox   with Apache License 2.0
package de.aktey.akka.visualmailbox

import akka.actor.{Actor, ActorLogging, ActorRef, Props, Terminated}

class MetricsRouter extends Actor with ActorLogging {

  import context._

  var routees: Set[ActorRef] = Set.empty

  override def postStop(): Unit = {
    routees foreach unwatch
  }

  def receive = {
    case registrar: ActorRef =>
      watch(registrar)
      routees += registrar
      if (log.isDebugEnabled) log.debug(s"""{"type":"registered","registered":"$registrar","routees":${routees.size}}""")
    case Terminated(ref) =>
      unwatch(ref)
      routees -= ref
      if (log.isDebugEnabled) log.debug(s"""{"type":"unregistered","terminated":"$ref","routees":${routees.size}}""")
    case msg =>
      routees foreach (_ forward msg)
  }
}

object MetricsRouter {
  def props() = Props(new MetricsRouter)
} 
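
A registration sketch: sending a bare ActorRef registers (and watches) it; everything else is forwarded to all registered routees.

import akka.actor.{Actor, ActorSystem, Props}
import de.aktey.akka.visualmailbox.MetricsRouter

object MetricsRouterUsage extends App {
  val system = ActorSystem()
  val router = system.actorOf(MetricsRouter.props(), "metrics-router")
  val sink = system.actorOf(Props(new Actor {
    def receive: Receive = { case metric => println(metric) }
  }))

  router ! sink          // a bare ActorRef registers the routee
  router ! "some-metric" // anything else is forwarded to all routees
}
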
Example 190
Source File: TypedCreatorExample.scala    From typed-actors   with Apache License 2.0
package org.example

import akka.actor.{ ActorLogging, ActorSystem }
import de.knutwalker.akka.typed._

object TypedCreatorExample extends App {

  case class Ping(replyTo: ActorRef[Pong])
  case class Pong(replyTo: ActorRef[Ping])

  implicit val system = ActorSystem()
  case class PingActor() extends TypedActor.Of[Ping] with ActorLogging {
    private[this] var count = 0

    def typedReceive: TypedReceive = Total {
      case Ping(replyTo) ⇒
        count += 1
        replyTo ! Pong(typedSelf)
    }

    override def postStop(): Unit = {
      log.info(s"pings: $count")
    }
  }

  case class PongActor() extends TypedActor.Of[Pong] with ActorLogging {
    private[this] var count = 0

    def typedReceive: TypedReceive = Total {
      case Pong(replyTo) ⇒
        count += 1
        replyTo ! Ping(typedSelf)
    }

    override def postStop(): Unit = {
      log.info(s"pongs: $count")
    }
  }

  val ping = Typed[PingActor].create()
  val pong = Typed[PongActor].create()

  ping ! Ping(pong)

  Thread.sleep(1000)

  Shutdown(system)
} 
Example 191
Source File: TypedActorExample.scala    From typed-actors   with Apache License 2.0
package org.example

import akka.actor.{ ActorLogging, ActorSystem }
import de.knutwalker.akka.typed._

object TypedActorExample extends App {

  case class Ping(replyTo: ActorRef[Pong])
  case class Pong(replyTo: ActorRef[Ping])

  implicit val system = ActorSystem()

  class PingActor extends TypedActor.Of[Ping] with ActorLogging {
    private[this] var count = 0

    def typedReceive: TypedReceive = Total {
      case Ping(replyTo) ⇒
        count += 1
        replyTo ! Pong(typedSelf)
    }

    override def postStop(): Unit = {
      log.info(s"pings: $count")
    }
  }

  class PongActor extends TypedActor.Of[Pong] with ActorLogging {
    private[this] var count = 0

    def typedReceive: TypedReceive = Total {
      case Pong(replyTo) ⇒
        count += 1
        replyTo ! Ping(typedSelf)
    }

    override def postStop(): Unit = {
      log.info(s"pongs: $count")
    }
  }

  val ping = ActorOf(PropsOf[Ping](new PingActor))
  val pong = ActorOf(PropsOf[Pong](new PongActor))

  ping ! Ping(pong)

  Thread.sleep(1000)

  Shutdown(system)
} 
Example 192
Source File: PersistenceExample.scala    From typed-actors   with Apache License 2.0
package org.example

import akka.actor.{ ActorLogging, ActorSystem }
import akka.persistence.{ SnapshotOffer, PersistentActor }
import de.knutwalker.akka.typed._

import scala.concurrent.duration._

object PersistenceExample extends App {

  case class Ping(replyTo: ActorRef[Pong])
  case class Pong(replyTo: ActorRef[Ping])

  case class Evt(data: String)

  case class ExampleState(events: List[String] = Nil) {
    def updated(evt: Evt): ExampleState = copy(evt.data :: events)
    def size: Int = events.length
    override def toString: String = events.reverse.toString
  }

  class TypedPersistentPingActor extends TypedActor with PersistentActor with ActorLogging {
    type Message = Ping

    def persistenceId: String = "typed-persistent-ping-id"

    var state = ExampleState()

    def updateState(event: Evt): Unit =
      state = state.updated(event)

    def numEvents =
      state.size

    val receiveRecover: Receive = {
      case evt: Evt                                 => updateState(evt)
      case SnapshotOffer(_, snapshot: ExampleState) => state = snapshot
    }

    val typedReceive: TypedReceive = {
      case Ping(replyTo) ⇒
        persist(Evt(s"$numEvents"))(updateState)
        persist(Evt(s"${numEvents + 1}")) { event =>
          updateState(event)
          replyTo ! Pong(typedSelf)
        }
    }

    val receiveCommand: Receive =
      untypedFromTyped(typedReceive).orElse {
        case "snap"  => saveSnapshot(state)
        case "print" => println(state)
      }

    override def receive: Receive =
      receiveCommand

    override def postStop(): Unit = {
      log.info(s"state = $state")
      super.postStop()
    }
  }

  class TypedPongActor extends TypedActor.Of[Pong] with ActorLogging {
    private[this] var count = 0
    val typedReceive: TypedReceive = {
      case Pong(replyTo) ⇒
        count += 1
        replyTo ! Ping(typedSelf)
    }

    override def postStop(): Unit = {
      log.info(s"pings: $count")
      super.postStop()
    }

    override def preStart(): Unit = {
      import context.dispatcher
      super.preStart()
      context.system.scheduler.scheduleOnce(600.millis)(Shutdown(system))
      ()
    }
  }



  implicit val system = ActorSystem()
  val ping = ActorOf(PropsFor[TypedPersistentPingActor], "ping")
  val pong = ActorOf(PropsFor[TypedPongActor], "pong")

  ping ! Ping(pong)
} 
Example 193
Source File: SimpleExample.scala    From typed-actors   with Apache License 2.0
package org.example

import akka.actor.{ Actor, ActorLogging, ActorSystem }
import de.knutwalker.akka.typed._

object SimpleExample extends App {

  case class Ping(replyTo: ActorRef[Pong])
  case class Pong(replyTo: ActorRef[Ping])

  implicit val system = ActorSystem()
  val ping = ActorOf(PropsOf[Ping](new Actor with ActorLogging {
    private[this] var count = 0
    def receive: Receive = {
      case Ping(replyTo) ⇒
        count += 1
        replyTo ! Pong(self.typed)
    }

    override def postStop(): Unit = {
      log.info(s"pings: $count")
    }
  }))
  val pong = ActorOf(PropsOf[Pong](new Actor with ActorLogging {
    private[this] var count = 0

    def receive: Receive = {
      case Pong(replyTo) ⇒
        count += 1
        replyTo ! Ping(self.typed)
    }

    override def postStop(): Unit = {
      log.info(s"pongs: $count")
    }
  }))

  ping ! Ping(pong)

  Thread.sleep(1000)

  Shutdown(system)
} 
Example 194
Source File: PeriodicActor.scala    From dependency   with MIT License
package io.flow.dependency.actors

import javax.inject.Inject
import db.{InternalTasksDao, SyncsDao}
import akka.actor.{Actor, ActorLogging, ActorSystem}
import io.flow.akka.SafeReceive
import io.flow.akka.recurring.{ScheduleConfig, Scheduler}
import io.flow.log.RollbarLogger
import io.flow.play.util.ApplicationConfig
import org.joda.time.DateTime

import scala.concurrent.ExecutionContext

object PeriodicActor {

  sealed trait Message

  object Messages {
    case object Purge extends Message
    case object SyncBinaries extends Message
    case object SyncLibraries extends Message
    case object SyncProjects extends Message
  }

}

class PeriodicActor @Inject()(
  config: ApplicationConfig,
  system: ActorSystem,
  syncsDao: SyncsDao,
  internalTasksDao: InternalTasksDao,
  logger: RollbarLogger
) extends Actor with ActorLogging with Scheduler  {

  private[this] implicit val ec: ExecutionContext = system.dispatchers.lookup("periodic-actor-context")
  private[this] implicit val configuredRollbar: RollbarLogger = logger.fingerprint(getClass.getName)

  private[this] case object SyncAll
  private[this] case object Purge

  scheduleRecurring(
    ScheduleConfig.fromConfig(config.underlying.underlying, "io.flow.dependency.api.periodic.sync_all"),
    SyncAll
  )

  scheduleRecurring(
    ScheduleConfig.fromConfig(config.underlying.underlying, "io.flow.dependency.api.periodic.purge"),
    Purge
  )

  def receive: Receive = SafeReceive.withLogUnhandled {
    case Purge => {
      internalTasksDao.deleteAllNonProcessedTasks(DateTime.now.minusHours(12))
      syncsDao.purgeOld()
    }
    case SyncAll => internalTasksDao.queueAll()
  }

} 
Example 195
Source File: InventoryActor.scala    From Learn-Scala-Programming   with MIT License
package ch14

import akka.actor.{Actor, ActorLogging, Props}
import akka.persistence.{PersistentActor, RecoveryCompleted, SnapshotOffer}
import ch14.Commands.{GetArticle, GetInventory}

object InventoryActor {
  def props: Props = Props[InventoryActor]
  val persistenceId = "Inventory"
}

class InventoryActor extends PersistentActor with Actor with ActorLogging {

  override def persistenceId: String = InventoryActor.persistenceId

  private var inventory: Inventory = Inventory(Map.empty)

  override def receiveRecover: Receive = {
    case event: Event                          => inventory = inventory.update(event)
    case SnapshotOffer(_, snapshot: Inventory) => inventory = snapshot
    case RecoveryCompleted                     => saveSnapshot(inventory)
  }

  override def receiveCommand: Receive = {
    case GetInventory =>
      sender() ! inventory

    case GetArticle(name) =>
      sender() ! Inventory(inventory.state.filter(_._1 == name))

    case cmd: Command =>
      inventory.canUpdate(cmd) match {
        case None =>
          sender() ! None
        case Some(event) =>
          // Capture the sender: persistAsync callbacks run after further
          // commands may have been processed, when sender() may differ.
          val replyTo = sender()
          persistAsync(event) { ev =>
            inventory = inventory.update(ev)
            replyTo ! Some(ev)
          }
      }

  }
} 
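
A query sketch: both read commands answer with an Inventory snapshot, so a plain ask works.

import akka.actor.ActorSystem
import akka.pattern.ask
import akka.util.Timeout
import scala.concurrent.duration._
import ch14.{Inventory, InventoryActor}
import ch14.Commands.GetInventory

object InventoryQuery extends App {
  implicit val timeout: Timeout = Timeout(3.seconds)
  // Note: a persistence journal must be configured for the actor to start.
  val system    = ActorSystem("inventory")
  import system.dispatcher

  val inventory = system.actorOf(InventoryActor.props, "inventory")
  (inventory ? GetInventory).mapTo[Inventory].foreach(println)
}
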
Example 196
Source File: Main.scala    From perf_tester   with Apache License 2.0
package akka

import akka.actor.ActorSystem
import akka.actor.ExtendedActorSystem
import akka.actor.Actor
import akka.actor.Terminated
import akka.actor.ActorLogging
import akka.actor.Props
import akka.actor.ActorRef
import scala.util.control.NonFatal


object Main {

  def main(args: Array[String]): Unit = {
    if (args.length != 1) {
      println("you need to provide exactly one argument: the class of the application supervisor actor")
    } else {
      val system = ActorSystem("Main")
      try {
        val appClass = system.asInstanceOf[ExtendedActorSystem].dynamicAccess.getClassFor[Actor](args(0)).get
        val app = system.actorOf(Props(appClass), "app")
        val terminator = system.actorOf(Props(classOf[Terminator], app), "app-terminator")
      } catch {
        case NonFatal(e) ⇒ system.terminate(); throw e
      }
    }
  }

  class Terminator(app: ActorRef) extends Actor with ActorLogging {
    context watch app
    def receive = {
      case Terminated(_) ⇒
        log.info("application supervisor has terminated, shutting down")
        context.system.terminate()
    }
  }

} 
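
A launch sketch: the single program argument is the fully qualified class name of the top-level supervisor actor; the actor class named below is hypothetical.

object LaunchExample {
  // org.example.HelloActor is a hypothetical Actor subclass; the call is
  // equivalent to running `java -cp <classpath> akka.Main org.example.HelloActor`.
  def main(args: Array[String]): Unit =
    akka.Main.main(Array("org.example.HelloActor"))
}
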
Example 197
Source File: SimpleDnsManager.scala    From perf_tester   with Apache License 2.0
package akka.io

import java.util.concurrent.TimeUnit

import akka.actor.{ ActorLogging, Actor, Deploy, Props }
import akka.dispatch.{ RequiresMessageQueue, UnboundedMessageQueueSemantics }
import akka.routing.FromConfig

import scala.concurrent.duration.Duration

class SimpleDnsManager(val ext: DnsExt) extends Actor with RequiresMessageQueue[UnboundedMessageQueueSemantics] with ActorLogging {

  import context._

  private val resolver = actorOf(FromConfig.props(Props(ext.provider.actorClass, ext.cache, ext.Settings.ResolverConfig).withDeploy(Deploy.local).withDispatcher(ext.Settings.Dispatcher)), ext.Settings.Resolver)
  private val cacheCleanup = ext.cache match {
    case cleanup: PeriodicCacheCleanup ⇒ Some(cleanup)
    case _ ⇒ None
  }

  private val cleanupTimer = cacheCleanup map { _ ⇒
    val interval = Duration(ext.Settings.ResolverConfig.getDuration("cache-cleanup-interval", TimeUnit.MILLISECONDS), TimeUnit.MILLISECONDS)
    system.scheduler.schedule(interval, interval, self, SimpleDnsManager.CacheCleanup)
  }

  override def receive = {
    case r @ Dns.Resolve(name) ⇒
      log.debug("Resolution request for {} from {}", name, sender())
      resolver.forward(r)
    case SimpleDnsManager.CacheCleanup ⇒
      for (c ← cacheCleanup)
        c.cleanup()
  }

  override def postStop(): Unit = {
    for (t ← cleanupTimer) t.cancel()
  }
}

object SimpleDnsManager {
  private case object CacheCleanup
} 
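
A resolution sketch using the classic (pre-2.6) akka.io API this file targets: the manager behind IO(Dns) accepts Dns.Resolve and answers with Dns.Resolved.

import akka.actor.ActorSystem
import akka.io.{Dns, IO}
import akka.pattern.ask
import akka.util.Timeout
import scala.concurrent.duration._

object ResolveExample extends App {
  implicit val system: ActorSystem = ActorSystem()
  implicit val timeout: Timeout = Timeout(5.seconds)
  import system.dispatcher

  (IO(Dns) ? Dns.Resolve("akka.io")).mapTo[Dns.Resolved]
    .foreach(resolved => println(resolved.ipv4))
}
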
Example 198
Source File: PiClusterSingleton.scala    From Pi-Akka-Cluster   with Apache License 2.0
package org.neopixel

import akka.actor.{Actor, ActorLogging, Props}

object PiClusterSingleton {

  def props(strip: Adafruit_NeoPixel.type,logicalToPhysicalLEDMapping: Int => Int): Props =
    Props(new PiClusterSingleton(strip, logicalToPhysicalLEDMapping))
}

class PiClusterSingleton(strip: Adafruit_NeoPixel.type, logicalToPhysicalLEDMapping: Int => Int) extends Actor with ActorLogging {

  override def receive: Receive = akka.actor.Actor.emptyBehavior

  override def preStart(): Unit = {
    log.info(s"ClusterSingleton started")
    setPixelColorAndShow(strip, logicalToPhysicalLEDMapping(6), LightBlue)
    super.preStart()
  }

  override def postStop(): Unit = {
    log.info(s"ClusterSingleton stopped")
    setPixelColorAndShow(strip, logicalToPhysicalLEDMapping(6), Black)
    super.postStop()
  }

  private def setPixelColorAndShow(strip: Adafruit_NeoPixel.type ,
                                   ledNumber: Int,
                                   ledColor: Long): Unit = {
    strip.setPixelColor(ledNumber, ledColor)
    strip.show()
  }

} 