akka.actor.Actor Scala Examples
The following examples show how to use akka.actor.Actor.
Example 1
Source File: TestAkka.scala From DataXServer with Apache License 2.0
package org.tianlangstudio.data.hamal.akka

import akka.actor.{Props, ActorSystem, Actor}
import akka.actor.Actor.Receive

/**
 * Created by zhuhq on 2016/5/5.
 */
object TestAkka extends App {
  val system = ActorSystem("test")
  val actor = system.actorOf(Props(classOf[TestAkka]))
  for (i <- 0 to 10) {
    actor ! Remove()
    actor ! Add()
  }
}

class TestAkka extends Actor {
  override def receive: Receive = {
    case Remove() =>
      println("remove begin")
      // Note: math.ceil(math.random) is (almost) always 1.0, so this sleeps a constant
      // ~10 seconds; math.random * 10 was probably intended for a random delay.
      Thread.sleep((1000 * math.ceil(math.random) * 10).toLong)
      println("remove end")
    case Add() =>
      println("add begin")
      Thread.sleep((1000 * math.ceil(math.random) * 10).toLong)
      println("add end")
  }
}

case class Remove()
case class Add()
Example 2
Source File: AkkaExecutionSequencer.scala From daml with Apache License 2.0
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.grpc.adapter

import akka.Done
import akka.actor.{Actor, ActorLogging, ActorRef, ActorSystem, ExtendedActorSystem, Props}
import akka.pattern.{AskTimeoutException, ask}
import akka.util.Timeout
import com.daml.grpc.adapter.RunnableSequencingActor.ShutdownRequest

import scala.concurrent.duration.FiniteDuration
import scala.concurrent.{ExecutionContext, Future}
import scala.util.control.NonFatal

import com.daml.dec.DirectExecutionContext

// The class declaration was truncated in the original listing; this header is
// reconstructed from the companion object below, which builds the instance with
// an ActorRef and an implicit Timeout.
class AkkaExecutionSequencer private (actorRef: ActorRef)(implicit terminationTimeout: Timeout) {

  def closeAsync(implicit ec: ExecutionContext): Future[Done] =
    (actorRef ? ShutdownRequest).mapTo[Done].recover {
      case askTimeoutException: AskTimeoutException if actorIsTerminated(askTimeoutException) => Done
    }

  private def actorIsTerminated(askTimeoutException: AskTimeoutException) = {
    AkkaExecutionSequencer.actorTerminatedRegex.findFirstIn(askTimeoutException.getMessage).nonEmpty
  }
}

object AkkaExecutionSequencer {
  def apply(name: String, terminationTimeout: FiniteDuration)(
      implicit system: ActorSystem): AkkaExecutionSequencer = {
    system match {
      case extendedSystem: ExtendedActorSystem =>
        new AkkaExecutionSequencer(
          extendedSystem.systemActorOf(Props[RunnableSequencingActor], name))(
          Timeout.durationToTimeout(terminationTimeout))
      case _ =>
        new AkkaExecutionSequencer(system.actorOf(Props[RunnableSequencingActor], name))(
          Timeout.durationToTimeout(terminationTimeout))
    }
  }

  private val actorTerminatedRegex = """Recipient\[.*]\] had already been terminated.""".r
}

private[grpc] class RunnableSequencingActor extends Actor with ActorLogging {
  @SuppressWarnings(Array("org.wartremover.warts.Any"))
  override val receive: Receive = {
    case runnable: Runnable =>
      try {
        runnable.run()
      } catch {
        case NonFatal(t) => log.error("Unexpected exception while executing Runnable", t)
      }
    case ShutdownRequest =>
      context.stop(self) // processing of the current message will continue
      sender() ! Done
  }
}

private[grpc] object RunnableSequencingActor {
  case object ShutdownRequest
}
Example 3
Source File: AkkaResourceOwnerSpec.scala From daml with Apache License 2.0
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.resources.akka

import akka.actor.{Actor, ActorSystem, Props}
import akka.stream.Materializer
import akka.stream.scaladsl.{Keep, Sink, Source}
import akka.{Done, NotUsed}
import com.daml.resources.ResourceOwner
import org.scalatest.{AsyncWordSpec, Matchers}

import scala.concurrent.{Future, Promise}

class AkkaResourceOwnerSpec extends AsyncWordSpec with Matchers {
  "a function returning an ActorSystem" should {
    "convert to a ResourceOwner" in {
      val testPromise = Promise[Int]()
      class TestActor extends Actor {
        @SuppressWarnings(Array("org.wartremover.warts.Any"))
        override def receive: Receive = {
          case value: Int => testPromise.success(value)
          case value => testPromise.failure(new IllegalArgumentException(s"$value"))
        }
      }

      val resource = for {
        actorSystem <- AkkaResourceOwner
          .forActorSystem(() => ActorSystem("TestActorSystem"))
          .acquire()
        actor <- ResourceOwner
          .successful(actorSystem.actorOf(Props(new TestActor)))
          .acquire()
      } yield (actorSystem, actor)

      for {
        resourceFuture <- resource.asFuture
        (actorSystem, actor) = resourceFuture
        _ = actor ! 7
        result <- testPromise.future
        _ <- resource.release()
      } yield {
        result should be(7)
        an[IllegalStateException] should be thrownBy actorSystem.actorOf(Props(new TestActor))
      }
    }
  }

  "a function returning a Materializer" should {
    "convert to a ResourceOwner" in {
      val resource = for {
        actorSystem <- AkkaResourceOwner
          .forActorSystem(() => ActorSystem("TestActorSystem"))
          .acquire()
        materializer <- AkkaResourceOwner.forMaterializer(() => Materializer(actorSystem)).acquire()
      } yield materializer

      for {
        materializer <- resource.asFuture
        numbers <- Source(1 to 10)
          .toMat(Sink.seq)(Keep.right[NotUsed, Future[Seq[Int]]])
          .run()(materializer)
        _ <- resource.release()
      } yield {
        numbers should be(1 to 10)
        an[IllegalStateException] should be thrownBy Source
          .single(0)
          .toMat(Sink.ignore)(Keep.right[NotUsed, Future[Done]])
          .run()(materializer)
      }
    }
  }
}
Example 4
Source File: ChaosInterface.scala From eventuate-chaos with Apache License 2.0
package com.rbmhtechnology.eventuate.chaos

import java.net.InetSocketAddress

import akka.actor.Actor
import akka.actor.ActorLogging
import akka.actor.ActorRef
import akka.io.IO
import akka.io.Tcp
import akka.util.ByteString

abstract class ChaosInterface extends Actor with ActorLogging {

  val port = 8080
  val endpoint = new InetSocketAddress(port)
  val command = """(?s)(\w+)\s+(\d+).*""".r

  implicit val ec = context.dispatcher

  IO(Tcp)(context.system) ! Tcp.Bind(self, endpoint)
  println(s"Now listening on port $port")

  def handleCommand: PartialFunction[(String, Option[Int], ActorRef), Unit]

  protected def reply(message: String, receiver: ActorRef) = {
    receiver ! Tcp.Write(ByteString(message))
    receiver ! Tcp.Close
  }

  protected def closeOnError(receiver: ActorRef): PartialFunction[Throwable, Unit] = {
    case err: Throwable => receiver ! Tcp.Close
  }

  def receive: Receive = {
    case Tcp.Connected(remote, _) =>
      sender ! Tcp.Register(self)

    case Tcp.Received(bs) =>
      val content = bs.utf8String

      content match {
        case command(c, value) if handleCommand.isDefinedAt(c, Some(value.toInt), sender) =>
          handleCommand(c, Some(value.toInt), sender)
        case c if c.startsWith("quit") =>
          context.system.terminate()
        case c if handleCommand.isDefinedAt(c, None, sender) =>
          handleCommand(c, None, sender)
        case _ =>
          sender ! Tcp.Close
      }

    case Tcp.Closed =>
    case Tcp.PeerClosed =>
  }
}
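ChaosInterface leaves only handleCommand abstract; the TCP binding, command parsing, and reply plumbing are inherited. A minimal sketch of a concrete subclass — the EchoChaosInterface name and its two commands are hypothetical:

import akka.actor.ActorRef

// Hypothetical subclass for illustration: answers "delay <n>" and "status" over TCP.
class EchoChaosInterface extends ChaosInterface {
  def handleCommand: PartialFunction[(String, Option[Int], ActorRef), Unit] = {
    case ("delay", Some(n), receiver) => reply(s"delaying for $n\n", receiver)
    case ("status", None, receiver)   => reply("ok\n", receiver)
  }
}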
Example 5
Source File: RouterMetricsTestActor.scala From prometheus-akka with Apache License 2.0
package com.workday.prometheus.akka

import scala.concurrent.duration.Duration

import akka.actor.Actor

class RouterMetricsTestActor extends Actor {
  import RouterMetricsTestActor._

  override def receive = {
    case Discard ⇒
    case Fail    ⇒ throw new ArithmeticException("Division by zero.")
    case Ping    ⇒ sender ! Pong
    case RouterTrackTimings(sendTimestamp, sleep) ⇒ {
      val dequeueTimestamp = System.nanoTime()
      sleep.map(s ⇒ Thread.sleep(s.toMillis))
      val afterReceiveTimestamp = System.nanoTime()
      sender ! RouterTrackedTimings(sendTimestamp, dequeueTimestamp, afterReceiveTimestamp)
    }
  }
}

object RouterMetricsTestActor {
  case object Ping
  case object Pong
  case object Fail
  case object Discard

  case class RouterTrackTimings(sendTimestamp: Long = System.nanoTime(), sleep: Option[Duration] = None)

  case class RouterTrackedTimings(sendTimestamp: Long, dequeueTimestamp: Long, afterReceiveTimestamp: Long) {
    def approximateTimeInMailbox: Long = dequeueTimestamp - sendTimestamp
    def approximateProcessingTime: Long = afterReceiveTimestamp - dequeueTimestamp
  }
}
Example 6
Source File: EnvelopeSpec.scala From prometheus-akka with Apache License 2.0
package akka.monitor.instrumentation

import com.workday.prometheus.akka.TestKitBaseSpec

import akka.actor.{Actor, ExtendedActorSystem, Props}
import akka.dispatch.Envelope

class EnvelopeSpec extends TestKitBaseSpec("envelope-spec") {

  "EnvelopeInstrumentation" should {
    "mixin EnvelopeContext" in {
      val actorRef = system.actorOf(Props[NoReply])
      val env = Envelope("msg", actorRef, system).asInstanceOf[Object]
      env match {
        case e: Envelope with InstrumentedEnvelope => e.setEnvelopeContext(EnvelopeContext())
        case _ => fail("InstrumentedEnvelope is not mixed in")
      }
      env match {
        case s: Serializable => {
          import java.io._
          val bos = new ByteArrayOutputStream
          val oos = new ObjectOutputStream(bos)
          oos.writeObject(env)
          oos.close()
          akka.serialization.JavaSerializer.currentSystem.withValue(system.asInstanceOf[ExtendedActorSystem]) {
            val ois = new ObjectInputStream(new ByteArrayInputStream(bos.toByteArray()))
            val obj = ois.readObject()
            ois.close()
            obj match {
              case e: Envelope with InstrumentedEnvelope => e.envelopeContext() should not be null
              case _ => fail("InstrumentedEnvelope is not mixed in")
            }
          }
        }
        case _ => fail("envelope is not serializable")
      }
    }
  }
}

class NoReply extends Actor {
  override def receive = {
    case any ⇒
  }
}
Example 7
Source File: CommitMarkerOffsetsActor.scala From kmq with Apache License 2.0
package com.softwaremill.kmq.redelivery

import akka.actor.Actor
import com.softwaremill.kmq.KafkaClients
import com.typesafe.scalalogging.StrictLogging
import org.apache.kafka.clients.consumer.OffsetAndMetadata
import org.apache.kafka.common.TopicPartition
import org.apache.kafka.common.serialization.ByteArrayDeserializer

import scala.collection.JavaConverters._
import scala.concurrent.duration._

class CommitMarkerOffsetsActor(markerTopic: String, clients: KafkaClients) extends Actor with StrictLogging {

  private val consumer = clients.createConsumer(null, classOf[ByteArrayDeserializer], classOf[ByteArrayDeserializer])

  private var toCommit: Map[Partition, Offset] = Map()

  import context.dispatcher

  override def preStart(): Unit = {
    logger.info("Started commit marker offsets actor")
  }

  override def postStop(): Unit = {
    try consumer.close()
    catch {
      case e: Exception => logger.error("Cannot close commit offsets consumer", e)
    }

    logger.info("Stopped commit marker offsets actor")
  }

  override def receive: Receive = {
    case CommitOffset(p, o) =>
      // only updating if the current offset is smaller
      if (toCommit.get(p).fold(true)(_ < o)) toCommit += p -> o

    case DoCommit =>
      try {
        commitOffsets()
        toCommit = Map()
      } finally context.system.scheduler.scheduleOnce(1.second, self, DoCommit)
  }

  private def commitOffsets(): Unit = if (toCommit.nonEmpty) {
    consumer.commitSync(toCommit.map {
      case (partition, offset) =>
        (new TopicPartition(markerTopic, partition), new OffsetAndMetadata(offset))
    }.asJava)

    logger.debug(s"Committed marker offsets: $toCommit")
  }
}
Example 8
Source File: RedeliverActor.scala From kmq with Apache License 2.0
package com.softwaremill.kmq.redelivery

import akka.actor.Actor
import com.softwaremill.kmq.MarkerKey
import com.typesafe.scalalogging.StrictLogging

import scala.concurrent.duration._

class RedeliverActor(p: Partition, redeliverer: Redeliverer) extends Actor with StrictLogging {

  private var toRedeliver: List[MarkerKey] = Nil

  import context.dispatcher

  override def preStart(): Unit = {
    logger.info(s"${self.path} Started redeliver actor for partition $p")
  }

  override def postStop(): Unit = {
    try redeliverer.close()
    catch {
      case e: Exception => logger.error(s"Cannot close redeliverer for partition $p", e)
    }

    logger.info(s"${self.path} Stopped redeliver actor for partition $p")
  }

  override def receive: Receive = {
    case RedeliverMarkers(m) =>
      toRedeliver ++= m

    case DoRedeliver =>
      val hadRedeliveries = toRedeliver.nonEmpty
      try {
        redeliverer.redeliver(toRedeliver)
        toRedeliver = Nil
      } finally {
        if (hadRedeliveries) {
          self ! DoRedeliver
        } else {
          context.system.scheduler.scheduleOnce(1.second, self, DoRedeliver)
        }
      }
  }
}
Example 9
Source File: KnownNodesManager.scala From mantis with Apache License 2.0
package io.iohk.ethereum.network

import java.net.URI

import akka.actor.{Actor, ActorLogging, Props, Scheduler}
import io.iohk.ethereum.db.storage.KnownNodesStorage
import io.iohk.ethereum.network.KnownNodesManager.KnownNodesManagerConfig

import scala.concurrent.duration._
import scala.concurrent.ExecutionContext.Implicits.global

class KnownNodesManager(
    config: KnownNodesManagerConfig,
    knownNodesStorage: KnownNodesStorage,
    externalSchedulerOpt: Option[Scheduler] = None)
  extends Actor with ActorLogging {

  import KnownNodesManager._

  private def scheduler = externalSchedulerOpt getOrElse context.system.scheduler

  var knownNodes: Set[URI] = knownNodesStorage.getKnownNodes()

  var toAdd: Set[URI] = Set.empty
  var toRemove: Set[URI] = Set.empty

  scheduler.schedule(config.persistInterval, config.persistInterval, self, PersistChanges)

  override def receive: Receive = {
    case AddKnownNode(uri) =>
      if (!knownNodes.contains(uri)) {
        knownNodes += uri
        toAdd += uri
        toRemove -= uri
      }

    case RemoveKnownNode(uri) =>
      if (knownNodes.contains(uri)) {
        knownNodes -= uri
        toAdd -= uri
        toRemove += uri
      }

    case GetKnownNodes =>
      sender() ! KnownNodes(knownNodes)

    case PersistChanges =>
      persistChanges()
  }

  private def persistChanges(): Unit = {
    log.debug(s"Persisting ${knownNodes.size} known nodes.")
    if (knownNodes.size > config.maxPersistedNodes) {
      val toAbandon = knownNodes.take(knownNodes.size - config.maxPersistedNodes)
      toRemove ++= toAbandon
      toAdd --= toAbandon
    }
    if (toAdd.nonEmpty || toRemove.nonEmpty) {
      knownNodesStorage.updateKnownNodes(toAdd = toAdd, toRemove = toRemove)
      toAdd = Set.empty
      toRemove = Set.empty
    }
  }
}

object KnownNodesManager {

  def props(config: KnownNodesManagerConfig, knownNodesStorage: KnownNodesStorage): Props =
    Props(new KnownNodesManager(config, knownNodesStorage))

  case class AddKnownNode(uri: URI)
  case class RemoveKnownNode(uri: URI)
  case object GetKnownNodes
  case class KnownNodes(nodes: Set[URI])

  private case object PersistChanges

  case class KnownNodesManagerConfig(persistInterval: FiniteDuration, maxPersistedNodes: Int)

  object KnownNodesManagerConfig {
    def apply(etcClientConfig: com.typesafe.config.Config): KnownNodesManagerConfig = {
      val knownNodesManagerConfig = etcClientConfig.getConfig("network.known-nodes")
      KnownNodesManagerConfig(
        persistInterval = knownNodesManagerConfig.getDuration("persist-interval").toMillis.millis,
        maxPersistedNodes = knownNodesManagerConfig.getInt("max-persisted-nodes"))
    }
  }
}
Example 10
Source File: ServerActor.scala From mantis with Apache License 2.0
package io.iohk.ethereum.network

import java.net.InetSocketAddress

import akka.actor.{Actor, ActorLogging, ActorRef, Props}
import akka.agent.Agent
import akka.io.Tcp.{Bind, Bound, CommandFailed, Connected}
import akka.io.{IO, Tcp}
import io.iohk.ethereum.utils.{NodeStatus, ServerStatus}
import org.spongycastle.util.encoders.Hex

class ServerActor(nodeStatusHolder: Agent[NodeStatus], peerManager: ActorRef) extends Actor with ActorLogging {

  import ServerActor._
  import context.system

  override def receive: Receive = {
    case StartServer(address) =>
      IO(Tcp) ! Bind(self, address)
      context become waitingForBindingResult
  }

  def waitingForBindingResult: Receive = {
    case Bound(localAddress) =>
      val nodeStatus = nodeStatusHolder()
      log.info("Listening on {}", localAddress)
      log.info("Node address: enode://{}@{}:{}",
        Hex.toHexString(nodeStatus.nodeId),
        getHostName(localAddress.getAddress),
        localAddress.getPort)
      nodeStatusHolder.send(_.copy(serverStatus = ServerStatus.Listening(localAddress)))
      context become listening

    case CommandFailed(b: Bind) =>
      log.warning("Binding to {} failed", b.localAddress)
      context stop self
  }

  def listening: Receive = {
    case Connected(remoteAddress, _) =>
      val connection = sender()
      peerManager ! PeerManagerActor.HandlePeerConnection(connection, remoteAddress)
  }
}

object ServerActor {
  def props(nodeStatusHolder: Agent[NodeStatus], peerManager: ActorRef): Props =
    Props(new ServerActor(nodeStatusHolder, peerManager))

  case class StartServer(address: InetSocketAddress)
}
Example 11
Source File: OmmersPool.scala From mantis with Apache License 2.0
package io.iohk.ethereum.ommers

import akka.actor.{Actor, Props}
import io.iohk.ethereum.domain.{BlockHeader, Blockchain}
import io.iohk.ethereum.ommers.OmmersPool.{AddOmmers, GetOmmers, RemoveOmmers}
import io.iohk.ethereum.utils.MiningConfig

class OmmersPool(blockchain: Blockchain, miningConfig: MiningConfig) extends Actor {

  var ommersPool: Seq[BlockHeader] = Nil

  val ommerGenerationLimit: Int = 6 // Stated on section 11.1, eq. (143) of the YP
  val ommerSizeLimit: Int = 2

  override def receive: Receive = {
    case AddOmmers(ommers) =>
      ommersPool = (ommers ++ ommersPool).take(miningConfig.ommersPoolSize).distinct

    case RemoveOmmers(ommers) =>
      val toDelete = ommers.map(_.hash).toSet
      ommersPool = ommersPool.filter(b => !toDelete.contains(b.hash))

    case GetOmmers(blockNumber) =>
      val ommers = ommersPool.filter { b =>
        val generationDifference = blockNumber - b.number
        generationDifference > 0 && generationDifference <= ommerGenerationLimit
      }.filter { b =>
        blockchain.getBlockHeaderByHash(b.parentHash).isDefined
      }.take(ommerSizeLimit)
      sender() ! OmmersPool.Ommers(ommers)
  }
}

object OmmersPool {
  def props(blockchain: Blockchain, miningConfig: MiningConfig): Props =
    Props(new OmmersPool(blockchain, miningConfig))

  case class AddOmmers(ommers: List[BlockHeader])

  object AddOmmers {
    def apply(b: BlockHeader*): AddOmmers = AddOmmers(b.toList)
  }

  case class RemoveOmmers(ommers: List[BlockHeader])

  object RemoveOmmers {
    def apply(b: BlockHeader*): RemoveOmmers = RemoveOmmers(b.toList)
  }

  case class GetOmmers(blockNumber: BigInt)

  case class Ommers(headers: Seq[BlockHeader])
}
Example 12
Source File: BlacklistSupport.scala From mantis with Apache License 2.0
package io.iohk.ethereum.blockchain.sync

import scala.concurrent.duration.FiniteDuration

import akka.actor.{Actor, ActorLogging, Cancellable, Scheduler}
import io.iohk.ethereum.network.PeerId

import scala.concurrent.ExecutionContext.Implicits.global

trait BlacklistSupport {
  selfActor: Actor with ActorLogging =>

  import BlacklistSupport._

  def scheduler: Scheduler

  var blacklistedPeers: Seq[(PeerId, Cancellable)] = Nil

  def blacklist(peerId: PeerId, duration: FiniteDuration, reason: String): Unit = {
    undoBlacklist(peerId)
    log.debug(s"Blacklisting peer ($peerId), $reason")
    val unblacklistCancellable = scheduler.scheduleOnce(duration, self, UnblacklistPeer(peerId))
    blacklistedPeers :+= (peerId, unblacklistCancellable)
  }

  def undoBlacklist(peerId: PeerId): Unit = {
    blacklistedPeers.find(_._1 == peerId).foreach(_._2.cancel())
    blacklistedPeers = blacklistedPeers.filterNot(_._1 == peerId)
  }

  def isBlacklisted(peerId: PeerId): Boolean =
    blacklistedPeers.exists(_._1 == peerId)

  def handleBlacklistMessages: Receive = {
    case UnblacklistPeer(ref) => undoBlacklist(ref)
  }
}

object BlacklistSupport {
  private case class UnblacklistPeer(peerId: PeerId)
}
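A sketch of how an actor might mix this trait in — the SyncCoordinator actor and its FetchFailed message are hypothetical. Chaining handleBlacklistMessages with orElse lets the private UnblacklistPeer timer messages be handled with no extra code:

import akka.actor.{Actor, ActorLogging, Scheduler}
import io.iohk.ethereum.network.PeerId
import scala.concurrent.duration._

case class FetchFailed(peerId: PeerId, reason: String) // illustrative message

class SyncCoordinator extends Actor with ActorLogging with BlacklistSupport {
  def scheduler: Scheduler = context.system.scheduler

  def receive: Receive = handleBlacklistMessages orElse {
    case FetchFailed(peerId, reason) =>
      // Ignore a misbehaving peer for 5 minutes; the scheduled UnblacklistPeer lifts the ban.
      if (!isBlacklisted(peerId)) blacklist(peerId, 5.minutes, reason)
  }
}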
Example 13
Source File: BlockchainHostActor.scala From mantis with Apache License 2.0
package io.iohk.ethereum.blockchain.sync

import akka.actor.{Actor, ActorLogging, ActorRef, Props}
import akka.util.ByteString
import io.iohk.ethereum.domain.{BlockHeader, Blockchain}
import io.iohk.ethereum.network.PeerEventBusActor.PeerEvent.MessageFromPeer
import io.iohk.ethereum.network.PeerEventBusActor.SubscriptionClassifier.MessageClassifier
import io.iohk.ethereum.network.PeerEventBusActor.{PeerSelector, Subscribe}
import io.iohk.ethereum.network.PeerManagerActor.PeerConfiguration
import io.iohk.ethereum.network.p2p.{Message, MessageSerializable}
import io.iohk.ethereum.network.p2p.messages.PV62.{BlockBodies, BlockHeaders, GetBlockBodies, GetBlockHeaders}
import io.iohk.ethereum.network.p2p.messages.PV63.{GetNodeData, GetReceipts, NodeData, Receipts}
import io.iohk.ethereum.network.p2p.messages.PV63.MptNodeEncoders._
import io.iohk.ethereum.network.EtcPeerManagerActor

// The class declaration and the rest of the actor were truncated in the original
// listing; this header is reconstructed from the props factory in the companion object.
class BlockchainHostActor(
    blockchain: Blockchain,
    peerConfiguration: PeerConfiguration,
    peerEventBusActor: ActorRef,
    etcPeerManagerActor: ActorRef) extends Actor with ActorLogging {

  override def receive: Receive = Actor.emptyBehavior // message handling was elided in the original snippet

  private def handleBlockFastDownload(message: Message): Option[MessageSerializable] = message match {
    case request: GetReceipts =>
      val receipts = request.blockHashes.take(peerConfiguration.fastSyncHostConfiguration.maxReceiptsPerMessage)
        .flatMap(hash => blockchain.getReceiptsByHash(hash))
      Some(Receipts(receipts))

    case request: GetBlockBodies =>
      val blockBodies = request.hashes.take(peerConfiguration.fastSyncHostConfiguration.maxBlocksBodiesPerMessage)
        .flatMap(hash => blockchain.getBlockBodyByHash(hash))
      Some(BlockBodies(blockBodies))

    case request: GetBlockHeaders =>
      val blockNumber = request.block.fold(a => Some(a), b => blockchain.getBlockHeaderByHash(b).map(_.number))

      blockNumber match {
        case Some(startBlockNumber) if startBlockNumber >= 0 && request.maxHeaders >= 0 && request.skip >= 0 =>
          val headersCount: BigInt =
            request.maxHeaders min peerConfiguration.fastSyncHostConfiguration.maxBlocksHeadersPerMessage

          val range = if (request.reverse) {
            startBlockNumber to (startBlockNumber - (request.skip + 1) * headersCount + 1) by -(request.skip + 1)
          } else {
            startBlockNumber to (startBlockNumber + (request.skip + 1) * headersCount - 1) by (request.skip + 1)
          }

          val blockHeaders: Seq[BlockHeader] = range.flatMap { a: BigInt => blockchain.getBlockHeaderByNumber(a) }

          Some(BlockHeaders(blockHeaders))

        case _ =>
          log.warning("got request for block headers with invalid block hash/number: {}", request)
          None
      }

    case _ => None
  }
}

object BlockchainHostActor {
  def props(blockchain: Blockchain, peerConfiguration: PeerConfiguration,
            peerEventBusActor: ActorRef, etcPeerManagerActor: ActorRef): Props =
    Props(new BlockchainHostActor(blockchain, peerConfiguration, peerEventBusActor, etcPeerManagerActor))
}
Example 14
Source File: PeerListSupport.scala From mantis with Apache License 2.0
package io.iohk.ethereum.blockchain.sync

import akka.actor.{Actor, ActorLogging, ActorRef, Scheduler}
import io.iohk.ethereum.network.{EtcPeerManagerActor, Peer, PeerId}
import io.iohk.ethereum.network.EtcPeerManagerActor.PeerInfo
import io.iohk.ethereum.network.PeerEventBusActor.PeerEvent.PeerDisconnected
import io.iohk.ethereum.network.PeerEventBusActor.SubscriptionClassifier.PeerDisconnectedClassifier
import io.iohk.ethereum.network.PeerEventBusActor.{PeerSelector, Subscribe, Unsubscribe}
import io.iohk.ethereum.utils.Config.SyncConfig

import scala.concurrent.duration._
import scala.concurrent.ExecutionContext.Implicits.global

trait PeerListSupport {
  self: Actor with ActorLogging with BlacklistSupport =>

  def etcPeerManager: ActorRef
  def peerEventBus: ActorRef
  def syncConfig: SyncConfig
  def scheduler: Scheduler

  var handshakedPeers: Map[Peer, PeerInfo] = Map.empty

  scheduler.schedule(0.seconds, syncConfig.peersScanInterval, etcPeerManager,
    EtcPeerManagerActor.GetHandshakedPeers)(global, context.self)

  def removePeer(peerId: PeerId): Unit = {
    peerEventBus ! Unsubscribe(PeerDisconnectedClassifier(PeerSelector.WithId(peerId)))
    handshakedPeers.find(_._1.id == peerId).foreach { case (peer, _) => undoBlacklist(peer.id) }
    handshakedPeers = handshakedPeers.filterNot(_._1.id == peerId)
  }

  def peersToDownloadFrom: Map[Peer, PeerInfo] =
    handshakedPeers.filterNot { case (p, s) => isBlacklisted(p.id) }

  def handlePeerListMessages: Receive = {
    case EtcPeerManagerActor.HandshakedPeers(peers) =>
      peers.keys.filterNot(handshakedPeers.contains).foreach { peer =>
        peerEventBus ! Subscribe(PeerDisconnectedClassifier(PeerSelector.WithId(peer.id)))
      }
      handshakedPeers = peers

    case PeerDisconnected(peerId) if handshakedPeers.exists(_._1.id == peerId) =>
      removePeer(peerId)
  }
}
Example 15
Source File: FastSyncStateStorageActor.scala From mantis with Apache License 2.0
package io.iohk.ethereum.blockchain.sync

import akka.actor.{Actor, ActorLogging}
import akka.pattern.pipe
import io.iohk.ethereum.blockchain.sync.FastSync.SyncState
import io.iohk.ethereum.blockchain.sync.FastSyncStateStorageActor.GetStorage
import io.iohk.ethereum.db.storage.FastSyncStateStorage

import scala.concurrent.Future
import scala.util.{Failure, Success, Try}

class FastSyncStateStorageActor extends Actor with ActorLogging {

  def receive: Receive = {
    // after initialization send a valid Storage reference
    case storage: FastSyncStateStorage => context become idle(storage)
  }

  def idle(storage: FastSyncStateStorage): Receive = {
    // begin saving of the state to the storage and become busy
    case state: SyncState => persistState(storage, state)

    case GetStorage => sender() ! storage.getSyncState()
  }

  def busy(storage: FastSyncStateStorage, stateToPersist: Option[SyncState]): Receive = {
    // update state waiting to be persisted later. we only keep newest state
    case state: SyncState => context become busy(storage, Some(state))
    // exception was thrown during persisting of a state. push
    case Failure(e) => throw e
    // state was saved in the storage. become idle
    case Success(s: FastSyncStateStorage) if stateToPersist.isEmpty => context become idle(s)
    // state was saved in the storage but new state is already waiting to be saved.
    case Success(s: FastSyncStateStorage) if stateToPersist.isDefined => stateToPersist.foreach(persistState(s, _))

    case GetStorage => sender() ! storage.getSyncState()
  }

  private def persistState(storage: FastSyncStateStorage, syncState: SyncState): Unit = {
    import context.dispatcher
    val persistingQueues: Future[Try[FastSyncStateStorage]] = Future {
      lazy val result = Try { storage.putSyncState(syncState) }
      if (log.isDebugEnabled) {
        val now = System.currentTimeMillis()
        result
        val end = System.currentTimeMillis()
        log.debug(s"Saving snapshot of a fast sync took ${end - now} ms")
        result
      } else {
        result
      }
    }
    persistingQueues pipeTo self
    context become busy(storage, None)
  }
}

object FastSyncStateStorageActor {
  case object GetStorage
}
Example 16
Source File: WriteExchangeTransactionActor.scala From matcher with MIT License
package com.wavesplatform.dex.actors.tx

import akka.actor.{Actor, Props}
import com.wavesplatform.dex.db.DbKeys
import com.wavesplatform.dex.db.leveldb.{DBExt, RW}
import com.wavesplatform.dex.domain.bytes.ByteStr
import com.wavesplatform.dex.domain.transaction.ExchangeTransaction
import com.wavesplatform.dex.domain.utils.ScorexLogging
import com.wavesplatform.dex.model.Events._
import org.iq80.leveldb.DB

class WriteExchangeTransactionActor(db: DB) extends Actor with ScorexLogging {

  import WriteExchangeTransactionActor._

  override def receive: Receive = {
    case ExchangeTransactionCreated(tx) => saveExchangeTx(tx)
  }

  private def saveExchangeTx(tx: ExchangeTransaction): Unit = db.readWrite { rw =>
    log.trace(s"Appending ${tx.id()} to orders [${tx.buyOrder.idStr()}, ${tx.sellOrder.idStr()}]")
    val txKey = DbKeys.exchangeTransaction(tx.id())
    if (!rw.has(txKey)) {
      rw.put(txKey, Some(tx))
      appendTxId(rw, tx.buyOrder.id(), tx.id())
      appendTxId(rw, tx.sellOrder.id(), tx.id())
    }
  }
}

object WriteExchangeTransactionActor {

  def name: String = "WriteExchangeTransactionActor"

  def props(db: DB): Props = Props(new WriteExchangeTransactionActor(db))

  def appendTxId(rw: RW, orderId: ByteStr, txId: ByteStr): Unit = {
    val key = DbKeys.orderTxIdsSeqNr(orderId)
    val nextSeqNr = rw.get(key) + 1
    rw.put(key, nextSeqNr)
    rw.put(DbKeys.orderTxId(orderId, nextSeqNr), txId)
  }
}
Example 17
Source File: CreateExchangeTransactionActor.scala From matcher with MIT License
package com.wavesplatform.dex.actors.tx

import akka.actor.{Actor, ActorRef, Props}
import com.wavesplatform.dex.actors.tx.CreateExchangeTransactionActor.OrderExecutedObserved
import com.wavesplatform.dex.domain.account.Address
import com.wavesplatform.dex.domain.utils.ScorexLogging
import com.wavesplatform.dex.model.Events.{ExchangeTransactionCreated, OrderExecuted}
import com.wavesplatform.dex.model.ExchangeTransactionCreator.CreateTransaction
import play.api.libs.json.Json

import scala.collection.mutable

class CreateExchangeTransactionActor(createTransaction: CreateTransaction, recipients: List[ActorRef])
    extends Actor
    with ScorexLogging {

  private val pendingEvents = mutable.Set.empty[OrderExecuted]

  override def preStart(): Unit = context.system.eventStream.subscribe(self, classOf[OrderExecutedObserved])

  override def receive: Receive = {
    case OrderExecutedObserved(sender, event) =>
      val sameOwner = event.counter.order.sender == event.submitted.order.sender
      log.debug(s"Execution observed at $sender for OrderExecuted(${event.submitted.order.id()}, ${event.counter.order
        .id()}), amount=${event.executedAmount})${if (sameOwner) " Same owner for both orders" else ""}")
      if (sameOwner || pendingEvents.contains(event)) {
        import event.{counter, submitted}
        createTransaction(event) match {
          case Right(tx) =>
            log.info(s"Created transaction: $tx")
            val created = ExchangeTransactionCreated(tx)
            recipients.foreach(_ ! created)
          case Left(ex) =>
            log.warn(
              s"""Can't create tx: $ex
                 |o1: (amount=${submitted.amount}, fee=${submitted.fee}): ${Json.prettyPrint(submitted.order.json())}
                 |o2: (amount=${counter.amount}, fee=${counter.fee}): ${Json.prettyPrint(counter.order.json())}""".stripMargin
            )
        }

        pendingEvents -= event
      } else pendingEvents += event
  }
}

object CreateExchangeTransactionActor {
  val name = "create-exchange-tx"

  case class OrderExecutedObserved(sender: Address, event: OrderExecuted)

  def props(createTransaction: CreateTransaction, recipients: List[ActorRef]): Props =
    Props(new CreateExchangeTransactionActor(createTransaction, recipients))
}
Example 18
Source File: OrderBookSnapshotStoreActor.scala From matcher with MIT License
package com.wavesplatform.dex.actors.orderbook

import akka.actor.{Actor, Props}
import com.wavesplatform.dex.actors.orderbook.OrderBookSnapshotStoreActor._
import com.wavesplatform.dex.db.OrderBookSnapshotDB
import com.wavesplatform.dex.domain.asset.AssetPair
import com.wavesplatform.dex.model.OrderBookSnapshot
import com.wavesplatform.dex.queue.QueueEventWithMeta.Offset

class OrderBookSnapshotStoreActor(db: OrderBookSnapshotDB) extends Actor {
  override def receive: Receive = {
    case Message.GetSnapshot(p) => sender() ! Response.GetSnapshot(db.get(p))

    case Message.Update(p, offset, newSnapshot) =>
      db.update(p, offset, newSnapshot)
      sender() ! Response.Updated(offset)

    case Message.Delete(p) => db.delete(p)
  }
}

object OrderBookSnapshotStoreActor {
  sealed trait Message
  object Message {
    case class GetSnapshot(assetPair: AssetPair) extends Message
    case class Update(assetPair: AssetPair, offset: Offset, newSnapshot: Option[OrderBookSnapshot]) extends Message
    case class Delete(assetPair: AssetPair) extends Message
  }

  sealed trait Response
  object Response {
    case class GetSnapshot(result: Option[(Offset, OrderBookSnapshot)]) extends Response
    case class Updated(offset: Offset) extends Response
    case class Deleted(assetPair: AssetPair) extends Response
  }

  def props(db: OrderBookSnapshotDB): Props = Props(new OrderBookSnapshotStoreActor(db))
}
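Because every handler replies to sender(), the store composes directly with the ask pattern. A minimal usage sketch, assuming an ActorSystem system, an OrderBookSnapshotDB db, and an AssetPair pair are already in scope:

import akka.pattern.ask
import akka.util.Timeout
import scala.concurrent.duration._

implicit val timeout: Timeout = 5.seconds
val store = system.actorOf(OrderBookSnapshotStoreActor.props(db))

// Fetch the latest stored snapshot for a pair; `result` is None if nothing was persisted yet.
val snapshot = (store ? OrderBookSnapshotStoreActor.Message.GetSnapshot(pair))
  .mapTo[OrderBookSnapshotStoreActor.Response.GetSnapshot]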
Example 19
Source File: AskActor.scala From matcher with MIT License
package com.wavesplatform.dex.actors

import akka.actor.{Actor, ActorRef, ActorSystem, Props, Status}

import scala.concurrent.duration.FiniteDuration
import scala.concurrent.{Future, Promise, TimeoutException}
import scala.reflect.ClassTag

class AskActor[T](p: Promise[T], timeout: FiniteDuration)(implicit ct: ClassTag[T]) extends Actor {
  import context.dispatcher
  private val timeoutCancelable = context.system.scheduler.scheduleOnce(timeout, self, AskActor.timeoutMessage)

  override val receive: Receive = {
    case x => // Fix in Scala 2.13
      timeoutCancelable.cancel()
      context.stop(self)
      x match {
        case x: T if x.getClass == ct.runtimeClass => p.trySuccess(x)
        case e: Status.Failure                     => p.tryFailure(e.cause)
        case _ => p.tryFailure(new IllegalArgumentException(s"Expected ${ct.runtimeClass.getName}, but got $x"))
      }
  }
}

object AskActor {
  private val timeoutMessage = {
    val reason = new TimeoutException("Typed ask is timed out!")
    reason.setStackTrace(Array.empty)
    Status.Failure(reason)
  }

  def props[T](p: Promise[T], timeout: FiniteDuration)(implicit ct: ClassTag[T]) =
    Props(new AskActor(p, timeout))

  def mk[T](timeout: FiniteDuration)(implicit ct: ClassTag[T], system: ActorSystem): (ActorRef, Future[T]) = {
    val p = Promise[T]()
    val ref = system.actorOf(props(p, timeout))
    (ref, p.future)
  }
}
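AskActor.mk is a typed alternative to akka.pattern.ask: it spawns a short-lived actor, hands you its ActorRef to use as the sender, and completes the returned Future with the first reply of the expected type (or a timeout failure). A usage sketch; the Echo actor exists only for illustration:

import akka.actor.{Actor, ActorRef, ActorSystem, Props}
import scala.concurrent.duration._

object AskActorExample extends App {
  implicit val system: ActorSystem = ActorSystem("ask-example")

  // A trivial actor that echoes whatever it receives back to the sender.
  class Echo extends Actor {
    def receive: Receive = { case msg => sender() ! msg.toString }
  }
  val target: ActorRef = system.actorOf(Props(new Echo))

  val (proxy, reply) = AskActor.mk[String](3.seconds)
  target.tell("ping", proxy)                // `proxy` receives the reply and completes the future
  reply.foreach(println)(system.dispatcher) // prints "ping"
}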
Example 20
Source File: WatchDistributedCompletionActor.scala From matcher with MIT License
package com.wavesplatform.dex.actors

import akka.actor.{Actor, ActorRef, Cancellable, Props, Terminated}
import com.wavesplatform.dex.domain.utils.ScorexLogging

import scala.concurrent.duration.FiniteDuration

class WatchDistributedCompletionActor(workers: Set[ActorRef],
                                      completionReceiver: ActorRef,
                                      startWorkCommand: Any,
                                      workCompleted: Any,
                                      timeout: FiniteDuration)
    extends Actor
    with ScorexLogging {

  import context.dispatcher

  if (workers.isEmpty) stop(Cancellable.alreadyCancelled)
  else
    workers.foreach { x =>
      context.watch(x)
      x ! startWorkCommand
    }

  override def receive: Receive = state(workers, context.system.scheduler.scheduleOnce(timeout, self, TimedOut))

  private def state(rest: Set[ActorRef], timer: Cancellable): Receive = {
    case `workCompleted` =>
      switchTo(rest - sender(), timer)
      context.unwatch(sender())

    case Terminated(ref) =>
      switchTo(rest - ref, timer)

    case TimedOut =>
      val workerPairs = workers.iterator.map(_.path.name).mkString(", ")
      log.error(s"$startWorkCommand is timed out! Workers that didn't respond: $workerPairs")
      stop(timer)
  }

  private def switchTo(updatedRest: Set[ActorRef], timer: Cancellable): Unit =
    if (updatedRest.isEmpty) stop(timer) else context.become(state(updatedRest, timer))

  private def stop(timer: Cancellable): Unit = {
    timer.cancel()
    completionReceiver ! workCompleted
    context.stop(self)
  }
}

object WatchDistributedCompletionActor {
  def props(workers: Set[ActorRef],
            completionReceiver: ActorRef,
            startWorkCommand: Any,
            workCompleted: Any,
            timeout: FiniteDuration): Props =
    Props(new WatchDistributedCompletionActor(workers, completionReceiver, startWorkCommand, workCompleted, timeout))
}
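A usage sketch with hypothetical Flush/Flushed commands: the watcher broadcasts Flush to every worker, collects Flushed replies (a terminated worker also counts as done), and finally sends Flushed to the coordinator, either when all workers have answered or when the timeout fires:

import scala.concurrent.duration._

case object Flush   // hypothetical start-work command
case object Flushed // hypothetical completion reply

// `system`, `workers: Set[ActorRef]`, and `coordinator: ActorRef` are assumed to exist.
val watcher = system.actorOf(
  WatchDistributedCompletionActor.props(workers, coordinator, Flush, Flushed, 10.seconds))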
Example 21
Source File: AddressDirectoryActor.scala From matcher with MIT License
package com.wavesplatform.dex.actors.address

import akka.actor.{Actor, ActorRef, Props, SupervisorStrategy, Terminated}
import com.wavesplatform.dex.db.OrderDB
import com.wavesplatform.dex.domain.account.Address
import com.wavesplatform.dex.domain.utils.{EitherExt2, ScorexLogging}
import com.wavesplatform.dex.history.HistoryRouter._
import com.wavesplatform.dex.model.Events
import com.wavesplatform.dex.model.Events.OrderCancelFailed

import scala.collection.mutable

class AddressDirectoryActor(orderDB: OrderDB, addressActorProps: (Address, Boolean) => Props, historyRouter: Option[ActorRef])
    extends Actor
    with ScorexLogging {

  import AddressDirectoryActor._
  import context._

  private var startSchedules: Boolean = false
  private[this] val children = mutable.AnyRefMap.empty[Address, ActorRef]

  override def supervisorStrategy: SupervisorStrategy = SupervisorStrategy.stoppingStrategy

  private def createAddressActor(address: Address): ActorRef = {
    log.debug(s"Creating address actor for $address")
    watch(actorOf(addressActorProps(address, startSchedules), address.toString))
  }

  private def forward(address: Address, msg: Any): Unit = (children get address, msg) match {
    case (None, _: AddressActor.Message.BalanceChanged) =>
    case _ => children getOrElseUpdate (address, createAddressActor(address)) forward msg
  }

  override def receive: Receive = {
    case Envelope(address, cmd) => forward(address, cmd)

    case e @ Events.OrderAdded(lo, timestamp) =>
      forward(lo.order.sender, e)
      historyRouter foreach { _ ! SaveOrder(lo, timestamp) }

    case e: Events.OrderExecuted =>
      import e.{counter, submitted}
      forward(submitted.order.sender, e)
      if (counter.order.sender != submitted.order.sender) forward(counter.order.sender, e)
      historyRouter foreach { _ ! SaveEvent(e) }

    case e: Events.OrderCanceled =>
      forward(e.acceptedOrder.order.sender, e)
      historyRouter foreach { _ ! SaveEvent(e) }

    case e: OrderCancelFailed =>
      orderDB.get(e.id) match {
        case Some(order) => forward(order.sender.toAddress, e)
        case None        => log.warn(s"The order '${e.id}' not found")
      }

    case StartSchedules =>
      if (!startSchedules) {
        startSchedules = true
        context.children.foreach(_ ! StartSchedules)
      }

    case Terminated(child) =>
      val addressString = child.path.name
      val address = Address.fromString(addressString).explicitGet()
      children.remove(address)
      log.warn(s"Address handler for $addressString terminated")
  }
}

object AddressDirectoryActor {
  case class Envelope(address: Address, cmd: AddressActor.Message)
  case object StartSchedules
}
Example 22
Source File: BatchOrderCancelActor.scala From matcher with MIT License
package com.wavesplatform.dex.actors.address

import akka.actor.{Actor, ActorRef, Cancellable, Props}
import com.wavesplatform.dex.actors.TimedOut
import com.wavesplatform.dex.actors.address.AddressActor.Command.CancelOrder
import com.wavesplatform.dex.actors.address.AddressActor.Event
import com.wavesplatform.dex.actors.address.BatchOrderCancelActor.CancelResponse.OrderCancelResult
import com.wavesplatform.dex.domain.order.Order
import com.wavesplatform.dex.domain.utils.ScorexLogging
import com.wavesplatform.dex.error

import scala.concurrent.duration.FiniteDuration

class BatchOrderCancelActor private (
    orderIds: Set[Order.Id],
    processorActor: ActorRef,
    clientActor: ActorRef,
    timeout: FiniteDuration,
    initResponse: Map[Order.Id, OrderCancelResult]
) extends Actor
    with ScorexLogging {

  import BatchOrderCancelActor._
  import context.dispatcher

  orderIds.foreach(processorActor ! CancelOrder(_))

  override def receive: Receive = state(orderIds, initResponse, context.system.scheduler.scheduleOnce(timeout, self, TimedOut))

  private def state(restOrderIds: Set[Order.Id], response: Map[Order.Id, OrderCancelResult], timer: Cancellable): Receive = {
    case CancelResponse(id, x) =>
      val updatedRestOrderIds = restOrderIds - id
      val updatedResponse = response.updated(id, x)

      if (updatedRestOrderIds.isEmpty) stop(Event.BatchCancelCompleted(updatedResponse), timer)
      else context.become(state(restOrderIds - id, updatedResponse, timer))

    // case Terminated(ref) => // Can't terminate before processorActor, because processorActor is a parent

    case TimedOut =>
      log.error(s"CancelOrder is timed out for orders: ${restOrderIds.mkString(", ")}")
      stop(Event.BatchCancelCompleted(response), timer)
  }

  private def stop(response: Event.BatchCancelCompleted, timer: Cancellable): Unit = {
    timer.cancel()
    clientActor ! response
    context.stop(self)
  }
}

object BatchOrderCancelActor {
  def props(orderIds: Set[Order.Id],
            processorActor: ActorRef,
            clientActor: ActorRef,
            timeout: FiniteDuration,
            initResponse: Map[Order.Id, OrderCancelResult] = Map.empty): Props = {
    require(orderIds.nonEmpty, "orderIds is empty")
    Props(new BatchOrderCancelActor(orderIds, processorActor, clientActor, timeout, initResponse))
  }

  object CancelResponse {

    type OrderCancelResult = Either[error.MatcherError, Event.OrderCanceled]

    def unapply(arg: Any): Option[(Order.Id, OrderCancelResult)] = helper.lift(arg)

    private val helper: PartialFunction[Any, (Order.Id, OrderCancelResult)] = {
      case x @ Event.OrderCanceled(id)     => (id, Right(x))
      case x @ error.OrderNotFound(id)     => (id, Left(x))
      case x @ error.OrderCanceled(id)     => (id, Left(x))
      case x @ error.OrderFull(id)         => (id, Left(x))
      case x @ error.MarketOrderCancel(id) => (id, Left(x))
    }
  }
}
Example 23
Source File: HistoryMessagesBatchSender.scala From matcher with MIT License
package com.wavesplatform.dex.history

import akka.actor.{Actor, Cancellable}
import com.wavesplatform.dex.history.HistoryRouter.{HistoryMsg, StopAccumulate}

import scala.collection.mutable
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.duration._
import scala.reflect.ClassTag

abstract class HistoryMessagesBatchSender[M <: HistoryMsg: ClassTag] extends Actor {

  val batchLinger: Long
  val batchEntries: Long

  def createAndSendBatch(batchBuffer: Iterable[M]): Unit

  private val batchBuffer: mutable.Set[M] = mutable.Set.empty[M]

  private def scheduleStopAccumulating: Cancellable =
    context.system.scheduler.scheduleOnce(batchLinger.millis, self, StopAccumulate)

  private def sendBatch(): Unit = {
    if (batchBuffer.nonEmpty) {
      createAndSendBatch(batchBuffer)
      batchBuffer.clear()
    }
  }

  def receive: Receive = awaitingHistoryMessages

  private def awaitingHistoryMessages: Receive = {
    case msg: M =>
      scheduleStopAccumulating
      context become accumulateBuffer(scheduleStopAccumulating)
      batchBuffer += msg
  }

  private def accumulateBuffer(scheduledStop: Cancellable): Receive = {
    case msg: M =>
      if (batchBuffer.size == batchEntries) {
        scheduledStop.cancel()
        sendBatch()
        context become accumulateBuffer(scheduleStopAccumulating)
      }
      batchBuffer += msg

    case StopAccumulate =>
      sendBatch()
      context become awaitingHistoryMessages
  }
}
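A concrete sender only has to supply the two batch limits and the flush action; the base class handles the buffering and the linger timer. A minimal sketch, assuming SaveOrder is one of the HistoryMsg subtypes defined in HistoryRouter:

// Hypothetical concrete batch sender; println stands in for a real database write.
class OrderHistoryBatchSender extends HistoryMessagesBatchSender[SaveOrder] {
  val batchLinger: Long = 500   // flush at most 500 ms after the first buffered message
  val batchEntries: Long = 100  // ...or as soon as 100 messages have accumulated

  def createAndSendBatch(batchBuffer: Iterable[SaveOrder]): Unit =
    println(s"persisting ${batchBuffer.size} order records")
}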
Example 24
Source File: RestartableActor.scala From matcher with MIT License
package com.wavesplatform.dex.fixtures

import akka.actor.Actor
import com.wavesplatform.dex.fixtures.RestartableActor.{RestartActor, RestartActorException}

trait RestartableActor extends Actor {

  override def unhandled(message: Any): Unit = {
    message match {
      case RestartActor => throw RestartActorException
      case _            =>
    }

    super.unhandled(message)
  }
}

object RestartableActor {
  case object RestartActor

  private object RestartActorException extends Exception("Planned restart")
}
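The trait hooks unhandled, so any actor that mixes it in can be forced to restart from a test by sending RestartActor: the exception propagates to the supervisor, which (with the default strategy) restarts the actor and wipes its state. A sketch with a hypothetical actor under test:

import akka.actor.{Actor, ActorSystem, Props}
import com.wavesplatform.dex.fixtures.RestartableActor
import com.wavesplatform.dex.fixtures.RestartableActor.RestartActor

class CounterActor extends Actor with RestartableActor { // illustrative actor under test
  private var n = 0
  def receive: Receive = { case "inc" => n += 1 }
}

val system = ActorSystem("restart-demo")
val counter = system.actorOf(Props(new CounterActor))
counter ! "inc"
counter ! RestartActor // throws inside the actor; the supervisor restarts it and n resets to 0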
Example 25
Source File: ClusterSingletonHelperTest.scala From akka-tools with MIT License
package no.nextgentel.oss.akkatools.cluster

import akka.actor.{Actor, ActorRef, ActorSystem, Props}
import akka.testkit.{TestKit, TestProbe}
import com.typesafe.config.ConfigFactory
import org.scalatest.{BeforeAndAfter, BeforeAndAfterAll, FunSuiteLike, Matchers}
import org.slf4j.LoggerFactory

import scala.util.Random

object ClusterSingletonHelperTest {
  val port = 20000 + Random.nextInt(20000)
}

class ClusterSingletonHelperTest(_system: ActorSystem)
  extends TestKit(_system)
  with FunSuiteLike
  with Matchers
  with BeforeAndAfterAll
  with BeforeAndAfter {

  def this() = this(ActorSystem("test-actor-system", ConfigFactory.parseString(
    s"""akka.actor.provider = "akka.cluster.ClusterActorRefProvider"
       |akka.remote.enabled-transports = ["akka.remote.netty.tcp"]
       |akka.remote.netty.tcp.hostname="localhost"
       |akka.remote.netty.tcp.port=${ClusterSingletonHelperTest.port}
       |akka.cluster.seed-nodes = ["akka.tcp://test-actor-system@localhost:${ClusterSingletonHelperTest.port}"]
    """.stripMargin
  ).withFallback(ConfigFactory.load("application-test.conf"))))

  override def afterAll {
    TestKit.shutdownActorSystem(system)
  }

  val log = LoggerFactory.getLogger(getClass)

  test("start and communicate with cluster-singleton") {
    val started = TestProbe()
    val proxy = ClusterSingletonHelper.startClusterSingleton(system, Props(new OurClusterSingleton(started.ref)), "ocl")
    started.expectMsg("started")
    val sender = TestProbe()
    sender.send(proxy, "ping")
    sender.expectMsg("pong")
  }
}

class OurClusterSingleton(started: ActorRef) extends Actor {
  started ! "started"

  def receive = {
    case "ping" => sender ! "pong"
  }
}
Example 26
Source File: Scheduler.scala From piflow with BSD 2-Clause "Simplified" License
package cn.piflow.api

import akka.actor.{Actor, ActorSystem, Props}
import cn.piflow.api.HTTPService.config
import cn.piflow.util.H2Util
import com.typesafe.akka.extension.quartz.QuartzSchedulerExtension

object ScheduleType {
  val FLOW = "Flow"
  val GROUP = "Group"
}

class ExecutionActor(id: String, scheduleType: String) extends Actor {

  override def receive: Receive = {
    case json: String =>
      scheduleType match {
        case ScheduleType.FLOW =>
          val (appId, process) = API.startFlow(json)
          H2Util.addScheduleEntry(id, appId, ScheduleType.FLOW)
        case ScheduleType.GROUP =>
          val groupExecution = API.startGroup(json)
          H2Util.addScheduleEntry(id, groupExecution.getGroupId(), ScheduleType.GROUP)
      }
    case _ => println("error type!")
  }
}
Example 27
Source File: Master.scala From asyspark with MIT License
package org.apache.spark.asyspark.core

import java.util.concurrent.TimeUnit

import akka.actor.{Actor, ActorLogging, ActorRef, ActorSystem, Address, Props, Terminated}
import akka.util.Timeout
import com.typesafe.config.Config
import com.typesafe.scalalogging.slf4j.StrictLogging
import org.apache.spark.asyspark.core.messages.master.{ClientList, RegisterClient, RegisterServer, ServerList}

import scala.concurrent.duration._
import scala.concurrent.{ExecutionContext, Future}

// The class declaration was truncated in the original listing; this header and the
// `servers` field are reconstructed from the message handling below.
class Master extends Actor with ActorLogging {

  var servers = Set.empty[ActorRef]
  var clients = Set.empty[ActorRef]

  override def receive: Receive = {
    case RegisterServer(server) =>
      log.info(s"Registering server ${server.path.toString}")
      println("register server")
      servers += server
      context.watch(server)
      sender ! true

    case RegisterClient(client) =>
      log.info(s"Registering client ${sender.path.toString}")
      clients += client
      context.watch(client)
      sender ! true

    case ServerList() =>
      log.info(s"Sending current server list to ${sender.path.toString}")
      sender ! servers.toArray

    case ClientList() =>
      log.info(s"Sending current client list to ${sender.path.toString}")
      sender ! clients.toArray

    case Terminated(actor) =>
      actor match {
        case server: ActorRef if servers contains server =>
          log.info(s"Removing server ${server.path.toString}")
          servers -= server
        case client: ActorRef if clients contains client =>
          log.info(s"Removing client ${client.path.toString}")
          clients -= client
        case actor: ActorRef =>
          log.warning(s"Actor ${actor.path.toString} will be terminated for some unknown reason")
      }
  }
}

object Master extends StrictLogging {
  def run(config: Config): Future[(ActorSystem, ActorRef)] = {
    logger.debug("Starting master actor system")
    val system = ActorSystem(config.getString("asyspark.master.system"), config.getConfig("asyspark.master"))
    logger.debug("Starting master")
    val master = system.actorOf(Props[Master], config.getString("asyspark.master.name"))
    implicit val timeout = Timeout(config.getDuration("asyspark.master.startup-timeout", TimeUnit.MILLISECONDS) milliseconds)
    implicit val ec = ExecutionContext.Implicits.global
    val address = Address("akka.tcp", config.getString("asyspark.master.system"),
      config.getString("asyspark.master.host"), config.getString("asyspark.master.port").toInt)
    system.actorSelection(master.path.toSerializationFormat).resolveOne().map {
      case actor: ActorRef =>
        logger.debug("Master successfully started")
        (system, master)
    }
  }
}
Example 28
Source File: PartialVector.scala From asyspark with MIT License
package org.apache.spark.asyspark.core.models.server

import akka.actor.{Actor, ActorLogging}
import spire.algebra.Semiring
import spire.implicits._
import org.apache.spark.asyspark.core.partitions.Partition

import scala.reflect.ClassTag

// The class declaration and fields were truncated in the original listing; this header
// is a reconstruction assuming a per-partition vector of a Semiring element type V.
private[asyspark] class PartialVector[V: Semiring : ClassTag](val partition: Partition)
  extends Actor with ActorLogging {

  val size: Int = partition.size
  val data: Array[V] = Array.fill(size)(implicitly[Semiring[V]].zero)

  def receive: Receive = Actor.emptyBehavior // message handling was elided in the original snippet

  // todo: I think this implementation can be optimized
  def update(keys: Array[Long], values: Array[V]): Boolean = {
    var i = 0
    try {
      while (i < keys.length) {
        val key = partition.globalToLocal(keys(i))
        // `+=` on data(key) is implemented via the Semiring syntax from spire.implicits._
        data(key) += values(i)
        i += 1
      }
      true
    } catch {
      case e: Exception => false
    }
  }

  def get(keys: Array[Long]): Array[V] = {
    var i = 0
    val a = new Array[V](keys.length)
    while (i < keys.length) {
      val key = partition.globalToLocal(keys(i))
      a(i) = data(key)
      i += 1
    }
    a
  }

  log.info(s"Constructed PartialVector[${implicitly[ClassTag[V]]}] of size $size (partition id: ${partition.index})")
}
Example 29
Source File: PhilosopherMessages.scala From didactic-computing-machine with GNU Affero General Public License v3.0
package DiningPhilosophers

import akka.actor.{Actor, ActorLogging, ActorRef, ActorSystem, Props}

object PhilosopherMessages {
  case object Eat
  case object Think
}

object ForkMessages {
  case object Take
  case object Put
  case object ForkBeingUsed
  case object ForkTaken
}
Example 30
Source File: Philosopher.scala From didactic-computing-machine with GNU Affero General Public License v3.0
package DiningPhilosophers

import DiningPhilosophers.ForkMessages._
import DiningPhilosophers.PhilosopherMessages._
import akka.actor.{Actor, ActorLogging, ActorRef, ActorSystem, Props}

import scala.concurrent.duration.{DurationInt, FiniteDuration}
import scala.concurrent.ExecutionContext.Implicits.global

class Philosopher(val leftFork: ActorRef, val rightFork: ActorRef) extends Actor with ActorLogging {

  def name = self.path.name

  private val eatingTime = 2500.millis
  private val thinkingTime = 5000.millis
  private val retryTime = 10.millis

  def thinkFor(duration: FiniteDuration) = {
    context.system.scheduler.scheduleOnce(duration, self, Eat)
    context.become(thinking)
  }

  def thinking: Receive = {
    case Eat =>
      log.info(s"Philosopher ${self.path.name} wants to eat")
      leftFork ! Take
      rightFork ! Take
      context.become(hungry)
  }

  def hungry: Receive = {
    case ForkBeingUsed => handleForkBeingUsed()
    case ForkTaken =>
      log.info(s"Philosopher ${self.path.name} found one fork to be taken by other philosopher")
      context.become(waitingForOtherFork)
  }

  def waitingForOtherFork: Receive = {
    case ForkBeingUsed => handleForkBeingUsed()
    case ForkTaken =>
      log.info(s"Philosopher ${self.path.name} starts to eat")
      context.system.scheduler.scheduleOnce(eatingTime, self, Think)
      context.become(eating)
  }

  def eating: Receive = {
    case Think =>
      log.info(s"Philosopher ${self.path.name} starts to think")
      leftFork ! Put
      rightFork ! Put
      thinkFor(thinkingTime)
  }

  def handleForkBeingUsed(): Unit = {
    log.info(s"Philosopher ${self.path.name} found one fork to be in use")
    leftFork ! Put
    rightFork ! Put
    thinkFor(retryTime)
  }

  def receive = {
    case Think =>
      log.info(s"Philosopher ${self.path.name} started thinking")
      thinkFor(thinkingTime)
  }
}
Example 31
Source File: Fork.scala From didactic-computing-machine with GNU Affero General Public License v3.0
package DiningPhilosophers

import DiningPhilosophers.ForkMessages._
import akka.actor.{Actor, ActorLogging, ActorRef, ActorSystem, Props}

class Fork extends Actor with ActorLogging {

  def available: Receive = {
    case Take =>
      log.info(s"Fork ${self.path.name} by ${sender.path.name}")
      sender ! ForkTaken
      context.become(inUse(sender))
  }

  def inUse(philosopher: ActorRef): Receive = {
    case Take =>
      log.info(s"Fork ${self.path.name} already being used by ${philosopher.path.name}")
      sender ! ForkBeingUsed
    case Put =>
      log.info(s"Fork ${self.path.name} put down by ${sender.path.name}")
      sender ! Put
      context.become(available)
  }

  def receive = available
}
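A hypothetical entry point tying the three files above together: five forks, five philosophers each sharing a fork with a neighbour (indices wrap around the table), and an initial Think message to start the simulation:

import DiningPhilosophers.PhilosopherMessages._
import akka.actor.{ActorSystem, Props}

object DiningPhilosophersApp extends App {
  val system = ActorSystem("dining-philosophers")

  val forks = (0 until 5).map(i => system.actorOf(Props[Fork], s"fork-$i"))
  val philosophers = (0 until 5).map { i =>
    // Philosopher i shares fork i with one neighbour and fork (i + 1) % 5 with the other.
    system.actorOf(Props(new Philosopher(forks(i), forks((i + 1) % 5))), s"philosopher-$i")
  }

  philosophers.foreach(_ ! Think)
}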
Example 32
Source File: AkkaQuickstartSpec.scala From didactic-computing-machine with GNU Affero General Public License v3.0
//#full-example
package com.lightbend.akka.sample

import org.scalatest.{BeforeAndAfterAll, FlatSpecLike, Matchers}
import akka.actor.{Actor, Props, ActorSystem}
import akka.testkit.{ImplicitSender, TestKit, TestActorRef, TestProbe}

import scala.concurrent.duration._
import Greeter._
import Printer._

//#test-classes
class AkkaQuickstartSpec(_system: ActorSystem)
  extends TestKit(_system)
  with Matchers
  with FlatSpecLike
  with BeforeAndAfterAll {
  //#test-classes

  def this() = this(ActorSystem("AkkaQuickstartSpec"))

  override def afterAll: Unit = {
    shutdown(system)
  }

  //#first-test
  //#specification-example
  "A Greeter Actor" should "pass on a greeting message when instructed to" in {
    //#specification-example
    val testProbe = TestProbe()
    val helloGreetingMessage = "hello"
    val helloGreeter = system.actorOf(Greeter.props(helloGreetingMessage, testProbe.ref))
    val greetPerson = "Akka"
    helloGreeter ! WhoToGreet(greetPerson)
    helloGreeter ! Greet
    testProbe.expectMsg(500 millis, Greeting(s"$helloGreetingMessage, $greetPerson"))
  }
  //#first-test
}
//#full-example
Example 33
Source File: TestActor.scala From AI with Apache License 2.0
package com.bigchange.akka.actor

import akka.actor.{Actor, ActorSystem, Props}
import akka.event.Logging
import com.bigchange.akka.message.MapData

// The class declaration was truncated in the original listing; reconstructed so the
// lifecycle hook and receive method below have an enclosing actor.
class TestActor extends Actor {

  @scala.throws[Exception](classOf[Exception])
  override def preStart(): Unit = {
    // actor initialization block
  }

  // props
  val props1 = Props()
  val props2 = Props[TestActor]
  val props3 = Props(new TestActor)
  val props6 = props1.withDispatcher("my-dispatcher")

  // create actor
  val system = ActorSystem("MySystem")
  val myActor = system.actorOf(Props[TestActor].withDispatcher("my-dispatcher"), name = "myactor2")

  // Creating an actor with an anonymous class: when an actor spawns a new child actor
  // for a specific subtask, an anonymous class is a convenient place for the code to run.
  def receive = {
    case m: MapData ⇒
      context.actorOf(Props(new Actor {
        def receive = {
          case Some(msg) ⇒
            val replyMsg = doSomeDangerousWork(msg.toString)
            sender ! replyMsg
            context.stop(self)
        }

        def doSomeDangerousWork(msg: String): String = {
          "done"
        }
      })) forward m
  }
}
Example 34
Source File: ActorRefWithAckTest.scala From intro-to-akka-streams with Apache License 2.0
package com.github.dnvriend.streams.sink

import akka.actor.{ Actor, ActorRef, Props }
import akka.stream.scaladsl.{ Sink, Source }
import akka.stream.testkit.TestPublisher
import akka.stream.testkit.scaladsl.TestSource
import akka.testkit.TestProbe
import com.github.dnvriend.streams.TestSpec

import scala.concurrent.duration._
import scala.reflect.ClassTag

// see: https://github.com/akka/akka/blob/4acc1cca6a27be0ff80f801de3640f91343dce94/akka-stream-tests/src/test/scala/akka/stream/scaladsl/ActorRefBackpressureSinkSpec.scala
object ActorRefWithAckTest {
  final val InitMessage = "start"
  final val CompleteMessage = "done"
  final val AckMessage = "ack"

  class Forwarder(ref: ActorRef) extends Actor {
    def receive = {
      case msg @ `InitMessage` ⇒
        sender() ! AckMessage
        ref forward msg
      case msg @ `CompleteMessage` ⇒
        ref forward msg
      case msg ⇒
        sender() ! AckMessage
        ref forward msg
    }
  }
}

class ActorRefWithAckTest extends TestSpec {
  import ActorRefWithAckTest._

  def createActor[A: ClassTag](testProbeRef: ActorRef): ActorRef =
    system.actorOf(Props(implicitly[ClassTag[A]].runtimeClass, testProbeRef))

  def withForwarder(xs: Int*)(f: TestProbe ⇒ Unit): Unit = {
    val tp = TestProbe()
    val ref = createActor[Forwarder](tp.ref)
    Source(xs.toList).runWith(Sink.actorRefWithAck(ref, InitMessage, AckMessage, CompleteMessage))
    try f(tp) finally killActors(ref)
  }

  def withTestPublisher[A](f: (TestPublisher.Probe[A], TestProbe, ActorRef) ⇒ Unit): Unit = {
    val tp = TestProbe()
    val ref = createActor[Forwarder](tp.ref)
    val pub: TestPublisher.Probe[A] =
      TestSource.probe[A].to(Sink.actorRefWithAck(ref, InitMessage, AckMessage, CompleteMessage)).run()
    try f(pub, tp, ref) finally killActors(ref)
  }

  it should "send the elements to the ActorRef" in {
    // which means that the forwarder actor that acts as a sink
    // will initially receive an InitMessage
    // next it will receive each `payload` element, here 1, 2 and 3,
    // finally the forwarder will receive the CompletedMessage, stating that
    // the producer completes the stream because there are no more elements (a finite stream)
    withForwarder(1, 2, 3) { tp ⇒
      tp.expectMsg(InitMessage)
      tp.expectMsg(1)
      tp.expectMsg(2)
      tp.expectMsg(3)
      tp.expectMsg(CompleteMessage)
      tp.expectNoMsg(100.millis)
    }
  }

  it should "send the elements to the ActorRef manually 1, 2 and 3" in {
    withTestPublisher[Int] { (pub, tp, _) ⇒
      pub.sendNext(1)
      tp.expectMsg(InitMessage)
      tp.expectMsg(1)

      pub.sendNext(2)
      tp.expectMsg(2)

      pub.sendNext(3)
      tp.expectMsg(3)

      pub.sendComplete()
      tp.expectMsg(CompleteMessage)
      tp.expectNoMsg(100.millis)
    }
  }

  it should "cancel stream when actor terminates" in {
    withTestPublisher[Int] { (pub, tp, ref) ⇒
      pub.sendNext(1)
      tp.expectMsg(InitMessage)
      tp.expectMsg(1)
      killActors(ref)
      pub.expectCancellation()
    }
  }
}
Example 35
Source File: RandomDataProducer.scala From parquet4s with MIT License
package com.github.mjakubowski84.parquet4s.indefinite

import akka.actor.{Actor, ActorRef, Cancellable, Props, Scheduler}
import akka.pattern.ask
import akka.util.Timeout

import scala.concurrent.duration._
import scala.concurrent.{Await, ExecutionContext}
import scala.util.Random

object RandomDataProducer {
  private val words = Seq("Example", "how", "to", "setup", "indefinite", "stream", "with", "Parquet", "writer")
}

trait RandomDataProducer {
  this: Akka with Logger with Kafka =>

  import RandomDataProducer._

  // Note: nextInt(words.size - 1) draws from 0 until size - 1, so the last word
  // ("writer") is never selected; nextInt(words.size) would cover the whole list.
  private def nextWord: String = words(Random.nextInt(words.size - 1))
  private def action(): Unit = sendKafkaMessage(nextWord)

  private lazy val scheduler: ActorRef = system.actorOf(FluctuatingSchedulerActor.props(action))
  implicit private val stopTimeout: Timeout = new Timeout(FluctuatingSchedulerActor.MaxDelay)

  def startDataProducer(): Unit = {
    logger.info("Starting scheduler that sends messages to Kafka...")
    scheduler ! FluctuatingSchedulerActor.Start
  }

  def stopDataProducer(): Unit = {
    logger.info("Stopping scheduler...")
    Await.ready(scheduler.ask(FluctuatingSchedulerActor.Stop), Duration.Inf)
  }
}

private object FluctuatingSchedulerActor {
  case object Start
  case object ScheduleNext
  case object Stop

  val MinDelay: FiniteDuration = 1.milli
  val MaxDelay: FiniteDuration = 500.millis
  val StartDelay: FiniteDuration = 100.millis

  trait Direction
  case object Up extends Direction
  case object Down extends Direction

  def props(action: () => Unit): Props = Props(new FluctuatingSchedulerActor(action))
}

private class FluctuatingSchedulerActor(action: () => Unit) extends Actor {

  import FluctuatingSchedulerActor._

  implicit def executionContext: ExecutionContext = context.system.dispatcher
  def scheduler: Scheduler = context.system.scheduler
  var scheduled: Option[Cancellable] = None

  override def receive: Receive = {
    case Start =>
      self ! ScheduleNext
      context.become(scheduling(StartDelay, direction = Down), discardOld = true)
  }

  def scheduling(delay: FiniteDuration, direction: Direction): Receive = {
    case ScheduleNext =>
      action()

      // Vary the delay by a random rate of up to 10%, bouncing between MinDelay and MaxDelay.
      val rate = Random.nextFloat / 10.0f
      val step = (delay.toMillis * rate).millis
      val (newDirection, newDelay) = direction match {
        case Up if delay + step < MaxDelay   => (Up, delay + step)
        case Up                              => (Down, delay - step)
        case Down if delay - step > MinDelay => (Down, delay - step)
        case Down                            => (Up, delay + step)
      }

      scheduled = Some(scheduler.scheduleOnce(delay, self, ScheduleNext))
      context.become(scheduling(newDelay, newDirection), discardOld = true)

    case Stop =>
      scheduled.foreach(_.cancel())
      context.stop(self)
  }
}
Example 36
Source File: PartialVector.scala From glint with MIT License | 5 votes |
package glint.models.server

import akka.actor.{Actor, ActorLogging}
import glint.partitioning.Partition
import spire.algebra.Semiring
import spire.implicits._

import scala.reflect.ClassTag

// Note: the original excerpt omitted the class declaration; a minimal skeleton is
// reconstructed here so the method compiles. The exact constructor and field
// definitions may differ upstream.
private[glint] class PartialVector[V: Semiring : ClassTag](partition: Partition) extends Actor with ActorLogging {

  // Size of this partition of the vector
  val size: Int = partition.size

  // Local backing array holding this partition's values
  val data: Array[V] = new Array[V](size)

  def get(keys: Array[Long]): Array[V] = {
    var i = 0
    val a = new Array[V](keys.length)
    while (i < keys.length) {
      val key = partition.globalToLocal(keys(i))
      a(i) = data(key)
      i += 1
    }
    a
  }

  log.info(s"Constructed PartialVector[${implicitly[ClassTag[V]]}] of size $size (partition id: ${partition.index})")
}
Example 37
Source File: PartialMatrix.scala From glint with MIT License | 5 votes |
package glint.models.server

import akka.actor.{Actor, ActorLogging}
import spire.algebra.Semiring
import spire.implicits._
import glint.partitioning.Partition

import scala.reflect.ClassTag

// Note: the original excerpt omitted the class declaration; a minimal skeleton is
// reconstructed here so the method compiles. The exact constructor and field
// definitions may differ upstream.
private[glint] class PartialMatrix[V: Semiring : ClassTag](partition: Partition, val rows: Int, val cols: Int) extends Actor with ActorLogging {

  // Local backing storage: one array of columns per local row
  val data: Array[Array[V]] = Array.fill(rows)(new Array[V](cols))

  def update(rows: Array[Long], cols: Array[Int], values: Array[V]): Boolean = {
    var i = 0
    while (i < rows.length) {
      val row = partition.globalToLocal(rows(i))
      val col = cols(i)
      data(row)(col) += values(i)
      i += 1
    }
    true
  }

  log.info(s"Constructed PartialMatrix[${implicitly[ClassTag[V]]}] with $rows rows and $cols columns (partition id: ${partition.index})")
}
Example 38
Source File: TokenizerWrapper.scala From dbpedia-spotlight-model with Apache License 2.0 | 5 votes |
package org.dbpedia.spotlight.db.concurrent import java.io.IOException import java.util.concurrent.TimeUnit import akka.actor.SupervisorStrategy.Restart import akka.actor.{Actor, ActorSystem, OneForOneStrategy, Props} import akka.pattern.ask import akka.routing.SmallestMailboxRouter import akka.util import org.apache.commons.lang.NotImplementedException import org.dbpedia.spotlight.db.model.{StringTokenizer, TextTokenizer} import org.dbpedia.spotlight.model.{Text, Token} import scala.concurrent.Await class TokenizerWrapper(val tokenizers: Seq[TextTokenizer]) extends TextTokenizer { var requestTimeout = 60 val system = ActorSystem() val workers = tokenizers.map { case tokenizer: TextTokenizer => system.actorOf(Props(new TokenizerActor(tokenizer))) }.seq def size: Int = tokenizers.size val router = system.actorOf(Props[TokenizerActor].withRouter( // This might be a hack SmallestMailboxRouter(scala.collection.immutable.Iterable(workers:_*)).withSupervisorStrategy( OneForOneStrategy(maxNrOfRetries = 10) { case _: IOException => Restart }) ) ) implicit val timeout = util.Timeout(requestTimeout, TimeUnit.SECONDS) override def tokenizeMaybe(text: Text) { val futureResult = router ? TokenizerRequest(text) Await.result(futureResult, timeout.duration) } override def tokenize(text: Text): List[Token] = { tokenizeMaybe(text) text.featureValue[List[Token]]("tokens").get } def tokenizeRaw(text: String): Seq[String] = { throw new NotImplementedException() } def close() { system.shutdown() } def getStringTokenizer: StringTokenizer = tokenizers.head.getStringTokenizer } class TokenizerActor(val tokenizer: TextTokenizer) extends Actor { def receive = { case TokenizerRequest(text) => { try { sender ! tokenizer.tokenizeMaybe(text) } catch { case e: NullPointerException => throw new IOException("Could not tokenize.") } } } } case class TokenizerRequest(text: Text)
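The routing setup above combines SmallestMailboxRouter with a supervisor strategy that restarts a routee on IOException. A stripped-down sketch of just that pattern, using the same (older) routing API as the example and hypothetical worker names:

import java.io.IOException

import akka.actor.SupervisorStrategy.Restart
import akka.actor.{ Actor, ActorSystem, OneForOneStrategy, Props }
import akka.routing.SmallestMailboxRouter

// Hypothetical worker: fails with IOException on "boom", otherwise replies with the length.
class LengthWorker extends Actor {
  def receive = {
    case "boom" => throw new IOException("simulated failure")
    case s: String => sender() ! s.length
  }
}

object RouterSketch extends App {
  val system = ActorSystem("router-sketch")
  val workers = (1 to 4).map(_ => system.actorOf(Props[LengthWorker]))
  // Route each message to the routee with the fewest queued messages; restart a
  // routee (up to 10 times) when it throws an IOException, as in the wrapper above.
  val router = system.actorOf(Props[LengthWorker].withRouter(
    SmallestMailboxRouter(scala.collection.immutable.Iterable(workers: _*)).withSupervisorStrategy(
      OneForOneStrategy(maxNrOfRetries = 10) { case _: IOException => Restart })))
  router ! "hello"
}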
Example 39
Source File: DynamoActor.scala From scala-spark-cab-rides-predictions with MIT License | 5 votes |
package actors

import akka.actor.{Actor, ActorLogging, Status}
import com.amazonaws.services.dynamodbv2.model.BatchWriteItemResult
import dynamodb.{CabImpl, WeatherImp}
import models.{CabPriceBatch, WeatherBatch}

import scala.concurrent.Future
import scala.util.{Failure, Success}

// Note: the original excerpt omitted the class declaration and receive block;
// a minimal skeleton is reconstructed here so the method compiles. The upstream
// actor also handles WeatherBatch; that handler was elided in this excerpt.
class DynamoActor extends Actor with ActorLogging {

  import context.dispatcher

  override def receive: Receive = {
    case cabPriceBatch: CabPriceBatch => putCabPrices(cabPriceBatch)
    case unknown => log.warning("received unknown message: " + unknown)
  }

  def putCabPrices(cabPriceBatch: CabPriceBatch): Unit = {
    val cabPrices = cabPriceBatch.cabPrices.toSeq
    log.info("received " + cabPrices.size + " number of cab price records")
    val result: Future[Seq[BatchWriteItemResult]] = CabImpl.put(cabPrices)
    result onComplete {
      case Success(_) => log.info("Cab Prices Batch processed on DynamoDB")
      case Failure(exception) => log.error(exception, "error processing Cab Prices batch on DynamoDB")
    }
  }
}
Example 40
Source File: AmqpSubscriberPerfSpec.scala From reliable-http-client with Apache License 2.0 | 5 votes |
package rhttpc.transport.amqp import akka.Done import akka.actor.{Actor, ActorSystem, Props} import akka.http.scaladsl.Http import akka.http.scaladsl.model.{HttpRequest, HttpResponse} import akka.pattern._ import akka.stream.ActorMaterializer import akka.testkit.{TestKit, TestProbe} import dispatch.url import org.scalatest.{BeforeAndAfterAll, FlatSpecLike, Ignore} import rhttpc.transport.{Deserializer, InboundQueueData, OutboundQueueData, Serializer} import scala.concurrent.duration._ import scala.concurrent.{Await, Future} import scala.util.{Random, Try} @Ignore class AmqpSubscriberPerfSpec extends TestKit(ActorSystem("AmqpSubscriberPerfSpec")) with FlatSpecLike with BeforeAndAfterAll { import system.dispatcher implicit val materializer = ActorMaterializer() implicit def serializer[Msg] = new Serializer[Msg] { override def serialize(obj: Msg): String = obj.toString } implicit def deserializer[Msg] = new Deserializer[Msg] { override def deserialize(value: String): Try[Msg] = Try(value.asInstanceOf[Msg]) } val queueName = "request" val outboundQueueData = OutboundQueueData(queueName, autoDelete = true, durability = false) val inboundQueueData = InboundQueueData(queueName, batchSize = 10, parallelConsumers = 10, autoDelete = true, durability = false) val count = 100 private val interface = "localhost" private val port = 8081 def handle(request: HttpRequest) = { val delay = 5 + Random.nextInt(10) after(delay.seconds, system.scheduler)(Future.successful(HttpResponse())) } it should "have a good throughput" in { val bound = Await.result( Http().bindAndHandleAsync( handle, interface, port ), 5.seconds ) val http = dispatch.Http() // .configure(_.setMaxConnections(count) // .setExecutorService(Executors.newFixedThreadPool(count))) val connection = Await.result(AmqpConnectionFactory.connect(system), 5 seconds) val transport = AmqpTransport( connection = connection ) val publisher = transport.publisher[String](outboundQueueData) val probe = TestProbe() val actor = system.actorOf(Props(new Actor { override def receive: Receive = { case str: String => http(url(s"http://$interface:$port") OK identity).map(_ => Done).pipeTo(self)(sender()) case Done => probe.ref ! Done sender() ! Done } })) val subscriber = transport.subscriber[String](inboundQueueData, actor) subscriber.start() try { measureMeanThroughput(count) { (1 to count).foreach { _ => publisher.publish("x") } probe.receiveWhile(10 minutes, messages = count) { case a => a } } } finally { Await.result(subscriber.stop(), 5.seconds) connection.close(5 * 1000) Await.result(bound.unbind(), 5.seconds) } } def measureMeanThroughput(count: Int)(consume: => Unit) = { val before = System.currentTimeMillis() consume val msgsPerSecond = count / ((System.currentTimeMillis() - before).toDouble / 1000) println(s"Throughput was: $msgsPerSecond msgs/sec") } override protected def afterAll(): Unit = { shutdown() } }
Example 41
Source File: QueueActor.scala From reliable-http-client with Apache License 2.0 | 5 votes |
package rhttpc.transport.inmem import akka.pattern._ import akka.actor.{Actor, ActorLogging, ActorRef, Props, Stash} import akka.routing.{RoundRobinRoutingLogic, Routee, Router} import akka.util.Timeout import rhttpc.transport.{Message, RejectingMessage} import scala.concurrent.Future import scala.concurrent.duration.FiniteDuration import scala.util.control.NonFatal private class QueueActor(consumeTimeout: FiniteDuration, retryDelay: FiniteDuration) extends Actor with Stash with ActorLogging { import context.dispatcher private var consumers = Map.empty[ActorRef, AskingActorRefRouteeWithSpecifiedMessageType] private var router = Router(RoundRobinRoutingLogic(), collection.immutable.IndexedSeq.empty) override def receive: Receive = { case RegisterConsumer(consumer, fullMessage) => val routee = AskingActorRefRouteeWithSpecifiedMessageType(consumer, consumeTimeout, handleResponse, fullMessage) consumers += consumer -> routee router = router.addRoutee(routee) log.debug(s"${self.path.name}: registered consumer, unstashing") unstashAll() case UnregisterConsumer(consumer) => log.debug(s"${self.path.name}: unregistered consumer") consumers.get(consumer).foreach { routee => consumers -= consumer router = router.removeRoutee(routee) } sender() ! ((): Unit) case msg: Message[_] => if (consumers.isEmpty) { log.debug(s"${self.path.name}: got message when no consumer registered, stashing") stash() implicit val timeout = Timeout(consumeTimeout) sender() ! ((): Unit) } else { router.route(msg, sender()) } } private def handleResponse(future: Future[Any], msg: Message[_]): Unit = future.recover { case ex: AskTimeoutException => log.error(ex, s"${self.path.name}: REJECT [${msg.content.getClass.getName}] because of ask timeout") case ex: Exception with RejectingMessage => log.error(ex, s"${self.path.name}: REJECT [${msg.content.getClass.getName}] because of rejecting failure") case NonFatal(ex) => log.error(ex, s"${self.path.name}: will RETRY [${msg.content.getClass.getName}] after $retryDelay because of failure") context.system.scheduler.scheduleOnce(retryDelay, self, msg) } } object QueueActor { def props(consumeTimeout: FiniteDuration, retryDelay: FiniteDuration): Props = Props( new QueueActor( consumeTimeout = consumeTimeout, retryDelay = retryDelay)) } private[inmem] case class AskingActorRefRouteeWithSpecifiedMessageType(ref: ActorRef, askTimeout: FiniteDuration, handleResponse: (Future[Any], Message[_]) => Unit, fullMessage: Boolean) extends Routee { override def send(message: Any, sender: ActorRef): Unit = { val typedMessage = message.asInstanceOf[Message[_]] val msgToSend = if (fullMessage) message else typedMessage.content handleResponse(ref.ask(msgToSend)(askTimeout, sender), typedMessage) } } private[inmem] case class RegisterConsumer(consumer: ActorRef, fullMessage: Boolean) private[inmem] case class UnregisterConsumer(consumer: ActorRef)
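The stash-until-registered technique in QueueActor is a general Akka pattern: messages arriving before the actor is ready are stashed, and unstashAll() re-delivers them once the precondition is met. A minimal self-contained sketch with hypothetical names:

import akka.actor.{ Actor, ActorRef, ActorSystem, Props, Stash }

case class Register(consumer: ActorRef)

class BufferingQueue extends Actor with Stash {
  def receive: Receive = waiting

  // No consumer yet: buffer everything.
  def waiting: Receive = {
    case Register(consumer) =>
      unstashAll() // re-deliver buffered messages to the new behavior
      context.become(forwarding(consumer))
    case _ => stash()
  }

  def forwarding(consumer: ActorRef): Receive = {
    case Register(other) => context.become(forwarding(other))
    case msg             => consumer forward msg
  }
}

object StashSketch extends App {
  val system = ActorSystem("stash-sketch")
  val queue = system.actorOf(Props[BufferingQueue])
  queue ! "early message" // stashed
  queue ! Register(system.actorOf(Props(new Actor {
    def receive = { case m => println(s"consumed: $m") }
  }))) // unstashes and forwards "early message"
}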
Example 42
Source File: TransportActor.scala From reliable-http-client with Apache License 2.0 | 5 votes |
package rhttpc.transport.inmem import akka.actor.{Actor, Props, Status} import scala.util.control.NonFatal private class TransportActor(queueActorProps: => Props) extends Actor { override def receive: Receive = { case GetOrCreateQueue(name) => try { val ref = context.child(name).getOrElse(context.actorOf(queueActorProps, name)) sender() ! ref } catch { case NonFatal(ex) => sender() ! Status.Failure(ex) } } } object TransportActor { def props(queueActorProps: => Props): Props = Props(new TransportActor(queueActorProps)) } private[inmem] case class GetOrCreateQueue(name: String)
Example 43
Source File: PromiseSubscriptionCommandsListener.scala From reliable-http-client with Apache License 2.0 | 5 votes |
package rhttpc.client.subscription import akka.actor.{Actor, Props, Status} import scala.concurrent.Promise private class PromiseSubscriptionCommandsListener(pubPromise: ReplyFuture, replyPromise: Promise[Any]) (subscriptionManager: SubscriptionManager) extends PublicationListener { import context.dispatcher override def subscriptionPromiseRegistered(sub: SubscriptionOnResponse): Unit = {} override def receive: Actor.Receive = { case RequestPublished(sub) => subscriptionManager.confirmOrRegister(sub, self) context.become(waitForMessage) case RequestAborted(sub, cause) => replyPromise.failure(cause) context.stop(self) } private val waitForMessage: Receive = { case MessageFromSubscription(Status.Failure(ex), sub) => replyPromise.failure(ex) context.stop(self) case MessageFromSubscription(msg, sub) => replyPromise.success(msg) context.stop(self) } pubPromise.pipeTo(this) } private[subscription] object PromiseSubscriptionCommandsListener { def props(pubPromise: ReplyFuture, replyPromise: Promise[Any]) (subscriptionManager: SubscriptionManager): Props = Props(new PromiseSubscriptionCommandsListener(pubPromise, replyPromise)(subscriptionManager)) }
Example 44
Source File: MessageDispatcherActor.scala From reliable-http-client with Apache License 2.0 | 5 votes |
package rhttpc.client.subscription

import akka.actor.{Actor, ActorLogging, ActorRef, Status}
import rhttpc.client.protocol.{Correlated, Exchange}

import scala.util.{Failure, Success}

private[subscription] class MessageDispatcherActor extends Actor with ActorLogging {

  private var promisesOnPending: Map[SubscriptionOnResponse, Option[PendingMessage]] = Map.empty
  private var subscriptions: Map[SubscriptionOnResponse, ActorRef] = Map.empty

  override def receive: Actor.Receive = {
    case RegisterSubscriptionPromise(sub) =>
      log.debug(s"Registering subscription promise: $sub")
      promisesOnPending += sub -> None

    case ConfirmOrRegisterSubscription(sub, consumer) =>
      promisesOnPending.get(sub).foreach { pending =>
        if (pending.nonEmpty) {
          log.debug(s"Confirming subscription: $sub. Sending outstanding messages: ${pending.size}.")
          pending.foreach { pending =>
            consumer.tell(MessageFromSubscription(pending.msg, sub), pending.sender)
          }
        } else {
          log.debug(s"Confirming subscription: $sub")
        }
        promisesOnPending -= sub
      }
      subscriptions += sub -> consumer

    case AbortSubscription(sub) =>
      promisesOnPending.get(sub) match {
        case Some(pending) if pending.isEmpty =>
          log.debug(s"Aborted subscription: $sub.")
          promisesOnPending -= sub
        case Some(pending) =>
          log.error(s"Aborted subscription: $sub. There were pending messages: ${pending.size}.")
          promisesOnPending -= sub
        case None =>
          log.warning(s"Aborted subscription promise: $sub was missing")
      }

    case Correlated(msg: Exchange[_, _], correlationId) =>
      val sub = SubscriptionOnResponse(correlationId)
      val underlyingOrFailure = msg.tryResponse match {
        case Success(underlying) => underlying
        case Failure(ex) => Status.Failure(ex)
      }
      (subscriptions.get(sub), promisesOnPending.get(sub)) match {
        case (Some(consumer), optionalPending) =>
          optionalPending.foreach { pending =>
            log.error(s"There were both registered subscription and subscription promise with pending messages: ${pending.size}.")
          }
          log.debug(s"Consuming message: $correlationId")
          subscriptions -= sub
          consumer forward MessageFromSubscription(underlyingOrFailure, sub) // consumer should ack
        case (None, Some(None)) =>
          log.debug(s"Adding pending message: $correlationId")
          promisesOnPending = promisesOnPending.updated(sub, Some(PendingMessage(underlyingOrFailure)))
        case (None, Some(Some(pending))) =>
          log.error(s"There already was pending message: $pending for subscription. Overriding it.")
          pending.ack()
          promisesOnPending = promisesOnPending.updated(sub, Some(PendingMessage(underlyingOrFailure)))
        case (None, None) =>
          log.error(s"No subscription (promise) registered for $correlationId. Will be skipped.")
          // TODO: DLQ
          sender() ! ((): Unit) // ack with a plain unit value, not the Unit companion object
      }
  }

  class PendingMessage private(val msg: Any, val sender: ActorRef) {
    def ack() = sender ! ((): Unit)
  }

  object PendingMessage {
    def apply(msg: Any): PendingMessage = new PendingMessage(msg, sender())
  }

}

private[subscription] case class RegisterSubscriptionPromise(sub: SubscriptionOnResponse)
private[subscription] case class ConfirmOrRegisterSubscription(sub: SubscriptionOnResponse, consumer: ActorRef)
private[subscription] case class AbortSubscription(sub: SubscriptionOnResponse)
Example 45
Source File: ModelTrainer.scala From recommendersystem with Apache License 2.0 | 5 votes |
package com.infosupport.recommendedcontent.core

import akka.actor.{Props, ActorLogging, Actor}
import org.apache.spark.SparkContext
import org.apache.spark.mllib.recommendation.{Rating, ALS, MatrixFactorizationModel}
import com.datastax.spark.connector._

// Note: the original excerpt omitted the class declaration; a minimal skeleton is
// reconstructed here so the method compiles. The message and props definitions
// may differ slightly upstream.
object ModelTrainer {
  case object TrainModel
  case class TrainingResult(model: MatrixFactorizationModel)

  def props(sc: SparkContext) = Props(new ModelTrainer(sc))
}

class ModelTrainer(sc: SparkContext) extends Actor with ActorLogging {

  import ModelTrainer._

  override def receive: Receive = {
    case TrainModel => trainModel()
  }

  private def trainModel() = {
    val table = context.system.settings.config.getString("cassandra.table")
    val keyspace = context.system.settings.config.getString("cassandra.keyspace")

    // Retrieve the ratings given by users from the database.
    // Map them to the rating structure needed by the Alternating Least Squares algorithm.
    val ratings = sc.cassandraTable(keyspace, table).map(record => Rating(record.get[Int]("user_id"),
      record.get[Int]("item_id"), record.get[Double]("rating")))

    // These settings control how well the predictions are going
    // to fit the actual observations we loaded from Cassandra.
    // Modify these to optimize the model!
    val rank = 10
    val iterations = 10
    val lambda = 0.01

    val model = ALS.train(ratings, rank, iterations, lambda)
    sender ! TrainingResult(model)

    context.stop(self)
  }
}
Example 46
Source File: RecommenderSystem.scala From recommendersystem with Apache License 2.0 | 5 votes |
package com.infosupport.recommendedcontent.core

import java.io.Serializable

import akka.actor.{Props, Actor, ActorLogging}
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.Path
import org.apache.spark.SparkContext
import org.apache.spark.mllib.recommendation.MatrixFactorizationModel

// Note: the original excerpt omitted the class declaration; a minimal skeleton is
// reconstructed here so the method compiles. The message definitions and the way
// the trained model is stored may differ upstream.
object RecommenderSystem {
  case class GenerateRecommendations(userId: Int, count: Int)
  case class Recommendation(contentItemId: Int, rating: Double) extends Serializable
  case class Recommendations(items: List[Recommendation]) extends Serializable

  def props(sc: SparkContext) = Props(new RecommenderSystem(sc))
}

class RecommenderSystem(sc: SparkContext) extends Actor with ActorLogging {

  import RecommenderSystem._

  private var model: Option[MatrixFactorizationModel] = None

  override def receive: Receive = {
    case GenerateRecommendations(userId, count) => generateRecommendations(userId, count)
  }

  private def generateRecommendations(userId: Int, count: Int) = {
    log.info(s"Generating ${count} recommendations for user with ID ${userId}")

    // Generate recommendations based on the machine learning model.
    // When there's no trained model return an empty list instead.
    val results = model match {
      case Some(m) => m.recommendProducts(userId, count)
        .map(rating => Recommendation(rating.product, rating.rating))
        .toList
      case None => Nil
    }

    sender ! Recommendations(results)
  }
}
Example 47
Source File: WindTurbineSimulator.scala From akka_streams_tutorial with MIT License | 5 votes |
package sample.stream_actor import akka.actor.{Actor, ActorLogging, Props} import akka.http.scaladsl.model.StatusCode import sample.stream_actor.WindTurbineSimulator._ case class WindTurbineSimulatorException(id: String) extends RuntimeException object WindTurbineSimulator { def props(id: String, endpoint: String) = Props(new WindTurbineSimulator(id, endpoint)) final case object Upgraded final case object Connected final case object Terminated final case class ConnectionFailure(ex: Throwable) final case class FailedUpgrade(statusCode: StatusCode) } class WindTurbineSimulator(id: String, endpoint: String) extends Actor with ActorLogging { implicit private val system = context.system implicit private val executionContext = system.dispatcher val webSocketClient = WebSocketClient(id, endpoint, self) override def receive: Receive = startup //initial state private def startup: Receive = { case Upgraded => log.info(s"$id : WebSocket upgraded") case FailedUpgrade(statusCode) => log.error(s"$id : Failed to upgrade WebSocket connection: $statusCode") throw WindTurbineSimulatorException(id) case ConnectionFailure(ex) => log.error(s"$id : Failed to establish WebSocket connection: $ex") throw WindTurbineSimulatorException(id) case Connected => log.info(s"$id : WebSocket connected") context.become(running) } private def running: Receive = { case Terminated => log.error(s"$id : WebSocket connection terminated") throw WindTurbineSimulatorException(id) case ConnectionFailure(ex) => log.error(s"$id : ConnectionFailure occurred: $ex") throw WindTurbineSimulatorException(id) } }
Example 48
Source File: Total.scala From akka_streams_tutorial with MIT License | 5 votes |
package sample.stream_actor import java.text.SimpleDateFormat import java.util.{Date, TimeZone} import akka.Done import akka.actor.Actor import sample.stream_actor.Total.Increment object Total { case class Increment(value: Long, avg: Double, id: String) } class Total extends Actor { var total: Long = 0 override def receive: Receive = { case Increment(value, avg, id) => println(s"Received $value new measurements from turbine with id: $id - Avg wind speed is: $avg") total = total + value val date = new Date() val df = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss") df.setTimeZone(TimeZone.getTimeZone("Europe/Zurich")) println(s"${df.format(date) } - Current total of all measurements: $total") sender ! Done } }
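Because Total replies Done to every Increment, it pairs naturally with a stream stage that waits for the acknowledgement before demanding the next element. A hedged sketch using mapAsync and ask; the wiring is illustrative, not the tutorial's actual graph:

import akka.Done
import akka.actor.{ ActorSystem, Props }
import akka.pattern.ask
import akka.stream.scaladsl.{ Sink, Source }
import akka.util.Timeout
import sample.stream_actor.Total
import sample.stream_actor.Total.Increment

import scala.concurrent.duration._

object TotalSketch extends App {
  implicit val system = ActorSystem("total-sketch")
  implicit val askTimeout: Timeout = 3.seconds

  val total = system.actorOf(Props[Total])
  // mapAsync(1) waits for the Done reply before asking for the next element.
  Source(1 to 10)
    .mapAsync(parallelism = 1)(i => (total ? Increment(i.toLong, avg = 0.0, id = "t1")).mapTo[Done])
    .runWith(Sink.ignore)
}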
Example 49
Source File: PrintMoreNumbers.scala From akka_streams_tutorial with MIT License | 5 votes |
package sample.stream_actor_simple import akka.actor.Actor import akka.stream.scaladsl.{Keep, Sink, Source} import akka.stream.{KillSwitches, UniqueKillSwitch} import scala.concurrent.duration._ class PrintMoreNumbers extends Actor { implicit val system = context.system implicit val executionContext = context.system.dispatcher private val (killSwitch: UniqueKillSwitch, done) = Source.tick(0.seconds, 1.second, 1) .scan(0)(_ + _) .map(_.toString) .viaMat(KillSwitches.single)(Keep.right) .toMat(Sink.foreach(println))(Keep.both) .run() done.map(_ => self ! "done") override def receive: Receive = { //When the actor is stopped, it will also stop the stream case "stop" => println("Stopping...") killSwitch.shutdown() case "done" => println("Done") context.stop(self) context.system.terminate() } }
Example 50
Source File: WebsocketClientActor.scala From akka_streams_tutorial with MIT License | 5 votes |
package alpakka.tcp_to_websockets.websockets import akka.actor.{Actor, ActorLogging, ActorRef, Props} import akka.http.scaladsl.model.StatusCode import alpakka.tcp_to_websockets.websockets.WebsocketClientActor._ import org.apache.commons.lang3.exception.ExceptionUtils import scala.concurrent.duration._ case class ConnectionException(cause: String) extends RuntimeException object WebsocketClientActor { def props(id: String, endpoint: String, websocketConnectionStatusActor: ActorRef) = Props(new WebsocketClientActor(id, endpoint, websocketConnectionStatusActor)) final case object Upgraded final case object Connected final case object Terminated final case class ConnectionFailure(ex: Throwable) final case class FailedUpgrade(statusCode: StatusCode) final case class SendMessage(msg: String) } class WebsocketClientActor(id: String, endpoint: String, websocketConnectionStatusActor: ActorRef) extends Actor with ActorLogging { implicit private val system = context.system implicit private val executionContext = system.dispatcher val webSocketClient = WebSocketClient(id, endpoint, self) override def receive: Receive = startup //initial state private def startup: Receive = { case Upgraded => log.info(s"Client$id: WebSocket upgraded") case FailedUpgrade(statusCode) => log.error(s"Client$id: failed to upgrade WebSocket connection: $statusCode") websocketConnectionStatusActor ! WebsocketConnectionStatusActor.Terminated throw ConnectionException(statusCode.toString()) case ConnectionFailure(ex) => log.error(s"Client $id: failed to establish WebSocket connection: $ex") websocketConnectionStatusActor ! WebsocketConnectionStatusActor.Terminated throw ConnectionException(ExceptionUtils.getRootCause(ex).getMessage) case Connected => log.info(s"Client $id: WebSocket connected") websocketConnectionStatusActor ! WebsocketConnectionStatusActor.Connected context.become(running) case SendMessage(msg) => log.warning(s"In state startup. Can not receive message: $msg. Resend after 2 seconds") system.scheduler.scheduleOnce(2.seconds, self, SendMessage(msg)) } private def running: Receive = { case SendMessage(msg) => log.info(s"About to send message to WebSocket: $msg") webSocketClient.sendToWebsocket(msg) case Terminated => log.error(s"Client $id: WebSocket connection terminated") websocketConnectionStatusActor ! WebsocketConnectionStatusActor.Terminated throw ConnectionException(s"Client $id: WebSocket connection terminated") case ConnectionFailure(ex) => log.error(s"Client $id: ConnectionFailure occurred: $ex") websocketConnectionStatusActor ! WebsocketConnectionStatusActor.Terminated throw ConnectionException(ExceptionUtils.getRootCause(ex).getMessage) } override def postStop(): Unit = { websocketConnectionStatusActor ! WebsocketConnectionStatusActor.Terminated } }
Example 51
Source File: WebsocketConnectionStatusActor.scala From akka_streams_tutorial with MIT License | 5 votes |
package alpakka.tcp_to_websockets.websockets import akka.actor.{Actor, ActorLogging, Props} import alpakka.tcp_to_websockets.websockets.WebsocketConnectionStatusActor.{Connected, ConnectionStatus, Terminated} object WebsocketConnectionStatusActor { def props(id: String, endpoint: String) = Props(new WebsocketConnectionStatusActor(id, endpoint)) final case object Connected final case object Terminated final case object ConnectionStatus } class WebsocketConnectionStatusActor(id: String, endpoint: String) extends Actor with ActorLogging { implicit private val system = context.system implicit private val executionContext = system.dispatcher var isConnected = false override def receive: Receive = { case Connected => isConnected = true log.info(s"Client $id: connected to: $endpoint") case Terminated => isConnected = false log.info(s"Client $id: terminated from: $endpoint") case ConnectionStatus => sender() ! isConnected } }
Example 52
Source File: TotalFake.scala From akka_streams_tutorial with MIT License | 5 votes |
package alpakka.kafka import akka.Done import akka.actor.Actor import alpakka.kafka.TotalFake.{IncrementMessage, IncrementWord} object TotalFake { case class IncrementWord(value: Int, id: String) case class IncrementMessage(value: Int, id: String) } class TotalFake extends Actor { var totalWords: Int = 0 var totalNews: Int = 0 override def receive: Receive = { case IncrementWord(value, id) => println(s"$id - WORD count fakeNews: $value (+ ${value - totalWords})") totalWords = value sender ! Done case IncrementMessage(value, id) => totalNews += value println(s"$id - MESSAGES count: $totalNews (+ $value)") sender ! Done } }
Example 53
Source File: TotalTweetsScheduler.scala From redrock with Apache License 2.0 | 5 votes |
package com.restapi

import java.io.{File, FileInputStream}

import akka.actor.{ActorRef, Actor, ActorSystem, Props}
import akka.io.IO
import org.slf4j.LoggerFactory
import play.api.libs.json.Json
import spray.can.Http
import akka.pattern.ask
import spray.http.DateTime
import scala.concurrent.duration._
import akka.util.Timeout
import scala.concurrent.ExecutionContext.Implicits.global
import org.apache.commons.codec.digest.DigestUtils
import scala.io.Source

case object GetTotalTweetsScheduler

object CurrentTotalTweets {
  @volatile
  var totalTweets: Long = 0
}

class ExecuterTotalTweetsES(delay: FiniteDuration, interval: FiniteDuration) extends Actor {
  context.system.scheduler.schedule(delay, interval) {
    getTotalTweetsES
  }

  val logger = LoggerFactory.getLogger(this.getClass)

  override def receive: Actor.Receive = {
    case GetTotalTweetsScheduler => {
      logger.info(s"Getting Total of Tweets. Begin: ${CurrentTotalTweets.totalTweets}")
    }
    case _ => // just ignore any messages
  }

  def getTotalTweetsES: Unit = {
    val elasticsearchRequests = new GetElasticsearchResponse(0, Array[String](), Array[String](),
      LoadConf.restConf.getString("searchParam.defaulStartDatetime"),
      LoadConf.restConf.getString("searchParam.defaultEndDatetime"),
      LoadConf.esConf.getString("decahoseIndexName"))
    val totalTweetsResponse = Json.parse(elasticsearchRequests.getTotalTweetsESResponse())
    logger.info(s"Getting Total of Tweets. Current: ${CurrentTotalTweets.totalTweets}")
    CurrentTotalTweets.totalTweets = (totalTweetsResponse \ "hits" \ "total").as[Long]
    logger.info(s"Total tweets updated. New: ${CurrentTotalTweets.totalTweets}")
  }
}
Example 54
Source File: HelloAkka.scala From sbt-reactive-app with Apache License 2.0 | 5 votes |
package hello.akka import akka.cluster.Cluster import akka.cluster.ClusterEvent._ import akka.actor.{ Actor, ActorSystem, Props } import akka.discovery._ import com.typesafe.config.ConfigFactory final case class Greet(name: String) class GreeterActor extends Actor { val cluster = Cluster(context.system) override def preStart = { cluster.subscribe(self, initialStateMode = InitialStateAsEvents, classOf[MemberEvent], classOf[UnreachableMember]) } override def postStop = { cluster.unsubscribe(self) } def receive = { case Greet(name) => println(s"Hello, $name") case MemberUp(member) => println(s"Member up: $member") case MemberRemoved(member, previousStatus) => println(s"Member down: $member") case _: MemberEvent => // ignore } } object HelloAkka { def main(args: Array[String]) = { startup() } def startup() = { val system = ActorSystem("ClusterSystem") val discovery = ServiceDiscovery(system).discovery val actor = system.actorOf(Props[GreeterActor], name = "GreeterActor") actor ! Greet("[unnamed]") } }
Example 55
Source File: DemoApp.scala From sbt-reactive-app with Apache License 2.0 | 5 votes |
package foo import akka.actor.{ Actor, ActorLogging, ActorSystem, Props } import akka.cluster.ClusterEvent.ClusterDomainEvent import akka.cluster.{ Cluster, ClusterEvent } import akka.http.scaladsl.Http import akka.http.scaladsl.model._ import akka.http.scaladsl.server.Directives._ import akka.management.AkkaManagement import akka.management.cluster.bootstrap.ClusterBootstrap import akka.stream.ActorMaterializer object DemoApp extends App { implicit val system = ActorSystem("Appka") import system.log implicit val mat = ActorMaterializer() val cluster = Cluster(system) log.info(s"Started [$system], cluster.selfAddress = ${cluster.selfAddress}") log.info("something2") //#start-akka-management AkkaManagement(system).start() //#start-akka-management ClusterBootstrap(system).start() cluster.subscribe( system.actorOf(Props[ClusterWatcher]), ClusterEvent.InitialStateAsEvents, classOf[ClusterDomainEvent]) // add real app routes here val routes = path("hello") { get { complete( HttpEntity( ContentTypes.`text/html(UTF-8)`, "<h1>Hello</h1>")) } } Http().bindAndHandle(routes, "0.0.0.0", 8080) Cluster(system).registerOnMemberUp({ log.info("Cluster member is up!") }) } class ClusterWatcher extends Actor with ActorLogging { val cluster = Cluster(context.system) override def receive = { case msg ⇒ log.info(s"Cluster ${cluster.selfAddress} >>> " + msg) } }
Example 56
Source File: ActorDemo.scala From logging with Apache License 2.0 | 5 votes |
package demo.test import java.net.InetAddress import akka.actor.{Props, Actor, ActorSystem} import com.persist.logging._ import logging_demo.BuildInfo import scala.concurrent.duration._ import scala.language.postfixOps import scala.concurrent.Await object DemoActor { def props() = Props(new DemoActor()) } class DemoActor() extends Actor with ActorLogging { println(this.getClass.getSimpleName) def receive = { case "foo" => log.info("Saw foo") case "done" => context.stop(self) case x: Any => log.error(Map("@msg" -> "Unexpected actor message", "message" -> x.toString)) } } case class ActorDemo(system: ActorSystem) { def demo(): Unit = { val a = system.actorOf(DemoActor.props(), name = "Demo") a ! "foo" a ! "bar" a ! "done" } } object ActorDemo { def main(args: Array[String]) { val system = ActorSystem("test") val host = InetAddress.getLocalHost.getHostName val loggingSystem = LoggingSystem(system, BuildInfo.name, BuildInfo.version, host) val act = ActorDemo(system) act.demo() Await.result(loggingSystem.stop, 30 seconds) Await.result(system.terminate(), 20 seconds) } }
Example 57
Source File: OtherApis.scala From logging with Apache License 2.0 | 5 votes |
package demo.test import java.net.InetAddress import akka.actor.{Props, Actor, ActorSystem} import com.persist.logging._ import logging_demo.BuildInfo import scala.concurrent.duration._ import scala.language.postfixOps import scala.concurrent.Await import org.slf4j.LoggerFactory case class Slf4jDemo() { val slf4jlog = LoggerFactory.getLogger(classOf[Slf4jDemo]) def demo(): Unit = { slf4jlog.warn("slf4j") } } object AkkaActor { def props() = Props(new AkkaActor()) } class AkkaActor() extends Actor with akka.actor.ActorLogging { def receive = { case "foo" => log.warning("Saw foo") case "done" => context.stop(self) case x: Any => log.error(s"Unexpected actor message: ${x}") } } case class AkkaDemo(system: ActorSystem) { def demo(): Unit = { val a = system.actorOf(AkkaActor.props(), name="Demo") a ! "foo" a ! "bar" a ! "done" } } object OtherApis { def main(args: Array[String]) { val system = ActorSystem("test") val host = InetAddress.getLocalHost.getHostName val loggingSystem = LoggingSystem(system, BuildInfo.name, BuildInfo.version, host) val slf = Slf4jDemo() slf.demo() val act = AkkaDemo(system) act.demo() Await.result(loggingSystem.stop, 30 seconds) Await.result(system.terminate(), 20 seconds) } }
Example 58
Source File: AkkaLogger.scala From logging with Apache License 2.0 | 5 votes |
package com.persist.logging import akka.actor.Actor import akka.event.Logging._ import LogActor.AkkaMessage private[logging] class AkkaLogger extends Actor { import LoggingLevels._ private def log(level: Level, source: String, clazz: Class[_], msg: Any, time:Long, cause: Option[Throwable] = None) { val m = AkkaMessage(time, level, source, clazz, msg, cause) LoggingState.akkaMsg(m) } def receive: PartialFunction[Any, Unit] = { case InitializeLogger(_) => sender ! LoggerInitialized case event@Error(cause, logSource, logClass, message) => val c = if (cause.toString().contains("NoCause$")) { None } else { Some(cause) } log(ERROR, logSource, logClass, message, event.timestamp, c) case event@Warning(logSource, logClass, message) => log(WARN, logSource, logClass, message, event.timestamp) case event@Info(logSource, logClass, message) => log(INFO, logSource, logClass, message, event.timestamp) case event@Debug(logSource, logClass, message) => log(DEBUG, logSource, logClass, message, event.timestamp) } }
Example 59
Source File: Persistence.scala From Principles-of-Reactive-Programming with GNU General Public License v3.0 | 5 votes |
package kvstore import akka.actor.{Props, Actor} import scala.util.Random import java.util.concurrent.atomic.AtomicInteger object Persistence { case class Persist(key: String, valueOption: Option[String], id: Long) case class Persisted(key: String, id: Long) class PersistenceException extends Exception("Persistence failure") def props(flaky: Boolean): Props = Props(classOf[Persistence], flaky) } class Persistence(flaky: Boolean) extends Actor { import Persistence._ def receive = { case Persist(key, _, id) => if (!flaky || Random.nextBoolean()) sender ! Persisted(key, id) else throw new PersistenceException } }
Example 60
Source File: Replicator.scala From Principles-of-Reactive-Programming with GNU General Public License v3.0 | 5 votes |
package kvstore import akka.actor.Props import akka.actor.Actor import akka.actor.ActorRef import scala.concurrent.duration._ object Replicator { case class Replicate(key: String, valueOption: Option[String], id: Long) case class Replicated(key: String, id: Long) case class Snapshot(key: String, valueOption: Option[String], seq: Long) case class SnapshotAck(key: String, seq: Long) def props(replica: ActorRef): Props = Props(new Replicator(replica)) } class Replicator(val replica: ActorRef) extends Actor { import Replicator._ import Replica._ import context.dispatcher def receive: Receive = { case _ => } }
Example 61
Source File: Replica.scala From Principles-of-Reactive-Programming with GNU General Public License v3.0 | 5 votes |
package kvstore import akka.actor.{ OneForOneStrategy, Props, ActorRef, Actor } import kvstore.Arbiter._ import scala.collection.immutable.Queue import akka.actor.SupervisorStrategy.Restart import scala.annotation.tailrec import akka.pattern.{ ask, pipe } import akka.actor.Terminated import scala.concurrent.duration._ import akka.actor.PoisonPill import akka.actor.OneForOneStrategy import akka.actor.SupervisorStrategy import akka.util.Timeout object Replica { sealed trait Operation { def key: String def id: Long } case class Insert(key: String, value: String, id: Long) extends Operation case class Remove(key: String, id: Long) extends Operation case class Get(key: String, id: Long) extends Operation sealed trait OperationReply case class OperationAck(id: Long) extends OperationReply case class OperationFailed(id: Long) extends OperationReply case class GetResult(key: String, valueOption: Option[String], id: Long) extends OperationReply def props(arbiter: ActorRef, persistenceProps: Props): Props = Props(new Replica(arbiter, persistenceProps)) } class Replica(val arbiter: ActorRef, persistenceProps: Props) extends Actor { import Replica._ import Replicator._ import Persistence._ import context.dispatcher val replica: Receive = { case _ => } }
Example 62
Source File: Arbiter.scala From Principles-of-Reactive-Programming with GNU General Public License v3.0 | 5 votes |
package kvstore import akka.actor.{ActorRef, Actor} import scala.collection.immutable object Arbiter { case object Join case object JoinedPrimary case object JoinedSecondary case class Replicas(replicas: Set[ActorRef]) } class Arbiter extends Actor { import Arbiter._ var leader: Option[ActorRef] = None var replicas = Set.empty[ActorRef] def receive = { case Join => if (leader.isEmpty) { leader = Some(sender) replicas += sender sender ! JoinedPrimary } else { replicas += sender sender ! JoinedSecondary } leader foreach (_ ! Replicas(replicas)) } }
Example 63
Source File: Tools.scala From Principles-of-Reactive-Programming with GNU General Public License v3.0 | 5 votes |
package kvstore import akka.actor.ActorSystem import scala.concurrent.duration.FiniteDuration import akka.testkit.TestProbe import akka.actor.{ ActorRef, Actor } import org.scalatest.Matchers import org.scalatest.FunSuiteLike import akka.actor.Props import akka.testkit.TestKit import akka.testkit.ImplicitSender import scala.concurrent.duration._ object Tools { class TestRefWrappingActor(val probe: TestProbe) extends Actor { def receive = { case msg => probe.ref forward msg } } } trait Tools { this: TestKit with FunSuiteLike with Matchers with ImplicitSender => import Arbiter._ import Tools._ def probeProps(probe: TestProbe): Props = Props(classOf[TestRefWrappingActor], probe) class Session(val probe: TestProbe, val replica: ActorRef) { import Replica._ @volatile private var seq = 0L private def nextSeq: Long = { val next = seq seq += 1 next } @volatile private var referenceMap = Map.empty[String, String] def waitAck(s: Long): Unit = probe.expectMsg(OperationAck(s)) def waitFailed(s: Long): Unit = probe.expectMsg(OperationFailed(s)) def set(key: String, value: String): Long = { referenceMap += key -> value val s = nextSeq probe.send(replica, Insert(key, value, s)) s } def setAcked(key: String, value: String): Unit = waitAck(set(key, value)) def remove(key: String): Long = { referenceMap -= key val s = nextSeq probe.send(replica, Remove(key, s)) s } def removeAcked(key: String): Unit = waitAck(remove(key)) def getAndVerify(key: String): Unit = { val s = nextSeq probe.send(replica, Get(key, s)) probe.expectMsg(GetResult(key, referenceMap.get(key), s)) } def get(key: String): Option[String] = { val s = nextSeq probe.send(replica, Get(key, s)) probe.expectMsgType[GetResult].valueOption } def nothingHappens(duration: FiniteDuration): Unit = probe.expectNoMsg(duration) } def session(replica: ActorRef)(implicit system: ActorSystem) = new Session(TestProbe(), replica) }
Example 64
Source File: IntegrationSpec.scala From Principles-of-Reactive-Programming with GNU General Public License v3.0 | 5 votes |
package kvstore import akka.actor.{ Actor, Props, ActorRef, ActorSystem } import akka.testkit.{ TestProbe, ImplicitSender, TestKit } import org.scalatest.{ BeforeAndAfterAll, FlatSpec, Matchers } import scala.concurrent.duration._ import org.scalatest.FunSuiteLike import org.scalactic.ConversionCheckedTripleEquals class IntegrationSpec(_system: ActorSystem) extends TestKit(_system) with FunSuiteLike with Matchers with BeforeAndAfterAll with ConversionCheckedTripleEquals with ImplicitSender with Tools { import Replica._ import Replicator._ import Arbiter._ def this() = this(ActorSystem("ReplicatorSpec")) override def afterAll: Unit = system.shutdown() }
Example 65
Source File: SidechainTransactionActor.scala From Sidechains-SDK with MIT License | 5 votes |
package com.horizen.api.http import akka.actor.{Actor, ActorRef, ActorSystem, Props} import com.horizen.SidechainTypes import com.horizen.api.http.SidechainTransactionActor.ReceivableMessages.BroadcastTransaction import scorex.core.NodeViewHolder.ReceivableMessages.LocallyGeneratedTransaction import scorex.core.network.NodeViewSynchronizer.ReceivableMessages.{FailedTransaction, SuccessfulTransaction} import scorex.util.{ModifierId, ScorexLogging} import scala.collection.concurrent.TrieMap import scala.concurrent.{ExecutionContext, Promise} class SidechainTransactionActor[T <: SidechainTypes#SCBT](sidechainNodeViewHolderRef: ActorRef)(implicit ec: ExecutionContext) extends Actor with ScorexLogging { private var transactionMap : TrieMap[String, Promise[ModifierId]] = TrieMap() override def preStart(): Unit = { context.system.eventStream.subscribe(self, classOf[SuccessfulTransaction[T]]) context.system.eventStream.subscribe(self, classOf[FailedTransaction]) } protected def broadcastTransaction: Receive = { case BroadcastTransaction(transaction) => val promise = Promise[ModifierId] val future = promise.future transactionMap(transaction.id) = promise sender() ! future sidechainNodeViewHolderRef ! LocallyGeneratedTransaction[SidechainTypes#SCBT](transaction) } protected def sidechainNodeViewHolderEvents: Receive = { case SuccessfulTransaction(transaction) => transactionMap.remove(transaction.id) match { case Some(promise) => promise.success(transaction.id) case None => } case FailedTransaction(transactionId, throwable, _) => transactionMap.remove(transactionId) match { case Some(promise) => promise.failure(throwable) case None => } } override def receive: Receive = { broadcastTransaction orElse sidechainNodeViewHolderEvents orElse { case message: Any => log.error("SidechainTransactionActor received strange message: " + message) } } } object SidechainTransactionActor { object ReceivableMessages { case class BroadcastTransaction[T <: SidechainTypes#SCBT](transaction: T) } } object SidechainTransactionActorRef { def props(sidechainNodeViewHolderRef: ActorRef) (implicit ec: ExecutionContext): Props = Props(new SidechainTransactionActor(sidechainNodeViewHolderRef)) def apply(sidechainNodeViewHolderRef: ActorRef) (implicit system: ActorSystem, ec: ExecutionContext): ActorRef = system.actorOf(props(sidechainNodeViewHolderRef)) }
Example 66
Source File: Worker.scala From EncryCore with GNU General Public License v3.0 | 5 votes |
package encry.local.miner import java.util.Date import akka.actor.{Actor, ActorRef} import encry.EncryApp._ import scala.concurrent.duration._ import encry.consensus.{CandidateBlock, ConsensusSchemeReaders} import encry.local.miner.Miner.MinedBlock import encry.local.miner.Worker.{MineBlock, NextChallenge} import java.text.SimpleDateFormat import com.typesafe.scalalogging.StrictLogging import org.encryfoundation.common.utils.constants.TestNetConstants class Worker(myIdx: Int, numberOfWorkers: Int, miner: ActorRef) extends Actor with StrictLogging { val sdf: SimpleDateFormat = new SimpleDateFormat("HH:mm:ss") var challengeStartTime: Date = new Date(System.currentTimeMillis()) val initialNonce: Long = Long.MaxValue / numberOfWorkers * myIdx override def preRestart(reason: Throwable, message: Option[Any]): Unit = logger.warn(s"Worker $myIdx is restarting because of: $reason") override def receive: Receive = { case MineBlock(candidate: CandidateBlock, nonce: Long) => logger.info(s"Trying nonce: $nonce. Start nonce is: $initialNonce. " + s"Iter qty: ${nonce - initialNonce + 1} on worker: $myIdx with diff: ${candidate.difficulty}") ConsensusSchemeReaders .consensusScheme.verifyCandidate(candidate, nonce) .fold( e => { self ! MineBlock(candidate, nonce + 1) logger.info(s"Mining failed cause: $e") }, block => { logger.info(s"New block is found: (${block.header.height}, ${block.header.encodedId}, ${block.payload.txs.size} " + s"on worker $self at ${sdf.format(new Date(System.currentTimeMillis()))}. Iter qty: ${nonce - initialNonce + 1}") miner ! MinedBlock(block, myIdx) }) case NextChallenge(candidate: CandidateBlock) => challengeStartTime = new Date(System.currentTimeMillis()) logger.info(s"Start next challenge on worker: $myIdx at height " + s"${candidate.parentOpt.map(_.height + 1).getOrElse(TestNetConstants.PreGenesisHeight.toString)} at ${sdf.format(challengeStartTime)}") self ! MineBlock(candidate, Long.MaxValue / numberOfWorkers * myIdx) } } object Worker { case class NextChallenge(candidateBlock: CandidateBlock) case class MineBlock(candidateBlock: CandidateBlock, nonce: Long) }
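Each worker starts from a disjoint slice of the nonce space (Long.MaxValue / numberOfWorkers * myIdx), so no two workers ever try the same nonce. The slicing reduces to a one-liner that can be checked in isolation (names are illustrative):

object NonceSlices {
  // Worker i of n starts at i * (Long.MaxValue / n) and counts upward, so the
  // search ranges of the workers never overlap.
  def initialNonce(workerIdx: Int, numberOfWorkers: Int): Long =
    Long.MaxValue / numberOfWorkers * workerIdx

  // With 4 workers the starting points split the nonce space into quarters:
  // initialNonce(1, 4) - initialNonce(0, 4) == Long.MaxValue / 4
}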
Example 67
Source File: Zombie.scala From EncryCore with GNU General Public License v3.0 | 5 votes |
package encry.stats import akka.actor.{Actor, DeadLetter, UnhandledMessage} import com.typesafe.scalalogging.StrictLogging class Zombie extends Actor with StrictLogging { override def preStart(): Unit = { context.system.eventStream.subscribe(self, classOf[DeadLetter]) context.system.eventStream.subscribe(self, classOf[UnhandledMessage]) } override def receive: Receive = { case deadMessage: DeadLetter => logger.info(s"Dead letter: ${deadMessage.toString}." + s"From: ${deadMessage.sender}. To ${deadMessage.recipient}") case unhandled: UnhandledMessage => logger.info(s"Unhandled letter: ${unhandled.toString}. " + s"From: ${unhandled.sender}. To ${unhandled.recipient}") } }
Example 68
Source File: Process.scala From process with Apache License 2.0 | 5 votes |
package processframework import akka.actor.Actor object Process { case object GetState trait Event trait AbortCommand trait AbortEvent extends Event } trait Process[State] extends Actor { val process: ProcessStep[State] var state: State final def sendToProcess(msg: Any): Unit = unhandled(msg) override def unhandled(msg: Any): Unit = msg match { case x if process.handleReceiveCommand.isDefinedAt(x) ⇒ val event = process.handleReceiveCommand(x) self ! event case event: Process.Event ⇒ state = process.handleUpdateState(event)(state) case Process.GetState ⇒ sender() ! state } }
Example 69
Source File: ProcessStep.scala From process with Apache License 2.0 | 5 votes |
package processframework import scala.concurrent.duration.Duration import scala.concurrent.{ ExecutionContext, Future, Promise } import scala.reflect.ClassTag import akka.actor.{ Actor, ActorContext, ActorRef, Props } import akka.util.Timeout trait ProcessStep[S] { implicit def context: ActorContext private[processframework] val promise: Promise[Unit] = Promise[Unit]() type Execution = S ⇒ Unit type UpdateFunction = PartialFunction[Process.Event, S ⇒ S] type CommandToEvent = PartialFunction[Any, Process.Event] def execute()(implicit process: ActorRef): Execution def receiveCommand: CommandToEvent def updateState: UpdateFunction def retryInterval: Duration = Duration.Inf final def isCompleted = promise.isCompleted final def markDone(): Unit = promise.trySuccess(()) final def markDone(newState: S): S = { markDone() newState } private[processframework] def abort(): Unit = promise.tryFailure(new RuntimeException("Process aborted")) final def onComplete(completeFn: ((ActorContext, S)) ⇒ Unit)(implicit executionContext: ExecutionContext, process: ActorRef): Unit = promise.future.foreach { _ ⇒ process ! PersistentProcess.Perform(completeFn) } final def onCompleteAsync(completeFn: ⇒ Unit)(implicit executionContext: ExecutionContext): Unit = promise.future.foreach(_ ⇒ completeFn) final def ~>(next: ProcessStep[S]*)(implicit context: ActorContext): ProcessStep[S] = new Chain(this, next: _*) private[processframework] def run()(implicit process: ActorRef, executionContext: ExecutionContext, classTag: ClassTag[S]): Future[Unit] = runImpl private val innerActor = context.actorOf(Props(new Actor { def receive = { case msg if receiveCommand.isDefinedAt(msg) ⇒ val event = receiveCommand(msg) context.parent ! event } })) private[processframework] def handleUpdateState: UpdateFunction = if (isCompleted) PartialFunction.empty[Process.Event, S ⇒ S] else updateState private[processframework] def handleReceiveCommand: CommandToEvent = if (isCompleted) PartialFunction.empty[Any, Process.Event] else receiveCommand private[processframework] def executeWithPossibleRetry()(implicit process: ActorRef): Execution = { state ⇒ implicit val _ = context.dispatcher if (retryInterval.isFinite()) context.system.scheduler.scheduleOnce(Duration.fromNanos(retryInterval.toNanos)) { if (!isCompleted) executeWithPossibleRetry()(process)(state) } execute()(process)(state) } private[processframework] def runImpl()(implicit process: ActorRef, executionContext: ExecutionContext, classTag: ClassTag[S]): Future[Unit] = { import akka.pattern.ask import scala.concurrent.duration._ implicit val timeout: Timeout = 5 seconds if (!isCompleted) (process ? Process.GetState).mapTo[S].foreach(executeWithPossibleRetry()(innerActor)) promise.future } }
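A concrete step supplies three pieces: an execute side effect, a receiveCommand that turns incoming messages into events, and an updateState that folds events into the process state, typically ending in markDone. A minimal hedged sketch with hypothetical domain types; steps would then compose with ~> as in step1 ~> step2:

import akka.actor.{ ActorContext, ActorRef }
import processframework.{ Process, ProcessStep }

case class OrderState(validated: Boolean)
case object Validate
case object Validated extends Process.Event

class ValidateStep(implicit val context: ActorContext) extends ProcessStep[OrderState] {
  // Side effect: kick off the work; here we simply send a command back to the
  // process, where a real step would message an external worker or service.
  def execute()(implicit process: ActorRef): Execution = { state =>
    process ! Validate
  }

  // Translate an incoming command into a process event.
  def receiveCommand: CommandToEvent = { case Validate => Validated }

  // Fold the event into the state and mark this step as completed.
  def updateState: UpdateFunction = {
    case Validated => state => markDone(state.copy(validated = true))
  }
}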
Example 70
Source File: ProcessStepTestSupport.scala From process with Apache License 2.0 | 5 votes |
package processframework import akka.pattern.ask import akka.actor.{ ActorRef, ActorContext, Actor, Props } import akka.util.Timeout import scala.concurrent.duration._ import scala.concurrent.Await import scala.reflect.ClassTag import akka.testkit.{ TestProbe, TestKit } import org.scalatest.BeforeAndAfterEach object ProcessStepTestSupport { case object GetStep case object ACommand case object AnEvent extends Process.Event } trait ProcessStepTestSupport[S, PS <: ProcessStep[S]] { this: TestKit with BeforeAndAfterEach ⇒ implicit val timeout: Timeout = 1 second var testProbe: TestProbe = null var processActor: ActorRef = null override protected def beforeEach(): Unit = { testProbe = createTestProbe() processActor = createProcessActor() } def createTestProbe(): TestProbe def createProcessStep(executeProbe: TestProbe)(implicit context: ActorContext): PS def createProcessActor() = system.actorOf(Props(new Actor { val step = createProcessStep(testProbe) def receive = { case msg if sender() == step ⇒ testActor forward msg case ProcessStepTestSupport.GetStep ⇒ sender() ! step case e: Process.Event ⇒ testActor ! e } })) def processStep()(implicit classTag: ClassTag[PS]): PS = Await.result[PS]((processActor ? ProcessStepTestSupport.GetStep).mapTo[PS], 2 seconds) }
Example 71
Source File: LocalApplicationMaster.scala From DataXServer with Apache License 2.0 | 5 votes |
package org.tianlangstudio.data.hamal.yarn.local

import java.util.UUID

import akka.actor.{Actor, ActorLogging, Props}
import org.slf4j.LoggerFactory
import org.tianlangstudio.data.hamal.core.{Constants, HamalConf}
import org.tianlangstudio.data.hamal.server.thrift.ThriftServerApp
import org.tianlangstudio.data.hamal.yarn.{ApplyExecutor, TaskScheduler}
import org.tianlangstudio.data.hamal.yarn.thrift.AkkaThriftTaskHandler
import org.tianlangstudio.data.hamal.yarn.util.AkkaUtils

/**
 * Created by zhuhq on 2016/5/3.
 * Requests execution resources on the local machine and runs tasks in batches as separate processes.
 */
object LocalApplicationMaster extends App {

  val logging = org.slf4j.LoggerFactory.getLogger(classOf[LocalApplicationMaster])
  val dataxConf = new HamalConf()

  logging.info("create master actor system begin")
  val schedulerHost = dataxConf.getString(Constants.DATAX_MASTER_HOST, "127.0.0.1")
  val (schedulerSystem, schedulerPort) = AkkaUtils.createActorSystem(Constants.AKKA_JOB_SCHEDULER_SYSTEM, schedulerHost, 0, dataxConf)
  logging.info(s"create master actor system end on port $schedulerPort")

  val amActor = schedulerSystem.actorOf(Props(classOf[LocalApplicationMaster], dataxConf), Constants.AKKA_AM_ACTOR)
  val taskSchedulerActor = schedulerSystem.actorOf(Props(classOf[TaskScheduler], dataxConf, amActor), Constants.AKKA_JOB_SCHEDULER_ACTOR)

  taskSchedulerActor ! "start taskSchedulerActor"

  logging.info(s"start thrift server begin")
  val thriftPort = dataxConf.getInt(Constants.THRIFT_SERVER_PORT, 9777)
  val thriftHost = dataxConf.getString(Constants.THRIFT_SERVER_HOST, "127.0.0.1")
  val thriftConcurrence = dataxConf.getInt(Constants.THRIFT_SERVER_CONCURRENCE, 8)
  val thriftServerHandler = new AkkaThriftTaskHandler(taskSchedulerActor)
  logging.info(s"start thrift server on $thriftHost:$thriftPort")
  ThriftServerApp.start(thriftHost, thriftPort, thriftServerHandler)

}

class LocalApplicationMaster(dataxConf: HamalConf) extends Actor with ActorLogging {

  private val logger = LoggerFactory.getLogger(getClass)

  val runEnv = dataxConf.getString(Constants.RUN_ENV, Constants.RUN_ENV_PRODUCTION).toLowerCase()
  logger.info("run env:{}", runEnv)

  val containerCmd = if (Constants.RUN_ENV_DEVELOPMENT.equals(runEnv)) {
    s"""
       |java ${System.getProperty("java.class.path")}
       | -Ddatax.home=${dataxConf.getString(Constants.DATAX_HOME)} -Xms512M -Xmx1024M
       | -XX:PermSize=128M -XX:MaxPermSize=512M com.tianlangstudio.data.datax.Executor
     """.stripMargin
  } else {
    dataxConf.getString(Constants.DATAX_EXECUTOR_CMD, "./startLocalExecutor.sh")
  }

  override def receive: Receive = {
    case msg: String =>
      log.info(s"${self.path} receive msg: $msg")
    case ApplyExecutor(num) =>
      applyExecutor(num)
  }

  private def applyExecutor(num: Int): Unit = {
    log.info(s"apply executor num $num")
    for (i <- 0 until num) {
      sys.process.stringToProcess(
        containerCmd + " " +
          LocalApplicationMaster.schedulerHost + ":" + LocalApplicationMaster.schedulerPort +
          " " + UUID.randomUUID().toString).run()
      log.info(s"apply executor ${i + 1}/$num")
    }
  }
}
Example 72
Source File: CouchbaseStatements.scala From akka-persistence-couchbase with Apache License 2.0 | 5 votes |
package akka.persistence.couchbase.journal import java.util.concurrent.TimeUnit import akka.actor.{Actor, ActorLogging} import akka.persistence.couchbase.CouchbaseJournalConfig import com.couchbase.client.java.Bucket import com.couchbase.client.java.document.JsonDocument import com.couchbase.client.java.document.json.JsonArray import com.couchbase.client.java.view._ import rx.Observable import rx.functions.Func1 import scala.collection.immutable.Seq import scala.concurrent.ExecutionContext import scala.util.{Failure, Try} trait CouchbaseStatements extends Actor with ActorLogging { def config: CouchbaseJournalConfig def bucket: Bucket implicit def executionContext: ExecutionContext def bySequenceNr(persistenceId: String, from: Long, to: Long): ViewQuery = { ViewQuery .from("journal", "by_sequenceNr") .stale(config.stale) .startKey(JsonArray.from(persistenceId, from.asInstanceOf[AnyRef])) .endKey(JsonArray.from(persistenceId, to.asInstanceOf[AnyRef])) } def nextKey(name: String): Try[String] = { Try { val counterKey = s"counter::$name" val counter = bucket.counter(counterKey, 1L, 0L).content() s"$name-$counter" } } }
Example 73
Source File: CouchbaseStatements.scala From akka-persistence-couchbase with Apache License 2.0 | 5 votes |
package akka.persistence.couchbase.snapshot import java.util.concurrent.TimeUnit import akka.actor.{Actor, ActorLogging} import akka.persistence.couchbase.CouchbaseSnapshotStoreConfig import com.couchbase.client.java.Bucket import com.couchbase.client.java.document.JsonDocument import com.couchbase.client.java.document.json.JsonArray import com.couchbase.client.java.view.ViewQuery import scala.concurrent.ExecutionContext import scala.util.{Failure, Try} trait CouchbaseStatements extends Actor with ActorLogging { def config: CouchbaseSnapshotStoreConfig def bucket: Bucket implicit def executionContext: ExecutionContext def bySequenceNr(persistenceId: String, maxSequenceNr: Long): ViewQuery = { ViewQuery .from("snapshots", "by_sequenceNr") .stale(config.stale) .descending(true) .startKey(JsonArray.from(persistenceId, maxSequenceNr.asInstanceOf[AnyRef])) .endKey(JsonArray.from(persistenceId, Long.MinValue.asInstanceOf[AnyRef])) } def byTimestamp(persistenceId: String, maxTimestamp: Long): ViewQuery = { ViewQuery .from("snapshots", "by_timestamp") .stale(config.stale) .descending(true) .startKey(JsonArray.from(persistenceId, maxTimestamp.asInstanceOf[AnyRef])) .endKey(JsonArray.from(persistenceId, Long.MinValue.asInstanceOf[AnyRef])) } def all(persistenceId: String): ViewQuery = { ViewQuery .from("snapshots", "all") .stale(config.stale) .descending(true) .key(persistenceId) } def executeSave(snapshotMessage: SnapshotMessage): Try[Unit] = { Try(SnapshotMessageKey.fromMetadata(snapshotMessage.metadata).value).flatMap { key => Try { val jsonObject = SnapshotMessage.serialize(snapshotMessage) val jsonDocument = JsonDocument.create(key, jsonObject) bucket.upsert( jsonDocument, config.persistTo, config.replicateTo, config.timeout.toSeconds, TimeUnit.SECONDS ) log.debug("Wrote snapshot: {}", key) } recoverWith { case e => log.error(e, "Writing snapshot: {}", key) Failure(e) } } } }
Example 74
Source File: Logging.scala From wookiee with Apache License 2.0 | 5 votes |
package com.webtrends.harness.logging import java.util.logging.Level import java.util.logging.Level._ import akka.actor.Actor import scala.util.Try trait LoggingAdapter { @transient protected lazy val log: Logger = Logger(getClass) // Will log the error if the input function throws one and return a Try def tryAndLogError[A](f: => A, messageOnFail: Option[String] = None, level: Level = WARNING): Try[A] = { val tried = Try(f) if (tried.isFailure) { val ex = tried.failed.get val message = messageOnFail.getOrElse(ex.getMessage) level match { case SEVERE => log.error(message, ex) case INFO => log.info(message, ex) case FINE => log.info(message, ex) case CONFIG => log.debug(message, ex) case FINER => log.debug(message, ex) case FINEST => log.trace(message, ex) case WARNING => log.warn(message, ex) case _ => log.warn(message, ex) } } tried } }
Example 75
Source File: LoggingActor.scala From wookiee with Apache License 2.0 | 5 votes |
package com.webtrends.harness.logging

import akka.actor.Actor
import akka.event.Logging.{LoggerInitialized, InitializeLogger}
import com.webtrends.harness.health.ActorHealth

class LoggingActor extends Actor with ActorHealth with Slf4jLogging with ActorLoggingAdapter {

  // Are we routing non-akka logging events to this actor for processing?
  val routeLogging = context.system.settings.config.getBoolean("logging.use-actor")

  override def preStart(): Unit = {
    if (routeLogging) {
      Logger.registerMediator(self)
    }
    log.info("Logging Manager started: {}", context.self.path)
  }

  override def postStop(): Unit = {
    if (routeLogging) {
      Logger.unregisterMediator(self)
    }
    log.info("Logging Manager stopped: {}", context.self.path)
  }

  def receive = health orElse {
    case InitializeLogger(_) =>
      sender() ! LoggerInitialized
    // webtrends log events
    case event: LogEvent =>
      process(event)
    // akka events
    case event: akka.event.Logging.LogEvent =>
      process(event)
  }
}
Example 76
Source File: ActorWaitHelper.scala From wookiee with Apache License 2.0 | 5 votes |
package com.webtrends.harness.utils

import java.util.concurrent.TimeUnit

import akka.actor.{Actor, ActorRef, ActorSystem, Props}
import akka.util.Timeout

import scala.concurrent.Await

object ActorWaitHelper {

  // Will wait until an actor has come up before returning its ActorRef
  def awaitActor(props: Props, system: ActorSystem, actorName: Option[String] = None)
                (implicit timeout: Timeout = Timeout(5, TimeUnit.SECONDS)): ActorRef = {
    val actor = actorName match {
      case Some(name) => system.actorOf(props, name)
      case None => system.actorOf(props)
    }
    awaitActorRef(actor, system)
  }

  // Will wait until an actor has come up before returning its ActorRef
  def awaitActorRef(actor: ActorRef, system: ActorSystem)
                   (implicit timeout: Timeout = Timeout(5, TimeUnit.SECONDS)): ActorRef = {
    Await.result(system.actorSelection(actor.path).resolveOne(), timeout.duration)
    actor
  }
}

trait ActorWaitHelper {
  this: Actor =>

  // Will wait until an actor has come up before returning its ActorRef
  def awaitActor(props: Props, actorName: Option[String] = None)
                (implicit timeout: Timeout = Timeout(5, TimeUnit.SECONDS)): ActorRef =
    ActorWaitHelper.awaitActor(props, context.system, actorName)(timeout)
}
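A usage sketch (not from the original source; Greeter and AwaitDemo are illustrative names) showing why awaitActor is useful outside of an actor: the ActorRef is only returned once the actor is resolvable, so a message sent immediately afterwards cannot race the actor's startup.

import java.util.concurrent.TimeUnit
import akka.actor.{Actor, ActorSystem, Props}
import akka.util.Timeout
import com.webtrends.harness.utils.ActorWaitHelper

class Greeter extends Actor {
  def receive: Receive = { case name: String => sender ! s"hello, $name" }
}

object AwaitDemo extends App {
  implicit val timeout: Timeout = Timeout(5, TimeUnit.SECONDS)
  val system = ActorSystem("await-demo")
  // Returns only after the actor is resolvable via actorSelection
  val greeter = ActorWaitHelper.awaitActor(Props[Greeter], system)
  greeter ! "world"
}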
Example 77
Source File: InternalHTTP.scala From wookiee with Apache License 2.0 | 5 votes |
package com.webtrends.harness.http

import akka.actor.{Props, ActorRef, Actor}

trait InternalHTTP {
  this: Actor =>

  var httpRef: Option[ActorRef] = None

  def startInternalHTTP(port: Int): ActorRef = {
    httpRef = Some(context.actorOf(Props(classOf[SimpleHttpServer], port), InternalHTTP.InternalHttpName))
    httpRef.get
  }
}

object InternalHTTP {
  val InternalHttpName = "Internal-Http"
}
Example 78
Source File: HealthCheckProvider.scala From wookiee with Apache License 2.0 | 5 votes |
package com.webtrends.harness.health

import java.util.jar.Attributes.Name
import java.util.jar.{Attributes, JarFile}

import akka.actor.Actor
import akka.pattern._
import akka.util.Timeout
import com.webtrends.harness.HarnessConstants
import com.webtrends.harness.logging.ActorLoggingAdapter
import com.webtrends.harness.service.messages.CheckHealth
import com.webtrends.harness.utils.ConfigUtil
import org.joda.time.DateTime

import scala.collection.mutable
import scala.concurrent.duration._
import scala.concurrent.{Future, Promise}
import scala.util.{Failure, Success}

trait HealthCheckProvider {
  this: Actor with ActorLoggingAdapter =>

  val upTime = DateTime.now
  implicit val timeout = ConfigUtil.getDefaultTimeout(context.system.settings.config,
    HarnessConstants.KeyDefaultTimeout, Timeout(15 seconds))

  val scalaVersion = util.Properties.versionString
  val file = getClass.getProtectionDomain.getCodeSource.getLocation.getFile

  val manifest = file match {
    case _ if file.endsWith(".jar") =>
      new JarFile(file).getManifest
    case _ =>
      val man = new java.util.jar.Manifest()
      man.getMainAttributes.put(Name.IMPLEMENTATION_TITLE, "Webtrends Harness Service")
      man.getMainAttributes.put(Name.IMPLEMENTATION_VERSION, "develop-SNAPSHOT")
      man.getMainAttributes.put(new Attributes.Name("Implementation-Build"), "N/A")
      man
  }

  val application = manifest.getMainAttributes.getValue(Name.IMPLEMENTATION_TITLE)
  val version = manifest.getMainAttributes.getValue(Name.IMPLEMENTATION_VERSION)
  val alerts: mutable.Buffer[ComponentHealth] = mutable.Buffer()

  def runChecks: Future[ApplicationHealth] = {
    import context.dispatcher

    // Ask for the health of each component
    val future = (context.actorSelection(HarnessConstants.ActorPrefix) ? CheckHealth).mapTo[Seq[HealthComponent]]
    val p = Promise[ApplicationHealth]

    future.onComplete({
      case Success(checks) =>
        // Rollup alerts for any critical or degraded components
        checks.foreach(checkComponents)
        // Rollup the statuses
        val overallHealth = rollupStatuses(alerts)
        alerts.clear()
        p success ApplicationHealth(application, version, upTime, overallHealth.state, overallHealth.details, checks)
      case Failure(e) =>
        log.error("An error occurred while fetching the health request results", e)
        p success ApplicationHealth(application, version, upTime, ComponentState.CRITICAL, e.getMessage, Nil)
    })

    p.future
  }
}
Example 79
Source File: ActorHealth.scala From wookiee with Apache License 2.0 | 5 votes |
package com.webtrends.harness.health

import akka.actor.{Actor, ActorRef}
import akka.pattern._
import akka.util.Timeout
import com.webtrends.harness.HarnessConstants
import com.webtrends.harness.logging.Logger
import com.webtrends.harness.service.messages.CheckHealth
import com.webtrends.harness.utils.ConfigUtil

import scala.concurrent.duration._
import scala.concurrent.{Future, Promise}
import scala.util.{Failure, Success, Try}

trait ActorHealth {
  this: Actor =>

  private val _log = Logger(this, context.system)

  import context.dispatcher

  implicit val checkTimeout: Timeout =
    ConfigUtil.getDefaultTimeout(context.system.settings.config,
      HarnessConstants.KeyDefaultTimeout, Timeout(15 seconds))

  def health: Receive = {
    case CheckHealth =>
      pipe(Try(checkHealth)
        .recover({
          case e: Exception =>
            _log.error("Error fetching health", e)
            Future.successful(HealthComponent(getClass.getSimpleName, ComponentState.CRITICAL,
              "Exception when trying to check the health: %s".format(e.getMessage)))
        }).get
      ) to sender()
  }

  def checkHealth: Future[HealthComponent] = {
    val p = Promise[HealthComponent]()

    getHealth.onComplete {
      case Success(s) =>
        val healthFutures = getHealthChildren map { ref =>
          (ref ? CheckHealth).mapTo[HealthComponent] recover {
            case _: AskTimeoutException =>
              _log.warn(s"Health Check time out on child actor ${ref.path.toStringWithoutAddress}")
              HealthComponent(getClass.getSimpleName, ComponentState.CRITICAL,
                "Time out on child: %s".format(ref.path.toStringWithoutAddress))
            case ex: Exception =>
              HealthComponent(ref.path.name, ComponentState.CRITICAL,
                s"Failure to get health of child component. ${ex.getMessage}")
          }
        }

        Future.sequence(healthFutures) onComplete {
          case Failure(f) =>
            _log.debug(f, "Failed to retrieve health of children objects")
            p success HealthComponent(s.name, ComponentState.CRITICAL,
              s"Failure to get health of child components. ${f.getMessage}")
          case Success(healths) =>
            healths foreach { it => s.addComponent(it) }
            p success s
        }
      case Failure(f) =>
        _log.debug(f, "Failed to get health from component")
        p success HealthComponent(self.path.toString, ComponentState.CRITICAL, f.getMessage)
    }

    p.future
  }
}
Example 80
Source File: TypedCommandManager.scala From wookiee with Apache License 2.0 | 5 votes |
package com.webtrends.harness.command.typed

import akka.actor.{Actor, ActorRef, Props}
import akka.routing.{FromConfig, RoundRobinPool}
import com.webtrends.harness.HarnessConstants
import com.webtrends.harness.health.{ActorHealth, ComponentState, HealthComponent}
import com.webtrends.harness.logging.LoggingAdapter

import scala.collection.mutable
import scala.concurrent.Future
import scala.util.Try

case class RegisterCommand[T <: TypedCommand[_, _]](name: String, props: Props, checkHealth: Boolean)

class TypedCommandManager extends Actor with ActorHealth with LoggingAdapter {
  val healthCheckChildren = mutable.ArrayBuffer.empty[ActorRef]
  val config = context.system.settings.config.getConfig("akka.actor.deployment")

  override def receive: Receive = health orElse {
    case RegisterCommand(name, props, checkHealth) =>
      sender ! registerCommand(name, props, checkHealth)
  }

  def registerCommand[T <: TypedCommand[_, _]](name: String, actorProps: Props, checkHealth: Boolean): ActorRef = {
    TypedCommandManager.commands.get(name) match {
      case Some(commandRef) =>
        log.warn(s"Command $name has already been added, not re-adding it.")
        commandRef
      case None =>
        val props = if (config.hasPath(s"akka.actor.deployment.${HarnessConstants.TypedCommandFullName}/$name")) {
          FromConfig.props(actorProps)
        } else {
          val nrRoutees = Try { config.getInt(HarnessConstants.KeyCommandsNrRoutees) }.getOrElse(5)
          RoundRobinPool(nrRoutees).props(actorProps)
        }
        val commandRef = context.actorOf(props, name)
        TypedCommandManager.commands(name) = commandRef
        if (checkHealth) {
          healthCheckChildren += commandRef
        }
        commandRef
    }
  }

  override def getHealthChildren: Iterable[ActorRef] = {
    healthCheckChildren
  }

  override def getHealth: Future[HealthComponent] = {
    Future.successful(
      HealthComponent(self.path.toString, ComponentState.NORMAL,
        s"Managing ${TypedCommandManager.commands.size} typed commands")
    )
  }
}

object TypedCommandManager {
  private[typed] val commands = mutable.Map[String, ActorRef]()
  def props = Props[TypedCommandManager]
}
Example 81
Source File: TypedCommand.scala From wookiee with Apache License 2.0 | 5 votes |
package com.webtrends.harness.command.typed

import akka.actor.Actor
import akka.pattern._
import com.webtrends.harness.logging.ActorLoggingAdapter
import com.webtrends.harness.utils.FutureExtensions._

import scala.concurrent.{ExecutionContext, Future}
import scala.util.{Failure, Success}

trait TypedCommand[T, V] extends Actor with ActorLoggingAdapter {

  implicit def executionContext: ExecutionContext = context.dispatcher

  def commandName: String

  def receive: Receive = {
    case ExecuteTypedCommand(args) =>
      pipe {
        val startTime = System.currentTimeMillis()
        execute(args.asInstanceOf[T]) mapAll {
          case Success(t) =>
            log.info(s"Command $commandName succeeded in ${System.currentTimeMillis() - startTime}ms")
            t
          case Failure(f) =>
            log.info(s"Command $commandName failed in ${System.currentTimeMillis() - startTime}ms")
            throw f
        }
      } to sender
  }

  def execute(args: T): Future[V]
}
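A minimal concrete command sketch (not from the original source; UpperCaseCommand is an illustrative name): implementors only supply commandName and execute, while the trait handles timing, logging, and piping the Future result back to the sender.

import scala.concurrent.Future
import com.webtrends.harness.command.typed.TypedCommand

// Replies to ExecuteTypedCommand(args) with the upper-cased input
class UpperCaseCommand extends TypedCommand[String, String] {
  override def commandName: String = "upper-case"
  override def execute(args: String): Future[String] = Future.successful(args.toUpperCase)
}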
Example 82
Source File: TypedCommandHelper.scala From wookiee with Apache License 2.0 | 5 votes |
package com.webtrends.harness.command.typed

import akka.actor.{Actor, ActorRef, Props}
import akka.pattern._
import akka.util.Timeout
import com.webtrends.harness.HarnessConstants

import scala.concurrent.duration._
import scala.concurrent.{ExecutionContext, Future}

trait TypedCommandHelper {
  this: Actor =>

  var typedCommandManager: Option[ActorRef] = None
  implicit def ec: ExecutionContext = context.dispatcher

  def registerTypedCommand[T <: TypedCommand[_, _]](name: String, actorClass: Class[T], checkHealth: Boolean = false): Future[ActorRef] = {
    implicit val timeout = Timeout(2 seconds)
    getManager().flatMap { cm =>
      (cm ? RegisterCommand(name, Props(actorClass), checkHealth)).mapTo[ActorRef]
    }
  }

  protected def getManager(): Future[ActorRef] = {
    typedCommandManager match {
      case Some(cm) => Future.successful(cm)
      case None =>
        context.system.actorSelection(HarnessConstants.TypedCommandFullName).resolveOne()(2 seconds).map { s =>
          typedCommandManager = Some(s)
          s
        }
    }
  }
}
Example 83
Source File: CommandHelper.scala From wookiee with Apache License 2.0 | 5 votes |
package com.webtrends.harness.command

import akka.actor.{Props, ActorRef, Actor}
import akka.pattern.ask
import akka.util.Timeout
import com.webtrends.harness.app.Harness

import scala.concurrent.duration._
import com.webtrends.harness.HarnessConstants
import com.webtrends.harness.logging.ActorLoggingAdapter

import scala.concurrent.{Promise, Future}
import scala.util.{Failure, Success}

trait CommandHelper extends ActorLoggingAdapter with BaseCommandHelper {
  this: Actor =>
  override lazy implicit val actorSystem = context.system

  // commandManager and initCommandManager are provided by BaseCommandHelper
  def executeCommand[T: Manifest](name: String, bean: Option[CommandBean] = None,
                                  server: Option[String] = None,
                                  port: Int = 2552)(implicit timeout: Timeout): Future[BaseCommandResponse[T]] = {
    val p = Promise[BaseCommandResponse[T]]
    initCommandManager onComplete {
      case Success(_) =>
        commandManager match {
          case Some(cm) =>
            val msg = server match {
              case Some(srv) => ExecuteRemoteCommand(name, srv, port, bean, timeout)
              case None => ExecuteCommand(name, bean, timeout)
            }
            (cm ? msg)(timeout).mapTo[BaseCommandResponse[T]] onComplete {
              case Success(s) => p success s
              case Failure(f) => p failure CommandException("CommandManager", f)
            }
          case None => p failure CommandException("CommandManager", "CommandManager not found!")
        }
      case Failure(f) => p failure f
    }
    p.future
  }
}
Example 84
Source File: ConfigSpec.scala From wookiee with Apache License 2.0 | 5 votes |
package com.webtrends.harness

import java.io.{BufferedWriter, File, FileWriter}
import java.util.concurrent.TimeUnit

import akka.actor.{Actor, ActorSystem, Props}
import akka.testkit.TestProbe
import com.typesafe.config.ConfigFactory
import com.webtrends.harness.app.HarnessActor.ConfigChange
import com.webtrends.harness.config.ConfigWatcherActor
import com.webtrends.harness.health.{ComponentState, HealthComponent}
import com.webtrends.harness.service.messages.CheckHealth
import org.specs2.mutable.SpecificationWithJUnit

import scala.concurrent.ExecutionContextExecutor
import scala.concurrent.duration.FiniteDuration
import scala.reflect.io.{Directory, Path}

class ConfigSpec extends SpecificationWithJUnit {
  implicit val dur = FiniteDuration(2, TimeUnit.SECONDS)

  new File("services/test/conf").mkdirs()

  implicit val sys = ActorSystem("system", ConfigFactory.parseString(
    """
      akka.actor.provider = "akka.actor.LocalActorRefProvider"
      services { path = "services" }
    """).withFallback(ConfigFactory.load))

  implicit val ec: ExecutionContextExecutor = sys.dispatcher

  val probe = TestProbe()
  val parent = sys.actorOf(Props(new Actor {
    val child = context.actorOf(ConfigWatcherActor.props, "child")
    def receive = {
      case x if sender == child => probe.ref forward x
      case x => child forward x
    }
  }))

  sequential

  "config " should {
    "be in good health" in {
      probe.send(parent, CheckHealth)
      val msg = probe.expectMsgClass(classOf[HealthComponent])
      msg.state equals ComponentState.NORMAL
    }

    "detect changes in config" in {
      val file = new File("services/test/conf/test.conf")
      val bw = new BufferedWriter(new FileWriter(file))
      bw.write("test = \"value\"")
      bw.close()
      val msg = probe.expectMsgClass(classOf[ConfigChange])
      msg.isInstanceOf[ConfigChange]
    }
  }

  step {
    sys.terminate().onComplete { _ =>
      Directory(Path(new File("services"))).deleteRecursively()
    }
  }
}
Example 85
Source File: ActorWaitSpec.scala From wookiee with Apache License 2.0 | 5 votes |
package com.webtrends.harness

import java.util.concurrent.TimeUnit

import akka.actor.{Actor, ActorSystem, PoisonPill, Props}
import akka.pattern.ask
import akka.testkit.TestKit
import akka.util.Timeout
import com.webtrends.harness.utils.ActorWaitHelper
import org.specs2.mutable.SpecificationLike

import scala.concurrent.Await
import scala.concurrent.duration.Duration

class WaitedOnActor extends Actor with ActorWaitHelper {
  def receive: Receive = {
    case "message" => sender ! "waitedResponse"
  }
}

class WaitActor extends Actor with ActorWaitHelper {
  implicit val timeout = Timeout(5000, TimeUnit.MILLISECONDS)
  val waited = awaitActor(Props[WaitedOnActor])

  def receive: Receive = {
    case "message" => sender ! "response"
    case "waited" => sender ! Await.result((waited ? "message").mapTo[String], Duration(5, "seconds"))
  }
}

class ActorWaitSpec extends TestKit(ActorSystem("wait-spec")) with SpecificationLike {
  implicit val timeout = Timeout(5000, TimeUnit.MILLISECONDS)
  val waitActor = ActorWaitHelper.awaitActor(Props[WaitActor], system)

  sequential

  "ActorWaitSpec" should {
    "await the WaitActor successfully " in {
      Await.result((waitActor ? "message").mapTo[String], Duration(5, "seconds")) must beEqualTo("response")
    }

    "the WaitActor's awaited actor must have come up " in {
      Await.result((waitActor ? "waited").mapTo[String], Duration(5, "seconds")) must beEqualTo("waitedResponse")
    }
  }

  step {
    waitActor ! PoisonPill
  }
}
Example 86
Source File: IngestionHandler.scala From hydra with Apache License 2.0 | 5 votes |
package hydra.ingest.services

import akka.actor.SupervisorStrategy.Stop
import akka.actor.{Actor, ActorRef, OneForOneStrategy, ReceiveTimeout}
import akka.http.scaladsl.model.{StatusCode, StatusCodes}
import hydra.core.ingest.{HydraRequest, IngestionReport, RequestParams}
import hydra.ingest.services.IngestorRegistry.{FindAll, FindByName, LookupResult}

import scala.concurrent.duration.FiniteDuration

trait IngestionHandler {
  this: Actor =>

  def timeout: FiniteDuration
  def request: HydraRequest

  //we require an actorref here for performance reasons
  def registry: ActorRef

  private val targetIngestor = request.metadataValue(RequestParams.HYDRA_INGESTOR_PARAM)

  targetIngestor match {
    case Some(ingestor) => registry ! FindByName(ingestor)
    case None => registry ! FindAll
  }

  override def receive: Receive = {
    case LookupResult(Nil) =>
      val errorCode = targetIngestor
        .map(i => StatusCodes.custom(404, s"No ingestor named $i was found in the registry."))
        .getOrElse(StatusCodes.BadRequest)
      complete(errorWith(errorCode))

    case LookupResult(ingestors) =>
      context.actorOf(IngestionSupervisor.props(request, self, ingestors, timeout))

    case report: IngestionReport =>
      complete(report)
  }

  override val supervisorStrategy = OneForOneStrategy() {
    case e: Exception =>
      fail(e)
      Stop
  }

  private def errorWith(statusCode: StatusCode) = {
    IngestionReport(request.correlationId, Map.empty, statusCode.intValue())
  }

  def complete(report: IngestionReport)

  def fail(e: Throwable)
}
Example 87
Source File: TransportRegistrar.scala From hydra with Apache License 2.0 | 5 votes |
package hydra.ingest.services

import java.lang.reflect.Method

import akka.actor.{Actor, ActorRef, ActorRefFactory, Props}
import com.typesafe.config.Config
import hydra.common.config.ConfigSupport
import hydra.common.logging.LoggingAdapter
import hydra.common.reflect.ReflectionUtils
import hydra.common.util.ActorUtils
import hydra.core.transport.Transport
import hydra.ingest.bootstrap.ClasspathHydraComponentLoader
import hydra.ingest.services.TransportRegistrar.{GetTransports, GetTransportsResponse}

import scala.util.Try

object TransportRegistrar {

  // Finds a transport's companion object and, if it exposes a props(config: Config)
  // factory method, returns the companion together with that method
  private def companion[T](clazz: Class[T]): Option[(T, Method)] = {
    try {
      val companion = ReflectionUtils.companionOf(clazz)
      companion.getClass.getMethods.toList.filter(m =>
        m.getName == "props" && m.getParameterTypes.toList == List(classOf[Config])) match {
        case Nil => None
        case method :: Nil => Some(companion, method)
        case _ => None
      }
    } catch {
      case _: Throwable => None
    }
  }
}
Example 88
Source File: IngestorRegistrar.scala From hydra with Apache License 2.0 | 5 votes |
package hydra.ingest.services

import akka.actor.Actor
import hydra.common.config.ConfigSupport
import ConfigSupport._
import hydra.common.logging.LoggingAdapter
import hydra.common.util.ActorUtils
import hydra.ingest.IngestorInfo
import hydra.ingest.bootstrap.ClasspathHydraComponentLoader
import hydra.ingest.services.IngestorRegistrar.UnregisterAll
import hydra.ingest.services.IngestorRegistry.{RegisterWithClass, Unregister, Unregistered}

class IngestorRegistrar extends Actor with ConfigSupport with LoggingAdapter {

  private val ingestorRegistry = context.actorSelection(
    applicationConfig
      .getStringOpt("ingest.ingestor-registry.path")
      .getOrElse("/user/service/ingestor_registry")
  )

  lazy val ingestors = ClasspathHydraComponentLoader.ingestors.map(h => ActorUtils.actorName(h) -> h)

  override def receive = {
    case RegisterWithClass(group, name, clazz) =>
      ingestorRegistry ! RegisterWithClass(group, name, clazz)

    case UnregisterAll =>
      ingestors.foreach(h => ingestorRegistry ! Unregister(h._1))

    case Unregistered(name) =>
      log.info(s"Ingestor $name was removed from the registry.")

    case IngestorInfo(name, group, path, _) =>
      log.info(s"Ingestor $name [$group] is available at $path")
  }

  override def preStart(): Unit = {
    ingestors.foreach(h => ingestorRegistry ! RegisterWithClass(h._2, "global", Some(h._1)))
  }
}

object IngestorRegistrar {
  case object UnregisterAll
}
Example 89
Source File: IngestorsEndpointSpec.scala From hydra with Apache License 2.0 | 5 votes |
package hydra.ingest.http

import akka.actor.Actor
import akka.http.scaladsl.testkit.ScalatestRouteTest
import akka.testkit.{TestActorRef, TestKit, TestProbe}
import hydra.common.util.ActorUtils
import hydra.ingest.IngestorInfo
import hydra.ingest.services.IngestorRegistry.{FindAll, FindByName, LookupResult}
import org.joda.time.DateTime
import org.scalatest.matchers.should.Matchers
import org.scalatest.wordspec.AnyWordSpecLike

import scala.concurrent.duration._

class IngestorsEndpointSpec
    extends Matchers
    with AnyWordSpecLike
    with ScalatestRouteTest
    with HydraIngestJsonSupport {

  val ingestorsRoute = new IngestorRegistryEndpoint().route

  override def afterAll = {
    super.afterAll()
    TestKit.shutdownActorSystem(system, verifySystemShutdown = true, duration = 10 seconds)
  }

  val probe = TestProbe()

  val ingestorInfo = IngestorInfo(ActorUtils.actorName(probe.ref), "test", probe.ref.path, DateTime.now)

  val registry = TestActorRef(
    new Actor {
      override def receive = {
        case FindByName("tester") => sender ! LookupResult(Seq(ingestorInfo))
        case FindAll => sender ! LookupResult(Seq(ingestorInfo))
      }
    },
    "ingestor_registry"
  ).underlyingActor

  "The ingestors endpoint" should {
    "return all ingestors" in {
      Get("/ingestors") ~> ingestorsRoute ~> check {
        val r = responseAs[Seq[IngestorInfo]]
        r.size shouldBe 1
        r(0).path shouldBe ingestorInfo.path
        r(0).group shouldBe ingestorInfo.group
        r(0).registeredAt shouldBe ingestorInfo.registeredAt.withMillisOfSecond(0)
      }
    }
  }
}
Example 90
Source File: KafkaConsumerProxy.scala From hydra with Apache License 2.0 | 5 votes |
package hydra.kafka.consumer

import akka.actor.Actor
import akka.pattern.pipe
import hydra.kafka.consumer.KafkaConsumerProxy._
import hydra.kafka.util.KafkaUtils
import org.apache.kafka.clients.consumer.Consumer
import org.apache.kafka.common.{PartitionInfo, TopicPartition}

import scala.collection.JavaConverters._
import scala.collection.immutable.Map
import scala.concurrent.Future

class KafkaConsumerProxy extends Actor {

  private var _defaultConsumer: Consumer[String, String] = _

  private implicit val ec = context.dispatcher

  override def preStart(): Unit = {
    _defaultConsumer = KafkaUtils.stringConsumerSettings.createKafkaConsumer()
  }

  override def receive: Receive = {
    case GetLatestOffsets(topic) =>
      val requestor = sender
      pipe(latestOffsets(topic).map(LatestOffsetsResponse(topic, _))) to requestor

    case GetPartitionInfo(topic) =>
      val requestor = sender
      pipe(partitionInfo(topic).map(PartitionInfoResponse(topic, _))) to requestor

    case ListTopics =>
      val requestor = sender
      pipe(listTopics().map(ListTopicsResponse(_))) to requestor
  }

  override def postStop(): Unit = {
    _defaultConsumer.close()
  }

  private def latestOffsets(topic: String): Future[Map[TopicPartition, Long]] = {
    Future {
      val ts = _defaultConsumer
        .partitionsFor(topic)
        .asScala
        .map(pi => new TopicPartition(topic, pi.partition()))
      _defaultConsumer
        .endOffsets(ts.asJava)
        .asScala
        .map(tp => tp._1 -> tp._2.toLong)
        .toMap
    }
  }

  private def partitionInfo(topic: String): Future[Seq[PartitionInfo]] =
    Future(_defaultConsumer.partitionsFor(topic).asScala)

  private def listTopics(): Future[Map[String, Seq[PartitionInfo]]] = {
    Future(_defaultConsumer.listTopics().asScala.toMap)
      .map(res => res.mapValues(_.asScala.toSeq))
  }
}

object KafkaConsumerProxy {

  case class GetLatestOffsets(topic: String)

  case class LatestOffsetsResponse(topic: String, offsets: Map[TopicPartition, Long])

  case class GetPartitionInfo(topic: String)

  case class PartitionInfoResponse(topic: String, partitionInfo: Seq[PartitionInfo])

  case object ListTopics

  case class ListTopicsResponse(topics: Map[String, Seq[PartitionInfo]])
}
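A usage sketch (not from the original source; topic name and system name are illustrative, and KafkaUtils must be able to build consumer settings from your configuration): the proxy answers asks with the response messages defined in its companion.

import java.util.concurrent.TimeUnit
import akka.actor.{ActorSystem, Props}
import akka.pattern.ask
import akka.util.Timeout
import hydra.kafka.consumer.KafkaConsumerProxy
import hydra.kafka.consumer.KafkaConsumerProxy.{GetLatestOffsets, LatestOffsetsResponse}

object LatestOffsetsDemo extends App {
  implicit val timeout: Timeout = Timeout(5, TimeUnit.SECONDS)
  val system = ActorSystem("offsets-demo")
  import system.dispatcher

  val proxy = system.actorOf(Props[KafkaConsumerProxy], "kafka-consumer-proxy")
  // The proxy replies with the end offset of every partition of the topic
  (proxy ? GetLatestOffsets("my-topic")).mapTo[LatestOffsetsResponse].foreach { res =>
    res.offsets.foreach { case (partition, offset) => println(s"$partition -> $offset") }
  }
}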
Example 91
Source File: IngestionErrorHandler.scala From hydra with Apache License 2.0 | 5 votes |
package hydra.kafka.ingestors

import akka.actor.Actor
import com.pluralsight.hydra.avro.JsonToAvroConversionException
import hydra.common.config.ConfigSupport._
import hydra.avro.registry.JsonToAvroConversionExceptionWithMetadata
import hydra.common.config.ConfigSupport
import hydra.core.ingest.RequestParams.HYDRA_KAFKA_TOPIC_PARAM
import hydra.core.protocol.GenericIngestionError
import hydra.core.transport.Transport.Deliver
import hydra.kafka.producer.AvroRecord
import org.apache.avro.Schema
import spray.json.DefaultJsonProtocol

import scala.io.Source

class IngestionErrorHandler extends Actor with ConfigSupport with DefaultJsonProtocol {

  import spray.json._

  private implicit val ec = context.dispatcher

  private implicit val hydraIngestionErrorInfoFormat = jsonFormat6(HydraIngestionErrorInfo)

  private val errorTopic = applicationConfig
    .getStringOpt("ingest.error-topic")
    .getOrElse("_hydra_ingest_errors")

  private lazy val kafkaTransport = context.actorSelection(
    applicationConfig
      .getStringOpt(s"transports.kafka.path")
      .getOrElse(s"/user/service/kafka_transport")
  )

  private val errorSchema = new Schema.Parser()
    .parse(Source.fromResource("schemas/HydraIngestError.avsc").mkString)

  override def receive: Receive = {
    case error: GenericIngestionError =>
      kafkaTransport ! Deliver(buildPayload(error))
  }

  private[ingestors] def buildPayload(err: GenericIngestionError): AvroRecord = {
    val schema: Option[String] = err.cause match {
      case e: JsonToAvroConversionException => Some(e.getSchema.toString)
      case e: JsonToAvroConversionExceptionWithMetadata => Some(e.location)
      case e: Exception => None
    }

    val topic = err.request.metadataValue(HYDRA_KAFKA_TOPIC_PARAM)

    val errorInfo = HydraIngestionErrorInfo(
      err.ingestor,
      topic,
      err.cause.getMessage,
      err.request.metadata,
      schema,
      err.request.payload
    ).toJson.compactPrint

    AvroRecord(errorTopic, errorSchema, topic, errorInfo, err.request.ackStrategy)
  }
}

case class HydraIngestionErrorInfo(
    ingestor: String,
    destination: Option[String],
    errorMessage: String,
    metadata: Map[String, String],
    schema: Option[String],
    payload: String
)
Example 92
Source File: LoggingAdapterSpec.scala From hydra with Apache License 2.0 | 5 votes |
package hydra.common.logging

import akka.actor.{Actor, ActorSystem}
import akka.testkit.{TestActorRef, TestKit, TestProbe}
import org.scalatest.matchers.should.Matchers
import org.scalatest.funspec.AnyFunSpecLike
import org.scalatest.BeforeAndAfterAll

class LoggingAdapterSpec
    extends TestKit(ActorSystem("test"))
    with Matchers
    with AnyFunSpecLike
    with BeforeAndAfterAll {

  override def afterAll = TestKit.shutdownActorSystem(system)

  describe("The logging adapter") {
    it("allows an actor to use the logger") {
      val act = TestActorRef(new Actor with ActorLoggingAdapter {
        override def receive = {
          case _ => log.info("got it"); sender ! "got it"
        }
      }, "logger-test")

      act.underlyingActor.log.getName shouldBe "akka.testkit.TestActorRef"

      // Send a message and make sure we get a response back
      val probe = TestProbe()
      probe.send(act, "test")
      probe.expectMsgType[String] shouldBe "got it"
    }
  }
}
Example 93
Source File: DummyActor.scala From hydra with Apache License 2.0 | 5 votes |
package hydra.common.testing

import akka.actor.Actor
import hydra.common.config.ActorConfigSupport
import hydra.common.logging.ActorLoggingAdapter

class DummyActor extends Actor with ActorConfigSupport with ActorLoggingAdapter {

  override def receive: Receive = {
    case msg =>
      log.info(msg.toString)
      sender ! msg
  }
}
Example 94
Source File: Ingestor.scala From hydra with Apache License 2.0 | 5 votes |
package hydra.core.ingest

import akka.actor.{Actor, OneForOneStrategy, SupervisorStrategy}
import akka.pattern.pipe
import hydra.core.akka.InitializingActor
import hydra.core.monitor.HydraMetrics
import hydra.core.protocol._
import hydra.core.transport.{AckStrategy, HydraRecord, RecordFactory, Transport}
import org.apache.commons.lang3.ClassUtils

import scala.concurrent.Future
import scala.util.{Success, Try}

// The overridden initializationError below implies this trait extends InitializingActor,
// whose remaining members (name, thisActorName, recordFactory, compose) are elided here
trait Ingestor extends InitializingActor {

  import Ingestor._
  import context.dispatcher

  def validateRequest(request: HydraRequest): Try[HydraRequest] = Success(request)

  def doValidate(request: HydraRequest): Future[MessageValidationResult] = {
    Future
      .fromTry(validateRequest(request))
      .flatMap[MessageValidationResult] { r =>
        recordFactory.build(r).map { r =>
          val destination = r.destination
          val ackStrategy = r.ackStrategy.toString
          HydraMetrics.incrementGauge(
            lookupKey = ReconciliationMetricName + s"_${destination}_$ackStrategy",
            metricName = ReconciliationMetricName,
            tags = Seq("ingestor" -> name, "destination" -> destination, "ackStrategy" -> ackStrategy)
          )
          HydraMetrics.incrementCounter(
            lookupKey = IngestCounterMetricName + s"_${destination}_$ackStrategy",
            metricName = IngestCounterMetricName,
            tags = Seq("ingestor" -> name, "destination" -> destination, "ackStrategy" -> ackStrategy)
          )
          ValidRequest(r)
        }
      }
      .recover { case e => InvalidRequest(e) }
  }

  override def initializationError(ex: Throwable): Receive = {
    case Publish(req) =>
      sender ! IngestorError(ex)
      context.system.eventStream.publish(IngestorUnavailable(thisActorName, ex, req))
    case _ =>
      sender ! IngestorError(ex)
  }

  def ingest(next: Actor.Receive) = compose(next)

  override val supervisorStrategy = OneForOneStrategy() {
    case _ => SupervisorStrategy.Restart
  }

  def decrementGaugeOnReceipt(destination: String, ackStrategy: String): Future[Unit] = {
    Future {
      HydraMetrics.decrementGauge(
        lookupKey = Ingestor.ReconciliationMetricName + s"_${destination}_$ackStrategy",
        metricName = Ingestor.ReconciliationMetricName,
        tags = Seq("ingestor" -> name, "destination" -> destination, "ackStrategy" -> ackStrategy)
      )
    }
  }
}

object Ingestor {
  val ReconciliationMetricName = "hydra_ingest_reconciliation"
  val IngestCounterMetricName = "hydra_ingest_message_counter"
}
Example 95
Source File: InitializingActor.scala From hydra with Apache License 2.0 | 5 votes |
package hydra.core.akka

import akka.actor.{Actor, ActorRef, ReceiveTimeout, Stash}
import akka.pattern.pipe
import hydra.common.config.ActorConfigSupport
import hydra.common.logging.LoggingAdapter
import hydra.core.HydraException
import hydra.core.akka.InitializingActor.{InitializationError, Initialized}
import hydra.core.protocol.HydraMessage
import retry.Success

import scala.concurrent.Future
import scala.concurrent.duration._
import scala.util.control.NonFatal

trait InitializingActor extends Actor with ActorConfigSupport with Stash with LoggingAdapter {

  def initializationError(ex: Throwable): Receive
}

object InitializingActor {

  case object Initialized extends HydraMessage

  case class InitializationError(cause: Throwable) extends HydraMessage
}

@SerialVersionUID(1L)
class ActorInitializationException(ingestor: ActorRef, message: String, cause: Throwable)
    extends HydraException(ActorInitializationException.enrichedMessage(ingestor, message), cause) {
  def getActor: ActorRef = ingestor
}

object ActorInitializationException {

  private def enrichedMessage(actor: ActorRef, message: String) =
    Option(actor).map(a => s"${a.path}: $message").getOrElse(message)

  private[hydra] def apply(actor: ActorRef, message: String, cause: Throwable = null) =
    new ActorInitializationException(actor, message, cause)

  def unapply(ex: ActorInitializationException): Option[(ActorRef, String, Throwable)] =
    Some((ex.getActor, ex.getMessage, ex.getCause))
}
Example 96
Source File: ComposeReceiveSpec.scala From hydra with Apache License 2.0 | 5 votes |
package hydra.core.akka

import akka.actor.{Actor, ActorSystem, Props}
import akka.testkit.{ImplicitSender, TestKit}
import org.scalatest.BeforeAndAfterAll
import org.scalatest.matchers.should.Matchers
import org.scalatest.flatspec.AnyFlatSpecLike

class ComposeReceiveSpec
    extends TestKit(ActorSystem("test"))
    with Matchers
    with AnyFlatSpecLike
    with BeforeAndAfterAll
    with ImplicitSender {

  override def afterAll = TestKit.shutdownActorSystem(system, verifySystemShutdown = true)

  "The ComposingReceiveTrait" should "compose" in {
    system.actorOf(Props[TestBaseActor]) ! "foo"
    expectMsg("bar")

    system.actorOf(Props[TestComposeActor]) ! "foo"
    expectMsg("new-bar")
  }
}

trait TestBase extends Actor with ComposingReceive {

  override def baseReceive = {
    case "foo" => sender ! "bar"
  }
}

class TestBaseActor extends TestBase {
  compose(Actor.emptyBehavior)
}

class TestComposeActor extends TestBase {
  compose {
    case "foo" => sender ! "new-bar"
  }
}
Example 97
Source File: DummyActor.scala From hydra with Apache License 2.0 | 5 votes |
package hydra.core.test

import akka.actor.Actor
import hydra.common.config.ActorConfigSupport
import hydra.common.logging.ActorLoggingAdapter

class DummyActor extends Actor with ActorConfigSupport with ActorLoggingAdapter {

  override def receive: Receive = {
    case msg =>
      log.info(msg.toString)
      sender ! msg
  }
}
Example 98
Source File: GlobalWatchService.scala From incubator-retired-iota with Apache License 2.0 | 5 votes |
package org.apache.iota.fey

import java.nio.file.{Files, Path, Paths, WatchEvent}

import akka.actor.{Actor, ActorLogging, ActorRef}
import org.apache.iota.fey.GlobalWatchService.REGISTER_WATCHER_PERFORMER
import org.apache.iota.fey.WatchingDirectories.STOPPED

class GlobalWatchService extends Actor with ActorLogging {

  // WatchService
  var watchThread: Thread = null
  val watchFileTask: GlobalWatchServiceTask = new GlobalWatchServiceTask(self)

  override def preStart(): Unit = {
    startWatcher("PRE-START")
  }

  override def postStop(): Unit = {
    stopWatcher("POST-STOP")
  }

  private def startWatcher(from: String) = {
    log.info(s"Starting Global Watcher from $from")
    watchThread = new Thread(watchFileTask, "FEY_GLOBAL_WATCH_SERVICE_PERFORMERS")
    watchThread.setDaemon(true)
    watchThread.start()
  }

  private def stopWatcher(from: String) = {
    log.info(s"Stopping Global Watcher from $from")
    if (watchThread != null && watchThread.isAlive) {
      watchThread.interrupt()
      watchThread = null
    }
  }

  override def receive: Receive = {
    case REGISTER_WATCHER_PERFORMER(path, file_name, actor, events, loadExists) =>
      registerPath(path, file_name, actor, events, loadExists)
    case STOPPED =>
      stopWatcher("STOPPED-THREAD")
      startWatcher("STOPPED-THREAD")
    case x => log.error(s"Unknown message $x")
  }

  private def broadcastMessageIfFileExists(actor: ActorRef, pathWithFile: String) = {
    val filePath = Paths.get(pathWithFile)
    if (Files.exists(filePath)) {
      log.info(s"File $pathWithFile exists. Broadcasting message to actor ${actor.path.toString}")
      actor ! GlobalWatchService.ENTRY_CREATED(filePath)
    }
  }

  private def registerPath(dir_path: String, file_name: Option[String], actor: ActorRef,
                           events: Array[WatchEvent.Kind[_]], loadExists: Boolean) = {
    WatchingDirectories.actorsInfo.get((dir_path, file_name)) match {
      case Some(info) =>
        val newInfo: Map[WatchEvent.Kind[_], Array[ActorRef]] = events.map(event => {
          info.get(event) match {
            case Some(actors) => (event, (Array(actor) ++ actors))
            case None => (event, Array(actor))
          }
        }).toMap
        WatchingDirectories.actorsInfo.put((dir_path, file_name), info ++ newInfo)
        watchFileTask.watch(Paths.get(dir_path), actor.path.toString, events)
      case None =>
        val tmpEvents: Map[WatchEvent.Kind[_], Array[ActorRef]] =
          events.map(event => { (event, Array(actor)) }).toMap
        WatchingDirectories.actorsInfo.put((dir_path, file_name), tmpEvents)
        watchFileTask.watch(Paths.get(dir_path), actor.path.toString, events)
    }

    if (file_name.isDefined && loadExists) {
      log.info(s"Checking if file $dir_path/${file_name.get} already exist")
      broadcastMessageIfFileExists(actor, s"$dir_path/${file_name.get}")
    }
  }
}

object GlobalWatchService {
  sealed case class ENTRY_CREATED(path: Path)
  sealed case class ENTRY_MODIFIED(path: Path)
  sealed case class ENTRY_DELETED(path: Path)
  sealed case class REGISTER_WATCHER_PERFORMER(dir_path: String, file_name: Option[String],
                                               actor: ActorRef, events: Array[WatchEvent.Kind[_]],
                                               loadIfExists: Boolean)
}
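A registration sketch (not from the original source; the watcher path, directory, and file name are illustrative): an actor asks the GlobalWatchService to notify it when a specific file is created or modified.

package org.apache.iota.fey

import java.nio.file.StandardWatchEventKinds._
import java.nio.file.WatchEvent

import akka.actor.{Actor, ActorLogging}

// Registers interest in /tmp/fey/config.json and reacts to create/modify notifications
class ConfigFileListener extends Actor with ActorLogging {

  override def preStart(): Unit = {
    val watcher = context.actorSelection("/user/global-watch-service")
    watcher ! GlobalWatchService.REGISTER_WATCHER_PERFORMER(
      "/tmp/fey", Some("config.json"), self,
      Array[WatchEvent.Kind[_]](ENTRY_CREATE, ENTRY_MODIFY), loadIfExists = true)
  }

  override def receive: Receive = {
    case GlobalWatchService.ENTRY_CREATED(path) => log.info(s"created: $path")
    case GlobalWatchService.ENTRY_MODIFIED(path) => log.info(s"modified: $path")
  }
}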
Example 99
Source File: JsonReceiverActor.scala From incubator-retired-iota with Apache License 2.0 | 5 votes |
package org.apache.iota.fey

import java.nio.file.Paths
import java.io.File

import akka.actor.{Actor, ActorLogging, ActorRef, Props}
import play.api.libs.json.{JsValue, Json}

class JsonReceiverActor extends Actor with ActorLogging {

  import JsonReceiverActor._

  val monitoring_actor = FEY_MONITOR.actorRef
  var watchFileTask: WatchServiceReceiver = _
  var watchThread: Thread = _

  override def preStart() {
    prepareDynamicJarRepo()
    processCheckpointFiles()

    watchFileTask = new WatchServiceReceiver(self)
    watchThread = new Thread(watchFileTask, GLOBAL_DEFINITIONS.WATCH_SERVICE_THREAD)

    monitoring_actor ! Monitor.START(Utils.getTimestamp)
    watchThread.setDaemon(true)
    watchThread.start()

    watchFileTask.watch(Paths.get(CONFIG.JSON_REPOSITORY))
  }

  private def prepareDynamicJarRepo() = {
    val jarDir = new File(CONFIG.DYNAMIC_JAR_REPO)
    if (!jarDir.exists()) {
      jarDir.mkdir()
    } else if (CONFIG.DYNAMIC_JAR_FORCE_PULL) {
      jarDir.listFiles().foreach(_.delete())
    }
  }

  private def processCheckpointFiles() = {
    if (CONFIG.CHEKPOINT_ENABLED) {
      val checkpoint = new CheckpointProcessor(self)
      checkpoint.run()
    }
  }

  override def postStop() {
    monitoring_actor ! Monitor.STOP(Utils.getTimestamp)
    watchThread.interrupt()
    watchThread.join()
  }

  override def postRestart(reason: Throwable): Unit = {
    monitoring_actor ! Monitor.RESTART(reason, Utils.getTimestamp)
    preStart()
  }

  override def receive: Receive = {
    case JSON_RECEIVED(json, file) =>
      log.info(s"JSON RECEIVED => ${Json.stringify(json)}")
      context.parent ! FeyCore.ORCHESTRATION_RECEIVED(json, Some(file))
    case _ =>
  }
}

object JsonReceiverActor {
  case class JSON_RECEIVED(json: JsValue, file: File)
}
Example 100
Source File: GlobalPerformer.scala From incubator-retired-iota with Apache License 2.0 | 5 votes |
package org.apache.iota.fey

import akka.actor.SupervisorStrategy.Restart
import akka.actor.{Actor, ActorLogging, ActorRef, OneForOneStrategy, Props, Terminated}
import akka.routing._
import play.api.libs.json.JsObject

import scala.collection.mutable.HashMap
import scala.concurrent.duration._

protected class GlobalPerformer(val orchestrationID: String,
                                val orchestrationName: String,
                                val globalPerformers: List[JsObject],
                                val ensemblesSpec: List[JsObject]) extends Actor with ActorLogging {

  val monitoring_actor = FEY_MONITOR.actorRef
  var global_metadata: Map[String, Performer] = Map.empty[String, Performer]

  override def receive: Receive = {
    case GlobalPerformer.PRINT_GLOBAL =>
      context.actorSelection(s"*") ! FeyGenericActor.PRINT_PATH

    case Terminated(actor) =>
      monitoring_actor ! Monitor.TERMINATE(actor.path.toString, Utils.getTimestamp)
      log.error(s"DEAD Global Performers ${actor.path.name}")
      context.children.foreach { child =>
        context.unwatch(child)
        context.stop(child)
      }
      throw new RestartGlobalPerformers(s"DEAD Global Performer ${actor.path.name}")

    case GetRoutees => // Discard

    case x => log.warning(s"Message $x not treated by Global Performers")
  }

  private def loadClazzFromJar(classPath: String, jarLocation: String, jarName: String): Class[FeyGenericActor] = {
    try {
      Utils.loadActorClassFromJar(jarLocation, classPath, jarName)
    } catch {
      case e: Exception =>
        log.error(e, s"Could not load class $classPath from jar $jarLocation. Please, check the Jar repository path as well the jar name")
        throw e
    }
  }
}

object GlobalPerformer {

  val activeGlobalPerformers: HashMap[String, Map[String, ActorRef]] =
    HashMap.empty[String, Map[String, ActorRef]]

  case object PRINT_GLOBAL
}
Example 101
Source File: IdentifyFeyActors.scala From incubator-retired-iota with Apache License 2.0 | 5 votes |
package org.apache.iota.fey

import akka.actor.{Actor, ActorIdentity, ActorLogging, ActorPath, Identify}
import akka.routing.{ActorRefRoutee, GetRoutees, Routees}
import play.api.libs.json._

import scala.collection.mutable.HashSet

protected class IdentifyFeyActors extends Actor with ActorLogging {

  import IdentifyFeyActors._

  override def receive: Receive = {
    case IDENTIFY_TREE(startPath) =>
      log.info("Current Actors in system:")
      actorsPath = HashSet.empty
      rootPath = startPath
      log.info(startPath)
      self ! ActorPath.fromString(startPath)

    case path: ActorPath =>
      context.actorSelection(path / "*") ! Identify(())
      context.actorSelection(path / "*") ! GetRoutees

    case ActorIdentity(_, Some(ref)) =>
      actorsPath.add(ref.path.toString)
      log.info(ref.path.toString)
      self ! ref.path

    case routees: Routees =>
      routees.routees
        .map(_.asInstanceOf[ActorRefRoutee])
        .foreach(routee => {
          log.info(routee.ref.path.toString)
          actorsPath.add(routee.ref.path.toString)
        })

    case _ =>
  }
}

protected object IdentifyFeyActors {

  def generateTreeJson(): String = {
    val trie = new Trie("FEY-MANAGEMENT-SYSTEM")
    actorsPath.map(_.replace("user/", "")).foreach(trie.append(_))
    Json.stringify(trie.print)
  }

  // Static HTML content from d3
  val html = scala.io.Source.fromInputStream(getClass.getResourceAsStream("/d3Tree.html"), "UTF-8")
    .getLines()
    .mkString("\n")

  def getHTMLTree(json: String): String = {
    html.replace("$MYJSONHIERARCHY", json)
  }
}
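A usage sketch (not from the original source; IDENTIFY_TREE and actorsPath are declared in the companion object but elided from the listing above, and the demo object must live in the fey package because the classes are protected): trigger a tree walk, then render the collected paths as JSON.

package org.apache.iota.fey

import akka.actor.{ActorSystem, Props}

object ActorTreeDemo extends App {
  val system = ActorSystem("FEY-MANAGEMENT-SYSTEM")
  val identifier = system.actorOf(Props(new IdentifyFeyActors), "identifier")

  // Walk the hierarchy under /user; discovered paths accumulate in IdentifyFeyActors.actorsPath
  identifier ! IdentifyFeyActors.IDENTIFY_TREE(s"akka://${system.name}/user")
  Thread.sleep(2000) // crude wait for the Identify round trips to complete
  println(IdentifyFeyActors.generateTreeJson())
}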
Example 102
Source File: NotificationChannel.scala From eventuate with Apache License 2.0 | 5 votes |
package com.rbmhtechnology.eventuate.log

import java.util.concurrent.TimeUnit

import akka.actor.Actor
import akka.actor.ActorRef
import com.rbmhtechnology.eventuate._
import com.rbmhtechnology.eventuate.ReplicationProtocol._
import com.typesafe.config.Config

import scala.collection.immutable.Seq
import scala.concurrent.duration.DurationLong
import scala.concurrent.duration.FiniteDuration

class NotificationChannelSettings(config: Config) {
  val registrationExpirationDuration: FiniteDuration =
    config.getDuration("eventuate.log.replication.retry-delay", TimeUnit.MILLISECONDS).millis
}

object NotificationChannel {
  case class Updated(events: Seq[DurableEvent])

  private case class Registration(replicator: ActorRef, currentTargetVersionVector: VectorTime,
                                  filter: ReplicationFilter, registrationTime: Long)

  private object Registration {
    def apply(read: ReplicationRead): Registration =
      new Registration(read.replicator, read.currentTargetVersionVector, read.filter, System.nanoTime())
  }
}

class NotificationChannel(logId: String) extends Actor {
  import NotificationChannel._

  private val settings = new NotificationChannelSettings(context.system.settings.config)

  // targetLogId -> subscription
  private var registry: Map[String, Registration] = Map.empty

  // targetLogIds for which a read operation is in progress
  private var reading: Set[String] = Set.empty

  def receive = {
    case Updated(events) =>
      val currentTime = System.nanoTime()
      registry.foreach {
        case (targetLogId, reg) =>
          if (!reading.contains(targetLogId)
            && events.exists(_.replicable(reg.currentTargetVersionVector, reg.filter))
            && currentTime - reg.registrationTime <= settings.registrationExpirationDuration.toNanos)
            reg.replicator ! ReplicationDue
      }
    case r: ReplicationRead =>
      registry += (r.targetLogId -> Registration(r))
      reading += r.targetLogId
    case r: ReplicationReadSuccess =>
      reading -= r.targetLogId
    case r: ReplicationReadFailure =>
      reading -= r.targetLogId
    case w: ReplicationWrite =>
      for {
        id <- w.sourceLogIds
        rr <- registry.get(id)
      } registry += (id -> rr.copy(currentTargetVersionVector = w.metadata(id).currentVersionVector))
  }
}
Example 103
Source File: LogEventDispatcher.scala From eventuate with Apache License 2.0 | 5 votes |
package com.rbmhtechnology.eventuate.adapter.vertx

import akka.actor.{ Actor, ActorRef, Props }
import com.rbmhtechnology.eventuate.adapter.vertx.LogEventDispatcher.EndpointRoute
import com.rbmhtechnology.eventuate.adapter.vertx.LogProducer.PersistMessage
import io.vertx.core.Vertx
import io.vertx.core.eventbus.{ Message, MessageConsumer }

object LogEventDispatcher {

  case class EventProducerRef(id: String, log: ActorRef)

  case class EndpointRoute(sourceEndpoint: String, producer: EventProducerRef,
    filter: PartialFunction[Any, Boolean] = { case _ => true })

  def props(routes: Seq[EndpointRoute], vertx: Vertx): Props =
    Props(new LogEventDispatcher(routes, vertx))
}

class LogEventDispatcher(routes: Seq[EndpointRoute], vertx: Vertx) extends Actor {

  import VertxHandlerConverters._

  val producers = routes
    .groupBy(_.producer)
    .map { case (producer, _) => producer.id -> context.actorOf(LogProducer.props(producer.id, producer.log)) }

  val consumers = routes
    .map { r => installMessageConsumer(r.sourceEndpoint, producers(r.producer.id), r.filter) }

  private def installMessageConsumer(endpoint: String, producer: ActorRef,
    filter: PartialFunction[Any, Boolean]): MessageConsumer[Any] = {
    val handler = (msg: Message[Any]) => {
      if (filter.applyOrElse(msg.body(), (_: Any) => false)) {
        producer ! PersistMessage(msg)
      } else {
        msg.reply(ProcessingResult.FILTERED)
      }
    }
    vertx.eventBus().consumer[Any](endpoint, handler.asVertxHandler)
  }

  override def receive: Receive = Actor.emptyBehavior

  override def postStop(): Unit = {
    consumers.foreach(_.unregister())
  }
}
Example 104
Source File: LeveldbDeletionActor.scala From eventuate with Apache License 2.0 | 5 votes |
package com.rbmhtechnology.eventuate.log.leveldb

import java.io.Closeable

import akka.actor.Actor
import akka.actor.PoisonPill
import akka.actor.Props
import com.rbmhtechnology.eventuate.log.leveldb.LeveldbEventLog._
import org.iq80.leveldb.DB
import org.iq80.leveldb.ReadOptions
import org.iq80.leveldb.WriteOptions

import scala.annotation.tailrec
import scala.concurrent.Promise

private object LeveldbDeletionActor {
  case object DeleteBatch

  def props(leveldb: DB, leveldbReadOptions: ReadOptions, leveldbWriteOptions: WriteOptions,
            batchSize: Int, toSequenceNr: Long, promise: Promise[Unit]): Props =
    Props(new LeveldbDeletionActor(leveldb, leveldbReadOptions, leveldbWriteOptions, batchSize, toSequenceNr, promise))
}

private class LeveldbDeletionActor(
  val leveldb: DB,
  val leveldbReadOptions: ReadOptions,
  val leveldbWriteOptions: WriteOptions,
  batchSize: Int,
  toSequenceNr: Long,
  promise: Promise[Unit]) extends Actor with WithBatch {

  import LeveldbDeletionActor._

  val eventKeyIterator: CloseableIterator[EventKey] = newEventKeyIterator

  override def preStart() = self ! DeleteBatch

  override def postStop() = eventKeyIterator.close()

  override def receive = {
    case DeleteBatch =>
      withBatch { batch =>
        eventKeyIterator.take(batchSize).foreach { eventKey =>
          batch.delete(eventKeyBytes(eventKey.classifier, eventKey.sequenceNr))
        }
      }
      if (eventKeyIterator.hasNext) {
        self ! DeleteBatch
      } else {
        promise.success(())
        self ! PoisonPill
      }
  }

  private def newEventKeyIterator: CloseableIterator[EventKey] = {
    new Iterator[EventKey] with Closeable {
      val iterator = leveldb.iterator(leveldbReadOptions.snapshot(leveldb.getSnapshot))
      iterator.seek(eventKeyBytes(EventKey.DefaultClassifier, 1L))

      @tailrec
      override def hasNext: Boolean = {
        val key = eventKey(iterator.peekNext().getKey)
        key != eventKeyEnd && (key.sequenceNr <= toSequenceNr || {
          iterator.seek(eventKeyBytes(key.classifier + 1, 1L))
          hasNext
        })
      }

      override def next() = eventKey(iterator.next().getKey)

      override def close() = {
        iterator.close()
        leveldbReadOptions.snapshot().close()
      }
    }
  }
}
Example 105
Source File: PersistOnEventWithRecoverySpecLeveldb.scala From eventuate with Apache License 2.0 | 5 votes |
package com.rbmhtechnology.eventuate

import java.util.UUID

import akka.actor.Actor
import akka.actor.ActorRef
import akka.actor.Props
import akka.testkit.TestProbe
import com.rbmhtechnology.eventuate.ReplicationIntegrationSpec.replicationConnection
import com.rbmhtechnology.eventuate.utilities._
import org.apache.commons.io.FileUtils
import org.scalatest.Matchers
import org.scalatest.WordSpec

import scala.concurrent.duration.DurationInt

object PersistOnEventWithRecoverySpecLeveldb {

  class OnBEmitRandomActor(val eventLog: ActorRef, probe: TestProbe) extends EventsourcedActor with PersistOnEvent {

    override def id = getClass.getName

    override def onCommand = Actor.emptyBehavior

    override def onEvent = {
      case "A" =>
      case "B" => persistOnEvent(UUID.randomUUID().toString)
      case uuid: String => probe.ref ! uuid
    }
  }

  def persistOnEventProbe(locationA1: Location, log: ActorRef) = {
    val probe = locationA1.probe
    locationA1.system.actorOf(Props(new OnBEmitRandomActor(log, probe)))
    probe
  }

  val noMsgTimeout = 100.millis
}

class PersistOnEventWithRecoverySpecLeveldb extends WordSpec with Matchers with MultiLocationSpecLeveldb {
  import RecoverySpecLeveldb._
  import PersistOnEventWithRecoverySpecLeveldb._

  override val logFactory: String => Props =
    id => SingleLocationSpecLeveldb.TestEventLog.props(id, batching = true)

  "An EventsourcedActor with PersistOnEvent" must {
    "not re-attempt persistence on successful write after reordering of events through disaster recovery" in {
      val locationB = location("B", customConfig = RecoverySpecLeveldb.config)
      def newLocationA = location("A", customConfig = RecoverySpecLeveldb.config)
      val locationA1 = newLocationA

      val endpointB = locationB.endpoint(Set("L1"), Set(replicationConnection(locationA1.port)))
      def newEndpointA(l: Location, activate: Boolean) =
        l.endpoint(Set("L1"), Set(replicationConnection(locationB.port)), activate = activate)
      val endpointA1 = newEndpointA(locationA1, activate = true)

      val targetA = endpointA1.target("L1")
      val logDirA = logDirectory(targetA)
      val targetB = endpointB.target("L1")
      val a1Probe = persistOnEventProbe(locationA1, targetA.log)

      write(targetA, List("A"))
      write(targetB, List("B"))

      val event = a1Probe.expectMsgClass(classOf[String])
      assertConvergence(Set("A", "B", event), endpointA1, endpointB)

      locationA1.terminate().await
      FileUtils.deleteDirectory(logDirA)

      val locationA2 = newLocationA
      val endpointA2 = newEndpointA(locationA2, activate = false)
      endpointA2.recover().await

      val a2Probe = persistOnEventProbe(locationA2, endpointA2.logs("L1"))
      a2Probe.expectMsg(event)
      a2Probe.expectNoMsg(noMsgTimeout)
      assertConvergence(Set("A", "B", event), endpointA2, endpointB)
    }
  }
}
Example 106
Source File: CustomAutoDownBase.scala From akka-cluster-custom-downing with Apache License 2.0 | 5 votes |
package tanukki.akka.cluster.autodown

import akka.actor.{Cancellable, Scheduler, Address, Actor}
import akka.cluster.ClusterEvent._
import akka.cluster.MemberStatus.{Exiting, Down}
import akka.cluster._

import scala.concurrent.duration.{Duration, FiniteDuration}

object CustomDowning {
  case class UnreachableTimeout(member: Member)
}

abstract class CustomAutoDownBase(autoDownUnreachableAfter: FiniteDuration) extends Actor {

  import CustomDowning._

  def selfAddress: Address

  def down(node: Address): Unit

  def downOrAddPending(member: Member): Unit

  def downOrAddPendingAll(members: Set[Member]): Unit

  def scheduler: Scheduler

  import context.dispatcher

  val skipMemberStatus = Set[MemberStatus](Down, Exiting)

  private var scheduledUnreachable: Map[Member, Cancellable] = Map.empty
  private var pendingUnreachable: Set[Member] = Set.empty
  private var unstableUnreachable: Set[Member] = Set.empty

  override def postStop(): Unit = {
    scheduledUnreachable.values foreach { _.cancel }
    super.postStop()
  }

  def receiveEvent: Receive

  def receive: Receive = receiveEvent orElse predefinedReceiveEvent

  def predefinedReceiveEvent: Receive = {
    case state: CurrentClusterState =>
      initialize(state)
      state.unreachable foreach unreachableMember

    case UnreachableTimeout(member) =>
      if (scheduledUnreachable contains member) {
        scheduledUnreachable -= member
        if (scheduledUnreachable.isEmpty) {
          unstableUnreachable += member
          downOrAddPendingAll(unstableUnreachable)
          unstableUnreachable = Set.empty
        } else {
          unstableUnreachable += member
        }
      }

    case _: ClusterDomainEvent =>
  }

  def initialize(state: CurrentClusterState) = {}

  def unreachableMember(m: Member): Unit =
    if (!skipMemberStatus(m.status) && !scheduledUnreachable.contains(m))
      scheduleUnreachable(m)

  def scheduleUnreachable(m: Member): Unit = {
    if (autoDownUnreachableAfter == Duration.Zero) {
      downOrAddPending(m)
    } else {
      val task = scheduler.scheduleOnce(autoDownUnreachableAfter, self, UnreachableTimeout(m))
      scheduledUnreachable += (m -> task)
    }
  }

  def remove(member: Member): Unit = {
    scheduledUnreachable.get(member) foreach { _.cancel }
    scheduledUnreachable -= member
    pendingUnreachable -= member
    unstableUnreachable -= member
  }

  def scheduledUnreachableMembers: Map[Member, Cancellable] = scheduledUnreachable

  def pendingUnreachableMembers: Set[Member] = pendingUnreachable

  def pendingAsUnreachable(member: Member): Unit = pendingUnreachable += member

  def downPendingUnreachableMembers(): Unit = {
    pendingUnreachable.foreach(member => down(member.address))
    pendingUnreachable = Set.empty
  }

  def unstableUnreachableMembers: Set[Member] = unstableUnreachable
}
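A minimal concrete subclass sketch (not from the original source; NaiveLeaderAutoDown is an illustrative name, and the leader-only policy is a simplification of the strategies shipped with the library): it subscribes to cluster events, downs timed-out unreachable members when this node is the leader, and queues them as pending otherwise.

import akka.actor.{Address, Scheduler}
import akka.cluster.{Cluster, Member}
import akka.cluster.ClusterEvent._
import scala.concurrent.duration.FiniteDuration
import tanukki.akka.cluster.autodown.CustomAutoDownBase

class NaiveLeaderAutoDown(timeout: FiniteDuration) extends CustomAutoDownBase(timeout) {

  private val cluster = Cluster(context.system)
  private var leader = false

  override def selfAddress: Address = cluster.selfAddress
  override def scheduler: Scheduler = context.system.scheduler
  override def down(node: Address): Unit = cluster.down(node)

  // Down immediately when leader; otherwise remember the member until leadership arrives
  override def downOrAddPending(member: Member): Unit =
    if (leader) down(member.address) else pendingAsUnreachable(member)

  override def downOrAddPendingAll(members: Set[Member]): Unit = members.foreach(downOrAddPending)

  override def preStart(): Unit = {
    cluster.subscribe(self, initialStateMode = InitialStateAsSnapshot, classOf[ClusterDomainEvent])
    super.preStart()
  }

  override def postStop(): Unit = {
    cluster.unsubscribe(self)
    super.postStop()
  }

  override def receiveEvent: Receive = {
    case LeaderChanged(newLeader) =>
      leader = newLeader.contains(selfAddress)
      if (leader) downPendingUnreachableMembers()
    case UnreachableMember(m) => unreachableMember(m)
    case ReachableMember(m) => remove(m)
    case MemberRemoved(m, _) => remove(m)
  }
}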
Example 107
Source File: SimpleClusterListener.scala From constructr-consul with Apache License 2.0 | 5 votes |
package com.tecsisa.constructr.coordination
package demo

import akka.actor.{ Actor, ActorLogging, Address, Props }
import akka.cluster.ClusterEvent.{ MemberEvent, MemberJoined, MemberRemoved, MemberUp, UnreachableMember }
import akka.cluster.Cluster

object SimpleClusterListener {

  case object GetMemberNodes

  final val Name = "clusterListener"

  def props: Props = Props(new SimpleClusterListener)
}

class SimpleClusterListener extends Actor with ActorLogging {
  import SimpleClusterListener._

  val cluster = Cluster(context.system)

  private var members = Set.empty[Address]

  override def preStart(): Unit = cluster.subscribe(self, classOf[MemberEvent], classOf[UnreachableMember])

  override def postStop(): Unit = cluster.unsubscribe(self)

  override def receive = {
    case GetMemberNodes =>
      sender() ! members

    case MemberJoined(member) =>
      log.info("Member joined: {}", member.address)
      members += member.address

    case MemberUp(member) =>
      log.info("Member up: {}", member.address)
      members += member.address

    case MemberRemoved(member, _) =>
      log.info("Member removed: {}", member.address)
      members -= member.address
  }
}
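A usage sketch (not from the original source; assumes the actor system is configured with the cluster actor-ref provider in application.conf): querying the listener for the currently known member addresses.

import java.util.concurrent.TimeUnit
import akka.actor.{ActorSystem, Address}
import akka.pattern.ask
import akka.util.Timeout
import com.tecsisa.constructr.coordination.demo.SimpleClusterListener
import com.tecsisa.constructr.coordination.demo.SimpleClusterListener.GetMemberNodes

object MemberNodesDemo extends App {
  implicit val timeout: Timeout = Timeout(5, TimeUnit.SECONDS)
  val system = ActorSystem("demo") // assumes akka.actor.provider = "cluster"
  import system.dispatcher

  val listener = system.actorOf(SimpleClusterListener.props, SimpleClusterListener.Name)
  // GetMemberNodes is answered with the Set[Address] accumulated from member events
  (listener ? GetMemberNodes).mapTo[Set[Address]].foreach(nodes => println(s"Known members: $nodes"))
}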
Example 108
Source File: WebSocket.scala From trucking-iot with Apache License 2.0 | 5 votes |
package controllers

import javax.inject.{Inject, Singleton}

import akka.actor.{Actor, ActorRef, ActorSystem, Props}
import akka.kafka.scaladsl.Consumer
import akka.kafka.{ConsumerSettings, Subscriptions}
import akka.stream.scaladsl.Sink
import akka.stream.{Materializer, ThrottleMode}
import com.typesafe.config.ConfigFactory
import org.apache.kafka.clients.consumer.ConsumerConfig
import org.apache.kafka.common.serialization.{ByteArrayDeserializer, StringDeserializer}
import play.api.libs.streams.ActorFlow
import play.api.mvc.{Controller, WebSocket}

import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.Future
import scala.concurrent.duration._

//@Singleton
class KafkaWebSocket @Inject() (implicit system: ActorSystem, materializer: Materializer) extends Controller {

  def kafkaWS = WebSocket.accept[String, String] { request =>
    ActorFlow.actorRef(out => KafkaWSActor.props(out))
  }

  object KafkaWSActor {
    def props(outRef: ActorRef) = Props(new KafkaWSActor(outRef))
  }

  class KafkaWSActor(outRef: ActorRef) extends Actor {

    val config = ConfigFactory.load()
    val combinedConfig = ConfigFactory.defaultOverrides()
      .withFallback(config)
      .withFallback(ConfigFactory.defaultApplication())
      .getConfig("trucking-web-application.backend")

    val consumerSettings = ConsumerSettings(system, new ByteArrayDeserializer, new StringDeserializer)
      //.withBootstrapServers("sandbox-hdf.hortonworks.com:6667")
      .withBootstrapServers(combinedConfig.getString("kafka.bootstrap-servers"))
      .withGroupId("group1")
      .withProperty(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest")

    Consumer.committableSource(consumerSettings, Subscriptions.topics("trucking_data_joined"))
      .mapAsync(1) { msg => Future(outRef ! msg.record.value).map(_ => msg) }
      //.mapAsync(1) { msg => msg.committableOffset.commitScaladsl() } // TODO: Disabling commits for debug
      .throttle(1, 250.milliseconds, 1, ThrottleMode.Shaping)
      .runWith(Sink.ignore)

    def receive = {
      case msg: String => outRef ! s"Ack: $msg"
    }
  }
}
Example 109
Source File: EnrichToKafkaSimulator.scala From trucking-iot with Apache License 2.0 | 5 votes |
package com.orendainx.trucking.simulator.simulators

import akka.actor.{Actor, ActorSystem, Inbox, Props}
import better.files.File
import com.orendainx.trucking.commons.models._
import com.orendainx.trucking.enrichment.WeatherAPI
import com.orendainx.trucking.simulator.coordinators.AutomaticCoordinator
import com.orendainx.trucking.simulator.depots.NoSharingDepot
import com.orendainx.trucking.simulator.flows.SharedFlowManager
import com.orendainx.trucking.simulator.generators.TruckAndTrafficGenerator
import com.orendainx.trucking.simulator.services.DriverFactory
import com.orendainx.trucking.simulator.transmitters.{ActorTransmitter, DataTransmitter, KafkaTransmitter}
import com.typesafe.config.{Config, ConfigFactory}

import scala.concurrent.Await
import scala.concurrent.duration._

private class EnrichmentActor extends Actor {

  def receive = {
    case td: TruckData =>
      kafkaTruckTransmitter ! DataTransmitter.Transmit(
        EnrichedTruckData(td, WeatherAPI.default.getFog(td.eventType),
          WeatherAPI.default.getRain(td.eventType), WeatherAPI.default.getWind(td.eventType))
      )
    case td: TrafficData =>
      kafkaTrafficTransmitter ! DataTransmitter.Transmit(td)
  }
}
Example 110
Source File: WorkerStateReporter.scala From maha with Apache License 2.0 | 5 votes |
// Copyright 2018, Yahoo Inc. // Licensed under the terms of the Apache License 2.0. Please see LICENSE file in project root for terms. package com.yahoo.maha.worker.state import java.io.File import akka.actor.{Actor, ActorPath, ActorSystem, Props} import com.typesafe.config.{Config, ConfigFactory} import com.yahoo.maha.core.Engine import com.yahoo.maha.worker.state.actor._ import grizzled.slf4j.Logging object WorkerStateReporter extends Logging { // Use a bounded mailbox to prevent memory leaks in the rare case when jobs get piled up to be processed by the actor val defaultConfig: Config = ConfigFactory.parseString( """ |akka.actor.nonblocking_bounded_mailbox { | mailbox-type = akka.dispatch.NonBlockingBoundedMailbox | mailbox-capacity = 10000 |} |akka { | loggers = ["akka.event.slf4j.Slf4jLogger"] | loglevel = "INFO" |} |""".stripMargin) } case class WorkerStateReporter(akkaConf: String) extends Logging { val config: Config = { val file = new File(akkaConf) if(file.exists() && file.canRead) { info(s"Using akka conf file : ${file.getAbsolutePath}") ConfigFactory.parseFile(file) } else { info("Using default akka config") WorkerStateReporter.defaultConfig } } val system = ActorSystem("maha-workers", config) lazy val workerStateActorPath: ActorPath = { val actorConfig = WorkerStateActorConfig() val props: Props = Props(classOf[WorkerStateActor], actorConfig).withMailbox("akka.actor.nonblocking_bounded_mailbox") val path = system.actorOf(props, actorConfig.name).path info(s"Created WorkerStateActor: $path") path } def jobStarted(executionType: ExecutionType, jobId: Long, engine: Engine, cost: Long, estimatedRows: Long, userId: String): Unit = { sendMessage(JobStarted(executionType, jobId, engine, cost, estimatedRows, userId)) } def jobEnded(executionType: ExecutionType, jobId: Long, engine: Engine, cost: Long, estimatedRows: Long, userId: String): Unit = { sendMessage(JobEnded(executionType, jobId, engine, cost, estimatedRows, userId)) } def sendMessage(actorMessage:WorkerStateActorMessage) = { try { system.actorSelection(workerStateActorPath).tell(actorMessage, Actor.noSender) } catch { case t: Throwable => warn(s"Failed to send $actorMessage message to WorkerStateActor", t) } } }
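The notable piece of the default config is the NonBlockingBoundedMailbox, which caps the queue at 10000 messages instead of letting it grow without bound. A minimal sketch of attaching that mailbox to any actor (Echo and the system name are illustrative):

import akka.actor.{ Actor, ActorSystem, Props }

class Echo extends Actor {
  def receive: Receive = { case m => sender() ! m }
}

object BoundedMailboxSketch extends App {
  // reuse the config that defines akka.actor.nonblocking_bounded_mailbox
  val system = ActorSystem("demo", WorkerStateReporter.defaultConfig)
  val echo = system.actorOf(Props[Echo].withMailbox("akka.actor.nonblocking_bounded_mailbox"))
  echo ! "ping"
}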
Example 111
Source File: ReplicaCoordinatorActor.scala From JustinDB with Apache License 2.0 | 5 votes |
package justin.db.actors import akka.actor.{Actor, Props} import akka.routing.{DefaultResizer, RoundRobinPool} import justin.db.actors.protocol.{ReadData, WriteData} import justin.db.replica.read.ReplicaReadCoordinator import justin.db.replica.write.ReplicaWriteCoordinator import scala.concurrent.ExecutionContext class ReplicaCoordinatorActor(readCoordinator: ReplicaReadCoordinator, writeCoordinator: ReplicaWriteCoordinator) extends Actor { private implicit val ec: ExecutionContext = context.dispatcher override def receive: Receive = { case rd: ReadData => readCoordinator.apply(rd.cmd, rd.clusterMembers).foreach(rd.sender ! _) case wd: WriteData => writeCoordinator.apply(wd.cmd, wd.clusterMembers).foreach(wd.sender ! _) } } object ReplicaCoordinatorActor { def props(readCoordinator: ReplicaReadCoordinator, writeCoordinator: ReplicaWriteCoordinator): Props = { Props(new ReplicaCoordinatorActor(readCoordinator, writeCoordinator)) } } object RoundRobinCoordinatorRouter { def routerName: String = "CoordinatorRouter" private val pool = RoundRobinPool( nrOfInstances = 5, resizer = Some(DefaultResizer(lowerBound = 2, upperBound = 15)) ) def props(readCoordinator: ReplicaReadCoordinator, writeCoordinator: ReplicaWriteCoordinator): Props = { pool.props(ReplicaCoordinatorActor.props(readCoordinator, writeCoordinator)) } }
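The DefaultResizer lets the pool shrink to 2 and grow to 15 routees around the 5 it starts with. Creating the router is a one-liner; the read and write coordinators are JustinDB types assumed to be constructed elsewhere:

import akka.actor.ActorSystem

val system = ActorSystem("justindb")
val coordinatorRouter = system.actorOf(
  RoundRobinCoordinatorRouter.props(readCoordinator, writeCoordinator),
  RoundRobinCoordinatorRouter.routerName)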
Example 112
Source File: StorageNodeActor.scala From JustinDB with Apache License 2.0 | 5 votes |
package justin.db.actors

import akka.actor.{Actor, ActorRef, Props, RootActorPath, Terminated}
import akka.cluster.ClusterEvent.{CurrentClusterState, MemberUp}
import akka.cluster.{Cluster, Member, MemberStatus}
import com.typesafe.scalalogging.StrictLogging
import justin.db.actors.protocol.{RegisterNode, _}
import justin.db.cluster.ClusterMembers
import justin.db.cluster.datacenter.Datacenter
import justin.db.consistenthashing.{NodeId, Ring}
import justin.db.replica._
import justin.db.replica.read.{ReplicaLocalReader, ReplicaReadCoordinator, ReplicaRemoteReader}
import justin.db.replica.write.{ReplicaLocalWriter, ReplicaRemoteWriter, ReplicaWriteCoordinator}
import justin.db.storage.PluggableStorageProtocol

import scala.concurrent.ExecutionContext

class StorageNodeActor(nodeId: NodeId, datacenter: Datacenter, storage: PluggableStorageProtocol, ring: Ring, n: N) extends Actor with StrictLogging {

  private[this] implicit val ec: ExecutionContext = context.dispatcher
  private[this] val cluster = Cluster(context.system)
  private[this] var clusterMembers = ClusterMembers.empty
  private[this] val readCoordinator = new ReplicaReadCoordinator(nodeId, ring, n, new ReplicaLocalReader(storage), new ReplicaRemoteReader)
  private[this] val writeCoordinator = new ReplicaWriteCoordinator(nodeId, ring, n, new ReplicaLocalWriter(storage), new ReplicaRemoteWriter)

  private[this] val coordinatorRouter = context.actorOf(
    props = RoundRobinCoordinatorRouter.props(readCoordinator, writeCoordinator),
    name = RoundRobinCoordinatorRouter.routerName
  )

  private[this] val name = self.path.name

  override def preStart(): Unit = cluster.subscribe(this.self, classOf[MemberUp])
  override def postStop(): Unit = cluster.unsubscribe(this.self)

  def receive: Receive = {
    receiveDataPF orElse receiveClusterDataPF orElse receiveRegisterNodePR orElse notHandledPF
  }

  private[this] def receiveDataPF: Receive = {
    case readReq: StorageNodeReadRequest =>
      coordinatorRouter ! ReadData(sender(), clusterMembers, readReq)
    case writeLocalDataReq: StorageNodeWriteDataLocal =>
      coordinatorRouter ! WriteData(sender(), clusterMembers, writeLocalDataReq)
    case writeClientReplicaReq: Internal.WriteReplica =>
      coordinatorRouter ! WriteData(sender(), clusterMembers, writeClientReplicaReq)
  }

  private[this] def receiveClusterDataPF: Receive = {
    case "members" => sender() ! clusterMembers
    case MemberUp(member) => register(nodeId, ring, member)
    case state: CurrentClusterState => state.members.filter(_.status == MemberStatus.Up).foreach(member => register(nodeId, ring, member))
    case Terminated(actorRef) => clusterMembers = clusterMembers.removeByRef(StorageNodeActorRef(actorRef))
  }

  private[this] def receiveRegisterNodePR: Receive = {
    case RegisterNode(senderNodeId) if clusterMembers.notContains(senderNodeId) =>
      val senderRef = sender()
      context.watch(senderRef)
      clusterMembers = clusterMembers.add(senderNodeId, StorageNodeActorRef(senderRef))
      senderRef ! RegisterNode(nodeId)
      logger.info(s"Actor[$name]: Successfully registered node [id-${senderNodeId.id}]")
    case RegisterNode(senderNodeId) =>
      logger.info(s"Actor[$name]: Node [id-${senderNodeId.id}] is already registered")
  }

  private[this] def register(nodeId: NodeId, ring: Ring, member: Member) = {
    (member.hasRole(StorageNodeActor.role), datacenter.name == member.dataCenter) match {
      case (true, true) => register()
      case (_, false)   => logger.info(s"Actor[$name]: $member doesn't belong to datacenter [${datacenter.name}]")
      case (false, _)   => logger.info(s"Actor[$name]: $member doesn't have [${StorageNodeActor.role}] role (it has roles ${member.roles})")
    }

    def register() = for {
      ringNodeId <- ring.nodesId
      nodeName    = StorageNodeActor.name(ringNodeId, Datacenter(member.dataCenter))
      nodeRef     = context.actorSelection(RootActorPath(member.address) / "user" / nodeName)
    } yield nodeRef ! RegisterNode(nodeId)
  }

  private[this] def notHandledPF: Receive = {
    case t => logger.warn(s"Actor[$name]: Not handled message [$t]")
  }
}

object StorageNodeActor {
  def role: String = "storagenode"
  def name(nodeId: NodeId, datacenter: Datacenter): String = s"${datacenter.name}-id-${nodeId.id}"
  def props(nodeId: NodeId, datacenter: Datacenter, storage: PluggableStorageProtocol, ring: Ring, n: N): Props = {
    Props(new StorageNodeActor(nodeId, datacenter, storage, ring, n))
  }
}

case class StorageNodeActorRef(ref: ActorRef) extends AnyVal
Example 113
Source File: ReplicaRemoteWriterTest.scala From JustinDB with Apache License 2.0 | 5 votes |
package justin.db.replica.write import java.util.UUID import akka.actor.{Actor, ActorSystem} import akka.testkit.{TestActorRef, TestKit} import justin.db.Data import justin.db.actors.StorageNodeActorRef import justin.db.actors.protocol.{StorageNodeFailedWrite, StorageNodeSuccessfulWrite, StorageNodeWriteDataLocal} import org.scalatest.concurrent.ScalaFutures import org.scalatest.{FlatSpecLike, Matchers} import scala.concurrent.duration._ class ReplicaRemoteWriterTest extends TestKit(ActorSystem("test-system")) with FlatSpecLike with Matchers with ScalaFutures { behavior of "Replica Remote Writer" override implicit def patienceConfig: PatienceConfig = PatienceConfig(10.seconds, 50.millis) it should "get info back that one of the saving is successful and second one has failed" in { // given val service = new ReplicaRemoteWriter()(system.dispatcher) val data = Data(id = UUID.randomUUID(), value = "exemplary-value") val storageSuccessfulActorRef = testActorRef(msgBack = StorageNodeSuccessfulWrite(data.id)) val storageFailedActorRef = testActorRef(msgBack = StorageNodeFailedWrite(data.id)) val storageNodeRefs = List(storageSuccessfulActorRef, storageFailedActorRef).map(StorageNodeActorRef) // when val writingResult = service.apply(storageNodeRefs, data) // then whenReady(writingResult) { _ shouldBe List(StorageNodeSuccessfulWrite(data.id), StorageNodeFailedWrite(data.id)) } } it should "recover failed behavior of actor" in { // given val service = new ReplicaRemoteWriter()(system.dispatcher) val data = Data(id = UUID.randomUUID(), value = "exemplary-value") val storageActorRef = testActorRef(new Exception) val storageNodeRefs = List(StorageNodeActorRef(storageActorRef)) // when val writingResult = service.apply(storageNodeRefs, data) // then whenReady(writingResult) { _ shouldBe List(StorageNodeFailedWrite(data.id)) } } private def testActorRef(msgBack: => Any) = { TestActorRef(new Actor { override def receive: Receive = { case StorageNodeWriteDataLocal(id) => sender() ! msgBack } }) } }
Example 114
Source File: ReplicaRemoteReaderTest.scala From JustinDB with Apache License 2.0 | 5 votes |
package justin.db.replica.read import java.util.UUID import akka.actor.{Actor, ActorSystem} import akka.testkit.{TestActorRef, TestKit} import justin.db.Data import justin.db.actors.StorageNodeActorRef import justin.db.actors.protocol._ import org.scalatest.concurrent.ScalaFutures import org.scalatest.{FlatSpecLike, Matchers} import scala.concurrent.duration._ class ReplicaRemoteReaderTest extends TestKit(ActorSystem("test-system")) with FlatSpecLike with Matchers with ScalaFutures { behavior of "Replica Remote Reader" override implicit def patienceConfig: PatienceConfig = PatienceConfig(10.seconds, 50.millis) it should "get info back that one of the value could be found and second one is obsolete" in { // given val service = new ReplicaRemoteReader()(system.dispatcher) val id = UUID.randomUUID() val foundData = Data(id, "value") val notFoundId = UUID.randomUUID() val storageNotFoundActorRef = testActorRef(msgBack = StorageNodeNotFoundRead(notFoundId)) val storageFoundActorRef = testActorRef(msgBack = StorageNodeFoundRead(foundData)) val storageNodeRefs = List(storageNotFoundActorRef, storageFoundActorRef).map(StorageNodeActorRef) // when val readingResult = service.apply(storageNodeRefs, id) // then whenReady(readingResult) { _ shouldBe List(StorageNodeNotFoundRead(notFoundId), StorageNodeFoundRead(foundData)) } } it should "recover failed behavior of actor" in { // given val service = new ReplicaRemoteReader()(system.dispatcher) val id = UUID.randomUUID() val storageActorRef = testActorRef(new Exception) val storageNodeRefs = List(StorageNodeActorRef(storageActorRef)) // when val readingResult = service.apply(storageNodeRefs, id) // then whenReady(readingResult) { _ shouldBe List(StorageNodeFailedRead(id)) } } private def testActorRef(msgBack: => Any) = { TestActorRef(new Actor { override def receive: Receive = { case StorageNodeLocalRead(id) => sender() ! msgBack } }) } }
Example 115
Source File: ReactiveConsulHttpServiceActor.scala From reactive-consul with MIT License | 5 votes |
package stormlantern.consul.example import akka.actor.{ Actor, Props } import spray.routing.HttpService import scala.concurrent.ExecutionContext class ReactiveConsulHttpServiceActor extends Actor with ReactiveConsulHttpService { def actorRefFactory = context def receive = runRoute(reactiveConsulRoute) } object ReactiveConsulHttpServiceActor { def props() = Props(classOf[ReactiveConsulHttpServiceActor]) } trait ReactiveConsulHttpService extends HttpService { implicit def executionContext: ExecutionContext = actorRefFactory.dispatcher val reactiveConsulRoute = pathPrefix("api") { path("identify") { get { complete(s"Hi, I'm a ${System.getenv("SERVICE_NAME")} called ${System.getenv("INSTANCE_NAME")}") } } ~ path("talk") { get { complete("pong") } } } }
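This service uses spray (the pre-akka-http toolkit), so it is bound through the spray-can IO extension rather than Http().bindAndHandle. A hedged bootstrap sketch against the spray 1.3 API:

import akka.actor.ActorSystem
import akka.io.IO
import spray.can.Http

object Boot extends App {
  implicit val system: ActorSystem = ActorSystem("reactive-consul-example")
  val service = system.actorOf(ReactiveConsulHttpServiceActor.props(), "http-service")
  IO(Http) ! Http.Bind(service, interface = "0.0.0.0", port = 8080)
}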
Example 116
Source File: LoadBalancerActor.scala From reactive-consul with MIT License | 5 votes |
package stormlantern.consul.client.loadbalancers

import akka.actor.Status.Failure
import akka.actor.{ Props, Actor, ActorLogging }
import LoadBalancerActor._
import stormlantern.consul.client.discovery.{ ConnectionProvider, ConnectionHolder }
import stormlantern.consul.client.ServiceUnavailableException

import scala.concurrent.ExecutionContext.Implicits.global
import scala.collection.mutable

class LoadBalancerActor(loadBalancer: LoadBalancer, key: String) extends Actor with ActorLogging {

  import akka.pattern.pipe

  // Actor state
  val connectionProviders = mutable.Map.empty[String, ConnectionProvider]

  override def postStop(): Unit = {
    log.debug(s"LoadBalancerActor for $key stopped, destroying all connection providers")
    connectionProviders.values.foreach(_.destroy())
  }

  def receive: PartialFunction[Any, Unit] = {
    case GetConnection ⇒
      selectConnection match {
        case Some((id, connectionProvider)) ⇒ connectionProvider.getConnectionHolder(id, self) pipeTo sender
        case None ⇒ sender ! Failure(ServiceUnavailableException(key))
      }
    case ReturnConnection(connection) ⇒ returnConnection(connection)
    case AddConnectionProvider(id, provider) ⇒ addConnectionProvider(id, provider)
    case RemoveConnectionProvider(id) ⇒ removeConnectionProvider(id)
    case HasAvailableConnectionProvider ⇒ sender ! connectionProviders.nonEmpty
  }

  def selectConnection: Option[(String, ConnectionProvider)] =
    loadBalancer.selectConnection.flatMap(id ⇒ connectionProviders.get(id).map(id → _))

  def returnConnection(connection: ConnectionHolder): Unit = {
    connectionProviders.get(connection.id).foreach(_.returnConnection(connection))
    loadBalancer.connectionReturned(connection.id)
  }

  def addConnectionProvider(id: String, provider: ConnectionProvider): Unit = {
    connectionProviders.put(id, provider)
    loadBalancer.connectionProviderAdded(id)
  }

  def removeConnectionProvider(id: String): Unit = {
    connectionProviders.remove(id).foreach(_.destroy())
    loadBalancer.connectionProviderRemoved(id)
  }
}

object LoadBalancerActor {
  // Props
  def props(loadBalancer: LoadBalancer, key: String) = Props(new LoadBalancerActor(loadBalancer, key))
  // Messages
  case object GetConnection
  case class ReturnConnection(connection: ConnectionHolder)
  case class AddConnectionProvider(id: String, provider: ConnectionProvider)
  case class RemoveConnectionProvider(id: String)
  case object HasAvailableConnectionProvider
}
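Clients use the message protocol from the companion object; GetConnection is answered, via pipeTo, with a ConnectionHolder or an akka.actor.Status.Failure. An illustrative call site (loadBalancerActor is assumed in scope):

import akka.pattern.ask
import akka.util.Timeout
import scala.concurrent.duration._
import stormlantern.consul.client.discovery.ConnectionHolder

implicit val timeout: Timeout = Timeout(5.seconds)
val connection = (loadBalancerActor ? LoadBalancerActor.GetConnection).mapTo[ConnectionHolder]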
Example 117
Source File: SessionActor.scala From reactive-consul with MIT License | 5 votes |
package stormlantern.consul.client.session import java.util.UUID import akka.actor.{ ActorRef, Props, Actor } import stormlantern.consul.client.dao.ConsulHttpClient import stormlantern.consul.client.session.SessionActor.{ MonitorSession, SessionAcquired, StartSession } import scala.concurrent.Future class SessionActor(httpClient: ConsulHttpClient, listener: ActorRef) extends Actor { import scala.concurrent.ExecutionContext.Implicits.global // Actor state var sessionId: Option[UUID] = None def receive = { case StartSession ⇒ startSession().map { id ⇒ self ! SessionAcquired(id) } case SessionAcquired(id) ⇒ sessionId = Some(id) listener ! SessionAcquired(id) self ! MonitorSession(0) case MonitorSession(lastIndex) ⇒ } // Internal methods def startSession(): Future[UUID] = { httpClient.putSession().map { id ⇒ sessionId = Some(id) id } } } object SessionActor { // Constructors def props(httpClient: ConsulHttpClient, listener: ActorRef) = Props(new SessionActor(httpClient, listener)) // Public messages case object StartSession case class SessionAcquired(sessionId: UUID) // Private messages private case class MonitorSession(lastIndex: Long) }
Example 118
Source File: LeaderFollowerActor.scala From reactive-consul with MIT License | 5 votes |
package stormlantern.consul.client.election import java.util.UUID import akka.actor.{ Actor, Props } import spray.json._ import stormlantern.consul.client.dao.{ AcquireSession, BinaryData, ConsulHttpClient, KeyData } import stormlantern.consul.client.election.LeaderFollowerActor._ class LeaderFollowerActor(httpClient: ConsulHttpClient, sessionId: UUID, key: String, host: String, port: Int) extends Actor with DefaultJsonProtocol { implicit val ec = context.dispatcher implicit val leaderInfoFormat = jsonFormat2(LeaderInfo) val leaderInfoBytes = LeaderInfo(host, port).toJson.compactPrint.getBytes("UTF-8") // Actor state var electionState: Option[ElectionState] = None // Behavior def receive = { case Participate ⇒ httpClient.putKeyValuePair(key, leaderInfoBytes, Some(AcquireSession(sessionId))).map { case true ⇒ self ! SetElectionState(Some(Leader)) self ! MonitorLock(0) case false ⇒ self ! MonitorLock(0) } case SetElectionState(state) ⇒ electionState = state case MonitorLock(index) ⇒ httpClient.getKeyValuePair(key, index = Some(index), wait = Some("1s")).map { case Seq(KeyData(_, _, newIndex, _, _, BinaryData(data), session)) ⇒ if (newIndex > index) { if (session.isEmpty) { self ! SetElectionState(None) self ! Participate } else if (session.get == sessionId) { self ! SetElectionState(Some(Leader)) self ! MonitorLock(newIndex) } else { val leaderInfo = new String(data, "UTF-8").parseJson.convertTo[LeaderInfo](leaderInfoFormat) self ! SetElectionState(Some(Follower(leaderInfo.host, leaderInfo.port))) self ! MonitorLock(newIndex) } } else { self ! MonitorLock(index) } } } } object LeaderFollowerActor { //Props def props(httpClient: ConsulHttpClient, sessionId: UUID, key: String, host: String, port: Int): Props = Props(new LeaderFollowerActor(httpClient, sessionId, key, host, port)) // Election state sealed trait ElectionState case object Leader extends ElectionState case class Follower(host: String, port: Int) extends ElectionState // Internal messages case object Participate case class SetElectionState(state: Option[ElectionState]) case class MonitorLock(lastIndex: Long) }
Example 119
Source File: EventProducer.scala From reactive-kafka-microservice-template with Apache License 2.0 | 5 votes |
package com.omearac.producers import akka.actor.{Actor, Props} import akka.event.Logging import akka.stream.scaladsl.SourceQueueWithComplete import com.omearac.shared.EventMessages.ActivatedProducerStream import com.omearac.shared.EventSourcing import com.omearac.shared.KafkaMessages.ExampleAppEvent object EventProducer { def props: Props = Props(new EventProducer) } class EventProducer extends Actor with EventSourcing { import context._ implicit val system = context.system val log = Logging(system, this.getClass.getName) var producerStream: SourceQueueWithComplete[Any] = null val subscribedMessageTypes = Seq(classOf[ExampleAppEvent]) override def preStart(): Unit = { super.preStart() subscribedMessageTypes.foreach(system.eventStream.subscribe(self, _)) } override def postStop(): Unit = { subscribedMessageTypes.foreach(system.eventStream.unsubscribe(self, _)) super.postStop() } def receive: Receive = { case ActivatedProducerStream(streamRef, _) => producerStream = streamRef become(publishEvent) case msg: ExampleAppEvent => if (producerStream == null) self ! msg else producerStream.offer(msg) case other => log.error("EventProducer got the unknown message while in idle: " + other) } def publishEvent: Receive = { case msg: ExampleAppEvent => producerStream.offer(msg) case other => log.error("EventProducer got the unknown message while producing: " + other) } }
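The producer idles until ActivatedProducerStream hands it the SourceQueueWithComplete to publish into, then switches behavior with become. A sketch of wiring up such a queue (Sink.ignore stands in for the real Kafka sink, and treating the second constructor argument as the topic name is an assumption):

import akka.stream.{ ActorMaterializer, OverflowStrategy }
import akka.stream.scaladsl.{ Keep, Sink, Source }
import com.omearac.shared.EventMessages.ActivatedProducerStream

// an implicit ActorSystem and the eventProducer ref are assumed in scope
implicit val materializer: ActorMaterializer = ActorMaterializer()

val queue = Source.queue[Any](100, OverflowStrategy.dropHead)
  .toMat(Sink.ignore)(Keep.left)
  .run()

eventProducer ! ActivatedProducerStream(queue, "example.topic")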
Example 120
Source File: EventConsumerSpec.scala From reactive-kafka-microservice-template with Apache License 2.0 | 5 votes |
package akka.kafka import akka.actor.{Actor, ActorSystem, Props} import akka.testkit.{DefaultTimeout, ImplicitSender, TestActorRef, TestKit} import com.omearac.consumers.ConsumerStreamManager.{InitializeConsumerStream, TerminateConsumerStream} import com.omearac.consumers.DataConsumer.{ConsumerActorReply, ManuallyInitializeStream, ManuallyTerminateStream} import com.omearac.consumers.EventConsumer import org.scalatest.{BeforeAndAfterAll, Matchers, WordSpecLike} import scala.collection.mutable.ArrayBuffer class EventConsumerSpec extends TestKit(ActorSystem("EventConsumerSpec")) with DefaultTimeout with ImplicitSender with WordSpecLike with Matchers with BeforeAndAfterAll { //Creating the Actors val testConsumer = TestActorRef(new EventConsumer) val mockStreamAndManager = system.actorOf(Props(new MockStreamAndManager), "mockStreamAndManager") override def afterAll: Unit = { shutdown() } class MockStreamAndManager extends Actor { val receive: Receive = { case InitializeConsumerStream(_, _) => testConsumer ! "STREAM_INIT" case TerminateConsumerStream(_) => testConsumer ! "STREAM_DONE" } } "Sending ManuallyTerminateStream to EventConsumer in receive state" should { "return a Stream Already Stopped reply " in { testConsumer ! ManuallyTerminateStream expectMsg(ConsumerActorReply("Event Consumer Stream Already Stopped")) } } "Sending ManuallyInitializeStream to EventConsumer in receive state" should { "forward the message to the ConsumerStreamManager and change state to consuming" in { testConsumer.underlyingActor.consumerStreamManager = mockStreamAndManager testConsumer ! ManuallyInitializeStream expectMsg(ConsumerActorReply("Event Consumer Stream Started")) //Now check for state change Thread.sleep(750) testConsumer ! ManuallyInitializeStream expectMsg(ConsumerActorReply("Event Consumer Already Started")) } } "Sending STREAM_DONE to EventConsumer while in consuming state" should { "change state to idle state" in { val consuming = testConsumer.underlyingActor.consumingEvents testConsumer.underlyingActor.context.become(consuming) testConsumer ! "STREAM_DONE" //Now check for state change Thread.sleep(750) testConsumer ! ManuallyTerminateStream expectMsg(ConsumerActorReply("Event Consumer Stream Already Stopped")) } } "Sending ManuallyTerminateStream to EventConsumer while in consuming state" should { "forward the message to the ConsumerStreamManager and then upon reply, change state to idle" in { val consuming = testConsumer.underlyingActor.consumingEvents testConsumer.underlyingActor.context.become(consuming) testConsumer ! ManuallyTerminateStream expectMsg(ConsumerActorReply("Event Consumer Stream Stopped")) //Now check for state change Thread.sleep(750) testConsumer ! ManuallyTerminateStream expectMsg(ConsumerActorReply("Event Consumer Stream Already Stopped")) } } "Sending ConsumerMessageBatch message" should { "reply OK" in { val msgBatch: ArrayBuffer[String] = ArrayBuffer("test1") val consuming = testConsumer.underlyingActor.consumingEvents testConsumer.underlyingActor.context.become(consuming) testConsumer.underlyingActor.consumerStreamManager = mockStreamAndManager testConsumer ! msgBatch expectMsg("OK") } } }
Example 121
Source File: DataConsumerSpec.scala From reactive-kafka-microservice-template with Apache License 2.0 | 5 votes |
package akka.kafka import akka.actor.{Actor, ActorSystem, Props} import akka.testkit.{DefaultTimeout, ImplicitSender, TestActorRef, TestKit} import com.omearac.consumers.ConsumerStreamManager.{InitializeConsumerStream, TerminateConsumerStream} import com.omearac.consumers.DataConsumer import com.omearac.consumers.DataConsumer.{ConsumerActorReply, ManuallyInitializeStream, ManuallyTerminateStream} import org.scalatest.{BeforeAndAfterAll, Matchers, WordSpecLike} import scala.collection.mutable.ArrayBuffer class DataConsumerSpec extends TestKit(ActorSystem("DataConsumerSpec")) with DefaultTimeout with ImplicitSender with WordSpecLike with Matchers with BeforeAndAfterAll { //Creating the Actors val testConsumer = TestActorRef(new DataConsumer) val mockStreamAndManager = system.actorOf(Props(new MockStreamAndManager), "mockStreamAndManager") override def afterAll: Unit = { shutdown() } class MockStreamAndManager extends Actor { val receive: Receive = { case InitializeConsumerStream(_, _) => testConsumer ! "STREAM_INIT" case TerminateConsumerStream(_) => testConsumer ! "STREAM_DONE" } } "Sending ManuallyTerminateStream to DataConsumer in receive state" should { "return a Stream Already Stopped reply " in { testConsumer ! ManuallyTerminateStream expectMsg(ConsumerActorReply("Data Consumer Stream Already Stopped")) } } "Sending ManuallyInitializeStream to DataConsumer in receive state" should { "forward the message to the ConsumerStreamManager and change state to consuming" in { testConsumer.underlyingActor.consumerStreamManager = mockStreamAndManager testConsumer ! ManuallyInitializeStream expectMsg(ConsumerActorReply("Data Consumer Stream Started")) //Now check for state change Thread.sleep(750) testConsumer ! ManuallyInitializeStream expectMsg(ConsumerActorReply("Data Consumer Already Started")) } } "Sending STREAM_DONE to DataConsumer while in consuming state" should { "change state to idle state" in { val consuming = testConsumer.underlyingActor.consumingData testConsumer.underlyingActor.context.become(consuming) testConsumer ! "STREAM_DONE" //Now check for state change Thread.sleep(750) testConsumer ! ManuallyTerminateStream expectMsg(ConsumerActorReply("Data Consumer Stream Already Stopped")) } } "Sending ManuallyTerminateStream to DataConsumer while in consuming state" should { "forward the message to the ConsumerStreamManager and then upon reply, change state to idle" in { val consuming = testConsumer.underlyingActor.consumingData testConsumer.underlyingActor.context.become(consuming) testConsumer ! ManuallyTerminateStream expectMsg(ConsumerActorReply("Data Consumer Stream Stopped")) //Now check for state change Thread.sleep(750) testConsumer ! ManuallyTerminateStream expectMsg(ConsumerActorReply("Data Consumer Stream Already Stopped")) } } "Sending ConsumerMessageBatch message" should { "reply OK" in { val msgBatch: ArrayBuffer[String] = ArrayBuffer("test1") val consuming = testConsumer.underlyingActor.consumingData testConsumer.underlyingActor.context.become(consuming) testConsumer.underlyingActor.consumerStreamManager = mockStreamAndManager testConsumer ! msgBatch expectMsg("OK") } } }
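Both specs poll for state changes with fixed Thread.sleep(750) calls. TestKit's awaitAssert is a less timing-sensitive alternative; the same check could be written as:

import scala.concurrent.duration._

awaitAssert({
  testConsumer ! ManuallyTerminateStream
  expectMsg(ConsumerActorReply("Data Consumer Stream Already Stopped"))
}, max = 2.seconds, interval = 200.millis)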
Example 122
Source File: CodebaseAnalyzeAggregatorActor.scala From CodeAnalyzerTutorial with Apache License 2.0 | 5 votes |
package tutor import java.util.Date import akka.actor.{Actor, ActorLogging, ActorRef, Cancellable, Props, Terminated} import akka.routing.{ActorRefRoutee, RoundRobinRoutingLogic, Router} import tutor.CodebaseAnalyzeAggregatorActor.{AnalyzeDirectory, Complete, Report, Timeout} import tutor.SourceCodeAnalyzerActor.NewFile import tutor.utils.BenchmarkUtil import scala.concurrent.duration._ import scala.util.{Failure, Success, Try} object CodebaseAnalyzeAggregatorActor { def props(): Props = Props(new CodebaseAnalyzeAggregatorActor) final case class AnalyzeDirectory(path: String) final case class Complete(result: Try[SourceCodeInfo]) final case object Timeout final case class Report(codebaseInfo: CodebaseInfo) } class CodebaseAnalyzeAggregatorActor extends Actor with ActorLogging with DirectoryScanner with ReportFormatter { var controller: ActorRef = _ var currentPath: String = _ var beginTime: Date = _ var fileCount = 0 var completeCount = 0 var failCount = 0 var result: CodebaseInfo = CodebaseInfo.empty var timeoutTimer: Cancellable = _ var router: Router = { val routees = Vector.fill(8) { val r = context.actorOf(SourceCodeAnalyzerActor.props()) context watch r ActorRefRoutee(r) } Router(RoundRobinRoutingLogic(), routees) } override def receive: Receive = { case AnalyzeDirectory(path) => { controller = sender() currentPath = path beginTime = BenchmarkUtil.recordStart(s"analyze folder $currentPath") foreachFile(path, PresetFilters.knownFileTypes, PresetFilters.ignoreFolders) { file => fileCount += 1 router.route(NewFile(file.getAbsolutePath), context.self) } import context.dispatcher timeoutTimer = context.system.scheduler.scheduleOnce((fileCount / 1000).seconds, context.self, Timeout) } case Complete(Success(sourceCodeInfo: SourceCodeInfo)) => { completeCount += 1 result = result + sourceCodeInfo finishIfAllComplete() } case Complete(Failure(exception)) => { completeCount += 1 failCount += 1 log.warning("processing file failed {}", exception) finishIfAllComplete() } case Timeout => { println(s"${result.totalFileNums} of $fileCount files processed before timeout") controller ! Report(result) BenchmarkUtil.recordElapse(s"analyze folder $currentPath", beginTime) } case Terminated(a) => router = router.removeRoutee(a) val r = context.actorOf(Props[SourceCodeAnalyzerActor]) context watch r router = router.addRoutee(r) case x@_ => log.error(s"receive unknown message $x") } def finishIfAllComplete(): Unit = { if (completeCount == fileCount) { timeoutTimer.cancel() controller ! Report(result) BenchmarkUtil.recordElapse(s"analyze folder $currentPath", beginTime) context.stop(self) } } }
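Because the aggregator replies with Report to whichever actor sent AnalyzeDirectory, it can also be driven from non-actor code with the ask pattern. An illustrative driver (the path and timeout are placeholders; system is assumed in scope):

import akka.pattern.ask
import akka.util.Timeout
import scala.concurrent.duration._
import tutor.CodebaseAnalyzeAggregatorActor.{ AnalyzeDirectory, Report }

implicit val timeout: Timeout = Timeout(10.minutes)
import system.dispatcher

val aggregator = system.actorOf(CodebaseAnalyzeAggregatorActor.props())
(aggregator ? AnalyzeDirectory("/path/to/codebase")).mapTo[Report]
  .foreach(report => println(report.codebaseInfo))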
Example 123
Source File: SourceCodeAnalyzerActor.scala From CodeAnalyzerTutorial with Apache License 2.0 | 5 votes |
package tutor import akka.actor.{Actor, ActorLogging, Props} import tutor.CodebaseAnalyzeAggregatorActor.Complete import tutor.SourceCodeAnalyzerActor.NewFile object SourceCodeAnalyzerActor { def props(): Props = Props(new SourceCodeAnalyzerActor) final case class NewFile(path: String) } class SourceCodeAnalyzerActor extends Actor with ActorLogging with SourceCodeAnalyzer { override def receive: Receive = { case NewFile(path) => { val sourceCodeInfo = processFile(path) sender() ! Complete(sourceCodeInfo) } } }
Example 124
Source File: CodebaseAnalyzerControllerActor.scala From CodeAnalyzerTutorial with Apache License 2.0 | 5 votes |
package tutor import akka.actor.{Actor, Props} import tutor.CodebaseAnalyzeAggregatorActor.{AnalyzeDirectory, Report} object CodebaseAnalyzerControllerActor { def props(): Props = Props(new CodebaseAnalyzerControllerActor) } class CodebaseAnalyzerControllerActor extends Actor with ReportFormatter { override def receive: Receive = { case AnalyzeDirectory(path) => { context.actorOf(CodebaseAnalyzeAggregatorActor.props()) ! AnalyzeDirectory(path) } case Report(content) => { println(format(content)) } } }
Example 125
Source File: SpringIndirectActorProducer.scala From akka-spring-boot with Apache License 2.0 | 5 votes |
package com.github.scalaspring.akka

import akka.actor.{Actor, IndirectActorProducer}
import org.springframework.context.ConfigurableApplicationContext

import scala.collection.immutable

object SpringIndirectActorProducer {

  def getBeanNameForType(applicationContext: ConfigurableApplicationContext, clazz: Class[_]): String = {
    val beanNames = applicationContext.getBeanNamesForType(clazz)
    if (beanNames.length > 1) throw new IllegalArgumentException(s"Multiple beans found for actor class ${clazz.getName} (${beanNames}). Please use name-based constructor to specify bean name to use.")
    beanNames.headOption.orElse(throw new IllegalArgumentException(s"No bean defined for actor class ${clazz.getName}")).get
  }

  def getTypeForBeanName(applicationContext: ConfigurableApplicationContext, beanName: String): Class[_ <: Actor] = {
    applicationContext.getBeanFactory.getType(beanName).asInstanceOf[Class[Actor]]
  }
}

import SpringIndirectActorProducer._

class SpringIndirectActorProducer(clazz: Class[_ <: Actor], applicationContext: ConfigurableApplicationContext, beanName: String, args: immutable.Seq[AnyRef])
  extends IndirectActorProducer {

  def this(clazz: Class[_ <: Actor], applicationContext: ConfigurableApplicationContext, args: immutable.Seq[AnyRef]) =
    this(clazz, applicationContext, getBeanNameForType(applicationContext, clazz), args)

  def this(beanName: String, applicationContext: ConfigurableApplicationContext, args: immutable.Seq[AnyRef]) =
    this(getTypeForBeanName(applicationContext, beanName), applicationContext, beanName, args)

  validateActorBeanDefinition

  protected def validateActorBeanDefinition: Unit = {
    val beanClass = applicationContext.getBeanFactory.getType(beanName)
    val beanDefinition = applicationContext.getBeanFactory.getBeanDefinition(beanName)
    require(actorClass.isAssignableFrom(beanClass), s"""Invalid bean type. Bean "${beanName}" of type ${beanClass.getSimpleName} does not extend ${actorClass.getSimpleName}.""")
    require(!beanDefinition.isSingleton, s"""Actor beans must be non-singleton. Suggested fix: Annotate ${beanDefinition.getBeanClassName} with the @${classOf[ActorComponent].getSimpleName} annotation to create actor beans with prototype scope.""")
    // TODO: Validate actor constructor if arguments supplied to enable fail fast (see akka.util.Reflect.findConstructor)
  }

  override def actorClass: Class[_ <: Actor] = clazz

  override def produce(): Actor = {
    args match {
      case s if s.isEmpty => applicationContext.getBean(beanName).asInstanceOf[Actor]
      case _ => applicationContext.getBean(beanName, args: _*).asInstanceOf[Actor]
    }
  }
}
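An IndirectActorProducer subclass is handed to Akka through the Props(classOf[Producer], constructorArgs*) form; Akka instantiates the producer and calls produce() on every actor (re)start, which is exactly why the backing bean must be prototype-scoped. A hedged sketch (system, ctx as a running ConfigurableApplicationContext, and MyActor as a prototype actor bean are assumptions):

import akka.actor.{ ActorRef, Props }
import scala.collection.immutable

val props = Props(classOf[SpringIndirectActorProducer], classOf[MyActor], ctx, immutable.Seq.empty[AnyRef])
val ref: ActorRef = system.actorOf(props, "myActor")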
Example 126
Source File: Scheduler.scala From openwhisk with Apache License 2.0 | 5 votes |
package org.apache.openwhisk.common import scala.concurrent.Future import scala.concurrent.duration._ import scala.util.Failure import scala.util.Success import scala.util.Try import akka.actor.Actor import akka.actor.ActorSystem import akka.actor.Cancellable import akka.actor.Props def scheduleWaitAtLeast(interval: FiniteDuration, initialDelay: FiniteDuration = Duration.Zero, name: String = "Scheduler")(f: () => Future[Any])(implicit system: ActorSystem, logging: Logging, transid: TransactionId = TransactionId.unknown) = { require(interval > Duration.Zero) system.actorOf(Props(new Worker(initialDelay, interval, true, name, f))) } }
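The snippet keeps only scheduleWaitAtLeast; the Worker actor it spawns is defined elsewhere in the file. The contract is that a new tick fires only after both the interval has elapsed and the previous Future has completed. An illustrative call site (the implicit ActorSystem, Logging and TransactionId values it needs are assumed in scope):

import scala.concurrent.Future
import scala.concurrent.duration._

Scheduler.scheduleWaitAtLeast(interval = 10.seconds) { () =>
  Future.successful(println("periodic health check")) // stand-in for real work
}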
Example 127
Source File: YARNComponentActor.scala From openwhisk with Apache License 2.0 | 5 votes |
package org.apache.openwhisk.core.yarn import akka.actor.{Actor, ActorSystem} import akka.http.scaladsl.model.{HttpMethods, StatusCodes} import akka.stream.ActorMaterializer import org.apache.openwhisk.common.Logging import org.apache.openwhisk.core.entity.ExecManifest.ImageName import org.apache.openwhisk.core.yarn.YARNComponentActor.{CreateContainerAsync, RemoveContainer} import spray.json.{JsArray, JsNumber, JsObject, JsString} import scala.concurrent.ExecutionContext object YARNComponentActor { case object CreateContainerAsync case class RemoveContainer(component_instance_name: String) } class YARNComponentActor(actorSystem: ActorSystem, logging: Logging, yarnConfig: YARNConfig, serviceName: String, imageName: ImageName) extends Actor { implicit val as: ActorSystem = actorSystem implicit val materializer: ActorMaterializer = ActorMaterializer() implicit val ec: ExecutionContext = actorSystem.dispatcher //Adding a container via the YARN REST API is actually done by flexing the component's container pool to a certain size. // This actor must track the current containerCount in order to make the correct scale-up request. var containerCount: Int = 0 def receive: PartialFunction[Any, Unit] = { case CreateContainerAsync => sender ! createContainerAsync case RemoveContainer(component_instance_name) => sender ! removeContainer(component_instance_name) case input => throw new IllegalArgumentException("Unknown input: " + input) sender ! false } def createContainerAsync(): Unit = { logging.info(this, s"Using YARN to create a container with image ${imageName.name}...") val body = JsObject("number_of_containers" -> JsNumber(containerCount + 1)).compactPrint val response = YARNRESTUtil.submitRequestWithAuth( yarnConfig.authType, HttpMethods.PUT, s"${yarnConfig.masterUrl}/app/v1/services/$serviceName/components/${imageName.name}", body) response match { case httpresponse(StatusCodes.OK, content) => logging.info(this, s"Added container: ${imageName.name}. Response: $content") containerCount += 1 case httpresponse(_, _) => YARNRESTUtil.handleYARNRESTError(logging) } } def removeContainer(component_instance_name: String): Unit = { logging.info(this, s"Removing ${imageName.name} container: $component_instance_name ") if (containerCount <= 0) { logging.warn(this, "Already at 0 containers") } else { val body = JsObject( "components" -> JsArray( JsObject( "name" -> JsString(imageName.name), "decommissioned_instances" -> JsArray(JsString(component_instance_name))))).compactPrint val response = YARNRESTUtil.submitRequestWithAuth( yarnConfig.authType, HttpMethods.PUT, s"${yarnConfig.masterUrl}/app/v1/services/$serviceName", body) response match { case httpresponse(StatusCodes.OK, content) => logging.info( this, s"Successfully removed ${imageName.name} container: $component_instance_name. Response: $content") containerCount -= 1 case httpresponse(_, _) => YARNRESTUtil.handleYARNRESTError(logging) } } } }
Example 128
Source File: LocalTransformServiceActor.scala From mleap with Apache License 2.0 | 5 votes |
package ml.combust.mleap.executor.service import akka.actor.{Actor, ActorRef, Props, Status, Terminated} import akka.stream.{ActorMaterializer, Materializer} import ml.combust.mleap.executor.repository.RepositoryBundleLoader import ml.combust.mleap.executor._ import ml.combust.mleap.executor.error.NotFoundException import scala.util.{Failure, Success, Try} object LocalTransformServiceActor { def props(loader: RepositoryBundleLoader, config: ExecutorConfig): Props = { Props(new LocalTransformServiceActor(loader, config)) } object Messages { case object Close } } class LocalTransformServiceActor(loader: RepositoryBundleLoader, config: ExecutorConfig) extends Actor { import LocalTransformServiceActor.Messages private implicit val materializer: Materializer = ActorMaterializer()(context.system) private var lookup: Map[String, ActorRef] = Map() private var modelNameLookup: Map[ActorRef, String] = Map() override def postStop(): Unit = { for (child <- context.children) { context.unwatch(child) context.stop(child) } } override def receive: Receive = { case request: TransformFrameRequest => handleModelRequest(request) case request: GetBundleMetaRequest => handleModelRequest(request) case request: GetModelRequest => handleModelRequest(request) case request: CreateFrameStreamRequest => handleModelRequest(request) case request: CreateRowStreamRequest => handleModelRequest(request) case request: GetRowStreamRequest => handleModelRequest(request) case request: CreateFrameFlowRequest => handleModelRequest(request) case request: GetFrameStreamRequest => handleModelRequest(request) case request: CreateRowFlowRequest => handleModelRequest(request) case request: UnloadModelRequest => handleModelRequest(request) case request: LoadModelRequest => loadModel(request) case Messages.Close => context.stop(self) case Terminated(actor) => terminated(actor) } def handleModelRequest(request: ModelRequest): Unit = { lookup.get(request.modelName) match { case Some(actor) => actor.tell(request, sender) case None => sender ! Status.Failure(new NotFoundException(s"no model with name ${request.modelName}")) } } def loadModel(request: LoadModelRequest): Unit = { Try(context.actorOf(BundleActor.props(request, loader, config), request.modelName)) match { case Success(actor) => lookup += (request.modelName -> actor) modelNameLookup += (actor -> request.modelName) context.watch(actor) actor.tell(request, sender) case Failure(err) => sender ! Status.Failure(err) } } private def terminated(ref: ActorRef): Unit = { val uri = modelNameLookup(ref) modelNameLookup -= ref lookup -= uri } }
Example 129
Source File: RouterActor.scala From BusFloatingData with Apache License 2.0 | 5 votes |
package de.nierbeck.floating.data.server.actors.websocket import akka.actor.{Actor, ActorLogging} import akka.routing.{AddRoutee, RemoveRoutee, Routee} class RouterActor extends Actor with ActorLogging { var routees = Set[Routee]() def receive: Receive = { case ar: AddRoutee => { log.info(s"add routee ${ar.routee}") routees = routees + ar.routee } case rr: RemoveRoutee => { log.info(s"remove routee ${rr.routee}") routees = routees - rr.routee } case msg:Any => { routees.foreach(_.send(msg, sender)) } } }
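Routees register through the standard akka.routing.AddRoutee message; anything else is fanned out to all of them. Illustrative registration of a subscriber (system and subscriber are assumed in scope):

import akka.actor.Props
import akka.routing.{ ActorRefRoutee, AddRoutee }

val router = system.actorOf(Props[RouterActor], "ws-router")
router ! AddRoutee(ActorRefRoutee(subscriber))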
Example 130
Source File: VisualMailboxMetricClient.scala From akka-visualmailbox with Apache License 2.0 | 5 votes |
package de.aktey.akka.visualmailbox import java.net.InetSocketAddress import akka.actor.{Actor, ActorRef, ExtendedActorSystem, Extension, ExtensionId, ExtensionIdProvider, Props} import akka.io.{IO, Udp} import akka.util.ByteString import de.aktey.akka.visualmailbox.packing.Packing object VisualMailboxMetricClient extends ExtensionId[VisualMailboxMetricClient] with ExtensionIdProvider { override def createExtension(system: ExtendedActorSystem): VisualMailboxMetricClient = { new VisualMailboxMetricClient( system, VisualMailboxMetricClientConfig.fromConfig(system.settings.config) ) } override def lookup(): ExtensionId[_ <: Extension] = VisualMailboxMetricClient } class VisualMailboxMetricClient(system: ExtendedActorSystem, config: VisualMailboxMetricClientConfig) extends Extension { private val udpSender = system.systemActorOf( Props(new UdpSender(config.serverAddress)).withDispatcher("de.aktey.akka.visualmailbox.client.dispatcher"), "de-aktey-akka-visualmailbox-sender" ) system.systemActorOf( Props(new VisualMailboxMetricListener(udpSender)).withDispatcher("de.aktey.akka.visualmailbox.client.dispatcher"), "de-aktey-akka-visualmailbox-receiver" ) } class VisualMailboxMetricListener(udpSender: ActorRef) extends Actor { import context._ import concurrent.duration._ var buffer: List[VisualMailboxMetric] = Nil system.eventStream.subscribe(self, classOf[VisualMailboxMetric]) system.scheduler.schedule(1.second, 1.second, self, "flush") @scala.throws[Exception](classOf[Exception]) override def postStop(): Unit = { system.eventStream.unsubscribe(self) } def receive: Receive = { case v: VisualMailboxMetric => buffer ::= v if (buffer.size > 40) self ! "flush" case "flush" if buffer.nonEmpty => udpSender ! Packing.pack(MetricEnvelope(1, Packing.pack(buffer))) buffer = Nil } } class UdpSender(remote: InetSocketAddress) extends Actor { import context._ IO(Udp) ! Udp.SimpleSender def receive = { case Udp.SimpleSenderReady => context.become(ready(sender())) } def ready(send: ActorRef): Receive = { case msg: Array[Byte] => send ! Udp.Send(ByteString(msg), remote) } }
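As an Akka extension, the client only has to be looked up once on a started system; the lookup itself spawns the UDP sender and the event-stream listener. A minimal sketch:

import akka.actor.ActorSystem

val system = ActorSystem("monitored") // expects the de.aktey.akka.visualmailbox client config
VisualMailboxMetricClient(system)     // registers the sender and listener actors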
Example 131
Source File: DataSourceEndpoint.scala From akka-visualmailbox with Apache License 2.0 | 5 votes |
package de.aktey.akka.visualmailbox.data import akka.actor.{Actor, ActorLogging, ActorRef, Props} import akka.io.Udp.Received import de.aktey.akka.visualmailbox.packing.Packing import de.aktey.akka.visualmailbox.{MetricEnvelope, VisualMailboxMetric} import scala.util.{Failure, Success} class DataSourceEndpoint(router: ActorRef) extends Actor with ActorLogging { def receive = { case Received(datagram, _) => Packing.unpack[MetricEnvelope](datagram.to[Array]) match { case Success(MetricEnvelope(1, payload)) => Packing.unpack[List[VisualMailboxMetric]](payload) match { case Success(list) => list.foreach(router ! _) case Failure(e) => log.error(e, "unmarshal error") } case Success(MetricEnvelope(version, _)) => log.warning("unknown protocol version: " + version) case Failure(e) => log.error(e, "unmarshal error") } } } object DataSourceEndpoint { def props(router: ActorRef) = Props(new DataSourceEndpoint(router)) }
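The endpoint only understands Udp.Received, so it must be installed as the handler of a bound UDP socket. A hedged wiring sketch (address and port are placeholders; MetricsRouter comes from the next example):

import java.net.InetSocketAddress
import akka.actor.ActorSystem
import akka.io.{ IO, Udp }

implicit val system: ActorSystem = ActorSystem("visualmailbox-data")
val router = system.actorOf(MetricsRouter.props())
val handler = system.actorOf(DataSourceEndpoint.props(router), "data-endpoint")
IO(Udp) ! Udp.Bind(handler, new InetSocketAddress("0.0.0.0", 40000))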
Example 132
Source File: MetricsRouter.scala From akka-visualmailbox with Apache License 2.0 | 5 votes |
package de.aktey.akka.visualmailbox import akka.actor.{Actor, ActorLogging, ActorRef, Props, Terminated} class MetricsRouter extends Actor with ActorLogging { import context._ var routees: Set[ActorRef] = Set.empty override def postStop() { routees foreach unwatch } def receive = { case registrar: ActorRef => watch(registrar) routees += registrar if (log.isDebugEnabled) log.debug(s"""{"type":"registerd","registered":"$registrar","routees":${routees.size}}""") case Terminated(ref) => unwatch(ref) routees -= ref if (log.isDebugEnabled) log.debug(s"""{"type":"unregistered","terminated":"$ref","routees":${routees.size}}""") case msg => routees foreach (_ forward msg) } } object MetricsRouter { def props() = Props(new MetricsRouter) }
Example 133
Source File: UnionExample.scala From typed-actors with Apache License 2.0 | 5 votes |
package org.example import akka.actor.{ Actor, ActorSystem } import de.knutwalker.akka.typed._ object UnionExample extends App { case class Foo() case class Bar() case class Baz() class UnionedActor extends Actor { def receive = { case x ⇒ println(x) } } implicit val system = ActorSystem() val props1: Props[Foo] = Props(new UnionedActor) val props0: Props[Foo | Bar] = props1.or[Bar] val ref0 : ActorRef[Foo | Bar] = ActorOf(props0, "union") val ref : ActorRef[Foo | Bar | Baz] = ref0.or[Baz] ref ! Foo() ref ! Bar() ref ! Baz() // [error] UnionExample.scala:49: // Cannot prove that message of type org.example.UnionExample.Foo.type is a member of org.example.UnionExample.ref.Message. // ref ! Foo Shutdown(system) }
Example 134
Source File: SimpleExample.scala From typed-actors with Apache License 2.0 | 5 votes |
package org.example import akka.actor.{ Actor, ActorLogging, ActorSystem } import de.knutwalker.akka.typed._ object SimpleExample extends App { case class Ping(replyTo: ActorRef[Pong]) case class Pong(replyTo: ActorRef[Ping]) implicit val system = ActorSystem() val ping = ActorOf(PropsOf[Ping](new Actor with ActorLogging { private[this] var count = 0 def receive: Receive = { case Ping(replyTo) ⇒ count += 1 replyTo ! Pong(self.typed) } override def postStop(): Unit = { log.info(s"pings: $count") } })) val pong = ActorOf(PropsOf[Pong](new Actor with ActorLogging { private[this] var count = 0 def receive: Receive = { case Pong(replyTo) ⇒ count += 1 replyTo ! Ping(self.typed) } override def postStop(): Unit = { log.info(s"pongs: $count") } })) ping ! Ping(pong) Thread.sleep(1000) Shutdown(system) }
Example 135
Source File: AuditSrv.scala From Cortex with GNU Affero General Public License v3.0 | 5 votes |
package org.thp.cortex.services import javax.inject.{Inject, Singleton} import scala.concurrent.ExecutionContext import scala.concurrent.duration.FiniteDuration import play.api.Logger import akka.actor.{Actor, ActorRef} import org.thp.cortex.models.JobStatus import org.elastic4play.models.BaseEntity import org.elastic4play.services._ object AuditActor { case class Register(jobId: String, timeout: FiniteDuration) case class Unregister(jobId: String, actorRef: ActorRef) case class JobEnded(jobId: String, status: JobStatus.Type) } @Singleton class AuditActor @Inject()(eventSrv: EventSrv, implicit val ec: ExecutionContext) extends Actor { import AuditActor._ object EntityExtractor { def unapply(e: BaseEntity) = Some((e.model, e.id, e.routing)) } var registration = Map.empty[String, Seq[ActorRef]] private[AuditActor] lazy val logger = Logger(getClass) override def preStart(): Unit = { eventSrv.subscribe(self, classOf[EventMessage]) super.preStart() } override def postStop(): Unit = { eventSrv.unsubscribe(self) super.postStop() } override def receive: Receive = { case Register(jobId, timeout) ⇒ logger.info(s"Register new listener for job $jobId ($sender)") val newActorList = registration.getOrElse(jobId, Nil) :+ sender registration += (jobId → newActorList) context.system.scheduler.scheduleOnce(timeout, self, Unregister(jobId, sender)) case Unregister(jobId, actorRef) ⇒ logger.info(s"Unregister listener for job $jobId ($actorRef)") val newActorList = registration.getOrElse(jobId, Nil).filterNot(_ == actorRef) registration += (jobId → newActorList) case AuditOperation(EntityExtractor(model, id, routing), action, details, authContext, date) ⇒ if (model.modelName == "job" && action == AuditableAction.Update) { logger.info(s"Job $id has be updated (${details \ "status"})") val status = (details \ "status").asOpt[JobStatus.Type].getOrElse(JobStatus.InProgress) if (status != JobStatus.InProgress) registration.getOrElse(id, Nil).foreach { aref ⇒ aref ! JobEnded(id, status) } } } }
Example 136
Source File: ClusterApp.scala From reactive-lib with Apache License 2.0 | 5 votes |
package foo

import akka.actor.{ Actor, ActorLogging, ActorSystem, PoisonPill, Props }
import akka.cluster.ClusterEvent.ClusterDomainEvent
import akka.cluster.singleton.{ ClusterSingletonManager, ClusterSingletonManagerSettings }
import akka.cluster.{ Cluster, ClusterEvent }
import akka.http.scaladsl.Http
import akka.http.scaladsl.model._
import akka.http.scaladsl.server.Directives._
import akka.management.AkkaManagement
import akka.management.cluster.bootstrap.ClusterBootstrap
import akka.stream.ActorMaterializer

object ClusterApp {

  def main(args: Array[String]): Unit = {
    implicit val system = ActorSystem()
    implicit val materializer = ActorMaterializer()
    implicit val executionContext = system.dispatcher
    val cluster = Cluster(system)

    system.log.info("Starting Akka Management")
    system.log.info("something2")
    // AkkaManagement(system).start()
    // ClusterBootstrap(system).start()
    system.actorOf(
      ClusterSingletonManager.props(
        Props[NoisySingleton],
        PoisonPill,
        ClusterSingletonManagerSettings(system)))
    Cluster(system).subscribe(
      system.actorOf(Props[ClusterWatcher]),
      ClusterEvent.InitialStateAsEvents,
      classOf[ClusterDomainEvent])

    // add real app routes here
    val routes = path("hello") {
      get {
        complete(
          HttpEntity(ContentTypes.`text/html(UTF-8)`, "<h1>Hello</h1>"))
      }
    }
    Http().bindAndHandle(routes, "0.0.0.0", 8080)

    system.log.info(
      s"Server online at http://localhost:8080/\nPress RETURN to stop...")

    cluster.registerOnMemberUp {
      system.log.info("Cluster member is up!")
    }
  }

  class ClusterWatcher extends Actor with ActorLogging {
    val cluster = Cluster(context.system)

    override def receive = {
      case msg ⇒ log.info(s"Cluster ${cluster.selfAddress} >>> " + msg)
    }
  }
}
Example 137
Source File: DemoApp.scala From reactive-lib with Apache License 2.0 | 5 votes |
package foo import akka.actor.{ Actor, ActorLogging, ActorSystem, Props } import akka.cluster.ClusterEvent.ClusterDomainEvent import akka.cluster.{ Cluster, ClusterEvent } import akka.http.scaladsl.Http import akka.http.scaladsl.model._ import akka.http.scaladsl.server.Directives._ import akka.management.AkkaManagement import akka.management.cluster.bootstrap.ClusterBootstrap import akka.stream.ActorMaterializer object DemoApp extends App { implicit val system = ActorSystem("Appka") import system.log implicit val mat = ActorMaterializer() val cluster = Cluster(system) log.info(s"Started [$system], cluster.selfAddress = ${cluster.selfAddress}") log.info("something2") //#start-akka-management AkkaManagement(system).start() //#start-akka-management ClusterBootstrap(system).start() cluster.subscribe( system.actorOf(Props[ClusterWatcher]), ClusterEvent.InitialStateAsEvents, classOf[ClusterDomainEvent]) // add real app routes here val routes = path("hello") { get { complete( HttpEntity( ContentTypes.`text/html(UTF-8)`, "<h1>Hello</h1>")) } } Http().bindAndHandle(routes, "0.0.0.0", 8080) Cluster(system).registerOnMemberUp({ log.info("Cluster member is up!") }) } class ClusterWatcher extends Actor with ActorLogging { val cluster = Cluster(context.system) override def receive = { case msg ⇒ log.info(s"Cluster ${cluster.selfAddress} >>> " + msg) } }
Example 138
Source File: PublisherActor.scala From seahorse with Apache License 2.0 | 5 votes |
package ai.deepsense.workflowexecutor.rabbitmq import akka.actor.{Actor, Props} import ai.deepsense.commons.utils.Logging class PublisherActor(topic: String, publisher: MQPublisher) extends Actor with Logging { override def receive: Receive = { case message: Any => logger.info( "PublisherActor for topic: {} receives message {} from '{}'", topic, message.getClass.getName, sender().path.name) publisher.publish(topic, message) } } object PublisherActor { def props(topic: String, publisher: MQPublisher): Props = { Props(new PublisherActor(topic, publisher)) } }
Example 139
Source File: RetryActor.scala From seahorse with Apache License 2.0 | 5 votes |
package ai.deepsense.commons.utils import scala.concurrent.{ExecutionContext, Future} import scala.concurrent.duration.FiniteDuration import scala.util.{Failure, Success, Try} import akka.actor.{Actor, ActorRef, Status} class RetryActor[T]( retryInterval: FiniteDuration, retryCountLimit: Int, workCode: => Future[T], workDescription: Option[String]) extends Actor with Logging { import RetryActor._ private implicit val ec: ExecutionContext = context.system.dispatcher override def receive: Receive = { case Trigger => doWork(sender, 0) case Retry(initialSender, retryCount) => doWork(initialSender, retryCount) } val workDescriptionForLogs: String = workDescription.map(" " + _).getOrElse(" some work") private def doWork(initialSender: ActorRef, retryCount: Int): Unit = { workCode.onComplete { case Success(t) => initialSender ! t case Failure(RetriableException(msg, cause)) if retryCount < retryCountLimit => logFailure(msg, cause) logger.info(s"Will retry$workDescriptionForLogs in $retryInterval.") context.system.scheduler.scheduleOnce(retryInterval, self, Retry(initialSender, retryCount + 1)) case Failure(RetriableException(msg, cause)) if retryCount >= retryCountLimit => logFailure(msg, cause) val retryLimitReachedException = RetryLimitReachedException(s"Retry limit of $retryCountLimit reached, last error was $cause", cause) logger.error(s"Retry limit reached for$workDescriptionForLogs.", retryLimitReachedException) initialSender ! Status.Failure(retryLimitReachedException) case Failure(f) => logFailure(f.getMessage, Some(f)) logger.error(s"Unexpected exception when performing$workDescriptionForLogs.", f) initialSender ! Status.Failure(f) } } private def logFailure(msg: String, tOpt: Option[Throwable]): Unit = { val msgText = s"Exception when performing$workDescriptionForLogs. The message was: $msg" tOpt match { case Some(t) => logger.info(msgText, t) case None => logger.info(msgText) } } } object RetryActor { sealed trait Message case object Trigger extends Message case class Retry(initialSender: ActorRef, retryCount: Int) extends Message case class RetryLimitReachedException(msg: String, lastError: Option[Throwable]) extends Exception(msg) case class RetriableException(msg: String, cause: Option[Throwable]) extends Exception(msg, cause.orNull) }
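Because workCode is a by-name parameter, the Future is rebuilt on every attempt, which is what makes retrying meaningful. An illustrative client (callFlakyService(): Future[String] and system are assumptions):

import akka.actor.Props
import akka.pattern.ask
import akka.util.Timeout
import scala.concurrent.duration._

implicit val timeout: Timeout = Timeout(1.minute)

val retrier = system.actorOf(Props(new RetryActor[String](
  retryInterval = 5.seconds,
  retryCountLimit = 3,
  workCode = callFlakyService(), // re-evaluated lazily on each attempt
  workDescription = Some("calling flaky service"))))

val result = (retrier ? RetryActor.Trigger).mapTo[String]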
Example 140
Source File: TerminationListenerActor.scala From seahorse with Apache License 2.0 | 5 votes |
package ai.deepsense.workflowexecutor.executor import scala.concurrent.Promise import akka.actor.{Actor, Props} import ai.deepsense.sparkutils.AkkaUtils import ai.deepsense.models.workflows.ExecutionReport class TerminationListenerActor(finishedExecutionStatus: Promise[ExecutionReport]) extends Actor { override def receive: Receive = { case status: ExecutionReport => finishedExecutionStatus.success(status) AkkaUtils.terminate(context.system) } } object TerminationListenerActor { def props(finishedExecutionReport: Promise[ExecutionReport]): Props = Props(new TerminationListenerActor(finishedExecutionReport)) }
Example 141
Source File: ExecutionReportSubscriberActor.scala From seahorse with Apache License 2.0 | 5 votes |
package ai.deepsense.sessionmanager.service.actors import scala.util.Success import akka.actor.{Actor, ActorRef, Props} import ai.deepsense.graph.Node import ai.deepsense.graph.nodestate.name.NodeStatusName import ai.deepsense.models.workflows.ExecutionReport import ai.deepsense.models.workflows.Workflow.Id import ai.deepsense.sessionmanager.rest.responses.NodeStatusesResponse class ExecutionReportSubscriberActor(val workflowId: Id) extends Actor { import ExecutionReportSubscriberActor._ private[this] var nodeStatusById: NodeStatusMap = Map() private[this] var initialized: Boolean = false override def receive: Receive = { case e: ExecutionReport => nodeStatusById = updateNodeStatusResponse(nodeStatusById, e) initialized = true case ReportQuery(onBehalfOf: ActorRef) => onBehalfOf ! Success(generateNodeStatusResponse(nodeStatusById, initialized)) } } object ExecutionReportSubscriberActor { type NodeStatusMap = Map[Node.Id, NodeStatusName] sealed trait ExecutionReportSubscriberActorMessage case class ReportQuery(onBehalfOf: ActorRef) extends ExecutionReportSubscriberActorMessage def apply(workflowId: Id): Props = Props(classOf[ExecutionReportSubscriberActor], workflowId) private def updateNodeStatusResponse( nodeStatusMap: NodeStatusMap, executionReport: ExecutionReport): NodeStatusMap = { nodeStatusMap ++ executionReport.nodesStatuses.mapValues(_.name) } // When this actor is first initialized, we don't have enough information about workflow - e.g. about nodes. // If we didn't handle this case, empty workflow could not be recognized from workflow for which this actor isn't // yet initialized. private def generateNodeStatusResponse(nodeStatusMap: NodeStatusMap, initialized: Boolean): NodeStatusesResponse = { if (initialized) { NodeStatusesResponse(Some(nodeStatusMap.groupBy(_._2).mapValues(_.size))) } else { NodeStatusesResponse(None) } } }
Example 142
Source File: RestServiceSupervisor.scala From seahorse with Apache License 2.0 | 5 votes |
package ai.deepsense.commons.rest import akka.actor.{Actor, ActorInitializationException, Props} import ai.deepsense.commons.utils.Logging import ai.deepsense.sparkutils.AkkaUtils class RestServiceSupervisor extends Actor with Logging { import akka.actor.OneForOneStrategy import akka.actor.SupervisorStrategy._ override def receive: Receive = { case (props: Props, name: String) => logger.debug(s"RestServiceSupervisor creates actor '$name': ${props.actorClass()}") sender() ! context.actorOf(props, name) case message => unhandled(message) } override val supervisorStrategy = OneForOneStrategy() { case exception: ActorInitializationException => logger.error("An ActorInitializationException occurred! Terminating!", exception) AkkaUtils.terminate(context.system) Stop } }
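A short sketch of the supervisor's create-on-request protocol: it answers a (Props, name) tuple with the ActorRef of the child it created. EchoService is a hypothetical child actor.

import akka.actor.{Actor, ActorRef, ActorSystem, Props}
import akka.pattern.ask
import akka.util.Timeout
import scala.concurrent.Await
import scala.concurrent.duration._

class EchoService extends Actor { // hypothetical child actor
  def receive = { case m => sender() ! m }
}

implicit val timeout = Timeout(5.seconds)
val system = ActorSystem("rest")
val supervisor = system.actorOf(Props[RestServiceSupervisor], "rest-supervisor")
// The supervisor replies with the ActorRef of the child it just created.
val service = Await.result(
  (supervisor ? (Props[EchoService] -> "echo-service")).mapTo[ActorRef],
  timeout.duration)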
Example 143
Source File: RestService.scala From seahorse with Apache License 2.0 | 5 votes |
package ai.deepsense.commons.rest import scala.collection.JavaConversions.asScalaSet import akka.actor.{Actor, ActorContext} import com.google.inject.Inject import spray.http.StatusCodes import spray.routing._ import spray.util.LoggingContext /* The enclosing declaration was elided in the original listing; it is reconstructed here on the assumption that these members belong to a trait extending spray's HttpService, which is what defines timeoutRoute. ExceptionWithStatus is likewise defined elsewhere in the project. */ trait RestService extends HttpService { override def timeoutRoute: Route = { complete( StatusCodes.ServiceUnavailable, "The server could not provide a timely response." ) } private def exceptionHandler(implicit log: LoggingContext): ExceptionHandler = { ExceptionHandler { case e: ExceptionWithStatus => complete(e.statusCode, e.msg) } } private val rejectionHandler: RejectionHandler = { RejectionHandler { case MissingQueryParamRejection(param) :: _ => complete(StatusCodes.BadRequest, s"Request is missing required query parameter '$param'") } } }
Example 144
Source File: Node.scala From affinity with Apache License 2.0 | 5 votes |
package io.amient.affinity.core.cluster import java.util.concurrent.{CountDownLatch, TimeUnit, TimeoutException} import akka.actor.{Actor, Props} import akka.event.Logging import akka.util.Timeout import com.typesafe.config.{Config, ConfigFactory} import io.amient.affinity.core.ack import io.amient.affinity.core.actor.Controller._ import io.amient.affinity.core.actor.Gateway.{GatewayClusterStatus, GatewayConf} import io.amient.affinity.core.actor._ import io.amient.affinity.core.config._ import io.amient.affinity.{AffinityActorSystem, Conf} import scala.concurrent.duration._ import scala.concurrent.{Await, Future, Promise} import scala.language.{implicitConversions, postfixOps} import scala.reflect.ClassTag object Node { class NodeConf extends CfgStruct[NodeConf] { val Containers: CfgGroup[CfgIntList] = group("container", classOf[CfgIntList], false) .doc("Array of partitions assigned to this node, <ID> represents the Keyspace, e.g. assigning first four partitions of MyKeySpace: affinity.node.container.MyKeySpace = [0,1,2,3] ") val Gateway: GatewayConf = struct("gateway", new GatewayConf, false) val SuspendQueueMaxSize = integer("suspend.queue.max.size", 1000).doc("Size of the queue when the cluster enters suspended mode") val StartupTimeoutMs = longint("startup.timeout.ms", Integer.MAX_VALUE).doc("Maximum time a node can take to startup - this number must account for any potential state bootstrap") val ShutdownTimeoutMs = longint("shutdown.timeout.ms", 30000).doc("Maximum time a node can take to shutdown gracefully") val DataDir = filepath("data.dir", false).doc("Location under which any local state or registers will be kept - this is required if running in a distributed mode or when using persisted kv stores") val DataAutoAssign = bool("data.auto.assign", true, false).doc("Determines whether this node auto-balances data across its containers; if set to false the fixed list of container partitions will be used") val DataAutoDelete = bool("data.auto.delete", true, false).doc("If set to true, any unassigned partitions will be deleted from the local storage") } } class Node(config: Config) { def this(configResource: String) = this(ConfigFactory.parseResources(configResource).resolve) val conf = Conf(config) val startupTimeout = conf.Affi.Node.StartupTimeoutMs().toLong milliseconds val shutdownTimeout = conf.Affi.Node.ShutdownTimeoutMs().toLong milliseconds implicit val system = AffinityActorSystem.create(config) private val log = Logging.getLogger(system, this) private val controller = system.actorOf(Props(new Controller), name = "controller") private val httpGatewayPort = Promise[List[Int]]() private val clusterReady = new CountDownLatch(1) @volatile private var shuttingDown = false @volatile private var fatalError: Option[Throwable] = None import scala.concurrent.ExecutionContext.Implicits.global val systemEventsWatcher = system.actorOf(Props(new Actor { override def receive: Receive = { case GatewayClusterStatus(false) => clusterReady.countDown() case FatalErrorShutdown(e) => fatalError = Some(e) shutdown() } })) system.eventStream.subscribe(systemEventsWatcher, classOf[GatewayClusterStatus]) system.eventStream.subscribe(systemEventsWatcher, classOf[FatalErrorShutdown]) sys.addShutdownHook { if (!shuttingDown) { log.info("process killed - attempting graceful shutdown") fatalError = None shutdown() } Await.ready(system.terminate, shutdownTimeout) } def start[T <: Gateway](creator: => T)(implicit tag: ClassTag[T]): Future[List[Int]] = { controller ! StartRebalance() implicit val timeout = Timeout(startupTimeout) val result = controller ?? CreateGateway(Props(creator)) httpGatewayPort.completeWith(result) result } }
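The NodeConf fields above map one-to-one onto HOCON keys under the affinity.node prefix (as the container doc string shows). A minimal sketch of constructing a node with an inline config; all values are hypothetical, and a real node additionally needs the rest of the affinity configuration (system name, coordinator, avro, etc.):

import com.typesafe.config.ConfigFactory

val nodeConfig = ConfigFactory.parseString(
  """
    |affinity.node {
    |  container.MyKeySpace = [0, 1, 2, 3]  // fixed partition assignment for keyspace MyKeySpace
    |  startup.timeout.ms = 60000
    |  shutdown.timeout.ms = 30000
    |}
  """.stripMargin).withFallback(ConfigFactory.load())

val node = new Node(nodeConfig)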
Example 145
Source File: CoordinatorEmbeddedSpec.scala From affinity with Apache License 2.0 | 5 votes |
package io.amient.affinity.core.cluster import java.util.concurrent.atomic.AtomicReference import akka.actor.{Actor, ActorRef, Props} import com.typesafe.config.{ConfigFactory, ConfigValueFactory} import io.amient.affinity.avro.MemorySchemaRegistry import io.amient.affinity.core.cluster.Coordinator.MembershipUpdate import io.amient.affinity.{AffinityActorSystem, Conf} import org.scalatest.{FlatSpec, Matchers} import scala.collection.JavaConverters._ class CoordinatorEmbeddedSpec extends FlatSpec with Matchers { "CoordinatorEmbedded instances" should "share the underlying space for the same id and group" in { val config = ConfigFactory.empty() .withValue(Conf.Affi.SystemName.path, ConfigValueFactory.fromAnyRef("101")) .withValue(Conf.Affi.Node.path, ConfigValueFactory.fromMap(Map[String, String]().asJava)) .withValue(Conf.Affi.Avro.Class.path, ConfigValueFactory.fromAnyRef(classOf[MemorySchemaRegistry].getName)) .withValue(Conf.Affi.Coordinator.Class.path, ConfigValueFactory.fromAnyRef(classOf[CoordinatorEmbedded].getName)) val system = AffinityActorSystem.create(config) try { val coordinator1 = Coordinator.create(system, "group1") val actor1 = system.actorOf(Props(new Actor { override def receive: Receive = { case null => } }), "actor1") coordinator1.register(actor1.path) val update1 = new AtomicReference[scala.collection.Set[ActorRef]](Set.empty) update1 synchronized { coordinator1.watch(system.actorOf(Props(new Actor { override def receive: Receive = { case MembershipUpdate(masters) => update1 synchronized update1.set(masters.values.toSet) } }), "subscriber1")) } coordinator1.close() val coordinator2 = Coordinator.create(system, "group1") val update2 = new AtomicReference[scala.collection.Set[ActorRef]](Set.empty) update2 synchronized { coordinator2.watch(system.actorOf(Props(new Actor { override def receive: Receive = { case MembershipUpdate(masters) => update2 synchronized update2.set(masters.values.toSet) } }), "subscriber2")) update2.wait(1000) update2.get.map(_.path.toString) should be(Set("akka://101/user/actor1")) update1.get.map(_.path.toString) should be(Set("akka://101/user/actor1")) } coordinator2.close() } finally { system.terminate() } } }
Example 146
Source File: TransactionalProducer.scala From affinity with Apache License 2.0 | 5 votes |
package io.amient.affinity.kafka import java.util.Properties import akka.actor.Actor import akka.actor.Status.{Failure, Success} import akka.event.Logging import com.typesafe.config.Config import io.amient.affinity.Conf import io.amient.affinity.core.actor.{TransactionAbort, TransactionBegin, TransactionCommit, TransactionalRecord} import io.amient.affinity.core.config.CfgStruct import io.amient.affinity.core.storage.StorageConf import io.amient.affinity.kafka.KafkaStorage.{KafkaConsumerConf, KafkaProducerConf} import org.apache.kafka.clients.producer.{Callback, KafkaProducer, ProducerRecord, RecordMetadata} import org.apache.kafka.common.serialization.ByteArraySerializer import scala.collection.JavaConverters._ object KafkaConf extends KafkaConf { override def apply(config: Config): KafkaConf = new KafkaConf().apply(config) } class KafkaConf extends CfgStruct[KafkaConf](classOf[StorageConf]) { val BootstrapServers = string("kafka.bootstrap.servers", true).doc("kafka connection string used for consumer and/or producer") val Producer = struct("kafka.producer", new KafkaProducerConf, false).doc("any settings that the underlying version of kafka producer client supports") val Consumer = struct("kafka.consumer", new KafkaConsumerConf, false).doc("any settings that the underlying version of kafka consumer client supports") } class TransactionalProducer extends Actor { val logger = Logging.getLogger(context.system, this) private[this] var producer: KafkaProducer[Array[Byte], Array[Byte]] = null val kafkaConf = KafkaConf(Conf(context.system.settings.config).Affi.Storage) val producerConfig = new Properties() { if (kafkaConf.Producer.isDefined) { val producerConfig = kafkaConf.Producer.toMap() if (producerConfig.containsKey("bootstrap.servers")) throw new IllegalArgumentException("bootstrap.servers cannot be overridden for KafkaStorage producer") if (producerConfig.containsKey("key.serializer")) throw new IllegalArgumentException("Binary kafka stream cannot use custom key.serializer") if (producerConfig.containsKey("value.serializer")) throw new IllegalArgumentException("Binary kafka stream cannot use custom value.serializer") producerConfig.entrySet.asScala.filter(_.getValue.isDefined).foreach { case (entry) => put(entry.getKey, entry.getValue.apply.toString) } } put("bootstrap.servers", kafkaConf.BootstrapServers()) put("value.serializer", classOf[ByteArraySerializer].getName) put("key.serializer", classOf[ByteArraySerializer].getName) } override def receive: Receive = { case req@TransactionBegin(transactionalId) => req(sender) ! { if (producer == null) { producerConfig.put("transactional.id", transactionalId) producer = new KafkaProducer[Array[Byte], Array[Byte]](producerConfig) logger.debug(s"Transactions.Init(transactional.id = $transactionalId)") producer.initTransactions() } logger.debug("Transactions.Begin()") producer.beginTransaction() } case TransactionalRecord(topic, key, value, timestamp, partition) => val replyto = sender val producerRecord = new ProducerRecord( topic, partition.map(new Integer(_)).getOrElse(null), timestamp.map(new java.lang.Long(_)).getOrElse(null), key, value) logger.debug(s"Transactions.Append(topic=$topic)") producer.send(producerRecord, new Callback { override def onCompletion(metadata: RecordMetadata, exception: Exception): Unit = { if (exception != null) { replyto ! Failure(exception) } else { replyto ! Success(metadata.offset()) } } }) case req@TransactionCommit() => req(sender) ! { logger.debug("Transactions.Commit()") producer.commitTransaction() } case req@TransactionAbort() => req(sender) ! { logger.debug("Transactions.Abort()") producer.abortTransaction() } } }
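The actor above is a thin wrapper over the standard Kafka transactional producer API. For reference, a minimal sketch of the bare client sequence it drives (broker address and topic names hypothetical):

import java.util.Properties
import org.apache.kafka.clients.producer.{KafkaProducer, ProducerRecord}
import org.apache.kafka.common.serialization.ByteArraySerializer

val props = new Properties()
props.put("bootstrap.servers", "localhost:9092") // hypothetical broker
props.put("transactional.id", "my-transactional-id")
props.put("key.serializer", classOf[ByteArraySerializer].getName)
props.put("value.serializer", classOf[ByteArraySerializer].getName)

val producer = new KafkaProducer[Array[Byte], Array[Byte]](props)
producer.initTransactions() // once per producer, before the first transaction
producer.beginTransaction()
try {
  producer.send(new ProducerRecord("topic", "key".getBytes, "value".getBytes))
  producer.commitTransaction()
} catch {
  case e: Exception =>
    producer.abortTransaction()
    throw e
}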
Example 147
Source File: BucketRateLimiter.scala From shield with MIT License | 5 votes |
package shield.actors.middleware import akka.actor.{Actor, ActorLogging, Props} import nl.grons.metrics.scala.{Meter, Timer} import shield.actors._ import shield.config.{ServiceLocation, Settings} import shield.implicits.FutureUtil import shield.kvstore.KVStore import shield.metrics.Instrumented import spray.http.{HttpResponse, StatusCodes} object BucketRateLimiter { def props(id: String, bypassHeader: String, callsPer: Int, perSeconds: Int, store: KVStore, location: ServiceLocation) : Props = Props(new BucketRateLimiter(id, bypassHeader, callsPer, perSeconds, store, location)) } class BucketRateLimiter(id: String, bypassHeader: String, callsPer: Int, perSeconds: Int, store: KVStore, location: ServiceLocation) extends Actor with ActorLogging with RestartLogging with Instrumented{ import context._ val settings = Settings(context.system) val localWork: Timer = metrics.timer("localWork", id) val bypassMeter: Meter = metrics.meter("bypass", id) val blockMeter: Meter = metrics.meter("block", id) val passMeter: Meter = metrics.meter("pass", id) val kvWorkTimer = timing("kvWork", id) def receive = { // todo: x-ratelimit response headers? case ur : DownstreamRequest => localWork.time { val _sender = sender() if (ur.request.headers.exists(_.lowercaseName == bypassHeader)) { bypassMeter.mark() _sender ! ForwardRequestCmd(ur.stage, ur.request, None) } else kvWorkTimer { // todo: profiling optimization - can we get this from the connection instead of the header? // similarly we spend a fair amount of time stringifying request.uri. Let's do this once per request and cache it in the request context val ip = ur.request.headers.find(_.lowercaseName == "client-address").get.value // todo: reasonable timeout store.tokenRateLimit(ip, callsPer, perSeconds).andThen(FutureUtil.logFailure("BucketRateLimiter::checkLimit")).recover { case _ => true }.map { if (_) { passMeter.mark() _sender ! ForwardRequestCmd(ur.stage, ur.request) } else { blockMeter.mark() _sender ! ForwardResponseCmd(ur.stage, ResponseDetails(location, settings.LocalServiceName, ur.destination.template, None, HttpResponse(StatusCodes.TooManyRequests))) } } } } } }
Example 148
Source File: ApiKeyAuth.scala From shield with MIT License | 5 votes |
package shield.actors.middleware import akka.actor.{Actor, ActorLogging, Props} import shield.actors._ import shield.config.{ServiceLocation, Settings} import shield.metrics.Instrumented import spray.http.{HttpResponse, StatusCodes} object ApiKeyAuth { def props(header: String, allowed: Set[String], caseSensitive: Boolean, location: ServiceLocation) : Props = Props(new ApiKeyAuth(header, allowed, caseSensitive, location)) } class ApiKeyAuth(headerName: String, allowedKeys: Set[String], caseSensitive: Boolean, location: ServiceLocation) extends Actor with ActorLogging with RestartLogging with Instrumented { val settings = Settings(context.system) val headerNameLower = headerName.toLowerCase val allowedValues : Set[String] = if (caseSensitive) allowedKeys else allowedKeys.map(_.toLowerCase) val timer = metrics.timer("api-key-auth") def receive = { case r: DownstreamRequest => timer.time { val header = r.request.headers.find(_.lowercaseName == headerNameLower) val allowed = header.exists(h => if (caseSensitive) allowedValues.contains(h.value) else allowedValues.contains(h.value.toLowerCase)) if (allowed) { sender ! ForwardRequestCmd(r.stage, r.request, None) } else { sender ! ForwardResponseCmd( r.stage, ResponseDetails( location, settings.LocalServiceName, r.destination.template, None, HttpResponse(if (header.isDefined) StatusCodes.Unauthorized else StatusCodes.Forbidden) ) ) } } } }
Example 149
Source File: WeightWatcher.scala From shield with MIT License | 5 votes |
package shield.actors.config import akka.actor.{Actor, ActorLogging, Cancellable, Props} import shield.config.ServiceLocation import scala.concurrent.duration._ object WeightWatcherMsgs { case class SetTargetWeights(services: Map[ServiceLocation, ServiceDetails]) case object Tick case class SetWeights(weights: Map[ServiceLocation, Int]) { require(weights.values.forall(_ >= 0), "Negative weight not allowed") } } object TransitionDetails { def default(details: ServiceDetails): TransitionDetails = { TransitionDetails(details.weight, 0, details.weight) } } case class TransitionDetails(targetWeight: Int, delta: Double, currentWeight: Double) { require(targetWeight >= 0, "target weight can't be negative") require(currentWeight >= 0, "current weight can't be negative") def setTarget(newTarget: Int, stepCount: Int) : TransitionDetails = if (newTarget != targetWeight) { copy( targetWeight = newTarget, delta = (newTarget - currentWeight) / stepCount ) } else { this } def advanceStep() : TransitionDetails = { val next = currentWeight + delta if (delta == 0) { this } else if ((delta < 0 && next <= targetWeight) || (delta > 0 && next >= targetWeight)) { copy(delta=0, currentWeight=targetWeight) } else { copy(currentWeight=next) } } } // Why do we have one weight watcher for all hosts instead of one weight watcher for each host? // Having a watcher for each host would cause multiple hosts to update their state per step. // Having one watcher for all hosts will cause one state update per step. // The one-for-all approach significantly lowers the number of times that ConfigWatcher will have to rebuild the router object WeightWatcher { def props(stepTime: FiniteDuration, stepCount: Int) : Props = Props(new WeightWatcher(stepTime, stepCount)) } class WeightWatcher(stepTime: FiniteDuration, stepCount: Int) extends Actor with ActorLogging { require(stepCount > 0, "Must have at least one step") import WeightWatcherMsgs._ import context.dispatcher var state : Map[ServiceLocation, TransitionDetails] = Map.empty var ticker : Cancellable = context.system.scheduler.schedule(stepTime, stepTime, self, Tick) override def preRestart(reason: Throwable, message: Option[Any]): Unit = { self ! SetWeights(state.map { case (loc, transition) => loc -> transition.targetWeight }) super.preRestart(reason, message) } override def postStop() = { ticker.cancel() } def receive = { case SetTargetWeights(proxyDetails) => state = proxyDetails.map { case (location, proxyDetail) => location -> state.get(location).map(_.setTarget(proxyDetail.weight, stepCount)).getOrElse(TransitionDetails.default(proxyDetail)) } case Tick => val oldWeights = state.map { case (loc, transition) => loc -> transition.currentWeight.toInt } state = state.map { case (loc, transition) => loc -> transition.advanceStep() } val newWeights = state.map { case (loc, transition) => loc -> transition.currentWeight.toInt } if (oldWeights != newWeights) { context.parent ! SetWeights(newWeights) } case SetWeights(weights) => state = weights.map { case (loc, weight) => loc -> TransitionDetails(weight, 0 , weight) } context.parent ! SetWeights(weights) } }
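A short worked example of the stepping arithmetic in TransitionDetails above (values chosen purely for illustration):

val idle = TransitionDetails(targetWeight = 0, delta = 0, currentWeight = 0)
val ramping = idle.setTarget(newTarget = 10, stepCount = 4) // delta = (10 - 0) / 4 = 2.5
val step1 = ramping.advanceStep() // currentWeight = 2.5
val step2 = step1.advanceStep()   // currentWeight = 5.0
val step3 = step2.advanceStep()   // currentWeight = 7.5
val step4 = step3.advanceStep()   // next step reaches the target, so it clamps: currentWeight = 10.0, delta = 0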
Example 150
Source File: ConsoleLogBuilder.scala From shield with MIT License | 5 votes |
package shield.actors.config.listener import akka.actor.{Actor, ActorLogging, Props} import shield.actors.RestartLogging import shield.actors.config.ConfigWatcherMsgs import shield.actors.listeners.{ConsoleLogger, LogCollector} import shield.config.DomainSettings class ConsoleLogBuilder(id: String, domain: DomainSettings) extends Actor with ActorLogging with ListenerBuilder with RestartLogging { val c = domain.ConfigForListener(id) val forwarder = context.actorOf(Props(new ConsoleLogger(id))) val collector = context.actorOf(Props(new LogCollector(id, domain, List(forwarder), 1))) context.parent ! ConfigWatcherMsgs.ListenerUpdated(id, collector) log.info(s"Built console logger $id") def receive = { case _ => } }
Example 151
Source File: AlternateUpstreamBuilder.scala From shield with MIT License | 5 votes |
package shield.actors.config.listener import akka.actor.{Actor, ActorLogging, Props} import akka.routing.SmallestMailboxPool import shield.actors.RestartLogging import shield.actors.config.ConfigWatcherMsgs import shield.actors.listeners.AlternateUpstream import shield.aws.S3DiffUploader import shield.config.DomainSettings class AlternateUpstreamBuilder(id: String, domain: DomainSettings) extends Actor with ActorLogging with ListenerBuilder with RestartLogging { val c = domain.ConfigForListener(id) val hostUri = c.getString("serviceLocation") val hostType = c.getString("serviceType") val freq = c.getInt("freq") val bucket = c.getString("bucket") val folder = if (c.hasPath("folder")) c.getString("folder") else "/" // since the s3 upload is synchronous, we want a pool of workers val uploader = context.actorOf(SmallestMailboxPool(5).props(S3DiffUploader.props(bucket, folder)), "s3UploadRouter") context.parent ! ConfigWatcherMsgs.ListenerUpdated(id, context.actorOf(Props(new AlternateUpstream(id, settings.DefaultServiceLocation, hostUri, hostType, freq, uploader)))) def receive = { case _ => } }
Example 152
Source File: KibanaBuilder.scala From shield with MIT License | 5 votes |
package shield.actors.config.listener import akka.actor.{Actor, ActorLogging, Props} import shield.actors.RestartLogging import shield.actors.config.ConfigWatcherMsgs import shield.actors.listeners.{KibanaForwarder, LogCollector} import shield.aws.AWSSigningConfig import shield.config.DomainSettings class KibanaBuilder(id: String, domain: DomainSettings) extends Actor with ActorLogging with ListenerBuilder with RestartLogging { val c = domain.ConfigForListener(id) val forwarder = context.actorOf(Props(new KibanaForwarder(id, c.getString("host"), c.getString("index-prefix"), c.getString("type"), c.getInt("max-outstanding"), AWSSigningConfig(c)))) val collector = context.actorOf(Props(new LogCollector(id, domain, List(forwarder), c.getInt("buffer-size")))) context.parent ! ConfigWatcherMsgs.ListenerUpdated(id, collector) log.info(s"Built kibana listener $id") def receive = { case _ => } }
Example 153
Source File: FluentdHttpBuilder.scala From shield with MIT License | 5 votes |
package shield.actors.config.listener import akka.actor.{Actor, ActorLogging, Props} import shield.actors.RestartLogging import shield.actors.config.ConfigWatcherMsgs import shield.actors.listeners.{FluentdHttpForwarder, LogCollector} import shield.config.DomainSettings class FluentdHttpBuilder(id: String, domain: DomainSettings) extends Actor with ActorLogging with ListenerBuilder with RestartLogging { val c = domain.ConfigForListener(id) val forwarder = context.actorOf(Props(new FluentdHttpForwarder(id, c.getString("host"), c.getInt("max-outstanding")))) val collector = context.actorOf(Props(new LogCollector(id, domain, List(forwarder), c.getInt("buffer-size")))) context.parent ! ConfigWatcherMsgs.ListenerUpdated(id, collector) log.info(s"Built FluentD listener $id") def receive = { case _ => } }
Example 154
Source File: ApiKeyAuthBuilder.scala From shield with MIT License | 5 votes |
package shield.actors.config.middleware import akka.actor.{Actor, ActorLogging} import shield.actors.{Middleware, RestartLogging} import shield.actors.config.ConfigWatcherMsgs import shield.actors.middleware.ApiKeyAuth import shield.config.DomainSettings import scala.collection.JavaConversions._ class ApiKeyAuthBuilder(id: String, domain: DomainSettings) extends Actor with ActorLogging with MiddlewareBuilder with RestartLogging { val c = domain.ConfigForMiddleware(id) log.info(s"Building ApiKeyAuth '$id' with config $c") domain.MiddlewareChain.find(_.id == id) match { case None => log.warning(s"Could not find SLA for middleware $id") case Some(mw) => context.parent ! ConfigWatcherMsgs.MiddlewareUpdated(Middleware( id, mw.sla, context.actorOf(ApiKeyAuth.props( c.getString("header-name"), c.getStringList("allowed").toSet, c.getBoolean("case-sensitive"), settings.DefaultServiceLocation )) )) } def receive = { case _ => } }
Example 155
Source File: BucketRateLimitBuilder.scala From shield with MIT License | 5 votes |
package shield.actors.config.middleware import akka.actor.{Actor, ActorLogging} import shield.actors.{Middleware, RestartLogging} import shield.actors.config.ConfigWatcherMsgs import shield.actors.middleware.BucketRateLimiter import shield.config.DomainSettings class BucketRateLimitBuilder(id: String, domain: DomainSettings) extends Actor with ActorLogging with MiddlewareBuilder with RestartLogging { val c = domain.ConfigForMiddleware(id) log.info(s"Building BucketRateLimiter '$id' with config $c") domain.MiddlewareChain.find(_.id == id) match { case None => log.warning(s"Could not find SLA for middleware $id") case Some(mw) => context.parent ! ConfigWatcherMsgs.MiddlewareUpdated(Middleware( id, mw.sla, context.actorOf(BucketRateLimiter.props( id, c.getString("bypass-header"), c.getInt("calls-per"), c.getInt("per-seconds"), domain.KVStores(c.getString("kvstore")), settings.DefaultServiceLocation )) )) } def receive = { case _ => } }
Example 156
Source File: ResponseCacheBuilder.scala From shield with MIT License | 5 votes |
package shield.actors.config.middleware import akka.actor.{Actor, ActorLogging} import shield.actors.{Middleware, RestartLogging} import shield.actors.config.ConfigWatcherMsgs import shield.actors.middleware.ResponseCache import shield.config.DomainSettings class ResponseCacheBuilder(id: String, domain: DomainSettings) extends Actor with ActorLogging with MiddlewareBuilder with RestartLogging { val c = domain.ConfigForMiddleware(id) log.info(s"Building ResponseCache '$id' with config $c") domain.MiddlewareChain.find(_.id == id) match { case None => log.warning(s"Could not find SLA for middleware $id") case Some(mw) => context.parent ! ConfigWatcherMsgs.MiddlewareUpdated(Middleware( id, mw.sla, context.actorOf(ResponseCache.props( id, domain.KVStores(c.getString("kvstore")), settings.DefaultServiceLocation )) )) } def receive = { case _ => } }
Example 157
Source File: S3ObjectWatcher.scala From shield with MIT License | 5 votes |
package shield.actors.config import akka.actor.{Actor, ActorLogging} import com.amazonaws.services.s3.AmazonS3Client sealed trait S3ObjectWatcherMessage case object Refresh extends S3ObjectWatcherMessage case class ChangedContents(contents: String) extends S3ObjectWatcherMessage class S3ObjectWatcher(bucketName: String, configFilename: String) extends Actor with ActorLogging { val s3Client = new AmazonS3Client() var lastContents = "" def receive = { case Refresh => val s3Object = s3Client.getObject(bucketName, configFilename) val newContents = scala.io.Source.fromInputStream(s3Object.getObjectContent).mkString if (newContents != lastContents) { log.info("Detected change in s3 file contents") log.debug(s"Fetched from s3: $newContents") context.parent ! ChangedContents(newContents) lastContents = newContents } } }
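In shield this watcher runs under a config-watcher parent that both schedules the Refresh ticks and receives the ChangedContents replies (via context.parent). A standalone sketch of that polling loop, with bucket and key names hypothetical:

import akka.actor.{ActorSystem, Props}
import scala.concurrent.duration._

val system = ActorSystem("config")
import system.dispatcher
val watcher = system.actorOf(Props(new S3ObjectWatcher("my-config-bucket", "shield-config.json")))
system.scheduler.schedule(0.seconds, 30.seconds, watcher, Refresh)
// Note: created at the top level like this, ChangedContents replies go to the
// user guardian; in shield the parent is the config watcher that consumes them.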
Example 158
Source File: StaticUpstreamWatcher.scala From shield with MIT License | 5 votes |
package shield.actors.config.upstream import akka.actor.{Actor, ActorLogging, Props} import com.typesafe.config.Config import shield.actors.RestartLogging import shield.actors.config.{ServiceDetails, UpstreamAggregatorMsgs} import shield.config.ServiceLocation import scala.collection.JavaConversions._ import scala.util.Try object StaticUpstreamWatcher { def props(domainConfig: Config): Props = Props(new StaticUpstreamWatcher(domainConfig)) } class StaticUpstreamWatcher(domainConfig: Config) extends Actor with ActorLogging with UpstreamWatcher with RestartLogging with UpstreamParser{ val rawServices = if (domainConfig.hasPath("upstreams")) domainConfig.getConfigList("upstreams").map(c => Try {parseUpstreamEntry(c.getString("serviceType"), c.getString("serviceLocation"), if (c.hasPath("weight")) c.getInt("weight") else 1) } ).toList else List[Try[(ServiceLocation, ServiceDetails)]]() for (attempt <- rawServices.filter(_.isFailure)) { log.warning(s"Bad upstream host in the config (${attempt.failed.get.getMessage})") } context.parent ! UpstreamAggregatorMsgs.DiscoveredUpstreams(rawServices.flatMap(_.toOption).toMap) def receive = { case _ => } }
Example 159
Source File: LogCollector.scala From shield with MIT License | 5 votes |
package shield.actors.listeners import akka.actor.{Actor, ActorLogging, ActorRef} import akka.event.LoggingAdapter import nl.grons.metrics.scala.{Meter, Timer} import shield.actors.{RequestProcessorCompleted, RestartLogging} import org.joda.time.format.ISODateTimeFormat import shield.config.{DomainSettings, HttpServiceLocation, Settings} import shield.metrics.Instrumented import spray.http.{HttpHeader, HttpResponse} import spray.json._ import scala.collection.mutable.ArrayBuffer import scala.concurrent.duration._ import scala.util.{Failure, Success, Try} case object FlushLogs case object LogsFlushed case class AccessLogs(buffer: Seq[JsObject]) object LogCollector { def handleResults(self: ActorRef, droppedMeter: Meter, log: LoggingAdapter, logCount: Int) : PartialFunction[Try[HttpResponse], Unit] = { case Success(r) => self ! LogsFlushed if (r.status.isFailure) { droppedMeter.mark(logCount) log.warning(s"Error forwarding access logs: ${r.entity.asString}") } case Failure(f) => self ! LogsFlushed droppedMeter.mark(logCount) log.warning(s"Error forwarding access logs: $f") } } class LogCollector(id: String, domain: DomainSettings, forwarders: Seq[ActorRef], maxBufferSize: Int) extends Actor with ActorLogging with RestartLogging with Instrumented { import context.dispatcher val settings = Settings(context.system) val shieldHost = JsString(settings.DefaultServiceLocation.baseUrl.toString) var buffer = ArrayBuffer[JsObject]() val dateTimeFormat = ISODateTimeFormat.dateTime() val logSerializationTimer: Timer = metrics.timer("log-serialization") // todo: profiling optimization - 1% of CPU time is spent here while under load def logJson(r: RequestProcessorCompleted): JsObject = logSerializationTimer.time { JsObject(Map( // todo: profiling optimization: use seconds, and cache it per second "@timestamp" -> JsString(dateTimeFormat.print(System.currentTimeMillis() - r.overallTiming)), "method" -> JsString(r.completion.request.method.toString()), // todo: profiling optimization: uri.toString is used in several places - can we cache it? "request_headers" -> JsObject(extractHeaders(r.completion.request.headers, domain.loggedRequestHeaders)), "response_headers" -> JsObject(extractHeaders(r.completion.details.response.headers, domain.loggedResponseHeaders)), "path" -> JsString(r.completion.request.uri.toString()), "template" -> JsString(r.completion.details.template.path.toString), "responding_service" -> JsString(r.completion.details.serviceName), "responding_host" -> JsString(r.completion.details.serviceLocation.locationName), "shield_host" -> shieldHost, "overall_time" -> JsNumber(r.overallTiming), "middleware_time" -> JsObject(r.middlewareTiming.map { case (attr, timing) => attr -> JsNumber(timing) }), // todo: cache header name should be config driven "cache_status" -> JsString(r.completion.details.response.headers.find(_.lowercaseName == "x-cache").map(_.value).getOrElse("nocache")), "response_size" -> JsNumber(r.completion.details.response.entity.data.length), "response_status" -> JsNumber(r.completion.details.response.status.intValue) )) } val bufferSizeHistogram = metrics.histogram("bufferSizeOnFlush", id) var flushTimer = context.system.scheduler.scheduleOnce(100.millis, self, FlushLogs) def flushLogs() = { flushTimer.cancel() bufferSizeHistogram += buffer.length if (buffer.nonEmpty) { val msg = AccessLogs(buffer) forwarders.foreach { _ ! msg } buffer = ArrayBuffer() } flushTimer = context.system.scheduler.scheduleOnce(100.millis, self, FlushLogs) } def receive: Receive = { case r: RequestProcessorCompleted => buffer += logJson(r) if (buffer.length >= maxBufferSize) { flushLogs() } case FlushLogs => flushLogs() } def extractHeaders(headers: List[HttpHeader], toExtract: Set[String]): Map[String, JsString] = { headers.filter(h => toExtract.contains(h.lowercaseName)).map(h => h.name -> JsString(h.value)).toMap } }
Example 160
Source File: KibanaForwarder.scala From shield with MIT License | 5 votes |
package shield.actors.listeners import akka.actor.{Actor, ActorLogging} import com.amazonaws.auth.{AWSCredentials, DefaultAWSCredentialsProviderChain} import com.typesafe.config.Config import shield.actors.RestartLogging import org.joda.time.format.DateTimeFormat import org.joda.time.{DateTimeZone, DateTime} import shield.aws.AWSSigningConfig import shield.metrics.Instrumented import spray.client.pipelining._ import spray.http.HttpResponse import shield.aws.AWSImplicits._ import spray.json.DefaultJsonProtocol._ import spray.json._ // todo: ensure useful mapping on the index class KibanaForwarder(id: String, host: String, indexPrefix: String, ttype: String, maxOutstanding: Int, signingParams: AWSSigningConfig) extends Actor with ActorLogging with RestartLogging with Instrumented { implicit val ctx = context.dispatcher // todo: timeout? val awsSigningConfig = signingParams val pipeline = sendReceive val dayFormat = DateTimeFormat.forPattern("yyyy.MM.dd") val outstandingCounter = metrics.counter("outstandingPosts", id) val droppedMeter = metrics.meter("droppedAccessLogs", id) val postTimer = timing("postToKibana", id) def receive = { case LogsFlushed => outstandingCounter -= 1 case AccessLogs(buffer) => if (buffer.nonEmpty) { if (outstandingCounter.count >= maxOutstanding) { droppedMeter.mark(buffer.length) } else postTimer { outstandingCounter += 1 val date = DateTimeFormat.forPattern("yyyy.MM.dd").print(DateTime.now(DateTimeZone.UTC)) // todo: CompactPrint is 1% cpu under load tests. Faster serialization library? val orderedCommands = buffer.flatMap { doc => List( JsObject( "index" -> JsObject( "_index" -> JsString(s"$indexPrefix-$date"), "_type" -> JsString(ttype) ) ).toJson.compactPrint, doc.toJson.compactPrint ) } val req = Post(s"$host/_bulk", orderedCommands.mkString("\n") + "\n").withAWSSigning(awsSigningConfig) pipeline(req) andThen LogCollector.handleResults(self, droppedMeter, log, buffer.length) } } } }
Example 161
Source File: FluentdHttpForwarder.scala From shield with MIT License | 5 votes |
package shield.actors.listeners import akka.actor.{ActorRef, Actor, ActorLogging} import shield.actors.RestartLogging import shield.metrics.Instrumented import spray.client.pipelining._ import spray.http.{HttpResponse, FormData} import spray.json.DefaultJsonProtocol._ import spray.json._ import scala.concurrent.duration._ import scala.util.{Failure, Success} class FluentdHttpForwarder(id: String, host: String, maxOutstanding: Int) extends Actor with ActorLogging with RestartLogging with Instrumented { implicit val ctx = context.dispatcher // todo: timeout? val pipeline = sendReceive var outstanding = metrics.counter("outstandingPosts", id) val droppedMeter = metrics.meter("droppedAccessLogs", id) val postTimer = timing("postToFluentd", id) def receive = { case LogsFlushed => outstanding -= 1 case AccessLogs(buffer) => if (buffer.nonEmpty) { if (outstanding.count >= maxOutstanding) { droppedMeter.mark(buffer.length) } else postTimer { outstanding += 1 val json = buffer.toJson.compactPrint val data = FormData(Map(("json", json))) val req = Post(host, data) pipeline(req) andThen LogCollector.handleResults(self, droppedMeter, log, buffer.length) } } } }
Example 162
Source File: S3DiffUploader.scala From shield with MIT License | 5 votes |
package shield.aws import java.io.{ByteArrayInputStream, InputStream} import java.nio.charset.StandardCharsets import akka.actor.{Actor, ActorLogging, Props} import com.amazonaws.auth.profile.ProfileCredentialsProvider import com.amazonaws.services.s3.AmazonS3Client import com.amazonaws.services.s3.model.ObjectMetadata import shield.actors.listeners.ComparisonDiffFile object S3DiffUploader{ def props(bucket: String, folder: String) : Props = Props(new S3DiffUploader(bucket, folder)) } class S3DiffUploader(bucket: String, folder: String) extends Actor with ActorLogging { val s3Client = new AmazonS3Client() val charset = StandardCharsets.UTF_8 val stripped = folder.stripPrefix("/").stripSuffix("/") val prefix = if (stripped.isEmpty) { stripped } else { stripped + "/" } def receive = { case file: ComparisonDiffFile => val metadata = new ObjectMetadata() metadata.setContentLength(file.contents.length) s3Client.putObject(bucket, s"$prefix${file.fileName}", new ByteArrayInputStream(file.contents), metadata) } }
Example 163
Source File: HttpApi.scala From jwt-akka-http with MIT License | 5 votes |
package ba.codecentric import java.util.concurrent.TimeUnit import akka.actor.{ Actor, ActorLogging, Props } import akka.http.scaladsl.Http import akka.http.scaladsl.Http.ServerBinding import akka.http.scaladsl.model.StatusCodes import akka.http.scaladsl.model.headers.RawHeader import akka.http.scaladsl.server.{ Directive1, Route } import akka.http.scaladsl.server.Directives._ import akka.stream.ActorMaterializer import akka.pattern._ import scala.util.Failure object HttpApi { import de.heikoseeberger.akkahttpcirce.FailFastCirceSupport._ import io.circe.generic.auto._ import authentikat.jwt._ final val Name = "http-api" final val AccessTokenHeaderName = "X-Access-Token" final case class LoginRequest(username: String, password: String) private val tokenExpiryPeriodInDays = 1 private val secretKey = "super_secret_key" private val header = JwtHeader("HS256") private def login: Route = post { entity(as[LoginRequest]) { case lr @ LoginRequest("admin", "admin") => val claims = setClaims(lr.username, tokenExpiryPeriodInDays) respondWithHeader(RawHeader(AccessTokenHeaderName, JsonWebToken(header, claims, secretKey))) { complete(StatusCodes.OK) } case LoginRequest(_, _) => complete(StatusCodes.Unauthorized) } } private def securedContent: Route = get { authenticated { claims => complete(s"User: ${claims.getOrElse("user", "")} has accessed a secured content!") } } private def authenticated: Directive1[Map[String, Any]] = optionalHeaderValueByName("Authorization").flatMap { case Some(jwt) if isTokenExpired(jwt) => complete(StatusCodes.Unauthorized -> "Session expired.") case Some(jwt) if JsonWebToken.validate(jwt, secretKey) => provide(getClaims(jwt)) case _ => complete(StatusCodes.Unauthorized) } private def setClaims(username: String, expiryPeriodInDays: Long): JwtClaimsSetMap = JwtClaimsSet( Map("user" -> username, "expiredAt" -> (System.currentTimeMillis() + TimeUnit.DAYS .toMillis(expiryPeriodInDays))) ) private def getClaims(jwt: String): Map[String, String] = jwt match { case JsonWebToken(_, claims, _) => claims.asSimpleMap.getOrElse(Map.empty[String, String]) } private def isTokenExpired(jwt: String): Boolean = getClaims(jwt).get("expiredAt").exists(_.toLong < System.currentTimeMillis()) def routes: Route = login ~ securedContent def apply(host: String, port: Int) = Props(new HttpApi(host, port)) } final class HttpApi(host: String, port: Int) extends Actor with ActorLogging { import HttpApi._ import context.dispatcher private implicit val materializer: ActorMaterializer = ActorMaterializer() Http(context.system).bindAndHandle(routes, host, port).pipeTo(self) override def receive: Receive = { case ServerBinding(address) => log.info("Server successfully bound at {}:{}", address.getHostName, address.getPort) case Failure(cause) => log.error("Failed to bind server", cause) context.system.terminate() } }
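The token handling above reduces to three authentikat-jwt calls, shown here in isolation (claim values illustrative):

import authentikat.jwt._

val header = JwtHeader("HS256")
val secret = "super_secret_key"
val claims = JwtClaimsSet(Map("user" -> "admin"))

val token = JsonWebToken(header, claims, secret)   // issue a signed token
val isValid = JsonWebToken.validate(token, secret) // verify the signature
val readBack = token match {                       // extract the claims again
  case JsonWebToken(_, claimsSet, _) => claimsSet.asSimpleMap.getOrElse(Map.empty[String, String])
  case _ => Map.empty[String, String]
}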
Example 164
Source File: Remoting.scala From spark1.52 with Apache License 2.0 | 5 votes |
package ch8 import org.learningconcurrency._ import ch8._ import akka.actor.Actor import akka.actor.ActorIdentity import akka.actor.ActorSelection.toScala import akka.actor.Identify import akka.actor.Props import akka.actor.actorRef2Scala import akka.event.Logging object RemotingPongySystem extends App { val system = remotingSystem("PongyDimension", 24321) val pongy = system.actorOf(Props[Pongy], "pongy") Thread.sleep(15000) system.shutdown() } class Runner extends Actor { val log = Logging(context.system, this) val pingy = context.actorOf(Props[Pingy], "pingy") def receive = { case "start" => val path = context.actorSelection("akka.tcp://PongyDimension@127.0.0.1:24321/user/pongy") path ! Identify(0) case ActorIdentity(0, Some(ref)) => pingy ! ref case ActorIdentity(0, None) => log.info("Something's wrong -- no pongy anywhere!") context.stop(self) case "pong" => log.info("got a pong from another dimension.") context.stop(self) } } object RemotingPingySystem extends App { val system = remotingSystem("PingyDimension", 24567) val runner = system.actorOf(Props[Runner], "runner") runner ! "start" Thread.sleep(5000) system.shutdown() }
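remotingSystem comes from the book's helper package (org.learningconcurrency); presumably it builds an ActorSystem with Akka's remote transport enabled, roughly like this Akka 2.3-era sketch:

import akka.actor.ActorSystem
import com.typesafe.config.ConfigFactory

def remotingSystem(name: String, port: Int): ActorSystem =
  ActorSystem(name, ConfigFactory.parseString(
    s"""
       |akka.actor.provider = "akka.remote.RemoteActorRefProvider"
       |akka.remote.enabled-transports = ["akka.remote.netty.tcp"]
       |akka.remote.netty.tcp.hostname = "127.0.0.1"
       |akka.remote.netty.tcp.port = $port
     """.stripMargin))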
Example 165
Source File: ActorLogReceive.scala From spark1.52 with Apache License 2.0 | 5 votes |
package org.apache.spark.util import akka.actor.Actor import org.slf4j.Logger private[spark] trait ActorLogReceive { self: Actor => // self-type: this trait can only be mixed into an Actor override def receive: Actor.Receive = new Actor.Receive { private val _receiveWithLogging = receiveWithLogging override def isDefinedAt(o: Any): Boolean = { val handled = _receiveWithLogging.isDefinedAt(o) if (!handled) { log.debug(s"Received unexpected actor system event: $o") } handled } override def apply(o: Any): Unit = { if (log.isDebugEnabled) { log.debug(s"[actor] received message $o from ${self.sender}") } val start = System.nanoTime _receiveWithLogging.apply(o) val timeTaken = (System.nanoTime - start).toDouble / 1000000 if (log.isDebugEnabled) { log.debug(s"[actor] handled message ($timeTaken ms) $o from ${self.sender}") } } } def receiveWithLogging: Actor.Receive protected def log: Logger }
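A minimal sketch of an actor opting into this logging; Worker and its message handling are hypothetical, and since the trait is private[spark] the code would have to live under the org.apache.spark namespace:

import akka.actor.Actor
import org.slf4j.{Logger, LoggerFactory}

class Worker extends Actor with ActorLogReceive {
  override protected def log: Logger = LoggerFactory.getLogger(classOf[Worker])
  // Handlers go into receiveWithLogging; the trait wraps them with debug logging and timing.
  override def receiveWithLogging: Actor.Receive = {
    case job: String => sender ! s"done: $job"
  }
}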
Example 166
Source File: HengHa.scala From spark1.52 with Apache License 2.0 | 5 votes |
package org.apache.sparktest import java.lang.management.ManagementFactory import akka.actor.Actor import akka.actor.ActorRef import akka.actor.ActorSystem import akka.actor.Props class Heng(ha: ActorRef) extends Actor { def receive = { case "start" => ha ! "heng" case "ha" => println("哈") ha ! "heng" case _ => println("heng what?") } } class Ha extends Actor { def receive = { case "heng" => println("哼") sender ! "ha" case _ => println("ha what?") } } object HengHa { def main(args: Array[String]): Unit = { // ActorSystem is a heavyweight object that creates 1..N threads, so use one ActorSystem per application val system = ActorSystem("HengHaSystem") // actorOf creates the Actor val ha = system.actorOf(Props[Ha], name = "ha") val heng = system.actorOf(Props(new Heng(ha)), name = "heng") //heng ! "start" //ManagementFactory.getGarbageCollectorMXBeans.map(_.getCollectionTime).sum } }
Example 167
Source File: ActorSerializer.scala From akka-viz with MIT License | 5 votes |
package akkaviz.serialization.serializers import akka.actor.Actor import akkaviz.serialization._ import upickle.Js case object ActorSerializer extends AkkaVizSerializer with ReflectiveSerialization { private[this] val actorDefaultFields = CachingClassInspector.of(classOf[Actor]).fields.map(_.name).toSet override def fieldSelector(inspector: ClassInspector): Set[String] = { inspector.allFieldNames -- actorDefaultFields } override def serialize(obj: Any, context: SerializationContext): Js.Value = { reflectiveSerialize(obj, context) } override def canSerialize(obj: Any): Boolean = obj match { case t: Actor => true case _ => false } }
Example 168
Source File: EventPersistorActor.scala From akka-viz with MIT License | 5 votes |
package akkaviz.persistence import akka.actor.{ActorRef, Actor, ActorLogging} import akkaviz.events.EventPublisherActor.Subscribe import akkaviz.events.{FilteringRule, Helpers} import akkaviz.events.Helpers.actorRefToString import akkaviz.events.types._ import akkaviz.serialization.MessageSerialization import com.datastax.driver.core.utils.UUIDs import io.getquill._ import io.getquill.naming._ import scala.concurrent.duration._ class EventPersistorActor(publisherRef: ActorRef) extends Actor with ActorLogging { import context.dispatcher private[this] var queue = List[ReceivedRecord]() private[this] val maxItemsInQueue = 20 override def preStart(): Unit = { super.preStart() context.system.scheduler.schedule(30.seconds, 30.seconds, self, DoInsert) publisherRef ! Subscribe } override def receive: Receive = { case DoInsert => doInsert() case r: ReceivedWithId if FilteringRule.isUserActor(r.actorRef) && FilteringRule.isUserActor(r.sender) => val msg = MessageSerialization.render(r.message) val id = UUIDs.timeBased() val time = System.currentTimeMillis() val records = List( ReceivedRecord(id, time, actorRefToString(r.sender), To, actorRefToString(r.actorRef), msg), ReceivedRecord(id, time, actorRefToString(r.actorRef), From, actorRefToString(r.sender), msg) ) queue ++= records if (queue.size >= maxItemsInQueue) { doInsert() } case _ => {} } private[this] case object DoInsert private[this] def doInsert(): Unit = { if (queue.nonEmpty) { db.run(query[ReceivedRecord].insert)(queue) queue = List() } } private[this] val db = source(new CassandraSyncSourceConfig[SnakeCase]("akkaviz.cassandra")) }
Example 169
Source File: LightSnapshotTest.scala From akka-viz with MIT License | 5 votes |
package akkaviz.events import akka.actor.{ActorSystem, Actor} import akka.testkit.{TestKit, TestActorRef} import akkaviz.events.types._ import org.scalatest.{FunSuiteLike, Matchers} class LightSnapshotTest() extends TestKit(ActorSystem("SnapshotTests")) with FunSuiteLike with Matchers with Helpers { val firstRef = TestActorRef[SomeActor](new SomeActor, "first") val secondRef = TestActorRef[SomeActor](new SomeActor, "second") test("should include actors receiving messages as live") { val events = Seq( ReceivedWithId(1, firstRef, secondRef, "sup", true), ReceivedWithId(2, secondRef, firstRef, "sup", true) ) val snapshot = snapshotOf(events) snapshot.liveActors should contain allOf (firstRef.path.toString, secondRef.path.toString) } test("should contain dead actors") { val events = Seq( ReceivedWithId(1, firstRef, secondRef, "sup", true), ReceivedWithId(2, secondRef, firstRef, "sup", true), Killed(secondRef) ) val snapshot = snapshotOf(events) snapshot.liveActors should contain(actorRefToString(firstRef)) snapshot.liveActors should not contain actorRefToString(secondRef) snapshot.dead should contain(actorRefToString(secondRef)) } test("should contain classes of instantiated actors") { val events = Seq( Instantiated(firstRef, firstRef.underlyingActor), Instantiated(secondRef, secondRef.underlyingActor) ) val snapshot = snapshotOf(events) snapshot.classNameFor(firstRef.path.toString) should equal(Some(firstRef.underlyingActor.getClass.getName)) snapshot.classNameFor(secondRef.path.toString) should equal(Some(secondRef.underlyingActor.getClass.getName)) } test("should include recreated actor as live") { val events = Seq( Instantiated(firstRef, firstRef.underlyingActor), Killed(firstRef), Spawned(firstRef) ) val snapshot = snapshotOf(events) snapshot.liveActors should contain(actorRefToString(firstRef)) snapshot.dead should be('empty) } test("should ignore BackendEvents not pertaining to actor state") { import scala.concurrent.duration._ val events = Seq( ActorSystemCreated(system), ReportingDisabled, ReportingEnabled, ThroughputMeasurement(firstRef, 0.0, 0xDEB1L), ReceiveDelaySet(2000.seconds) ) snapshotOf(events) should equal(LightSnapshot()) } test("should include restarted actors as live") { val events = Seq( Instantiated(firstRef, firstRef.underlyingActor), Killed(firstRef), Restarted(firstRef) ) val snaphshot = snapshotOf(events) snaphshot.dead should be('empty) snaphshot.liveActors should contain(actorRefToString(firstRef)) } def snapshotOf(events: Seq[BackendEvent]): LightSnapshot = { events.foldLeft(LightSnapshot())(_.update(_)) } } class SomeActor extends Actor { override def receive: Receive = { case _ => () } }
Example 170
Source File: Main.scala From akka-viz with MIT License | 5 votes |
import akka.actor.{Actor, ActorSystem, Props} import ask.AskDemo import fsm.DiningHakkersOnFsm import postoffice.PostOffice import restartDemo.RestartDemo import roulette.RussianRoulette import spray.SprayDemo import tree.TreeDemo import scala.util.Random object Main extends App { DiningHakkersOnFsm.run(ActorSystem("fsm")) PostOffice.run(ActorSystem("postoffice")) SprayDemo.run(ActorSystem("spray")) TreeDemo.run(ActorSystem("tree")) new RussianRoulette(5).run(ActorSystem("russianroulette")) AskDemo.run(ActorSystem("ask")) RestartDemo.run(ActorSystem("restartdemo")) val system = ActorSystem("smalldemos") val lazyActorProps = Props(new Actor { var counter = 0 override def receive: Receive = { case msg => Thread.sleep(Random.nextInt(2000)) counter += 1 sender() ! msg } }) val lazyActor1 = system.actorOf(lazyActorProps, "lazy1") val lazyActor2 = system.actorOf(lazyActorProps, "lazy2") for (i <- 0 to 1000) { lazyActor1.tell("doit", lazyActor2) } }
Example 171
Source File: AskDemo.scala From akka-viz with MIT License | 5 votes |
package ask import akka.actor.{ActorRef, Actor, ActorSystem, Props} import akka.pattern._ import akka.util.Timeout import scala.concurrent.ExecutionContext import scala.concurrent.duration._ object AskDemo { case class StartAsking(ref: ActorRef) implicit val timeout = Timeout(2 seconds) implicit val ec = ExecutionContext.global def run(system: ActorSystem): Unit = { val answeringActor = system.actorOf(Props[AnsweringActor], "answerer") val askingActor = system.actorOf(Props[AskingActor], "asker") askingActor ! StartAsking(answeringActor) system.scheduler.schedule(0 second, 10 seconds, toRunnable(answeringActor ? 42)) } class AnsweringActor extends Actor { override def receive: Receive = { case _: Int => sender ! "That's an int" case _: String => sender ! "That's a string" case _ => // no answer - to trigger timeout } } class AskingActor extends Actor { def idle = PartialFunction.empty override def receive = { case StartAsking(ref) => context.system.scheduler.schedule(0 seconds, 10 seconds, toRunnable(ref ? Nil)) context.system.scheduler.schedule(5 second, 10 seconds, toRunnable(ref.ask("Hello"))) context.become(idle) } } private[this] def toRunnable(thunk: => Any) = new Runnable { override def run(): Unit = thunk } }
Example 172
Source File: DistributedProcessingWorker.scala From aecor with MIT License | 5 votes |
package aecor.distributedprocessing import aecor.distributedprocessing.DistributedProcessing._ import aecor.distributedprocessing.DistributedProcessingWorker.KeepRunning import aecor.distributedprocessing.serialization.Message import cats.effect.syntax.effect._ import akka.actor.{ Actor, ActorLogging, Props, Status } import akka.pattern._ import cats.effect.Effect import cats.implicits._ private[aecor] object DistributedProcessingWorker { def props[F[_]: Effect](processWithId: Int => Process[F], processName: String): Props = Props(new DistributedProcessingWorker[F](processWithId, processName)) final case class KeepRunning(workerId: Int) extends Message } private[aecor] final class DistributedProcessingWorker[F[_]: Effect]( processFor: Int => Process[F], processName: String ) extends Actor with ActorLogging { import context.dispatcher case class ProcessStarted(process: RunningProcess[F]) case object ProcessTerminated var killSwitch: Option[F[Unit]] = None override def postStop: Unit = killSwitch.foreach(_.toIO.unsafeRunSync()) def receive: Receive = { case KeepRunning(workerId) => log.info("[{}] Starting process {}", workerId, processName) processFor(workerId).run .map(ProcessStarted) .toIO .unsafeToFuture() pipeTo self context.become { case ProcessStarted(RunningProcess(watchTermination, terminate)) => log.info("[{}] Process started {}", workerId, processName) killSwitch = Some(terminate) watchTermination.toIO.map(_ => ProcessTerminated).unsafeToFuture() pipeTo self context.become { case Status.Failure(e) => log.error(e, "Process failed {}", processName) throw e case ProcessTerminated => log.error("Process terminated {}", processName) throw new IllegalStateException(s"Process terminated $processName") } case Status.Failure(e) => log.error(e, "Process failed to start {}", processName) throw e case KeepRunning(_) => () } } }
Example 173
Source File: DistributedProcessingSupervisor.scala From aecor with MIT License | 5 votes |
package aecor.distributedprocessing import aecor.distributedprocessing.DistributedProcessingSupervisor.{ GracefulShutdown, ShutdownCompleted, Tick } import aecor.distributedprocessing.DistributedProcessingWorker.KeepRunning import akka.actor.{ Actor, ActorLogging, ActorRef, Props, Terminated } import akka.cluster.sharding.ShardRegion import scala.concurrent.duration.{ FiniteDuration, _ } object DistributedProcessingSupervisor { private final case object Tick final case object GracefulShutdown final case object ShutdownCompleted def props(processCount: Int, shardRegion: ActorRef, heartbeatInterval: FiniteDuration): Props = Props(new DistributedProcessingSupervisor(processCount, shardRegion, heartbeatInterval)) } final class DistributedProcessingSupervisor(processCount: Int, shardRegion: ActorRef, heartbeatInterval: FiniteDuration) extends Actor with ActorLogging { import context.dispatcher private val heartbeat = context.system.scheduler.schedule(0.seconds, heartbeatInterval, self, Tick) context.watch(shardRegion) override def postStop(): Unit = { heartbeat.cancel() () } override def receive: Receive = { case Tick => (0 until processCount).foreach { processId => shardRegion ! KeepRunning(processId) } case Terminated(`shardRegion`) => context.stop(self) case GracefulShutdown => log.info(s"Performing graceful shutdown of [$shardRegion]") shardRegion ! ShardRegion.GracefulShutdown val replyTo = sender() context.become { case Terminated(`shardRegion`) => log.info(s"Graceful shutdown completed for [$shardRegion]") context.stop(self) replyTo ! ShutdownCompleted } } }
Example 174
Source File: RestServiceActor.scala From fraud with Apache License 2.0 | 5 votes |
package fraud.main import akka.actor.{ Actor, ActorRef } import com.datastax.driver.core._ import fraud.main.RandomTransaction._ import spray.http.MediaTypes.{ `application/json`, `text/html` } import spray.httpx.SprayJsonSupport.{ sprayJsonMarshaller, sprayJsonUnmarshaller } import spray.json.JsonParser import spray.routing._ import scala.collection.JavaConversions._ import scala.xml.Elem trait RestService extends HttpService { import TransactionJsonProtocol._ def communicate(t: Transaction) def indexHtml(): Elem def cleanHtml(): Elem def fraudHtml(): Elem val route = path("") { get { respondWithMediaType(`text/html`) { complete { indexHtml() } } } } ~ path("transaction") { post { entity(as[Transaction]) { transaction => complete { communicate(transaction) transaction } } } } ~ path("transactions") { get { respondWithMediaType(`application/json`) { complete { randomTransactions(10) } } } } ~ path("fraud") { get { respondWithMediaType(`text/html`) { complete { fraudHtml() } } } } ~ path("clean") { get { respondWithMediaType(`text/html`) { complete { cleanHtml() } } } } }
Example 175
Source File: ClusterStateInformer.scala From distributed-cache-on-k8s-poc with MIT License | 5 votes |
package cluster import akka.actor.{ Actor, ActorLogging, Props } import akka.cluster.ClusterEvent._ import akka.cluster.{ Cluster, ClusterEvent } class ClusterStateInformer extends Actor with ActorLogging { val cluster = Cluster(context.system) override def preStart(): Unit = { cluster.subscribe( subscriber = self, initialStateMode = ClusterEvent.InitialStateAsEvents, to = classOf[MemberEvent], classOf[UnreachableMember] ) } override def postStop(): Unit = cluster.unsubscribe(self) override def receive: Receive = { case MemberJoined(member) => log.info(s"Member ${member.address} Joined") case MemberUp(member) => log.info("Member is Up: {}", member.address) case UnreachableMember(member) => log.info("Member detected as unreachable: {}", member) case MemberRemoved(member, previousStatus) => log.info( "Member is Removed: {} after {}", member.address, previousStatus) case me: MemberEvent ⇒ log.info(s"Received Member event $me for Member: ${me.member.address}") } } object ClusterStateInformer { def props():Props = Props(new ClusterStateInformer) }
Example 176
Source File: CacheDataActor.scala From distributed-cache-on-k8s-poc with MIT License | 5 votes |
package cluster import java.util.UUID import akka.actor.SupervisorStrategy.Stop import akka.actor.{ Actor, ActorLogging, Props, ReceiveTimeout } import akka.cluster.sharding.ShardRegion import akka.cluster.sharding.ShardRegion.Passivate import cluster.CacheDataActor.Get class CacheDataActor extends Actor with ActorLogging { override def receive: Receive = { case Get(id) => sender ! s"cached data for id: $id" case ReceiveTimeout => log.info(s"sending Passivate to metadata parent: {${context.parent.path.name}} for ${self.path.name}") context.parent ! Passivate(stopMessage = Stop) case Stop => context.stop(self) log.info(s"Passivating metadata actor for ${self.path.name}") } } object CacheDataActor { final val numOfShards = 50 // Planned num of cluster nodes val extractEntityId: ShardRegion.ExtractEntityId = { case msg@Get(id) => (id.toString, msg) } val extractShardId: ShardRegion.ExtractShardId = { case Get(id) => (id.hashCode() % numOfShards).toString } case class Get(id: UUID) def props: Props = Props(new CacheDataActor()) }
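A minimal sketch of wiring this entity into Cluster Sharding and querying it through the shard region, using the extractors defined above (system name and timeout are arbitrary):

import java.util.UUID
import akka.actor.ActorSystem
import akka.cluster.sharding.{ClusterSharding, ClusterShardingSettings}
import akka.pattern.ask
import akka.util.Timeout
import scala.concurrent.duration._

val system = ActorSystem("cache")
val region = ClusterSharding(system).start(
  typeName = "CacheData",
  entityProps = CacheDataActor.props,
  settings = ClusterShardingSettings(system),
  extractEntityId = CacheDataActor.extractEntityId,
  extractShardId = CacheDataActor.extractShardId)

implicit val timeout = Timeout(5.seconds)
import system.dispatcher
// The region routes Get(id) to the entity for that id, starting it on demand.
(region ? CacheDataActor.Get(UUID.randomUUID())).mapTo[String].foreach(println)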
Example 177
Source File: dProcessManager.scala From reactive-application-development-scala with Apache License 2.0 | 5 votes |
package com.airport import akka.actor.{ReceiveTimeout, Actor, ActorRef} import scala.concurrent.duration._ object BankTransferProcessProtocol { sealed trait BankTransferProcessMessage final case class TransferFunds( transactionId: String, fromAccount: ActorRef, toAccount: ActorRef, amount: Double) extends BankTransferProcessMessage } object BankTransferProcess { final case class FundsTransfered(transactionId: String) final case class TransferFailed(transactionId: String) } object AccountProtocol { sealed trait AccountProtocolMessage case class Withdraw(amount: Double) extends AccountProtocolMessage case class Deposit(amount: Double) extends AccountProtocolMessage final case object Acknowledgment } class BankTransferProcess extends Actor { import BankTransferProcess._ import BankTransferProcessProtocol._ import AccountProtocol._ context.setReceiveTimeout(30.minutes) override def receive = { case TransferFunds(transactionId, fromAccount, toAccount, amount) => fromAccount ! Withdraw(amount) val client = sender() context become awaitWithdrawal(transactionId, amount, toAccount, client) } def awaitWithdrawal(transactionId: String, amount: Double, toAccount: ActorRef, client: ActorRef): Receive = { case Acknowledgment => toAccount ! Deposit(amount) context become awaitDeposit(transactionId, client) case ReceiveTimeout => client ! TransferFailed(transactionId) context.stop(self) } def awaitDeposit(transactionId: String, client: ActorRef): Receive = { case Acknowledgment => client ! FundsTransfered(transactionId) context.stop(self) case ReceiveTimeout => client ! TransferFailed(transactionId) context.stop(self) } }
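A sketch of exercising the process manager above end to end; the Account actor is a hypothetical always-acknowledging stand-in:

import akka.actor.{Actor, ActorSystem, Props}
import akka.pattern.ask
import akka.util.Timeout
import scala.concurrent.duration._

class Account extends Actor {
  import AccountProtocol._
  var balance: Double = 100.0
  def receive = {
    case Withdraw(amount) => balance -= amount; sender() ! Acknowledgment
    case Deposit(amount)  => balance += amount; sender() ! Acknowledgment
  }
}

object TransferDemo extends App {
  import BankTransferProcessProtocol._
  val system = ActorSystem("bank")
  import system.dispatcher
  implicit val timeout = Timeout(31.minutes) // must outlive the process's 30-minute receive timeout
  val from = system.actorOf(Props[Account], "from")
  val to = system.actorOf(Props[Account], "to")
  val process = system.actorOf(Props[BankTransferProcess], "transfer-tx-1")
  // With ask, the process's captured client is the temporary ask actor, so the
  // FundsTransfered / TransferFailed outcome arrives as the future's value.
  (process ? TransferFunds("tx-1", from, to, 25.0)).foreach(println)
}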
Example 178
Source File: cAircraftActor.scala From reactive-application-development-scala with Apache License 2.0 | 5 votes |
package com.airport

import akka.actor.Actor

class AircraftActor(
    id: String,
    callsign: String,
    altitude: Double,
    speed: Double,
    heading: Double,
    passengers: List[Passenger],
    weather: List[Weather]) extends Actor {

  import AircraftProtocol._

  var currentState: Aircraft = Aircraft(id, callsign, altitude, speed, heading, passengers, weather)

  def receive = {
    case ChangeAltitude(altitude) =>
      currentState = currentState.copy(altitude = altitude)
      sender() ! OK
    case ChangeSpeed(speed) =>
      // The original discarded the result of copy; the new state must be assigned.
      currentState = currentState.copy(speed = speed)
      sender() ! OK
    case ChangeHeading(heading) =>
      currentState = currentState.copy(heading = heading)
      sender() ! OK
    case BoardPassenger(passenger) =>
      // Prepend to the current state's list rather than the constructor
      // argument, so earlier boardings are not lost.
      currentState = currentState.copy(passengers = passenger :: currentState.passengers)
      sender() ! OK
    case AddWeather(incomingWeather) =>
      currentState = currentState.copy(weather = incomingWeather :: currentState.weather)
      sender() ! OK
  }
}
Example 179
Source File: Librarian.scala From reactive-application-development-scala with Apache License 2.0 | 5 votes |
package com.rarebooks.library

import akka.actor.{ Actor, ActorRef, ActorLogging, Props, Stash }

import scala.concurrent.duration.FiniteDuration

object Librarian {

  import Catalog._
  import RareBooksProtocol._

  final case class Done(e: Either[BookNotFound, BookFound], customer: ActorRef)

  def props(findBookDuration: FiniteDuration): Props =
    Props(new Librarian(findBookDuration))

  // Note: this listing is an excerpt that omits the Librarian actor class
  // itself; `log` below is the ActorLogging member of that omitted class, so
  // the snippet will not compile standalone.
  private def process(r: Either[BookNotFound, BookFound], sender: ActorRef): Unit = {
    r fold (
      f => {
        sender ! f
        log.info(f.toString)
      },
      s => sender ! s)
  }
}
Example 180
Source File: RareBooks.scala From reactive-application-development-scala with Apache License 2.0 | 5 votes |
package com.rarebooks.library

import akka.actor.{ Actor, ActorLogging, OneForOneStrategy, Props, Stash, SupervisorStrategy }
import akka.routing.{ ActorRefRoutee, Router, RoundRobinRoutingLogic }

import scala.concurrent.duration.{ MILLISECONDS => Millis, FiniteDuration, Duration }

object RareBooks {
  case object Close
  case object Open
  case object Report

  def props: Props =
    Props(new RareBooks)
}

class RareBooks extends Actor with ActorLogging with Stash {

  import context.dispatcher
  import RareBooks._
  import RareBooksProtocol._

  override val supervisorStrategy: SupervisorStrategy = {
    val decider: SupervisorStrategy.Decider = {
      case Librarian.ComplainException(complain, customer) =>
        customer ! Credit()
        log.info(s"RareBooks sent customer $customer a credit")
        SupervisorStrategy.Restart
    }
    OneForOneStrategy()(decider orElse super.supervisorStrategy.decider)
  }

  private val openDuration: FiniteDuration =
    Duration(context.system.settings.config.getDuration("rare-books.open-duration", Millis), Millis)

  private val closeDuration: FiniteDuration =
    Duration(context.system.settings.config.getDuration("rare-books.close-duration", Millis), Millis)

  private val nbrOfLibrarians: Int =
    context.system.settings.config getInt "rare-books.nbr-of-librarians"

  private val findBookDuration: FiniteDuration =
    Duration(context.system.settings.config.getDuration("rare-books.librarian.find-book-duration", Millis), Millis)

  private val maxComplainCount: Int =
    context.system.settings.config getInt "rare-books.librarian.max-complain-count"

  var requestsToday: Int = 0
  var totalRequests: Int = 0

  var router: Router = createLibrarian()

  context.system.scheduler.scheduleOnce(openDuration, self, Close)

  protected def createLibrarian(): Router = {
    var cnt: Int = 0
    val routees: Vector[ActorRefRoutee] = Vector.fill(nbrOfLibrarians) {
      val r = context.actorOf(Librarian.props(findBookDuration, maxComplainCount), s"librarian-$cnt")
      cnt += 1
      ActorRefRoutee(r)
    }
    Router(RoundRobinRoutingLogic(), routees)
  }
}
Example 181
Source File: Subscriber.scala From reactive-application-development-scala with Apache License 2.0 | 5 votes |
import akka.actor.Actor

object Subscriber {
  case object Register
  case class Work(m: String)
}

import Subscriber.{ Register, Work }

class Subscriber extends Actor {
  override def receive = {
    case Register =>
      sender() ! Publisher.Ok
    case Work(m) =>
      System.out.println(s"Working on $m")
      sender() ! Publisher.Ok
  }
}
Example 182
Source File: Publisher.scala From reactive-application-development-scala with Apache License 2.0 | 5 votes |
import akka.actor.{ Actor, ActorRef }
import Subscriber.{ Register, Work }

object Publisher {
  case object Ok
}

class Publisher(subscriber: ActorRef) extends Actor {
  override def preStart = subscriber ! Register

  override def receive = {
    case Publisher.Ok => subscriber ! Work("Do something!")
  }
}
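A sketch wiring the pair together. Note the protocol is deliberately circular: Register is answered with Ok, every Ok triggers Work, and every Work is answered with Ok again, so the exchange continues until the system is terminated:

import akka.actor.{ ActorSystem, Props }

object PubSubApp extends App {
  val system = ActorSystem("pub-sub")
  val subscriber = system.actorOf(Props(new Subscriber), "subscriber")
  // The publisher registers with the subscriber in preStart.
  system.actorOf(Props(new Publisher(subscriber)), "publisher")

  Thread.sleep(1000) // let a few Work messages flow
  system.terminate()
}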
Example 183
Source File: RareBooks.scala From reactive-application-development-scala with Apache License 2.0 | 5 votes |
package com.rarebooks.library

import akka.actor.{ Actor, ActorLogging, ActorPath, Address, OneForOneStrategy, Props, RootActorPath, Stash, SupervisorStrategy }
import akka.routing.{ ActorRefRoutee, RoundRobinRoutingLogic, Router }

import scala.concurrent.duration.{ Duration, FiniteDuration, MILLISECONDS => Millis }

object RareBooks {
  case object Close
  case object Open
  case object Report

  // val name: String =
  //   "rare-books"
  //
  // def pathFor(address: Address): ActorPath =
  //   RootActorPath(address) / "user" / name

  def props: Props =
    Props(new RareBooks)
}

class RareBooks extends Actor with ActorLogging with Stash {

  import context.dispatcher
  import RareBooks._
  import LibraryProtocol._

  override val supervisorStrategy: SupervisorStrategy = {
    val decider: SupervisorStrategy.Decider = {
      case Librarian.ComplainException(complain, customer) =>
        customer ! Credit()
        log.info(s"RareBooks sent customer $customer a credit")
        SupervisorStrategy.Restart
    }
    OneForOneStrategy()(decider orElse super.supervisorStrategy.decider)
  }

  private val openDuration: FiniteDuration =
    Duration(context.system.settings.config.getDuration("rare-books.open-duration", Millis), Millis)

  private val closeDuration: FiniteDuration =
    Duration(context.system.settings.config.getDuration("rare-books.close-duration", Millis), Millis)

  private val nbrOfLibrarians: Int =
    context.system.settings.config getInt "rare-books.nbr-of-librarians"

  private val findBookDuration: FiniteDuration =
    Duration(context.system.settings.config.getDuration("rare-books.librarian.find-book-duration", Millis), Millis)

  private val maxComplainCount: Int =
    context.system.settings.config getInt "rare-books.librarian.max-complain-count"

  var requestsToday: Int = 0
  var totalRequests: Int = 0

  var router: Router = createLibrarian()

  context.system.scheduler.scheduleOnce(openDuration, self, Close)

  protected def createLibrarian(): Router = {
    var cnt: Int = 0
    val routees: Vector[ActorRefRoutee] = Vector.fill(nbrOfLibrarians) {
      val r = context.actorOf(Librarian.props(findBookDuration, maxComplainCount), s"librarian-$cnt")
      cnt += 1
      ActorRefRoutee(r)
    }
    Router(RoundRobinRoutingLogic(), routees)
  }
}
Example 185
Source File: RareBooks.scala From reactive-application-development-scala with Apache License 2.0 | 5 votes |
package com.rarebooks.library

import akka.actor.{ Actor, ActorLogging, Props, Stash }
import akka.routing.{ ActorRefRoutee, Router, RoundRobinRoutingLogic }

import scala.concurrent.duration.{ MILLISECONDS => Millis, FiniteDuration, Duration }

object RareBooks {
  case object Close
  case object Open
  case object Report

  def props: Props =
    Props(new RareBooks)
}

class RareBooks extends Actor with ActorLogging with Stash {

  import context.dispatcher
  import RareBooks._
  import RareBooksProtocol._

  private val openDuration: FiniteDuration =
    Duration(context.system.settings.config.getDuration("rare-books.open-duration", Millis), Millis)

  private val closeDuration: FiniteDuration =
    Duration(context.system.settings.config.getDuration("rare-books.close-duration", Millis), Millis)

  private val nbrOfLibrarians: Int =
    context.system.settings.config getInt "rare-books.nbr-of-librarians"

  private val findBookDuration: FiniteDuration =
    Duration(context.system.settings.config.getDuration("rare-books.librarian.find-book-duration", Millis), Millis)

  var requestsToday: Int = 0
  var totalRequests: Int = 0

  var router: Router = createLibrarian()

  context.system.scheduler.scheduleOnce(openDuration, self, Close)

  protected def createLibrarian(): Router = {
    var cnt: Int = 0
    val routees: Vector[ActorRefRoutee] = Vector.fill(nbrOfLibrarians) {
      val r = context.actorOf(Librarian.props(findBookDuration), s"librarian-$cnt")
      cnt += 1
      ActorRefRoutee(r)
    }
    Router(RoundRobinRoutingLogic(), routees)
  }
}
Example 186
Source File: AtLeastOnce.scala From reactive-application-development-scala with Apache License 2.0 | 5 votes |
import akka.actor.{ Actor, ActorSelection }
import akka.persistence.{ AtLeastOnceDelivery, PersistentActor }

sealed trait Cmd
case class SayHello(deliveryId: Long, s: String) extends Cmd
case class ReceiveHello(deliveryId: Long) extends Cmd

sealed trait Evt
case class HelloSaid(s: String) extends Evt
case class HelloReceived(deliveryId: Long) extends Evt

class SendActor(destination: ActorSelection) extends PersistentActor with AtLeastOnceDelivery {

  override def persistenceId: String = "persistence-id"

  override def receiveCommand: Receive = {
    case s: String => persist(HelloSaid(s))(updateState)
    case ReceiveHello(deliveryId) => persist(HelloReceived(deliveryId))(updateState)
  }

  override def receiveRecover: Receive = {
    case evt: Evt => updateState(evt)
  }

  def updateState(evt: Evt): Unit = evt match {
    case HelloSaid(s) =>
      deliver(destination)(deliveryId => SayHello(deliveryId, s))
    case HelloReceived(deliveryId) =>
      confirmDelivery(deliveryId)
  }
}

class ReceiveActor extends Actor {
  def receive = {
    case SayHello(deliveryId, s) =>
      // ... do something with s
      sender() ! ReceiveHello(deliveryId)
  }
}
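A wiring sketch, assuming an Akka Persistence journal plugin is configured for SendActor (actor and system names are illustrative):

import akka.actor.{ ActorSystem, Props }

object AtLeastOnceApp extends App {
  val system = ActorSystem("at-least-once")
  val receiver = system.actorOf(Props(new ReceiveActor), "receiver")
  // deliver() keeps redelivering SayHello until ReceiveActor answers with
  // ReceiveHello and confirmDelivery is called in updateState.
  val sendActor = system.actorOf(Props(new SendActor(system.actorSelection(receiver.path))), "sender")

  sendActor ! "hello"
}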
Example 187
Source File: ContextsMaster.scala From mist with Apache License 2.0 | 5 votes |
package io.hydrosphere.mist.master.execution

import akka.actor.{ Actor, ActorLogging, ActorRef, Props }
import io.hydrosphere.mist.master.models.ContextConfig
import io.hydrosphere.mist.utils.akka.{ ActorF, ActorFSyntax }

class ContextsMaster(
  frontendF: ActorF[ContextConfig]
) extends Actor with ActorLogging with ActorFSyntax {

  type State = Map[String, ActorRef]

  override def receive: Receive = process(Map.empty)

  private def process(state: State): Receive = {
    case run: ContextEvent.RunJobCommand =>
      val (next, ref) = getOrCreate(state, run.context)
      ref forward run.request
      context become process(next)

    case c @ ContextEvent.CancelJobCommand(name, req) =>
      state.get(name) match {
        case Some(ref) => ref forward req
        case None => sender() ! akka.actor.Status.Failure(new IllegalStateException("Can't cancel job on stopped/unknown context"))
      }

    case upd @ ContextEvent.UpdateContext(ctx) =>
      state.get(ctx.name) match {
        case Some(ref) => ref forward upd
        case None =>
          val (next, ref) = getOrCreate(state, ctx)
          context become process(next)
      }

    case ContextsMaster.ContextTerminated(name) =>
      val next = state - name
      context become process(next)
  }

  private def getOrCreate(state: State, ctx: ContextConfig): (State, ActorRef) = {
    state.get(ctx.name) match {
      case Some(r) => (state, r)
      case None =>
        val ref = frontendF.create(ctx)
        val next = state + (ctx.name -> ref)
        context.watchWith(ref, ContextsMaster.ContextTerminated(ctx.name))
        (next, ref)
    }
  }
}

object ContextsMaster {

  case class ContextTerminated(name: String)

  def props(contextF: ActorF[ContextConfig]): Props = Props(classOf[ContextsMaster], contextF)
}
Example 188
Source File: FutureSubscribe.scala From mist with Apache License 2.0 | 5 votes |
package io.hydrosphere.mist.master.execution

import akka.actor.{ Actor, ActorRef }

import scala.concurrent.Future

trait FutureSubscribe extends Actor {

  private var realRef: Option[ActorRef] = None

  override def preStart(): Unit = {
    super.preStart()
    realRef = Some(self)
  }

  override def postStop(): Unit = {
    super.postStop()
    realRef = None
  }

  def subscribe0[A, B, C](future: Future[A])(f: A => B, errHandle: Throwable => C): Unit = {
    future.onComplete(res => {
      realRef match {
        case None =>
        case Some(ref) =>
          val msg = res match {
            case scala.util.Success(v) => f(v)
            case scala.util.Failure(e) => errHandle(e)
          }
          ref ! msg
      }
    })(context.dispatcher)
  }

  def subscribe[A, B](future: Future[A])(f: A => B): Unit =
    subscribe0(future)(f, e => akka.actor.Status.Failure(e))

  def subscribeId[A](future: Future[A]): Unit =
    subscribe(future)(identity[A])
}
Example 189
Source File: FunctionInfoProviderRunner.scala From mist with Apache License 2.0 | 5 votes |
package io.hydrosphere.mist.master.jobs

import akka.actor.{ Actor, ActorRef, ActorSystem, Props, ReceiveTimeout }
import io.hydrosphere.mist.core.CommonData
import io.hydrosphere.mist.core.CommonData.RegisterJobInfoProvider
import io.hydrosphere.mist.master.FunctionInfoProviderConfig

import scala.concurrent.duration.{ Duration, FiniteDuration }
import scala.concurrent.{ Future, Promise }
import scala.sys.process.Process

class FunctionInfoProviderRunner(
  runTimeout: FiniteDuration,
  cacheEntryTtl: FiniteDuration,
  masterHost: String,
  clusterPort: Int,
  sparkConf: Map[String, String]
) extends WithSparkConfArgs {

  def run()(implicit system: ActorSystem): Future[ActorRef] = {
    val refWaiter = ActorRefWaiter(runTimeout)(system)
    val cmd = Seq(
      s"${sys.env("MIST_HOME")}/bin/mist-function-info-provider",
      "--master", masterHost,
      "--cluster-port", clusterPort.toString,
      "--cache-entry-ttl", cacheEntryTtl.toMillis.toString)

    val builder = Process(cmd, None, ("SPARK_CONF", sparkConfArgs(sparkConf).mkString(" ")))
    builder.run(false) // launch the external process without connecting stdin
    refWaiter.waitRef()
  }
}

trait WithSparkConfArgs {

  def sparkConfArgs(sparkConf: Map[String, String]): Seq[String] = {
    sparkConf.map { case (k, v) => s"--conf $k=$v" }.toSeq
  }
}

trait ActorRefWaiter {
  def waitRef(): Future[ActorRef]
}

object ActorRefWaiter {

  class IdentityActor(pr: Promise[ActorRef], initTimeout: Duration) extends Actor {

    override def preStart(): Unit = {
      context.setReceiveTimeout(initTimeout)
    }

    override def receive: Receive = {
      case RegisterJobInfoProvider(ref) =>
        pr.success(ref)
        context stop self

      case ReceiveTimeout =>
        pr.failure(new IllegalStateException("Initialization of FunctionInfoProvider failed: timeout"))
        context stop self
    }
  }

  def apply(initTimeout: Duration)(implicit system: ActorSystem): ActorRefWaiter = new ActorRefWaiter {
    override def waitRef(): Future[ActorRef] = {
      val pr = Promise[ActorRef]
      system.actorOf(Props(new IdentityActor(pr, initTimeout)), CommonData.FunctionInfoProviderRegisterActorName)
      pr.future
    }
  }
}

object FunctionInfoProviderRunner {

  def create(config: FunctionInfoProviderConfig, masterHost: String, clusterPort: Int): FunctionInfoProviderRunner = {
    sys.env.get("SPARK_HOME") match {
      case Some(_) =>
        new FunctionInfoProviderRunner(config.runTimeout, config.cacheEntryTtl, masterHost, clusterPort, config.sparkConf)
      case None =>
        throw new IllegalStateException("You should provide SPARK_HOME env variable for running mist")
    }
  }
}
Example 190
Source File: FutureSubscribeSpec.scala From mist with Apache License 2.0 | 5 votes |
package io.hydrosphere.mist.master.execution

import akka.actor.{ Actor, ActorRef, Props }
import akka.testkit.{ TestActorRef, TestProbe }
import io.hydrosphere.mist.master.ActorSpec

import scala.concurrent.{ Future, Promise }

class FutureSubscribeSpec extends ActorSpec("future-subsribe-spec") {

  import FutureSubscribeSpec._

  it("should handle success") {
    val actor = TestActorRef[TestActor](Props(classOf[TestActor]))
    val probe = TestProbe()
    val p = Promise[Unit]

    probe.send(actor, TestMessage(p.future))
    p.success(())
    probe.expectMsgType[Ok.type]
  }

  it("should handle failure") {
    val actor = TestActorRef[TestActor](Props(classOf[TestActor]))
    val probe = TestProbe()
    val p = Promise[Unit]

    probe.send(actor, TestMessage(p.future))
    p.failure(new RuntimeException())
    probe.expectMsgType[Err.type]
  }
}

object FutureSubscribeSpec {

  sealed trait Rsp
  case object Ok extends Rsp
  case object Err extends Rsp

  case class TestMessage(future: Future[Unit])

  class TestActor extends Actor with FutureSubscribe {

    import context._

    override def receive: Receive = {
      case TestMessage(future) =>
        subscribe0(future)(_ => Ok, _ => Err)
        context become respond(sender())
    }

    private def respond(respond: ActorRef): Receive = {
      case x: Rsp => respond ! x
    }
  }
}
Example 191
Source File: RestartSupervisor.scala From mist with Apache License 2.0 | 5 votes |
package io.hydrosphere.mist.utils.akka

import akka.pattern.pipe
import akka.actor.{ Actor, ActorLogging, ActorRef, ActorRefFactory, Props, ReceiveTimeout, SupervisorStrategy, Terminated, Timers }
import io.hydrosphere.mist.utils.Logger

import scala.concurrent.{ Future, Promise }
import scala.concurrent.duration._

class RestartSupervisor(
  name: String,
  start: () => Future[ActorRef],
  timeout: FiniteDuration,
  maxRetry: Int
) extends Actor with ActorLogging with Timers {

  override def receive: Receive = init

  import context._
  import RestartSupervisor._

  private def init: Receive = {
    case Event.Start(req) =>
      start().map(Event.Started) pipeTo self
      context become await(Some(req), 0)
  }

  private def await(req: Option[Promise[ActorRef]], attempts: Int): Receive = {
    case Event.Started(ref) =>
      req.foreach(_.success(self))
      context watch ref
      context become proxy(ref)

    case akka.actor.Status.Failure(e) if maxRetry == attempts + 1 =>
      req.foreach(_.failure(e))
      log.error(e, "Starting child for {} failed, maxRetry reached", name)
      context stop self

    case akka.actor.Status.Failure(e) =>
      log.error(e, "Starting child for {} failed", name)
      timers.startSingleTimer("timeout", Event.Timeout, timeout)
      context become restartTimeout(req, attempts)
  }

  private def proxy(ref: ActorRef): Receive = {
    case Terminated(_) =>
      log.error("Reference for {} was terminated. Restarting", name)
      timers.startSingleTimer("timeout", Event.Timeout, timeout)
      context become restartTimeout(None, 0)

    case x => ref.forward(x)
  }

  private def restartTimeout(req: Option[Promise[ActorRef]], attempts: Int): Receive = {
    case Event.Timeout =>
      start().map(Event.Started) pipeTo self
      context become await(req, attempts + 1)
  }
}

object RestartSupervisor {

  sealed trait Event
  object Event {
    final case class Start(req: Promise[ActorRef]) extends Event
    case object Restart extends Event
    final case class Started(ref: ActorRef) extends Event
    case object Timeout extends Event
  }

  def props(
    name: String,
    start: () => Future[ActorRef],
    timeout: FiniteDuration,
    maxRetry: Int
  ): Props = {
    Props(classOf[RestartSupervisor], name, start, timeout, maxRetry)
  }

  def wrap(
    name: String,
    start: () => Future[ActorRef],
    timeout: FiniteDuration,
    maxRetry: Int
  )(implicit af: ActorRefFactory): Future[ActorRef] = {
    val ref = af.actorOf(props(name, start, timeout, maxRetry))
    val promise = Promise[ActorRef]
    ref ! Event.Start(promise)
    promise.future
  }

  def wrap(
    name: String,
    maxRetry: Int,
    start: () => Future[ActorRef]
  )(implicit af: ActorRefFactory): Future[ActorRef] =
    wrap(name, start, 5.seconds, maxRetry)(af)
}
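A sketch of using the wrapper (SomeWorker is a hypothetical child actor): wrap returns a future of the supervisor's own ref, which then proxies messages to the current child and restarts it when it terminates:

import akka.actor.{ Actor, ActorRef, ActorSystem, Props }
import scala.concurrent.Future

object RestartSupervisorUsage extends App {
  implicit val system: ActorSystem = ActorSystem("example")

  // Hypothetical child actor.
  class SomeWorker extends Actor {
    def receive = { case msg => println(s"got $msg") }
  }

  // start() must create a fresh child on every (re)start attempt.
  def startWorker(): Future[ActorRef] =
    Future.successful(system.actorOf(Props(new SomeWorker)))

  val proxied: Future[ActorRef] =
    RestartSupervisor.wrap("worker", 3, () => startWorker())
}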
Example 192
Source File: GreeterActor.scala From akka-grpc with Apache License 2.0 | 5 votes |
package example.myapp.statefulhelloworld

import akka.actor.Actor
import akka.actor.Props

// #actor
object GreeterActor {
  case class ChangeGreeting(newGreeting: String)
  case object GetGreeting
  case class Greeting(greeting: String)

  def props(initialGreeting: String) = Props(new GreeterActor(initialGreeting))
}

class GreeterActor(initialGreeting: String) extends Actor {
  import GreeterActor._

  var greeting = Greeting(initialGreeting)

  def receive = {
    case GetGreeting => sender() ! greeting
    case ChangeGreeting(newGreeting) =>
      greeting = Greeting(newGreeting)
  }
}
// #actor
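A minimal sketch of talking to the actor with the ask pattern (system name and timeout are illustrative):

import akka.actor.ActorSystem
import akka.pattern.ask
import akka.util.Timeout
import example.myapp.statefulhelloworld.GreeterActor
import scala.concurrent.duration._

object GreeterUsage extends App {
  implicit val timeout: Timeout = 3.seconds
  val system = ActorSystem("greeter")
  import system.dispatcher

  val greeter = system.actorOf(GreeterActor.props("Hello"), "greeter")
  greeter ! GreeterActor.ChangeGreeting("Hi there")

  // ask returns a Future[Any]; mapTo narrows it to the expected reply type.
  (greeter ? GreeterActor.GetGreeting)
    .mapTo[GreeterActor.Greeting]
    .foreach(g => println(g.greeting))
}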
Example 193
Source File: FileMonitorActor.scala From graphql-gateway with Apache License 2.0 | 5 votes |
package sangria.gateway.file

import java.nio.file.{ NoSuchFileException, StandardWatchEventKinds }

import akka.actor.{ Actor, ActorRef, Cancellable, PoisonPill, Props }
import akka.event.Logging
import better.files._
import sangria.gateway.file.FileWatcher._

import scala.collection.mutable
import scala.concurrent.duration.FiniteDuration

class FileMonitorActor(paths: Seq[File], threshold: FiniteDuration, globs: Seq[String], cb: Vector[File] ⇒ Unit) extends Actor {
  import FileMonitorActor._
  import context.dispatcher

  val log = Logging(context.system, this)

  var watchers: Seq[ActorRef] = _
  val pendingFiles: mutable.HashSet[File] = mutable.HashSet[File]()
  var scheduled: Option[Cancellable] = None

  override def preStart(): Unit = {
    watchers = paths.map(_.newWatcher(recursive = true))

    watchers.foreach { watcher ⇒
      watcher ! when(events = StandardWatchEventKinds.ENTRY_CREATE, StandardWatchEventKinds.ENTRY_MODIFY, StandardWatchEventKinds.ENTRY_DELETE) {
        case (_, file) ⇒ self ! FileChange(file)
      }
    }
  }

  def receive = {
    case FileChange(file) ⇒
      try {
        if (file.exists && !file.isDirectory && globs.exists(file.glob(_, includePath = false).nonEmpty)) {
          pendingFiles += file

          if (scheduled.isEmpty)
            scheduled = Some(context.system.scheduler.scheduleOnce(threshold, self, Threshold))
        }
      } catch {
        case _: NoSuchFileException ⇒ // ignore, it's ok
      }

    case Threshold ⇒
      val files = pendingFiles.toVector.sortBy(_.name)

      if (files.nonEmpty) cb(files)

      pendingFiles.clear()
      scheduled = None
  }
}

object FileMonitorActor {
  case class FileChange(file: File)
  case object Threshold

  def props(paths: Seq[File], threshold: FiniteDuration, globs: Seq[String], cb: Vector[File] ⇒ Unit) =
    Props(new FileMonitorActor(paths, threshold, globs, cb))
}
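A sketch of starting the monitor; the watched path and glob are illustrative, and better-files plus the FileWatcher helpers above are assumed to be on the classpath:

import akka.actor.ActorSystem
import better.files._
import sangria.gateway.file.FileMonitorActor
import scala.concurrent.duration._

object FileMonitorUsage extends App {
  val system = ActorSystem("file-monitor")

  // Debounce events for one second, then hand all changed *.graphql files to the callback.
  system.actorOf(FileMonitorActor.props(
    paths = Seq(File("schemas")),
    threshold = 1.second,
    globs = Seq("*.graphql"),
    cb = files => println(s"Changed: ${files.map(_.name).mkString(", ")}")))
}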
Example 194
Source File: ProcessorFibonacci.scala From akka-cluster-playground with MIT License | 5 votes |
package com.elleflorio.cluster.playground.node.processor

import akka.actor.{ Actor, ActorRef, Props }

import scala.annotation.tailrec

object ProcessorFibonacci {
  sealed trait ProcessorFibonacciMessage
  case class Compute(n: Int, replyTo: ActorRef) extends ProcessorFibonacciMessage

  def props(nodeId: String) = Props(new ProcessorFibonacci(nodeId))

  def fibonacci(x: Int): BigInt = {
    @tailrec def fibHelper(x: Int, prev: BigInt = 0, next: BigInt = 1): BigInt = x match {
      case 0 => prev
      case 1 => next
      case _ => fibHelper(x - 1, next, next + prev)
    }
    fibHelper(x)
  }
}

class ProcessorFibonacci(nodeId: String) extends Actor {
  import ProcessorFibonacci._

  override def receive: Receive = {
    case Compute(value, replyTo) =>
      replyTo ! ProcessorResponse(nodeId, fibonacci(value))
  }
}
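Since fibonacci is a pure helper it can be checked directly; the actor part of the sketch below uses a throwaway printer actor as replyTo (names are illustrative):

import akka.actor.{ Actor, ActorSystem, Props }

object FibonacciUsage extends App {
  // fibonacci(10) = 55
  assert(ProcessorFibonacci.fibonacci(10) == BigInt(55))

  val system = ActorSystem("fib")
  val processor = system.actorOf(ProcessorFibonacci.props("node-1"))

  // Replies go to whatever ActorRef is passed as replyTo.
  val printer = system.actorOf(Props(new Actor {
    def receive = { case r => println(r); context.system.terminate() }
  }))
  processor ! ProcessorFibonacci.Compute(10, replyTo = printer)
}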
Example 195
Source File: Node.scala From akka-cluster-playground with MIT License | 5 votes |
package com.elleflorio.cluster.playground.node

import akka.actor.{ Actor, ActorRef, Props }
import akka.routing.FromConfig
import com.elleflorio.cluster.playground.node.Node.{ GetClusterMembers, GetFibonacci }
import com.elleflorio.cluster.playground.node.cluster.ClusterManager
import com.elleflorio.cluster.playground.node.cluster.ClusterManager.GetMembers
import com.elleflorio.cluster.playground.node.processor.Processor
import com.elleflorio.cluster.playground.node.processor.Processor.ComputeFibonacci

object Node {
  sealed trait NodeMessage
  case class GetFibonacci(n: Int)
  case object GetClusterMembers

  def props(nodeId: String) = Props(new Node(nodeId))
}

class Node(nodeId: String) extends Actor {

  val processor: ActorRef = context.actorOf(Processor.props(nodeId), "processor")
  val processorRouter: ActorRef = context.actorOf(FromConfig.props(Props.empty), "processorRouter")
  val clusterManager: ActorRef = context.actorOf(ClusterManager.props(nodeId), "clusterManager")

  override def receive: Receive = {
    case GetClusterMembers => clusterManager forward GetMembers
    case GetFibonacci(value) => processorRouter forward ComputeFibonacci(value)
  }
}
Example 196
Source File: ClusterListener.scala From akka-cluster-playground with MIT License | 5 votes |
package com.elleflorio.cluster.playground.node.cluster

import akka.actor.{ Actor, ActorLogging, Props }
import akka.cluster.Cluster
import akka.cluster.ClusterEvent._

object ClusterListener {
  def props(nodeId: String, cluster: Cluster) = Props(new ClusterListener(nodeId, cluster))
}

class ClusterListener(nodeId: String, cluster: Cluster) extends Actor with ActorLogging {

  override def preStart(): Unit = {
    cluster.subscribe(self, initialStateMode = InitialStateAsEvents,
      classOf[MemberEvent], classOf[UnreachableMember])
  }

  override def postStop(): Unit = cluster.unsubscribe(self)

  def receive = {
    // The stray `s` interpolator prefixes were dropped: the {} placeholders
    // are filled in by the logging adapter, not by string interpolation.
    case MemberUp(member) =>
      log.info("Node {} - Member is Up: {}", nodeId, member.address)
    case UnreachableMember(member) =>
      log.info("Node {} - Member detected as unreachable: {}", nodeId, member)
    case MemberRemoved(member, previousStatus) =>
      log.info("Node {} - Member is Removed: {} after {}", nodeId, member.address, previousStatus)
    case _: MemberEvent => // ignore
  }
}
Example 197
Source File: ClusterManager.scala From akka-cluster-playground with MIT License | 5 votes |
package com.elleflorio.cluster.playground.node.cluster

import akka.actor.{ Actor, ActorLogging, ActorRef, Props }
import akka.cluster.{ Cluster, MemberStatus }
import com.elleflorio.cluster.playground.Server.system
import com.elleflorio.cluster.playground.node.cluster.ClusterManager.GetMembers

object ClusterManager {
  sealed trait ClusterMessage
  case object GetMembers extends ClusterMessage

  def props(nodeId: String) = Props(new ClusterManager(nodeId))
}

class ClusterManager(nodeId: String) extends Actor with ActorLogging {

  val cluster: Cluster = Cluster(context.system)
  val listener: ActorRef = context.actorOf(ClusterListener.props(nodeId, cluster), "clusterListener")

  override def receive: Receive = {
    case GetMembers =>
      sender() ! cluster.state.members
        .filter(_.status == MemberStatus.up)
        .map(_.address.toString)
        .toList
  }
}
Example 198
Source File: Mapper.scala From CSYE7200 with MIT License | 5 votes |
package edu.neu.coe.csye7200.mapreduce

import akka.actor.{ Actor, ActorLogging, ActorRef }

import scala.collection.mutable
import scala.util._

class Mapper_Forgiving[K1, V1, K2, V2](f: (K1, V1) => (K2, V2)) extends Mapper[K1, V1, K2, V2](f) {

  override def prepareReply(v2k2ts: Seq[Try[(K2, V2)]]): (Map[K2, Seq[V2]], Seq[Throwable]) = {
    val v2sK2m = mutable.HashMap[K2, Seq[V2]]() // mutable
    // Must be a var: the original declared a val and discarded the result of
    // :+, so appended throwables were silently lost.
    var xs = Seq[Throwable]()
    // CONSIDER using traverse
    for (v2k2t <- v2k2ts; v2k2e = Master.sequence(v2k2t))
      v2k2e match {
        case Right((k2, v2)) => v2sK2m put (k2, v2 +: v2sK2m.getOrElse(k2, Nil))
        case Left(x) => xs = xs :+ x
      }
    (v2sK2m.toMap, xs)
  }
}

case class Incoming[K, V](m: Seq[(K, V)]) {
  override def toString = s"Incoming: with ${m.size} elements"
}

object Incoming {
  def sequence[K, V](vs: Seq[V]): Incoming[K, V] =
    Incoming((vs zip Stream.continually(null.asInstanceOf[K])).map { _.swap })

  def map[K, V](vKm: Map[K, V]): Incoming[K, V] = Incoming(vKm.toSeq)
}

object Mapper {
}
Example 199
Source File: Reducer.scala From CSYE7200 with MIT License | 5 votes |
package edu.neu.coe.csye7200.mapreduce

import akka.actor.{ Actor, ActorLogging, ActorRef }

import scala.util._

class Reducer_Fold[K2, V2, V3](g: (V3, V2) => V3, z: => V3) extends ReducerBase[K2, V2, V3] {
  def getValue(vs: Seq[V2]): V3 = vs.foldLeft(z)(g)
}

abstract class ReducerBase[K2, V2, V3] extends Actor with ActorLogging {

  override def receive = {
    case i: Intermediate[K2, V2] =>
      log.info(s"received $i")
      log.debug(s"with elements ${i.vs}")
      sender ! (i.k, Master.sequence(Try(getValue(i.vs))))
    case q =>
      log.warning(s"received unknown message type: $q")
  }

  override def postStop = {
    log.debug("has shut down")
  }

  def getValue(vs: Seq[V2]): V3
}

case class Intermediate[K2, V2](k: K2, vs: Seq[V2]) {
  override def toString = s"Intermediate: with k=$k and ${vs.size} elements"
}
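A sketch of instantiating the fold reducer, e.g. summing the values for a key; Master.sequence is project-specific, and the reply goes to the sender (deadLetters here, since we send from outside an actor):

import akka.actor.{ ActorSystem, Props }

object ReducerUsage extends App {
  val system = ActorSystem("mapreduce")

  // Fold each key's values into their sum, starting from zero.
  val summer = system.actorOf(
    Props(new Reducer_Fold[String, Int, Int](_ + _, 0)), "summer")

  summer ! Intermediate("apple", Seq(1, 2, 3)) // replies ("apple", wrapped 6)
}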
Example 200
Source File: UnaryNode.scala From ingraph with Eclipse Public License 1.0 | 5 votes |
package ingraph.ire.nodes.unary

import akka.actor.{ Actor, Stash }
import ingraph.ire.messages._
import ingraph.ire.messages.{ Forwarder, TerminatorHandler }

abstract class UnaryNode(val expectedTerminatorCount: Int = 1) extends Actor with Forwarder with Stash with TerminatorHandler {
  val name = self.path.name

  def onChangeSet(changeSet: ChangeSet)

  override def receive: Actor.Receive = {
    case pause: Pause => context.become({
      case resume: Resume =>
        if (resume.messageID == pause.messageID) {
          context.unbecome()
          unstashAll()
        } else stash()
      case terminator: TerminatorMessage => handleTerminator(terminator)
      case _ => stash()
    })
    case changeSet: ChangeSet => onChangeSet(changeSet)
    case terminator: TerminatorMessage => handleTerminator(terminator)
    case Primary | Secondary => throw new UnsupportedOperationException(s"$name received Beta-wrapped message")
    case _: SizeRequest => sender() ! onSizeRequest()
    case _ => throw new UnsupportedOperationException(s"$name received unknown message")
  }

  def onSizeRequest(): Long
}