akka.actor.Props Scala Examples
The following examples show how to use akka.actor.Props.
You can vote up the ones you like or vote down the ones you don't like,
and go to the original project or source file by following the links above each example.
Example 1
Source File: TestAkka.scala From DataXServer with Apache License 2.0 | 6 votes |
package org.tianlangstudio.data.hamal.akka

import akka.actor.{Props, ActorSystem, Actor}
import akka.actor.Actor.Receive

/**
 * Created by zhuhq on 2016/5/5.
 *
 * Demo app: sends alternating Remove/Add messages to a single actor that
 * simulates slow processing with a random sleep.
 */
object TestAkka extends App {
  val system = ActorSystem("test")
  val actor = system.actorOf(Props(classOf[TestAkka]))
  // 11 iterations (0 to 10 inclusive), two messages per iteration.
  for (i <- 0 to 10) {
    actor ! Remove()
    actor ! Add()
  }
}

class TestAkka extends Actor {

  // FIX: the original computed `1000 * math.ceil(math.random) * 10`, but
  // math.random is in [0, 1) so math.ceil of it is (almost) always 1.0 —
  // every sleep was a constant ~10s. The intent was a random 1..10 second
  // delay, so the factor of 10 belongs inside the ceil.
  private def randomDelayMillis: Long = (1000 * math.ceil(math.random * 10)).toLong

  override def receive: Receive = {
    case Remove() =>
      println("remove begin")
      Thread.sleep(randomDelayMillis)
      println("remove end")
    case Add() =>
      println("add begin")
      Thread.sleep(randomDelayMillis)
      println("add end")
  }
}

case class Remove()
case class Add()
Example 2
Source File: AkkaExecutionSequencer.scala From daml with Apache License 2.0 | 5 votes |
// NOTE(review): this listing appears to have lost the enclosing
// `class AkkaExecutionSequencer(actorRef: ActorRef)(implicit ...)` declaration
// that should wrap `closeAsync`/`actorIsTerminated` below — `actorRef` is
// referenced but never declared in the visible text. TODO: confirm against the
// upstream daml repository before reusing this snippet.
//
// What the visible code shows:
//  - closeAsync: asks the sequencing actor to shut down; an AskTimeoutException
//    whose message matches actorTerminatedRegex is treated as "already stopped"
//    and converted to a successful Done.
//  - AkkaExecutionSequencer.apply: prefers systemActorOf (top-level system
//    actor) on an ExtendedActorSystem, falling back to plain actorOf.
//  - RunnableSequencingActor: runs submitted Runnables one at a time,
//    swallowing (but logging) non-fatal exceptions; on ShutdownRequest it stops
//    itself and acknowledges with Done.
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. // SPDX-License-Identifier: Apache-2.0 package com.daml.grpc.adapter import akka.Done import akka.actor.{Actor, ActorLogging, ActorRef, ActorSystem, ExtendedActorSystem, Props} import akka.pattern.{AskTimeoutException, ask} import akka.util.Timeout import com.daml.grpc.adapter.RunnableSequencingActor.ShutdownRequest import scala.concurrent.duration.FiniteDuration import scala.concurrent.{ExecutionContext, Future} import scala.util.control.NonFatal import com.daml.dec.DirectExecutionContext def closeAsync(implicit ec: ExecutionContext): Future[Done] = (actorRef ? ShutdownRequest).mapTo[Done].recover { case askTimeoutException: AskTimeoutException if actorIsTerminated(askTimeoutException) => Done } private def actorIsTerminated(askTimeoutException: AskTimeoutException) = { AkkaExecutionSequencer.actorTerminatedRegex.findFirstIn(askTimeoutException.getMessage).nonEmpty } } object AkkaExecutionSequencer { def apply(name: String, terminationTimeout: FiniteDuration)( implicit system: ActorSystem): AkkaExecutionSequencer = { system match { case extendedSystem: ExtendedActorSystem => new AkkaExecutionSequencer( extendedSystem.systemActorOf(Props[RunnableSequencingActor], name))( Timeout.durationToTimeout(terminationTimeout)) case _ => new AkkaExecutionSequencer(system.actorOf(Props[RunnableSequencingActor], name))( Timeout.durationToTimeout(terminationTimeout)) } } private val actorTerminatedRegex = """Recipient\[.*]\] had already been terminated.""".r } private[grpc] class RunnableSequencingActor extends Actor with ActorLogging { @SuppressWarnings(Array("org.wartremover.warts.Any")) override val receive: Receive = { case runnable: Runnable => try { runnable.run() } catch { case NonFatal(t) => log.error("Unexpected exception while executing Runnable", t) } case ShutdownRequest => context.stop(self) // processing of the current message will continue sender() ! 
Done } } private[grpc] object RunnableSequencingActor { case object ShutdownRequest }
Example 3
Source File: AkkaResourceOwnerSpec.scala From daml with Apache License 2.0 | 5 votes |
// Test spec for AkkaResourceOwner. Two scenarios are visible:
//  1) an ActorSystem acquired via ResourceOwner works while held (an actor
//     created from it receives the Int 7 and completes testPromise), and after
//     release() the terminated system rejects actorOf with
//     IllegalStateException;
//  2) a Materializer acquired the same way runs a stream (Source(1 to 10)
//     collected to a Seq), and after release() running another stream throws
//     IllegalStateException.
// NOTE(review): ordering matters here — the second `for` deliberately releases
// the resource *between* the successful use and the failing use; do not
// reorder when editing.
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. // SPDX-License-Identifier: Apache-2.0 package com.daml.resources.akka import akka.actor.{Actor, ActorSystem, Props} import akka.stream.Materializer import akka.stream.scaladsl.{Keep, Sink, Source} import akka.{Done, NotUsed} import com.daml.resources.ResourceOwner import org.scalatest.{AsyncWordSpec, Matchers} import scala.concurrent.{Future, Promise} class AkkaResourceOwnerSpec extends AsyncWordSpec with Matchers { "a function returning an ActorSystem" should { "convert to a ResourceOwner" in { val testPromise = Promise[Int]() class TestActor extends Actor { @SuppressWarnings(Array("org.wartremover.warts.Any")) override def receive: Receive = { case value: Int => testPromise.success(value) case value => testPromise.failure(new IllegalArgumentException(s"$value")) } } val resource = for { actorSystem <- AkkaResourceOwner .forActorSystem(() => ActorSystem("TestActorSystem")) .acquire() actor <- ResourceOwner .successful(actorSystem.actorOf(Props(new TestActor))) .acquire() } yield (actorSystem, actor) for { resourceFuture <- resource.asFuture (actorSystem, actor) = resourceFuture _ = actor ! 
7 result <- testPromise.future _ <- resource.release() } yield { result should be(7) an[IllegalStateException] should be thrownBy actorSystem.actorOf(Props(new TestActor)) } } } "a function returning a Materializer" should { "convert to a ResourceOwner" in { val resource = for { actorSystem <- AkkaResourceOwner .forActorSystem(() => ActorSystem("TestActorSystem")) .acquire() materializer <- AkkaResourceOwner.forMaterializer(() => Materializer(actorSystem)).acquire() } yield materializer for { materializer <- resource.asFuture numbers <- Source(1 to 10) .toMat(Sink.seq)(Keep.right[NotUsed, Future[Seq[Int]]]) .run()(materializer) _ <- resource.release() } yield { numbers should be(1 to 10) an[IllegalStateException] should be thrownBy Source .single(0) .toMat(Sink.ignore)(Keep.right[NotUsed, Future[Done]]) .run()(materializer) } } } }
Example 4
Source File: ChaosSetup.scala From eventuate-chaos with Apache License 2.0 | 5 votes |
package com.rbmhtechnology.eventuate.chaos

import akka.actor.ActorSystem
import akka.actor.Props
import akka.pattern.BackoffSupervisor
import com.rbmhtechnology.eventuate.ReplicationConnection
import com.rbmhtechnology.eventuate.ReplicationEndpoint
import com.typesafe.config.ConfigFactory

import scala.concurrent.duration.DurationInt

/**
 * Common bootstrap for the chaos-test entry points: reads the node name and
 * peer connections from the command line and provides the shared remoting /
 * eventuate configuration.
 */
trait ChaosSetup extends App {

  def getSystem: ActorSystem

  def getEndpoint(implicit system: ActorSystem): ReplicationEndpoint

  /** Remoting and eventuate settings shared by every chaos node. */
  protected def baseConfig(hostname: String) = ConfigFactory.parseString(
    s"""
       |akka.actor.provider = "akka.remote.RemoteActorRefProvider"
       |akka.remote.enabled-transports = ["akka.remote.netty.tcp"]
       |akka.remote.netty.tcp.hostname = "$hostname"
       |akka.remote.netty.tcp.port = 2552
       |akka.test.single-expect-default = 10s
       |akka.loglevel = "INFO"
       |eventuate.log.write-batch-size = 16
       |eventuate.log.read-timeout = 3s
       |eventuate.log.retry-delay = 3s
       |akka.remote.netty.tcp.maximum-frame-size = 1024000b
      """.stripMargin)

  /** Wraps `str` in double quotes (for embedding into HOCON snippets). */
  protected def quote(str: String) = "\"" + str + "\""

  /** Runs `props` under a backoff supervisor (1s..30s delay, 10% jitter). */
  protected def supervised(props: Props, name: String): Props =
    BackoffSupervisor.props(props, name, 1.second, 30.seconds, 0.1)

  /** First CLI argument (node name); exits the process when it is missing. */
  def name = {
    if (args == null || args.length < 1) {
      Console.err.println("no <nodename> specified")
      sys.exit(1)
    } else {
      args(0)
    }
  }

  def hostname = sys.env.getOrElse("HOSTNAME", s"$name.eventuate-chaos.docker")

  // replication connection to other node(s); each extra argument is "host[:port]"
  def connections = args.drop(1).map { spec =>
    spec.split(":") match {
      case Array(host, port) => ReplicationConnection(host, port.toInt)
      case Array(host)       => ReplicationConnection(host, 2552)
    }
  }.toSet
}
Example 5
Source File: ChaosCounter.scala From eventuate-chaos with Apache License 2.0 | 5 votes |
package com.rbmhtechnology.eventuate.chaos

import akka.actor.Props
import com.rbmhtechnology.eventuate.ReplicationEndpoint
import com.rbmhtechnology.eventuate.crdt._

/** Chaos counter node backed by the LevelDB event log. */
object ChaosCounterLeveldb extends ChaosLeveldbSetup {
  implicit val system = getSystem
  val endpoint = getEndpoint

  val service = new CounterService[Int](name, endpoint.logs(ReplicationEndpoint.DefaultLogName))
  system.actorOf(Props(ChaosCounterInterface(service)))
}

/** Chaos counter node backed by the Cassandra event log. */
object ChaosCounterCassandra extends ChaosCassandraSetup {
  implicit val system = getSystem
  val endpoint = getEndpoint

  val service = new CounterService[Int](name, endpoint.logs(ReplicationEndpoint.DefaultLogName))
  system.actorOf(Props(ChaosCounterInterface(service)))
}

/** Chaos counter node using the pure (op-based) CRDT service, on LevelDB. */
object ChaosPureCounterLeveldb extends ChaosLeveldbSetup {
  implicit val system = getSystem
  val endpoint = getEndpoint

  val service = new pure.CounterService[Int](name, endpoint.logs(ReplicationEndpoint.DefaultLogName))
  system.actorOf(Props(ChaosCounterInterface(service)))
}
Example 6
Source File: ChaosAWSet.scala From eventuate-chaos with Apache License 2.0 | 5 votes |
package com.rbmhtechnology.eventuate.chaos.crdt.pure

import akka.actor.ActorRef
import akka.actor.Props
import akka.util.Timeout
import com.rbmhtechnology.eventuate.ReplicationEndpoint
import com.rbmhtechnology.eventuate.chaos.ChaosInterface
import com.rbmhtechnology.eventuate.chaos.ChaosLeveldbSetup
import com.rbmhtechnology.eventuate.crdt.pure.AWSetService

/**
 * Chaos-test command interface around a pure AWSet (add-wins set) of Ints.
 * Supported commands: add/remove (with a value), clear and get; every command
 * replies with the resulting set rendered as "[e1,e2,...]".
 */
class ChaosAWSetInterface(service: AWSetService[Int]) extends ChaosInterface {

  val setId = "test"

  import scala.concurrent.duration._

  implicit val timeout = Timeout(1.seconds)

  // Renders the set and sends the text back to the requesting actor.
  private def writeSet(set: Set[Int], receiver: ActorRef) =
    reply(s"[${set.mkString(",")}]", receiver)

  def handleCommand = {
    case ("add", Some(v), recv)    => service.add(setId, v).map(writeSet(_, recv))
    case ("remove", Some(v), recv) => service.remove(setId, v).map(writeSet(_, recv))
    case ("clear", None, recv)     => service.clear(setId).map(writeSet(_, recv))
    case ("get", None, recv)       => service.value(setId).map(writeSet(_, recv))
  }
}

/** Entry point wiring the AWSet interface to a LevelDB-backed endpoint. */
object ChaosAWSetLeveldb extends ChaosLeveldbSetup {
  implicit val system = getSystem
  val endpoint = getEndpoint

  val service = new AWSetService[Int](name, endpoint.logs(ReplicationEndpoint.DefaultLogName))
  system.actorOf(Props(new ChaosAWSetInterface(service)))
}
Example 7
Source File: RouterInstrumentation.scala From prometheus-akka with Apache License 2.0 | 5 votes |
// AspectJ instrumentation for akka.routing.RoutedActorCell:
//  - routedActorCellCreation/afterRoutedActorCellCreation: after a routed cell
//    is constructed, attach a RouterMonitor to it via the mixed-in
//    RouterInstrumentationAware interface.
//  - sendMessageInRouterActorCell/aroundSendMessageInRouterActorCell: wrap
//    sendMessage so the RouterMonitor can time/observe message processing.
//  - MetricsIntoRouterCellsMixin: @DeclareMixin injects the
//    RouterInstrumentationAware holder into every RoutedActorCell.
// NOTE(review): the @Pointcut/@After/@Around string arguments are matched by
// the AspectJ weaver at runtime — they must stay byte-identical to the cell's
// constructor/method signatures; do not reformat or "fix" them.
package akka.monitor.instrumentation import org.aspectj.lang.ProceedingJoinPoint import org.aspectj.lang.annotation.{ After, Around, Aspect, DeclareMixin, Pointcut } import akka.actor.{ ActorRef, ActorSystem, Cell, Props } import akka.dispatch.{ Envelope, MessageDispatcher } import akka.routing.RoutedActorCell @Aspect class RoutedActorCellInstrumentation { def routerInstrumentation(cell: Cell): RouterMonitor = cell.asInstanceOf[RouterInstrumentationAware].routerInstrumentation @Pointcut("execution(akka.routing.RoutedActorCell.new(..)) && this(cell) && args(system, ref, props, dispatcher, routeeProps, supervisor)") def routedActorCellCreation(cell: RoutedActorCell, system: ActorSystem, ref: ActorRef, props: Props, dispatcher: MessageDispatcher, routeeProps: Props, supervisor: ActorRef): Unit = {} @After("routedActorCellCreation(cell, system, ref, props, dispatcher, routeeProps, supervisor)") def afterRoutedActorCellCreation(cell: RoutedActorCell, system: ActorSystem, ref: ActorRef, props: Props, dispatcher: MessageDispatcher, routeeProps: Props, supervisor: ActorRef): Unit = { cell.asInstanceOf[RouterInstrumentationAware].setRouterInstrumentation( RouterMonitor.createRouterInstrumentation(cell)) } @Pointcut("execution(* akka.routing.RoutedActorCell.sendMessage(*)) && this(cell) && args(envelope)") def sendMessageInRouterActorCell(cell: RoutedActorCell, envelope: Envelope) = {} @Around("sendMessageInRouterActorCell(cell, envelope)") def aroundSendMessageInRouterActorCell(pjp: ProceedingJoinPoint, cell: RoutedActorCell, envelope: Envelope): Any = { routerInstrumentation(cell).processMessage(pjp) } } trait RouterInstrumentationAware { def routerInstrumentation: RouterMonitor def setRouterInstrumentation(ai: RouterMonitor): Unit } object RouterInstrumentationAware { def apply(): RouterInstrumentationAware = new RouterInstrumentationAware { private var _ri: RouterMonitor = _ override def setRouterInstrumentation(ai: RouterMonitor): Unit = _ri = ai override def 
routerInstrumentation: RouterMonitor = _ri } } @Aspect class MetricsIntoRouterCellsMixin { @DeclareMixin("akka.routing.RoutedActorCell") def mixinActorCellMetricsToRoutedActorCell: RouterInstrumentationAware = RouterInstrumentationAware() }
Example 8
Source File: EnvelopeSpec.scala From prometheus-akka with Apache License 2.0 | 5 votes |
// Verifies the envelope instrumentation:
//  1) a freshly created akka.dispatch.Envelope has InstrumentedEnvelope mixed
//     in (set an EnvelopeContext on it, else fail);
//  2) the envelope survives a Java-serialization round trip with the mixin and
//     a non-null envelopeContext intact. The round trip must run inside
//     JavaSerializer.currentSystem.withValue(...) so actor refs deserialize
//     against the test system.
package akka.monitor.instrumentation import com.workday.prometheus.akka.TestKitBaseSpec import akka.actor.{Actor, ExtendedActorSystem, Props} import akka.dispatch.Envelope class EnvelopeSpec extends TestKitBaseSpec("envelope-spec") { "EnvelopeInstrumentation" should { "mixin EnvelopeContext" in { val actorRef = system.actorOf(Props[NoReply]) val env = Envelope("msg", actorRef, system).asInstanceOf[Object] env match { case e: Envelope with InstrumentedEnvelope => e.setEnvelopeContext(EnvelopeContext()) case _ => fail("InstrumentedEnvelope is not mixed in") } env match { case s: Serializable => { import java.io._ val bos = new ByteArrayOutputStream val oos = new ObjectOutputStream(bos) oos.writeObject(env) oos.close() akka.serialization.JavaSerializer.currentSystem.withValue(system.asInstanceOf[ExtendedActorSystem]) { val ois = new ObjectInputStream(new ByteArrayInputStream(bos.toByteArray())) val obj = ois.readObject() ois.close() obj match { case e: Envelope with InstrumentedEnvelope => e.envelopeContext() should not be null case _ => fail("InstrumentedEnvelope is not mixed in") } } } case _ => fail("envelope is not serializable") } } } } class NoReply extends Actor { override def receive = { case any ⇒ } }
Example 9
Source File: RedeliveryActors.scala From kmq with Apache License 2.0 | 5 votes |
package com.softwaremill.kmq.redelivery

import java.io.Closeable
import java.util.Collections

import akka.actor.{ActorSystem, Props}
import com.softwaremill.kmq.{KafkaClients, KmqConfig}
import com.typesafe.scalalogging.StrictLogging

import scala.concurrent.Await
import scala.concurrent.duration._
import scala.collection.JavaConverters._

object RedeliveryActors extends StrictLogging {

  /**
   * Boots a dedicated "kmq-redelivery" actor system, starts the markers
   * consumer and kicks it off with DoConsume.
   *
   * @return a Closeable that terminates the actor system (waits up to 1 minute)
   */
  def start(clients: KafkaClients, config: KmqConfig): Closeable = {
    val system = ActorSystem("kmq-redelivery")

    val markersConsumer =
      system.actorOf(Props(new ConsumeMarkersActor(clients, config)), "consume-markers-actor")
    markersConsumer ! DoConsume

    logger.info("Started redelivery actors")

    new Closeable {
      override def close(): Unit = Await.result(system.terminate(), 1.minute)
    }
  }
}
Example 10
Source File: KnownNodesManager.scala From mantis with Apache License 2.0 | 5 votes |
package io.iohk.ethereum.network

import java.net.URI

import akka.actor.{Actor, ActorLogging, Props, Scheduler}
import io.iohk.ethereum.db.storage.KnownNodesStorage
import io.iohk.ethereum.network.KnownNodesManager.KnownNodesManagerConfig

import scala.concurrent.duration._
import scala.concurrent.ExecutionContext.Implicits.global

/**
 * Maintains the in-memory set of known peer URIs and periodically flushes the
 * pending additions/removals to persistent storage.
 */
class KnownNodesManager(
    config: KnownNodesManagerConfig,
    knownNodesStorage: KnownNodesStorage,
    externalSchedulerOpt: Option[Scheduler] = None)
  extends Actor with ActorLogging {

  import KnownNodesManager._

  // Allows tests to inject a scheduler; falls back to the system one.
  private def scheduler = externalSchedulerOpt getOrElse context.system.scheduler

  var knownNodes: Set[URI] = knownNodesStorage.getKnownNodes()
  // Deltas accumulated since the last persist.
  var toAdd: Set[URI] = Set.empty
  var toRemove: Set[URI] = Set.empty

  scheduler.schedule(config.persistInterval, config.persistInterval, self, PersistChanges)

  override def receive: Receive = {
    case AddKnownNode(uri) =>
      if (!knownNodes.contains(uri)) {
        knownNodes += uri
        toAdd += uri
        toRemove -= uri
      }

    case RemoveKnownNode(uri) =>
      if (knownNodes.contains(uri)) {
        knownNodes -= uri
        toAdd -= uri
        toRemove += uri
      }

    case GetKnownNodes =>
      sender() ! KnownNodes(knownNodes)

    case PersistChanges =>
      persistChanges()
  }

  /** Caps the set to maxPersistedNodes, then writes pending deltas (if any). */
  private def persistChanges(): Unit = {
    log.debug(s"Persisting ${knownNodes.size} known nodes.")
    if (knownNodes.size > config.maxPersistedNodes) {
      val toAbandon = knownNodes.take(knownNodes.size - config.maxPersistedNodes)
      toRemove ++= toAbandon
      toAdd --= toAbandon
    }
    if (toAdd.nonEmpty || toRemove.nonEmpty) {
      knownNodesStorage.updateKnownNodes(toAdd = toAdd, toRemove = toRemove)
      toAdd = Set.empty
      toRemove = Set.empty
    }
  }
}

object KnownNodesManager {

  def props(config: KnownNodesManagerConfig, knownNodesStorage: KnownNodesStorage): Props =
    Props(new KnownNodesManager(config, knownNodesStorage))

  case class AddKnownNode(uri: URI)
  case class RemoveKnownNode(uri: URI)
  case object GetKnownNodes
  case class KnownNodes(nodes: Set[URI])

  private case object PersistChanges

  case class KnownNodesManagerConfig(persistInterval: FiniteDuration, maxPersistedNodes: Int)

  object KnownNodesManagerConfig {
    def apply(etcClientConfig: com.typesafe.config.Config): KnownNodesManagerConfig = {
      val knownNodesManagerConfig = etcClientConfig.getConfig("network.known-nodes")
      KnownNodesManagerConfig(
        persistInterval = knownNodesManagerConfig.getDuration("persist-interval").toMillis.millis,
        maxPersistedNodes = knownNodesManagerConfig.getInt("max-persisted-nodes"))
    }
  }
}
Example 11
Source File: ServerActor.scala From mantis with Apache License 2.0 | 5 votes |
package io.iohk.ethereum.network

import java.net.InetSocketAddress

import akka.actor.{Actor, ActorLogging, ActorRef, Props}
import akka.agent.Agent
import akka.io.Tcp.{Bind, Bound, CommandFailed, Connected}
import akka.io.{IO, Tcp}
import io.iohk.ethereum.utils.{NodeStatus, ServerStatus}
import org.spongycastle.util.encoders.Hex

/**
 * Binds a TCP listener and hands every inbound connection to the peer manager.
 * State machine: initial -> waitingForBindingResult -> listening.
 */
class ServerActor(nodeStatusHolder: Agent[NodeStatus], peerManager: ActorRef) extends Actor with ActorLogging {

  import ServerActor._
  import context.system

  override def receive: Receive = {
    case StartServer(address) =>
      IO(Tcp) ! Bind(self, address)
      context become waitingForBindingResult
  }

  def waitingForBindingResult: Receive = {
    case Bound(localAddress) =>
      val nodeStatus = nodeStatusHolder()
      log.info("Listening on {}", localAddress)
      // `getHostName` is provided elsewhere in this package.
      log.info(
        "Node address: enode://{}@{}:{}",
        Hex.toHexString(nodeStatus.nodeId),
        getHostName(localAddress.getAddress),
        localAddress.getPort)
      nodeStatusHolder.send(_.copy(serverStatus = ServerStatus.Listening(localAddress)))
      context become listening

    case CommandFailed(b: Bind) =>
      log.warning("Binding to {} failed", b.localAddress)
      context stop self
  }

  def listening: Receive = {
    case Connected(remoteAddress, _) =>
      val connection = sender()
      peerManager ! PeerManagerActor.HandlePeerConnection(connection, remoteAddress)
  }
}

object ServerActor {

  def props(nodeStatusHolder: Agent[NodeStatus], peerManager: ActorRef): Props =
    Props(new ServerActor(nodeStatusHolder, peerManager))

  case class StartServer(address: InetSocketAddress)
}
Example 12
Source File: OmmersPool.scala From mantis with Apache License 2.0 | 5 votes |
package io.iohk.ethereum.ommers

import akka.actor.{Actor, Props}
import io.iohk.ethereum.domain.{BlockHeader, Blockchain}
import io.iohk.ethereum.ommers.OmmersPool.{AddOmmers, GetOmmers, RemoveOmmers}
import io.iohk.ethereum.utils.MiningConfig

/**
 * Keeps a bounded pool of candidate ommer (uncle) block headers and serves
 * the ones eligible for inclusion in a block at a given number.
 */
class OmmersPool(blockchain: Blockchain, miningConfig: MiningConfig) extends Actor {

  var ommersPool: Seq[BlockHeader] = Nil

  val ommerGenerationLimit: Int = 6 //Stated on section 11.1, eq. (143) of the YP
  val ommerSizeLimit: Int = 2

  override def receive: Receive = {
    case AddOmmers(ommers) =>
      // FIX: deduplicate BEFORE capping. The original
      // `.take(ommersPoolSize).distinct` first truncated to the pool size and
      // only then removed duplicates, so re-added headers could shrink the
      // pool below capacity even when more unique candidates were available.
      ommersPool = (ommers ++ ommersPool).distinct.take(miningConfig.ommersPoolSize)

    case RemoveOmmers(ommers) =>
      val toDelete = ommers.map(_.hash).toSet
      ommersPool = ommersPool.filter(b => !toDelete.contains(b.hash))

    case GetOmmers(blockNumber) =>
      // Eligible: within the generation window (1..6 blocks back) and with a
      // parent known to the blockchain; at most ommerSizeLimit of them.
      val ommers = ommersPool.filter { b =>
        val generationDifference = blockNumber - b.number
        generationDifference > 0 && generationDifference <= ommerGenerationLimit
      }.filter { b =>
        blockchain.getBlockHeaderByHash(b.parentHash).isDefined
      }.take(ommerSizeLimit)
      sender() ! OmmersPool.Ommers(ommers)
  }
}

object OmmersPool {

  def props(blockchain: Blockchain, miningConfig: MiningConfig): Props =
    Props(new OmmersPool(blockchain, miningConfig))

  case class AddOmmers(ommers: List[BlockHeader])
  object AddOmmers {
    def apply(b: BlockHeader*): AddOmmers = AddOmmers(b.toList)
  }

  case class RemoveOmmers(ommers: List[BlockHeader])
  object RemoveOmmers {
    def apply(b: BlockHeader*): RemoveOmmers = RemoveOmmers(b.toList)
  }

  case class GetOmmers(blockNumber: BigInt)

  case class Ommers(headers: Seq[BlockHeader])
}
Example 13
Source File: BlockchainHostActor.scala From mantis with Apache License 2.0 | 5 votes |
// NOTE(review): this listing appears to have lost the
// `class BlockchainHostActor(...) extends Actor ...` declaration that should
// precede `handleBlockFastDownload` — the method references `peerConfiguration`
// and `blockchain` which are never declared in the visible text. TODO: confirm
// against the upstream mantis repository.
//
// handleBlockFastDownload serves fast-sync data requests:
//  - GetReceipts: up to maxReceiptsPerMessage receipts looked up by hash;
//  - GetBlockBodies: up to maxBlocksBodiesPerMessage bodies looked up by hash;
//  - GetBlockHeaders: resolves the starting block (by number or by hash),
//    then builds an arithmetic range honoring maxHeaders, skip and reverse,
//    clamped to maxBlocksHeadersPerMessage; invalid requests are logged and
//    answered with None.
package io.iohk.ethereum.blockchain.sync import akka.actor.{Actor, ActorLogging, ActorRef, Props} import akka.util.ByteString import io.iohk.ethereum.domain.{BlockHeader, Blockchain} import io.iohk.ethereum.network.PeerEventBusActor.PeerEvent.MessageFromPeer import io.iohk.ethereum.network.PeerEventBusActor.SubscriptionClassifier.MessageClassifier import io.iohk.ethereum.network.PeerEventBusActor.{PeerSelector, Subscribe} import io.iohk.ethereum.network.PeerManagerActor.PeerConfiguration import io.iohk.ethereum.network.p2p.{Message, MessageSerializable} import io.iohk.ethereum.network.p2p.messages.PV62.{BlockBodies, BlockHeaders, GetBlockBodies, GetBlockHeaders} import io.iohk.ethereum.network.p2p.messages.PV63.{GetNodeData, GetReceipts, NodeData, Receipts} import io.iohk.ethereum.network.p2p.messages.PV63.MptNodeEncoders._ import io.iohk.ethereum.network.EtcPeerManagerActor private def handleBlockFastDownload(message: Message): Option[MessageSerializable] = message match { case request: GetReceipts => val receipts = request.blockHashes.take(peerConfiguration.fastSyncHostConfiguration.maxReceiptsPerMessage) .flatMap(hash => blockchain.getReceiptsByHash(hash)) Some(Receipts(receipts)) case request: GetBlockBodies => val blockBodies = request.hashes.take(peerConfiguration.fastSyncHostConfiguration.maxBlocksBodiesPerMessage) .flatMap(hash => blockchain.getBlockBodyByHash(hash)) Some(BlockBodies(blockBodies)) case request: GetBlockHeaders => val blockNumber = request.block.fold(a => Some(a), b => blockchain.getBlockHeaderByHash(b).map(_.number)) blockNumber match { case Some(startBlockNumber) if startBlockNumber >= 0 && request.maxHeaders >= 0 && request.skip >= 0 => val headersCount: BigInt = request.maxHeaders min peerConfiguration.fastSyncHostConfiguration.maxBlocksHeadersPerMessage val range = if (request.reverse) { startBlockNumber to (startBlockNumber - (request.skip + 1) * headersCount + 1) by -(request.skip + 1) } else { startBlockNumber to (startBlockNumber + 
(request.skip + 1) * headersCount - 1) by (request.skip + 1) } val blockHeaders: Seq[BlockHeader] = range.flatMap { a: BigInt => blockchain.getBlockHeaderByNumber(a) } Some(BlockHeaders(blockHeaders)) case _ => log.warning("got request for block headers with invalid block hash/number: {}", request) None } case _ => None } } object BlockchainHostActor { def props(blockchain: Blockchain, peerConfiguration: PeerConfiguration, peerEventBusActor: ActorRef, etcPeerManagerActor: ActorRef): Props = Props(new BlockchainHostActor(blockchain, peerConfiguration, peerEventBusActor, etcPeerManagerActor)) }
Example 14
Source File: PersonTest.scala From akka-serialization-test with Apache License 2.0 | 5 votes |
// Persistence test for the Person aggregate:
//  - withPerson creates a fresh Person actor for an id and guarantees cleanup;
//  - "register a name" sends RegisterNameCommand twice (two incarnations of
//    "p1") and expects two NameRegisteredEvent entries from the journal;
//  - "update its name and surname" sends register + change-name +
//    change-surname to "p2" and expects the three corresponding events.
// NOTE: as the inline comment says, persistence-query bypasses the
// deserializer, so each protobuf message is decoded inline via the implicit
// ProtobufReader — keep the collect cases in sync with the event types.
package com.github.dnvriend.domain import akka.actor.{ ActorRef, Props } import akka.pattern.ask import akka.persistence.query.EventEnvelope import akka.stream.scaladsl.{ Sink, Source } import akka.testkit.TestProbe import com.github.dnvriend.TestSpec import com.github.dnvriend.domain.Person._ import com.github.dnvriend.persistence.ProtobufReader import proto.person.Command._ class PersonTest extends TestSpec { import com.github.dnvriend.persistence.ProtobufFormats._ def withPerson(id: String)(f: ActorRef ⇒ TestProbe ⇒ Unit): Unit = { val tp = TestProbe() val ref = system.actorOf(Props(new Person(id))) try f(ref)(tp) finally killActors(ref) } "Person" should "register a name" in { withPerson("p1") { ref ⇒ tp ⇒ Source(List(RegisterNameCommand("dennis", "vriend"))) .mapAsync(1)(ref ? _).runWith(Sink.ignore).futureValue } withPerson("p1") { ref ⇒ tp ⇒ Source(List(RegisterNameCommand("dennis", "vriend"))) .mapAsync(1)(ref ? _).runWith(Sink.ignore).futureValue } // note that the persistence-query does not use the deserializer // so the protobuf must be deserialized inline eventsForPersistenceIdSource("p1").collect { case EventEnvelope(_, _, _, proto: NameRegisteredMessage) ⇒ implicitly[ProtobufReader[NameRegisteredEvent]].read(proto) }.testProbe { tp ⇒ tp.request(Int.MaxValue) tp.expectNext(NameRegisteredEvent("dennis", "vriend")) tp.expectNext(NameRegisteredEvent("dennis", "vriend")) tp.expectComplete() } } it should "update its name and surname" in { withPerson("p2") { ref ⇒ tp ⇒ Source(List(RegisterNameCommand("dennis", "vriend"), ChangeNameCommand("jimi"), ChangeSurnameCommand("hendrix"))) .mapAsync(1)(ref ? 
_).runWith(Sink.ignore).futureValue } eventsForPersistenceIdSource("p2").collect { case EventEnvelope(_, _, _, proto: NameRegisteredMessage) ⇒ implicitly[ProtobufReader[NameRegisteredEvent]].read(proto) case EventEnvelope(_, _, _, proto: NameChangedMessage) ⇒ implicitly[ProtobufReader[NameChangedEvent]].read(proto) case EventEnvelope(_, _, _, proto: SurnameChangedMessage) ⇒ implicitly[ProtobufReader[SurnameChangedEvent]].read(proto) }.testProbe { tp ⇒ tp.request(Int.MaxValue) tp.expectNext(NameRegisteredEvent("dennis", "vriend")) tp.expectNext(NameChangedEvent("jimi")) tp.expectNext(SurnameChangedEvent("hendrix")) tp.expectComplete() } } }
Example 15
Source File: WriteExchangeTransactionActor.scala From matcher with MIT License | 5 votes |
package com.wavesplatform.dex.actors.tx

import akka.actor.{Actor, Props}
import com.wavesplatform.dex.db.DbKeys
import com.wavesplatform.dex.db.leveldb.{DBExt, RW}
import com.wavesplatform.dex.domain.bytes.ByteStr
import com.wavesplatform.dex.domain.transaction.ExchangeTransaction
import com.wavesplatform.dex.domain.utils.ScorexLogging
import com.wavesplatform.dex.model.Events._
import org.iq80.leveldb.DB

/**
 * Persists newly created exchange transactions to LevelDB and indexes each
 * one under both participating orders. Idempotent: a tx that is already
 * stored is skipped entirely.
 */
class WriteExchangeTransactionActor(db: DB) extends Actor with ScorexLogging {

  import WriteExchangeTransactionActor._

  override def receive: Receive = {
    case ExchangeTransactionCreated(tx) => saveExchangeTx(tx)
  }

  private def saveExchangeTx(tx: ExchangeTransaction): Unit = db.readWrite { rw =>
    log.trace(s"Appending ${tx.id()} to orders [${tx.buyOrder.idStr()}, ${tx.sellOrder.idStr()}]")
    val txKey = DbKeys.exchangeTransaction(tx.id())
    if (!rw.has(txKey)) {
      rw.put(txKey, Some(tx))
      appendTxId(rw, tx.buyOrder.id(), tx.id())
      appendTxId(rw, tx.sellOrder.id(), tx.id())
    }
  }
}

object WriteExchangeTransactionActor {

  def name: String = "WriteExchangeTransactionActor"

  def props(db: DB): Props = Props(new WriteExchangeTransactionActor(db))

  /** Appends `txId` to the per-order tx-id sequence, bumping the seq counter. */
  def appendTxId(rw: RW, orderId: ByteStr, txId: ByteStr): Unit = {
    val key = DbKeys.orderTxIdsSeqNr(orderId)
    val nextSeqNr = rw.get(key) + 1
    rw.put(key, nextSeqNr)
    rw.put(DbKeys.orderTxId(orderId, nextSeqNr), txId)
  }
}
Example 16
Source File: CreateExchangeTransactionActor.scala From matcher with MIT License | 5 votes |
// Creates the on-chain exchange transaction for an executed order pair.
// Protocol (subtle, keep ordering intact):
//  - Subscribes to OrderExecutedObserved events on the system event stream.
//  - When both orders belong to the SAME owner, one observation suffices and
//    the tx is created immediately.
//  - Otherwise the FIRST observation of an event is parked in `pendingEvents`
//    and the tx is only created on the SECOND observation (i.e. once both
//    owners' address actors have observed the execution); the event is then
//    removed from the pending set.
//  - On success the ExchangeTransactionCreated is fanned out to all
//    `recipients`; on failure both orders are logged with their JSON.
package com.wavesplatform.dex.actors.tx import akka.actor.{Actor, ActorRef, Props} import com.wavesplatform.dex.actors.tx.CreateExchangeTransactionActor.OrderExecutedObserved import com.wavesplatform.dex.domain.account.Address import com.wavesplatform.dex.domain.utils.ScorexLogging import com.wavesplatform.dex.model.Events.{ExchangeTransactionCreated, OrderExecuted} import com.wavesplatform.dex.model.ExchangeTransactionCreator.CreateTransaction import play.api.libs.json.Json import scala.collection.mutable class CreateExchangeTransactionActor(createTransaction: CreateTransaction, recipients: List[ActorRef]) extends Actor with ScorexLogging { private val pendingEvents = mutable.Set.empty[OrderExecuted] override def preStart(): Unit = context.system.eventStream.subscribe(self, classOf[OrderExecutedObserved]) override def receive: Receive = { case OrderExecutedObserved(sender, event) => val sameOwner = event.counter.order.sender == event.submitted.order.sender log.debug(s"Execution observed at $sender for OrderExecuted(${event.submitted.order.id()}, ${event.counter.order .id()}), amount=${event.executedAmount})${if (sameOwner) " Same owner for both orders" else ""}") if (sameOwner || pendingEvents.contains(event)) { import event.{counter, submitted} createTransaction(event) match { case Right(tx) => log.info(s"Created transaction: $tx") val created = ExchangeTransactionCreated(tx) recipients.foreach(_ ! 
created) case Left(ex) => log.warn( s"""Can't create tx: $ex |o1: (amount=${submitted.amount}, fee=${submitted.fee}): ${Json.prettyPrint(submitted.order.json())} |o2: (amount=${counter.amount}, fee=${counter.fee}): ${Json.prettyPrint(counter.order.json())}""".stripMargin ) } pendingEvents -= event } else pendingEvents += event } } object CreateExchangeTransactionActor { val name = "create-exchange-tx" case class OrderExecutedObserved(sender: Address, event: OrderExecuted) def props(createTransaction: CreateTransaction, recipients: List[ActorRef]): Props = Props(new CreateExchangeTransactionActor(createTransaction, recipients)) }
Example 17
Source File: OrderBookSnapshotStoreActor.scala From matcher with MIT License | 5 votes |
package com.wavesplatform.dex.actors.orderbook

import akka.actor.{Actor, Props}
import com.wavesplatform.dex.actors.orderbook.OrderBookSnapshotStoreActor._
import com.wavesplatform.dex.db.OrderBookSnapshotDB
import com.wavesplatform.dex.domain.asset.AssetPair
import com.wavesplatform.dex.model.OrderBookSnapshot
import com.wavesplatform.dex.queue.QueueEventWithMeta.Offset

/**
 * Thin actor facade over OrderBookSnapshotDB: read, write and delete order
 * book snapshots keyed by asset pair.
 */
class OrderBookSnapshotStoreActor(db: OrderBookSnapshotDB) extends Actor {

  override def receive: Receive = {
    case Message.GetSnapshot(p) =>
      sender() ! Response.GetSnapshot(db.get(p))

    case Message.Update(p, offset, newSnapshot) =>
      db.update(p, offset, newSnapshot)
      sender() ! Response.Updated(offset)

    // NOTE(review): Delete sends no reply even though Response.Deleted exists
    // below — presumably fire-and-forget; confirm with callers before adding one.
    case Message.Delete(p) =>
      db.delete(p)
  }
}

object OrderBookSnapshotStoreActor {

  sealed trait Message
  object Message {
    case class GetSnapshot(assetPair: AssetPair) extends Message
    case class Update(assetPair: AssetPair, offset: Offset, newSnapshot: Option[OrderBookSnapshot]) extends Message
    case class Delete(assetPair: AssetPair) extends Message
  }

  sealed trait Response
  object Response {
    case class GetSnapshot(result: Option[(Offset, OrderBookSnapshot)]) extends Response
    case class Updated(offset: Offset) extends Response
    case class Deleted(assetPair: AssetPair) extends Response
  }

  def props(db: OrderBookSnapshotDB): Props = Props(new OrderBookSnapshotStoreActor(db))
}
Example 18
Source File: AskActor.scala From matcher with MIT License | 5 votes |
package com.wavesplatform.dex.actors

import akka.actor.{Actor, ActorRef, ActorSystem, Props, Status}

import scala.concurrent.duration.FiniteDuration
import scala.concurrent.{Future, Promise, TimeoutException}
import scala.reflect.ClassTag

/**
 * One-shot "typed ask" actor: completes the promise with the first message of
 * runtime class T, fails it on Status.Failure or an unexpected message, and
 * fails with TimeoutException if nothing arrives within `timeout`. Stops
 * itself after the first message either way.
 */
class AskActor[T](p: Promise[T], timeout: FiniteDuration)(implicit ct: ClassTag[T]) extends Actor {
  import context.dispatcher

  // Self-addressed timeout: delivers a Status.Failure(TimeoutException).
  private val timeoutCancelable = context.system.scheduler.scheduleOnce(timeout, self, AskActor.timeoutMessage)

  override val receive: Receive = {
    case x => // Fix in Scala 2.13
      timeoutCancelable.cancel()
      context.stop(self)

      x match {
        // Exact runtime-class check (ClassTag match alone is unchecked under erasure).
        case x: T if x.getClass == ct.runtimeClass => p.trySuccess(x)
        case e: Status.Failure                     => p.tryFailure(e.cause)
        case _ =>
          p.tryFailure(new IllegalArgumentException(s"Expected ${ct.runtimeClass.getName}, but got $x"))
      }
  }
}

object AskActor {

  // Shared failure message; the stack trace is cleared because it carries no
  // useful information for a scheduler-produced timeout.
  private val timeoutMessage = {
    val reason = new TimeoutException("Typed ask is timed out!")
    reason.setStackTrace(Array.empty)
    Status.Failure(reason)
  }

  def props[T](p: Promise[T], timeout: FiniteDuration)(implicit ct: ClassTag[T]) =
    Props(new AskActor(p, timeout))

  /** Spawns an AskActor and returns its ref together with the result future. */
  def mk[T](timeout: FiniteDuration)(implicit ct: ClassTag[T], system: ActorSystem): (ActorRef, Future[T]) = {
    val p = Promise[T]()
    val ref = system.actorOf(props(p, timeout))
    (ref, p.future)
  }
}
Example 19
Source File: WatchDistributedCompletionActor.scala From matcher with MIT License | 5 votes |
package com.wavesplatform.dex.actors

import akka.actor.{Actor, ActorRef, Cancellable, Props, Terminated}
import com.wavesplatform.dex.domain.utils.ScorexLogging

import scala.concurrent.duration.FiniteDuration

/**
 * Broadcasts `startWorkCommand` to all `workers` and waits until each of them
 * either replies with `workCompleted` or terminates. When all are accounted for
 * (or the timeout fires), notifies `completionReceiver` with `workCompleted`
 * and stops itself.
 */
class WatchDistributedCompletionActor(workers: Set[ActorRef],
                                      completionReceiver: ActorRef,
                                      startWorkCommand: Any,
                                      workCompleted: Any,
                                      timeout: FiniteDuration)
    extends Actor
    with ScorexLogging {

  import context.dispatcher

  if (workers.isEmpty) stop(Cancellable.alreadyCancelled)
  else
    workers.foreach { x =>
      context.watch(x) // a dead worker must not block completion
      x ! startWorkCommand
    }

  override def receive: Receive = state(workers, context.system.scheduler.scheduleOnce(timeout, self, TimedOut))

  private def state(rest: Set[ActorRef], timer: Cancellable): Receive = {
    case `workCompleted` =>
      switchTo(rest - sender(), timer)
      context.unwatch(sender())

    case Terminated(ref) =>
      switchTo(rest - ref, timer)

    case TimedOut =>
      // BUG FIX: the original logged *all* workers here; only those still in `rest`
      // actually failed to respond. (Also fixed the message's grammar.)
      val pending = rest.iterator.map(_.path.name).mkString(", ")
      log.error(s"$startWorkCommand is timed out! Workers that didn't respond: $pending")
      stop(timer)
  }

  private def switchTo(updatedRest: Set[ActorRef], timer: Cancellable): Unit =
    if (updatedRest.isEmpty) stop(timer) else context.become(state(updatedRest, timer))

  // Cancels the timer, reports completion and terminates this actor.
  private def stop(timer: Cancellable): Unit = {
    timer.cancel()
    completionReceiver ! workCompleted
    context.stop(self)
  }
}

object WatchDistributedCompletionActor {
  def props(workers: Set[ActorRef],
            completionReceiver: ActorRef,
            startWorkCommand: Any,
            workCompleted: Any,
            timeout: FiniteDuration): Props =
    Props(new WatchDistributedCompletionActor(workers, completionReceiver, startWorkCommand, workCompleted, timeout))
}
Example 20
Source File: AddressDirectoryActor.scala From matcher with MIT License | 5 votes |
package com.wavesplatform.dex.actors.address

import akka.actor.{Actor, ActorRef, Props, SupervisorStrategy, Terminated}
import com.wavesplatform.dex.db.OrderDB
import com.wavesplatform.dex.domain.account.Address
import com.wavesplatform.dex.domain.utils.{EitherExt2, ScorexLogging}
import com.wavesplatform.dex.history.HistoryRouter._
import com.wavesplatform.dex.model.Events
import com.wavesplatform.dex.model.Events.OrderCancelFailed

import scala.collection.mutable

/**
 * Routes per-address messages to one child AddressActor per address, creating
 * children lazily on first use and mirroring order events to the optional
 * history router.
 */
class AddressDirectoryActor(orderDB: OrderDB, addressActorProps: (Address, Boolean) => Props, historyRouter: Option[ActorRef])
    extends Actor
    with ScorexLogging {

  import AddressDirectoryActor._
  import context._

  // Becomes true after StartSchedules; children created afterwards get schedules enabled.
  private var startSchedules: Boolean = false
  private[this] val children = mutable.AnyRefMap.empty[Address, ActorRef]

  // A failing child is simply stopped; the Terminated handler below removes it from the map.
  override def supervisorStrategy: SupervisorStrategy = SupervisorStrategy.stoppingStrategy

  // Child actor name is the string form of the address (relied on by the Terminated handler).
  private def createAddressActor(address: Address): ActorRef = {
    log.debug(s"Creating address actor for $address")
    watch(actorOf(addressActorProps(address, startSchedules), address.toString))
  }

  // A BalanceChanged for an address without a child is dropped on purpose:
  // there is no actor state to update, so spawning one would be wasted work.
  private def forward(address: Address, msg: Any): Unit = (children get address, msg) match {
    case (None, _: AddressActor.Message.BalanceChanged) =>
    case _                                              => children getOrElseUpdate (address, createAddressActor(address)) forward msg
  }

  override def receive: Receive = {
    case Envelope(address, cmd) => forward(address, cmd)

    case e @ Events.OrderAdded(lo, timestamp) =>
      forward(lo.order.sender, e)
      historyRouter foreach { _ ! SaveOrder(lo, timestamp) }

    case e: Events.OrderExecuted =>
      import e.{counter, submitted}
      forward(submitted.order.sender, e)
      // Avoid delivering the same event twice when an order matched against the same sender.
      if (counter.order.sender != submitted.order.sender) forward(counter.order.sender, e)
      historyRouter foreach { _ ! SaveEvent(e) }

    case e: Events.OrderCanceled =>
      forward(e.acceptedOrder.order.sender, e)
      historyRouter foreach { _ ! SaveEvent(e) }

    case e: OrderCancelFailed =>
      // The owner is not part of the event, so look it up in the order DB.
      orderDB.get(e.id) match {
        case Some(order) => forward(order.sender.toAddress, e)
        case None        => log.warn(s"The order '${e.id}' not found")
      }

    case StartSchedules =>
      // Idempotent: only the first StartSchedules propagates to existing children.
      if (!startSchedules) {
        startSchedules = true
        context.children.foreach(_ ! StartSchedules)
      }

    case Terminated(child) =>
      // Child names are address strings (see createAddressActor), so parse the name back.
      val addressString = child.path.name
      val address       = Address.fromString(addressString).explicitGet()
      children.remove(address)
      log.warn(s"Address handler for $addressString terminated")
  }
}

object AddressDirectoryActor {
  case class Envelope(address: Address, cmd: AddressActor.Message)
  case object StartSchedules
}
Example 21
Source File: BatchOrderCancelActor.scala From matcher with MIT License | 5 votes |
package com.wavesplatform.dex.actors.address

import akka.actor.{Actor, ActorRef, Cancellable, Props}
import com.wavesplatform.dex.actors.TimedOut
import com.wavesplatform.dex.actors.address.AddressActor.Command.CancelOrder
import com.wavesplatform.dex.actors.address.AddressActor.Event
import com.wavesplatform.dex.actors.address.BatchOrderCancelActor.CancelResponse.OrderCancelResult
import com.wavesplatform.dex.domain.order.Order
import com.wavesplatform.dex.domain.utils.ScorexLogging
import com.wavesplatform.dex.error

import scala.concurrent.duration.FiniteDuration

/**
 * Cancels a batch of orders through `processorActor` and reports a single
 * aggregated [[AddressActor.Event.BatchCancelCompleted]] to `clientActor` once
 * every order has been resolved or the timeout fires.
 */
class BatchOrderCancelActor private (
    orderIds: Set[Order.Id],
    processorActor: ActorRef,
    clientActor: ActorRef,
    timeout: FiniteDuration,
    initResponse: Map[Order.Id, OrderCancelResult]
) extends Actor
    with ScorexLogging {

  import BatchOrderCancelActor._
  import context.dispatcher

  // Kick off one cancellation request per order.
  orderIds.foreach(processorActor ! CancelOrder(_))

  override def receive: Receive = state(orderIds, initResponse, context.system.scheduler.scheduleOnce(timeout, self, TimedOut))

  private def state(pending: Set[Order.Id], acc: Map[Order.Id, OrderCancelResult], timer: Cancellable): Receive = {
    case CancelResponse(id, result) =>
      val stillPending = pending - id
      val collected    = acc.updated(id, result)
      if (stillPending.isEmpty) stop(Event.BatchCancelCompleted(collected), timer)
      else context.become(state(stillPending, collected, timer))

    // case Terminated(ref) => // Can't terminate before processorActor, because processorActor is a parent

    case TimedOut =>
      log.error(s"CancelOrder is timed out for orders: ${pending.mkString(", ")}")
      stop(Event.BatchCancelCompleted(acc), timer)
  }

  // Cancels the timer, delivers the aggregated result and terminates.
  private def stop(response: Event.BatchCancelCompleted, timer: Cancellable): Unit = {
    timer.cancel()
    clientActor ! response
    context.stop(self)
  }
}

object BatchOrderCancelActor {

  def props(orderIds: Set[Order.Id],
            processorActor: ActorRef,
            clientActor: ActorRef,
            timeout: FiniteDuration,
            initResponse: Map[Order.Id, OrderCancelResult] = Map.empty): Props = {
    require(orderIds.nonEmpty, "orderIds is empty")
    Props(new BatchOrderCancelActor(orderIds, processorActor, clientActor, timeout, initResponse))
  }

  object CancelResponse {

    type OrderCancelResult = Either[error.MatcherError, Event.OrderCanceled]

    /** Extracts (orderId, result) from both successful and known error replies. */
    def unapply(arg: Any): Option[(Order.Id, OrderCancelResult)] = helper.lift(arg)

    private val helper: PartialFunction[Any, (Order.Id, OrderCancelResult)] = {
      case x @ Event.OrderCanceled(id)     => (id, Right(x))
      case x @ error.OrderNotFound(id)     => (id, Left(x))
      case x @ error.OrderCanceled(id)     => (id, Left(x))
      case x @ error.OrderFull(id)         => (id, Left(x))
      case x @ error.MarketOrderCancel(id) => (id, Left(x))
    }
  }
}
Example 22
Source File: OrderHistoryStub.scala From matcher with MIT License | 5 votes |
package com.wavesplatform.dex.model

import akka.actor.{ActorRef, ActorSystem, Props}
import com.wavesplatform.dex.actors.SpendableBalancesActor
import com.wavesplatform.dex.actors.address.{AddressActor, AddressDirectoryActor}
import com.wavesplatform.dex.db.{EmptyOrderDB, TestOrderDB}
import com.wavesplatform.dex.domain.account.Address
import com.wavesplatform.dex.domain.asset.Asset
import com.wavesplatform.dex.domain.bytes.ByteStr
import com.wavesplatform.dex.error.ErrorFormatterContext
import com.wavesplatform.dex.queue.QueueEventWithMeta
import com.wavesplatform.dex.time.Time

import scala.collection.mutable
import scala.concurrent.Future

/**
 * Test double that wires AddressActor / AddressDirectoryActor together with
 * stubbed (always-empty) balances and test order storage, and lets tests drive
 * the actors with order events.
 */
class OrderHistoryStub(system: ActorSystem, time: Time, maxActiveOrders: Int, maxFinalizedOrders: Int) {
  // Every asset is formatted with 8 decimals in this stub.
  private implicit val efc: ErrorFormatterContext = (_: Asset) => 8

  private val refs   = mutable.AnyRefMap.empty[Address, ActorRef] // address -> its AddressActor
  private val orders = mutable.AnyRefMap.empty[ByteStr, Address]  // order id -> owner address

  // Balance callbacks always report empty balances.
  private val spendableBalances: (Address, Set[Asset]) => Future[Map[Asset, Long]] = (_, _) => Future.successful(Map.empty[Asset, Long])
  private val allAssetsSpendableBalances: Address => Future[Map[Asset, Long]]      = _ => Future.successful(Map.empty[Asset, Long])

  // NOTE(review): forward-references the lazy val `addressDir` below; presumably safe
  // because Props takes its creator by-name, so `addressDir` is only dereferenced when
  // the actor is instantiated — confirm initialization order if this is ever refactored.
  private val spendableBalanceActor = system.actorOf(Props(new SpendableBalancesActor(spendableBalances, allAssetsSpendableBalances, addressDir)))

  // Props factory used both directly (actorFor) and by the AddressDirectoryActor below.
  def createAddressActor(address: Address, enableSchedules: Boolean): Props = {
    Props(
      new AddressActor(
        address,
        time,
        new TestOrderDB(maxFinalizedOrders),
        (_, _) => Future.successful(Right(())), // validation stub: every order is accepted
        e => Future.successful { Some(QueueEventWithMeta(0, 0, e)) }, // queue stub: echo event at offset 0
        enableSchedules,
        spendableBalanceActor,
        AddressActor.Settings.default.copy(maxActiveOrders = maxActiveOrders)
      )
    )
  }

  // Lazily creates (and caches) the AddressActor owning the given order's sender.
  private def actorFor(ao: AcceptedOrder): ActorRef =
    refs.getOrElseUpdate(
      ao.order.sender,
      system.actorOf(createAddressActor(ao.order.sender, enableSchedules = true))
    )

  lazy val addressDir = system.actorOf(
    Props(
      new AddressDirectoryActor(
        EmptyOrderDB,
        createAddressActor,
        None
      )
    )
  )

  def ref(sender: Address): ActorRef  = refs(sender)
  def ref(orderId: ByteStr): ActorRef = refs(orders(orderId))

  // Routes an event to the owning address actor(s), recording order ownership first.
  def process(event: Events.Event): Unit = event match {
    case oa: Events.OrderAdded =>
      orders += oa.order.order.id() -> oa.order.order.sender
      actorFor(oa.order) ! oa

    case ox: Events.OrderExecuted =>
      orders += ox.submitted.order.id() -> ox.submitted.order.sender
      orders += ox.counter.order.id()   -> ox.counter.order.sender
      actorFor(ox.counter) ! ox
      actorFor(ox.submitted) ! ox

    case oc: Events.OrderCanceled => actorFor(oc.acceptedOrder) ! oc
  }

  def processAll(events: Events.Event*): Unit = events.foreach(process)
}
Example 23
Source File: TAC.scala From akka-tools with MIT License | 5 votes |
package no.nextgentel.oss.akkatools.example2.trustaccountcreation

import java.util.concurrent.TimeUnit

import akka.actor.Status.Failure
import akka.actor.{ActorSystem, Props, ActorPath}
import no.nextgentel.oss.akkatools.aggregate._
import no.nextgentel.oss.akkatools.example2.other.{DoCreateTrustAccount, DoPerformESigning, DoSendEmailToCustomer}

import scala.concurrent.duration.FiniteDuration

/**
 * Event-sourced aggregate driving the Trust-Account-Creation (TAC) process:
 * registration -> e-signing -> account creation -> customer notification.
 * Commands are converted to events (cmdToEvent) and events trigger durable
 * messages to the collaborating systems (generateDMs).
 */
class TACAggregate
(
  dmSelf:ActorPath,
  eSigningSystem:ActorPath,
  emailSystem:ActorPath,
  trustAccountSystem:ActorPath
) extends GeneralAggregateDMViaEvent[TACEvent, TACState](dmSelf) {

  override def persistenceIdBase() = TACAggregate.persistenceIdBase

  // Override this one to set different timeout
  override def idleTimeout() = FiniteDuration(60, TimeUnit.SECONDS)

  override var state = TACState.empty() // This is the state of our initial state (empty)

  // transform command to event
  override def cmdToEvent = {
    case c:CreateNewTACCmd =>
      // Only registration acknowledges the sender: "ok" on success, Failure on error.
      ResultingEvent( RegisteredEvent(c.info) )
        .onSuccess{ sender() ! "ok" }
        .onError{ (e) => sender() ! Failure(new Exception(s"Failed: $e"))}
    case c:ESigningFailedCmd    => ResultingEvent( ESigningFailedEvent() )
    case c:ESigningCompletedCmd => ResultingEvent( ESigningCompletedEvent() )
    case c:CompletedCmd         => ResultingEvent( CreatedEvent(c.trustAccountId) )
    case c:DeclinedCmd          => ResultingEvent( DeclinedEvent(c.cause) )
  }

  // For each persisted event, decide which durable message(s) go to which system.
  override def generateDMs = {
    case e:RegisteredEvent =>
      // We must send message to eSigningSystem
      val msg = DoPerformESigning(dispatchId, e.info.customerNo)
      ResultingDMs( msg, eSigningSystem)

    case e:ESigningCompletedEvent =>
      // ESigning is completed, so we should init creation of the TrustAccount
      val info = state.info.get
      val msg = DoCreateTrustAccount(dispatchId, info.customerNo, info.trustAccountType)
      ResultingDMs(msg, trustAccountSystem)

    case e:DeclinedEvent =>
      // The TrustAccountCreation-process failed - must notify customer
      val msg = DoSendEmailToCustomer(state.info.get.customerNo, s"Sorry.. TAC-failed: ${e.cause}")
      ResultingDMs(msg, emailSystem)

    case e:CreatedEvent =>
      // The TrustAccountCreation-process was success - must notify customer
      val msg = DoSendEmailToCustomer(state.info.get.customerNo, s"Your TrustAccount '${e.trustAccountId}' has been created!")
      ResultingDMs(msg, emailSystem)
  }
}

object TACAggregate {
  val persistenceIdBase = "TAC-"

  def props(dmSelf:ActorPath, eSigningSystem:ActorPath, emailSystem:ActorPath, trustAccountSystem:ActorPath) =
    Props(new TACAggregate(dmSelf, eSigningSystem, emailSystem ,trustAccountSystem))
}

/** Boots the TAC aggregate (and its view) inside the given actor system. */
class TACStarter(system:ActorSystem) extends AggregateStarter("tac", system) with AggregateViewStarter {

  def config(eSigningSystem:ActorPath, emailSystem:ActorPath, trustAccountSystem:ActorPath):TACStarter = {
    setAggregatePropsCreator{ dmSelf => TACAggregate.props(dmSelf, eSigningSystem, emailSystem, trustAccountSystem) }
    this
  }

  override def createViewProps(aggregateId: String): Props =
    Props( new GeneralAggregateView[TACEvent, TACState](TACAggregate.persistenceIdBase, aggregateId, TACState.empty(), true))
}
Example 24
Source File: ClusterSingletonHelperTest.scala From akka-tools with MIT License | 5 votes |
package no.nextgentel.oss.akkatools.cluster

import akka.actor.{Actor, ActorRef, ActorSystem, Props}
import akka.testkit.{TestKit, TestProbe}
import com.typesafe.config.ConfigFactory
import org.scalatest.{BeforeAndAfter, BeforeAndAfterAll, FunSuiteLike, Matchers}
import org.slf4j.LoggerFactory

import scala.util.Random

object ClusterSingletonHelperTest {
  // Random port so parallel test runs don't collide on the same TCP port.
  val port = 20000 + Random.nextInt(20000)
}

/** Boots a single-node cluster and verifies a cluster singleton can be started and messaged. */
class ClusterSingletonHelperTest (_system:ActorSystem) extends TestKit(_system) with FunSuiteLike with Matchers with BeforeAndAfterAll with BeforeAndAfter {

  // Single-node cluster: the node seeds itself on the random port chosen above.
  def this() = this(ActorSystem("test-actor-system", ConfigFactory.parseString(
    s"""akka.actor.provider = "akka.cluster.ClusterActorRefProvider"
       |akka.remote.enabled-transports = ["akka.remote.netty.tcp"]
       |akka.remote.netty.tcp.hostname="localhost"
       |akka.remote.netty.tcp.port=${ClusterSingletonHelperTest.port}
       |akka.cluster.seed-nodes = ["akka.tcp://test-actor-system@localhost:${ClusterSingletonHelperTest.port}"]
    """.stripMargin
  ).withFallback(ConfigFactory.load("application-test.conf"))))

  override def afterAll {
    TestKit.shutdownActorSystem(system)
  }

  val log = LoggerFactory.getLogger(getClass)

  test("start and communicate with cluster-singleton") {
    val started = TestProbe()
    val proxy = ClusterSingletonHelper.startClusterSingleton(system, Props(new OurClusterSingleton(started.ref)), "ocl")
    // The singleton announces itself on construction...
    started.expectMsg("started")

    // ...and answers ping with pong through the proxy.
    val sender = TestProbe()
    sender.send(proxy, "ping")
    sender.expectMsg("pong")
  }
}

// Minimal singleton under test: reports "started" on construction, then answers ping.
class OurClusterSingleton(started:ActorRef) extends Actor {
  started ! "started"
  def receive = {
    case "ping" => sender ! "pong"
  }
}
Example 25
Source File: GeneralAggregateBaseTest_handleSnapshotMessages.scala From akka-tools with MIT License | 5 votes |
package no.nextgentel.oss.akkatools.aggregate.aggregateTest_usingAggregateStateBase

import java.util.UUID

import akka.actor.{ActorPath, ActorSystem, Props}
import akka.persistence.{DeleteMessagesFailure, DeleteMessagesSuccess, SaveSnapshotFailure, SaveSnapshotSuccess, SnapshotMetadata, SnapshotOffer}
import akka.testkit.{TestKit, TestProbe}
import com.typesafe.config.ConfigFactory
import no.nextgentel.oss.akkatools.aggregate._
import no.nextgentel.oss.akkatools.testing.AggregateTesting
import org.scalatest.{BeforeAndAfter, BeforeAndAfterAll, FunSuiteLike, Matchers}
import org.slf4j.LoggerFactory

// NOTE(review): this excerpt is truncated — the declaration of the enclosing aggregate
// class for the overrides below is not part of the snippet.

  // Restores aggregate state from a snapshot offered during recovery.
  override def onSnapshotOffer(offer: SnapshotOffer): Unit = {
    state = offer.snapshot.asInstanceOf[StringState]
  }

  // Alternates accept/reject so tests can exercise both snapshot outcomes:
  // accepts (state -> "SAVED") only when the current state is "WAT".
  override def acceptSnapshotRequest(req: SaveSnapshotOfCurrentState): Boolean = {
    if (state == StringState("WAT")) {
      state = StringState("SAVED")
      true
    } else {
      state = StringState("WAT") //So it works second time
      false
    }
  }

  // Each persistence callback stamps a distinct marker state that tests assert on.
  override def onSnapshotSuccess(success: SaveSnapshotSuccess): Unit = {
    state = StringState("SUCCESS_SNAP")
  }

  override def onSnapshotFailure(failure: SaveSnapshotFailure): Unit = {
    state = StringState("FAIL_SNAP")
  }

  override def onDeleteMessagesSuccess(success: DeleteMessagesSuccess): Unit = {
    state = StringState("SUCCESS_MSG")
  }

  override def onDeleteMessagesFailure(failure: DeleteMessagesFailure): Unit = {
    state = StringState("FAIL_MSG")
  }

  // Used as prefix/base when constructing the persistenceId to use - the unique ID is extracted runtime from actorPath which is construced by Sharding-coordinator
  override def persistenceIdBase(): String = "/x/"
}

case class StringEv(data: String)

// Trivial aggregate state: each event simply replaces the stored string.
case class StringState(data:String) extends AggregateStateBase[StringEv, StringState] {
  override def transitionState(event: StringEv): StateTransition[StringEv, StringState] = StateTransition(StringState(event.data))
}
Example 26
Source File: ActorWithDMSupportTest.scala From akka-tools with MIT License | 5 votes |
package no.nextgentel.oss.akkatools.persistence

import java.util.concurrent.TimeUnit

import akka.actor.{Props, ActorSystem}
import akka.testkit.{TestProbe, TestKit}
import com.typesafe.config.ConfigFactory
import org.scalatest.{BeforeAndAfter, BeforeAndAfterAll, Matchers, FunSuiteLike}

import scala.concurrent.duration.FiniteDuration

/**
 * Exercises ActorWithDMSupport: replies for raw and DurableMessage payloads,
 * silent confirmation, and the two "skip confirmation" exception paths.
 */
class ActorWithDMSupportTest(_system:ActorSystem) extends TestKit(_system) with FunSuiteLike with Matchers with BeforeAndAfterAll with BeforeAndAfter {
  def this() = this(ActorSystem("ActorWithDMSupportTest", ConfigFactory.load("application-test.conf")))

  test("success with dm") {
    val a = system.actorOf(Props(new TestActorWithDMSupport()))
    val s = TestProbe()

    // send raw
    s.send(a, "sendok")
    s.expectMsg("ok")

    // send via dm and withNewPayload
    val dm = DurableMessage(1L, "sendok", s.ref.path)
    s.send(a, dm)
    s.expectMsg(dm.withNewPayload("ok"))

    // send raw - do nothing
    s.send(a, "silent")

    // send silent - wait for configm
    s.send(a, DurableMessage(1L, "silent", s.ref.path))
    s.expectMsg( DurableMessageReceived(1,None) )

    // send noconfirm - with dm
    s.send(a, DurableMessage(1L, "no-confirm", s.ref.path))
    s.expectNoMessage(FiniteDuration(500, TimeUnit.MILLISECONDS))

    // send noconfirm - with dm
    s.send(a, DurableMessage(1L, "no-confirm-custom", s.ref.path))
    s.expectNoMessage(FiniteDuration(500, TimeUnit.MILLISECONDS))

    // send noconfirm - without dm
    s.send(a, "no-confirm")
    s.expectNoMessage(FiniteDuration(500, TimeUnit.MILLISECONDS))

    // send noconfirm - without dm
    s.send(a, "no-confirm-custom")
    s.expectNoMessage(FiniteDuration(500, TimeUnit.MILLISECONDS))
  }
}

class TestActorWithDMSupport extends ActorWithDMSupport {
  // All raw messages or payloads in DMs are passed to this function.
  override def receivePayload = {
    case "sendok" =>
      send(sender().path, "ok")
    case "silent" =>
      // BUG FIX: the original evaluated the `Unit` companion object here (a value, not
      // a no-op), which compiles with a "pure expression discarded" smell; `()` is the
      // intended do-nothing result.
      ()
    case "no-confirm" =>
      // Rejecting the message this way skips DM confirmation (logged as a warning).
      throw new LogWarningAndSkipDMConfirmException("something went wrong")
    case "no-confirm-custom" =>
      throw new CustomLogWarningAndSkipDMConfirm()
  }
}

class CustomLogWarningAndSkipDMConfirm extends Exception("") with LogWarningAndSkipDMConfirm
Example 27
Source File: Scheduler.scala From piflow with BSD 2-Clause "Simplified" License | 5 votes |
package cn.piflow.api

import akka.actor.{Actor, ActorSystem, Props}
import cn.piflow.api.HTTPService.config
import cn.piflow.util.H2Util
import com.typesafe.akka.extension.quartz.QuartzSchedulerExtension

/** Kinds of schedulable units. */
object ScheduleType {
  val FLOW = "Flow"
  val GROUP = "Group"
}

/**
 * Launches a flow or a group (depending on `scheduleType`) from the JSON
 * payload it receives and records the resulting execution id in H2.
 */
class ExecutionActor(id: String, scheduleType: String) extends Actor {

  override def receive: Receive = {
    case json: String =>
      scheduleType match {
        case ScheduleType.FLOW =>
          // Only the application id is needed for the schedule entry.
          val (appId, _) = API.startFlow(json)
          H2Util.addScheduleEntry(id, appId, ScheduleType.FLOW)
        case ScheduleType.GROUP =>
          val groupExecution = API.startGroup(json)
          H2Util.addScheduleEntry(id, groupExecution.getGroupId(), ScheduleType.GROUP)
      }

    case _ => println("error type!")
  }
}
Example 28
Source File: Master.scala From asyspark with MIT License | 5 votes |
package org.apache.spark.asyspark.core

import java.util.concurrent.TimeUnit

import akka.actor.{Actor, ActorLogging, ActorRef, ActorSystem, Address, Props, Terminated}
import akka.util.Timeout
import com.typesafe.config.Config
import com.typesafe.scalalogging.slf4j.StrictLogging
import org.apache.spark.asyspark.core.messages.master.{ClientList, RegisterClient, RegisterServer, ServerList}

import scala.concurrent.duration._
import scala.concurrent.{ExecutionContext, Future}

// NOTE(review): this excerpt is truncated — the enclosing `class Master ...`
// declaration and the definition of the `servers` collection are missing from
// the snippet (the `}` below closes that unseen class).

  // Currently registered clients; `servers` is declared in the missing part of the class.
  var clients = Set.empty[ActorRef]

  override def receive: Receive = {
    case RegisterServer(server) =>
      log.info(s"Registering server ${server.path.toString}")
      println("register server")
      servers += server
      context.watch(server) // removal happens in the Terminated handler below
      sender ! true

    case RegisterClient(client) =>
      log.info(s"Registering client ${sender.path.toString}")
      clients += client
      context.watch(client)
      sender ! true

    case ServerList() =>
      log.info(s"Sending current server list to ${sender.path.toString}")
      sender ! servers.toArray

    case ClientList() =>
      log.info(s"Sending current client list to ${sender.path.toString}")
      sender ! clients.toArray

    case Terminated(actor) =>
      // Drop the dead actor from whichever registry contains it.
      actor match {
        case server: ActorRef if servers contains server =>
          log.info(s"Removing server ${server.path.toString}")
          servers -= server
        case client: ActorRef if clients contains client =>
          log.info(s"Removing client ${client.path.toString}")
          clients -= client
        case actor: ActorRef =>
          log.warning(s"Actor ${actor.path.toString} will be terminated for some unknown reason")
      }
  }
}

/** Boots the master actor system and resolves the master actor. */
object Master extends StrictLogging {
  def run(config: Config): Future[(ActorSystem, ActorRef)] = {
    logger.debug("Starting master actor system")
    val system = ActorSystem(config.getString("asyspark.master.system"), config.getConfig("asyspark.master"))
    logger.debug("Starting master")
    val master = system.actorOf(Props[Master], config.getString("asyspark.master.name"))
    implicit val timeout = Timeout(config.getDuration("asyspark.master.startup-timeout", TimeUnit.MILLISECONDS) milliseconds)
    implicit val ec = ExecutionContext.Implicits.global
    val address = Address("akka.tcp", config.getString("asyspark.master.system"), config.getString("asyspark.master.host"), config.getString("asyspark.master.port").toInt)
    // Resolving the selection acts as a readiness check before handing back the refs.
    system.actorSelection(master.path.toSerializationFormat).resolveOne().map {
      case actor: ActorRef =>
        logger.debug("Master successfully started")
        (system, master)
    }
  }
}
Example 29
Source File: PhilosopherMessages.scala From didactic-computing-machine with GNU Affero General Public License v3.0 | 5 votes |
package DiningPhilosophers

import akka.actor.{ Actor, ActorLogging, ActorRef, ActorSystem, Props }

/** Messages a philosopher reacts to. */
object PhilosopherMessages {
  case object Eat   // prompt the philosopher to try to eat
  case object Think // prompt the philosopher to think
}

/** Messages exchanged with a fork. */
object ForkMessages {
  case object Take          // request to pick the fork up
  case object Put           // put the fork back down
  case object ForkBeingUsed // reply: the fork is held by another philosopher
  case object ForkTaken     // reply: the fork was successfully taken
}
Example 30
Source File: Philosopher.scala From didactic-computing-machine with GNU Affero General Public License v3.0 | 5 votes |
package DiningPhilosophers

import DiningPhilosophers.ForkMessages._
import DiningPhilosophers.PhilosopherMessages._
import akka.actor.{Actor, ActorLogging, ActorRef, ActorSystem, Props}

import scala.concurrent.duration.FiniteDuration
import scala.concurrent.duration.DurationInt
import scala.concurrent.duration.FiniteDuration
import scala.concurrent.ExecutionContext.Implicits.global

/**
 * A philosopher alternating between thinking and eating while competing for
 * its two adjacent forks. The lifecycle is modelled with context.become:
 * thinking -> hungry -> waitingForOtherFork -> eating -> thinking.
 */
class Philosopher(val leftFork: ActorRef, val rightFork: ActorRef) extends Actor with ActorLogging {

  def name = self.path.name

  // Fixed simulation durations.
  private val eatingTime = 2500.millis
  private val thinkingTime = 5000.millis
  private val retryTime = 10.millis

  // Schedules the next Eat attempt after `duration` and switches to thinking.
  def thinkFor(duration: FiniteDuration) = {
    context.system.scheduler.scheduleOnce(duration, self, Eat)
    context.become(thinking)
  }

  def thinking: Receive = {
    case Eat =>
      log.info(s"Philosopher ${self.path.name} wants to eat")
      // Request both forks at once; replies are handled in the `hungry` state.
      leftFork ! Take
      rightFork ! Take
      context.become(hungry)
  }

  // Waiting for the first of the two fork replies.
  def hungry: Receive = {
    case ForkBeingUsed => handleForkBeingUsed()
    case ForkTaken =>
      log.info(s"Philosopher ${self.path.name} found one fork to be taken by other philosopher")
      context.become(waitingForOtherFork)
  }

  // One fork acquired; waiting for the second reply.
  def waitingForOtherFork: Receive = {
    case ForkBeingUsed => handleForkBeingUsed()
    case ForkTaken =>
      log.info(s"Philosopher ${self.path.name} starts to eat")
      context.system.scheduler.scheduleOnce(eatingTime, self, Think)
      context.become(eating)
  }

  def eating: Receive = {
    case Think =>
      log.info(s"Philosopher ${self.path.name} starts to think")
      // Release both forks before going back to thinking.
      leftFork ! Put
      rightFork ! Put
      thinkFor(thinkingTime)
  }

  // A fork was busy: put both back (avoids holding one fork forever) and retry soon.
  def handleForkBeingUsed(): Unit = {
    log.info(s"Philosopher ${self.path.name} found one fork to be in use")

    leftFork ! Put
    rightFork ! Put
    thinkFor(retryTime)
  }

  def receive = {
    case Think =>
      log.info(s"Philosopher ${self.path.name} started thinking")
      thinkFor(thinkingTime)
  }
}
Example 31
Source File: Fork.scala From didactic-computing-machine with GNU Affero General Public License v3.0 | 5 votes |
package DiningPhilosophers

import DiningPhilosophers.ForkMessages._
import akka.actor.{Actor, ActorLogging, ActorRef, ActorSystem, Props}

/**
 * A fork shared by two philosophers. It is either `available` or `inUse` by
 * the philosopher that took it; state switches via context.become.
 */
class Fork extends Actor with ActorLogging {

  def available: Receive = {
    case Take =>
      log.info(s"Fork ${self.path.name} by ${sender.path.name}")
      sender ! ForkTaken
      context.become(inUse(sender))
  }

  def inUse(philosopher: ActorRef): Receive = {
    case Take =>
      log.info(s"Fork ${self.path.name} already being used by ${philosopher.path.name}")
      sender ! ForkBeingUsed
    case Put =>
      // NOTE(review): Put is accepted from ANY sender (ownership is not checked against
      // `philosopher`), and Put is echoed back to the sender, which Philosopher has no
      // handler for — confirm both are intended.
      log.info(s"Fork ${self.path.name} put down by ${sender.path.name}")
      sender ! Put
      context.become(available)
  }

  def receive = available
}
Example 32
Source File: AkkaQuickstartSpec.scala From didactic-computing-machine with GNU Affero General Public License v3.0 | 5 votes |
//#full-example
package com.lightbend.akka.sample

import org.scalatest.{ BeforeAndAfterAll, FlatSpecLike, Matchers }
import akka.actor.{ Actor, Props, ActorSystem }
import akka.testkit.{ ImplicitSender, TestKit, TestActorRef, TestProbe }
import scala.concurrent.duration._
import Greeter._
import Printer._

//#test-classes
/** Verifies that a Greeter actor composes and forwards a greeting to its printer. */
class AkkaQuickstartSpec(_system: ActorSystem)
  extends TestKit(_system)
  with Matchers
  with FlatSpecLike
  with BeforeAndAfterAll {
  //#test-classes

  def this() = this(ActorSystem("AkkaQuickstartSpec"))

  override def afterAll: Unit = {
    shutdown(system)
  }

  //#first-test
  //#specification-example
  "A Greeter Actor" should "pass on a greeting message when instructed to" in {
    //#specification-example
    // A TestProbe stands in for the Printer so the output can be asserted on.
    val testProbe = TestProbe()
    val helloGreetingMessage = "hello"
    val helloGreeter = system.actorOf(Greeter.props(helloGreetingMessage, testProbe.ref))
    val greetPerson = "Akka"
    helloGreeter ! WhoToGreet(greetPerson)
    helloGreeter ! Greet
    // Expect the composed greeting within 500 ms.
    testProbe.expectMsg(500 millis, Greeting(s"$helloGreetingMessage, $greetPerson"))
  }
  //#first-test
}
//#full-example
Example 33
Source File: TestActor.scala From AI with Apache License 2.0 | 5 votes |
package com.bigchange.akka.actor

import akka.actor.{Actor, ActorSystem, Props}
import akka.event.Logging
import com.bigchange.akka.message.MapData

// NOTE(review): this excerpt is truncated — the enclosing class declaration for the
// members below is not part of the snippet.

  @scala.throws[Exception](classOf[Exception])
  override def preStart(): Unit = {
    // Actor initialization block (runs before the first message is processed).
  }

  // props — various ways of constructing Props.
  val props1 = Props()
  val props2 = Props[TestActor]
  val props3 = Props(new TestActor)
  val props6 = props1.withDispatcher("my-dispatcher")

  // create actor — an actor system and an actor bound to a custom dispatcher.
  val system = ActorSystem("MySystem")
  val myActor = system.actorOf(Props[TestActor].withDispatcher("my-dispatcher"), name = "myactor2")

  // Creating an actor from an anonymous class: when spawning a child actor to handle
  // a specific subtask, it can be convenient to put the code to execute inside an
  // anonymous Actor class.
  def receive = {
    case m: MapData ⇒
      context.actorOf(Props(new Actor {
        def receive = {
          case Some(msg) ⇒
            val replyMsg = doSomeDangerousWork(msg.toString)
            sender ! replyMsg
            // One-shot child: stop itself after replying.
            context.stop(self)
        }

        def doSomeDangerousWork(msg: String): String = {
          "done"
        }
      })) forward m
  }
}
Example 34
Source File: ActorRefWithAckTest.scala From intro-to-akka-streams with Apache License 2.0 | 5 votes |
package com.github.dnvriend.streams.sink

import akka.actor.{ Actor, ActorRef, Props }
import akka.stream.scaladsl.{ Sink, Source }
import akka.stream.testkit.TestPublisher
import akka.stream.testkit.scaladsl.TestSource
import akka.testkit.TestProbe
import com.github.dnvriend.streams.TestSpec

import scala.concurrent.duration._
import scala.reflect.ClassTag

// see: https://github.com/akka/akka/blob/4acc1cca6a27be0ff80f801de3640f91343dce94/akka-stream-tests/src/test/scala/akka/stream/scaladsl/ActorRefBackpressureSinkSpec.scala
object ActorRefWithAckTest {
  final val InitMessage = "start"
  final val CompleteMessage = "done"
  final val AckMessage = "ack"

  // Implements the ack-protocol expected by Sink.actorRefWithAck (ack on init and on
  // every element, no ack on completion) and forwards all traffic to `ref` so a
  // TestProbe can observe it.
  class Forwarder(ref: ActorRef) extends Actor {
    def receive = {
      case msg @ `InitMessage` ⇒
        sender() ! AckMessage
        ref forward msg
      case msg @ `CompleteMessage` ⇒
        ref forward msg
      case msg ⇒
        sender() ! AckMessage
        ref forward msg
    }
  }
}

class ActorRefWithAckTest extends TestSpec {
  import ActorRefWithAckTest._

  // Creates an actor of type A whose constructor takes the given probe ref.
  def createActor[A: ClassTag](testProbeRef: ActorRef): ActorRef =
    system.actorOf(Props(implicitly[ClassTag[A]].runtimeClass, testProbeRef))

  // Runs a finite source through an ack-based sink backed by a Forwarder.
  def withForwarder(xs: Int*)(f: TestProbe ⇒ Unit): Unit = {
    val tp = TestProbe()
    val ref = createActor[Forwarder](tp.ref)
    Source(xs.toList).runWith(Sink.actorRefWithAck(ref, InitMessage, AckMessage, CompleteMessage))
    try f(tp) finally killActors(ref)
  }

  // Same, but the test drives the stream element-by-element through a TestPublisher.
  def withTestPublisher[A](f: (TestPublisher.Probe[A], TestProbe, ActorRef) ⇒ Unit): Unit = {
    val tp = TestProbe()
    val ref = createActor[Forwarder](tp.ref)
    val pub: TestPublisher.Probe[A] = TestSource.probe[A].to(Sink.actorRefWithAck(ref, InitMessage, AckMessage, CompleteMessage)).run()
    try f(pub, tp, ref) finally killActors(ref)
  }

  it should "send the elements to the ActorRef" in {
    // which means that the forwarder actor that acts as a sink
    // will initially receive an InitMessage
    // next it will receive each `payload` element, here 1, 2 and 3,
    // finally the forwarder will receive the CompletedMessage, stating that
    // the producer completes the stream because there are no more elements (a finite stream)
    withForwarder(1, 2, 3) { tp ⇒
      tp.expectMsg(InitMessage)
      tp.expectMsg(1)
      tp.expectMsg(2)
      tp.expectMsg(3)
      tp.expectMsg(CompleteMessage)
      tp.expectNoMsg(100.millis)
    }
  }

  it should "send the elements to the ActorRef manually 1, 2 and 3" in {
    withTestPublisher[Int] { (pub, tp, _) ⇒
      pub.sendNext(1)
      tp.expectMsg(InitMessage)
      tp.expectMsg(1)

      pub.sendNext(2)
      tp.expectMsg(2)

      pub.sendNext(3)
      tp.expectMsg(3)

      pub.sendComplete()
      tp.expectMsg(CompleteMessage)
      tp.expectNoMsg(100.millis)
    }
  }

  it should "cancel stream when actor terminates" in {
    withTestPublisher[Int] { (pub, tp, ref) ⇒
      pub.sendNext(1)
      tp.expectMsg(InitMessage)
      tp.expectMsg(1)
      // Killing the sink actor must propagate a cancellation upstream.
      killActors(ref)
      pub.expectCancellation()
    }
  }
}
Example 35
Source File: ActorSubscriberTest.scala From intro-to-akka-streams with Apache License 2.0 | 5 votes |
package com.github.dnvriend.streams.sink

import akka.Done
import akka.actor.Actor.Receive
import akka.actor.{ ActorRef, Props }
import akka.event.LoggingReceive
import akka.stream.actor.ActorSubscriberMessage.{ OnComplete, OnError, OnNext }
import akka.stream.actor.{ ActorSubscriber, OneByOneRequestStrategy, RequestStrategy }
import akka.stream.scaladsl.{ Sink, Source }
import akka.stream.testkit.TestPublisher
import akka.stream.testkit.scaladsl.TestSource
import akka.testkit.TestProbe
import com.github.dnvriend.streams.TestSpec
import com.github.dnvriend.streams.sink.ActorSubscriberTest.TestActorSubscriber

import scala.concurrent.Future
import scala.reflect.ClassTag

object ActorSubscriberTest {
  final val OnNextMessage = "onNext"
  final val OnCompleteMessage = "onComplete"
  final val OnErrorMessage = "onError"

  // Subscriber that maps each stream signal to a plain string forwarded to `ref`,
  // requesting elements one at a time (OneByOneRequestStrategy).
  class TestActorSubscriber(ref: ActorRef) extends ActorSubscriber {
    override protected val requestStrategy: RequestStrategy = OneByOneRequestStrategy
    override def receive: Receive = LoggingReceive {
      case OnNext(msg)    ⇒ ref ! OnNextMessage
      case OnComplete     ⇒ ref ! OnCompleteMessage
      case OnError(cause) ⇒ ref ! OnErrorMessage
    }
  }
}

// NOTE(review): the test class below is commented out in the original source and is
// kept verbatim.
//class ActorSubscriberTest extends TestSpec {
//  def withForwarder(xs: Int*)(f: TestProbe ⇒ Unit): Unit = {
//    val tp = TestProbe()
//    val ref = new TestActorSubscriber(tp.ref)
//    Source(xs.toList).to(Sink.actorSubscriber(Props())).mapMaterializedValue(_ ⇒ Future.successful[Done]).run()
//    try f(tp) finally killActors(ref)
//  }
//
//}
Example 36
Source File: RandomDataProducer.scala From parquet4s with MIT License | 5 votes |
package com.github.mjakubowski84.parquet4s.indefinite

import akka.actor.{Actor, ActorRef, Cancellable, Props, Scheduler}
import akka.pattern.ask
import akka.util.Timeout

import scala.concurrent.duration._
import scala.concurrent.{Await, ExecutionContext}
import scala.util.Random

object RandomDataProducer {

  // Pool of words that the producer below pushes to Kafka at random.
  private val words = Seq("Example", "how", "to", "setup", "indefinite", "stream", "with", "Parquet", "writer")
}

/**
 * Sends a random word to Kafka on every tick of a scheduler actor whose tick
 * delay drifts between `MinDelay` and `MaxDelay`.
 */
trait RandomDataProducer {

  this: Akka with Logger with Kafka =>

  import RandomDataProducer._

  // BUG FIX: was `Random.nextInt(words.size - 1)`, which could never select the
  // last word ("writer") because nextInt's upper bound is already exclusive.
  private def nextWord: String = words(Random.nextInt(words.size))
  private def action(): Unit = sendKafkaMessage(nextWord)

  private lazy val scheduler: ActorRef = system.actorOf(FluctuatingSchedulerActor.props(action))
  // Generous enough for the actor to cancel its outstanding tick on Stop.
  implicit private val stopTimeout: Timeout = new Timeout(FluctuatingSchedulerActor.MaxDelay)

  /** Starts the fluctuating scheduler; messages begin flowing to Kafka. */
  def startDataProducer(): Unit = {
    logger.info("Starting scheduler that sends messages to Kafka...")
    scheduler ! FluctuatingSchedulerActor.Start
  }

  /** Asks the scheduler to stop and blocks until the ask completes (or times out). */
  def stopDataProducer(): Unit = {
    logger.info("Stopping scheduler...")
    Await.ready(scheduler.ask(FluctuatingSchedulerActor.Stop), Duration.Inf)
  }
}

private object FluctuatingSchedulerActor {

  case object Start
  case object ScheduleNext
  case object Stop

  // Bounds and starting point for the fluctuating tick delay.
  val MinDelay: FiniteDuration = 1.milli
  val MaxDelay: FiniteDuration = 500.millis
  val StartDelay: FiniteDuration = 100.millis

  // Direction in which the delay is currently drifting.
  trait Direction
  case object Up extends Direction
  case object Down extends Direction

  def props(action: () => Unit): Props = Props(new FluctuatingSchedulerActor(action))
}

/**
 * Runs `action` on each ScheduleNext tick; the delay between ticks drifts by a
 * random step of up to 10% of the current delay, reversing direction whenever
 * it would cross MinDelay or MaxDelay.
 */
private class FluctuatingSchedulerActor(action: () => Unit) extends Actor {

  import FluctuatingSchedulerActor._

  implicit def executionContext: ExecutionContext = context.system.dispatcher
  def scheduler: Scheduler = context.system.scheduler
  // Last scheduled tick, so Stop can cancel it.
  var scheduled: Option[Cancellable] = None

  override def receive: Receive = {
    case Start =>
      self ! ScheduleNext
      context.become(scheduling(StartDelay, direction = Down), discardOld = true)
  }

  def scheduling(delay: FiniteDuration, direction: Direction): Receive = {
    case ScheduleNext =>
      action()
      // Drift the delay by a random step (<= 10% of current), bouncing off bounds.
      val rate = Random.nextFloat / 10.0f
      val step = (delay.toMillis * rate).millis
      val (newDirection, newDelay) = direction match {
        case Up if delay + step < MaxDelay   => (Up, delay + step)
        case Up                              => (Down, delay - step)
        case Down if delay - step > MinDelay => (Down, delay - step)
        case Down                            => (Up, delay + step)
      }
      // Note: the *current* delay schedules the next tick; the new delay takes
      // effect on the tick after that.
      scheduled = Some(scheduler.scheduleOnce(delay, self, ScheduleNext))
      context.become(scheduling(newDelay, newDirection), discardOld = true)

    case Stop =>
      scheduled.foreach(_.cancel())
      context.stop(self)
  }
}
Example 37
Source File: TokenizerWrapper.scala From dbpedia-spotlight-model with Apache License 2.0 | 5 votes |
package org.dbpedia.spotlight.db.concurrent

import java.io.IOException
import java.util.concurrent.TimeUnit

import akka.actor.SupervisorStrategy.Restart
import akka.actor.{Actor, ActorSystem, OneForOneStrategy, Props}
import akka.pattern.ask
import akka.routing.SmallestMailboxRouter
import akka.util
import org.apache.commons.lang.NotImplementedException
import org.dbpedia.spotlight.db.model.{StringTokenizer, TextTokenizer}
import org.dbpedia.spotlight.model.{Text, Token}

import scala.concurrent.Await

// Wraps a pool of tokenizers behind an actor router so that tokenization
// requests can be served concurrently, one worker actor per tokenizer.
class TokenizerWrapper(val tokenizers: Seq[TextTokenizer]) extends TextTokenizer {

  // Per-request timeout for the ask below, in seconds.
  var requestTimeout = 60

  val system = ActorSystem()

  // One worker actor per underlying tokenizer instance.
  val workers = tokenizers.map { case tokenizer: TextTokenizer =>
    system.actorOf(Props(new TokenizerActor(tokenizer)))
  }.seq

  def size: Int = tokenizers.size

  // NOTE(review): Props[TokenizerActor] needs a no-arg constructor, which
  // TokenizerActor does not have; this appears to work only because the router
  // is handed pre-built routees and never creates actors itself.
  // Original author's comment: "This might be a hack".
  val router = system.actorOf(Props[TokenizerActor].withRouter(
      SmallestMailboxRouter(scala.collection.immutable.Iterable(workers:_*)).withSupervisorStrategy(
        // Restart a worker (up to 10 times) when the underlying tokenizer fails.
        OneForOneStrategy(maxNrOfRetries = 10) {
          case _: IOException => Restart
        })
    )
  )

  implicit val timeout = util.Timeout(requestTimeout, TimeUnit.SECONDS)

  // Tokenizes `text` in place (side effect on the Text object); blocks the
  // caller until a worker replies or the timeout expires.
  override def tokenizeMaybe(text: Text) {
    val futureResult = router ? TokenizerRequest(text)
    Await.result(futureResult, timeout.duration)
  }

  // Tokenizes and returns the tokens stored on the Text by the worker.
  override def tokenize(text: Text): List[Token] = {
    tokenizeMaybe(text)
    text.featureValue[List[Token]]("tokens").get
  }

  def tokenizeRaw(text: String): Seq[String] = {
    throw new NotImplementedException()
  }

  // Shuts down the private ActorSystem owned by this wrapper.
  def close() {
    system.shutdown()
  }

  def getStringTokenizer: StringTokenizer = tokenizers.head.getStringTokenizer
}

// Worker: tokenizes one Text per message. NPEs from the underlying tokenizer
// are rethrown as IOException so the supervisor strategy restarts the actor.
class TokenizerActor(val tokenizer: TextTokenizer) extends Actor {

  def receive = {
    case TokenizerRequest(text) => {
      try {
        sender ! tokenizer.tokenizeMaybe(text)
      } catch {
        case e: NullPointerException => throw new IOException("Could not tokenize.")
      }
    }
  }
}

case class TokenizerRequest(text: Text)
Example 38
Source File: AmqpSubscriberPerfSpec.scala From reliable-http-client with Apache License 2.0 | 5 votes |
package rhttpc.transport.amqp

import akka.Done
import akka.actor.{Actor, ActorSystem, Props}
import akka.http.scaladsl.Http
import akka.http.scaladsl.model.{HttpRequest, HttpResponse}
import akka.pattern._
import akka.stream.ActorMaterializer
import akka.testkit.{TestKit, TestProbe}
import dispatch.url
import org.scalatest.{BeforeAndAfterAll, FlatSpecLike, Ignore}
import rhttpc.transport.{Deserializer, InboundQueueData, OutboundQueueData, Serializer}

import scala.concurrent.duration._
import scala.concurrent.{Await, Future}
import scala.util.{Random, Try}

// Manual throughput benchmark for the AMQP subscriber; @Ignore keeps it out
// of regular test runs (it needs a live AMQP broker on the default port).
@Ignore
class AmqpSubscriberPerfSpec extends TestKit(ActorSystem("AmqpSubscriberPerfSpec")) with FlatSpecLike with BeforeAndAfterAll {
  import system.dispatcher

  implicit val materializer = ActorMaterializer()

  // Trivial codecs: messages are passed around as their toString / cast back.
  implicit def serializer[Msg] = new Serializer[Msg] {
    override def serialize(obj: Msg): String = obj.toString
  }

  implicit def deserializer[Msg] = new Deserializer[Msg] {
    override def deserialize(value: String): Try[Msg] = Try(value.asInstanceOf[Msg])
  }

  val queueName = "request"
  val outboundQueueData = OutboundQueueData(queueName, autoDelete = true, durability = false)
  val inboundQueueData = InboundQueueData(queueName, batchSize = 10, parallelConsumers = 10, autoDelete = true, durability = false)
  val count = 100

  private val interface = "localhost"
  private val port = 8081

  // Fake downstream HTTP service: replies 200 OK after a random 5-14s delay.
  def handle(request: HttpRequest) = {
    val delay = 5 + Random.nextInt(10)
    after(delay.seconds, system.scheduler)(Future.successful(HttpResponse()))
  }

  it should "have a good throughput" in {
    val bound = Await.result(
      Http().bindAndHandleAsync(
        handle,
        interface,
        port
      ),
      5.seconds
    )
    val http = dispatch.Http()
//      .configure(_.setMaxConnections(count)
//        .setExecutorService(Executors.newFixedThreadPool(count)))
    val connection = Await.result(AmqpConnectionFactory.connect(system), 5 seconds)
    val transport = AmqpTransport(
      connection = connection
    )
    val publisher = transport.publisher[String](outboundQueueData)
    val probe = TestProbe()
    // Consumer actor: each AMQP message triggers an HTTP call; completion is
    // piped back as Done, which both notifies the probe and acks the message.
    val actor = system.actorOf(Props(new Actor {
      override def receive: Receive = {
        case str: String =>
          http(url(s"http://$interface:$port") OK identity).map(_ => Done).pipeTo(self)(sender())
        case Done =>
          probe.ref ! Done
          sender() ! Done
      }
    }))
    val subscriber = transport.subscriber[String](inboundQueueData, actor)
    subscriber.start()

    try {
      measureMeanThroughput(count) {
        (1 to count).foreach { _ => publisher.publish("x") }
        // Wait for `count` Done notifications (up to 10 minutes).
        probe.receiveWhile(10 minutes, messages = count) { case a => a }
      }
    } finally {
      // Tear down in reverse order: subscriber, broker connection, HTTP binding.
      Await.result(subscriber.stop(), 5.seconds)
      connection.close(5 * 1000)
      Await.result(bound.unbind(), 5.seconds)
    }
  }

  // Runs `consume` and prints messages/second over its wall-clock duration.
  def measureMeanThroughput(count: Int)(consume: => Unit) = {
    val before = System.currentTimeMillis()
    consume
    val msgsPerSecond = count / ((System.currentTimeMillis() - before).toDouble / 1000)
    println(s"Throughput was: $msgsPerSecond msgs/sec")
  }

  override protected def afterAll(): Unit = {
    shutdown()
  }
}
Example 39
Source File: QueueActor.scala From reliable-http-client with Apache License 2.0 | 5 votes |
package rhttpc.transport.inmem

import akka.pattern._
import akka.actor.{Actor, ActorLogging, ActorRef, Props, Stash}
import akka.routing.{RoundRobinRoutingLogic, Routee, Router}
import akka.util.Timeout
import rhttpc.transport.{Message, RejectingMessage}

import scala.concurrent.Future
import scala.concurrent.duration.FiniteDuration
import scala.util.control.NonFatal

// In-memory queue: stashes incoming messages until at least one consumer is
// registered, then round-robins them to consumers via ask (so failures and
// timeouts can be observed and retried).
private class QueueActor(consumeTimeout: FiniteDuration,
                         retryDelay: FiniteDuration) extends Actor with Stash with ActorLogging {

  import context.dispatcher

  // consumer ref -> its routee wrapper, so it can be removed from the router later.
  private var consumers = Map.empty[ActorRef, AskingActorRefRouteeWithSpecifiedMessageType]

  private var router = Router(RoundRobinRoutingLogic(), collection.immutable.IndexedSeq.empty)

  override def receive: Receive = {
    case RegisterConsumer(consumer, fullMessage) =>
      val routee = AskingActorRefRouteeWithSpecifiedMessageType(consumer, consumeTimeout, handleResponse, fullMessage)
      consumers += consumer -> routee
      router = router.addRoutee(routee)
      log.debug(s"${self.path.name}: registered consumer, unstashing")
      // Replay messages that arrived before any consumer existed.
      unstashAll()
    case UnregisterConsumer(consumer) =>
      log.debug(s"${self.path.name}: unregistered consumer")
      consumers.get(consumer).foreach { routee =>
        consumers -= consumer
        router = router.removeRoutee(routee)
      }
      // Ack the unregistration with the unit value.
      sender() ! ((): Unit)
    case msg: Message[_] =>
      if (consumers.isEmpty) {
        log.debug(s"${self.path.name}: got message when no consumer registered, stashing")
        stash()
        // NOTE(review): this `timeout` is never used, and the unit ack below is
        // sent even though the message was only stashed, not consumed — this
        // looks like damage from how the snippet was extracted; confirm against
        // the upstream rhttpc sources before relying on it.
        implicit val timeout = Timeout(consumeTimeout)
        sender() ! ((): Unit)
      } else {
        router.route(msg, sender())
      }
  }

  // Inspects the consumer's ask result: timeouts and explicit rejections are
  // logged as REJECT; any other non-fatal failure schedules a redelivery of
  // the same message to self after `retryDelay`.
  private def handleResponse(future: Future[Any], msg: Message[_]): Unit =
    future.recover {
      case ex: AskTimeoutException =>
        log.error(ex, s"${self.path.name}: REJECT [${msg.content.getClass.getName}] because of ask timeout")
      case ex: Exception with RejectingMessage =>
        log.error(ex, s"${self.path.name}: REJECT [${msg.content.getClass.getName}] because of rejecting failure")
      case NonFatal(ex) =>
        log.error(ex, s"${self.path.name}: will RETRY [${msg.content.getClass.getName}] after $retryDelay because of failure")
        context.system.scheduler.scheduleOnce(retryDelay, self, msg)
    }
}

object QueueActor {
  def props(consumeTimeout: FiniteDuration, retryDelay: FiniteDuration): Props = Props(
    new QueueActor(
      consumeTimeout = consumeTimeout,
      retryDelay = retryDelay))
}

// Routee that delivers either the full Message or just its content (depending
// on `fullMessage`) via ask, handing the resulting Future to `handleResponse`.
private[inmem] case class AskingActorRefRouteeWithSpecifiedMessageType(ref: ActorRef,
                                                                       askTimeout: FiniteDuration,
                                                                       handleResponse: (Future[Any], Message[_]) => Unit,
                                                                       fullMessage: Boolean)
  extends Routee {

  override def send(message: Any, sender: ActorRef): Unit = {
    val typedMessage = message.asInstanceOf[Message[_]]
    val msgToSend = if (fullMessage) message else typedMessage.content
    handleResponse(ref.ask(msgToSend)(askTimeout, sender), typedMessage)
  }
}

private[inmem] case class RegisterConsumer(consumer: ActorRef, fullMessage: Boolean)

private[inmem] case class UnregisterConsumer(consumer: ActorRef)
Example 40
Source File: TransportActor.scala From reliable-http-client with Apache License 2.0 | 5 votes |
package rhttpc.transport.inmem

import akka.actor.{Actor, Props, Status}

import scala.util.control.NonFatal

/**
 * Hands out queue actors by name: replies with the existing child of that
 * name, or spawns a fresh one from `queueActorProps`. Any non-fatal failure
 * during creation is reported back to the asker as Status.Failure.
 */
private class TransportActor(queueActorProps: => Props) extends Actor {

  override def receive: Receive = {
    case GetOrCreateQueue(queueName) =>
      try {
        val queueRef = context
          .child(queueName)
          .getOrElse(context.actorOf(queueActorProps, queueName))
        sender() ! queueRef
      } catch {
        case NonFatal(failure) =>
          sender() ! Status.Failure(failure)
      }
  }
}

object TransportActor {
  def props(queueActorProps: => Props): Props =
    Props(new TransportActor(queueActorProps))
}

private[inmem] case class GetOrCreateQueue(name: String)
Example 41
Source File: PromiseSubscriptionCommandsListener.scala From reliable-http-client with Apache License 2.0 | 5 votes |
package rhttpc.client.subscription

import akka.actor.{Actor, Props, Status}

import scala.concurrent.Promise

/**
 * One-shot listener that completes `replyPromise` with the eventual reply (or
 * failure) for a single published request, then stops itself.
 *
 * Flow: publication outcome arrives first (RequestPublished / RequestAborted);
 * on successful publication the actor registers for the reply and switches to
 * `awaitingReply`.
 */
private class PromiseSubscriptionCommandsListener(pubPromise: ReplyFuture, replyPromise: Promise[Any])
                                                 (subscriptionManager: SubscriptionManager) extends PublicationListener {
  import context.dispatcher

  override def subscriptionPromiseRegistered(sub: SubscriptionOnResponse): Unit = {}

  override def receive: Actor.Receive = {
    case RequestPublished(subscription) =>
      subscriptionManager.confirmOrRegister(subscription, self)
      context.become(awaitingReply)
    case RequestAborted(_, cause) =>
      replyPromise.failure(cause)
      context.stop(self)
  }

  private val awaitingReply: Receive = {
    case MessageFromSubscription(Status.Failure(ex), _) =>
      replyPromise.failure(ex)
      context.stop(self)
    case MessageFromSubscription(reply, _) =>
      replyPromise.success(reply)
      context.stop(self)
  }

  // Route the publication outcome for this request to this actor.
  pubPromise.pipeTo(this)
}

private[subscription] object PromiseSubscriptionCommandsListener {
  def props(pubPromise: ReplyFuture, replyPromise: Promise[Any])
           (subscriptionManager: SubscriptionManager): Props =
    Props(new PromiseSubscriptionCommandsListener(pubPromise, replyPromise)(subscriptionManager))
}
Example 42
Source File: MessageDispatcherActorSpec.scala From reliable-http-client with Apache License 2.0 | 5 votes |
package rhttpc.client.subscription

import java.util.UUID

import akka.actor.{ActorSystem, Props}
import akka.testkit.{ImplicitSender, TestKit, TestProbe}
import org.scalatest._
import rhttpc.client.protocol.{Correlated, SuccessExchange}

/**
 * Verifies that MessageDispatcherActor only acks a correlated message after
 * the consumer has confirmed it, regardless of whether the reply arrives
 * before or after the subscription is confirmed.
 */
class MessageDispatcherActorSpec
  extends TestKit(ActorSystem("MessageDispatcherActorSpec"))
  with ImplicitSender
  with FlatSpecLike
  with Matchers {

  it should "ack after promise -> confirm -> reply -> consumed" in {
    val dispatcher = system.actorOf(Props[MessageDispatcherActor])
    val sub = SubscriptionOnResponse(UUID.randomUUID().toString)

    dispatcher ! RegisterSubscriptionPromise(sub)

    val replyProbe = TestProbe()
    dispatcher ! ConfirmOrRegisterSubscription(sub, replyProbe.ref)

    val ackProbe = TestProbe()
    ackProbe.send(dispatcher, Correlated(SuccessExchange("fooReq", "foo"), sub.correlationId))

    replyProbe.expectMsg(MessageFromSubscription("foo", sub))

    // No ack until the consumer confirms it handled the reply.
    ackProbe.expectNoMsg()
    replyProbe.reply("ok")
    ackProbe.expectMsg("ok")
    ()
  }

  it should "ack after promise -> reply -> confirm -> consumed" in {
    val dispatcher = system.actorOf(Props[MessageDispatcherActor])
    val sub = SubscriptionOnResponse(UUID.randomUUID().toString)

    dispatcher ! RegisterSubscriptionPromise(sub)

    // Reply arrives before the subscription is confirmed this time.
    val ackProbe = TestProbe()
    ackProbe.send(dispatcher, Correlated(SuccessExchange("fooReq", "foo"), sub.correlationId))

    val replyProbe = TestProbe()
    dispatcher ! ConfirmOrRegisterSubscription(sub, replyProbe.ref)

    replyProbe.expectMsg(MessageFromSubscription("foo", sub))

    // Still no ack until the consumer confirms.
    ackProbe.expectNoMsg()
    replyProbe.reply("ok")
    ackProbe.expectMsg("ok")
    ()
  }
}
Example 43
Source File: ReliableClientWithSubscriptionActorSpec.scala From reliable-http-client with Apache License 2.0 | 5 votes |
package rhttpc.client

import akka.actor.{ActorRef, ActorSystem, Props}
import akka.testkit._
import org.scalatest.Matchers
import rhttpc.client.subscription._

import scala.concurrent.ExecutionContext

/**
 * Exercises the subscription handshake of the reliable client through a mock
 * PublicationListener actor: success replies flow to the reply probe, and
 * failed publications surface as RequestAborted.
 */
class ReliableClientWithSubscriptionActorSpec
  extends TestKit(ActorSystem("ReliableClientWithSubscriptionActorSpec"))
  with ReliableClientBaseSpec
  with ImplicitSender
  with Matchers {

  it should "got success reply" in { fixture =>
    val replyProbe = TestProbe()
    val requester = system.actorOf(MockSubscriptionActor.props(fixture.client, replyProbe.ref))
    requester ! SendRequest
    fixture.transport.publicationPromise.success(Unit)
    expectMsg(Unit)
    fixture.transport.replySubscriptionPromise.success("bar")
    replyProbe.expectMsg("bar")
  }

  it should "got subscription aborted" in { fixture =>
    val replyProbe = TestProbe()
    val requester = system.actorOf(MockSubscriptionActor.props(fixture.client, replyProbe.ref))
    requester ! SendRequest
    fixture.transport.publicationPromise.failure(FailedAcknowledge)
    expectMsgAllClassOf(classOf[RequestAborted])
  }
}

/**
 * State machine: SendRequest -> waiting for publication outcome -> waiting
 * for the reply, forwarding the reply to `replyMock` and stopping.
 */
private class MockSubscriptionActor(client: InOutReliableClient[String], replyMock: ActorRef)
                                   (implicit ec: ExecutionContext) extends PublicationListener {

  override def receive: Receive = {
    case SendRequest =>
      client.send("foo") pipeTo this
  }

  override def subscriptionPromiseRegistered(sub: SubscriptionOnResponse): Unit = {
    context.become(waitingOnSubscriptionCommand(sender()))
  }

  private def waitingOnSubscriptionCommand(originalSender: ActorRef): Receive = {
    case RequestPublished(sub) =>
      client.subscriptionManager.confirmOrRegister(sub, self)
      // FIXME: replies with the Unit companion object, not the unit value
      originalSender ! Unit
      context.become(waitingOnReply)
    case aborted: RequestAborted =>
      originalSender ! aborted
      context.stop(self)
  }

  private def waitingOnReply: Receive = {
    case MessageFromSubscription(reply, _) =>
      replyMock ! reply
      context.stop(self)
  }
}

object MockSubscriptionActor {
  def props(client: InOutReliableClient[String], replyMock: ActorRef)(implicit ec: ExecutionContext): Props =
    Props(new MockSubscriptionActor(client, replyMock))
}

case object SendRequest
Example 44
Source File: ModelTrainer.scala From recommendersystem with Apache License 2.0 | 5 votes |
package com.infosupport.recommendedcontent.core

import akka.actor.{Props, ActorLogging, Actor}
import org.apache.spark.SparkContext
import org.apache.spark.mllib.recommendation.{Rating, ALS, MatrixFactorizationModel}
import com.datastax.spark.connector._

  // NOTE(review): the enclosing actor class definition was lost when this
  // snippet was extracted; `sc`, `context`, `sender` and `self` come from it.

  // Trains an ALS (collaborative-filtering) recommendation model from the
  // user/item ratings stored in Cassandra, replies to the asker with
  // TrainingResult(model), then stops this one-shot trainer actor.
  private def trainModel() = {
    // Cassandra table/keyspace are taken from the actor system configuration.
    val table = context.system.settings.config.getString("cassandra.table")
    val keyspace = context.system.settings.config.getString("cassandra.keyspace")

    // Retrieve the ratings given by users from the database.
    // Map them to the rating structure needed by the Alternate Least Squares algorithm.
    val ratings = sc.cassandraTable(keyspace, table).map(record => Rating(record.get[Int]("user_id"),
      record.get[Int]("item_id"), record.get[Double]("rating")))

    // These settings control how well the predictions are going
    // to fit the actual observations we loaded from Cassandra.
    // Modify these to optimize the model!
    val rank = 10
    val iterations = 10
    val lambda = 0.01

    val model = ALS.train(ratings, rank, iterations, lambda)
    sender ! TrainingResult(model)

    context.stop(self)
  }
}
Example 45
Source File: RecommenderSystem.scala From recommendersystem with Apache License 2.0 | 5 votes |
package com.infosupport.recommendedcontent.core

import java.io.Serializable

import akka.actor.{Props, Actor, ActorLogging}
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.Path
import org.apache.spark.SparkContext
import org.apache.spark.mllib.recommendation.MatrixFactorizationModel

  // NOTE(review): the enclosing actor class definition was lost when this
  // snippet was extracted; `log`, `model` and `sender` come from it.

  // Produces the top `count` product recommendations for `userId` from the
  // trained matrix-factorization model (empty list when no model is trained
  // yet) and replies to the asker with Recommendations(results).
  private def generateRecommendations(userId: Int, count: Int) = {
    log.info(s"Generating ${count} recommendations for user with ID ${userId}")

    // Generate recommendations based on the machine learning model.
    // When there's no trained model return an empty list instead.
    val results = model match {
      case Some(m) => m.recommendProducts(userId,count)
        .map(rating => Recommendation(rating.product,rating.rating))
        .toList

      case None => Nil
    }

    sender ! Recommendations(results)
  }
}
Example 46
Source File: WindTurbineSimulator.scala From akka_streams_tutorial with MIT License | 5 votes |
package sample.stream_actor

import akka.actor.{Actor, ActorLogging, Props}
import akka.http.scaladsl.model.StatusCode
import sample.stream_actor.WindTurbineSimulator._

case class WindTurbineSimulatorException(id: String) extends RuntimeException

/** Props factory plus the protocol emitted by the underlying WebSocket client. */
object WindTurbineSimulator {
  def props(id: String, endpoint: String) =
    Props(new WindTurbineSimulator(id, endpoint))

  final case object Upgraded
  final case object Connected
  final case object Terminated
  final case class ConnectionFailure(ex: Throwable)
  final case class FailedUpgrade(statusCode: StatusCode)
}

/**
 * Two-state actor driving a simulated wind turbine over a WebSocket:
 * `startup` until the connection is established, then `running`. Every
 * failure is escalated by throwing WindTurbineSimulatorException so the
 * supervisor decides how to recover.
 */
class WindTurbineSimulator(id: String, endpoint: String) extends Actor with ActorLogging {
  implicit private val system = context.system
  implicit private val executionContext = system.dispatcher

  // Connecting starts as a side effect of construction; events arrive at `self`.
  val webSocketClient = WebSocketClient(id, endpoint, self)

  override def receive: Receive = startup // initial state

  private def startup: Receive = {
    case Connected =>
      log.info(s"$id : WebSocket connected")
      context.become(running)
    case Upgraded =>
      log.info(s"$id : WebSocket upgraded")
    case FailedUpgrade(statusCode) =>
      log.error(s"$id : Failed to upgrade WebSocket connection: $statusCode")
      throw WindTurbineSimulatorException(id)
    case ConnectionFailure(ex) =>
      log.error(s"$id : Failed to establish WebSocket connection: $ex")
      throw WindTurbineSimulatorException(id)
  }

  private def running: Receive = {
    case Terminated =>
      log.error(s"$id : WebSocket connection terminated")
      throw WindTurbineSimulatorException(id)
    case ConnectionFailure(ex) =>
      log.error(s"$id : ConnectionFailure occurred: $ex")
      throw WindTurbineSimulatorException(id)
  }
}
Example 47
Source File: WebsocketClientActor.scala From akka_streams_tutorial with MIT License | 5 votes |
package alpakka.tcp_to_websockets.websockets

import akka.actor.{Actor, ActorLogging, ActorRef, Props}
import akka.http.scaladsl.model.StatusCode
import alpakka.tcp_to_websockets.websockets.WebsocketClientActor._
import org.apache.commons.lang3.exception.ExceptionUtils

import scala.concurrent.duration._

case class ConnectionException(cause: String) extends RuntimeException

// Props factory and protocol for the WebSocket client actor below.
object WebsocketClientActor {
  def props(id: String, endpoint: String, websocketConnectionStatusActor: ActorRef) =
    Props(new WebsocketClientActor(id, endpoint, websocketConnectionStatusActor))

  final case object Upgraded
  final case object Connected
  final case object Terminated
  final case class ConnectionFailure(ex: Throwable)
  final case class FailedUpgrade(statusCode: StatusCode)
  final case class SendMessage(msg: String)
}

// Two-state client: `startup` until the WebSocket is connected, then
// `running`. Connection-status changes are mirrored to
// `websocketConnectionStatusActor`; failures escalate by throwing
// ConnectionException so the supervisor can restart the client.
class WebsocketClientActor(id: String, endpoint: String, websocketConnectionStatusActor: ActorRef)
  extends Actor with ActorLogging {
  implicit private val system = context.system
  implicit private val executionContext = system.dispatcher

  // Connecting begins as a side effect of construction; events arrive at `self`.
  val webSocketClient = WebSocketClient(id, endpoint, self)

  override def receive: Receive = startup //initial state

  private def startup: Receive = {
    case Upgraded =>
      log.info(s"Client$id: WebSocket upgraded")
    case FailedUpgrade(statusCode) =>
      log.error(s"Client$id: failed to upgrade WebSocket connection: $statusCode")
      websocketConnectionStatusActor ! WebsocketConnectionStatusActor.Terminated
      throw ConnectionException(statusCode.toString())
    case ConnectionFailure(ex) =>
      log.error(s"Client $id: failed to establish WebSocket connection: $ex")
      websocketConnectionStatusActor ! WebsocketConnectionStatusActor.Terminated
      throw ConnectionException(ExceptionUtils.getRootCause(ex).getMessage)
    case Connected =>
      log.info(s"Client $id: WebSocket connected")
      websocketConnectionStatusActor ! WebsocketConnectionStatusActor.Connected
      context.become(running)
    case SendMessage(msg) =>
      // Not connected yet: retry the same message after a short delay.
      log.warning(s"In state startup. Can not receive message: $msg. Resend after 2 seconds")
      system.scheduler.scheduleOnce(2.seconds, self, SendMessage(msg))
  }

  private def running: Receive = {
    case SendMessage(msg) =>
      log.info(s"About to send message to WebSocket: $msg")
      webSocketClient.sendToWebsocket(msg)
    case Terminated =>
      log.error(s"Client $id: WebSocket connection terminated")
      websocketConnectionStatusActor ! WebsocketConnectionStatusActor.Terminated
      throw ConnectionException(s"Client $id: WebSocket connection terminated")
    case ConnectionFailure(ex) =>
      log.error(s"Client $id: ConnectionFailure occurred: $ex")
      websocketConnectionStatusActor ! WebsocketConnectionStatusActor.Terminated
      throw ConnectionException(ExceptionUtils.getRootCause(ex).getMessage)
  }

  // Whatever the reason for stopping, report the connection as terminated.
  override def postStop(): Unit = {
    websocketConnectionStatusActor ! WebsocketConnectionStatusActor.Terminated
  }
}
Example 48
Source File: WebsocketConnectionStatusActor.scala From akka_streams_tutorial with MIT License | 5 votes |
package alpakka.tcp_to_websockets.websockets

import akka.actor.{Actor, ActorLogging, Props}
import alpakka.tcp_to_websockets.websockets.WebsocketConnectionStatusActor.{Connected, ConnectionStatus, Terminated}

/** Props factory and protocol: status updates in, boolean status query out. */
object WebsocketConnectionStatusActor {
  def props(id: String, endpoint: String) =
    Props(new WebsocketConnectionStatusActor(id, endpoint))

  final case object Connected
  final case object Terminated
  final case object ConnectionStatus
}

/**
 * Tracks whether the WebSocket client `id` is currently connected to
 * `endpoint`; replies to ConnectionStatus with the current boolean. State is
 * held via context.become instead of a mutable field.
 */
class WebsocketConnectionStatusActor(id: String, endpoint: String) extends Actor with ActorLogging {
  implicit private val system = context.system
  implicit private val executionContext = system.dispatcher

  override def receive: Receive = status(isConnected = false)

  private def status(isConnected: Boolean): Receive = {
    case Connected =>
      log.info(s"Client $id: connected to: $endpoint")
      context.become(status(isConnected = true))
    case Terminated =>
      log.info(s"Client $id: terminated from: $endpoint")
      context.become(status(isConnected = false))
    case ConnectionStatus =>
      sender() ! isConnected
  }
}
Example 49
Source File: WordCountConsumer.scala From akka_streams_tutorial with MIT License | 5 votes |
package alpakka.kafka

import akka.Done
import akka.actor.{ActorSystem, Props}
import akka.kafka.scaladsl.Consumer.DrainingControl
import akka.kafka.scaladsl.{Committer, Consumer}
import akka.kafka.{CommitterSettings, ConsumerSettings, Subscriptions}
import akka.stream.scaladsl.Sink
import akka.util.Timeout
import alpakka.kafka.TotalFake.{IncrementMessage, IncrementWord}
import org.apache.kafka.clients.consumer.ConsumerConfig
import org.apache.kafka.common.serialization.{LongDeserializer, StringDeserializer}

import scala.concurrent.Future
import scala.concurrent.duration._

// Consumes word/message counts from Kafka, forwards selected counts to the
// TotalFake actor (ask, so the offset is only committed after the actor acks),
// and commits offsets one by one.
object WordCountConsumer extends App {
  implicit val system = ActorSystem("WordCountConsumer")
  implicit val ec = system.dispatcher

  val total = system.actorOf(Props[TotalFake], "totalFake")

  // Commit every single offset (batch size 1) — favors safety over throughput.
  val committerSettings = CommitterSettings(system).withMaxBatch(1)

  def createConsumerSettings(group: String): ConsumerSettings[String, java.lang.Long] = {
    ConsumerSettings(system, new StringDeserializer , new LongDeserializer)
      .withBootstrapServers("localhost:9092")
      .withGroupId(group)
      //Define consumer behavior upon starting to read a partition for which it does not have a committed offset or if the committed offset it has is invalid
      .withProperty(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest")
  }

  // Word-count stream: only records keyed "fakeNews" are forwarded to `total`;
  // everything else is passed through and its offset committed directly.
  def createAndRunConsumerWordCount(id: String) = {
    Consumer.committableSource(createConsumerSettings("wordcount consumer group"), Subscriptions.topics("wordcount-output"))
      .mapAsync(1) { msg =>
        //println(s"$id - Offset: ${msg.record.offset()} - Partition: ${msg.record.partition()} Consume msg with key: ${msg.record.key()} and value: ${msg.record.value()}")
        if (msg.record.key().equalsIgnoreCase("fakeNews")) { //hardcoded because WordCountProducer.fakeNewsKeyword does not work
          import akka.pattern.ask
          implicit val askTimeout: Timeout = Timeout(3.seconds)
          (total ? IncrementWord(msg.record.value.toInt, id))
            .mapTo[Done]
            .map(_ => msg.committableOffset)
        } else {
          Future(msg).map(_ => msg.committableOffset)
        }
      }
      .via(Committer.flow(committerSettings))
      .toMat(Sink.seq)(DrainingControl.apply)
      .run()
  }

  // Message-count stream: every record is forwarded to `total` before commit.
  def createAndRunConsumerMessageCount(id: String) = {
    Consumer.committableSource(createConsumerSettings("messagecount consumer group"), Subscriptions.topics("messagecount-output"))
      .mapAsync(1) { msg =>
        //println(s"$id - Offset: ${msg.record.offset()} - Partition: ${msg.record.partition()} Consume msg with key: ${msg.record.key()} and value: ${msg.record.value()}")
        import akka.pattern.ask
        implicit val askTimeout: Timeout = Timeout(3.seconds)
        (total ? IncrementMessage(msg.record.value.toInt, id))
          .mapTo[Done]
          .map(_ => msg.committableOffset)
      }
      .via(Committer.flow(committerSettings))
      .toMat(Sink.seq)(DrainingControl.apply)
      .run()
  }

  // Two parallel word-count consumers (same group) plus one message counter.
  val drainingControlW1 = createAndRunConsumerWordCount("W.1")
  val drainingControlW2 = createAndRunConsumerWordCount("W.2")
  val drainingControlM = createAndRunConsumerMessageCount("M")

  // Drain in-flight messages and commit offsets before the JVM exits.
  sys.addShutdownHook{
    println("Got control-c cmd from shell, about to shutdown...")
    drainingControlW1.drainAndShutdown()
    drainingControlW2.drainAndShutdown()
    drainingControlM.drainAndShutdown()
  }
}
Example 50
Source File: TotalTweetsScheduler.scala From redrock with Apache License 2.0 | 5 votes |
package com.restapi

import java.io.{File, FileInputStream}
import akka.actor.{ActorRef, Actor, ActorSystem, Props}
import akka.io.IO
import org.slf4j.LoggerFactory
import play.api.libs.json.Json
import spray.can.Http
import akka.pattern.ask
import spray.http.DateTime
import scala.concurrent.duration._
import akka.util.Timeout
import scala.concurrent.ExecutionContext.Implicits.global
import org.apache.commons.codec.digest.DigestUtils
import scala.io.Source

case object GetTotalTweetsScheduler

/** Process-wide, volatile cache of the tweet total read from Elasticsearch. */
object CurrentTotalTweets {
  @volatile
  var totalTweets: Long = 0
}

/**
 * Refreshes CurrentTotalTweets.totalTweets from Elasticsearch every
 * `interval`, starting after `delay`. The periodic task is wired up as a side
 * effect of constructing the actor.
 */
class ExecuterTotalTweetsES(delay: FiniteDuration, interval: FiniteDuration) extends Actor {

  context.system.scheduler.schedule(delay, interval) {
    getTotalTweetsES
  }

  val logger = LoggerFactory.getLogger(this.getClass)

  override def receive: Actor.Receive = {
    case GetTotalTweetsScheduler =>
      logger.info(s"Getting Total of Tweets. Begin: ${CurrentTotalTweets.totalTweets}")
    case _ => // just ignore any messages
  }

  def getTotalTweetsES: Unit = {
    val esRequest = new GetElasticsearchResponse(0, Array[String](), Array[String](),
      LoadConf.restConf.getString("searchParam.defaulStartDatetime"),
      LoadConf.restConf.getString("searchParam.defaultEndDatetime"),
      LoadConf.esConf.getString("decahoseIndexName"))
    val totalTweetsResponse = Json.parse(esRequest.getTotalTweetsESResponse())
    logger.info(s"Getting Total of Tweets. Current: ${CurrentTotalTweets.totalTweets}")
    CurrentTotalTweets.totalTweets = (totalTweetsResponse \ "hits" \ "total").as[Long]
    // NOTE(review): the message says "users" but the value tracked is tweets.
    logger.info(s"Total users updated. New: ${CurrentTotalTweets.totalTweets}")
  }
}
Example 51
Source File: Application.scala From redrock with Apache License 2.0 | 5 votes |
package com.restapi

import akka.actor.{ActorSystem, Props}
import akka.io.IO
import spray.can.Http
import akka.pattern.ask
import scala.concurrent.duration._
import akka.util.Timeout
import org.slf4j.LoggerFactory;

/**
 * Entry point: boots the actor system, the REST service actor, the session
 * bookkeeping actors and the tweet-total scheduler, then binds the HTTP server.
 */
object Application extends App {
  val logger = LoggerFactory.getLogger(this.getClass)

  // we need an ActorSystem to host our application in
  implicit val system = ActorSystem(LoadConf.restConf.getString("actor"))

  // create and start our service actor
  val service = system.actorOf(Props[MyServiceActor], LoadConf.restConf.getString("name"))

  val sessionTimeout = system.actorOf(Props[SessionTimeoutActor])

  val sessionTable = system.actorOf(Props(classOf[SimpleSession], sessionTimeout,
    LoadConf.accessConf.getInt("delay").seconds,
    LoadConf.accessConf.getInt("timeout-interval").seconds))
  sessionTable ! InitSessionTable

  val sessionLoader = system.actorOf(Props(classOf[LoadSessionActor], sessionTable,
    LoadConf.accessConf.getInt("delay").seconds,
    LoadConf.accessConf.getInt("check-interval").seconds))
  sessionLoader ! InitFileMd5Sum

  val schedTotalTweets = system.actorOf(Props(classOf[ExecuterTotalTweetsES],
    LoadConf.restConf.getInt("totalTweetsScheduler.delay").seconds,
    LoadConf.restConf.getInt("totalTweetsScheduler.reapeatEvery").seconds))
  schedTotalTweets ! GetTotalTweetsScheduler

  // Bind the HTTP server on all interfaces at the configured port.
  implicit val timeout = Timeout(800.seconds)
  IO(Http) ? Http.Bind(service, interface = "0.0.0.0", port = LoadConf.restConf.getInt("port"))

  logger.info( s"""Application: ${LoadConf.globalConf.getString("appName")} running version: ${LoadConf.globalConf.getString("appVersion")}""".stripMargin)
}
Example 52
Source File: AmqpManage.scala From gatling-amqp with MIT License | 5 votes |
package io.gatling.amqp.infra

import akka.actor.{ActorRef, Props}
import io.gatling.amqp.config._
import io.gatling.amqp.data._
import io.gatling.core.result.writer.StatsEngine

import scala.collection.JavaConversions._

/**
 * Performs AMQP topology setup — exchanges, queues and bindings — in response
 * to declaration messages, delegating the channel work to `interact`.
 */
class AmqpManage(statsEngine: StatsEngine)(implicit amqp: AmqpProtocol) extends AmqpActor {
  override def receive = {
    case command @ DeclareExchange(AmqpExchange(name, tpe, durable, autoDelete, arguments)) =>
      log.info(s"Initializing AMQP exchange $name")
      interact(command) { channel =>
        channel.exchangeDeclare(name, tpe, durable, autoDelete, arguments)
      }

    case command @ DeclareQueue(AmqpQueue(name, durable, exclusive, autoDelete, arguments)) =>
      log.info(s"Initializing AMQP queue $name")
      interact(command) { channel =>
        channel.queueDeclare(name, durable, exclusive, autoDelete, arguments)
      }

    case command @ BindQueue(exchange, queue, routingKey, arguments) =>
      log.info(s"Initializing AMQP binding $exchange to $queue")
      interact(command) { channel =>
        channel.queueBind(queue.name, exchange.name, routingKey, arguments)
      }
  }
}

object AmqpManage {
  def props(statsEngine: StatsEngine, amqp: AmqpProtocol) = Props(classOf[AmqpManage], statsEngine, amqp)
}
Example 53
Source File: AmqpPublisher.scala From gatling-amqp with MIT License | 5 votes |
package io.gatling.amqp.infra

import java.util.concurrent.atomic._

import akka.actor.Props
import com.rabbitmq.client._
import io.gatling.amqp.config._
import io.gatling.amqp.data._
import io.gatling.amqp.event._
import io.gatling.core.result.writer.StatsEngine
import io.gatling.core.session.Session
import io.gatling.core.util.TimeHelper.nowMillis

import scala.util._

/**
 * Actor that publishes AMQP messages for one simulated user/connection.
 *
 * In confirm mode it publishes asynchronously and relies on broker
 * ack/nack callbacks; otherwise it publishes synchronously and reports the
 * outcome immediately. All publish lifecycle events are sent to the nacker
 * for result tracking.
 *
 * Fix vs. previous version: removed the unused local `startedAt` in
 * publishSync (its value was never read; timestamps are taken via nowMillis
 * at each event).
 */
class AmqpPublisher(actorName: String)(implicit amqp: AmqpProtocol) extends AmqpActor {
  private val nacker = amqp.nacker
  private val isConfirmMode = amqp.isConfirmMode

  // Lifecycle events go to the nacker actor (alternative event-bus routing kept for reference).
  private def sendEvent(event: AmqpEvent): Unit = nacker ! event
  // private def sendEvent(event: AmqpEvent): Unit = amqp.event.publish(event)

  override def preStart(): Unit = {
    super.preStart()
    if (isConfirmMode) {
      // Enable publisher confirms; the broker then acks/nacks each sequence number.
      channel.confirmSelect()
      channel.addConfirmListener(new ConfirmListener() {
        def handleAck(no: Long, multi: Boolean): Unit =
          sendEvent(AmqpPublishAcked(actorName, no.toInt, multi, nowMillis))
        def handleNack(no: Long, multi: Boolean): Unit =
          sendEvent(AmqpPublishNacked(actorName, no.toInt, multi, nowMillis))
      })
    }
  }

  // Local counter used when the channel does not track sequence numbers (non-confirm mode).
  private val localPublishSeqNoCounter = new AtomicInteger(1)

  private def getNextPublishSeqNo: Int = {
    if (isConfirmMode) channel.getNextPublishSeqNo.toInt
    else localPublishSeqNoCounter.getAndIncrement
  }

  override def receive = {
    case AmqpPublishRequest(req, session) if isConfirmMode =>
      publishAsync(req, session)
    case AmqpPublishRequest(req, session) =>
      publishSync(req, session)
  }

  /** Publish and report success/failure immediately (non-confirm mode). */
  protected def publishSync(req: PublishRequest, session: Session): Unit = {
    import req._
    val no: Int = getNextPublishSeqNo
    val event = AmqpPublishing(actorName, no, nowMillis, req, session)
    Try {
      channel.basicPublish(exchange.name, routingKey, props, bytes)
    } match {
      case Success(_) =>
        sendEvent(AmqpPublished(actorName, no, nowMillis, event))
      case Failure(e) =>
        sendEvent(AmqpPublishFailed(actorName, no, nowMillis, e))
        log.error(s"basicPublish($exchange) failed", e)
    }
  }

  /** Publish and let the broker's confirm listener report the final outcome (confirm mode). */
  protected def publishAsync(req: PublishRequest, session: Session): Unit = {
    import req._
    val no: Int = getNextPublishSeqNo
    sendEvent(AmqpPublishing(actorName, no, nowMillis, req, session))
    try {
      channel.basicPublish(exchange.name, routingKey, props, bytes)
    } catch {
      case e: Exception =>
        sendEvent(AmqpPublishFailed(actorName, no, nowMillis, e))
        log.error(s"basicPublish($exchange) failed", e)
    }
  }
}

object AmqpPublisher {
  def props(name: String, amqp: AmqpProtocol) = Props(classOf[AmqpPublisher], name, amqp)
}
Example 54
Source File: HelloAkka.scala From sbt-reactive-app with Apache License 2.0 | 5 votes |
package hello.akka

import akka.cluster.Cluster
import akka.cluster.ClusterEvent._
import akka.actor.{ Actor, ActorSystem, Props }
import akka.discovery._
import com.typesafe.config.ConfigFactory

final case class Greet(name: String)

/** Cluster-aware greeter: prints greetings and logs cluster membership changes. */
class GreeterActor extends Actor {
  val cluster = Cluster(context.system)

  override def preStart = {
    // Replay current membership as events, then keep streaming changes.
    cluster.subscribe(self, initialStateMode = InitialStateAsEvents, classOf[MemberEvent], classOf[UnreachableMember])
  }

  override def postStop = {
    cluster.unsubscribe(self)
  }

  def receive = {
    case Greet(name) =>
      println(s"Hello, $name")
    case MemberUp(member) =>
      println(s"Member up: $member")
    case MemberRemoved(member, _) =>
      println(s"Member down: $member")
    case _: MemberEvent =>
      // all other membership events are ignored
  }
}

object HelloAkka {
  def main(args: Array[String]) = startup()

  def startup() = {
    val clusterSystem = ActorSystem("ClusterSystem")
    val discovery = ServiceDiscovery(clusterSystem).discovery
    val greeter = clusterSystem.actorOf(Props[GreeterActor], name = "GreeterActor")
    greeter ! Greet("[unnamed]")
  }
}
Example 55
Source File: DemoApp.scala From sbt-reactive-app with Apache License 2.0 | 5 votes |
package foo

import akka.actor.{ Actor, ActorLogging, ActorSystem, Props }
import akka.cluster.ClusterEvent.ClusterDomainEvent
import akka.cluster.{ Cluster, ClusterEvent }
import akka.http.scaladsl.Http
import akka.http.scaladsl.model._
import akka.http.scaladsl.server.Directives._
import akka.management.AkkaManagement
import akka.management.cluster.bootstrap.ClusterBootstrap
import akka.stream.ActorMaterializer

/**
 * Demo application: starts Akka Management and Cluster Bootstrap, watches
 * cluster domain events via a ClusterWatcher actor, and serves a trivial
 * HTTP route on port 8080.
 */
object DemoApp extends App {

  implicit val system = ActorSystem("Appka")

  import system.log
  implicit val mat = ActorMaterializer()
  val cluster = Cluster(system)

  log.info(s"Started [$system], cluster.selfAddress = ${cluster.selfAddress}")
  log.info("something2")

  // The //# markers below delimit a documentation snippet; keep them intact.
  //#start-akka-management
  AkkaManagement(system).start()
  //#start-akka-management
  ClusterBootstrap(system).start()

  // Log every cluster domain event (initial state delivered as events).
  cluster.subscribe(
    system.actorOf(Props[ClusterWatcher]),
    ClusterEvent.InitialStateAsEvents, classOf[ClusterDomainEvent])

  // add real app routes here
  val routes = path("hello") {
    get {
      complete(
        HttpEntity(
          ContentTypes.`text/html(UTF-8)`,
          "<h1>Hello</h1>"))
    }
  }
  Http().bindAndHandle(routes, "0.0.0.0", 8080)

  Cluster(system).registerOnMemberUp({
    log.info("Cluster member is up!")
  })

}

/** Logs every message it receives, prefixed with this node's cluster address. */
class ClusterWatcher extends Actor with ActorLogging {
  val cluster = Cluster(context.system)

  override def receive = {
    case msg ⇒ log.info(s"Cluster ${cluster.selfAddress} >>> " + msg)
  }
}
Example 56
Source File: ActorDemo.scala From logging with Apache License 2.0 | 5 votes |
package demo.test

import java.net.InetAddress

import akka.actor.{Props, Actor, ActorSystem}
import com.persist.logging._
import logging_demo.BuildInfo

import scala.concurrent.duration._
import scala.language.postfixOps
import scala.concurrent.Await

object DemoActor {
  def props() = Props(new DemoActor())
}

/** Demo actor that logs via com.persist.logging's ActorLogging mixin. */
class DemoActor() extends Actor with ActorLogging {
  println(this.getClass.getSimpleName)

  def receive = {
    case "foo" =>
      log.info("Saw foo")
    case "done" =>
      // End of the demo run: stop this actor.
      context.stop(self)
    case x: Any =>
      // Anything else is unexpected; log it as a structured error map.
      log.error(Map("@msg" -> "Unexpected actor message", "message" -> x.toString))
  }
}

case class ActorDemo(system: ActorSystem) {
  /** Sends one known, one unexpected, and one terminating message to the demo actor. */
  def demo(): Unit = {
    val demoRef = system.actorOf(DemoActor.props(), name = "Demo")
    demoRef ! "foo"
    demoRef ! "bar"
    demoRef ! "done"
  }
}

object ActorDemo {
  def main(args: Array[String]) {
    val system = ActorSystem("test")
    val host = InetAddress.getLocalHost.getHostName
    val loggingSystem = LoggingSystem(system, BuildInfo.name, BuildInfo.version, host)

    ActorDemo(system).demo()

    // Flush the logging system before terminating the actor system.
    Await.result(loggingSystem.stop, 30 seconds)
    Await.result(system.terminate(), 20 seconds)
  }
}
Example 57
Source File: OtherApis.scala From logging with Apache License 2.0 | 5 votes |
package demo.test

import java.net.InetAddress

import akka.actor.{Props, Actor, ActorSystem}
import com.persist.logging._
import logging_demo.BuildInfo

import scala.concurrent.duration._
import scala.language.postfixOps
import scala.concurrent.Await

import org.slf4j.LoggerFactory

// Demonstrates that a plain slf4j logger is captured by the logging system.
case class Slf4jDemo() {
  val slf4jlog = LoggerFactory.getLogger(classOf[Slf4jDemo])

  def demo(): Unit = {
    slf4jlog.warn("slf4j")
  }
}

object AkkaActor {
  def props() = Props(new AkkaActor())
}

// Actor that uses Akka's built-in logging API (akka.actor.ActorLogging),
// not com.persist.logging — shows both APIs flow into the same system.
class AkkaActor() extends Actor with akka.actor.ActorLogging {
  def receive = {
    case "foo" => log.warning("Saw foo")
    case "done" => context.stop(self) // terminate after the demo run
    case x: Any => log.error(s"Unexpected actor message: ${x}")
  }
}

case class AkkaDemo(system: ActorSystem) {
  // Sends three messages: "foo" is logged, "bar" hits the error case, "done" stops the actor.
  def demo(): Unit = {
    val a = system.actorOf(AkkaActor.props(), name="Demo")
    a ! "foo"
    a ! "bar"
    a ! "done"
  }
}

/** Entry point: routes slf4j and akka logging through the persist logging system. */
object OtherApis {
  def main(args: Array[String]) {
    val system = ActorSystem("test")
    val host = InetAddress.getLocalHost.getHostName
    val loggingSystem = LoggingSystem(system, BuildInfo.name, BuildInfo.version, host)

    val slf = Slf4jDemo()
    slf.demo()

    val act = AkkaDemo(system)
    act.demo()

    // Flush and stop logging before terminating the actor system.
    Await.result(loggingSystem.stop, 30 seconds)
    Await.result(system.terminate(), 20 seconds)
  }
}
Example 58
Source File: Persistence.scala From Principles-of-Reactive-Programming with GNU General Public License v3.0 | 5 votes |
package kvstore

import akka.actor.{Props, Actor}
import scala.util.Random
import java.util.concurrent.atomic.AtomicInteger

object Persistence {
  case class Persist(key: String, valueOption: Option[String], id: Long)
  case class Persisted(key: String, id: Long)

  class PersistenceException extends Exception("Persistence failure")

  def props(flaky: Boolean): Props = Props(classOf[Persistence], flaky)
}

/**
 * Simulated persistence layer: acknowledges each Persist request with a
 * Persisted message. When `flaky` is set it randomly fails (roughly half
 * the time) by throwing PersistenceException instead of acknowledging.
 */
class Persistence(flaky: Boolean) extends Actor {
  import Persistence._

  def receive = {
    case Persist(key, _, id) =>
      val acknowledge = !flaky || Random.nextBoolean()
      if (acknowledge) sender ! Persisted(key, id)
      else throw new PersistenceException
  }
}
Example 59
Source File: Replicator.scala From Principles-of-Reactive-Programming with GNU General Public License v3.0 | 5 votes |
package kvstore

import akka.actor.Props
import akka.actor.Actor
import akka.actor.ActorRef
import scala.concurrent.duration._

object Replicator {
  // Request to replicate (or remove, when valueOption is None) a key/value pair.
  case class Replicate(key: String, valueOption: Option[String], id: Long)
  case class Replicated(key: String, id: Long)

  // Wire protocol toward the replica: snapshots are ordered by `seq`.
  case class Snapshot(key: String, valueOption: Option[String], seq: Long)
  case class SnapshotAck(key: String, seq: Long)

  def props(replica: ActorRef): Props = Props(new Replicator(replica))
}

// Course-assignment skeleton: the replication logic toward `replica` is
// intentionally left unimplemented (receive matches and drops everything).
class Replicator(val replica: ActorRef) extends Actor {
  import Replicator._
  import Replica._
  import context.dispatcher

  def receive: Receive = {
    case _ =>
  }

}
Example 60
Source File: Replica.scala From Principles-of-Reactive-Programming with GNU General Public License v3.0 | 5 votes |
package kvstore

import akka.actor.{ OneForOneStrategy, Props, ActorRef, Actor }
import kvstore.Arbiter._
import scala.collection.immutable.Queue
import akka.actor.SupervisorStrategy.Restart
import scala.annotation.tailrec
import akka.pattern.{ ask, pipe }
import akka.actor.Terminated
import scala.concurrent.duration._
import akka.actor.PoisonPill
import akka.actor.OneForOneStrategy
import akka.actor.SupervisorStrategy
import akka.util.Timeout

object Replica {
  // Client-facing operations: every operation carries a key and a client-chosen id.
  sealed trait Operation {
    def key: String
    def id: Long
  }
  case class Insert(key: String, value: String, id: Long) extends Operation
  case class Remove(key: String, id: Long) extends Operation
  case class Get(key: String, id: Long) extends Operation

  // Replies sent back to the client, correlated by the operation id.
  sealed trait OperationReply
  case class OperationAck(id: Long) extends OperationReply
  case class OperationFailed(id: Long) extends OperationReply
  case class GetResult(key: String, valueOption: Option[String], id: Long) extends OperationReply

  def props(arbiter: ActorRef, persistenceProps: Props): Props = Props(new Replica(arbiter, persistenceProps))
}

// Course-assignment skeleton: the replica's behavior (joining via the
// arbiter, persistence, replication) is intentionally left unimplemented.
class Replica(val arbiter: ActorRef, persistenceProps: Props) extends Actor {
  import Replica._
  import Replicator._
  import Persistence._
  import context.dispatcher

  val replica: Receive = {
    case _ =>
  }

}
Example 61
Source File: Tools.scala From Principles-of-Reactive-Programming with GNU General Public License v3.0 | 5 votes |
package kvstore

import akka.actor.ActorSystem
import scala.concurrent.duration.FiniteDuration
import akka.testkit.TestProbe
import akka.actor.{ ActorRef, Actor }
import org.scalatest.Matchers
import org.scalatest.FunSuiteLike
import akka.actor.Props
import akka.testkit.TestKit
import akka.testkit.ImplicitSender
import scala.concurrent.duration._

object Tools {
  // Adapter actor that forwards every message to a TestProbe, so a probe can
  // stand in wherever an ActorRef built from Props is required.
  class TestRefWrappingActor(val probe: TestProbe) extends Actor {
    def receive = { case msg => probe.ref forward msg }
  }
}

/**
 * Test helpers for the kvstore assignment, mixed into the spec classes.
 * Provides probe-backed Props and a Session driver for exercising a replica.
 */
trait Tools { this: TestKit with FunSuiteLike with Matchers with ImplicitSender =>

  import Arbiter._
  import Tools._

  def probeProps(probe: TestProbe): Props = Props(classOf[TestRefWrappingActor], probe)

  /**
   * Drives one client "session" against `replica`: issues operations with
   * increasing ids and checks acks/results through `probe`.
   */
  class Session(val probe: TestProbe, val replica: ActorRef) {
    import Replica._

    // Monotonically increasing operation id; @volatile as tests may read it across threads.
    @volatile private var seq = 0L

    private def nextSeq: Long = {
      val next = seq
      seq += 1
      next
    }

    // Local mirror of the expected key/value state, consulted by getAndVerify.
    @volatile private var referenceMap = Map.empty[String, String]

    def waitAck(s: Long): Unit = probe.expectMsg(OperationAck(s))

    def waitFailed(s: Long): Unit = probe.expectMsg(OperationFailed(s))

    // Fire-and-return: sends Insert and returns the operation id (ack awaited separately).
    def set(key: String, value: String): Long = {
      referenceMap += key -> value
      val s = nextSeq
      probe.send(replica, Insert(key, value, s))
      s
    }

    def setAcked(key: String, value: String): Unit = waitAck(set(key, value))

    def remove(key: String): Long = {
      referenceMap -= key
      val s = nextSeq
      probe.send(replica, Remove(key, s))
      s
    }

    def removeAcked(key: String): Unit = waitAck(remove(key))

    // Issues a Get and asserts the result matches the local reference map.
    def getAndVerify(key: String): Unit = {
      val s = nextSeq
      probe.send(replica, Get(key, s))
      probe.expectMsg(GetResult(key, referenceMap.get(key), s))
    }

    def get(key: String): Option[String] = {
      val s = nextSeq
      probe.send(replica, Get(key, s))
      probe.expectMsgType[GetResult].valueOption
    }

    def nothingHappens(duration: FiniteDuration): Unit = probe.expectNoMsg(duration)
  }

  def session(replica: ActorRef)(implicit system: ActorSystem) = new Session(TestProbe(), replica)
}
Example 62
Source File: IntegrationSpec.scala From Principles-of-Reactive-Programming with GNU General Public License v3.0 | 5 votes |
package kvstore

import akka.actor.{ Actor, Props, ActorRef, ActorSystem }
import akka.testkit.{ TestProbe, ImplicitSender, TestKit }
import org.scalatest.{ BeforeAndAfterAll, FlatSpec, Matchers }
import scala.concurrent.duration._
import org.scalatest.FunSuiteLike
import org.scalactic.ConversionCheckedTripleEquals

// Integration-test shell for the kvstore assignment: wires the TestKit and
// the Tools helpers but defines no test cases yet; shuts the actor system
// down after the run.
class IntegrationSpec(_system: ActorSystem) extends TestKit(_system)
    with FunSuiteLike
        with Matchers
    with BeforeAndAfterAll
    with ConversionCheckedTripleEquals
    with ImplicitSender
    with Tools {

  import Replica._
  import Replicator._
  import Arbiter._

  // Auxiliary no-arg constructor used by ScalaTest's reflective instantiation.
  def this() = this(ActorSystem("ReplicatorSpec"))

  override def afterAll: Unit = system.shutdown()

}
Example 63
Source File: SidechainNodeViewSynchronizer.scala From Sidechains-SDK with MIT License | 5 votes |
package com.horizen.network

import akka.actor.{ActorRef, ActorRefFactory, Props}
import com.horizen._
import com.horizen.block.SidechainBlock
import com.horizen.validation.{BlockInFutureException, InconsistentDataException}
import scorex.core.network.NodeViewSynchronizer
import scorex.core.network.NodeViewSynchronizer.ReceivableMessages.SyntacticallyFailedModification
import scorex.core.serialization.ScorexSerializer
import scorex.core.settings.NetworkSettings
import scorex.core.utils.NetworkTimeProvider
import scorex.core.{ModifierTypeId, NodeViewModifier}

import scala.concurrent.ExecutionContext

/**
 * Sidechain-specific NodeViewSynchronizer: swaps in a SidechainDeliveryTracker
 * and refines how syntactically failed modifiers are penalized, depending on
 * the validation exception that caused the failure.
 */
class SidechainNodeViewSynchronizer(networkControllerRef: ActorRef,
                                    viewHolderRef: ActorRef,
                                    syncInfoSpec: SidechainSyncInfoMessageSpec.type,
                                    networkSettings: NetworkSettings,
                                    timeProvider: NetworkTimeProvider,
                                    modifierSerializers: Map[ModifierTypeId, ScorexSerializer[_ <: NodeViewModifier]])(implicit ec: ExecutionContext)
  extends NodeViewSynchronizer[SidechainTypes#SCBT, SidechainSyncInfo, SidechainSyncInfoMessageSpec.type,
    SidechainBlock, SidechainHistory, SidechainMemoryPool](networkControllerRef, viewHolderRef, syncInfoSpec, networkSettings, timeProvider, modifierSerializers){

  // Sidechain-aware tracker; exposes peerInfo used below for targeted penalties.
  override protected val deliveryTracker = new SidechainDeliveryTracker(context.system, deliveryTimeout, maxDeliveryChecks, self)

  // Handles modifiers that failed syntactic validation; the penalty depends on why.
  private val onSyntacticallyFailedModifier: Receive = {
    case SyntacticallyFailedModification(mod, exception) =>
      exception match {
        case _: BlockInFutureException =>
          // When next time NodeViewSynchronizer.processInv will be emitted for mod.id it will be processed again.
          // So no ban for mod.id
          deliveryTracker.setUnknown(mod.id)
        case _: InconsistentDataException =>
          // Try to ban the sender only (in case of modifier from remote)
          val peerOpt = deliveryTracker.peerInfo(mod.id)
          deliveryTracker.setUnknown(mod.id)
          peerOpt.foreach(penalizeMisbehavingPeer)
        case _ => // InvalidBlockException, InvalidSidechainBlockHeaderException and all other exceptions
          // Ban both mod.id and peer
          deliveryTracker.setInvalid(mod.id).foreach(penalizeMisbehavingPeer)
      }
  }

  // Our handler takes precedence; everything else falls through to the base synchronizer.
  override protected def viewHolderEvents: Receive = onSyntacticallyFailedModifier orElse super.viewHolderEvents
}

object SidechainNodeViewSynchronizer {
  def props(networkControllerRef: ActorRef,
            viewHolderRef: ActorRef,
            syncInfoSpec: SidechainSyncInfoMessageSpec.type,
            networkSettings: NetworkSettings,
            timeProvider: NetworkTimeProvider,
            modifierSerializers: Map[ModifierTypeId, ScorexSerializer[_ <: NodeViewModifier]])
           (implicit ex: ExecutionContext): Props =
    Props(new SidechainNodeViewSynchronizer(networkControllerRef, viewHolderRef, syncInfoSpec, networkSettings,
      timeProvider, modifierSerializers))

  def apply(networkControllerRef: ActorRef,
            viewHolderRef: ActorRef,
            syncInfoSpec: SidechainSyncInfoMessageSpec.type,
            networkSettings: NetworkSettings,
            timeProvider: NetworkTimeProvider,
            modifierSerializers: Map[ModifierTypeId, ScorexSerializer[_ <: NodeViewModifier]])
           (implicit context: ActorRefFactory, ex: ExecutionContext): ActorRef =
    context.actorOf(props(networkControllerRef, viewHolderRef, syncInfoSpec, networkSettings, timeProvider, modifierSerializers))

  def apply(networkControllerRef: ActorRef,
            viewHolderRef: ActorRef,
            syncInfoSpec: SidechainSyncInfoMessageSpec.type,
            networkSettings: NetworkSettings,
            timeProvider: NetworkTimeProvider,
            modifierSerializers: Map[ModifierTypeId, ScorexSerializer[_ <: NodeViewModifier]],
            name: String)
           (implicit context: ActorRefFactory, ex: ExecutionContext): ActorRef =
    context.actorOf(props(networkControllerRef, viewHolderRef, syncInfoSpec, networkSettings, timeProvider, modifierSerializers), name)
}
Example 64
Source File: SidechainTransactionActor.scala From Sidechains-SDK with MIT License | 5 votes |
package com.horizen.api.http

import akka.actor.{Actor, ActorRef, ActorSystem, Props}
import com.horizen.SidechainTypes
import com.horizen.api.http.SidechainTransactionActor.ReceivableMessages.BroadcastTransaction
import scorex.core.NodeViewHolder.ReceivableMessages.LocallyGeneratedTransaction
import scorex.core.network.NodeViewSynchronizer.ReceivableMessages.{FailedTransaction, SuccessfulTransaction}
import scorex.util.{ModifierId, ScorexLogging}

import scala.collection.concurrent.TrieMap
import scala.concurrent.{ExecutionContext, Promise}

/**
 * Actor that submits locally generated transactions to the node view holder
 * and tracks their outcome: each BroadcastTransaction gets back a Future
 * that completes when the mempool accepts (SuccessfulTransaction) or
 * rejects (FailedTransaction) the transaction.
 *
 * Fix vs. previous version: `transactionMap` was declared `var` but never
 * reassigned — it is now a `val` (the TrieMap itself is mutable and
 * thread-safe; only its contents change).
 */
class SidechainTransactionActor[T <: SidechainTypes#SCBT](sidechainNodeViewHolderRef: ActorRef)(implicit ec: ExecutionContext)
  extends Actor with ScorexLogging {

  // Pending broadcasts keyed by transaction id, completed on the matching event.
  private val transactionMap: TrieMap[String, Promise[ModifierId]] = TrieMap()

  override def preStart(): Unit = {
    // Subscribe to mempool outcome events published on the system event stream.
    context.system.eventStream.subscribe(self, classOf[SuccessfulTransaction[T]])
    context.system.eventStream.subscribe(self, classOf[FailedTransaction])
  }

  protected def broadcastTransaction: Receive = {
    case BroadcastTransaction(transaction) =>
      val promise = Promise[ModifierId]
      val future = promise.future
      transactionMap(transaction.id) = promise
      // Reply immediately with the future; it resolves once the mempool decides.
      sender() ! future

      sidechainNodeViewHolderRef ! LocallyGeneratedTransaction[SidechainTypes#SCBT](transaction)
  }

  protected def sidechainNodeViewHolderEvents: Receive = {
    case SuccessfulTransaction(transaction) =>
      transactionMap.remove(transaction.id) match {
        case Some(promise) => promise.success(transaction.id)
        case None => // not one of our pending broadcasts; ignore
      }
    case FailedTransaction(transactionId, throwable, _) =>
      transactionMap.remove(transactionId) match {
        case Some(promise) => promise.failure(throwable)
        case None => // not one of our pending broadcasts; ignore
      }
  }

  override def receive: Receive = {
    broadcastTransaction orElse sidechainNodeViewHolderEvents orElse {
      case message: Any => log.error("SidechainTransactionActor received strange message: " + message)
    }
  }
}

object SidechainTransactionActor {

  object ReceivableMessages {

    case class BroadcastTransaction[T <: SidechainTypes#SCBT](transaction: T)

  }

}

object SidechainTransactionActorRef {
  def props(sidechainNodeViewHolderRef: ActorRef)
           (implicit ec: ExecutionContext): Props =
    Props(new SidechainTransactionActor(sidechainNodeViewHolderRef))

  def apply(sidechainNodeViewHolderRef: ActorRef)
           (implicit system: ActorSystem, ec: ExecutionContext): ActorRef =
    system.actorOf(props(sidechainNodeViewHolderRef))
}
Example 65
Source File: MockedSidechainNodeViewHolderFixture.scala From Sidechains-SDK with MIT License | 5 votes |
package com.horizen.fixtures

import akka.actor.{ActorRef, ActorSystem, Props}
import com.horizen._
import org.mockito.Mockito
import org.scalatest.mockito.MockitoSugar
import scorex.core.settings.{NetworkSettings, ScorexSettings}

// Node view holder whose restoreState() returns the supplied components
// directly instead of loading them from storage (all storage/params
// constructor arguments are passed as null — test use only).
class MockedSidechainNodeViewHolder(sidechainSettings: SidechainSettings,
                                    history: SidechainHistory,
                                    state: SidechainState,
                                    wallet: SidechainWallet,
                                    mempool: SidechainMemoryPool)
  extends SidechainNodeViewHolder(sidechainSettings, null, null, null, null, null, null, null, null, null, null, null, null) {

  override def restoreState(): Option[(HIS, MS, VL, MP)] = {
    Some(history, state, wallet, mempool)
  }
}


trait MockedSidechainNodeViewHolderFixture extends MockitoSugar {
  // Builds an actor around MockedSidechainNodeViewHolder with just enough
  // mocked settings for it to start (network.maxModifiersCacheSize = 10).
  def getMockedSidechainNodeViewHolderRef(history: SidechainHistory,
                                          state: SidechainState,
                                          wallet: SidechainWallet,
                                          mempool: SidechainMemoryPool)
                                         (implicit actorSystem: ActorSystem): ActorRef = {
    val sidechainSettings = mock[SidechainSettings]
    val scorexSettings = mock[ScorexSettings]
    val networkSettings = mock[NetworkSettings]
    Mockito.when(sidechainSettings.scorexSettings)
      .thenAnswer(answer => {
        scorexSettings
      })
    Mockito.when(scorexSettings.network)
      .thenAnswer(answer => {
        networkSettings
      })
    Mockito.when(networkSettings.maxModifiersCacheSize)
      .thenAnswer(answer => {
        10
      })

    actorSystem.actorOf(Props(new MockedSidechainNodeViewHolder(sidechainSettings, history, state, wallet, mempool)))
  }
}
Example 66
Source File: ProcessStep.scala From process with Apache License 2.0 | 5 votes |
package processframework

import scala.concurrent.duration.Duration
import scala.concurrent.{ ExecutionContext, Future, Promise }
import scala.reflect.ClassTag

import akka.actor.{ Actor, ActorContext, ActorRef, Props }
import akka.util.Timeout

/**
 * One step of a process over state S. A step completes exactly once (tracked
 * by `promise`); it translates incoming commands to events (receiveCommand)
 * and applies events to the state (updateState). Steps can be chained with ~>.
 */
trait ProcessStep[S] {
  implicit def context: ActorContext
  // Completion latch for this step; fulfilled by markDone, failed by abort.
  private[processframework] val promise: Promise[Unit] = Promise[Unit]()

  type Execution = S ⇒ Unit
  type UpdateFunction = PartialFunction[Process.Event, S ⇒ S]
  type CommandToEvent = PartialFunction[Any, Process.Event]

  def execute()(implicit process: ActorRef): Execution
  def receiveCommand: CommandToEvent
  def updateState: UpdateFunction

  // Duration.Inf means "execute once, never retry".
  def retryInterval: Duration = Duration.Inf

  final def isCompleted = promise.isCompleted
  final def markDone(): Unit = promise.trySuccess(())
  final def markDone(newState: S): S = {
    markDone()
    newState
  }
  private[processframework] def abort(): Unit = promise.tryFailure(new RuntimeException("Process aborted"))

  // On completion, asks the process actor to run completeFn inside its own context.
  final def onComplete(completeFn: ((ActorContext, S)) ⇒ Unit)(implicit executionContext: ExecutionContext, process: ActorRef): Unit =
    promise.future.foreach { _ ⇒ process ! PersistentProcess.Perform(completeFn) }

  // On completion, runs completeFn on the given execution context (outside the actor).
  final def onCompleteAsync(completeFn: ⇒ Unit)(implicit executionContext: ExecutionContext): Unit = promise.future.foreach(_ ⇒ completeFn)

  // Sequencing combinator: this step, then the given steps.
  final def ~>(next: ProcessStep[S]*)(implicit context: ActorContext): ProcessStep[S] = new Chain(this, next: _*)

  private[processframework] def run()(implicit process: ActorRef, executionContext: ExecutionContext, classTag: ClassTag[S]): Future[Unit] = runImpl

  // Inner actor that receives replies during execution and forwards the
  // translated event to its parent (the process actor).
  private val innerActor = context.actorOf(Props(new Actor {
    def receive = {
      case msg if receiveCommand.isDefinedAt(msg) ⇒
        val event = receiveCommand(msg)
        context.parent ! event
    }
  }))

  // Once completed, the step stops handling events/commands entirely.
  private[processframework] def handleUpdateState: UpdateFunction = if (isCompleted) PartialFunction.empty[Process.Event, S ⇒ S] else updateState
  private[processframework] def handleReceiveCommand: CommandToEvent = if (isCompleted) PartialFunction.empty[Any, Process.Event] else receiveCommand

  // Executes the step and, when a finite retryInterval is set, schedules a
  // re-execution that fires only if the step is still incomplete by then.
  private[processframework] def executeWithPossibleRetry()(implicit process: ActorRef): Execution = { state ⇒
    implicit val _ = context.dispatcher
    if (retryInterval.isFinite())
      context.system.scheduler.scheduleOnce(Duration.fromNanos(retryInterval.toNanos)) { if (!isCompleted) executeWithPossibleRetry()(process)(state) }
    execute()(process)(state)
  }

  // Fetches the current state from the process actor and kicks off execution,
  // with innerActor standing in as the step's reply target.
  private[processframework] def runImpl()(implicit process: ActorRef, executionContext: ExecutionContext, classTag: ClassTag[S]): Future[Unit] = {
    import akka.pattern.ask
    import scala.concurrent.duration._
    implicit val timeout: Timeout = 5 seconds

    if (!isCompleted) (process ? Process.GetState).mapTo[S].foreach(executeWithPossibleRetry()(innerActor))
    promise.future
  }
}
Example 67
Source File: ProcessTest.scala From process with Apache License 2.0 | 5 votes |
package processframework

import java.lang

import akka.actor.{ ActorContext, ActorRef, ActorSystem, Props }
import akka.testkit.{ ImplicitSender, TestKit, TestProbe }
import org.scalatest._
import org.scalatest.concurrent.Eventually

import scala.concurrent.duration._

object ProcessTest {
  case object Start
  case object Response
  case class Command(i: Int)
  case object Completed extends Process.Event

  // Single step: sends Command(state) to `service`, completes on Response
  // and increments the state by one.
  class MockStep(service: ActorRef, retryInt: Duration)(implicit val context: ActorContext) extends ProcessStep[Int] {
    override val retryInterval = retryInt
    def execute()(implicit process: akka.actor.ActorRef) = { state ⇒
      service ! Command(state)
    }
    def receiveCommand = {
      case Response ⇒
        Completed
    }
    def updateState = {
      case Completed ⇒ state ⇒ markDone(state + 1)
    }
  }

  // Minimal process with integer state and one MockStep.
  class Process1(service: ActorRef, retryInterval: Duration) extends Process[Int] {
    import context.dispatcher
    var state = 0
    val process = new MockStep(service, retryInterval)

    def receive = {
      case Start ⇒ process.run()
    }
  }
}

class ProcessTest extends BaseSpec {
  import ProcessTest._

  "Process" should {
    "have a happy flow" in {
      val service = TestProbe()
      val process = system.actorOf(Props(new Process1(service.ref, Duration.Inf)), "Process1")
      // Initial state before starting.
      process ! processframework.Process.GetState
      expectMsg(0)
      process ! Start

      service.expectMsg(Command(0))
      service.reply(Response)

      // State becomes 1 once the step's event is applied.
      eventually {
        process ! processframework.Process.GetState
        expectMsg(1)
      }

      // A completed step is not re-run: Start again is a no-op.
      process ! Start
      expectNoMsg(250 millis)
      process ! processframework.Process.GetState
      expectMsg(1)
    }

    "does not retry by default" in {
      val service = TestProbe()
      val process = system.actorOf(Props(new Process1(service.ref, Duration.Inf)), "Process2")
      process ! processframework.Process.GetState
      expectMsg(0)
      process ! Start

      // With Duration.Inf only one Command is ever sent.
      service.expectMsg(Command(0))
      expectNoMsg()
    }

    "retries execution until succeeded" in {
      val service = TestProbe()
      val process = system.actorOf(Props(new Process1(service.ref, 150 millis)), "Process3")
      process ! processframework.Process.GetState
      expectMsg(0)
      process ! Start

      // The 150ms retry interval keeps resending Command(0) until the reply arrives.
      service.expectMsg(Command(0))
      service.expectMsg(1000.millis, Command(0))
      service.expectMsg(1000.millis, Command(0))
      service.reply(Response)
      expectNoMsg()
    }
  }
}
Example 68
Source File: ProcessStepTestSupport.scala From process with Apache License 2.0 | 5 votes |
package processframework

import akka.pattern.ask
import akka.actor.{ ActorRef, ActorContext, Actor, Props }
import akka.util.Timeout

import scala.concurrent.duration._
import scala.concurrent.Await
import scala.reflect.ClassTag

import akka.testkit.{ TestProbe, TestKit }
import org.scalatest.BeforeAndAfterEach

object ProcessStepTestSupport {
  case object GetStep
  case object ACommand
  case object AnEvent extends Process.Event
}

/**
 * Mixin for testing a single ProcessStep: before each test it builds a fresh
 * TestProbe plus a host actor that owns the step under test and exposes it
 * via GetStep.
 */
trait ProcessStepTestSupport[S, PS <: ProcessStep[S]] { this: TestKit with BeforeAndAfterEach ⇒
  implicit val timeout: Timeout = 1 second

  // Re-initialized in beforeEach; null between test runs (framework-style fixtures).
  var testProbe: TestProbe = null
  var processActor: ActorRef = null

  override protected def beforeEach(): Unit = {
    testProbe = createTestProbe()
    processActor = createProcessActor()
  }

  def createTestProbe(): TestProbe
  def createProcessStep(executeProbe: TestProbe)(implicit context: ActorContext): PS

  // Host actor: forwards the step's outgoing messages to testActor, hands the
  // step out on GetStep, and relays process events to testActor.
  def createProcessActor() = system.actorOf(Props(new Actor {
    val step = createProcessStep(testProbe)

    def receive = {
      case msg if sender() == step        ⇒ testActor forward msg
      case ProcessStepTestSupport.GetStep ⇒ sender() ! step
      case e: Process.Event               ⇒ testActor ! e
    }
  }))

  // Synchronously fetches the step under test from the host actor.
  def processStep()(implicit classTag: ClassTag[PS]): PS =
    Await.result[PS]((processActor ? ProcessStepTestSupport.GetStep).mapTo[PS], 2 seconds)
}
Example 69
Source File: LocalApplicationMaster.scala From DataXServer with Apache License 2.0 | 5 votes |
package org.tianlangstudio.data.hamal.yarn.local

import java.util.UUID

import akka.actor.{Actor, ActorLogging, Props}
import org.slf4j.LoggerFactory
import org.tianlangstudio.data.hamal.core.{Constants, HamalConf}
import org.tianlangstudio.data.hamal.server.thrift.ThriftServerApp
import org.tianlangstudio.data.hamal.yarn.{ApplyExecutor, TaskScheduler}
import org.tianlangstudio.data.hamal.yarn.thrift.AkkaThriftTaskHandler
import org.tianlangstudio.data.hamal.yarn.util.AkkaUtils
import org.tianlangstudio.data.hamal.core.HamalConf

/**
 * Created by zhuhq on 2016/5/3.
 * Requests run resources on the local machine and runs tasks in batches
 * as separate processes (local stand-in for the YARN application master).
 */
object LocalApplicationMaster extends App{
  val logging = org.slf4j.LoggerFactory.getLogger(classOf[LocalApplicationMaster])
  val dataxConf = new HamalConf()

  logging.info("create master actor system begin");
  val schedulerHost = dataxConf.getString(Constants.DATAX_MASTER_HOST,"127.0.0.1")
  // Port 0: let the system pick a free port; the chosen port is returned.
  val (schedulerSystem,schedulerPort) = AkkaUtils.createActorSystem(Constants.AKKA_JOB_SCHEDULER_SYSTEM,schedulerHost,0,dataxConf)
  logging.info(s"create master actor system end on port $schedulerPort");

  // Master actor (spawns executor processes) and the task scheduler that drives it.
  val amActor = schedulerSystem.actorOf(Props(classOf[LocalApplicationMaster],dataxConf),Constants.AKKA_AM_ACTOR)
  val taskSchedulerActor = schedulerSystem.actorOf(Props(classOf[TaskScheduler],dataxConf,amActor),Constants.AKKA_JOB_SCHEDULER_ACTOR)

  taskSchedulerActor ! "start taskSchedulerActor"

  // Thrift front end through which tasks are submitted to the scheduler.
  logging.info(s"start thrift server begin")
  val thriftPort = dataxConf.getInt(Constants.THRIFT_SERVER_PORT,9777)
  val thriftHost = dataxConf.getString(Constants.THRIFT_SERVER_HOST,"127.0.0.1")
  val thriftConcurrence = dataxConf.getInt(Constants.THRIFT_SERVER_CONCURRENCE,8)
  val thriftServerHandler = new AkkaThriftTaskHandler(taskSchedulerActor)
  logging.info(s"start thrift server on $thriftHost:$thriftPort")
  // Blocking call: serves until the process exits.
  ThriftServerApp.start(thriftHost,thriftPort,thriftServerHandler)
}

/**
 * Actor that launches executor child processes on request (ApplyExecutor).
 */
class LocalApplicationMaster(dataxConf: HamalConf) extends Actor with ActorLogging{

  private val  logger = LoggerFactory.getLogger(getClass)

  val runEnv = dataxConf.getString(Constants.RUN_ENV, Constants.RUN_ENV_PRODUCTION).toLowerCase()
  logger.info("run env:{}", runEnv)

  // Development mode spawns an in-repo executor JVM; production delegates to a launch script.
  val containerCmd = if(Constants.RUN_ENV_DEVELOPMENT.equals(runEnv)) {
    s"""
       |java ${System.getProperty("java.class.path")}
       | -Ddatax.home=${dataxConf.getString(Constants.DATAX_HOME)} -Xms512M -Xmx1024M
       | -XX:PermSize=128M -XX:MaxPermSize=512M com.tianlangstudio.data.datax.Executor
     """.stripMargin
  }else {
    dataxConf.getString(Constants.DATAX_EXECUTOR_CMD, "./startLocalExecutor.sh")
  }

  override def receive: Receive = {
    case msg:String =>
      log.info(s"${self.path} receive msg: $msg")
    case ApplyExecutor(num) =>
      applyExecutor(num)
  }

  // Spawns `num` executor processes, each told how to reach the scheduler system.
  private def applyExecutor(num:Int): Unit = {

    log.info(s"apply executor num $num");

    for(i <- 0 until num) {
      sys.process.stringToProcess(
        containerCmd + " " +
          LocalApplicationMaster.schedulerHost + ":" + LocalApplicationMaster.schedulerPort +
          " " + UUID.randomUUID().toString).run()
      log.info(s"apply executor ${i+1}/$num")
    }
  }
}
Example 70
Source File: AkkaUtil.scala From wookiee with Apache License 2.0 | 5 votes |
package com.webtrends.harness.utils

import akka.actor.{ActorRef, ActorContext, Props}
import akka.routing.{FromConfig, RoundRobinPool}
import org.slf4j.LoggerFactory

// NOTE(review): the enclosing object's header (and the `externalLogger` it
// defines) was truncated from this excerpt; the trailing brace below closes it.

  /**
   * Creates a routed actor under the current actor's path: uses the akka
   * deployment config at akka.actor.deployment.<parent>/<actorName> when
   * present, otherwise falls back to a round-robin pool of defNumRoutees.
   */
  def initActorFromConfig(props:Props, actorName:String, defNumRoutees:Int=3)(implicit context:ActorContext) : ActorRef = {
    val config = context.system.settings.config
    val deployPath = s"akka.actor.deployment.${context.self.path.toStringWithoutAddress}/$actorName"
    if (!config.hasPath(deployPath)) {
      // message primarily for debugging so that you can see immediately if your actor found the config
      externalLogger.debug(s"Could not find deployment config for path [$deployPath], deploying default round robin with $defNumRoutees routees")
      context.actorOf(RoundRobinPool(defNumRoutees).props(props), actorName)
    } else {
      context.actorOf(FromConfig.props(props), actorName)
    }
  }
}
Example 71
Source File: ActorWaitHelper.scala From wookiee with Apache License 2.0 | 5 votes |
package com.webtrends.harness.utils

import java.util.concurrent.TimeUnit

import akka.actor.{Actor, ActorRef, ActorSystem, Props}
import akka.util.Timeout

import scala.concurrent.Await

object ActorWaitHelper {

  // Will wait until an actor has come up before returning its ActorRef
  def awaitActor(props: Props, system: ActorSystem, actorName: Option[String] = None)(implicit timeout: Timeout = Timeout(5, TimeUnit.SECONDS)): ActorRef = {
    val started = actorName.fold(system.actorOf(props))(name => system.actorOf(props, name))
    awaitActorRef(started, system)
  }

  // Will wait until an actor has come up before returning its ActorRef
  def awaitActorRef(actor: ActorRef, system: ActorSystem)(implicit timeout: Timeout = Timeout(5, TimeUnit.SECONDS)): ActorRef = {
    // Resolving the selection only completes once the actor is actually started.
    Await.result(system.actorSelection(actor.path).resolveOne(), timeout.duration)
    actor
  }
}

/** Mixin form of the helper for use inside an actor, delegating to the companion. */
trait ActorWaitHelper { this: Actor =>
  // Will wait until an actor has come up before returning its ActorRef
  def awaitActor(props: Props, actorName: Option[String] = None)(implicit timeout: Timeout = Timeout(5, TimeUnit.SECONDS)): ActorRef =
    ActorWaitHelper.awaitActor(props, context.system, actorName)(timeout)
}
Example 72
Source File: PolicyManager.scala From wookiee with Apache License 2.0 | 5 votes |
package com.webtrends.harness.policy

import akka.pattern.{ask, pipe}
import akka.actor.{ActorRef, Props}
import akka.routing.{RoundRobinPool, FromConfig}
import com.webtrends.harness.HarnessConstants
import com.webtrends.harness.app.{PrepareForShutdown, HActor}
import com.webtrends.harness.app.HarnessActor.SystemReady
import org.slf4j.LoggerFactory

import scala.collection.mutable
import scala.concurrent.{Future, Promise}
import scala.util.{Success, Failure}

case class GetPolicies()

/** Actor that serves the registered-policy map to askers. */
class PolicyManager extends PrepareForShutdown {

  import context.dispatcher

  // NOTE(review): the pattern `case GetPolicies` matches the companion object,
  // not instances created with `GetPolicies()` — confirm senders use the object form.
  override def receive = super.receive orElse {
    case GetPolicies => pipe(getPolicies) to sender

    case SystemReady => // ignore
  }

  protected def getPolicies: Future[Map[String, Policy]] =
    Future(PolicyManager.getPolicies.get)
}

object PolicyManager {
  private val externalLogger = LoggerFactory.getLogger(this.getClass)

  // map that stores the name of the command with the actor it references
  val policyMap = mutable.Map[String, Policy]()

  def props = Props[PolicyManager]

  def addPolicy[T <: Policy](name: String, ref: T) = {
    ref.addCommands
    externalLogger.debug(s"Policy $name inserted into Policy Manager map.")
    policyMap += (name -> ref)
  }

  protected def removePolicy(name: String): Boolean =
    policyMap.get(name) match {
      case Some(_) =>
        externalLogger.debug(s"Policy $name removed from Policy Manager map.")
        policyMap -= name
        true
      case None =>
        false
    }

  def getPolicy(name: String): Option[Policy] = policyMap.get(name)

  def getPolicies: Option[Map[String, Policy]] = Some(policyMap.toMap)
}
Example 73
Source File: Harness.scala From wookiee with Apache License 2.0 | 5 votes |
package com.webtrends.harness.app

import akka.actor.{ActorRef, ActorSystem, Props, UnhandledMessage}
import akka.pattern._
import com.typesafe.config.Config
import com.webtrends.harness.UnhandledEventListener
import com.webtrends.harness.app.HarnessActor.ShutdownSystem
import com.webtrends.harness.logging.Logger

import scala.concurrent.{Await, Future}
import scala.concurrent.duration._
import scala.util.Failure
import scala.util.Success

/** Registers a JVM shutdown hook that tears the actor system down cleanly
  * (blocking until shutdown completes) when the process exits. */
def addShutdownHook(): Unit = {
  val hook = new Thread(new Runnable {
    def run() = {
      // Only act if the actor system was actually started.
      system match {
        case Some(sys) =>
          sys.log.debug("The shutdown hook has been called")
          shutdownActorSystem(block = true) {
            externalLogger.info("Wookiee Shut Down, Thanks for Coming!")
          }
        case _ =>
      }
    }
  })
  Runtime.getRuntime.addShutdownHook(hook)
}
}
Example 74
Source File: InternalHTTP.scala From wookiee with Apache License 2.0 | 5 votes |
package com.webtrends.harness.http

import akka.actor.{Props, ActorRef, Actor}

/** Mixin that lets an actor start an embedded HTTP server child and keep its ref. */
trait InternalHTTP { this: Actor =>
  var httpRef: Option[ActorRef] = None

  def startInternalHTTP(port: Int): ActorRef = {
    val server = context.actorOf(Props(classOf[SimpleHttpServer], port), InternalHTTP.InternalHttpName)
    httpRef = Some(server)
    server
  }
}

object InternalHTTP {
  val InternalHttpName = "Internal-Http"
}
Example 75
Source File: HealthCheckActor.scala From wookiee with Apache License 2.0 | 5 votes |
package com.webtrends.harness.health

import akka.actor.{Props, Status}
import com.webtrends.harness.HarnessConstants
import com.webtrends.harness.app.HActor
import com.webtrends.harness.utils.ConfigUtil

import scala.util.{Failure, Success}

object HealthCheckActor {
  def props: Props = Props[HealthCheckActor]

  // These objects will be temporary enough, favoring time complexity concerns over space concerns
  protected[health] def collectHealthStates(health: ApplicationHealth): collection.mutable.Map[Seq[String], ComponentState.ComponentState] = {
    val states = collection.mutable.Map.empty[Seq[String], ComponentState.ComponentState]

    // Depth-first walk recording every component's state, keyed by its full path.
    def walk(parentPath: Seq[String], component: HealthComponent): Unit = {
      states += ((parentPath :+ component.name, component.state))
      component.components.foreach(child => walk(parentPath :+ component.name, child))
    }

    states += ((Seq(health.applicationName), health.state))
    health.components.foreach(child => walk(Seq(health.applicationName), child))
    states
  }

  protected[health] def healthChecksDiffer(previous: ApplicationHealth, current: ApplicationHealth): Boolean = {
    val previousStates = collectHealthStates(previous)
    var foundDiff = false

    // Short-circuits as soon as any component's state differs from last time.
    def walk(parentPath: Seq[String], component: HealthComponent): Unit =
      if (!foundDiff) {
        val prior = previousStates.get(parentPath :+ component.name)
        if (!prior.contains(component.state))
          foundDiff = true
        else
          component.components.foreach(child => walk(parentPath :+ component.name, child))
      }

    current.components.foreach(child => walk(Seq(current.applicationName), child))

    previous.state != current.state || foundDiff
  }
}

class HealthCheckActor extends HActor with HealthCheckProvider {
  private var previousCheck: Option[ApplicationHealth] = None

  override def preStart() {
    log.info("Health Manager started: {}", context.self.path)
  }

  override def postStop(): Unit = {
    log.info("Health Manager stopped: {}", context.self.path)
  }

  override def receive = health orElse {
    case HealthRequest(typ) =>
      val caller = sender
      log.debug("Fetching the system health")
      import context.dispatcher
      runChecks onComplete {
        case Success(checkResult) =>
          comparePreviousCheck(checkResult)
          // Render the reply in the shape the caller asked for.
          val reply = typ match {
            case HealthResponseType.NAGIOS =>
              "%s|%s".format(checkResult.state.toString.toUpperCase, checkResult.details)
            case HealthResponseType.LB =>
              if (checkResult.state == ComponentState.CRITICAL) "DOWN" else "UP"
            case _ =>
              checkResult
          }
          caller ! reply
        case Failure(f) =>
          caller ! Status.Failure(f)
      }
  }

  // Logs a diff against the previous health snapshot when the feature is enabled.
  private def comparePreviousCheck(health: ApplicationHealth): Unit =
    if (ConfigUtil.getDefaultValue(HarnessConstants.LogHealthCheckDiffs, config.getBoolean, false)) {
      previousCheck match {
        case Some(c) =>
          if (HealthCheckActor.healthChecksDiffer(c, health))
            log.info(s"Health check status changed. Old: ${c.toJson()} New: ${health.toJson()}")
        case None => // Not much use checking against nothing
      }
      previousCheck = Some(health)
    }
}
Example 76
Source File: TypedCommandManager.scala From wookiee with Apache License 2.0 | 5 votes |
package com.webtrends.harness.command.typed

import akka.actor.{Actor, ActorRef, Props}
import akka.routing.{FromConfig, RoundRobinPool}
import com.webtrends.harness.HarnessConstants
import com.webtrends.harness.health.{ActorHealth, ComponentState, HealthComponent}
import com.webtrends.harness.logging.LoggingAdapter

import scala.collection.mutable
import scala.concurrent.Future
import scala.util.Try

case class RegisterCommand[T <: TypedCommand[_, _]](name: String, props: Props, checkHealth: Boolean)

/** Registers typed command actors by name, deploying them from config when
  * a deployment entry exists and as a round-robin pool otherwise. */
class TypedCommandManager extends Actor with ActorHealth with LoggingAdapter {
  val healthCheckChildren = mutable.ArrayBuffer.empty[ActorRef]
  val config = context.system.settings.config.getConfig("akka.actor.deployment")

  override def receive: Receive = health orElse {
    case RegisterCommand(name, props, checkHealth) =>
      sender ! registerCommand(name, props, checkHealth)
  }

  def registerCommand[T <: TypedCommand[_, _]](name: String, actorProps: Props, checkHealth: Boolean): ActorRef = {
    TypedCommandManager.commands.get(name) match {
      case Some(existing) =>
        log.warn(s"Command $name has already been added, not re-adding it.")
        existing
      case None =>
        // Prefer an explicit deployment config; otherwise fall back to a pool.
        val routedProps =
          if (config.hasPath(s"akka.actor.deployment.${HarnessConstants.TypedCommandFullName}/$name")) {
            FromConfig.props(actorProps)
          } else {
            val nrRoutees = Try(config.getInt(HarnessConstants.KeyCommandsNrRoutees)).getOrElse(5)
            RoundRobinPool(nrRoutees).props(actorProps)
          }
        val commandRef = context.actorOf(routedProps, name)
        TypedCommandManager.commands(name) = commandRef
        if (checkHealth) healthCheckChildren += commandRef
        commandRef
    }
  }

  override def getHealthChildren: Iterable[ActorRef] = healthCheckChildren

  override def getHealth: Future[HealthComponent] =
    Future.successful(
      HealthComponent(self.path.toString, ComponentState.NORMAL,
        s"Managing ${TypedCommandManager.commands.size} typed commands")
    )
}

object TypedCommandManager {
  private[typed] val commands = mutable.Map[String, ActorRef]()
  def props = Props[TypedCommandManager]
}
Example 77
Source File: TypedCommandHelper.scala From wookiee with Apache License 2.0 | 5 votes |
package com.webtrends.harness.command.typed

import akka.actor.{Actor, ActorRef, Props}
import akka.pattern._
import akka.util.Timeout
import com.webtrends.harness.HarnessConstants

import scala.concurrent.duration._
import scala.concurrent.{ExecutionContext, Future}

/** Mixin for actors that need to register typed commands with the command manager. */
trait TypedCommandHelper { this: Actor =>

  var typedCommandManager: Option[ActorRef] = None
  implicit def ec: ExecutionContext = context.dispatcher

  def registerTypedCommand[T <: TypedCommand[_, _]](name: String, actorClass: Class[T], checkHealth: Boolean = false): Future[ActorRef] = {
    implicit val timeout = Timeout(2 seconds)
    getManager().flatMap { manager =>
      (manager ? RegisterCommand(name, Props(actorClass), checkHealth)).mapTo[ActorRef]
    }
  }

  // Caches the manager ref after the first successful lookup.
  protected def getManager(): Future[ActorRef] =
    typedCommandManager.map(Future.successful).getOrElse {
      context.system.actorSelection(HarnessConstants.TypedCommandFullName).resolveOne()(2 seconds).map { ref =>
        typedCommandManager = Some(ref)
        ref
      }
    }
}
Example 78
Source File: CommandHelper.scala From wookiee with Apache License 2.0 | 5 votes |
package com.webtrends.harness.command

import akka.actor.{Props, ActorRef, Actor}
import akka.pattern.ask
import akka.util.Timeout
import com.webtrends.harness.app.Harness

import scala.concurrent.duration._
import com.webtrends.harness.HarnessConstants
import com.webtrends.harness.logging.ActorLoggingAdapter

import scala.concurrent.{Promise, Future}
import scala.util.{Failure, Success}

trait CommandHelper extends ActorLoggingAdapter with BaseCommandHelper { this: Actor =>
  override lazy implicit val actorSystem = context.system
}

/** Executes a (possibly remote) command through the command manager and
  * completes with the command's typed response, or a CommandException on failure. */
def executeCommand[T: Manifest](name: String, bean: Option[CommandBean] = None,
                                server: Option[String] = None,
                                port: Int = 2552)(implicit timeout: Timeout): Future[BaseCommandResponse[T]] = {

  val promise = Promise[BaseCommandResponse[T]]
  initCommandManager onComplete {
    case Success(_) =>
      commandManager match {
        case Some(manager) =>
          // Route remotely when a server is given, locally otherwise.
          val message = server match {
            case Some(srv) => ExecuteRemoteCommand(name, srv, port, bean, timeout)
            case None => ExecuteCommand(name, bean, timeout)
          }
          (manager ? message)(timeout).mapTo[BaseCommandResponse[T]] onComplete {
            case Success(response) => promise success response
            case Failure(f) => promise failure CommandException("CommandManager", f)
          }
        case None =>
          promise failure CommandException("CommandManager", "CommandManager not found!")
      }
    case Failure(f) => promise failure f
  }
  promise.future
}
}
Example 79
Source File: LoggingActorSpec.scala From wookiee with Apache License 2.0 | 5 votes |
package com.webtrends.harness.logging

import akka.actor.{ActorSystem, Props}
import akka.event.Logging.{InitializeLogger, LoggerInitialized}
import akka.testkit.{TestKit, TestProbe}
import com.typesafe.config.ConfigFactory
import com.webtrends.harness.TestKitSpecificationWithJUnit

class LoggingActorSpec extends TestKitSpecificationWithJUnit(ActorSystem("test", ConfigFactory.parseString(
  """logging.use-actor=off"""))) {

  val loggingActor = system.actorOf(Props[LoggingActor])

  "Logging" should {
    "test logging initialization" in {
      val probe = TestProbe()
      // The logging actor must complete the standard akka logger handshake.
      probe.send(loggingActor, InitializeLogger(null))
      LoggerInitialized must beEqualTo(probe.expectMsg(LoggerInitialized))
    }
  }

  step {
    TestKit.shutdownActorSystem(system)
  }
}
Example 80
Source File: ConfigSpec.scala From wookiee with Apache License 2.0 | 5 votes |
package com.webtrends.harness

import java.io.{BufferedWriter, File, FileWriter}
import java.util.concurrent.TimeUnit

import akka.actor.{Actor, ActorSystem, Props}
import akka.testkit.TestProbe
import com.typesafe.config.ConfigFactory
import com.webtrends.harness.app.HarnessActor.ConfigChange
import com.webtrends.harness.config.ConfigWatcherActor
import com.webtrends.harness.health.{ComponentState, HealthComponent}
import com.webtrends.harness.service.messages.CheckHealth
import org.specs2.mutable.SpecificationWithJUnit

import scala.concurrent.ExecutionContextExecutor
import scala.concurrent.duration.FiniteDuration
import scala.reflect.io.{Directory, Path}

class ConfigSpec extends SpecificationWithJUnit {
  implicit val dur = FiniteDuration(2, TimeUnit.SECONDS)
  new File("services/test/conf").mkdirs()
  implicit val sys = ActorSystem("system", ConfigFactory.parseString(
    """ akka.actor.provider = "akka.actor.LocalActorRefProvider" services { path = "services" } """).withFallback(ConfigFactory.load))
  implicit val ec: ExecutionContextExecutor = sys.dispatcher

  val probe = TestProbe()

  // Parent wraps the watcher so its outbound messages can be observed via the probe.
  val parent = sys.actorOf(Props(new Actor {
    val child = context.actorOf(ConfigWatcherActor.props, "child")
    def receive = {
      case x if sender == child => probe.ref forward x
      case x => child forward x
    }
  }))

  sequential

  "config " should {
    "be in good health" in {
      probe.send(parent, CheckHealth)
      val msg = probe.expectMsgClass(classOf[HealthComponent])
      msg.state equals ComponentState.NORMAL
    }

    "detect changes in config" in {
      val file = new File("services/test/conf/test.conf")
      val bw = new BufferedWriter(new FileWriter(file))
      bw.write("test = \"value\"")
      bw.close()
      val msg = probe.expectMsgClass(classOf[ConfigChange])
      msg.isInstanceOf[ConfigChange]
    }
  }

  step {
    // Clean up the actor system and the scratch directory created above.
    sys.terminate().onComplete { _ =>
      Directory(Path(new File("services"))).deleteRecursively()
    }
  }
}
Example 81
Source File: InternalHttpSpec.scala From wookiee with Apache License 2.0 | 5 votes |
package com.webtrends.harness.http

import java.net.{HttpURLConnection, URL}
import java.util.concurrent.TimeUnit

import akka.actor.{Props, ActorSystem}
import akka.testkit.TestKit
import akka.util.Timeout
import com.webtrends.harness.TestKitSpecificationWithJUnit
import com.webtrends.harness.service.messages.CheckHealth

import scala.concurrent.Await
import akka.pattern.ask
import scala.concurrent.duration.FiniteDuration

class InternalHttpSpec extends TestKitSpecificationWithJUnit(ActorSystem("test")) with InternalHttpClient {
  val port = 8123
  val path = "http://127.0.0.1:" + port + "/"
  val httpActor = system.actorOf(Props(classOf[SimpleHttpServer], port))

  // We need to make sure the httpActor has started up before trying to connect.
  implicit val timeout = Timeout(FiniteDuration(5, TimeUnit.SECONDS))
  Await.result(httpActor ? CheckHealth, timeout.duration)

  "Test handlers" should {
    "handle the get path /ping" in {
      val url = new URL(path + "ping")
      val conn = url.openConnection().asInstanceOf[HttpURLConnection]
      val resp = getResponseContent(conn)

      resp.status mustEqual "200"
      resp.content.length must be > 0
      resp.content.substring(0, 5) mustEqual "pong:"
    }
  }

  step {
    TestKit.shutdownActorSystem(system)
  }
}
Example 82
Source File: ActorWaitSpec.scala From wookiee with Apache License 2.0 | 5 votes |
package com.webtrends.harness

import java.util.concurrent.TimeUnit

import akka.actor.{Actor, ActorSystem, PoisonPill, Props}
import akka.pattern.ask
import akka.testkit.TestKit
import akka.util.Timeout
import com.webtrends.harness.utils.ActorWaitHelper
import org.specs2.mutable.SpecificationLike

import scala.concurrent.Await
import scala.concurrent.duration.Duration

/** Target actor that the wait helper must block on. */
class WaitedOnActor extends Actor with ActorWaitHelper {
  def receive: Receive = {
    case "message" => sender ! "waitedResponse"
  }
}

/** Actor that itself awaits another actor during construction. */
class WaitActor extends Actor with ActorWaitHelper {
  implicit val timeout = Timeout(5000, TimeUnit.MILLISECONDS)
  val waited = awaitActor(Props[WaitedOnActor])

  def receive: Receive = {
    case "message" => sender ! "response"
    case "waited" => sender ! Await.result((waited ? "message").mapTo[String], Duration(5, "seconds"))
  }
}

class ActorWaitSpec extends TestKit(ActorSystem("wait-spec")) with SpecificationLike {
  implicit val timeout = Timeout(5000, TimeUnit.MILLISECONDS)
  val waitActor = ActorWaitHelper.awaitActor(Props[WaitActor], system)

  sequential

  "ActorWaitSpec" should {
    "await the WaitActor successfully " in {
      Await.result((waitActor ? "message").mapTo[String], Duration(5, "seconds")) must beEqualTo("response")
    }

    "the WaitActor's awaited actor must have come up " in {
      Await.result((waitActor ? "waited").mapTo[String], Duration(5, "seconds")) must beEqualTo("waitedResponse")
    }
  }

  step {
    waitActor ! PoisonPill
  }
}
Example 83
Source File: IngestionActors.scala From hydra with Apache License 2.0 | 5 votes |
package hydra.ingest.services

import akka.actor.Props
import hydra.common.config.ConfigSupport
import hydra.common.util.ActorUtils
import hydra.core.bootstrap.ServiceProvider
import hydra.ingest.bootstrap.HydraIngestorRegistryClient

/** Declares the name -> Props pairs for every ingestion-related actor this module provides. */
object IngestionActors extends ServiceProvider with ConfigSupport {

  private val registryPath =
    HydraIngestorRegistryClient.registryPath(applicationConfig)

  override val services = Seq(
    ActorUtils.actorName[IngestionHandlerGateway] -> IngestionHandlerGateway.props(registryPath),
    ActorUtils.actorName[TransportRegistrar] -> Props[TransportRegistrar],
    ActorUtils.actorName[IngestorRegistry] -> Props[IngestorRegistry],
    ActorUtils.actorName[IngestorRegistrar] -> Props[IngestorRegistrar]
  )
}
Example 84
Source File: TransportRegistrar.scala From hydra with Apache License 2.0 | 5 votes |
package hydra.ingest.services

import java.lang.reflect.Method

import akka.actor.{Actor, ActorRef, ActorRefFactory, Props}
import com.typesafe.config.Config
import hydra.common.config.ConfigSupport
import hydra.common.logging.LoggingAdapter
import hydra.common.reflect.ReflectionUtils
import hydra.common.util.ActorUtils
import hydra.core.transport.Transport
import hydra.ingest.bootstrap.ClasspathHydraComponentLoader
import hydra.ingest.services.TransportRegistrar.{
  GetTransports,
  GetTransportsResponse
}

import scala.util.Try

/** Looks up `clazz`'s companion object and, when it declares exactly one
  * `props(config: Config)` method, returns the companion paired with that method.
  * Returns None when there is no companion, no matching method, or the match
  * is ambiguous.
  */
private def companion[T](clazz: Class[T]): Option[(T, Method)] = {
  try {
    val companion = ReflectionUtils.companionOf(clazz)
    companion.getClass.getMethods.toList.filter(m =>
      m.getName == "props" && m.getParameterTypes.toList == List(classOf[Config])
    ) match {
      // Explicit tuple: relying on auto-tupling (`Some(a, b)`) is deprecated.
      case method :: Nil => Some((companion, method))
      // No matching method, or more than one — treat both as "not found".
      case _ => None
    }
  } catch {
    // Only swallow non-fatal failures (e.g. no companion object exists);
    // the original caught Throwable, which would also hide OutOfMemoryError
    // and InterruptedException.
    case scala.util.control.NonFatal(_) => None
  }
}
}
Example 85
Source File: IngestSocketFactory.scala From hydra with Apache License 2.0 | 5 votes |
package hydra.ingest.services

import akka.NotUsed
import akka.actor.{ActorRef, ActorRefFactory, Props}
import akka.stream.OverflowStrategy
import akka.stream.scaladsl.{Flow, Sink, Source}
import hydra.core.ingest.IngestionReport

trait IngestSocketFactory {
  def ingestFlow(): Flow[String, OutgoingMessage, NotUsed]
}

object IngestSocketFactory {

  /** Builds a socket flow backed by a fresh IngestionSocketActor per materialization. */
  def createSocket(fact: ActorRefFactory): IngestSocketFactory = { () =>
    {
      val socketActor = fact.actorOf(Props[IngestionSocketActor])

      // Backpressured sink driving the actor's lifecycle messages.
      def actorSink =
        Sink.actorRefWithBackpressure(
          socketActor,
          onInitMessage = SocketInit,
          ackMessage = SocketAck,
          onCompleteMessage = SocketEnded,
          onFailureMessage = SocketFailed.apply
        )

      val inbound = Flow[String].map(IncomingMessage).to(actorSink)

      val outbound = Source
        .actorRefWithBackpressure[OutgoingMessage](
          SocketAck,
          PartialFunction.empty,
          PartialFunction.empty
        )
        .mapMaterializedValue(socketActor ! SocketStarted(_))

      Flow.fromSinkAndSourceCoupled(inbound, outbound)
    }
  }
}

sealed trait SocketEvent

case object SocketInit extends SocketEvent
case class SocketStarted(ref: ActorRef) extends SocketEvent
case object SocketEnded extends SocketEvent
case object SocketAck extends SocketEvent
case class IncomingMessage(message: String) extends SocketEvent
// NOTE(review): unlike its siblings, SocketFailed does not extend SocketEvent — confirm intentional.
case class SocketFailed(ex: Throwable)

sealed trait OutgoingMessage extends SocketEvent
case class SimpleOutgoingMessage(status: Int, message: String) extends OutgoingMessage
case class IngestionOutgoingMessage(report: IngestionReport) extends OutgoingMessage
Example 86
Source File: ActorFactory.scala From hydra with Apache License 2.0 | 5 votes |
package hydra.ingest.bootstrap

import java.lang.reflect.Modifier

import akka.actor.Props
import com.pluralsight.hydra.reflect.DoNotScan
import hydra.common.config.ConfigSupport
import hydra.common.reflect.ReflectionUtils
import hydra.core.bootstrap.{ReflectionsWrapper, ServiceProvider}

import scala.util.Try

object ActorFactory extends ConfigSupport {

  import ReflectionsWrapper._

  import scala.collection.JavaConverters._

  /** Collects (name, Props) service definitions from every concrete
    * ServiceProvider on the classpath. Singleton objects are used directly;
    * for plain classes a no-arg instance is constructed reflectively.
    */
  def getActors(): Seq[(String, Props)] = {
    val serviceProviders = scanFor(classOf[ServiceProvider])
    serviceProviders.flatMap { clz =>
      Try(ReflectionUtils.getObjectInstance(clz))
        .map(_.services)
        // Class#newInstance is deprecated since JDK 9; invoke the no-arg
        // constructor explicitly instead.
        .getOrElse(clz.getDeclaredConstructor().newInstance().services)
    }
  }

  /** Finds concrete subtypes of `clazz`, excluding @DoNotScan-annotated ones. */
  private def scanFor[T](clazz: Class[T]): Seq[Class[_ <: T]] =
    reflections
      .getSubTypesOf(clazz)
      .asScala
      .filterNot(c => Modifier.isAbstract(c.getModifiers))
      .filterNot(c => c.isAnnotationPresent(classOf[DoNotScan]))
      .toSeq
}
Example 87
Source File: IngestorRegistrarSpec.scala From hydra with Apache License 2.0 | 5 votes |
package hydra.ingest.services

import java.util.concurrent.TimeUnit

import akka.actor.{ActorSystem, Props}
import akka.pattern.ask
import akka.testkit.{ImplicitSender, TestKit}
import akka.util.Timeout
import hydra.common.util.ActorUtils
import hydra.ingest.services.IngestorRegistrar.UnregisterAll
import hydra.ingest.services.IngestorRegistry.{FindAll, FindByName, LookupResult}
import hydra.ingest.test.TestIngestor
import org.scalatest.concurrent.{Eventually, ScalaFutures}
import org.scalatest.time.{Seconds, Span}
import org.scalatest.matchers.should.Matchers
import org.scalatest.funspec.AnyFunSpecLike
import org.scalatest.BeforeAndAfterAll

import scala.concurrent.duration._

class IngestorRegistrarSpec
    extends TestKit(ActorSystem("IngestorRegistrarSpec"))
    with Matchers
    with AnyFunSpecLike
    with ImplicitSender
    with ScalaFutures
    with BeforeAndAfterAll
    with Eventually {

  override def afterAll =
    TestKit.shutdownActorSystem(system, verifySystemShutdown = true)

  implicit override val patienceConfig =
    PatienceConfig(timeout = Span(10, Seconds), interval = Span(1, Seconds))

  val registry = system.actorOf(Props[IngestorRegistry], "ingestor_registry")
  val act = system.actorOf(Props[IngestorRegistrar])

  implicit val timeout = Timeout(3, TimeUnit.SECONDS)

  describe("The ingestor registrar actor") {
    it("registers from classpath on bootstrap") {
      // Registration happens asynchronously at startup, so poll until visible.
      eventually {
        whenReady(
          (registry ? FindByName(ActorUtils.actorName(classOf[TestIngestor])))
            .mapTo[LookupResult]
        ) { result =>
          result.ingestors.size shouldBe 1
          result.ingestors(0).name shouldBe ActorUtils.actorName(classOf[TestIngestor])
        }
      }
    }

    it("unregisters") {
      act ! UnregisterAll
      eventually {
        whenReady((registry ? FindAll).mapTo[LookupResult]) { result =>
          result.ingestors.size shouldBe 0
        }
      }
    }
  }
}
Example 88
Source File: IngestionSocketActorSpec.scala From hydra with Apache License 2.0 | 5 votes |
package hydra.ingest.services

import org.scalatest.matchers.should.Matchers
import org.scalatest.flatspec.AnyFlatSpecLike
import org.scalatest.BeforeAndAfterAll
import akka.actor.ActorSystem
import akka.actor.Props
import akka.testkit.TestProbe

class IngestionSocketActorSpec
    extends AnyFlatSpecLike
    with Matchers
    with BeforeAndAfterAll {

  private implicit val system: ActorSystem = ActorSystem()

  override def afterAll(): Unit = {
    system.terminate()
  }

  // Fresh actor per test case so state cannot leak between cases.
  private def getIngestActorRef = system.actorOf(Props[IngestionSocketActor])

  it should "ack the init message in waiting state" in {
    val ingestActor = getIngestActorRef
    val probe = TestProbe()
    ingestActor.tell(SocketInit, probe.ref)
    probe.expectMsg(SocketAck)
  }

  it should "ack the init message in initialized state" in {
    val ingestActor = getIngestActorRef
    val probe = TestProbe()
    ingestActor ! SocketStarted(probe.ref)
    ingestActor.tell(SocketInit, probe.ref)
    probe.expectMsg(SocketAck)
  }

  // Generates one test case per sequence of incoming messages: each message
  // must produce a SimpleOutgoingMessage followed by a SocketAck.
  private def testIngestionMessageAck(ingestionMessages: IncomingMessage*) = {
    it should s"ack the incoming messages of form: $ingestionMessages" in {
      val ingestActor = getIngestActorRef
      val probe = TestProbe()
      ingestActor ! SocketStarted(probe.ref)
      ingestActor.tell(SocketInit, probe.ref)
      probe.expectMsg(SocketAck)
      ingestionMessages.foreach { message =>
        ingestActor.tell(message, probe.ref)
        probe.expectMsgClass(classOf[SimpleOutgoingMessage])
        probe.expectMsg(SocketAck)
      }
    }
  }

  testIngestionMessageAck(IncomingMessage("-c HELP"))
  testIngestionMessageAck(IncomingMessage("-c SET hydra-ack = replicated"))
  testIngestionMessageAck(IncomingMessage("-c WHAT"))
}
Example 89
Source File: ActorFactorySpec.scala From hydra with Apache License 2.0 | 5 votes |
package hydra.ingest.bootstrap

import akka.actor.Props
import hydra.core.bootstrap.ServiceProvider
import hydra.ingest.test.TestIngestorDefault
import org.scalatest.matchers.should.Matchers
import org.scalatest.flatspec.AnyFlatSpecLike

class ActorFactorySpec extends Matchers with AnyFlatSpecLike {

  "The ActorFactory object" should "load actor Props from the classpath" in {
    // Both the class-based and the object-based providers below must be discovered.
    ActorFactory.getActors().map(_._1) should contain allOf ("test1", "test2")
  }
}

private class DummyServiceProvider extends ServiceProvider {
  override val services = Seq("test1" -> Props[TestIngestorDefault])
}

private object DummyServiceProviderObject extends ServiceProvider {
  override val services = Seq("test2" -> Props[TestIngestorDefault])
}
Example 90
Source File: HydraIngestorRegistrySpec.scala From hydra with Apache License 2.0 | 5 votes |
package hydra.ingest.bootstrap

import akka.actor.{ActorSystem, Props}
import akka.testkit.{ImplicitSender, TestKit}
import com.typesafe.config.ConfigFactory
import hydra.common.util.ActorUtils
import hydra.core.bootstrap.ReflectionsWrapper
import hydra.ingest.IngestorInfo
import hydra.ingest.services.IngestorRegistry
import hydra.ingest.services.IngestorRegistry.RegisterWithClass
import hydra.ingest.test.TestIngestor
import org.scalatest.concurrent.ScalaFutures
import org.scalatest.matchers.should.Matchers
import org.scalatest.funspec.AnyFunSpecLike
import org.scalatest.BeforeAndAfterAll

import scala.concurrent.duration._

class HydraIngestorRegistrySpec
    extends TestKit(ActorSystem("HydraIngestorRegistrySpec"))
    with Matchers
    with AnyFunSpecLike
    with BeforeAndAfterAll
    with ImplicitSender
    with ScalaFutures {

  override def afterAll =
    TestKit.shutdownActorSystem(system, verifySystemShutdown = true, duration = 10.seconds)

  val testRegistry = system.actorOf(Props[IngestorRegistry], "ingestor_registry")

  // Point the client at the registry started above.
  val cfg = ConfigFactory.parseString(
    "ingest.ingestor-registry.path=/user/ingestor_registry"
  )
  val registry = HydraIngestorRegistryClient(cfg)

  implicit val actorRefFactory = system

  ReflectionsWrapper.rescan()

  registry.registry ! RegisterWithClass(classOf[TestIngestor], "global")
  expectMsgType[IngestorInfo]

  describe("The Ingestor Registry") {
    it("uses the default registry if no config") {
      val path = HydraIngestorRegistryClient.registryPath(ConfigFactory.empty())
      path shouldBe s"/user/service/${ActorUtils.actorName(classOf[IngestorRegistry])}"
    }

    it("looks up an ingestor") {
      implicit val timeout = akka.util.Timeout(10.seconds)
      whenReady(registry.lookupIngestor("test_ingestor")) { result =>
        result.ingestors.size shouldBe 1
        result.ingestors(0).name shouldBe "test_ingestor"
        result.ingestors(0).path shouldBe testRegistry.path / "test_ingestor"
      }
    }
  }
}
Example 91
Source File: RabbitTransport.scala From hydra with Apache License 2.0 | 5 votes |
package hydra.rabbit

import akka.actor.Props
import akka.pattern.ask
import akka.util.Timeout
import com.spingo.op_rabbit.Message.{Ack, ConfirmResponse, Fail, Nack}
import com.spingo.op_rabbit._
import com.typesafe.config.Config
import hydra.core.transport.Transport
import hydra.core.transport.Transport.Deliver

import scala.concurrent.duration._
import scala.util.{Failure, Success}

/** Transport that publishes RabbitRecords through an op-rabbit control actor
  * and reports the broker's confirmation back via the delivery callback. */
class RabbitTransport(rabbitControlProps: Props) extends Transport {
  implicit val ec = context.dispatcher

  val rabbitControl = context.actorOf(rabbitControlProps)

  /** Publishes the record to its exchange or queue and returns the confirmation. */
  private def sendMessage(r: RabbitRecord) = {
    implicit val timeout = Timeout(3 seconds)
    val publisher = r.destinationType match {
      case RabbitRecord.DESTINATION_TYPE_EXCHANGE =>
        Publisher.exchange(r.destination)
      case RabbitRecord.DESTINATION_TYPE_QUEUE =>
        Publisher.queue(r.destination)
    }
    (rabbitControl ? Message(r.payload.getBytes(), publisher)).mapTo[ConfirmResponse]
  }

  override def transport = {
    case Deliver(r: RabbitRecord, deliveryId, callback) =>
      sendMessage(r).onComplete {
        case Success(x: Ack) =>
          callback.onCompletion(
            deliveryId,
            Some(
              RabbitRecordMetadata(
                System.currentTimeMillis(),
                x.id,
                r.destination,
                r.destinationType,
                r.ackStrategy
              )
            ),
            None
          )
        case Success(_: Nack) =>
          callback.onCompletion(
            deliveryId,
            None,
            Some(
              RabbitProducerException(
                "Rabbit returned Nack, record not produced"
              )
            )
          )
        case Success(x: Fail) =>
          callback.onCompletion(deliveryId, None, Some(x.exception))
        // BUG FIX: the original used Future.foreach, so a failed future
        // (e.g. an ask timeout) never notified the callback and the delivery
        // was silently lost. Surface the failure to the callback instead.
        case Failure(ex) =>
          callback.onCompletion(deliveryId, None, Some(ex))
      }
  }
}

object RabbitTransport {
  // will be used in testing
  def props(p: Props): Props = Props(classOf[RabbitTransport], p)

  // $COVERAGE-OFF$
  def props(c: Config): Props =
    Props(
      classOf[RabbitTransport],
      Props(
        classOf[RabbitControl],
        Left(ConnectionParams.fromConfig(c.getConfig("op-rabbit.connection")))
      )
    )
  // $COVERAGE-ON$
}

case class RabbitProducerException(msg: String) extends Exception(msg)
Example 92
Source File: RabbitIngestorSpec.scala From hydra with Apache License 2.0 | 5 votes |
package hydra.rabbit

import akka.actor.{ActorSystem, Props}
import akka.testkit.TestActors.ForwardActor
import akka.testkit.{ImplicitSender, TestKit, TestProbe}
import hydra.core.ingest.HydraRequest
import hydra.core.protocol._
import hydra.core.transport.{AckStrategy, HydraRecord}
import org.scalatest.matchers.should.Matchers
import org.scalatest.funspec.AnyFunSpecLike
import org.scalatest.BeforeAndAfterAll

import scala.concurrent.duration._

class RabbitIngestorSpec
    extends TestKit(ActorSystem("rabbit-ingestor-spec"))
    with Matchers
    with AnyFunSpecLike
    with ImplicitSender
    with BeforeAndAfterAll {

  val ingestor = system.actorOf(Props[RabbitIngestor])

  val probe = TestProbe()

  // Stand-in transport that simply forwards everything to the probe.
  val rabbitTransport =
    system.actorOf(Props(new ForwardActor(probe.ref)), "rabbit_transport")

  override def afterAll =
    TestKit.shutdownActorSystem(system, verifySystemShutdown = true)

  describe("When using the rabbit ingestor") {
    it("Joins if exchange provided") {
      val request = HydraRequest(
        "123",
        "{'name': 'test'}",
        None,
        Map(RabbitRecord.HYDRA_RABBIT_EXCHANGE -> "test.exchange")
      )
      ingestor ! Publish(request)
      expectMsg(10.seconds, Join)
    }

    it("Joins if queue provided") {
      val request = HydraRequest(
        "123",
        "{'name': 'test'}",
        None,
        Map(RabbitRecord.HYDRA_RABBIT_QUEUE -> "test.queue")
      )
      ingestor ! Publish(request)
      expectMsg(10.seconds, Join)
    }

    it("Ignores") {
      val request = HydraRequest("123", "test string")
      ingestor ! Publish(request)
      expectMsg(10.seconds, Ignore)
    }

    it("transports") {
      ingestor ! Ingest(
        TestRecord("test", "test", "", AckStrategy.NoAck),
        AckStrategy.NoAck
      )
      probe.expectMsg(
        Produce(
          TestRecord("test", "test", "", AckStrategy.NoAck),
          self,
          AckStrategy.NoAck
        )
      )
    }
  }
}

case class TestRecord(
    destination: String,
    payload: String,
    key: String,
    ackStrategy: AckStrategy
) extends HydraRecord[String, String]
Example 93
Source File: KafkaConsumerProxySpec.scala From hydra with Apache License 2.0 | 5 votes |
package hydra.kafka.consumer

import akka.actor.{ActorSystem, Props}
import akka.testkit.{ImplicitSender, TestKit}
import hydra.kafka.consumer.KafkaConsumerProxy._
import net.manub.embeddedkafka.{EmbeddedKafka, EmbeddedKafkaConfig}
import org.apache.kafka.common.TopicPartition
import org.scalatest.matchers.should.Matchers
import org.scalatest.funspec.AnyFunSpecLike
import org.scalatest.BeforeAndAfterAll

import scala.concurrent.duration._

/**
 * Integration spec for `KafkaConsumerProxy`, run against an embedded Kafka
 * broker. Exercises offset lookup, topic listing, partition info, and the
 * behavior for unknown topics.
 */
class KafkaConsumerProxySpec
    extends TestKit(ActorSystem("test"))
    with Matchers
    with AnyFunSpecLike
    with BeforeAndAfterAll
    with ImplicitSender {

  // Non-default ports to avoid clashing with other suites' embedded brokers.
  implicit val config =
    EmbeddedKafkaConfig(kafkaPort = 8092, zooKeeperPort = 3181)

  override def beforeAll() = {
    super.beforeAll()
    EmbeddedKafka.start()
    EmbeddedKafka.createCustomTopic("test-consumer1")
    EmbeddedKafka.createCustomTopic("test-consumer2")
  }

  override def afterAll() = {
    super.afterAll()
    EmbeddedKafka.stop()
    TestKit.shutdownActorSystem(system, verifySystemShutdown = true)
  }

  // lazy: the proxy must only start after the embedded broker is up.
  lazy val kafkaProxy = system.actorOf(Props[KafkaConsumerProxy])

  describe("When using KafkaConsumerProxy") {
    it("gets latest offsets for a topic") {
      kafkaProxy ! GetLatestOffsets("test-consumer1")
      expectMsg(
        10.seconds,
        LatestOffsetsResponse(
          "test-consumer1",
          Map(new TopicPartition("test-consumer1", 0) -> 0L)
        )
      )
    }

    it("lists topics") {
      kafkaProxy ! ListTopics
      expectMsgPF(10.seconds) {
        case ListTopicsResponse(topics) =>
          topics.keys should contain allOf ("test-consumer1", "test-consumer2")
      }
    }

    it("gets partition info") {
      kafkaProxy ! GetPartitionInfo("test-consumer2")
      expectMsgPF(10.seconds) {
        case PartitionInfoResponse(topic, response) =>
          topic shouldBe "test-consumer2"
          response.map(p => p.partition()) shouldBe Seq(0)
      }
    }

    it("handles errors") {
      // Unknown topics still produce a PartitionInfoResponse (auto-created
      // topic metadata) rather than a failure.
      kafkaProxy ! GetPartitionInfo("test-consumer-unknown")
      expectMsgPF(10.seconds) {
        case PartitionInfoResponse(topic, response) =>
          response(0).leader().idString shouldBe "0"
          topic should startWith("test-consumer-unknown")
      }
    }
  }
}
Example 94
Source File: BootstrapEndpointTestActors.scala From hydra with Apache License 2.0 | 5 votes |
package hydra.kafka.endpoints

import java.util.UUID

import akka.actor.{ActorRef, Props}
import com.typesafe.config.Config
import hydra.avro.registry.ConfluentSchemaRegistry
import hydra.core.akka.SchemaRegistryActor
import hydra.kafka.model.TopicMetadata
import hydra.kafka.services.{StreamsManagerActor, TopicBootstrapActor}
import hydra.kafka.util.KafkaUtils
import io.confluent.kafka.schemaregistry.client.SchemaRegistryClient
import org.joda.time.DateTime

/**
 * Test wiring for the bootstrap endpoint: swaps the production
 * `StreamsManagerActor` for one with a fixed, pre-seeded metadata map so
 * endpoint tests can rely on "exp.test-existing.v1.SubjectPreexisted"
 * already existing.
 */
trait BootstrapEndpointTestActors extends BootstrapEndpointActors {

  /**
   * StreamsManagerActor variant whose metadata map is hard-coded instead of
   * being populated from Kafka.
   */
  class StreamsActorTest(
      bootstrapKafkaConfig: Config,
      bootstrapServers: String,
      schemaRegistryClient: SchemaRegistryClient
  ) extends StreamsManagerActor(
        bootstrapKafkaConfig,
        bootstrapServers,
        schemaRegistryClient
      ) {

    // Fix: the original wrapped the single key/value pair in a brace block
    // (`Map[String, TopicMetadata] { k -> v }`), which reads like a type
    // ascription of a block; use the conventional Map(...) apply instead.
    override val metadataMap: Map[String, TopicMetadata] =
      Map(
        "exp.test-existing.v1.SubjectPreexisted" -> TopicMetadata(
          "exp.test-existing.v1.SubjectPreexisted",
          0,
          "",
          derived = false,
          None,
          "",
          "",
          None,
          None,
          UUID.randomUUID(),
          // created slightly in the past so the entry is considered settled
          DateTime.now().minusSeconds(10)
        )
      )
  }

  object StreamsActorTest {

    /** Props factory mirroring StreamsManagerActor.props for the test actor. */
    def props(
        bootstrapKafkaConfig: Config,
        bootstrapServers: String,
        schemaRegistryClient: SchemaRegistryClient
    ) = {
      Props(
        new StreamsActorTest(
          bootstrapKafkaConfig,
          bootstrapServers,
          schemaRegistryClient
        )
      )
    }
  }

  // Props for the pre-seeded streams manager used by the bootstrap actor below.
  private[kafka] val streamsManagerPropsTest = StreamsActorTest.props(
    bootstrapKafkaConfig,
    KafkaUtils.BootstrapServers,
    ConfluentSchemaRegistry.forConfig(applicationConfig).registryClient
  )

  // Replace the production bootstrap actor with one backed by the test
  // streams manager; all other collaborators come from BootstrapEndpointActors.
  override val bootstrapActor: ActorRef = system.actorOf(
    TopicBootstrapActor.props(
      schemaRegistryActor,
      kafkaIngestor,
      streamsManagerPropsTest,
      Some(bootstrapKafkaConfig)
    )
  )
}
Example 95
Source File: TransportOpsSpec.scala From hydra with Apache License 2.0 | 5 votes |
package hydra.core.ingest

import akka.actor.{ActorRef, ActorSystem, Props}
import akka.testkit.TestActors.ForwardActor
import akka.testkit.{ImplicitSender, TestKit, TestProbe}
import com.pluralsight.hydra.reflect.DoNotScan
import hydra.core.akka.ActorInitializationException
import hydra.core.protocol.{IngestorError, Produce}
import hydra.core.test.TestRecordFactory
import hydra.core.transport.AckStrategy.NoAck
import org.scalatest.concurrent.ScalaFutures
import org.scalatest.matchers.should.Matchers
import org.scalatest.funspec.AnyFunSpecLike
import org.scalatest.BeforeAndAfterAll

import scala.concurrent.Await
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.duration._

/**
 * Spec for the `TransportOps` mixin: transport lookup by name at ingestor
 * initialization, failure to initialize when the transport is missing, and
 * record delivery via `transport(...)`.
 */
class TransportOpsSpec
    extends TestKit(ActorSystem("test"))
    with Matchers
    with AnyFunSpecLike
    with BeforeAndAfterAll
    with ImplicitSender
    with ScalaFutures {

  override def afterAll() = TestKit.shutdownActorSystem(system)

  val supervisor = TestProbe()

  // Receives whatever the transport actor is forwarded.
  val tm = TestProbe()

  // Registered under "test-transport" — the name TestTransportIngestor
  // resolves — and forwards every message to the tm probe.
  val transport =
    system.actorOf(Props(new ForwardActor(tm.ref)), "test-transport")

  describe("TransportOps") {
    it("looks up a transport") {
      val t =
        system.actorOf(Props(classOf[TestTransportIngestor], supervisor.ref))
      t ! "hello"
      expectMsg("hi!")
    }

    it("won't initialize if transport can't be found") {
      // transportName resolves to a non-existent actor, so the ingestor
      // never becomes ready and the message goes unanswered.
      val t = system.actorOf(Props[TestTransportIngestorError])
      t ! "hello"
      expectNoMessage()
    }

    it("transports a record") {
      val req = HydraRequest("123", "test-produce")
      val t =
        system.actorOf(Props(classOf[TestTransportIngestor], supervisor.ref))
      t ! req
      whenReady(TestRecordFactory.build(req))(r =>
        tm.expectMsg(Produce(r, self, NoAck))
      )
    }
  }
}

// Ingestor wired to the "test-transport" actor created by the spec above.
@DoNotScan
class TestTransportIngestor(supervisor: ActorRef)
    extends Ingestor
    with TransportOps {

  override val recordFactory = TestRecordFactory

  // Short timeout keeps the failure test fast.
  override def initTimeout = 500 millis

  ingest {
    case "hello" => sender ! "hi!"
    case req: HydraRequest =>
      // NOTE(review): blocking Await inside an actor — acceptable in test
      // code only.
      val record = Await.result(TestRecordFactory.build(req), 3.seconds)
      transport(record, NoAck)
  }

  override def transportName = "test-transport"
}

// Ingestor whose transport name matches no registered actor; used to verify
// initialization failure behavior.
class TestTransportIngestorError extends Ingestor with TransportOps {
  override val recordFactory = TestRecordFactory
  override def transportName = "test-transport-unknown"
}
Example 96
Source File: ComposeReceiveSpec.scala From hydra with Apache License 2.0 | 5 votes |
package hydra.core.akka

import akka.actor.{Actor, ActorSystem, Props}
import akka.testkit.{ImplicitSender, TestKit}
import org.scalatest.BeforeAndAfterAll
import org.scalatest.matchers.should.Matchers
import org.scalatest.flatspec.AnyFlatSpecLike

/**
 * Spec for the `ComposingReceive` trait: a subclass's `compose(...)` behavior
 * should take precedence over (or fall back to) the trait's `baseReceive`.
 */
class ComposeReceiveSpec
    extends TestKit(ActorSystem("test"))
    with Matchers
    with AnyFlatSpecLike
    with BeforeAndAfterAll
    with ImplicitSender {

  override def afterAll =
    TestKit.shutdownActorSystem(system, verifySystemShutdown = true)

  "The ComposingReceiveTrait" should "compose" in {
    // Composing with empty behavior falls through to baseReceive ("bar");
    // composing with an overriding case replaces it ("new-bar").
    system.actorOf(Props[TestBaseActor]) ! "foo"
    expectMsg("bar")

    system.actorOf(Props[TestComposeActor]) ! "foo"
    expectMsg("new-bar")
  }
}

// Base behavior shared by both test actors.
trait TestBase extends Actor with ComposingReceive {

  override def baseReceive = {
    case "foo" => sender ! "bar"
  }
}

// Composes with empty behavior: baseReceive handles everything.
class TestBaseActor extends TestBase {
  compose(Actor.emptyBehavior)
}

// Composes with a handler that shadows baseReceive's "foo" case.
class TestComposeActor extends TestBase {
  compose {
    case "foo" => sender ! "new-bar"
  }
}
Example 97
Source File: JsonReceiverActor.scala From incubator-retired-iota with Apache License 2.0 | 5 votes |
package org.apache.iota.fey

import java.nio.file.Paths
import java.io.File

import akka.actor.{Actor, ActorLogging, ActorRef, Props}
import play.api.libs.json.{JsValue, Json}

/**
 * Watches the JSON repository directory for orchestration definitions.
 *
 * On start it prepares the dynamic-jar repository, replays checkpoint files
 * (when enabled), and spawns a daemon thread running a
 * `WatchServiceReceiver` that reports parsed files back to this actor as
 * `JSON_RECEIVED`; those are forwarded to the parent (`FeyCore`).
 * Lifecycle transitions are reported to the monitoring actor.
 */
class JsonReceiverActor extends Actor with ActorLogging {

  import JsonReceiverActor._

  val monitoring_actor = FEY_MONITOR.actorRef
  // Initialized in preStart; also re-created on restart via postRestart -> preStart.
  var watchFileTask: WatchServiceReceiver = _
  var watchThread: Thread = _

  // Fix: replaced deprecated procedure syntax (`def preStart() { ... }`)
  // with an explicit `: Unit =` result type (procedure syntax is removed in
  // Scala 3 and warned on in 2.13).
  override def preStart(): Unit = {
    prepareDynamicJarRepo()
    processCheckpointFiles()

    watchFileTask = new WatchServiceReceiver(self)
    watchThread =
      new Thread(watchFileTask, GLOBAL_DEFINITIONS.WATCH_SERVICE_THREAD)

    monitoring_actor ! Monitor.START(Utils.getTimestamp)
    // Daemon so the watcher never blocks JVM shutdown.
    watchThread.setDaemon(true)
    watchThread.start()

    watchFileTask.watch(Paths.get(CONFIG.JSON_REPOSITORY))
  }

  /** Creates the dynamic jar repo, or purges it when force-pull is enabled. */
  private def prepareDynamicJarRepo() = {
    val jarDir = new File(CONFIG.DYNAMIC_JAR_REPO)
    if (!jarDir.exists()) {
      jarDir.mkdir()
    } else if (CONFIG.DYNAMIC_JAR_FORCE_PULL) {
      jarDir.listFiles().foreach(_.delete())
    }
  }

  /** Replays previously checkpointed orchestrations, when enabled. */
  private def processCheckpointFiles() = {
    if (CONFIG.CHEKPOINT_ENABLED) {
      val checkpoint = new CheckpointProcessor(self)
      checkpoint.run()
    }
  }

  // Fix: explicit `: Unit =` instead of procedure syntax (see preStart).
  override def postStop(): Unit = {
    monitoring_actor ! Monitor.STOP(Utils.getTimestamp)
    // Stop the watcher thread and wait for it to finish before the actor dies.
    watchThread.interrupt()
    watchThread.join()
  }

  override def postRestart(reason: Throwable): Unit = {
    monitoring_actor ! Monitor.RESTART(reason, Utils.getTimestamp)
    preStart()
  }

  override def receive: Receive = {
    case JSON_RECEIVED(json, file) =>
      log.info(s"JSON RECEIVED => ${Json.stringify(json)}")
      context.parent ! FeyCore.ORCHESTRATION_RECEIVED(json, Some(file))
    case _ =>
  }
}

object JsonReceiverActor {

  /** Emitted by the watch thread when a JSON definition file was parsed. */
  case class JSON_RECEIVED(json: JsValue, file: File)
}
Example 98
Source File: GlobalPerformer.scala From incubator-retired-iota with Apache License 2.0 | 5 votes |
package org.apache.iota.fey

import akka.actor.SupervisorStrategy.Restart
import akka.actor.{Actor, ActorLogging, ActorRef, OneForOneStrategy, Props, Terminated}
import akka.routing._
import play.api.libs.json.JsObject

import scala.collection.mutable.HashMap
import scala.concurrent.duration._

/**
 * Supervises the global performers of one orchestration.
 *
 * When a watched global performer dies, this actor stops all remaining
 * children and then deliberately throws `RestartGlobalPerformers` so its own
 * supervisor restarts the whole group together.
 */
protected class GlobalPerformer(val orchestrationID: String,
                                val orchestrationName: String,
                                val globalPerformers: List[JsObject],
                                val ensemblesSpec : List[JsObject]) extends Actor with ActorLogging{

  val monitoring_actor = FEY_MONITOR.actorRef
  // Metadata of the performers owned by this actor, keyed by performer id.
  var global_metadata: Map[String, Performer] = Map.empty[String, Performer]

  override def receive: Receive = {

    case GlobalPerformer.PRINT_GLOBAL =>
      // Ask every direct child to print its path.
      context.actorSelection(s"*") ! FeyGenericActor.PRINT_PATH

    case Terminated(actor) =>
      monitoring_actor ! Monitor.TERMINATE(actor.path.toString, Utils.getTimestamp)
      log.error(s"DEAD Global Performers ${actor.path.name}")
      // Tear down the surviving siblings first (unwatch to avoid more
      // Terminated messages), then escalate via exception so the supervisor
      // restarts the entire performer group.
      context.children.foreach{ child =>
        context.unwatch(child)
        context.stop(child)
      }
      throw new RestartGlobalPerformers(s"DEAD Global Performer ${actor.path.name}")

    case GetRoutees => //Discard

    case x => log.warning(s"Message $x not treated by Global Performers")
  }

  /**
   * Loads a FeyGenericActor subclass from the given jar.
   * Logs and rethrows on failure so the caller decides how to handle it.
   */
  private def loadClazzFromJar(classPath: String, jarLocation: String, jarName: String):Class[FeyGenericActor] = {
    try {
      Utils.loadActorClassFromJar(jarLocation,classPath,jarName)
    }catch {
      case e: Exception =>
        log.error(e,s"Could not load class $classPath from jar $jarLocation. Please, check the Jar repository path as well the jar name")
        throw e
    }
  }

}

object GlobalPerformer{

  // Active global performers per orchestration id -> (performer id -> ref).
  val activeGlobalPerformers:HashMap[String, Map[String, ActorRef]] =
    HashMap.empty[String, Map[String, ActorRef]]

  case object PRINT_GLOBAL
}
Example 99
Source File: BaseAkkaSpec.scala From incubator-retired-iota with Apache License 2.0 | 5 votes |
package org.apache.iota.fey

import java.nio.file.Paths

import akka.actor.{ActorIdentity, ActorRef, ActorSystem, Identify, Props}
import akka.testkit.{EventFilter, TestEvent, TestProbe}
import com.typesafe.config.ConfigFactory
import org.scalatest.BeforeAndAfterAll
import play.api.libs.json._

import scala.concurrent.duration.{DurationInt, FiniteDuration}
import scala.concurrent.Await

/**
 * Base class for Fey actor-system tests: loads the test configuration,
 * creates a muted ActorSystem, starts the global identifier actor, and
 * provides TestProbe helpers for locating actors and threads.
 */
class BaseAkkaSpec extends BaseSpec with BeforeAndAfterAll with LoggingTest{

  //Load default configuration for Fey when running tests
  resetCapturedLogs()
  CONFIG.loadUserConfiguration(Paths.get(TestSetup.configTest.toURI()).toFile().getAbsolutePath)
  TestSetup.setup()

  val systemName = "FEY-TEST"
  implicit val system = ActorSystem(systemName, ConfigFactory.parseString("""akka.loggers = ["akka.testkit.TestEventListener"]"""))
  // Mute all log levels so test output stays clean.
  system.eventStream.publish(TestEvent.Mute(EventFilter.debug()))
  system.eventStream.publish(TestEvent.Mute(EventFilter.info()))
  system.eventStream.publish(TestEvent.Mute(EventFilter.warning()))
  system.eventStream.publish(TestEvent.Mute(EventFilter.error()))

  val globalIdentifierName = "GLOBAL-IDENTIFIER"
  val globalIdentifierRef = system.actorOf(Props[IdentifyFeyActors],globalIdentifierName)

  override protected def afterAll(): Unit = {
    //Force reload of GenericActor's jar
    Utils.loadedJars.remove("fey-test-actor.jar")
    Monitor.events.removeAllNodes()
    Await.ready(system.terminate(), 20.seconds)
  }

  implicit class TestProbeOps(probe: TestProbe) {

    /** Resolves an actor by path, retrying until `max` elapses. */
    def expectActor(path: String, max: FiniteDuration = 3.seconds): ActorRef = {
      probe.within(max) {
        var actor = null: ActorRef
        probe.awaitAssert {
          (probe.system actorSelection path).tell(Identify(path), probe.ref)
          probe.expectMsgPF(100 milliseconds) {
            case ActorIdentity(`path`, Some(ref)) => actor = ref
          }
        }
        actor
      }
    }

    /** Like expectActor, but resolves against an explicitly given system. */
    def expectActorInSystem(path: String, lookInSystem: ActorSystem, max: FiniteDuration = 3.seconds): ActorRef = {
      probe.within(max) {
        var actor = null: ActorRef
        probe.awaitAssert {
          (lookInSystem actorSelection path).tell(Identify(path), probe.ref)
          probe.expectMsgPF(100 milliseconds) {
            case ActorIdentity(`path`, Some(ref)) => actor = ref
          }
        }
        actor
      }
    }

    /** Blocks until the given actor is confirmed terminated. */
    def verifyActorTermination(actor: ActorRef)(implicit system: ActorSystem): Unit = {
      val watcher = TestProbe()
      watcher.watch(actor)
      watcher.expectTerminated(actor)
    }

    /** Asserts that no actor exists at `path` within the timeout. */
    def notExpectActor(path: String, max: FiniteDuration = 3.seconds): Unit = {
      probe.within(max) {
        probe.awaitAssert {
          (probe.system actorSelection path).tell(Identify(path), probe.ref)
          probe.expectMsgPF(100 milliseconds) {
            case ActorIdentity(`path`, None) =>
          }
        }
      }
    }

    /**
     * True when a live JVM thread with the given name exists.
     * Fix: the original pattern-matched the Option and wrote
     * `if(thread.isAlive) true else false`; `exists(_.isAlive)` expresses
     * the same result (None => false) idiomatically.
     */
    def isThreadRunning(threadName: String): Boolean = {
      Thread.getAllStackTraces.keySet().toArray
        .map(_.asInstanceOf[Thread])
        .find(_.getName == threadName)
        .exists(_.isAlive)
    }
  }

  //Utils Functions
  /** Parses a raw JSON string into a Play JsValue. */
  def getJSValueFromString(json: String): JsValue = {
    Json.parse(json)
  }

}
Example 100
Source File: IdentifyFeyActorsSpec.scala From incubator-retired-iota with Apache License 2.0 | 5 votes |
package org.apache.iota.fey

import akka.actor.{ActorRef, Props}

/**
 * Spec for `IdentifyFeyActors`: verifies that IDENTIFY_TREE walks the user
 * guardian's children and records their paths in `IdentifyFeyActors.actorsPath`.
 */
class IdentifyFeyActorsSpec extends BaseAkkaSpec {

  val aux_events = new Trie(systemName)

  "Sending IdentifyFeyActors.IDENTIFY_TREE to IdentifyFeyActors" should {
    s"result in one path added to IdentifyFeyActors.actorsPath" in {
      globalIdentifierRef ! IdentifyFeyActors.IDENTIFY_TREE(s"akka://$systemName/user")
      // Identification is asynchronous; give the traversal time to finish.
      Thread.sleep(1000)
      IdentifyFeyActors.actorsPath.size should equal(1)
    }
    s"result in path 'akka://FEY-TEST/user/$globalIdentifierName' " in {
      IdentifyFeyActors.actorsPath should contain(s"akka://$systemName/user/$globalIdentifierName")
    }
  }

  var actor2: ActorRef = _

  "Creating a new actor in the system and sending IdentifyFeyActors.IDENTIFY_TREE to IdentifyFeyActors" should {
    s"result in two paths added to IdentifyFeyActors.actorsPath" in {
      actor2 = system.actorOf(Props(new Monitor(aux_events)),"MONITOR")
      globalIdentifierRef ! IdentifyFeyActors.IDENTIFY_TREE(s"akka://$systemName/user")
      Thread.sleep(1000)
      IdentifyFeyActors.actorsPath.size should equal(2)
    }
    s"result in matching paths" in {
      IdentifyFeyActors.actorsPath should contain(s"akka://$systemName/user/$globalIdentifierName")
      IdentifyFeyActors.actorsPath should contain(s"akka://$systemName/user/MONITOR")
    }
  }

  // NOTE(review): the description below says the actor is stopped and the
  // path count should "go back to one", but nothing stops `actor2` here and
  // the assertion expects 2 paths. Verify against the original project
  // whether a `system.stop(actor2)` step was lost.
  "Stopping previous added actor and sending IdentifyFeyActors.IDENTIFY_TREE to IdentifyFeyActors" should {
    "result in going back to have just one path added to IdentifyFeyActors.actorsPath" in {
      globalIdentifierRef ! IdentifyFeyActors.IDENTIFY_TREE(s"akka://$systemName/user")
      Thread.sleep(1000)
      IdentifyFeyActors.actorsPath.size should equal(2)
    }
    s"result in path 'akka://FEY-TEST/user/$globalIdentifierName' " in {
      IdentifyFeyActors.actorsPath should contain(s"akka://FEY-TEST/user/$globalIdentifierName")
    }
  }
}
Example 101
Source File: MultiNodeSupportCassandra.scala From eventuate with Apache License 2.0 | 5 votes |
package com.rbmhtechnology.eventuate

import java.io.File

import akka.actor.Props
import akka.remote.testconductor.RoleName
import akka.remote.testkit.MultiNodeSpec
import com.rbmhtechnology.eventuate.log.cassandra._
import org.apache.commons.io.FileUtils
import org.scalatest.BeforeAndAfterAll

/**
 * Multi-node test support backed by embedded Cassandra. The coordinator
 * node ("nodeA") starts Cassandra before the test and cleans the database
 * and snapshot files afterwards.
 */
trait MultiNodeSupportCassandra extends BeforeAndAfterAll { this: MultiNodeSpec with MultiNodeWordSpec =>

  // Only this role manages the shared embedded Cassandra instance.
  val coordinator = RoleName("nodeA")

  def cassandraDir: String = MultiNodeEmbeddedCassandra.DefaultCassandraDir

  // Event-log factory used by the multi-node specs.
  def logProps(logId: String): Props = CassandraEventLog.props(logId)

  override def atStartup(): Unit = {
    if (isNode(coordinator)) {
      MultiNodeEmbeddedCassandra.start(cassandraDir)
      Cassandra(system)
    }
    // All nodes wait until Cassandra is up.
    enterBarrier("startup")
  }

  override def afterAll(): Unit = {
    // get all config data before shutting down node
    val snapshotRootDir = new File(system.settings.config.getString("eventuate.snapshot.filesystem.dir"))

    // shut down node
    super.afterAll()

    // clean database and delete snapshot files
    if (isNode(coordinator)) {
      FileUtils.deleteDirectory(snapshotRootDir)
      MultiNodeEmbeddedCassandra.clean()
    }
  }
}
Example 102
Source File: LocationSpecsCassandra.scala From eventuate with Apache License 2.0 | 5 votes |
package com.rbmhtechnology.eventuate

import akka.actor.{ Props, ActorSystem }
import akka.testkit.TestKit

// --------------------------------------------------------------------------
//  Provider-specific single-location specs
// --------------------------------------------------------------------------

// Runs the shared processor integration spec against a Cassandra-backed log.
class EventsourcedProcessorIntegrationSpecCassandra extends TestKit(ActorSystem("test")) with EventsourcedProcessorIntegrationSpec with SingleLocationSpecCassandra {
  override def beforeEach(): Unit = {
    super.beforeEach()
    init()
  }
}

// Actor integration spec with batching disabled for this provider.
class EventsourcedActorIntegrationSpecCassandra extends TestKit(ActorSystem("test")) with EventsourcedActorIntegrationSpec with SingleLocationSpecCassandra {
  override def batching = false
}

class PersistOnEventIntegrationSpecCassandra extends TestKit(ActorSystem("test")) with PersistOnEventIntegrationSpec with SingleLocationSpecCassandra
class EventsourcedActorThroughputSpecCassandra extends TestKit(ActorSystem("test")) with EventsourcedActorThroughputSpec with SingleLocationSpecCassandra

// --------------------------------------------------------------------------
//  Provider-specific multi-location specs
// --------------------------------------------------------------------------

// Causality spec with the Cassandra test log (batching + aggregate indexing).
class EventsourcedActorCausalitySpecCassandra extends EventsourcedActorCausalitySpec with MultiLocationSpecCassandra {
  override val logFactory: String => Props = id => SingleLocationSpecCassandra.TestEventLog.props(id, batching = true, aggregateIndexing = true)
}

// Replication specs; customPort avoids clashes with the LevelDB variants.
class ReplicationIntegrationSpecCassandra extends ReplicationIntegrationSpec with MultiLocationSpecCassandra {
  def customPort = 2554
}

class ReplicationCycleSpecCassandra extends ReplicationCycleSpec with MultiLocationSpecCassandra
Example 103
Source File: LogProducer.scala From eventuate with Apache License 2.0 | 5 votes |
package com.rbmhtechnology.eventuate.adapter.vertx

import akka.actor.{ ActorRef, Props }
import com.rbmhtechnology.eventuate.EventsourcedActor
import io.vertx.core.eventbus.Message

import scala.util.{ Failure, Success }

private[vertx] object LogProducer {

  /** Command asking the producer to persist the body of a Vert.x message. */
  case class PersistMessage(message: Message[Any])

  /** Props for a producer writing to the given event log. */
  def props(id: String, eventLog: ActorRef): Props =
    Props(new LogProducer(id, eventLog))
}

/**
 * Persists Vert.x event-bus message bodies to the event log and replies to
 * the originating message with the outcome.
 */
private[vertx] class LogProducer(val id: String, val eventLog: ActorRef)
  extends EventsourcedActor {

  import LogProducer._

  override def stateSync: Boolean = false

  // prevent event-replay
  override def replayFromSequenceNr: Option[Long] = Some(Long.MaxValue)

  override def onCommand: Receive = {
    case PersistMessage(message) =>
      persist(message.body()) { outcome =>
        outcome match {
          case Success(_)   => message.reply(ProcessingResult.PERSISTED)
          case Failure(err) => message.fail(0, err.getMessage)
        }
      }
  }

  // Persisted events carry no state for this actor; ignore them all.
  override def onEvent: Receive = {
    case _ =>
  }
}
Example 104
Source File: LogEventDispatcher.scala From eventuate with Apache License 2.0 | 5 votes |
package com.rbmhtechnology.eventuate.adapter.vertx

import akka.actor.{ Actor, ActorRef, Props }
import com.rbmhtechnology.eventuate.adapter.vertx.LogEventDispatcher.EndpointRoute
import com.rbmhtechnology.eventuate.adapter.vertx.LogProducer.PersistMessage
import io.vertx.core.Vertx
import io.vertx.core.eventbus.{ Message, MessageConsumer }

object LogEventDispatcher {

  /** Identifies one event-log producer by id and target log. */
  case class EventProducerRef(id: String, log: ActorRef)

  /** Binds a Vert.x endpoint to a producer, with an optional message filter. */
  case class EndpointRoute(sourceEndpoint: String, producer: EventProducerRef, filter: PartialFunction[Any, Boolean] = { case _ => true })

  def props(routes: Seq[EndpointRoute], vertx: Vertx): Props =
    Props(new LogEventDispatcher(routes, vertx))
}

/**
 * Wires Vert.x event-bus endpoints to LogProducer actors: one producer per
 * distinct EventProducerRef, one consumer per route. Messages passing the
 * route's filter are persisted; the rest are answered with FILTERED.
 */
class LogEventDispatcher(routes: Seq[EndpointRoute], vertx: Vertx) extends Actor {

  import VertxHandlerConverters._

  // One LogProducer child per distinct producer reference, keyed by id.
  val producers: Map[String, ActorRef] =
    for ((producerRef, _) <- routes.groupBy(_.producer))
      yield producerRef.id -> context.actorOf(LogProducer.props(producerRef.id, producerRef.log))

  // One event-bus consumer per configured route.
  val consumers: Seq[MessageConsumer[Any]] =
    for (route <- routes)
      yield installMessageConsumer(route.sourceEndpoint, producers(route.producer.id), route.filter)

  private def installMessageConsumer(endpoint: String, producer: ActorRef, filter: PartialFunction[Any, Boolean]): MessageConsumer[Any] = {
    val handler = (msg: Message[Any]) => {
      val accepted = filter.applyOrElse(msg.body(), (_: Any) => false)
      if (!accepted) {
        msg.reply(ProcessingResult.FILTERED)
      } else {
        producer ! PersistMessage(msg)
      }
    }
    vertx.eventBus().consumer[Any](endpoint, handler.asVertxHandler)
  }

  // The dispatcher itself handles no actor messages.
  override def receive: Receive = Actor.emptyBehavior

  override def postStop(): Unit = {
    // Unregister all event-bus consumers when the actor stops.
    for (consumer <- consumers) consumer.unregister()
  }
}
Example 105
Source File: VertxNoConfirmationSender.scala From eventuate with Apache License 2.0 | 5 votes |
package com.rbmhtechnology.eventuate.adapter.vertx

import akka.actor.{ ActorRef, Props }
import com.rbmhtechnology.eventuate.adapter.vertx.api.{ EndpointRouter, StorageProvider }
import io.vertx.core.Vertx

import scala.collection.immutable.Seq
import scala.concurrent.{ ExecutionContext, Future }

private[eventuate] object VertxNoConfirmationSender {

  /** Props for a fire-and-forget sender, run on the write dispatcher. */
  def props(id: String, eventLog: ActorRef, endpointRouter: EndpointRouter, vertx: Vertx, storageProvider: StorageProvider): Props = {
    val base = Props(new VertxNoConfirmationSender(id, eventLog, endpointRouter, vertx, storageProvider))
    base.withDispatcher("eventuate.log.dispatchers.write-dispatcher")
  }
}

/**
 * Dispatches events to Vert.x via point-to-point send without awaiting
 * delivery confirmations; progress is tracked by sequence number.
 */
private[eventuate] class VertxNoConfirmationSender(val id: String, val eventLog: ActorRef, val endpointRouter: EndpointRouter, val vertx: Vertx, val storageProvider: StorageProvider)
  extends VertxEventDispatcher[Long, Long] with VertxSender with SequenceNumberProgressStore {

  override def dispatch(events: Seq[EventEnvelope])(implicit ec: ExecutionContext): Future[Unit] =
    Future {
      // Fire-and-forget: send every envelope, no confirmation handling.
      for (envelope <- events) send(envelope.address, envelope.evt)
    }
}
Example 106
Source File: VertxNoConfirmationPublisher.scala From eventuate with Apache License 2.0 | 5 votes |
package com.rbmhtechnology.eventuate.adapter.vertx

import akka.actor.{ ActorRef, Props }
import com.rbmhtechnology.eventuate.adapter.vertx.api.{ EndpointRouter, StorageProvider }
import io.vertx.core.Vertx

import scala.collection.immutable.Seq
import scala.concurrent.{ ExecutionContext, Future }

private[eventuate] object VertxNoConfirmationPublisher {

  /** Props for a fire-and-forget publisher, run on the write dispatcher. */
  def props(id: String, eventLog: ActorRef, endpointRouter: EndpointRouter, vertx: Vertx, storageProvider: StorageProvider): Props = {
    val base = Props(new VertxNoConfirmationPublisher(id, eventLog, endpointRouter, vertx, storageProvider))
    base.withDispatcher("eventuate.log.dispatchers.write-dispatcher")
  }
}

/**
 * Dispatches events to Vert.x via publish (broadcast) without awaiting
 * delivery confirmations; progress is tracked by sequence number.
 */
private[eventuate] class VertxNoConfirmationPublisher(val id: String, val eventLog: ActorRef, val endpointRouter: EndpointRouter, val vertx: Vertx, val storageProvider: StorageProvider)
  extends VertxEventDispatcher[Long, Long] with VertxPublisher with SequenceNumberProgressStore {

  override def dispatch(events: Seq[EventEnvelope])(implicit ec: ExecutionContext): Future[Unit] =
    Future {
      // Fire-and-forget: publish every envelope, no confirmation handling.
      for (envelope <- events) publish(envelope.address, envelope.evt)
    }
}
Example 107
Source File: VertxSingleConfirmationSender.scala From eventuate with Apache License 2.0 | 5 votes |
package com.rbmhtechnology.eventuate.adapter.vertx

import akka.actor.{ ActorLogging, ActorRef, Props }
import akka.pattern.pipe
import com.rbmhtechnology.eventuate.adapter.vertx.api.EndpointRouter
import com.rbmhtechnology.eventuate.{ ConfirmedDelivery, EventsourcedActor }
import io.vertx.core.Vertx

import scala.concurrent.duration.FiniteDuration
import scala.util.{ Failure, Success }

private[vertx] object VertxSingleConfirmationSender {

  // Internal protocol for the delivery / confirmation / redelivery cycle.
  case class DeliverEvent(evt: EventEnvelope, deliveryId: String)
  case class Confirm(deliveryId: String)
  case class DeliverFailed(evt: EventEnvelope, deliveryId: String, err: Throwable)
  case object Redeliver

  // Persisted marker recording that a delivery was confirmed.
  case class DeliveryConfirmed()

  def props(id: String, eventLog: ActorRef, endpointRouter: EndpointRouter, vertx: Vertx, confirmationTimeout: FiniteDuration): Props =
    Props(new VertxSingleConfirmationSender(id, eventLog, endpointRouter, vertx, confirmationTimeout))
}

/**
 * Sends events to Vert.x with at-least-once semantics: each event is
 * delivered via `ConfirmedDelivery`, confirmations are persisted as
 * `DeliveryConfirmed` events, and unconfirmed deliveries are periodically
 * retried on the `Redeliver` tick.
 */
private[vertx] class VertxSingleConfirmationSender(val id: String, val eventLog: ActorRef, val endpointRouter: EndpointRouter, val vertx: Vertx, confirmationTimeout: FiniteDuration)
  extends EventsourcedActor with ConfirmedDelivery with VertxSender with ActorLogging {

  import VertxSingleConfirmationSender._
  import context.dispatcher

  // Periodic redelivery tick for unconfirmed deliveries.
  context.system.scheduler.schedule(confirmationTimeout, confirmationTimeout, self, Redeliver)

  override def onCommand: Receive = {
    case DeliverEvent(envelope, deliveryId) =>
      // Ask Vert.x to deliver; map the async result back onto this actor as
      // either a Confirm or a DeliverFailed message.
      send[Any](envelope.address, envelope.evt, confirmationTimeout)
        .map(_ => Confirm(deliveryId))
        .recover { case err => DeliverFailed(envelope, deliveryId, err) }
        .pipeTo(self)

    case Confirm(deliveryId) if unconfirmed.contains(deliveryId) =>
      // Persist the confirmation so it survives restarts.
      persistConfirmation(DeliveryConfirmed(), deliveryId) {
        case Success(evt) =>
        case Failure(err) => log.error(s"Confirmation for delivery with id '$deliveryId' could not be persisted.", err)
      }

    case Redeliver =>
      redeliverUnconfirmed()

    case DeliverFailed(evt, deliveryId, err) =>
      // Failure is logged only; the Redeliver tick will retry the delivery.
      log.warning(s"Delivery with id '$deliveryId' for event [$evt] failed with $err. The delivery will be retried.")
  }

  override def onEvent: Receive = {
    case DeliveryConfirmed() =>
    // confirmations should not be published
    case ev =>
      endpointRouter.endpoint(ev) match {
        case Some(endpoint) =>
          // Use the sequence number as the delivery id for this event.
          val deliveryId = lastSequenceNr.toString
          deliver(deliveryId, DeliverEvent(EventEnvelope(endpoint, lastHandledEvent), deliveryId), self.path)
        case None =>
      }
  }
}
Example 108
Source File: LeveldbDeletionActor.scala From eventuate with Apache License 2.0 | 5 votes |
package com.rbmhtechnology.eventuate.log.leveldb

import java.io.Closeable

import akka.actor.Actor
import akka.actor.PoisonPill
import akka.actor.Props

import com.rbmhtechnology.eventuate.log.leveldb.LeveldbEventLog._

import org.iq80.leveldb.DB
import org.iq80.leveldb.ReadOptions
import org.iq80.leveldb.WriteOptions

import scala.annotation.tailrec
import scala.concurrent.Promise

private object LeveldbDeletionActor {
  // Self-message driving one batch of deletions at a time.
  case object DeleteBatch

  def props(leveldb: DB, leveldbReadOptions: ReadOptions, leveldbWriteOptions: WriteOptions, batchSize: Int, toSequenceNr: Long, promise: Promise[Unit]): Props =
    Props(new LeveldbDeletionActor(leveldb, leveldbReadOptions, leveldbWriteOptions, batchSize, toSequenceNr, promise))
}

/**
 * Deletes all event entries up to `toSequenceNr` from a LevelDB event log,
 * in batches of `batchSize`, then completes `promise` and stops itself.
 * Batching keeps each write small and lets other actor messages interleave.
 */
private class LeveldbDeletionActor(
  val leveldb: DB,
  val leveldbReadOptions: ReadOptions,
  val leveldbWriteOptions: WriteOptions,
  batchSize: Int,
  toSequenceNr: Long,
  promise: Promise[Unit])
  extends Actor with WithBatch {

  import LeveldbDeletionActor._

  // Iterates the keys eligible for deletion; closed in postStop.
  val eventKeyIterator: CloseableIterator[EventKey] = newEventKeyIterator

  override def preStart() = self ! DeleteBatch

  override def postStop() = eventKeyIterator.close()

  override def receive = {
    case DeleteBatch =>
      // Delete up to batchSize keys in one LevelDB write batch.
      withBatch { batch =>
        eventKeyIterator.take(batchSize).foreach { eventKey =>
          batch.delete(eventKeyBytes(eventKey.classifier, eventKey.sequenceNr))
        }
      }
      if (eventKeyIterator.hasNext) {
        // More keys remain: schedule the next batch.
        self ! DeleteBatch
      } else {
        // Done: fulfil the caller's promise and stop.
        promise.success(())
        self ! PoisonPill
      }
  }

  // Iterator over event keys with sequenceNr <= toSequenceNr, across all
  // classifiers, backed by a LevelDB snapshot for a consistent view.
  private def newEventKeyIterator: CloseableIterator[EventKey] = {
    new Iterator[EventKey] with Closeable {
      val iterator = leveldb.iterator(leveldbReadOptions.snapshot(leveldb.getSnapshot))
      iterator.seek(eventKeyBytes(EventKey.DefaultClassifier, 1L))

      // Skips ahead to the next classifier once the current one's keys
      // exceed toSequenceNr; stops at the end-of-log sentinel key.
      @tailrec
      override def hasNext: Boolean = {
        val key = eventKey(iterator.peekNext().getKey)
        key != eventKeyEnd &&
          (key.sequenceNr <= toSequenceNr || {
            iterator.seek(eventKeyBytes(key.classifier + 1, 1L))
            hasNext
          })
      }

      override def next() = eventKey(iterator.next().getKey)
      override def close() = {
        iterator.close()
        // Release the snapshot taken when the iterator was created.
        leveldbReadOptions.snapshot().close()
      }
    }
  }
}
Example 109
Source File: MultiNodeSupportLeveldb.scala From eventuate with Apache License 2.0 | 5 votes |
package com.rbmhtechnology.eventuate

import java.io.File

import akka.actor.Props
import akka.remote.testconductor.RoleName
import akka.remote.testkit.MultiNodeSpec
import com.rbmhtechnology.eventuate.log.leveldb.LeveldbEventLog
import org.apache.commons.io.FileUtils
import org.scalatest.BeforeAndAfterAll

/**
 * Multi-node test support backed by LevelDB. The coordinator node ("nodeA")
 * deletes the log and snapshot directories after the test run.
 */
trait MultiNodeSupportLeveldb extends BeforeAndAfterAll { this: MultiNodeSpec with MultiNodeWordSpec =>

  // Only this role cleans up the shared on-disk state.
  val coordinator = RoleName("nodeA")

  // Event-log factory used by the multi-node specs.
  def logProps(logId: String): Props = LeveldbEventLog.props(logId)

  override def afterAll(): Unit = {
    // get all config data before shutting down node
    val snapshotRootDir = new File(system.settings.config.getString("eventuate.snapshot.filesystem.dir"))
    val logRootDir = new File(system.settings.config.getString("eventuate.log.leveldb.dir"))

    // shut down node
    super.afterAll()

    // delete log and snapshot files
    if (isNode(coordinator)) {
      FileUtils.deleteDirectory(snapshotRootDir)
      FileUtils.deleteDirectory(logRootDir)
    }
  }
}
Example 110
Source File: PersistOnEventWithRecoverySpecLeveldb.scala From eventuate with Apache License 2.0 | 5 votes |
package com.rbmhtechnology.eventuate

import java.util.UUID

import akka.actor.Actor
import akka.actor.ActorRef
import akka.actor.Props
import akka.testkit.TestProbe

import com.rbmhtechnology.eventuate.ReplicationIntegrationSpec.replicationConnection
import com.rbmhtechnology.eventuate.utilities._

import org.apache.commons.io.FileUtils
import org.scalatest.Matchers
import org.scalatest.WordSpec

import scala.concurrent.duration.DurationInt

object PersistOnEventWithRecoverySpecLeveldb {

  // On replaying "B" this actor persists a fresh random UUID; replayed UUID
  // events are forwarded to the probe. Used to observe whether persistOnEvent
  // is re-attempted after recovery.
  class OnBEmitRandomActor(val eventLog: ActorRef, probe: TestProbe) extends EventsourcedActor with PersistOnEvent {

    override def id = getClass.getName

    override def onCommand = Actor.emptyBehavior

    override def onEvent = {
      case "A" =>
      case "B" => persistOnEvent(UUID.randomUUID().toString)
      case uuid: String => probe.ref ! uuid
    }
  }

  // Starts an OnBEmitRandomActor at the given location and returns the probe
  // that receives the UUID events it replays.
  def persistOnEventProbe(locationA1: Location, log: ActorRef) = {
    val probe = locationA1.probe
    locationA1.system.actorOf(Props(new OnBEmitRandomActor(log, probe)))
    probe
  }

  val noMsgTimeout = 100.millis
}

/**
 * Verifies that a successfully persisted persistOnEvent emission is not
 * re-attempted when disaster recovery reorders events: after location A loses
 * its log and recovers from B, the actor must replay the same UUID exactly
 * once and emit nothing new.
 */
class PersistOnEventWithRecoverySpecLeveldb extends WordSpec with Matchers with MultiLocationSpecLeveldb {
  import RecoverySpecLeveldb._
  import PersistOnEventWithRecoverySpecLeveldb._

  override val logFactory: String => Props = id => SingleLocationSpecLeveldb.TestEventLog.props(id, batching = true)

  "An EventsourcedActor with PersistOnEvent" must {
    "not re-attempt persistence on successful write after reordering of events through disaster recovery" in {
      val locationB = location("B", customConfig = RecoverySpecLeveldb.config)
      def newLocationA = location("A", customConfig = RecoverySpecLeveldb.config)
      val locationA1 = newLocationA

      val endpointB = locationB.endpoint(Set("L1"), Set(replicationConnection(locationA1.port)))
      def newEndpointA(l: Location, activate: Boolean) = l.endpoint(Set("L1"), Set(replicationConnection(locationB.port)), activate = activate)
      val endpointA1 = newEndpointA(locationA1, activate = true)

      val targetA = endpointA1.target("L1")
      val logDirA = logDirectory(targetA)
      val targetB = endpointB.target("L1")
      val a1Probe = persistOnEventProbe(locationA1, targetA.log)

      // Writing "B" triggers the persistOnEvent emission; capture the UUID.
      write(targetA, List("A"))
      write(targetB, List("B"))

      val event = a1Probe.expectMsgClass(classOf[String])
      assertConvergence(Set("A", "B", event), endpointA1, endpointB)

      // Simulate disaster: stop A and wipe its log directory.
      locationA1.terminate().await
      FileUtils.deleteDirectory(logDirA)

      // Bring up a fresh A and recover its state from B.
      val locationA2 = newLocationA
      val endpointA2 = newEndpointA(locationA2, activate = false)
      endpointA2.recover().await

      // The same UUID must be replayed exactly once; no new emission.
      val a2Probe = persistOnEventProbe(locationA2, endpointA2.logs("L1"))
      a2Probe.expectMsg(event)
      a2Probe.expectNoMsg(noMsgTimeout)
      assertConvergence(Set("A", "B", event), endpointA2, endpointB)
    }
  }
}
Example 111
Source File: LocationSpecsLeveldb.scala From eventuate with Apache License 2.0 | 5 votes |
package com.rbmhtechnology.eventuate import akka.actor.{ Props, ActorSystem } import akka.testkit.TestKit // -------------------------------------------------------------------------- // Provider-specific single-location specs // -------------------------------------------------------------------------- class EventsourcedProcessorIntegrationSpecLeveldb extends TestKit(ActorSystem("test")) with EventsourcedProcessorIntegrationSpec with SingleLocationSpecLeveldb { override def beforeEach(): Unit = { super.beforeEach() init() } } class EventsourcedActorIntegrationSpecLeveldb extends TestKit(ActorSystem("test")) with EventsourcedActorIntegrationSpec with SingleLocationSpecLeveldb { override def batching = false } class PersistOnEventIntegrationSpecLeveldb extends TestKit(ActorSystem("test")) with PersistOnEventIntegrationSpec with SingleLocationSpecLeveldb class EventsourcedActorThroughputSpecLeveldb extends TestKit(ActorSystem("test")) with EventsourcedActorThroughputSpec with SingleLocationSpecLeveldb // -------------------------------------------------------------------------- // Provider-specific multi-location specs // -------------------------------------------------------------------------- class EventsourcedActorCausalitySpecLeveldb extends EventsourcedActorCausalitySpec with MultiLocationSpecLeveldb { override val logFactory: String => Props = id => SingleLocationSpecLeveldb.TestEventLog.props(id, batching = true) } class ReplicationIntegrationSpecLeveldb extends ReplicationIntegrationSpec with MultiLocationSpecLeveldb { def customPort = 2553 } class ReplicationCycleSpecLeveldb extends ReplicationCycleSpec with MultiLocationSpecLeveldb
Example 112
Source File: RoleLeaderAutoDowningRoles.scala From akka-cluster-custom-downing with Apache License 2.0 | 5 votes |
package tanukki.akka.cluster.autodown import akka.actor.{ActorSystem, Address, Props} import akka.cluster.{Cluster, DowningProvider} import com.typesafe.config.Config import scala.collection.JavaConverters._ import scala.concurrent.duration.{FiniteDuration, _} final class RoleLeaderAutoDowningRoles(system: ActorSystem) extends DowningProvider { private[this] val cluster = Cluster(system) private val config: Config = system.settings.config override def downRemovalMargin: FiniteDuration = { val key = "custom-downing.down-removal-margin" config.getString(key) match { case "off" => Duration.Zero case _ => Duration(config.getDuration(key, MILLISECONDS), MILLISECONDS) } } override def downingActorProps: Option[Props] = { val stableAfter = system.settings.config.getDuration("custom-downing.stable-after").toMillis millis val leaderRole = system.settings.config.getString("custom-downing.role-leader-auto-downing-roles.leader-role") val roles = system.settings.config.getStringList("custom-downing.role-leader-auto-downing-roles.target-roles").asScala.toSet if (roles.isEmpty) None else Some(RoleLeaderAutoDownRoles.props(leaderRole, roles, stableAfter)) } } private[autodown] object RoleLeaderAutoDownRoles { def props(leaderRole: String, targetRoles: Set[String], autoDownUnreachableAfter: FiniteDuration): Props = Props(classOf[RoleLeaderAutoDownRoles], leaderRole, targetRoles, autoDownUnreachableAfter) } private[autodown] class RoleLeaderAutoDownRoles(leaderRole: String, targetRoles: Set[String], autoDownUnreachableAfter: FiniteDuration) extends RoleLeaderAutoDownRolesBase(leaderRole, targetRoles, autoDownUnreachableAfter) with ClusterCustomDowning { override def down(node: Address): Unit = { log.info("RoleLeader is auto-downing unreachable node [{}]", node) cluster.down(node) } }
Example 113
Source File: OldestAutoDowning.scala From akka-cluster-custom-downing with Apache License 2.0 | 5 votes |
package tanukki.akka.cluster.autodown

import akka.ConfigurationException
import akka.actor.{ActorSystem, Address, Props}
import akka.cluster.{Cluster, DowningProvider}
import com.typesafe.config.Config
import scala.concurrent.Await
import scala.concurrent.duration._

/**
 * [[DowningProvider]] that lets the oldest member (optionally restricted to a role)
 * auto-down unreachable nodes, with optional self-shutdown on split-brain resolution.
 */
class OldestAutoDowning(system: ActorSystem) extends DowningProvider {

  private[this] val cluster = Cluster(system)

  // Cached once; all config lookups below go through this val.
  private val config: Config = system.settings.config

  /** Margin before a downed member is removed; "off" maps to Duration.Zero. */
  override def downRemovalMargin: FiniteDuration = {
    val key = "custom-downing.down-removal-margin"
    config.getString(key) match {
      case "off" => Duration.Zero
      case _     => Duration(config.getDuration(key, MILLISECONDS), MILLISECONDS)
    }
  }

  /**
   * Props of the downing actor.
   * Consistency fix: reads through the cached `config` val instead of re-resolving
   * `system.settings.config`, matching MajorityLeaderAutoDowning in this package.
   *
   * @throws ConfigurationException when down-if-alone=true with a zero stable-after,
   *                                which would make the "alone" check meaningless.
   */
  override def downingActorProps: Option[Props] = {
    val stableAfter = config.getDuration("custom-downing.stable-after").toMillis millis
    val oldestMemberRole = {
      val r = config.getString("custom-downing.oldest-auto-downing.oldest-member-role")
      if (r.isEmpty) None else Some(r)
    }
    val downIfAlone = config.getBoolean("custom-downing.oldest-auto-downing.down-if-alone")
    val shutdownActorSystem = config.getBoolean("custom-downing.oldest-auto-downing.shutdown-actor-system-on-resolution")
    if (stableAfter == Duration.Zero && downIfAlone)
      throw new ConfigurationException("If you set down-if-alone=true, stable-after timeout must be greater than zero.")
    else {
      Some(OldestAutoDown.props(oldestMemberRole, downIfAlone, shutdownActorSystem, stableAfter))
    }
  }
}

private[autodown] object OldestAutoDown {
  def props(oldestMemberRole: Option[String], downIfAlone: Boolean, shutdownActorSystem: Boolean, autoDownUnreachableAfter: FiniteDuration): Props =
    Props(classOf[OldestAutoDown], oldestMemberRole, downIfAlone, shutdownActorSystem, autoDownUnreachableAfter)
}

private[autodown] class OldestAutoDown(oldestMemberRole: Option[String], downIfAlone: Boolean, shutdownActorSystem: Boolean, autoDownUnreachableAfter: FiniteDuration)
  extends OldestAutoDownBase(oldestMemberRole, downIfAlone, autoDownUnreachableAfter) with ClusterCustomDowning {

  /** Issues the actual down command for the unreachable node. */
  override def down(node: Address): Unit = {
    log.info("Oldest is auto-downing unreachable node [{}]", node)
    cluster.down(node)
  }

  /** Either terminates the whole actor system or escalates via SplitBrainResolvedError. */
  override def shutdownSelf(): Unit = {
    if (shutdownActorSystem) {
      Await.result(context.system.terminate(), 10 seconds)
    } else {
      throw new SplitBrainResolvedError("OldestAutoDowning")
    }
  }
}
Example 114
Source File: MajorityLeaderAutoDowning.scala From akka-cluster-custom-downing with Apache License 2.0 | 5 votes |
package tanukki.akka.cluster.autodown

import akka.actor.{ActorSystem, Address, Props}
import akka.cluster.{Cluster, DowningProvider}
import com.typesafe.config.Config
import scala.concurrent.Await
import scala.concurrent.duration._

/**
 * [[DowningProvider]] that lets the majority partition (optionally scoped to a role)
 * auto-down unreachable nodes, with optional self-shutdown when in the minority.
 */
class MajorityLeaderAutoDowning(system: ActorSystem) extends DowningProvider {

  private[this] val cluster = Cluster(system)

  // Cached once; all config lookups below go through this val.
  private val config: Config = system.settings.config

  /** Margin before a downed member is removed; "off" maps to Duration.Zero. */
  override def downRemovalMargin: FiniteDuration = {
    val key = "custom-downing.down-removal-margin"
    config.getString(key) match {
      case "off" => Duration.Zero
      case _ => Duration(config.getDuration(key, MILLISECONDS), MILLISECONDS)
    }
  }

  /** Props of the downing actor, built from the custom-downing.majority-leader-auto-downing.* settings. */
  override def downingActorProps: Option[Props] = {
    val stableAfter = config.getDuration("custom-downing.stable-after").toMillis millis
    // Empty role string means "no role restriction".
    val majorityMemberRole = {
      val r = config.getString("custom-downing.majority-leader-auto-downing.majority-member-role")
      if (r.isEmpty) None else Some(r)
    }
    val downIfInMinority = config.getBoolean("custom-downing.majority-leader-auto-downing.down-if-in-minority")
    val shutdownActorSystem = config.getBoolean("custom-downing.majority-leader-auto-downing.shutdown-actor-system-on-resolution")
    Some(MajorityLeaderAutoDown.props(majorityMemberRole, downIfInMinority, shutdownActorSystem, stableAfter))
  }
}

private[autodown] object MajorityLeaderAutoDown {
  def props(majorityMemberRole: Option[String], downIfInMinority: Boolean, shutdownActorSystem: Boolean, autoDownUnreachableAfter: FiniteDuration): Props =
    Props(classOf[MajorityLeaderAutoDown], majorityMemberRole, downIfInMinority, shutdownActorSystem, autoDownUnreachableAfter)
}

private[autodown] class MajorityLeaderAutoDown(majorityMemberRole: Option[String], downIfInMinority: Boolean, shutdownActorSystem: Boolean, autoDownUnreachableAfter: FiniteDuration)
  extends MajorityLeaderAutoDownBase(majorityMemberRole, downIfInMinority, autoDownUnreachableAfter) with ClusterCustomDowning {

  /** Issues the actual down command for the unreachable node. */
  override def down(node: Address): Unit = {
    log.info("Majority is auto-downing unreachable node [{}]", node)
    cluster.down(node)
  }

  /** Either terminates the whole actor system or escalates via SplitBrainResolvedError. */
  override def shutdownSelf(): Unit = {
    if (shutdownActorSystem) {
      Await.result(context.system.terminate(), 10 seconds)
    } else {
      throw new SplitBrainResolvedError("MajorityAutoDowning")
    }
  }
}
Example 115
Source File: QuorumLeaderAutoDowning.scala From akka-cluster-custom-downing with Apache License 2.0 | 5 votes |
package tanukki.akka.cluster.autodown

import akka.actor.{ActorSystem, Address, Props}
import akka.cluster.{Cluster, DowningProvider}
import com.typesafe.config.Config
import scala.concurrent.Await
import scala.concurrent.duration._

/**
 * [[DowningProvider]] that lets the leader of a quorum (of configured size, optionally
 * scoped to a role) auto-down unreachable nodes, with optional self-shutdown when
 * falling out of the quorum.
 */
class QuorumLeaderAutoDowning(system: ActorSystem) extends DowningProvider {

  private[this] val cluster = Cluster(system)

  // Cached once; all config lookups below go through this val.
  private val config: Config = system.settings.config

  /** Margin before a downed member is removed; "off" maps to Duration.Zero. */
  override def downRemovalMargin: FiniteDuration = {
    val key = "custom-downing.down-removal-margin"
    config.getString(key) match {
      case "off" => Duration.Zero
      case _     => Duration(config.getDuration(key, MILLISECONDS), MILLISECONDS)
    }
  }

  /**
   * Props of the downing actor, built from custom-downing.quorum-leader-auto-downing.* settings.
   * Consistency fix: reads through the cached `config` val instead of re-resolving
   * `system.settings.config`, matching MajorityLeaderAutoDowning in this package.
   */
  override def downingActorProps: Option[Props] = {
    val stableAfter = config.getDuration("custom-downing.stable-after").toMillis millis
    // Empty role string means "no role restriction".
    val role = {
      val r = config.getString("custom-downing.quorum-leader-auto-downing.role")
      if (r.isEmpty) None else Some(r)
    }
    val quorumSize = config.getInt("custom-downing.quorum-leader-auto-downing.quorum-size")
    val downIfOutOfQuorum = config.getBoolean("custom-downing.quorum-leader-auto-downing.down-if-out-of-quorum")
    val shutdownActorSystem = config.getBoolean("custom-downing.quorum-leader-auto-downing.shutdown-actor-system-on-resolution")
    Some(QuorumLeaderAutoDown.props(role, quorumSize, downIfOutOfQuorum, shutdownActorSystem, stableAfter))
  }
}

private[autodown] object QuorumLeaderAutoDown {
  def props(quorumRole: Option[String], quorumSize: Int, downIfOutOfQuorum: Boolean, shutdownActorSystem: Boolean, autoDownUnreachableAfter: FiniteDuration): Props =
    Props(classOf[QuorumLeaderAutoDown], quorumRole, quorumSize, downIfOutOfQuorum, shutdownActorSystem, autoDownUnreachableAfter)
}

private[autodown] class QuorumLeaderAutoDown(quorumRole: Option[String], quorumSize: Int, downIfOutOfQuorum: Boolean, shutdownActorSystem: Boolean, autoDownUnreachableAfter: FiniteDuration)
  extends QuorumLeaderAutoDownBase(quorumRole, quorumSize, downIfOutOfQuorum, autoDownUnreachableAfter) with ClusterCustomDowning {

  /** Issues the actual down command for the unreachable node. */
  override def down(node: Address): Unit = {
    log.info("Quorum leader is auto-downing unreachable node [{}]", node)
    cluster.down(node)
  }

  /** Either terminates the whole actor system or escalates via SplitBrainResolvedError. */
  override def shutdownSelf(): Unit = {
    if (shutdownActorSystem) {
      Await.result(context.system.terminate(), 10 seconds)
    } else {
      throw new SplitBrainResolvedError("QuorumLeaderAutoDowning")
    }
  }
}
Example 116
Source File: LeaderAutoDowningRoles.scala From akka-cluster-custom-downing with Apache License 2.0 | 5 votes |
package tanukki.akka.cluster.autodown import akka.actor.{ActorSystem, Address, Props} import akka.cluster.{Cluster, DowningProvider} import com.typesafe.config.Config import scala.collection.JavaConverters._ import scala.concurrent.duration.{FiniteDuration, _} final class LeaderAutoDowningRoles(system: ActorSystem) extends DowningProvider { private[this] val cluster = Cluster(system) private val config: Config = system.settings.config override def downRemovalMargin: FiniteDuration = { val key = "custom-downing.down-removal-margin" config.getString(key) match { case "off" => Duration.Zero case _ => Duration(config.getDuration(key, MILLISECONDS), MILLISECONDS) } } override def downingActorProps: Option[Props] = { val stableAfter = system.settings.config.getDuration("custom-downing.stable-after").toMillis millis val roles = system.settings.config.getStringList("custom-downing.leader-auto-downing-roles.target-roles").asScala.toSet if (roles.isEmpty) None else Some(LeaderAutoDownRoles.props(roles, stableAfter)) } } private[autodown] object LeaderAutoDownRoles { def props(targetRoles: Set[String], autoDownUnreachableAfter: FiniteDuration): Props = Props(classOf[LeaderAutoDownRoles], targetRoles, autoDownUnreachableAfter) } private[autodown] class LeaderAutoDownRoles(targetRoles: Set[String], autoDownUnreachableAfter: FiniteDuration) extends LeaderAutoDownRolesBase(targetRoles, autoDownUnreachableAfter) with ClusterCustomDowning { override def down(node: Address): Unit = { log.info("Leader is auto-downing unreachable node [{}]", node) cluster.down(node) } }
Example 117
Source File: Server.scala From rokku with Apache License 2.0 | 5 votes |
package com.ing.wbaa.rokku.proxy import akka.actor.{ ActorSystem, Props } import com.ing.wbaa.rokku.proxy.config._ import com.ing.wbaa.rokku.proxy.handler.parsers.RequestParser import com.ing.wbaa.rokku.proxy.handler.{ FilterRecursiveListBucketHandler, RequestHandlerS3Cache } import com.ing.wbaa.rokku.proxy.persistence.HttpRequestRecorder import com.ing.wbaa.rokku.proxy.provider._ import com.ing.wbaa.rokku.proxy.queue.MemoryUserRequestQueue import com.typesafe.config.ConfigFactory object Server extends App { new RokkuS3Proxy with AuthorizationProviderRanger with RequestHandlerS3Cache with AuthenticationProviderSTS with LineageProviderAtlas with SignatureProviderAws with KerberosLoginProvider with FilterRecursiveListBucketHandler with MessageProviderKafka with AuditLogProvider with MemoryUserRequestQueue with RequestParser { override implicit lazy val system: ActorSystem = ActorSystem.create("rokku") override def kerberosSettings: KerberosSettings = KerberosSettings(system) override val httpSettings: HttpSettings = HttpSettings(system) override val rangerSettings: RangerSettings = RangerSettings(system) override val storageS3Settings: StorageS3Settings = StorageS3Settings(system) override val stsSettings: StsSettings = StsSettings(system) override val kafkaSettings: KafkaSettings = KafkaSettings(system) val requestPersistenceEnabled = ConfigFactory.load().getBoolean("rokku.requestPersistence.enabled") val configuredPersistenceId = ConfigFactory.load().getString("rokku.requestPersistence.persistenceId") if (requestPersistenceEnabled) { system.actorOf(Props(classOf[HttpRequestRecorder]), configuredPersistenceId) } // Force Ranger plugin to initialise on startup rangerPluginForceInit }.startup }
Example 118
Source File: HttpRequestRecorderItTest.scala From rokku with Apache License 2.0 | 5 votes |
package com.ing.wbaa.rokku.proxy.persistence

import akka.Done
import akka.actor.{ActorSystem, Props}
import akka.http.scaladsl.model.HttpRequest
import akka.http.scaladsl.model.Uri.{Authority, Host}
import akka.persistence.cassandra.query.scaladsl.CassandraReadJournal
import akka.persistence.query.PersistenceQuery
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.Sink
import com.amazonaws.services.s3.AmazonS3
import com.ing.wbaa.rokku.proxy.RokkuS3Proxy
import com.ing.wbaa.rokku.proxy.config.{HttpSettings, KafkaSettings, StorageS3Settings}
import com.ing.wbaa.rokku.proxy.data._
import com.ing.wbaa.rokku.proxy.handler.parsers.RequestParser
import com.ing.wbaa.rokku.proxy.handler.{FilterRecursiveListBucketHandler, RequestHandlerS3Cache}
import com.ing.wbaa.rokku.proxy.provider.{AuditLogProvider, MessageProviderKafka, SignatureProviderAws}
import com.ing.wbaa.rokku.proxy.queue.MemoryUserRequestQueue
import com.ing.wbaa.testkit.RokkuFixtures
import org.scalatest.Assertion
import org.scalatest.diagrams.Diagrams
import org.scalatest.wordspec.AsyncWordSpec

import scala.concurrent.duration._
import scala.concurrent.{Await, Future}

/**
 * Integration test: runs a mock Rokku proxy, performs an S3 SDK call against it, and
 * verifies the request was recorded by HttpRequestRecorder and is readable back from
 * the Cassandra journal. Requires a running Cassandra (journal) backend.
 */
class HttpRequestRecorderItTest extends AsyncWordSpec with Diagrams with RokkuFixtures {

  implicit val testSystem: ActorSystem = ActorSystem.create("test-system")
  implicit val mat: ActorMaterializer = ActorMaterializer()

  // Bind to an ephemeral port on localhost so parallel runs do not clash.
  val rokkuHttpSettings: HttpSettings = new HttpSettings(testSystem.settings.config) {
    override val httpPort: Int = 0
    override val httpBind: String = "127.0.0.1"
  }

  /**
   * Starts a proxy with authn/authz stubbed to always succeed, hands an S3 SDK client
   * pointed at it to `testCode`, and shuts the proxy down afterwards.
   */
  def withS3SdkToMockProxy(testCode: AmazonS3 => Assertion): Future[Assertion] = {
    val proxy: RokkuS3Proxy = new RokkuS3Proxy with RequestHandlerS3Cache with SignatureProviderAws
      with FilterRecursiveListBucketHandler with MessageProviderKafka with AuditLogProvider
      with MemoryUserRequestQueue with RequestParser {

      override implicit lazy val system: ActorSystem = testSystem
      override val httpSettings: HttpSettings = rokkuHttpSettings

      // Authorization/authentication stubs: accept everything.
      override def isUserAuthorizedForRequest(request: S3Request, user: User)(implicit id: RequestId): Boolean = true
      override def isUserAuthenticated(httpRequest: HttpRequest, awsSecretKey: AwsSecretKey)(implicit id: RequestId): Boolean = true

      override val storageS3Settings: StorageS3Settings = StorageS3Settings(testSystem)
      override val kafkaSettings: KafkaSettings = KafkaSettings(testSystem)

      // Fixed test identity returned for any credentials.
      override def areCredentialsActive(awsRequestCredential: AwsRequestCredential)(implicit id: RequestId): Future[Option[User]] =
        Future(Some(User(UserRawJson("userId", Some(Set("group")), "accesskey", "secretkey", None))))

      def createLineageFromRequest(httpRequest: HttpRequest, userSTS: User, userIPs: UserIps)(implicit id: RequestId): Future[Done] =
        Future.successful(Done)

      override protected def auditEnabled: Boolean = false

      // Enable recording so requests land in the journal under this persistence id.
      override val requestPersistenceEnabled: Boolean = true
      override val configuredPersistenceId: String = "localhost-1"
    }
    proxy.startup.map { binding =>
      try testCode(getAmazonS3(
        authority = Authority(Host(binding.localAddress.getAddress), binding.localAddress.getPort)
      ))
      finally proxy.shutdown()
    }
  }

  // Must match configuredPersistenceId above so the read journal finds the events.
  private val CHECKER_PERSISTENCE_ID = "localhost-1"
  val requestRecorder = testSystem.actorOf(Props(classOf[HttpRequestRecorder]), CHECKER_PERSISTENCE_ID)

  val queries = PersistenceQuery(testSystem)
    .readJournalFor[CassandraReadJournal](CassandraReadJournal.Identifier)

  "S3 Proxy" should {
    s"with Request Recorder" that {
      "persists requests in Cassandra" in withS3SdkToMockProxy { sdk =>
        withBucket(sdk) { bucketName =>
          // NOTE(review): fixed sleep waits for async persistence to complete — flaky by design; confirm timing.
          Thread.sleep(6000)
          val storedInCassandraF = queries.currentEventsByPersistenceId(CHECKER_PERSISTENCE_ID, 1L, Long.MaxValue)
            .map(_.event)
            .runWith(Sink.seq)
            .mapTo[Seq[ExecutedRequestEvt]]
          val r = Await.result(storedInCassandraF, 5.seconds).filter(_.httpRequest.getUri().toString.contains(bucketName))
          assert(r.size == 1)
          assert(r.head.userSTS.userName.value == "userId")
        }
      }
    }
  }
}
Example 119
Source File: HttpRequestRecorderSpec.scala From rokku with Apache License 2.0 | 5 votes |
package com.ing.wbaa.rokku.proxy.persistence

import java.net.InetAddress

import akka.actor.{ ActorSystem, PoisonPill, Props }
import akka.http.scaladsl.model.HttpHeader.ParsingResult
import akka.http.scaladsl.model._
import akka.testkit.{ ImplicitSender, TestKit }
import com.ing.wbaa.rokku.proxy.data._
import com.ing.wbaa.rokku.proxy.persistence.HttpRequestRecorder.{ ExecutedRequestCmd, LatestRequests, LatestRequestsResult }
import org.scalatest.BeforeAndAfterAll
import org.scalatest.diagrams.Diagrams
import org.scalatest.wordspec.AnyWordSpecLike

import scala.collection.immutable

/**
 * Verifies that HttpRequestRecorder persists request events: a recorded request
 * survives actor restart (a new incarnation replays it from the journal).
 */
class HttpRequestRecorderSpec extends TestKit(ActorSystem("RequestRecorderTest")) with ImplicitSender
  with AnyWordSpecLike with Diagrams with BeforeAndAfterAll {

  override def afterAll: Unit = {
    TestKit.shutdownActorSystem(system)
  }

  /** Parses "key=value" strings into HttpHeaders; throws on unparsable input. */
  private def convertStringsToAkkaHeaders(headers: List[String]): immutable.Seq[HttpHeader] = headers.map { p =>
    val kv = p.split("=")
    HttpHeader.parse(kv(0), kv(1)) match {
      case ParsingResult.Ok(header, _) => header
      case ParsingResult.Error(error) => throw new Exception(s"Unable to convert to HttpHeader: ${error.summary}")
    }
  }

  val requestRecorder = system.actorOf(Props(classOf[HttpRequestRecorder]), "localhost-1")

  // Realistic header set captured from an aws-cli PUT request.
  val headers = List("Remote-Address=0:0:0:0:0:0:0:1:58170", "Host=localhost:8987",
    "X-Amz-Content-SHA256=02502914aca52472205417e4c418ee499ba39ca1b283d99da26e295df2eccf32",
    "User-Agent=aws-cli/1.16.30 Python/2.7.5 Linux/3.10.0-862.14.4.el7.x86_64 botocore/1.12.20",
    "Content-MD5=Wf7l+rCPsVw8eqc34kVJ1g==",
    "Authorization=AWS4-HMAC-SHA256 Credential=6r24619bHVWvrxR5AMHNkGZ6vNRXoGCP/20190704/us-east-1/s3/aws4_request",
    "SignedHeaders=content-md5;host;x-amz-content-sha256;x-amz-date;x-amz-security-token",
    "Signature=271dda503da6fcf04cc058cb514b28a6d522a9b712ab553bfb88fb7814ab082f")

  val httpRequest = HttpRequest(
    HttpMethods.PUT,
    Uri("http://127.0.0.1:8010/home/testuser/file34"),
    convertStringsToAkkaHeaders(headers),
    HttpEntity.Empty.withContentType(ContentTypes.`application/octet-stream`).toString(),
    HttpProtocols.`HTTP/1.1`
  )
  val userSTS = User(UserName("okUser"), Set(UserGroup("okGroup")), AwsAccessKey("accesskey"), AwsSecretKey("secretkey"), UserAssumeRole(""))
  val clientIPAddress = RemoteAddress(InetAddress.getByName("localhost"), Some(1234))

  "RequestRecorder" should {
    "persist Http request event" in {
      requestRecorder ! ExecutedRequestCmd(httpRequest, userSTS, clientIPAddress)
      requestRecorder ! LatestRequests(1)
      expectMsg(LatestRequestsResult(List(ExecutedRequestEvt(httpRequest, userSTS, clientIPAddress))))
      // Kill the first incarnation; a fresh actor must recover the event from the journal.
      requestRecorder ! PoisonPill

      val requestRecorder1 = system.actorOf(Props(classOf[HttpRequestRecorder]), "localhost-2")
      requestRecorder1 ! LatestRequests(1)
      expectMsg(LatestRequestsResult(List(ExecutedRequestEvt(httpRequest, userSTS, clientIPAddress))))
    }
  }
}
Example 120
Source File: SimpleClusterListener.scala From constructr-consul with Apache License 2.0 | 5 votes |
package com.tecsisa.constructr.coordination package demo import akka.actor.{ Actor, ActorLogging, Address, Props } import akka.cluster.ClusterEvent.{ MemberEvent, MemberJoined, MemberRemoved, MemberUp, UnreachableMember } import akka.cluster.Cluster object SimpleClusterListener { case object GetMemberNodes final val Name = "clusterListener" def props: Props = Props(new SimpleClusterListener) } class SimpleClusterListener extends Actor with ActorLogging { import SimpleClusterListener._ val cluster = Cluster(context.system) private var members = Set.empty[Address] override def preStart(): Unit = cluster.subscribe(self, classOf[MemberEvent], classOf[UnreachableMember]) override def postStop(): Unit = cluster.unsubscribe(self) override def receive = { case GetMemberNodes => sender() ! members case MemberJoined(member) => log.info("Member joined: {}", member.address) members += member.address case MemberUp(member) => log.info("Member up: {}", member.address) members += member.address case MemberRemoved(member, _) => log.info("Member removed: {}", member.address) members -= member.address } }
Example 121
Source File: GrcpActionActor.scala From grpc-gatling with MIT License | 5 votes |
package ch.tamedia.gatling.actions import akka.actor.Props import ch.tamedia.gatling.GrpcProtocol import ch.tamedia.gatling.grpc.GrpcCheck import com.trueaccord.scalapb.GeneratedMessage import io.gatling.commons.stats.{KO, OK} import io.gatling.commons.util.TimeHelper import io.gatling.commons.validation.Failure import io.gatling.core.action.{Action, ActionActor} import io.gatling.core.check.Check import io.gatling.core.session.Session import io.gatling.core.stats.StatsEngine import io.gatling.core.stats.message.ResponseTimings import scala.concurrent.Future def logResult(maybeResult: Option[GeneratedMessage], error: Option[Throwable] = None) = { val endTime = TimeHelper.nowMillis val timings = ResponseTimings(startTime, endTime) if (error.isEmpty) { val result = maybeResult.get if (Option(result).nonEmpty) { val (newSession, error) = Check.check(result, session, checks) error match { case None => { statsEngine.logResponse(session, action.name, timings, OK, None, None) next ! newSession(session) } case Some(Failure(errorMessage)) => { statsEngine.logResponse(session, action.name, timings, KO, None, Some(errorMessage)) next ! newSession(session).markAsFailed } } } else { statsEngine.logResponse(session, action.name, timings, KO, None, Some(s"Error during the call!")) next ! session.markAsFailed } } else { val throwable = error.get statsEngine.logResponse(session, action.name, timings, KO, None, Some(throwable.getMessage)) next ! session.markAsFailed } } } }
Example 122
Source File: UIDFetchHandler.scala From gatling-imap with GNU Affero General Public License v3.0 | 5 votes |
package com.linagora.gatling.imap.protocol.command import akka.actor.{ActorRef, Props} import com.linagora.gatling.imap.protocol._ import com.yahoo.imapnio.async.client.ImapAsyncSession import com.yahoo.imapnio.async.request.UidFetchCommand import io.gatling.core.akka.BaseActor object UIDFetchHandler { def props(session: ImapAsyncSession) = Props(new UIDFetchHandler(session)) } class UIDFetchHandler(session: ImapAsyncSession) extends BaseActor { override def receive: Receive = { case Command.UIDFetch(userId, sequence, attributes) => context.become(waitCallback(sender())) ImapSessionExecutor.listen(self, userId, Response.Fetched)(logger)(session.execute(new UidFetchCommand(sequence.asImap, attributes.asString))) } def waitCallback(sender: ActorRef): Receive = { case [email protected](_) => sender ! msg context.stop(self) } }
Example 123
Source File: ExpungeHandler.scala From gatling-imap with GNU Affero General Public License v3.0 | 5 votes |
package com.linagora.gatling.imap.protocol.command import akka.actor.{ActorRef, Props} import com.linagora.gatling.imap.protocol.{Response, _} import com.yahoo.imapnio.async.client.ImapAsyncSession import com.yahoo.imapnio.async.request.ExpungeCommand import io.gatling.core.akka.BaseActor object ExpungeHandler { def props(session: ImapAsyncSession) = Props(new ExpungeHandler(session)) } class ExpungeHandler(session: ImapAsyncSession) extends BaseActor { override def receive: Receive = { case Command.Expunge(userId) => context.become(waitCallback(sender())) ImapSessionExecutor.listen(self, userId, Response.Expunged)(logger)(session.execute(new ExpungeCommand())) } def waitCallback(sender: ActorRef): Receive = { case [email protected](_) => sender ! msg context.stop(self) } }
Example 124
Source File: FetchHandler.scala From gatling-imap with GNU Affero General Public License v3.0 | 5 votes |
package com.linagora.gatling.imap.protocol.command import akka.actor.{ActorRef, Props} import com.linagora.gatling.imap.protocol._ import com.yahoo.imapnio.async.client.ImapAsyncSession import com.yahoo.imapnio.async.request.FetchCommand import io.gatling.core.akka.BaseActor abstract class FetchAttributes { def asString: String } object FetchAttributes { case class ALL() extends FetchAttributes { override def asString = "ALL" } case class FULL() extends FetchAttributes { override def asString = "FULL" } case class FAST() extends FetchAttributes { override def asString = "FAST" } case class AttributeList(fetchAttributes: String*) extends FetchAttributes { override def asString = fetchAttributes.mkString(" ") } } object FetchHandler { def props(session: ImapAsyncSession) = Props(new FetchHandler(session)) } class FetchHandler(session: ImapAsyncSession) extends BaseActor { override def receive: Receive = { case Command.Fetch(userId, sequence, attributes) => context.become(waitCallback(sender())) ImapSessionExecutor.listen(self, userId, Response.Fetched)(logger)(session.execute(new FetchCommand(sequence.asImap, attributes.asString))) } def waitCallback(sender: ActorRef): Receive = { case [email protected](_) => sender ! msg context.stop(self) } }
Example 125
Source File: ListHandler.scala From gatling-imap with GNU Affero General Public License v3.0 | 5 votes |
package com.linagora.gatling.imap.protocol.command import akka.actor.{ActorRef, Props} import com.linagora.gatling.imap.protocol._ import com.yahoo.imapnio.async.client.ImapAsyncSession import com.yahoo.imapnio.async.request.ListCommand import com.yahoo.imapnio.async.response.ImapAsyncResponse import io.gatling.core.akka.BaseActor import scala.concurrent.Future import scala.util.{Failure, Success, Try} object ListHandler { def props(session: ImapAsyncSession) = Props(new ListHandler(session)) } class ListHandler(session: ImapAsyncSession) extends BaseActor { override def receive: Receive = { case Command.List(userId, reference, name) => context.become(waitCallback(sender())) ImapSessionExecutor.listenWithHandler(self, userId, Response.Listed, callback)(logger)(session.execute(new ListCommand(reference, name))) } private def callback(response: Future[ImapAsyncResponse]) = { Try(response) match { case Success(_) => case Failure(e) => logger.error("ERROR when executing LIST COMMAND", e) throw e; } } def waitCallback(sender: ActorRef): Receive = { case [email protected](_) => sender ! msg context.stop(self) } }
Example 126
Source File: AppendHandler.scala From gatling-imap with GNU Affero General Public License v3.0 | 5 votes |
package com.linagora.gatling.imap.protocol.command import java.nio.charset.StandardCharsets import javax.mail.Flags import akka.actor.{ActorRef, Props} import com.linagora.gatling.imap.protocol._ import com.yahoo.imapnio.async.client.ImapAsyncSession import com.yahoo.imapnio.async.request.AppendCommand import com.yahoo.imapnio.async.response.ImapAsyncResponse import io.gatling.core.akka.BaseActor import scala.concurrent.Future import scala.util.{Failure, Success, Try} object AppendHandler { def props(session: ImapAsyncSession) = Props(new AppendHandler(session)) } class AppendHandler(session: ImapAsyncSession) extends BaseActor { override def receive: Receive = { case Command.Append(userId, mailbox, flags, date, content) => if (date.isDefined) throw new NotImplementedError("Date parameter for APPEND is still not implemented") logger.debug(s"APPEND receive from sender ${sender.path} on ${self.path}") context.become(waitCallback(sender())) val nullDate = null val crLfContent = content.replaceAll("(?<!\r)\n", "\r\n").getBytes(StandardCharsets.UTF_8) ImapSessionExecutor .listenWithHandler(self, userId, Response.Appended, callback)(logger)(session.execute(new AppendCommand(mailbox, flags.map(toImapFlags).orNull, nullDate, crLfContent))) } private def callback(response: Future[ImapAsyncResponse]) = { Try(response) match { case Success(futureResult) => futureResult.onComplete(future => { logger.debug(s"AppendHandler command completed, success : ${future.isSuccess}") if (!future.isSuccess) { logger.error("AppendHandler command failed", future.toEither.left) } }) case Failure(e) => logger.error("ERROR when executing APPEND COMMAND", e) throw e } } private def toImapFlags(flags: Seq[String]): Flags = { val imapFlags = new Flags() flags.foreach(imapFlags.add) imapFlags } def waitCallback(sender: ActorRef): Receive = { case [email protected](_) => logger.debug(s"APPEND reply to sender ${sender.path}") sender ! msg context.stop(self) } }
Example 127
Source File: LoginHandler.scala From gatling-imap with GNU Affero General Public License v3.0 | 5 votes |
package com.linagora.gatling.imap.protocol.command

import akka.actor.{ActorRef, Props}
import com.linagora.gatling.imap.protocol._
import com.yahoo.imapnio.async.client.ImapAsyncSession
import com.yahoo.imapnio.async.request.LoginCommand
import com.yahoo.imapnio.async.response.ImapAsyncResponse
import io.gatling.core.akka.BaseActor

import scala.concurrent.Future
import scala.util.{Failure, Success, Try}

object LoginHandler {
  def props(session: ImapAsyncSession) = Props(new LoginHandler(session))
}

/**
 * One-shot actor that issues an IMAP LOGIN on the given session and forwards
 * the [[Response.LoggedIn]] reply to the original sender before stopping itself.
 */
class LoginHandler(session: ImapAsyncSession) extends BaseActor {

  override def receive: Receive = {
    case Command.Login(userId, user, password) =>
      logger.trace(s"LoginHandler for user : ${userId.value}, on actor ${self.path} responding to ${sender.path}")
      context.become(waitForLoggedIn(sender()))
      ImapSessionExecutor.listenWithHandler(self, userId, Response.LoggedIn, callback)(logger)(session.execute(new LoginCommand(user, password)))
  }

  // Logs the outcome of the LOGIN future. Was: logger.error(..., future.toEither.left),
  // which logged the Left projection object rather than the Throwable itself.
  private def callback(response: Future[ImapAsyncResponse]) = {
    Try(response) match {
      case Success(futureResult) =>
        futureResult.onComplete { result =>
          logger.debug(s"LoginHandler command completed, success : ${result.isSuccess}")
          result match {
            case Failure(e) => logger.error("LoginHandler command failed", e)
            case _ => ()
          }
        }
      case Failure(e) =>
        logger.error("ERROR when executing LOGIN COMMAND", e)
        throw e
    }
  }

  def waitForLoggedIn(sender: ActorRef): Receive = {
    // Fixed scraper-garbled pattern ("[email protected]"): bind the whole message.
    case msg @ Response.LoggedIn(_) =>
      logger.trace(s"LoginHandler respond to ${sender.path} with $msg")
      sender ! msg
      context.stop(self)
  }
}
Example 128
Source File: SelectHandler.scala From gatling-imap with GNU Affero General Public License v3.0 | 5 votes |
package com.linagora.gatling.imap.protocol.command

import akka.actor.{ActorRef, Props}
import com.linagora.gatling.imap.protocol._
import com.yahoo.imapnio.async.client.ImapAsyncSession
import com.yahoo.imapnio.async.request.SelectFolderCommand
import io.gatling.core.akka.BaseActor

object SelectHandler {
  def props(session: ImapAsyncSession) = Props(new SelectHandler(session))
}

/**
 * One-shot actor that issues an IMAP SELECT on the given session and forwards
 * the [[Response.Selected]] reply to the original sender before stopping itself.
 */
class SelectHandler(session: ImapAsyncSession) extends BaseActor {

  override def receive: Receive = {
    case Command.Select(userId, mailbox) =>
      context.become(waitCallback(sender()))
      ImapSessionExecutor.listen(self, userId, Response.Selected)(logger)(session.execute(new SelectFolderCommand(mailbox)))
  }

  def waitCallback(sender: ActorRef): Receive = {
    // Fixed scraper-garbled pattern ("[email protected]"): bind the whole message.
    case msg @ Response.Selected(_) =>
      sender ! msg
      context.stop(self)
  }
}
Example 129
Source File: StoreAction.scala From gatling-imap with GNU Affero General Public License v3.0 | 5 votes |
package com.linagora.gatling.imap.action

import akka.actor.Props
import com.linagora.gatling.imap.check.ImapCheck
import com.linagora.gatling.imap.protocol.{Command, UserId}
import com.linagora.gatling.imap.protocol.command.{MessageRanges, StoreFlags}
import io.gatling.commons.validation.Validation
import io.gatling.core.action.ValidatedActionActor
import io.gatling.core.session._

import scala.collection.immutable.Seq

object StoreAction {
  def props(imapContext: ImapActionContext, requestname: String, checks: Seq[ImapCheck], sequence: Expression[MessageRanges], flags: Expression[StoreFlags]) =
    Props(new StoreAction(imapContext, requestname, checks, sequence, flags))
}

/**
 * Gatling action that sends an IMAP STORE command (flag update on a message
 * range) for the virtual user attached to the session.
 */
class StoreAction(val imapContext: ImapActionContext,
                  val requestName: String,
                  override val checks: Seq[ImapCheck],
                  sequence: Expression[MessageRanges],
                  flags: Expression[StoreFlags]) extends ValidatedActionActor with ImapActionActor {

  override protected def executeOrFail(session: Session): Validation[_] = {
    // Resolve both expressions against the session before dispatching.
    for {
      resolvedRanges <- sequence(session)
      resolvedFlags <- flags(session)
    } yield {
      val userId: Long = session.userId
      val responseHandler = handleResponse(session, imapContext.clock.nowMillis)
      sessions.tell(Command.Store(UserId(userId), resolvedRanges, resolvedFlags), responseHandler)
    }
  }
}
Example 130
Source File: SignatureCheckerActorSpecForIntegration.scala From incubator-toree with Apache License 2.0 | 5 votes |
package integration.security

import akka.actor.{ActorRef, ActorSystem, Props}
import akka.testkit.{ImplicitSender, TestKit}
import org.apache.toree.kernel.protocol.v5._
import org.apache.toree.communication.security.{Hmac, SignatureCheckerActor}
import com.typesafe.config.ConfigFactory
import org.scalatest.{BeforeAndAfter, FunSpecLike, Matchers}
import play.api.libs.json.Json

object SignatureCheckerActorSpecForIntegration {
  val config = """
    akka {
      loglevel = "WARNING"
    }"""
}

/** Integration spec: verifies HMAC signature checking of kernel messages. */
class SignatureCheckerActorSpecForIntegration extends TestKit(
  ActorSystem(
    "SignatureCheckerActorSpec",
    ConfigFactory.parseString(SignatureCheckerActorSpecForIntegration.config)
  )
) with ImplicitSender with FunSpecLike with Matchers with BeforeAndAfter {

  private val sigKey = "12345"
  private val signature =
    "1c4859a7606fd93eb5f73c3d9642f9bc860453ba42063961a00d02ed820147b5"

  // A message whose signature matches the blob below.
  private val goodMessage = KernelMessage(
    null, signature,
    Header("a", "b", "c", "d", "e"),
    ParentHeader("f", "g", "h", "i", "j"),
    Metadata(), "<STRING>"
  )

  // Same payload but with a deliberately wrong signature.
  private val badMessage = KernelMessage(
    null, "wrong signature",
    Header("a", "b", "c", "d", "e"),
    ParentHeader("f", "g", "h", "i", "j"),
    Metadata(), "<STRING>"
  )

  private var signatureChecker: ActorRef = _

  before {
    val hmac = Hmac(sigKey)
    signatureChecker = system.actorOf(Props(classOf[SignatureCheckerActor], hmac))
  }

  after {
    signatureChecker = null
  }

  describe("SignatureCheckerActor") {
    describe("#receive") {
      it("should return true if the kernel message is valid") {
        val blob =
          Json.stringify(Json.toJson(goodMessage.header)) ::
          Json.stringify(Json.toJson(goodMessage.parentHeader)) ::
          Json.stringify(Json.toJson(goodMessage.metadata)) ::
          goodMessage.contentString ::
          Nil
        signatureChecker ! ((goodMessage.signature, blob))
        expectMsg(true)
      }

      it("should return false if the kernel message is invalid") {
        val blob =
          Json.stringify(Json.toJson(badMessage.header)) ::
          Json.stringify(Json.toJson(badMessage.parentHeader)) ::
          Json.stringify(Json.toJson(badMessage.metadata)) ::
          badMessage.contentString ::
          Nil
        signatureChecker ! ((badMessage.signature, blob))
        expectMsg(false)
      }
    }
  }
}
Example 131
Source File: HandlerInitialization.scala From incubator-toree with Apache License 2.0 | 5 votes |
// NOTE(review): this example was truncated by extraction — the enclosing trait/class
// declaration between the imports and `override def initializeHandlers` is missing
// (the body ends with a dangling `}`). Code kept verbatim; restore the declaration
// from the upstream incubator-toree source before reuse.
package org.apache.toree.kernel.protocol.v5.client.boot.layers import akka.actor.{ActorSystem, Props} import org.apache.toree.kernel.protocol.v5.client.ActorLoader import org.apache.toree.kernel.protocol.v5.MessageType import org.apache.toree.kernel.protocol.v5.MessageType.MessageType import org.apache.toree.kernel.protocol.v5.client.handler.ExecuteHandler import org.apache.toree.utils.LogLike override def initializeHandlers( actorSystem: ActorSystem, actorLoader: ActorLoader ): Unit = initializeMessageHandlers(actorSystem, actorLoader) private def initializeRequestHandler[T]( actorSystem: ActorSystem, actorLoader: ActorLoader, clazz: Class[T], messageType: MessageType ) = { logger.info("Creating %s handler".format(messageType.toString)) actorSystem.actorOf(Props(clazz, actorLoader), name = messageType.toString) } private def initializeMessageHandlers( actorSystem: ActorSystem, actorLoader: ActorLoader ): Unit = { initializeRequestHandler( actorSystem, actorLoader, classOf[ExecuteHandler], MessageType.Incoming.ExecuteRequest ) } }
Example 132
Source File: ShellClientSpec.scala From incubator-toree with Apache License 2.0 | 5 votes |
package org.apache.toree.kernel.protocol.v5.client.socket

import java.util.UUID

import akka.actor.{ActorRef, ActorSystem, Props}
import akka.testkit.{TestProbe, ImplicitSender, TestKit}
import org.apache.toree.communication.ZMQMessage
import org.apache.toree.communication.security.SecurityActorType
import org.apache.toree.kernel.protocol.v5._
import org.apache.toree.kernel.protocol.v5.client.ActorLoader
import org.apache.toree.kernel.protocol.v5.content.ExecuteRequest
import org.scalatest.mock.MockitoSugar
import org.scalatest.{Matchers, FunSpecLike}
import org.mockito.Mockito._
import org.mockito.Matchers._
import play.api.libs.json.Json

/** Spec: the shell client signs outgoing kernel messages and emits ZMQ frames. */
class ShellClientSpec extends TestKit(ActorSystem("ShellActorSpec"))
  with ImplicitSender with FunSpecLike with Matchers with MockitoSugar {

  private val SignatureEnabled = true

  describe("ShellClientActor") {
    val socketFactory = mock[SocketFactory]
    val mockActorLoader = mock[ActorLoader]
    val probe: TestProbe = TestProbe()
    when(socketFactory.ShellClient(
      any(classOf[ActorSystem]), any(classOf[ActorRef])
    )).thenReturn(probe.ref)

    // Route signature-manager lookups to a probe so the signing round-trip is observable.
    val signatureManagerProbe = TestProbe()
    doReturn(system.actorSelection(signatureManagerProbe.ref.path.toString))
      .when(mockActorLoader).load(SecurityActorType.SignatureManager)

    val shellClient = system.actorOf(Props(
      classOf[ShellClient],
      socketFactory,
      mockActorLoader,
      SignatureEnabled
    ))

    describe("send execute request") {
      it("should send execute request") {
        val request = ExecuteRequest(
          "foo", false, true, UserExpressions(), true
        )
        val header = Header(
          UUID.randomUUID().toString, "spark",
          UUID.randomUUID().toString, MessageType.Incoming.ExecuteRequest.toString,
          "5.0"
        )
        val kernelMessage = KernelMessage(
          Seq[Array[Byte]](), "",
          header, HeaderBuilder.empty, Metadata(),
          Json.toJson(request).toString
        )
        shellClient ! kernelMessage

        // Echo back the kernel message sent to have a signature injected
        signatureManagerProbe.expectMsgClass(classOf[KernelMessage])
        signatureManagerProbe.reply(kernelMessage)

        probe.expectMsgClass(classOf[ZMQMessage])
      }
    }
  }
}
Example 133
Source File: HeartbeatClientSpec.scala From incubator-toree with Apache License 2.0 | 5 votes |
package org.apache.toree.kernel.protocol.v5.client.socket

import akka.actor.{ActorRef, ActorSystem, Props}
import akka.testkit.{TestProbe, ImplicitSender, TestKit}
import org.apache.toree.communication.ZMQMessage
import org.apache.toree.kernel.protocol.v5.client.ActorLoader
import org.scalatest.mock.MockitoSugar
import org.scalatest.{Matchers, FunSpecLike}
import org.mockito.Matchers._
import org.mockito.Mockito._

/** Spec: the heartbeat client converts heartbeat requests into ZMQ pings. */
class HeartbeatClientSpec extends TestKit(ActorSystem("HeartbeatActorSpec"))
  with ImplicitSender with FunSpecLike with Matchers with MockitoSugar {

  describe("HeartbeatClientActor") {
    val socketFactory = mock[SocketFactory]
    val mockActorLoader = mock[ActorLoader]
    val probe: TestProbe = TestProbe()
    when(socketFactory.HeartbeatClient(any(classOf[ActorSystem]), any(classOf[ActorRef])))
      .thenReturn(probe.ref)

    val heartbeatClient = system.actorOf(Props(
      classOf[HeartbeatClient],
      socketFactory,
      mockActorLoader,
      true
    ))

    describe("send heartbeat") {
      it("should send ping ZMQMessage") {
        heartbeatClient ! HeartbeatMessage
        probe.expectMsgClass(classOf[ZMQMessage])
      }
    }
  }
}
Example 134
Source File: SocketFactory.scala From incubator-toree with Apache License 2.0 | 5 votes |
// NOTE(review): this example was truncated by extraction — the `class SocketFactory`
// declaration that should enclose the `IOPub` method is missing (the body ends with a
// dangling `}`). Code kept verbatim; restore the class header from the upstream
// incubator-toree source before reuse.
package org.apache.toree.kernel.protocol.v5.kernel.socket import akka.actor.{Props, ActorRef, ActorSystem} import org.apache.toree.communication.actors.{RouterSocketActor, RepSocketActor, PubSocketActor} object SocketFactory { def apply(socketConfig: SocketConfig) = { new SocketFactory(socketConfig) } } def IOPub(system: ActorSystem) : ActorRef = system.actorOf(Props(classOf[PubSocketActor], IOPubConnection.toString)) // ZeroMQExtension(system).newPubSocket( // Bind(IOPubConnection.toString) // ) }
Example 135
Source File: CodeCompleteTaskActor.scala From incubator-toree with Apache License 2.0 | 5 votes |
package org.apache.toree.kernel.protocol.v5.interpreter.tasks

import akka.actor.{Actor, Props}
import org.apache.toree.interpreter.Interpreter
import org.apache.toree.kernel.protocol.v5.content.CompleteRequest
import org.apache.toree.utils.LogLike

object CodeCompleteTaskActor {
  def props(interpreter: Interpreter): Props =
    Props(classOf[CodeCompleteTaskActor], interpreter)
}

/**
 * Actor that delegates code-completion requests to the interpreter and replies
 * with the completion result.
 */
class CodeCompleteTaskActor(interpreter: Interpreter)
  extends Actor with LogLike {
  require(interpreter != null)

  override def receive: Receive = {
    case request: CompleteRequest =>
      logger.debug("Invoking the interpreter completion")
      sender ! interpreter.completion(request.code, request.cursor_pos)
    case _ =>
      sender ! "Unknown message" // TODO: Provide a failure message type to be passed around?
  }
}
Example 136
Source File: IsCompleteTaskActor.scala From incubator-toree with Apache License 2.0 | 5 votes |
package org.apache.toree.kernel.protocol.v5.interpreter.tasks

import akka.actor.{Actor, Props}
import org.apache.toree.interpreter.Interpreter
import org.apache.toree.kernel.protocol.v5.content.IsCompleteRequest
import org.apache.toree.utils.LogLike

object IsCompleteTaskActor {
  def props(interpreter: Interpreter): Props =
    Props(classOf[IsCompleteTaskActor], interpreter)
}

/**
 * Actor that asks the interpreter whether a code fragment is complete and
 * replies with the interpreter's answer.
 */
class IsCompleteTaskActor(interpreter: Interpreter)
  extends Actor with LogLike {
  require(interpreter != null)

  override def receive: Receive = {
    case request: IsCompleteRequest =>
      logger.debug("Invoking the interpreter completion")
      sender ! interpreter.isComplete(request.code)
    case _ =>
      sender ! "Unknown message" // TODO: Provide a failure message type to be passed around?
  }
}
Example 137
Source File: GenericSocketMessageHandlerSpec.scala From incubator-toree with Apache License 2.0 | 5 votes |
package org.apache.toree.kernel.protocol.v5.handler

import akka.actor.{ActorSystem, Props, ActorRef, ActorSelection}
import akka.testkit.{ImplicitSender, TestKit, TestProbe}
import org.apache.toree.kernel.protocol.v5._
import org.apache.toree.kernel.protocol.v5.kernel.ActorLoader
import org.apache.toree.kernel.protocol.v5Test._
import org.mockito.Mockito._
import org.scalatest.mock.MockitoSugar
import org.scalatest.{Matchers, FunSpecLike}
import test.utils.MaxAkkaTestTimeout

/** Spec: the generic handler forwards kernel messages to the configured socket actor. */
class GenericSocketMessageHandlerSpec extends TestKit(
  ActorSystem(
    "GenericSocketMessageHandlerSystem",
    None,
    Some(org.apache.toree.Main.getClass.getClassLoader)
  )) with ImplicitSender with FunSpecLike with Matchers with MockitoSugar {

  describe("GenericSocketMessageHandler( ActorLoader, SocketType )") {
    // Mock ActorLoader whose Control lookup resolves to a probe-backed selection.
    val actorLoader: ActorLoader = mock[ActorLoader]
    val selectionProbe: TestProbe = TestProbe()
    val selection: ActorSelection =
      system.actorSelection(selectionProbe.ref.path.toString)
    when(actorLoader.load(SocketType.Control)).thenReturn(selection)

    // The handler under test.
    val genericHandler: ActorRef = system.actorOf(
      Props(classOf[GenericSocketMessageHandler], actorLoader, SocketType.Control)
    )

    describe("#receive( KernelMessage )") {
      genericHandler ! MockKernelMessage

      it("should send the message to the selected actor") {
        selectionProbe.expectMsg(MaxAkkaTestTimeout, MockKernelMessage)
      }
    }
  }
}
Example 138
Source File: KernelInfoRequestHandlerSpec.scala From incubator-toree with Apache License 2.0 | 5 votes |
package org.apache.toree.kernel.protocol.v5.handler

import akka.actor.{ActorSelection, ActorSystem, Props}
import akka.testkit.{ImplicitSender, TestKit, TestProbe}
import org.apache.toree.Main
import org.apache.toree.kernel.protocol.v5.content.KernelInfoReply
import org.apache.toree.kernel.protocol.v5.kernel.ActorLoader
import org.apache.toree.kernel.protocol.v5._
import org.mockito.AdditionalMatchers.{not => mockNot}
import org.mockito.Matchers.{eq => mockEq}
import com.typesafe.config.ConfigFactory
import org.mockito.Mockito._
import org.scalatest.mock.MockitoSugar
import org.scalatest.{FunSpecLike, Matchers}
import play.api.libs.json.Json
import test.utils.MaxAkkaTestTimeout

object KernelInfoRequestHandlerSpec {
  val config = """
    akka {
      loglevel = "WARNING"
    }"""
}

/** Spec: the handler answers a kernel_info_request with a kernel_info_reply via the relay. */
class KernelInfoRequestHandlerSpec extends TestKit(
  ActorSystem("KernelInfoRequestHandlerSpec",
    ConfigFactory.parseString(KernelInfoRequestHandlerSpec.config),
    Main.getClass.getClassLoader)
) with ImplicitSender with FunSpecLike with Matchers with MockitoSugar {

  val actorLoader: ActorLoader = mock[ActorLoader]
  val actor = system.actorOf(Props(classOf[KernelInfoRequestHandler],
    actorLoader, LanguageInfo("test", "1.0.0", Some(".test"))))

  // Relay lookups resolve to a probe; any other lookup resolves to an empty selection.
  val relayProbe: TestProbe = TestProbe()
  val relaySelection: ActorSelection =
    system.actorSelection(relayProbe.ref.path)
  when(actorLoader.load(SystemActorType.KernelMessageRelay))
    .thenReturn(relaySelection)
  when(actorLoader.load(mockNot(mockEq(SystemActorType.KernelMessageRelay))))
    .thenReturn(system.actorSelection(""))

  val header = Header("", "", "", "", "")
  val kernelMessage = new KernelMessage(
    Seq[Array[Byte]](), "test message", header, header, Metadata(), "{}"
  )

  describe("Kernel Info Request Handler") {
    it("should return a KernelMessage containing kernel info response") {
      actor ! kernelMessage
      val reply =
        relayProbe.receiveOne(MaxAkkaTestTimeout).asInstanceOf[KernelMessage]
      val kernelInfo = Json.parse(reply.contentString).as[KernelInfoReply]
      kernelInfo.implementation should be ("spark")
    }
  }
}
Example 139
Source File: ShellSpec.scala From incubator-toree with Apache License 2.0 | 5 votes |
package org.apache.toree.kernel.protocol.v5.kernel.socket

import java.nio.charset.Charset

import akka.actor.{ActorSelection, ActorRef, ActorSystem, Props}
import akka.testkit.{ImplicitSender, TestKit, TestProbe}
import akka.util.ByteString
import org.apache.toree.communication.ZMQMessage
import org.apache.toree.kernel.protocol.v5._
import org.apache.toree.kernel.protocol.v5.kernel.{ActorLoader, Utilities}
import org.apache.toree.kernel.protocol.v5Test._
import Utilities._
import com.typesafe.config.ConfigFactory
import org.mockito.Matchers._
import org.mockito.Mockito._
import org.scalatest.mock.MockitoSugar
import org.scalatest.{FunSpecLike, Matchers}
import test.utils.MaxAkkaTestTimeout

object ShellSpec {
  val config = """
    akka {
      loglevel = "WARNING"
    }"""
}

/** Spec: the Shell socket actor bridges KernelMessage <-> ZMQMessage in both directions. */
class ShellSpec extends TestKit(
  ActorSystem(
    "ShellActorSpec",
    ConfigFactory.parseString(ShellSpec.config),
    org.apache.toree.Main.getClass.getClassLoader
  )) with ImplicitSender with FunSpecLike with Matchers with MockitoSugar {

  describe("Shell") {
    val socketFactory = mock[SocketFactory]
    val actorLoader = mock[ActorLoader]
    val socketProbe: TestProbe = TestProbe()
    when(socketFactory.Shell(any(classOf[ActorSystem]), any(classOf[ActorRef])))
      .thenReturn(socketProbe.ref)

    val relayProbe: TestProbe = TestProbe()
    val relaySelection: ActorSelection =
      system.actorSelection(relayProbe.ref.path)
    when(actorLoader.load(SystemActorType.KernelMessageRelay))
      .thenReturn(relaySelection)

    val shell = system.actorOf(Props(classOf[Shell], socketFactory, actorLoader))

    describe("#receive") {
      it("( KernelMessage ) should reply with a ZMQMessage via the socket") {
        // Use the implicit to convert the KernelMessage to ZMQMessage
        val MockZMQMessage: ZMQMessage = MockKernelMessage
        shell ! MockKernelMessage
        socketProbe.expectMsg(MockZMQMessage)
      }

      it("( ZMQMessage ) should forward ZMQ Strings and KernelMessage to Relay") {
        // Use the implicit to convert the KernelMessage to ZMQMessage
        val MockZMQMessage: ZMQMessage = MockKernelMessage
        shell ! MockZMQMessage

        // Should get the last four (assuming no buffer) strings in UTF-8
        val zmqStrings = MockZMQMessage.frames.map((byteString: ByteString) =>
          new String(byteString.toArray, Charset.forName("UTF-8"))
        ).takeRight(4)

        val kernelMessage: KernelMessage = MockZMQMessage

        relayProbe.expectMsg(MaxAkkaTestTimeout, (zmqStrings, kernelMessage))
      }
    }
  }
}
Example 140
Source File: IOPubSpec.scala From incubator-toree with Apache License 2.0 | 5 votes |
package org.apache.toree.kernel.protocol.v5.kernel.socket

import akka.actor.{ActorSystem, Props}
import akka.testkit.{ImplicitSender, TestKit, TestProbe}
import org.apache.toree.communication.ZMQMessage
import org.apache.toree.kernel.protocol.v5.kernel.Utilities
import org.apache.toree.kernel.protocol.v5Test._
import Utilities._
import com.typesafe.config.ConfigFactory
import org.mockito.Matchers._
import org.mockito.Mockito._
import org.scalatest.mock.MockitoSugar
import org.scalatest.{FunSpecLike, Matchers}
import test.utils.MaxAkkaTestTimeout

object IOPubSpec {
  val config = """
    akka {
      loglevel = "WARNING"
    }"""
}

/** Spec: the IOPub socket actor converts outgoing KernelMessages into ZMQMessages. */
class IOPubSpec extends TestKit(
  ActorSystem("IOPubActorSpec",
    ConfigFactory.parseString(IOPubSpec.config),
    org.apache.toree.Main.getClass.getClassLoader
  )) with ImplicitSender with FunSpecLike with Matchers with MockitoSugar {

  describe("IOPubActor") {
    val socketFactory = mock[SocketFactory]
    val probe: TestProbe = TestProbe()
    when(socketFactory.IOPub(any(classOf[ActorSystem]))).thenReturn(probe.ref)

    val socket = system.actorOf(Props(classOf[IOPub], socketFactory))

    // TODO test that the response type changed
    describe("#receive") {
      it("should reply with a ZMQMessage") {
        // Use the implicit to convert the KernelMessage to ZMQMessage
        val MockZMQMessage: ZMQMessage = MockKernelMessage
        socket ! MockKernelMessage
        probe.expectMsg(MaxAkkaTestTimeout, MockZMQMessage)
      }
    }
  }
}
Example 141
Source File: HeartbeatSpec.scala From incubator-toree with Apache License 2.0 | 5 votes |
package org.apache.toree.kernel.protocol.v5.kernel.socket

import akka.actor.{ActorRef, ActorSystem, Props}
import akka.testkit.{ImplicitSender, TestKit, TestProbe}
import akka.util.ByteString
import org.apache.toree.communication.ZMQMessage
import com.typesafe.config.ConfigFactory
import org.mockito.Matchers._
import org.mockito.Mockito._
import org.scalatest.mock.MockitoSugar
import org.scalatest.{FunSpecLike, Matchers}
import test.utils.MaxAkkaTestTimeout

object HeartbeatSpec {
  val config = """
    akka {
      loglevel = "WARNING"
    }"""
}

/** Spec: the Heartbeat actor echoes any ZMQMessage it receives back to the socket. */
class HeartbeatSpec extends TestKit(
  ActorSystem(
    "HeartbeatActorSpec",
    ConfigFactory.parseString(HeartbeatSpec.config),
    org.apache.toree.Main.getClass.getClassLoader
  )) with ImplicitSender with FunSpecLike with Matchers with MockitoSugar {

  val SomeMessage: String = "some message"
  val SomeZMQMessage: ZMQMessage = ZMQMessage(ByteString(SomeMessage.getBytes))

  describe("HeartbeatActor") {
    val socketFactory = mock[SocketFactory]
    val probe: TestProbe = TestProbe()
    when(socketFactory.Heartbeat(any(classOf[ActorSystem]), any(classOf[ActorRef])))
      .thenReturn(probe.ref)

    val heartbeat = system.actorOf(Props(classOf[Heartbeat], socketFactory))

    describe("send heartbeat") {
      it("should receive and send same ZMQMessage") {
        heartbeat ! SomeZMQMessage
        probe.expectMsg(MaxAkkaTestTimeout, SomeZMQMessage)
      }
    }
  }
}
Example 142
Source File: StdinSpec.scala From incubator-toree with Apache License 2.0 | 5 votes |
package org.apache.toree.kernel.protocol.v5.kernel.socket

import java.nio.charset.Charset

import akka.actor.{Props, ActorSelection, ActorRef, ActorSystem}
import akka.testkit.{TestProbe, ImplicitSender, TestKit}
import akka.util.ByteString
import org.apache.toree.communication.ZMQMessage
import org.apache.toree.kernel.protocol.v5.kernel.Utilities._
import org.apache.toree.kernel.protocol.v5Test._
import org.apache.toree.kernel.protocol.v5.{KernelMessage, SystemActorType}
import org.apache.toree.kernel.protocol.v5.kernel.ActorLoader
import com.typesafe.config.ConfigFactory
import org.scalatest.mock.MockitoSugar
import org.scalatest.{Matchers, FunSpecLike}
import org.mockito.Mockito._
import org.mockito.Matchers._
import test.utils.MaxAkkaTestTimeout

object StdinSpec {
  val config = """
    akka {
      loglevel = "WARNING"
    }"""
}

/** Spec: the Stdin socket actor bridges KernelMessage <-> ZMQMessage in both directions. */
class StdinSpec extends TestKit(ActorSystem(
  "StdinActorSpec",
  ConfigFactory.parseString(StdinSpec.config),
  org.apache.toree.Main.getClass.getClassLoader
)) with ImplicitSender with FunSpecLike with Matchers with MockitoSugar {

  describe("Stdin") {
    val socketFactory = mock[SocketFactory]
    val actorLoader = mock[ActorLoader]
    val socketProbe: TestProbe = TestProbe()
    when(socketFactory.Stdin(any(classOf[ActorSystem]), any(classOf[ActorRef])))
      .thenReturn(socketProbe.ref)

    val relayProbe: TestProbe = TestProbe()
    val relaySelection: ActorSelection =
      system.actorSelection(relayProbe.ref.path)
    when(actorLoader.load(SystemActorType.KernelMessageRelay))
      .thenReturn(relaySelection)

    val stdin = system.actorOf(Props(classOf[Stdin], socketFactory, actorLoader))

    describe("#receive") {
      it("( KernelMessage ) should reply with a ZMQMessage via the socket") {
        // Use the implicit to convert the KernelMessage to ZMQMessage
        val MockZMQMessage: ZMQMessage = MockKernelMessage
        stdin ! MockKernelMessage
        socketProbe.expectMsg(MockZMQMessage)
      }

      it("( ZMQMessage ) should forward ZMQ Strings and KernelMessage to Relay") {
        // Use the implicit to convert the KernelMessage to ZMQMessage
        val MockZMQMessage: ZMQMessage = MockKernelMessage
        stdin ! MockZMQMessage

        // Should get the last four (assuming no buffer) strings in UTF-8
        val zmqStrings = MockZMQMessage.frames.map((byteString: ByteString) =>
          new String(byteString.toArray, Charset.forName("UTF-8"))
        ).takeRight(4)

        val kernelMessage: KernelMessage = MockZMQMessage

        relayProbe.expectMsg(MaxAkkaTestTimeout, (zmqStrings, kernelMessage))
      }
    }
  }
}
Example 143
Source File: ActorLoaderSpec.scala From incubator-toree with Apache License 2.0 | 5 votes |
package org.apache.toree.kernel.protocol.v5.kernel

import akka.actor.{ActorSystem, Props}
import akka.testkit.{ImplicitSender, TestKit, TestProbe}
import org.apache.toree.kernel.protocol.v5.{MessageType, SocketType}
import org.scalatest.mock.MockitoSugar
import org.scalatest.{FunSpecLike, Matchers}
import test.utils.TestProbeProxyActor
import test.utils.MaxAkkaTestTimeout

/** Spec: ActorLoader selections resolve registered actors by message/socket type name. */
class ActorLoaderSpec extends TestKit(
  ActorSystem(
    "ActorLoaderSpecSystem",
    None,
    Some(org.apache.toree.Main.getClass.getClassLoader)
  )) with ImplicitSender with FunSpecLike with Matchers with MockitoSugar {

  describe("ActorLoader") {
    describe("#load( MessageType )") {
      it("should load an ActorSelection that has been loaded into the system") {
        val testProbe: TestProbe = TestProbe()
        system.actorOf(Props(classOf[TestProbeProxyActor], testProbe),
          MessageType.Outgoing.ClearOutput.toString)
        val actorLoader: ActorLoader = SimpleActorLoader(system)
        actorLoader.load(MessageType.Outgoing.ClearOutput) ! "<Test Message>"
        testProbe.expectMsg("<Test Message>")
      }

      it("should expect no message when there is no actor") {
        val testProbe: TestProbe = TestProbe()
        val actorLoader: ActorLoader = SimpleActorLoader(system)
        actorLoader.load(MessageType.Outgoing.CompleteReply) ! "<Test Message>"
        testProbe.expectNoMessage(MaxAkkaTestTimeout)
        // This is to test to see if there the messages go to the actor inbox or the dead mail inbox
        system.actorOf(Props(classOf[TestProbeProxyActor], testProbe),
          MessageType.Outgoing.CompleteReply.toString)
        testProbe.expectNoMessage(MaxAkkaTestTimeout)
      }
    }

    describe("#load( SocketType )") {
      it("should load an ActorSelection that has been loaded into the system") {
        val testProbe: TestProbe = TestProbe()
        system.actorOf(Props(classOf[TestProbeProxyActor], testProbe),
          SocketType.Shell.toString)
        val actorLoader: ActorLoader = SimpleActorLoader(system)
        actorLoader.load(SocketType.Shell) ! "<Test Message>"
        testProbe.expectMsg("<Test Message>")
      }

      it("should expect no message when there is no actor") {
        val testProbe: TestProbe = TestProbe()
        val actorLoader: ActorLoader = SimpleActorLoader(system)
        actorLoader.load(SocketType.IOPub) ! "<Test Message>"
        testProbe.expectNoMessage(MaxAkkaTestTimeout)
        // This is to test to see if there the messages go to the actor inbox or the dead mail inbox
        system.actorOf(Props(classOf[TestProbeProxyActor], testProbe),
          SocketType.IOPub.toString)
        testProbe.expectNoMessage(MaxAkkaTestTimeout)
      }
    }
  }
}
Example 144
Source File: SimpleActorLoaderSpec.scala From incubator-toree with Apache License 2.0 | 5 votes |
package org.apache.toree.kernel.protocol.v5.kernel

import akka.actor.{ActorSelection, ActorSystem, Props}
import akka.testkit.{TestKit, TestProbe}
import org.apache.toree.kernel.protocol.v5.MessageType
import org.scalatest.{FunSpecLike, Matchers}
import test.utils.TestProbeProxyActor
import test.utils.MaxAkkaTestTimeout

/** Spec: SimpleActorLoader resolves actors registered under a MessageType name. */
class SimpleActorLoaderSpec extends TestKit(
  ActorSystem(
    "SimpleActorLoaderSpecSystem",
    None,
    Some(org.apache.toree.Main.getClass.getClassLoader)
  )
) with FunSpecLike with Matchers {

  describe("SimpleActorLoader") {
    //val system = ActorSystem("SimpleActorLoaderSystem")
    val testMessage: String = "Hello Message"

    describe("#load( MessageType )") {
      it("should load a MessageType Actor") {
        // Create a new test probe to verify our selection works
        val messageTypeProbe: TestProbe = new TestProbe(system)

        // Add an actor to the system to send a message to
        system.actorOf(
          Props(classOf[TestProbeProxyActor], messageTypeProbe),
          name = MessageType.Outgoing.ExecuteInput.toString
        )

        // Create the ActorLoader with our test system
        val actorLoader: SimpleActorLoader = SimpleActorLoader(system)

        // Get the actor and send it a message
        val loadedMessageActor: ActorSelection =
          actorLoader.load(MessageType.Outgoing.ExecuteInput)
        loadedMessageActor ! testMessage

        // Assert the probe received the message
        messageTypeProbe.expectMsg(MaxAkkaTestTimeout, testMessage)
      }
    }
  }
}
Example 145
Source File: StatusDispatchSpec.scala From incubator-toree with Apache License 2.0 | 5 votes |
package org.apache.toree.kernel.protocol.v5.dispatch

import akka.actor.{ActorRef, ActorSystem, Props}
import akka.testkit.{TestKit, TestProbe}
import org.apache.toree.kernel.protocol.v5._
import org.apache.toree.kernel.protocol.v5.content.KernelStatus
import org.apache.toree.kernel.protocol.v5.kernel.ActorLoader
import org.mockito.Mockito._
import org.scalatest.mock.MockitoSugar
import org.scalatest.{BeforeAndAfter, FunSpecLike, Matchers}
import play.api.libs.json.Json
import test.utils.MaxAkkaTestTimeout

/** Spec: StatusDispatch publishes kernel status messages through the message relay. */
class StatusDispatchSpec extends TestKit(
  ActorSystem(
    "StatusDispatchSystem",
    None,
    Some(org.apache.toree.Main.getClass.getClassLoader)
  )
) with FunSpecLike with Matchers with MockitoSugar with BeforeAndAfter {

  var statusDispatchRef: ActorRef = _
  var relayProbe: TestProbe = _

  before {
    // Mock the relay with a probe
    relayProbe = TestProbe()
    // Mock the ActorLoader
    val mockActorLoader: ActorLoader = mock[ActorLoader]
    when(mockActorLoader.load(SystemActorType.KernelMessageRelay))
      .thenReturn(system.actorSelection(relayProbe.ref.path.toString))

    statusDispatchRef =
      system.actorOf(Props(classOf[StatusDispatch], mockActorLoader))
  }

  describe("StatusDispatch") {
    describe("#receive( KernelStatusType )") {
      it("should send a status message to the relay") {
        statusDispatchRef ! KernelStatusType.Busy

        // Check the kernel message is the correct type
        val statusMessage: KernelMessage =
          relayProbe.receiveOne(MaxAkkaTestTimeout).asInstanceOf[KernelMessage]
        statusMessage.header.msg_type should be (MessageType.Outgoing.Status.toString)

        // Check the status is what we sent
        val status: KernelStatus =
          Json.parse(statusMessage.contentString).as[KernelStatus]
        status.execution_state should be (KernelStatusType.Busy.toString)
      }
    }

    describe("#receive( KernelStatusType, Header )") {
      it("should send a status message to the relay") {
        val tuple = Tuple2(KernelStatusType.Busy, mock[Header])
        statusDispatchRef ! tuple

        // Check the kernel message is the correct type
        val statusMessage: KernelMessage =
          relayProbe.receiveOne(MaxAkkaTestTimeout).asInstanceOf[KernelMessage]
        statusMessage.header.msg_type should be (MessageType.Outgoing.Status.toString)

        // Check the status is what we sent
        val status: KernelStatus =
          Json.parse(statusMessage.contentString).as[KernelStatus]
        status.execution_state should be (KernelStatusType.Busy.toString)
      }
    }
  }
}
Example 146
Source File: ProxyMultiJvm.scala From 006877 with MIT License | 5 votes |
package aia.channels

// Run with: multi-jvm:test-only aia.channels.ReliableProxySampleSpec

import org.scalatest.{WordSpecLike, BeforeAndAfterAll, MustMatchers}
import akka.testkit.ImplicitSender
import akka.actor.{Props, Actor}
import akka.remote.testkit.MultiNodeSpecCallbacks
import akka.remote.testkit.MultiNodeConfig
import akka.remote.testkit.MultiNodeSpec

/** Glue trait hooking ScalaTest's lifecycle into the multi-node test callbacks. */
trait STMultiNodeSpec
    extends MultiNodeSpecCallbacks
    with WordSpecLike
    with MustMatchers
    with BeforeAndAfterAll {
  override def beforeAll() = multiNodeSpecBeforeAll()
  override def afterAll() = multiNodeSpecAfterAll()
}

/** Two-role configuration; testTransport enables blackhole/passThrough control. */
object ReliableProxySampleConfig extends MultiNodeConfig {
  val client = role("Client")
  val server = role("Server")
  testTransport(on = true)
}

// One concrete class per JVM participating in the multi-JVM test.
class ReliableProxySampleSpecMultiJvmNode1 extends ReliableProxySample
class ReliableProxySampleSpecMultiJvmNode2 extends ReliableProxySample

import akka.remote.transport.ThrottlerTransportAdapter.Direction
import scala.concurrent.duration._
import concurrent.Await
import akka.contrib.pattern.ReliableProxy

/**
 * Demonstrates that a ReliableProxy re-delivers messages across a network
 * outage, while a direct send during the outage is lost.
 */
class ReliableProxySample
    extends MultiNodeSpec(ReliableProxySampleConfig)
    with STMultiNodeSpec
    with ImplicitSender {
  import ReliableProxySampleConfig._

  def initialParticipants = roles.size

  "A MultiNodeSample" must {
    "wait for all nodes to enter a barrier" in {
      enterBarrier("startup")
    }

    "send to and receive from a remote node" in {
      runOn(client) {
        enterBarrier("deployed")
        val pathToEcho = node(server) / "user" / "echo"
        val echo = system.actorSelection(pathToEcho)
        val proxy = system.actorOf(
          ReliableProxy.props(pathToEcho, 500.millis), "proxy")

        proxy ! "message1"
        expectMsg("message1")
        // Cut the connection in both directions to simulate an outage.
        Await.ready(
          testConductor.blackhole( client, server, Direction.Both),
          1 second)

        // Sent during the outage: the direct message is lost, the proxied
        // message is buffered for re-delivery.
        echo ! "DirectMessage"
        proxy ! "ProxyMessage"
        expectNoMsg(3 seconds)

        // Restore the connection; only the proxied message arrives.
        Await.ready(
          testConductor.passThrough( client, server, Direction.Both),
          1 second)

        expectMsg("ProxyMessage")

        echo ! "DirectMessage2"
        expectMsg("DirectMessage2")
      }

      runOn(server) {
        // Simple echo actor the client talks to.
        system.actorOf(Props(new Actor {
          def receive = {
            case msg: AnyRef => {
              sender() ! msg
            }
          }
        }), "echo")
        enterBarrier("deployed")
      }

      enterBarrier("finished")
    }
  }
}
Example 147
Source File: DeadLetterTest.scala From 006877 with MIT License | 5 votes |
package aia.channels import akka.testkit.{ ImplicitSender, TestProbe, TestKit } import akka.actor.{ PoisonPill, Props, DeadLetter, ActorSystem } import org.scalatest.{WordSpecLike, BeforeAndAfterAll, MustMatchers} import java.util.Date class DeadLetterTest extends TestKit(ActorSystem("DeadLetterTest")) with WordSpecLike with BeforeAndAfterAll with MustMatchers with ImplicitSender { override def afterAll() { system.terminate() } "DeadLetter" must { "catch messages send to deadLetters" in { val deadLetterMonitor = TestProbe() system.eventStream.subscribe( deadLetterMonitor.ref, classOf[DeadLetter]) val msg = new StateEvent(new Date(), "Connected") system.deadLetters ! msg val dead = deadLetterMonitor.expectMsgType[DeadLetter] dead.message must be(msg) dead.sender must be(testActor) dead.recipient must be(system.deadLetters) } "catch deadLetter messages send to deadLetters" in { val deadLetterMonitor = TestProbe() val actor = system.actorOf(Props[EchoActor], "echo") system.eventStream.subscribe( deadLetterMonitor.ref, classOf[DeadLetter]) val msg = new Order("me", "Akka in Action", 1) val dead = DeadLetter(msg, testActor, actor) system.deadLetters ! dead deadLetterMonitor.expectMsg(dead) system.stop(actor) } "catch messages send to terminated Actor" in { val deadLetterMonitor = TestProbe() system.eventStream.subscribe( deadLetterMonitor.ref, classOf[DeadLetter]) val actor = system.actorOf(Props[EchoActor], "echo") actor ! PoisonPill val msg = new Order("me", "Akka in Action", 1) actor ! msg val dead = deadLetterMonitor.expectMsgType[DeadLetter] dead.message must be(msg) dead.sender must be(testActor) dead.recipient must be(actor) } } }
Example 148
Source File: OrderServiceApp.scala From 006877 with MIT License | 5 votes |
package aia.integration import scala.concurrent.Future import akka.actor.{ ActorSystem , Actor, Props } import akka.event.Logging import akka.util.Timeout import akka.http.scaladsl.Http import akka.http.scaladsl.Http.ServerBinding import akka.http.scaladsl.server.Directives._ import akka.stream.ActorMaterializer import com.typesafe.config.{ Config, ConfigFactory } object OrderServiceApp extends App with RequestTimeout { val config = ConfigFactory.load() val host = config.getString("http.host") val port = config.getInt("http.port") implicit val system = ActorSystem() implicit val ec = system.dispatcher val processOrders = system.actorOf( Props(new ProcessOrders), "process-orders" ) val api = new OrderServiceApi(system, requestTimeout(config), processOrders).routes implicit val materializer = ActorMaterializer() val bindingFuture: Future[ServerBinding] = Http().bindAndHandle(api, host, port) val log = Logging(system.eventStream, "order-service") bindingFuture.map { serverBinding => log.info(s"Bound to ${serverBinding.localAddress} ") }.failed.foreach { case ex: Exception => log.error(ex, "Failed to bind to {}:{}!", host, port) system.terminate() } } trait RequestTimeout { import scala.concurrent.duration._ def requestTimeout(config: Config): Timeout = { val t = config.getString("akka.http.server.request-timeout") val d = Duration(t) FiniteDuration(d.length, d.unit) } }
Example 149
Source File: OrderServiceTest.scala From 006877 with MIT License | 5 votes |
package aia.integration import scala.concurrent.duration._ import scala.xml.NodeSeq import akka.actor.Props import akka.http.scaladsl.marshallers.xml.ScalaXmlSupport._ import akka.http.scaladsl.model.StatusCodes import akka.http.scaladsl.server._ import akka.http.scaladsl.testkit.ScalatestRouteTest import org.scalatest.{ Matchers, WordSpec } class OrderServiceTest extends WordSpec with Matchers with OrderService with ScalatestRouteTest { implicit val executionContext = system.dispatcher implicit val requestTimeout = akka.util.Timeout(1 second) val processOrders = system.actorOf(Props(new ProcessOrders), "orders") "The order service" should { "return NotFound if the order cannot be found" in { Get("/orders/1") ~> routes ~> check { status shouldEqual StatusCodes.NotFound } } "return the tracking order for an order that was posted" in { val xmlOrder = <order><customerId>customer1</customerId> <productId>Akka in action</productId> <number>10</number> </order> Post("/orders", xmlOrder) ~> routes ~> check { status shouldEqual StatusCodes.OK val xml = responseAs[NodeSeq] val id = (xml \\ "id").text.toInt val orderStatus = (xml \\ "status").text id shouldEqual 1 orderStatus shouldEqual "received" } Get("/orders/1") ~> routes ~> check { status shouldEqual StatusCodes.OK val xml = responseAs[NodeSeq] val id = (xml \\ "id").text.toInt val orderStatus = (xml \\ "status").text id shouldEqual 1 orderStatus shouldEqual "processing" } } } }
Example 150
Source File: ThroughputCPUTest.scala From 006877 with MIT License | 5 votes |
package aia.performance.throughput import akka.testkit.TestProbe import akka.actor.{Props, ActorSystem} import org.scalatest.{WordSpecLike, BeforeAndAfterAll, MustMatchers} import akka.routing.RoundRobinPool import com.typesafe.config.ConfigFactory import aia.performance.{ProcessCPURequest, SystemMessage, ProcessRequest} import concurrent.duration._ class ThroughputCPUTest extends WordSpecLike with BeforeAndAfterAll with MustMatchers { val configuration = ConfigFactory.load("performance/through") implicit val system = ActorSystem("ThroughputTest", configuration) "System" must { "fails to with cpu" in { val nrWorkers = 40 val nrMessages = nrWorkers * 40 val end = TestProbe() val workers = system.actorOf( RoundRobinPool(nrWorkers).props( Props(new ProcessCPURequest(250 millis, end.ref)).withDispatcher("my-dispatcher")), "Workers-cpu") val startTime = System.currentTimeMillis() for (i <- 0 until nrMessages) { workers ! new SystemMessage(startTime, 0, "") } val msg = end.receiveN(n = nrMessages, max = 9000 seconds).asInstanceOf[Seq[SystemMessage]] val endTime = System.currentTimeMillis() val total = endTime - startTime println("total process time %d Average=%d".format(total, total / nrMessages)) val grouped = msg.groupBy(_.id) grouped.map { case (key, listMsg) => (key, listMsg.foldLeft(0L) { (m, x) => math.max(m, x.duration) }) }.foreach(println(_)) Thread.sleep(1000) system.stop(workers) } } }
Example 151
Source File: ThroughputTest.scala From 006877 with MIT License | 5 votes |
package aia.performance.throughput import akka.testkit.TestProbe import akka.actor.{Props, ActorSystem} import org.scalatest.{WordSpecLike, BeforeAndAfterAll, MustMatchers} import akka.routing.RoundRobinPool import com.typesafe.config.ConfigFactory import aia.performance.{ProcessCPURequest, SystemMessage, ProcessRequest} import concurrent.duration._ class ThroughputTest extends WordSpecLike with BeforeAndAfterAll with MustMatchers { val configuration = ConfigFactory.load("performance/through") implicit val system = ActorSystem("ThroughputTest", configuration) "System" must { "fails to perform" in { val nrMessages = 99 val nrWorkers = 3 val statDuration = 2000 millis //((nrMessages * 10)+1000)/4 millis val end = TestProbe() val workers = system.actorOf( RoundRobinPool(nrWorkers).props(Props(new ProcessRequest(1 second, end.ref)).withDispatcher("my-dispatcher")), "Workers") val startTime = System.currentTimeMillis() for (i <- 0 until nrMessages) { workers ! new SystemMessage(startTime, 0, "") } val msg = end.receiveN(n = nrMessages, max = 9000 seconds).asInstanceOf[Seq[SystemMessage]] val endTime = System.currentTimeMillis() val total = endTime - startTime println("total process time %d Average=%d".format(total, total / nrMessages)) val grouped = msg.groupBy(_.id) grouped.map { case (key, listMsg) => (key, listMsg.foldLeft(0L) { (m, x) => math.max(m, x.duration) }) }.foreach(println(_)) Thread.sleep(1000) system.stop(workers) } } }
Example 152
Source File: MonitorMailboxTest.scala From 006877 with MIT License | 5 votes |
package aia.performance.monitor

import akka.testkit.TestProbe
import akka.actor.{ Props, Actor, ActorSystem }
import org.scalatest.{WordSpecLike, BeforeAndAfterAll, MustMatchers}
import concurrent.duration._
import com.typesafe.config.ConfigFactory

/**
 * Verifies that the monitoring mailbox publishes MailboxStatistics events
 * (queue size per enqueued message) to the event stream, both when
 * configured via a custom dispatcher and via the default mailbox config.
 */
class MonitorMailboxTest extends WordSpecLike
  with BeforeAndAfterAll
  with MustMatchers {

  val configuration = ConfigFactory.load("monitor/mailbox")
  implicit val system = ActorSystem("MonitorMailboxTest",
    configuration)

  override protected def afterAll(): Unit = {
    system.terminate()
    super.afterAll()
  }

  "mailbox" must {
    "send statistics with dispatcher" in {
      val statProbe = TestProbe()
      system.eventStream.subscribe(
        statProbe.ref,
        classOf[MailboxStatistics])

      // Slow actor (1s per message) so messages pile up in the mailbox.
      val testActor = system.actorOf(Props(
        new ProcessTestActor(1.second))
        .withDispatcher("my-dispatcher"), "monitorActor")
      statProbe.send(testActor, "message")
      statProbe.send(testActor, "message2")
      statProbe.send(testActor, "message3")

      // Queue size grows with each send; ranges allow for dequeue timing.
      val stat = statProbe.expectMsgType[MailboxStatistics]
      println(stat)
      stat.queueSize must be(1)
      val stat2 = statProbe.expectMsgType[MailboxStatistics]
      println(stat2)
      stat2.queueSize must (be(2) or be(1))
      val stat3 = statProbe.expectMsgType[MailboxStatistics]
      println(stat3)
      stat3.queueSize must (be(3) or be(2))
      Thread.sleep(2000)
      system.stop(testActor)
      system.eventStream.unsubscribe(statProbe.ref)
    }

    "send statistics with default" in {
      val statProbe = TestProbe()
      system.eventStream.subscribe(
        statProbe.ref,
        classOf[MailboxStatistics])

      // Same check, but without an explicit dispatcher.
      val testActor = system.actorOf(Props(
        new ProcessTestActor(1.second)), "monitorActor2")

      statProbe.send(testActor, "message")
      statProbe.send(testActor, "message2")
      statProbe.send(testActor, "message3")

      val stat = statProbe.expectMsgType[MailboxStatistics]

      stat.queueSize must be(1)
      val stat2 = statProbe.expectMsgType[MailboxStatistics]

      stat2.queueSize must (be(2) or be(1))
      val stat3 = statProbe.expectMsgType[MailboxStatistics]

      stat3.queueSize must (be(3) or be(2))
      Thread.sleep(2000)
      system.stop(testActor)
      system.eventStream.unsubscribe(statProbe.ref)
    }
  }
}

/** Test actor that simulates work by sleeping serviceTime per message. */
class ProcessTestActor(serviceTime: Duration) extends Actor {
  def receive = {
    case _ => {
      Thread.sleep(serviceTime.toMillis)
    }
  }
}
Example 153
Source File: MonitorActorTest.scala From 006877 with MIT License | 5 votes |
package aia.performance.monitor import org.scalatest.{WordSpecLike, BeforeAndAfterAll, MustMatchers} import akka.testkit.{ TestProbe, TestKit } import akka.actor.{ Props, ActorSystem } import concurrent.duration._ class MonitorActorTest extends TestKit(ActorSystem("MonitorActorTest")) with WordSpecLike with BeforeAndAfterAll with MustMatchers { "Actor" must { "send statistics" in { val statProbe = TestProbe() system.eventStream.subscribe( statProbe.ref, classOf[ActorStatistics]) val testActor = system.actorOf(Props( new ProcessTestActor(1.second) with MonitorActor), "monitorActor") statProbe.send(testActor, "message") statProbe.send(testActor, "message2") statProbe.send(testActor, "message3") val stat = statProbe.expectMsgType[ActorStatistics] println(stat) stat.exitTime - stat.entryTime must be(1000L +- 20) val stat2 = statProbe.expectMsgType[ActorStatistics] println(stat2) stat2.exitTime - stat2.entryTime must be(1000L +- 20) val stat3 = statProbe.expectMsgType[ActorStatistics] println(stat3) stat3.exitTime - stat3.entryTime must be(1000L +- 20) Thread.sleep(2000) system.stop(testActor) system.eventStream.unsubscribe(statProbe.ref) } } }
Example 154
Source File: Main.scala From 006877 with MIT License | 5 votes |
package com.goticks import scala.concurrent.Future import scala.util.{Failure, Success} import akka.actor.{ ActorSystem , Actor, Props } import akka.event.Logging import akka.util.Timeout import akka.http.scaladsl.Http import akka.http.scaladsl.Http.ServerBinding import akka.http.scaladsl.server.Directives._ import akka.stream.ActorMaterializer import com.typesafe.config.{ Config, ConfigFactory } object Main extends App with RequestTimeout { val config = ConfigFactory.load() val host = config.getString("http.host") // 설정으로부터 호스트와 포트를 가져온다 val port = config.getInt("http.port") implicit val system = ActorSystem() implicit val ec = system.dispatcher // bindAndHandle은 비동기적이며, ExecutionContext를 암시적으로 사용해야 한다 val api = new RestApi(system, requestTimeout(config)).routes // RestApi는 HTTP 루트를 제공한다 implicit val materializer = ActorMaterializer() val bindingFuture: Future[ServerBinding] = Http().bindAndHandle(api, host, port) // RestApi 루트를 가지고 HTTP 서버를 시작한다 val log = Logging(system.eventStream, "go-ticks") bindingFuture.map { serverBinding => log.info(s"RestApi bound to ${serverBinding.localAddress} ") }.onComplete { case Success(v) => case Failure(ex) => log.error(ex, "Failed to bind to {}:{}!", host, port) system.terminate() } } trait RequestTimeout { import scala.concurrent.duration._ def requestTimeout(config: Config): Timeout = { val t = config.getString("akka.http.server.request-timeout") val d = Duration(t) FiniteDuration(d.length, d.unit) } }
Example 155
Source File: TicketSeller.scala From 006877 with MIT License | 5 votes |
package com.goticks import akka.actor.{ Actor, Props, PoisonPill } object TicketSeller { def props(event: String) = Props(new TicketSeller(event)) case class Add(tickets: Vector[Ticket]) case class Buy(tickets: Int) case class Ticket(id: Int) case class Tickets(event: String, entries: Vector[Ticket] = Vector.empty[Ticket]) case object GetEvent case object Cancel } class TicketSeller(event: String) extends Actor { import TicketSeller._ var tickets = Vector.empty[Ticket] def receive = { case Add(newTickets) => tickets = tickets ++ newTickets case Buy(nrOfTickets) => val entries = tickets.take(nrOfTickets) if(entries.size >= nrOfTickets) { sender() ! Tickets(event, entries) tickets = tickets.drop(nrOfTickets) } else sender() ! Tickets(event) case GetEvent => sender() ! Some(BoxOffice.Event(event, tickets.size)) case Cancel => sender() ! Some(BoxOffice.Event(event, tickets.size)) self ! PoisonPill } }
Example 156
Source File: TicketSellerSpec.scala From 006877 with MIT License | 5 votes |
package com.goticks import akka.actor.{Props, ActorSystem} import akka.testkit.{ImplicitSender, TestKit} import org.scalatest.{WordSpecLike, MustMatchers} class TickerSellerSpec extends TestKit(ActorSystem("testTickets")) with WordSpecLike with MustMatchers with ImplicitSender with StopSystemAfterAll { "The TicketSeller" must { "Sell tickets until they are sold out" in { import TicketSeller._ def mkTickets = (1 to 10).map(i=>Ticket(i)).toVector val event = "RHCP" val ticketingActor = system.actorOf(TicketSeller.props(event)) ticketingActor ! Add(mkTickets) ticketingActor ! Buy(1) expectMsg(Tickets(event, Vector(Ticket(1)))) val nrs = (2 to 10) nrs.foreach(_ => ticketingActor ! Buy(1)) val tickets = receiveN(9) tickets.zip(nrs).foreach { case (Tickets(event, Vector(Ticket(id))), ix) => id must be(ix) } ticketingActor ! Buy(1) expectMsg(Tickets(event)) } "Sell tickets in batches until they are sold out" in { import TicketSeller._ val firstBatchSize = 10 def mkTickets = (1 to (10 * firstBatchSize)).map(i=>Ticket(i)).toVector val event = "Madlib" val ticketingActor = system.actorOf(TicketSeller.props(event)) ticketingActor ! Add(mkTickets) ticketingActor ! Buy(firstBatchSize) val bought = (1 to firstBatchSize).map(Ticket).toVector expectMsg(Tickets(event, bought)) val secondBatchSize = 5 val nrBatches = 18 val batches = (1 to nrBatches * secondBatchSize) batches.foreach(_ => ticketingActor ! Buy(secondBatchSize)) val tickets = receiveN(nrBatches) tickets.zip(batches).foreach { case (Tickets(event, bought), ix) => bought.size must equal(secondBatchSize) val last = ix * secondBatchSize + firstBatchSize val first = ix * secondBatchSize + firstBatchSize - (secondBatchSize - 1) bought.map(_.id) must equal((first to last).toVector) case _ => } ticketingActor ! Buy(1) expectMsg(Tickets(event)) ticketingActor ! Buy(10) expectMsg(Tickets(event)) } } }
Example 157
Source File: BoxOfficeSpec.scala From 006877 with MIT License | 5 votes |
package com.goticks

import akka.actor.{ ActorRef, ActorSystem, Props }
import akka.testkit.{ DefaultTimeout, ImplicitSender, TestKit }
import com.goticks.BoxOffice._
import com.goticks.TicketSeller._
import org.scalatest.{ MustMatchers, WordSpecLike }

/**
 * Behavioral tests for BoxOffice: event creation, ticket retrieval through
 * child TicketSellers, and event cancellation (including unknown events).
 */
class BoxOfficeSpec extends TestKit(ActorSystem("testBoxOffice"))
    with WordSpecLike
    with MustMatchers
    with ImplicitSender
    with DefaultTimeout
    with StopSystemAfterAll {

  "The BoxOffice" must {
    "Create an event and get tickets from the correct Ticket Seller" in {
      val boxOffice = system.actorOf(BoxOffice.props)
      val eventName = "RHCP"
      boxOffice ! CreateEvent(eventName, 10)
      expectMsg(EventCreated(Event(eventName, 10)))

      boxOffice ! GetEvents
      expectMsg(Events(Vector(Event(eventName, 10))))

      boxOffice ! BoxOffice.GetEvent(eventName)
      expectMsg(Some(Event(eventName, 10)))

      boxOffice ! GetTickets(eventName, 1)
      expectMsg(Tickets(eventName, Vector(Ticket(1))))

      // Unknown event: an empty Tickets message comes back.
      boxOffice ! GetTickets("DavidBowie", 1)
      expectMsg(Tickets("DavidBowie"))
    }

    "Create a child actor when an event is created and sends it a Tickets message" in {
      // Override the child factory so the "seller" is the test actor,
      // letting us observe the Add message it receives.
      val boxOffice = system.actorOf(Props(
        new BoxOffice {
          override def createTicketSeller(name: String): ActorRef = testActor
        }
      )
      )

      val tickets = 3
      val eventName = "RHCP"
      val expectedTickets = (1 to tickets).map(Ticket).toVector
      boxOffice ! CreateEvent(eventName, tickets)
      expectMsg(Add(expectedTickets))
      expectMsg(EventCreated(Event(eventName, tickets)))
    }

    "Get and cancel an event that is not created yet" in {
      val boxOffice = system.actorOf(BoxOffice.props)
      val noneExitEventName = "noExitEvent"
      boxOffice ! BoxOffice.GetEvent(noneExitEventName)
      expectMsg(None)

      boxOffice ! CancelEvent(noneExitEventName)
      expectMsg(None)
    }

    "Cancel a ticket which event is not created " in {
      val boxOffice = system.actorOf(BoxOffice.props)
      val noneExitEventName = "noExitEvent"

      boxOffice ! CancelEvent(noneExitEventName)
      expectMsg(None)
    }

    "Cancel a ticket which event is created" in {
      val boxOffice = system.actorOf(BoxOffice.props)
      val eventName = "RHCP"
      val tickets = 10
      boxOffice ! CreateEvent(eventName, tickets)
      expectMsg(EventCreated(Event(eventName, tickets)))

      // Cancelling an existing event returns its final state.
      boxOffice ! CancelEvent(eventName)
      expectMsg(Some(Event(eventName, tickets)))
    }
  }
}
Example 158
Source File: BootHello.scala From 006877 with MIT License | 5 votes |
package aia.deploy import akka.actor.{ Props, ActorSystem } import scala.concurrent.duration._ object BootHello extends App { val system = ActorSystem("hellokernel") val actor = system.actorOf(Props[HelloWorld]) val config = system.settings.config val timer = config.getInt("helloWorld.timer") system.actorOf(Props( new HelloWorldCaller( timer millis, actor))) }
Example 159
Source File: Main.scala From 006877 with MIT License | 5 votes |
package aia.cluster package words import com.typesafe.config.ConfigFactory import akka.actor.{Props, ActorSystem} import akka.cluster.Cluster import JobReceptionist.JobRequest object Main extends App { val config = ConfigFactory.load() val system = ActorSystem("words", config) println(s"Starting node with roles: ${Cluster(system).selfRoles}") if(system.settings.config.getStringList("akka.cluster.roles").contains("master")) { Cluster(system).registerOnMemberUp { val receptionist = system.actorOf(Props[JobReceptionist], "receptionist") println("Master node is ready.") val text = List("this is a test", "of some very naive word counting", "but what can you say", "it is what it is") receptionist ! JobRequest("the first job", (1 to 100000).flatMap(i => text ++ text).toList) system.actorOf(Props(new ClusterDomainEventListener), "cluster-listener") } } }
Example 160
Source File: WordsClusterSpec.scala From 006877 with MIT License | 5 votes |
package aia.cluster
package words

import scala.concurrent.duration._

import akka.actor.Props
import akka.cluster.Cluster
import akka.cluster.ClusterEvent.{CurrentClusterState, MemberUp}
import akka.testkit.ImplicitSender
import akka.remote.testkit.MultiNodeSpec

import JobReceptionist._

// One concrete class per JVM participating in the multi-JVM test.
class WordsClusterSpecMultiJvmNode1 extends WordsClusterSpec
class WordsClusterSpecMultiJvmNode2 extends WordsClusterSpec
class WordsClusterSpecMultiJvmNode3 extends WordsClusterSpec
class WordsClusterSpecMultiJvmNode4 extends WordsClusterSpec

/**
 * Multi-node test: forms a four-node cluster (seed, master, two workers),
 * runs a word-count job, and verifies the job still completes when a
 * worker is made to fail (the "FAIL" input).
 */
class WordsClusterSpec extends MultiNodeSpec(WordsClusterSpecConfig)
  with STMultiNodeSpec with ImplicitSender {

  import WordsClusterSpecConfig._

  def initialParticipants = roles.size

  val seedAddress = node(seed).address
  val masterAddress = node(master).address
  val worker1Address = node(worker1).address
  val worker2Address = node(worker2).address

  muteDeadLetters(classOf[Any])(system)

  "A Words cluster" must {

    "form the cluster" in within(10 seconds) {
      // Subscribe before joining so no MemberUp event is missed.
      Cluster(system).subscribe(testActor, classOf[MemberUp])
      expectMsgClass(classOf[CurrentClusterState])

      Cluster(system).join(seedAddress)

      // All four members must come up.
      receiveN(4).map { case MemberUp(m) => m.address }.toSet must be(
        Set(seedAddress, masterAddress, worker1Address, worker2Address))

      Cluster(system).unsubscribe(testActor)

      enterBarrier("cluster-up")
    }

    "execute a words job once the cluster is running" in within(10 seconds) {
      runOn(master) {
        val receptionist = system.actorOf(Props[JobReceptionist], "receptionist")
        receptionist ! JobRequest("job-1", List("some", "some very long text", "some long text"))
        expectMsg(JobSuccess("job-1", Map("some" -> 3, "very" -> 1, "long" -> 2, "text" -> 2)))
      }
      enterBarrier("job-done")
    }

    "continue to process a job when failures occur" in within(10 seconds) {
      runOn(master) {
        val receptionist = system.actorSelection("/user/receptionist")
        // "FAIL" triggers a simulated worker failure; the counts must
        // nevertheless match the job-1 result.
        receptionist ! JobRequest("job-2", List("some", "FAIL", "some very long text", "some long text"))
        expectMsg(JobSuccess("job-2", Map("some" -> 3, "very" -> 1, "long" -> 2, "text" -> 2)))
      }
      enterBarrier("job-done")
    }
  }
}
Example 161
Source File: TicketSeller.scala From 006877 with MIT License | 5 votes |
package com.goticks import akka.actor.{ Actor, Props, PoisonPill } object TicketSeller { def props(event: String) = Props(new TicketSeller(event)) case class Add(tickets: Vector[Ticket]) case class Buy(tickets: Int) case class Ticket(id: Int) case class Tickets(event: String, entries: Vector[Ticket] = Vector.empty[Ticket]) case object GetEvent case object Cancel } class TicketSeller(event: String) extends Actor { import TicketSeller._ var tickets = Vector.empty[Ticket] def receive = { case Add(newTickets) => tickets = tickets ++ newTickets case Buy(nrOfTickets) => val entries = tickets.take(nrOfTickets).toVector if(entries.size >= nrOfTickets) { sender() ! Tickets(event, entries) tickets = tickets.drop(nrOfTickets) } else sender() ! Tickets(event) case GetEvent => sender() ! Some(BoxOffice.Event(event, tickets.size)) case Cancel => sender() ! Some(BoxOffice.Event(event, tickets.size)) self ! PoisonPill } }
Example 162
Source File: FrontendMain.scala From 006877 with MIT License | 5 votes |
package com.goticks import akka.actor.{ ActorRef, ActorSystem, Props } import akka.event.Logging import com.typesafe.config.ConfigFactory object FrontendMain extends App with Startup { val config = ConfigFactory.load("frontend") implicit val system = ActorSystem("frontend", config) val api = new RestApi() { val log = Logging(system.eventStream, "frontend") implicit val requestTimeout = configuredRequestTimeout(config) implicit def executionContext = system.dispatcher def createPath(): String = { val config = ConfigFactory.load("frontend").getConfig("backend") val host = config.getString("host") val port = config.getInt("port") val protocol = config.getString("protocol") val systemName = config.getString("system") val actorName = config.getString("actor") s"$protocol://$systemName@$host:$port/$actorName" } def createBoxOffice: ActorRef = { val path = createPath() system.actorOf(Props(new RemoteLookupProxy(path)), "lookupBoxOffice") } } startup(api.routes) }
Example 163
Source File: BoxOfficeSpec.scala From 006877 with MIT License | 5 votes |
package com.goticks import akka.actor.{ ActorRef, Props, ActorSystem } import akka.testkit.{ TestKit, ImplicitSender, DefaultTimeout } import org.scalatest.{ WordSpecLike, MustMatchers } class BoxOfficeSpec extends TestKit(ActorSystem("testBoxOffice")) with WordSpecLike with MustMatchers with ImplicitSender with DefaultTimeout with StopSystemAfterAll { "The BoxOffice" must { "Create an event and get tickets from the correct Ticket Seller" in { import BoxOffice._ import TicketSeller._ val boxOffice = system.actorOf(BoxOffice.props) val eventName = "RHCP" boxOffice ! CreateEvent(eventName, 10) expectMsg(EventCreated(Event(eventName, 10))) boxOffice ! GetTickets(eventName, 1) expectMsg(Tickets(eventName, Vector(Ticket(1)))) boxOffice ! GetTickets("DavidBowie", 1) expectMsg(Tickets("DavidBowie")) } "Create a child actor when an event is created and sends it a Tickets message" in { import BoxOffice._ import TicketSeller._ val boxOffice = system.actorOf(Props( new BoxOffice { override def createTicketSeller(name: String): ActorRef = testActor } ) ) val tickets = 3 val eventName = "RHCP" val expectedTickets = (1 to tickets).map(Ticket).toVector boxOffice ! CreateEvent(eventName, tickets) expectMsg(Add(expectedTickets)) expectMsg(EventCreated(Event(eventName, tickets))) } } }
Example 164
Source File: FilteringActorTest.scala From 006877 with MIT License | 5 votes |
package aia.testdriven import akka.testkit.TestKit import akka.actor.{ Actor, Props, ActorRef, ActorSystem } import org.scalatest.{MustMatchers, WordSpecLike } class FilteringActorTest extends TestKit(ActorSystem("testsystem")) with WordSpecLike with MustMatchers with StopSystemAfterAll { "A Filtering Actor" must { "filter out particular messages" in { import FilteringActor._ val props = FilteringActor.props(testActor, 5) val filter = system.actorOf(props, "filter-1") filter ! Event(1) filter ! Event(2) filter ! Event(1) filter ! Event(3) filter ! Event(1) filter ! Event(4) filter ! Event(5) filter ! Event(5) filter ! Event(6) val eventIds = receiveWhile() { case Event(id) if id <= 5 => id } eventIds must be(List(1, 2, 3, 4, 5)) expectMsg(Event(6)) } "filter out particular messages using expectNoMsg" in { import FilteringActor._ val props = FilteringActor.props(testActor, 5) val filter = system.actorOf(props, "filter-2") filter ! Event(1) filter ! Event(2) expectMsg(Event(1)) expectMsg(Event(2)) filter ! Event(1) expectNoMsg filter ! Event(3) expectMsg(Event(3)) filter ! Event(1) expectNoMsg filter ! Event(4) filter ! Event(5) filter ! Event(5) expectMsg(Event(4)) expectMsg(Event(5)) expectNoMsg() } } } object FilteringActor { def props(nextActor: ActorRef, bufferSize: Int) = Props(new FilteringActor(nextActor, bufferSize)) case class Event(id: Long) } class FilteringActor(nextActor: ActorRef, bufferSize: Int) extends Actor { import FilteringActor._ var lastMessages = Vector[Event]() def receive = { case msg: Event => if (!lastMessages.contains(msg)) { lastMessages = lastMessages :+ msg nextActor ! msg if (lastMessages.size > bufferSize) { // 가장 오래된 것을 버린다 lastMessages = lastMessages.tail } } } }
Example 165
Source File: EchoActorTest.scala From 006877 with MIT License | 5 votes |
package aia.testdriven import akka.testkit.{ TestKit, ImplicitSender } import akka.actor.{ Props, Actor, ActorSystem } import org.scalatest.WordSpecLike import akka.util.Timeout import scala.concurrent.Await import scala.util.{ Success, Failure } import scala.language.postfixOps class EchoActorTest extends TestKit(ActorSystem("testsystem")) with WordSpecLike with ImplicitSender with StopSystemAfterAll { "An EchoActor" must { "Reply with the same message it receives" in { import akka.pattern.ask import scala.concurrent.duration._ implicit val timeout = Timeout(3 seconds) implicit val ec = system.dispatcher val echo = system.actorOf(Props[EchoActor], "echo1") val future = echo.ask("some message") future.onComplete { case Failure(_) => //실패 처리 case Success(msg) => //성공 처리 } Await.ready(future, timeout.duration) } "Reply with the same message it receives without ask" in { val echo = system.actorOf(Props[EchoActor], "echo2") echo ! "some message" expectMsg("some message") } } } class EchoActor extends Actor { def receive = { case msg => sender() ! msg } }
Example 166
Source File: SendingActorTest.scala From 006877 with MIT License | 5 votes |
package aia.testdriven import scala.util.Random import akka.testkit.TestKit import akka.actor.{ Props, ActorRef, Actor, ActorSystem } import org.scalatest.{WordSpecLike, MustMatchers} class SendingActorTest extends TestKit(ActorSystem("testsystem")) with WordSpecLike with MustMatchers with StopSystemAfterAll { "A Sending Actor" must { "send a message to another actor when it has finished processing" in { import SendingActor._ val props = SendingActor.props(testActor) val sendingActor = system.actorOf(props, "sendingActor") val size = 1000 val maxInclusive = 100000 def randomEvents() = (0 until size).map{ _ => Event(Random.nextInt(maxInclusive)) }.toVector val unsorted = randomEvents() val sortEvents = SortEvents(unsorted) sendingActor ! sortEvents expectMsgPF() { case SortedEvents(events) => events.size must be(size) unsorted.sortBy(_.id) must be(events) } } } } object SendingActor { def props(receiver: ActorRef) = Props(new SendingActor(receiver)) case class Event(id: Long) case class SortEvents(unsorted: Vector[Event]) case class SortedEvents(sorted: Vector[Event]) } class SendingActor(receiver: ActorRef) extends Actor { import SendingActor._ def receive = { case SortEvents(unsorted) => receiver ! SortedEvents(unsorted.sortBy(_.id)) } }
Example 167
Source File: Greeter01Test.scala From 006877 with MIT License | 5 votes |
package aia.testdriven

import akka.testkit.{ CallingThreadDispatcher, EventFilter, TestKit }
import akka.actor.{ Props, ActorSystem }
import com.typesafe.config.ConfigFactory
import org.scalatest.WordSpecLike

import Greeter01Test._

// Verifies the Greeter's side effect (an info log line) with EventFilter.
// CallingThreadDispatcher makes message processing synchronous on the test
// thread, so the log assertion cannot race the actor.
class Greeter01Test extends TestKit(testSystem)
  with WordSpecLike
  with StopSystemAfterAll {

  "The Greeter" must {
    "say Hello World! when a Greeting(\"World\") is sent to it" in {
      val dispatcherId = CallingThreadDispatcher.Id
      val props = Props[Greeter].withDispatcher(dispatcherId)
      val greeter = system.actorOf(props)
      EventFilter.info(message = "Hello World!", occurrences = 1).intercept {
        greeter ! Greeting("World")
      }
    }
  }
}

object Greeter01Test {
  // ActorSystem whose logger is the TestEventListener, required by EventFilter.
  val testSystem = {
    val config = ConfigFactory.parseString( """ akka.loggers = [akka.testkit.TestEventListener] """)
    ActorSystem("testsystem", config)
  }
}
Example 168
Source File: FanLogsApp.scala From 006877 with MIT License | 5 votes |
package aia.stream

import java.nio.file.{ Files, FileSystems, Path }
import scala.concurrent.Future
import scala.concurrent.duration._
import akka.NotUsed
import akka.actor.{ ActorSystem , Actor, Props }
import akka.event.Logging
import akka.stream.{ ActorMaterializer, ActorMaterializerSettings, Supervision }
import akka.http.scaladsl.Http
import akka.http.scaladsl.Http.ServerBinding
import akka.http.scaladsl.server.Directives._
import com.typesafe.config.{ Config, ConfigFactory }

// HTTP entry point for the fan-out log-stream example: reads host/port and
// stream limits from config, then serves the FanLogsApi routes.
object FanLogsApp extends App {

  val config = ConfigFactory.load()
  val host = config.getString("http.host")
  val port = config.getInt("http.port")

  // Ensure the logs directory exists before the streams write into it.
  val logsDir = {
    val dir = config.getString("log-stream-processor.logs-dir")
    Files.createDirectories(FileSystems.getDefault.getPath(dir))
  }
  val maxLine = config.getInt("log-stream-processor.max-line")
  val maxJsObject = config.getInt("log-stream-processor.max-json-object")

  implicit val system = ActorSystem()
  implicit val ec = system.dispatcher

  // Skip unparsable log lines instead of failing the whole stream;
  // anything else is fatal for the stream.
  val decider : Supervision.Decider = {
    case _: LogStreamProcessor.LogParseException => Supervision.Resume
    case _ => Supervision.Stop
  }

  implicit val materializer = ActorMaterializer(
    ActorMaterializerSettings(system)
      .withSupervisionStrategy(decider)
  )

  val api = new FanLogsApi(logsDir, maxLine, maxJsObject).routes

  val bindingFuture: Future[ServerBinding] =
    Http().bindAndHandle(api, host, port)

  val log = Logging(system.eventStream, "fan-logs")
  bindingFuture.map { serverBinding =>
    log.info(s"Bound to ${serverBinding.localAddress} ")
  }.onFailure {
    // Binding failed (e.g. port already in use): log and shut the system down.
    case ex: Exception =>
      log.error(ex, "Failed to bind to {}:{}!", host, port)
      system.terminate()
  }
}
Example 169
Source File: LogsApp.scala From 006877 with MIT License | 5 votes |
package aia.stream

import java.nio.file.{ Files, FileSystems, Path }
import scala.concurrent.Future
import scala.concurrent.duration._
import akka.NotUsed
import akka.actor.{ ActorSystem , Actor, Props }
import akka.event.Logging
import akka.stream.{ ActorMaterializer, ActorMaterializerSettings, Supervision }
import akka.http.scaladsl.Http
import akka.http.scaladsl.Http.ServerBinding
import akka.http.scaladsl.server.Directives._
import com.typesafe.config.{ Config, ConfigFactory }

// HTTP entry point for the basic log-stream example: serves LogsApi routes.
object LogsApp extends App {

  val config = ConfigFactory.load()
  val host = config.getString("http.host")
  val port = config.getInt("http.port")

  // Ensure the logs directory exists before the streams write into it.
  val logsDir = {
    val dir = config.getString("log-stream-processor.logs-dir")
    Files.createDirectories(FileSystems.getDefault.getPath(dir))
  }
  val maxLine = config.getInt("log-stream-processor.max-line")

  implicit val system = ActorSystem()
  implicit val ec = system.dispatcher

  // NOTE(review): both cases stop the stream; the explicit LogParseException
  // case is redundant here (unlike FanLogsApp, which Resumes on parse errors).
  val decider : Supervision.Decider = {
    case _: LogStreamProcessor.LogParseException => Supervision.Stop
    case _ => Supervision.Stop
  }

  implicit val materializer = ActorMaterializer(
    ActorMaterializerSettings(system)
      .withSupervisionStrategy(decider)
  )

  val api = new LogsApi(logsDir, maxLine).routes

  val bindingFuture: Future[ServerBinding] =
    Http().bindAndHandle(api, host, port)

  val log = Logging(system.eventStream, "logs")
  bindingFuture.map { serverBinding =>
    log.info(s"Bound to ${serverBinding.localAddress} ")
  }.onFailure {
    // Binding failed (e.g. port already in use): log and shut the system down.
    case ex: Exception =>
      log.error(ex, "Failed to bind to {}:{}!", host, port)
      system.terminate()
  }
}
Example 170
Source File: LogStreamProcessorApp.scala From 006877 with MIT License | 5 votes |
package aia.stream

import java.nio.file.{ Files, FileSystems, Path }
import scala.concurrent.Future
import scala.concurrent.duration._
import akka.NotUsed
import akka.actor.{ ActorSystem , Actor, Props }
import akka.event.Logging
import akka.stream.{ ActorMaterializer, ActorMaterializerSettings, Supervision }
import akka.http.scaladsl.Http
import akka.http.scaladsl.Http.ServerBinding
import akka.http.scaladsl.server.Directives._
import com.typesafe.config.{ Config, ConfigFactory }

// HTTP entry point for the full processor example: prepares logs,
// notifications and metrics directories and serves LogStreamProcessorApi.
object LogStreamProcessorApp extends App {

  val config = ConfigFactory.load()
  val host = config.getString("http.host")
  val port = config.getInt("http.port")

  // Create each working directory up front so stream sinks can write to them.
  val logsDir = {
    val dir = config.getString("log-stream-processor.logs-dir")
    Files.createDirectories(FileSystems.getDefault.getPath(dir))
  }
  val notificationsDir = {
    val dir = config.getString("log-stream-processor.notifications-dir")
    Files.createDirectories(FileSystems.getDefault.getPath(dir))
  }
  val metricsDir = {
    val dir = config.getString("log-stream-processor.metrics-dir")
    Files.createDirectories(FileSystems.getDefault.getPath(dir))
  }
  val maxLine = config.getInt("log-stream-processor.max-line")
  val maxJsObject = config.getInt("log-stream-processor.max-json-object")

  implicit val system = ActorSystem()
  implicit val ec = system.dispatcher

  // Skip unparsable log lines instead of failing the whole stream.
  val decider : Supervision.Decider = {
    case _: LogStreamProcessor.LogParseException => Supervision.Resume
    case _ => Supervision.Stop
  }

  implicit val materializer = ActorMaterializer(
    ActorMaterializerSettings(system)
      .withSupervisionStrategy(decider)
  )

  val api = new LogStreamProcessorApi(logsDir, notificationsDir, metricsDir, maxLine, maxJsObject).routes

  val bindingFuture: Future[ServerBinding] =
    Http().bindAndHandle(api, host, port)

  val log = Logging(system.eventStream, "processor")
  bindingFuture.map { serverBinding =>
    log.info(s"Bound to ${serverBinding.localAddress} ")
  }.onFailure {
    // Binding failed (e.g. port already in use): log and shut the system down.
    case ex: Exception =>
      log.error(ex, "Failed to bind to {}:{}!", host, port)
      system.terminate()
  }
}
Example 171
Source File: ContentNegLogsApp.scala From 006877 with MIT License | 5 votes |
package aia.stream

import java.nio.file.{ Files, FileSystems, Path }
import scala.concurrent.Future
import scala.concurrent.duration._
import akka.NotUsed
import akka.actor.{ ActorSystem , Actor, Props }
import akka.event.Logging
import akka.stream.{ ActorMaterializer, ActorMaterializerSettings, Supervision }
import akka.http.scaladsl.Http
import akka.http.scaladsl.Http.ServerBinding
import akka.http.scaladsl.server.Directives._
import com.typesafe.config.{ Config, ConfigFactory }

// HTTP entry point for the content-negotiation example: serves
// ContentNegLogsApi routes on the configured host/port.
object ContentNegLogsApp extends App {

  val config = ConfigFactory.load()
  val host = config.getString("http.host")
  val port = config.getInt("http.port")

  // Ensure the logs directory exists before the streams write into it.
  val logsDir = {
    val dir = config.getString("log-stream-processor.logs-dir")
    Files.createDirectories(FileSystems.getDefault.getPath(dir))
  }
  val maxLine = config.getInt("log-stream-processor.max-line")
  val maxJsObject = config.getInt("log-stream-processor.max-json-object")

  implicit val system = ActorSystem()
  implicit val ec = system.dispatcher

  // NOTE(review): both cases stop the stream; the explicit LogParseException
  // case is redundant here (unlike FanLogsApp, which Resumes on parse errors).
  val decider : Supervision.Decider = {
    case _: LogStreamProcessor.LogParseException => Supervision.Stop
    case _ => Supervision.Stop
  }

  implicit val materializer = ActorMaterializer(
    ActorMaterializerSettings(system)
      .withSupervisionStrategy(decider)
  )

  val api = new ContentNegLogsApi(logsDir, maxLine, maxJsObject).routes

  val bindingFuture: Future[ServerBinding] =
    Http().bindAndHandle(api, host, port)

  val log = Logging(system.eventStream, "content-neg-logs")
  bindingFuture.map { serverBinding =>
    log.info(s"Bound to ${serverBinding.localAddress} ")
  }.onFailure {
    // Binding failed (e.g. port already in use): log and shut the system down.
    case ex: Exception =>
      log.error(ex, "Failed to bind to {}:{}!", host, port)
      system.terminate()
  }
}
Example 172
Source File: BlockActor.scala From bitcoin-s-spv-node with MIT License | 5 votes |
package org.bitcoins.spvnode.networking

import akka.actor.{Actor, ActorContext, ActorRef, ActorSystem, Props}
import akka.event.LoggingReceive
import org.bitcoins.core.crypto.DoubleSha256Digest
import org.bitcoins.core.protocol.CompactSizeUInt
import org.bitcoins.core.protocol.blockchain.BlockHeader
import org.bitcoins.core.util.BitcoinSLogger
import org.bitcoins.spvnode.NetworkMessage
import org.bitcoins.spvnode.constant.Constants
import org.bitcoins.spvnode.messages.{BlockMessage, GetBlocksMessage, InventoryMessage, MsgBlock}
import org.bitcoins.spvnode.messages.data.{GetBlocksMessage, GetDataMessage, Inventory, InventoryMessage}
import org.bitcoins.spvnode.util.BitcoinSpvNodeUtil

// Requests a single block from a peer: given a block hash it sends a getdata
// message through a PeerMessageHandler, then waits for the matching
// BlockMessage, relays it to its parent, and stops.
sealed trait BlockActor extends Actor with BitcoinSLogger {

  def receive: Receive = LoggingReceive {
    case hash: DoubleSha256Digest =>
      val peerMsgHandler = PeerMessageHandler(context)
      val inv = Inventory(MsgBlock,hash)
      val getDataMessage = GetDataMessage(inv)
      val networkMessage = NetworkMessage(Constants.networkParameters, getDataMessage)
      peerMsgHandler ! networkMessage
      // From here on, only the block reply is handled.
      context.become(awaitBlockMsg)
    case blockHeader: BlockHeader =>
      // Reduce a full header to its hash and re-dispatch to this actor.
      self.forward(blockHeader.hash)
  }

  // Hands the received block to the parent and shuts this actor down.
  def awaitBlockMsg: Receive = LoggingReceive {
    case blockMsg: BlockMessage =>
      context.parent ! blockMsg
      context.stop(self)
  }
}

object BlockActor {
  // Concrete impl is private; construction goes through props/apply only.
  private case class BlockActorImpl() extends BlockActor
  def props = Props(classOf[BlockActorImpl])
  def apply(context: ActorContext): ActorRef = context.actorOf(props)
}
Example 173
Source File: ProducerActor.scala From typebus with MIT License | 5 votes |
package io.surfkit.typebus.actors

import akka.actor.{Actor, ActorLogging, Props}
import io.surfkit.typebus.AvroByteStreams
import io.surfkit.typebus.bus.Publisher
import io.surfkit.typebus.event._

/**
 * Forwards PublishedEvents to the underlying bus publisher, logging (but
 * swallowing) any failure so one bad event cannot crash the actor. Anything
 * other than a PublishedEvent is logged as a warning and dropped.
 */
class ProducerActor(producer: Publisher) extends Actor with ActorLogging with AvroByteStreams {

  // Publish one event, turning failures into error log entries.
  private def publishSafely(x: PublishedEvent): Unit =
    try {
      log.info(s"[ProducerActor] publish ${x.meta.eventType}")
      producer.publish(x)(context.system)
    } catch {
      case t: Throwable => log.error(t, "Error trying to publish event.")
    }

  def receive = {
    case x: PublishedEvent =>
      publishSafely(x)
    case x =>
      log.warning(s"ProducerActor does not know how to handle type[${x.getClass.getSimpleName}] containing: ${x} ...WTF WTF WTF !!!!!!!!")
  }

  override def postStop(): Unit =
    log.debug(s"ProducerActor ACTOR STOP !!! ${self.path.toStringWithoutAddress}")
}
Example 174
Source File: Client.scala From typebus with MIT License | 5 votes |
package io.surfkit.typebus.client

import io.surfkit.typebus.{ByteStreamReader, ByteStreamWriter}
import io.surfkit.typebus.event.EventMeta
import io.surfkit.typebus.bus.Publisher
import io.surfkit.typebus.actors.GatherActor
import io.surfkit.typebus.event._
import scala.concurrent.Future
import scala.concurrent.duration._
import akka.actor.{ActorSystem, Props}
import scala.reflect.ClassTag
import akka.util.Timeout
import java.util.UUID

// NOTE(review): the enclosing class declaration was lost when this snippet was
// extracted; `wire` references `system`, `serviceIdentifier` and `publisher`
// from that missing scope — confirm against the original file.

  // RPC-over-bus helper: publishes request `x` of type T via a GatherActor and
  // returns either the U reply or the ServiceException the service sent back.
  // The ask failure path is reported to the bus before being propagated.
  def wire[T : ClassTag, U : ClassTag](x: T, eventMeta: Option[EventMeta] = None)(implicit timeout:Timeout = Timeout(4 seconds), w:ByteStreamWriter[T], r: ByteStreamReader[U]) :Future[Either[ServiceException,U]]= {
    val tType = scala.reflect.classTag[T].runtimeClass.getCanonicalName
    val uType = scala.reflect.classTag[U].runtimeClass.getCanonicalName
    val gather = system.actorOf(Props(new GatherActor[T, U](serviceIdentifier, publisher, timeout, w, r)))
    // Fresh eventId/eventType per call; caller metadata is reused when supplied.
    val meta = eventMeta.map(_.copy(eventId = UUID.randomUUID().toString, eventType = EventType.parse(x.getClass.getCanonicalName))).getOrElse{
      EventMeta(
        eventId = UUID.randomUUID().toString,
        eventType = EventType.parse(x.getClass.getCanonicalName),
        directReply = None,
        correlationId = None
      )
    }
    (gather ? GatherActor.Request(x)).map{
      case x: U => Right(x.asInstanceOf[U])
      case y: ServiceException => Left(y)
    }.recoverWith{
      case t: Throwable =>
        // Report the failed RPC to the bus, then fail the returned future.
        publisher.produceErrorReport(t, meta, s"FAILED RPC call ${tType} => Future[${uType}] failed with exception '${t.getMessage}'")(system)
        Future.failed(t)
    }
  }
}
Example 175
Source File: MqttIntermediary.scala From mqtt-mongo with MIT License | 5 votes |
package com.izmailoff.mm.mqtt

import akka.actor.{ActorRef, ActorSystem, Props}
import com.izmailoff.mm.config.GlobalAppConfig.Application.MqttBroker
import com.sandinh.paho.akka.MqttPubSub
import com.sandinh.paho.akka.MqttPubSub.PSConfig
import com.izmailoff.mm.util.StringUtils._

/**
 * Spawns the MqttPubSub connection actor, configured from GlobalAppConfig.
 * Empty credential strings are normalized to null before being handed to the
 * Paho client.
 */
trait MqttIntermediary extends MqttIntermediaryComponent {

  def system: ActorSystem

  def startMqttIntermediary(): ActorRef = {
    val connectionSettings = PSConfig(
      brokerUrl = MqttBroker.url,
      userName = emptyToNull(MqttBroker.userName),
      password = emptyToNull(MqttBroker.password),
      stashTimeToLive = MqttBroker.stashTimeToLive,
      stashCapacity = MqttBroker.stashCapacity,
      reconnectDelayMin = MqttBroker.reconnectDelayMin,
      reconnectDelayMax = MqttBroker.reconnectDelayMax
    )
    system.actorOf(Props(classOf[MqttPubSub], connectionSettings), name = "MqttIntermediary")
  }
}

/** Component interface so implementations can be swapped out in tests. */
trait MqttIntermediaryComponent {
  def startMqttIntermediary(): ActorRef
}
Example 176
Source File: PersistentFSMActor.scala From Akka-Cookbook with MIT License | 5 votes |
package com.packt.chapter6

import akka.actor.{ActorLogging, Props}
import akka.persistence.fsm.PersistentFSM
import scala.reflect.ClassTag

object PersistentFSMActor {
  def props(persistenceId: String) = Props(new PersistentFSMActor(persistenceId))
}

// Persistent FSM modelling a countdown latch: starts Closed, counts Mark
// messages down, and transitions to Open once the count is exhausted.
// State data is rebuilt from persisted DomainEvents via applyEvent on recovery.
class PersistentFSMActor(_persistenceId: String)(implicit val domainEventClassTag: ClassTag[DomainEvent])
  extends PersistentFSM[CountDownLatchState,Count,DomainEvent] with ActorLogging {

  startWith(Closed, Count())

  when(Closed) {
    case Event(Initialize(count), _) =>
      log.info(s"Initializing countdown latch with count $count")
      stay applying LatchDownClosed(count)
    case Event(Mark, Count(n)) if n != 0 =>
      // Still counting: persist a decrement (applyEvent turns n into n - 1).
      log.info(s"Still $n to open gate.")
      stay applying LatchDownClosed(n)
    case Event(Mark, _) =>
      log.info(s"Gate open.")
      goto(Open) applying LatchDownOpen
  }

  when(Open) {
    case Event(Initialize(count), _) =>
      // Re-arm the latch with a fresh count.
      goto(Closed) applying LatchDownClosed(count)
  }

  override def preStart() = log.info("Starting.")
  override def postStop() = log.info("Stopping.")
  override val persistenceId = _persistenceId

  // How persisted events fold into state data: LatchDownClosed(i) stores i - 1,
  // LatchDownOpen resets to the default Count().
  override def applyEvent(event: DomainEvent, countdown: Count) = event match {
    case LatchDownClosed(i) => Count(i-1)
    case LatchDownOpen => Count()
  }
}
Example 177
Source File: FriendActor.scala From Akka-Cookbook with MIT License | 5 votes |
package com.packt.chapter6

import akka.actor.{ActorLogging, Props}
import akka.persistence.{PersistentActor, Recovery, RecoveryCompleted, SnapshotOffer}

object FriendActor {
  // The Recovery strategy is injected so callers can vary replay behavior.
  def props(friendId: String, recoveryStrategy: Recovery) = Props(new FriendActor(friendId, recoveryStrategy))
}

// Event-sourced actor tracking a friend list: commands are persisted as
// FriendEvents and folded into FriendState; recovery replays the snapshot
// (if offered) plus subsequent events.
class FriendActor(friendId: String, r: Recovery) extends PersistentActor with ActorLogging {
  override val persistenceId = friendId
  override val recovery = r

  var state = FriendState()

  def updateState(event: FriendEvent) = state = state.update(event)

  val receiveRecover: Receive = {
    case evt: FriendEvent =>
      log.info(s"Replaying event: $evt")
      updateState(evt)
    case SnapshotOffer(_, recoveredState : FriendState) =>
      // A snapshot replaces the state wholesale; later events are replayed on top.
      log.info(s"Snapshot offered: $recoveredState")
      state = recoveredState
    case RecoveryCompleted => log.info(s"Recovery completed. Current state: $state")
  }

  val receiveCommand: Receive = {
    case AddFriend(friend) => persist(FriendAdded(friend))(updateState)
    case RemoveFriend(friend) => persist(FriendRemoved(friend))(updateState)
    case "snap" => saveSnapshot(state)   // snapshot on demand
    case "print" => log.info(s"Current state: $state")
  }
}
Example 178
Source File: SafePersistenceActorShutdownApp.scala From Akka-Cookbook with MIT License | 5 votes |
package com.packt.chapter6

import akka.actor.{ActorSystem, PoisonPill, Props}

// Contrasts two ways of stopping a persistent actor: PoisonPill versus a
// user-level ShutdownPersistentActor message processed from the normal
// mailbox. NOTE(review): per the Akka persistence docs, PoisonPill can stop
// the actor before pending persist handlers run — the user-level message is
// the safe variant this example is demonstrating.
object SafePersistenceActorShutdownApp extends App {
  val system = ActorSystem("safe-shutdown")

  val persistentActor1 = system.actorOf(Props[SamplePersistenceActor])
  val persistentActor2 = system.actorOf(Props[SamplePersistenceActor])

  // Unsafe shutdown: PoisonPill right after two persisted updates.
  persistentActor1 ! UserUpdate("foo", Add)
  persistentActor1 ! UserUpdate("foo", Add)
  persistentActor1 ! PoisonPill

  // Safe shutdown: an application-level stop message.
  persistentActor2 ! UserUpdate("foo", Add)
  persistentActor2 ! UserUpdate("foo", Add)
  persistentActor2 ! ShutdownPersistentActor
}
Example 179
Source File: SamplePersistenceApp.scala From Akka-Cookbook with MIT License | 5 votes |
package com.packt.chapter6

import akka.actor.{ActorSystem, Props}

// Runs SamplePersistenceActor twice to demonstrate event-sourced recovery:
// the second incarnation prints state rebuilt from the journal/snapshot
// written by the first.
object SamplePersistenceApp extends App {
  val system = ActorSystem("example")

  val persistentActor1 = system.actorOf(Props[SamplePersistenceActor])
  persistentActor1 ! UserUpdate("foo", Add)
  persistentActor1 ! UserUpdate("baz", Add)
  persistentActor1 ! "snap"    // save a snapshot of the current state
  persistentActor1 ! "print"
  persistentActor1 ! UserUpdate("baz", Remove)
  persistentActor1 ! "print"

  // Crude wait for asynchronous persistence to finish; acceptable in a demo.
  Thread.sleep(2000)
  system.stop(persistentActor1)

  val persistentActor2 = system.actorOf(Props[SamplePersistenceActor])
  persistentActor2 ! "print"   // should show the recovered state

  Thread.sleep(2000)
  system.terminate()
}
Example 180
Source File: StockPersistenceActor.scala From Akka-Cookbook with MIT License | 5 votes |
package com.packt.chapter6

import akka.actor.{ActorLogging, Props}
import akka.persistence.{PersistentActor, RecoveryCompleted}

object StockPersistenceActor {
  def props(stockId: String) = Props(new StockPersistenceActor(stockId))
}

// Event-sourced price tracker: every ValueUpdate command is journaled as a
// ValueAppended event and folded into the in-memory StockHistory.
class StockPersistenceActor(stockId: String) extends PersistentActor with ActorLogging {
  override val persistenceId = stockId

  var state = StockHistory()

  def updateState(event: ValueAppended) = state = state.update(event)

  val receiveRecover: Receive = {
    case evt: ValueAppended => updateState(evt)
    case RecoveryCompleted => log.info(s"Recovery completed. Current state: $state")
  }

  val receiveCommand: Receive = {
    case ValueUpdate(value) => persist(ValueAppended(StockValue(value)))(updateState)
    case "print" => log.info(s"Current state: $state")
  }

  override def postStop() = log.info(s"Stopping [${self.path}]")
}
Example 181
Source File: HandlingExceptionsServer.scala From Akka-Cookbook with MIT License | 5 votes |
package com.packt.chapter9

import akka.actor.{ActorRef, ActorSystem, Props}
import akka.http.scaladsl.server.HttpApp
import akka.http.scaladsl.server.Directives._
import akka.http.scaladsl.settings.ServerSettings
import com.typesafe.config.ConfigFactory
import akka.pattern.ask
import akka.util.Timeout
import scala.concurrent.duration._

// HTTP server demonstrating centralized exception handling: the
// routeExceptionHandler mixed in from RouteExceptionHandler converts route
// exceptions (division by zero, ask timeouts) into HTTP responses.
class HandlingExceptionsServer(someActor: ActorRef) extends HttpApp with RouteExceptionHandler {

  implicit val timeout = Timeout(500 millis)

  val route = handleExceptions(routeExceptionHandler) {
    path("divide") {
      parameters('a.as[Int], 'b.as[Int]) { (a, b) =>
        complete {
          // Throws ArithmeticException when b == 0 — caught by the handler.
          val result = a / b
          s"Result is: $result"
        }
      }
    } ~
    path("futureTimingOut") {
      // The ask times out when the actor does not reply within 500 millis.
      onSuccess(someActor ? "Something") {
        case _ => complete("Actor finished processing.")
      }
    }
  }
}

object HandlingExceptionsApplication extends App {
  val actorSystem = ActorSystem()
  // UnresponsiveActor presumably never replies, triggering the timeout path.
  val unresponsiveActor = actorSystem.actorOf(Props[UnresponsiveActor])
  new HandlingExceptionsServer(unresponsiveActor).startServer("0.0.0.0", 8088, ServerSettings(ConfigFactory.load))
}
Example 182
Source File: IntegratingWithActorsApplication.scala From Akka-Cookbook with MIT License | 5 votes |
package com.packt.chapter8

import akka.actor.{ActorSystem, Props}
import akka.stream.{ActorMaterializer, OverflowStrategy}
import akka.stream.scaladsl._
import akka.pattern.ask
import akka.util.Timeout
import com.packt.chapter8.SinkActor.{AckSinkActor, CompletedSinkActor, InitSinkActor}
import scala.concurrent.duration._

// Stream whose source, transform and sink are all actor-backed: SourceActor
// offers elements into a backpressured queue, StringCleanerActor transforms
// them via ask, and SinkActor consumes them with ack-based flow control.
object IntegratingWithActorsApplication extends App {
  implicit val actorSystem = ActorSystem("IntegratingWithActors")
  implicit val actorMaterializer = ActorMaterializer()
  implicit val askTimeout = Timeout(5 seconds)

  val stringCleaner = actorSystem.actorOf(Props[StringCleanerActor])
  val sinkActor = actorSystem.actorOf(Props[SinkActor])

  // Backpressure the producer once more than 100 elements are buffered.
  val source = Source.queue[String](100, OverflowStrategy.backpressure)
  val sink = Sink.actorRefWithAck[String](sinkActor, InitSinkActor, AckSinkActor, CompletedSinkActor)

  val queue = source
    .mapAsync(parallelism = 5)(elem => (stringCleaner ? elem).mapTo[String])
    .to(sink)
    .run()

  // The materialized queue is handed to SourceActor, which feeds the stream.
  actorSystem.actorOf(SourceActor.props(queue))
}
Example 183
Source File: SourceActor.scala From Akka-Cookbook with MIT License | 5 votes |
package com.packt.chapter8

import akka.actor.{Actor, Props}
import akka.stream.scaladsl.SourceQueueWithComplete
import scala.concurrent.duration._

object SourceActor {
  case object Tick

  /** Props factory wiring the actor to the stream's materialized input queue. */
  def props(sourceQueue: SourceQueueWithComplete[String]) = Props(new SourceActor(sourceQueue))
}

/**
 * Periodically pushes a fixed test string into the stream queue.
 * A Tick is self-scheduled every 5 seconds, starting immediately.
 */
class SourceActor(sourceQueue: SourceQueueWithComplete[String]) extends Actor {
  import SourceActor._
  import context.dispatcher

  // The element offered on every tick.
  private val element = "Integrating!!### Akka$$$ Actors? with}{ Akka** Streams"

  override def preStart() =
    context.system.scheduler.schedule(0 seconds, 5 seconds, self, Tick)

  def receive = {
    case Tick =>
      println(s"Offering element from SourceActor")
      sourceQueue.offer(element)
  }
}
Example 184
Source File: CountDownLatch.scala From Akka-Cookbook with MIT License | 5 votes |
package com.packt.chapter10

import akka.Done
import akka.actor.{Actor, ActorLogging, ActorRef, ActorSystem, Props}
import scala.concurrent.{Await, Future, Promise}
import scala.concurrent.duration._

object CountDownLatch {
  case object CountDown

  // Builds the latch plus its backing actor; the actor completes `promise`
  // after `count` CountDown messages have arrived.
  // NOTE(review): count is assumed >= 1 — with 0 the promise never completes.
  def apply(count:Int)(implicit actorSystem: ActorSystem) = {
    val promise = Promise[Done]()
    val props = Props(classOf[CountDownLatchActor], count, promise)
    val countDownLatchActor = actorSystem.actorOf(props, "countDownLatchActor")
    new CountDownLatch(countDownLatchActor, promise)
  }
}

// Client-facing handle: countDown() is fire-and-forget; await() blocks for at
// most 10 minutes, while `result` exposes the non-blocking Future alternative.
class CountDownLatch(private val actor: ActorRef, private val promise: Promise[Done]) {
  import CountDownLatch._

  def countDown() = actor ! CountDown
  def await() : Unit = Await.result(promise.future, 10 minutes)
  val result : Future[Done] = promise.future
}

// Counts CountDown messages and opens the gate (completes the promise) when
// the configured count is reached, then stops itself.
class CountDownLatchActor(count: Int, promise: Promise[Done]) extends Actor with ActorLogging {
  import CountDownLatch._

  var remaining = count

  def receive = {
    // Final count: complete the promise and stop — `remaining` is not updated
    // because the actor terminates immediately anyway.
    case CountDown if remaining - 1 == 0 =>
      log.info("Counting down")
      promise.success(Done)
      log.info("Gate opened")
      context.stop(self)
    case CountDown =>
      log.info("Counting down")
      remaining -= 1
  }
}
Example 185
Source File: ShutdownPatternMaster.scala From Akka-Cookbook with MIT License | 5 votes |
package com.packt.chapter10

import akka.actor.{Actor, ActorLogging, Props}
import scala.concurrent.duration._

// Parent in the reaper shutdown-pattern example: spawns two workers once
// registered (preStartPostRegistration), then provokes a failure in worker1
// after 2 seconds to exercise the shutdown flow.
class ShutdownPatternMaster extends ReaperAwareActor with ActorLogging {
  import context.dispatcher

  // All interaction happens via lifecycle hooks; no messages are handled.
  val receive = Actor.emptyBehavior

  override def preStartPostRegistration() = {
    val worker1 = context.actorOf(Props[ShutdownPatternWorker], "worker1")
    context.actorOf(Props[ShutdownPatternWorker], "worker2")
    // Deliver an Exception as a message to trigger the failure scenario.
    context.system.scheduler.scheduleOnce(2 second, worker1, new Exception("something went wrong"))
    log.info(s"${self.path.name} is running")
  }

  override def postStop() = log.info(s"${self.path.name} has stopped")
}
Example 186
Source File: CountDownLatchApp.scala From Akka-Cookbook with MIT License | 5 votes |
package com.packt.chapter10

import akka.actor.{ActorSystem, Props}
import akka.routing.RoundRobinPool

// Drives the CountDownLatch example: two routed workers each count down once,
// and completion is observed both via the Future and via blocking await().
object CountDownLatchApp extends App {
  implicit val actorSystem = ActorSystem()
  import actorSystem._

  val routeesToSetUp = 2
  val countDownLatch = CountDownLatch(routeesToSetUp)

  // Round-robin routing guarantees every routee receives work and counts down.
  actorSystem.actorOf(Props(classOf[CountDownLatchWorker], countDownLatch)
    .withRouter(RoundRobinPool(routeesToSetUp)), "workers")

  //Future based solution
  countDownLatch.result.onSuccess { case _ => log.info("Future completed successfully") }

  //Await based solution
  countDownLatch.await()
  actorSystem.terminate()
}
Example 187
Source File: ServiceHandlersCreator.scala From Akka-Cookbook with MIT License | 5 votes |
package com.packt.chapter10

import akka.actor.{ActorRef, Props}

/**
 * OrderedKiller variant that spawns four service-handler children and defines
 * their kill order as alphabetical by actor name.
 */
class ServiceHandlersCreator extends OrderedKiller {

  override def preStart() = {
    // Creation order matches the original hand-written sequence.
    val handlerNames = List(
      "DatabaseHandler1",
      "DatabaseHandler2",
      "ExternalSOAPHandler",
      "ExternalRESTHandler"
    )
    handlerNames.foreach(name => context.actorOf(Props[ServiceHandler], name))
  }

  // Children are killed in ascending name order.
  def orderChildren(unorderedChildren: Iterable[ActorRef]) = {
    val result = unorderedChildren.toList.sortBy(_.path.name)
    log.info(s"Killing order is ${result.map(_.path.name)}")
    result
  }
}
Example 188
Source File: BalancingWorkApp.scala From Akka-Cookbook with MIT License | 5 votes |
package com.packt.chapter10

import akka.actor.{ActorSystem, Props}
import akka.routing.{BalancingPool, SmallestMailboxPool}
import com.packt.chapter10.BalancedWorker.WorkTask
import scala.concurrent.duration._
import scala.util.Random

// Feeds random-duration tasks to a BalancingPool: routees share one mailbox,
// so idle workers pick up pending work.
object BalancingDispatcherApp extends App {
  val actorSystem = ActorSystem()
  val workerPool = actorSystem.actorOf(Props[BalancedWorker].withRouter(BalancingPool(4)),"workers")
  import actorSystem.dispatcher

  // New task every 200ms after a 1s initial delay.
  actorSystem.scheduler.schedule(1 second, 200 millis)(sendTask)

  def sendTask : Unit = workerPool ! WorkTask(Random.nextInt(10000))
}

// Same workload routed with SmallestMailboxPool: each routee keeps its own
// mailbox and new work goes to the least-loaded one.
object SmallestMailboxRouterApp extends App {
  val actorSystem = ActorSystem()
  val workerPool = actorSystem.actorOf(Props[BalancedWorker].withRouter(SmallestMailboxPool(4)),"workers")
  import actorSystem.dispatcher

  actorSystem.scheduler.schedule(1 second, 200 millis)(sendTask)

  def sendTask() : Unit = workerPool ! WorkTask(Random.nextInt(10000))
}
Example 189
Source File: EnvelopingActorApp.scala From Akka-Cookbook with MIT License | 5 votes |
package com.packt.chapter10

import java.util.UUID

import akka.actor.{ActorSystem, Props}

/**
 * Demonstrates the envelope pattern: every message sent through
 * EnvelopingActor is wrapped with the metadata produced by `headers`
 * before being forwarded to the receiver.
 */
object EnvelopingActorApp extends App {

  /** Metadata attached to each outgoing envelope. */
  def headers(msg: Any) = Map(
    "timestamp" -> System.currentTimeMillis(),
    "correlationId" -> UUID.randomUUID().toString
  )

  val actorSystem = ActorSystem()

  // Plain receiver that sees the enveloped messages.
  val envelopReceived = actorSystem.actorOf(Props[EnvelopeReceiver], "receiver")

  // Wraps each message using `headers` and forwards it to the receiver.
  val envelopingActor = actorSystem.actorOf(Props(classOf[EnvelopingActor], envelopReceived, headers _))

  envelopingActor ! "Hello!"
}
Example 190
Source File: Shutdown.scala From Akka-Cookbook with MIT License | 5 votes |
package com.packt.chapter1

import akka.actor.{PoisonPill, Props, ActorSystem, Actor}

/** User-level message asking a ShutdownActor to stop itself gracefully. */
case object Stop

// Demonstrates two ways of stopping an actor: the built-in PoisonPill and a
// user-defined Stop message handled with context.stop. In both cases the
// message sent after the stop request ends up in dead letters.
object ShutdownApp extends App{
  val actorSystem = ActorSystem("HelloAkka")

  val shutdownActor1 = actorSystem.actorOf(Props[ShutdownActor], "shutdownActor1")
  shutdownActor1 ! "hello"
  shutdownActor1 ! PoisonPill
  shutdownActor1 ! "Are you there?"

  val shutdownActor2 = actorSystem.actorOf(Props[ShutdownActor], "shutdownActor2")
  shutdownActor2 ! "hello"
  shutdownActor2 ! Stop
  shutdownActor2 ! "Are you there?"
}

// Prints strings; stops itself on Stop.
class ShutdownActor extends Actor {
  override def receive: Receive = {
    case msg:String => println(s"$msg")
    case Stop => context.stop(self)
  }
}

// FIX(review): the original source ended with `{ case object Stop }`, a bare
// block wrapping the declaration. That is not legal at the top level of a
// file, and it would leave `Stop` out of scope for ShutdownApp/ShutdownActor.
// `case object Stop` is now declared at the top level instead.
Example 191
Source File: CustomMailbox.scala From Akka-Cookbook with MIT License | 5 votes |
package com.packt.chapter1

import java.util.concurrent.ConcurrentLinkedQueue
import akka.actor.{Props, Actor, ActorSystem, ActorRef}
import akka.dispatch.{MailboxType, ProducesMessageQueue, Envelope, MessageQueue}
import com.typesafe.config.Config

// Demonstrates a custom mailbox: MySpecialActor runs on "custom-dispatcher",
// whose mailbox (MyMessageQueue) only accepts mail from a sender actor
// named "MyActor".
object CustomMailbox extends App {
  val actorSystem = ActorSystem("HelloAkka")
  val actor = actorSystem.actorOf(Props[MySpecialActor].withDispatcher("custom-dispatcher"))
  // Two relays: only the one actually named "MyActor" gets through the mailbox.
  val actor1 = actorSystem.actorOf(Props[MyActor],"xyz")
  val actor2 = actorSystem.actorOf(Props[MyActor],"MyActor")
  actor1 ! ("hello", actor)
  actor2 ! ("hello", actor)
}

class MySpecialActor extends Actor {
  override def receive: Receive = {
    case msg: String => println(s"msg is $msg" )
  }
}

// Relays a string to the given actor so the custom mailbox sees this actor
// (rather than the App's deadLetters sender) as the message sender.
class MyActor extends Actor {
  override def receive: Receive = {
    case (msg: String, actorRef: ActorRef) => actorRef ! msg
    case msg => println(msg)
  }
}

trait MyUnboundedMessageQueueSemantics

// This is the MessageQueue implementation
class MyMessageQueue extends MessageQueue {

  private final val queue = new ConcurrentLinkedQueue[Envelope]()

  // these should be implemented; queue used as example
  // Only envelopes whose sender is named "MyActor" are enqueued; all other
  // senders get a rejection reply and their message is silently dropped.
  def enqueue(receiver: ActorRef, handle: Envelope): Unit = {
    if(handle.sender.path.name == "MyActor") {
      handle.sender ! "Hey dude, How are you?, I Know your name,processing your request"
      queue.offer(handle)
    } else handle.sender ! "I don't talk to strangers, I can't process your request"
  }

  def dequeue(): Envelope = queue.poll
  def numberOfMessages: Int = queue.size
  def hasMessages: Boolean = !queue.isEmpty

  // Drain remaining mail to dead letters when the mailbox is disposed.
  def cleanUp(owner: ActorRef, deadLetters: MessageQueue) {
    while (hasMessages) {
      deadLetters.enqueue(owner, dequeue())
    }
  }
}

class MyUnboundedMailbox extends MailboxType
  with ProducesMessageQueue[MyMessageQueue] {

  // This constructor signature must exist, it will be called by Akka
  def this(settings: ActorSystem.Settings, config: Config) = {
    // put your initialization code here
    this()
  }

  // The create method is called to create the MessageQueue
  final override def create(owner: Option[ActorRef], system: Option[ActorSystem]): MessageQueue =
    new MyMessageQueue()
}
Example 192
Source File: BecomeUnBecome.scala From Akka-Cookbook with MIT License | 5 votes |
package com.packt.chapter1

import akka.actor.{Props, ActorSystem, Actor}

// Demonstrates hot-swapping actor behavior with context.become: booleans flip
// the active Receive, and each state only prints its own message type.
object BecomeUnBecomeApp extends App {
  val actorSystem = ActorSystem("HelloAkka")
  val becomeUnBecome = actorSystem.actorOf(Props[BecomeUnBecomeActor])
  becomeUnBecome ! true
  becomeUnBecome ! "Hello how are you?"   // printed: String state is active
  becomeUnBecome ! false
  becomeUnBecome ! 1100                   // printed: Int state is active
  becomeUnBecome ! true
  becomeUnBecome ! "What do u do?"
}

class BecomeUnBecomeActor extends Actor {
  // Initial behavior: waits for a Boolean that selects a state.
  def receive: Receive = {
    case true => context.become(isStateTrue)
    case false => context.become(isStateFalse)
    case _ => println("don't know what you want to say !! ")
  }

  // String-handling state; `false` switches to the Int state.
  def isStateTrue: Receive = {
    case msg : String => println(s"$msg")
    case false => context.become(isStateFalse)
  }

  // Int-handling state; `true` switches back to the String state.
  def isStateFalse: Receive = {
    case msg : Int => println(s"$msg")
    case true => context.become(isStateTrue)
  }
}
Example 193
Source File: Communication.scala From Akka-Cookbook with MIT License | 5 votes |
package com.packt.chapter1

import akka.actor.{ActorRef, Actor, ActorSystem, Props}
import scala.util.Random._

// Message protocol for the two-actor request/reply exchange below.
object Messages {
  case class Done(randomNumber: Int)
  case object GiveMeRandomNumber
  case class Start(actorRef: ActorRef)
}

// Replies to every GiveMeRandomNumber with Done(randomInt).
class RandomNumberGeneratorActor extends Actor {
  import Messages._
  override def receive: Receive = {
    case GiveMeRandomNumber =>
      println("received a message to generate a random integer")
      val randomNumber = nextInt
      sender ! Done(randomNumber)
  }
}

// Kicks off the exchange on Start and prints the number it receives back.
class QueryActor extends Actor {
  import Messages._
  override def receive: Receive = {
    case Start(actorRef) =>
      println(s"send me the next random number")
      actorRef ! GiveMeRandomNumber
    case Done(randomNumber) =>
      println(s"received a random number $randomNumber")
  }
}

object Communication extends App {
  import Messages._
  val actorSystem = ActorSystem("HelloAkka")
  val randomNumberGenerator = actorSystem.actorOf(Props[RandomNumberGeneratorActor], "randomNumberGeneratorActor")
  val queryActor = actorSystem.actorOf(Props[QueryActor], "queryActor")
  queryActor ! Start(randomNumberGenerator)
}
Example 194
Source File: PriorityMailBox.scala From Akka-Cookbook with MIT License | 5 votes |
package com.packt.chapter1

import akka.actor.{Props, ActorSystem, Actor}
import akka.dispatch.{PriorityGenerator, UnboundedPriorityMailbox}
import com.typesafe.config.Config

// Demonstrates a priority mailbox: messages below are delivered to the actor
// in priority order (String, then Int, then Long, then the rest) rather than
// in send order, because "prio-dispatcher" uses MyPriorityActorMailbox.
object PriorityMailBoxApp extends App {
  val actorSystem = ActorSystem("HelloAkka")
  val myPriorityActor = actorSystem.actorOf(Props[MyPriorityActor].withDispatcher("prio-dispatcher"))

  myPriorityActor ! 6.0
  myPriorityActor ! 1
  myPriorityActor ! 5.0
  myPriorityActor ! 3
  myPriorityActor ! "Hello"
  myPriorityActor ! 5
  myPriorityActor ! "I am priority actor"
  myPriorityActor ! "I process string messages first,then integer, long and others"
}

// Simply prints whatever arrives; the ordering comes from the mailbox.
class MyPriorityActor extends Actor {
  def receive: PartialFunction[Any, Unit] = {
    // Int Messages
    case x: Int => println(x)
    // String Messages
    case x: String => println(x)
    // Long messages
    case x: Long => println(x)
    // other messages
    case x => println(x)
  }
}

class MyPriorityActorMailbox(settings: ActorSystem.Settings, config: Config)
  extends UnboundedPriorityMailbox(
    // Create a new PriorityGenerator, lower prio means more important
    PriorityGenerator {
      // Int Messages
      case x: Int => 1
      // String Messages
      case x: String => 0
      // Long messages
      case x: Long => 2
      // other messages
      case _ => 3
    })
Example 195
Source File: FibonacciActor.scala From Akka-Cookbook with MIT License | 5 votes |
package com.packt.chapter1

import akka.actor.{Actor, ActorSystem, Props}
import akka.pattern.ask
import akka.util.Timeout

import scala.annotation.tailrec
import scala.concurrent.Await
import scala.concurrent.duration._

/** Replies to an Int message `n` with the n-th Fibonacci number. */
class FibonacciActor extends Actor {
  override def receive: Receive = {
    case num: Int =>
      val fibonacciNumber = fib(num)
      sender() ! fibonacciNumber
  }

  /**
   * n-th Fibonacci number (fib(0) = 0, fib(1) = 1).
   * Tail-recursive accumulator form: linear time instead of the
   * exponential naive double recursion.
   */
  def fib(n: Int): Int = {
    @tailrec
    def loop(i: Int, prev: Int, curr: Int): Int =
      if (i == 0) prev else loop(i - 1, curr, prev + curr)
    loop(n, 0, 1)
  }
}

/** Asks the actor for fib(10) and blocks for the answer (fine at the app edge). */
object FibonacciActorApp extends App {
  implicit val timeout = Timeout(10.seconds)
  val actorSystem = ActorSystem("HelloAkka")
  val actor = actorSystem.actorOf(Props[FibonacciActor])

  // Asking for the result from the actor; ? returns a Future[Any], so narrow it.
  val future = (actor ? 10).mapTo[Int]
  val fibonacciNumber = Await.result(future, 10.seconds)
  println(fibonacciNumber)
}
Example 196
Source File: ChatClient.scala From Akka-Cookbook with MIT License | 5 votes |
package com.packt.chapter7

import akka.actor.{Actor, ActorRef, Props}
import com.packt.chapter7.ChatServer.{Connect, Disconnect, Disconnected, Message}
import akka.pattern.ask
import akka.pattern.pipe

import scala.concurrent.duration._
import akka.util.Timeout

object ChatClient {
  /** Props factory so callers never close over actor internals. */
  def props(chatServer: ActorRef) = Props(new ChatClient(chatServer))
}

/**
 * Client actor for [[ChatServer]]: connects on start, forwards String
 * messages as chat messages, and stops itself once the server confirms
 * disconnection.
 */
class ChatClient(chatServer: ActorRef) extends Actor {
  import context.dispatcher

  implicit val timeout = Timeout(5.seconds)

  // Side-effecting lifecycle hook: keep the parens and explicit Unit.
  override def preStart(): Unit = {
    chatServer ! Connect
  }

  def receive = {
    case Disconnect =>
      // Ask the server to disconnect and pipe its Disconnected reply back to us.
      (chatServer ? Disconnect).pipeTo(self)
    case Disconnected =>
      context.stop(self)
    case body: String =>
      chatServer ! Message(self, body)
    case msg: Message =>
      println(s"Message from [${msg.author}] at [${msg.creationTimestamp}]: ${msg.body}")
  }
}
Example 197
Source File: LookingUpRemoteApplication.scala From Akka-Cookbook with MIT License | 5 votes |
package com.packt.chapter7

import akka.actor.{ActorRef, ActorSystem, Props}

import scala.concurrent.duration._

/**
 * Looks up a remote actor by path. Sending through the ActorSelection works
 * immediately; resolveOne turns the selection into a concrete ActorRef.
 */
object LookingUpActorSelection extends App {
  val actorSystem = ActorSystem("LookingUpActors")
  implicit val dispatcher = actorSystem.dispatcher

  val selection =
    actorSystem.actorSelection("akka.tcp://[email protected]:2553/user/remoteActor")
  selection ! "test"

  // Future.onSuccess is deprecated since Scala 2.12; foreach is the replacement.
  selection.resolveOne(3.seconds).foreach { actorRef =>
    println("We got an ActorRef")
    actorRef ! "test"
  }
}

/** Remote side: hosts the actor the selection above points at. */
object LookingUpRemoteActors extends App {
  val actorSystem = ActorSystem("LookingUpRemoteActors")
  actorSystem.actorOf(Props[SimpleActor], "remoteActor")
}
Example 198
Source File: ChatClientInterface.scala From Akka-Cookbook with MIT License | 5 votes |
package com.packt.chapter7

import akka.actor.{Actor, ActorRef, Props}
import com.packt.chapter7.ChatServer.Disconnect

import scala.io.StdIn._

object ChatClientInterface {
  /** Self-sent tick that drives the read-a-line loop. */
  case object Check

  /** Props factory for the console interface actor. */
  def props(chatClient: ActorRef) = Props(new ChatClientInterface(chatClient))
}

/**
 * Console front end for a [[ChatClient]]: each Check reads one line from
 * stdin, forwards it as a chat message, and re-schedules itself until the
 * user types DISCONNECT.
 */
class ChatClientInterface(chatClient: ActorRef) extends Actor {
  import ChatClientInterface._

  override def preStart() = {
    println("You are logged in. Please type and press enter to send messages. Type 'DISCONNECT' to log out.")
    // Prime the read loop.
    self ! Check
  }

  def receive = {
    case Check =>
      val input = readLine()
      if (input == "DISCONNECT") {
        chatClient ! Disconnect
        println("Disconnecting...")
        context.stop(self)
      } else {
        chatClient ! input
        // Keep looping for the next line of input.
        self ! Check
      }
  }
}
Example 199
Source File: ChatServer.scala From Akka-Cookbook with MIT License | 5 votes |
package com.packt.chapter7

import akka.actor.{Actor, ActorRef, Props, Terminated}

object ChatServer {
  /** Client wants to join the chat room. */
  case object Connect
  /** Client wants to leave; server confirms with Disconnected. */
  case object Disconnect
  /** Confirmation sent back to a disconnecting client. */
  case object Disconnected
  /** A chat message, stamped at creation time. */
  case class Message(author: ActorRef, body: String, creationTimestamp: Long = System.currentTimeMillis())

  /** Props factory for the server actor. */
  def props = Props(new ChatServer())
}

/**
 * Chat room actor: tracks connected clients, watches them so crashed
 * clients are cleaned up, and broadcasts each Message to everyone except
 * its author.
 */
class ChatServer extends Actor {
  import ChatServer._

  // Mutable actor state is fine: only this actor's message handler touches it.
  var onlineClients = Set.empty[ActorRef]

  def receive = {
    case Connect =>
      val client = sender()
      onlineClients += client
      // Watch so a Terminated arrives if the client dies without disconnecting.
      context.watch(client)
    case Disconnect =>
      val client = sender()
      onlineClients -= client
      context.unwatch(client)
      client ! Disconnected
    case Terminated(ref) =>
      onlineClients -= ref
    case msg: Message =>
      // Broadcast to everyone but the author.
      (onlineClients - sender()).foreach(_ ! msg)
  }
}
Example 200
Source File: ChatApplication.scala From Akka-Cookbook with MIT License | 5 votes |
package com.packt.chapter7

import akka.actor.{ActorRef, ActorSystem, Props}

import scala.concurrent.duration._

/**
 * Client entry point: resolves the remote chat server, then spins up a
 * ChatClient and its console interface once the server ref is available.
 */
object ChatClientApplication extends App {
  val actorSystem = ActorSystem("ChatServer")
  implicit val dispatcher = actorSystem.dispatcher

  val chatServerAddress = "akka.tcp://[email protected]:2552/user/chatServer"
  // Future.onSuccess is deprecated since Scala 2.12; foreach is the replacement.
  actorSystem.actorSelection(chatServerAddress).resolveOne(3.seconds).foreach { chatServer =>
    val client = actorSystem.actorOf(ChatClient.props(chatServer), "chatClient")
    actorSystem.actorOf(ChatClientInterface.props(client), "chatClientInterface")
  }
}

/** Server entry point: hosts the chat room actor at /user/chatServer. */
object ChatServerApplication extends App {
  val actorSystem = ActorSystem("ChatServer")
  actorSystem.actorOf(ChatServer.props, "chatServer")
}