akka.remote.testkit.MultiNodeSpec Scala Examples
The following examples show how to use akka.remote.testkit.MultiNodeSpec.
You can vote up the ones you like or vote down the ones you don't like,
and go to the original project or source file by following the links above each example.
Example 1
Source File: ClusteredMultiNodeUtils.scala From lagom with Apache License 2.0 | 5 votes |
package com.lightbend.lagom.internal.cluster

import akka.actor.ActorRef
import akka.actor.Address
import akka.cluster.Cluster
import akka.cluster.MemberStatus
import akka.remote.testconductor.RoleName
import akka.remote.testkit.MultiNodeSpec
import akka.testkit.ImplicitSender
import com.lightbend.lagom.internal.cluster.ClusterMultiNodeConfig.node1

import scala.concurrent.duration._

/**
 * Base class for clustered multi-node tests: at startup it joins every role
 * into a single Akka cluster (seeded on node1) and blocks until all
 * `numOfNodes` members are Up.
 */
abstract class ClusteredMultiNodeUtils(val numOfNodes: Int, multiNodeConfig: ClusterMultiNodeConfig)
    extends MultiNodeSpec(multiNodeConfig, ClusterMultiNodeActorSystemFactory.createActorSystem())
    with STMultiNodeSpec
    with ImplicitSender {

  override def initialParticipants: Int = roles.size

  // Joins `from` to the cluster node at `to`, then synchronizes all nodes on a barrier.
  def join(from: RoleName, to: RoleName): Unit = {
    runOn(from) {
      Cluster(system).join(node(to).address)
    }
    enterBarrier(from.name + "-joined")
  }

  // Resolves a possibly-local actor reference to a fully-qualified cluster address.
  def fullAddress(ref: ActorRef): Address =
    if (ref.path.address.hasLocalScope) Cluster(system).selfAddress
    else ref.path.address

  protected override def atStartup(): Unit = {
    // node1 seeds itself; every other role then joins via node1.
    join(node1, node1)
    roles.tail.foreach(n => join(n, node1))
    within(15.seconds) {
      awaitAssert(Cluster(system).state.members.size should be(numOfNodes))
      awaitAssert(
        Cluster(system).state.members.toIndexedSeq.map(_.status).distinct should be(IndexedSeq(MemberStatus.Up))
      )
    }
    enterBarrier("startup")
  }
}
Example 2
Source File: LithiumMultiNodeSpec.scala From lithium with Apache License 2.0 | 5 votes |
package com.swissborg.lithium

import akka.actor.{ActorSystem, Address}
import akka.cluster.Cluster
import akka.cluster.MemberStatus._
import akka.remote.testconductor.RoleName
import akka.remote.testkit.{MultiNodeConfig, MultiNodeSpec, MultiNodeSpecCallbacks}
import akka.testkit.ImplicitSender
import org.scalatest.BeforeAndAfterAll
import org.scalatest.concurrent.{Eventually, IntegrationPatience}
import org.scalatest.matchers.should.Matchers
import org.scalatest.wordspec.AnyWordSpecLike

/**
 * Base class for Lithium multi-node tests. Provides `waitFor*` helpers that
 * block until the cluster reaches a given membership condition (up,
 * unreachable, surviving, leaving, or self-downed).
 */
abstract class LithiumMultiNodeSpec(val config: MultiNodeConfig)
    extends MultiNodeSpec(config)
    with MultiNodeSpecCallbacks
    with AnyWordSpecLike
    with Matchers
    with BeforeAndAfterAll
    with ImplicitSender
    with Eventually
    with IntegrationPatience {

  override def beforeAll(): Unit = multiNodeSpecBeforeAll()
  override def afterAll(): Unit  = multiNodeSpecAfterAll()

  // Role -> address lookup table, resolved once up front.
  private val addresses: Map[RoleName, Address] = roles.map(r => r -> node(r).address).toMap

  protected def addressOf(roleName: RoleName): Address = addresses(roleName)

  protected def waitToBecomeUnreachable(roleNames: RoleName*): Unit =
    awaitCond(allUnreachable(roleNames: _*))

  protected def waitForSurvivors(roleNames: RoleName*): Unit =
    awaitCond(allSurvivors(roleNames: _*))

  protected def waitForUp(roleNames: RoleName*): Unit =
    awaitCond(allUp(roleNames: _*))

  protected def waitForSelfDowning(implicit system: ActorSystem): Unit =
    awaitCond(downedItself)

  protected def waitForAllLeaving(roleNames: RoleName*): Unit =
    awaitCond(allLeaving(roleNames: _*))

  protected def waitExistsAllDownOrGone(groups: Seq[Seq[RoleName]]): Unit =
    awaitCond(existsAllDownOrGone(groups))

  private def allUnreachable(roleNames: RoleName*): Boolean =
    roleNames.forall(role => Cluster(system).state.unreachable.exists(_.address === addressOf(role)))

  private def allSurvivors(roleNames: RoleName*): Boolean =
    roleNames.forall(role => Cluster(system).state.members.exists(_.address === addressOf(role)))

  private def allUp(roleNames: RoleName*): Boolean =
    roleNames.forall(
      role => Cluster(system).state.members.exists(m => m.address === addressOf(role) && m.status === Up)
    )

  // True when at least one of the given groups has fully left (or been downed).
  private def existsAllDownOrGone(groups: Seq[Seq[RoleName]]): Boolean =
    groups.exists(group => allLeaving(group: _*))

  private def downedItself(implicit system: ActorSystem): Boolean = {
    val selfAddress = Cluster(system).selfAddress
    Cluster(system).state.members.exists(
      m => m.address === selfAddress && (m.status === Exiting || m.status === Down || m.status === Removed)
    )
  }

  private def allLeaving(roleNames: RoleName*): Boolean =
    roleNames.forall { role =>
      val members     = Cluster(system).state.members
      val unreachable = Cluster(system).state.unreachable
      val address     = addressOf(role)

      unreachable.isEmpty && // no unreachable members
      (members.exists(m => m.address === address && (m.status === Down || m.status === Exiting)) || // member is down
      !members.exists(_.address === address)) // member is not in the cluster
    }
}
Example 3
Source File: STMultiNodeSpec.scala From fusion-data with Apache License 2.0 | 5 votes |
package sample.multinode

import akka.remote.testkit.{MultiNodeSpec, MultiNodeSpecCallbacks}
import org.scalatest.{BeforeAndAfterAll, Matchers, WordSpecLike}

import scala.language.implicitConversions

/**
 * Hooks the MultiNodeSpec lifecycle callbacks into ScalaTest so that a
 * multi-node spec runs like an ordinary WordSpec-style test.
 */
trait STMultiNodeSpec extends MultiNodeSpecCallbacks with WordSpecLike with Matchers with BeforeAndAfterAll {
  self: MultiNodeSpec =>

  override protected def beforeAll(): Unit = {
    super.beforeAll()
    multiNodeSpecBeforeAll()
  }

  override protected def afterAll(): Unit = {
    multiNodeSpecAfterAll()
    super.afterAll()
  }

  // Might not be needed anymore if we find a nice way to tag all logging from a node
  implicit override def convertToWordSpecStringWrapper(s: String): WordSpecStringWrapper =
    new WordSpecStringWrapper(s"$s (on node '${self.myself.name}', $getClass)")
}
Example 4
Source File: MultiNodeSampleTest.scala From fusion-data with Apache License 2.0 | 5 votes |
package sample.multinode

import akka.actor.{Actor, Props}
import akka.remote.testkit.{MultiNodeConfig, MultiNodeSpec}
import akka.testkit.ImplicitSender

object MultiNodeSampleConfig extends MultiNodeConfig {
  val node1 = role("node1")
  val node2 = role("node2")
}

class MultiNodeSampleTestMultiJvmNode1 extends MultiNodeSampleTest
class MultiNodeSampleTestMultiJvmNode2 extends MultiNodeSampleTest

object MultiNodeSampleTest {
  // Replies "pong" to every "ping" it receives.
  class Ponger extends Actor {
    def receive = {
      case "ping" => sender() ! "pong"
    }
  }
}

/** Minimal two-node ping/pong sample built on MultiNodeSpec. */
class MultiNodeSampleTest extends MultiNodeSpec(MultiNodeSampleConfig) with STMultiNodeSpec with ImplicitSender {

  import MultiNodeSampleConfig._
  import MultiNodeSampleTest._

  override def initialParticipants: Int = roles.size

  "A MultiNodeSample" must {

    "wait for all nodes to enter a barrier" in {
      // Execution continues only once every node has entered the "startup" barrier.
      enterBarrier("startup")
    }

    "send to and receive from a remote node" in {
      runOn(node1) {
        // Wait until the other node has also entered the "deployed" barrier.
        enterBarrier("deployed")
        val ponger = system.actorSelection(node(node2) / "user" / "ponger")
        ponger ! "ping"
        import scala.concurrent.duration._
        expectMsg(10.seconds, "pong")
        println(System.getProperty("akka.remote.port") + " received pong")
      }

      runOn(node2) {
        system.actorOf(Props[Ponger], "ponger")
        enterBarrier("deployed")
        println(System.getProperty("akka.remote.port") + " started ponger")
      }

      enterBarrier("finished")
    }
  }
}
Example 5
Source File: MultiNodeSampleSpec.scala From fusion-data with Apache License 2.0 | 5 votes |
package sample

import akka.actor.{Actor, Props}
import akka.remote.testconductor.RoleName
import akka.remote.testkit.{MultiNodeConfig, MultiNodeSpec}
import akka.testkit.ImplicitSender
import mass.STMultiNodeSpec

object MultiNodeSampleConfig extends MultiNodeConfig {
  val node1: RoleName = role("node1")
  val node2: RoleName = role("node2")
}

object MultiNodeSampleSpec {
  // Replies "pong" to every "ping" it receives.
  class Ponger extends Actor {
    def receive: Receive = {
      case "ping" => sender() ! "pong"
    }
  }
}

/** Two-node ping/pong spec demonstrating barriers and remote actor lookup. */
class MultiNodeSampleSpec extends MultiNodeSpec(MultiNodeSampleConfig) with STMultiNodeSpec with ImplicitSender {

  import MultiNodeSampleSpec._
  import MultiNodeSampleConfig._

  // Number of participants; later barriers (enterBarrier) proceed only once
  // this many nodes have arrived.
  def initialParticipants: Int = roles.size

  "A MultiNodeSampleSpec" must {

    "wait for all nodes to enter a barrier" in {
      enterBarrier("startup")
    }

    "send to and receive from a remote node" in {
      runOn(node1) {
        // Enter the "deployed" barrier, waiting for the other node to finish creating its actor.
        enterBarrier("deployed")
        val ponger = system.actorSelection(node(node2) / "user" / "ponger")
        ponger ! "ping"
        import scala.concurrent.duration._
        expectMsg(10.seconds, "pong") // block until the reply arrives; 10-second timeout
      }

      runOn(node2) {
        system.actorOf(Props[Ponger], "ponger") // create the actor first, then enter the "deployed" barrier
        enterBarrier("deployed")
      }

      enterBarrier("finished")
    }
  }
}

class MultiNodeSampleSpecMultiJvmNode1 extends MultiNodeSampleSpec
class MultiNodeSampleSpecMultiJvmNode2 extends MultiNodeSampleSpec
Example 6
Source File: MultiNodeBaseSpec.scala From NSDb with Apache License 2.0 | 5 votes |
package io.radicalbit.nsdb.split_brain

import akka.actor.Address
import akka.cluster.MemberStatus.{Down, Exiting, Removed, Up}
import akka.remote.testconductor.RoleName
import akka.remote.testkit.{MultiNodeConfig, MultiNodeSpec}
import akka.remote.transport.ThrottlerTransportAdapter.Direction
import akka.testkit.ImplicitSender
import io.radicalbit.rtsae.STMultiNodeSpec

import scala.concurrent.duration.Duration

/**
 * Base class for split-brain multi-node specs, with `await*` helpers that
 * block until the cluster reaches a given membership condition.
 */
abstract class MultiNodeBaseSpec(config: MultiNodeConfig)
    extends MultiNodeSpec(config)
    with STMultiNodeSpec
    with ImplicitSender {

  def initialParticipants: Int = roles.size

  // Role -> address lookup table, resolved once up front.
  private val addresses: Map[RoleName, Address] = roles.map(r => r -> node(r).address).toMap

  protected def addressOf(roleName: RoleName): Address = addresses(roleName)

  protected def awaitClusterNodesForUp(roleNames: RoleName*): Unit = awaitCond {
    roleNames.forall(
      role => cluster.state.members.exists(m => m.address === addressOf(role) && m.status === Up)
    )
  }

  // Waits until the leader is one of `nodesInCluster`; only checked on members of that set.
  protected def awaitClusterLeader(nodesInCluster: RoleName*): Unit =
    if (nodesInCluster.contains(myself)) {
      nodesInCluster.length should not be 0
      awaitCond(nodesInCluster.map(addressOf).contains(cluster.state.getLeader))
    }

  protected def awaitUnreachableNodes(unreachableNodes: RoleName*): Unit =
    awaitCond(cluster.state.unreachable.map(_.address) === unreachableNodes.map(addressOf).toSet)

  // Drops all traffic in both directions between the two roles.
  protected def switchOffConnection(from: RoleName, to: RoleName) =
    testConductor.blackhole(from, to, Direction.Both).await

  protected def awaitSurvivorsNodes(roleNames: RoleName*): Unit =
    awaitCond(roleNames.forall(role => cluster.state.members.exists(_.address === addressOf(role))))

  protected def awaitAllLeavingNodes(roleNames: RoleName*): Unit =
    awaitCond(roleNames.forall { role =>
      val members     = cluster.state.members
      val unreachable = cluster.state.unreachable
      val address     = addressOf(role)
      unreachable.isEmpty &&
      (members.exists(m => m.address === address && (m.status === Down || m.status === Exiting)) ||
      !members.exists(_.address === address))
    })

  protected def awaitSelfDowningNode(max: Duration = Duration.Undefined) =
    awaitCond(
      {
        val selfAddress = cluster.selfAddress
        cluster.state.members.exists(
          m => m.address === selfAddress && (m.status === Exiting || m.status === Down || m.status === Removed)
        )
      },
      max
    )

  protected def awaitExistingMembers(roleNames: RoleName*): Unit =
    awaitCond(cluster.state.members.map(_.address) === roleNames.map(addressOf).toSet)
}
Example 7
Source File: SimpleDowningSpec.scala From simple-akka-downing with Apache License 2.0 | 5 votes |
package com.ajjpj.simpleakkadowning.util

import akka.actor.Props
import akka.cluster.{Cluster, MemberStatus}
import akka.http.scaladsl.Http
import akka.http.scaladsl.model.{HttpRequest, Uri}
import akka.remote.testconductor.RoleName
import akka.remote.testkit.MultiNodeSpec
import akka.remote.transport.ThrottlerTransportAdapter.Direction
import akka.stream.ActorMaterializer
import akka.testkit.ImplicitSender

import scala.concurrent.duration._
import scala.util.control.NonFatal

/**
 * Base class for downing-provider specs. The first role acts purely as test
 * conductor and never joins the cluster; every other role joins via the seed
 * nodes and exposes its membership view over a local HTTP inspector.
 */
abstract class SimpleDowningSpec(config: SimpleDowningConfig)
    extends MultiNodeSpec(config)
    with STMultiNodeSpec
    with ImplicitSender {

  def initialParticipants = roles.size

  // HTTP port -> role, populated by init() once all addresses are known.
  private var portToNode = Map.empty[Int, RoleName]

  def init(): Unit = {
    if (roles.headOption contains myself) {
      // Conductor node: take part in the barrier but stay out of the cluster.
      enterBarrier("initialized")
    } else {
      val cluster = Cluster(system)
      cluster.joinSeedNodes(seedAddresses)
      system.actorOf(Props(new ClusterHttpInspector(httpPort(myself))), "http-server")

      // Poll until every non-conductor node is Up.
      while (cluster.state.members.count(_.status == MemberStatus.Up) < roles.tail.size)
        Thread.sleep(100)

      enterBarrier("initialized")
    }
    portToNode = roles.map(r => node(r).address.port.get -> r).toMap
  }

  // HTTP inspector port for a (non-conductor) node, derived from its role index.
  def httpPort(node: RoleName) = {
    val nodeNo = roles.indexOf(node)
    require(nodeNo > 0)
    8080 + nodeNo
  }

  def seedAddresses = roles.tail.map(node(_).root.address)

  // Queries a node's HTTP inspector and maps the returned ports back to roles.
  // Best-effort: any failure is logged and yields an empty set.
  private def httpGetNodes(node: RoleName, path: String): Set[RoleName] = {
    try {
      import system.dispatcher
      implicit val mat = ActorMaterializer()

      val uri      = Uri(s"http://localhost:${httpPort(node)}$path")
      val response = Http(system).singleRequest(HttpRequest(uri = uri)).await
      val strict   = response.entity.toStrict(10.seconds).await
      strict.data.decodeString("utf-8") match {
        case s if s.isEmpty => Set.empty
        case s              => s.split(' ').map(_.toInt).map(portToNode).toSet
      }
    } catch {
      case NonFatal(th) =>
        th.printStackTrace()
        Set.empty
    }
  }

  def upNodesFor(node: RoleName)          = httpGetNodes(node, "/cluster-members/up")
  def unreachableNodesFor(node: RoleName) = httpGetNodes(node, "/cluster-members/unreachable")

  // Blackholes traffic between `nodes` and all other non-conductor roles.
  def createPartition(nodes: RoleName*) = {
    val otherNodes = roles.tail.toSet -- nodes
    for (n1 <- nodes; n2 <- otherNodes) testConductor.blackhole(n1, n2, Direction.Both).await
  }

  // Restores full connectivity between all non-conductor roles.
  def healPartition(): Unit = {
    for (n1 <- roles.tail; n2 <- roles.tail) testConductor.passThrough(n1, n2, Direction.Both).await
  }
}
Example 8
Source File: ClusterSpec.scala From akka-cqrs with Apache License 2.0 | 5 votes |
package test.support

import java.io.{File, IOException}
import java.nio.file._
import java.nio.file.attribute.BasicFileAttributes

import akka.actor.{ActorIdentity, Identify, Props}
import akka.cluster.Cluster
import akka.persistence.Persistence
import akka.persistence.journal.leveldb.{SharedLeveldbJournal, SharedLeveldbStore}
import akka.remote.testconductor.RoleName
import akka.remote.testkit.MultiNodeSpec
import akka.testkit.ImplicitSender

import scala.util.control.NonFatal

/**
 * Base class for cluster specs backed by a shared LevelDB journal. Cleans the
 * persistence directories around each run and provides join/journal helpers.
 */
abstract class ClusterSpec extends MultiNodeSpec(ClusterConfig) with SbtMultiNodeSpec with ImplicitSender {

  import ClusterConfig._

  implicit val logger = system.log

  def initialParticipants = roles.size

  // Recursively deletes `path` if it exists; non-fatal walk errors are rethrown
  // via postVisitDirectory, fatal ones propagate untouched.
  def deleteDirectory(path: Path): Unit = if (Files.exists(path)) {
    Files.walkFileTree(path, new SimpleFileVisitor[Path] {
      def deleteAndContinue(file: Path): FileVisitResult = {
        Files.delete(file)
        FileVisitResult.CONTINUE
      }

      override def visitFile(file: Path, attrs: BasicFileAttributes): FileVisitResult =
        deleteAndContinue(file)

      override def visitFileFailed(file: Path, exc: IOException): FileVisitResult =
        deleteAndContinue(file)

      override def postVisitDirectory(dir: Path, exc: IOException): FileVisitResult = {
        Option(exc).fold(deleteAndContinue(dir)) {
          case NonFatal(e) => throw e
        }
      }
    })
  }

  // Directories used by the LevelDB journal, shared journal and snapshot store.
  val storageLocations = List(
    "akka.persistence.journal.leveldb.dir",
    "akka.persistence.journal.leveldb-shared.store.dir",
    "akka.persistence.snapshot-store.local.dir"
  ).map(s => new File(system.settings.config.getString(s)))

  override protected def atStartup() {
    on(node1) {
      storageLocations.foreach(dir => deleteDirectory(dir.toPath))
    }
  }

  override protected def afterTermination() {
    on(node1) {
      storageLocations.foreach(dir => deleteDirectory(dir.toPath))
    }
  }

  def join(startOn: RoleName, joinTo: RoleName) {
    on(startOn) {
      Cluster(system) join node(joinTo).address
    }
    enterBarrier(startOn.name + "-joined")
  }

  // Starts the shared LevelDB store on node1 and points every node's journal at it.
  def setupSharedJournal() {
    Persistence(system)
    on(node1) {
      system.actorOf(Props[SharedLeveldbStore], "store")
    }
    enterBarrier("persistence-started")

    system.actorSelection(node(node1) / "user" / "store") ! Identify(None)
    val sharedStore = expectMsgType[ActorIdentity].ref.get
    SharedLeveldbJournal.setStore(sharedStore, system)
    enterBarrier("after-1")
  }

  def joinCluster() {
    join(startOn = node1, joinTo = node1)
    join(startOn = node2, joinTo = node1)
    enterBarrier("after-2")
  }

  // Alias for runOn with a more declarative name.
  def on(nodes: RoleName*)(thunk: => Unit): Unit = {
    runOn(nodes: _*)(thunk)
  }
}
Example 9
Source File: ClusteredTeskit.scala From lagom with Apache License 2.0 | 5 votes |
package com.lightbend.lagom.internal.cluster

import akka.actor.ActorRef
import akka.actor.ActorSystem
import akka.actor.Address
import akka.actor.BootstrapSetup
import akka.actor.setup.ActorSystemSetup
import akka.cluster.Cluster
import akka.cluster.MemberStatus
import akka.remote.testconductor.RoleName
import com.typesafe.config.ConfigFactory
import akka.remote.testkit.MultiNodeConfig
import akka.remote.testkit.MultiNodeSpec
import akka.testkit.ImplicitSender
import com.lightbend.lagom.internal.cluster.ClusterMultiNodeConfig.node1
import com.typesafe.config.Config

import scala.concurrent.duration._

object ClusterMultiNodeConfig extends ClusterMultiNodeConfig

// this is reused in multiple multi-jvm tests. There's still some copy/paste around though.
abstract class ClusterMultiNodeConfig extends MultiNodeConfig {
  val node1 = role("node1")
  val node2 = role("node2")
  val node3 = role("node3")

  protected def systemConfig: Config =
    ConfigFactory.parseString(
      """
    akka.loglevel = INFO
    akka.actor.provider = cluster
    terminate-system-after-member-removed = 60s

    # increase default timeouts to leave wider margin for Travis.
    # 30s to 60s
    akka.testconductor.barrier-timeout=60s
    akka.test.single-expect-default = 15s

    akka.cluster.sharding.waiting-for-state-timeout = 5s

    # Don't terminate the actor system when doing a coordinated shutdown
    akka.coordinated-shutdown.terminate-actor-system = off
    akka.coordinated-shutdown.run-by-jvm-shutdown-hook = off
    akka.cluster.run-coordinated-shutdown-when-down = off

    ## The settings below are incidental because this code lives in a project that depends on lagom-cluster and
    ## lagom-akka-management-core.

    # multi-jvm tests forms the cluster programmatically
    # therefore we disable Akka Cluster Bootstrap
    lagom.cluster.bootstrap.enabled = off

    # no jvm exit on tests
    lagom.cluster.exit-jvm-when-system-terminated = off
    """
    )

  commonConfig(systemConfig)
}

// heavily inspired by AbstractClusteredPersistentEntitySpec
// this is reused in multiple multi-jvm tests. There's still some copy/paste around though.
object ClusterMultiNodeActorSystemFactory {
  // Copied from MultiNodeSpec
  private def getCallerName(clazz: Class[_]): String = {
    val s = Thread.currentThread.getStackTrace.map(_.getClassName).drop(1).dropWhile(_.matches(".*MultiNodeSpec.?$"))
    val reduced = s.lastIndexWhere(_ == clazz.getName) match {
      case -1 => s
      case z  => s.drop(z + 1)
    }
    reduced.head.replaceFirst(""".*\.""", "").replaceAll("[^a-zA-Z_0-9]", "_")
  }

  def createActorSystem(): Config => ActorSystem = { config =>
    val setup = ActorSystemSetup(BootstrapSetup(ConfigFactory.load(config)))
    ActorSystem(getCallerName(classOf[MultiNodeSpec]), setup)
  }
}
Example 10
Source File: MultiNodeReplicationEndpoint.scala From eventuate with Apache License 2.0 | 5 votes |
package com.rbmhtechnology.eventuate

import akka.actor._
import akka.remote.testkit.MultiNodeSpec
import org.scalatest.BeforeAndAfterAll

/**
 * Mixin for multi-node replication tests: derives a log name from the spec
 * class name and offers factories for replication endpoints plus convenience
 * conversions on addresses and endpoints.
 */
trait MultiNodeReplicationEndpoint extends BeforeAndAfterAll { this: MultiNodeSpec with MultiNodeWordSpec =>

  // Log name = spec class name with everything from "MultiJvm" on stripped.
  def logName: String = {
    val cn = getClass.getSimpleName
    cn.substring(0, cn.lastIndexOf("MultiJvm"))
  }

  def createEndpoint(endpointId: String, connections: Set[ReplicationConnection]): ReplicationEndpoint =
    createEndpoint(endpointId, Set(logName), connections)

  def createEndpoint(
    endpointId: String,
    logNames: Set[String],
    connections: Set[ReplicationConnection],
    activate: Boolean = true): ReplicationEndpoint = {
    val endpoint = new ReplicationEndpoint(endpointId, logNames, id => logProps(id), connections)
    if (activate) endpoint.activate()
    endpoint
  }

  implicit class RichAddress(address: Address) {
    def toReplicationConnection: ReplicationConnection =
      ReplicationConnection(address.host.get, address.port.get, address.system)
  }

  implicit class RichReplicationEndpoint(endpoint: ReplicationEndpoint) {
    def log: ActorRef = endpoint.logs(logName)
    def logId: String = endpoint.logId(logName)
  }

  // Provided by the concrete storage backend mixin (e.g. LevelDB, Cassandra).
  def logProps(logId: String): Props
}
Example 11
Source File: WordsClusterSpec.scala From 006877 with MIT License | 5 votes |
package aia.cluster
package words

import scala.concurrent.duration._

import akka.actor.Props
import akka.cluster.Cluster
import akka.cluster.ClusterEvent.{CurrentClusterState, MemberUp}
import akka.testkit.ImplicitSender
import akka.remote.testkit.MultiNodeSpec

import JobReceptionist._

class WordsClusterSpecMultiJvmNode1 extends WordsClusterSpec
class WordsClusterSpecMultiJvmNode2 extends WordsClusterSpec
class WordsClusterSpecMultiJvmNode3 extends WordsClusterSpec
class WordsClusterSpecMultiJvmNode4 extends WordsClusterSpec

/** Four-node words-counting cluster spec: formation, job execution, failure recovery. */
class WordsClusterSpec extends MultiNodeSpec(WordsClusterSpecConfig) with STMultiNodeSpec with ImplicitSender {

  import WordsClusterSpecConfig._

  def initialParticipants = roles.size

  val seedAddress    = node(seed).address
  val masterAddress  = node(master).address
  val worker1Address = node(worker1).address
  val worker2Address = node(worker2).address

  muteDeadLetters(classOf[Any])(system)

  "A Words cluster" must {

    "form the cluster" in within(10 seconds) {
      Cluster(system).subscribe(testActor, classOf[MemberUp])
      expectMsgClass(classOf[CurrentClusterState])

      Cluster(system).join(seedAddress)

      // All four members must come up.
      receiveN(4).map { case MemberUp(m) => m.address }.toSet must be(
        Set(seedAddress, masterAddress, worker1Address, worker2Address))

      Cluster(system).unsubscribe(testActor)
      enterBarrier("cluster-up")
    }

    "execute a words job once the cluster is running" in within(10 seconds) {
      runOn(master) {
        val receptionist = system.actorOf(Props[JobReceptionist], "receptionist")
        receptionist ! JobRequest("job-1", List("some", "some very long text", "some long text"))
        expectMsg(JobSuccess("job-1", Map("some" -> 3, "very" -> 1, "long" -> 2, "text" -> 2)))
      }
      enterBarrier("job-done")
    }

    "continue to process a job when failures occur" in within(10 seconds) {
      runOn(master) {
        val receptionist = system.actorSelection("/user/receptionist")
        receptionist ! JobRequest("job-2", List("some", "FAIL", "some very long text", "some long text"))
        expectMsg(JobSuccess("job-2", Map("some" -> 3, "very" -> 1, "long" -> 2, "text" -> 2)))
      }
      enterBarrier("job-done")
    }
  }
}
Example 12
Source File: ProxyMultiJvm.scala From 006877 with MIT License | 5 votes |
package aia.channels

// Start with: multi-jvm:test-only aia.channels.ReliableProxySampleSpec

import org.scalatest.{WordSpecLike, BeforeAndAfterAll, MustMatchers}
import akka.testkit.ImplicitSender
import akka.actor.{Props, Actor}
import akka.remote.testkit.MultiNodeSpecCallbacks
import akka.remote.testkit.MultiNodeConfig
import akka.remote.testkit.MultiNodeSpec

// Bridges MultiNodeSpec callbacks into ScalaTest's lifecycle.
trait STMultiNodeSpec extends MultiNodeSpecCallbacks with WordSpecLike with MustMatchers with BeforeAndAfterAll {
  override def beforeAll() = multiNodeSpecBeforeAll()
  override def afterAll()  = multiNodeSpecAfterAll()
}

object ReliableProxySampleConfig extends MultiNodeConfig {
  val client = role("Client")
  val server = role("Server")
  testTransport(on = true)
}

class ReliableProxySampleSpecMultiJvmNode1 extends ReliableProxySample
class ReliableProxySampleSpecMultiJvmNode2 extends ReliableProxySample

import akka.remote.transport.ThrottlerTransportAdapter.Direction
import scala.concurrent.duration._
import concurrent.Await
import akka.contrib.pattern.ReliableProxy

/**
 * Demonstrates that a ReliableProxy redelivers messages across a temporary
 * network partition while direct messages are lost.
 */
class ReliableProxySample extends MultiNodeSpec(ReliableProxySampleConfig) with STMultiNodeSpec with ImplicitSender {

  import ReliableProxySampleConfig._

  def initialParticipants = roles.size

  "A MultiNodeSample" must {

    "wait for all nodes to enter a barrier" in {
      enterBarrier("startup")
    }

    "send to and receive from a remote node" in {
      runOn(client) {
        enterBarrier("deployed")
        val pathToEcho = node(server) / "user" / "echo"
        val echo       = system.actorSelection(pathToEcho)
        val proxy      = system.actorOf(ReliableProxy.props(pathToEcho, 500.millis), "proxy")

        proxy ! "message1"
        expectMsg("message1")

        // Cut the link: the direct message is lost, the proxied one is retried.
        Await.ready(testConductor.blackhole(client, server, Direction.Both), 1 second)

        echo ! "DirectMessage"
        proxy ! "ProxyMessage"
        expectNoMsg(3 seconds)

        // Heal the link: only the proxied message eventually arrives.
        Await.ready(testConductor.passThrough(client, server, Direction.Both), 1 second)

        expectMsg("ProxyMessage")

        echo ! "DirectMessage2"
        expectMsg("DirectMessage2")
      }

      runOn(server) {
        system.actorOf(Props(new Actor {
          def receive = {
            case msg: AnyRef => sender() ! msg
          }
        }), "echo")
        enterBarrier("deployed")
      }

      enterBarrier("finished")
    }
  }
}
Example 13
Source File: MultiDcSpec.scala From JustinDB with Apache License 2.0 | 5 votes |
package justin.db

import akka.remote.testkit.{MultiNodeConfig, MultiNodeSpec}
import com.typesafe.config.ConfigFactory

/**
 * Config for a two-node, two-data-center cluster. `crossDcConnections`
 * controls `akka.cluster.multi-data-center.cross-data-center-connections`.
 */
final class MultiDcSpecConfig(crossDcConnections: Int = 1) extends MultiNodeConfig {
  val first  = role("first")
  val second = role("second")

  private[this] val allRoles    = List(first, second)
  private[this] val clusterName = "MultiDcSpec"

  // Per-node configuration: unique ports and data-center id derived from the role index.
  // FIX: the kubernetes-hostname value previously read `s"justindb-$id"` — the
  // Scala interpolator prefix had leaked into the HOCON value. It is now a
  // plain quoted string interpolated by the enclosing s-string.
  private[this] def commonNodeConfig(id: Int) = ConfigFactory.parseString(
    s"""
       |justin.system = $clusterName
       |justin.kubernetes-hostname = "justindb-$id"
       |justin.http.port = ${9000 + id}
       |akka.cluster.role.storagenode.min-nr-of-members = ${allRoles.size}
       |akka.cluster.http.management.port = ${19999 + id}
       |akka.cluster.seed-nodes.0 = "akka.tcp://$clusterName@localhost:25551"
       |akka.remote.netty.tcp.port = ${25551 + id}
       |akka.remote.netty.tcp.hostname = "localhost"
       |akka.cluster.multi-data-center.cross-data-center-connections = $crossDcConnections
       |akka.cluster.multi-data-center.self-data-center = "dc$id"
    """.stripMargin
  )

  commonConfig(MultiNodeClusterSpec.commonBaseConfig.withFallback(JustinDBConfig.init.config))

  allRoles.zipWithIndex.foreach {
    case (roleName, id) => nodeConfig(roleName)(commonNodeConfig(id))
  }
}

final class MultiDcMultiJvm1 extends MultiDcSpec
final class MultiDcMultiJvm2 extends MultiDcSpec

/** Verifies that a cluster spanning multiple data centers can form. */
abstract class MultiDcSpec(config: MultiDcSpecConfig)
    extends MultiNodeSpec(config)
    with MultiNodeClusterSpec {

  def this() = this(new MultiDcSpecConfig())

  "A cluster with multiple data centers" must {
    "be able to form" in {
      val config   = new JustinDBConfig(system.settings.config)
      val justinDB = JustinDB.init(config)(system)
      enterBarrier("justindb-cluster-up")
    }
  }
}
Example 14
Source File: MultiNodeClusterSpec.scala From JustinDB with Apache License 2.0 | 5 votes |
package justin.db

import akka.cluster.Cluster
import akka.remote.testkit.MultiNodeSpec
import com.typesafe.config.{Config, ConfigFactory}
import org.scalatest.Suite

object MultiNodeClusterSpec {

  // Baseline configuration shared by all multi-node cluster specs: keeps
  // logging quiet during cluster lifecycle churn.
  val commonBaseConfig: Config = ConfigFactory.parseString(
    s"""
       |akka.loglevel = INFO
       |akka.log-config-on-start = false
       |akka.log-dead-letters = off
       |akka.log-dead-letters-during-shutdown = off
       |akka.remote.log-remote-lifecycle-events = off
    """.stripMargin
  )
}

/** Common mixin giving specs access to the node's Cluster extension. */
trait MultiNodeClusterSpec extends Suite with STMultiNodeSpec { self: MultiNodeSpec =>

  def cluster: Cluster = Cluster(system)

  def initialParticipants: Int = roles.size
}
Example 15
Source File: ConvergeJustinDBClusterSpec.scala From JustinDB with Apache License 2.0 | 5 votes |
package justin.db

import akka.remote.testkit.{MultiNodeConfig, MultiNodeSpec}
import com.typesafe.config.ConfigFactory

/** Config for a three-node cluster used to verify cluster convergence. */
final class ConvergeJustinDBClusterConfig extends MultiNodeConfig {
  val first  = role("first")
  val second = role("second")
  val third  = role("third")

  private[this] val allRoles    = List(first, second, third)
  private[this] val clusterName = "ConvergeJustinDBClusterSpec"

  // Per-node configuration: unique ports derived from the role index.
  // FIX: the kubernetes-hostname value previously read `s"justindb-$id"` — the
  // Scala interpolator prefix had leaked into the HOCON value. It is now a
  // plain quoted string interpolated by the enclosing s-string.
  private[this] def commonNodeConfig(id: Int) = ConfigFactory.parseString(
    s"""
       |justin.system = $clusterName
       |justin.kubernetes-hostname = "justindb-$id"
       |justin.http.port = ${9000 + id}
       |akka.cluster.role.storagenode.min-nr-of-members = ${allRoles.size}
       |akka.cluster.http.management.port = ${19999 + id}
       |akka.cluster.seed-nodes.0 = "akka.tcp://$clusterName@localhost:25551"
       |akka.remote.netty.tcp.port = ${25551 + id}
       |akka.remote.netty.tcp.hostname = "localhost"
    """.stripMargin
  )

  commonConfig(MultiNodeClusterSpec.commonBaseConfig.withFallback(JustinDBConfig.init.config))

  allRoles.zipWithIndex.foreach {
    case (roleName, id) => nodeConfig(roleName)(commonNodeConfig(id))
  }
}

final class ConvergeJustinDBClusterSpecMultiJvmNode1 extends ConvergeJustinDBClusterSpec
final class ConvergeJustinDBClusterSpecMultiJvmNode2 extends ConvergeJustinDBClusterSpec
final class ConvergeJustinDBClusterSpecMultiJvmNode3 extends ConvergeJustinDBClusterSpec

/** Verifies that a three-node JustinDB cluster can form. */
abstract class ConvergeJustinDBClusterSpec(config: ConvergeJustinDBClusterConfig)
    extends MultiNodeSpec(config)
    with MultiNodeClusterSpec {

  def this() = this(new ConvergeJustinDBClusterConfig())

  "A cluster" must {
    "be able to form" in {
      val config   = new JustinDBConfig(system.settings.config)
      val justinDB = JustinDB.init(config)(system)
      enterBarrier("justindb-cluster-up")
    }
  }
}
Example 16
Source File: MultiNodeSupportLeveldb.scala From eventuate with Apache License 2.0 | 5 votes |
package com.rbmhtechnology.eventuate

import java.io.File

import akka.actor.Props
import akka.remote.testconductor.RoleName
import akka.remote.testkit.MultiNodeSpec

import com.rbmhtechnology.eventuate.log.leveldb.LeveldbEventLog

import org.apache.commons.io.FileUtils
import org.scalatest.BeforeAndAfterAll

/**
 * LevelDB storage backend for multi-node replication specs. The coordinator
 * node cleans up log and snapshot directories after the run.
 */
trait MultiNodeSupportLeveldb extends BeforeAndAfterAll { this: MultiNodeSpec with MultiNodeWordSpec =>
  val coordinator = RoleName("nodeA")

  def logProps(logId: String): Props = LeveldbEventLog.props(logId)

  override def afterAll(): Unit = {
    // get all config data before shutting down node
    val snapshotRootDir = new File(system.settings.config.getString("eventuate.snapshot.filesystem.dir"))
    val logRootDir      = new File(system.settings.config.getString("eventuate.log.leveldb.dir"))

    // shut down node
    super.afterAll()

    // delete log and snapshot files
    if (isNode(coordinator)) {
      FileUtils.deleteDirectory(snapshotRootDir)
      FileUtils.deleteDirectory(logRootDir)
    }
  }
}
Example 17
Source File: MultiNodeSupportCassandra.scala From eventuate with Apache License 2.0 | 5 votes |
package com.rbmhtechnology.eventuate

import java.io.File

import akka.actor.Props
import akka.remote.testconductor.RoleName
import akka.remote.testkit.MultiNodeSpec

import com.rbmhtechnology.eventuate.log.cassandra._

import org.apache.commons.io.FileUtils
import org.scalatest.BeforeAndAfterAll

/**
 * Cassandra storage backend for multi-node replication specs. The coordinator
 * node starts an embedded Cassandra at startup and cleans up afterwards.
 */
trait MultiNodeSupportCassandra extends BeforeAndAfterAll { this: MultiNodeSpec with MultiNodeWordSpec =>
  val coordinator = RoleName("nodeA")

  def cassandraDir: String = MultiNodeEmbeddedCassandra.DefaultCassandraDir

  def logProps(logId: String): Props = CassandraEventLog.props(logId)

  override def atStartup(): Unit = {
    if (isNode(coordinator)) {
      MultiNodeEmbeddedCassandra.start(cassandraDir)
      Cassandra(system)
    }
    enterBarrier("startup")
  }

  override def afterAll(): Unit = {
    // get all config data before shutting down node
    val snapshotRootDir = new File(system.settings.config.getString("eventuate.snapshot.filesystem.dir"))

    // shut down node
    super.afterAll()

    // clean database and delete snapshot files
    if (isNode(coordinator)) {
      FileUtils.deleteDirectory(snapshotRootDir)
      MultiNodeEmbeddedCassandra.clean()
    }
  }
}
Example 18
Source File: FailureDetectionSpec.scala From eventuate with Apache License 2.0 | 5 votes |
package com.rbmhtechnology.eventuate

import akka.remote.testkit.MultiNodeSpec
import akka.remote.transport.ThrottlerTransportAdapter.Direction
import akka.testkit.TestProbe

import com.rbmhtechnology.eventuate.ReplicationProtocol.ReplicationReadTimeoutException
import com.typesafe.config._

class FailureDetectionConfig(providerConfig: Config) extends MultiNodeReplicationConfig {
  val nodeA = role("nodeA")
  val nodeB = role("nodeB")

  testTransport(on = true)

  // Short read timeout so unavailability is detected quickly in the test.
  val customConfig = ConfigFactory.parseString("""
    |eventuate.log.replication.remote-read-timeout = 1s
    |eventuate.log.replication.failure-detection-limit = 10s
  """.stripMargin)

  setConfig(customConfig.withFallback(providerConfig))
}

/**
 * Verifies that replication endpoints publish Available/Unavailable events
 * when the link between two nodes is blackholed and then healed.
 */
abstract class FailureDetectionSpec(config: FailureDetectionConfig)
    extends MultiNodeSpec(config)
    with MultiNodeWordSpec
    with MultiNodeReplicationEndpoint {

  import ReplicationEndpoint._
  import config._

  def initialParticipants: Int = roles.size

  muteDeadLetters(classOf[AnyRef])(system)

  "Event log replication" must {
    "detect replication server availability" in {
      val probeAvailable1  = new TestProbe(system)
      val probeAvailable2  = new TestProbe(system)
      val probeUnavailable = new TestProbe(system)

      system.eventStream.subscribe(probeAvailable1.ref, classOf[Available])
      system.eventStream.subscribe(probeUnavailable.ref, classOf[Unavailable])
      enterBarrier("subscribe")

      runOn(nodeA) {
        createEndpoint(nodeA.name, Set(node(nodeB).address.toReplicationConnection))
        probeAvailable1.expectMsg(Available(nodeB.name, logName))
        enterBarrier("connected")

        // Cut the link and expect an Unavailable event caused by a read timeout.
        testConductor.blackhole(nodeA, nodeB, Direction.Both).await
        probeUnavailable.expectMsgPF() {
          case Unavailable(nodeB.name, logName, causes) if causes.nonEmpty =>
            causes.head shouldBe a[ReplicationReadTimeoutException]
        }
        system.eventStream.subscribe(probeAvailable2.ref, classOf[Available])
        enterBarrier("repair")

        // Heal the link and expect availability to be re-announced.
        testConductor.passThrough(nodeA, nodeB, Direction.Both).await
        probeAvailable2.expectMsg(Available(nodeB.name, logName))
      }

      runOn(nodeB) {
        createEndpoint(nodeB.name, Set(node(nodeA).address.toReplicationConnection))
        probeAvailable1.expectMsg(Available(nodeA.name, logName))
        enterBarrier("connected")

        probeUnavailable.expectMsgPF() {
          case Unavailable(nodeA.name, logName, causes) if causes.nonEmpty =>
            causes.head shouldBe a[ReplicationReadTimeoutException]
        }
        system.eventStream.subscribe(probeAvailable2.ref, classOf[Available])
        enterBarrier("repair")

        probeAvailable2.expectMsg(Available(nodeA.name, logName))
      }

      enterBarrier("finish")
    }
  }
}