akka.actor.Address Scala Examples

The following examples show how to use akka.actor.Address. Each example is taken from an open-source project; the source file, project, and license are noted above the code.
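Before the project examples, here is a minimal, self-contained sketch of the two ways an akka.actor.Address typically appears in the snippets below: constructed from its parts, or parsed from a URI string with AddressFromURIString. The system name, host and port used here are placeholders.

import akka.actor.{Address, AddressFromURIString}

object AddressBasics extends App {
  // Build an address from its parts: protocol, actor system name, host and port.
  val addr = Address("akka.tcp", "ClusterSystem", "127.0.0.1", 2552)

  // Render it as a URI string and parse it back.
  val uri = addr.toString                  // akka.tcp://ClusterSystem@127.0.0.1:2552
  val parsed = AddressFromURIString(uri)
  assert(parsed == addr)

  // host and port are optional because purely local addresses carry neither.
  println(parsed.host -> parsed.port)      // (Some(127.0.0.1),Some(2552))
}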
Example 1
Source File: package.scala    From squbs   with Apache License 2.0
package org.squbs

import java.net.{URLDecoder, URLEncoder}
import java.nio.ByteBuffer
import java.nio.charset.Charset

import akka.actor.{Address, AddressFromURIString}
import akka.util.ByteString
import com.typesafe.scalalogging.Logger
import org.apache.curator.framework.CuratorFramework
import org.apache.zookeeper.CreateMode
import org.apache.zookeeper.KeeperException.NodeExistsException

import scala.language.implicitConversions
import scala.util.Try
import scala.util.control.NonFatal
import scala.collection.JavaConverters._

package object cluster {

  trait SegmentationLogic {
    val segmentsSize:Int
    def segmentation(partitionKey:ByteString): String = s"segment-${Math.abs(partitionKey.hashCode()) % segmentsSize}"
    def partitionZkPath(partitionKey:ByteString): String = s"/segments/${segmentation(partitionKey)}/${keyToPath(partitionKey)}"
    def sizeOfParZkPath(partitionKey:ByteString): String = s"${partitionZkPath(partitionKey)}/$$size"
    def servantsOfParZkPath(partitionKey:ByteString): String = s"${partitionZkPath(partitionKey)}/servants"
  }

  case class DefaultSegmentationLogic(segmentsSize:Int) extends SegmentationLogic

  def guarantee(path:String, data:Option[Array[Byte]], mode:CreateMode = CreateMode.EPHEMERAL)
               (implicit zkClient:CuratorFramework, logger:Logger):String = {
    try{
      data match {
        case None => zkClient.create.withMode(mode).forPath(path)
        case Some(bytes) => zkClient.create.withMode(mode).forPath(path, bytes)
      }
    }
    catch{
      case e: NodeExistsException =>
        if(data.nonEmpty && data.get.length > 0){
          zkClient.setData().forPath(path, data.get)
        }
        path
      case NonFatal(e) =>
        logger.info("leader znode creation failed due to %s\n", e)
        path
    }
  }

  def safelyDiscard(path:String, recursive: Boolean = true)(implicit zkClient: CuratorFramework): String = Try {
    if(recursive) zkClient.getChildren.forPath(path).asScala.foreach(child => safelyDiscard(s"$path/$child", recursive))
    zkClient.delete.forPath(path)
    path
  } getOrElse path

  def keyToPath(name:String):String = URLEncoder.encode(name, "utf-8")

  def pathToKey(name:String):String = URLDecoder.decode(name, "utf-8")

  private[cluster] val BYTES_OF_INT = Integer.SIZE / java.lang.Byte.SIZE

  implicit def intToBytes(integer:Int):Array[Byte] = {
    val buf = ByteBuffer.allocate(BYTES_OF_INT)
    buf.putInt(integer)
    buf.rewind
    buf.array()
  }

  val UTF_8 = Charset.forName("utf-8")

  implicit class ByteConversions(val bytes: Array[Byte]) extends AnyVal {

    def toAddress: Option[Address] =
      Option(bytes) flatMap (b => if (b.length <= 0) None else Some(AddressFromURIString(new String(b, UTF_8))))

    def toInt: Int = ByteBuffer.wrap(bytes).getInt

    def toUtf8: String = new String(bytes, UTF_8)

    def toByteString: ByteString = ByteString(bytes)

    def toAddressSet: Set[Address] = Try {
      new String(bytes, UTF_8).split("[,]").map(seg => AddressFromURIString(seg.trim)).toSet
    } getOrElse Set.empty
  }


  implicit def byteStringToUtf8(bs:ByteString):String = new String(bs.toArray, UTF_8)

  implicit def addressToBytes(address:Address):Array[Byte] = {
    address.toString.getBytes(UTF_8)
  }

  implicit def addressSetToBytes(members: Set[Address]): Array[Byte] = {
    members.mkString(",").getBytes(UTF_8)
  }
} 
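A minimal usage sketch for the implicit conversions defined above, assuming the org.squbs.cluster package object is on the classpath; the addresses are placeholders. An Address is written to the byte-array form stored in ZooKeeper and read back with toAddress/toAddressSet.

import akka.actor.Address
import org.squbs.cluster._

object ZkAddressConversionDemo extends App {
  val member = Address("akka.tcp", "squbs", "10.0.0.1", 8080)

  // addressToBytes is implicit, so an Address can be used where Array[Byte] is expected.
  val single: Array[Byte] = member
  assert(single.toAddress.contains(member))

  // A set of addresses round-trips through the comma-separated byte representation.
  val group: Array[Byte] = Set(member, Address("akka.tcp", "squbs", "10.0.0.2", 8080))
  println(group.toAddressSet)
}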
Example 2
Source File: HttpBootstrapJsonProtocol.scala    From akka-management   with Apache License 2.0
package akka.management.cluster.bootstrap.contactpoint

import akka.actor.{ Address, AddressFromURIString }
import akka.http.scaladsl.marshallers.sprayjson.SprayJsonSupport
import spray.json.{ DefaultJsonProtocol, JsString, JsValue, RootJsonFormat }

trait HttpBootstrapJsonProtocol extends SprayJsonSupport with DefaultJsonProtocol {
  import HttpBootstrapJsonProtocol._

  implicit object AddressFormat extends RootJsonFormat[Address] {
    override def read(json: JsValue): Address = json match {
      case JsString(s) => AddressFromURIString.parse(s)
      case invalid     => throw new IllegalArgumentException(s"Illegal address value! Was [$invalid]")
    }

    override def write(obj: Address): JsValue = JsString(obj.toString)
  }
  implicit val SeedNodeFormat: RootJsonFormat[SeedNode] = jsonFormat1(SeedNode)
  implicit val ClusterMemberFormat: RootJsonFormat[ClusterMember] = jsonFormat4(ClusterMember)
  implicit val ClusterMembersFormat: RootJsonFormat[SeedNodes] = jsonFormat2(SeedNodes)
}

object HttpBootstrapJsonProtocol extends DefaultJsonProtocol {

  final case class SeedNode(address: Address)

  // we use Address since we want to know which protocol is being used (tcp, artery, artery-tcp etc)
  final case class ClusterMember(node: Address, nodeUid: Long, status: String, roles: Set[String])
  implicit val clusterMemberOrdering: Ordering[ClusterMember] = Ordering.by(_.node)

  final case class SeedNodes(selfNode: Address, seedNodes: Set[ClusterMember])

} 
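A short sketch of how the AddressFormat above is used in practice, assuming spray-json and the bootstrap contact-point module are on the classpath; the address value is a placeholder. A SeedNode serializes its Address as a plain URI string and parses it back.

import akka.actor.AddressFromURIString
import akka.management.cluster.bootstrap.contactpoint.HttpBootstrapJsonProtocol
import akka.management.cluster.bootstrap.contactpoint.HttpBootstrapJsonProtocol.SeedNode
import spray.json._

object SeedNodeJsonDemo extends App with HttpBootstrapJsonProtocol {
  val seed = SeedNode(AddressFromURIString("akka://ClusterSystem@10.0.0.1:2552"))

  // AddressFormat writes the Address as its URI string representation.
  val json = seed.toJson
  println(json.compactPrint)               // {"address":"akka://ClusterSystem@10.0.0.1:2552"}

  assert(json.convertTo[SeedNode] == seed)
}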
Example 3
Source File: ClusterHttpManagementHelperSpec.scala    From akka-management   with Apache License 2.0
package akka.cluster.management

// Accesses private[cluster] so has to be in this package

import akka.actor.Address
import akka.cluster.MemberStatus._
import akka.cluster.{ Member, UniqueAddress }
import akka.management.cluster.ClusterHttpManagementHelper
import org.scalatest.{ Matchers, WordSpec }

class ClusterHttpManagementHelperSpec extends WordSpec with Matchers {

  "Oldest nodes per role" must {
    "work" in {
      val dc = "dc-one"
      val address1 = Address("akka", "Main", "hostname.com", 3311)
      val address2 = Address("akka", "Main", "hostname2.com", 3311)
      val address3 = Address("akka", "Main", "hostname3.com", 3311)
      val address4 = Address("akka", "Main", "hostname4.com", 3311)

      val uniqueAddress1 = UniqueAddress(address1, 1L)
      val uniqueAddress2 = UniqueAddress(address2, 2L)
      val uniqueAddress3 = UniqueAddress(address3, 3L)
      val uniqueAddress4 = UniqueAddress(address4, 4L)

      val clusterMember1 = new Member(uniqueAddress1, 1, Up, Set("one", "two", dc))
      val clusterMember2 = new Member(uniqueAddress2, 2, Joining, Set("one", "two", dc))
      val clusterMember3 = new Member(uniqueAddress3, 3, Joining, Set("three", dc))
      val clusterMember4 = new Member(uniqueAddress4, 4, Joining, Set(dc))

      val members = Seq(clusterMember1, clusterMember2, clusterMember3, clusterMember4)

      ClusterHttpManagementHelper.oldestPerRole(members) shouldEqual Map(
        "one" -> address1.toString,
        "two" -> address1.toString,
        "three" -> address3.toString,
        dc -> address1.toString
      )
    }
  }

} 
Example 4
Source File: ExternalAddressExt.scala    From ForestFlow   with Apache License 2.0
package ai.forestflow.akka.extensions

import akka.actor.{ActorRef, ActorSystem, Address, ExtendedActorSystem, Extension, ExtensionId, ExtensionIdProvider}

//noinspection TypeAnnotation
object ExternalAddress extends ExtensionId[ExternalAddressExt] with ExtensionIdProvider {
  //The lookup method is required by ExtensionIdProvider,
  // so we return ourselves here, this allows us
  // to configure our extension to be loaded when
  // the ActorSystem starts
  override def lookup = ExternalAddress

  //This method will be called by Akka
  // to instantiate our Extension
  override def createExtension(system: ExtendedActorSystem): ExternalAddressExt =
    new ExternalAddressExt(system)

  
  override def get(system: ActorSystem): ExternalAddressExt = super.get(system)
}
class ExternalAddressExt(system: ExtendedActorSystem) extends Extension {
  def addressForAkka: Address = system.provider.getDefaultAddress
  def akkaActorRefFromString(refString: String): ActorRef = system.provider.resolveActorRef(refString)

} 
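A minimal sketch of looking the extension up on a running ActorSystem; the system name is a placeholder. ExternalAddress(system) returns the ExternalAddressExt instance created above.

import akka.actor.ActorSystem
import ai.forestflow.akka.extensions.ExternalAddress

object ExternalAddressDemo extends App {
  val system = ActorSystem("demo")

  // Resolve the extension and read the system's default (remotely reachable) address.
  val selfAddress = ExternalAddress(system).addressForAkka
  println(s"this ActorSystem is reachable at $selfAddress")

  system.terminate()
}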
Example 5
Source File: DocSvr.scala    From Raphtory   with Apache License 2.0
package com.raphtory.core.clustersetup

import akka.actor.ActorSystem
import akka.actor.Address
import akka.actor.ExtendedActorSystem
import akka.cluster.Cluster
import akka.cluster.Member
import akka.event.LoggingAdapter
import akka.management.cluster.bootstrap.ClusterBootstrap
import akka.management.javadsl.AkkaManagement
import com.raphtory.core.clustersetup.util.ConfigUtils._
import com.raphtory.core.utils.Utils
import com.typesafe.config.Config
import com.typesafe.config.ConfigFactory
import com.typesafe.config.ConfigValueFactory

import scala.collection.JavaConversions
import scala.collection.JavaConversions._

trait DocSvr {

  def seedLoc: String

  implicit val system: ActorSystem
  val docker = System.getenv().getOrDefault("DOCKER", "false").trim.toBoolean

  val clusterSystemName: String = Utils.clusterSystemName
  val ssn: String               = java.util.UUID.randomUUID.toString

  
  def printConfigInfo(config: Config, system: ActorSystem): Unit = {
    val log: LoggingAdapter = system.log

    val systemConfig: SystemConfig = config.parse()
    val bindAddress: SocketAddress = systemConfig.bindAddress
    val tcpAddress: SocketAddress  = systemConfig.tcpAddress

    log.info(s"Created ActorSystem with ID: $ssn")

    log.info(s"Binding ActorSystem internally to address ${bindAddress.host}:${bindAddress.port}")
    log.info(s"Binding ActorSystem externally to host ${tcpAddress.host}:${tcpAddress.port}")

    log.info(s"Registering the following seeds to ActorSystem: ${systemConfig.seeds}")
    log.info(s"Registering the following roles to ActorSystem: ${systemConfig.roles}")

    // FIXME: This is bit unorthodox ...
    val akkaSystemUrl: Address = system.asInstanceOf[ExtendedActorSystem].provider.getDefaultAddress
    log.info(s"ActorSystem successfully initialised at the following Akka URL: $akkaSystemUrl")
  }
} 
Example 6
Source File: SurvivalDecider.scala    From simple-akka-downing   with Apache License 2.0
package com.ajjpj.simpleakkadowning

import akka.actor.Address
import akka.cluster.{Member, UniqueAddress}
import com.ajjpj.simpleakkadowning.SurvivalDecider.ClusterState
import com.typesafe.config.Config

import scala.collection.Set
import scala.collection.immutable.SortedSet


trait SurvivalDecider {
  def isInMinority(clusterState: ClusterState, selfAddress: Address): Boolean
}

object SurvivalDecider {
  private val memberOrdering = new Ordering[ClusterMemberInfo] {
    override def compare (x: ClusterMemberInfo, y: ClusterMemberInfo) =
      Member.addressOrdering.compare(x.uniqueAddress.address, y.uniqueAddress.address)
  }

  case class ClusterMemberInfo(uniqueAddress: UniqueAddress, roles: Set[String], member: Member)
  case class ClusterState(upMembers: Set[ClusterMemberInfo], unreachable: Set[UniqueAddress]) {
    lazy val sortedUpMembers = SortedSet.empty(memberOrdering) ++  upMembers
    lazy val sortedUpAndReachable = sortedUpMembers.filterNot (x => unreachable.contains(x.uniqueAddress))
    lazy val upReachable = upMembers.filterNot(x => unreachable(x.uniqueAddress))
    lazy val upUnreachable = upMembers.filter(x => unreachable(x.uniqueAddress))
  }


  def apply(config: Config): SurvivalDecider = {
    val cc = config.getConfig("simple-akka-downing")

    cc.getString("active-strategy") match {
      case "static-quorum" =>
        val ccc = cc.getConfig("static-quorum")
        val quorumSize = ccc.getInt("quorum-size")
        val role = ccc.getString("role") match {
          case r if r.trim.isEmpty => None
          case r => Some(r)
        }
        new FixedQuorumDecider(quorumSize, role)
      case "keep-majority" =>
        val ccc = cc.getConfig("keep-majority")
        val role = ccc.getString("role") match {
          case r if r.trim.isEmpty => None
          case r => Some(r)
        }
        new KeepMajorityDecider(role)
      case "keep-oldest" =>
        val ccc = cc.getConfig("keep-oldest")
        val downIfAlone = ccc.getBoolean("down-if-alone")
        new KeepOldestDecider(downIfAlone)
    }
  }


  class FixedQuorumDecider(quorumSize: Int, role: Option[String]) extends SurvivalDecider {
    override def isInMinority(clusterState: ClusterState, selfAddress: Address) = {
      val relevantMembers = role match {
        case Some (r) => clusterState.upMembers.filter (_.roles contains r)
        case None =>     clusterState.upMembers
      }

      (relevantMembers -- clusterState.upUnreachable).size < quorumSize
    }
  }

  class KeepMajorityDecider(role: Option[String]) extends SurvivalDecider {
    override def isInMinority (clusterState: ClusterState, selfAddress: Address) = {
      role match {
        case Some(r) =>
          val all = clusterState.upMembers.filter(_.roles contains r)
          val unreachable = clusterState.upUnreachable.filter(_.roles contains r)
          all.size <= 2*unreachable.size
        case None =>
          clusterState.upMembers.size <= 2*clusterState.upUnreachable.size
      }
    }
  }

  class KeepOldestDecider(downIfAlone: Boolean) extends SurvivalDecider {
    override def isInMinority (clusterState: ClusterState, selfAddress: Address) = {
      val allRelevant = clusterState.upMembers
      val oldestRelevant = allRelevant.foldLeft(allRelevant.head)((a, b) => if (a.member isOlderThan b.member) a else b)

      if (downIfAlone) {
        clusterState.upReachable match {
          case s if s == Set(oldestRelevant) => true                                       // only the oldest node --> terminate
          case _ if clusterState.unreachable == Set(oldestRelevant.uniqueAddress) => false // the oldest node is the only unreachable node --> survive
          case _ => clusterState.unreachable contains oldestRelevant.uniqueAddress
        }
      }
      else {
        clusterState.unreachable contains oldestRelevant.uniqueAddress
      }
    }
  }
} 
Example 7
Source File: CustomerApp.scala    From reactive-application-development-scala   with Apache License 2.0
package com.rarebooks.library

import akka.actor.{ActorSelection, ActorSystem, Address, RootActorPath}
import akka.event.Logging

import scala.annotation.tailrec
import scala.concurrent.Await
import scala.concurrent.duration.{Duration, FiniteDuration, MILLISECONDS => Millis}
import scala.io.StdIn

object CustomerApp {
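  // Note: system, log, rareBooksAddress and resolveTimeout are defined elsewhere in the
  // full source file; only the createCustomer helper is shown in this excerpt.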

  
  protected def createCustomer(count: Int, odds: Int, tolerance: Int): Unit = {
    val selection: ActorSelection  =
        system.actorSelection(
        RootActorPath(rareBooksAddress) /
        "user" / "rare-books")

    selection.resolveOne(resolveTimeout).onComplete {
      case scala.util.Success(rareBooks) =>
        for (_ <- 1 to count)
          system.actorOf(Customer.props(rareBooks, odds, tolerance))
      case scala.util.Failure(ex) =>
        log.error(ex, ex.getMessage)
    }
  }
} 
Example 8
Source File: RareBooks.scala    From reactive-application-development-scala   with Apache License 2.0
package com.rarebooks.library

import akka.actor.{Actor, ActorLogging, ActorPath, Address, OneForOneStrategy, Props, RootActorPath, Stash, SupervisorStrategy}
import akka.routing.{ActorRefRoutee, RoundRobinRoutingLogic, Router}

import scala.concurrent.duration.{Duration, FiniteDuration, MILLISECONDS => Millis}

object RareBooks {

  case object Close
  case object Open
  case object Report

//  val name: String =
//    "rare-books"
//
//  def pathFor(address: Address): ActorPath =
//    RootActorPath(address) / "user" / name
  
  def props: Props =
    Props(new RareBooks)
}

class RareBooks extends Actor with ActorLogging with Stash {
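  // Note: the actor's receive behaviour is omitted in this excerpt; only the setup
  // (supervision, configuration, scheduling) and the librarian router creation are shown.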

  import context.dispatcher
  import RareBooks._
  import LibraryProtocol._

  override val supervisorStrategy: SupervisorStrategy = {
    val decider: SupervisorStrategy.Decider = {
      case Librarian.ComplainException(complain, customer) =>
        customer ! Credit()
        log.info(s"RareBooks sent customer $customer a credit")
        SupervisorStrategy.Restart
    }
    OneForOneStrategy()(decider orElse super.supervisorStrategy.decider)
  }

  private val openDuration: FiniteDuration =
    Duration(context.system.settings.config.getDuration("rare-books.open-duration", Millis), Millis)

  private val closeDuration: FiniteDuration =
    Duration(context.system.settings.config.getDuration("rare-books.close-duration", Millis), Millis)

  private val nbrOfLibrarians: Int = context.system.settings.config getInt "rare-books.nbr-of-librarians"

  private val findBookDuration: FiniteDuration =
    Duration(context.system.settings.config.getDuration("rare-books.librarian.find-book-duration", Millis), Millis)

  private val maxComplainCount: Int = context.system.settings.config getInt "rare-books.librarian.max-complain-count"

  var requestsToday: Int = 0
  var totalRequests: Int = 0

  var router: Router = createLibrarian()

  context.system.scheduler.scheduleOnce(openDuration, self, Close)

  
  protected def createLibrarian(): Router = {
    var cnt: Int = 0
    val routees: Vector[ActorRefRoutee] = Vector.fill(nbrOfLibrarians) {
      val r = context.actorOf(Librarian.props(findBookDuration, maxComplainCount), s"librarian-$cnt")
      cnt += 1
      ActorRefRoutee(r)
    }
    Router(RoundRobinRoutingLogic(), routees)
  }
} 
Example 9
Source File: package.scala    From NSDb   with Apache License 2.0
package io.radicalbit.nsdb
import akka.actor.Address
import akka.cluster.Member

package object cluster {

  def createNodeName(member: Member) =
    s"${member.address.host.getOrElse("noHost")}_${member.address.port.getOrElse(0)}"

  def createNodeName(address: Address) =
    s"${address.host.getOrElse("noHost")}_${address.port.getOrElse(0)}"

  
  def createAddress(nodeName: String): Address = {
    val splitNodeName = nodeName.split("_")
    Address("nsdb",
            "NSDb",
            Option(splitNodeName(0)).getOrElse("noHost"),
            Option(splitNodeName(1)).map(_.toInt).getOrElse(0))
  }

  final object PubSubTopics {
    final val COORDINATORS_TOPIC   = "coordinators"
    final val NODE_GUARDIANS_TOPIC = "node-guardians"
    final val NSDB_METRICS_TOPIC   = "nsdb-metrics"
  }
} 
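A small round-trip sketch for the helpers above, assuming the io.radicalbit.nsdb.cluster package object is importable; the host and port are placeholders.

import akka.actor.Address
import io.radicalbit.nsdb.cluster._

object NodeNameDemo extends App {
  val address  = Address("nsdb", "NSDb", "10.1.2.3", 2552)

  // Address -> "host_port" node name, and back again.
  val nodeName = createNodeName(address)   // "10.1.2.3_2552"
  val restored = createAddress(nodeName)

  assert(restored == address)
  println(nodeName)
}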
Example 10
Source File: MultiNodeBaseSpec.scala    From NSDb   with Apache License 2.0
package io.radicalbit.nsdb.split_brain

import akka.actor.Address
import akka.cluster.MemberStatus.{Down, Exiting, Removed, Up}
import akka.remote.testconductor.RoleName
import akka.remote.testkit.{MultiNodeConfig, MultiNodeSpec}
import akka.remote.transport.ThrottlerTransportAdapter.Direction
import akka.testkit.ImplicitSender
import io.radicalbit.rtsae.STMultiNodeSpec

import scala.concurrent.duration.Duration


abstract class MultiNodeBaseSpec(config: MultiNodeConfig)
    extends MultiNodeSpec(config)
    with STMultiNodeSpec
    with ImplicitSender {

  def initialParticipants: Int = roles.size

  private val addresses: Map[RoleName, Address] = roles.map(r => r -> node(r).address).toMap

  protected def addressOf(roleName: RoleName): Address = addresses(roleName)

  protected def awaitClusterNodesForUp(roleNames: RoleName*): Unit = awaitCond {
    roleNames.forall(
      role => cluster.state.members.exists(m => m.address === addressOf(role) && m.status === Up)
    )
  }

  protected def awaitClusterLeader(nodesInCluster: RoleName*): Unit =
    if (nodesInCluster.contains(myself)) {
      nodesInCluster.length should not be 0
      awaitCond(nodesInCluster.map(addressOf).contains(cluster.state.getLeader))
    }

  protected def awaitUnreachableNodes(unreachableNodes: RoleName*): Unit =
    awaitCond(cluster.state.unreachable.map(_.address) === unreachableNodes.map(addressOf).toSet)

  protected def switchOffConnection(from: RoleName, to: RoleName) =
    testConductor.blackhole(from, to, Direction.Both).await

  protected def awaitSurvivorsNodes(roleNames: RoleName*): Unit =
    awaitCond(roleNames.forall(role => cluster.state.members.exists(_.address === addressOf(role))))

  protected def awaitAllLeavingNodes(roleNames: RoleName*): Unit =
    awaitCond(roleNames.forall { role =>
      val members     = cluster.state.members
      val unreachable = cluster.state.unreachable

      val address = addressOf(role)

      unreachable.isEmpty &&
      (members.exists(m => m.address === address && (m.status === Down || m.status === Exiting)) ||
      !members.exists(_.address === address))
    })

  protected def awaitSelfDowningNode(max: Duration = Duration.Undefined) =
    awaitCond(
      {
        val selfAddress = cluster.selfAddress
        cluster.state.members.exists(m =>
          m.address === selfAddress && (m.status === Exiting || m.status === Down || m.status === Removed))
      },
      max
    )

  protected def awaitExistingMembers(roleNames: RoleName*): Unit =
    awaitCond(cluster.state.members.map(_.address) === roleNames.map(addressOf).toSet)

} 
Example 11
Source File: DiskMetricsSelectorSpec.scala    From NSDb   with Apache License 2.0
package io.radicalbit.nsdb.cluster.metrics

import java.nio.file.{Files, Paths}

import akka.actor.Address
import akka.cluster.metrics.StandardMetrics._
import akka.cluster.metrics.{Metric, NodeMetrics}
import io.radicalbit.nsdb.cluster.metrics.NSDbMetrics._
import org.scalatest.{Matchers, WordSpec}
import org.scalatest.OptionValues._

class DiskMetricsSelectorSpec extends WordSpec with Matchers {

  val emptyNode      = Address("nsdb", "NSDb", "emptyNode", 2552)
  val almostFullNode = Address("nsdb", "NSDb", "node1", 2552)
  val node2          = Address("nsdb", "NSDb", "node2", 2552)
  val node3          = Address("nsdb", "NSDb", "node3", 2552)
  val node4          = Address("nsdb", "NSDb", "node4", 2552)
  val realNode       = Address("nsdb", "NSDb", "real", 2552)

  val fs = Files.getFileStore(Paths.get("."))

  val nodeMetrics1 = NodeMetrics(
    almostFullNode,
    System.currentTimeMillis,
    Set(
      Metric.create(DiskTotalSpace, 1000000, None),
      Metric.create(DiskFreeSpace, 100, None),
      Metric.create(HeapMemoryMax, 512, None),
      Metric.create(CpuCombined, 0.2, None),
      Metric.create(CpuStolen, 0.1, None),
      Metric.create(SystemLoadAverage, 0.5, None),
      Metric.create(Processors, 8, None)
    ).flatten
  )

  val emptyNodeMetric = NodeMetrics(
    emptyNode,
    System.currentTimeMillis,
    Set(Metric.create(DiskTotalSpace, 1000000, None), Metric.create(DiskFreeSpace, 0, None)).flatten
  )

  val nodeMetrics2 = NodeMetrics(
    node2,
    System.currentTimeMillis,
    Set(Metric.create(DiskTotalSpace, 1000000, None), Metric.create(DiskFreeSpace, 750000, None)).flatten
  )

  val nodeMetrics3 = NodeMetrics(
    node3,
    System.currentTimeMillis,
    Set(Metric.create(DiskTotalSpace, 1000000, None), Metric.create(DiskFreeSpace, 1000000, None)).flatten
  )

  val nodeMetrics4 = NodeMetrics(
    node4,
    System.currentTimeMillis,
    Set()
  )

  val realNodeMetrics = NodeMetrics(
    realNode,
    System.currentTimeMillis,
    Set(Metric.create(DiskTotalSpace, fs.getTotalSpace, None), Metric.create(DiskFreeSpace, fs.getUsableSpace, None)).flatten
  )

  val nodeMetrics = Set(emptyNodeMetric, nodeMetrics1, nodeMetrics2, nodeMetrics3, nodeMetrics4, realNodeMetrics)

  "DiskMetricsSelector" must {
    "calculate capacity of heap metrics" in {
      val capacity = DiskMetricsSelector.capacity(nodeMetrics)
      capacity.get(emptyNode) shouldBe Some(0.0)
      capacity.get(almostFullNode) shouldBe Some(0.0001)
      capacity.get(node2) shouldBe Some(0.75)
      capacity.get(node3) shouldBe Some(1)
      capacity.get(node4) shouldBe None
      //for a real node the capacity must be between 0 and 1. There's no way to estimate a reasonable capacity value and mocking is not the point here
      capacity.get(realNode).value shouldBe >(0.0)
      capacity.get(realNode).value shouldBe <(1.0)
    }
  }

} 
Example 12
Source File: package.scala    From squbs   with Apache License 2.0
package org.squbs.cluster

import akka.actor.Address
import akka.util.ByteString

import scala.annotation.tailrec

package object rebalance {
  trait RebalanceLogic {
    val spareLeader:Boolean
    
    def rebalance(partitionsToMembers: Map[ByteString, Set[Address]], members:Set[Address]):
    Map[ByteString, Set[Address]] = {
      val utilization = partitionsToMembers.foldLeft(Map.empty[Address, Seq[ByteString]]){(memoize, assign) =>
        assign._2.foldLeft(memoize){(memoize, member) =>
          memoize.updated(member, memoize.getOrElse(member, Seq.empty) :+ assign._1)
        }
      }
      val ordered = members.toSeq sortWith { (one, two) =>
        utilization.getOrElse(one, Seq.empty).size < utilization.getOrElse(two, Seq.empty).size
      }
      @tailrec def rebalanceRecursively(partitionsToMembers: Map[ByteString, Set[Address]],
                                        utilization: Map[Address, Seq[ByteString]],
                                        ordered:Seq[Address]): Map[ByteString, Set[Address]] = {
        val overflows = utilization.getOrElse(ordered.last, Seq.empty)
        val underflow = utilization.getOrElse(ordered.head, Seq.empty)
        if (overflows.size - underflow.size > 1) {
          val move = overflows.head
          val updatedUtil = utilization.updated(ordered.last, overflows.tail).updated(ordered.head, underflow :+ move)
          var headOrdered = ordered.tail takeWhile { next =>
            updatedUtil.getOrElse(ordered.head, Seq.empty).size < updatedUtil.getOrElse(next, Seq.empty).size
          }
          headOrdered = (headOrdered :+ ordered.head) ++ ordered.tail.drop(headOrdered.size)
          var rearOrdered = headOrdered takeWhile { next =>
            updatedUtil.getOrElse(headOrdered.last, Seq.empty).size > updatedUtil.getOrElse(next, Seq.empty).size
          }
          // Drop the headOrdered.last
          rearOrdered = (rearOrdered :+ headOrdered.last) ++ headOrdered.drop(rearOrdered.size).dropRight(1)
          rebalanceRecursively(partitionsToMembers.updated(move,
            partitionsToMembers.getOrElse(move, Set.empty) + ordered.head - ordered.last), updatedUtil, rearOrdered)
        }
        else
          partitionsToMembers
      }
      rebalanceRecursively(partitionsToMembers, utilization, ordered)
    }
  }

  class DefaultRebalanceLogic(val spareLeader: Boolean) extends RebalanceLogic

  object DefaultRebalanceLogic {
    def apply(spareLeader: Boolean) = new DefaultRebalanceLogic(spareLeader)
  }
} 
Example 13
Source File: MasterSuite.scala    From SparkCore   with Apache License 2.0
package org.apache.spark.deploy.master

import akka.actor.Address
import org.scalatest.FunSuite

import org.apache.spark.{SSLOptions, SparkConf, SparkException}

class MasterSuite extends FunSuite {

  test("toAkkaUrl") {
    val conf = new SparkConf(loadDefaults = false)
    val akkaUrl = Master.toAkkaUrl("spark://1.2.3.4:1234", "akka.tcp")
    assert("akka.tcp://[email protected]:1234/user/Master" === akkaUrl)
  }

  test("toAkkaUrl with SSL") {
    val conf = new SparkConf(loadDefaults = false)
    val akkaUrl = Master.toAkkaUrl("spark://1.2.3.4:1234", "akka.ssl.tcp")
    assert("akka.ssl.tcp://[email protected]:1234/user/Master" === akkaUrl)
  }

  test("toAkkaUrl: a typo url") {
    val conf = new SparkConf(loadDefaults = false)
    val e = intercept[SparkException] {
      Master.toAkkaUrl("spark://1.2. 3.4:1234", "akka.tcp")
    }
    assert("Invalid master URL: spark://1.2. 3.4:1234" === e.getMessage)
  }

  test("toAkkaAddress") {
    val conf = new SparkConf(loadDefaults = false)
    val address = Master.toAkkaAddress("spark://1.2.3.4:1234", "akka.tcp")
    assert(Address("akka.tcp", "sparkMaster", "1.2.3.4", 1234) === address)
  }

  test("toAkkaAddress with SSL") {
    val conf = new SparkConf(loadDefaults = false)
    val address = Master.toAkkaAddress("spark://1.2.3.4:1234", "akka.ssl.tcp")
    assert(Address("akka.ssl.tcp", "sparkMaster", "1.2.3.4", 1234) === address)
  }

  test("toAkkaAddress: a typo url") {
    val conf = new SparkConf(loadDefaults = false)
    val e = intercept[SparkException] {
      Master.toAkkaAddress("spark://1.2. 3.4:1234", "akka.tcp")
    }
    assert("Invalid master URL: spark://1.2. 3.4:1234" === e.getMessage)
  }
} 
Example 14
Source File: ExecutorClusterListener.scala    From marvin-engine-executor   with Apache License 2.0
package org.marvin.executor.manager

import akka.cluster.Cluster
import akka.cluster.ClusterEvent._
import akka.actor.{Actor, ActorLogging, Address}

import scala.collection.immutable

class ExecutorClusterListener(seedNodes: immutable.Seq[Address]) extends Actor with ActorLogging {

  var cluster: Cluster = _

  override def preStart(): Unit = {
    cluster = Cluster(context.system)

    log.info(s"Joining to the cluster ${context.system.name} ...")
    cluster.joinSeedNodes(seedNodes)

    log.info(s"Subscribing to the cluster ${context.system.name} ...")
    cluster.subscribe(self, initialStateMode = InitialStateAsEvents, classOf[MemberUp], classOf[MemberEvent], classOf[UnreachableMember])

    log.info(s"Cluster configuration done! :-P")
    log.info(s"Cluster Node Address is ${cluster.selfAddress}")
  }

  override def postStop(): Unit = {
    log.info(s"Leaving cluster ${context.system.name} :-( ...")
    cluster.unsubscribe(self)
    cluster.leave(cluster.selfAddress)
    log.info("Left cluster with success!")
  }

  def receive = {
    case MemberUp(member) =>
      log.info("Member is Up: {}", member.address)

    case UnreachableMember(member) =>
      log.info("Member detected as unreachable: {}", member)

    case MemberRemoved(member, previousStatus) =>
      log.info("Member is Removed: {} after {}", member.address, previousStatus)

    case _:MemberEvent =>
      log.info("Unknow Message received ...")
  }
} 
Example 15
Source File: MassSettings.scala    From fusion-data   with Apache License 2.0
package mass

import akka.actor.Address
import akka.actor.typed.ActorSystem
import com.typesafe.config.Config
import com.typesafe.scalalogging.StrictLogging
import helloscala.common.Configuration
import mass.core.Constants._

final class Compiles(c: Configuration) {
  def scala213Home: String = c.getString("scala213")
  def scala212Home: String = c.getString("scala212")
  def scala211Home: String = c.getString("scala211")
}

final class MassSettings private (val configuration: Configuration) extends StrictLogging {
  val compiles = new Compiles(configuration.getConfiguration(s"$BASE_CONF.core.compiles"))

  def clusterName: String = configuration.getString(BASE_CONF + ".cluster.name")

  def clusterProtocol: String = configuration.getString(BASE_CONF + ".cluster.protocol")

  def clusterSeeds: List[Address] =
    configuration
      .get[Seq[String]](BASE_CONF + ".cluster.seeds")
      .map { seed =>
        val Array(host, port) = seed.split(':')
        Address(clusterProtocol, clusterName, host, port.toInt)
      }
      .toList
}

object MassSettings {
  def apply(configuration: Configuration): MassSettings = new MassSettings(configuration)
  def apply(config: Config): MassSettings = apply(Configuration(config))
  def apply(system: ActorSystem[_]): MassSettings = apply(system.settings.config)
} 
Example 16
Source File: StatsSample.scala    From fusion-data   with Apache License 2.0
package sample.cluster.stats

import java.util.concurrent.ThreadLocalRandom

import akka.actor.{ Actor, ActorSystem, Address, Props, RelativeActorPath, RootActorPath }
import akka.cluster.ClusterEvent._
import akka.cluster.{ Cluster, MemberStatus }
import com.typesafe.config.ConfigFactory

import scala.concurrent.duration._

object StatsSample {
  def main(args: Array[String]): Unit =
    if (args.isEmpty) {
      startup(Seq("2551", "2552", "0"))
      StatsSampleClient.main(Array.empty)
    } else {
      startup(args)
    }

  def startup(ports: Seq[String]): Unit =
    ports foreach { port =>
      // Override the configuration of the port when specified as program argument
      val config = ConfigFactory
        .parseString(s"""
        akka.remote.netty.tcp.port=$port
        akka.remote.artery.canonical.port=$port
        """)
        .withFallback(ConfigFactory.parseString("akka.cluster.roles = [compute]"))
        .withFallback(ConfigFactory.load("stats1"))

      val system = ActorSystem("ClusterSystem", config)

      system.actorOf(Props[StatsWorker], name = "statsWorker")
      system.actorOf(Props[StatsService], name = "statsService")
    }
}

object StatsSampleClient {
  def main(args: Array[String]): Unit = {
    // note that client is not a compute node, role not defined
    val system = ActorSystem("ClusterSystem")
    system.actorOf(Props(classOf[StatsSampleClient], "/user/statsService"), "client")
  }
}

class StatsSampleClient(servicePath: String) extends Actor {
  val cluster = Cluster(context.system)

  val servicePathElements = servicePath match {
    case RelativeActorPath(elements) => elements
    case _ =>
      throw new IllegalArgumentException("servicePath [%s] is not a valid relative actor path" format servicePath)
  }
  import context.dispatcher

  val tickTask =
    context.system.scheduler.schedule(2.seconds, 2.seconds, self, "tick")

  var nodes = Set.empty[Address]

  override def preStart(): Unit =
    cluster.subscribe(self, classOf[MemberEvent], classOf[ReachabilityEvent])
  override def postStop(): Unit = {
    cluster.unsubscribe(self)
    tickTask.cancel()
  }

  def receive = {
    case "tick" if nodes.nonEmpty =>
      // just pick any one
      val address =
        nodes.toIndexedSeq(ThreadLocalRandom.current.nextInt(nodes.size))
      val service =
        context.actorSelection(RootActorPath(address) / servicePathElements)
      service ! StatsJob("this is the text that will be analyzed")
    case result: StatsResult =>
      println(result)
    case failed: JobFailed =>
      println(failed)
    case state: CurrentClusterState =>
      nodes = state.members.collect {
        case m if m.hasRole("compute") && m.status == MemberStatus.Up =>
          m.address
      }
    case MemberUp(m) if m.hasRole("compute")        => nodes += m.address
    case other: MemberEvent                         => nodes -= other.member.address
    case UnreachableMember(m)                       => nodes -= m.address
    case ReachableMember(m) if m.hasRole("compute") => nodes += m.address
  }
} 
Example 17
Source File: Coordination.scala    From constructr   with Apache License 2.0
package de.heikoseeberger.constructr.coordination

import akka.Done
import akka.actor.{ ActorSystem, Address }
import scala.concurrent.Future
import scala.concurrent.duration.FiniteDuration

object Coordination {

  def apply(clusterName: String, system: ActorSystem): Coordination =
    try {
      val className =
        system.settings.config.getString("constructr.coordination.class-name")
      Class
        .forName(className)
        .getConstructor(classOf[String], classOf[ActorSystem])
        .newInstance(clusterName, system)
        .asInstanceOf[Coordination]
    } catch {
      case _: NoSuchMethodException =>
        throw new Exception(
          """|A Coordination implementation must provide a constructor with the following signature:
             |(clusterName: String, system: ActorSystem)""".stripMargin
        )
    }
}


trait Coordination {

  // the remaining members of the trait are omitted in this excerpt

  def refresh(self: Address, ttl: FiniteDuration): Future[Done]
}
Example 18
Source File: MultiNodeEtcdConstructrSpec.scala    From constructr   with Apache License 2.0
package de.heikoseeberger.constructr

import akka.actor.{ Address, AddressFromURIString }
import io.circe.Json
import io.circe.parser.parse
import java.util.Base64

class MultiNodeEtcdConstructrSpecMultiJvmNode1 extends MultiNodeEtcdConstructrSpec
class MultiNodeEtcdConstructrSpecMultiJvmNode2 extends MultiNodeEtcdConstructrSpec
class MultiNodeEtcdConstructrSpecMultiJvmNode3 extends MultiNodeEtcdConstructrSpec
class MultiNodeEtcdConstructrSpecMultiJvmNode4 extends MultiNodeEtcdConstructrSpec
class MultiNodeEtcdConstructrSpecMultiJvmNode5 extends MultiNodeEtcdConstructrSpec

object MultiNodeEtcdConstructrSpec {
  def toNodes(s: String): Set[Address] = {
    def jsonToNode(json: Json) = {
      val key =
        json.hcursor
          .get[String]("key")
          .fold(throw _, identity)
          .stripPrefix("/constructr/MultiNodeConstructrSpec/nodes/")
      AddressFromURIString(new String(Base64.getUrlDecoder.decode(key)))
    }
    import cats.syntax.either._ // for Scala 2.11
    parse(s)
      .fold(throw _, identity)
      .hcursor
      .downField("node")
      .get[Set[Json]]("nodes")
      .getOrElse(Set.empty)
      .map(jsonToNode)
  }
}

abstract class MultiNodeEtcdConstructrSpec
    extends MultiNodeConstructrSpec(
      2379,
      "/v2/keys/constructr?recursive=true",
      "/v2/keys/constructr/MultiNodeConstructrSpec/nodes",
      MultiNodeEtcdConstructrSpec.toNodes
    ) 
Example 19
Source File: Leadership.scala    From cave   with MIT License
package actors

import akka.actor.{Actor, ActorLogging, Address}
import akka.cluster.ClusterEvent._
import akka.cluster.{Cluster, Member}

object Leadership {
  object IsLeader
}

class Leadership(address: Address) extends Actor with ActorLogging {

  private val cluster = Cluster(context.system)
  private var members = Set.empty[Member]

  private var isLeader = false

  override def preStart(): Unit =
    cluster.subscribe(self, initialStateMode = InitialStateAsEvents,
      classOf[MemberEvent],
      classOf[UnreachableMember],
      classOf[ClusterDomainEvent])

  override def postStop(): Unit = cluster.unsubscribe(self)

  import actors.Leadership._

  def receive = {

    case IsLeader =>
      sender ! isLeader

    case state: CurrentClusterState =>
      log.warning("Initial state: " + state.leader)
      setLeader(state.leader)

    case MemberUp(member) =>
      log.warning(s"Member up($member)")
      members += member

    case MemberRemoved(member, previousStatus) =>
      log.warning(s"Member removed($member)")
      members.find(_.address == member.address) foreach (members -= _)

    case LeaderChanged(member) =>
      log.warning("Leader changed, now: " + member)
      setLeader(member)

    case e: MemberEvent =>
      log.warning(s"Member event($e)")
  }

  private def setLeader(leader: Option[Address]): Unit = {
    isLeader = leader exists (_ == address)
  }
} 
Example 20
Source File: UtilsSpec.scala    From coral   with Apache License 2.0
package io.coral.utils

import akka.actor.Address
import io.coral.cluster.Machine
import org.scalatest._

class UtilsSpec
	extends WordSpecLike {
	"A Utils class" should {
		"Return the friendly time" in {
			val input = 1449848431868L
			val actual = Utils.friendlyTime(input)
			val expected = "2015-12-11T16:40:31.868"
			assert(actual == expected)
		}

		"Parse the friendly time" in {
			val actual = Utils.timeStampFromFriendlyTime("2015-12-11T16:40:31.868")
			val expected = Some(1449848431868L)
			assert(actual == expected)
		}

		"Create string and convert back to timestamp" in {
			val time = Some(System.currentTimeMillis)
			val actual = Utils.timeStampFromFriendlyTime(Utils.friendlyTime(time.get))
			assert(actual == time)
		}

		"Create a machine from an actor path" in {
			val path1 = "akka.tcp://[email protected]:2551/user/clusterMonitor"
			val actual1 = Utils.machineFromPath(path1)
			val expected1 = Machine(None, "127.0.0.1", 2551, List(), None)
			assert(actual1 == expected1)

			val path2 = "invalid.path"
			intercept[IllegalArgumentException] {
				Utils.machineFromPath(path2)
			}

			// No system name (always "coral") and no port
			val path3 = "akka.tcp://127.0.0.1/user/actor"
			intercept[IllegalArgumentException] {
				Utils.machineFromPath(path3)
			}

			// No port
			val path4 = "akka.tcp://[email protected]/user/some/actor"
			intercept[IllegalArgumentException] {
				Utils.machineFromPath(path4)
			}

			val path5 = ""
			intercept[IllegalArgumentException] {
				Utils.machineFromPath(path5)
			}

			val path6 = null
			intercept[IllegalArgumentException] {
				Utils.machineFromPath(path6)
			}

			// No system name (always "coral")
			val path7 = "akka.tcp://127.0.0.1:2551/user/actor"
			intercept[IllegalArgumentException] {
				val actual7 = Utils.machineFromPath(path7)
			}
		}

		"Create an address from an actor path" in {
			val path1 = "akka.tcp://[email protected]:2551/user/clusterMonitor"
			val actual1 = Utils.addressFromPath(path1)
			val expected1 = Address("akka.tcp", "coral", "127.0.0.1", 2551)
			assert(actual1 == expected1)

			val path2 = "akka.tcp://[email protected]:2553"
			val actual2 = Utils.addressFromPath(path2)
			val expected2 = Address("akka.tcp", "coral", "127.0.0.1", 2553)
			assert(actual2 == expected2)

			val path3 = "akka.tcp://[email protected]:2551/user/clusterMonitor"
			val actual3 = Utils.addressFromPath(path3)
			val expected3 = Address("akka.tcp", "coral", "127.0.0.1", 2551)
			assert(actual3 == expected3)

			val path4 = ""
			intercept[IllegalArgumentException] {
				Utils.addressFromPath(path4)
			}

			val path5 = null
			intercept[IllegalArgumentException] {
				Utils.addressFromPath(path5)
			}
		}
	}
} 
Example 21
Source File: LithiumMultiNodeSpec.scala    From lithium   with Apache License 2.0
package com.swissborg.lithium

import akka.actor.{ActorSystem, Address}
import akka.cluster.Cluster
import akka.cluster.MemberStatus._
import akka.remote.testconductor.RoleName
import akka.remote.testkit.{MultiNodeConfig, MultiNodeSpec, MultiNodeSpecCallbacks}
import akka.testkit.ImplicitSender
import org.scalatest.BeforeAndAfterAll
import org.scalatest.concurrent.{Eventually, IntegrationPatience}
import org.scalatest.matchers.should.Matchers
import org.scalatest.wordspec.AnyWordSpecLike

abstract class LithiumMultiNodeSpec(val config: MultiNodeConfig)
    extends MultiNodeSpec(config)
    with MultiNodeSpecCallbacks
    with AnyWordSpecLike
    with Matchers
    with BeforeAndAfterAll
    with ImplicitSender
    with Eventually
    with IntegrationPatience {
  override def beforeAll(): Unit = multiNodeSpecBeforeAll()
  override def afterAll(): Unit  = multiNodeSpecAfterAll()

  private val addresses: Map[RoleName, Address] = roles.map(r => r -> node(r).address).toMap

  protected def addressOf(roleName: RoleName): Address = addresses(roleName)

  protected def waitToBecomeUnreachable(roleNames: RoleName*): Unit =
    awaitCond(allUnreachable(roleNames: _*))

  protected def waitForSurvivors(roleNames: RoleName*): Unit =
    awaitCond(allSurvivors(roleNames: _*))

  protected def waitForUp(roleNames: RoleName*): Unit = awaitCond(allUp(roleNames: _*))

  protected def waitForSelfDowning(implicit system: ActorSystem): Unit = awaitCond(downedItself)

  protected def waitForAllLeaving(roleNames: RoleName*): Unit =
    awaitCond(allLeaving(roleNames: _*))

  protected def waitExistsAllDownOrGone(groups: Seq[Seq[RoleName]]): Unit =
    awaitCond(existsAllDownOrGone(groups))

  private def allUnreachable(roleNames: RoleName*): Boolean =
    roleNames.forall(
      role => Cluster(system).state.unreachable.exists(_.address === addressOf(role))
    )

  private def allSurvivors(roleNames: RoleName*): Boolean =
    roleNames.forall(role => Cluster(system).state.members.exists(_.address === addressOf(role)))

  private def allUp(roleNames: RoleName*): Boolean =
    roleNames.forall(
      role => Cluster(system).state.members.exists(m => m.address === addressOf(role) && m.status === Up)
    )

  private def existsAllDownOrGone(groups: Seq[Seq[RoleName]]): Boolean =
    groups.exists(group => allLeaving(group: _*))

  private def downedItself(implicit system: ActorSystem): Boolean = {
    val selfAddress = Cluster(system).selfAddress
    Cluster(system).state.members
      .exists(
        m => m.address === selfAddress && (m.status === Exiting || m.status === Down || m.status === Removed)
      )
  }

  private def allLeaving(roleNames: RoleName*): Boolean =
    roleNames.forall { role =>
      val members     = Cluster(system).state.members
      val unreachable = Cluster(system).state.unreachable

      val address = addressOf(role)

      unreachable.isEmpty &&                                                                        // no unreachable members
      (members.exists(m => m.address === address && (m.status === Down || m.status === Exiting)) || // member is down
      !members.exists(_.address === address))                                                       // member is not in the cluster
    }
} 
Example 22
Source File: KeepRefereeSuite.scala    From lithium   with Apache License 2.0
package com.swissborg.lithium

package strategy

import akka.actor.Address
import akka.cluster.ClusterEvent.CurrentClusterState
import akka.cluster.MemberStatus.Up
import akka.cluster.swissborg.TestMember
import cats.Id
import org.scalatest.wordspec.AnyWordSpecLike

import scala.collection.immutable.SortedSet
import org.scalatest.matchers.should.Matchers

class KeepRefereeSuite extends AnyWordSpecLike with Matchers {
  private val aa = TestMember(Address("akka", "sys", "a", 2552), Up)
  private val bb = TestMember(Address("akka", "sys", "b", 2552), Up)
  private val cc = TestMember(Address("akka", "sys", "c", 2552), Up)

  private val referee = aa.address.toString

  "KeepReferee" must {
    "down the unreachable nodes when being the referee node and reaching enough nodes" in {
      val w = WorldView.fromSnapshot(aa, CurrentClusterState(SortedSet(aa, bb, cc), Set(bb), seenBy = Set.empty))

      new KeepReferee[Id](KeepRefereeConfig(referee, 1)).takeDecision(w) should ===(
        Decision.DownUnreachable(w)
      )
    }

    "down the reachable nodes when being the referee and not reaching enough nodes" in {
      val w = WorldView.fromSnapshot(aa, CurrentClusterState(SortedSet(aa, bb, cc), Set(bb), seenBy = Set.empty))

      new strategy.KeepReferee[Id](KeepRefereeConfig(referee, 3)).takeDecision(w) should ===(
        Decision.DownReachable(w)
      )
    }

    "down the unreachable nodes when the referee is reachable and reaching enough nodes" in {
      val w = WorldView.fromSnapshot(cc, CurrentClusterState(SortedSet(aa, bb, cc), Set(bb), seenBy = Set.empty))

      new strategy.KeepReferee[Id](KeepRefereeConfig(referee, 1)).takeDecision(w) should ===(
        Decision.DownUnreachable(w)
      )
    }

    "down the reachable nodes when the referee is reachable and not reaching enough nodes" in {
      val w = WorldView.fromSnapshot(cc, CurrentClusterState(SortedSet(aa, bb, cc), Set(bb), seenBy = Set.empty))

      new strategy.KeepReferee[Id](KeepRefereeConfig(referee, 3)).takeDecision(w) should ===(
        Decision.DownReachable(w)
      )
    }

    "down the reachable nodes when the referee is unreachable" in {
      val w = WorldView.fromSnapshot(bb, CurrentClusterState(SortedSet(aa, bb, cc), Set(aa), seenBy = Set.empty))

      new strategy.KeepReferee[Id](KeepRefereeConfig(referee, 1)).takeDecision(w) should ===(
        Decision.DownReachable(w)
      )

      new strategy.KeepReferee[Id](KeepRefereeConfig(referee, 3)).takeDecision(w) should ===(
        Decision.DownReachable(w)
      )
    }
  }
} 
Example 23
Source File: AkkaArbitraryInstances.scala    From lithium   with Apache License 2.0
package akka.cluster.swissborg

import akka.actor.{ActorPath, Address, ChildActorPath, RootActorPath}
import akka.cluster.{Member, UniqueAddress, Reachability => _}
import com.swissborg.lithium.instances.ArbitraryTestInstances._
import org.scalacheck.Arbitrary._
import org.scalacheck.{Arbitrary, Gen}
import shapeless.tag
import shapeless.tag.@@


object AkkaArbitraryInstances {
  sealed trait JoiningTag
  type JoiningMember = Member @@ JoiningTag

  implicit val arbJoiningMember: Arbitrary[JoiningMember] = Arbitrary {
    for {
      uniqueAddress <- arbitrary[UniqueAddress]
    } yield tag[JoiningTag][Member](Member(uniqueAddress, Set("dc-datacenter")))
  }

  implicit val arbRootActorPath: Arbitrary[RootActorPath] = Arbitrary(arbitrary[Address].map(RootActorPath(_)))

  def arbChildActorPath(parent: ActorPath): Arbitrary[ChildActorPath] =
    Arbitrary(for {
      c   <- Gen.alphaChar
      cs  <- Gen.alphaStr
      uid <- Gen.chooseNum(0, Int.MaxValue)
      name = s"$c$cs"
    } yield new ChildActorPath(parent, name, uid))

  def arbActorPath(depth: Int, parent: ActorPath): Arbitrary[ActorPath] =
    Arbitrary(
      if (depth <= 0) Gen.const(parent)
      else arbChildActorPath(parent).arbitrary.flatMap(arbActorPath(depth - 1, _).arbitrary)
    )

  implicit val arbActorPath0: Arbitrary[ActorPath] = Arbitrary(for {
    depth  <- Gen.chooseNum(0, 10)
    parent <- arbitrary[RootActorPath]
    path   <- arbActorPath(depth, parent).arbitrary
  } yield path)
} 
Example 24
Source File: TestMember.scala    From lithium   with Apache License 2.0
package akka.cluster.swissborg

import akka.actor.Address
import akka.cluster.ClusterSettings.DataCenter
import akka.cluster.{ClusterSettings, Member, MemberStatus, UniqueAddress}

object TestMember {

  def apply(address: Address, status: MemberStatus): Member =
    apply(address, status, Set.empty[String], ClusterSettings.DefaultDataCenter)

  def apply(address: Address, status: MemberStatus, dataCenter: DataCenter): Member =
    apply(address, status, Set.empty[String], dataCenter)

  def apply(address: Address, status: MemberStatus, roles: Set[String]): Member =
    apply(address, status, roles, dataCenter = ClusterSettings.DefaultDataCenter)

  def apply(address: Address, status: MemberStatus, roles: Set[String], dataCenter: DataCenter): Member =
    withUniqueAddress(UniqueAddress(address, 0L), status, roles, dataCenter)

  def withUniqueAddress(uniqueAddress: UniqueAddress,
                        status: MemberStatus,
                        roles: Set[String],
                        dataCenter: DataCenter): Member =
    new Member(uniqueAddress, Int.MaxValue, status, roles + (ClusterSettings.DcRolePrefix + dataCenter))
} 
Example 25
Source File: LeaderAutoDowningRoles.scala    From akka-cluster-custom-downing   with Apache License 2.0
package tanukki.akka.cluster.autodown

import akka.actor.{ActorSystem, Address, Props}
import akka.cluster.{Cluster, DowningProvider}
import com.typesafe.config.Config

import scala.collection.JavaConverters._
import scala.concurrent.duration.{FiniteDuration, _}

final class LeaderAutoDowningRoles(system: ActorSystem) extends DowningProvider {

  private[this] val cluster = Cluster(system)

  private val config: Config = system.settings.config

  override def downRemovalMargin: FiniteDuration = {
    val key = "custom-downing.down-removal-margin"
    config.getString(key) match {
      case "off" => Duration.Zero
      case _     => Duration(config.getDuration(key, MILLISECONDS), MILLISECONDS)
    }
  }

  override def downingActorProps: Option[Props] = {
    val stableAfter = system.settings.config.getDuration("custom-downing.stable-after").toMillis millis
    val roles = system.settings.config.getStringList("custom-downing.leader-auto-downing-roles.target-roles").asScala.toSet
    if (roles.isEmpty) None else Some(LeaderAutoDownRoles.props(roles, stableAfter))
  }
}


private[autodown] object LeaderAutoDownRoles {
  def props(targetRoles: Set[String], autoDownUnreachableAfter: FiniteDuration): Props = Props(classOf[LeaderAutoDownRoles], targetRoles, autoDownUnreachableAfter)
}

private[autodown] class LeaderAutoDownRoles(targetRoles: Set[String], autoDownUnreachableAfter: FiniteDuration)
  extends LeaderAutoDownRolesBase(targetRoles, autoDownUnreachableAfter) with ClusterCustomDowning {

  override def down(node: Address): Unit = {
    log.info("Leader is auto-downing unreachable node [{}]", node)
    cluster.down(node)
  }
} 
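A sketch of wiring this provider into an ActorSystem, assuming the library is on the classpath. The custom-downing keys are the ones read by the code above; akka.cluster.downing-provider-class is Akka's standard setting for plugging in a DowningProvider, and the values shown (stable-after, target role, system name) are placeholders.

import akka.actor.ActorSystem
import com.typesafe.config.ConfigFactory

object LeaderAutoDowningRolesSetup extends App {
  val config = ConfigFactory.parseString(
    """
      |akka.cluster.downing-provider-class = "tanukki.akka.cluster.autodown.LeaderAutoDowningRoles"
      |custom-downing {
      |  stable-after = 20s
      |  down-removal-margin = off
      |  leader-auto-downing-roles.target-roles = [worker]
      |}
      |""".stripMargin).withFallback(ConfigFactory.load())

  // Nodes carrying the "worker" role become candidates for auto-downing by the leader.
  val system = ActorSystem("ClusterSystem", config)
}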
Example 26
Source File: LeaderAwareCustomAutoDownBase.scala    From akka-cluster-custom-downing   with Apache License 2.0
package tanukki.akka.cluster.autodown

import akka.actor.Address
import akka.cluster.ClusterEvent._
import akka.event.Logging

import scala.concurrent.duration.FiniteDuration

abstract class LeaderAwareCustomAutoDownBase(autoDownUnreachableAfter: FiniteDuration) extends CustomAutoDownBase(autoDownUnreachableAfter) {

  private val log = Logging(context.system, this)

  private var leader = false

  def onLeaderChanged(leader: Option[Address]): Unit = {}

  def isLeader: Boolean = leader

  override def receiveEvent: Receive = {
    case LeaderChanged(leaderOption) =>
      leader = leaderOption.contains(selfAddress)
      if (isLeader) {
        log.info("This node is the new Leader")
      }
      onLeaderChanged(leaderOption)
    case UnreachableMember(m) =>
      log.info("{} is unreachable", m)
      unreachableMember(m)
    case ReachableMember(m)   =>
      log.info("{} is reachable", m)
      remove(m)
    case MemberRemoved(m, _)  =>
      log.info("{} was removed from the cluster", m)
      remove(m)
  }

  override def initialize(state: CurrentClusterState): Unit = {
    leader = state.leader.exists(_ == selfAddress)
    super.initialize(state)
  }
} 
Example 27
Source File: RoleLeaderAutoDowningRoles.scala    From akka-cluster-custom-downing   with Apache License 2.0
package tanukki.akka.cluster.autodown

import akka.actor.{ActorSystem, Address, Props}
import akka.cluster.{Cluster, DowningProvider}
import com.typesafe.config.Config

import scala.collection.JavaConverters._
import scala.concurrent.duration.{FiniteDuration, _}

final class RoleLeaderAutoDowningRoles(system: ActorSystem) extends DowningProvider {

  private[this] val cluster = Cluster(system)

  private val config: Config = system.settings.config

  override def downRemovalMargin: FiniteDuration = {
    val key = "custom-downing.down-removal-margin"
    config.getString(key) match {
      case "off" => Duration.Zero
      case _     => Duration(config.getDuration(key, MILLISECONDS), MILLISECONDS)
    }
  }

  override def downingActorProps: Option[Props] = {
    val stableAfter = system.settings.config.getDuration("custom-downing.stable-after").toMillis millis
    val leaderRole = system.settings.config.getString("custom-downing.role-leader-auto-downing-roles.leader-role")
    val roles = system.settings.config.getStringList("custom-downing.role-leader-auto-downing-roles.target-roles").asScala.toSet
    if (roles.isEmpty) None else Some(RoleLeaderAutoDownRoles.props(leaderRole, roles, stableAfter))
  }
}


private[autodown] object RoleLeaderAutoDownRoles {
  def props(leaderRole: String, targetRoles: Set[String], autoDownUnreachableAfter: FiniteDuration): Props = Props(classOf[RoleLeaderAutoDownRoles], leaderRole, targetRoles, autoDownUnreachableAfter)
}

private[autodown] class RoleLeaderAutoDownRoles(leaderRole: String, targetRoles: Set[String], autoDownUnreachableAfter: FiniteDuration)
  extends RoleLeaderAutoDownRolesBase(leaderRole, targetRoles, autoDownUnreachableAfter) with ClusterCustomDowning {

  override def down(node: Address): Unit = {
    log.info("RoleLeader is auto-downing unreachable node [{}]", node)
    cluster.down(node)
  }
} 
Example 28
Source File: ClusterCustomDowning.scala    From akka-cluster-custom-downing   with Apache License 2.0
package tanukki.akka.cluster.autodown

import akka.actor.{Address, ActorLogging, Scheduler}
import akka.cluster.Cluster
import akka.cluster.ClusterEvent.ClusterDomainEvent
import scala.concurrent.duration._

trait ClusterCustomDowning extends ActorLogging { base: CustomAutoDownBase =>

  val cluster = Cluster(context.system)

  override def selfAddress: Address = cluster.selfAddress

  override def scheduler: Scheduler = {
    if (context.system.scheduler.maxFrequency < 1.second / cluster.settings.SchedulerTickDuration) {
      log.warning("CustomDowning does not use a cluster dedicated scheduler. Cluster will use a dedicated scheduler if configured " +
        "with 'akka.scheduler.tick-duration' [{} ms] >  'akka.cluster.scheduler.tick-duration' [{} ms].",
        (1000 / context.system.scheduler.maxFrequency).toInt, cluster.settings.SchedulerTickDuration.toMillis)
    }
    context.system.scheduler
  }

  override def preStart(): Unit = {
    cluster.subscribe(self, classOf[ClusterDomainEvent])
  }
  override def postStop(): Unit = {
    cluster.unsubscribe(self)
  }
} 
Example 29
Source File: CustomAutoDownBase.scala    From akka-cluster-custom-downing   with Apache License 2.0 5 votes vote down vote up
package tanukki.akka.cluster.autodown

import akka.actor.{Cancellable, Scheduler, Address, Actor}
import akka.cluster.ClusterEvent._
import akka.cluster.MemberStatus.{Exiting, Down}
import akka.cluster._
import scala.concurrent.duration.{Duration, FiniteDuration}

object CustomDowning {
  case class UnreachableTimeout(member: Member)
}

abstract class CustomAutoDownBase(autoDownUnreachableAfter: FiniteDuration) extends Actor {

  import CustomDowning._

  def selfAddress: Address

  def down(node: Address): Unit

  def downOrAddPending(member: Member): Unit

  def downOrAddPendingAll(members: Set[Member]): Unit

  def scheduler: Scheduler

  import context.dispatcher

  val skipMemberStatus = Set[MemberStatus](Down, Exiting)

  private var scheduledUnreachable: Map[Member, Cancellable] = Map.empty
  private var pendingUnreachable: Set[Member] = Set.empty
  private var unstableUnreachable: Set[Member] = Set.empty

  override def postStop(): Unit = {
    scheduledUnreachable.values foreach { _.cancel }
    super.postStop()
  }

  def receiveEvent: Receive

  def receive: Receive = receiveEvent orElse predefinedReceiveEvent

  def predefinedReceiveEvent: Receive = {
    case state: CurrentClusterState =>
      initialize(state)
      state.unreachable foreach unreachableMember

    case UnreachableTimeout(member) =>
      if (scheduledUnreachable contains member) {
        scheduledUnreachable -= member
        if (scheduledUnreachable.isEmpty) {
          unstableUnreachable += member
          downOrAddPendingAll(unstableUnreachable)
          unstableUnreachable = Set.empty
        } else {
          unstableUnreachable += member
        }
      }

    case _: ClusterDomainEvent =>
  }

  def initialize(state: CurrentClusterState) = {}

  def unreachableMember(m: Member): Unit =
    if (!skipMemberStatus(m.status) && !scheduledUnreachable.contains(m))
      scheduleUnreachable(m)

  def scheduleUnreachable(m: Member): Unit = {
    if (autoDownUnreachableAfter == Duration.Zero) {
      downOrAddPending(m)
    } else {
      val task = scheduler.scheduleOnce(autoDownUnreachableAfter, self, UnreachableTimeout(m))
      scheduledUnreachable += (m -> task)
    }
  }

  def remove(member: Member): Unit = {
    scheduledUnreachable.get(member) foreach { _.cancel }
    scheduledUnreachable -= member
    pendingUnreachable -= member
    unstableUnreachable -= member
  }

  def scheduledUnreachableMembers: Map[Member, Cancellable] = scheduledUnreachable

  def pendingUnreachableMembers: Set[Member] = pendingUnreachable

  def pendingAsUnreachable(member: Member): Unit = pendingUnreachable += member

  def downPendingUnreachableMembers(): Unit = {
    pendingUnreachable.foreach(member => down(member.address))
    pendingUnreachable = Set.empty
  }

  def unstableUnreachableMembers: Set[Member] = unstableUnreachable
} 
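To make the extension points of CustomAutoDownBase concrete, here is a minimal, illustrative subclass (not part of the library) that simply downs every unreachable member once the timeout fires, with no leader or quorum coordination. It mixes in ClusterCustomDowning from Example 28 to supply selfAddress, scheduler, cluster and the event subscription:

import akka.actor.{Address, Props}
import akka.cluster.ClusterEvent.{MemberRemoved, ReachableMember, UnreachableMember}
import akka.cluster.Member

import scala.concurrent.duration.FiniteDuration

object NaiveAutoDown {
  def props(autoDownUnreachableAfter: FiniteDuration): Props =
    Props(classOf[NaiveAutoDown], autoDownUnreachableAfter)
}

class NaiveAutoDown(autoDownUnreachableAfter: FiniteDuration)
  extends CustomAutoDownBase(autoDownUnreachableAfter) with ClusterCustomDowning {

  // Down unreachable nodes directly; real strategies gate this on leadership, quorum, etc.
  override def down(node: Address): Unit = {
    log.info("Auto-downing unreachable node [{}]", node)
    cluster.down(node)
  }

  override def downOrAddPending(member: Member): Unit = down(member.address)

  override def downOrAddPendingAll(members: Set[Member]): Unit = members.foreach(downOrAddPending)

  override def receiveEvent: Receive = {
    case UnreachableMember(m) => unreachableMember(m)
    case ReachableMember(m)   => remove(m)
    case MemberRemoved(m, _)  => remove(m)
  }
}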
Example 30
Source File: OldestAutoDowning.scala    From akka-cluster-custom-downing   with Apache License 2.0 5 votes vote down vote up
package tanukki.akka.cluster.autodown

import akka.ConfigurationException
import akka.actor.{ActorSystem, Address, Props}
import akka.cluster.{Cluster, DowningProvider}
import com.typesafe.config.Config

import scala.concurrent.Await
import scala.concurrent.duration._

class OldestAutoDowning(system: ActorSystem) extends DowningProvider {

  private[this] val cluster = Cluster(system)

  private val config: Config = system.settings.config

  override def downRemovalMargin: FiniteDuration = {
    val key = "custom-downing.down-removal-margin"
    config.getString(key) match {
      case "off" => Duration.Zero
      case _     => Duration(config.getDuration(key, MILLISECONDS), MILLISECONDS)
    }
  }

  override def downingActorProps: Option[Props] = {
    val stableAfter = system.settings.config.getDuration("custom-downing.stable-after").toMillis millis
    val oldestMemberRole = {
      val r = system.settings.config.getString("custom-downing.oldest-auto-downing.oldest-member-role")
      if (r.isEmpty) None else Some(r)
    }
    val downIfAlone = system.settings.config.getBoolean("custom-downing.oldest-auto-downing.down-if-alone")
    val shutdownActorSystem = system.settings.config.getBoolean("custom-downing.oldest-auto-downing.shutdown-actor-system-on-resolution")
    if (stableAfter == Duration.Zero && downIfAlone) throw new ConfigurationException("If you set down-if-alone=true, stable-after timeout must be greater than zero.")
    else {
      Some(OldestAutoDown.props(oldestMemberRole, downIfAlone, shutdownActorSystem, stableAfter))
    }
  }
}

private[autodown] object OldestAutoDown {
  def props(oldestMemberRole: Option[String], downIfAlone: Boolean, shutdownActorSystem: Boolean, autoDownUnreachableAfter: FiniteDuration): Props =
    Props(classOf[OldestAutoDown], oldestMemberRole, downIfAlone, shutdownActorSystem, autoDownUnreachableAfter)
}

private[autodown] class OldestAutoDown(oldestMemberRole: Option[String], downIfAlone: Boolean, shutdownActorSystem: Boolean, autoDownUnreachableAfter: FiniteDuration)
  extends OldestAutoDownBase(oldestMemberRole, downIfAlone, autoDownUnreachableAfter) with ClusterCustomDowning {

  override def down(node: Address): Unit = {
    log.info("Oldest is auto-downing unreachable node [{}]", node)
    cluster.down(node)
  }

  override def shutdownSelf(): Unit = {
    if (shutdownActorSystem) {
      Await.result(context.system.terminate(), 10 seconds)
    } else {
      throw new SplitBrainResolvedError("OldestAutoDowning")
    }
  }
} 
Example 31
Source File: RoleLeaderAutoDownRolesBase.scala    From akka-cluster-custom-downing   with Apache License 2.0 5 votes vote down vote up
package tanukki.akka.cluster.autodown

import akka.actor.Address
import akka.cluster.Member
import scala.concurrent.duration.FiniteDuration

abstract class RoleLeaderAutoDownRolesBase(leaderRole: String, targetRoles: Set[String], autoDownUnreachableAfter: FiniteDuration)
  extends RoleLeaderAwareCustomAutoDownBase(autoDownUnreachableAfter){


  override def onRoleLeaderChanged(role: String, leader: Option[Address]): Unit = {
    if (leaderRole == role && isRoleLeaderOf(leaderRole)) downPendingUnreachableMembers()
  }

  override def downOrAddPending(member: Member): Unit = {
    if (targetRoles.exists(role => member.hasRole(role))) {
      if (isRoleLeaderOf(leaderRole)) {
        down(member.address)
      } else {
        pendingAsUnreachable(member)
      }
    }
  }

  override def downOrAddPendingAll(members: Set[Member]): Unit = {
    members.foreach(downOrAddPending)
  }
} 
Example 32
Source File: QuorumLeaderAutoDownBase.scala    From akka-cluster-custom-downing   with Apache License 2.0 5 votes vote down vote up
package tanukki.akka.cluster.autodown

import akka.actor.Address
import akka.cluster.{MemberStatus, Member}
import akka.cluster.MemberStatus.Down

import scala.concurrent.duration.FiniteDuration

abstract class QuorumLeaderAutoDownBase(quorumRole: Option[String], quorumSize: Int, downIfOutOfQuorum: Boolean, autoDownUnreachableAfter: FiniteDuration)
  extends QuorumAwareCustomAutoDownBase(quorumSize, autoDownUnreachableAfter) {

  override def onLeaderChanged(leader: Option[Address]): Unit = {
    if (quorumRole.isEmpty && isLeader) downPendingUnreachableMembers()
  }

  override def onRoleLeaderChanged(role: String, leader: Option[Address]): Unit = {
    quorumRole.foreach { r =>
      if (r == role && isRoleLeaderOf(r)) downPendingUnreachableMembers()
    }
  }


  override def onMemberRemoved(member: Member, previousStatus: MemberStatus): Unit = {
    if (isQuorumMet(quorumRole)) {
      if (isLeaderOf(quorumRole)) {
        downPendingUnreachableMembers()
      }
    } else {
      down(selfAddress)
    }
    super.onMemberRemoved(member, previousStatus)
  }

  override def downOrAddPending(member: Member): Unit = {
    if (isLeaderOf(quorumRole)) {
      down(member.address)
      replaceMember(member.copy(Down))
    } else {
      pendingAsUnreachable(member)
    }
  }

  override def downOrAddPendingAll(members: Set[Member]): Unit = {
    if (isQuorumMetAfterDown(members, quorumRole)) {
      members.foreach(downOrAddPending)
    } else if (downIfOutOfQuorum) {
      shutdownSelf()
    }
  }
} 
Example 33
Source File: MajorityLeaderAutoDowning.scala    From akka-cluster-custom-downing   with Apache License 2.0 5 votes vote down vote up
package tanukki.akka.cluster.autodown

import akka.actor.{ActorSystem, Address, Props}
import akka.cluster.{Cluster, DowningProvider}
import com.typesafe.config.Config

import scala.concurrent.Await
import scala.concurrent.duration._

class MajorityLeaderAutoDowning(system: ActorSystem) extends DowningProvider {

  private[this] val cluster = Cluster(system)

  private val config: Config = system.settings.config

  override def downRemovalMargin: FiniteDuration = {
    val key = "custom-downing.down-removal-margin"
    config.getString(key) match {
      case "off" => Duration.Zero
      case _     => Duration(config.getDuration(key, MILLISECONDS), MILLISECONDS)
    }
  }

  override def downingActorProps: Option[Props] = {
    val stableAfter = config.getDuration("custom-downing.stable-after").toMillis millis
    val majorityMemberRole = {
      val r = config.getString("custom-downing.majority-leader-auto-downing.majority-member-role")
      if (r.isEmpty) None else Some(r)
    }
    val downIfInMinority = config.getBoolean("custom-downing.majority-leader-auto-downing.down-if-in-minority")
    val shutdownActorSystem = config.getBoolean("custom-downing.majority-leader-auto-downing.shutdown-actor-system-on-resolution")
    Some(MajorityLeaderAutoDown.props(majorityMemberRole, downIfInMinority, shutdownActorSystem, stableAfter))
  }
}

private[autodown] object MajorityLeaderAutoDown {
  def props(majorityMemberRole: Option[String], downIfInMinority: Boolean, shutdownActorSystem: Boolean, autoDownUnreachableAfter: FiniteDuration): Props =
    Props(classOf[MajorityLeaderAutoDown], majorityMemberRole, downIfInMinority, shutdownActorSystem, autoDownUnreachableAfter)
}

private[autodown] class MajorityLeaderAutoDown(majorityMemberRole: Option[String], downIfInMinority: Boolean, shutdownActorSystem: Boolean, autoDownUnreachableAfter: FiniteDuration)
  extends MajorityLeaderAutoDownBase(majorityMemberRole, downIfInMinority, autoDownUnreachableAfter) with ClusterCustomDowning {

  override def down(node: Address): Unit = {
    log.info("Majority is auto-downing unreachable node [{}]", node)
    cluster.down(node)
  }

  override def shutdownSelf(): Unit = {
    if (shutdownActorSystem) {
      Await.result(context.system.terminate(), 10 seconds)
    } else {
      throw new SplitBrainResolvedError("MajorityAutoDowning")
    }
  }
} 
Example 34
Source File: QuorumLeaderAutoDowning.scala    From akka-cluster-custom-downing   with Apache License 2.0 5 votes vote down vote up
package tanukki.akka.cluster.autodown

import akka.actor.{ActorSystem, Address, Props}
import akka.cluster.{Cluster, DowningProvider}
import com.typesafe.config.Config

import scala.concurrent.Await
import scala.concurrent.duration._

class QuorumLeaderAutoDowning(system: ActorSystem) extends DowningProvider {

  private[this] val cluster = Cluster(system)

  private val config: Config = system.settings.config

  override def downRemovalMargin: FiniteDuration = {
    val key = "custom-downing.down-removal-margin"
    config.getString(key) match {
      case "off" => Duration.Zero
      case _     => Duration(config.getDuration(key, MILLISECONDS), MILLISECONDS)
    }
  }

  override def downingActorProps: Option[Props] = {
    val stableAfter = system.settings.config.getDuration("custom-downing.stable-after").toMillis millis
    val role = {
      val r = system.settings.config.getString("custom-downing.quorum-leader-auto-downing.role")
      if (r.isEmpty) None else Some(r)
    }
    val quorumSize = system.settings.config.getInt("custom-downing.quorum-leader-auto-downing.quorum-size")
    val downIfOutOfQuorum = system.settings.config.getBoolean("custom-downing.quorum-leader-auto-downing.down-if-out-of-quorum")
    val shutdownActorSystem = system.settings.config.getBoolean("custom-downing.quorum-leader-auto-downing.shutdown-actor-system-on-resolution")
    Some(QuorumLeaderAutoDown.props(role, quorumSize, downIfOutOfQuorum, shutdownActorSystem, stableAfter))
  }
}


private[autodown] object QuorumLeaderAutoDown {
  def props(quorumRole: Option[String], quorumSize: Int, downIfOutOfQuorum: Boolean, shutdownActorSystem: Boolean, autoDownUnreachableAfter: FiniteDuration): Props =
    Props(classOf[QuorumLeaderAutoDown], quorumRole, quorumSize, downIfOutOfQuorum, shutdownActorSystem, autoDownUnreachableAfter)
}

private[autodown] class QuorumLeaderAutoDown(quorumRole: Option[String], quorumSize: Int, downIfOutOfQuorum: Boolean, shutdownActorSystem: Boolean, autoDownUnreachableAfter: FiniteDuration)
  extends QuorumLeaderAutoDownBase(quorumRole, quorumSize, downIfOutOfQuorum, autoDownUnreachableAfter) with ClusterCustomDowning {

  override def down(node: Address): Unit = {
    log.info("Quorum leader is auto-downing unreachable node [{}]", node)
    cluster.down(node)
  }

  override def shutdownSelf(): Unit = {
    if (shutdownActorSystem) {
      Await.result(context.system.terminate(), 10 seconds)
    } else {
      throw new SplitBrainResolvedError("QuorumLeaderAutoDowning")
    }
  }
} 
Example 35
Source File: LeaderAutoDownRolesBase.scala    From akka-cluster-custom-downing   with Apache License 2.0 5 votes vote down vote up
package tanukki.akka.cluster.autodown

import akka.actor.Address
import akka.cluster.Member

import scala.concurrent.duration.FiniteDuration


abstract class LeaderAutoDownRolesBase(targetRoles: Set[String], autoDownUnreachableAfter: FiniteDuration)
  extends LeaderAwareCustomAutoDownBase(autoDownUnreachableAfter){


  override def onLeaderChanged(leader: Option[Address]): Unit = {
    if (isLeader) downPendingUnreachableMembers()
  }

  override def downOrAddPending(member: Member): Unit = {
    if (targetRoles.exists(role => member.hasRole(role))) {
      if (isLeader) {
        down(member.address)
      } else {
        pendingAsUnreachable(member)
      }
    }
  }

  override def downOrAddPendingAll(members: Set[Member]): Unit = {
    members.foreach(downOrAddPending)
  }
} 
Example 36
Source File: RoleLeaderAwareCustomAutoDownBase.scala    From akka-cluster-custom-downing   with Apache License 2.0 5 votes vote down vote up
package tanukki.akka.cluster.autodown

import akka.actor.Address
import akka.cluster.ClusterEvent._
import akka.event.Logging

import scala.concurrent.duration.FiniteDuration

abstract class RoleLeaderAwareCustomAutoDownBase(autoDownUnreachableAfter: FiniteDuration) extends CustomAutoDownBase(autoDownUnreachableAfter) {

  private val log = Logging(context.system, this)

  private var roleLeader: Map[String, Boolean] = Map.empty

  def isRoleLeaderOf(role: String): Boolean = roleLeader.getOrElse(role, false)

  def onRoleLeaderChanged(role: String, leader: Option[Address]): Unit = {}

  override def receiveEvent: Receive = {
    case RoleLeaderChanged(role, leaderOption) =>
      roleLeader = roleLeader + (role -> leaderOption.contains(selfAddress))
      if (isRoleLeaderOf(role)) {
        log.info("This node is the new role leader for role {}", role)
      }
      onRoleLeaderChanged(role, leaderOption)
    case UnreachableMember(m) =>
      log.info("{} is unreachable", m)
      unreachableMember(m)
    case ReachableMember(m)   =>
      log.info("{} is reachable", m)
      remove(m)
    case MemberRemoved(m, _)  =>
      log.info("{} was removed from the cluster", m)
      remove(m)
  }

  override def initialize(state: CurrentClusterState): Unit = {
    roleLeader = state.roleLeaderMap.mapValues(_.exists(_ == selfAddress)).toMap
    super.initialize(state)
  }
} 
Example 37
Source File: Master.scala    From asyspark   with MIT License 5 votes vote down vote up
package org.apache.spark.asyspark.core

import java.util.concurrent.TimeUnit

import akka.actor.{Actor, ActorLogging, ActorRef, ActorSystem, Address, Props, Terminated}
import akka.util.Timeout
import com.typesafe.config.Config
import com.typesafe.scalalogging.slf4j.StrictLogging
import org.apache.spark.asyspark.core.messages.master.{ClientList, RegisterClient, RegisterServer, ServerList}

import scala.concurrent.duration._
import scala.concurrent.{ExecutionContext, Future}



class Master extends Actor with ActorLogging {

  // Tracks the servers and clients that have registered with this master.
  var servers = Set.empty[ActorRef]
  var clients = Set.empty[ActorRef]

  override def receive: Receive = {
    case RegisterServer(server) =>
      log.info(s"Registering server ${server.path.toString}")
      println("register server")
      servers += server
      context.watch(server)
      sender ! true

    case RegisterClient(client)  =>
      log.info(s"Registering client ${sender.path.toString}")
      clients += client
      context.watch(client)
      sender ! true

    case ServerList() =>
      log.info(s"Sending current server list to ${sender.path.toString}")
      sender ! servers.toArray

    case ClientList() =>
      log.info(s"Sending current client list to ${sender.path.toString}")
      sender ! clients.toArray


    case Terminated(actor) =>
      actor match {
        case server: ActorRef if servers contains server =>
          log.info(s"Removing server ${server.path.toString}")
          servers -= server
        case client: ActorRef if clients contains client =>
          log.info(s"Removing client ${client.path.toString}")
          clients -= client
        case actor: ActorRef =>
          log.warning(s"Actor ${actor.path.toString} will be terminated for some unknown reason")
      }
  }

}

object Master extends StrictLogging {
  def run(config: Config): Future[(ActorSystem, ActorRef)] = {
    logger.debug("Starting master actor system")
    val system = ActorSystem(config.getString("asyspark.master.system"), config.getConfig("asyspark.master"))
    logger.debug("Starting master")
    val master = system.actorOf(Props[Master], config.getString("asyspark.master.name"))
    implicit val timeout = Timeout(config.getDuration("asyspark.master.startup-timeout", TimeUnit.MILLISECONDS) milliseconds)
    implicit val ec = ExecutionContext.Implicits.global
    val address = Address("akka.tcp", config.getString("asyspark.master.system"), config.getString("asyspark.master.host"),
    config.getString("asyspark.master.port").toInt)
    system.actorSelection(master.path.toSerializationFormat).resolveOne().map {
      case actor: ActorRef =>
        logger.debug("Master successfully started")
        (system, master)

    }
  }

} 
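A hypothetical usage sketch for the Master above, assuming an application.conf that supplies the asyspark.master.* keys read in run (system, name, host, port, startup-timeout) and that the message classes from the imports are on the classpath:

import akka.actor.ActorRef
import akka.pattern.ask
import akka.util.Timeout
import com.typesafe.config.ConfigFactory
import org.apache.spark.asyspark.core.messages.master.ServerList

import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.duration._

object MasterUsage extends App {
  val config = ConfigFactory.load() // must define the asyspark.master.* keys

  Master.run(config).foreach { case (system, master) =>
    implicit val timeout: Timeout = Timeout(5.seconds)
    // Ask the master for the servers registered so far; it replies with servers.toArray.
    (master ? ServerList()).mapTo[Array[ActorRef]].foreach { servers =>
      println(s"Known servers: ${servers.length}")
      system.terminate()
    }
  }
}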
Example 38
Source File: MajorityLeaderAutoDownBase.scala    From akka-cluster-custom-downing   with Apache License 2.0 5 votes vote down vote up
package tanukki.akka.cluster.autodown

import akka.actor.Address
import akka.cluster.MemberStatus.Down
import akka.cluster.{MemberStatus, Member}

import scala.concurrent.duration.FiniteDuration

abstract class MajorityLeaderAutoDownBase(majorityMemberRole: Option[String], downIfInMinority: Boolean, autoDownUnreachableAfter: FiniteDuration)
    extends MajorityAwareCustomAutoDownBase(autoDownUnreachableAfter) {

  override def onLeaderChanged(leader: Option[Address]): Unit = {
    if (majorityMemberRole.isEmpty && isLeader) downPendingUnreachableMembers()
  }

  override def onRoleLeaderChanged(role: String, leader: Option[Address]): Unit = {
    majorityMemberRole.foreach { r =>
      if (r == role && isRoleLeaderOf(r)) downPendingUnreachableMembers()
    }
  }

  override def onMemberRemoved(member: Member, previousStatus: MemberStatus): Unit = {
    if (isMajority(majorityMemberRole)) {
      if (isLeaderOf(majorityMemberRole)) {
        downPendingUnreachableMembers()
      }
    } else {
      down(selfAddress)
    }
    super.onMemberRemoved(member, previousStatus)
  }

  override def downOrAddPending(member: Member): Unit = {
    if (isLeaderOf(majorityMemberRole)) {
      down(member.address)
      replaceMember(member.copy(Down))
    } else {
      pendingAsUnreachable(member)
    }
  }

  override def downOrAddPendingAll(members: Set[Member]): Unit = {
    if (isMajorityAfterDown(members, majorityMemberRole)) {
      members.foreach(downOrAddPending)
    } else if (downIfInMinority) {
      shutdownSelf()
    }
  }
} 
Example 39
Source File: SimpleClusterListener.scala    From constructr-consul   with Apache License 2.0 5 votes vote down vote up
package com.tecsisa.constructr.coordination
package demo

import akka.actor.{ Actor, ActorLogging, Address, Props }
import akka.cluster.ClusterEvent.{ MemberEvent, MemberJoined, MemberRemoved, MemberUp, UnreachableMember }
import akka.cluster.Cluster

object SimpleClusterListener {

  case object GetMemberNodes

  final val Name = "clusterListener"

  def props: Props = Props(new SimpleClusterListener)
}

class SimpleClusterListener extends Actor with ActorLogging {
  import SimpleClusterListener._

  val cluster = Cluster(context.system)

  private var members = Set.empty[Address]

  override def preStart(): Unit =
    cluster.subscribe(self, classOf[MemberEvent], classOf[UnreachableMember])

  override def postStop(): Unit = cluster.unsubscribe(self)

  override def receive = {
    case GetMemberNodes =>
      sender() ! members
    case MemberJoined(member) =>
      log.info("Member joined: {}", member.address)
      members += member.address
    case MemberUp(member) =>
      log.info("Member up: {}", member.address)
      members += member.address
    case MemberRemoved(member, _) =>
      log.info("Member removed: {}", member.address)
      members -= member.address
  }
} 
Example 40
Source File: DemoApp.scala    From constructr-consul   with Apache License 2.0 5 votes vote down vote up
package com.tecsisa.constructr.coordination
package demo

import akka.actor.{ ActorRef, ActorSystem, Address }
import akka.http.scaladsl.Http
import akka.http.scaladsl.server.Directives
import akka.pattern.ask
import akka.stream.ActorMaterializer
import akka.util.Timeout
import com.typesafe.config.ConfigFactory

import scala.concurrent.duration.{ Duration, MILLISECONDS }

object DemoApp {

  val conf     = ConfigFactory.load()
  val hostname = conf.getString("demo.hostname")
  val httpPort = conf.getInt("demo.port")

  def main(args: Array[String]): Unit = {
    // Create an Akka system
    implicit val system = ActorSystem("ConstructR-Consul")
    import system.dispatcher
    implicit val mat = ActorMaterializer()

    // Create an actor that handles cluster domain events
    val cluster =
      system.actorOf(SimpleClusterListener.props, SimpleClusterListener.Name)
    Http().bindAndHandle(route(cluster), hostname, httpPort)
  }

  private def route(cluster: ActorRef) = {
    import Directives._
    implicit val timeout = Timeout(
      Duration(
        conf.getDuration("demo.cluster-view-timeout").toMillis,
        MILLISECONDS
      )
    )
    path("member-nodes") { // List cluster nodes
      get {
        onSuccess(
          (cluster ? SimpleClusterListener.GetMemberNodes).mapTo[Set[Address]]
        )(addresses => complete(addresses.mkString("\n")))
      }
    }
  }

} 
Example 41
Source File: MultiNodeConsulConstructrSpec.scala    From constructr-consul   with Apache License 2.0 5 votes vote down vote up
package com.tecsisa.constructr.akka.consul

import akka.actor.{ Address, AddressFromURIString }
import io.circe.Json
import io.circe.parser.parse
import java.util.Base64._

class MultiNodeConsulConstructrSpecMultiJvmNode1 extends MultiNodeConsulConstructrSpec
class MultiNodeConsulConstructrSpecMultiJvmNode2 extends MultiNodeConsulConstructrSpec
class MultiNodeConsulConstructrSpecMultiJvmNode3 extends MultiNodeConsulConstructrSpec
class MultiNodeConsulConstructrSpecMultiJvmNode4 extends MultiNodeConsulConstructrSpec
class MultiNodeConsulConstructrSpecMultiJvmNode5 extends MultiNodeConsulConstructrSpec

object MultiNodeConsulConstructrSpec {
  def toNodes(s: String): Set[Address] = {
    def jsonToNode(json: Json) = {
      val a =
        json.hcursor
          .get[String]("Key")
          .fold(throw _, identity)
          .stripPrefix("constructr/MultiNodeConstructrSpec/nodes/")
      AddressFromURIString(new String(getUrlDecoder.decode(a), "UTF-8"))
    }
    import cats.syntax.either._ // for Scala 2.11
    parse(s)
      .fold(throw _, identity)
      .as[Set[Json]]
      .getOrElse(Set.empty)
      .map(jsonToNode)
  }
}

abstract class MultiNodeConsulConstructrSpec
    extends MultiNodeConstructrSpec(
      8501,
      "/v1/kv/constructr/MultiNodeConstructrSpec?recurse",
      "/v1/kv/constructr/MultiNodeConstructrSpec/nodes?recurse",
      MultiNodeConsulConstructrSpec.toNodes
    ) 
Example 42
Source File: Cluster.scala    From zio-akka-cluster   with Apache License 2.0 5 votes vote down vote up
package zio.akka.cluster

import akka.actor.{ Actor, ActorSystem, Address, PoisonPill, Props }
import akka.cluster.ClusterEvent._
import zio.Exit.{ Failure, Success }
import zio.{ Has, Queue, Runtime, Task, ZIO }

object Cluster {

  private val cluster: ZIO[Has[ActorSystem], Throwable, akka.cluster.Cluster] =
    for {
      actorSystem <- ZIO.access[Has[ActorSystem]](_.get)
      cluster     <- Task(akka.cluster.Cluster(actorSystem))
    } yield cluster

  
  def clusterEventsWith(
    queue: Queue[ClusterDomainEvent],
    initialStateAsEvents: Boolean = false
  ): ZIO[Has[ActorSystem], Throwable, Unit] =
    for {
      rts         <- Task.runtime
      actorSystem <- ZIO.access[Has[ActorSystem]](_.get)
      _           <- Task(actorSystem.actorOf(Props(new SubscriberActor(rts, queue, initialStateAsEvents))))
    } yield ()

  private[cluster] class SubscriberActor(
    rts: Runtime[Any],
    queue: Queue[ClusterDomainEvent],
    initialStateAsEvents: Boolean
  ) extends Actor {

    val initialState: SubscriptionInitialStateMode =
      if (initialStateAsEvents) InitialStateAsEvents else InitialStateAsSnapshot
    akka.cluster.Cluster(context.system).subscribe(self, initialState, classOf[ClusterDomainEvent])

    def receive: PartialFunction[Any, Unit] = {
      case ev: ClusterDomainEvent =>
        rts.unsafeRunAsync(queue.offer(ev)) {
          case Success(_)     => ()
          case Failure(cause) => if (cause.interrupted) self ! PoisonPill // stop listening if the queue was shut down
        }
      case _                      =>
    }
  }

} 
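A minimal usage sketch for clusterEventsWith, assuming ZIO 1.x and that a Has[ActorSystem] layer is provided elsewhere when the effect is run:

import akka.actor.ActorSystem
import akka.cluster.ClusterEvent.ClusterDomainEvent
import zio.{Has, Queue, Task, ZIO}

object ClusterEventsExample {

  // Subscribes to cluster domain events and prints the first one that arrives.
  val listenOnce: ZIO[Has[ActorSystem], Throwable, Unit] =
    for {
      queue <- Queue.unbounded[ClusterDomainEvent]
      _     <- Cluster.clusterEventsWith(queue, initialStateAsEvents = true)
      event <- queue.take
      _     <- Task(println(s"First cluster event: $event"))
    } yield ()
}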
Example 43
Source File: ExecutorSystem.scala    From incubator-retired-gearpump   with Apache License 2.0 5 votes vote down vote up
package org.apache.gearpump.cluster.appmaster

import akka.actor.{ActorRef, Address, PoisonPill}

import org.apache.gearpump.cluster.scheduler.Resource
import org.apache.gearpump.cluster.worker.WorkerId
import org.apache.gearpump.util.ActorSystemBooter.BindLifeCycle

case class WorkerInfo(workerId: WorkerId, ref: ActorRef)


case class ExecutorSystem(executorSystemId: Int, address: Address, daemon: ActorRef,
    resource: Resource, worker: WorkerInfo) {
  def bindLifeCycleWith(actor: ActorRef): Unit = {
    daemon ! BindLifeCycle(actor)
  }

  def shutdown(): Unit = {
    daemon ! PoisonPill
  }
} 
Example 44
Source File: RemoteActorsProgramatically.scala    From Akka-Cookbook   with MIT License 5 votes vote down vote up
package com.packt.chapter7

import akka.actor.{ActorSystem, Address, Deploy, Props}
import akka.remote.RemoteScope

object RemoteActorsProgrammatically1 extends App {
  val actorSystem = ActorSystem("RemoteActorsProgramatically1")
}

object RemoteActorsProgrammatically2 extends App {
  val actorSystem = ActorSystem("RemoteActorsProgramatically2")
  println("Creating actor from RemoteActorsProgramatically2")
  val address = Address("akka.tcp", "RemoteActorsProgramatically1", "127.0.0.1", 2552) // this gives the same
  val actor = actorSystem.actorOf(Props[SimpleActor].withDeploy(Deploy(scope = RemoteScope(address))), "remoteActor")
  actor ! "Checking"
} 
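For the remote deployment above to work, both actor systems need classic (Netty TCP) remoting enabled and listening on the expected ports, and SimpleActor must be defined elsewhere in the chapter. A sketch of the kind of configuration involved, with illustrative values:

import com.typesafe.config.ConfigFactory

object RemoteConfigSketch {
  // Settings for RemoteActorsProgramatically1 (the deployment target on port 2552);
  // the second system would use the same block with another port, e.g. 2553.
  val remoteConfig = ConfigFactory.parseString(
    """
      |akka.actor.provider = remote
      |akka.remote.netty.tcp.hostname = "127.0.0.1"
      |akka.remote.netty.tcp.port = 2552
    """.stripMargin)
}

Each system would then be started with this configuration, e.g. ActorSystem("RemoteActorsProgramatically1", RemoteConfigSketch.remoteConfig).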
Example 45
Source File: ClusteredMultiNodeUtils.scala    From lagom   with Apache License 2.0 5 votes vote down vote up
package com.lightbend.lagom.internal.cluster

import akka.actor.ActorRef
import akka.actor.Address
import akka.cluster.Cluster
import akka.cluster.MemberStatus
import akka.remote.testconductor.RoleName
import akka.remote.testkit.MultiNodeSpec
import akka.testkit.ImplicitSender
import com.lightbend.lagom.internal.cluster.ClusterMultiNodeConfig.node1

import scala.concurrent.duration._

abstract class ClusteredMultiNodeUtils(val numOfNodes: Int, multiNodeConfig: ClusterMultiNodeConfig)
    extends MultiNodeSpec(multiNodeConfig, ClusterMultiNodeActorSystemFactory.createActorSystem())
    with STMultiNodeSpec
    with ImplicitSender {
  override def initialParticipants: Int = roles.size

  def join(from: RoleName, to: RoleName): Unit = {
    runOn(from) {
      Cluster(system).join(node(to).address)
    }
    enterBarrier(from.name + "-joined")
  }

  def fullAddress(ref: ActorRef): Address =
    if (ref.path.address.hasLocalScope) Cluster(system).selfAddress
    else ref.path.address

  protected override def atStartup(): Unit = {
    join(node1, node1)
    roles.tail.foreach(n => join(n, node1))
    within(15.seconds) {
      awaitAssert(Cluster(system).state.members.size should be(numOfNodes))
      awaitAssert(
        Cluster(system).state.members.toIndexedSeq.map(_.status).distinct should be(IndexedSeq(MemberStatus.Up))
      )
    }

    enterBarrier("startup")
  }
} 
Example 46
Source File: ClusteredTeskit.scala    From lagom   with Apache License 2.0 5 votes vote down vote up
package com.lightbend.lagom.internal.cluster

import akka.actor.ActorRef
import akka.actor.ActorSystem
import akka.actor.Address
import akka.actor.BootstrapSetup
import akka.actor.setup.ActorSystemSetup
import akka.cluster.Cluster
import akka.cluster.MemberStatus
import akka.remote.testconductor.RoleName
import com.typesafe.config.ConfigFactory
import akka.remote.testkit.MultiNodeConfig
import akka.remote.testkit.MultiNodeSpec
import akka.testkit.ImplicitSender
import com.lightbend.lagom.internal.cluster.ClusterMultiNodeConfig.node1
import com.typesafe.config.Config

import scala.concurrent.duration._

object ClusterMultiNodeConfig extends ClusterMultiNodeConfig

// this is reused in multiple multi-jvm tests. There's still some copy/paste around though.
abstract class ClusterMultiNodeConfig extends MultiNodeConfig {
  val node1 = role("node1")
  val node2 = role("node2")
  val node3 = role("node3")

  protected def systemConfig: Config =
    ConfigFactory.parseString(
      """
    akka.loglevel = INFO
    akka.actor.provider = cluster
    terminate-system-after-member-removed = 60s

    # increase default timeouts to leave wider margin for Travis.
    # 30s to 60s
    akka.testconductor.barrier-timeout=60s
    akka.test.single-expect-default = 15s

    akka.cluster.sharding.waiting-for-state-timeout = 5s

    # Don't terminate the actor system when doing a coordinated shutdown
    akka.coordinated-shutdown.terminate-actor-system = off
    akka.coordinated-shutdown.run-by-jvm-shutdown-hook = off
    akka.cluster.run-coordinated-shutdown-when-down = off

    ## The settings below are incidental because this code lives in a project that depends on lagom-cluster and
    ## lagom-akka-management-core.

    # multi-jvm tests forms the cluster programmatically
    # therefore we disable Akka Cluster Bootstrap
    lagom.cluster.bootstrap.enabled = off

    # no jvm exit on tests
    lagom.cluster.exit-jvm-when-system-terminated = off
    """
    )

  commonConfig(systemConfig)
}

// heavily inspired by AbstractClusteredPersistentEntitySpec
// this is reused in multiple multi-jvm tests. There's still some copy/paste around though.
object ClusterMultiNodeActorSystemFactory {
  // Copied from MultiNodeSpec
  private def getCallerName(clazz: Class[_]): String = {
    val s = Thread.currentThread.getStackTrace.map(_.getClassName).drop(1).dropWhile(_.matches(".*MultiNodeSpec.?$"))
    val reduced = s.lastIndexWhere(_ == clazz.getName) match {
      case -1 => s
      case z  => s.drop(z + 1)
    }
    reduced.head.replaceFirst(""".*\.""", "").replaceAll("[^a-zA-Z_0-9]", "_")
  }
  def createActorSystem(): Config => ActorSystem = { config =>
    val setup = ActorSystemSetup(BootstrapSetup(ConfigFactory.load(config)))
    ActorSystem(getCallerName(classOf[MultiNodeSpec]), setup)
  }
} 
Example 47
Source File: WorkerWatcher.scala    From SparkCore   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.deploy.worker

import akka.actor.{Actor, Address, AddressFromURIString}
import akka.remote.{AssociatedEvent, AssociationErrorEvent, AssociationEvent, DisassociatedEvent, RemotingLifecycleEvent}

import org.apache.spark.Logging
import org.apache.spark.deploy.DeployMessages.SendHeartbeat
import org.apache.spark.util.ActorLogReceive


private[spark] class WorkerWatcher(workerUrl: String)
  extends Actor with ActorLogReceive with Logging {

  override def preStart() {
    context.system.eventStream.subscribe(self, classOf[RemotingLifecycleEvent])

    logInfo(s"Connecting to worker $workerUrl")
    val worker = context.actorSelection(workerUrl)
    worker ! SendHeartbeat // need to send a message here to initiate connection
  }

  // Used to avoid shutting down JVM during tests
  private[deploy] var isShutDown = false
  private[deploy] def setTesting(testing: Boolean) = isTesting = testing
  private var isTesting = false

  // Lets us filter events only from the worker's actor system
  private val expectedHostPort = AddressFromURIString(workerUrl).hostPort
  private def isWorker(address: Address) = address.hostPort == expectedHostPort

  def exitNonZero() = if (isTesting) isShutDown = true else System.exit(-1)

  override def receiveWithLogging = {
    case AssociatedEvent(localAddress, remoteAddress, inbound) if isWorker(remoteAddress) =>
      logInfo(s"Successfully connected to $workerUrl")

    case AssociationErrorEvent(cause, localAddress, remoteAddress, inbound, _)
        if isWorker(remoteAddress) =>
      // These logs may not be seen if the worker (and associated pipe) has died
      logError(s"Could not initialize connection to worker $workerUrl. Exiting.")
      logError(s"Error was: $cause")
      exitNonZero()

    case DisassociatedEvent(localAddress, remoteAddress, inbound) if isWorker(remoteAddress) =>
      // This log message will never be seen
      logError(s"Lost connection to worker actor $workerUrl. Exiting.")
      exitNonZero()

    case e: AssociationEvent =>
      // pass through association events relating to other remote actor systems

    case e => logWarning(s"Received unexpected actor system event: $e")
  }
}
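A hypothetical sketch of how such a watcher might be started, assuming a classic-remoting worker actor path (in Spark the real URL is passed to the executor at launch):

import akka.actor.{ActorSystem, Props}

object WorkerWatcherUsage extends App {
  val system = ActorSystem("driverSystem")
  // Hypothetical worker path; the host, port and system name must match the running worker.
  val workerUrl = "akka.tcp://sparkWorker@127.0.0.1:2552/user/Worker"
  system.actorOf(Props(classOf[WorkerWatcher], workerUrl), "workerWatcher")
}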