akka.cluster.Member Scala Examples

The following examples show how to use akka.cluster.Member. Each example is drawn from an open-source project; the source file and license are noted above each snippet.
Example 1
Source File: package.scala    From NSDb   with Apache License 2.0
package io.radicalbit.nsdb
import akka.actor.Address
import akka.cluster.Member

package object cluster {

  def createNodeName(member: Member): String =
    s"${member.address.host.getOrElse("noHost")}_${member.address.port.getOrElse(0)}"

  def createNodeName(address: Address): String =
    s"${address.host.getOrElse("noHost")}_${address.port.getOrElse(0)}"

  
  def createAddress(nodeName: String): Address = {
    val splitNodeName = nodeName.split("_")
    // lift(i) guards against malformed names; Option(splitNodeName(i)) would
    // still throw if the index were out of bounds
    Address("nsdb",
            "NSDb",
            splitNodeName.lift(0).getOrElse("noHost"),
            splitNodeName.lift(1).map(_.toInt).getOrElse(0))
  }

  object PubSubTopics {
    final val COORDINATORS_TOPIC   = "coordinators"
    final val NODE_GUARDIANS_TOPIC = "node-guardians"
    final val NSDB_METRICS_TOPIC   = "nsdb-metrics"
  }
} 
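A minimal round-trip sketch (hypothetical values) showing how the two helpers relate:

import akka.actor.Address
import io.radicalbit.nsdb.cluster._

val addr     = Address("nsdb", "NSDb", "10.0.0.1", 2552)
val nodeName = createNodeName(addr)      // "10.0.0.1_2552"
assert(createAddress(nodeName) == addr)  // names of the form host_port round-trip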
Example 2
Source File: Router.scala    From akka-cluster-manager   with MIT License
package io.orkestra.cluster.routing

import io.orkestra.cluster.protocol.Response.Success.RouteeDeleted
import io.orkestra.cluster.routing.ClusterListener.DeleteRoutee

import scala.collection.immutable.Queue
import akka.actor._
import akka.cluster.{Member, Cluster}

class RouterRR(memberId: String, cluster: Cluster)
    extends Actor
    with ActorLogging {

  import RouterRR._

  var members: Queue[ActorRef] = Queue()

  var quarantineMembers: List[ActorRef] = List.empty[ActorRef]
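
  // NOTE: the helper methods referenced below (getMember, probeRoutee,
  // registerMember, removeMember, quarantineMember, recoverMember and
  // isQuarantine) are elided in this excerpt; see the full source file.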

  def receive = {

    case GetRoutee(role) =>
      sender ! Routee(getMember)

    case GetRoutees =>
      sender ! members.toList

    case RegisterRoutee(path) =>
      if (isQuarantine(path))
        recoverMember(path)
      else
        probeRoutee(path)

    case RemoveRoutee(path) =>
      removeMember(path)

    case DeleteRoutee(role, path) =>
      members = members.filterNot(_.path.toString == path)
      sender ! RouteeDeleted(role, path)

    case QuarantineRoutee(path) =>
      quarantineMember(path)

    case RecoverRoutee(path) =>
      recoverMember(path)

    case CleanQuarantine(path) =>
      quarantineCleaner(path)

    case ActorIdentity(`memberId`, Some(routeeRef)) =>
      registerMember(routeeRef)

    case ActorIdentity(`memberId`, None) =>
      log.warning(s"member with id $memberId not found")

    case Terminated(memberRef) =>
      log.info(s"Member ${memberRef.path} was Terminated")
      removeMember(memberRef.path)

  }

  
  def quarantineCleaner(path: ActorPath) = {
    log.debug(s"Quarantine is being cleaned of $path...")
    quarantineMembers.filter(_.path == path).foreach { m =>
      log.warning(s"Removing quarantined member ${m.path.address}")
      cluster.down(m.path.address)
    }
  }
}

object RouterRR {
  case class RegisterRoutee(x: ActorPath)
  case class RemoveRoutee(x: ActorPath)
  case class QuarantineRoutee(x: ActorPath)
  case class RecoverRoutee(x: ActorPath)
  case class GetRoutee(role: String)
  case object GetRoutees
  case class Routee(ref: Option[ActorRef])
  case class CleanQuarantine(path: ActorPath)
} 
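The ActorIdentity cases in the receive block are replies to an Identify probe. A minimal sketch, assuming the elided probeRoutee works the usual way (to be run inside the actor):

def probeRoutee(path: ActorPath): Unit =
  context.actorSelection(path) ! Identify(memberId)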
Example 3
Source File: TestMember.scala    From lithium   with Apache License 2.0
package akka.cluster.swissborg

import akka.actor.Address
import akka.cluster.ClusterSettings.DataCenter
import akka.cluster.{ClusterSettings, Member, MemberStatus, UniqueAddress}

object TestMember {

  def apply(address: Address, status: MemberStatus): Member =
    apply(address, status, Set.empty[String], ClusterSettings.DefaultDataCenter)

  def apply(address: Address, status: MemberStatus, dataCenter: DataCenter): Member =
    apply(address, status, Set.empty[String], dataCenter)

  def apply(address: Address, status: MemberStatus, roles: Set[String]): Member =
    apply(address, status, roles, dataCenter = ClusterSettings.DefaultDataCenter)

  def apply(address: Address, status: MemberStatus, roles: Set[String], dataCenter: DataCenter): Member =
    withUniqueAddress(UniqueAddress(address, 0L), status, roles, dataCenter)

  def withUniqueAddress(uniqueAddress: UniqueAddress,
                        status: MemberStatus,
                        roles: Set[String],
                        dataCenter: DataCenter): Member =
    new Member(uniqueAddress, Int.MaxValue, status, roles + (ClusterSettings.DcRolePrefix + dataCenter))
} 
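A short sketch (illustrative addresses and roles) of fabricating members for a test:

import akka.actor.Address
import akka.cluster.MemberStatus

val m1 = TestMember(Address("akka", "sys", "host1", 25520), MemberStatus.Up)
val m2 = TestMember(Address("akka", "sys", "host2", 25520), MemberStatus.Leaving, Set("worker"))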
Example 4
Source File: KeepOldest.scala    From lithium   with Apache License 2.0
package com.swissborg.lithium

package strategy

import akka.cluster.Member
import akka.cluster.MemberStatus._
import cats._
import cats.implicits._
import com.swissborg.lithium.implicits._


private[lithium] class KeepOldest[F[_]: Applicative](config: KeepOldestConfig) extends Strategy[F] {

  import config._

  override def takeDecision(worldView: WorldView): F[Decision] = {
    val allConsideredNodes =
      worldView.nonICNodesWithRole(role).filter(n => n.status === Up || n.status === Leaving)

    val allConsideredNodesSortedByAge = allConsideredNodes.toList.sortBy(_.member)(Member.ageOrdering)

    // If there are no nodes in the cluster with the given role the current partition is downed.
    allConsideredNodesSortedByAge.headOption
      .fold(Decision.downReachable(worldView)) {
        case node: ReachableNode =>
          if (node.status === Leaving) {
            // Nodes can change their status at the same time as a partition occurs. This is especially
            // problematic when the oldest node becomes exiting. The side that sees the oldest node as
            // leaving still considers it and will decide to down the other side. However, the other side
            // sees it as exiting, doesn't consider it, and decides to survive because, by chance, it
            // contains the "new" oldest node. To counter this, the oldest node, if leaving, is assumed
            // to have moved to exiting on the other side. If that is really the case, this prevents both
            // sides from downing each other and creating a split-brain. On the other hand, if the other
            // side didn't see the oldest node as exiting, both sides might down themselves and take down
            // the entire cluster. Better safe than sorry.
            Decision.downReachable(worldView)
          } else {
            if (downIfAlone) {
              val nbrOfConsideredReachableNodes =
                allConsideredNodes.count {
                  case _: ReachableNode => true
                  case _                => false
                }

              if (nbrOfConsideredReachableNodes > 1) {
                Decision.downUnreachable(worldView)
              } else {
                Decision.downReachable(worldView)
              }
            } else {
              Decision.downUnreachable(worldView)
            }
          }

        case node: UnreachableNode =>
          if (node.status === Leaving) {
            // See comment above
            Decision.downReachable(worldView)
          } else {
            if (downIfAlone) {
              val nbrOfConsideredUnreachableNodes = worldView.unreachableNodesWithRole(role).size

              if (nbrOfConsideredUnreachableNodes > 1) {
                Decision.downReachable(worldView)
              } else {
                Decision.downUnreachable(worldView) // down the oldest node + all non-considered unreachable nodes
              }
            } else {
              Decision.downReachable(worldView)
            }
          }
      }
      .pure[F]
  }

  override def toString: String = s"KeepOldest($config)"
} 
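Note that Member.ageOrdering sorts members oldest first, which is why headOption above yields the oldest considered node. For illustration (someMembers standing in for any collection of Member):

val sortedByAge = someMembers.toList.sorted(Member.ageOrdering) // oldest first
val oldest      = sortedByAge.headOption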
Example 5
Source File: Leadership.scala    From cave   with MIT License
package actors

import akka.actor.{Actor, ActorLogging, Address}
import akka.cluster.ClusterEvent._
import akka.cluster.{Cluster, Member}

object Leadership {
  object IsLeader
}

class Leadership(address: Address) extends Actor with ActorLogging {

  private val cluster = Cluster(context.system)
  private var members = Set.empty[Member]

  private var isLeader = false

  override def preStart(): Unit =
    cluster.subscribe(self, initialStateMode = InitialStateAsEvents,
      classOf[MemberEvent],
      classOf[UnreachableMember],
      classOf[ClusterDomainEvent])

  override def postStop(): Unit = cluster.unsubscribe(self)

  import actors.Leadership._

  def receive = {

    case IsLeader =>
      sender ! isLeader

    case state: CurrentClusterState =>
      log.warning("Initial state: " + state.leader)
      setLeader(state.leader)

    case MemberUp(member) =>
      log.warning(s"Member up($member)")
      members += member

    case MemberRemoved(member, previousStatus) =>
      log.warning(s"Member removed($member)")
      members.find(_.address == member.address) foreach (members -= _)

    case LeaderChanged(member) =>
      log.warning("Leader changed, now: " + member)
      setLeader(member)

    case e: MemberEvent =>
      log.warning(s"Member event($e)")
  }

  private def setLeader(leader: Option[Address]): Unit = {
    isLeader = leader exists (_ == address)
  }
} 
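A sketch of querying the actor (the leadership reference and timeout are assumed):

import akka.pattern.ask
import akka.util.Timeout
import scala.concurrent.duration._

implicit val timeout: Timeout = 2.seconds
val amILeader = (leadership ? Leadership.IsLeader).mapTo[Boolean]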
Example 6
Source File: Constructr.scala    From constructr   with Apache License 2.0
package de.heikoseeberger.constructr

import akka.actor.{ Actor, ActorLogging, ActorRef, Props, SupervisorStrategy, Terminated }
import akka.cluster.{ Cluster, Member }
import akka.cluster.ClusterEvent.{ InitialStateAsEvents, MemberExited, MemberLeft, MemberRemoved }
import akka.cluster.MemberStatus.Up
import de.heikoseeberger.constructr.coordination.Coordination
import scala.concurrent.duration.{ FiniteDuration, NANOSECONDS }

object Constructr {

  final val Name = "constructr"

  def props: Props =
    Props(new Constructr)
}

final class Constructr private extends Actor with ActorLogging {

  override val supervisorStrategy = SupervisorStrategy.stoppingStrategy

  private val cluster = Cluster(context.system)

  if (cluster.settings.SeedNodes.isEmpty) {
    log.info("Creating constructr-machine, because no seed-nodes defined")
    cluster.subscribe(self,
                      InitialStateAsEvents,
                      classOf[MemberLeft],
                      classOf[MemberExited],
                      classOf[MemberRemoved])
    context.become(active(context.watch(createConstructrMachine())))
  } else {
    log.info("Stopping self, because seed-nodes defined")
    context.stop(self)
  }

  override def receive = Actor.emptyBehavior

  private def active(machine: ActorRef): Receive = {
    case Terminated(`machine`) =>
      val selfAddress = cluster.selfAddress
      def isSelfAndUp(member: Member) =
        member.address == selfAddress && member.status == Up
      if (cluster.state.members.exists(isSelfAndUp)) {
        log.error("Leaving, because constructr-machine terminated!")
        cluster.leave(selfAddress)
      } else {
        log.error("Terminating system, because constructr-machine terminated!")
        context.system.terminate()
      }

    case MemberRemoved(member, _) if member.address == cluster.selfAddress =>
      log.error("Terminating system, because member has been removed!")
      context.system.terminate()
  }

  private def createConstructrMachine() = {
    val config = context.system.settings.config
    def getDuration(key: String) =
      FiniteDuration(config.getDuration(key).toNanos, NANOSECONDS)

    val coordinationTimeout   = getDuration("constructr.coordination-timeout")
    val nrOfRetries           = config.getInt("constructr.nr-of-retries")
    val retryDelay            = getDuration("constructr.retry-delay")
    val refreshInterval       = getDuration("constructr.refresh-interval")
    val ttlFactor             = config.getDouble("constructr.ttl-factor")
    val maxNrOfSeedNodes      = config.getInt("constructr.max-nr-of-seed-nodes")
    val joinTimeout           = getDuration("constructr.join-timeout")
    val abortOnJoinTimeout    = config.getBoolean("constructr.abort-on-join-timeout")
    val ignoreRefreshFailures = config.getBoolean("constructr.ignore-refresh-failures")

    context.actorOf(
      ConstructrMachine.props(
        cluster.selfAddress,
        Coordination(context.system.name, context.system),
        coordinationTimeout,
        nrOfRetries,
        retryDelay,
        refreshInterval,
        ttlFactor,
        if (maxNrOfSeedNodes <= 0) Int.MaxValue else maxNrOfSeedNodes,
        joinTimeout,
        abortOnJoinTimeout,
        ignoreRefreshFailures
      ),
      ConstructrMachine.Name
    )
  }
} 
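Starting the supervisor is then a one-liner, using the companion defined above (system assumed in scope):

val constructr = system.actorOf(Constructr.props, Constructr.Name)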
Example 7
Source File: TransformationBackend.scala    From fusion-data   with Apache License 2.0
package sample.cluster.transformation

import java.util.concurrent.TimeUnit

import akka.actor.{ Actor, ActorSystem, Props, RootActorPath }
import akka.cluster.ClusterEvent.{ CurrentClusterState, MemberUp }
import akka.cluster.{ Cluster, Member, MemberStatus }
import com.typesafe.config.ConfigFactory

import scala.concurrent.ExecutionContext.Implicits
import scala.concurrent.Future
import scala.util.Random

//#backend
class TransformationBackend extends Actor {
  val cluster = Cluster(context.system)

  // subscribe to cluster changes, MemberUp
  // re-subscribe when restart
  override def preStart(): Unit = cluster.subscribe(self, classOf[MemberUp])
  override def postStop(): Unit = cluster.unsubscribe(self)

  def receive = {
    case TransformationJob(text) =>
      sender() ! TransformationResult(text.toUpperCase)
    case state: CurrentClusterState =>
      state.members.filter(_.status == MemberStatus.Up) foreach register
    case MemberUp(m) => register(m)
  }

  def register(member: Member): Unit =
    if (member.hasRole("frontend"))
      context.actorSelection(RootActorPath(member.address) / "user" / "frontend") !
      BackendRegistration
}
//#backend

object TransformationBackend {
  def main(args: Array[String]): Unit = {
    // Override the configuration of the port when specified as program argument
    val port = if (args.isEmpty) "0" else args(0)
    val config = ConfigFactory
      .parseString(s"""
        akka.remote.netty.tcp.port=$port
        akka.remote.artery.canonical.port=$port
        """)
      .withFallback(ConfigFactory.parseString("akka.cluster.roles = [backend]"))
      .withFallback(ConfigFactory.load("simple-cluster"))

    val system = ActorSystem("ClusterSystem", config)
    system.actorOf(Props[TransformationBackend], name = "backend")
    Future {
      TimeUnit.SECONDS.sleep(10)
      TimeUnit.SECONDS.sleep(Random.nextInt(50))
      system.terminate()
    }(Implicits.global)
  }
} 
Example 8
Source File: JobSchedulerNode.scala    From lemon-schedule   with GNU General Public License v2.0
package com.gabry.job.scheduler.node

import akka.actor.{ActorRef, Props}
import akka.cluster.Member
import com.gabry.job.core.command.JobSchedulerCommand
import com.gabry.job.core.event.TaskTrackerEvent
import com.gabry.job.core.node.{ClusterNode, ClusterNodeProps}
import com.gabry.job.db.factory.DatabaseFactory
import com.gabry.job.db.proxy.DataAccessProxy
import com.gabry.job.scheduler.actor.{JobSchedulerActor, JobTaskAggregatorActor, JobTaskDispatcherActor}

import scala.concurrent.ExecutionContextExecutor


class JobSchedulerNode extends ClusterNode{
  private var schedulerActor:ActorRef = _
  private var dispatcherActor:ActorRef = _
  private var aggregatorActor:ActorRef = _
  private val dataAccessFactory = DatabaseFactory.getDataAccessFactory(config).get
  private var dataAccessProxy:ActorRef = _
  private implicit lazy val databaseIoExecutionContext: ExecutionContextExecutor = context.system.dispatchers.lookup("akka.actor.database-io-dispatcher")

  override def preStart(): Unit = {
    super.preStart()
    dataAccessFactory.init()
    dataAccessProxy = context.actorOf(DataAccessProxy.props(databaseIoExecutionContext),"dataAccessProxy")
    context.watch(dataAccessProxy)

    schedulerActor = context.actorOf(JobSchedulerActor.props(dataAccessProxy,selfAnchor),"schedulerActor")

    context.watch(schedulerActor)
    aggregatorActor = context.actorOf(JobTaskAggregatorActor.props(dataAccessProxy,selfAnchor),"aggregatorActor")

    context.watch(aggregatorActor)
    dispatcherActor = context.actorOf(JobTaskDispatcherActor.props(dataAccessProxy,selfAnchor,aggregatorActor),"dispatcherActor")

    context.watch(dispatcherActor)

  }

  override def postStop(): Unit = {
    super.postStop()
    dataAccessFactory.destroy()
    context.stop(schedulerActor)
    context.stop(dispatcherActor)
    context.stop(aggregatorActor)
    context.stop(dataAccessProxy)

  }
  override def userDefineEventReceive: Receive = {
    case cmd @ JobSchedulerCommand.ScheduleJob(job,replyTo) =>
      schedulerActor ! cmd
    case cmd @ JobSchedulerCommand.StopScheduleJob(job) =>
      schedulerActor ! cmd
    case evt @ TaskTrackerEvent.TaskTrackerStarted(taskTracker) =>
      log.info(s"TaskTracker启动 $taskTracker")
      dispatcherActor ! evt
    case evt @ TaskTrackerEvent.TaskTrackerStopped(taskTracker) =>
      log.info(s"TaskTracker停止 $taskTracker")
      dispatcherActor ! evt
  }

  override def register(member: Member): Unit = {

  }

  override def unRegister(member: Member): Unit = {

  }
} 
Example 9
Source File: JobTrackerNode.scala    From lemon-schedule   with GNU General Public License v2.0
package com.gabry.job.manager.node

import akka.actor.{ActorRef, Props, RootActorPath}
import akka.cluster.Member
import akka.routing.{ActorSelectionRoutee, RoundRobinRoutingLogic, Router}
import com.gabry.job.core.command.{JobSchedulerCommand, JobTrackerCommand}
import com.gabry.job.core.constant.Constants
import com.gabry.job.core.domain.{Dependency, UID}
import com.gabry.job.core.event.JobTrackerEvent
import com.gabry.job.core.node.{ClusterNode, ClusterNodeProps}
import com.gabry.job.core.po.DependencyPo
import com.gabry.job.db.proxy.DataAccessProxy
import com.gabry.job.manager.actor.JobTrackerActor

import scala.concurrent.ExecutionContextExecutor

// NOTE: the class declaration and the opening of userDefineEventReceive were
// elided in this excerpt; a minimal reconstruction (assumption) follows so
// that the snippet parses.
class JobTrackerNode extends ClusterNode {
  private var jobTracker: ActorRef = _                           // created in preStart (elided)
  private var schedulerRouter = Router(RoundRobinRoutingLogic()) // routees managed in register/unRegister

  override def userDefineEventReceive: Receive = {
    case originCmd @ JobTrackerCommand.SubmitJob(job,_,_) =>
      log.debug(s"Receive SubmitJob Command $originCmd")
      jobTracker ! originCmd
    case JobTrackerCommand.ScheduleJob(job,replyTo) =>
      if(schedulerRouter.routees.nonEmpty){
        schedulerRouter.route(JobSchedulerCommand.ScheduleJob(job,self),self)
        log.info(s"Send ScheduleJob command to scheduler job.id = ${job.uid}")
        // Send the Job object, updated after insertion, back to the replyTo actor
        replyTo ! JobTrackerEvent.JobSubmitted(job)
      }else{
        replyTo ! JobTrackerEvent.JobSubmitFailed("No Scheduler node found")
      }
  }
  override def register(member: Member): Unit = {
    if(member.hasRole(Constants.ROLE_SCHEDULER_NAME)){
      val scheduleNode = context.system.actorSelection(RootActorPath(member.address)/ "user" / Constants.ROLE_SCHEDULER_NAME)
      schedulerRouter = schedulerRouter.addRoutee(scheduleNode)
    }
  }

  override def unRegister(member: Member): Unit = {
    if(member.hasRole(Constants.ROLE_SCHEDULER_NAME)){
      val scheduleNode = context.system.actorSelection(RootActorPath(member.address)/ "user" / Constants.ROLE_SCHEDULER_NAME)
      schedulerRouter = schedulerRouter.removeRoutee(scheduleNode)
    }
  }
} 
Example 10
Source File: TaskWorkerNode.scala    From lemon-schedule   with GNU General Public License v2.0
package com.gabry.job.worker.node

import akka.actor.{Props, RootActorPath}
import akka.cluster.Member
import com.gabry.job.core.command.TaskWorkerCommand
import com.gabry.job.core.constant.Constants
import com.gabry.job.core.domain.{TaskClassInfo, TaskTrackerInfo}
import com.gabry.job.core.event.TaskTrackerEvent
import com.gabry.job.core.node.{ClusterNode, ClusterNodeProps}
import com.gabry.job.utils.Utils
import com.gabry.job.worker.tracker.TaskTrackerActor

import scala.collection.JavaConverters._


class TaskWorkerNode extends ClusterNode{

  override def preStart(): Unit = {
    super.preStart()
    // The jars configured in the configuration file
    val jars = config.getConfigList("task-tracker.jars").asScala
    jars.foreach{ jar =>
      val classInfo = jar.getConfigList("classInfo").asScala.map{ clasInfo =>
        val parallel = if(clasInfo.getInt("parallel")<1) Int.MaxValue else clasInfo.getInt("parallel")
        TaskClassInfo(clasInfo.getString("name"),parallel,clasInfo.getDuration("time-out").getSeconds)
      }.toArray

      val taskTrackerInfo = TaskTrackerInfo(clusterName
        ,jar.getString("group-name")
        ,jar.getString("path")
        ,classInfo)
      log.info(s"taskTrackerInfo is $taskTrackerInfo")
      // For each class configured in the jar, send StartTaskTracker to start a TaskTracker
      self ! TaskWorkerCommand.StartTaskTracker(taskTrackerInfo,self)
    }
  }

  override def postStop(): Unit = {
    super.postStop()

  }
  override def userDefineEventReceive: Receive = {
    case TaskWorkerCommand.StartTaskTracker(taskTrackerInfo,replyTo) =>

      val taskTracker = context.actorOf(Props.create(classOf[TaskTrackerActor],taskTrackerInfo)
        ,taskTrackerInfo.group)

      context.watchWith(taskTracker,TaskTrackerEvent.TaskTrackerStopped(taskTracker))

      replyTo ! TaskTrackerEvent.TaskTrackerStarted(taskTracker)

    case evt @ TaskTrackerEvent.TaskTrackerStarted(taskTracker) =>
      log.info(s"task tracker [$taskTracker] started at ${evt.at}")
    case evt @ TaskTrackerEvent.TaskTrackerStopped(taskTracker) =>
      val stopAt = System.currentTimeMillis()
      log.warning(s"task tracker [$taskTracker] alive time is ${Utils.formatAliveTime(evt.at,stopAt)}")
      // Notify the scheduler nodes that a TaskTracker has exited
      currentMembers.filter(_.hasRole(Constants.ROLE_SCHEDULER_NAME))
        .map(member=>context.actorSelection(RootActorPath(member.address)/ "user" / Constants.ROLE_SCHEDULER_NAME))
        .foreach( _ ! evt )
  }

  override def register(member: Member): Unit = {
    log.info(s"member register $member")
    if(member.hasRole(Constants.ROLE_SCHEDULER_NAME)){
      // When a scheduler node joins, report this worker's TaskTrackers to it
      log.info(s"scheduler node address = ${RootActorPath(member.address)/ "user" / Constants.ROLE_SCHEDULER_NAME}")
      val scheduler = context.actorSelection(RootActorPath(member.address)/ "user" / Constants.ROLE_SCHEDULER_NAME)
      context.children.foreach{ taskTracker =>
        scheduler ! TaskTrackerEvent.TaskTrackerStarted(taskTracker)
      }
    }
  }

  override def unRegister(member: Member): Unit = {
    log.info(s"member unRegister $member")
  }
} 
Example 11
Source File: ClusterStateRoute.scala    From akka-kubernetes-tests   with Apache License 2.0
package akka.kubernetes.sample

import akka.cluster.{Cluster, Member, MemberStatus}
import akka.management.cluster.{
  ClusterHttpManagementJsonProtocol,
  ClusterMembers,
  ClusterReadViewAccess,
  ClusterUnreachableMember
}

// Just want the read view
object ClusterStateRoute extends ClusterHttpManagementJsonProtocol {

  import akka.http.scaladsl.server.Directives._
  import akka.management.cluster.ClusterHttpManagementHelper._

  def routeGetMembers(cluster: Cluster) =
    path("cluster" / "members") {
      get {
        complete {
          val readView = ClusterReadViewAccess.internalReadView(cluster)
          val members = readView.state.members.map(memberToClusterMember)

          val unreachable = readView.reachability.observersGroupedByUnreachable.toSeq
            .sortBy(_._1)
            .map {
              case (subject, observers) =>
                ClusterUnreachableMember(s"${subject.address}", observers.toSeq.sorted.map(m => s"${m.address}").toList)
            }
            .toList

          val thisDcMembers =
            cluster.state.members.toSeq
              .filter(node => node.status == MemberStatus.Up && node.dataCenter == cluster.selfDataCenter)

          val leader = readView.leader.map(_.toString)

          val oldest = if (thisDcMembers.isEmpty) None else Some(thisDcMembers.min(Member.ageOrdering).address.toString)

          ClusterMembers(s"${readView.selfAddress}", members, unreachable, leader, oldest, oldestPerRole(thisDcMembers))
        }
      }
    }

} 
Example 12
Source File: MemberInfo.scala    From asura   with MIT License
package asura.cluster.model

import akka.cluster.Member
import asura.common.util.StringUtils

case class MemberInfo(
                       roles: Seq[String],
                       address: String,
                       protocol: String,
                       port: Int,
                       status: String,
                     )

object MemberInfo {

  def fromMember(m: Member): MemberInfo = {
    MemberInfo(
      roles = m.roles.toSeq,
      protocol = m.address.protocol,
      address = m.address.host.getOrElse(StringUtils.EMPTY),
      port = m.address.port.getOrElse(0),
      status = m.status.toString,
    )
  }
} 
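A sketch of converting the current cluster state (an ActorSystem named system is assumed):

import akka.cluster.Cluster

val infos = Cluster(system).state.members.toSeq.map(MemberInfo.fromMember)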
Example 13
Source File: MemberListenerActor.scala    From asura   with MIT License
package asura.cluster.actor

import akka.actor.Props
import akka.cluster.ClusterEvent._
import akka.cluster.{Cluster, Member, MemberStatus}
import asura.cluster.actor.MemberListenerActor.GetAllMembers
import asura.cluster.model.MemberInfo
import asura.common.actor.BaseActor

class MemberListenerActor extends BaseActor {

  val cluster = Cluster(context.system)
  var nodes = Set.empty[Member]

  override def preStart(): Unit = {
    cluster.subscribe(self, classOf[MemberEvent])
  }

  override def postStop(): Unit = {
    cluster.unsubscribe(self)
  }

  override def receive: Receive = {
    case state: CurrentClusterState =>
      nodes = state.members.collect {
        case m if m.status == MemberStatus.Up => m
      }
    case MemberUp(member) =>
      log.info("Member({}) is Up: {}", member.roles.mkString(","), member.address)
      nodes += member
    case MemberRemoved(member, previousStatus) =>
      log.info("Member is Removed: {} after {}", member.address, previousStatus)
      nodes -= member
    case _: MemberEvent =>
    case GetAllMembers =>
      sender() ! nodes.map(MemberInfo.fromMember(_))
  }

}

object MemberListenerActor {
  def props() = Props(new MemberListenerActor())

  // A case object (rather than case class GetAllMembers()) so that the
  // `case GetAllMembers =>` pattern above matches messages by identity
  case object GetAllMembers

} 
Example 14
Source File: RoleLeaderAutoDownRolesBase.scala    From akka-cluster-custom-downing   with Apache License 2.0
package tanukki.akka.cluster.autodown

import akka.actor.Address
import akka.cluster.Member
import scala.concurrent.duration.FiniteDuration

abstract class RoleLeaderAutoDownRolesBase(leaderRole: String, targetRoles: Set[String], autoDownUnreachableAfter: FiniteDuration)
  extends RoleLeaderAwareCustomAutoDownBase(autoDownUnreachableAfter){


  override def onRoleLeaderChanged(role: String, leader: Option[Address]): Unit = {
    if (leaderRole == role && isRoleLeaderOf(leaderRole)) downPendingUnreachableMembers()
  }

  override def downOrAddPending(member: Member): Unit = {
    if (targetRoles.exists(role => member.hasRole(role))) {
      if (isRoleLeaderOf(leaderRole)) {
        down(member.address)
      } else {
        pendingAsUnreachable(member)
      }
    }
  }

  override def downOrAddPendingAll(members: Set[Member]): Unit = {
    members.foreach(downOrAddPending)
  }
} 
Example 15
Source File: SurvivalDecider.scala    From simple-akka-downing   with Apache License 2.0
package com.ajjpj.simpleakkadowning

import akka.actor.Address
import akka.cluster.{Member, UniqueAddress}
import com.ajjpj.simpleakkadowning.SurvivalDecider.ClusterState
import com.typesafe.config.Config

import scala.collection.Set
import scala.collection.immutable.SortedSet


trait SurvivalDecider {
  def isInMinority(clusterState: ClusterState, selfAddress: Address): Boolean
}

object SurvivalDecider {
  private val memberOrdering = new Ordering[ClusterMemberInfo] {
    override def compare (x: ClusterMemberInfo, y: ClusterMemberInfo) =
      Member.addressOrdering.compare(x.uniqueAddress.address, y.uniqueAddress.address)
  }

  case class ClusterMemberInfo(uniqueAddress: UniqueAddress, roles: Set[String], member: Member)
  case class ClusterState(upMembers: Set[ClusterMemberInfo], unreachable: Set[UniqueAddress]) {
    lazy val sortedUpMembers = SortedSet.empty(memberOrdering) ++  upMembers
    lazy val sortedUpAndReachable = sortedUpMembers.filterNot (x => unreachable.contains(x.uniqueAddress))
    lazy val upReachable = upMembers.filterNot(x => unreachable(x.uniqueAddress))
    lazy val upUnreachable = upMembers.filter(x => unreachable(x.uniqueAddress))
  }


  def apply(config: Config): SurvivalDecider = {
    val cc = config.getConfig("simple-akka-downing")

    cc.getString("active-strategy") match {
      case "static-quorum" =>
        val ccc = cc.getConfig("static-quorum")
        val quorumSize = ccc.getInt("quorum-size")
        val role = ccc.getString("role") match {
          case r if r.trim.isEmpty => None
          case r => Some(r)
        }
        new FixedQuorumDecider(quorumSize, role)
      case "keep-majority" =>
        val ccc = cc.getConfig("keep-majority")
        val role = ccc.getString("role") match {
          case r if r.trim.isEmpty => None
          case r => Some(r)
        }
        new KeepMajorityDecider(role)
      case "keep-oldest" =>
        val ccc = cc.getConfig("keep-oldest")
        val downIfAlone = ccc.getBoolean("down-if-alone")
        new KeepOldestDecider(downIfAlone)
      case other =>
        // fail fast with a meaningful message instead of an opaque MatchError
        throw new IllegalArgumentException(s"Unknown active-strategy: $other")
    }
  }


  class FixedQuorumDecider(quorumSize: Int, role: Option[String]) extends SurvivalDecider {
    override def isInMinority(clusterState: ClusterState, selfAddress: Address) = {
      val relevantMembers = role match {
        case Some (r) => clusterState.upMembers.filter (_.roles contains r)
        case None =>     clusterState.upMembers
      }

      (relevantMembers -- clusterState.upUnreachable).size < quorumSize
    }
  }

  class KeepMajorityDecider(role: Option[String]) extends SurvivalDecider {
    override def isInMinority (clusterState: ClusterState, selfAddress: Address) = {
      role match {
        case Some(r) =>
          val all = clusterState.upMembers.filter(_.roles contains r)
          val unreachable = clusterState.upUnreachable.filter(_.roles contains r)
          all.size <= 2*unreachable.size
        case None =>
          clusterState.upMembers.size <= 2*clusterState.upUnreachable.size
      }
    }
  }

  class KeepOldestDecider(downIfAlone: Boolean) extends SurvivalDecider {
    override def isInMinority (clusterState: ClusterState, selfAddress: Address) = {
      val allRelevant = clusterState.upMembers
      val oldestRelevant = allRelevant.foldLeft(allRelevant.head)((a, b) => if (a.member isOlderThan b.member) a else b)

      if (downIfAlone) {
        clusterState.upReachable match {
          case s if s == Set(oldestRelevant) => true                                       // only the oldest node --> terminate
          case _ if clusterState.unreachable == Set(oldestRelevant.uniqueAddress) => false // the oldest node is the only unreachable node --> survive
          case _ => clusterState.unreachable contains oldestRelevant.uniqueAddress
        }
      }
      else {
        clusterState.unreachable contains oldestRelevant.uniqueAddress
      }
    }
  }
} 
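A minimal sketch of the configuration shape the factory expects, with keys inferred from the code above:

import com.typesafe.config.ConfigFactory

val decider = SurvivalDecider(ConfigFactory.parseString(
  """simple-akka-downing {
    |  active-strategy = keep-majority
    |  keep-majority.role = ""
    |}""".stripMargin))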
Example 16
Source File: Formatters.scala    From CM-Well   with Apache License 2.0
package k.grid

import akka.cluster.Member
import k.grid.monitoring.{ActiveActors, SingletonData}


object Formatters {
  val pad = 60
  def taple(fields: String*): String =
    fields.map(_.padTo(pad, ' ')).mkString(" | ") + "\n"

  def memberFormatter(member: Member, isLeader: Boolean): String = {
    taple(member.address.toString + (if (isLeader) "*" else ""), member.roles.mkString(", "), member.status.toString)
  }

  def membersFormatter(members: Map[GridJvm, JvmInfo]): String = {
    taple("Address") +
      members.toSeq.sortBy(m => m._1.hostname).map(m => taple(m._1.hostname)).mkString +
      s"Total: ${members.size}"
  }

  def singletonsFormatter(singletons: Set[SingletonData]): String = {
    taple("Singleton", "Role", "Location") + singletons.map(s => taple(s.name, s.role, s.location)).mkString
  }

  def activeActorsFormatter(actors: Set[ActiveActors]): String = {
    taple("Member", "Actor", "Latency") +
      actors.map(a => a.actors.map(aa => taple(a.host, aa.name, aa.latency.toString)).mkString).mkString
  }
} 
Example 17
Source File: DocSvr.scala    From Raphtory   with Apache License 2.0
package com.raphtory.core.clustersetup

import akka.actor.ActorSystem
import akka.actor.Address
import akka.actor.ExtendedActorSystem
import akka.cluster.Cluster
import akka.cluster.Member
import akka.event.LoggingAdapter
import akka.management.cluster.bootstrap.ClusterBootstrap
import akka.management.javadsl.AkkaManagement
import com.raphtory.core.clustersetup.util.ConfigUtils._
import com.raphtory.core.utils.Utils
import com.typesafe.config.Config
import com.typesafe.config.ConfigFactory
import com.typesafe.config.ConfigValueFactory

import scala.collection.JavaConversions._

trait DocSvr {

  def seedLoc: String

  implicit val system: ActorSystem
  val docker = System.getenv().getOrDefault("DOCKER", "false").trim.toBoolean

  val clusterSystemName: String = Utils.clusterSystemName
  val ssn: String               = java.util.UUID.randomUUID.toString

  
  def printConfigInfo(config: Config, system: ActorSystem): Unit = {
    val log: LoggingAdapter = system.log

    val systemConfig: SystemConfig = config.parse()
    val bindAddress: SocketAddress = systemConfig.bindAddress
    val tcpAddress: SocketAddress  = systemConfig.tcpAddress

    log.info(s"Created ActorSystem with ID: $ssn")

    log.info(s"Binding ActorSystem internally to address ${bindAddress.host}:${bindAddress.port}")
    log.info(s"Binding ActorSystem externally to host ${tcpAddress.host}:${tcpAddress.port}")

    log.info(s"Registering the following seeds to ActorSystem: ${systemConfig.seeds}")
    log.info(s"Registering the following roles to ActorSystem: ${systemConfig.roles}")

    // FIXME: This is bit unorthodox ...
    val akkaSystemUrl: Address = system.asInstanceOf[ExtendedActorSystem].provider.getDefaultAddress
    log.info(s"ActorSystem successfully initialised at the following Akka URL: $akkaSystemUrl")
  }
} 
Example 18
Source File: ClusterHttpManagementHelperSpec.scala    From akka-management   with Apache License 2.0
package akka.cluster.management

// Accesses private[cluster] so has to be in this package

import akka.actor.Address
import akka.cluster.MemberStatus._
import akka.cluster.{ Member, UniqueAddress }
import akka.management.cluster.ClusterHttpManagementHelper
import org.scalatest.{ Matchers, WordSpec }

class ClusterHttpManagementHelperSpec extends WordSpec with Matchers {

  "Oldest nodes per role" must {
    "work" in {
      val dc = "dc-one"
      val address1 = Address("akka", "Main", "hostname.com", 3311)
      val address2 = Address("akka", "Main", "hostname2.com", 3311)
      val address3 = Address("akka", "Main", "hostname3.com", 3311)
      val address4 = Address("akka", "Main", "hostname4.com", 3311)

      val uniqueAddress1 = UniqueAddress(address1, 1L)
      val uniqueAddress2 = UniqueAddress(address2, 2L)
      val uniqueAddress3 = UniqueAddress(address3, 3L)
      val uniqueAddress4 = UniqueAddress(address4, 4L)

      val clusterMember1 = new Member(uniqueAddress1, 1, Up, Set("one", "two", dc))
      val clusterMember2 = new Member(uniqueAddress2, 2, Joining, Set("one", "two", dc))
      val clusterMember3 = new Member(uniqueAddress3, 3, Joining, Set("three", dc))
      val clusterMember4 = new Member(uniqueAddress4, 4, Joining, Set(dc))

      val members = Seq(clusterMember1, clusterMember2, clusterMember3, clusterMember4)

      ClusterHttpManagementHelper.oldestPerRole(members) shouldEqual Map(
        "one" -> address1.toString,
        "two" -> address1.toString,
        "three" -> address3.toString,
        dc -> address1.toString
      )
    }
  }

} 
Example 19
Source File: ClusterHttpManagementHelper.scala    From akka-management   with Apache License 2.0
package akka.management.cluster

import akka.cluster.Member

object ClusterHttpManagementHelper {
  def memberToClusterMember(m: Member): ClusterMember =
    ClusterMember(s"${m.address}", s"${m.uniqueAddress.longUid}", s"${m.status}", m.roles)

  private[akka] def oldestPerRole(thisDcMembers: Seq[Member]): Map[String, String] = {
    val roles: Set[String] = thisDcMembers.flatMap(_.roles).toSet
    roles.map(role => (role, oldestForRole(thisDcMembers, role))).toMap
  }

  private def oldestForRole(cluster: Seq[Member], role: String): String = {
    val forRole = cluster.filter(_.roles.contains(role))

    if (forRole.isEmpty)
      "<unknown>"
    else
      forRole.min(Member.ageOrdering).address.toString

  }
} 
Example 20
Source File: HttpClusterBootstrapRoutes.scala    From akka-management   with Apache License 2.0
package akka.management.cluster.bootstrap.contactpoint

import scala.concurrent.duration._

import akka.actor.ActorSystem
import akka.cluster.Cluster
import akka.cluster.Member
import akka.event.Logging
import akka.event.LoggingAdapter
import akka.http.javadsl.server.directives.RouteAdapter
import akka.http.scaladsl.model.HttpRequest
import akka.http.scaladsl.model.Uri
import akka.http.scaladsl.server.Route
import akka.management.cluster.bootstrap.ClusterBootstrapSettings
import akka.management.cluster.bootstrap.contactpoint.HttpBootstrapJsonProtocol.ClusterMember
import akka.management.cluster.bootstrap.contactpoint.HttpBootstrapJsonProtocol.SeedNodes

final class HttpClusterBootstrapRoutes(settings: ClusterBootstrapSettings) extends HttpBootstrapJsonProtocol {

  import akka.http.scaladsl.server.Directives._

  private def routeGetSeedNodes: Route = extractClientIP { clientIp =>
    extractActorSystem { implicit system =>
      import akka.cluster.MemberStatus
      val cluster = Cluster(system)

      def memberToClusterMember(m: Member): ClusterMember =
        ClusterMember(m.uniqueAddress.address, m.uniqueAddress.longUid, m.status.toString, m.roles)

      val state = cluster.state

      // TODO shuffle the members so in a big deployment nodes start joining different ones and not all the same?
      val members = state.members
        .diff(state.unreachable)
        .filter(m =>
          m.status == MemberStatus.up || m.status == MemberStatus.weaklyUp || m.status == MemberStatus.joining)
        .take(settings.contactPoint.httpMaxSeedNodesToExpose)
        .map(memberToClusterMember)

      val info = SeedNodes(cluster.selfMember.uniqueAddress.address, members)
      log.info(
        "Bootstrap request from {}: Contact Point returning {} seed-nodes [{}]",
        clientIp,
        members.size,
        members.map(_.node).mkString(", "))
      complete(info)
    }
  }

  
  // Reconstructed (assumption): the aggregate routes value was elided in this
  // excerpt; in the full source it combines the bootstrap routes.
  val routes: Route = concat(routeGetSeedNodes)

  def getRoutes: akka.http.javadsl.server.Route = RouteAdapter(routes)

  private def log(implicit sys: ActorSystem): LoggingAdapter =
    Logging(sys, classOf[HttpClusterBootstrapRoutes])

}

object ClusterBootstrapRequests {

  import akka.http.scaladsl.client.RequestBuilding._

  def bootstrapSeedNodes(baseUri: Uri): HttpRequest =
    Get(baseUri + "/bootstrap/seed-nodes")

} 
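A sketch of probing a contact point with the request builder above (address and port are assumed):

import akka.actor.ActorSystem
import akka.http.scaladsl.Http
import akka.http.scaladsl.model.Uri

implicit val system: ActorSystem = ActorSystem("probe")
val response = Http().singleRequest(
  ClusterBootstrapRequests.bootstrapSeedNodes(Uri("http://10.0.0.2:8558")))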
Example 21
Source File: StorageNodeActor.scala    From JustinDB   with Apache License 2.0
package justin.db.actors

import akka.actor.{Actor, ActorRef, Props, RootActorPath, Terminated}
import akka.cluster.ClusterEvent.{CurrentClusterState, MemberUp}
import akka.cluster.{Cluster, Member, MemberStatus}
import com.typesafe.scalalogging.StrictLogging
import justin.db.actors.protocol.{RegisterNode, _}
import justin.db.cluster.ClusterMembers
import justin.db.cluster.datacenter.Datacenter
import justin.db.consistenthashing.{NodeId, Ring}
import justin.db.replica._
import justin.db.replica.read.{ReplicaLocalReader, ReplicaReadCoordinator, ReplicaRemoteReader}
import justin.db.replica.write.{ReplicaLocalWriter, ReplicaRemoteWriter, ReplicaWriteCoordinator}
import justin.db.storage.PluggableStorageProtocol

import scala.concurrent.ExecutionContext

class StorageNodeActor(nodeId: NodeId, datacenter: Datacenter, storage: PluggableStorageProtocol, ring: Ring, n: N) extends Actor with StrictLogging {

  private[this] implicit val ec: ExecutionContext = context.dispatcher
  private[this] val cluster = Cluster(context.system)

  private[this] var clusterMembers   = ClusterMembers.empty
  private[this] val readCoordinator  = new ReplicaReadCoordinator(nodeId, ring, n, new ReplicaLocalReader(storage), new ReplicaRemoteReader)
  private[this] val writeCoordinator = new ReplicaWriteCoordinator(nodeId, ring, n, new ReplicaLocalWriter(storage), new ReplicaRemoteWriter)

  private[this] val coordinatorRouter = context.actorOf(
    props = RoundRobinCoordinatorRouter.props(readCoordinator, writeCoordinator),
    name  = RoundRobinCoordinatorRouter.routerName
  )

  private[this] val name = self.path.name

  override def preStart(): Unit = cluster.subscribe(this.self, classOf[MemberUp])
  override def postStop(): Unit = cluster.unsubscribe(this.self)

  def receive: Receive = {
    receiveDataPF orElse receiveClusterDataPF orElse receiveRegisterNodePR orElse notHandledPF
  }

  private[this] def receiveDataPF: Receive = {
    case readReq: StorageNodeReadRequest              =>
      coordinatorRouter ! ReadData(sender(), clusterMembers, readReq)
    case writeLocalDataReq: StorageNodeWriteDataLocal =>
      coordinatorRouter ! WriteData(sender(), clusterMembers, writeLocalDataReq)
    case writeClientReplicaReq: Internal.WriteReplica =>
      coordinatorRouter ! WriteData(sender(), clusterMembers, writeClientReplicaReq)
  }

  private[this] def receiveClusterDataPF: Receive = {
    case "members"                  => sender() ! clusterMembers
    case MemberUp(member)           => register(nodeId, ring, member)
    case state: CurrentClusterState => state.members.filter(_.status == MemberStatus.Up).foreach(member => register(nodeId, ring, member))
    case Terminated(actorRef)       => clusterMembers = clusterMembers.removeByRef(StorageNodeActorRef(actorRef))
  }

  private[this] def receiveRegisterNodePR: Receive = {
    case RegisterNode(senderNodeId) if clusterMembers.notContains(senderNodeId) =>
      val senderRef = sender()
      context.watch(senderRef)
      clusterMembers = clusterMembers.add(senderNodeId, StorageNodeActorRef(senderRef))
      senderRef ! RegisterNode(nodeId)
      logger.info(s"Actor[$name]: Successfully registered node [id-${senderNodeId.id}]")
    case RegisterNode(senderNodeId) =>
      logger.info(s"Actor[$name]: Node [id-${senderNodeId.id}] is already registered")
  }

  private[this] def register(nodeId: NodeId, ring: Ring, member: Member) = {
    (member.hasRole(StorageNodeActor.role), datacenter.name == member.dataCenter) match {
      case (true, true) => register()
      case (_,   false) => logger.info(s"Actor[$name]: $member doesn't belong to datacenter [${datacenter.name}]")
      case (false,   _) => logger.info(s"Actor[$name]: $member doesn't have [${StorageNodeActor.role}] role (it has roles ${member.roles})")
    }

    def register() = for {
      ringNodeId    <- ring.nodesId
      nodeName       = StorageNodeActor.name(ringNodeId, Datacenter(member.dataCenter))
      nodeRef        = context.actorSelection(RootActorPath(member.address) / "user" / nodeName)
    } yield nodeRef ! RegisterNode(nodeId)
  }

  private[this] def notHandledPF: Receive = {
    case t => logger.warn(s"Actor[$name]: Not handled message [$t]")
  }
}

object StorageNodeActor {
  def role: String = "storagenode"
  def name(nodeId: NodeId, datacenter: Datacenter): String = s"${datacenter.name}-id-${nodeId.id}"
  def props(nodeId: NodeId, datacenter: Datacenter, storage: PluggableStorageProtocol, ring: Ring, n: N): Props = {
    Props(new StorageNodeActor(nodeId, datacenter, storage, ring, n))
  }
}

case class StorageNodeActorRef(ref: ActorRef) extends AnyVal 
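A hypothetical wiring of the node, with nodeId, datacenter, storage, ring and n values assumed to be defined:

val storageNode = system.actorOf(
  StorageNodeActor.props(nodeId, datacenter, storage, ring, n),
  name = StorageNodeActor.name(nodeId, datacenter))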
Example 22
Source File: MajorityLeaderAutoDownBase.scala    From akka-cluster-custom-downing   with Apache License 2.0
package tanukki.akka.cluster.autodown

import akka.actor.Address
import akka.cluster.MemberStatus.Down
import akka.cluster.{MemberStatus, Member}

import scala.concurrent.duration.FiniteDuration

abstract class MajorityLeaderAutoDownBase(majorityMemberRole: Option[String], downIfInMinority: Boolean, autoDownUnreachableAfter: FiniteDuration)
    extends MajorityAwareCustomAutoDownBase(autoDownUnreachableAfter) {

  override def onLeaderChanged(leader: Option[Address]): Unit = {
    if (majorityMemberRole.isEmpty && isLeader) downPendingUnreachableMembers()
  }

  override def onRoleLeaderChanged(role: String, leader: Option[Address]): Unit = {
    majorityMemberRole.foreach { r =>
      if (r == role && isRoleLeaderOf(r)) downPendingUnreachableMembers()
    }
  }

  override def onMemberRemoved(member: Member, previousStatus: MemberStatus): Unit = {
    if (isMajority(majorityMemberRole)) {
      if (isLeaderOf(majorityMemberRole)) {
        downPendingUnreachableMembers()
      }
    } else {
      down(selfAddress)
    }
    super.onMemberRemoved(member, previousStatus)
  }

  override def downOrAddPending(member: Member): Unit = {
    if (isLeaderOf(majorityMemberRole)) {
      down(member.address)
      replaceMember(member.copy(Down))
    } else {
      pendingAsUnreachable(member)
    }
  }

  override def downOrAddPendingAll(members: Set[Member]): Unit = {
    if (isMajorityAfterDown(members, majorityMemberRole)) {
      members.foreach(downOrAddPending)
    } else if (downIfInMinority) {
      shutdownSelf()
    }
  }
} 
Example 23
Source File: LeaderAutoDownRolesBase.scala    From akka-cluster-custom-downing   with Apache License 2.0
package tanukki.akka.cluster.autodown

import akka.actor.Address
import akka.cluster.Member

import scala.concurrent.duration.FiniteDuration


abstract class LeaderAutoDownRolesBase(targetRoles: Set[String], autoDownUnreachableAfter: FiniteDuration)
  extends LeaderAwareCustomAutoDownBase(autoDownUnreachableAfter){


  override def onLeaderChanged(leader: Option[Address]): Unit = {
    if (isLeader) downPendingUnreachableMembers()
  }

  override def downOrAddPending(member: Member): Unit = {
    if (targetRoles.exists(role => member.hasRole(role))) {
      if (isLeader) {
        down(member.address)
      } else {
        pendingAsUnreachable(member)
      }
    }
  }

  override def downOrAddPendingAll(members: Set[Member]): Unit = {
    members.foreach(downOrAddPending)
  }
} 
Example 24
Source File: QuorumLeaderAutoDownBase.scala    From akka-cluster-custom-downing   with Apache License 2.0
package tanukki.akka.cluster.autodown

import akka.actor.Address
import akka.cluster.{MemberStatus, Member}
import akka.cluster.MemberStatus.Down

import scala.concurrent.duration.FiniteDuration

abstract class QuorumLeaderAutoDownBase(quorumRole: Option[String], quorumSize: Int, downIfOutOfQuorum: Boolean, autoDownUnreachableAfter: FiniteDuration)
  extends QuorumAwareCustomAutoDownBase(quorumSize, autoDownUnreachableAfter) {

  override def onLeaderChanged(leader: Option[Address]): Unit = {
    if (quorumRole.isEmpty && isLeader) downPendingUnreachableMembers()
  }

  override def onRoleLeaderChanged(role: String, leader: Option[Address]): Unit = {
    quorumRole.foreach { r =>
      if (r == role && isRoleLeaderOf(r)) downPendingUnreachableMembers()
    }
  }


  override def onMemberRemoved(member: Member, previousStatus: MemberStatus): Unit = {
    if (isQuorumMet(quorumRole)) {
      if (isLeaderOf(quorumRole)) {
        downPendingUnreachableMembers()
      }
    } else {
      down(selfAddress)
    }
    super.onMemberRemoved(member, previousStatus)
  }

  override def downOrAddPending(member: Member): Unit = {
    if (isLeaderOf(quorumRole)) {
      down(member.address)
      replaceMember(member.copy(Down))
    } else {
      pendingAsUnreachable(member)
    }
  }

  override def downOrAddPendingAll(members: Set[Member]): Unit = {
    if (isQuorumMetAfterDown(members, quorumRole)) {
      members.foreach(downOrAddPending)
    } else if (downIfOutOfQuorum) {
      shutdownSelf()
    }
  }
} 
Example 25
Source File: OldestAutoDownBase.scala    From akka-cluster-custom-downing   with Apache License 2.0
package tanukki.akka.cluster.autodown

import akka.cluster.MemberStatus.Down
import akka.cluster.{MemberStatus, Member}

import scala.concurrent.duration.FiniteDuration

abstract class OldestAutoDownBase(oldestMemberRole: Option[String], downIfAlone: Boolean, autoDownUnreachableAfter: FiniteDuration)
  extends OldestAwareCustomAutoDownBase(autoDownUnreachableAfter){

  override def onMemberRemoved(member: Member, previousStatus: MemberStatus): Unit = {
    if (isOldestOf(oldestMemberRole))
      downPendingUnreachableMembers()
  }

  override def downOrAddPending(member: Member): Unit = {
    if (isOldestOf(oldestMemberRole)) {
      down(member.address)
      replaceMember(member.copy(Down))
    } else {
      pendingAsUnreachable(member)
    }
  }

  def downOnSecondary(member: Member): Unit = {
    if (isSecondaryOldest(oldestMemberRole)) {
      down(member.address)
      replaceMember(member.copy(Down))
    }
  }

  override def downOrAddPendingAll(members: Set[Member]): Unit = {
    val oldest = oldestMember(oldestMemberRole)
    if (downIfAlone && isOldestAlone(oldestMemberRole)) {
      if (isOldestOf(oldestMemberRole)) {
        shutdownSelf()
      } else if (isSecondaryOldest(oldestMemberRole)) {
        members.foreach(downOnSecondary)
      } else {
        members.foreach(downOrAddPending)
      }
    } else {
      if (oldest.fold(true)(o => members.contains(o))) {
        shutdownSelf()
      } else {
        members.foreach(downOrAddPending)
      }
    }
  }

  def downAloneOldest(member: Member): Unit = {
    val oldest = oldestMember(oldestMemberRole)
    if (isOldestOf(oldestMemberRole)) {
      shutdownSelf()
    } else if (isSecondaryOldest(oldestMemberRole) && oldest.contains(member)) {
      oldest.foreach { m =>
        down(m.address)
        replaceMember(m.copy(Down))
      }
    } else {
      pendingAsUnreachable(member)
    }
  }
}