akka.cluster.pubsub.DistributedPubSub Scala Examples

The following examples show how to use akka.cluster.pubsub.DistributedPubSub. The source file, originating project, and license are noted above each example.
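For orientation, here is a minimal sketch of the pattern all of the examples below build on: obtain the mediator actor from the DistributedPubSub extension, subscribe an actor to a named topic, and publish messages to that topic from anywhere in the cluster. The topic name "content", the Greeting message, and the actor names are illustrative placeholders, not taken from any of the projects below.

import akka.actor.{Actor, ActorLogging}
import akka.cluster.pubsub.DistributedPubSub
import akka.cluster.pubsub.DistributedPubSubMediator.{Publish, Subscribe, SubscribeAck}

case class Greeting(text: String)

class ContentSubscriber extends Actor with ActorLogging {
  // The mediator is a cluster-aware registry/router provided by the extension.
  val mediator = DistributedPubSub(context.system).mediator
  mediator ! Subscribe("content", self)

  def receive = {
    case SubscribeAck(Subscribe("content", None, `self`)) =>
      log.info("Subscribed to topic 'content'")
    case Greeting(text) =>
      log.info("Received: {}", text)
  }
}

class ContentPublisher extends Actor {
  val mediator = DistributedPubSub(context.system).mediator

  def receive = {
    case text: String =>
      // Delivered to every subscriber of "content", on any node of the cluster.
      mediator ! Publish("content", Greeting(text))
  }
}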
Example 1
Source File: Server.scala    From scalachain   with MIT License
package com.elleflorio.scalachain

import akka.actor.{ActorRef, ActorSystem}
import akka.cluster.pubsub.DistributedPubSub
import akka.http.scaladsl.Http
import akka.http.scaladsl.server.Route
import akka.http.scaladsl.server.Directives._
import akka.stream.ActorMaterializer
import com.elleflorio.scalachain.actor.Node
import com.elleflorio.scalachain.api.NodeRoutes
import com.elleflorio.scalachain.cluster.ClusterManager
import com.typesafe.config.{Config, ConfigFactory}

import scala.concurrent.Await
import scala.concurrent.duration.Duration

object Server extends App with NodeRoutes {

  implicit val system: ActorSystem = ActorSystem("scalachain")
  implicit val materializer: ActorMaterializer = ActorMaterializer()

  val config: Config = ConfigFactory.load()
  val address = config.getString("http.ip")
  val port = config.getInt("http.port")
  val nodeId = config.getString("scalachain.node.id")

  lazy val routes: Route = statusRoutes ~ transactionRoutes ~ mineRoutes

  val clusterManager: ActorRef = system.actorOf(ClusterManager.props(nodeId), "clusterManager")
  val mediator: ActorRef = DistributedPubSub(system).mediator
  val node: ActorRef = system.actorOf(Node.props(nodeId, mediator), "node")

  Http().bindAndHandle(routes, address, port)
  println(s"Server online at http://$address:$port/")

  Await.result(system.whenTerminated, Duration.Inf)

} 
Example 2
Source File: ClusterManager.scala    From scalachain   with MIT License
package com.elleflorio.scalachain.cluster

import akka.actor.{Actor, ActorLogging, ActorRef, Props}
import akka.cluster.pubsub.DistributedPubSub
import akka.cluster.{Cluster, MemberStatus}
import com.elleflorio.scalachain.cluster.ClusterManager.GetMembers

object ClusterManager {

  sealed trait ClusterMessage
  case object GetMembers extends ClusterMessage

  def props(nodeId: String) = Props(new ClusterManager(nodeId))
}

class ClusterManager(nodeId: String) extends Actor with ActorLogging {

  val cluster: Cluster = Cluster(context.system)
  val listener: ActorRef = context.actorOf(ClusterListener.props(nodeId, cluster), "clusterListener")

  override def receive: Receive = {
    case GetMembers => {
      sender() ! cluster.state.members.filter(_.status == MemberStatus.up)
        .map(_.address.toString)
        .toList
    }
  }
} 
Example 3
Source File: BrokerTest.scala    From scalachain   with MIT License
package com.elleflorio.scalachain.actor

import akka.actor.{ActorRef, ActorSystem}
import akka.cluster.pubsub.DistributedPubSub
import akka.testkit.{ImplicitSender, TestKit}
import com.elleflorio.scalachain.actor.Broker.{AddTransaction, Clear, GetTransactions}
import com.elleflorio.scalachain.blockchain.Transaction
import org.scalatest.{BeforeAndAfterAll, FlatSpecLike, Matchers}

import scala.concurrent.duration._

class BrokerTest(sys: ActorSystem) extends TestKit(sys)
  with ImplicitSender
  with Matchers
  with FlatSpecLike
  with BeforeAndAfterAll {

  def this() = this(ActorSystem("broker-test"))
  val mediator: ActorRef = DistributedPubSub(this.system).mediator

  override def afterAll: Unit = {
    shutdown(system)
  }

  "A Broker Actor" should "start with an empty list of transactions" in {
    val broker = system.actorOf(Broker.props)

    broker ! GetTransactions
    expectMsg(500 millis, List())
  }

  "A Broker Actor" should "return the correct list of added transactions" in {
    val broker = system.actorOf(Broker.props)
    val transaction1 = Transaction("A", "B", 100)
    val transaction2 = Transaction("C", "D", 1000)

    broker ! AddTransaction(transaction1)
    broker ! AddTransaction(transaction2)

    broker ! GetTransactions
    expectMsg(500 millis, List(transaction2, transaction1))
  }

  "A Broker Actor" should "clear the transaction lists when requested" in {
    val broker = system.actorOf(Broker.props)
    val transaction1 = Transaction("A", "B", 100)
    val transaction2 = Transaction("C", "D", 1000)

    broker ! AddTransaction(transaction1)
    broker ! AddTransaction(transaction2)

    broker ! Clear

    broker ! GetTransactions
    expectMsg(500 millis, List())
  }

} 
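In a test like this, a TestProbe can be subscribed to a topic to assert what an actor publishes through the mediator. The following is a hypothetical sketch, not part of scalachain; it assumes the test configuration uses the cluster actor provider and that the node joins itself, since DistributedPubSub only delivers on cluster members.

import akka.actor.{ActorRef, ActorSystem}
import akka.cluster.pubsub.DistributedPubSub
import akka.cluster.pubsub.DistributedPubSubMediator.{Publish, Subscribe, SubscribeAck}
import akka.testkit.{TestKit, TestProbe}
import org.scalatest.{BeforeAndAfterAll, FlatSpecLike, Matchers}

import scala.concurrent.duration._

class MediatorSpec extends TestKit(ActorSystem("mediator-test"))
  with Matchers
  with FlatSpecLike
  with BeforeAndAfterAll {

  val mediator: ActorRef = DistributedPubSub(system).mediator

  override def afterAll: Unit = shutdown(system)

  "The mediator" should "deliver published messages to local subscribers" in {
    val probe = TestProbe()
    mediator ! Subscribe("test-topic", probe.ref)
    probe.expectMsgType[SubscribeAck]

    mediator ! Publish("test-topic", "hello")
    probe.expectMsg(500.millis, "hello")
  }
}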
Example 4
Source File: NotificationSubscriber.scala    From Akka-Cookbook   with MIT License
package com.packt.chapter7

import akka.actor.Actor
import akka.cluster.Cluster
import akka.cluster.pubsub.DistributedPubSub
import akka.cluster.pubsub.DistributedPubSubMediator.{Subscribe, SubscribeAck}

case class Notification(title: String, body: String)

class NotificationSubscriber extends Actor {
  val mediator = DistributedPubSub(context.system).mediator
  // subscribe to the topic named "notification"
  mediator ! Subscribe("notification", self)

  val cluster = Cluster(context.system)
  val clusterAddress = cluster.selfUniqueAddress

  def receive = {
    case notification: Notification ⇒
      println(s"Got notification in node $clusterAddress => $notification")
    case SubscribeAck(Subscribe("notification", None, `self`)) ⇒
      println("subscribing")
  }
} 
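The publishing side of this example is not shown above. A minimal counterpart could look like the following; NotificationPublisher is hypothetical, but it reuses the Notification type and the "notification" topic from the subscriber code.

import akka.actor.Actor
import akka.cluster.pubsub.DistributedPubSub
import akka.cluster.pubsub.DistributedPubSubMediator.Publish

class NotificationPublisher extends Actor {
  val mediator = DistributedPubSub(context.system).mediator

  def receive = {
    case notification: Notification =>
      // Delivered to every actor, on any node, that subscribed to the "notification" topic.
      mediator ! Publish("notification", notification)
  }
}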
Example 5
Source File: PubSubRegistryImpl.scala    From lagom   with Apache License 2.0
package com.lightbend.lagom.internal.javadsl.pubsub

import akka.actor.ActorSystem
import akka.cluster.pubsub.DistributedPubSub
import com.typesafe.config.Config
import javax.inject.Inject
import javax.inject.Singleton

import com.lightbend.lagom.javadsl.pubsub.PubSubRef
import com.lightbend.lagom.javadsl.pubsub.PubSubRegistry
import com.lightbend.lagom.javadsl.pubsub.TopicId

@Singleton
private[lagom] class PubSubRegistryImpl(system: ActorSystem, conf: Config) extends PubSubRegistry {
  @Inject
  def this(system: ActorSystem) =
    this(system, system.settings.config.getConfig("lagom.pubsub"))

  private val pubsub          = DistributedPubSub(system)
  private val bufferSize: Int = conf.getInt("subscriber-buffer-size")

  override def refFor[T](topic: TopicId[T]): PubSubRef[T] =
    new PubSubRef(topic, pubsub.mediator, system, bufferSize)
} 
Example 6
Source File: NodeActor.scala    From ForestFlow   with Apache License 2.0
package ai.forestflow.serving.cluster

import java.io.File

import akka.actor.{Actor, ActorLogging, ActorRef, Props, Timers}
import akka.cluster.Cluster
import akka.cluster.pubsub.DistributedPubSub
import akka.cluster.pubsub.DistributedPubSubMediator.Subscribe
import ai.forestflow.domain.CleanupLocalStorage
import org.apache.commons.io.FileUtils
import com.typesafe.scalalogging.LazyLogging
import ai.forestflow.utils.ThrowableImplicits._

import scala.util.{Failure, Success, Try}

/**
 * This actor is responsible for node-level (host-level) tasks that should run once per node,
 * such as file system cleanup.
 */
object NodeActor extends LazyLogging {
  
  def props(): Props =
    Props(new NodeActor)
      .withDispatcher("blocking-io-dispatcher")

  def cleanupLocalStorage(path: String): Unit = {
    val localDir = new File(path)
    val localDirExists = localDir.exists()
    logger.info(s"Cleaning up local storage: Local Directory: $localDir , exists? $localDirExists")
    if (localDirExists)
      Try(FileUtils.deleteDirectory(localDir)) match {
        case Success(_) => logger.info(s"Local Directory $localDir cleaned up successfully")
        case Failure(ex) => logger.error(s"Local Directory $localDir cleanup failed! Reason: ${ex.printableStackTrace}")
      }
  }
}

class NodeActor extends Actor
  with ActorLogging
  with Timers {

  
  implicit val cluster: Cluster = Cluster(context.system)
  val mediator: ActorRef = DistributedPubSub(context.system).mediator

  mediator ! Subscribe(classOf[CleanupLocalStorage].getSimpleName, self)

  override def receive: Receive = {
    case CleanupLocalStorage(path) =>
      NodeActor.cleanupLocalStorage(path)
  }
} 
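The topic here is the simple class name of CleanupLocalStorage, so cleanup can be triggered on every node at once by publishing a single message. The sketch below is hypothetical and assumes CleanupLocalStorage can be constructed from just a path, which may not match the actual ForestFlow domain type.

import akka.actor.ActorSystem
import akka.cluster.pubsub.DistributedPubSub
import akka.cluster.pubsub.DistributedPubSubMediator.Publish
import ai.forestflow.domain.CleanupLocalStorage

object CleanupBroadcast {
  // Every NodeActor in the cluster is subscribed to this topic and will delete its local copy of `path`.
  def cleanupEverywhere(system: ActorSystem, path: String): Unit =
    DistributedPubSub(system).mediator ! Publish(classOf[CleanupLocalStorage].getSimpleName, CleanupLocalStorage(path))
}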
Example 7
Source File: Reader.scala    From Raphtory   with Apache License 2.0
package com.raphtory.core.components.PartitionManager

import akka.actor.Actor
import akka.actor.ActorLogging
import akka.actor.ActorRef
import akka.actor.Props
import akka.actor.Terminated
import akka.cluster.pubsub.DistributedPubSubMediator.SubscribeAck
import akka.cluster.pubsub.DistributedPubSub
import akka.cluster.pubsub.DistributedPubSubMediator
import com.raphtory.core.analysis.API.Analyser
import com.raphtory.core.components.PartitionManager.Workers.ReaderWorker
import com.raphtory.core.model.communication._
import com.raphtory.core.storage.EntityStorage
import com.raphtory.core.utils.Utils
import com.twitter.util.Eval

import scala.collection.parallel.mutable.ParTrieMap
import scala.util.Try

class Reader(
    id: Int,
    test: Boolean,
    managerCountVal: Int,
    storage: ParTrieMap[Int, EntityStorage],
    workerCount: Int = 10
) extends Actor
        with ActorLogging {

  implicit var managerCount: Int = managerCountVal

  // Id which refers to the partitions position in the graph manager map
  val managerId: Int = id

  val mediator: ActorRef = DistributedPubSub(context.system).mediator

  mediator ! DistributedPubSubMediator.Put(self)
  mediator ! DistributedPubSubMediator.Subscribe(Utils.readersTopic, self)

  var readers: ParTrieMap[Int, ActorRef] = new ParTrieMap[Int, ActorRef]()

  for (i <- 0 until workerCount) {
    log.debug("Initialising [{}] worker children for Reader [{}].", workerCount, managerId)

    // create threads for writing
    val child = context.system.actorOf(
            Props(new ReaderWorker(managerCount, managerId, i, storage(i))).withDispatcher("reader-dispatcher"),
            s"Manager_${id}_reader_$i"
    )

    context.watch(child)
    readers.put(i, child)
  }

  override def preStart(): Unit =
    log.debug("Reader [{}] is being started.", managerId)

  override def receive: Receive = {
    case ReaderWorkersOnline()     => sender ! ReaderWorkersACK()
    case req: AnalyserPresentCheck => processAnalyserPresentCheckRequest(req)
    case req: UpdatedCounter       => processUpdatedCounterRequest(req)
    case _: SubscribeAck           =>
    case Terminated(child) =>
      log.warning("ReaderWorker with path [{}] belonging to Reader [{}] has died.", child.path, managerId)
    case x => log.warning("Reader [{}] received unknown [{}] message.", managerId, x)
  }

  def processAnalyserPresentCheckRequest(req: AnalyserPresentCheck): Unit = {
    log.debug(s"Reader [{}] received [{}] request.", managerId, req)

    val className   = req.className
    val classExists = Try(Class.forName(className))

    classExists.toEither.fold(
            { _: Throwable =>
              log.debug("Class [{}] was not found within this image.", className)

              sender ! ClassMissing()
            }, { _: Class[_] =>
              log.debug(s"Class [{}] exists. Proceeding.", className)

              sender ! AnalyserPresent()
            }
    )
  }

  def processUpdatedCounterRequest(req: UpdatedCounter): Unit = {
    log.debug("Reader [{}] received [{}] request.", managerId, req)

    managerCount = req.newValue
    readers.foreach(x => x._2 ! UpdatedCounter(req.newValue))
  }
} 
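Because the Reader registers itself with Put, other actors anywhere in the cluster can also reach it point-to-point through the mediator using Send and its actor path, in addition to the readersTopic subscription shown above. A hedged sketch; the path "/user/reader" and the message are illustrative, not Raphtory's actual wiring.

import akka.actor.ActorSystem
import akka.cluster.pubsub.DistributedPubSub
import akka.cluster.pubsub.DistributedPubSubMediator.Send

object SendToReader {
  // Send routes the message to one actor registered (via Put) under the given path,
  // preferring an instance on the local node when localAffinity is true.
  def sendToOne(system: ActorSystem, msg: Any): Unit =
    DistributedPubSub(system).mediator ! Send("/user/reader", msg, localAffinity = true)
}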
Example 8
Source File: WatermarkManager.scala    From Raphtory   with Apache License 2.0
package com.raphtory.core.components.ClusterManagement

import akka.actor.{Actor, ActorLogging, ActorRef}
import akka.cluster.pubsub.{DistributedPubSub, DistributedPubSubMediator}
import com.raphtory.core.model.communication.{UpdateArrivalTime, WatermarkTime}
import kamon.Kamon

import scala.collection.mutable
import scala.collection.parallel.mutable.ParTrieMap

case class queueItem(wallclock: Long, timestamp: Long) extends Ordered[queueItem] {
  def compare(that: queueItem): Int = (that.timestamp - this.timestamp).toInt
}

class WatermarkManager(managerCount: Int) extends Actor with ActorLogging  {

  val spoutWallClock = Kamon.histogram("Raphtory_Wall_Clock").withTag("Actor","Watchdog")
  val safeTime       = Kamon.gauge("Raphtory_Safe_Time").withTag("actor",s"WatermarkManager")

  val watermarkqueue = mutable.PriorityQueue[queueItem]()
  private val safeMessageMap = ParTrieMap[String, Long]()
  var counter = 0
  val mediator: ActorRef = DistributedPubSub(context.system).mediator
  mediator ! DistributedPubSubMediator.Put(self)

  override def receive: Receive = {
    case u: UpdateArrivalTime => processUpdateArrivalTime(u)
    case u: WatermarkTime     => processWatermarkTime(u)
  }

  def processUpdateArrivalTime(u: UpdateArrivalTime): Unit =
    watermarkqueue += queueItem(u.wallClock, u.time)

  def processWatermarkTime(u: WatermarkTime): Unit = {
    val currentTime = System.currentTimeMillis()
    safeMessageMap.put(sender().toString(), u.time)
    counter += 1
    if (counter % (10 * managerCount) == 0) {
      val watermark = safeMessageMap.map(x => x._2).min
      safeTime.update(watermark)
      while (watermarkqueue.nonEmpty && watermarkqueue.head.timestamp <= watermark) {
        spoutWallClock.record(currentTime - watermarkqueue.dequeue().wallclock)
      }
    }
  }
} 
Example 9
Source File: ResultProcessor.scala    From akka-iot-mqtt-v2   with GNU Lesser General Public License v3.0
package akkaiot

import akka.actor.{ Props, ActorRef, Actor, ActorLogging, ActorPath }
import akka.cluster.pubsub.DistributedPubSub
import akka.cluster.pubsub.DistributedPubSubMediator

object ResultProcessor {
  def props(iotPath: ActorPath): Props = Props(new ResultProcessor(iotPath))
}

class ResultProcessor(iotPath: ActorPath) extends Actor with ActorLogging {

  val mediator = DistributedPubSub(context.system).mediator
  mediator ! DistributedPubSubMediator.Subscribe(Master.ResultsTopic, self)

  def receive = {
    case _: DistributedPubSubMediator.SubscribeAck =>
    case result: WorkResult =>
      log.info("Result Processor -> Got work result: {}-{} | State {} | Setting {}", result.deviceType, result.deviceId, result.nextState, result.nextSetting)

      context.actorSelection(iotPath) ! result
      log.info("Result Processor -> Sent work result for {}-{} to IoT Manager", result.deviceType, result.deviceId)
  }
} 
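If several ResultProcessor instances should split the stream of results rather than each receive every result, they can subscribe with a group id and the publisher can opt in with sendOneMessageToEachGroup. This is a hedged variation on the example above, not how akka-iot-mqtt-v2 is actually wired; it assumes the same package so that Master.ResultsTopic and WorkResult are in scope.

import akka.actor.{Actor, ActorLogging}
import akka.cluster.pubsub.DistributedPubSub
import akka.cluster.pubsub.DistributedPubSubMediator.{Publish, Subscribe, SubscribeAck}

class GroupedResultProcessor extends Actor with ActorLogging {

  val mediator = DistributedPubSub(context.system).mediator
  // Processors that join the same group share the topic: each result published with
  // sendOneMessageToEachGroup = true is delivered to exactly one member of the group.
  mediator ! Subscribe(Master.ResultsTopic, Some("result-processors"), self)

  def receive = {
    case _: SubscribeAck =>
    case result: WorkResult =>
      log.info("Got work result for {}-{}", result.deviceType, result.deviceId)
  }
}

// Publisher side (e.g. the Master) must opt in to group delivery:
// mediator ! Publish(Master.ResultsTopic, workResult, sendOneMessageToEachGroup = true)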
Example 10
Source File: ClusterListenerTestActor.scala    From NSDb   with Apache License 2.0
package io.radicalbit.nsdb.cluster.actor

import akka.actor.{Actor, ActorLogging, Deploy}
import akka.cluster.Cluster
import akka.cluster.ClusterEvent.{InitialStateAsEvents, MemberEvent, MemberUp, UnreachableMember}
import akka.cluster.pubsub.DistributedPubSub
import akka.cluster.pubsub.DistributedPubSubMediator.Subscribe
import akka.remote.RemoteScope
import io.radicalbit.nsdb.cluster.PubSubTopics.NODE_GUARDIANS_TOPIC
import io.radicalbit.nsdb.cluster.actor.ClusterListener.{GetNodeMetrics, NodeMetricsGot}
import io.radicalbit.nsdb.cluster.createNodeName

class ClusterListenerTestActor extends Actor with ActorLogging {

  private lazy val cluster  = Cluster(context.system)
  private lazy val mediator = DistributedPubSub(context.system).mediator

  override def preStart(): Unit = {
    cluster.subscribe(self, initialStateMode = InitialStateAsEvents, classOf[MemberEvent], classOf[UnreachableMember])
  }

  override def receive: Receive = {
    case MemberUp(member) if member == cluster.selfMember =>

      val nodeName = createNodeName(member)

      val nodeActorsGuardian =
        context.system.actorOf(NodeActorsGuardian.props(self).withDeploy(Deploy(scope = RemoteScope(member.address))),
          name = s"guardian_$nodeName")

      mediator ! Subscribe(NODE_GUARDIANS_TOPIC, nodeActorsGuardian)
    case GetNodeMetrics =>
      sender() ! NodeMetricsGot(Set.empty)
  }

}