akka.actor.typed.scaladsl.Behaviors Scala Examples
The following examples show how to use akka.actor.typed.scaladsl.Behaviors.
Each example comes from an open-source project; the originating project, source file, and license are noted in the header above each code listing.
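Before the project examples, here is a minimal, self-contained sketch of the basic pattern they all build on: Behaviors.setup gives access to the ActorContext, Behaviors.receiveMessage handles incoming messages, and Behaviors.stopped terminates the actor. The Greeter protocol and all names in this sketch are illustrative only and are not taken from any of the projects listed below.

import akka.actor.typed.{ActorSystem, Behavior}
import akka.actor.typed.scaladsl.Behaviors

// Hypothetical Greeter actor, for illustration only.
object Greeter {
  sealed trait Command
  final case class Greet(name: String) extends Command
  case object Stop extends Command

  def apply(): Behavior[Command] =
    Behaviors.setup { context =>
      Behaviors.receiveMessage {
        case Greet(name) =>
          context.log.info("Hello, {}!", name)
          Behaviors.same
        case Stop =>
          Behaviors.stopped
      }
    }
}

object GreeterMain {
  def main(args: Array[String]): Unit = {
    // The guardian behavior becomes the root of the actor system;
    // the system itself is an ActorRef for the guardian's protocol.
    val system = ActorSystem(Greeter(), "greeter-demo")
    system ! Greeter.Greet("World")
    system ! Greeter.Stop
  }
}

The examples that follow combine this pattern with routers, cluster singletons, sharding, persistence, timers, and streams.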
Example 1
Source File: ClusterStatusTrackerMain.scala From Pi-Akka-Cluster with Apache License 2.0 | 5 votes |
package akkapi.cluster

import akka.NotUsed
import akka.actor.typed.scaladsl.adapter.TypedActorSystemOps
import akka.actor.typed.scaladsl.{ActorContext, Behaviors, Routers}
import akka.actor.typed.{ActorSystem, Behavior, Terminated}
import akka.cluster.typed.{ClusterSingleton, SingletonActor}
import akka.management.scaladsl.AkkaManagement
import akkapi.cluster.sudoku.{SudokuSolverSettings, SudokuSolver, SudokuProblemSender}

object Main {
  def apply(settings: Settings): Behavior[NotUsed] = Behaviors.setup { context =>
    val sudokuSolverSettings = SudokuSolverSettings("sudokusolver.conf")
    // Start ClusterStatusTracker & LedStripVisualiser
    val ledStripDriver = context.spawn(LedStripDriver(settings), "led-strip-driver")
    val ledStripController = context.spawn(LedStripVisualiser(settings, ledStripDriver), "led-strip-controller")
    val clusterStatusTracker = context.spawn(
      ClusterStatusTracker(
        settings,
        Some(contextToClusterSingleton(settings))
      ),
      "cluster-status-tracker"
    )
    clusterStatusTracker ! ClusterStatusTracker.SubscribeVisualiser(ledStripController)
    // Start SudokuSolver: we'll run one instance per cluster node
    context.spawn(SudokuSolver(ledStripDriver, sudokuSolverSettings), s"sudoku-solver")
    // We'll use a [cluster-aware] group router
    val sudokuSolverGroup = context.spawn(Routers.group(SudokuSolver.Key).withRoundRobinRouting(), "sudoku-solvers")
    // And run one instance of the Sudoku problem sender in the cluster
    ClusterSingleton(context.system).init(SingletonActor(SudokuProblemSender(sudokuSolverGroup, sudokuSolverSettings), "sudoku-problem-sender"))

    Behaviors.receiveSignal {
      case (_, Terminated(_)) =>
        Behaviors.stopped
    }
  }

  private def contextToClusterSingleton(settings: Settings): ActorContextToSingletonBehavior =
    (context: ActorContext[ClusterStatusTracker.ClusterEvent]) => PiClusterSingleton(settings, context.self)

  type ActorContextToSingletonBehavior = ActorContext[ClusterStatusTracker.ClusterEvent] => Behavior[PiClusterSingleton.Command]
}

object ClusterStatusTrackerMain {
  def main(args: Array[String]): Unit = {
    System.loadLibrary("rpi_ws281x")
    val settings = Settings()
    val config = settings.config
    val system = ActorSystem[NotUsed](Main(settings), settings.actorSystemName, config)
    val classicSystem = system.toClassic
    // Start Akka HTTP Management extension
    AkkaManagement(classicSystem).start()
  }
}
Example 2
Source File: ForkJoinCreation.scala From effpi with MIT License | 5 votes |
// Effpi - verified message-passing programs in Dotty
// Copyright 2019 Alceste Scalas and Elias Benussi
// Released under the MIT License: https://opensource.org/licenses/MIT
package effpi.benchmarks.akka

import akka.NotUsed
import akka.actor.typed.scaladsl.{ Behaviors, MutableBehavior, ActorContext }
import akka.actor.typed.{ ActorRef, ActorSystem, Behavior, DispatcherSelector, Terminated }
import scala.concurrent.Future
import scala.concurrent.duration._
import scala.concurrent.{ Future, Promise, Await }
import scala.concurrent.ExecutionContext.Implicits.global

object ForkJoinCreation {

  case class Message(msg: String)

  val simpleActor = Behaviors.receive[Message] { (ctx, msg) =>
    Behaviors.stopped
  }

  def mainActor(
    durationPromise: Promise[Long],
    numActors: Int
  ): Behavior[akka.NotUsed] =
    Behaviors.setup { ctx =>
      val startTime = System.nanoTime()

      val simpleActorRefs = (1 to numActors).toList.map { id =>
        ctx.spawn(simpleActor, "simple" + id)
      }

      simpleActorRefs.foreach { simpleActorRef =>
        simpleActorRef ! Message("Hello World!")
      }

      val endTime = System.nanoTime()
      durationPromise.success(endTime - startTime)
      Behaviors.stopped
    }

  def bench(params: Int): Long = {
    val durationPromise = Promise[Long]()
    val durationFuture = durationPromise.future
    val system = ActorSystem(
      mainActor(durationPromise, params), "ForkJoinCreationDemo")
    Await.result(system.whenTerminated, Duration.Inf)
    val duration = Await.result(durationFuture, Duration.Inf)
    duration
  }
}
Example 3
Source File: ClusterListener.scala From akka-sample-cluster-docker-compose-scala with Apache License 2.0 | 5 votes |
package com.example

import akka.actor.typed.Behavior
import akka.actor.typed.scaladsl.Behaviors
import akka.cluster.ClusterEvent
import akka.cluster.ClusterEvent._
import akka.cluster.typed.Cluster
import akka.cluster.typed.Subscribe

object ClusterListener {

  def apply(): Behavior[ClusterEvent.ClusterDomainEvent] =
    Behaviors.setup { ctx =>
      ctx.log.debug("starting up cluster listener...")
      Cluster(ctx.system).subscriptions ! Subscribe(ctx.self, classOf[ClusterEvent.ClusterDomainEvent])

      Behaviors.receiveMessagePartial {
        case MemberUp(member) =>
          ctx.log.debug("Member is Up: {}", member.address)
          Behaviors.same
        case UnreachableMember(member) =>
          ctx.log.debug("Member detected as unreachable: {}", member)
          Behaviors.same
        case MemberRemoved(member, previousStatus) =>
          ctx.log.debug("Member is Removed: {} after {}", member.address, previousStatus)
          Behaviors.same
        case LeaderChanged(member) =>
          ctx.log.info("Leader changed: " + member)
          Behaviors.same
        case any: MemberEvent =>
          ctx.log.info("Member Event: " + any.toString)
          Behaviors.same
      }
    }
}
Example 4
Source File: ClusterStatusTrackerMain.scala From Pi-Akka-Cluster with Apache License 2.0 | 5 votes |
package akkapi.cluster

import akka.NotUsed
import akka.actor.typed.scaladsl.Behaviors
import akka.actor.typed.{ActorSystem, Behavior, Terminated}
import akka.management.scaladsl.AkkaManagement
import akka.actor.typed.scaladsl.adapter.TypedActorSystemOps

object Main {
  def apply(settings: Settings): Behavior[NotUsed] = Behaviors.setup { context =>
    val ledStripDriver = context.spawn(LedStripDriver(settings), "led-strip-driver")
    val ledStripController = context.spawn(LedStripVisualiser(settings, ledStripDriver), "led-strip-controller")
    val clusterStatusTracker = context.spawn(ClusterStatusTracker(settings, None), "cluster-status-tracker")
    clusterStatusTracker ! ClusterStatusTracker.SubscribeVisualiser(ledStripController)

    Behaviors.receiveSignal {
      case (_, Terminated(_)) =>
        Behaviors.stopped
    }
  }
}

object ClusterStatusTrackerMain {
  def main(args: Array[String]): Unit = {
    System.loadLibrary("rpi_ws281x")
    val settings = Settings()
    val config = settings.config
    val system = ActorSystem[NotUsed](Main(settings), settings.actorSystemName, config)
    // Start Akka HTTP Management extension
    AkkaManagement(system.toClassic).start()
  }
}
Example 5
Source File: ShardStateTracker.scala From Pi-Akka-Cluster with Apache License 2.0 | 5 votes |
package com.lightbend.akka_oled

import akka.actor.typed.{ActorRef, Behavior}
import akka.actor.typed.scaladsl.Behaviors
import akka.cluster.sharding.ShardRegion.CurrentShardRegionState
import akka.cluster.sharding.typed.{ClusterShardingQuery, GetShardRegionState}
import akka.util.Timeout
import com.lightbend.akka_oled.OledShardingVisualizer.ShardRegionState

import scala.concurrent.duration._

object ShardStateTracker {
  implicit val timeout: Timeout = 6.seconds

  def apply(visualizer: ActorRef[OledShardingVisualizer.Command]): Behavior[CurrentShardRegionState] =
    Behaviors.setup { context =>
      Behaviors.receiveMessage { message: CurrentShardRegionState =>
        visualizer.tell(ShardRegionState(message.shards))
        Behaviors.same
      }
    }
}

object ShardStateScheduler {
  implicit val timeout: Timeout = 6.seconds

  case class Tick()

  def apply(shardState: ActorRef[ClusterShardingQuery],
            shardTracker: ActorRef[CurrentShardRegionState]): Behavior[Tick] =
    Behaviors.withTimers { timer =>
      timer.startTimerAtFixedRate(Tick(), 1.second)
      Behaviors.receiveMessage { _: Tick =>
        shardState ! GetShardRegionState(ClientEntity.TypeKey, shardTracker)
        Behaviors.same
      }
    }
}
Example 6
Source File: ClientEntity.scala From Pi-Akka-Cluster with Apache License 2.0 | 5 votes |
package com.lightbend.akka_oled

import akka.actor.typed.scaladsl.Behaviors
import akka.actor.typed.{ActorRef, Behavior}
import akka.cluster.sharding.typed.scaladsl.EntityTypeKey
import akka.persistence.typed.PersistenceId
import akka.persistence.typed.scaladsl.{Effect, EventSourcedBehavior}
import com.lightbend.akka_oled.OledShardingVisualizer.Notification

object ClientEntity {

  sealed trait Command
  case class PostPoints(name: String, amount: Int)(val replyTo: ActorRef[String]) extends Command
  case class Get(name: String)(val replyTo: ActorRef[Int]) extends Command

  final case class PointsAdded(name: String, points: Int)

  val TypeKey: EntityTypeKey[Command] = EntityTypeKey[Command]("ClientEntity")

  final case class ClientPoints(name: String, points: Int, visualizer: ActorRef[Notification]) {
    def add(delta: Int) = copy(points = points + delta)
  }

  private val commandHandler: (ClientPoints, Command) => Effect[PointsAdded, ClientPoints] = { (state, cmd) =>
    cmd match {
      case pp @ PostPoints(name, amount) =>
        Effect.persist(PointsAdded(name, amount)).thenRun(s => {
          state.visualizer ! Notification(s.name, s.points)
          pp.replyTo ! "Ok\n"
        })
      case g @ Get(_) =>
        g.replyTo ! state.points
        state.visualizer ! Notification(state.name, state.points)
        Effect.none
    }
  }

  private val eventHandler: (ClientPoints, PointsAdded) => ClientPoints = { (state, evt) =>
    state.add(evt.points)
  }

  def apply(entityId: String, persistenceId: PersistenceId, visualizer: ActorRef[Notification]): Behavior[ClientEntity.Command] =
    Behaviors.setup { _ =>
      EventSourcedBehavior(persistenceId, ClientPoints(entityId, 0, visualizer), commandHandler, eventHandler)
    }
}
Example 7
Source File: OledShardingVisualizer.scala From Pi-Akka-Cluster with Apache License 2.0 | 5 votes |
package com.lightbend.akka_oled

import akka.actor.typed.scaladsl.Behaviors
import akka.actor.typed.{ActorRef, Behavior}
import akka.cluster.sharding.ShardRegion.ShardState
import akkapi.cluster.OledDriver
import akkapi.cluster.OledDriver.UpdateView
import com.lightbend.akka_oled.OledShardingVisualizer.{Notification, ShardRegionState}

object OledShardingVisualizer {

  sealed trait Command
  case class ShardRegionState(shards: Set[ShardState]) extends Command
  case class Notification(name: String, total: Int) extends Command

  def apply(screenIndex: Int, oledDriver: ActorRef[OledDriver.Command]): Behavior[OledShardingVisualizer.Command] =
    Behaviors.setup { context =>
      Behaviors.withTimers[Command] { timer =>
        new OledShardingVisualizer(screenIndex, oledDriver).running(
          clients = Map.empty[String, Int],
          shardToClientName = Map.empty[String, Set[String]]
        )
      }
    }
}

class OledShardingVisualizer private (screenIndex: Int, oledDriver: ActorRef[OledDriver.Command]) {

  def running(clients: Map[String, Int],
              shardToClientName: Map[String, Set[String]]): Behavior[OledShardingVisualizer.Command] =
    Behaviors
      .receiveMessage[OledShardingVisualizer.Command] {
        case Notification(name, total) =>
          val newClients = clients + (name -> total)
          oledDriver ! UpdateView(screenIndex, renderState(newClients, shardToClientName))
          running(newClients, shardToClientName)
        case ShardRegionState(shards: Set[ShardState]) =>
          val entityIds: Set[String] = shards.flatMap(_.entityIds)
          val newShardToClientName = shards.foldLeft(Map.empty[String, Set[String]]) {
            case (map, value) => map + (value.shardId.toString -> value.entityIds.map(_.toString))
          }
          val withNewClients: Map[String, Int] =
            entityIds.foldLeft(clients)((map, a) => if (clients.get(a).isEmpty) map + (a -> 0) else map)
          //remove old shards
          val updatedClients = withNewClients.filter { case (k, _) => entityIds.contains(k) }
          oledDriver ! UpdateView(screenIndex, renderState(updatedClients, newShardToClientName))
          running(updatedClients, newShardToClientName)
      }

  private def renderState(clients: Map[String, Int], shardToClientName: Map[String, Set[String]]): String = {
    if (clients.nonEmpty)
      shardToClientName.flatMap[String] {
        case (key, names) =>
          names.map { name =>
            "Shard#" + key + "->" + name + ": " + clients.getOrElse(name, 0)
          }
      }.mkString("\n")
    else
      "No data"
  }
}
Example 8
Source File: Main.scala From Pi-Akka-Cluster with Apache License 2.0 | 5 votes |
package com.lightbend.akka_oled

import akka.NotUsed
import akka.actor.typed.scaladsl.Behaviors
import akka.actor.typed.scaladsl.adapter.TypedActorSystemOps
import akka.actor.typed.{ActorSystem, Behavior, Terminated}
import akka.cluster.sharding.typed.scaladsl.{ClusterSharding, Entity}
import akka.http.scaladsl.Http
import akka.http.scaladsl.marshallers.sprayjson.SprayJsonSupport
import akka.management.scaladsl.AkkaManagement
import akka.persistence.typed.PersistenceId
import akka.stream.Materializer
import akkapi.cluster.{ClusterStatusTracker, OledClusterVisualizer, OledDriver, Settings}
import spray.json._

import scala.concurrent.ExecutionContextExecutor

object Main extends SprayJsonSupport with DefaultJsonProtocol {

  case class AddPoints(points: Int)

  implicit val transactionFormat = jsonFormat1(AddPoints)

  def apply(settings: Settings): Behavior[NotUsed] = Behaviors.setup { ctx =>
    implicit val system = ctx.system
    implicit val untypedSystem: akka.actor.ActorSystem = ctx.system.toClassic
    implicit val ec: ExecutionContextExecutor = ctx.system.executionContext

    val oledDriver = ctx.spawn(OledDriver(settings), "oled-driver")
    oledDriver ! OledDriver.RegisterView("Cluster State", 0)
    oledDriver ! OledDriver.RegisterView("Sharding State", 1)

    val clusterView = ctx.spawn(OledClusterVisualizer(0, settings, oledDriver), "oled-cluster-view")
    val clusterStatusTracker = ctx.spawn(ClusterStatusTracker(settings, None), "cluster-status-tracker")
    clusterStatusTracker ! ClusterStatusTracker.SubscribeVisualiser(clusterView)

    val shardVisualizer = ctx.spawn(OledShardingVisualizer(1, oledDriver), "oled-sharding-view")

    val sharding = ClusterSharding(ctx.system)
    sharding.init(Entity(typeKey = ClientEntity.TypeKey) { entityContext =>
      ClientEntity(entityContext.entityId,
        PersistenceId(entityContext.entityTypeKey.name, entityContext.entityId),
        shardVisualizer)
    })

    val tracker = ctx.spawn(ShardStateTracker(shardVisualizer), "oled-sharding-tracker")
    ctx.spawn(ShardStateScheduler(sharding.shardState, tracker), "oled-sharding-scheduler")

    val routes = new Routes(sharding)

    //materializer Materializer.createMaterializer(ctx.system.toClassic)
    implicit val mat: Materializer = Materializer.createMaterializer(ctx.system.toClassic)
    Http()(ctx.system.toClassic).bindAndHandle(routes.route,
      settings.config.getString("cluster-node-configuration.external-ip"), 8080)

    Behaviors.receiveSignal {
      case (_, Terminated(_)) =>
        Behaviors.stopped
    }
  }
}

object DisplayClusterShardingMain {
  def main(args: Array[String]): Unit = {
    val settings = Settings()
    val system = ActorSystem[NotUsed](Main(settings), "akka-oled", settings.config)
    // Start Akka HTTP Management extension
    AkkaManagement(system).start()
  }
}
Example 9
Source File: DisplayClusterStatusMain.scala From Pi-Akka-Cluster with Apache License 2.0 | 5 votes |
package com.lightbend.akka_oled

import akka.NotUsed
import akka.actor.typed.scaladsl.Behaviors
import akka.actor.typed.{ActorSystem, Behavior, Terminated}
import akka.management.scaladsl.AkkaManagement
import akkapi.cluster.{ClusterStatusTracker, OledClusterVisualizer, OledDriver, Settings}

object Main {
  def apply(settings: Settings): Behavior[NotUsed] = Behaviors.setup { context =>
    val oledDriver = context.spawn(OledDriver(settings), "oled-driver")
    val clusterView = context.spawn(OledClusterVisualizer(0, settings, oledDriver), "oled-cluster-view")
    oledDriver ! OledDriver.RegisterView("Cluster State", 0)
    val clusterStatusTracker = context.spawn(ClusterStatusTracker(settings, None), "cluster-status-tracker")
    clusterStatusTracker ! ClusterStatusTracker.SubscribeVisualiser(clusterView)

    Behaviors.receiveSignal {
      case (_, Terminated(_)) =>
        Behaviors.stopped
    }
  }
}

object DisplayClusterStatusMain {
  def main(args: Array[String]): Unit = {
    val settings = Settings()
    val config = settings.config
    val system = ActorSystem[NotUsed](Main(settings), "akka-oled", config)
    // Start Akka HTTP Management extension
    AkkaManagement(system).start()
  }
}
Example 10
Source File: PingPong.scala From effpi with MIT License | 5 votes |
// Effpi - verified message-passing programs in Dotty
// Copyright 2019 Alceste Scalas and Elias Benussi
// Released under the MIT License: https://opensource.org/licenses/MIT
package effpi.benchmarks.akka

import akka.NotUsed
import akka.actor.typed.scaladsl.{ Behaviors, MutableBehavior, ActorContext }
import akka.actor.typed.{ ActorRef, ActorSystem, Behavior, DispatcherSelector, Terminated }
import scala.concurrent.Future
import scala.concurrent.duration._
import scala.concurrent.{ Future, Promise, Await }
import scala.concurrent.ExecutionContext.Implicits.global

object PingPong {

  final case class Ping(iterations: Int, replyTo: ActorRef[Pong])
  case class Pong(iterations: Int, pingTo: ActorRef[Ping])

  val pong = Behaviors.receive[Ping] { (ctx, msg) =>
    msg.replyTo ! Pong(msg.iterations - 1, ctx.self)
    Behaviors.same
  }

  def ping(startTimePromise: Promise[Long], endTimePromise: Promise[Long], expectedIterations: Int) =
    Behaviors.receive[Pong] { (ctx, pong) =>
      if (pong.iterations == 0) {
        endTimePromise.success(System.nanoTime())
        Behaviors.stopped
      } else {
        if (expectedIterations == pong.iterations) {
          startTimePromise.success(System.nanoTime())
        }
        pong.pingTo ! Ping(pong.iterations, ctx.self)
        Behaviors.same
      }
    }

  def mainActor(
    durationPromise: Promise[Long],
    numPairs: Int,
    numIterations: Int
  ): Behavior[akka.NotUsed] =
    Behaviors.setup { ctx =>

      val (startTimePromises, startTimeFutures): (List[Promise[Long]], List[Future[Long]]) =
        (1 to numPairs).toList.map { _ =>
          val startTimePromise = Promise[Long]()
          val startTimeFuture = startTimePromise.future
          (startTimePromise, startTimeFuture)
        }.unzip

      val (endTimePromises, endTimeFutures): (List[Promise[Long]], List[Future[Long]]) =
        (1 to numPairs).toList.map { _ =>
          val endTimePromise = Promise[Long]()
          val endTimeFuture = endTimePromise.future
          (endTimePromise, endTimeFuture)
        }.unzip

      // val refs = (1 to numPairs).toList.map { id =>
      val refs = startTimePromises.zip(endTimePromises).zipWithIndex.map { (promises, id) =>
        val (sPromise, ePromise) = promises
        val pongRef = ctx.spawn(pong, "pong" + id)
        val pingRef = ctx.spawn(ping(sPromise, ePromise, numIterations), "ping" + id)
        ctx.watch(pingRef)
        (pingRef, pongRef)
      }

      refs.foreach { (pingRef, pongRef) =>
        pingRef ! Pong(numIterations, pongRef)
      }

      val startTimes = Await.result(Future.sequence(startTimeFutures), Duration.Inf)
      val startTime = startTimes.min
      val endTimes = Await.result(Future.sequence(endTimeFutures), Duration.Inf)
      val endTime = endTimes.max
      durationPromise.success(endTime - startTime)
      val pingPongDuration = endTime - startTime

      var terminatedProcesses = 0
      Behaviors.receiveSignal {
        case (_, Terminated(ref)) =>
          terminatedProcesses = terminatedProcesses + 1
          if (terminatedProcesses == numPairs) {
            Behaviors.stopped
          } else {
            Behaviors.same
          }
          Behaviors.stopped
        case (_, _) =>
          Behaviors.empty
      }
    }

  def bench(params: (Int, Int)): Long = {
    val (numPairs, numIterations) = params
    val durationPromise = Promise[Long]()
    val durationFuture = durationPromise.future
    val system = ActorSystem(
      mainActor(durationPromise, numPairs, numIterations), "PingPongDemo")
    Await.result(system.whenTerminated, Duration.Inf)
    val duration = Await.result(durationFuture, Duration.Inf)
    duration
  }
}
Example 11
Source File: SudokuProblemSender.scala From Pi-Akka-Cluster with Apache License 2.0 | 5 votes |
package akkapi.cluster.sudoku

import java.io.File

import akka.actor.typed.scaladsl.{ActorContext, Behaviors, TimerScheduler}
import akka.actor.typed.{ActorRef, Behavior}

object SudokuProblemSender {

  sealed trait Command
  case object SendNewSudoku extends Command
  // Wrapped responses
  private final case class SolutionWrapper(result: SudokuSolver.Response) extends Command

  private val rowUpdates: Seq[SudokuDetailProcessor.RowUpdate] =
    SudokuIO.readSudokuFromFile(new File("sudokus/001.sudoku"))
      .map { case (rowIndex, update) => SudokuDetailProcessor.RowUpdate(rowIndex, update) }

  def apply(sudokuSolver: ActorRef[SudokuSolver.Command],
            sudokuSolverSettings: SudokuSolverSettings): Behavior[Command] =
    Behaviors.setup { context =>
      Behaviors.withTimers { timers =>
        new SudokuProblemSender(sudokuSolver, context, timers, sudokuSolverSettings).sending()
      }
    }
}

class SudokuProblemSender private (sudokuSolver: ActorRef[SudokuSolver.Command],
                                   context: ActorContext[SudokuProblemSender.Command],
                                   timers: TimerScheduler[SudokuProblemSender.Command],
                                   sudokuSolverSettings: SudokuSolverSettings) {
  import SudokuProblemSender._

  private val solutionWrapper: ActorRef[SudokuSolver.Response] =
    context.messageAdapter(response => SolutionWrapper(response))

  private val initialSudokuField = rowUpdates.toSudokuField

  private val rowUpdatesSeq = LazyList.continually(
    Seq(
      initialSudokuField,
      initialSudokuField.flipVertically,
      initialSudokuField.flipHorizontally,
      initialSudokuField.flipHorizontally.flipVertically,
      initialSudokuField.flipVertically.flipHorizontally,
      initialSudokuField.columnSwap(0,1),
      initialSudokuField.rowSwap(4,5).rowSwap(0, 2),
      initialSudokuField.randomSwapAround,
      initialSudokuField.randomSwapAround,
      initialSudokuField.rotateCW,
      initialSudokuField.rotateCCW,
      initialSudokuField.rotateCW.rotateCW,
      initialSudokuField.transpose,
      initialSudokuField.randomSwapAround,
      initialSudokuField.rotateCW.transpose,
      initialSudokuField.randomSwapAround,
      initialSudokuField.rotateCCW.transpose,
      initialSudokuField.randomSwapAround,
      initialSudokuField.randomSwapAround,
      initialSudokuField.flipVertically.transpose,
      initialSudokuField.flipVertically.rotateCW,
      initialSudokuField.columnSwap(4,5).columnSwap(0, 2).rowSwap(3,4),
      initialSudokuField.rotateCW.rotateCW.transpose
    ).map(_.toRowUpdates)).flatten.iterator

  private val problemSendInterval = sudokuSolverSettings.ProblemSender.SendInterval
  timers.startTimerAtFixedRate(SendNewSudoku, problemSendInterval) // on a 5 node RPi 4 based cluster in steady state, this can be lowered to about 6ms

  def sending(): Behavior[Command] = Behaviors.receiveMessagePartial {
    case SendNewSudoku =>
      context.log.debug("sending new sudoku problem")
      sudokuSolver ! SudokuSolver.InitialRowUpdates(rowUpdatesSeq.next, solutionWrapper)
      Behaviors.same
    case SolutionWrapper(solution: SudokuSolver.SudokuSolution) =>
      context.log.info(s"${SudokuIO.sudokuPrinter(solution)}")
      Behaviors.same
  }
}
Example 12
Source File: SudokuProgressTracker.scala From Pi-Akka-Cluster with Apache License 2.0 | 5 votes |
package akkapi.cluster.sudoku

import akka.actor.typed.scaladsl.{ActorContext, Behaviors}
import akka.actor.typed.{ActorRef, Behavior}

object SudokuProgressTracker {

  sealed trait Command
  final case class NewUpdatesInFlight(count: Int) extends Command
  final case class SudokuDetailState(index: Int, state: ReductionSet) extends Command

  // My responses
  sealed trait Response
  final case class Result(sudoku: Sudoku) extends Response

  def apply(rowDetailProcessors: Map[Int, ActorRef[SudokuDetailProcessor.Command]],
            sudokuSolver: ActorRef[Response]): Behavior[Command] =
    Behaviors.setup { context =>
      new SudokuProgressTracker(rowDetailProcessors, context, sudokuSolver).trackProgress(updatesInFlight = 0)
    }
}

class SudokuProgressTracker private (rowDetailProcessors: Map[Int, ActorRef[SudokuDetailProcessor.Command]],
                                     context: ActorContext[SudokuProgressTracker.Command],
                                     sudokuSolver: ActorRef[SudokuProgressTracker.Response]) {
  import SudokuProgressTracker._

  def trackProgress(updatesInFlight: Int): Behavior[Command] =
    Behaviors.receiveMessagePartial {
      case NewUpdatesInFlight(updateCount) if updatesInFlight - 1 == 0 =>
        rowDetailProcessors.foreach {
          case (_, processor) => processor ! SudokuDetailProcessor.GetSudokuDetailState(context.self)
        }
        collectEndState()
      case NewUpdatesInFlight(updateCount) =>
        trackProgress(updatesInFlight + updateCount)
    }

  def collectEndState(remainingRows: Int = 9,
                      endState: Vector[SudokuDetailState] = Vector.empty[SudokuDetailState]): Behavior[Command] =
    Behaviors.receiveMessagePartial {
      case detail @ SudokuDetailState(index, state) if remainingRows == 1 =>
        sudokuSolver ! Result(
          (detail +: endState)
            .sortBy { case SudokuDetailState(idx, _) => idx }
            .map { case SudokuDetailState(_, state) => state })
        trackProgress(updatesInFlight = 0)
      case detail @ SudokuDetailState(index, state) =>
        collectEndState(remainingRows = remainingRows - 1, detail +: endState)
    }
}
Example 13
Source File: PiClusterSingleton.scala From Pi-Akka-Cluster with Apache License 2.0 | 5 votes |
package akkapi.cluster

import akka.actor.typed.scaladsl.{ActorContext, Behaviors}
import akka.actor.typed.{ActorRef, Behavior, PostStop}
import akkapi.cluster.{ClusterStatusTracker, Settings}

object PiClusterSingleton {

  sealed trait Command
  final case object Ping extends Command

  def apply(settings: Settings,
            clusterStatusTracker: ActorRef[ClusterStatusTracker.ClusterEvent]): Behavior[Command] = {
    Behaviors.setup { context =>
      new PiClusterSingleton(context, settings, clusterStatusTracker).run()
    }
  }
}

class PiClusterSingleton private (context: ActorContext[PiClusterSingleton.Command],
                                  settings: Settings,
                                  clusterStatusTracker: ActorRef[ClusterStatusTracker.ClusterEvent]) {
  import PiClusterSingleton._

  // Cluster singleton has been started on this node
  clusterStatusTracker ! ClusterStatusTracker.PiClusterSingletonOnNode

  def run(): Behavior[Command] = Behaviors.receiveMessage[Command] {
    case Ping =>
      context.log.info(s"PiClusterSingleton was pinged")
      Behaviors.same
  }.receiveSignal {
    case (_, signal) if signal == PostStop =>
      clusterStatusTracker ! ClusterStatusTracker.PiClusterSingletonNotOnNode
      Behaviors.same
  }
}
Example 14
Source File: OledClusterVisualizer.scala From Pi-Akka-Cluster with Apache License 2.0 | 5 votes |
package akkapi.cluster

import akka.actor.typed.scaladsl.Behaviors
import akka.actor.typed.{ActorRef, Behavior}
import akkapi.cluster.ClusterStatusTracker.{IsLeader, IsNoLeader, NodeDown, NodeExiting, NodeJoining, NodeLeaving, NodeRemoved, NodeState, NodeUnreachable, NodeUp, NodeWeaklyUp, PiClusterSingletonNotRunning, PiClusterSingletonRunning}

object OledClusterVisualizer {
  def apply(screenNumber: Int,
            settings: Settings,
            oledDriver: ActorRef[OledDriver.Command]): Behavior[NodeState] =
    Behaviors.setup { context =>
      new OledClusterVisualizer(screenNumber, settings, oledDriver).running(
        nodes = Map.empty[Int, String],
        leader = None
      )
    }
}

class OledClusterVisualizer private (screenNumber: Int,
                                     settings: Settings,
                                     oledDriver: ActorRef[OledDriver.Command]) {

  private val thisHost = settings.config.getString("akka.remote.artery.canonical.hostname")

  private def updateState(nodeId: Int, status: String)(implicit nodes: Map[Int, String]): Map[Int, String] = {
    nodes + (nodeId -> status)
  }

  def running(implicit nodes: Map[Int, String],
              leader: Option[Int]): Behavior[NodeState] =
    Behaviors
      .receiveMessage[NodeState] {
        case NodeUp(nodeLedId) =>
          setClusterViewState(updateState(nodeLedId, "Up"))
        case NodeJoining(nodeLedId) =>
          setClusterViewState(updateState(nodeLedId, "Joining"))
        case NodeLeaving(nodeLedId) =>
          setClusterViewState(updateState(nodeLedId, "Left"))
        case NodeExiting(nodeLedId) =>
          setClusterViewState(updateState(nodeLedId, "Exited"))
        case NodeRemoved(nodeLedId) =>
          setClusterViewState(updateState(nodeLedId, "Removed"))
        case NodeDown(nodeLedId) =>
          setClusterViewState(updateState(nodeLedId, "Down"))
        case NodeUnreachable(nodeLedId) =>
          setClusterViewState(updateState(nodeLedId, "Unreachable"))
        case NodeWeaklyUp(nodeLedId) =>
          setClusterViewState(updateState(nodeLedId, "Weakly Up"))
        case IsLeader =>
          setLeader(Some(settings.HostToLedMapping(thisHost)))
        case IsNoLeader(address) =>
          setLeader(address)
        case PiClusterSingletonRunning =>
          Behaviors.same
        case PiClusterSingletonNotRunning =>
          Behaviors.same
      }

  private def render(nodes: Map[Int, String], leader: Option[Int]): String = {
    val stringBuilder = new StringBuilder
    //TODO
    (0 to 2).foreach(i => stringBuilder ++= "Node " + i + ": " + nodes.getOrElse(i, "N/A") + "\n")
    stringBuilder ++= "Leader: " + leader.getOrElse("N/A")
    stringBuilder.toString()
  }

  private def setClusterViewState(nodes: Map[Int, String])
                                 (implicit leader: Option[Int]): Behavior[NodeState] = {
    oledDriver ! OledDriver.UpdateView(screenNumber, render(nodes, leader))
    running(nodes, leader)
  }

  private def setLeader(leader: Option[Int])
                       (implicit nodes: Map[Int, String]): Behavior[NodeState] = {
    oledDriver ! OledDriver.UpdateView(screenNumber, render(nodes, leader))
    running(nodes, leader)
  }
}
Example 15
Source File: LedPulser.scala From Pi-Akka-Cluster with Apache License 2.0 | 5 votes |
package akkapi.cluster

import akka.actor.typed.scaladsl.{ActorContext, Behaviors, TimerScheduler}
import akka.actor.typed.{ActorRef, Behavior}
import org.neopixel.Neopixel

import scala.concurrent.duration.FiniteDuration

object LedPulser {
  sealed trait Command
  final case class PulseLed(ledNumber: Int,
                            color: Long,
                            flashDuration: FiniteDuration,
                            overRunColor: Option[Long]) extends Command
  private final case class StopPulse(ledNumber: Int) extends Command

  def apply(settings: Settings,
            ledStripDriver: ActorRef[LedStripDriver.Command]): Behavior[Command] =
    Behaviors.setup { context =>
      Behaviors.withTimers { timers =>
        new LedPulser(settings, context, timers, ledStripDriver).run(Neopixel.Black)
      }
    }
}

class LedPulser(settings: Settings,
                context: ActorContext[LedPulser.Command],
                timers: TimerScheduler[LedPulser.Command],
                ledStripDriver: ActorRef[LedStripDriver.Command]) {
  import LedPulser._

  def run(currentColor: Long): Behavior[Command] = Behaviors.receiveMessagePartial {
    case PulseLed(ledNumber, color, flashDuration, overRunColor) if color != currentColor =>
      timers.startTimerWithFixedDelay(StopPulse(ledNumber), flashDuration)
      ledStripDriver ! LedStripDriver.SetLedState(ledNumber, color, None)
      run(color)
    case PulseLed(ledNumber, color, flashDuration, overRunColor) =>
      // If the new color is the same as the current color, it implies that
      // the timer is still running. Obviously, no need to update the color
      // on the LED. Running `startTimerWithFixedDelay` will cancel the current
      // timer and start a "fresh" one
      timers.startTimerWithFixedDelay(StopPulse(ledNumber), flashDuration)
      run(color)
    case StopPulse(ledNumber) =>
      ledStripDriver ! LedStripDriver.SetLedState(ledNumber, Neopixel.Black, None)
      run(Neopixel.Black)
  }
}
Example 16
Source File: ClusterStatusTrackerMain.scala From Pi-Akka-Cluster with Apache License 2.0 | 5 votes |
package akkapi.cluster

import akka.NotUsed
import akka.actor.typed.scaladsl.adapter.TypedActorSystemOps
import akka.actor.typed.scaladsl.{ActorContext, Behaviors}
import akka.actor.typed.{ActorSystem, Behavior, Terminated}
import akka.management.scaladsl.AkkaManagement

object Main {
  def apply(settings: Settings): Behavior[NotUsed] = Behaviors.setup { context =>
    val ledStripDriver = context.spawn(LedStripDriver(settings), "led-strip-driver")
    val ledStripController = context.spawn(LedStripVisualiser(settings, ledStripDriver), "led-strip-controller")
    val clusterStatusTracker = context.spawn(
      ClusterStatusTracker(
        settings,
        Some(contextToClusterSingleton(settings))
      ),
      "cluster-status-tracker"
    )
    clusterStatusTracker ! ClusterStatusTracker.SubscribeVisualiser(ledStripController)

    Behaviors.receiveSignal {
      case (_, Terminated(_)) =>
        Behaviors.stopped
    }
  }

  private def contextToClusterSingleton(settings: Settings): ActorContextToSingletonBehavior =
    (context: ActorContext[ClusterStatusTracker.ClusterEvent]) => PiClusterSingleton(settings, context.self)

  type ActorContextToSingletonBehavior = ActorContext[ClusterStatusTracker.ClusterEvent] => Behavior[PiClusterSingleton.Command]
}

object ClusterStatusTrackerMain {
  def main(args: Array[String]): Unit = {
    System.loadLibrary("rpi_ws281x")
    val settings = Settings()
    val config = settings.config
    val system = ActorSystem[NotUsed](Main(settings), settings.actorSystemName, config)
    // Start Akka HTTP Management extension
    AkkaManagement(system.toClassic).start()
  }
}
Example 17
Source File: PiClusterSingleton.scala From Pi-Akka-Cluster with Apache License 2.0 | 5 votes |
package akkapi.cluster

import akka.actor.typed.scaladsl.{ActorContext, Behaviors}
import akka.actor.typed.{ActorRef, Behavior, PostStop}

object PiClusterSingleton {

  sealed trait Command
  final case object Ping extends Command

  def apply(settings: Settings,
            clusterStatusTracker: ActorRef[ClusterStatusTracker.ClusterEvent]): Behavior[Command] = {
    Behaviors.setup { context =>
      new PiClusterSingleton(context, settings, clusterStatusTracker).run()
    }
  }
}

class PiClusterSingleton private (context: ActorContext[PiClusterSingleton.Command],
                                  settings: Settings,
                                  clusterStatusTracker: ActorRef[ClusterStatusTracker.ClusterEvent]) {
  import PiClusterSingleton._

  // Cluster singleton has been started on this node
  clusterStatusTracker ! ClusterStatusTracker.PiClusterSingletonOnNode

  def run(): Behavior[Command] = Behaviors.receiveMessage[Command] {
    case Ping =>
      context.log.info(s"PiClusterSingleton was pinged")
      Behaviors.same
  }.receiveSignal {
    case (_, signal) if signal == PostStop =>
      clusterStatusTracker ! ClusterStatusTracker.PiClusterSingletonNotOnNode
      Behaviors.same
  }
}
Example 18
Source File: ClusterStatusTrackerMain.scala From Pi-Akka-Cluster with Apache License 2.0 | 5 votes |
package akkapi.cluster

import akka.NotUsed
import akka.actor.typed.scaladsl.adapter.TypedActorSystemOps
import akka.actor.typed.scaladsl.{ActorContext, Behaviors}
import akka.actor.typed.{ActorSystem, Behavior, Terminated}
import akka.management.cluster.bootstrap.ClusterBootstrap
import akka.management.scaladsl.AkkaManagement

object Main {
  def apply(settings: Settings): Behavior[NotUsed] = Behaviors.setup { context =>
    val ledStripDriver = context.spawn(LedStripDriver(settings), "led-strip-driver")
    val ledStripController = context.spawn(LedStripVisualiser(settings, ledStripDriver), "led-strip-controller")
    val clusterStatusTracker = context.spawn(
      ClusterStatusTracker(
        settings,
        Some(contextToClusterSingleton(settings))
      ),
      "cluster-status-tracker"
    )
    clusterStatusTracker ! ClusterStatusTracker.SubscribeVisualiser(ledStripController)

    Behaviors.receiveSignal {
      case (_, Terminated(_)) =>
        Behaviors.stopped
    }
  }

  private def contextToClusterSingleton(settings: Settings): ActorContextToSingletonBehavior =
    (context: ActorContext[ClusterStatusTracker.ClusterEvent]) => PiClusterSingleton(settings, context.self)

  type ActorContextToSingletonBehavior = ActorContext[ClusterStatusTracker.ClusterEvent] => Behavior[PiClusterSingleton.Command]
}

object ClusterStatusTrackerMain {
  def main(args: Array[String]): Unit = {
    System.loadLibrary("rpi_ws281x")
    val settings = Settings()
    val config = settings.config
    val system = ActorSystem[NotUsed](Main(settings), settings.actorSystemName, config)
    // Start Akka HTTP Management extension
    AkkaManagement(system.toClassic).start()
    ClusterBootstrap(system.toClassic).start()
  }
}
Example 19
Source File: MetricsReporter.scala From kafka-lag-exporter with Apache License 2.0 | 5 votes |
package com.lightbend.kafkalagexporter

import akka.actor.typed.Behavior
import akka.actor.typed.scaladsl.Behaviors
import com.lightbend.kafkalagexporter.MetricsSink._

object MetricsReporter {

  def init(metricsSink: MetricsSink): Behavior[Message] =
    Behaviors.setup { _ => reporter(metricsSink) }

  def reporter(metricsSink: MetricsSink): Behavior[Message] = Behaviors.receive {
    case (_, m: MetricValue) =>
      metricsSink.report(m)
      Behaviors.same
    case (_, rm: RemoveMetric) =>
      metricsSink.remove(rm)
      Behaviors.same
    case (context, Stop(sender)) =>
      Behaviors.stopped { () =>
        metricsSink.stop()
        context.log.info("Gracefully stopped Prometheus metrics endpoint HTTP server")
        sender ! KafkaClusterManager.Done
      }
    case (context, m) =>
      context.log.error(s"Unhandled metric message: $m")
      Behaviors.same
  }
}
Example 20
Source File: ModelServerManagerBehavior.scala From model-serving-tutorial with Apache License 2.0 | 5 votes |
package com.lightbend.modelserving.akka

import akka.actor.typed.scaladsl.{AbstractBehavior, ActorContext, Behaviors}
import akka.actor.typed.{ActorRef, Behavior}
import com.lightbend.modelserving.model.ModelToServeStats

class ModelServerManagerBehavior(context: ActorContext[ModelServerManagerActor]) extends AbstractBehavior[ModelServerManagerActor] {

  println("Creating Model Serving Manager")

  private def getModelServer(dataType: String): ActorRef[ModelServerActor] = {
    context.child(dataType) match {
      case Some(actorRef) => actorRef.asInstanceOf[ActorRef[ModelServerActor]]
      case _ => context.spawn(Behaviors.setup[ModelServerActor](
        context => new ModelServerBehavior(context, dataType)), dataType)
    }
  }

  private def getInstances: GetModelsResult = GetModelsResult(context.children.map(_.path.name).toSeq)

  override def onMessage(msg: ModelServerManagerActor): Behavior[ModelServerManagerActor] = {
    msg match {
      case updateModel: UpdateModel => getModelServer(updateModel.model.dataType) tell updateModel
      case scoreData: ScoreData => getModelServer(scoreData.record.getType) tell scoreData
      case getState: GetState => // Used for state queries
        context.child(getState.dataType) match {
          case Some(server) => server.asInstanceOf[ActorRef[ModelServerActor]] tell getState
          case _ => getState.reply ! ModelToServeStats()
        }
      case getModels: GetModels => // Get list of models
        getModels.reply ! getInstances
    }
    this
  }
}
Example 21
Source File: TFServingModelServer.scala From model-serving-tutorial with Apache License 2.0 | 5 votes |
package com.lightbend.modelserving.tensorflowserving

import akka.actor.typed.scaladsl.Behaviors
import akka.actor.typed.scaladsl.adapter._
import akka.actor.typed.{ActorRef, ActorSystem}
import akka.http.scaladsl.Http
import akka.kafka.scaladsl.Consumer
import akka.kafka.{ConsumerSettings, Subscriptions}
import akka.stream.scaladsl.Sink
import akka.stream.typed.scaladsl.{ActorFlow, ActorMaterializer}
import akka.util.Timeout
import com.lightbend.modelserving.configuration.ModelServingConfiguration
import com.lightbend.modelserving.model.ServingResult
import com.lightbend.modelserving.winemodel.DataRecord
import org.apache.kafka.clients.consumer.ConsumerConfig
import org.apache.kafka.common.serialization.ByteArrayDeserializer

import scala.concurrent.duration._
import scala.util.Success

object TFServingModelServer {

  import ModelServingConfiguration._

  // Initialization
  implicit val modelServer = ActorSystem(
    Behaviors.setup[TFModelServerActor](
      context => new TFModelServerBehaviour(context)), "ModelServing")
  implicit val materializer = ActorMaterializer()
  implicit val executionContext = modelServer.executionContext
  implicit val askTimeout = Timeout(30.seconds)

  // Configuration properties for the Kafka topic.
  val dataSettings = ConsumerSettings(modelServer.toUntyped, new ByteArrayDeserializer, new ByteArrayDeserializer)
    .withBootstrapServers(KAFKA_BROKER)
    .withGroupId(DATA_GROUP)
    .withProperty(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest")

  def main(args: Array[String]): Unit = {

    println(s"Akka application that uses TensorFlow Serving, brokers $KAFKA_BROKER")

    // Data stream processing
    Consumer.atMostOnceSource(dataSettings, Subscriptions.topics(DATA_TOPIC))
      .map(record => DataRecord.wineFromByteArray(record.value)).collect { case Success(a) => a }
      .via(ActorFlow.ask(1)(modelServer)((elem, replyTo: ActorRef[Option[ServingResult[Double]]]) => new ServeData(replyTo, elem)))
      .collect { case Some(result) => result }
      .runWith(Sink.foreach(result =>
        println(s"Model served in ${System.currentTimeMillis() - result.submissionTs} ms, with result ${result.result} " +
          s"(model ${result.name}, data type ${result.dataType})")))
    // Rest Server
    startRest(modelServer)
  }

  def startRest(modelServerManager: ActorSystem[TFModelServerActor]): Unit = {

    implicit val timeout = Timeout(10.seconds)
    implicit val system = modelServerManager.toUntyped

    val host = "0.0.0.0"
    val port = MODELSERVING_PORT
    val routes = TFQueriesAkkaHttpResource.storeRoutes(modelServerManager)(modelServerManager.scheduler)

    val _ = Http().bindAndHandle(routes, host, port) map { binding =>
      println(s"Starting models observer on port ${binding.localAddress}")
    } recover {
      case ex =>
        println(s"Models observer could not bind to $host:$port - ${ex.getMessage}")
    }
  }
}
Example 22
Source File: WsHeart.scala From AckCord with MIT License | 5 votes |
package ackcord.voice

import scala.concurrent.duration._

import akka.actor.typed.scaladsl.{ActorContext, Behaviors, TimerScheduler}
import akka.actor.typed.{ActorRef, Behavior}

object WsHeart {

  def apply(parent: ActorRef[VoiceWsHandler.Command]): Behavior[Command] =
    Behaviors.setup { ctx =>
      Behaviors.withTimers(timers => runningHeart(ctx, timers, parent, None, receivedAck = true))
    }

  def runningHeart(
      context: ActorContext[Command],
      timers: TimerScheduler[Command],
      parent: ActorRef[VoiceWsHandler.Command],
      previousNonce: Option[Int],
      receivedAck: Boolean
  ): Behavior[Command] = Behaviors.receiveMessage {
    case StartBeating(interval, nonce) =>
      context.log.debug(s"Starting to beat with initial nonce $nonce")
      timers.startTimerAtFixedRate("heartbeatTimerKey", Beat, interval.millis)
      runningHeart(context, timers, parent, Some(nonce), receivedAck = true)

    case StopBeating =>
      timers.cancel("heartbeatTimerKey")
      runningHeart(context, timers, parent, None, receivedAck = true)

    case BeatAck(nonce) =>
      val log = context.log
      log.debug(s"Received HeartbeatACK with nonce $nonce")

      if (previousNonce.contains(nonce))
        runningHeart(context, timers, parent, None, receivedAck = true)
      else {
        log.warn("Did not receive correct nonce in HeartbeatACK. Restarting.")
        parent ! VoiceWsHandler.Restart(fresh = false, 500.millis)
        Behaviors.same
      }

    case Beat =>
      val log = context.log
      if (receivedAck) {
        val nonce = System.currentTimeMillis().toInt

        parent ! VoiceWsHandler.SendHeartbeat(nonce)
        log.debug(s"Sent Heartbeat with nonce $nonce")

        runningHeart(context, timers, parent, previousNonce = Some(nonce), receivedAck = false)
      } else {
        log.warn("Did not receive HeartbeatACK between heartbeats. Restarting.")
        parent ! VoiceWsHandler.Restart(fresh = false, 0.millis)
        Behaviors.same
      }
  }

  sealed trait Command
  case class StartBeating(interval: Double, nonce: Int) extends Command
  case object StopBeating extends Command
  case class BeatAck(nonce: Int) extends Command
  case object Beat extends Command
}
Example 23
Source File: WordShuffler.scala From streamee with Apache License 2.0 | 5 votes |
package io.moia.streamee.demo

import akka.actor.typed.{ ActorRef, Behavior }
import akka.actor.typed.scaladsl.Behaviors
import akka.stream.Materializer
import io.moia.streamee.{ IntoableProcessor, Process, ProcessSinkRef, Step }
import org.slf4j.LoggerFactory
import scala.annotation.tailrec
import scala.util.Random

object WordShuffler {

  final case class ShuffleWord(word: String)
  final case class WordShuffled(word: String)

  def apply(): Process[ShuffleWord, WordShuffled] =
    Process[ShuffleWord, WordShuffled]
      .via(shuffleWordToString)
      .via(shuffle)
      .via(stringToWordShuffled)

  def shuffleWordToString[Ctx]: Step[ShuffleWord, String, Ctx] =
    Step[ShuffleWord, Ctx].map(_.word)

  def shuffle[Ctx]: Step[String, String, Ctx] =
    Step[String, Ctx].map(shuffleWord)

  def stringToWordShuffled[Ctx]: Step[String, WordShuffled, Ctx] =
    Step[String, Ctx].map(WordShuffled)

  private def shuffleWord(word: String) = {
    @tailrec def loop(word: String, acc: String = ""): String =
      if (word.isEmpty)
        acc
      else {
        val (left, right) = word.splitAt(Random.nextInt(word.length))
        val c = right.head
        val nextWord = left + right.tail
        loop(nextWord, c +: acc)
      }

    if (word.length <= 3)
      word
    else
      word.head +: loop(word.tail.init) :+ word.last
  }
}

object WordShufflerRunner {
  import WordShuffler._

  sealed trait Command
  final case class GetProcessSinkRef(replyTo: ActorRef[ProcessSinkRef[ShuffleWord, WordShuffled]]) extends Command
  final case object Shutdown extends Command
  private final case object Stop extends Command

  private val logger = LoggerFactory.getLogger(getClass)

  def apply()(implicit mat: Materializer): Behavior[Command] =
    Behaviors.setup { context =>
      import context.executionContext

      val self = context.self
      val wordShufflerProcessor = IntoableProcessor(WordShuffler(), "word-shuffler")

      wordShufflerProcessor.whenDone.onComplete { reason =>
        if (logger.isWarnEnabled) logger.warn(s"Process completed: $reason")
        self ! Stop
      }

      Behaviors.receiveMessagePartial {
        case GetProcessSinkRef(replyTo) =>
          replyTo ! wordShufflerProcessor.sinkRef()
          Behaviors.same

        case Shutdown =>
          wordShufflerProcessor.shutdown()
          Behaviors.receiveMessagePartial {
            case Stop => Behaviors.stopped
          }
      }
    }
}
Example 24
Source File: Main.scala From streamee with Apache License 2.0 | 5 votes |
package io.moia.streamee.demo

import akka.actor.{ ActorSystem => ClassicSystem }
import akka.actor.CoordinatedShutdown.Reason
import akka.actor.typed.{ Behavior, Scheduler }
import akka.actor.typed.scaladsl.{ ActorContext, Behaviors }
import akka.actor.typed.scaladsl.adapter.{ ClassicActorSystemOps, TypedActorSystemOps }
import akka.cluster.typed.{ Cluster, ClusterSingleton, SelfUp, SingletonActor, Subscribe, Unsubscribe }
import akka.management.cluster.bootstrap.ClusterBootstrap
import akka.management.scaladsl.AkkaManagement
import io.moia.streamee.FrontProcessor
import org.apache.logging.log4j.core.async.AsyncLoggerContextSelector
import org.slf4j.LoggerFactory
import pureconfig.generic.auto.exportReader
import pureconfig.ConfigSource
import scala.concurrent.ExecutionContext
import scala.concurrent.duration.FiniteDuration

object Main {

  final case class Config(
      api: Api.Config,
      textShufflerProcessorTimeout: FiniteDuration,
      textShuffler: TextShuffler.Config
  )

  final object TopLevelActorTerminated extends Reason

  private val logger = LoggerFactory.getLogger(getClass)

  def main(args: Array[String]): Unit = {
    // Always use async logging!
    sys.props += "log4j2.contextSelector" -> classOf[AsyncLoggerContextSelector].getName

    // Must happen before creating the actor system!
    val config = ConfigSource.default.at("streamee-demo").loadOrThrow[Config]

    // Always start with a classic system!
    val system = ClassicSystem("streamee-demo")
    system.spawn(Main(config), "main")

    // Cluster bootstrap
    AkkaManagement(system).start()
    ClusterBootstrap(system).start()
  }

  def apply(config: Config): Behavior[SelfUp] =
    Behaviors.setup { context =>
      if (logger.isInfoEnabled) logger.info(s"${context.system.name} started and ready to join cluster")
      Cluster(context.system).subscriptions ! Subscribe(context.self, classOf[SelfUp])

      Behaviors.receive { (context, _) =>
        if (logger.isInfoEnabled) logger.info(s"${context.system.name} joined cluster and is up")
        Cluster(context.system).subscriptions ! Unsubscribe(context.self)

        initialize(config)(context)

        Behaviors.empty
      }
    }

  private def initialize(config: Config)(implicit context: ActorContext[_]) = {
    import config._

    implicit val classicSystem: ClassicSystem = context.system.toClassic
    implicit val ec: ExecutionContext = context.executionContext
    implicit val scheduler: Scheduler = context.system.scheduler

    val wordShufflerRunner = ClusterSingleton(context.system).init(
      SingletonActor(WordShufflerRunner(), "word-shuffler")
        .withStopMessage(WordShufflerRunner.Shutdown)
    )

    val textShufflerProcessor = FrontProcessor(
      TextShuffler(config.textShuffler, wordShufflerRunner),
      textShufflerProcessorTimeout,
      "text-shuffler"
    )

    Api(config.api, textShufflerProcessor)
  }
}
Example 25
Source File: JobActor.scala From fusion-data with Apache License 2.0 | 5 votes |
package mass.job.service.job

import akka.actor.typed.scaladsl.{ ActorContext, Behaviors }
import akka.actor.typed.{ ActorRef, ActorSystem, Behavior }
import akka.cluster.typed.{ ClusterSingleton, ClusterSingletonSettings, SingletonActor }
import fusion.inject.guice.GuiceApplication
import fusion.json.CborSerializable
import helloscala.common.IntStatus
import mass.core.Constants
import mass.job.JobScheduler
import mass.job.service.job.JobActor.CommandReply
import mass.message.job._

import scala.concurrent.Future

object JobActor {
  sealed trait Command extends CborSerializable
  final case class CommandReply(message: JobMessage, replyTo: ActorRef[JobResponse]) extends Command
  final case class CommandEvent(event: JobEvent) extends Command

  val NAME = "job"

  def init(system: ActorSystem[_]): ActorRef[Command] = {
    ClusterSingleton(system).init(
      SingletonActor(apply(), NAME).withSettings(ClusterSingletonSettings(system).withRole(Constants.Roles.CONSOLE)))
  }

  private def apply(): Behavior[Command] = Behaviors.setup[Command](context => new JobActor(context).init())
}

import mass.job.service.job.JobActor._
class JobActor private (context: ActorContext[Command]) extends JobServiceComponent {
  import context.executionContext

  override val jobScheduler: JobScheduler = GuiceApplication(context.system).instance[JobScheduler]

  def init(): Behavior[Command] = {
    receive()
  }

  def receive(): Behavior[Command] = Behaviors.receiveMessage[Command] {
    case CommandReply(message, replyTo) =>
      receiveMessage(message).foreach(resp => replyTo ! resp)
      Behaviors.same
    case CommandEvent(event) =>
      receiveEvent(event)
      Behaviors.same
  }

  private def receiveMessage(message: JobMessage): Future[JobResponse] =
    try {
      val future = message match {
        case req: JobScheduleReq     => handleScheduleJob(req)
        case req: JobPageReq         => handlePage(req)
        case req: JobFindReq         => handleFind(req)
        case req: JobUploadJobReq    => handleUploadJob(req)
        case req: JobListReq         => handleList(req)
        case req: JobGetAllOptionReq => Future(handleGetAllOption(req))
        case req: JobCreateReq       => handleCreateJob(req)
        case req: JobUpdateReq       => handleUpdate(req)
        case req: JobUploadFilesReq  => handleUploadFiles(req)
      }
      future.recover {
        case e =>
          val message = s"Handle message error: ${e.getMessage}."
          logger.error(message, e)
          JobErrorResponse(IntStatus.INTERNAL_ERROR, message)
      }
    } catch {
      case e: Throwable =>
        val message = s"Process message error: ${e.getMessage}."
        logger.error(message)
        Future.successful(JobErrorResponse(IntStatus.INTERNAL_ERROR, message))
    }

  private def receiveEvent(v: JobEvent): Unit =
    try {
      v match {
        case event: JobTriggerEvent => triggerJob(event)
      }
    } catch {
      case e: Throwable => logger.error(s"Process event error: ${e.getMessage}", e)
    }
}
Example 26
Source File: LagSim.scala From kafka-lag-exporter with Apache License 2.0 | 5 votes |
package com.lightbend.kafkalagexporter.integration

import akka.actor.Cancellable
import akka.actor.typed.scaladsl.Behaviors
import akka.actor.typed.{Behavior, PostStop}
import akka.kafka.{CommitterSettings, Subscriptions}
import akka.kafka.scaladsl.{Committer, Consumer}
import akka.kafka.testkit.scaladsl.KafkaSpec
import akka.stream.OverflowStrategy
import akka.stream.scaladsl.Keep
import akka.stream.testkit.scaladsl.TestSink
import org.scalatest.concurrent.ScalaFutures

import scala.concurrent.Await
import scala.concurrent.duration._

trait LagSim extends KafkaSpec with ScalaFutures {
  private implicit val patience: PatienceConfig = PatienceConfig(30.seconds, 1.second)

  class LagSimulator(topic: String, group: String) {
    private var offset: Int = 0
    private val committerSettings = CommitterSettings(system).withMaxBatch(1).withParallelism(1)

    private lazy val (consumerControl, consumerProbe) = Consumer
      .committableSource(consumerDefaults.withGroupId(group), Subscriptions.topics(topic))
      .buffer(size = 1, OverflowStrategy.backpressure)
      .map { elem =>
        log.debug("Committing elem with offset: {}", elem.committableOffset.partitionOffset)
        elem.committableOffset.commitScaladsl()
      }
      .toMat(TestSink.probe)(Keep.both)
      .run()

    def produceElements(num: Int): Unit = {
      Await.result(produce(topic, offset to (offset + num)), remainingOrDefault)
      offset += num + 1
    }

    // TODO: Replace this with regular Kafka Consumer for more fine-grained control over committing
    def consumeElements(num: Int): Unit = {
      consumerProbe
        .request(num)
        .expectNextN(num)
    }

    def shutdown(): Unit = {
      consumerControl.shutdown().futureValue
      consumerProbe.cancel()
    }
  }

  sealed trait Simulator
  case class Tick(produce: Int, consume: Int) extends Simulator

  def lagSimActor(simulator: LagSimulator,
                  scheduledTick: Cancellable = Cancellable.alreadyCancelled): Behavior[Simulator] =
    Behaviors.receive[Simulator] {
      case (context, tick @ Tick(produce, consume)) =>
        simulator.produceElements(produce)
        simulator.consumeElements(consume)
        lagSimActor(simulator, context.scheduleOnce(1 second, context.self, tick))
    } receiveSignal {
      case (_, PostStop) =>
        simulator.shutdown()
        scheduledTick.cancel()
        Behaviors.same
    }
}
Example 27
Source File: KafkaClusterManager.scala From kafka-lag-exporter with Apache License 2.0 | 5 votes |
package com.lightbend.kafkalagexporter

import akka.actor.typed.scaladsl.Behaviors
import akka.actor.typed.{ActorRef, Behavior, ChildFailed}
import akka.util.Timeout
import com.lightbend.kafkalagexporter.KafkaClient.KafkaClientContract
import com.lightbend.kafkalagexporter.watchers.Watcher

import scala.concurrent.duration._
import scala.util.{Failure, Success}

object KafkaClusterManager {

  sealed trait Message
  sealed trait Stop extends Message
  final case object Stop extends Stop
  sealed trait Done extends Message
  final case object Done extends Done
  final case class ClusterAdded(c: KafkaCluster) extends Message
  final case class ClusterRemoved(c: KafkaCluster) extends Message

  final case class NamedCreator(name: String, creator: () => MetricsSink)

  private val stopTimeout: Timeout = 3.seconds

  def init(
      appConfig: AppConfig,
      metricsSinks: List[NamedCreator],
      clientCreator: KafkaCluster => KafkaClientContract): Behavior[Message] = Behaviors.setup { context =>
    context.log.info("Starting Kafka Lag Exporter with configuration: \n{}", appConfig)

    if (appConfig.clusters.isEmpty && !appConfig.strimziWatcher)
      context.log.info("No watchers are defined and no clusters are statically configured. Nothing to do.")

    val watchers: Seq[ActorRef[Watcher.Message]] = Watcher.createClusterWatchers(context, appConfig)
    val reporters: List[ActorRef[MetricsSink.Message]] = metricsSinks.map { metricsSink: NamedCreator =>
      context.spawn(MetricsReporter.init(metricsSink.creator()), metricsSink.name)
    }
    appConfig.clusters.foreach(cluster => context.self ! ClusterAdded(cluster))

    reporters.map { context.watch }

    manager(appConfig, clientCreator, reporters, collectors = Map.empty, watchers)
  }

  def manager(
      appConfig: AppConfig,
      clientCreator: KafkaCluster => KafkaClientContract,
      reporters: List[ActorRef[MetricsSink.Message]],
      collectors: Map[KafkaCluster, ActorRef[ConsumerGroupCollector.Message]],
      watchers: Seq[ActorRef[Watcher.Message]]): Behavior[Message] =
    Behaviors.receive[Message] {
      case (context, ClusterAdded(cluster)) =>
        context.log.info(s"Cluster Added: $cluster")

        val config = ConsumerGroupCollector.CollectorConfig(
          appConfig.pollInterval,
          appConfig.lookupTableSize,
          cluster
        )
        val collector = context.spawn(
          ConsumerGroupCollector.init(config, clientCreator, reporters),
          s"consumer-group-collector-${cluster.name}"
        )

        manager(appConfig, clientCreator, reporters, collectors + (cluster -> collector), watchers)

      case (context, ClusterRemoved(cluster)) =>
        context.log.info(s"Cluster Removed: $cluster")

        collectors.get(cluster) match {
          case Some(collector) =>
            collector ! ConsumerGroupCollector.Stop
            manager(appConfig, clientCreator, reporters, collectors - cluster, watchers)
          case None =>
            manager(appConfig, clientCreator, reporters, collectors, watchers)
        }

      case (context, _: Stop) =>
        context.log.info("Attempting graceful shutdown")

        watchers.foreach(_ ! Watcher.Stop)
        collectors.foreach { case (_, collector) => collector ! ConsumerGroupCollector.Stop }

        implicit val timeout = stopTimeout
        reporters.foreach { reporter =>
          context.ask(reporter, (_: ActorRef[MetricsSink.Message]) => MetricsSink.Stop(context.self)) {
            case Success(_) => Done
            case Failure(ex) =>
              context.log.error("The metrics reporter shutdown failed.", ex)
              Done
          }
        }

        Behaviors.same
      case (_, _: Done) =>
        Behaviors.stopped
    } receiveSignal {
      case (context, ChildFailed(`reporters`, cause)) =>
        context.log.error("The metrics reporter failed. Shutting down.", cause)
        context.self ! Stop
        Behaviors.same
    }
}
Example 28
Source File: PiClusterSingleton.scala From Pi-Akka-Cluster with Apache License 2.0 | 5 votes |
package akkapi.cluster

import akka.actor.typed.scaladsl.{ActorContext, Behaviors}
import akka.actor.typed.{ActorRef, Behavior, PostStop}

object PiClusterSingleton {

  sealed trait Command
  final case object Ping extends Command

  def apply(settings: Settings,
            clusterStatusTracker: ActorRef[ClusterStatusTracker.ClusterEvent]): Behavior[Command] = {
    Behaviors.setup { context =>
      new PiClusterSingleton(context, settings, clusterStatusTracker).run()
    }
  }
}

class PiClusterSingleton private (context: ActorContext[PiClusterSingleton.Command],
                                  settings: Settings,
                                  clusterStatusTracker: ActorRef[ClusterStatusTracker.ClusterEvent]) {
  import PiClusterSingleton._

  // Cluster singleton has been started on this node
  clusterStatusTracker ! ClusterStatusTracker.PiClusterSingletonOnNode

  def run(): Behavior[Command] = Behaviors.receiveMessage[Command] {
    case Ping =>
      context.log.info(s"PiClusterSingleton was pinged")
      Behaviors.same
  }.receiveSignal {
    case (_, signal) if signal == PostStop =>
      clusterStatusTracker ! ClusterStatusTracker.PiClusterSingletonNotOnNode
      Behaviors.same
  }
}
Example 29
Source File: StrimziClusterWatcher.scala From kafka-lag-exporter with Apache License 2.0 | 5 votes |
package com.lightbend.kafkalagexporter.watchers import akka.actor.typed.scaladsl.Behaviors import akka.actor.typed.{ActorRef, Behavior} import com.lightbend.kafkalagexporter.{KafkaCluster, KafkaClusterManager} object StrimziClusterWatcher { val name: String = "strimzi" def init(handler: ActorRef[KafkaClusterManager.Message]): Behavior[Watcher.Message] = Behaviors.setup { context => val watcher = new Watcher.Events { override def added(cluster: KafkaCluster): Unit = handler ! KafkaClusterManager.ClusterAdded(cluster) override def removed(cluster: KafkaCluster): Unit = handler ! KafkaClusterManager.ClusterRemoved(cluster) override def error(e: Throwable): Unit = context.log.error(e.getMessage, e) } val client = StrimziClient(watcher) watch(client) } def watch(client: Watcher.Client): Behaviors.Receive[Watcher.Message] = Behaviors.receive { case (context, _: Watcher.Stop) => Behaviors.stopped { () => client.close() context.log.info("Gracefully stopped StrimziKafkaWatcher") } } }
Example 30
Source File: ReadSide.scala From akka-persistence-cassandra with Apache License 2.0 | 5 votes |
package akka.persistence.cassandra.example import akka.actor.typed.pubsub.Topic import akka.actor.typed.scaladsl.Behaviors import akka.actor.typed.{ ActorRef, ActorSystem, Behavior, PostStop } import akka.cluster.sharding.typed.{ ClusterShardingSettings, ShardedDaemonProcessSettings } import akka.cluster.sharding.typed.scaladsl.ShardedDaemonProcess import akka.stream.{ KillSwitches, SharedKillSwitch } import com.typesafe.config.Config import org.HdrHistogram.Histogram import akka.actor.typed.scaladsl.LoggerOps import scala.concurrent.duration._ object ReadSide { sealed trait Command private case object ReportMetrics extends Command object Settings { def apply(config: Config): Settings = Settings(config.getInt("processors"), config.getInt("tags-per-processor")) } case class Settings(nrProcessors: Int, tagsPerProcessor: Int) { val nrTags: Int = nrProcessors * tagsPerProcessor } def apply( system: ActorSystem[_], topic: ActorRef[Topic.Command[ReadSideTopic.ReadSideMetrics]], settings: Settings): Unit = { system.log.info("Running {} processors", settings.nrProcessors) val killSwitch: SharedKillSwitch = KillSwitches.shared("eventProcessorSwitch") ShardedDaemonProcess(system).init( "tag-processor", settings.nrProcessors - 1, // bug that creates +1 processor FIXME remove in 2.6.5 i => behavior(topic, i, settings, killSwitch), ShardedDaemonProcessSettings(system).withShardingSettings(ClusterShardingSettings(system).withRole("read")), None) } private def behavior( topic: ActorRef[Topic.Command[ReadSideTopic.ReadSideMetrics]], nr: Int, settings: Settings, killSwitch: SharedKillSwitch): Behavior[Command] = Behaviors.withTimers { timers => timers.startTimerAtFixedRate(ReportMetrics, 10.second) Behaviors.setup { ctx => val start = (settings.tagsPerProcessor * nr) val end = start + (settings.tagsPerProcessor) - 1 val tags = (start to end).map(i => s"tag-$i") ctx.log.info("Processor {} processing tags {}", nr, tags) // milliseconds, highest value = 1 minute val histogram = new Histogram(10 * 1000 * 60, 2) // maybe easier to just have these as different actors // my thinking is we can start with a large number of tags and scale out // read side processors later // having more tags will also increase write throughput/latency as it'll write to // many partitions // downside is running many streams/queries against c* tags.foreach( tag => new EventProcessorStream[ConfigurablePersistentActor.Event]( ctx.system, ctx.executionContext, s"processor-$nr", tag).runQueryStream(killSwitch, histogram)) Behaviors .receiveMessage[Command] { case ReportMetrics => if (histogram.getTotalCount > 0) { topic ! Topic.Publish( ReadSideTopic.ReadSideMetrics( histogram.getTotalCount, histogram.getMaxValue, histogram.getValueAtPercentile(99), histogram.getValueAtPercentile(50))) histogram.reset() } Behaviors.same } .receiveSignal { case (_, PostStop) => killSwitch.shutdown() Behaviors.same } } } }
Example 31
Source File: Reporter.scala From akka-persistence-cassandra with Apache License 2.0 | 5 votes |
package akka.persistence.cassandra.example import akka.actor.typed.{ ActorRef, Behavior } import akka.actor.typed.pubsub.Topic import akka.actor.typed.scaladsl.Behaviors import akka.persistence.cassandra.example.ReadSideTopic.ReadSideMetrics import akka.actor.typed.scaladsl.LoggerOps object Reporter { def apply(topic: ActorRef[Topic.Command[ReadSideTopic.ReadSideMetrics]]): Behavior[ReadSideMetrics] = Behaviors.setup { ctx => ctx.log.info("Subscribing to latency stats") topic ! Topic.Subscribe(ctx.self) Behaviors.receiveMessage[ReadSideMetrics] { case ReadSideMetrics(count, max, p99, p50) => ctx.log.infoN("Read side Count: {} Max: {} p99: {} p50: {}", count, max, p99, p50) Behaviors.same } } }
Example 32
Source File: ConfigurablePersistentActor.scala From akka-persistence-cassandra with Apache License 2.0 | 5 votes |
package akka.persistence.cassandra.example import akka.actor.typed.scaladsl.Behaviors import akka.actor.typed.{ ActorRef, ActorSystem, Behavior } import akka.cluster.sharding.typed.ShardingEnvelope import akka.cluster.sharding.typed.scaladsl.{ ClusterSharding, Entity, EntityTypeKey } import akka.persistence.typed.PersistenceId import akka.persistence.typed.scaladsl.{ Effect, EventSourcedBehavior } object ConfigurablePersistentActor { case class Settings(nrTags: Int) val Key: EntityTypeKey[Event] = EntityTypeKey[Event]("configurable") def init(settings: Settings, system: ActorSystem[_]): ActorRef[ShardingEnvelope[Event]] = { ClusterSharding(system).init(Entity(Key)(ctx => apply(settings, ctx.entityId)).withRole("write")) } final case class Event(timeCreated: Long = System.currentTimeMillis()) extends CborSerializable final case class State(eventsProcessed: Long) extends CborSerializable def apply(settings: Settings, persistenceId: String): Behavior[Event] = Behaviors.setup { ctx => EventSourcedBehavior[Event, Event, State]( persistenceId = PersistenceId.ofUniqueId(persistenceId), State(0), (_, event) => { ctx.log.info("persisting event {}", event) Effect.persist(event) }, (state, _) => state.copy(eventsProcessed = state.eventsProcessed + 1)).withTagger(event => Set("tag-" + math.abs(event.hashCode() % settings.nrTags))) } }
Example 33
Source File: LoadGenerator.scala From akka-persistence-cassandra with Apache License 2.0 | 5 votes |
package akka.persistence.cassandra.example import akka.actor.typed.{ ActorRef, Behavior } import akka.actor.typed.scaladsl.Behaviors import akka.cluster.sharding.typed.ShardingEnvelope import com.typesafe.config.Config import scala.concurrent.duration.FiniteDuration import scala.util.Random import akka.util.JavaDurationConverters._ object LoadGenerator { object Settings { def apply(config: Config): Settings = { Settings(config.getInt("persistence-ids"), config.getDuration("load-tick-duration").asScala) } } case class Settings(nrPersistenceIds: Int, tickDuration: FiniteDuration) sealed trait Command final case class Start(duration: FiniteDuration) extends Command final case class Tick() extends Command private case object Stop extends Command def apply( settings: Settings, ref: ActorRef[ShardingEnvelope[ConfigurablePersistentActor.Event]]): Behavior[Command] = { Behaviors.withTimers { timers => Behaviors.setup { ctx => Behaviors.receiveMessage { case Start(duration) => ctx.log.info("Starting...") timers.startTimerAtFixedRate(Tick(), settings.tickDuration) timers.startSingleTimer(Stop, duration) Behaviors.same case Tick() => ctx.log.info("Sending event") ref ! ShardingEnvelope( s"p${Random.nextInt(settings.nrPersistenceIds)}", ConfigurablePersistentActor.Event()) Behaviors.same case Stop => Behaviors.same } } } } }
Example 34
Source File: Main.scala From akka-persistence-cassandra with Apache License 2.0 | 5 votes |
package akka.persistence.cassandra.example import akka.actor.typed.{ ActorRef, ActorSystem } import akka.actor.typed.scaladsl.Behaviors import akka.cluster.sharding.typed.ShardingEnvelope import akka.cluster.typed.{ Cluster, SelfUp, Subscribe } import akka.management.cluster.bootstrap.ClusterBootstrap import akka.management.scaladsl.AkkaManagement import akka.persistence.cassandra.example.LoadGenerator.Start import akka.actor.typed.scaladsl.LoggerOps import akka.stream.alpakka.cassandra.scaladsl.CassandraSessionRegistry import scala.concurrent.Await import scala.concurrent.duration._ object Main { def main(args: Array[String]): Unit = { ActorSystem(Behaviors.setup[SelfUp] { ctx => val readSettings = ReadSide.Settings(ctx.system.settings.config.getConfig("cassandra.example")) val writeSettings = ConfigurablePersistentActor.Settings(readSettings.nrTags) val loadSettings = LoadGenerator.Settings(ctx.system.settings.config.getConfig("cassandra.example")) AkkaManagement(ctx.system).start() ClusterBootstrap(ctx.system).start() val cluster = Cluster(ctx.system) cluster.subscriptions ! Subscribe(ctx.self, classOf[SelfUp]) val topic = ReadSideTopic.init(ctx) if (cluster.selfMember.hasRole("read")) { val session = CassandraSessionRegistry(ctx.system).sessionFor("akka.persistence.cassandra") val offsetTableStmt = """ CREATE TABLE IF NOT EXISTS akka.offsetStore ( eventProcessorId text, tag text, timeUuidOffset timeuuid, PRIMARY KEY (eventProcessorId, tag) ) """ Await.ready(session.executeDDL(offsetTableStmt), 30.seconds) } Behaviors.receiveMessage { case SelfUp(state) => ctx.log.infoN( "Cluster member joined. Initializing persistent actors. Roles {}. Members {}", cluster.selfMember.roles, state.members) val ref = ConfigurablePersistentActor.init(writeSettings, ctx.system) if (cluster.selfMember.hasRole("read")) { ctx.spawnAnonymous(Reporter(topic)) } ReadSide(ctx.system, topic, readSettings) if (cluster.selfMember.hasRole("load")) { ctx.log.info("Starting load generation") val load = ctx.spawn(LoadGenerator(loadSettings, ref), "load-generator") load ! Start(10.seconds) } Behaviors.empty } }, "apc-example") } }
Example 35
Source File: ForkJoinThroughput.scala From effpi with MIT License | 5 votes |
// Effpi - verified message-passing programs in Dotty // Copyright 2019 Alceste Scalas and Elias Benussi // Released under the MIT License: https://opensource.org/licenses/MIT package effpi.benchmarks.akka import akka.NotUsed import akka.actor.typed.scaladsl.{ Behaviors, MutableBehavior, ActorContext} import akka.actor.typed.{ ActorRef, ActorSystem, Behavior, DispatcherSelector, Terminated } import scala.concurrent.Future import scala.concurrent.duration._ import scala.concurrent.{ Future, Promise, Await } import scala.concurrent.ExecutionContext.Implicits.global object ForkJoinThroughput { case class Message(msg: String) def receiver(maxMsgs: Int) = Behaviors.setup[Message] { ctx => new MutableSimpleActor(ctx, maxMsgs) } class MutableSimpleActor( ctx: ActorContext[Message], maxMsgs: Int ) extends MutableBehavior[Message] { var count = 0 override def onMessage(msg: Message): Behavior[Message] = { count +=1 if (count < maxMsgs) { Behaviors.same } else { Behaviors.stopped } } } def mainActor( durationPromise: Promise[Long], numActors: Int, numMessages: Int ): Behavior[akka.NotUsed] = Behaviors.setup { ctx => val receiversRef = (1 to numActors).map{ id => ctx.spawn(receiver(numMessages), "receiver" + id)} val startTime = System.nanoTime() (1 to numMessages).foreach { n => receiversRef.foreach { simpleActor => simpleActor ! Message("Hello World!") } } val endTime = System.nanoTime() durationPromise.success(endTime - startTime) Behaviors.stopped } def bench(params: (Int, Int)): Long = { val (numActors, numMessages) = params val durationPromise = Promise[Long]() val durationFuture = durationPromise.future val system = ActorSystem( mainActor(durationPromise, numActors, numMessages), "ForkJoinCreationDemo") Await.result(system.whenTerminated, Duration.Inf) val duration = Await.result(durationFuture, Duration.Inf) duration } }
Example 36
Source File: CountingActor.scala From effpi with MIT License | 5 votes |
// Effpi - verified message-passing programs in Dotty // Copyright 2019 Alceste Scalas and Elias Benussi // Released under the MIT License: https://opensource.org/licenses/MIT package effpi.benchmarks.akka import akka.NotUsed import akka.actor.typed.scaladsl.{ Behaviors, MutableBehavior, ActorContext} import akka.actor.typed.{ ActorRef, ActorSystem, Behavior, DispatcherSelector, Terminated } import scala.concurrent.Future import scala.concurrent.duration._ import scala.concurrent.{ Future, Promise, Await } import scala.concurrent.ExecutionContext.Implicits.global object CountingActor { sealed trait CounterAction object CounterAction { final case class Add(num: Int, p: Promise[Int]) extends CounterAction final case class Cheque(replyTo: ActorRef[Sum]) extends CounterAction } case class Sum(sum: Int) val counter = Behaviors.setup[CounterAction] { ctx => new MutableCounter(ctx) } class MutableCounter( ctx: ActorContext[CounterAction] ) extends MutableBehavior[CounterAction] { var counter = 0 override def onMessage(msg: CounterAction): Behavior[CounterAction] = { msg match { case CounterAction.Add(num, p) => counter += 1 p.success(num) Behaviors.same case CounterAction.Cheque(replyTo) => replyTo ! Sum(counter) Behaviors.stopped } } } def sink(endTimePromise: Promise[Long]) = Behaviors.receive[Sum] { (ctx, msg) => endTimePromise.success(System.nanoTime()) Behaviors.stopped } def mainActor( durationPromise: Promise[Long], numMessages: Int ): Behavior[akka.NotUsed] = Behaviors.setup { ctx => val endTimePromise = Promise[Long]() val endTimeFuture = endTimePromise.future val sinkRef = ctx.spawn(sink(endTimePromise), "sink") ctx.watch(sinkRef) val counterRef = ctx.spawn(counter, "counter") val startTime = System.nanoTime() val futs = (1 to numMessages).toList.map { num => val p = Promise[Int]() val f = p.future counterRef ! CounterAction.Add(num, p) f } Await.result(Future.sequence(futs), Duration.Inf) counterRef ! CounterAction.Cheque(sinkRef) val endTime = Await.result(endTimeFuture, Duration.Inf) val countingDuration = endTime - startTime durationPromise.success(countingDuration) Behaviors.receiveSignal { case (_, Terminated(ref)) => Behaviors.stopped case (_, _) => Behaviors.empty } } def bench(params: Int): Long = { val durationPromise = Promise[Long]() val durationFuture = durationPromise.future val system = ActorSystem( mainActor(durationPromise, params), "CountingActorDemo") Await.result(system.whenTerminated, Duration.Inf) val duration = Await.result(durationFuture, Duration.Inf) duration } }
Example 37
Source File: LobbyClientActor.scala From lila-ws with GNU Affero General Public License v3.0 | 5 votes |
package lila.ws import akka.actor.typed.scaladsl.Behaviors import akka.actor.typed.{ Behavior, PostStop } import play.api.libs.json.JsValue import ipc._ object LobbyClientActor { import ClientActor._ case class State( idle: Boolean = false, site: ClientActor.State = ClientActor.State() ) def start(deps: Deps): Behavior[ClientMsg] = Behaviors.setup { ctx => import deps._ onStart(deps, ctx) req.user foreach { users.connect(_, ctx.self, silently = true) } services.lobby.connect(req.sri -> req.user.map(_.id)) Bus.subscribe(Bus.channel.lobby, ctx.self) apply(State(), deps) } private def apply(state: State, deps: Deps): Behavior[ClientMsg] = Behaviors .receive[ClientMsg] { (ctx, msg) => import deps._ def forward(payload: JsValue): Unit = lilaIn.lobby(LilaIn.TellSri(req.sri, req.user.map(_.id), payload)) msg match { case ctrl: ClientCtrl => socketControl(state.site, deps, ctrl) case ClientIn.LobbyNonIdle(payload) => if (!state.idle) clientIn(payload) Behaviors.same case ClientIn.OnlyFor(endpoint, payload) => if (endpoint == ClientIn.OnlyFor.Lobby) clientIn(payload) Behaviors.same case in: ClientIn => clientInReceive(state.site, deps, in) match { case None => Behaviors.same case Some(s) => apply(state.copy(site = s), deps) } case msg: ClientOut.Ping => clientIn(services.lobby.pong.get) apply(state.copy(site = sitePing(state.site, deps, msg)), deps) case ClientOut.LobbyForward(payload) => forward(payload) Behaviors.same case ClientOut.Idle(value, payload) => forward(payload) apply(state.copy(idle = value), deps) // default receive (site) case msg: ClientOutSite => val siteState = globalReceive(state.site, deps, ctx, msg) if (siteState == state.site) Behaviors.same else apply(state.copy(site = siteState), deps) case _ => Monitor.clientOutUnhandled("lobby").increment() Behaviors.same } } .receiveSignal { case (ctx, PostStop) => onStop(state.site, deps, ctx) Bus.unsubscribe(Bus.channel.lobby, ctx.self) deps.services.lobby.disconnect(deps.req.sri) Behaviors.same } }
Example 38
Source File: Mixer.scala From Learn-Scala-Programming with MIT License | 5 votes |
package ch12 import akka.actor.typed.{ActorRef, Behavior, SupervisorStrategy} import akka.actor.typed.scaladsl.Behaviors import ch12.Bakery.{Groceries, Dough} import ch12.Chef.Collect import scala.concurrent.duration.FiniteDuration import scala.util.Random object Mixer { class MotorOverheatException extends Exception class SlowRotationSpeedException extends Exception class StrongVibrationException extends Exception final case class Mix(groceries: Groceries, sender: ActorRef[Collect]) def mix(mixTime: FiniteDuration): Behavior[Mix] = Behaviors.receive[Mix] { case (ctx, Mix(Groceries(eggs, flour, sugar, chocolate), sender)) => if (Random.nextBoolean()) throw new MotorOverheatException Thread.sleep(mixTime.toMillis) sender ! Collect(Dough(eggs * 50 + flour + sugar + chocolate), ctx.self) Behaviors.stopped } def controlledMix(mixTime: FiniteDuration): Behavior[Mix] = Behaviors .supervise( Behaviors .supervise(Behaviors .supervise(mix(mixTime)) .onFailure[MotorOverheatException](SupervisorStrategy.stop)) .onFailure[SlowRotationSpeedException](SupervisorStrategy.restart)) .onFailure[StrongVibrationException](SupervisorStrategy.resume) }
Example 39
Source File: Chef.scala From Learn-Scala-Programming with MIT License | 5 votes |
package ch12 import akka.actor.typed.scaladsl.Behaviors import akka.actor.typed.{ActorRef, Behavior, DispatcherSelector} import ch12.Bakery.{Groceries, Dough} import ch12.Manager.ReceiveDough object Chef { sealed trait Command final case class Mix(g: Groceries, manager: ActorRef[Manager.Command]) extends Command final case class Collect(p: Dough, mixer: ActorRef[Mixer.Mix]) extends Command final case class BrokenMixer(mixer: ActorRef[Mixer.Mix]) extends Command def idle(mixerFactory: Behavior[Mixer.Mix]): Behaviors.Receive[Command] = Behaviors.receivePartial[Command] { case (context, mix@Mix(Groceries(eggs, flour, sugar, chocolate), manager)) => val mixers = for (i <- 1 to eggs) yield context.spawn(mixerFactory, s"Mixer_$i", DispatcherSelector.fromConfig("mixers-dispatcher")) mixers.foreach(mixer => context.watchWith(mixer, BrokenMixer(mixer))) val msg = Groceries(1, flour / eggs, sugar / eggs, chocolate / eggs) mixers.foreach(_ ! Mixer.Mix(msg, context.self)) mixing(mixers.toSet, 0, manager, mixerFactory) } def mixing(mixers: Set[ActorRef[Mixer.Mix]], collected: Int, manager: ActorRef[Manager.Command], mixerBuilder: Behavior[Mixer.Mix]): Behaviors.Receive[Command] = { def designateBehavior(mixer: ActorRef[Mixer.Mix], doughBuf: Int) = { val mixersToGo = mixers - mixer if (mixersToGo.isEmpty) { manager ! ReceiveDough(Dough(doughBuf)) idle(mixerBuilder) } else { mixing(mixersToGo, doughBuf, manager, mixerBuilder) } } Behaviors.receivePartial { case (context, Collect(dough, mixer)) => val doughBuf = collected + dough.weight context.stop(mixer) designateBehavior(mixer, doughBuf) case (context, BrokenMixer(m)) => context.log.warning("Broken mixer detected {}", m) context.self ! Collect(Dough(0), m) designateBehavior(m, collected) } } }
Example 40
Source File: Baker.scala From Learn-Scala-Programming with MIT License | 5 votes |
package ch12 import akka.actor.typed.{ActorRef, Behavior} import akka.actor.typed.scaladsl.{Behaviors, StashBuffer} import ch12.Bakery.{RawCookies, ReadyCookies} import ch12.Manager.ReceiveReadyCookies import ch12.Oven.{Extract, Put} import scala.concurrent.duration._ object Baker { val DefaultBakingTime: FiniteDuration = 2.seconds private val TimerKey = 'TimerKey sealed trait Command final case class BakeCookies(raw: RawCookies, sender: ActorRef[Manager.Command]) extends Command final case class TooManyCookies(raw: RawCookies) extends Command final case class CookiesReady(cookies: ReadyCookies) extends Command final case object CheckOven extends Command def turnOvenOn: Behavior[Command] = Behaviors.setup { context => val oven = context.spawn(Oven.empty, "Oven") idle(oven) } def idle(oven: ActorRef[Oven.Command]): Behavior[Command] = Behaviors.receivePartial { case (context, BakeCookies(rawCookies, manager)) => oven ! Put(rawCookies.count, context.self) Behaviors.withTimers { timers => timers.startSingleTimer(TimerKey, CheckOven, DefaultBakingTime) baking(oven, manager) } } def baking(oven: ActorRef[Oven.Command], manager: ActorRef[Manager.Command]): Behavior[Command] = Behaviors.setup[Command] { context => val buffer = StashBuffer[Command](capacity = 100) Behaviors.receiveMessage { case CheckOven => oven ! Extract(context.self) Behaviors.same case CookiesReady(cookies) => manager ! ReceiveReadyCookies(cookies) buffer.unstashAll(context, idle(oven)) case c: TooManyCookies=> buffer.stash(BakeCookies(c.raw, manager)) Behaviors.same case c : BakeCookies => buffer.stash(c) Behaviors.same } } }
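The Baker example uses the early Akka Typed StashBuffer[Command](capacity = ...) constructor; in Akka 2.6 the buffer is obtained via Behaviors.withStash instead. A minimal, generic sketch of that newer stash-and-replay pattern, assuming Akka 2.6; the Ready/Job message types are illustrative and not from the book.

import akka.actor.typed.Behavior
import akka.actor.typed.scaladsl.Behaviors

object StashSketch {
  sealed trait Msg
  case object Ready extends Msg
  final case class Job(id: Int) extends Msg

  // Buffer jobs until Ready arrives, then replay them into the active behavior.
  def waiting: Behavior[Msg] = Behaviors.withStash(capacity = 100) { buffer =>
    Behaviors.receiveMessage {
      case Ready =>
        buffer.unstashAll(active)
      case job: Job =>
        buffer.stash(job)
        Behaviors.same
    }
  }

  def active: Behavior[Msg] = Behaviors.receive { (context, msg) =>
    context.log.info("Processing {}", msg)
    Behaviors.same
  }
}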
Example 41
Source File: Boy.scala From Learn-Scala-Programming with MIT License | 5 votes |
package ch12 import akka.actor.typed.ActorRef import akka.actor.typed.scaladsl.Behaviors import ch12.Shop._ object Boy { final case class GoShopping(shoppingList: ShoppingList, seller: ActorRef[SellByList], manager: ActorRef[Manager.Command]) val goShopping = Behaviors.receiveMessage[GoShopping] { case GoShopping(shoppingList, seller, manager) => seller ! SellByList(shoppingList, manager) Behaviors.stopped } }
Example 42
Source File: CustomCache.scala From akka_streams_tutorial with MIT License | 5 votes |
package sample.stream_actor.typed import akka.actor.typed.scaladsl.Behaviors import akka.actor.typed.{ActorRef, Behavior} case class DeviceId(id: String) object CustomCache { sealed trait CacheRequests final case class Get(requestId: String, replyTo: ActorRef[CacheResponses]) extends CacheRequests final case class Devices(devices: List[DeviceId]) extends CacheRequests final case class AddDevices(devices: List[DeviceId]) extends CacheRequests sealed trait CacheResponses final case object EmptyCache extends CacheResponses final case class CachedDevices(devices: List[DeviceId]) extends CacheResponses val empty: Behavior[CacheRequests] = Behaviors.receive[CacheRequests] { (context, message) => message match { case Get(requestId, replyTo) => context.log.info("Empty cache request for requestId {}.", requestId) replyTo ! EmptyCache Behaviors.same case Devices(devices) => context.log.info(s"Initializing cache with: ${devices.size} devices") cached(devices) case AddDevices(devices) => context.log.info(s"Initializing cache with: ${devices.size} devices") cached(devices) } } private def cached(devices: List[DeviceId]): Behavior[CacheRequests] = Behaviors.receive { (context, message) => message match { case Get(requestId, replyTo) => context.log.info("Cache request for requestId {}.", requestId) replyTo ! CachedDevices(devices) Behaviors.same case Devices(updatedDevices) => context.log.info(s"Updating cache with: ${updatedDevices.size} devices") cached(updatedDevices) case AddDevices(updatedDevices) => context.log.info(s"Adding: ${updatedDevices.size} devices.") cached(devices = devices ++ updatedDevices) } } }
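A short sketch of how this cache might be driven from the outside with the typed ask pattern. The demo object and system name are illustrative; it assumes CustomCache and DeviceId from the listing are on the classpath.

import akka.actor.typed.{ActorSystem, Scheduler}
import akka.actor.typed.scaladsl.AskPattern._
import akka.util.Timeout
import sample.stream_actor.typed.{CustomCache, DeviceId}
import scala.concurrent.Await
import scala.concurrent.duration._

object CustomCacheDemo extends App {
  val cache = ActorSystem(CustomCache.empty, "cache-demo")
  implicit val timeout: Timeout = 3.seconds
  implicit val scheduler: Scheduler = cache.scheduler

  // Add two devices, then ask for the cached state; the cache should reply
  // with the devices added above.
  cache ! CustomCache.AddDevices(List(DeviceId("d1"), DeviceId("d2")))
  val response = Await.result(
    cache.ask[CustomCache.CacheResponses](replyTo => CustomCache.Get("req-1", replyTo)),
    3.seconds)
  println(response)
  cache.terminate()
}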
Example 43
Source File: BlockingActor.scala From akka_streams_tutorial with MIT License | 5 votes |
package actor import akka.actor.typed.Behavior import akka.actor.typed.scaladsl.Behaviors object BlockingActor { def apply(): Behavior[Int] = Behaviors.receive { (context, i) => context.log.info(s"Started: $i by ${Thread.currentThread().getName}") //block for 5 seconds, representing blocking I/O, etc Thread.sleep(5000) context.log.info(s"Finished: $i") Behaviors.same } }
Example 44
Source File: BlockingRight.scala From akka_streams_tutorial with MIT License | 5 votes |
package actor import akka.actor.typed.scaladsl.Behaviors import akka.actor.typed.{ActorSystem, DispatcherSelector} object BlockingRight extends App { val root = Behaviors.setup[Nothing] { context => (1 to 50).foreach { i => //non blocking actor running on default-dispatcher context.spawn(PrintActor(), s"nonblocking-$i") ! i //blocking actor running on custom-dispatcher context.spawn( BlockingActor(), s"blocking-$i", DispatcherSelector.fromConfig("custom-dispatcher-for-blocking") ) ! i } Behaviors.empty } val system = ActorSystem[Nothing](root, "BlockingRight") }
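The example relies on a "custom-dispatcher-for-blocking" entry in the application configuration. A minimal sketch of how such a dispatcher might be configured and passed to the ActorSystem; the pool size and throughput values are illustrative and not taken from the project's application.conf, and BlockingActor is the behavior from the listing above.

import akka.actor.typed.scaladsl.Behaviors
import akka.actor.typed.{ActorSystem, DispatcherSelector}
import com.typesafe.config.ConfigFactory

object BlockingDispatcherConfigSketch extends App {
  val config = ConfigFactory.parseString(
    """
    custom-dispatcher-for-blocking {
      type = Dispatcher
      executor = "thread-pool-executor"
      thread-pool-executor {
        fixed-pool-size = 16
      }
      throughput = 1
    }
    """).withFallback(ConfigFactory.load())

  val root = Behaviors.setup[Nothing] { context =>
    // Actors spawned with this selector run on the dedicated thread pool above,
    // keeping the default dispatcher free of blocking work.
    context.spawn(
      BlockingActor(),
      "blocking-1",
      DispatcherSelector.fromConfig("custom-dispatcher-for-blocking")) ! 1
    Behaviors.empty
  }

  ActorSystem[Nothing](root, "BlockingDispatcherConfigSketch", config)
}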
Example 45
Source File: Clients.scala From lila-ws with GNU Affero General Public License v3.0 | 5 votes |
package lila.ws import akka.actor.typed.scaladsl.Behaviors import scala.concurrent.Promise object Clients { sealed trait Control final case class Start(behavior: ClientBehavior, promise: Promise[Client]) extends Control final case class Stop(client: Client) extends Control def behavior = Behaviors.receive[Control] { (ctx, msg) => msg match { case Start(behavior, promise) => promise success ctx.spawnAnonymous(behavior) Behaviors.same case Stop(client) => ctx.stop(client) Behaviors.same } } }
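Clients.behavior spawns anonymous client actors on demand and hands the resulting ActorRef back through a Promise. A generic, self-contained sketch of that spawn-and-reply-via-Promise pattern; the SpawnerSketch object and the echo behavior are illustrative and not part of lila-ws.

import akka.actor.typed.{ActorRef, ActorSystem, Behavior}
import akka.actor.typed.scaladsl.Behaviors
import scala.concurrent.{Await, Promise}
import scala.concurrent.duration._

object SpawnerSketch {
  final case class Start(behavior: Behavior[String], promise: Promise[ActorRef[String]])

  // Spawns the requested behavior anonymously and completes the promise with its ref.
  val spawner: Behavior[Start] = Behaviors.receive { (ctx, msg) =>
    msg.promise.success(ctx.spawnAnonymous(msg.behavior))
    Behaviors.same
  }

  def main(args: Array[String]): Unit = {
    val system = ActorSystem(spawner, "spawner-sketch")
    val p = Promise[ActorRef[String]]()
    val echo = Behaviors.receive[String] { (ctx, s) => ctx.log.info(s); Behaviors.same }
    system ! Start(echo, p)
    val ref = Await.result(p.future, 3.seconds)
    ref ! "hello"
    system.terminate()
  }
}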
Example 46
Source File: Cook.scala From Learn-Scala-Programming with MIT License | 5 votes |
package ch12 import akka.actor.typed.ActorRef import akka.actor.typed.scaladsl.Behaviors import ch12.Bakery.{Dough, RawCookies} import ch12.Manager.ReceiveRawCookies object Cook { final case class FormCookies(dough: Dough, sender: ActorRef[Manager.Command]) val form: Behaviors.Receive[FormCookies] = Behaviors.receiveMessage { case FormCookies(dough, sender) => val numberOfCookies = makeCookies(dough.weight) sender ! ReceiveRawCookies(RawCookies(numberOfCookies)) form } private val cookieWeight = 60 private def makeCookies(weight: Int): Int = weight / cookieWeight }
Example 47
Source File: ChallengeClientActor.scala From lila-ws with GNU Affero General Public License v3.0 | 5 votes |
package lila.ws import akka.actor.typed.scaladsl.Behaviors import akka.actor.typed.{ Behavior, PostStop } import ipc._ object ChallengeClientActor { import ClientActor._ case class State( owner: Boolean, room: RoomActor.State, site: ClientActor.State = ClientActor.State() ) def start(roomState: RoomActor.State, owner: Boolean, fromVersion: Option[SocketVersion])( deps: Deps ): Behavior[ClientMsg] = Behaviors.setup { ctx => RoomActor.onStart(roomState, fromVersion, deps, ctx) apply(State(owner, roomState), deps) } private def apply(state: State, deps: Deps): Behavior[ClientMsg] = Behaviors .receive[ClientMsg] { (ctx, msg) => import deps._ def receive: PartialFunction[ClientMsg, Behavior[ClientMsg]] = { case in: ClientIn => clientInReceive(state.site, deps, in) match { case None => Behaviors.same case Some(s) => apply(state.copy(site = s), deps) } case ClientCtrl.Disconnect => // lila tries to close the round room, because there's no game with that ID yet // ignore it so we stay connected to the challenge Behaviors.same case ClientCtrl.Broom(oldSeconds) => if (state.site.lastPing < oldSeconds) Behaviors.stopped else { keepAlive challenge state.room.id Behaviors.same } case ctrl: ClientCtrl => socketControl(state.site, deps, ctrl) case ClientOut.ChallengePing => if (state.owner) services.challengePing(state.room.id) Behaviors.same // default receive (site) case msg: ClientOutSite => val siteState = globalReceive(state.site, deps, ctx, msg) if (siteState == state.site) Behaviors.same else apply(state.copy(site = siteState), deps) case _ => Monitor.clientOutUnhandled("challenge").increment() Behaviors.same } RoomActor.receive(state.room, deps).lift(msg).fold(receive(msg)) { case (newState, emit) => emit foreach lilaIn.challenge.apply newState.fold(Behaviors.same[ClientMsg]) { roomState => apply(state.copy(room = roomState), deps) } } } .receiveSignal { case (ctx, PostStop) => onStop(state.site, deps, ctx) RoomActor.onStop(state.room, deps, ctx) Behaviors.same } }
Example 48
Source File: StudyClientActor.scala From lila-ws with GNU Affero General Public License v3.0 | 5 votes |
package lila.ws import akka.actor.typed.scaladsl.Behaviors import akka.actor.typed.{ Behavior, PostStop } import play.api.libs.json.JsValue import ipc._ object StudyClientActor { import ClientActor._ case class State( room: RoomActor.State, site: ClientActor.State = ClientActor.State() ) def start(roomState: RoomActor.State, fromVersion: Option[SocketVersion])( deps: Deps ): Behavior[ClientMsg] = Behaviors.setup { ctx => RoomActor.onStart(roomState, fromVersion, deps, ctx) apply(State(roomState), deps) } private def apply(state: State, deps: Deps): Behavior[ClientMsg] = Behaviors .receive[ClientMsg] { (ctx, msg) => import deps._ def forward(payload: JsValue): Unit = lilaIn.study( LilaIn.TellRoomSri(state.room.id, LilaIn.TellSri(req.sri, req.user.map(_.id), payload)) ) def receive: PartialFunction[ClientMsg, Behavior[ClientMsg]] = { case in: ClientIn => clientInReceive(state.site, deps, in) match { case None => Behaviors.same case Some(s) => apply(state.copy(site = s), deps) } case ClientCtrl.Broom(oldSeconds) => if (state.site.lastPing < oldSeconds) Behaviors.stopped else { keepAlive study state.room.id Behaviors.same } case ctrl: ClientCtrl => socketControl(state.site, deps, ctrl) case ClientOut.StudyForward(payload) => forward(payload) Behaviors.same case anaMove: ClientOut.AnaMove => clientIn(Chess(anaMove)) forward(anaMove.payload) Behaviors.same case anaDrop: ClientOut.AnaDrop => clientIn(Chess(anaDrop)) forward(anaDrop.payload) Behaviors.same case ClientOut.PalantirPing => deps.req.user map { Palantir.respondToPing(state.room.id, _) } foreach clientIn Behaviors.same // default receive (site) case msg: ClientOutSite => val siteState = globalReceive(state.site, deps, ctx, msg) if (siteState == state.site) Behaviors.same else apply(state.copy(site = siteState), deps) case _ => Monitor.clientOutUnhandled("study").increment() Behaviors.same } RoomActor.receive(state.room, deps).lift(msg).fold(receive(msg)) { case (newState, emit) => emit foreach lilaIn.study newState.fold(Behaviors.same[ClientMsg]) { roomState => apply(state.copy(room = roomState), deps) } } } .receiveSignal { case (ctx, PostStop) => onStop(state.site, deps, ctx) RoomActor.onStop(state.room, deps, ctx) Behaviors.same } }
Example 49
Source File: ApiActor.scala From lila-ws with GNU Affero General Public License v3.0 | 5 votes |
package lila.ws import akka.actor.typed.{ Behavior, PostStop } import akka.actor.typed.scaladsl.{ ActorContext, Behaviors } import ipc._ object ApiActor { def start(deps: Deps): Behavior[ClientMsg] = Behaviors.setup { ctx => deps.services.users.connect(deps.user, ctx.self) LilaWsServer.connections.incrementAndGet apply(deps) } def onStop(deps: Deps, ctx: ActorContext[ClientMsg]): Unit = { import deps._ LilaWsServer.connections.decrementAndGet services.users.disconnect(user, ctx.self) services.friends.onClientStop(user.id) } private def apply(deps: Deps): Behavior[ClientMsg] = Behaviors .receive[ClientMsg] { (ctx, msg) => msg match { case ClientCtrl.ApiDisconnect => Behaviors.stopped case _ => Monitor.clientOutUnhandled("api").increment() Behaviors.same } } .receiveSignal { case (ctx, PostStop) => onStop(deps, ctx) Behaviors.same } case class Deps(user: User, services: Services) }
Example 50
Source File: TourClientActor.scala From lila-ws with GNU Affero General Public License v3.0 | 5 votes |
package lila.ws import akka.actor.typed.scaladsl.Behaviors import akka.actor.typed.{ Behavior, PostStop } import ipc._ object TourClientActor { import ClientActor._ case class State( room: RoomActor.State, site: ClientActor.State = ClientActor.State() ) def start(roomState: RoomActor.State, fromVersion: Option[SocketVersion])( deps: Deps ): Behavior[ClientMsg] = Behaviors.setup { ctx => RoomActor.onStart(roomState, fromVersion, deps, ctx) apply(State(roomState), deps) } private def apply(state: State, deps: Deps): Behavior[ClientMsg] = Behaviors .receive[ClientMsg] { (ctx, msg) => import deps._ def receive: PartialFunction[ClientMsg, Behavior[ClientMsg]] = { case in: ClientIn => clientInReceive(state.site, deps, in) match { case None => Behaviors.same case Some(s) => apply(state.copy(site = s), deps) } case ClientCtrl.Broom(oldSeconds) => if (state.site.lastPing < oldSeconds) Behaviors.stopped else { keepAlive.tour(state.room.id) Behaviors.same } case ctrl: ClientCtrl => socketControl(state.site, deps, ctrl) // default receive (site) case msg: ClientOutSite => val siteState = globalReceive(state.site, deps, ctx, msg) if (siteState == state.site) Behaviors.same else apply(state.copy(site = siteState), deps) case _ => Monitor.clientOutUnhandled("tour").increment() Behaviors.same } RoomActor.receive(state.room, deps).lift(msg).fold(receive(msg)) { case (newState, emit) => emit foreach lilaIn.tour newState.fold(Behaviors.same[ClientMsg]) { roomState => apply(state.copy(room = roomState), deps) } } } .receiveSignal { case (ctx, PostStop) => onStop(state.site, deps, ctx) RoomActor.onStop(state.room, deps, ctx) Behaviors.same } }
Example 51
Source File: SimulClientActor.scala From lila-ws with GNU Affero General Public License v3.0 | 5 votes |
package lila.ws import akka.actor.typed.scaladsl.Behaviors import akka.actor.typed.{ Behavior, PostStop } import ipc._ object SimulClientActor { import ClientActor._ case class State( room: RoomActor.State, site: ClientActor.State = ClientActor.State() ) def start(roomState: RoomActor.State, fromVersion: Option[SocketVersion])( deps: Deps ): Behavior[ClientMsg] = Behaviors.setup { ctx => RoomActor.onStart(roomState, fromVersion, deps, ctx) apply(State(roomState), deps) } private def apply(state: State, deps: Deps): Behavior[ClientMsg] = Behaviors .receive[ClientMsg] { (ctx, msg) => import deps._ def receive: PartialFunction[ClientMsg, Behavior[ClientMsg]] = { case in: ClientIn => clientInReceive(state.site, deps, in) match { case None => Behaviors.same case Some(s) => apply(state.copy(site = s), deps) } case ClientCtrl.Broom(oldSeconds) => if (state.site.lastPing < oldSeconds) Behaviors.stopped else { keepAlive.simul(state.room.id) Behaviors.same } case ctrl: ClientCtrl => socketControl(state.site, deps, ctrl) // default receive (site) case msg: ClientOutSite => val siteState = globalReceive(state.site, deps, ctx, msg) if (siteState == state.site) Behaviors.same else apply(state.copy(site = siteState), deps) case _ => Monitor.clientOutUnhandled("simul").increment() Behaviors.same } RoomActor.receive(state.room, deps).lift(msg).fold(receive(msg)) { case (newState, emit) => emit foreach lilaIn.simul newState.fold(Behaviors.same[ClientMsg]) { roomState => apply(state.copy(room = roomState), deps) } } } .receiveSignal { case (ctx, PostStop) => onStop(state.site, deps, ctx) RoomActor.onStop(state.room, deps, ctx) Behaviors.same } }
Example 52
Source File: SwissClientActor.scala From lila-ws with GNU Affero General Public License v3.0 | 5 votes |
package lila.ws import akka.actor.typed.scaladsl.Behaviors import akka.actor.typed.{ Behavior, PostStop } import ipc._ object SwissClientActor { import ClientActor._ case class State( room: RoomActor.State, site: ClientActor.State = ClientActor.State() ) def start(roomState: RoomActor.State, fromVersion: Option[SocketVersion])( deps: Deps ): Behavior[ClientMsg] = Behaviors.setup { ctx => RoomActor.onStart(roomState, fromVersion, deps, ctx) apply(State(roomState), deps) } private def apply(state: State, deps: Deps): Behavior[ClientMsg] = Behaviors .receive[ClientMsg] { (ctx, msg) => import deps._ def receive: PartialFunction[ClientMsg, Behavior[ClientMsg]] = { case in: ClientIn => clientInReceive(state.site, deps, in) match { case None => Behaviors.same case Some(s) => apply(state.copy(site = s), deps) } case ClientCtrl.Broom(oldSeconds) => if (state.site.lastPing < oldSeconds) Behaviors.stopped else { keepAlive.swiss(state.room.id) Behaviors.same } case ctrl: ClientCtrl => socketControl(state.site, deps, ctrl) // default receive (site) case msg: ClientOutSite => val siteState = globalReceive(state.site, deps, ctx, msg) if (siteState == state.site) Behaviors.same else apply(state.copy(site = siteState), deps) case _ => Monitor.clientOutUnhandled("swiss").increment() Behaviors.same } RoomActor.receive(state.room, deps).lift(msg).fold(receive(msg)) { case (newState, emit) => emit foreach lilaIn.swiss newState.fold(Behaviors.same[ClientMsg]) { roomState => apply(state.copy(room = roomState), deps) } } } .receiveSignal { case (ctx, PostStop) => onStop(state.site, deps, ctx) RoomActor.onStop(state.room, deps, ctx) Behaviors.same } }
Example 53
Source File: SiteClientActor.scala From lila-ws with GNU Affero General Public License v3.0 | 5 votes |
package lila.ws import akka.actor.typed.scaladsl.Behaviors import akka.actor.typed.{ Behavior, PostStop } import ipc._ object SiteClientActor { import ClientActor._ def start(deps: Deps): Behavior[ClientMsg] = Behaviors.setup { ctx => import deps._ onStart(deps, ctx) req.user foreach { users.connect(_, ctx.self) } apply(State(), deps) } private def apply(state: State, deps: Deps): Behavior[ClientMsg] = Behaviors .receive[ClientMsg] { (ctx, msg) => msg match { case ctrl: ClientCtrl => socketControl(state, deps, ctrl) case in: ClientIn => clientInReceive(state, deps, in) match { case None => Behaviors.same case Some(s) => apply(s, deps) } case msg: ClientOutSite => val newState = globalReceive(state, deps, ctx, msg) if (newState == state) Behaviors.same else apply(newState, deps) case _ => Monitor.clientOutUnhandled("site").increment() Behaviors.same } } .receiveSignal { case (ctx, PostStop) => onStop(state, deps, ctx) Behaviors.same } }
Example 54
Source File: TeamClientActor.scala From lila-ws with GNU Affero General Public License v3.0 | 5 votes |
package lila.ws import akka.actor.typed.scaladsl.Behaviors import akka.actor.typed.{ Behavior, PostStop } import ipc._ object TeamClientActor { import ClientActor._ case class State( room: RoomActor.State, site: ClientActor.State = ClientActor.State() ) def start(roomState: RoomActor.State, fromVersion: Option[SocketVersion])( deps: Deps ): Behavior[ClientMsg] = Behaviors.setup { ctx => RoomActor.onStart(roomState, fromVersion, deps, ctx) apply(State(roomState), deps) } private def apply(state: State, deps: Deps): Behavior[ClientMsg] = Behaviors .receive[ClientMsg] { (ctx, msg) => import deps._ def receive: PartialFunction[ClientMsg, Behavior[ClientMsg]] = { case in: ClientIn => clientInReceive(state.site, deps, in) match { case None => Behaviors.same case Some(s) => apply(state.copy(site = s), deps) } case ClientCtrl.Broom(oldSeconds) => if (state.site.lastPing < oldSeconds) Behaviors.stopped else { keepAlive.team(state.room.id) Behaviors.same } case ctrl: ClientCtrl => socketControl(state.site, deps, ctrl) // default receive (site) case msg: ClientOutSite => val siteState = globalReceive(state.site, deps, ctx, msg) if (siteState == state.site) Behaviors.same else apply(state.copy(site = siteState), deps) case _ => Monitor.clientOutUnhandled("team").increment() Behaviors.same } RoomActor.receive(state.room, deps).lift(msg).fold(receive(msg)) { case (newState, emit) => emit foreach lilaIn.team newState.fold(Behaviors.same[ClientMsg]) { roomState => apply(state.copy(room = roomState), deps) } } } .receiveSignal { case (ctx, PostStop) => onStop(state.site, deps, ctx) RoomActor.onStop(state.room, deps, ctx) Behaviors.same } }
Example 55
Source File: ClusterStatusTrackerMain.scala From Pi-Akka-Cluster with Apache License 2.0 | 5 votes |
package akkapi.cluster import akka.NotUsed import akka.actor.typed.scaladsl.adapter.TypedActorSystemOps import akka.actor.typed.scaladsl.{ActorContext, Behaviors} import akka.actor.typed.{ActorSystem, Behavior, Terminated} import akka.management.scaladsl.AkkaManagement object Main { def apply(settings: Settings): Behavior[NotUsed] = Behaviors.setup { context => val ledStripDriver = context.spawn(LedStripDriver(settings), "led-strip-driver") val ledStripController = context.spawn(LedStripVisualiser(settings, ledStripDriver), "led-strip-controller") val clusterStatusTracker = context.spawn( ClusterStatusTracker( settings, Some(contextToClusterSingleton(settings)) ), "cluster-status-tracker" ) clusterStatusTracker ! ClusterStatusTracker.SubscribeVisualiser(ledStripController) Behaviors.receiveSignal { case (_, Terminated(_)) => Behaviors.stopped } } private def contextToClusterSingleton(settings: Settings): ActorContextToSingletonBehavior = (context: ActorContext[ClusterStatusTracker.ClusterEvent]) => PiClusterSingleton(settings, context.self) type ActorContextToSingletonBehavior = ActorContext[ClusterStatusTracker.ClusterEvent] => Behavior[PiClusterSingleton.Command] } object ClusterStatusTrackerMain { def main(args: Array[String]): Unit = { System.loadLibrary("rpi_ws281x") val settings = Settings() val config = settings.config val system = ActorSystem[NotUsed](Main(settings), settings.actorSystemName, config) // Start Akka HTTP Management extension AkkaManagement(system.toClassic).start() } }
Example 60
Source File: ClusterStatusTrackerMain.scala From Pi-Akka-Cluster with Apache License 2.0 | 5 votes |
package akkapi.cluster import akka.NotUsed import akka.actor.typed.scaladsl.adapter.TypedActorSystemOps import akka.actor.typed.scaladsl.{ActorContext, Behaviors} import akka.actor.typed.{ActorSystem, Behavior, Terminated} import akka.management.cluster.bootstrap.ClusterBootstrap import akka.management.scaladsl.AkkaManagement object Main { def apply(settings: Settings): Behavior[NotUsed] = Behaviors.setup { context => val ledStripDriver = context.spawn(LedStripDriver(settings), "led-strip-driver") val ledStripController = context.spawn(LedStripVisualiser(settings, ledStripDriver), "led-strip-controller") val clusterStatusTracker = context.spawn( ClusterStatusTracker( settings, Some(contextToClusterSingleton(settings)) ), "cluster-status-tracker" ) clusterStatusTracker ! ClusterStatusTracker.SubscribeVisualiser(ledStripController) Behaviors.receiveSignal { case (_, Terminated(_)) => Behaviors.stopped } } private def contextToClusterSingleton(settings: Settings): ActorContextToSingletonBehavior = (context: ActorContext[ClusterStatusTracker.ClusterEvent]) => PiClusterSingleton(settings, context.self) type ActorContextToSingletonBehavior = ActorContext[ClusterStatusTracker.ClusterEvent] => Behavior[PiClusterSingleton.Command] } object ClusterStatusTrackerMain { def main(args: Array[String]): Unit = { System.loadLibrary("rpi_ws281x") val settings = Settings() val config = settings.config val system = ActorSystem[NotUsed](Main(settings), settings.actorSystemName, config) // Start Akka HTTP Management extension AkkaManagement(system.toClassic).start() ClusterBootstrap(system.toClassic).start() } }
Example 64
Source File: TriggerRunner.scala From daml with Apache License 2.0 | 5 votes |
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. // SPDX-License-Identifier: Apache-2.0 package com.daml.lf.engine.trigger import akka.actor.typed.{Behavior, PostStop} import akka.actor.typed.scaladsl.AbstractBehavior import akka.actor.typed.SupervisorStrategy._ import akka.actor.typed.Signal import akka.actor.typed.scaladsl.Behaviors import akka.actor.typed.scaladsl.ActorContext import akka.stream.Materializer import com.typesafe.scalalogging.StrictLogging import com.daml.grpc.adapter.ExecutionSequencerFactory class InitializationHalted(s: String) extends Exception(s) {} class InitializationException(s: String) extends Exception(s) {} object TriggerRunner { type Config = TriggerRunnerImpl.Config trait Message final case object Stop extends Message def apply(config: Config, name: String)( implicit esf: ExecutionSequencerFactory, mat: Materializer): Behavior[TriggerRunner.Message] = Behaviors.setup(ctx => new TriggerRunner(ctx, config, name)) } class TriggerRunner( ctx: ActorContext[TriggerRunner.Message], config: TriggerRunner.Config, name: String)(implicit esf: ExecutionSequencerFactory, mat: Materializer) extends AbstractBehavior[TriggerRunner.Message](ctx) with StrictLogging { import TriggerRunner.{Message, Stop} // Spawn a trigger runner impl. Supervise it. Stop immediately on // initialization halted exceptions, retry any initialization or // execution failure exceptions. private val child = ctx.spawn( Behaviors .supervise( Behaviors .supervise(TriggerRunnerImpl(config)) .onFailure[InitializationHalted](stop) ) .onFailure( restartWithBackoff( config.restartConfig.minRestartInterval, config.restartConfig.maxRestartInterval, config.restartConfig.restartIntervalRandomFactor)), name ) override def onMessage(msg: Message): Behavior[Message] = Behaviors.receiveMessagePartial[Message] { case Stop => Behaviors.stopped // Automatically stops the child actor if running. } override def onSignal: PartialFunction[Signal, Behavior[Message]] = { case PostStop => logger.info(s"Trigger $name stopped") this } }
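TriggerRunner layers two supervision strategies: the inner one stops the child on an InitializationHalted exception, while the outer one restarts it with exponential backoff for anything else. A minimal, generic sketch of the same layered-supervision idea; the LayeredSupervisionSketch object, FatalInitError type, and backoff values are illustrative, not part of the daml project.

import akka.actor.typed.{Behavior, SupervisorStrategy}
import akka.actor.typed.scaladsl.Behaviors
import scala.concurrent.duration._

object LayeredSupervisionSketch {
  final class FatalInitError(msg: String) extends Exception(msg)

  val worker: Behavior[String] = Behaviors.receiveMessage[String] {
    case "fail-fatal" => throw new FatalInitError("cannot initialize")
    case "fail"       => throw new RuntimeException("transient failure")
    case _            => Behaviors.same
  }

  // The innermost strategy that matches wins: FatalInitError stops the child for good,
  // while any other exception triggers a restart with exponential backoff.
  val supervised: Behavior[String] =
    Behaviors
      .supervise(
        Behaviors.supervise(worker).onFailure[FatalInitError](SupervisorStrategy.stop))
      .onFailure[Exception](
        SupervisorStrategy.restartWithBackoff(
          minBackoff = 1.second,
          maxBackoff = 10.seconds,
          randomFactor = 0.2))
}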
Example 65
Source File: PiClusterSingleton.scala From Pi-Akka-Cluster with Apache License 2.0 | 5 votes |
package akkapi.cluster import akka.actor.typed.scaladsl.{ActorContext, Behaviors} import akka.actor.typed.{ActorRef, Behavior, PostStop} object PiClusterSingleton { sealed trait Command final case object Ping extends Command def apply(settings: Settings, clusterStatusTracker: ActorRef[ClusterStatusTracker.ClusterEvent]): Behavior[Command] = { Behaviors.setup { context => new PiClusterSingleton(context, settings, clusterStatusTracker).run() } } } class PiClusterSingleton private (context: ActorContext[PiClusterSingleton.Command], settings: Settings, clusterStatusTracker: ActorRef[ClusterStatusTracker.ClusterEvent]) { import PiClusterSingleton._ // Cluster singleton has been started on this node clusterStatusTracker ! ClusterStatusTracker.PiClusterSingletonOnNode def run(): Behavior[Command] = Behaviors.receiveMessage[Command] { case Ping => context.log.info(s"PiClusterSingleton was pinged") Behaviors.same }.receiveSignal { case (_, signal) if signal == PostStop => clusterStatusTracker ! ClusterStatusTracker.PiClusterSingletonNotOnNode Behaviors.same } }
Example 66
Source File: ClusterStatusTrackerMain.scala From Pi-Akka-Cluster with Apache License 2.0 | 5 votes |
package akkapi.cluster import akka.NotUsed import akka.actor.typed.scaladsl.adapter.TypedActorSystemOps import akka.actor.typed.scaladsl.{ActorContext, Behaviors} import akka.actor.typed.{ActorSystem, Behavior, Terminated} import akka.management.scaladsl.AkkaManagement object Main { def apply(settings: Settings): Behavior[NotUsed] = Behaviors.setup { context => val ledStripDriver = context.spawn(LedStripDriver(settings), "led-strip-driver") val ledStripController = context.spawn(LedStripVisualiser(settings, ledStripDriver), "led-strip-controller") val clusterStatusTracker = context.spawn( ClusterStatusTracker( settings, Some(contextToClusterSingleton(settings)) ), "cluster-status-tracker" ) clusterStatusTracker ! ClusterStatusTracker.SubscribeVisualiser(ledStripController) Behaviors.receiveSignal { case (_, Terminated(_)) => Behaviors.stopped } } private def contextToClusterSingleton(settings: Settings): ActorContextToSingletonBehavior = (context: ActorContext[ClusterStatusTracker.ClusterEvent]) => PiClusterSingleton(settings, context.self) type ActorContextToSingletonBehavior = ActorContext[ClusterStatusTracker.ClusterEvent] => Behavior[PiClusterSingleton.Command] } object ClusterStatusTrackerMain { def main(args: Array[String]): Unit = { System.loadLibrary("rpi_ws281x") val settings = Settings() val config = settings.config val system = ActorSystem[NotUsed](Main(settings), settings.actorSystemName, config) // Start Akka HTTP Management extension AkkaManagement(system.toClassic).start() } }
Example 68
Source File: DistributedDataTracker.scala From Pi-Akka-Cluster with Apache License 2.0 | 5 votes |
package com.lightbend.akka_oled

import akka.actor.typed.scaladsl.Behaviors
import akka.actor.typed.{ActorRef, Behavior}
import akka.cluster.ddata.typed.scaladsl.{DistributedData, Replicator}
import akka.cluster.ddata.{LWWMap, LWWMapKey, SelfUniqueAddress}
import akkapi.cluster.OledDriver
import akkapi.cluster.OledDriver.UpdateView

object DistributedDataTracker {

  sealed trait Command
  case class UpdateStatus(name: String, status: String) extends Command
  case class Get(name: String, replyTo: ActorRef[String]) extends Command
  case class SubscribeResponse(rsp: Replicator.SubscribeResponse[LWWMap[String, String]]) extends Command
  case class InternalUpdateResponse(rsp: Replicator.UpdateResponse[LWWMap[String, String]]) extends Command

  private val NO_DATA = "No data"

  def apply(screenIndex: Int,
            key: LWWMapKey[String, String],
            oledDriver: ActorRef[OledDriver.Command]): Behavior[DistributedDataTracker.Command] =
    Behaviors.setup { context =>
      oledDriver ! UpdateView(screenIndex, NO_DATA)
      implicit val node: SelfUniqueAddress = DistributedData(context.system).selfUniqueAddress

      DistributedData.withReplicatorMessageAdapter[Command, LWWMap[String, String]] { replicatorAdapter =>
        replicatorAdapter.subscribe(key, SubscribeResponse.apply)

        def updated(cachedValue: Map[String, String]): Behavior[Command] = {
          Behaviors.receiveMessage[Command] {
            case UpdateStatus(name, status) =>
              replicatorAdapter.askUpdate(
                askReplyTo => Replicator.Update(key, LWWMap.empty[String, String], Replicator.WriteLocal, askReplyTo)(_ :+ (name -> status)),
                InternalUpdateResponse.apply)
              val updatedValue = cachedValue + (name -> status)
              oledDriver ! UpdateView(screenIndex, renderState(updatedValue))
              updated(updatedValue)
            case Get(name, replyTo) =>
              replyTo ! cachedValue.getOrElse(name, "")
              Behaviors.same
            case InternalUpdateResponse(_) =>
              Behaviors.same
            case SubscribeResponse(chg @ Replicator.Changed(`key`)) =>
              val value = chg.get(key).entries
              oledDriver ! UpdateView(screenIndex, renderState(value))
              updated(value)
          }
        }

        updated(Map.empty[String, String])
      }
    }

  private def renderState(cachedValue: Map[String, String]): String = {
    if (cachedValue.nonEmpty)
      cachedValue.map[String] { case (key, value) => key + ": " + value + " " }.mkString("\n")
    else NO_DATA
  }
}
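For context, here is a short sketch of how another component in the same com.lightbend.akka_oled package might drive this tracker with a fire-and-forget update followed by a typed ask. The queryStatus helper, the "ONLINE" status value and the 3-second timeout are illustrative assumptions, not part of the akka-oled sample.

import akka.actor.typed.{ActorRef, ActorSystem}
import akka.actor.typed.scaladsl.AskPattern._
import akka.util.Timeout
import scala.concurrent.Future
import scala.concurrent.duration._

// Hypothetical caller; `tracker` is a spawned DistributedDataTracker.
def queryStatus(tracker: ActorRef[DistributedDataTracker.Command], name: String)(
    implicit system: ActorSystem[_]): Future[String] = {
  implicit val timeout: Timeout = 3.seconds
  // Fire-and-forget update; it is replicated through the LWWMap behind the tracker.
  tracker ! DistributedDataTracker.UpdateStatus(name, "ONLINE")
  // Ask for the cached value; the tracker replies with the status string.
  tracker.ask(replyTo => DistributedDataTracker.Get(name, replyTo))
}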
Example 69
Source File: Main.scala From Pi-Akka-Cluster with Apache License 2.0 | 5 votes |
package com.lightbend.akka_oled

import akka.NotUsed
import akka.actor.typed.scaladsl.Behaviors
import akka.actor.typed.scaladsl.adapter.TypedActorSystemOps
import akka.actor.typed.{ActorSystem, Behavior, Terminated}
import akka.cluster.ddata.LWWMapKey
import akka.http.scaladsl.Http
import akka.http.scaladsl.marshallers.sprayjson.SprayJsonSupport
import akka.management.scaladsl.AkkaManagement
import akka.stream.Materializer
import akkapi.cluster.{ClusterStatusTracker, OledClusterVisualizer, OledDriver, Settings}
import spray.json.DefaultJsonProtocol

object Main extends SprayJsonSupport with DefaultJsonProtocol {

  case class NodeStatus(status: String)

  implicit val transactionFormat = jsonFormat1(NodeStatus)

  def apply(settings: Settings): Behavior[NotUsed] = Behaviors.setup { ctx =>
    val oledDriver = ctx.spawn(OledDriver(settings), "oled-driver")
    oledDriver ! OledDriver.RegisterView("Cluster State", 0)
    oledDriver ! OledDriver.RegisterView("Distributed Data State", 1)

    val clusterView = ctx.spawn(OledClusterVisualizer(0, settings, oledDriver), "oled-cluster-view")
    val clusterStatusTracker = ctx.spawn(ClusterStatusTracker(settings, None), "cluster-status-tracker")
    clusterStatusTracker ! ClusterStatusTracker.SubscribeVisualiser(clusterView)

    val ddataTracker = ctx.spawn(
      DistributedDataTracker(1, LWWMapKey[String, String]("cache"), oledDriver),
      "oled-ddata-view")

    val routes = new Routes(ddataTracker)(ctx.system)

    implicit val untypedSystem: akka.actor.ActorSystem = ctx.system.toClassic
    implicit val mat: Materializer = Materializer.createMaterializer(ctx.system.toClassic)

    Http()(ctx.system.toClassic).bindAndHandle(
      routes.route,
      settings.config.getString("cluster-node-configuration.external-ip"),
      8080)

    Behaviors.receiveSignal {
      case (_, Terminated(_)) =>
        Behaviors.stopped
    }
  }
}

object DisplayDistributedDataMain {
  def main(args: Array[String]): Unit = {
    val settings = Settings()
    val system = ActorSystem[NotUsed](Main(settings), "akka-oled", settings.config)

    // Start Akka HTTP Management extension
    AkkaManagement(system).start()
  }
}
Example 70
Source File: ClusterStatusTrackerMain.scala From Pi-Akka-Cluster with Apache License 2.0 | 5 votes |
package akkapi.cluster

import akka.NotUsed
import akka.actor.typed.scaladsl.Behaviors
import akka.actor.typed.scaladsl.adapter.TypedActorSystemOps
import akka.actor.typed.{ActorSystem, Behavior, Terminated}
import akka.management.scaladsl.AkkaManagement

object Main {

  def apply(settings: Settings): Behavior[NotUsed] = Behaviors.setup { context =>
    val ledStripDriver = context.spawn(LedStripDriver(settings), "led-strip-driver")
    val ledStripController = context.spawn(LedStripVisualiser(settings, ledStripDriver), "led-strip-controller")
    val clusterStatusTracker = context.spawn(ClusterStatusTracker(settings, None), "cluster-status-tracker")
    clusterStatusTracker ! ClusterStatusTracker.SubscribeVisualiser(ledStripController)

    Behaviors.receiveSignal {
      case (_, Terminated(_)) =>
        Behaviors.stopped
    }
  }
}

object ClusterStatusTrackerMain {
  def main(args: Array[String]): Unit = {
    System.loadLibrary("rpi_ws281x")
    val settings = Settings()
    val config = settings.config
    val system = ActorSystem[NotUsed](Main(settings), settings.actorSystemName, config)

    // Start Akka HTTP Management extension
    AkkaManagement(system.toClassic).start()
  }
}
Example 71
Source File: Shop.scala From Learn-Scala-Programming with MIT License | 5 votes |
package ch12

import akka.actor.typed.{ActorRef, ActorSystem, Behavior}
import akka.actor.typed.receptionist.{Receptionist, ServiceKey}
import akka.actor.typed.scaladsl.{ActorContext, Behaviors}
import akka.actor.typed.receptionist.Receptionist._
import ch12.Bakery.Groceries
import ch12.Manager.ReceiveGroceries
import ch12.Shop.seller
import com.typesafe.config.ConfigFactory

object Store extends App {
  val config = ConfigFactory.load("grocery.conf")
  val system = ActorSystem(seller(Shop.systemReceptionist), "Typed-Bakery", config)
}

object Shop {
  final case class ShoppingList(eggs: Int, flour: Int, sugar: Int, chocolate: Int)
  final case class SellByList(list: ShoppingList, toWhom: ActorRef[Manager.Command])

  val SellerKey = ServiceKey[SellByList]("GrocerySeller")

  type ReceptionistFactory = ActorContext[SellByList] => ActorRef[Receptionist.Command]

  val systemReceptionist: ReceptionistFactory = _.system.receptionist

  def seller(receptionist: ReceptionistFactory): Behavior[SellByList] = Behaviors.setup { ctx ⇒
    receptionist(ctx) ! Register(SellerKey, ctx.self)
    Behaviors.receiveMessage[SellByList] {
      case SellByList(list, toWhom) ⇒
        import list._
        toWhom ! ReceiveGroceries(Groceries(eggs, flour, sugar, chocolate))
        Behaviors.same
    }
  }
}
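The seller registers itself with the receptionist under SellerKey, so a client would discover it through the same receptionist. Below is a minimal sketch of the lookup side, assuming the ch12 types above are on the classpath; the buyer behavior and the SellerLookupSketch object are illustrative, not taken from the book's source.

import akka.actor.typed.Behavior
import akka.actor.typed.receptionist.Receptionist
import akka.actor.typed.scaladsl.Behaviors
import ch12.Shop

object SellerLookupSketch {
  // Hypothetical client that asks the receptionist for sellers registered under SellerKey.
  def buyer: Behavior[Receptionist.Listing] = Behaviors.setup { ctx =>
    ctx.system.receptionist ! Receptionist.Find(Shop.SellerKey, ctx.self)
    Behaviors.receiveMessage {
      case Shop.SellerKey.Listing(sellers) =>
        // One Listing arrives per Find request; it carries the currently registered refs.
        ctx.log.info("Found {} registered seller(s)", sellers.size)
        Behaviors.same
    }
  }
}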