akka.actor.typed.ActorSystem Scala Examples

The following examples show how to use akka.actor.typed.ActorSystem. Each example notes the original project and source file it was taken from, along with that project's license.
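
Every example below follows the same basic pattern: define a guardian Behavior, hand it to ActorSystem(...), and the resulting system is itself an ActorRef to that guardian. A minimal sketch of that pattern (not taken from any of the projects below):

import akka.actor.typed.ActorSystem
import akka.actor.typed.scaladsl.Behaviors

object MinimalActorSystemExample extends App {
  // The guardian behavior is the top-level actor of the system.
  val guardian = Behaviors.receiveMessage[String] { msg =>
    println(s"received: $msg")
    Behaviors.same
  }

  val system: ActorSystem[String] = ActorSystem(guardian, "minimal-example")
  system ! "hello"   // an ActorSystem[T] is also an ActorRef[T] to the guardian
  system.terminate()
}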
Example 1
Source File: MassSettings.scala    From fusion-data   with Apache License 2.0
package mass

import akka.actor.Address
import akka.actor.typed.ActorSystem
import com.typesafe.config.Config
import com.typesafe.scalalogging.StrictLogging
import helloscala.common.Configuration
import mass.core.Constants._

final class Compiles(c: Configuration) {
  def scala213Home: String = c.getString("scala213")
  def scala212Home: String = c.getString("scala212")
  def scala211Home: String = c.getString("scala211")
}

final class MassSettings private (val configuration: Configuration) extends StrictLogging {
  val compiles = new Compiles(configuration.getConfiguration(s"$BASE_CONF.core.compiles"))

  def clusterName: String = configuration.getString(BASE_CONF + ".cluster.name")

  def clusterProtocol: String = configuration.getString(BASE_CONF + ".cluster.protocol")

  def clusterSeeds: List[Address] =
    configuration
      .get[Seq[String]](BASE_CONF + ".cluster.seeds")
      .map { seed =>
        val Array(host, port) = seed.split(':')
        Address(clusterProtocol, clusterName, host, port.toInt)
      }
      .toList
}

object MassSettings {
  def apply(configuration: Configuration): MassSettings = new MassSettings(configuration)
  def apply(config: Config): MassSettings = apply(Configuration(config))
  def apply(system: ActorSystem[_]): MassSettings = apply(system.settings.config)
} 
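
A usage sketch for the settings class above, assuming the configuration keys referenced under BASE_CONF are present in the loaded config (the fusion-data project supplies them; the system name and printed fields here are illustrative):

import akka.actor.typed.ActorSystem
import akka.actor.typed.scaladsl.Behaviors
import mass.MassSettings

object MassSettingsUsage extends App {
  // MassSettings(system) reads system.settings.config, so the cluster keys must be on the classpath.
  val system = ActorSystem(Behaviors.empty[Nothing], "mass-settings-demo")
  val settings = MassSettings(system)
  println(s"cluster ${settings.clusterName} via ${settings.clusterProtocol}, seeds: ${settings.clusterSeeds}")
  system.terminate()
}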
Example 2
Source File: PingPong.scala    From effpi   with MIT License
// Effpi - verified message-passing programs in Dotty
// Copyright 2019 Alceste Scalas and Elias Benussi
// Released under the MIT License: https://opensource.org/licenses/MIT
package effpi.benchmarks.akka

import akka.NotUsed
import akka.actor.typed.scaladsl.{ Behaviors, MutableBehavior, ActorContext}
import akka.actor.typed.{ ActorRef, ActorSystem, Behavior, DispatcherSelector, Terminated }
import scala.concurrent.Future
import scala.concurrent.duration._
import scala.concurrent.{ Future, Promise, Await }
import scala.concurrent.ExecutionContext.Implicits.global

object PingPong {

  final case class Ping(iterations: Int, replyTo: ActorRef[Pong])

  case class Pong(iterations: Int, pingTo: ActorRef[Ping])

  val pong = Behaviors.receive[Ping] { (ctx, msg) =>
    msg.replyTo ! Pong(msg.iterations - 1, ctx.self)
    Behaviors.same
  }

  def ping(startTimePromise: Promise[Long], endTimePromise: Promise[Long], expectedIterations: Int) = Behaviors.receive[Pong] { (ctx, pong) =>
    if (pong.iterations == 0) {
      endTimePromise.success(System.nanoTime())
      Behaviors.stopped
    } else {
      if (expectedIterations == pong.iterations) {
        startTimePromise.success(System.nanoTime())
      }
      pong.pingTo ! Ping(pong.iterations, ctx.self)
      Behaviors.same
    }
  }

  def mainActor(
    durationPromise: Promise[Long],
    numPairs: Int,
    numIterations: Int
  ): Behavior[akka.NotUsed] =
    Behaviors.setup { ctx =>

      val (startTimePromises, startTimeFutures): (List[Promise[Long]], List[Future[Long]]) = (1 to numPairs).toList.map { _ =>
        val startTimePromise = Promise[Long]()
        val startTimeFuture = startTimePromise.future

        (startTimePromise, startTimeFuture)
      }.unzip

      val (endTimePromises, endTimeFutures): (List[Promise[Long]], List[Future[Long]]) = (1 to numPairs).toList.map { _ =>
        val endTimePromise = Promise[Long]()
        val endTimeFuture = endTimePromise.future

        (endTimePromise, endTimeFuture)
      }.unzip

      // val refs = (1 to numPairs).toList.map { id =>
      val refs = startTimePromises.zip(endTimePromises).zipWithIndex.map { (promises, id) =>
        val (sPromise, ePromise) = promises
        val pongRef = ctx.spawn(pong, "pong" + id)
        val pingRef = ctx.spawn(ping(sPromise, ePromise, numIterations), "ping" + id)
        ctx.watch(pingRef)
        (pingRef, pongRef)
      }
      refs.foreach { (pingRef, pongRef) => pingRef ! Pong(numIterations, pongRef) }

      val startTimes = Await.result(Future.sequence(startTimeFutures), Duration.Inf)
      val startTime = startTimes.min
      val endTimes = Await.result(Future.sequence(endTimeFutures), Duration.Inf)
      val endTime = endTimes.max
      durationPromise.success(endTime - startTime)
      val pingPongDuration = endTime - startTime

      var terminatedProcesses = 0
      Behaviors.receiveSignal {
        case (_, Terminated(ref)) =>
          terminatedProcesses = terminatedProcesses + 1
          if (terminatedProcesses == numPairs) {
            Behaviors.stopped
          } else {
            Behaviors.same
          }
        case (_, _) =>
          Behaviors.empty
      }
    }

  def bench(params: (Int, Int)): Long = {
    val (numPairs, numIterations) = params
    val durationPromise = Promise[Long]()
    val durationFuture = durationPromise.future
    val system = ActorSystem(
      mainActor(durationPromise, numPairs, numIterations), "PingPongDemo")
    Await.result(system.whenTerminated, Duration.Inf)
    val duration = Await.result(durationFuture, Duration.Inf)
    duration
  }
} 
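
Each effpi benchmark object in this listing (PingPong here, and CountingActor, ForkJoinThroughput and ForkJoinCreation further below) exposes a blocking bench method that creates its own ActorSystem, waits for it to terminate and returns the measured duration in nanoseconds. A sketch of how it might be driven; the parameter values are illustrative and not taken from the project's benchmark harness:

import effpi.benchmarks.akka.PingPong

object PingPongBenchRunner {
  def main(args: Array[String]): Unit = {
    // 8 ping/pong pairs, 10000 round trips each (illustrative values).
    val nanos = PingPong.bench((8, 10000))
    println(s"ping-pong benchmark took ${nanos / 1000000} ms")
  }
}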
Example 3
Source File: CountingActor.scala    From effpi   with MIT License
// Effpi - verified message-passing programs in Dotty
// Copyright 2019 Alceste Scalas and Elias Benussi
// Released under the MIT License: https://opensource.org/licenses/MIT
package effpi.benchmarks.akka

import akka.NotUsed
import akka.actor.typed.scaladsl.{ Behaviors, MutableBehavior, ActorContext}
import akka.actor.typed.{ ActorRef, ActorSystem, Behavior, DispatcherSelector, Terminated }

import scala.concurrent.Future
import scala.concurrent.duration._
import scala.concurrent.{ Future, Promise, Await }
import scala.concurrent.ExecutionContext.Implicits.global

object CountingActor {

  sealed trait CounterAction
  object CounterAction {
    final case class Add(num: Int, p: Promise[Int]) extends CounterAction
    final case class Cheque(replyTo: ActorRef[Sum]) extends CounterAction
  }

  case class Sum(sum: Int)

  val counter = Behaviors.setup[CounterAction] { ctx =>
    new MutableCounter(ctx)
  }

  class MutableCounter(
    ctx: ActorContext[CounterAction]
  ) extends MutableBehavior[CounterAction] {
    var counter = 0

    override def onMessage(msg: CounterAction): Behavior[CounterAction] = {
      msg match {
        case CounterAction.Add(num, p) =>
          counter += 1
          p.success(num)
          Behaviors.same
        case CounterAction.Cheque(replyTo) =>
          replyTo ! Sum(counter)
          Behaviors.stopped
      }
    }
  }

  def sink(endTimePromise: Promise[Long]) = Behaviors.receive[Sum] { (ctx, msg) =>
    endTimePromise.success(System.nanoTime())
    Behaviors.stopped
  }

  def mainActor(
    durationPromise: Promise[Long],
    numMessages: Int
  ): Behavior[akka.NotUsed] =
    Behaviors.setup { ctx =>

      val endTimePromise = Promise[Long]()
      val endTimeFuture = endTimePromise.future

      val sinkRef = ctx.spawn(sink(endTimePromise), "sink")
      ctx.watch(sinkRef)
      val counterRef = ctx.spawn(counter, "counter")

      val startTime = System.nanoTime()
      val futs = (1 to numMessages).toList.map { num =>
        val p = Promise[Int]()
        val f = p.future
        counterRef ! CounterAction.Add(num, p)
        f
      }

      Await.result(Future.sequence(futs), Duration.Inf)

      counterRef ! CounterAction.Cheque(sinkRef)

      val endTime = Await.result(endTimeFuture, Duration.Inf)
      val countingDuration = endTime - startTime
      durationPromise.success(countingDuration)

      Behaviors.receiveSignal {
        case (_, Terminated(ref)) =>
          Behaviors.stopped
        case (_, _) =>
          Behaviors.empty
      }
    }

  def bench(params: Int): Long = {
    val durationPromise = Promise[Long]()
    val durationFuture = durationPromise.future
    val system = ActorSystem(
      mainActor(durationPromise, params), "CountingActorDemo")
    Await.result(system.whenTerminated, Duration.Inf)
    val duration = Await.result(durationFuture, Duration.Inf)
    duration
  }
} 
Example 4
Source File: ForkJoinThroughput.scala    From effpi   with MIT License
// Effpi - verified message-passing programs in Dotty
// Copyright 2019 Alceste Scalas and Elias Benussi
// Released under the MIT License: https://opensource.org/licenses/MIT
package effpi.benchmarks.akka

import akka.NotUsed
import akka.actor.typed.scaladsl.{ Behaviors, MutableBehavior, ActorContext}
import akka.actor.typed.{ ActorRef, ActorSystem, Behavior, DispatcherSelector, Terminated }
import scala.concurrent.Future
import scala.concurrent.duration._
import scala.concurrent.{ Future, Promise, Await }
import scala.concurrent.ExecutionContext.Implicits.global

object ForkJoinThroughput {

  case class Message(msg: String)

  def receiver(maxMsgs: Int) = Behaviors.setup[Message] { ctx =>
    new MutableSimpleActor(ctx, maxMsgs)
  }

  class MutableSimpleActor(
    ctx: ActorContext[Message],
    maxMsgs: Int
  ) extends MutableBehavior[Message] {
    var count = 0

    override def onMessage(msg: Message): Behavior[Message] = {
      count +=1
      if (count < maxMsgs) {
        Behaviors.same
      } else {
        Behaviors.stopped
      }
    }
  }

  def mainActor(
    durationPromise: Promise[Long],
    numActors: Int,
    numMessages: Int
  ): Behavior[akka.NotUsed] =
    Behaviors.setup { ctx =>

      val receiversRef = (1 to numActors).map{ id => ctx.spawn(receiver(numMessages), "receiver" + id)}

      val startTime = System.nanoTime()

      (1 to numMessages).foreach { n =>
        receiversRef.foreach { simpleActor =>
          simpleActor ! Message("Hello World!")
        }
      }

      val endTime = System.nanoTime()

      durationPromise.success(endTime - startTime)
      Behaviors.stopped
    }

  def bench(params: (Int, Int)): Long = {
    val (numActors, numMessages) = params
    val durationPromise = Promise[Long]()
    val durationFuture = durationPromise.future
    val system = ActorSystem(
      mainActor(durationPromise, numActors, numMessages),
      "ForkJoinCreationDemo")
    Await.result(system.whenTerminated, Duration.Inf)
    val duration = Await.result(durationFuture, Duration.Inf)
    duration
  }
} 
Example 5
Source File: Main.scala    From akka-persistence-cassandra   with Apache License 2.0
package akka.persistence.cassandra.example

import akka.actor.typed.{ ActorRef, ActorSystem }
import akka.actor.typed.scaladsl.Behaviors
import akka.cluster.sharding.typed.ShardingEnvelope
import akka.cluster.typed.{ Cluster, SelfUp, Subscribe }
import akka.management.cluster.bootstrap.ClusterBootstrap
import akka.management.scaladsl.AkkaManagement
import akka.persistence.cassandra.example.LoadGenerator.Start
import akka.actor.typed.scaladsl.LoggerOps
import akka.stream.alpakka.cassandra.scaladsl.CassandraSessionRegistry

import scala.concurrent.Await
import scala.concurrent.duration._

object Main {

  def main(args: Array[String]): Unit = {

    ActorSystem(Behaviors.setup[SelfUp] {
      ctx =>
        val readSettings = ReadSide.Settings(ctx.system.settings.config.getConfig("cassandra.example"))
        val writeSettings = ConfigurablePersistentActor.Settings(readSettings.nrTags)
        val loadSettings = LoadGenerator.Settings(ctx.system.settings.config.getConfig("cassandra.example"))

        AkkaManagement(ctx.system).start()
        ClusterBootstrap(ctx.system).start()
        val cluster = Cluster(ctx.system)
        cluster.subscriptions ! Subscribe(ctx.self, classOf[SelfUp])

        val topic = ReadSideTopic.init(ctx)

        if (cluster.selfMember.hasRole("read")) {
          val session = CassandraSessionRegistry(ctx.system).sessionFor("akka.persistence.cassandra")
          val offsetTableStmt =
            """
              CREATE TABLE IF NOT EXISTS akka.offsetStore (
                eventProcessorId text,
                tag text,
                timeUuidOffset timeuuid,
                PRIMARY KEY (eventProcessorId, tag)
              )
           """

          Await.ready(session.executeDDL(offsetTableStmt), 30.seconds)
        }

        Behaviors.receiveMessage {
          case SelfUp(state) =>
            ctx.log.infoN(
              "Cluster member joined. Initializing persistent actors. Roles {}. Members {}",
              cluster.selfMember.roles,
              state.members)
            val ref = ConfigurablePersistentActor.init(writeSettings, ctx.system)
            if (cluster.selfMember.hasRole("read")) {
              ctx.spawnAnonymous(Reporter(topic))
            }
            ReadSide(ctx.system, topic, readSettings)
            if (cluster.selfMember.hasRole("load")) {
              ctx.log.info("Starting load generation")
              val load = ctx.spawn(LoadGenerator(loadSettings, ref), "load-generator")
              load ! Start(10.seconds)
            }
            Behaviors.empty
        }
    }, "apc-example")
  }
} 
Example 6
Source File: ConfigurablePersistentActor.scala    From akka-persistence-cassandra   with Apache License 2.0
package akka.persistence.cassandra.example

import akka.actor.typed.scaladsl.Behaviors
import akka.actor.typed.{ ActorRef, ActorSystem, Behavior }
import akka.cluster.sharding.typed.ShardingEnvelope
import akka.cluster.sharding.typed.scaladsl.{ ClusterSharding, Entity, EntityTypeKey }
import akka.persistence.typed.PersistenceId
import akka.persistence.typed.scaladsl.{ Effect, EventSourcedBehavior }

object ConfigurablePersistentActor {

  case class Settings(nrTags: Int)

  val Key: EntityTypeKey[Event] = EntityTypeKey[Event]("configurable")

  def init(settings: Settings, system: ActorSystem[_]): ActorRef[ShardingEnvelope[Event]] = {
    ClusterSharding(system).init(Entity(Key)(ctx => apply(settings, ctx.entityId)).withRole("write"))
  }

  final case class Event(timeCreated: Long = System.currentTimeMillis()) extends CborSerializable

  final case class State(eventsProcessed: Long) extends CborSerializable

  def apply(settings: Settings, persistenceId: String): Behavior[Event] =
    Behaviors.setup { ctx =>
      EventSourcedBehavior[Event, Event, State](
        persistenceId = PersistenceId.ofUniqueId(persistenceId),
        State(0),
        (_, event) => {
          ctx.log.info("persisting event {}", event)
          Effect.persist(event)
        },
        (state, _) => state.copy(eventsProcessed = state.eventsProcessed + 1)).withTagger(event =>
        Set("tag-" + math.abs(event.hashCode() % settings.nrTags)))
    }

} 
Example 7
Source File: ReadSide.scala    From akka-persistence-cassandra   with Apache License 2.0
package akka.persistence.cassandra.example

import akka.actor.typed.pubsub.Topic
import akka.actor.typed.scaladsl.Behaviors
import akka.actor.typed.{ ActorRef, ActorSystem, Behavior, PostStop }
import akka.cluster.sharding.typed.{ ClusterShardingSettings, ShardedDaemonProcessSettings }
import akka.cluster.sharding.typed.scaladsl.ShardedDaemonProcess
import akka.stream.{ KillSwitches, SharedKillSwitch }
import com.typesafe.config.Config
import org.HdrHistogram.Histogram
import akka.actor.typed.scaladsl.LoggerOps
import scala.concurrent.duration._

object ReadSide {

  sealed trait Command
  private case object ReportMetrics extends Command

  object Settings {
    def apply(config: Config): Settings =
      Settings(config.getInt("processors"), config.getInt("tags-per-processor"))
  }

  case class Settings(nrProcessors: Int, tagsPerProcessor: Int) {
    val nrTags: Int = nrProcessors * tagsPerProcessor
  }

  def apply(
      system: ActorSystem[_],
      topic: ActorRef[Topic.Command[ReadSideTopic.ReadSideMetrics]],
      settings: Settings): Unit = {
    system.log.info("Running {} processors", settings.nrProcessors)
    val killSwitch: SharedKillSwitch = KillSwitches.shared("eventProcessorSwitch")
    ShardedDaemonProcess(system).init(
      "tag-processor",
      settings.nrProcessors - 1, // bug that creates +1 processor FIXME remove in 2.6.5
      i => behavior(topic, i, settings, killSwitch),
      ShardedDaemonProcessSettings(system).withShardingSettings(ClusterShardingSettings(system).withRole("read")),
      None)
  }

  private def behavior(
      topic: ActorRef[Topic.Command[ReadSideTopic.ReadSideMetrics]],
      nr: Int,
      settings: Settings,
      killSwitch: SharedKillSwitch): Behavior[Command] =
    Behaviors.withTimers { timers =>
      timers.startTimerAtFixedRate(ReportMetrics, 10.second)
      Behaviors.setup { ctx =>
        val start = (settings.tagsPerProcessor * nr)
        val end = start + (settings.tagsPerProcessor) - 1
        val tags = (start to end).map(i => s"tag-$i")
        ctx.log.info("Processor {} processing tags {}", nr, tags)
        // milliseconds, highest value = 1 minute
        val histogram = new Histogram(10 * 1000 * 60, 2)
        // maybe easier to just have these as different actors
        // my thinking is we can start with a large number of tags and scale out
        // read side processors later
        // having more tags will also increase write throughput/latency as it'll write to
        // many partitions
        // downside is running many streams/queries against c*
        tags.foreach(
          tag =>
            new EventProcessorStream[ConfigurablePersistentActor.Event](
              ctx.system,
              ctx.executionContext,
              s"processor-$nr",
              tag).runQueryStream(killSwitch, histogram))

        Behaviors
          .receiveMessage[Command] {
            case ReportMetrics =>
              if (histogram.getTotalCount > 0) {
                topic ! Topic.Publish(
                  ReadSideTopic.ReadSideMetrics(
                    histogram.getTotalCount,
                    histogram.getMaxValue,
                    histogram.getValueAtPercentile(99),
                    histogram.getValueAtPercentile(50)))
                histogram.reset()
              }
              Behaviors.same
          }
          .receiveSignal {
            case (_, PostStop) =>
              killSwitch.shutdown()
              Behaviors.same
          }
      }
    }

} 
Example 8
Source File: MainApp.scala    From kafka-lag-exporter   with Apache License 2.0
package com.lightbend.kafkalagexporter

import java.util.concurrent.Executors

import akka.actor.typed.ActorSystem
import com.typesafe.config.{Config, ConfigFactory}
import io.prometheus.client.CollectorRegistry
import io.prometheus.client.exporter.HTTPServer

import scala.concurrent.duration._
import scala.concurrent.{Await, ExecutionContext}

object MainApp extends App {
  val system = start()

  // Add shutdown hook to respond to SIGTERM and gracefully shutdown the actor system
  sys.ShutdownHookThread {
    system ! KafkaClusterManager.Stop
    Await.result(system.whenTerminated, 10 seconds)
  }

  def start(config: Config = ConfigFactory.load()): ActorSystem[KafkaClusterManager.Message] = {
    // Cached thread pool for various Kafka calls for non-blocking I/O
    val kafkaClientEc = ExecutionContext.fromExecutor(Executors.newCachedThreadPool())

    val appConfig = AppConfig(config)

    val clientCreator = (cluster: KafkaCluster) =>
      KafkaClient(cluster, appConfig.clientGroupId, appConfig.clientTimeout)(kafkaClientEc)
    var endpointCreators : List[KafkaClusterManager.NamedCreator] = List()
    appConfig.prometheusConfig.foreach { prometheus =>
      val prometheusCreator = KafkaClusterManager.NamedCreator(
        "prometheus-lag-reporter", 
        (() => PrometheusEndpointSink(
          Metrics.definitions, appConfig.metricWhitelist, appConfig.clustersGlobalLabels(), new HTTPServer(prometheus.port), CollectorRegistry.defaultRegistry
        ))
      )
      endpointCreators = prometheusCreator :: endpointCreators
    }
    appConfig.graphiteConfig.foreach { _ =>
      val graphiteCreator = KafkaClusterManager.NamedCreator(
        "graphite-lag-reporter",
        (() => GraphiteEndpointSink(appConfig.metricWhitelist, appConfig.clustersGlobalLabels(), appConfig.graphiteConfig)))
      endpointCreators = graphiteCreator :: endpointCreators
    }
    ActorSystem(
      KafkaClusterManager.init(appConfig, endpointCreators, clientCreator), "kafka-lag-exporter")
  }
} 
Example 9
Source File: SpecBase.scala    From kafka-lag-exporter   with Apache License 2.0
package com.lightbend.kafkalagexporter.integration

import akka.actor.typed.ActorSystem
import akka.kafka.testkit.scaladsl.{EmbeddedKafkaLike, ScalatestKafkaSpec}
import com.lightbend.kafkalagexporter.MainApp
import com.lightbend.kafkalagexporter.KafkaClusterManager
import com.typesafe.config.{Config, ConfigFactory}
import net.manub.embeddedkafka.EmbeddedKafkaConfig
import org.scalatest.concurrent.{Eventually, ScalaFutures}
import org.scalatest.{BeforeAndAfterEach, Matchers, WordSpecLike}

import scala.concurrent.Await
import scala.concurrent.duration._

abstract class SpecBase(kafkaPort: Int, val exporterPort: Int)
  extends ScalatestKafkaSpec(kafkaPort)
    with WordSpecLike
    with BeforeAndAfterEach
    with EmbeddedKafkaLike
    with Matchers
    with ScalaFutures
    with Eventually
    with PrometheusUtils
    with LagSim {

  override def createKafkaConfig: EmbeddedKafkaConfig =
    EmbeddedKafkaConfig(kafkaPort,
      zooKeeperPort,
      Map(
        "offsets.topic.replication.factor" -> "1"
      ))

  var kafkaLagExporter: ActorSystem[KafkaClusterManager.Message] = _

  val clusterName = "default"

  val config: Config = ConfigFactory.parseString(s"""
                                            |kafka-lag-exporter {
                                            |  port: $exporterPort
                                            |  clusters = [
                                            |    {
                                            |      name: "$clusterName"
                                            |      bootstrap-brokers: "localhost:$kafkaPort"
                                            |    }
                                            |  ]
                                            |  poll-interval = 5 seconds
                                            |  lookup-table-size = 20
                                            |}""".stripMargin).withFallback(ConfigFactory.load())

  override def beforeEach(): Unit = {
    kafkaLagExporter = MainApp.start(config)
  }

  override def afterEach(): Unit = {
    kafkaLagExporter ! KafkaClusterManager.Stop
    Await.result(kafkaLagExporter.whenTerminated, 10 seconds)
  }
} 
Example 10
Source File: JobService.scala    From fusion-data   with Apache License 2.0
package mass.job.service.job

import java.io.File
import java.nio.charset.StandardCharsets
import java.nio.file.{ Files, Path }

import akka.actor.typed.{ ActorRef, ActorSystem }
import akka.actor.typed.scaladsl.AskPattern._
import akka.http.scaladsl.server.directives.FileInfo
import akka.util.Timeout
import javax.inject.{ Inject, Singleton }
import mass.job.service.job.JobActor.CommandReply
import mass.message.job._

import scala.collection.immutable
import scala.concurrent.duration._
import scala.concurrent.{ ExecutionContext, Future }
import scala.reflect.ClassTag

@Singleton
class JobService @Inject() (implicit system: ActorSystem[_]) {
  implicit val timeout: Timeout = Timeout(10.seconds)
  val jobActor: ActorRef[JobActor.Command] = JobActor.init(system)

  def listOption(): Future[JobGetAllOptionResp] = askToJob[JobGetAllOptionResp](JobGetAllOptionReq())

  def uploadFiles(list: immutable.Seq[(FileInfo, File)])(implicit ec: ExecutionContext): Future[JobUploadFilesResp] = {
    askToJob[JobUploadFilesResp](JobUploadFilesReq(list)).andThen {
      case _ => list.foreach { case (_, file) => Files.deleteIfExists(file.toPath) }
    }
  }

  def uploadJobOnZip(fileInfo: FileInfo, file: Path)(implicit ec: ExecutionContext): Future[JobUploadJobResp] = {
    val req = JobUploadJobReq(
      file,
      fileInfo.fileName,
      fileInfo.contentType.charsetOption.map(_.nioCharset()).getOrElse(StandardCharsets.UTF_8))
    askToJob[JobUploadJobResp](req).andThen { case _ => Files.deleteIfExists(file) }
  }

  def updateTrigger(req: JobUpdateReq): Future[JobSchedulerResp] = askToJob[JobSchedulerResp](req)

  def page(req: JobPageReq): Future[JobPageResp] = askToJob[JobPageResp](req)

  def findItemByKey(key: String): Future[JobSchedulerResp] = askToJob[JobSchedulerResp](JobFindReq(key = key))

  def createJob(req: JobCreateReq): Future[JobCreateResp] = askToJob[JobCreateResp](req)

  def updateJob(req: JobUpdateReq): Future[JobSchedulerResp] = askToJob[JobSchedulerResp](req)

  @inline private def askToJob[RESP](req: JobMessage)(implicit tag: ClassTag[RESP]): Future[RESP] =
    jobActor.ask[JobResponse](replyTo => CommandReply(req, replyTo)).mapTo[RESP]
} 
Example 11
Source File: JobActor.scala    From fusion-data   with Apache License 2.0
package mass.job.service.job

import akka.actor.typed.scaladsl.{ ActorContext, Behaviors }
import akka.actor.typed.{ ActorRef, ActorSystem, Behavior }
import akka.cluster.typed.{ ClusterSingleton, ClusterSingletonSettings, SingletonActor }
import fusion.inject.guice.GuiceApplication
import fusion.json.CborSerializable
import helloscala.common.IntStatus
import mass.core.Constants
import mass.job.JobScheduler
import mass.job.service.job.JobActor.CommandReply
import mass.message.job._

import scala.concurrent.Future

object JobActor {
  sealed trait Command extends CborSerializable
  final case class CommandReply(message: JobMessage, replyTo: ActorRef[JobResponse]) extends Command
  final case class CommandEvent(event: JobEvent) extends Command

  val NAME = "job"

  def init(system: ActorSystem[_]): ActorRef[Command] = {
    ClusterSingleton(system).init(
      SingletonActor(apply(), NAME).withSettings(ClusterSingletonSettings(system).withRole(Constants.Roles.CONSOLE)))
  }

  private def apply(): Behavior[Command] = Behaviors.setup[Command](context => new JobActor(context).init())
}

import mass.job.service.job.JobActor._
class JobActor private (context: ActorContext[Command]) extends JobServiceComponent {
  import context.executionContext
  override val jobScheduler: JobScheduler = GuiceApplication(context.system).instance[JobScheduler]

  def init(): Behavior[Command] = {
    receive()
  }

  def receive(): Behavior[Command] = Behaviors.receiveMessage[Command] {
    case CommandReply(message, replyTo) =>
      receiveMessage(message).foreach(resp => replyTo ! resp)
      Behaviors.same
    case CommandEvent(event) =>
      receiveEvent(event)
      Behaviors.same
  }

  private def receiveMessage(message: JobMessage): Future[JobResponse] =
    try {
      val future = message match {
        case req: JobScheduleReq     => handleScheduleJob(req)
        case req: JobPageReq         => handlePage(req)
        case req: JobFindReq         => handleFind(req)
        case req: JobUploadJobReq    => handleUploadJob(req)
        case req: JobListReq         => handleList(req)
        case req: JobGetAllOptionReq => Future(handleGetAllOption(req))
        case req: JobCreateReq       => handleCreateJob(req)
        case req: JobUpdateReq       => handleUpdate(req)
        case req: JobUploadFilesReq  => handleUploadFiles(req)
      }
      future.recover {
        case e =>
          val message = s"Handle message error: ${e.getMessage}."
          logger.error(message, e)
          JobErrorResponse(IntStatus.INTERNAL_ERROR, message)
      }
    } catch {
      case e: Throwable =>
        val message = s"Process message error: ${e.getMessage}."
        logger.error(message)
        Future.successful(JobErrorResponse(IntStatus.INTERNAL_ERROR, message))
    }

  private def receiveEvent(v: JobEvent): Unit =
    try {
      v match {
        case event: JobTriggerEvent => triggerJob(event)
      }
    } catch {
      case e: Throwable => logger.error(s"Process event error: ${e.getMessage}", e)
    }
} 
Example 12
Source File: ForkJoinCreation.scala    From effpi   with MIT License
// Effpi - verified message-passing programs in Dotty
// Copyright 2019 Alceste Scalas and Elias Benussi
// Released under the MIT License: https://opensource.org/licenses/MIT
package effpi.benchmarks.akka

import akka.NotUsed
import akka.actor.typed.scaladsl.{ Behaviors, MutableBehavior, ActorContext}
import akka.actor.typed.{ ActorRef, ActorSystem, Behavior, DispatcherSelector, Terminated }

import scala.concurrent.Future
import scala.concurrent.duration._
import scala.concurrent.{ Future, Promise, Await }
import scala.concurrent.ExecutionContext.Implicits.global

object ForkJoinCreation {

  case class Message(msg: String)

  val simpleActor = Behaviors.receive[Message] { (ctx, msg) =>
    Behaviors.stopped
  }

  def mainActor(
    durationPromise: Promise[Long], numActors: Int
  ): Behavior[akka.NotUsed] =
    Behaviors.setup { ctx =>

      val startTime = System.nanoTime()

      val simpleActorRefs = (1 to numActors).toList.map { id =>
        ctx.spawn(simpleActor, "simple" + id)
      }

      simpleActorRefs.foreach { simpleActorRef =>
        simpleActorRef ! Message("Hello World!")
      }

      val endTime = System.nanoTime()

      durationPromise.success(endTime - startTime)
      Behaviors.stopped
    }

  def bench(params: Int): Long = {
    val durationPromise = Promise[Long]()
    val durationFuture = durationPromise.future
    val system = ActorSystem(
      mainActor(durationPromise, params), "ForkJoinCreationDemo")
    Await.result(system.whenTerminated, Duration.Inf)
    val duration = Await.result(durationFuture, Duration.Inf)
    duration
  }

} 
Example 13
Source File: Mass.scala    From fusion-data   with Apache License 2.0
package mass

import akka.actor.typed.scaladsl.adapter._
import akka.actor.typed.{ ActorRef, ActorSystem, Behavior, Props }
import akka.{ actor => classic }
import com.typesafe.config.Config
import fusion.common.config.FusionConfigFactory
import fusion.common.{ ReceptionistFactory, SpawnFactory }
import fusion.core.extension.FusionCore
import helloscala.common.Configuration
import mass.core.Constants

import scala.concurrent.ExecutionContext

final class Mass private (val classicSystem: classic.ActorSystem) extends SpawnFactory with ReceptionistFactory {
  implicit def executionContext: ExecutionContext = classicSystem.dispatcher

  val configuration: Configuration = FusionCore(classicSystem).configuration

  override def typedSystem: ActorSystem[_] = classicSystem.toTyped

  override def spawn[T](behavior: Behavior[T], props: Props): ActorRef[T] =
    classicSystem.spawnAnonymous(behavior, props)

  override def spawn[T](behavior: Behavior[T], name: String, props: Props): ActorRef[T] =
    classicSystem.spawn(behavior, name, props)
}

object Mass {
  def fromMergedConfig(config: Config): Mass =
    fromActorSystem(classic.ActorSystem(Constants.MASS, config))

  private[mass] def fromActorSystem(system: classic.ActorSystem): Mass = new Mass(system)

  def fromConfig(originalConfig: Config): Mass = {
    val config = FusionConfigFactory.arrangeConfig(originalConfig, Constants.MASS, Seq("akka"))
    fromMergedConfig(config)
  }
} 
Example 14
Source File: MusicCommands.scala    From AckCord   with MIT License
package ackcord.examplecore.music

import ackcord._
import ackcord.commands.{CommandBuilder, CommandController, NamedCommand, VoiceGuildMemberCommandMessage}
import ackcord.data.{GuildId, TextChannel}
import ackcord.examplecore.music.MusicHandler.{NextTrack, QueueUrl, StopMusic, TogglePause}
import akka.NotUsed
import akka.actor.typed.scaladsl.AskPattern._
import akka.actor.typed.{ActorRef, ActorSystem}
import akka.stream.scaladsl.{Flow, Keep, Sink}
import akka.stream.typed.scaladsl.ActorFlow
import akka.util.Timeout

class MusicCommands(requests: Requests, guildId: GuildId, musicHandler: ActorRef[MusicHandler.Command])(
    implicit timeout: Timeout,
    system: ActorSystem[Nothing]
) extends CommandController(requests) {

  val VoiceCommand: CommandBuilder[VoiceGuildMemberCommandMessage, NotUsed] =
    GuildVoiceCommand.andThen(CommandBuilder.inOneGuild(guildId))

  val queue: NamedCommand[String] =
    VoiceCommand.named("&", Seq("q", "queue")).parsing[String].withSideEffects { m =>
      musicHandler.ask[MusicHandler.CommandAck.type](QueueUrl(m.parsed, m.textChannel, m.voiceChannel.id, _))
    }

  private def simpleCommand(
      aliases: Seq[String],
      mapper: (TextChannel, ActorRef[MusicHandler.CommandAck.type]) => MusicHandler.MusicHandlerEvents
  ): NamedCommand[NotUsed] = {
    VoiceCommand.andThen(CommandBuilder.inOneGuild(guildId)).named("&", aliases, mustMention = true).toSink {
      Flow[VoiceGuildMemberCommandMessage[NotUsed]]
        .map(_.textChannel)
        .via(ActorFlow.ask(requests.parallelism)(musicHandler)(mapper))
        .toMat(Sink.ignore)(Keep.none)
    }
  }

  val stop: NamedCommand[NotUsed] = simpleCommand(Seq("s", "stop"), StopMusic.apply)

  val next: NamedCommand[NotUsed] = simpleCommand(Seq("n", "next"), NextTrack.apply)

  val pause: NamedCommand[NotUsed] = simpleCommand(Seq("p", "pause"), TogglePause.apply)
} 
Example 15
Source File: VoiceUDPFlow.scala    From AckCord   with MIT License
package ackcord.voice

import java.net.InetSocketAddress
import java.nio.ByteOrder

import scala.concurrent.{Future, Promise}

import ackcord.data.{RawSnowflake, UserId}
import ackcord.util.UdpConnectedFlow
import akka.NotUsed
import akka.actor.typed.ActorSystem
import akka.stream.scaladsl.{BidiFlow, Concat, Flow, GraphDSL, Keep, Source}
import akka.stream.{BidiShape, OverflowStrategy}
import akka.util.ByteString

object VoiceUDPFlow {

  val silence = ByteString(0xF8, 0xFF, 0xFE)

  val SampleRate = 48000
  val FrameSize  = 960
  val FrameTime  = 20

  def flow[Mat](
      remoteAddress: InetSocketAddress,
      ssrc: Int,
      serverId: RawSnowflake,
      userId: UserId,
      secretKeys: Source[Option[ByteString], Mat]
  )(implicit system: ActorSystem[Nothing]): Flow[ByteString, AudioAPIMessage.ReceivedData, (Mat, Future[FoundIP])] =
    NaclBidiFlow
      .bidiFlow(ssrc, serverId, userId, secretKeys)
      .atopMat(voiceBidi(ssrc).reversed)(Keep.both)
      .async
      .join(Flow[ByteString].buffer(32, OverflowStrategy.backpressure).via(UdpConnectedFlow.flow(remoteAddress)))

  def voiceBidi(ssrc: Int): BidiFlow[ByteString, ByteString, ByteString, ByteString, Future[FoundIP]] = {
    implicit val byteOrder: ByteOrder = ByteOrder.BIG_ENDIAN
    val ipDiscoveryPacket = {
      val byteBuilder = ByteString.createBuilder
      byteBuilder.sizeHint(74)
      byteBuilder.putShort(0x1).putShort(70).putInt(ssrc)

      byteBuilder.putBytes(new Array[Byte](66))

      byteBuilder.result()
    }

    val valvePromise = Promise[Unit]
    val valve        = Source.future(valvePromise.future).drop(1).asInstanceOf[Source[ByteString, NotUsed]]

    val ipDiscoveryFlow = Flow[ByteString]
      .viaMat(new IPDiscoveryFlow(() => valvePromise.success(())))(Keep.right)

    BidiFlow
      .fromGraph(GraphDSL.create(ipDiscoveryFlow) { implicit b => ipDiscovery =>
        import GraphDSL.Implicits._

        val voiceIn = b.add(Flow[ByteString])

        val ipDiscoverySource           = b.add(Source.single(ipDiscoveryPacket) ++ valve)
        val ipDiscoveryAndThenVoiceData = b.add(Concat[ByteString]())

        ipDiscoverySource ~> ipDiscoveryAndThenVoiceData
        voiceIn ~> ipDiscoveryAndThenVoiceData

        BidiShape(
          ipDiscovery.in,
          ipDiscovery.out,
          voiceIn.in,
          ipDiscoveryAndThenVoiceData.out
        )
      })
  }

  
  case class FoundIP(address: String, port: Int)
} 
Example 16
Source File: CmdStreams.scala    From AckCord   with MIT License
package ackcord.oldcommands

import ackcord._
import akka.NotUsed
import akka.actor.typed.ActorSystem
import akka.stream.scaladsl.{BroadcastHub, Keep, Source}
import akka.stream.{ActorAttributes, Supervision}

object CmdStreams {

  
  def cmdStreams[A](
      settings: AbstractCommandSettings,
      apiMessages: Source[APIMessage, A]
  )(implicit system: ActorSystem[Nothing]): (A, Source[RawCmdMessage, NotUsed]) = {
    apiMessages
      .collect {
        case APIMessage.MessageCreate(msg, c) =>
          implicit val cache: MemoryCacheSnapshot = c.current

          CmdHelper.isValidCommand(settings.needMention(msg), msg).map { args =>
            if (args == Nil) NoCmd(msg, c.current)
            else {
              settings
                .getPrefix(args, msg)
                .fold[RawCmdMessage](NoCmdPrefix(msg, args.head, args.tail, cache)) {
                  case (prefix, remaining) => RawCmd(msg, prefix, remaining.head, remaining.tail.toList, c.current)
                }
            }
          }
      }
      .mapConcat(_.toList)
      .toMat(BroadcastHub.sink(bufferSize = 256))(Keep.both)
      .addAttributes(ActorAttributes.supervisionStrategy(Supervision.resumingDecider))
      .run()
  }

} 
Example 17
Source File: AckCordGatewaySettings.scala    From AckCord   with MIT License
package ackcord.util

import akka.actor.typed.ActorSystem
import com.typesafe.config.Config


class AckCordGatewaySettings(config: Config) {
  import config._

  val LogReceivedWs: Boolean = getBoolean("ackcord.logging.payloads.log-received-ws")
  val LogSentWs: Boolean     = getBoolean("ackcord.logging.payloads.log-sent-ws")

  val LogJsonTraces: Boolean    = getBoolean("ackcord.logging.traces.log-json-traces")
  val OnlyUniqueTraces: Boolean = getBoolean("ackcord.logging.traces.only-unique-traces")
  val NumTraces: Int            = getInt("ackcord.logging.traces.num-traces")
}
object AckCordGatewaySettings {

  def apply()(implicit system: ActorSystem[Nothing]): AckCordGatewaySettings =
    new AckCordGatewaySettings(system.settings.config)
} 
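
A usage sketch for the settings class above. The configuration values are invented for illustration; in a real AckCord application they come from the library's reference.conf and the user's application.conf:

import ackcord.util.AckCordGatewaySettings
import akka.actor.typed.ActorSystem
import akka.actor.typed.scaladsl.Behaviors
import com.typesafe.config.ConfigFactory

object GatewaySettingsUsage extends App {
  // Illustrative values for the keys read by AckCordGatewaySettings.
  val config = ConfigFactory.parseString(
    """ackcord.logging.payloads.log-received-ws = false
      |ackcord.logging.payloads.log-sent-ws = false
      |ackcord.logging.traces.log-json-traces = false
      |ackcord.logging.traces.only-unique-traces = true
      |ackcord.logging.traces.num-traces = 10
      |""".stripMargin)

  implicit val system: ActorSystem[Nothing] =
    ActorSystem(Behaviors.empty[Nothing], "gateway-settings-demo", config)

  println(s"log JSON traces: ${AckCordGatewaySettings().LogJsonTraces}")
  system.terminate()
}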
Example 18
Source File: SupervisionStreams.scala    From AckCord   with MIT License
package ackcord.requests

import akka.actor.typed.ActorSystem
import akka.stream.javadsl.RunnableGraph
import akka.stream.scaladsl.{Flow, Sink, Source}
import akka.stream.{ActorAttributes, Attributes, Supervision}

object SupervisionStreams {

  def addLogAndContinueFunction[G](addAtributes: Attributes => G)(implicit system: ActorSystem[Nothing]): G =
    addAtributes(ActorAttributes.supervisionStrategy {
      case _: RetryFailedRequestException[_] => Supervision.Stop
      case e =>
        system.log.error("Unhandled exception in stream", e)
        Supervision.Resume
    })

  def logAndContinue[M](graph: RunnableGraph[M])(implicit system: ActorSystem[Nothing]): RunnableGraph[M] =
    addLogAndContinueFunction(graph.addAttributes)

  def logAndContinue[Out, Mat](source: Source[Out, Mat])(implicit system: ActorSystem[Nothing]): Source[Out, Mat] =
    addLogAndContinueFunction(source.addAttributes)

  def logAndContinue[In, Out, Mat](
      flow: Flow[In, Out, Mat]
  )(implicit system: ActorSystem[Nothing]): Flow[In, Out, Mat] =
    addLogAndContinueFunction(flow.addAttributes)

  def logAndContinue[In, Mat](sink: Sink[In, Mat])(implicit system: ActorSystem[Nothing]): Sink[In, Mat] =
    addLogAndContinueFunction(sink.addAttributes)
} 
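
A sketch of how these helpers might be applied to an ordinary stream so that a failing element is logged and skipped rather than failing the whole stream; the stream itself is invented for illustration:

import ackcord.requests.SupervisionStreams
import akka.actor.typed.ActorSystem
import akka.actor.typed.scaladsl.Behaviors
import akka.stream.scaladsl.Source

object SupervisionStreamsUsage extends App {
  implicit val system: ActorSystem[Nothing] =
    ActorSystem(Behaviors.empty[Nothing], "supervision-demo")

  // The division by zero is logged and that element dropped; the stream keeps running.
  SupervisionStreams
    .logAndContinue(Source(List(1, 2, 0, 4)).map(10 / _))
    .runForeach(println)
    .onComplete(_ => system.terminate())(system.executionContext)
}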
Example 19
Source File: CacheStreams.scala    From AckCord   with MIT License
package ackcord

import scala.collection.mutable

import ackcord.cachehandlers.CacheSnapshotBuilder
import ackcord.gateway.GatewayEvent.ReadyData
import ackcord.gateway.GatewayMessage
import ackcord.requests.SupervisionStreams
import akka.NotUsed
import akka.actor.typed.ActorSystem
import akka.stream.scaladsl.{BroadcastHub, Flow, Keep, MergeHub, Sink, Source}
import org.slf4j.Logger

object CacheStreams {

  
  def cacheUpdater(
      cacheProcessor: MemoryCacheSnapshot.CacheProcessor
  )(implicit system: ActorSystem[Nothing]): Flow[CacheEvent, (CacheEvent, CacheState), NotUsed] =
    Flow[CacheEvent].statefulMapConcat { () =>
      var state: CacheState    = null
      implicit val log: Logger = system.log

      //We only handle events when we are ready to, and we have received the ready event.
      def isReady: Boolean = state != null

      {
        case readyEvent @ APIMessageCacheUpdate(_: ReadyData, _, _, _, _) =>
          val builder = new CacheSnapshotBuilder(
            0,
            null, //The event will populate this,
            mutable.Map.empty,
            mutable.Map.empty,
            mutable.Map.empty,
            mutable.Map.empty,
            mutable.Map.empty,
            mutable.Map.empty,
            mutable.Map.empty,
            mutable.Map.empty,
            cacheProcessor
          )

          readyEvent.process(builder)

          val snapshot = builder.toImmutable
          state = CacheState(snapshot, snapshot)
          List(readyEvent -> state)
        case handlerEvent: CacheEvent if isReady =>
          val builder = CacheSnapshotBuilder(state.current)
          handlerEvent.process(builder)

          state = state.update(builder.toImmutable)
          List(handlerEvent -> state)
        case _ if !isReady =>
          log.error("Received event before ready")
          Nil
      }
    }
} 
Example 20
Source File: Cache.scala    From AckCord   with MIT License
package ackcord

import scala.collection.immutable

import ackcord.gateway.GatewayMessage
import akka.actor.typed.ActorSystem
import akka.stream.scaladsl.{Sink, Source}
import akka.{NotUsed, actor => classic}


// Note: the Cache case class whose constructor is used below is defined elsewhere in the AckCord project.
object Cache {

  def create(
      cacheProcessor: MemoryCacheSnapshot.CacheProcessor = MemoryCacheSnapshot.defaultCacheProcessor,
      parallelism: Int = 4
  )(implicit system: ActorSystem[Nothing]): Cache = {
    val (publish, subscribe)               = CacheStreams.cacheStreams(cacheProcessor)
    val (gatewayPublish, gatewaySubscribe) = CacheStreams.gatewayEvents[Any]

    //Keep it drained if nothing else is using it
    subscribe.runWith(Sink.ignore)

    Cache(publish, subscribe, gatewayPublish, gatewaySubscribe, parallelism)
  }
} 
Example 21
Source File: TFServingModelServer.scala    From model-serving-tutorial   with Apache License 2.0
package com.lightbend.modelserving.tensorflowserving

import akka.actor.typed.scaladsl.Behaviors
import akka.actor.typed.scaladsl.adapter._
import akka.actor.typed.{ActorRef, ActorSystem}
import akka.http.scaladsl.Http
import akka.kafka.scaladsl.Consumer
import akka.kafka.{ConsumerSettings, Subscriptions}
import akka.stream.scaladsl.Sink
import akka.stream.typed.scaladsl.{ActorFlow, ActorMaterializer}
import akka.util.Timeout
import com.lightbend.modelserving.configuration.ModelServingConfiguration
import com.lightbend.modelserving.model.ServingResult
import com.lightbend.modelserving.winemodel.DataRecord
import org.apache.kafka.clients.consumer.ConsumerConfig
import org.apache.kafka.common.serialization.ByteArrayDeserializer

import scala.concurrent.duration._
import scala.util.Success


object TFServingModelServer {

  import ModelServingConfiguration._

  // Initialization

  implicit val modelServer = ActorSystem(
    Behaviors.setup[TFModelServerActor](
      context => new TFModelServerBehaviour(context)), "ModelServing")

  implicit val materializer = ActorMaterializer()
  implicit val executionContext = modelServer.executionContext
  implicit val askTimeout = Timeout(30.seconds)

  // Configuration properties for the Kafka topic.
  val dataSettings = ConsumerSettings(modelServer.toUntyped, new ByteArrayDeserializer, new ByteArrayDeserializer)
    .withBootstrapServers(KAFKA_BROKER)
    .withGroupId(DATA_GROUP)
    .withProperty(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest")

  def main(args: Array[String]): Unit = {

    println(s"Akka application that uses TensorFlow Serving, brokers $KAFKA_BROKER")

    // Data stream processing
    Consumer.atMostOnceSource(dataSettings, Subscriptions.topics(DATA_TOPIC))
      .map(record => DataRecord.wineFromByteArray(record.value)).collect { case Success(a) => a }
      .via(ActorFlow.ask(1)(modelServer)((elem, replyTo : ActorRef[Option[ServingResult[Double]]]) => new ServeData(replyTo, elem)))
      .collect{ case Some(result) => result}
      .runWith(Sink.foreach(result =>
        println(s"Model served in ${System.currentTimeMillis() - result.submissionTs} ms, with result ${result.result} " +
          s"(model ${result.name}, data type ${result.dataType})")))
    // Rest Server
    startRest(modelServer)
  }

  def startRest(modelServerManager: ActorSystem[TFModelServerActor]): Unit = {

    implicit val timeout = Timeout(10.seconds)
    implicit val system = modelServerManager.toUntyped

    val host = "0.0.0.0"
    val port = MODELSERVING_PORT
    val routes = TFQueriesAkkaHttpResource.storeRoutes(modelServerManager)(modelServerManager.scheduler)

    val _ = Http().bindAndHandle(routes, host, port) map
      { binding =>
        println(s"Starting models observer on port ${binding.localAddress}") } recover {
      case ex =>
        println(s"Models observer could not bind to $host:$port - ${ex.getMessage}")
    }
  }
} 
Example 22
Source File: ClusterStatusTrackerMain.scala    From Pi-Akka-Cluster   with Apache License 2.0
package akkapi.cluster

import akka.NotUsed
import akka.actor.typed.scaladsl.adapter.TypedActorSystemOps
import akka.actor.typed.scaladsl.{ActorContext, Behaviors}
import akka.actor.typed.{ActorSystem, Behavior, Terminated}
import akka.management.cluster.bootstrap.ClusterBootstrap
import akka.management.scaladsl.AkkaManagement

object Main {
  def apply(settings: Settings): Behavior[NotUsed] = Behaviors.setup { context =>
    val ledStripDriver = context.spawn(LedStripDriver(settings), "led-strip-driver")
    val ledStripController = context.spawn(LedStripVisualiser(settings, ledStripDriver), "led-strip-controller")
    val clusterStatusTracker =
      context.spawn(
        ClusterStatusTracker(
          settings,
          Some(contextToClusterSingleton(settings))
        ),
        "cluster-status-tracker"
      )
    clusterStatusTracker ! ClusterStatusTracker.SubscribeVisualiser(ledStripController)
    Behaviors.receiveSignal {
      case (_, Terminated(_)) =>
        Behaviors.stopped
    }
  }

  private def contextToClusterSingleton(settings: Settings): ActorContextToSingletonBehavior  =
    (context: ActorContext[ClusterStatusTracker.ClusterEvent]) => PiClusterSingleton(settings, context.self)

  type ActorContextToSingletonBehavior = ActorContext[ClusterStatusTracker.ClusterEvent] => Behavior[PiClusterSingleton.Command]
}

object ClusterStatusTrackerMain {
  def main(args: Array[String]): Unit = {
    System.loadLibrary("rpi_ws281x")

    val settings = Settings()
    val config = settings.config
    val system = ActorSystem[NotUsed](Main(settings), settings.actorSystemName, config)

    // Start Akka HTTP Management extension
    AkkaManagement(system.toClassic).start()
    ClusterBootstrap(system.toClassic).start()
  }
} 
Example 23
Source File: package.scala    From lila-ws   with GNU Affero General Public License v3.0
package lila

import akka.actor.typed.{ ActorRef, ActorSystem, Behavior }

package object ws {

  type Emit[A] = Function[A, Unit]

  type ClientSystem   = ActorSystem[Clients.Control]
  type ClientBehavior = Behavior[ipc.ClientMsg]
  type Client         = ActorRef[ipc.ClientMsg]
  type ClientEmit     = Emit[ipc.ClientIn]

  type ~[+A, +B] = Tuple2[A, B]
  object ~ {
    def apply[A, B](x: A, y: B)                              = Tuple2(x, y)
    def unapply[A, B](x: Tuple2[A, B]): Option[Tuple2[A, B]] = Some(x)
  }

  @inline implicit def toOrnicarAddKcombinator[A](any: A) =
    new ornicarAddKcombinator(any)
}

final class ornicarAddKcombinator[A](private val any: A) extends AnyVal {
  def kCombinator(sideEffect: A => Unit): A = {
    sideEffect(any)
    any
  }
  def ~(sideEffect: A => Unit): A = kCombinator(sideEffect)
  def pp: A                       = kCombinator(println)
  def pp(msg: String): A          = kCombinator(a => println(s"[$msg] $a"))
} 
Example 24
Source File: BlockingRight.scala    From akka_streams_tutorial   with MIT License
package actor

import akka.actor.typed.scaladsl.Behaviors
import akka.actor.typed.{ActorSystem, DispatcherSelector}


object BlockingRight extends App {

  val root = Behaviors.setup[Nothing] { context =>
    (1 to 50).foreach { i =>
      //non blocking actor running on default-dispatcher
      context.spawn(PrintActor(), s"nonblocking-$i") ! i
      //blocking actor running on custom-dispatcher
      context.spawn(
        BlockingActor(),
        s"blocking-$i",
        DispatcherSelector.fromConfig("custom-dispatcher-for-blocking")
      ) ! i
    }

    Behaviors.empty
  }

  val system = ActorSystem[Nothing](root, "BlockingRight")
} 
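
DispatcherSelector.fromConfig("custom-dispatcher-for-blocking") only works if a dispatcher section with that name exists in the actor system's configuration. The example above does not show that section, so here is a hypothetical definition with conventional thread-pool settings (the project's real application.conf may differ), wired in programmatically:

import akka.actor.typed.scaladsl.Behaviors
import akka.actor.typed.{ActorSystem, DispatcherSelector}
import com.typesafe.config.ConfigFactory

object CustomDispatcherSketch extends App {
  // Hypothetical dispatcher definition matching the name used above.
  val config = ConfigFactory.parseString(
    """custom-dispatcher-for-blocking {
      |  type = Dispatcher
      |  executor = "thread-pool-executor"
      |  thread-pool-executor.fixed-pool-size = 16
      |  throughput = 1
      |}""".stripMargin).withFallback(ConfigFactory.load())

  val root = Behaviors.setup[Nothing] { context =>
    val worker = context.spawn(
      Behaviors.receiveMessage[Int] { i =>
        println(s"blocking work $i on ${Thread.currentThread.getName}")
        Behaviors.same
      },
      "blocking-worker",
      DispatcherSelector.fromConfig("custom-dispatcher-for-blocking")
    )
    worker ! 42
    Behaviors.empty
  }

  ActorSystem[Nothing](root, "CustomDispatcherDemo", config)
}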
Example 25
Source File: CustomCacheRunner.scala    From akka_streams_tutorial   with MIT License
package sample.stream_actor.typed

import akka.actor.typed.ActorSystem
import akka.actor.typed.scaladsl.AskPattern._
import akka.stream.ThrottleMode
import akka.stream.scaladsl.{RestartSource, Sink, Source}
import akka.util.Timeout
import sample.stream_actor.typed.CustomCache.{AddDevices, CacheRequests, CacheResponses, CachedDevices}

import scala.concurrent.Future
import scala.concurrent.duration._


object CustomCacheRunner extends App {
  // the system is also the top level actor ref
  implicit val cache = ActorSystem[CacheRequests](CustomCache.empty, "CustomCache")
  implicit val ec = cache.executionContext
  implicit val timeout: Timeout = 5.seconds

  RestartSource
    .withBackoff(
      minBackoff = 0.seconds,
      maxBackoff = 60.seconds,
      randomFactor = 0.1
    ) { () =>
      Source
        .tick(initialDelay = 0.seconds, interval = 2.seconds, tick = ())
        .mapAsync(parallelism = 1) { _ => cache.ref.ask(ref => CustomCache.Get("42", ref)) }
        .map((each: CacheResponses) =>
          each match {
            case cachedDevices: CachedDevices => cache.log.info(s"Current amount of cached devices: ${cachedDevices.devices.size}")
            case _ => cache.log.info("No devices")
          })
        .recover {
          case ex => cache.log.error("Failed to read cached devices: ", ex)
        }
    }
    .runWith(Sink.ignore)

  val sourceOfUUID = Source(Stream.continually(java.util.UUID.randomUUID.toString).take(100))
  sourceOfUUID
    .throttle(10, 1.second, 10, ThrottleMode.shaping)
    .mapAsync(parallelism = 10)(each => Future(cache ! AddDevices(List(DeviceId(each)))))
    .runWith(Sink.ignore)
} 
Example 26
Source File: Shop.scala    From Learn-Scala-Programming   with MIT License
package ch12

import akka.actor.typed.{ActorRef, ActorSystem, Behavior}
import akka.actor.typed.receptionist.{Receptionist, ServiceKey}
import akka.actor.typed.scaladsl.{ActorContext, Behaviors}
import akka.actor.typed.receptionist.Receptionist._
import ch12.Bakery.Groceries
import ch12.Manager.ReceiveGroceries
import ch12.Shop.seller
import com.typesafe.config.ConfigFactory

object Store extends App {
  val config = ConfigFactory.load("grocery.conf")
  val system = ActorSystem(seller(Shop.systemReceptionist), "Typed-Bakery", config)
}
object Shop {
  final case class ShoppingList(eggs: Int,
                                flour: Int,
                                sugar: Int,
                                chocolate: Int)
  final case class SellByList(list: ShoppingList,
                              toWhom: ActorRef[Manager.Command])

  val SellerKey = ServiceKey[SellByList]("GrocerySeller")

  type ReceptionistFactory = ActorContext[SellByList] => ActorRef[Receptionist.Command]

  val systemReceptionist: ReceptionistFactory = _.system.receptionist

  def seller(receptionist: ReceptionistFactory): Behavior[SellByList] = Behaviors.setup { ctx ⇒
    receptionist(ctx) ! Register(SellerKey, ctx.self)
    Behaviors.receiveMessage[SellByList] {
      case SellByList(list, toWhom) ⇒
        import list._
        toWhom ! ReceiveGroceries(Groceries(eggs, flour, sugar, chocolate))
        Behaviors.same
    }
  }

} 
Example 27
Source File: ClusterStatusTrackerMain.scala    From Pi-Akka-Cluster   with Apache License 2.0
package akkapi.cluster

import akka.NotUsed
import akka.actor.typed.scaladsl.Behaviors
import akka.actor.typed.scaladsl.adapter.TypedActorSystemOps
import akka.actor.typed.{ActorSystem, Behavior, Terminated}
import akka.management.scaladsl.AkkaManagement

object Main {
  def apply(settings: Settings): Behavior[NotUsed] = Behaviors.setup { context =>
    val ledStripDriver = context.spawn(LedStripDriver(settings), "led-strip-driver")
    val ledStripController = context.spawn(LedStripVisualiser(settings, ledStripDriver), "led-strip-controller")
    val clusterStatusTracker = context.spawn(ClusterStatusTracker(settings, None), "cluster-status-tracker")
    clusterStatusTracker ! ClusterStatusTracker.SubscribeVisualiser(ledStripController)
    Behaviors.receiveSignal {
      case (_, Terminated(_)) =>
        Behaviors.stopped
    }
  }
}

object ClusterStatusTrackerMain {
  def main(args: Array[String]): Unit = {
    System.loadLibrary("rpi_ws281x")

    val settings = Settings()
    val config = settings.config
    val system = ActorSystem[NotUsed](Main(settings), settings.actorSystemName, config)

    // Start Akka HTTP Management extension
    AkkaManagement(system.toClassic).start()
  }
} 
Example 28
Source File: Main.scala    From Pi-Akka-Cluster   with Apache License 2.0
package com.lightbend.akka_oled

import akka.NotUsed
import akka.actor.typed.scaladsl.Behaviors
import akka.actor.typed.scaladsl.adapter.TypedActorSystemOps
import akka.actor.typed.{ActorSystem, Behavior, Terminated}
import akka.cluster.ddata.LWWMapKey
import akka.http.scaladsl.Http
import akka.http.scaladsl.marshallers.sprayjson.SprayJsonSupport
import akka.management.scaladsl.AkkaManagement
import akka.stream.Materializer
import akkapi.cluster.{ClusterStatusTracker, OledClusterVisualizer, OledDriver, Settings}
import spray.json.DefaultJsonProtocol

object Main extends SprayJsonSupport with DefaultJsonProtocol {

  case class NodeStatus(status: String)

  implicit val transactionFormat = jsonFormat1(NodeStatus)

  def apply(settings: Settings): Behavior[NotUsed] = Behaviors.setup { ctx =>

    val oledDriver = ctx.spawn(OledDriver(settings), "oled-driver")
    oledDriver ! OledDriver.RegisterView("Cluster State", 0)
    oledDriver ! OledDriver.RegisterView("Distributed Data State", 1)

    val clusterView = ctx.spawn(OledClusterVisualizer(0, settings, oledDriver), "oled-cluster-view")
    val clusterStatusTracker = ctx.spawn(ClusterStatusTracker(settings, None), "cluster-status-tracker")
    clusterStatusTracker ! ClusterStatusTracker.SubscribeVisualiser(clusterView)

    val ddataTracker = ctx.spawn(
      DistributedDataTracker(1, LWWMapKey[String, String]("cache"), oledDriver),
      "oled-ddata-view")

    val routes = new Routes(ddataTracker)(ctx.system)

    implicit val untypedSystem: akka.actor.ActorSystem = ctx.system.toClassic
    implicit val mat: Materializer = Materializer.createMaterializer(ctx.system.toClassic)
    Http()(ctx.system.toClassic).bindAndHandle(routes.route,
      settings.config.getString("cluster-node-configuration.external-ip"), 8080)

    Behaviors.receiveSignal {
      case (_, Terminated(_)) =>
        Behaviors.stopped
    }
  }
}

object DisplayDistributedDataMain {
  def main(args: Array[String]): Unit = {
    val settings = Settings()
    val system = ActorSystem[NotUsed](Main(settings), "akka-oled", settings.config)

    // Start Akka HTTP Management extension
    AkkaManagement(system).start()
  }
} 
Example 29
Source File: Routes.scala    From Pi-Akka-Cluster   with Apache License 2.0
package com.lightbend.akka_oled

import akka.actor.typed.scaladsl.AskPattern._
import akka.actor.typed.{ActorRef, ActorSystem}
import akka.http.scaladsl.marshallers.sprayjson.SprayJsonSupport
import akka.http.scaladsl.server.Directives.{as, complete, concat, entity, get, onSuccess, pathPrefix, post, _}
import akka.http.scaladsl.server.Route
import akka.util.Timeout
import com.lightbend.akka_oled.DistributedDataTracker.{Get, UpdateStatus}
import com.lightbend.akka_oled.Main.NodeStatus

import scala.concurrent.duration._

class Routes(tracker: ActorRef[DistributedDataTracker.Command])(implicit system: ActorSystem[_]) extends SprayJsonSupport {
  implicit val timeout: Timeout = 8.seconds

  val route: Route =
    pathPrefix("status" / "[0-9a-zA-Z]+".r) {
      node =>
        concat(
          get {
            onSuccess(tracker.ask[String](Get(node, _))) {
              value => complete(value + "\n")
            }
          },
          post {
            entity(as[NodeStatus]) { status =>
              tracker ! UpdateStatus(node, status.status)
              complete("Ok\n")
            }
          }
        )
    }

} 
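The ask call in the route above (tracker.ask[String](Get(node, _))) needs an implicit Timeout plus a Scheduler, and AskPattern._ derives the latter from the implicit typed ActorSystem. A minimal, self-contained sketch of the same pattern, using a hypothetical Echo behavior:

import akka.actor.typed.scaladsl.AskPattern._
import akka.actor.typed.scaladsl.Behaviors
import akka.actor.typed.{ActorRef, ActorSystem, Behavior}
import akka.util.Timeout

import scala.concurrent.Future
import scala.concurrent.duration._

object AskExample {
  final case class Echo(text: String, replyTo: ActorRef[String])

  val echo: Behavior[Echo] = Behaviors.receiveMessage { msg =>
    msg.replyTo ! msg.text
    Behaviors.same
  }

  def main(args: Array[String]): Unit = {
    implicit val system: ActorSystem[Echo] = ActorSystem(echo, "ask-example")
    implicit val timeout: Timeout = 3.seconds
    // The implicit typed ActorSystem supplies the Scheduler that ask requires.
    val reply: Future[String] = system.ask[String](replyTo => Echo("ping", replyTo))
    reply.foreach { text =>
      println(text)
      system.terminate()
    }(system.executionContext)
  }
}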
Example 30
Source File: ClusterStatusTrackerMain.scala    From Pi-Akka-Cluster   with Apache License 2.0
package akkapi.cluster

import akka.NotUsed
import akka.actor.typed.scaladsl.adapter.TypedActorSystemOps
import akka.actor.typed.scaladsl.{ActorContext, Behaviors}
import akka.actor.typed.{ActorSystem, Behavior, Terminated}
import akka.management.scaladsl.AkkaManagement

object Main {
  def apply(settings: Settings): Behavior[NotUsed] = Behaviors.setup { context =>
    val ledStripDriver = context.spawn(LedStripDriver(settings), "led-strip-driver")
    val ledStripController = context.spawn(LedStripVisualiser(settings, ledStripDriver), "led-strip-controller")
    val clusterStatusTracker =
      context.spawn(
        ClusterStatusTracker(
          settings,
          Some(contextToClusterSingleton(settings))
        ),
        "cluster-status-tracker"
      )
    clusterStatusTracker ! ClusterStatusTracker.SubscribeVisualiser(ledStripController)
    Behaviors.receiveSignal {
      case (_, Terminated(_)) =>
        Behaviors.stopped
    }
  }

  private def contextToClusterSingleton(settings: Settings): ActorContextToSingletonBehavior  =
    (context: ActorContext[ClusterStatusTracker.ClusterEvent]) => PiClusterSingleton(settings, context.self)

  type ActorContextToSingletonBehavior = ActorContext[ClusterStatusTracker.ClusterEvent] => Behavior[PiClusterSingleton.Command]
}

object ClusterStatusTrackerMain {
  def main(args: Array[String]): Unit = {
    System.loadLibrary("rpi_ws281x")

    val settings = Settings()
    val config = settings.config
    val system = ActorSystem[NotUsed](Main(settings), settings.actorSystemName, config)

    // Start Akka HTTP Management extension
    AkkaManagement(system.toClassic).start()
  }
} 
Example 33
Source File: LilaWsServer.scala    From lila-ws   with GNU Affero General Public License v3.0
package lila.ws

import akka.actor.typed.{ ActorSystem, Scheduler }
import com.softwaremill.macwire._
import com.typesafe.config.{ Config, ConfigFactory }
import scala.concurrent.duration._
import scala.concurrent.ExecutionContext

import util.Util.nowSeconds

object Boot extends App {

  lazy val config: Config                         = ConfigFactory.load
  lazy val clientSystem: ClientSystem             = ActorSystem(Clients.behavior, "clients")
  implicit def scheduler: Scheduler               = clientSystem.scheduler
  implicit def executionContext: ExecutionContext = clientSystem.executionContext

  lazy val mongo         = wire[Mongo]
  lazy val groupedWithin = wire[util.GroupedWithin]
  lazy val lightUserApi  = wire[LightUserApi]
  lazy val lilaRedis     = wire[Lila]
  lazy val lilaHandlers  = wire[LilaHandler]
  lazy val roundCrowd    = wire[RoundCrowd]
  lazy val roomCrowd     = wire[RoomCrowd]
  lazy val crowdJson     = wire[ipc.CrowdJson]
  lazy val users         = wire[Users]
  lazy val keepAlive     = wire[KeepAlive]
  lazy val lobby         = wire[Lobby]
  lazy val socialGraph   = wire[SocialGraph]
  lazy val friendList    = wire[FriendList]
  lazy val services      = wire[Services]
  lazy val controller    = wire[Controller]
  lazy val router        = wire[Router]
  lazy val seenAt        = wire[SeenAtUpdate]
  lazy val auth          = wire[Auth]
  lazy val nettyServer   = wire[netty.NettyServer]
  lazy val monitor       = wire[Monitor]

  wire[LilaWsServer].start
}

final class LilaWsServer(
    nettyServer: netty.NettyServer,
    handlers: LilaHandler, // must be eagerly instantiated!
    lila: Lila,
    monitor: Monitor,
    scheduler: Scheduler
)(implicit ec: ExecutionContext) {

  def start(): Unit = {

    monitor.start()

    Bus.internal.subscribe(
      "users",
      {
        case ipc.LilaIn.ConnectUser(_, true) => // don't send to lila
        case msg: ipc.LilaIn.Site            => lila.emit.site(msg)
      }
    )

    scheduler.scheduleWithFixedDelay(30.seconds, 7211.millis) { () =>
      Bus.publish(_.all, ipc.ClientCtrl.Broom(nowSeconds - 30))
    }

    nettyServer.start() // blocks
  }
}

object LilaWsServer {

  val connections = new java.util.concurrent.atomic.AtomicInteger
} 
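Boot above pulls a Scheduler and an ExecutionContext straight off the typed ActorSystem, and LilaWsServer.start() uses them for the periodic "broom" task. A minimal sketch of just that scheduling piece, with a placeholder task in place of the bus publish:

import akka.actor.typed.scaladsl.Behaviors
import akka.actor.typed.{ActorSystem, Scheduler}

import scala.concurrent.ExecutionContext
import scala.concurrent.duration._

object SchedulerExample {
  def main(args: Array[String]): Unit = {
    val system = ActorSystem[Nothing](Behaviors.empty, "scheduler-example")
    implicit val ec: ExecutionContext = system.executionContext
    val scheduler: Scheduler = system.scheduler

    // Same scheduleWithFixedDelay API as LilaWsServer.start(), with a placeholder task.
    val tick = scheduler.scheduleWithFixedDelay(1.second, 5.seconds) { () =>
      println("periodic tick")
    }
    // tick.cancel() and system.terminate() would shut this down again.
  }
}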
Example 36
Source File: ClusterStatusTrackerMain.scala    From Pi-Akka-Cluster   with Apache License 2.0
package akkapi.cluster

import akka.NotUsed
import akka.actor.typed.scaladsl.adapter.TypedActorSystemOps
import akka.actor.typed.scaladsl.{ActorContext, Behaviors}
import akka.actor.typed.{ActorSystem, Behavior, Terminated}
import akka.management.cluster.bootstrap.ClusterBootstrap
import akka.management.scaladsl.AkkaManagement

object Main {
  def apply(settings: Settings): Behavior[NotUsed] = Behaviors.setup { context =>
    val ledStripDriver = context.spawn(LedStripDriver(settings), "led-strip-driver")
    val ledStripController = context.spawn(LedStripVisualiser(settings, ledStripDriver), "led-strip-controller")
    val clusterStatusTracker =
      context.spawn(
        ClusterStatusTracker(
          settings,
          Some(contextToClusterSingleton(settings))
        ),
        "cluster-status-tracker"
      )
    clusterStatusTracker ! ClusterStatusTracker.SubscribeVisualiser(ledStripController)
    Behaviors.receiveSignal {
      case (_, Terminated(_)) =>
        Behaviors.stopped
    }
  }

  private def contextToClusterSingleton(settings: Settings): ActorContextToSingletonBehavior  =
    (context: ActorContext[ClusterStatusTracker.ClusterEvent]) => PiClusterSingleton(settings, context.self)

  type ActorContextToSingletonBehavior = ActorContext[ClusterStatusTracker.ClusterEvent] => Behavior[PiClusterSingleton.Command]
}

object ClusterStatusTrackerMain {
  def main(args: Array[String]): Unit = {
    System.loadLibrary("rpi_ws281x")

    val settings = Settings()
    val config = settings.config
    val system = ActorSystem[NotUsed](Main(settings), settings.actorSystemName, config)

    // Start the Akka Management extension and Cluster Bootstrap
    AkkaManagement(system.toClassic).start()
    ClusterBootstrap(system.toClassic).start()
  }
} 
Example 38
Source File: ClusterStatusTrackerMain.scala    From Pi-Akka-Cluster   with Apache License 2.0
package akkapi.cluster

import akka.NotUsed
import akka.actor.typed.scaladsl.adapter.TypedActorSystemOps
import akka.actor.typed.scaladsl.{ActorContext, Behaviors, Routers}
import akka.actor.typed.{ActorSystem, Behavior, Terminated}
import akka.cluster.typed.{ClusterSingleton, SingletonActor}
import akka.management.scaladsl.AkkaManagement
import akkapi.cluster.sudoku.{SudokuSolverSettings, SudokuSolver, SudokuProblemSender}

object Main {
  def apply(settings: Settings): Behavior[NotUsed] = Behaviors.setup { context =>
    val sudokuSolverSettings = SudokuSolverSettings("sudokusolver.conf")
    // Start ClusterStatusTracker & LedStripVisualiser
    val ledStripDriver = context.spawn(LedStripDriver(settings), "led-strip-driver")
    val ledStripController = context.spawn(LedStripVisualiser(settings, ledStripDriver), "led-strip-controller")
    val clusterStatusTracker =
      context.spawn(
        ClusterStatusTracker(
          settings,
          Some(contextToClusterSingleton(settings))
        ),
        "cluster-status-tracker"
      )
    clusterStatusTracker ! ClusterStatusTracker.SubscribeVisualiser(ledStripController)

    // Start SudokuSolver: we'll run one instance per cluster node
    context.spawn(SudokuSolver(ledStripDriver, sudokuSolverSettings), "sudoku-solver")
    // We'll use a [cluster-aware] group router
    val sudokuSolverGroup = context.spawn(Routers.group(SudokuSolver.Key).withRoundRobinRouting(), "sudoku-solvers")
    // And run one instance of the Sudoku problem sender in the cluster
    ClusterSingleton(context.system).init(SingletonActor(SudokuProblemSender(sudokuSolverGroup, sudokuSolverSettings), "sudoku-problem-sender"))

    Behaviors.receiveSignal {
      case (_, Terminated(_)) =>
        Behaviors.stopped
    }
  }

  private def contextToClusterSingleton(settings: Settings): ActorContextToSingletonBehavior  =
    (context: ActorContext[ClusterStatusTracker.ClusterEvent]) => PiClusterSingleton(settings, context.self)

  type ActorContextToSingletonBehavior = ActorContext[ClusterStatusTracker.ClusterEvent] => Behavior[PiClusterSingleton.Command]
}

object ClusterStatusTrackerMain {
  def main(args: Array[String]): Unit = {
    System.loadLibrary("rpi_ws281x")

    val settings = Settings()
    val config = settings.config
    val system = ActorSystem[NotUsed](Main(settings), settings.actorSystemName, config)
    val classicSystem = system.toClassic

    // Start Akka HTTP Management extension
    AkkaManagement(classicSystem).start()
  }
} 
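The solver wiring above depends on the receptionist: each SudokuSolver registers under SudokuSolver.Key, and Routers.group routes incoming problems across whatever is currently registered, cluster-wide. A minimal, single-node sketch of that group-router pattern, with a hypothetical Job message and WorkerKey:

import akka.NotUsed
import akka.actor.typed.receptionist.{Receptionist, ServiceKey}
import akka.actor.typed.scaladsl.{Behaviors, Routers}
import akka.actor.typed.{ActorSystem, Behavior}

object GroupRouterExample {
  final case class Job(id: Int)
  val WorkerKey: ServiceKey[Job] = ServiceKey[Job]("worker")

  def worker(name: String): Behavior[Job] = Behaviors.setup { ctx =>
    // Register with the receptionist so group routers can discover this worker.
    ctx.system.receptionist ! Receptionist.Register(WorkerKey, ctx.self)
    Behaviors.receiveMessage { job =>
      ctx.log.info(s"$name handling job ${job.id}")
      Behaviors.same
    }
  }

  def main(args: Array[String]): Unit = {
    val root: Behavior[NotUsed] = Behaviors.setup { ctx =>
      ctx.spawn(worker("w1"), "w1")
      ctx.spawn(worker("w2"), "w2")
      val router = ctx.spawn(Routers.group(WorkerKey).withRoundRobinRouting(), "workers")
      // Registration is asynchronous, so the very first messages can still land in
      // dead letters before the router receives its first listing.
      (1 to 4).foreach(i => router ! Job(i))
      Behaviors.empty
    }
    ActorSystem(root, "group-router-example") // runs until terminated
  }
}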
Example 39
Source File: DisplayClusterStatusMain.scala    From Pi-Akka-Cluster   with Apache License 2.0
package com.lightbend.akka_oled

import akka.NotUsed
import akka.actor.typed.scaladsl.Behaviors
import akka.actor.typed.{ActorSystem, Behavior, Terminated}
import akka.management.scaladsl.AkkaManagement
import akkapi.cluster.{ClusterStatusTracker, OledClusterVisualizer, OledDriver, Settings}


object Main {
  def apply(settings: Settings): Behavior[NotUsed] = Behaviors.setup { context =>
    val oledDriver = context.spawn(OledDriver(settings), "oled-driver")
    val clusterView = context.spawn(OledClusterVisualizer(0, settings, oledDriver), "oled-cluster-view")
    oledDriver ! OledDriver.RegisterView("Cluster State", 0)
    val clusterStatusTracker = context.spawn(ClusterStatusTracker(settings, None), "cluster-status-tracker")
    clusterStatusTracker ! ClusterStatusTracker.SubscribeVisualiser(clusterView)
    Behaviors.receiveSignal {
      case (_, Terminated(_)) =>
        Behaviors.stopped
    }
  }
}

object DisplayClusterStatusMain {
  def main(args: Array[String]): Unit = {
    val settings = Settings()
    val config = settings.config
    val system = ActorSystem[NotUsed](Main(settings), "akka-oled", config)

    // Start Akka HTTP Management extension
    AkkaManagement(system).start()
  }
} 
Example 40
Source File: Main.scala    From Pi-Akka-Cluster   with Apache License 2.0
package com.lightbend.akka_oled

import akka.NotUsed
import akka.actor.typed.scaladsl.Behaviors
import akka.actor.typed.scaladsl.adapter.TypedActorSystemOps
import akka.actor.typed.{ActorSystem, Behavior, Terminated}
import akka.cluster.sharding.typed.scaladsl.{ClusterSharding, Entity}
import akka.http.scaladsl.Http
import akka.http.scaladsl.marshallers.sprayjson.SprayJsonSupport
import akka.management.scaladsl.AkkaManagement
import akka.persistence.typed.PersistenceId
import akka.stream.Materializer
import akkapi.cluster.{ClusterStatusTracker, OledClusterVisualizer, OledDriver, Settings}
import spray.json._

import scala.concurrent.ExecutionContextExecutor

object Main extends SprayJsonSupport with DefaultJsonProtocol {

  case class AddPoints(points: Int)

  implicit val transactionFormat = jsonFormat1(AddPoints)

  def apply(settings: Settings): Behavior[NotUsed] = Behaviors.setup { ctx =>
    implicit val system = ctx.system
    implicit val untypedSystem: akka.actor.ActorSystem = ctx.system.toClassic
    implicit val ec: ExecutionContextExecutor = ctx.system.executionContext

    val oledDriver = ctx.spawn(OledDriver(settings), "oled-driver")
    oledDriver ! OledDriver.RegisterView("Cluster State", 0)
    oledDriver ! OledDriver.RegisterView("Sharding State", 1)

    val clusterView = ctx.spawn(OledClusterVisualizer(0, settings, oledDriver), "oled-cluster-view")
    val clusterStatusTracker = ctx.spawn(ClusterStatusTracker(settings, None), "cluster-status-tracker")
    clusterStatusTracker ! ClusterStatusTracker.SubscribeVisualiser(clusterView)

    val shardVisualizer = ctx.spawn(OledShardingVisualizer(1, oledDriver), "oled-sharding-view")

    val sharding = ClusterSharding(ctx.system)
    sharding.init(Entity(typeKey = ClientEntity.TypeKey) { entityContext =>
      ClientEntity(entityContext.entityId,
        PersistenceId(entityContext.entityTypeKey.name, entityContext.entityId),
        shardVisualizer)
    })
    val tracker = ctx.spawn(ShardStateTracker(shardVisualizer), "oled-sharding-tracker")
    ctx.spawn(ShardStateScheduler(sharding.shardState, tracker), "oled-sharding-scheduler")

    val routes = new Routes(sharding)

    implicit val mat: Materializer = Materializer.createMaterializer(ctx.system.toClassic)
    Http()(ctx.system.toClassic).bindAndHandle(routes.route,
      settings.config.getString("cluster-node-configuration.external-ip"), 8080)

    Behaviors.receiveSignal {
      case (_, Terminated(_)) =>
        Behaviors.stopped
    }
  }
}

object DisplayClusterShardingMain {
  def main(args: Array[String]): Unit = {
    val settings = Settings()
    val system = ActorSystem[NotUsed](Main(settings), "akka-oled", settings.config)

    // Start Akka HTTP Management extension
    AkkaManagement(system).start()
  }
} 
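The sharding setup in the example above boils down to ClusterSharding(ctx.system).init(Entity(TypeKey)(...)). A minimal sketch of that call with a hypothetical, non-persistent Counter entity (the real example uses an event-sourced ClientEntity plus a visualizer):

import akka.actor.typed.scaladsl.Behaviors
import akka.actor.typed.{ActorSystem, Behavior}
import akka.cluster.sharding.typed.scaladsl.{ClusterSharding, Entity, EntityTypeKey}

object ShardingSketch {
  sealed trait Command
  final case class Increment(amount: Int) extends Command

  val TypeKey: EntityTypeKey[Command] = EntityTypeKey[Command]("Counter")

  private def counter(entityId: String, value: Int): Behavior[Command] =
    Behaviors.receiveMessage { case Increment(amount) =>
      counter(entityId, value + amount)
    }

  // Assumes the ActorSystem is (or will become) a member of a cluster.
  def init(system: ActorSystem[_]): Unit = {
    val sharding = ClusterSharding(system)
    sharding.init(Entity(TypeKey)(entityContext => counter(entityContext.entityId, 0)))
    // Entities are then addressed via:
    //   sharding.entityRefFor(TypeKey, "some-id") ! Increment(1)
  }
}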
Example 41
Source File: Routes.scala    From Pi-Akka-Cluster   with Apache License 2.0
package com.lightbend.akka_oled

import akka.actor.typed.ActorSystem
import akka.cluster.sharding.typed.scaladsl.ClusterSharding
import akka.http.scaladsl.marshallers.sprayjson.SprayJsonSupport
import akka.http.scaladsl.server.Directives._
import akka.http.scaladsl.server.Route
import akka.util.Timeout
import com.lightbend.akka_oled.ClientEntity.{Get, PostPoints}
import com.lightbend.akka_oled.Main.AddPoints

import scala.concurrent.duration._

object Routes {

  case class NodeStatus(status: String)

}

class Routes(sharding: ClusterSharding)(implicit system: ActorSystem[_]) extends SprayJsonSupport {
  implicit val timeout: Timeout = 8.seconds
  implicit val scheduler = system.scheduler

  lazy val route: Route =
    pathPrefix("user" / "[0-9a-zA-Z]+".r) { username =>
      concat(
        get {
          val entityRef = sharding.entityRefFor(ClientEntity.TypeKey, username)
          onSuccess(entityRef ? Get(username)) {
            value: Int => complete(value.toString + "\n")
          }
        },
        post {
          entity(as[AddPoints]) { transaction =>
            val entityRef = sharding.entityRefFor(ClientEntity.TypeKey, username)
            onSuccess(entityRef ? PostPoints(username, transaction.points)) {
              result => complete(result)
            }
          }
        }
      )
    }

} 
Example 42
Source File: ClusterStatusTrackerMain.scala    From Pi-Akka-Cluster   with Apache License 2.0
package akkapi.cluster

import akka.NotUsed
import akka.actor.typed.scaladsl.Behaviors
import akka.actor.typed.{ActorSystem, Behavior, Terminated}
import akka.management.scaladsl.AkkaManagement
import akka.actor.typed.scaladsl.adapter.TypedActorSystemOps

object Main {
  def apply(settings: Settings): Behavior[NotUsed] = Behaviors.setup { context =>
    val ledStripDriver = context.spawn(LedStripDriver(settings), "led-strip-driver")
    val ledStripController = context.spawn(LedStripVisualiser(settings, ledStripDriver), "led-strip-controller")
    val clusterStatusTracker = context.spawn(ClusterStatusTracker(settings, None), "cluster-status-tracker")
    clusterStatusTracker ! ClusterStatusTracker.SubscribeVisualiser(ledStripController)
    Behaviors.receiveSignal {
      case (_, Terminated(_)) =>
        Behaviors.stopped
    }
  }
}

object ClusterStatusTrackerMain {
  def main(args: Array[String]): Unit = {
    System.loadLibrary("rpi_ws281x")

    val settings = Settings()
    val config = settings.config
    val system = ActorSystem[NotUsed](Main(settings), settings.actorSystemName, config)

    // Start Akka HTTP Management extension
    AkkaManagement(system.toClassic).start()
  }
}