akka.actor.CoordinatedShutdown Scala Examples

The following examples show how to use akka.actor.CoordinatedShutdown. Each example lists the source file, the project it comes from, and its license, so you can trace it back to the original code.
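Before the individual examples, here is a minimal sketch of the pattern they all share: obtain the CoordinatedShutdown extension for an ActorSystem, register tasks against named shutdown phases with addTask (each task returns a Future[Done]), and optionally trigger the whole sequence with run and an application-defined Reason. This sketch is not taken from any of the projects below; the object name, the ServiceFinished reason and the "release-resources" task name are made up for illustration.

import akka.Done
import akka.actor.{ActorSystem, CoordinatedShutdown}

import scala.concurrent.Future

object CoordinatedShutdownSketch extends App {
  implicit val system: ActorSystem = ActorSystem("sketch")
  import system.dispatcher

  // Application-defined reason; it can later be inspected via CoordinatedShutdown(system).shutdownReason()
  case object ServiceFinished extends CoordinatedShutdown.Reason

  val shutdown = CoordinatedShutdown(system)

  // Tasks are registered per phase; a phase completes once all of its tasks' futures complete
  shutdown.addTask(CoordinatedShutdown.PhaseBeforeServiceUnbind, "release-resources") { () =>
    Future {
      // close connections, flush buffers, stop consumers, etc.
      Done
    }
  }

  // Running the shutdown executes all phases in order and ultimately terminates the ActorSystem
  shutdown.run(ServiceFinished)
}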
Example 1
Source File: AkkaManagementModule.scala (from lagom, Apache License 2.0)
package com.lightbend.lagom.internal.javadsl.server

import akka.actor.ActorSystem
import akka.actor.CoordinatedShutdown
import com.lightbend.lagom.internal.akka.management.AkkaManagementTrigger
import com.typesafe.config.Config
import javax.inject.Inject
import javax.inject.Provider
import javax.inject.Singleton
import play.api.inject.Binding
import play.api.inject.Module
import play.api.Configuration
import play.api.Environment
import play.api.Mode

import scala.concurrent.ExecutionContext

private[lagom] class AkkaManagementModule extends Module {
  override def bindings(environment: Environment, configuration: Configuration): Seq[Binding[_]] = {
    // The trigger must be eager because it's often not required by anyone as a dependency to
    // be injected and yet it must be started anyway
    Seq(bind[AkkaManagementTrigger].toProvider[AkkaManagementProvider].eagerly())
  }
}

@Singleton
private[lagom] class AkkaManagementProvider @Inject() (
    config: Config,
    actorSystem: ActorSystem,
    coordinatedShutdown: CoordinatedShutdown,
    environment: Environment,
    executionContext: ExecutionContext
) extends Provider[AkkaManagementTrigger] {
  override def get(): AkkaManagementTrigger = {
    val instance = new AkkaManagementTrigger(config, actorSystem, coordinatedShutdown)(executionContext)
    if (environment.mode == Mode.Prod) {
      instance.start()
    }
    instance
  }
} 
Example 2
Source File: DiscordClientActor.scala (from AckCord, MIT License)
package ackcord

import scala.concurrent.duration.FiniteDuration
import scala.concurrent.{Future, Promise}

import ackcord.requests.Ratelimiter
import akka.Done
import akka.actor.CoordinatedShutdown
import akka.actor.typed._
import akka.actor.typed.scaladsl._
import akka.actor.typed.scaladsl.adapter._
import akka.pattern.gracefulStop

class DiscordClientActor(
    ctx: ActorContext[DiscordClientActor.Command],
    shardBehaviors: Seq[Behavior[DiscordShard.Command]],
    cache: Cache
) extends AbstractBehavior[DiscordClientActor.Command](ctx) {
  import DiscordClientActor._
  implicit val system: ActorSystem[Nothing] = context.system
  import system.executionContext

  val shards: Seq[ActorRef[DiscordShard.Command]] =
    shardBehaviors.zipWithIndex.map(t => context.spawn(t._1, s"Shard${t._2}"))

  var shardShutdownManager: ActorRef[DiscordShard.StopShard.type] = _

  val musicManager: ActorRef[MusicManager.Command] = context.spawn(MusicManager(cache), "MusicManager")

  val rateLimiter: ActorRef[Ratelimiter.Command] = context.spawn(Ratelimiter(), "Ratelimiter")

  private val shutdown = CoordinatedShutdown(system.toClassic)

  shutdown.addTask("service-stop", "stop-discord") { () =>
    gracefulStop(shardShutdownManager.toClassic, shutdown.timeout("service-stop"), DiscordShard.StopShard)
      .map(_ => Done)
  }

  def login(): Unit = {
    require(shardShutdownManager == null, "Already logged in")
    shardShutdownManager = context.spawn(ShardShutdownManager(shards), "ShardShutdownManager")

    DiscordShard.startShards(shards)
  }

  def logout(timeout: FiniteDuration): Future[Boolean] = {
    import akka.actor.typed.scaladsl.adapter._

    val promise = Promise[Boolean]

    require(shardShutdownManager != null, "Not logged in")
    promise.completeWith(gracefulStop(shardShutdownManager.toClassic, timeout, DiscordShard.StopShard))

    promise.future
  }

  override def onMessage(msg: Command): Behavior[Command] = {
    msg match {
      case DiscordClientActor.Login => login()
      case Logout(timeout, replyTo) => replyTo ! LogoutReply(logout(timeout))
      case GetShards(replyTo)       => replyTo ! GetShardsReply(shards)
      case GetMusicManager(replyTo) => replyTo ! GetMusicManagerReply(musicManager)
      case GetRatelimiter(replyTo)  => replyTo ! GetRatelimiterReply(rateLimiter)
    }

    Behaviors.same
  }
}
object DiscordClientActor {
  def apply(
      shardBehaviors: Seq[Behavior[DiscordShard.Command]],
      cache: Cache
  ): Behavior[Command] = Behaviors.setup(ctx => new DiscordClientActor(ctx, shardBehaviors, cache))

  sealed trait Command

  case object Login                                                          extends Command
  case class Logout(timeout: FiniteDuration, replyTo: ActorRef[LogoutReply]) extends Command
  case class GetShards(replyTo: ActorRef[GetShardsReply])                    extends Command
  case class GetMusicManager(replyTo: ActorRef[GetMusicManagerReply])        extends Command
  case class GetRatelimiter(replyTo: ActorRef[GetRatelimiterReply])          extends Command

  case class LogoutReply(done: Future[Boolean])
  case class GetShardsReply(shards: Seq[ActorRef[DiscordShard.Command]])
  case class GetMusicManagerReply(musicManager: ActorRef[MusicManager.Command])
  case class GetRatelimiterReply(ratelimiter: ActorRef[Ratelimiter.Command])
} 
Example 3
Source File: ClusterSoakMain.scala (from akka-kubernetes-tests, Apache License 2.0)
package akka.cluster.soak
import akka.actor.CoordinatedShutdown.Reason
import akka.actor.{ActorSystem, CoordinatedShutdown}
import akka.discovery.{Discovery, Lookup}
import akka.dispatch.Dispatchers
import akka.stream.ActorMaterializer
import com.lightbend.akka.diagnostics.{StarvationDetector, StarvationDetectorSettings}
import org.scalatest._
import org.scalatest.events.{Event, TestFailed}

import scala.concurrent.duration._
import scala.util.{Failure, Success}

object TestSuccess extends Reason
object TestFailure extends Reason
object TestException extends Reason

object ClusterSoakMain extends App {

  implicit val system = ActorSystem()
  implicit val mat = ActorMaterializer()
  import system.dispatcher
  val log = system.log

  system.log.info("Starting cluster soak tests")

  val serviceDiscovery = Discovery(system).discovery
  val resolveTimeout = 5.seconds

  val dnsDispatcher = system.dispatchers.lookup("dns-dispatcher")
  StarvationDetector.checkExecutionContext(dnsDispatcher, system.log, StarvationDetectorSettings(
    checkInterval = 1.second,
    initialDelay = 5.seconds,
    maxDelayWarningThreshold = 100.millis,
    warningInterval = 10.seconds), () => false)

  @volatile var failed = false
  val testResult = for {
    endpoints <- serviceDiscovery.lookup(Lookup("cluster-soak").withPortName("http").withProtocol("tcp"),
                                         resolveTimeout)
  } yield {
    log.info("Endpoints {}", endpoints)
    val reporter = new Reporter() {
      override def apply(event: Event): Unit =
        event match {
          case tf: TestFailed =>
            failed = true
            log.error("TestFailed({}): {}", tf.testName, tf.message)
          case _ =>
        }
    }
    new ClusterSoakSpec(endpoints).run(
      None,
      Args(reporter, Stopper.default, Filter()),
    )
  }

  testResult.onComplete {
    case Success(r) =>
      val result = if (r.succeeds()) TestSuccess else TestFailure
      log.info("Status: {}. Success: {}. Result {}", r, r.succeeds(), result)
      CoordinatedShutdown(system).run(result)
    case Failure(t) =>
      log.error(t, "Failed to run tests")
      CoordinatedShutdown(system).run(TestException)
  }
} 
Example 4
Source File: ZipkinModuleSpec.scala (from play-zipkin-tracing, Apache License 2.0)
package brave.play.module

import java.util.Collections

import akka.actor.CoordinatedShutdown
import brave.Tracing
import brave.play.{ZipkinTraceService, ZipkinTraceServiceLike}
import org.scalatest.AsyncFlatSpec
import play.api.inject.guice.GuiceApplicationBuilder
import zipkin2.reporter.Sender
import zipkin2.reporter.okhttp3.OkHttpSender


class ZipkinModuleSpec extends AsyncFlatSpec {
  val injector = new GuiceApplicationBuilder()
    .bindings(new ZipkinModule)
    .injector()

  it should "provide an okhttp sender" in {
    val sender = injector.instanceOf[Sender]
    assert(sender.isInstanceOf[OkHttpSender])
  }

  it should "eventually close the sender" in {
    // provisioning the sender so we can tell if it is closed on shutdown
    val sender = injector.instanceOf[Sender]

    // stopping the application should close the sender!
    injector.instanceOf[CoordinatedShutdown].run(CoordinatedShutdown.UnknownReason) map { _ =>
      val thrown = intercept[Exception] {
        sender.sendSpans(Collections.emptyList[Array[Byte]]).execute()
      }
      assert(thrown.getMessage === "closed")
    }
  }

  it should "provide a tracing component" in instanceOfTracing { tracing =>
    assert(Tracing.current() != null)
    assert(Tracing.current() == tracing)
  }

  it should "eventually close the tracing component" in instanceOfTracing { tracing =>
    // stopping the application should close the tracing component!
    injector.instanceOf[CoordinatedShutdown].run(CoordinatedShutdown.UnknownReason) map { _ =>
      assert(Tracing.current() == null)
    }
  }

  private def instanceOfTracing[A](test: Tracing => A): A = {
    val tracing = injector.instanceOf[Tracing]
    try {
      test(tracing)
    } finally {
      // Ensures there is no active Tracing object
      tracing.close()
    }
  }

  it should "provide a zipkin trace service" in {
    // TODO: dies due to missing dispatcher
    val service = injector.instanceOf[ZipkinTraceServiceLike]
    assert(service.isInstanceOf[ZipkinTraceService])
  }
} 
Example 5
Source File: PgnDatabase.scala (from lila-openingexplorer, GNU Affero General Public License v3.0)
package lila.openingexplorer

import akka.actor.CoordinatedShutdown
import fm.last.commons.kyoto.factory.{ Compressor, PageComparator }
import javax.inject.{ Inject, Singleton }

import chess.format.Forsyth
import chess.format.pgn.{ Move, ParsedPgn, Pgn, Tag, TagType, Tags, Turn }
import chess.Replay

@Singleton
final class PgnDatabase @Inject() (
    config: Config,
    shutdown: CoordinatedShutdown
)(implicit ec: scala.concurrent.ExecutionContext) {

  private val db = Util.wrapLog(
    "Loading PGN database...",
    "PGN database loaded!"
  ) {
    Kyoto
      .builder(config.explorer.pgn.kyoto)
      .compressor(Compressor.LZMA)
      .pageComparator(PageComparator.LEXICAL)
      .buildAndOpen
  }

  private val relevantTags: Set[TagType] =
    Tag.tagTypes.toSet diff Set(Tag.ECO, Tag.Opening, Tag.Variant)

  def get(gameId: String): Option[String] = Option(db.get(gameId))

  def store(gameId: String, parsed: ParsedPgn, replay: Replay): Boolean = {

    val tags = parsed.tags.value.filter { tag => relevantTags contains tag.name }
    val fenSituation = tags find (_.name == Tag.FEN) flatMap {
      case Tag(_, fen) => Forsyth <<< fen
    }
    val pgnMoves = replay.chronoMoves
      .foldLeft(replay.setup) {
        case (game, moveOrDrop) => moveOrDrop.fold(game.apply, game.applyDrop)
      }
      .pgnMoves
    val moves       = if (fenSituation.exists(_.situation.color.black)) ".." +: pgnMoves else pgnMoves
    val initialTurn = fenSituation.map(_.fullMoveNumber) getOrElse 1
    val pgn         = Pgn(Tags(tags), turns(moves, initialTurn))

    db.putIfAbsent(gameId, pgn.toString)
  }

  private def turns(moves: Vector[String], from: Int): List[Turn] =
    (moves grouped 2).zipWithIndex.toList map {
      case (moves, index) =>
        Turn(
          number = index + from,
          white = moves.headOption filter (".." !=) map { Move(_) },
          black = moves lift 1 map { Move(_) }
        )
    } filterNot (_.isEmpty)

  def delete(gameId: String) = db.remove(gameId)

  def count = db.recordCount()

  shutdown.addTask(CoordinatedShutdown.PhaseServiceStop, "close master db") { () =>
    scala.concurrent.Future {
      db.close()
      akka.Done
    }
  }
} 
Example 6
Source File: GameInfoDatabase.scala (from lila-openingexplorer, GNU Affero General Public License v3.0)
package lila.openingexplorer

import fm.last.commons.kyoto.factory.{ Compressor, PageComparator }
import javax.inject.{ Inject, Singleton }
import akka.actor.CoordinatedShutdown

@Singleton
final class GameInfoDatabase @Inject() (
    config: Config,
    shutdown: CoordinatedShutdown
)(implicit ec: scala.concurrent.ExecutionContext) {

  private val db = Util.wrapLog(
    "Loading gameInfo database...",
    "GameInfo database loaded!"
  ) {
    Kyoto
      .builder(config.explorer.gameInfo.kyoto)
      .compressor(Compressor.LZMA)
      .pageComparator(PageComparator.LEXICAL)
      .buildAndOpen
  }

  def get(gameId: String): Option[GameInfo] =
    Option(db.get(gameId)) flatMap GameInfoDatabase.unpack

  def contains(gameId: String): Boolean = db.exists(gameId)

  def store(gameId: String, info: GameInfo): Boolean =
    db.putIfAbsent(gameId, GameInfoDatabase pack info)

  def count = db.recordCount()

  shutdown.addTask(CoordinatedShutdown.PhaseServiceStop, "close master db") { () =>
    scala.concurrent.Future {
      db.close()
      akka.Done
    }
  }
}

object GameInfoDatabase {

  def pack(info: GameInfo): String =
    List(
      info.white.name,
      info.white.rating,
      info.black.name,
      info.black.rating,
      info.year.fold("?")(_.toString)
    ) mkString "|"

  def unpack(str: String): Option[GameInfo] = str split '|' match {
    case Array(wn, wrS, bn, brS, yearS) =>
      for {
        wr <- wrS.toIntOption
        br <- brS.toIntOption
        year = yearS.toIntOption
      } yield GameInfo(
        white = GameInfo.Player(wn, wr),
        black = GameInfo.Player(bn, br),
        year = year
      )
    case _ => None
  }
} 
Example 7
Source File: HTTPServer.scala (from ForestFlow, Apache License 2.0)
package ai.forestflow.serving.restapi

import ai.forestflow.serving.config.ApplicationEnvironment
import akka.Done
import akka.actor.CoordinatedShutdown.{PhaseServiceUnbind, Reason}
import akka.actor.SupervisorStrategy._
import akka.actor.{ActorRef, ActorSystem, CoordinatedShutdown}
import akka.cluster.Cluster
import akka.http.scaladsl.Http
import akka.http.scaladsl.server.Route
import akka.pattern.ask
import akka.stream.ActorMaterializer
import akka.util.Timeout
import ai.forestflow.akka.Supervisor
import ai.forestflow.domain.ServableRoutes.GetProxyRoutes
import ai.forestflow.utils.ThrowableImplicits._

import scala.concurrent.Await
import scala.concurrent.duration._
import scala.language.postfixOps
import scala.util.{Failure, Success}

//noinspection TypeAnnotation
object HTTPServer {

  private final case object BindFailure extends Reason

}
//noinspection TypeAnnotation
class HTTPServer(servableProxyRef: ActorRef)(implicit system: ActorSystem, cluster: Cluster, shutdown: CoordinatedShutdown) {
  import HTTPServer._
  import system.log

  private implicit val materializer = ActorMaterializer()
  private implicit val executionContext = system.dispatcher
  implicit lazy val timeout: Timeout = Timeout(ApplicationEnvironment.HTTP_COMMAND_TIMEOUT_SECS seconds)

  private val address = ApplicationEnvironment.HTTP_BIND_ADDRESS
  private val port = ApplicationEnvironment.HTTP_PORT

  val routesSupervisor = system.actorOf(Supervisor.props {
    case _: ArithmeticException => Resume
    case _: Exception => Restart
  })


  private val servableRoutesActor = Await.result(
    routesSupervisor
      .ask(ServableRoutes.props(servableProxyRef))
      .mapTo[ActorRef], ApplicationEnvironment.HTTP_COMMAND_TIMEOUT_SECS second)

  servableRoutesActor.ask(GetProxyRoutes()).onComplete {
    case Success(r: Route) =>
      val bindingFuture = Http().bindAndHandle(r, address, port)
      bindingFuture.onComplete {
        case Success(bound) =>
          log.info(s"AKKA HTTP Server online at http://${bound.localAddress.getHostString}:${bound.localAddress.getPort}/")
          shutdown.addTask(PhaseServiceUnbind, "api.unbind") { () =>
            bound.terminate(5 seconds).map(_ => Done)
          }
        case Failure(e) =>
          log.error(s"AKKA HTTP Server could not start! Shutting down... ${e.printableStackTrace}")
          shutdown.run(BindFailure)
      }

    case Failure(e) =>
      log.error(s"Couldn't get dynamic HTTP routes from ServableRoutes actor! Shutting down... ${e.printableStackTrace}")
      shutdown.run(BindFailure)
  }


} 
Example 8
Source File: CouchbaseClusteredPersistentEntitySpec.scala (from akka-persistence-couchbase, Apache License 2.0)
package com.lightbend.lagom.scaladsl.persistence.couchbase

import java.io.File

import akka.actor.{ActorSystem, CoordinatedShutdown}
import akka.persistence.couchbase.CouchbaseClusterConnection
import akka.stream.{ActorMaterializer, Materializer}
import com.lightbend.lagom.internal.persistence.couchbase.TestConfig
import com.lightbend.lagom.internal.persistence.testkit.AwaitPersistenceInit.awaitPersistenceInit
import com.lightbend.lagom.scaladsl.api.ServiceLocator
import com.lightbend.lagom.scaladsl.api.ServiceLocator.NoServiceLocator
import com.lightbend.lagom.scaladsl.persistence.multinode.{
  AbstractClusteredPersistentEntityConfig,
  AbstractClusteredPersistentEntitySpec
}
import com.lightbend.lagom.scaladsl.persistence.{ReadSideProcessor, TestEntity}
import com.lightbend.lagom.scaladsl.playjson.JsonSerializerRegistry
import com.typesafe.config.Config
import play.api.{Configuration, Environment, Mode}
import play.api.inject.DefaultApplicationLifecycle

import scala.concurrent.{ExecutionContext, Future}

object CouchbaseClusteredPersistentEntityConfig extends AbstractClusteredPersistentEntityConfig {
  override def additionalCommonConfig(databasePort: Int): Config =
    TestConfig.persistenceConfig
}

class CouchbaseClusteredPersistentEntitySpecMultiJvmNode1 extends CouchbaseClusteredPersistentEntitySpec
class CouchbaseClusteredPersistentEntitySpecMultiJvmNode2 extends CouchbaseClusteredPersistentEntitySpec
class CouchbaseClusteredPersistentEntitySpecMultiJvmNode3 extends CouchbaseClusteredPersistentEntitySpec

class CouchbaseClusteredPersistentEntitySpec
    extends AbstractClusteredPersistentEntitySpec(CouchbaseClusteredPersistentEntityConfig) {
  import com.lightbend.lagom.scaladsl.persistence.couchbase.CouchbaseClusteredPersistentEntityConfig._

  override protected def atStartup(): Unit = {
    runOn(node1) {
      CouchbaseClusterConnection.connect().cleanUp().close()
      awaitPersistenceInit(system)
    }
    enterBarrier("couchbase-started")

    super.atStartup()
  }

  lazy val defaultApplicationLifecycle = new DefaultApplicationLifecycle

  override lazy val components: CouchbasePersistenceComponents =
    new CouchbasePersistenceComponents {
      override def actorSystem: ActorSystem = system
      override def executionContext: ExecutionContext = system.dispatcher
      override def materializer: Materializer = ActorMaterializer()(system)
      override def configuration: Configuration = Configuration(system.settings.config)
      override def serviceLocator: ServiceLocator = NoServiceLocator
      override def environment: Environment = Environment(new File("."), getClass.getClassLoader, Mode.Test)
      override def jsonSerializerRegistry: JsonSerializerRegistry = ???
      override def coordinatedShutdown: CoordinatedShutdown = CoordinatedShutdown(system)
    }

  def testEntityReadSide = new TestEntityReadSide(components.actorSystem, components.couchbase)

  override protected def readSideProcessor: () => ReadSideProcessor[TestEntity.Evt] =
    () => new TestEntityReadSide.TestEntityReadSideProcessor(system, components.couchbaseReadSide)

  override protected def getAppendCount(id: String): Future[Long] = testEntityReadSide.getAppendCount(id)
} 
Example 9
Source File: LeaseTestSuite.scala (from akka-management, Apache License 2.0)
package akka.coordination.lease.kubernetes

import akka.actor.{ ActorSystem, CoordinatedShutdown }
import org.scalatest.{ Args, Filter, Reporter, Stopper }
import org.scalatest.events.{ Event, TestFailed }

import scala.util.{ Failure, Success, Try }

object LeaseTestSuite {

  def main(args: Array[String]): Unit = {
    val as = ActorSystem("LeaseTestSuite")
    val log = as.log
    log.info("Running test")

    val leaseSpec = new LeaseSpec {
      override def system: ActorSystem = as
    }
    @volatile var failed = false

    val reporter = new Reporter() {
      override def apply(event: Event): Unit =
        event match {
          case tf: TestFailed =>
            failed = true
            log.error("TestFailed({}): {}", tf.testName, tf.message)
          case _ =>
        }
    }

    val testSuite = Try(leaseSpec.run(None, Args(reporter, Stopper.default, Filter())))
    log.info("Test complete {}", testSuite)
    testSuite match {
      case Success(_) if !failed =>
        log.info("Test succeeded")
        CoordinatedShutdown(as).run(TestPassedReason)
      case Success(_) if failed =>
        log.info("Test failed, see the logs")
        CoordinatedShutdown(as).run(TestFailedReason)
      case Failure(exception) =>
        log.error(exception, "Test exception")
        CoordinatedShutdown(as).run(TestFailedReason)
    }
  }

} 
Example 10
Source File: AkkaManagementTrigger.scala (from lagom, Apache License 2.0)
package com.lightbend.lagom.internal.akka.management

import akka.Done
import akka.actor.ActorSystem
import akka.actor.CoordinatedShutdown
import akka.actor.ExtendedActorSystem
import akka.management.scaladsl.AkkaManagement
import com.typesafe.config.Config
import play.api.Logger

import scala.concurrent.ExecutionContext
import scala.concurrent.Future

// The class header below is reconstructed from the way the trigger is instantiated in
// AkkaManagementProvider (Example 1) and AkkaManagementComponents (Example 11); the original file
// also defines the `logger`, `enabled` and `enabledRenderedValue` members referenced below, plus a
// `start()` method, none of which are reproduced in this excerpt.
private[lagom] class AkkaManagementTrigger(
    config: Config,
    system: ActorSystem,
    coordinatedShutdown: CoordinatedShutdown
)(implicit executionContext: ExecutionContext) {

  private[lagom] def forcedStart(requester: String): Future[Done] = {
    if (!enabled) {
      logger.warn(
        s"'lagom.akka.management.enabled' property is set to '$enabledRenderedValue', " +
          s"but Akka Management is being required to start by: '$requester'."
      )
    }

    doStart()
  }

  private def doStart(): Future[Done] = {
    val akkaManagement = AkkaManagement(system.asInstanceOf[ExtendedActorSystem])
    akkaManagement.start().map { _ =>
      // add a task to stop
      coordinatedShutdown.addTask(
        CoordinatedShutdown.PhaseBeforeServiceUnbind,
        "stop-akka-http-management"
      ) { () =>
        akkaManagement.stop()
      }
      Done
    }
  }
} 
Example 11
Source File: AkkaManagementComponents.scala (from lagom, Apache License 2.0)
package com.lightbend.lagom.scaladsl.server

import akka.actor.ActorSystem
import akka.actor.CoordinatedShutdown
import com.lightbend.lagom.internal.akka.management.AkkaManagementTrigger
import play.api.Environment
import play.api.Mode

import scala.concurrent.ExecutionContext

trait AkkaManagementComponents {
  def configuration: play.api.Configuration
  def actorSystem: ActorSystem
  def coordinatedShutdown: CoordinatedShutdown
  def environment: Environment

  def executionContext: ExecutionContext

  // eager initialization
  private[lagom] val akkaManagementTrigger: AkkaManagementTrigger = {
    val instance =
      new AkkaManagementTrigger(configuration.underlying, actorSystem, coordinatedShutdown)(executionContext)
    if (environment.mode == Mode.Prod) {
      instance.start()
    }
    instance
  }
} 
Example 12
Source File: OpenWhiskEvents.scala (from openwhisk, Apache License 2.0)
package org.apache.openwhisk.core.monitoring.metrics

import akka.actor.{ActorSystem, CoordinatedShutdown}
import akka.event.slf4j.SLF4JLogging
import akka.http.scaladsl.Http
import akka.kafka.ConsumerSettings
import akka.stream.ActorMaterializer
import com.typesafe.config.Config
import kamon.Kamon
import kamon.prometheus.PrometheusReporter
import org.apache.kafka.common.serialization.StringDeserializer
import pureconfig._
import pureconfig.generic.auto._

import scala.concurrent.duration.FiniteDuration
import scala.concurrent.{ExecutionContext, Future}

object OpenWhiskEvents extends SLF4JLogging {

  case class MetricConfig(port: Int,
                          enableKamon: Boolean,
                          ignoredNamespaces: Set[String],
                          renameTags: Map[String, String],
                          retry: RetryConfig)

  case class RetryConfig(minBackoff: FiniteDuration, maxBackoff: FiniteDuration, randomFactor: Double, maxRestarts: Int)

  def start(config: Config)(implicit system: ActorSystem,
                            materializer: ActorMaterializer): Future[Http.ServerBinding] = {
    implicit val ec: ExecutionContext = system.dispatcher

    val prometheusReporter = new PrometheusReporter()
    Kamon.registerModule("prometheus", prometheusReporter)
    Kamon.init(config)

    val metricConfig = loadConfigOrThrow[MetricConfig](config, "whisk.user-events")

    val prometheusRecorder = PrometheusRecorder(prometheusReporter, metricConfig)
    val recorders = if (metricConfig.enableKamon) Seq(prometheusRecorder, KamonRecorder) else Seq(prometheusRecorder)
    val eventConsumer = EventConsumer(eventConsumerSettings(defaultConsumerConfig(config)), recorders, metricConfig)

    CoordinatedShutdown(system).addTask(CoordinatedShutdown.PhaseBeforeServiceUnbind, "shutdownConsumer") { () =>
      eventConsumer.shutdown()
    }
    val port = metricConfig.port
    val api = new PrometheusEventsApi(eventConsumer, prometheusRecorder)
    val httpBinding = Http().bindAndHandle(api.routes, "0.0.0.0", port)
    httpBinding.foreach(_ => log.info(s"Started the http server on http://localhost:$port"))(system.dispatcher)
    httpBinding
  }

  def eventConsumerSettings(config: Config): ConsumerSettings[String, String] =
    ConsumerSettings(config, new StringDeserializer, new StringDeserializer)

  def defaultConsumerConfig(globalConfig: Config): Config = globalConfig.getConfig("akka.kafka.consumer")
} 
Example 13
Source File: SlickDbTestProvider.scala (from lagom, Apache License 2.0)
package com.lightbend.lagom.internal.persistence.jdbc

import akka.Done
import akka.actor.CoordinatedShutdown
import play.api.db.Databases

import scala.concurrent.ExecutionContext
import scala.concurrent.Future
import scala.util.Random

object SlickDbTestProvider {
  private val JNDIName   = "DefaultDS"
  private val JNDIDBName = "DefaultDB"

  private val AsyncExecConfig: AsyncExecutorConfig = new AsyncExecutorConfig {
    override val numThreads: Int         = 20
    override val minConnections: Int     = 20
    override val maxConnections: Int     = 20
    override val queueSize: Int          = 100
    override def registerMbeans: Boolean = false
  }

  def buildAndBindSlickDb(baseName: String, coordinatedShutdown: CoordinatedShutdown)(
      implicit executionContext: ExecutionContext
  ): Unit = {
    val dbName = s"${baseName}_${Random.alphanumeric.take(8).mkString}"
    val db     = Databases.inMemory(dbName, config = Map("jndiName" -> JNDIName))

    SlickDbProvider.buildAndBindSlickDatabase(db, AsyncExecConfig, JNDIDBName, coordinatedShutdown)
  }
} 
Example 14
Source File: SlickClusteredPersistentEntitySpec.scala (from lagom, Apache License 2.0)
package com.lightbend.lagom.scaladsl.persistence.slick

import akka.actor.ActorSystem
import akka.actor.CoordinatedShutdown
import akka.stream.Materializer
import akka.stream.SystemMaterializer
import com.lightbend.lagom.scaladsl.persistence.TestEntity.Evt
import com.lightbend.lagom.scaladsl.persistence.multinode.AbstractClusteredPersistentEntityConfig
import com.lightbend.lagom.scaladsl.persistence.multinode.AbstractClusteredPersistentEntitySpec
import com.lightbend.lagom.scaladsl.persistence.ReadSideProcessor
import com.lightbend.lagom.scaladsl.persistence.TestEntitySerializerRegistry
import com.lightbend.lagom.scaladsl.persistence.multinode.AbstractClusteredPersistentEntitySpec.Ports
import com.lightbend.lagom.scaladsl.playjson.JsonSerializerRegistry
import com.typesafe.config.Config
import com.typesafe.config.ConfigFactory
import org.h2.tools.Server
import play.api.Configuration
import play.api.Environment
import play.api.db.HikariCPComponents
import play.api.inject.ApplicationLifecycle
import play.api.inject.DefaultApplicationLifecycle

import scala.concurrent.Await
import scala.concurrent.ExecutionContext
import scala.concurrent.Future

object SlickClusteredPersistentEntityConfig extends AbstractClusteredPersistentEntityConfig {

  override def specPorts: Ports.SpecPorts = Ports.slickSpecPorts

  override def additionalCommonConfig: Config = ConfigFactory.parseString(
    s"""
      db.default.driver=org.h2.Driver
      db.default.url="jdbc:h2:tcp://localhost:${specPorts.database}/mem:JdbcClusteredPersistentEntitySpec"
    """
  )
}

class SlickClusteredPersistentEntitySpecMultiJvmNode1 extends SlickClusteredPersistentEntitySpec
class SlickClusteredPersistentEntitySpecMultiJvmNode2 extends SlickClusteredPersistentEntitySpec
class SlickClusteredPersistentEntitySpecMultiJvmNode3 extends SlickClusteredPersistentEntitySpec

class SlickClusteredPersistentEntitySpec
    extends AbstractClusteredPersistentEntitySpec(SlickClusteredPersistentEntityConfig) {
  import SlickClusteredPersistentEntityConfig._

  var h2: Server = _

  protected override def atStartup(): Unit = {
    runOn(node1) {
      h2 = Server.createTcpServer("-tcpPort", specPorts.database.toString, "-ifNotExists").start()
    }
    enterBarrier("h2-started")
    super.atStartup()
  }

  protected override def afterTermination(): Unit = {
    super.afterTermination()
    Await.ready(defaultApplicationLifecycle.stop(), shutdownTimeout)
    Option(h2).foreach(_.stop())
  }

  lazy val defaultApplicationLifecycle = new DefaultApplicationLifecycle

  override lazy val components: SlickPersistenceComponents =
    new SlickPersistenceComponents with HikariCPComponents {
      override def actorSystem: ActorSystem                 = SlickClusteredPersistentEntitySpec.this.system
      override def executionContext: ExecutionContext       = system.dispatcher
      override def coordinatedShutdown: CoordinatedShutdown = CoordinatedShutdown(actorSystem)

      override lazy val materializer: Materializer                 = SystemMaterializer(actorSystem).materializer
      override lazy val configuration: Configuration               = Configuration(system.settings.config)
      override def environment: Environment                        = SlickClusteredPersistentEntityConfig.environment
      override lazy val applicationLifecycle: ApplicationLifecycle = defaultApplicationLifecycle
      override def jsonSerializerRegistry: JsonSerializerRegistry  = TestEntitySerializerRegistry
    }

  lazy val jdbcTestEntityReadSide: SlickTestEntityReadSide =
    new SlickTestEntityReadSide(
      components.db,
      components.profile
    )(components.executionContext)

  protected override def getAppendCount(id: String): Future[Long] =
    jdbcTestEntityReadSide.getAppendCount(id)

  protected override def readSideProcessor: () => ReadSideProcessor[Evt] = { () =>
    new SlickTestEntityReadSide.TestEntityReadSideProcessor(
      components.slickReadSide,
      components.db,
      components.profile
    )(components.executionContext)
  }
} 
Example 15
Source File: JdbcClusteredPersistentEntitySpec.scala (from lagom, Apache License 2.0)
package com.lightbend.lagom.scaladsl.persistence.jdbc

import akka.actor.ActorSystem
import akka.actor.CoordinatedShutdown
import akka.stream.Materializer
import akka.stream.SystemMaterializer
import com.lightbend.lagom.scaladsl.persistence.TestEntity.Evt
import com.lightbend.lagom.scaladsl.persistence.multinode.AbstractClusteredPersistentEntityConfig
import com.lightbend.lagom.scaladsl.persistence.multinode.AbstractClusteredPersistentEntitySpec
import com.lightbend.lagom.scaladsl.persistence.ReadSideProcessor
import com.lightbend.lagom.scaladsl.persistence.TestEntitySerializerRegistry
import com.lightbend.lagom.scaladsl.persistence.multinode.AbstractClusteredPersistentEntitySpec.Ports
import com.lightbend.lagom.scaladsl.playjson.JsonSerializerRegistry
import com.typesafe.config.Config
import com.typesafe.config.ConfigFactory
import org.h2.tools.Server
import play.api.Configuration
import play.api.Environment
import play.api.db.HikariCPComponents
import play.api.inject.ApplicationLifecycle
import play.api.inject.DefaultApplicationLifecycle

import scala.concurrent.Await
import scala.concurrent.ExecutionContext
import scala.concurrent.Future

object JdbcClusteredPersistentEntityConfig extends AbstractClusteredPersistentEntityConfig {

  override def specPorts: Ports.SpecPorts = Ports.jdbcSpecPorts

  override def additionalCommonConfig: Config = ConfigFactory.parseString(
    s"""
      db.default.driver=org.h2.Driver
      db.default.url="jdbc:h2:tcp://localhost:${specPorts.database}/mem:JdbcClusteredPersistentEntitySpec"
    """
  )
}

class JdbcClusteredPersistentEntitySpecMultiJvmNode1 extends JdbcClusteredPersistentEntitySpec
class JdbcClusteredPersistentEntitySpecMultiJvmNode2 extends JdbcClusteredPersistentEntitySpec
class JdbcClusteredPersistentEntitySpecMultiJvmNode3 extends JdbcClusteredPersistentEntitySpec

class JdbcClusteredPersistentEntitySpec
    extends AbstractClusteredPersistentEntitySpec(JdbcClusteredPersistentEntityConfig) {
  import JdbcClusteredPersistentEntityConfig._

  var h2: Server = _

  protected override def atStartup(): Unit = {
    runOn(node1) {
      h2 = Server.createTcpServer("-tcpPort", specPorts.database.toString, "-ifNotExists").start()
    }

    enterBarrier("h2-started")
    super.atStartup()
  }

  protected override def afterTermination(): Unit = {
    super.afterTermination()
    Await.ready(defaultApplicationLifecycle.stop(), shutdownTimeout)
    Option(h2).foreach(_.stop())
  }

  lazy val defaultApplicationLifecycle = new DefaultApplicationLifecycle

  override lazy val components: JdbcPersistenceComponents =
    new JdbcPersistenceComponents with HikariCPComponents {
      override def actorSystem: ActorSystem                 = JdbcClusteredPersistentEntitySpec.this.system
      override def executionContext: ExecutionContext       = system.dispatcher
      override def coordinatedShutdown: CoordinatedShutdown = CoordinatedShutdown(actorSystem)

      override lazy val materializer: Materializer                 = SystemMaterializer(actorSystem).materializer
      override lazy val configuration: Configuration               = Configuration(system.settings.config)
      override def environment: Environment                        = JdbcClusteredPersistentEntityConfig.environment
      override lazy val applicationLifecycle: ApplicationLifecycle = defaultApplicationLifecycle
      override def jsonSerializerRegistry: JsonSerializerRegistry  = TestEntitySerializerRegistry
    }

  lazy val jdbcTestEntityReadSide: JdbcTestEntityReadSide =
    new JdbcTestEntityReadSide(components.jdbcSession)

  protected override def getAppendCount(id: String): Future[Long] =
    jdbcTestEntityReadSide.getAppendCount(id)

  protected override def readSideProcessor: () => ReadSideProcessor[Evt] = { () =>
    new JdbcTestEntityReadSide.TestEntityReadSideProcessor(components.jdbcReadSide)
  }
} 
Example 16
Source File: JdbcPersistenceComponents.scala (from lagom, Apache License 2.0)
package com.lightbend.lagom.scaladsl.persistence.jdbc

import akka.actor.ActorSystem
import akka.actor.CoordinatedShutdown
import com.lightbend.lagom.internal.persistence.jdbc.SlickDbProvider
import com.lightbend.lagom.internal.persistence.jdbc.SlickOffsetStore
import com.lightbend.lagom.internal.persistence.jdbc.SlickProvider
import com.lightbend.lagom.internal.scaladsl.persistence.jdbc.JdbcPersistentEntityRegistry
import com.lightbend.lagom.internal.scaladsl.persistence.jdbc.JdbcReadSideImpl
import com.lightbend.lagom.internal.scaladsl.persistence.jdbc.JdbcSessionImpl
import com.lightbend.lagom.internal.scaladsl.persistence.jdbc.OffsetTableConfiguration
import com.lightbend.lagom.scaladsl.persistence.PersistenceComponents
import com.lightbend.lagom.scaladsl.persistence.PersistentEntityRegistry
import com.lightbend.lagom.scaladsl.persistence.ReadSidePersistenceComponents
import com.lightbend.lagom.scaladsl.persistence.WriteSidePersistenceComponents
import com.lightbend.lagom.spi.persistence.OffsetStore
import play.api.db.DBComponents

import scala.concurrent.ExecutionContext

// This listing appears to be an excerpt: the SlickProviderComponents trait mixed in below, and the
// rest of JdbcPersistenceComponents.scala, are not reproduced here.
trait ReadSideJdbcPersistenceComponents extends ReadSidePersistenceComponents with SlickProviderComponents {
  lazy val offsetTableConfiguration: OffsetTableConfiguration = new OffsetTableConfiguration(
    configuration.underlying,
    readSideConfig
  )
  private[lagom] lazy val slickOffsetStore: SlickOffsetStore =
    new SlickOffsetStore(actorSystem, slickProvider, offsetTableConfiguration)

  lazy val offsetStore: OffsetStore = slickOffsetStore

  lazy val jdbcReadSide: JdbcReadSide = new JdbcReadSideImpl(slickProvider, slickOffsetStore)(executionContext)

  lazy val jdbcSession: JdbcSession = new JdbcSessionImpl(slickProvider)
} 
Example 17
Source File: JdbcPersistenceModule.scala (from lagom, Apache License 2.0)
package com.lightbend.lagom.javadsl.persistence.jdbc

import javax.inject.Inject
import javax.inject.Provider
import javax.inject.Singleton
import akka.actor.ActorSystem
import akka.actor.CoordinatedShutdown
import com.lightbend.lagom.internal.javadsl.persistence.jdbc._
import com.lightbend.lagom.internal.persistence.jdbc.SlickDbProvider
import com.lightbend.lagom.internal.persistence.jdbc.SlickOffsetStore
import com.lightbend.lagom.javadsl.persistence.PersistentEntityRegistry
import com.lightbend.lagom.spi.persistence.OffsetStore
import play.api.Configuration
import play.api.Environment
import play.api.db.DBApi
import play.api.inject.Binding
import play.api.inject.Module

import scala.concurrent.ExecutionContext

class JdbcPersistenceModule extends Module {
  override def bindings(environment: Environment, configuration: Configuration): Seq[Binding[_]] = Seq(
    bind[SlickProvider].toProvider[GuiceSlickProvider],
    bind[JdbcReadSide].to[JdbcReadSideImpl],
    bind[PersistentEntityRegistry].to[JdbcPersistentEntityRegistry],
    bind[JdbcSession].to[JdbcSessionImpl],
    bind[SlickOffsetStore].to[JavadslJdbcOffsetStore],
    bind[OffsetStore].to(bind[SlickOffsetStore])
  )
}

@Singleton
class GuiceSlickProvider @Inject() (
    dbApi: DBApi,
    actorSystem: ActorSystem,
    coordinatedShutdown: CoordinatedShutdown
)(
    implicit ec: ExecutionContext
) extends Provider[SlickProvider] {
  lazy val get = {
    // Ensures JNDI bindings are made before we build the SlickProvider
    SlickDbProvider.buildAndBindSlickDatabases(
      dbApi,
      actorSystem.settings.config,
      coordinatedShutdown
    )
    new SlickProvider(actorSystem, coordinatedShutdown)
  }
} 
Example 18
Source File: ActorSystemSpec.scala (from lagom, Apache License 2.0)
package com.lightbend.lagom.persistence

import java.lang.reflect.Modifier

import akka.actor.ActorSystem
import akka.actor.CoordinatedShutdown
import akka.actor.setup.ActorSystemSetup
import akka.event.Logging
import akka.event.LoggingAdapter
import akka.testkit.ImplicitSender
import akka.testkit.TestKit
import com.typesafe.config.Config
import com.typesafe.config.ConfigFactory
import org.scalactic.CanEqual
import org.scalactic.TypeCheckedTripleEquals
import org.scalatest.BeforeAndAfterAll
import org.scalatest.matchers.should.Matchers
import org.scalatest.wordspec.AnyWordSpecLike

object ActorSystemSpec {
  // taken from akka-testkit's AkkaSpec
  private def testNameFromCallStack(classToStartFrom: Class[_]): String = {

    def isAbstractClass(className: String): Boolean = {
      try {
        Modifier.isAbstract(Class.forName(className).getModifiers)
      } catch {
        case _: Throwable => false // yes catch everything, best effort check
      }
    }

    val startFrom = classToStartFrom.getName
    val filteredStack = Thread.currentThread.getStackTrace.iterator
      .map(_.getClassName)
      // drop until we find the first occurrence of classToStartFrom
      .dropWhile(!_.startsWith(startFrom))
      // then continue to the next entry after classToStartFrom that makes sense
      .dropWhile {
        case `startFrom`                            => true
        case str if str.startsWith(startFrom + "$") => true // lambdas inside startFrom etc
        case str if isAbstractClass(str)            => true
        case _                                      => false
      }

    if (filteredStack.isEmpty)
      throw new IllegalArgumentException(s"Couldn't find [${classToStartFrom.getName}] in call stack")

    // sanitize for actor system name
    scrubActorSystemName(filteredStack.next())
  }

  // taken from akka-testkit's AkkaSpec
  private def scrubActorSystemName(name: String): String = {
    name
      .replaceFirst("""^.*\.""", "")  // drop package name
      .replaceAll("""\$\$?\w+""", "") // drop scala anonymous functions/classes
      .replaceAll("[^a-zA-Z_0-9]", "_")
  }
}

abstract class ActorSystemSpec(actorSystemFactory: () => ActorSystem)
    extends TestKit(actorSystemFactory())
    with AnyWordSpecLike
    with Matchers
    with BeforeAndAfterAll
    with TypeCheckedTripleEquals
    with ImplicitSender {

  def this(testName: String, config: Config) =
    this(() => ActorSystem(testName, config))

  def this(config: Config) = this(ActorSystemSpec.testNameFromCallStack(classOf[ActorSystemSpec]), config)

  def this(setup: ActorSystemSetup) =
    this(() => ActorSystem(ActorSystemSpec.testNameFromCallStack(classOf[ActorSystemSpec]), setup))

  def this() = this(ConfigFactory.empty())

  override def afterAll(): Unit = {
    shutdown()
    super.afterAll()
  }

  val log: LoggingAdapter                      = Logging(system, this.getClass)
  val coordinatedShutdown: CoordinatedShutdown = CoordinatedShutdown(system)

  // for ScalaTest === compare of Class objects
  implicit def classEqualityConstraint[A, B]: CanEqual[Class[A], Class[B]] =
    new CanEqual[Class[A], Class[B]] {
      def areEqual(a: Class[A], b: Class[B]) = a == b
    }
} 
Example 19
Source File: ServiceRegistration.scala (from lagom, Apache License 2.0)
package com.lightbend.lagom.devmode.internal.scaladsl.registry

import akka.Done
import akka.actor.CoordinatedShutdown
import com.lightbend.lagom.devmode.internal.registry.serviceDnsRecords
import com.lightbend.lagom.scaladsl.api.ServiceInfo
import com.typesafe.config.Config
import play.api.Logger

import scala.concurrent.ExecutionContext
import scala.util.Failure
import scala.util.Success

import scala.collection._

class ServiceRegistration(
    serviceInfo: ServiceInfo,
    coordinatedShutdown: CoordinatedShutdown,
    config: Config,
    registry: ServiceRegistry
)(implicit ec: ExecutionContext) {
  private val logger: Logger = Logger(this.getClass)

  private val uris = serviceDnsRecords(config)

  coordinatedShutdown.addTask(
    CoordinatedShutdown.PhaseBeforeServiceUnbind,
    "unregister-services-from-service-locator-scaladsl"
  ) { () =>
    registry.unregister(serviceInfo.serviceName).invoke().map(_ => Done)
  }

  registry
    .register(serviceInfo.serviceName)
    .invoke(new ServiceRegistryService(uris, immutable.Seq(serviceInfo.acls.toSeq: _*)))
    .onComplete {
      case Success(_) =>
        logger.debug(s"Service name=[${serviceInfo.serviceName}] successfully registered with service locator.")
      case Failure(e) =>
        logger.error(s"Service name=[${serviceInfo.serviceName}] couldn't register itself to the service locator.", e)
    }
} 
Example 20
Source File: ServiceRegistrationModule.scala (from lagom, Apache License 2.0)
package com.lightbend.lagom.internal.server

import java.net.URI
import java.util.function.{ Function => JFunction }

import akka.actor.CoordinatedShutdown
import akka.Done
import akka.NotUsed
import com.lightbend.lagom.internal.javadsl.registry.ServiceRegistry
import com.lightbend.lagom.internal.javadsl.registry.ServiceRegistryService
import com.lightbend.lagom.internal.javadsl.server.ResolvedServices
import com.lightbend.lagom.devmode.internal.registry.serviceDnsRecords
import com.typesafe.config.Config
import javax.inject.Inject
import javax.inject.Provider
import javax.inject.Singleton
import play.api.inject.Binding
import play.api.inject.Module
import play.api.Configuration
import play.api.Environment
import play.api.Logger

import scala.compat.java8.FutureConverters.CompletionStageOps
import scala.concurrent.ExecutionContext
import scala.concurrent.Future
import scala.collection.JavaConverters._
import scala.collection.immutable

class ServiceRegistrationModule extends Module {
  override def bindings(environment: Environment, configuration: Configuration): Seq[Binding[_]] = Seq(
    bind[ServiceRegistrationModule.RegisterWithServiceRegistry].toSelf.eagerly(),
    bind[ServiceRegistrationModule.ServiceConfig].toProvider[ServiceRegistrationModule.ServiceConfigProvider]
  )
}

object ServiceRegistrationModule {
  class ServiceConfigProvider @Inject() (config: Config) extends Provider[ServiceConfig] {
    override lazy val get = ServiceConfig(serviceDnsRecords(config))
  }

  case class ServiceConfig(uris: immutable.Seq[URI])

  @Singleton
  private class RegisterWithServiceRegistry @Inject() (
      coordinatedShutdown: CoordinatedShutdown,
      resolvedServices: ResolvedServices,
      config: ServiceConfig,
      registry: ServiceRegistry
  )(implicit ec: ExecutionContext) {
    private lazy val logger: Logger = Logger(this.getClass())

    private val locatableServices = resolvedServices.services.filter(_.descriptor.locatableService)

    coordinatedShutdown.addTask(
      CoordinatedShutdown.PhaseBeforeServiceUnbind,
      "unregister-services-from-service-locator-javadsl"
    ) { () =>
      Future
        .sequence(locatableServices.map { service =>
          registry.unregister(service.descriptor.name).invoke().toScala
        })
        .map(_ => Done)
    }

    locatableServices.foreach { service =>
      val c = ServiceRegistryService.of(config.uris.asJava, service.descriptor.acls)
      registry
        .register(service.descriptor.name)
        .invoke(c)
        .exceptionally(new JFunction[Throwable, NotUsed] {
          def apply(t: Throwable) = {
            logger
              .error(s"Service name=[${service.descriptor.name}] couldn't register itself to the service locator.", t)
            NotUsed.getInstance()
          }
        })
    }
  }
} 
Example 21
Source File: RunServer.scala (from mleap, Apache License 2.0)
package ml.combust.mleap.grpc.server

import java.util.concurrent.{Executors, TimeUnit}

import akka.Done
import akka.actor.{ActorSystem, CoordinatedShutdown}
import akka.stream.{ActorMaterializer, Materializer}
import com.typesafe.config.Config
import com.typesafe.scalalogging.Logger
import io.grpc.ServerBuilder
import ml.combust.mleap.executor.MleapExecutor
import ml.combust.mleap.pb.MleapGrpc

import scala.concurrent.{ExecutionContext, Future}
import scala.language.existentials
import scala.util.{Failure, Success, Try}

class RunServer(config: Config)
               (implicit system: ActorSystem) {
  private val logger = Logger(classOf[RunServer])

  private var coordinator: Option[CoordinatedShutdown] = None

  def run(): Unit = {
    Try {
      logger.info("Starting MLeap gRPC Server")

      val coordinator = CoordinatedShutdown(system)
      this.coordinator = Some(coordinator)

      implicit val materializer: Materializer = ActorMaterializer()

      val grpcServerConfig = new GrpcServerConfig(config.getConfig("default"))
      val mleapExecutor = MleapExecutor(system)
      val port: Int = config.getInt("port")
      val threads: Option[Int] = if (config.hasPath("threads")) Some(config.getInt("threads")) else None
      val threadCount = threads.getOrElse {
        Math.min(Math.max(Runtime.getRuntime.availableProcessors() * 4, 32), 64)
      }

      logger.info(s"Creating thread pool for server with size $threadCount")
      val grpcThreadPool = Executors.newFixedThreadPool(threadCount)
      implicit val ec: ExecutionContext = ExecutionContext.fromExecutor(grpcThreadPool)

      coordinator.addTask(CoordinatedShutdown.PhaseServiceRequestsDone, "threadPoolShutdownNow") {
        () =>
          Future {
            logger.info("Shutting down gRPC thread pool")
            grpcThreadPool.shutdown()
            grpcThreadPool.awaitTermination(5, TimeUnit.SECONDS)

            Done
          }
      }

      logger.info(s"Creating executor service")
      val grpcService: GrpcServer = new GrpcServer(mleapExecutor, grpcServerConfig)
      val builder = ServerBuilder.forPort(port)
      builder.intercept(new ErrorInterceptor)
      builder.addService(MleapGrpc.bindService(grpcService, ec))
      val grpcServer = builder.build()

      logger.info(s"Starting server on port $port")
      grpcServer.start()

      coordinator.addTask(CoordinatedShutdown.PhaseServiceUnbind, "grpcServiceShutdown") {
        () =>
          Future {
            logger.info("Shutting down gRPC")
            grpcServer.shutdown()
            grpcServer.awaitTermination(10, TimeUnit.SECONDS)
            Done
          }(ExecutionContext.global)
      }

      coordinator.addTask(CoordinatedShutdown.PhaseServiceStop, "grpcServiceShutdownNow") {
        () =>
          Future {
            if (!grpcServer.isShutdown) {
              logger.info("Shutting down gRPC NOW!")

              grpcServer.shutdownNow()
              grpcServer.awaitTermination(5, TimeUnit.SECONDS)
            }

            Done
          }(ExecutionContext.global)
      }
    } match {
      case Success(_) =>
      case Failure(err) =>
        logger.error("Error encountered starting server", err)
        for (c <- this.coordinator) {
          c.run(CoordinatedShutdown.UnknownReason)
        }
        throw err
    }
  }
} 
Example 22
Source File: CacheInvalidator.scala (from openwhisk, Apache License 2.0)
package org.apache.openwhisk.core.database.cosmosdb.cache

import akka.Done
import akka.actor.{ActorSystem, CoordinatedShutdown}
import akka.kafka.ProducerSettings
import akka.stream.ActorMaterializer
import com.google.common.base.Throwables
import com.typesafe.config.Config
import org.apache.kafka.common.serialization.StringSerializer
import org.apache.openwhisk.common.Logging
import org.apache.openwhisk.core.database.RemoteCacheInvalidation.cacheInvalidationTopic

import scala.concurrent.{ExecutionContext, Future}
import scala.util.{Failure, Success}

object CacheInvalidator {

  val instanceId = "cache-invalidator"
  val whisksCollection = "whisks"

  def start(
    globalConfig: Config)(implicit system: ActorSystem, materializer: ActorMaterializer, log: Logging): Future[Done] = {
    implicit val ec: ExecutionContext = system.dispatcher
    val config = CacheInvalidatorConfig(globalConfig)
    val producer =
      KafkaEventProducer(
        kafkaProducerSettings(defaultProducerConfig(globalConfig)),
        cacheInvalidationTopic,
        config.eventProducerConfig)
    val observer = new WhiskChangeEventObserver(config.invalidatorConfig, producer)
    val feedConsumer = new ChangeFeedConsumer(whisksCollection, config, observer)
    feedConsumer.isStarted.andThen {
      case Success(_) =>
        registerShutdownTasks(system, feedConsumer, producer)
        log.info(this, s"Started the Cache invalidator service. ClusterId [${config.invalidatorConfig.clusterId}]")
      case Failure(t) =>
        log.error(this, "Error occurred while starting the Consumer" + Throwables.getStackTraceAsString(t))
    }
  }

  private def registerShutdownTasks(system: ActorSystem,
                                    feedConsumer: ChangeFeedConsumer,
                                    producer: KafkaEventProducer)(implicit ec: ExecutionContext, log: Logging): Unit = {
    CoordinatedShutdown(system).addTask(CoordinatedShutdown.PhaseBeforeServiceUnbind, "closeFeedListeners") { () =>
      feedConsumer
        .close()
        .flatMap { _ =>
          producer.close().andThen {
            case Success(_) =>
              log.info(this, "Kafka producer successfully shutdown")
          }
        }
    }
  }

  def kafkaProducerSettings(config: Config): ProducerSettings[String, String] =
    ProducerSettings(config, new StringSerializer, new StringSerializer)

  def defaultProducerConfig(globalConfig: Config): Config = globalConfig.getConfig("akka.kafka.producer")

}