akka.event.Logging Scala Examples
The following examples show how to use akka.event.Logging.
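Before the project examples, a minimal, self-contained sketch of the API they all revolve around may help: Logging(...) returns a LoggingAdapter, either for an arbitrary log source such as a class or for an actor itself. Everything in this sketch (system name, actor, messages) is illustrative and not taken from any project below.

import akka.actor.{Actor, ActorSystem, Props}
import akka.event.{Logging, LoggingAdapter}

object LoggingQuickStart extends App {
  val system = ActorSystem("logging-demo") // name is illustrative

  // A LoggingAdapter for a plain class as the log source.
  val log: LoggingAdapter = Logging(system, getClass)

  log.debug("Starting up, the answer is {}", 42) // placeholder-style formatting
  log.info("System {} started", system.name)
  log.warning("This is a warning")
  log.error(new RuntimeException("boom"), "This is an error with a cause")

  // Inside an actor, the actor itself is the usual log source.
  class Worker extends Actor {
    val log = Logging(context.system, this)
    def receive = { case msg => log.info("Received: {}", msg) }
  }
  system.actorOf(Props(new Worker), "worker") ! "hello"

  Thread.sleep(500) // demo only: give the asynchronous logger a moment
  system.terminate()
}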
Example 1
Source File: AbstractWebServer.scala From ohara with Apache License 2.0
package oharastream.ohara.shabondi.common

import akka.Done
import akka.actor.ActorSystem
import akka.event.Logging
import akka.http.scaladsl.Http
import akka.http.scaladsl.Http.ServerBinding
import akka.http.scaladsl.server.{Directives, Route}
import akka.http.scaladsl.settings.ServerSettings
import oharastream.ohara.common.util.Releasable

import scala.concurrent._
import scala.concurrent.duration.Duration
import scala.io.StdIn
import scala.util.{Failure, Success}

private[shabondi] abstract class AbstractWebServer extends Directives with Releasable {
  implicit protected val actorSystem: ActorSystem =
    ActorSystem(Logging.simpleName(this).replaceAll("\\$", ""))

  protected def routes: Route

  protected def postBinding(binding: ServerBinding): Unit = {
    val hostname = binding.localAddress.getHostName
    val port = binding.localAddress.getPort
    actorSystem.log.info(s"Server online at http://$hostname:$port/")
  }

  protected def postBindingFailure(cause: Throwable): Unit = {
    actorSystem.log.error(cause, s"Error starting the server ${cause.getMessage}")
  }

  protected def waitForShutdownSignal()(implicit ec: ExecutionContext): Future[Done] = {
    val promise = Promise[Done]()
    sys.addShutdownHook {
      promise.trySuccess(Done)
    }
    Future {
      blocking {
        if (StdIn.readLine("Press <RETURN> to stop Shabondi WebServer...\n") != null)
          promise.trySuccess(Done)
      }
    }
    promise.future
  }

  protected def postServerShutdown(): Unit = actorSystem.log.info("Shutting down the server")

  def start(bindInterface: String, port: Int): Unit = {
    start(bindInterface, port, ServerSettings(actorSystem))
  }

  def start(bindInterface: String, port: Int, settings: ServerSettings): Unit = {
    implicit val executionContext: ExecutionContextExecutor = actorSystem.dispatcher

    val bindingFuture: Future[Http.ServerBinding] = Http().bindAndHandle(
      handler = routes,
      interface = bindInterface,
      port = port,
      settings = settings
    )

    bindingFuture.onComplete {
      case Success(binding) => postBinding(binding)
      case Failure(cause)   => postBindingFailure(cause)
    }

    Await.ready(
      bindingFuture.flatMap(_ => waitForShutdownSignal()),
      Duration.Inf
    )

    bindingFuture
      .flatMap(_.unbind())
      .onComplete { _ =>
        postServerShutdown()
        actorSystem.terminate()
      }
  }

  override def close(): Unit = actorSystem.terminate()
}
Example 2
Source File: ModelService.scala From reactive-machine-learning-systems with MIT License
package com.reactivemachinelearning

import akka.actor.ActorSystem
import akka.event.{Logging, LoggingAdapter}
import akka.http.scaladsl.Http
import akka.http.scaladsl.marshalling.ToResponseMarshallable
import akka.http.scaladsl.model.StatusCodes._
import akka.http.scaladsl.server.Directives._
import akka.stream.{ActorMaterializer, Materializer}
//import spray.json._
import spray.json.DefaultJsonProtocol

import scala.concurrent.{ExecutionContextExecutor, Future}

case class Prediction(id: Long, timestamp: Long, value: Double)

trait Protocols extends DefaultJsonProtocol {
  implicit val ipInfoFormat = jsonFormat3(Prediction.apply)
}

trait Service extends Protocols {
  implicit val system: ActorSystem

  implicit def executor: ExecutionContextExecutor

  implicit val materializer: Materializer

  val logger: LoggingAdapter

  //  private def parseFeatures(features: String): Map[Long, Double] = {
  //    features.parseJson.convertTo[Map[Long, Double]]
  //  }

  def predict(features: String): Future[Prediction] = {
    Future(Prediction(123, 456, 0.5))
  }

  val routes = {
    logRequestResult("predictive-service") {
      pathPrefix("ip") {
        (get & path(Segment)) { features =>
          complete {
            predict(features).map[ToResponseMarshallable] {
              //  case prediction: Prediction => prediction
              case _ => BadRequest
            }
          }
        }
      }
    }
  }
}

object PredictiveService extends App with Service {
  override implicit val system = ActorSystem()
  override implicit val executor = system.dispatcher
  override implicit val materializer = ActorMaterializer()

  override val logger = Logging(system, getClass)

  Http().bindAndHandle(routes, "0.0.0.0", 9000)
}
Example 3
Source File: TestSpec.scala From akka-serialization-test with Apache License 2.0
package com.github.dnvriend

import akka.actor.{ ActorRef, ActorSystem, PoisonPill }
import akka.event.{ Logging, LoggingAdapter }
import akka.serialization.SerializationExtension
import akka.stream.{ ActorMaterializer, Materializer }
import akka.testkit.TestProbe
import akka.util.Timeout
import org.scalatest.concurrent.{ Eventually, ScalaFutures }
import org.scalatest.prop.PropertyChecks
import org.scalatest.{ BeforeAndAfterAll, FlatSpec, GivenWhenThen, Matchers }

import scala.concurrent.duration._
import scala.concurrent.{ ExecutionContext, Future }
import scala.util.Try

trait TestSpec extends FlatSpec
    with Matchers
    with GivenWhenThen
    with ScalaFutures
    with BeforeAndAfterAll
    with Eventually
    with PropertyChecks
    with AkkaPersistenceQueries
    with AkkaStreamUtils
    with InMemoryCleanup {

  implicit val timeout: Timeout = Timeout(10.seconds)
  implicit val system: ActorSystem = ActorSystem()
  implicit val ec: ExecutionContext = system.dispatcher
  implicit val mat: Materializer = ActorMaterializer()
  implicit val log: LoggingAdapter = Logging(system, this.getClass)
  implicit val pc: PatienceConfig = PatienceConfig(timeout = 50.seconds)

  val serialization = SerializationExtension(system)

  implicit class FutureToTry[T](f: Future[T]) {
    def toTry: Try[T] = Try(f.futureValue)
  }

  def killActors(actors: ActorRef*): Unit = {
    val probe = TestProbe()
    actors.foreach { actor ⇒
      probe watch actor
      actor ! PoisonPill
      probe expectTerminated actor
    }
  }

  override protected def afterAll(): Unit = {
    system.terminate()
    system.whenTerminated.toTry should be a 'success
  }
}
Example 4
Source File: TestActor.scala From AI with Apache License 2.0
package com.bigchange.akka.actor

import akka.actor.{Actor, ActorSystem, Props}
import akka.event.Logging
import com.bigchange.akka.message.MapData

// The enclosing class declaration is missing from the original listing; it is
// reconstructed here so that preStart and receive have an Actor to live in.
class TestActor extends Actor {

  val log = Logging(context.system, this)

  @scala.throws[Exception](classOf[Exception])
  override def preStart(): Unit = {
    // Actor initialization block
  }

  // Create an actor from an anonymous class: when deriving a new actor from an
  // existing one to carry out a specific subtask, it can be more convenient to
  // wrap the code to be executed in an anonymous class.
  def receive = {
    case m: MapData ⇒
      context.actorOf(Props(new Actor {
        def receive = {
          case Some(msg) ⇒
            val replyMsg = doSomeDangerousWork(msg.toString)
            sender ! replyMsg
            context.stop(self)
        }

        def doSomeDangerousWork(msg: String): String = {
          "done"
        }
      })) forward m
  }
}

// Props construction and actor creation; in the original listing these vals sat
// at the top level, so they are gathered into a demo object here.
object TestActorDemo {
  // props (Props() is the default-Props constructor from early Akka versions)
  val props1 = Props()
  val props2 = Props[TestActor]
  val props3 = Props(new TestActor)
  val props6 = props1.withDispatcher("my-dispatcher")

  // create actor
  val system = ActorSystem("MySystem")
  val myActor = system.actorOf(Props[TestActor].withDispatcher("my-dispatcher"), name = "myactor2")
}
Example 5
Source File: AggregateActor.scala From AI with Apache License 2.0
package com.bigchange.akka.actor

import akka.actor.{ActorRef, UntypedActor}
import akka.event.Logging
import com.bigchange.akka.message.{ReduceData, Result}

import scala.collection.mutable

class AggregateActor(resultActor: ActorRef) extends UntypedActor {

  val finalHashMap = new mutable.HashMap[String, Int]()
  val log = Logging(context.system, this)

  @scala.throws[Throwable](classOf[Throwable])
  override def onReceive(message: Any): Unit = {
    message match {
      case data: String =>
        log.info("Aggregate got message:" + data)
        log.info("Aggregate ok!")
      case reduceData: ReduceData =>
        aggregateInMemoryReduce(reduceData.reduceHashMap)
        println("path:" + sender().path)
        resultActor ! new Result(finalHashMap) // send the computed result to the ResultActor
      case message: Result =>
        println("AggregateActor:" + message.resultValue.toString())
      case _ =>
        log.info("map unhandled message")
        unhandled(message)
    }
  }

  // aggregate
  def aggregateInMemoryReduce(reduceMap: mutable.HashMap[String, Int]): Unit = {
    var count = 0
    reduceMap.foreach(x => {
      if (finalHashMap.contains(x._1)) {
        count = x._2
        count += finalHashMap.get(x._1).get
        finalHashMap.put(x._1, count)
      } else {
        finalHashMap.put(x._1, x._2)
      }
    })
  }
}
Example 6
Source File: TestSpec.scala From intro-to-akka-streams with Apache License 2.0
package com.github.dnvriend.streams

import akka.NotUsed
import akka.actor.{ ActorRef, ActorSystem, PoisonPill }
import akka.event.{ Logging, LoggingAdapter }
import akka.stream.Materializer
import akka.stream.scaladsl.Source
import akka.stream.testkit.TestSubscriber
import akka.stream.testkit.scaladsl.TestSink
import akka.testkit.TestProbe
import akka.util.Timeout
import com.github.dnvriend.streams.util.ClasspathResources
import org.scalatest._
import org.scalatest.concurrent.{ Eventually, ScalaFutures }
import org.scalatestplus.play.guice.GuiceOneServerPerSuite
import play.api.inject.BindingKey
import play.api.libs.json.{ Format, Json }
import play.api.test.WsTestClient

import scala.collection.immutable._
import scala.concurrent.duration._
import scala.concurrent.{ ExecutionContext, Future }
import scala.reflect.ClassTag
import scala.util.Try

object Person {
  implicit val format: Format[Person] = Json.format[Person]
}

final case class Person(firstName: String, age: Int)

class TestSpec extends FlatSpec
    with Matchers
    with GivenWhenThen
    with OptionValues
    with TryValues
    with ScalaFutures
    with WsTestClient
    with BeforeAndAfterAll
    with BeforeAndAfterEach
    with Eventually
    with ClasspathResources
    with GuiceOneServerPerSuite {

  def getComponent[A: ClassTag] = app.injector.instanceOf[A]

  def getNamedComponent[A](name: String)(implicit ct: ClassTag[A]): A =
    app.injector.instanceOf[A](BindingKey(ct.runtimeClass.asInstanceOf[Class[A]]).qualifiedWith(name))

  // set the port number of the HTTP server
  override lazy val port: Int = 8081
  implicit val timeout: Timeout = 1.second
  implicit val pc: PatienceConfig = PatienceConfig(timeout = 30.seconds, interval = 300.millis)
  implicit val system: ActorSystem = getComponent[ActorSystem]
  implicit val ec: ExecutionContext = getComponent[ExecutionContext]
  implicit val mat: Materializer = getComponent[Materializer]
  val log: LoggingAdapter = Logging(system, this.getClass)

  // ================================== Supporting Operations ====================================
  def id: String = java.util.UUID.randomUUID().toString

  implicit class FutureToTry[T](f: Future[T]) {
    def toTry: Try[T] = Try(f.futureValue)
  }

  implicit class SourceOps[A](src: Source[A, NotUsed]) {
    def testProbe(f: TestSubscriber.Probe[A] ⇒ Unit): Unit =
      f(src.runWith(TestSink.probe(system)))
  }

  def withIterator[T](start: Int = 0)(f: Source[Int, NotUsed] ⇒ T): T =
    f(Source.fromIterator(() ⇒ Iterator from start))

  def fromCollection[A](xs: Iterable[A])(f: TestSubscriber.Probe[A] ⇒ Unit): Unit =
    f(Source(xs).runWith(TestSink.probe(system)))

  def killActors(refs: ActorRef*): Unit = {
    val tp = TestProbe()
    refs.foreach { ref ⇒
      tp watch ref
      tp.send(ref, PoisonPill)
      tp.expectTerminated(ref)
    }
  }
}
Example 7
Source File: AlsoTo.scala From akka_streams_tutorial with MIT License
package sample.stream_divert

import akka.actor.ActorSystem
import akka.event.Logging
import akka.stream.Attributes
import akka.stream.scaladsl.{Flow, Sink, Source}

object AlsoTo extends App {
  implicit val system = ActorSystem("AlsoTo")
  implicit val executionContext = system.dispatcher
  implicit val adapter = Logging(system, this.getClass)

  val source = Source(1 to 10)

  val sink = Sink.foreach { x: Int =>
    adapter.log(Logging.InfoLevel, s" --> Element: $x reached sink")
  }

  def sinkBlocking = Sink.foreach { x: Int =>
    Thread.sleep(1000)
    adapter.log(Logging.InfoLevel, s" --> Element: $x logged in alsoTo sinkBlocking by ${Thread.currentThread().getName}")
  }

  val flow = Flow[Int]
    .log("before alsoTo")
    .alsoTo(sinkBlocking)
    .log("after alsoTo")
    .withAttributes(
      Attributes.logLevels(
        onElement = Logging.InfoLevel,
        onFinish = Logging.InfoLevel,
        onFailure = Logging.DebugLevel
      ))

  val done = source.via(flow).runWith(sink)
  done.onComplete(_ => system.terminate())
}
Example 8
Source File: CouchbaseReplayExtension.scala From akka-persistence-couchbase with Apache License 2.0
package akka.persistence.couchbase.replay

import java.util.concurrent.TimeUnit

import akka.actor.{ExtendedActorSystem, Extension, ExtensionId, ExtensionIdProvider}
import akka.event.Logging
import akka.persistence.couchbase.CouchbaseExtension
import com.couchbase.client.java.document.JsonLongDocument
import com.couchbase.client.java.document.json.JsonObject
import com.couchbase.client.java.view._

import scala.util.{Failure, Try}

trait CouchbaseReplay extends Extension {

  def replayConfig: CouchbaseReplayConfig

  def replay(callback: ReplayCallback, journalMessageIdOption: Option[Long] = None): Unit

  def storeMessageId(identifier: String, journalMessageId: Long): Unit

  def readMessageId(identifier: String): Option[Long]
}

private class DefaultCouchbaseReplay(val system: ExtendedActorSystem) extends CouchbaseReplay {

  private val log = Logging(system, getClass.getName)

  val couchbase = CouchbaseExtension(system)

  override val replayConfig = CouchbaseReplayConfig(system)

  val cluster = replayConfig.createCluster(couchbase.environment)

  val replayBucket = replayConfig.openBucket(cluster)

  updateJournalDesignDocs()

  private def updateJournalDesignDocs(): Unit = {
    val designDocs = JsonObject.create()
      .put("views", JsonObject.create()
        .put("commits", JsonObject.create()
          .put("map", replayConfig.replayViewCode)
        )
      )

    Try {
      val designDocument = DesignDocument.from("recovery", designDocs)
      couchbase.journalBucket.bucketManager.upsertDesignDocument(designDocument)
    } recoverWith {
      case e =>
        log.error(e, "Updating design documents for recovery")
        Failure(e)
    }
  }

  override def replay(callback: ReplayCallback, journalMessageIdOption: Option[Long]): Unit = {
    system.actorOf(ReplayActor.props(callback)) ! ReplayActor.Recover(journalMessageIdOption)
  }

  override def storeMessageId(identifier: String, journalMessageId: Long): Unit = {
    Try {
      replayBucket.upsert(
        JsonLongDocument.create(s"replayId::$identifier", journalMessageId),
        replayConfig.persistTo,
        replayConfig.replicateTo,
        replayConfig.timeout.toSeconds,
        TimeUnit.SECONDS
      )
    } recoverWith {
      case e =>
        log.error(e, "Store replay id: {}", journalMessageId)
        Failure(e)
    }
  }

  override def readMessageId(identifier: String): Option[Long] = {
    Option(
      replayBucket.get(
        JsonLongDocument.create(s"replayId::$identifier"),
        replayConfig.timeout.toSeconds,
        TimeUnit.SECONDS
      )
    ).map(_.content())
  }
}

object CouchbaseReplayExtension extends ExtensionId[CouchbaseReplay] with ExtensionIdProvider {

  override def lookup(): ExtensionId[CouchbaseReplay] = CouchbaseReplayExtension

  override def createExtension(system: ExtendedActorSystem): CouchbaseReplay = {
    new DefaultCouchbaseReplay(system)
  }
}
Example 9
Source File: FilesystemSnapshotStore.scala From eventuate with Apache License 2.0
package com.rbmhtechnology.eventuate.snapshot.filesystem

import java.io._
import java.net.URLEncoder

import akka.event.{ LogSource, Logging }
import com.rbmhtechnology.eventuate._
import com.rbmhtechnology.eventuate.snapshot.SnapshotStore
import org.apache.commons.io.IOUtils

import scala.concurrent.Future
import scala.collection.immutable.Seq
import scala.util._

object FilesystemSnapshotStore {
  implicit val logSource = LogSource.fromAnyClass[FilesystemSnapshotStore]
}

class FilesystemSnapshotStore(settings: FilesystemSnapshotStoreSettings, logId: String) extends SnapshotStore {
  private val log = Logging(settings.system, classOf[FilesystemSnapshotStore])
  private val rootDir = new File(settings.rootDir, URLEncoder.encode(logId, "UTF-8"))

  rootDir.mkdirs()

  override def deleteAsync(lowerSequenceNr: Long): Future[Unit] = {
    import settings.writeDispatcher
    Future(delete(lowerSequenceNr))
  }

  override def saveAsync(snapshot: Snapshot): Future[Unit] = {
    import settings.writeDispatcher
    Future(withOutputStream(dstDir(snapshot.metadata.emitterId), snapshot.metadata.sequenceNr)(serialize(_, snapshot)))
  }

  override def loadAsync(emitterId: String): Future[Option[Snapshot]] = {
    import settings.readDispatcher
    Future(load(dstDir(emitterId)))
  }

  def delete(lowerSequenceNr: Long): Unit = for {
    emitterId <- rootDir.listFiles
    emitterDir = dstDir(emitterId.getName)
    sequenceNr <- decreasingSequenceNrs(emitterDir)
    if sequenceNr >= lowerSequenceNr
  } dstFile(emitterDir, sequenceNr).delete()

  def load(dir: File): Option[Snapshot] = {
    @annotation.tailrec
    def go(snrs: Seq[Long]): Option[Snapshot] = snrs.headOption match {
      case None => None
      case Some(snr) => Try(withInputStream(dir, snr)(deserialize)) match {
        case Success(s) => Some(s)
        case Failure(e) =>
          log.error(e, s"error loading snapshot ${dstFile(dir, snr)}")
          go(snrs.tail)
      }
    }
    go(decreasingSequenceNrs(dir))
  }

  private def serialize(outputStream: OutputStream, snapshot: Snapshot): Unit =
    outputStream.write(settings.serialization.serialize(snapshot).get)

  private def deserialize(inputStream: InputStream): Snapshot =
    settings.serialization.deserialize(IOUtils.toByteArray(inputStream), classOf[Snapshot]).get

  private def withOutputStream(dir: File, snr: Long)(body: OutputStream => Unit): Unit = {
    val dst = dstFile(dir, snr)
    val tmp = tmpFile(dir, snr)

    dir.mkdirs()
    withStream(new BufferedOutputStream(new FileOutputStream(tmp)), body)
    tmp.renameTo(dst)

    // do not keep more than the configured maximum number of snapshot files
    decreasingSequenceNrs(dir).drop(settings.snapshotsPerEmitterMax).foreach { snr =>
      dstFile(dir, snr).delete()
    }
  }

  private def withInputStream[A](dir: File, snr: Long)(body: InputStream => A): A =
    withStream(new BufferedInputStream(new FileInputStream(dstFile(dir, snr))), body)

  private def withStream[A <: Closeable, B](stream: A, p: A => B): B =
    try { p(stream) } finally { stream.close() }

  private val DstFilenamePattern = """^snr-(\d+)""".r

  private[eventuate] def dstDir(emitterId: String): File =
    new File(rootDir, URLEncoder.encode(emitterId, "UTF-8"))

  private[eventuate] def dstFile(dstDir: File, sequenceNr: Long): File =
    new File(dstDir, s"snr-${sequenceNr}")

  private[eventuate] def tmpFile(dstDir: File, sequenceNr: Long): File =
    new File(dstDir, s"tmp-${sequenceNr}")

  private[eventuate] def decreasingSequenceNrs(dir: File): Seq[Long] =
    if (!dir.exists) Nil
    else dir.listFiles.map(_.getName).collect { case DstFilenamePattern(snr) => snr.toLong }.toList.sorted.reverse
}
Example 10
Source File: LeaderAwareCustomAutoDownBase.scala From akka-cluster-custom-downing with Apache License 2.0
package tanukki.akka.cluster.autodown

import akka.actor.Address
import akka.cluster.ClusterEvent._
import akka.event.Logging

import scala.concurrent.duration.FiniteDuration

abstract class LeaderAwareCustomAutoDownBase(autoDownUnreachableAfter: FiniteDuration)
    extends CustomAutoDownBase(autoDownUnreachableAfter) {

  private val log = Logging(context.system, this)

  private var leader = false

  def onLeaderChanged(leader: Option[Address]): Unit = {}

  def isLeader: Boolean = leader

  override def receiveEvent: Receive = {
    case LeaderChanged(leaderOption) =>
      leader = leaderOption.contains(selfAddress)
      if (isLeader) {
        log.info("This node is the new Leader")
      }
      onLeaderChanged(leaderOption)
    case UnreachableMember(m) =>
      log.info("{} is unreachable", m)
      unreachableMember(m)
    case ReachableMember(m) =>
      log.info("{} is reachable", m)
      remove(m)
    case MemberRemoved(m, _) =>
      log.info("{} was removed from the cluster", m)
      remove(m)
  }

  override def initialize(state: CurrentClusterState): Unit = {
    leader = state.leader.exists(_ == selfAddress)
    super.initialize(state)
  }
}
Example 11
Source File: RoleLeaderAwareCustomAutoDownBase.scala From akka-cluster-custom-downing with Apache License 2.0
package tanukki.akka.cluster.autodown

import akka.actor.Address
import akka.cluster.ClusterEvent._
import akka.event.Logging

import scala.concurrent.duration.FiniteDuration

abstract class RoleLeaderAwareCustomAutoDownBase(autoDownUnreachableAfter: FiniteDuration)
    extends CustomAutoDownBase(autoDownUnreachableAfter) {

  private val log = Logging(context.system, this)

  private var roleLeader: Map[String, Boolean] = Map.empty

  def isRoleLeaderOf(role: String): Boolean = roleLeader.getOrElse(role, false)

  def onRoleLeaderChanged(role: String, leader: Option[Address]): Unit = {}

  override def receiveEvent: Receive = {
    case RoleLeaderChanged(role, leaderOption) =>
      roleLeader = roleLeader + (role -> leaderOption.contains(selfAddress))
      if (isRoleLeaderOf(role)) {
        log.info("This node is the new role leader for role {}", role)
      }
      onRoleLeaderChanged(role, leaderOption)
    case UnreachableMember(m) =>
      log.info("{} is unreachable", m)
      unreachableMember(m)
    case ReachableMember(m) =>
      log.info("{} is reachable", m)
      remove(m)
    case MemberRemoved(m, _) =>
      log.info("{} was removed from the cluster", m)
      remove(m)
  }

  override def initialize(state: CurrentClusterState): Unit = {
    roleLeader = state.roleLeaderMap.mapValues(_.exists(_ == selfAddress)).toMap
    super.initialize(state)
  }
}
Example 12
Source File: CarbonClient.scala From akka-http-metrics with Apache License 2.0
package fr.davit.akka.http.metrics.graphite

import java.time.{Clock, Instant}

import akka.NotUsed
import akka.actor.ActorSystem
import akka.event.Logging
import akka.stream.scaladsl.{Flow, Keep, RestartFlow, Sink, Source, Tcp}
import akka.stream.{OverflowStrategy, QueueOfferResult}
import akka.util.ByteString
import fr.davit.akka.http.metrics.core.Dimension

import scala.concurrent.Await
import scala.concurrent.duration.{Duration, _}

object CarbonClient {

  def apply(host: String, port: Int)(implicit system: ActorSystem): CarbonClient =
    new CarbonClient(host, port)
}

class CarbonClient(host: String, port: Int)(implicit system: ActorSystem) extends AutoCloseable {

  private val logger = Logging(system.eventStream, classOf[CarbonClient])
  protected val clock: Clock = Clock.systemUTC()

  private def serialize[T](name: String, value: T, dimensions: Seq[Dimension], ts: Instant): ByteString = {
    val tags = dimensions.map(d => d.key + "=" + d.value).toList
    val taggedMetric = (name :: tags).mkString(";")
    ByteString(s"$taggedMetric $value ${ts.getEpochSecond}\n")
  }

  // TODO read backoff from config
  private def connection: Flow[ByteString, ByteString, NotUsed] = RestartFlow.withBackoff(
    minBackoff = 3.seconds,
    maxBackoff = 30.seconds,
    randomFactor = 0.2, // adds 20% "noise" to vary the intervals slightly
    maxRestarts = -1    // keep retrying forever
  )(() => Tcp().outgoingConnection(host, port))

  private val queue = Source
    .queue[ByteString](19, OverflowStrategy.dropHead)
    .via(connection)
    .toMat(Sink.ignore)(Keep.left)
    .run()

  def publish[T](
      name: String,
      value: T,
      dimensions: Seq[Dimension] = Seq.empty,
      ts: Instant = Instant.now(clock)
  ): Unit = {
    // it's reasonable to block until the message is enqueued
    Await.result(queue.offer(serialize(name, value, dimensions, ts)), Duration.Inf) match {
      case QueueOfferResult.Enqueued    => logger.debug("Metric {} enqueued", name)
      case QueueOfferResult.Dropped     => logger.debug("Metric {} dropped", name)
      case QueueOfferResult.Failure(e)  => logger.error(e, s"Failed publishing metric $name")
      case QueueOfferResult.QueueClosed => throw new Exception("Failed publishing metric to closed carbon client")
    }
  }

  override def close(): Unit = {
    queue.complete()
    Await.result(queue.watchCompletion(), Duration.Inf)
  }
}
Example 13
Source File: Main.scala From reactive-kafka-microservice-template with Apache License 2.0
package com.omearac

import akka.actor.{ActorSystem, Props}
import akka.event.Logging
import akka.http.scaladsl.Http
import akka.http.scaladsl.Http.ServerBinding
import com.omearac.consumers.ConsumerStreamManager.InitializeConsumerStream
import com.omearac.consumers.{ConsumerStreamManager, DataConsumer, EventConsumer}
import com.omearac.http.HttpService
import com.omearac.producers.ProducerStreamManager.InitializeProducerStream
import com.omearac.producers.{DataProducer, EventProducer, ProducerStreamManager}
import com.omearac.settings.Settings
import com.omearac.shared.AkkaStreams
import com.omearac.shared.KafkaMessages.{ExampleAppEvent, KafkaMessage}

import scala.concurrent.{Await, Future}
import scala.concurrent.duration._
import scala.io.StdIn

object Main extends App with HttpService with AkkaStreams {
  implicit val system = ActorSystem("akka-reactive-kafka-app")
  val log = Logging(system, this.getClass.getName)

  //Start the akka-http server and listen for http requests
  val akkaHttpServer = startAkkaHTTPServer()

  //Create the Producer Stream Manager and Consumer Stream Manager
  val producerStreamManager = system.actorOf(Props(new ProducerStreamManager), "producerStreamManager")
  val consumerStreamManager = system.actorOf(Props(new ConsumerStreamManager), "consumerStreamManager")

  //Create actor to publish event messages to kafka stream.
  val eventProducer = system.actorOf(EventProducer.props, "eventProducer")
  producerStreamManager ! InitializeProducerStream(eventProducer, ExampleAppEvent)

  //Create actor to consume event messages from kafka stream.
  val eventConsumer = system.actorOf(EventConsumer.props, "eventConsumer")
  consumerStreamManager ! InitializeConsumerStream(eventConsumer, ExampleAppEvent)

  //Create actor to publish data messages to kafka stream.
  val dataProducer = system.actorOf(DataProducer.props, "dataProducer")
  producerStreamManager ! InitializeProducerStream(dataProducer, KafkaMessage)

  //Create actor to consume data messages from kafka stream.
  val dataConsumer = system.actorOf(DataConsumer.props, "dataConsumer")
  consumerStreamManager ! InitializeConsumerStream(dataConsumer, KafkaMessage)

  //Shutdown
  shutdownApplication()

  private def startAkkaHTTPServer(): Future[ServerBinding] = {
    val settings = Settings(system).Http
    val host = settings.host

    println(s"Specify the TCP port you want to host the HTTP server at (e.g. 8001, 8080..etc)? \nHit Return when finished:")
    val portNum = StdIn.readInt()

    println(s"Waiting for http requests at http://$host:$portNum/")
    Http().bindAndHandle(routes, host, portNum)
  }

  private def shutdownApplication(): Unit = {
    scala.sys.addShutdownHook({
      println("Terminating the Application...")
      akkaHttpServer.flatMap(_.unbind())
      system.terminate()
      Await.result(system.whenTerminated, 30 seconds)
      println("Application Terminated")
    })
  }
}
Example 14
Source File: ConsumerStreamManager.scala From reactive-kafka-microservice-template with Apache License 2.0
package com.omearac.consumers

import akka.actor._
import akka.event.Logging
import akka.kafka.scaladsl.Consumer.Control
import com.omearac.consumers.ConsumerStreamManager.{InitializeConsumerStream, TerminateConsumerStream}
import com.omearac.settings.Settings
import com.omearac.shared.EventMessages.{ActivatedConsumerStream, TerminatedConsumerStream}
import com.omearac.shared.JsonMessageConversion.Conversion
import com.omearac.shared.KafkaMessages.{ExampleAppEvent, KafkaMessage}

import scala.collection.mutable

object ConsumerStreamManager {
  //Command Messages
  case class InitializeConsumerStream(consumerActorRef: ActorRef, msgType: Any)
  case class TerminateConsumerStream(kafkaTopic: String)

  def props: Props = Props(new ConsumerStreamManager)
}

class ConsumerStreamManager extends Actor with ConsumerStream {
  implicit val system = context.system
  val log = Logging(system, this.getClass.getName)

  //Once the stream is created, we store its reference and associated kafka topic so we can shut it down on command
  var activeConsumerStreams: mutable.Map[String, Control] = mutable.Map()

  //Get Kafka Consumer Config Settings
  val settings = Settings(system).KafkaConsumers

  def receive: Receive = {
    case InitializeConsumerStream(consumerActorRef, KafkaMessage) =>
      //Get consumer properties corresponding to that which subscribes to message type KafkaMessage
      val consumerProperties = settings.KafkaConsumerInfo("KafkaMessage")
      startConsumerStream[KafkaMessage](consumerActorRef, consumerProperties)

    case InitializeConsumerStream(consumerActorRef, ExampleAppEvent) =>
      //Get consumer properties corresponding to that which subscribes to the message type ExampleAppEvent
      val consumerProperties = settings.KafkaConsumerInfo("ExampleAppEvent")
      startConsumerStream[ExampleAppEvent](consumerActorRef, consumerProperties)

    case TerminateConsumerStream(kafkaTopic) => terminateConsumerStream(sender, kafkaTopic)

    case other => log.error(s"Consumer Stream Manager got unknown message: $other")
  }

  def startConsumerStream[msgType: Conversion](consumerActorSink: ActorRef, consumerProperties: Map[String, String]) = {
    val streamSource = createStreamSource(consumerProperties)
    val streamFlow = createStreamFlow[msgType]
    val streamSink = createStreamSink(consumerActorSink)
    val consumerStream = streamSource.via(streamFlow).to(streamSink).run()

    //Add the active consumer stream reference and topic to the active stream collection
    val kafkaTopic = consumerProperties("subscription-topic")
    activeConsumerStreams += kafkaTopic -> consumerStream

    //Tell the consumer actor sink the stream has been started for the kafka topic and publish the event
    consumerActorSink ! ActivatedConsumerStream(kafkaTopic)
    publishLocalEvent(ActivatedConsumerStream(kafkaTopic))
  }

  def terminateConsumerStream(consumerActorSink: ActorRef, kafkaTopic: String) = {
    try {
      println(s"ConsumerStreamManager got TerminateStream command for topic: $kafkaTopic. Terminating stream...")
      val stream = activeConsumerStreams(kafkaTopic)
      val stopped = stream.stop

      stopped.onComplete {
        case _ =>
          stream.shutdown()
          //Remove the topic name from activeConsumerStreams collection
          activeConsumerStreams -= kafkaTopic
          //Publish an app event that the stream was killed. The stream will send an onComplete message to the Sink
          publishLocalEvent(TerminatedConsumerStream(kafkaTopic))
          println(s"Terminated stream for topic: $kafkaTopic.")
      }
    } catch {
      case e: NoSuchElementException =>
        consumerActorSink ! "STREAM_DONE"
        log.info(s"Stream Consumer in consuming mode but no stream to consume from: ($consumerActorSink,$kafkaTopic)")
      case e: Exception =>
        log.error(s"Exception during manual termination of the Consumer Stream for topic $kafkaTopic : $e")
    }
  }
}
Example 15
Source File: EventProducer.scala From reactive-kafka-microservice-template with Apache License 2.0
package com.omearac.producers

import akka.actor.{Actor, Props}
import akka.event.Logging
import akka.stream.scaladsl.SourceQueueWithComplete
import com.omearac.shared.EventMessages.ActivatedProducerStream
import com.omearac.shared.EventSourcing
import com.omearac.shared.KafkaMessages.ExampleAppEvent

object EventProducer {
  def props: Props = Props(new EventProducer)
}

class EventProducer extends Actor with EventSourcing {
  import context._

  implicit val system = context.system
  val log = Logging(system, this.getClass.getName)

  var producerStream: SourceQueueWithComplete[Any] = null
  val subscribedMessageTypes = Seq(classOf[ExampleAppEvent])

  override def preStart(): Unit = {
    super.preStart()
    subscribedMessageTypes.foreach(system.eventStream.subscribe(self, _))
  }

  override def postStop(): Unit = {
    subscribedMessageTypes.foreach(system.eventStream.unsubscribe(self, _))
    super.postStop()
  }

  def receive: Receive = {
    case ActivatedProducerStream(streamRef, _) =>
      producerStream = streamRef
      become(publishEvent)

    case msg: ExampleAppEvent => if (producerStream == null) self ! msg else producerStream.offer(msg)
    case other => log.error("EventProducer got the unknown message while in idle: " + other)
  }

  def publishEvent: Receive = {
    case msg: ExampleAppEvent => producerStream.offer(msg)
    case other => log.error("EventProducer got the unknown message while producing: " + other)
  }
}
Example 16
Source File: DataProducer.scala From reactive-kafka-microservice-template with Apache License 2.0
package com.omearac.producers

import akka.actor._
import akka.event.Logging
import akka.stream.scaladsl.SourceQueueWithComplete
import com.omearac.producers.DataProducer.PublishMessages
import com.omearac.shared.EventMessages.{ActivatedProducerStream, MessagesPublished}
import com.omearac.shared.EventSourcing
import com.omearac.shared.KafkaMessages.KafkaMessage

object DataProducer {
  //Command Messages
  case class PublishMessages(numberOfMessages: Int)

  def props: Props = Props(new DataProducer)
}

class DataProducer extends Actor with EventSourcing {
  import context._

  implicit val system = context.system
  val log = Logging(system, this.getClass.getName)

  var producerStream: SourceQueueWithComplete[Any] = null

  def receive: Receive = {
    case ActivatedProducerStream(streamRef, kafkaTopic) =>
      producerStream = streamRef
      become(publishData)

    case msg: PublishMessages => if (producerStream == null) self ! msg
    case other => log.error("DataProducer got the unknown message while in idle: " + other)
  }

  def publishData: Receive = {
    case PublishMessages(numberOfMessages) =>
      for (i <- 1 to numberOfMessages) {
        val myPublishableMessage = KafkaMessage(timetag, " send me to kafka, yo!", i)
        producerStream.offer(myPublishableMessage)
      }

      //Tell the akka-http front end that messages were sent
      sender() ! MessagesPublished(numberOfMessages)
      publishLocalEvent(MessagesPublished(numberOfMessages))

    case other => log.error("DataProducer got the unknown message while producing: " + other)
  }
}
Example 17
Source File: HTTPInterfaceSpec.scala From reactive-kafka-microservice-template with Apache License 2.0
package akka

import akka.event.Logging
import akka.http.scaladsl.testkit.ScalatestRouteTest
import akka.stream.QueueOfferResult
import akka.stream.QueueOfferResult.Enqueued
import akka.stream.scaladsl.SourceQueueWithComplete
import akka.testkit.{TestActorRef, TestProbe}
import com.omearac.consumers.{DataConsumer, EventConsumer}
import com.omearac.http.routes.{ConsumerCommands, ProducerCommands}
import com.omearac.producers.DataProducer
import org.scalatest.{Matchers, WordSpec}

import scala.concurrent.Future

class HTTPInterfaceSpec extends WordSpec
    with Matchers with ScalatestRouteTest
    with ConsumerCommands with ProducerCommands {

  val log = Logging(system, this.getClass.getName)

  //Mocks for DataConsumer Tests
  val dataConsumer = TestActorRef(new DataConsumer)
  val manager = TestProbe()
  dataConsumer.underlyingActor.consumerStreamManager = manager.ref

  //Mocks for EventConsumer Tests
  val eventConsumer = TestActorRef(new EventConsumer)
  eventConsumer.underlyingActor.consumerStreamManager = manager.ref

  //Mocks for DataProducer Tests
  val dataProducer = TestActorRef(new DataProducer)
  val mockProducerStream: SourceQueueWithComplete[Any] = new SourceQueueWithComplete[Any] {
    override def complete(): Unit = println("complete")

    override def fail(ex: Throwable): Unit = println("fail")

    override def offer(elem: Any): Future[QueueOfferResult] = Future { Enqueued }

    override def watchCompletion(): Future[Done] = Future { Done }
  }

  "The HTTP interface to control the DataConsumerStream" should {
    "return a Already Stopped message for GET requests to /data_consumer/stop" in {
      Get("/data_consumer/stop") ~> dataConsumerHttpCommands ~> check {
        responseAs[String] shouldEqual "Data Consumer Stream Already Stopped"
      }
    }

    "return a Stream Started response for GET requests to /data_consumer/start" in {
      Get("/data_consumer/start") ~> dataConsumerHttpCommands ~> check {
        responseAs[String] shouldEqual "Data Consumer Stream Started"
      }
    }
  }

  "The HTTP interface to control the EventConsumerStream" should {
    "return a Already Stopped message for GET requests to /event_consumer/stop" in {
      Get("/event_consumer/stop") ~> eventConsumerHttpCommands ~> check {
        responseAs[String] shouldEqual "Event Consumer Stream Already Stopped"
      }
    }

    "return a Stream Started response for GET requests to /data_consumer/start" in {
      Get("/event_consumer/start") ~> eventConsumerHttpCommands ~> check {
        responseAs[String] shouldEqual "Event Consumer Stream Started"
      }
    }
  }

  "The HTTP interface to tell the DataProducer Actor to publish messages to Kafka" should {
    "return a Messages Produced message for GET requests to /data_producer/produce/10" in {
      dataProducer.underlyingActor.producerStream = mockProducerStream
      val producing = dataProducer.underlyingActor.publishData
      dataProducer.underlyingActor.context.become(producing)

      Get("/data_producer/produce/10") ~> producerHttpCommands ~> check {
        responseAs[String] shouldEqual "10 messages Produced as Ordered, Boss!"
      }
    }
  }
}
Example 18
Source File: BasicRasService.scala From openwhisk with Apache License 2.0
package org.apache.openwhisk.http

import akka.event.Logging
import org.apache.openwhisk.common.{MetricsRoute, TransactionId}

trait BasicRasService extends BasicHttpService {

  override def routes(implicit transid: TransactionId) = ping ~ MetricsRoute()

  override def loglevelForRoute(route: String): Logging.LogLevel = {
    if (route == "/ping" || route == "/metrics") {
      Logging.DebugLevel
    } else {
      super.loglevelForRoute(route)
    }
  }

  val ping = path("ping") {
    get { complete("pong") }
  }
}
Example 19
Source File: ServiceApp.scala From BusFloatingData with Apache License 2.0
package de.nierbeck.floating.data.server

import akka.actor.{ActorRef, ActorSystem, Props}
import akka.event.Logging
import akka.http.scaladsl.Http
import akka.http.scaladsl.model.HttpMethods._
import akka.http.scaladsl.model.ws.UpgradeToWebSocket
import akka.http.scaladsl.model.{HttpRequest, HttpResponse, Uri}
import akka.stream.ActorMaterializer
import de.nierbeck.floating.data.server.actors.websocket.{FLINK, RouterActor, SPARK, TiledVehiclesFromKafkaActor}

import scala.concurrent.Await
import scala.concurrent.duration.Duration
import scala.util.{Failure, Success}

object ServiceApp extends RestService {

  import ServiceConfig._
  import system.dispatcher

  implicit val system = ActorSystem("service-api-http")
  implicit val mat = ActorMaterializer()

  override val logger = Logging(system, getClass.getName)
  override val session = CassandraConnector.connect()

  def main(args: Array[String]): Unit = {

    val router: ActorRef = system.actorOf(Props[RouterActor], "router")
    val sparkKafkaConsumer: ActorRef =
      system.actorOf(TiledVehiclesFromKafkaActor.props(router, "tiledVehicles", SPARK), "Kafka-Consumer-Spark")
    val flinkKafkaConsumer: ActorRef =
      system.actorOf(TiledVehiclesFromKafkaActor.props(router, "flinkTiledVehicles", FLINK), "Kafka-Consumer-Flink")

    val requestHandler: HttpRequest => HttpResponse = {
      case req @ HttpRequest(GET, Uri.Path("/ws/vehicles"), _, _, _) =>
        req.header[UpgradeToWebSocket] match {
          case Some(upgrade) => upgrade.handleMessages(Flows.graphFlowWithStats(router))
          case None => HttpResponse(400, entity = "Not a valid websocket request!")
        }
      case _: HttpRequest => HttpResponse(404, entity = "Unknown resource!")
    }

    Http()
      .bindAndHandle(route(), serviceInterface, servicePort)
      .onComplete {
        case Success(_) => logger.info(s"Successfully bound to $serviceInterface:$servicePort")
        case Failure(e) => logger.error(s"Failed !!!! ${e.getMessage}")
      }

    Http()
      .bindAndHandleSync(requestHandler, serviceInterface, 8001)
      .onComplete {
        case Success(_) => logger.info(s"Successfully started Server to $serviceInterface:8001")
        case Failure(e) => logger.error(s"Failed !!!! ${e.getMessage}")
      }

    Await.ready(system.whenTerminated, Duration.Inf)
    CassandraConnector.close(session)
  }
}
Example 20
Source File: SchemaManagerRouter.scala From schedoscope with Apache License 2.0
package org.schedoscope.scheduler.actors

import akka.actor.SupervisorStrategy._
import akka.actor.{Actor, ActorInitializationException, ActorRef, OneForOneStrategy, Props}
import akka.event.Logging
import akka.routing.RoundRobinPool
import org.schedoscope.conf.SchedoscopeSettings
import org.schedoscope.scheduler.messages._
import org.schedoscope.scheduler.utils.BackOffSupervision
import org.schedoscope.schema.RetryableSchemaManagerException

import scala.concurrent.duration._

// The class header and field declarations are missing from the original listing;
// they are reconstructed here (names and types inferred from the method bodies)
// so the snippet compiles.
class SchemaManagerRouter(settings: SchedoscopeSettings) extends Actor {

  import context._

  val log = Logging(system, this) // reconstructed; the import suggests a logger was used

  var metadataLoggerActor: ActorRef = _
  var partitionCreatorActor: ActorRef = _

  // the construction of this helper is not shown in the original listing
  val metastoreActorsBackOffSupervision: BackOffSupervision = ???

  override val supervisorStrategy =
    OneForOneStrategy(maxNrOfRetries = -1) {
      case _: RetryableSchemaManagerException => Restart
      case _: ActorInitializationException => Restart
      case _ => Escalate
    }

  override def preStart {
    metadataLoggerActor = actorOf(
      MetadataLoggerActor.props(settings.jdbcUrl, settings.metastoreUri, settings.kerberosPrincipal),
      "metadata-logger")
    partitionCreatorActor = actorOf(
      PartitionCreatorActor.props(settings.jdbcUrl, settings.metastoreUri, settings.kerberosPrincipal, self)
        .withRouter(new RoundRobinPool(settings.metastoreConcurrency)),
      "partition-creator")
  }

  def scheduleTick(managedActor: ActorRef, backOffTime: FiniteDuration) {
    system.scheduler.scheduleOnce(backOffTime, managedActor, "tick")
  }

  def manageActorLifecycle(metaActor: ActorRef) {
    val slot = settings.backOffSlotTime millis
    val delay = settings.backOffMinimumDelay millis

    val backOffTime = metastoreActorsBackOffSupervision.manageActorLifecycle(
      managedActor = metaActor,
      backOffSlotTime = slot,
      backOffMinimumDelay = delay)

    scheduleTick(metaActor, backOffTime)
  }

  def receive = {
    case "tick" => manageActorLifecycle(sender)

    case m: CheckOrCreateTables => partitionCreatorActor forward m

    case a: AddPartitions => partitionCreatorActor forward a

    case s: SetViewVersion => metadataLoggerActor forward s

    case l: LogTransformationTimestamp => metadataLoggerActor forward l

    case g: GetMetaDataForMaterialize => partitionCreatorActor forward g
  }
}

object SchemaManagerRouter {
  def props(settings: SchedoscopeSettings) =
    Props(classOf[SchemaManagerRouter], settings).withDispatcher("akka.actor.schema-manager-dispatcher")
}
Example 21
Source File: LowLevelServer.scala From akka-http-test with Apache License 2.0
package com.github.dnvriend.component.lowlevelserver

import akka.NotUsed
import akka.actor.{ ActorSystem, Props }
import akka.event.{ Logging, LoggingAdapter }
import akka.http.scaladsl.Http
import akka.http.scaladsl.model._
import akka.pattern.ask
import akka.stream.scaladsl.{ Flow, Sink, Source }
import akka.stream.{ ActorMaterializer, Materializer }
import akka.util.Timeout
import com.github.dnvriend.component.lowlevelserver.dto.{ Person, PersonWithId }
import com.github.dnvriend.component.lowlevelserver.marshaller.Marshaller
import com.github.dnvriend.component.lowlevelserver.repository.PersonRepository
import spray.json.{ DefaultJsonProtocol, _ }

import scala.concurrent.duration._
import scala.concurrent.{ ExecutionContext, Future }

class LowLevelServer(implicit val system: ActorSystem, mat: Materializer, ec: ExecutionContext, log: LoggingAdapter, timeout: Timeout) extends DefaultJsonProtocol with Marshaller {
  val personDb = system.actorOf(Props[PersonRepository])

  def debug(t: Any)(implicit log: LoggingAdapter = null): Unit =
    if (Option(log).isEmpty) println(t) else log.debug(t.toString)

  def http200Okay(req: HttpRequest): HttpResponse =
    HttpResponse(StatusCodes.OK)

  def http200AsyncOkay(req: HttpRequest): Future[HttpResponse] =
    Future(http200Okay(req))

  val http200OkayFlow: Flow[HttpRequest, HttpResponse, NotUsed] = Flow[HttpRequest].map { req =>
    HttpResponse(StatusCodes.OK)
  }

  val serverSource: Source[Http.IncomingConnection, Future[Http.ServerBinding]] =
    Http().bind(interface = "localhost", port = 8080)

  val binding: Future[Http.ServerBinding] = serverSource.to(Sink.foreach { conn =>
    //    conn.handleWith(http200OkayFlow)
    //    conn.handleWithSyncHandler(http200Okay)
    //    conn.handleWithAsyncHandler(http200AsyncOkay, 8)
    conn.handleWithAsyncHandler(personRequestHandler)
  }).run()

  def personRequestHandler(req: HttpRequest): Future[HttpResponse] = req match {
    case HttpRequest(HttpMethods.GET, Uri.Path("/api/person"), _, _, _) => for {
      xs <- (personDb ? "findAll").mapTo[List[PersonWithId]]
      entity = HttpEntity(ContentTypes.`application/json`, xs.toJson.compactPrint)
    } yield HttpResponse(StatusCodes.OK, entity = entity)
    case HttpRequest(HttpMethods.POST, Uri.Path("/api/person"), _, ent, _) => for {
      strictEntity <- ent.toStrict(1.second)
      person <- (personDb ? strictEntity.data.utf8String.parseJson.convertTo[Person]).mapTo[PersonWithId]
    } yield HttpResponse(StatusCodes.OK, entity = person.toJson.compactPrint)
    case req =>
      req.discardEntityBytes()
      Future.successful(HttpResponse(StatusCodes.NotFound))
  }
}

object LowLevelServerLauncher extends App with DefaultJsonProtocol {
  // setting up some machinery
  implicit val system: ActorSystem = ActorSystem()
  implicit val mat: Materializer = ActorMaterializer()
  implicit val ec: ExecutionContext = system.dispatcher
  implicit val log: LoggingAdapter = Logging(system, this.getClass)
  implicit val timeout: Timeout = Timeout(10.seconds)

  new LowLevelServer()
}
Example 22
Source File: SimpleServer.scala From akka-http-test with Apache License 2.0
package com.github.dnvriend.component.simpleserver

import javax.inject.Inject

import akka.actor.ActorSystem
import akka.event.{ Logging, LoggingAdapter }
import akka.http.scaladsl._
import akka.pattern.CircuitBreaker
import akka.stream.{ ActorMaterializer, Materializer }
import com.github.dnvriend.component.repository.PersonRepository
import com.github.dnvriend.component.simpleserver.route._
import com.google.inject.Singleton
import play.api.Configuration

import scala.concurrent.ExecutionContext
import scala.concurrent.duration._

@Singleton
class SimpleServer @Inject() (personDao: PersonRepository, cb: CircuitBreaker, interface: String, port: Int)(implicit system: ActorSystem, mat: Materializer, ec: ExecutionContext) {
  Http().bindAndHandle(SimpleServerRestRoutes.routes(personDao, cb), interface, port)
}

object SimpleServerLauncher extends App {
  implicit val system: ActorSystem = ActorSystem()
  implicit val mat: Materializer = ActorMaterializer()
  implicit val ec: ExecutionContext = system.dispatcher
  implicit val log: LoggingAdapter = Logging(system, this.getClass)
  val maxFailures: Int = 3
  val callTimeout: FiniteDuration = 1.seconds
  val resetTimeout: FiniteDuration = 10.seconds
  val cb = new CircuitBreaker(system.scheduler, maxFailures, callTimeout, resetTimeout)
  val config: play.api.Configuration = Configuration(system.settings.config)

  sys.addShutdownHook {
    system.terminate()
  }

  new SimpleServer(new PersonRepository, cb, config.getString("http.interface").getOrElse("0.0.0.0"), config.getInt("http.port").getOrElse(8080))
}
Example 23
Source File: TimerSchedulerImpl.scala From perf_tester with Apache License 2.0
package akka.actor

import scala.concurrent.duration.FiniteDuration

import akka.annotation.InternalApi
import akka.event.Logging
import akka.util.OptionVal

@InternalApi private[akka] class TimerSchedulerImpl(ctx: ActorContext) extends TimerScheduler {
  import TimerSchedulerImpl._

  private val log = Logging(ctx.system, classOf[TimerScheduler])
  private var timers: Map[Any, Timer] = Map.empty
  private var timerGen = 0

  private def nextTimerGen(): Int = {
    timerGen += 1
    timerGen
  }

  override def startPeriodicTimer(key: Any, msg: Any, interval: FiniteDuration): Unit =
    startTimer(key, msg, interval, repeat = true)

  override def startSingleTimer(key: Any, msg: Any, timeout: FiniteDuration): Unit =
    startTimer(key, msg, timeout, repeat = false)

  private def startTimer(key: Any, msg: Any, timeout: FiniteDuration, repeat: Boolean): Unit = {
    timers.get(key) match {
      case Some(t) ⇒ cancelTimer(t)
      case None ⇒
    }
    val nextGen = nextTimerGen()

    val timerMsg = TimerMsg(key, nextGen, this)
    val task =
      if (repeat)
        ctx.system.scheduler.schedule(timeout, timeout, ctx.self, timerMsg)(ctx.dispatcher)
      else
        ctx.system.scheduler.scheduleOnce(timeout, ctx.self, timerMsg)(ctx.dispatcher)

    val nextTimer = Timer(key, msg, repeat, nextGen, task)
    log.debug("Start timer [{}] with generation [{}]", key, nextGen)
    timers = timers.updated(key, nextTimer)
  }

  override def isTimerActive(key: Any): Boolean = timers.contains(key)

  override def cancel(key: Any): Unit = {
    timers.get(key) match {
      case None ⇒ // already removed/canceled
      case Some(t) ⇒ cancelTimer(t)
    }
  }

  private def cancelTimer(timer: Timer): Unit = {
    log.debug("Cancel timer [{}] with generation [{}]", timer.key, timer.generation)
    timer.task.cancel()
    timers -= timer.key
  }

  override def cancelAll(): Unit = {
    log.debug("Cancel all timers")
    timers.valuesIterator.foreach { timer ⇒
      timer.task.cancel()
    }
    timers = Map.empty
  }

  def interceptTimerMsg(timerMsg: TimerMsg): OptionVal[AnyRef] = {
    timers.get(timerMsg.key) match {
      case None ⇒
        // it was from canceled timer that was already enqueued in mailbox
        log.debug("Received timer [{}] that has been removed, discarding", timerMsg.key)
        OptionVal.None // message should be ignored
      case Some(t) ⇒
        if (timerMsg.owner ne this) {
          // after restart, it was from an old instance that was enqueued in mailbox before canceled
          log.debug("Received timer [{}] from old restarted instance, discarding", timerMsg.key)
          OptionVal.None // message should be ignored
        } else if (timerMsg.generation == t.generation) {
          // valid timer
          if (!t.repeat)
            timers -= t.key
          OptionVal.Some(t.msg.asInstanceOf[AnyRef])
        } else {
          // it was from an old timer that was enqueued in mailbox before canceled
          log.debug(
            "Received timer [{}] from old generation [{}], expected generation [{}], discarding",
            timerMsg.key, timerMsg.generation, t.generation)
          OptionVal.None // message should be ignored
        }
    }
  }
}
Example 24
Source File: ScanRequestActor.scala From project-matt with MIT License
package org.datafy.aws.app.matt.app

import akka.actor.{Actor, Props, PoisonPill}
import akka.event.Logging
import org.datafy.aws.app.matt.classifiers.BaseClassifier
import org.datafy.aws.app.matt.models.FullScanStats

class ScanRequestActor(val bucketName: String = null,
                       val s3Prefix: Option[String] = None) extends Actor {

  val log = Logging(context.system, this)
  var execCount = 1

  import ScanRequestActor._

  if (bucketName == null) {
    throw new NullPointerException(s"Constructor AWS S3 BucketName cannot be null or empty")
    context.system.terminate() // note: unreachable after the throw above
  }

  val scanResultsActor = context.actorOf(ScanResultsActor.props, "scanResultsActor")

  def receive = {
    case Initialize =>
      if (execCount > 0) {
        execCount -= 1
        log.info(s"Initialized S3 Scan Request on " +
          s"Bucket: ${bucketName} and Prefix: ${s3Prefix.orNull} ")
        // do bucket scanning here and send message to ScanResultsActor
        val scanRequestMessage = BaseClassifier.setS3ScanInputPath(bucketName, s3Prefix.orNull)
        self ! ScanResultsActor.ScanResultsMessage("Done")
      } else {
        self ! PoisonPill
      }
  }
}

object ScanRequestActor {
  val props: Props = Props[ScanRequestActor]

  case object Initialize
  case class ScanRequestMessage(fullScanStats: FullScanStats)
}
Example 25
Source File: ScanResultsActor.scala From project-matt with MIT License
package org.datafy.aws.app.matt.app

import akka.actor.{Actor, ActorLogging, PoisonPill, Props}
import akka.event.Logging

class ScanResultsActor extends Actor {

  val log = Logging(context.system, this)

  def receive = {
    case text =>
      // save results here and send summary report
      log.info(s"New message from executing actor - ScanRequestActor: ${text}")
      sender() ! PoisonPill
      self ! PoisonPill
  }
}

object ScanResultsActor {
  val props = Props[ScanResultsActor]

  case object Initialize
  case class ScanResultsMessage(text: String)
}
Example 26
Source File: GithubService.scala From scabot with Apache License 2.0
package scabot
package github

import akka.event.Logging

import scala.util.{Success, Failure}

trait GithubService extends GithubApi {

  private lazy val UserRepo = """([^/]+)/(.+)""".r

  def notifyProject(ev: ProjectMessage, repository: Repository): String = {
    val UserRepo(user, repo) = repository.full_name
    val log = s"Processing $ev for $user/$repo"
    system.log.info(log)
    broadcast(user, repo)(ev)
    log
  }

  def pullRequestEvent(ev: PullRequestEvent): String = ev match {
    case PullRequestEvent(action, number, pull_request) =>
      notifyProject(ev, ev.pull_request.base.repo)
  }

  def pushEvent(ev: PushEvent): String = ev match {
    case PushEvent(_, _, repository) =>
      notifyProject(ev, repository)
  }

  def issueCommentEvent(ev: IssueCommentEvent): String = ev match {
    case IssueCommentEvent(action, issue, comment, repository) =>
      notifyProject(ev, repository)
  }

  def pullRequestReviewCommentEvent(ev: PullRequestReviewCommentEvent): String = ev match {
    case PullRequestReviewCommentEvent(action, pull_request, comment, repository) =>
      notifyProject(ev, repository)
  }
}
Example 27
Source File: Main.scala From sns with Apache License 2.0
package me.snov.sns

import akka.actor.ActorSystem
import akka.event.{Logging, LoggingAdapter}
import akka.http.scaladsl.Http
import akka.http.scaladsl.server.Directives._
import akka.http.scaladsl.server._
import akka.stream.ActorMaterializer
import akka.util.Timeout
import com.typesafe.config.ConfigFactory
import me.snov.sns.actor._
import me.snov.sns.api._
import me.snov.sns.service.FileDbService
import me.snov.sns.util.ToStrict

import scala.concurrent.ExecutionContext
import scala.concurrent.duration._
import scala.util.Properties

object Main extends App with ToStrict {
  implicit val system = ActorSystem("sns")
  implicit val executor: ExecutionContext = system.dispatcher
  implicit val materializer: ActorMaterializer = ActorMaterializer()
  implicit val logger: LoggingAdapter = Logging(system, getClass)
  implicit val timeout = new Timeout(1.second)

  val config = ConfigFactory.load()
  val dbService = new FileDbService(Properties.envOrElse("DB_PATH", config.getString("db.path")))

  val dbActor = system.actorOf(DbActor.props(dbService), name = "DbActor")
  val homeActor = system.actorOf(HomeActor.props, name = "HomeActor")
  val subscribeActor = system.actorOf(SubscribeActor.props(dbActor), name = "SubscribeActor")
  val publishActor = system.actorOf(PublishActor.props(subscribeActor), name = "PublishActor")

  val routes: Route =
    toStrict {
      TopicApi.route(subscribeActor) ~
        SubscribeApi.route(subscribeActor) ~
        PublishApi.route(publishActor) ~
        HealthCheckApi.route ~
        HomeApi.route(homeActor)
    }

  logger.info("SNS v{} is starting", getClass.getPackage.getImplementationVersion)

  Http().bindAndHandle(
    handler = logRequestResult("akka-http-sns")(routes),
    interface = Properties.envOrElse("HTTP_INTERFACE", config.getString("http.interface")),
    port = Properties.envOrElse("HTTP_PORT", config.getString("http.port")).toInt
  )
}
Example 28
Source File: PublishApi.scala From sns with Apache License 2.0
package me.snov.sns.api

import akka.actor.ActorRef
import akka.event.Logging
import akka.http.scaladsl.model.HttpResponse
import akka.http.scaladsl.server.Directives._
import akka.http.scaladsl.server.Route
import akka.pattern.ask
import akka.util.Timeout
import me.snov.sns.actor.PublishActor.CmdPublish
import me.snov.sns.model.{Message, MessageAttribute, TopicNotFoundException}
import me.snov.sns.response.PublishResponse
import spray.json.DefaultJsonProtocol._
import spray.json._

import scala.concurrent.{ExecutionContext, Future}

case class InvalidTopicArnException(msg: String) extends Exception(msg)

object PublishApi {
  private val arnPattern = """([\w+_:-]{1,512})""".r

  def route(actorRef: ActorRef)(implicit timeout: Timeout, ec: ExecutionContext): Route = {
    pathSingleSlash {
      formField('Action ! "Publish") {
        formFieldSeq { fields =>
          val messageAttributes: Map[String, MessageAttribute] = MessageAttribute.parse(fields)
          formFields('TopicArn.?, 'TargetArn.?, 'MessageStructure.?, 'Message) {
            (topicArnMaybe, targetArnMaybe, messageStructure, message) =>
              try {
                topicArn(topicArnMaybe, targetArnMaybe) match {
                  case arnPattern(topic) => complete {
                    val bodies = messageStructure match {
                      case Some("json") => message.parseJson.asJsObject.convertTo[Map[String, String]]
                      case Some(_) => throw new RuntimeException("Invalid MessageStructure value")
                      case None => Map("default" -> message)
                    }
                    (actorRef ? CmdPublish(topic, bodies, messageAttributes)).collect {
                      case m: Message => PublishResponse.publish(m)
                    }.recover {
                      case t: TopicNotFoundException => PublishResponse.topicNotFound(t.getMessage)
                      case t: Throwable => HttpResponse(500, entity = t.getMessage)
                    }
                  }
                  case _ => complete(HttpResponse(400, entity = "Invalid topic ARN"))
                }
              } catch {
                case e: InvalidTopicArnException => complete(HttpResponse(400, entity = e.getMessage))
                case e: RuntimeException => complete(HttpResponse(400, entity = e.getMessage))
              }
          }
        } ~
          complete(HttpResponse(400, entity = "TopicArn is required"))
      }
    }
  }

  private def topicArn(topicArnMaybe: Option[String], targetArnMaybe: Option[String]): String = {
    topicArnMaybe.getOrElse(targetArnMaybe.getOrElse(throw InvalidTopicArnException("Neither TopicArn nor TargetArn provided")))
  }
}
Example 29
Source File: OrderServiceApp.scala From 006877 with MIT License
package aia.integration

import scala.concurrent.Future

import akka.actor.{ ActorSystem, Actor, Props }
import akka.event.Logging
import akka.util.Timeout

import akka.http.scaladsl.Http
import akka.http.scaladsl.Http.ServerBinding
import akka.http.scaladsl.server.Directives._
import akka.stream.ActorMaterializer

import com.typesafe.config.{ Config, ConfigFactory }

object OrderServiceApp extends App with RequestTimeout {
  val config = ConfigFactory.load()
  val host = config.getString("http.host")
  val port = config.getInt("http.port")

  implicit val system = ActorSystem()
  implicit val ec = system.dispatcher

  val processOrders = system.actorOf(Props(new ProcessOrders), "process-orders")

  val api = new OrderServiceApi(system, requestTimeout(config), processOrders).routes

  implicit val materializer = ActorMaterializer()
  val bindingFuture: Future[ServerBinding] = Http().bindAndHandle(api, host, port)

  val log = Logging(system.eventStream, "order-service")
  bindingFuture.map { serverBinding =>
    log.info(s"Bound to ${serverBinding.localAddress} ")
  }.failed.foreach {
    case ex: Exception =>
      log.error(ex, "Failed to bind to {}:{}!", host, port)
      system.terminate()
  }
}

trait RequestTimeout {
  import scala.concurrent.duration._

  def requestTimeout(config: Config): Timeout = {
    val t = config.getString("akka.http.server.request-timeout")
    val d = Duration(t)
    FiniteDuration(d.length, d.unit)
  }
}
Example 30
Source File: ShoppersServiceSupport.scala From 006877 with MIT License | 5 votes |
package aia.persistence.rest import com.typesafe.config.Config import scala.concurrent.Future import akka.actor._ import akka.event.Logging import akka.util.Timeout import akka.http.scaladsl.Http import akka.http.scaladsl.Http.ServerBinding import akka.stream.ActorMaterializer import aia.persistence._ trait ShoppersServiceSupport extends RequestTimeout { def startService(shoppers: ActorRef)(implicit system: ActorSystem) = { val config = system.settings.config val settings = Settings(system) val host = settings.http.host val port = settings.http.port implicit val ec = system.dispatcher //bindAndHandle requires an implicit ExecutionContext val api = new ShoppersService(shoppers, system, requestTimeout(config)).routes // the RestApi provides a Route implicit val materializer = ActorMaterializer() val bindingFuture: Future[ServerBinding] = Http().bindAndHandle(api, host, port) val log = Logging(system.eventStream, "shoppers") bindingFuture.map { serverBinding => log.info(s"Shoppers API bound to ${serverBinding.localAddress} ") }.failed.foreach { case ex: Exception => log.error(ex, "Failed to bind to {}:{}!", host, port) system.terminate() } } } trait RequestTimeout { import scala.concurrent.duration._ def requestTimeout(config: Config): Timeout = { val t = config.getString("akka.http.server.request-timeout") val d = Duration(t) FiniteDuration(d.length, d.unit) } }
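Note: the RequestTimeout trait converts the akka.http.server.request-timeout setting into an akka Timeout by parsing the duration string. A quick usage sketch, assuming the trait above is in scope; the config value is illustrative:

import akka.util.Timeout
import com.typesafe.config.ConfigFactory

object RequestTimeoutExample extends App with RequestTimeout {
  val config = ConfigFactory.parseString("akka.http.server.request-timeout = 20s")
  val timeout: Timeout = requestTimeout(config)
  println(timeout.duration) // 20 seconds
}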
Example 31
Source File: Main.scala From 006877 with MIT License | 5 votes |
package com.goticks import scala.concurrent.Future import scala.util.{Failure, Success} import akka.actor.{ ActorSystem , Actor, Props } import akka.event.Logging import akka.util.Timeout import akka.http.scaladsl.Http import akka.http.scaladsl.Http.ServerBinding import akka.http.scaladsl.server.Directives._ import akka.stream.ActorMaterializer import com.typesafe.config.{ Config, ConfigFactory } object Main extends App with RequestTimeout { val config = ConfigFactory.load() val host = config.getString("http.host") // get the host and port from the configuration val port = config.getInt("http.port") implicit val system = ActorSystem() implicit val ec = system.dispatcher // bindAndHandle is asynchronous and needs an implicit ExecutionContext val api = new RestApi(system, requestTimeout(config)).routes // the RestApi provides the HTTP routes implicit val materializer = ActorMaterializer() val bindingFuture: Future[ServerBinding] = Http().bindAndHandle(api, host, port) // start the HTTP server with the RestApi routes val log = Logging(system.eventStream, "go-ticks") bindingFuture.map { serverBinding => log.info(s"RestApi bound to ${serverBinding.localAddress} ") }.onComplete { case Success(v) => case Failure(ex) => log.error(ex, "Failed to bind to {}:{}!", host, port) system.terminate() } } trait RequestTimeout { import scala.concurrent.duration._ def requestTimeout(config: Config): Timeout = { val t = config.getString("akka.http.server.request-timeout") val d = Duration(t) FiniteDuration(d.length, d.unit) } }
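Note: log.error(ex, "Failed to bind to {}:{}!", host, port) uses the LoggingAdapter overload that takes a Throwable first, followed by a template with {} placeholders; the adapter supports up to four positional arguments. A small sketch:

import akka.actor.ActorSystem
import akka.event.Logging

object PlaceholderExample extends App {
  val system = ActorSystem("demo")
  val log = Logging(system.eventStream, "placeholders")
  val (host, port) = ("localhost", 8080)
  log.error(new RuntimeException("bind failed"), "Failed to bind to {}:{}!", host, port)
  system.terminate()
}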
Example 32
Source File: FrontendRemoteDeployWatchMain.scala From 006877 with MIT License | 5 votes |
package com.goticks import akka.actor.{ ActorRef, ActorSystem } import akka.event.Logging import com.typesafe.config.ConfigFactory object FrontendRemoteDeployWatchMain extends App with Startup { val config = ConfigFactory.load("frontend-remote-deploy") implicit val system = ActorSystem("frontend", config) val api = new RestApi() { val log = Logging(system.eventStream, "frontend-remote-watch") implicit val requestTimeout = configuredRequestTimeout(config) implicit def executionContext = system.dispatcher def createBoxOffice: ActorRef = { system.actorOf( RemoteBoxOfficeForwarder.props, RemoteBoxOfficeForwarder.name ) } } startup(api.routes) }
Example 33
Source File: Startup.scala From 006877 with MIT License | 5 votes |
package com.goticks import scala.concurrent.Future import scala.util.{Failure, Success} import akka.actor.ActorSystem import akka.event.Logging import akka.http.scaladsl.Http import akka.http.scaladsl.Http.ServerBinding import akka.http.scaladsl.server.Route import akka.stream.ActorMaterializer trait Startup extends RequestTimeout { def startup(api: Route)(implicit system: ActorSystem) = { val host = system.settings.config.getString("http.host") // get the host and port from the configuration val port = system.settings.config.getInt("http.port") startHttpServer(api, host, port) } def startHttpServer(api: Route, host: String, port: Int) (implicit system: ActorSystem) = { implicit val ec = system.dispatcher // bindAndHandle needs an implicit ExecutionContext implicit val materializer = ActorMaterializer() val bindingFuture: Future[ServerBinding] = Http().bindAndHandle(api, host, port) // start the HTTP server val log = Logging(system.eventStream, "go-ticks") bindingFuture.map { serverBinding => log.info(s"RestApi bound to ${serverBinding.localAddress} ") }.onComplete { case Success(v) => case Failure(ex) => log.error(ex, "Failed to bind to {}:{}!", host, port) system.terminate() } } }
Example 34
Source File: SingleNodeMain.scala From 006877 with MIT License | 5 votes |
package com.goticks import akka.actor.{ ActorSystem, ActorRef } import akka.event.Logging import com.typesafe.config.ConfigFactory object SingleNodeMain extends App with Startup { val config = ConfigFactory.load("singlenode") implicit val system = ActorSystem("singlenode", config) val api = new RestApi() { val log = Logging(system.eventStream, "go-ticks") implicit val requestTimeout = configuredRequestTimeout(config) implicit def executionContext = system.dispatcher def createBoxOffice: ActorRef = system.actorOf(BoxOffice.props, BoxOffice.name) } startup(api.routes) }
Example 35
Source File: FrontendMain.scala From 006877 with MIT License | 5 votes |
package com.goticks import akka.actor.{ ActorRef, ActorSystem, Props } import akka.event.Logging import com.typesafe.config.ConfigFactory object FrontendMain extends App with Startup { val config = ConfigFactory.load("frontend") implicit val system = ActorSystem("frontend", config) val api = new RestApi() { val log = Logging(system.eventStream, "frontend") implicit val requestTimeout = configuredRequestTimeout(config) implicit def executionContext = system.dispatcher def createPath(): String = { val config = ConfigFactory.load("frontend").getConfig("backend") val host = config.getString("host") val port = config.getInt("port") val protocol = config.getString("protocol") val systemName = config.getString("system") val actorName = config.getString("actor") s"$protocol://$systemName@$host:$port/$actorName" } def createBoxOffice: ActorRef = { val path = createPath() system.actorOf(Props(new RemoteLookupProxy(path)), "lookupBoxOffice") } } startup(api.routes) }
Example 36
Source File: FrontendRemoteDeployMain.scala From 006877 with MIT License | 5 votes |
package com.goticks import akka.actor.{ ActorRef, ActorSystem } import akka.event.Logging import com.typesafe.config.ConfigFactory object FrontendRemoteDeployMain extends App with Startup { val config = ConfigFactory.load("frontend-remote-deploy") implicit val system = ActorSystem("frontend", config) val api = new RestApi() { val log = Logging(system.eventStream, "frontend-remote") implicit val requestTimeout = configuredRequestTimeout(config) implicit def executionContext = system.dispatcher def createBoxOffice: ActorRef = system.actorOf(BoxOffice.props, BoxOffice.name) } startup(api.routes) }
Example 37
Source File: FanLogsApp.scala From 006877 with MIT License | 5 votes |
package aia.stream import java.nio.file.{ Files, FileSystems, Path } import scala.concurrent.Future import scala.concurrent.duration._ import akka.NotUsed import akka.actor.{ ActorSystem , Actor, Props } import akka.event.Logging import akka.stream.{ ActorMaterializer, ActorMaterializerSettings, Supervision } import akka.http.scaladsl.Http import akka.http.scaladsl.Http.ServerBinding import akka.http.scaladsl.server.Directives._ import com.typesafe.config.{ Config, ConfigFactory } object FanLogsApp extends App { val config = ConfigFactory.load() val host = config.getString("http.host") val port = config.getInt("http.port") val logsDir = { val dir = config.getString("log-stream-processor.logs-dir") Files.createDirectories(FileSystems.getDefault.getPath(dir)) } val maxLine = config.getInt("log-stream-processor.max-line") val maxJsObject = config.getInt("log-stream-processor.max-json-object") implicit val system = ActorSystem() implicit val ec = system.dispatcher val decider : Supervision.Decider = { case _: LogStreamProcessor.LogParseException => Supervision.Resume case _ => Supervision.Stop } implicit val materializer = ActorMaterializer( ActorMaterializerSettings(system) .withSupervisionStrategy(decider) ) val api = new FanLogsApi(logsDir, maxLine, maxJsObject).routes val bindingFuture: Future[ServerBinding] = Http().bindAndHandle(api, host, port) val log = Logging(system.eventStream, "fan-logs") bindingFuture.map { serverBinding => log.info(s"Bound to ${serverBinding.localAddress} ") }.onFailure { case ex: Exception => log.error(ex, "Failed to bind to {}:{}!", host, port) system.terminate() } }
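Note: the Supervision.Decider above resumes the stream on parse errors instead of failing it, dropping the offending element. The same wiring in isolation, with a stand-in exception type (ActorMaterializer is the pre-2.6 akka-streams API used throughout these examples):

import akka.actor.ActorSystem
import akka.stream.{ActorMaterializer, ActorMaterializerSettings, Supervision}
import akka.stream.scaladsl.Source

object DeciderExample extends App {
  implicit val system = ActorSystem("demo")
  val decider: Supervision.Decider = {
    case _: NumberFormatException => Supervision.Resume // skip unparseable elements
    case _                        => Supervision.Stop
  }
  implicit val materializer = ActorMaterializer(
    ActorMaterializerSettings(system).withSupervisionStrategy(decider))

  Source(List("1", "two", "3")).map(_.toInt).runForeach(println) // prints 1 and 3
}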
Example 38
Source File: LogsApp.scala From 006877 with MIT License | 5 votes |
package aia.stream import java.nio.file.{ Files, FileSystems, Path } import scala.concurrent.Future import scala.concurrent.duration._ import akka.NotUsed import akka.actor.{ ActorSystem , Actor, Props } import akka.event.Logging import akka.stream.{ ActorMaterializer, ActorMaterializerSettings, Supervision } import akka.http.scaladsl.Http import akka.http.scaladsl.Http.ServerBinding import akka.http.scaladsl.server.Directives._ import com.typesafe.config.{ Config, ConfigFactory } object LogsApp extends App { val config = ConfigFactory.load() val host = config.getString("http.host") val port = config.getInt("http.port") val logsDir = { val dir = config.getString("log-stream-processor.logs-dir") Files.createDirectories(FileSystems.getDefault.getPath(dir)) } val maxLine = config.getInt("log-stream-processor.max-line") implicit val system = ActorSystem() implicit val ec = system.dispatcher val decider : Supervision.Decider = { case _: LogStreamProcessor.LogParseException => Supervision.Stop case _ => Supervision.Stop } implicit val materializer = ActorMaterializer( ActorMaterializerSettings(system) .withSupervisionStrategy(decider) ) val api = new LogsApi(logsDir, maxLine).routes val bindingFuture: Future[ServerBinding] = Http().bindAndHandle(api, host, port) val log = Logging(system.eventStream, "logs") bindingFuture.map { serverBinding => log.info(s"Bound to ${serverBinding.localAddress} ") }.onFailure { case ex: Exception => log.error(ex, "Failed to bind to {}:{}!", host, port) system.terminate() } }
Example 39
Source File: LogStreamProcessorApp.scala From 006877 with MIT License | 5 votes |
package aia.stream import java.nio.file.{ Files, FileSystems, Path } import scala.concurrent.Future import scala.concurrent.duration._ import akka.NotUsed import akka.actor.{ ActorSystem , Actor, Props } import akka.event.Logging import akka.stream.{ ActorMaterializer, ActorMaterializerSettings, Supervision } import akka.http.scaladsl.Http import akka.http.scaladsl.Http.ServerBinding import akka.http.scaladsl.server.Directives._ import com.typesafe.config.{ Config, ConfigFactory } object LogStreamProcessorApp extends App { val config = ConfigFactory.load() val host = config.getString("http.host") val port = config.getInt("http.port") val logsDir = { val dir = config.getString("log-stream-processor.logs-dir") Files.createDirectories(FileSystems.getDefault.getPath(dir)) } val notificationsDir = { val dir = config.getString("log-stream-processor.notifications-dir") Files.createDirectories(FileSystems.getDefault.getPath(dir)) } val metricsDir = { val dir = config.getString("log-stream-processor.metrics-dir") Files.createDirectories(FileSystems.getDefault.getPath(dir)) } val maxLine = config.getInt("log-stream-processor.max-line") val maxJsObject = config.getInt("log-stream-processor.max-json-object") implicit val system = ActorSystem() implicit val ec = system.dispatcher val decider : Supervision.Decider = { case _: LogStreamProcessor.LogParseException => Supervision.Resume case _ => Supervision.Stop } implicit val materializer = ActorMaterializer( ActorMaterializerSettings(system) .withSupervisionStrategy(decider) ) val api = new LogStreamProcessorApi(logsDir, notificationsDir, metricsDir, maxLine, maxJsObject).routes val bindingFuture: Future[ServerBinding] = Http().bindAndHandle(api, host, port) val log = Logging(system.eventStream, "processor") bindingFuture.map { serverBinding => log.info(s"Bound to ${serverBinding.localAddress} ") }.onFailure { case ex: Exception => log.error(ex, "Failed to bind to {}:{}!", host, port) system.terminate() } }
Example 40
Source File: ContentNegLogsApp.scala From 006877 with MIT License | 5 votes |
package aia.stream import java.nio.file.{ Files, FileSystems, Path } import scala.concurrent.Future import scala.concurrent.duration._ import akka.NotUsed import akka.actor.{ ActorSystem , Actor, Props } import akka.event.Logging import akka.stream.{ ActorMaterializer, ActorMaterializerSettings, Supervision } import akka.http.scaladsl.Http import akka.http.scaladsl.Http.ServerBinding import akka.http.scaladsl.server.Directives._ import com.typesafe.config.{ Config, ConfigFactory } object ContentNegLogsApp extends App { val config = ConfigFactory.load() val host = config.getString("http.host") val port = config.getInt("http.port") val logsDir = { val dir = config.getString("log-stream-processor.logs-dir") Files.createDirectories(FileSystems.getDefault.getPath(dir)) } val maxLine = config.getInt("log-stream-processor.max-line") val maxJsObject = config.getInt("log-stream-processor.max-json-object") implicit val system = ActorSystem() implicit val ec = system.dispatcher val decider : Supervision.Decider = { case _: LogStreamProcessor.LogParseException => Supervision.Stop case _ => Supervision.Stop } implicit val materializer = ActorMaterializer( ActorMaterializerSettings(system) .withSupervisionStrategy(decider) ) val api = new ContentNegLogsApi(logsDir, maxLine, maxJsObject).routes val bindingFuture: Future[ServerBinding] = Http().bindAndHandle(api, host, port) val log = Logging(system.eventStream, "content-neg-logs") bindingFuture.map { serverBinding => log.info(s"Bound to ${serverBinding.localAddress} ") }.onFailure { case ex: Exception => log.error(ex, "Failed to bind to {}:{}!", host, port) system.terminate() } }
Example 41
Source File: Server.scala From bitcoin-s with MIT License | 5 votes |
package org.bitcoins.server import akka.actor.ActorSystem import akka.event.Logging import akka.http.scaladsl._ import akka.http.scaladsl.model._ import akka.http.scaladsl.server.Directives._ import akka.http.scaladsl.server._ import akka.http.scaladsl.server.directives.DebuggingDirectives import de.heikoseeberger.akkahttpupickle.UpickleSupport._ import org.bitcoins.db.AppConfig import upickle.{default => up} import scala.concurrent.Future case class Server( conf: AppConfig, handlers: Seq[ServerRoute], rpcport: Int = 9999)(implicit system: ActorSystem) extends HttpLogger { implicit private val config: AppConfig = conf import system.dispatcher def httpSuccess[T](body: T)(implicit writer: up.Writer[T]): HttpEntity.Strict = { val response = Response(result = Some(up.writeJs(body))) HttpEntity( ContentTypes.`application/json`, up.write(response.toJsonMap) ) } def httpError( msg: String, status: StatusCode = StatusCodes.InternalServerError): HttpResponse = { val entity = { val response = Response(error = Some(msg)) HttpEntity( ContentTypes.`application/json`, up.write(response.toJsonMap) ) } HttpResponse(status = status, entity = entity) } }
Example 42
Source File: ActorSystemSpec.scala From lagom with Apache License 2.0 | 5 votes |
package com.lightbend.lagom.persistence import java.lang.reflect.Modifier import akka.actor.ActorSystem import akka.actor.CoordinatedShutdown import akka.actor.setup.ActorSystemSetup import akka.event.Logging import akka.event.LoggingAdapter import akka.testkit.ImplicitSender import akka.testkit.TestKit import com.typesafe.config.Config import com.typesafe.config.ConfigFactory import org.scalactic.CanEqual import org.scalactic.TypeCheckedTripleEquals import org.scalatest.BeforeAndAfterAll import org.scalatest.matchers.should.Matchers import org.scalatest.wordspec.AnyWordSpecLike object ActorSystemSpec { // taken from akka-testkit's AkkaSpec private def testNameFromCallStack(classToStartFrom: Class[_]): String = { def isAbstractClass(className: String): Boolean = { try { Modifier.isAbstract(Class.forName(className).getModifiers) } catch { case _: Throwable => false // yes catch everything, best effort check } } val startFrom = classToStartFrom.getName val filteredStack = Thread.currentThread.getStackTrace.iterator .map(_.getClassName) // drop until we find the first occurrence of classToStartFrom .dropWhile(!_.startsWith(startFrom)) // then continue to the next entry after classToStartFrom that makes sense .dropWhile { case `startFrom` => true case str if str.startsWith(startFrom + "$") => true // lambdas inside startFrom etc case str if isAbstractClass(str) => true case _ => false } if (filteredStack.isEmpty) throw new IllegalArgumentException(s"Couldn't find [${classToStartFrom.getName}] in call stack") // sanitize for actor system name scrubActorSystemName(filteredStack.next()) } // taken from akka-testkit's AkkaSpec private def scrubActorSystemName(name: String): String = { name .replaceFirst("""^.*\.""", "") // drop package name .replaceAll("""\$\$?\w+""", "") // drop scala anonymous functions/classes .replaceAll("[^a-zA-Z_0-9]", "_") } } abstract class ActorSystemSpec(actorSystemFactory: () => ActorSystem) extends TestKit(actorSystemFactory()) with AnyWordSpecLike with Matchers with BeforeAndAfterAll with TypeCheckedTripleEquals with ImplicitSender { def this(testName: String, config: Config) = this(() => ActorSystem(testName, config)) def this(config: Config) = this(ActorSystemSpec.testNameFromCallStack(classOf[ActorSystemSpec]), config) def this(setup: ActorSystemSetup) = this(() => ActorSystem(ActorSystemSpec.testNameFromCallStack(classOf[ActorSystemSpec]), setup)) def this() = this(ConfigFactory.empty()) override def afterAll(): Unit = { shutdown() super.afterAll() } val log: LoggingAdapter = Logging(system, this.getClass) val coordinatedShutdown: CoordinatedShutdown = CoordinatedShutdown(system) // for ScalaTest === compare of Class objects implicit def classEqualityConstraint[A, B]: CanEqual[Class[A], Class[B]] = new CanEqual[Class[A], Class[B]] { def areEqual(a: Class[A], b: Class[B]) = a == b } }
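Note: Logging(system, this.getClass) compiles because akka provides a LogSource instance for Class[_]; you can supply your own LogSource to control the source string. A minimal sketch, assuming only akka-actor:

import akka.actor.ActorSystem
import akka.event.{Logging, LogSource}

object MyComponent

object LogSourceExample extends App {
  implicit val componentLogSource: LogSource[MyComponent.type] =
    new LogSource[MyComponent.type] {
      def genString(o: MyComponent.type): String = "my-component"
    }
  val system = ActorSystem("demo")
  val log = Logging(system, MyComponent) // log source is "my-component"
  log.info("hello")
  system.terminate()
}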
Example 43
Source File: CassandraReadSideSessionProvider.scala From lagom with Apache License 2.0 | 5 votes |
package com.lightbend.lagom.internal.persistence.cassandra import akka.Done import akka.actor.ActorSystem import akka.actor.ExtendedActorSystem import akka.event.Logging import akka.persistence.cassandra.session.CassandraSessionSettings import akka.persistence.cassandra.session.scaladsl.{ CassandraSession => AkkaScaladslCassandraSession } import akka.persistence.cassandra.CassandraPluginConfig import akka.persistence.cassandra.SessionProvider import com.datastax.driver.core.Session import scala.concurrent.ExecutionContext import scala.concurrent.Future private[lagom] object CassandraReadSideSessionProvider { def apply( system: ActorSystem, settings: CassandraSessionSettings, executionContext: ExecutionContext ): AkkaScaladslCassandraSession = { import akka.persistence.cassandra.ListenableFutureConverter import akka.util.Helpers.Requiring import scala.collection.JavaConverters._ // implicit asScala conversion val cfg = settings.config val replicationStrategy: String = CassandraPluginConfig.getReplicationStrategy( cfg.getString("replication-strategy"), cfg.getInt("replication-factor"), cfg.getStringList("data-center-replication-factors").asScala.toSeq ) val keyspaceAutoCreate: Boolean = cfg.getBoolean("keyspace-autocreate") val keyspace: String = cfg .getString("keyspace") .requiring( !keyspaceAutoCreate || _ > "", "'keyspace' configuration must be defined, or use keyspace-autocreate=off" ) def init(session: Session): Future[Done] = { implicit val ec = executionContext if (keyspaceAutoCreate) { val result1 = session.executeAsync(s""" CREATE KEYSPACE IF NOT EXISTS $keyspace WITH REPLICATION = { 'class' : $replicationStrategy } """).asScala result1 .flatMap { _ => session.executeAsync(s"USE $keyspace;").asScala } .map(_ => Done) } else if (keyspace != "") session.executeAsync(s"USE $keyspace;").asScala.map(_ => Done) else Future.successful(Done) } val metricsCategory = "lagom-" + system.name // using the scaladsl API because the init function new AkkaScaladslCassandraSession( system, SessionProvider(system.asInstanceOf[ExtendedActorSystem], settings.config), settings, executionContext, Logging.getLogger(system, this.getClass), metricsCategory, init ) } }
Example 44
Source File: MarathonApiServiceDiscovery.scala From akka-management with Apache License 2.0 | 5 votes |
package akka.discovery.marathon import java.net.InetAddress import akka.actor.ActorSystem import akka.discovery._ import akka.http.scaladsl._ import akka.http.scaladsl.model._ import akka.http.scaladsl.unmarshalling.Unmarshal import akka.stream.ActorMaterializer import scala.collection.immutable.Seq import scala.concurrent.Future import scala.concurrent.duration.FiniteDuration import scala.util.Try import AppList._ import JsonFormat._ import akka.annotation.ApiMayChange import akka.discovery.ServiceDiscovery.{ Resolved, ResolvedTarget } import akka.event.Logging @ApiMayChange object MarathonApiServiceDiscovery { @ApiMayChange class MarathonApiServiceDiscovery(system: ActorSystem) extends ServiceDiscovery { import MarathonApiServiceDiscovery._ import system.dispatcher private val log = Logging(system, getClass) private val http = Http()(system) private val settings = Settings(system) private implicit val mat: ActorMaterializer = ActorMaterializer()(system) override def lookup(lookup: Lookup, resolveTimeout: FiniteDuration): Future[Resolved] = { val uri = Uri(settings.appApiUrl).withQuery( Uri.Query( "embed" -> "apps.tasks", "embed" -> "apps.deployments", "label" -> settings.appLabelQuery.format(lookup.serviceName))) val request = HttpRequest(uri = uri) log.info("Requesting seed nodes by: {}", request.uri) val portName = lookup.portName match { case Some(name) => name case None => settings.appPortName } for { response <- http.singleRequest(request) entity <- response.entity.toStrict(resolveTimeout) appList <- { log.debug("Marathon API entity: [{}]", entity.data.utf8String) val unmarshalled = Unmarshal(entity).to[AppList] unmarshalled.failed.foreach { _ => log.error( "Failed to unmarshal Marathon API response status [{}], entity: [{}], uri: [{}]", response.status.value, entity.data.utf8String, uri) } unmarshalled } } yield Resolved(lookup.serviceName, targets(appList, portName)) } }
Example 45
Source File: ClusterBootstrap.scala From akka-management with Apache License 2.0 | 5 votes |
package akka.management.cluster.bootstrap import java.util.concurrent.atomic.AtomicReference import akka.AkkaVersion import scala.concurrent.{ Future, Promise, TimeoutException } import scala.concurrent.duration._ import akka.actor.ActorSystem import akka.actor.ClassicActorSystemProvider import akka.actor.ExtendedActorSystem import akka.actor.Extension import akka.actor.ExtensionId import akka.actor.ExtensionIdProvider import akka.annotation.InternalApi import akka.cluster.Cluster import akka.discovery.{ Discovery, ServiceDiscovery } import akka.event.Logging import akka.http.scaladsl.model.Uri import akka.http.scaladsl.server.Route import akka.management.cluster.bootstrap.contactpoint.HttpClusterBootstrapRoutes import akka.management.cluster.bootstrap.internal.BootstrapCoordinator import akka.management.scaladsl.ManagementRouteProviderSettings import akka.management.scaladsl.ManagementRouteProvider final class ClusterBootstrap(implicit system: ExtendedActorSystem) extends Extension with ManagementRouteProvider { import ClusterBootstrap.Internal._ import system.dispatcher private val log = Logging(system, classOf[ClusterBootstrap]) private final val bootstrapStep = new AtomicReference[BootstrapStep](NotRunning) AkkaVersion.require("cluster-bootstrap", "2.5.27") val settings: ClusterBootstrapSettings = ClusterBootstrapSettings(system.settings.config, log) // used for initial discovery of contact points lazy val discovery: ServiceDiscovery = settings.contactPointDiscovery.discoveryMethod match { case "akka.discovery" => val discovery = Discovery(system).discovery log.info("Bootstrap using default `akka.discovery` method: {}", Logging.simpleName(discovery)) discovery case otherDiscoveryMechanism => log.info("Bootstrap using `akka.discovery` method: {}", otherDiscoveryMechanism) Discovery(system).loadServiceDiscovery(otherDiscoveryMechanism) } private val joinDecider: JoinDecider = { system.dynamicAccess .createInstanceFor[JoinDecider]( settings.joinDecider.implClass, List((classOf[ActorSystem], system), (classOf[ClusterBootstrapSettings], settings)) ) .get } private[this] val _selfContactPointUri: Promise[Uri] = Promise() override def routes(routeProviderSettings: ManagementRouteProviderSettings): Route = { log.info(s"Using self contact point address: ${routeProviderSettings.selfBaseUri}") this.setSelfContactPoint(routeProviderSettings.selfBaseUri) new HttpClusterBootstrapRoutes(settings).routes } def start(): Unit = if (Cluster(system).settings.SeedNodes.nonEmpty) { log.warning( "Application is configured with specific `akka.cluster.seed-nodes`: {}, bailing out of the bootstrap process! " + "If you want to use the automatic bootstrap mechanism, make sure to NOT set explicit seed nodes in the configuration. " + "This node will attempt to join the configured seed nodes.", Cluster(system).settings.SeedNodes.mkString("[", ", ", "]") ) } else if (bootstrapStep.compareAndSet(NotRunning, Initializing)) { log.info("Initiating bootstrap procedure using {} method...", settings.contactPointDiscovery.discoveryMethod) ensureSelfContactPoint() val bootstrapProps = BootstrapCoordinator.props(discovery, joinDecider, settings) val bootstrap = system.systemActorOf(bootstrapProps, "bootstrapCoordinator") // Bootstrap already logs in several other execution points when it can't form a cluster, and why. selfContactPoint.foreach { uri => bootstrap ! BootstrapCoordinator.Protocol.InitiateBootstrapping(uri) } } else log.warning("Bootstrap already initiated, yet start() method was called again. Ignoring.") private[bootstrap] object Internal { sealed trait BootstrapStep case object NotRunning extends BootstrapStep case object Initializing extends BootstrapStep } }
Example 46
Source File: HttpClusterBootstrapRoutes.scala From akka-management with Apache License 2.0 | 5 votes |
package akka.management.cluster.bootstrap.contactpoint import scala.concurrent.duration._ import akka.actor.ActorSystem import akka.cluster.Cluster import akka.cluster.Member import akka.event.Logging import akka.event.LoggingAdapter import akka.http.javadsl.server.directives.RouteAdapter import akka.http.scaladsl.model.HttpRequest import akka.http.scaladsl.model.Uri import akka.http.scaladsl.server.Route import akka.management.cluster.bootstrap.ClusterBootstrapSettings import akka.management.cluster.bootstrap.contactpoint.HttpBootstrapJsonProtocol.ClusterMember import akka.management.cluster.bootstrap.contactpoint.HttpBootstrapJsonProtocol.SeedNodes final class HttpClusterBootstrapRoutes(settings: ClusterBootstrapSettings) extends HttpBootstrapJsonProtocol { import akka.http.scaladsl.server.Directives._ private def routeGetSeedNodes: Route = extractClientIP { clientIp => extractActorSystem { implicit system => import akka.cluster.MemberStatus val cluster = Cluster(system) def memberToClusterMember(m: Member): ClusterMember = ClusterMember(m.uniqueAddress.address, m.uniqueAddress.longUid, m.status.toString, m.roles) val state = cluster.state // TODO shuffle the members so in a big deployment nodes start joining different ones and not all the same? val members = state.members .diff(state.unreachable) .filter(m => m.status == MemberStatus.up || m.status == MemberStatus.weaklyUp || m.status == MemberStatus.joining) .take(settings.contactPoint.httpMaxSeedNodesToExpose) .map(memberToClusterMember) val info = SeedNodes(cluster.selfMember.uniqueAddress.address, members) log.info( "Bootstrap request from {}: Contact Point returning {} seed-nodes [{}]", clientIp, members.size, members.map(_.node).mkString(", ")) complete(info) } } def getRoutes: akka.http.javadsl.server.Route = RouteAdapter(routes) private def log(implicit sys: ActorSystem): LoggingAdapter = Logging(sys, classOf[HttpClusterBootstrapRoutes]) } object ClusterBootstrapRequests { import akka.http.scaladsl.client.RequestBuilding._ def bootstrapSeedNodes(baseUri: Uri): HttpRequest = Get(baseUri + "/bootstrap/seed-nodes") }
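Note: the routes above derive the LoggingAdapter from an implicit ActorSystem, which pairs naturally with akka-http's extractActorSystem directive. A hedged sketch of the same pattern:

import akka.event.{Logging, LoggingAdapter}
import akka.http.scaladsl.server.Directives._
import akka.http.scaladsl.server.Route

object SystemLoggerRoute {
  val route: Route = extractActorSystem { system =>
    val log: LoggingAdapter = Logging(system, "seed-nodes") // string log source
    log.info("Returning seed nodes")
    complete("[]")
  }
}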
Example 47
Source File: MockDiscovery.scala From akka-management with Apache License 2.0 | 5 votes |
package akka.discovery import java.util.concurrent.atomic.AtomicReference import akka.actor.ActorSystem import akka.annotation.InternalApi import akka.discovery.ServiceDiscovery.Resolved import akka.event.Logging import scala.concurrent.Future import scala.concurrent.duration.FiniteDuration @InternalApi object MockDiscovery { private val data = new AtomicReference[Map[Lookup, () => Future[Resolved]]](Map.empty) def set(name: Lookup, to: () => Future[Resolved]): Unit = { val d = data.get() if (data.compareAndSet(d, d.updated(name, to))) () else set(name, to) // retry } def remove(name: Lookup): Unit = { val d = data.get() if (data.compareAndSet(d, d - name)) () else remove(name) // retry } } @InternalApi final class MockDiscovery(system: ActorSystem) extends ServiceDiscovery { private val log = Logging(system, getClass) override def lookup(query: Lookup, resolveTimeout: FiniteDuration): Future[Resolved] = { MockDiscovery.data.get().get(query) match { case Some(res) => val items = res() log.info("Mock-resolved [{}] to [{}:{}]", query, items, items.value) items case None => log.info("No mock-data for [{}], resolving as 'Nil'. Current mocks: {}", query, MockDiscovery.data.get()) Future.successful(Resolved(query.serviceName, Nil)) } } }
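Note: MockDiscovery's set and remove use the classic compare-and-set retry loop over an immutable Map held in an AtomicReference, so concurrent updates never lose writes. The same technique in isolation (names illustrative):

import java.util.concurrent.atomic.AtomicReference
import scala.annotation.tailrec

object CasRegistry {
  private val data = new AtomicReference[Map[String, Int]](Map.empty)

  @tailrec
  def put(key: String, value: Int): Unit = {
    val d = data.get()
    if (!data.compareAndSet(d, d.updated(key, value))) put(key, value) // lost a race, retry
  }

  def snapshot: Map[String, Int] = data.get()
}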
Example 48
Source File: CouchbasePersistenceModule.scala From akka-persistence-couchbase with Apache License 2.0 | 5 votes |
package com.lightbend.lagom.javadsl.persistence.couchbase import java.net.URI import akka.actor.ActorSystem import akka.event.Logging import akka.stream.alpakka.couchbase.javadsl.CouchbaseSession import akka.stream.alpakka.couchbase.CouchbaseSessionSettings import com.google.inject.Provider import com.lightbend.lagom.internal.javadsl.persistence.couchbase.{ CouchbasePersistentEntityRegistry, CouchbaseReadSideImpl, JavadslCouchbaseOffsetStore } import com.lightbend.lagom.internal.persistence.couchbase.{ CouchbaseConfigValidator, CouchbaseOffsetStore, ServiceLocatorAdapter, ServiceLocatorHolder } import com.lightbend.lagom.javadsl.api.ServiceLocator import com.lightbend.lagom.javadsl.persistence.PersistentEntityRegistry import com.lightbend.lagom.spi.persistence.OffsetStore import com.typesafe.config.Config import javax.inject.Inject import play.api.inject.{Binding, Injector, Module} import play.api.{Configuration, Environment} import scala.compat.java8.FutureConverters._ import scala.concurrent.duration._ import scala.concurrent.{Await, Future} import scala.util.Try class CouchbasePersistenceModule extends Module { override def bindings(environment: Environment, configuration: Configuration): Seq[Binding[_]] = Seq( bind[CouchbasePersistenceModule.InitServiceLocatorHolder].toSelf.eagerly(), bind[PersistentEntityRegistry].to[CouchbasePersistentEntityRegistry], bind[CouchbaseSession].toProvider[CouchbaseProvider], bind[CouchbaseReadSide].to[CouchbaseReadSideImpl], //TODO: add other modules similar to Cassandra // bind[CassandraReadSideSettings].toSelf, bind[CouchbaseOffsetStore].to(bind[JavadslCouchbaseOffsetStore]), bind[OffsetStore].to(bind[CouchbaseOffsetStore]) ) } private[lagom] class CouchbaseProvider @Inject() (system: ActorSystem, cfg: Config) extends Provider[CouchbaseSession] { private val log = Logging(system, classOf[CouchbaseProvider]) CouchbaseConfigValidator.validateBucket("lagom.persistence.read-side.couchbase", cfg, log) private val readSideCouchbaseConfig: Config = cfg.getConfig("lagom.persistence.read-side.couchbase") private val sessionSettings = CouchbaseSessionSettings( readSideCouchbaseConfig.getConfig("connection") ) private val bucket = readSideCouchbaseConfig.getString("bucket") // FIXME is there a way to have async component creation in lagom instead of letting every component know that the thing is async? // if not we should pass Future[CouchbaseSession] around and let the use sites mix in AsyncCouchbaseSession - but if we use // that from Lagom it needs to be made public API // FIXME this should be the Java API of CouchbaseSession, when there is one lazy val couchbase: CouchbaseSession = Await.result(CouchbaseSession.create(sessionSettings, bucket, system.dispatcher).toScala, 30.seconds) override def get(): CouchbaseSession = couchbase } private[lagom] object CouchbasePersistenceModule { class InitServiceLocatorHolder @Inject() (system: ActorSystem, injector: Injector) { def init(): Unit = Try(injector.instanceOf[ServiceLocator]).foreach { locator => ServiceLocatorHolder(system).setServiceLocator(new ServiceLocatorAdapter { override def locateAll(name: String): Future[List[URI]] = { import system.dispatcher import scala.collection.JavaConverters._ import scala.compat.java8.FutureConverters._ locator.locateAll(name).toScala.map(_.asScala.toList) } }) } } }
Example 49
Source File: CouchbasePersistenceComponents.scala From akka-persistence-couchbase with Apache License 2.0 | 5 votes |
package com.lightbend.lagom.scaladsl.persistence.couchbase import akka.event.Logging import akka.stream.alpakka.couchbase.{CouchbaseSessionRegistry, CouchbaseSessionSettings} import akka.stream.alpakka.couchbase.scaladsl.CouchbaseSession import com.lightbend.lagom.internal.persistence.couchbase.{CouchbaseConfigValidator, CouchbaseOffsetStore} import com.lightbend.lagom.internal.scaladsl.persistence.couchbase.{ CouchbasePersistentEntityRegistry, CouchbaseReadSideImpl, ScaladslCouchbaseOffsetStore } import com.lightbend.lagom.scaladsl.api.ServiceLocator import com.lightbend.lagom.scaladsl.persistence.{ PersistenceComponents, PersistentEntityRegistry, ReadSidePersistenceComponents, WriteSidePersistenceComponents } import com.lightbend.lagom.spi.persistence.OffsetStore import com.typesafe.config.Config import scala.concurrent.duration._ import scala.concurrent.Await trait ReadSideCouchbasePersistenceComponents extends ReadSidePersistenceComponents { private val log = Logging(actorSystem, classOf[ReadSideCouchbasePersistenceComponents]) CouchbaseConfigValidator.validateBucket("lagom.persistence.read-side.couchbase", configuration.underlying, log) private val readSideCouchbaseConfig: Config = configuration.underlying.getConfig("lagom.persistence.read-side.couchbase") private val sessionSettings = CouchbaseSessionSettings( readSideCouchbaseConfig.getConfig("connection") ) private val bucket = readSideCouchbaseConfig.getString("bucket") // FIXME is there a way to have async component creation in lagom instead of letting every component know that the thing is async? // if not we should pass Future[CouchbaseSession] around and let the use sites mix in AsyncCouchbaseSession - but if we use // that from Lagom it needs to be made public API lazy val couchbase: CouchbaseSession = Await.result(CouchbaseSessionRegistry(actorSystem).sessionFor(sessionSettings, bucket), 30.seconds) private[lagom] lazy val couchbaseOffsetStore: CouchbaseOffsetStore = new ScaladslCouchbaseOffsetStore(actorSystem, couchbase, readSideConfig) lazy val offsetStore: OffsetStore = couchbaseOffsetStore lazy val couchbaseReadSide: CouchbaseReadSide = new CouchbaseReadSideImpl(actorSystem, couchbase, couchbaseOffsetStore) }
Example 50
Source File: ActorSystemSpec.scala From akka-persistence-couchbase with Apache License 2.0 | 5 votes |
package com.lightbend.lagom.persistence import akka.actor.ActorSystem import akka.actor.setup.ActorSystemSetup import akka.event.{ Logging, LoggingAdapter } import akka.testkit.{ ImplicitSender, TestKit } import com.typesafe.config.{ Config, ConfigFactory } import org.scalactic.{ CanEqual, TypeCheckedTripleEquals } import org.scalatest.{ BeforeAndAfterAll, Matchers, WordSpecLike } object ActorSystemSpec { def getCallerName(clazz: Class[_]): String = { val s = (Thread.currentThread.getStackTrace map (_.getClassName) drop 1) .dropWhile(_ matches "(java.lang.Thread|.*ActorSystemSpec.?$)") val reduced = s.lastIndexWhere(_ == clazz.getName) match { case -1 ⇒ s case z ⇒ s drop (z + 1) } reduced.head.replaceFirst(""".*\.""", "").replaceAll("[^a-zA-Z_0-9]", "_") } } abstract class ActorSystemSpec(system: ActorSystem) extends TestKit(system) with WordSpecLike with Matchers with BeforeAndAfterAll with TypeCheckedTripleEquals with ImplicitSender { def this(testName: String, config: Config) = this(ActorSystem(testName, config)) def this(config: Config) = this(ActorSystemSpec.getCallerName(getClass), config) def this(setup: ActorSystemSetup) = this(ActorSystem(ActorSystemSpec.getCallerName(getClass), setup)) def this() = this(ConfigFactory.empty()) override protected def afterAll(): Unit = { shutdown() super.afterAll() } val log: LoggingAdapter = Logging(system, this.getClass) // for ScalaTest === compare of Class objects implicit def classEqualityConstraint[A, B]: CanEqual[Class[A], Class[B]] = new CanEqual[Class[A], Class[B]] { def areEqual(a: Class[A], b: Class[B]) = a == b } }
Example 51
Source File: CouchbaseConfigValidatorSpec.scala From akka-persistence-couchbase with Apache License 2.0 | 5 votes |
package com.lightbend.lagom.internal.persistence.couchbase import akka.actor.ActorSystem import akka.event.Logging import akka.testkit.EventFilter import com.typesafe.config.ConfigFactory import org.scalatest.{BeforeAndAfterAll, MustMatchers, WordSpec} import scala.concurrent.duration._ import scala.concurrent.Await class MyException extends RuntimeException("MyException") class CouchbaseConfigValidatorSpec extends WordSpec with MustMatchers with BeforeAndAfterAll { val akkaTestLogging = ConfigFactory.parseString("akka.loggers = [akka.testkit.TestEventListener]") implicit val system = ActorSystem("test", akkaTestLogging) val log = Logging(system, classOf[CouchbaseConfigValidatorSpec]) override def afterAll = Await.result(system.terminate(), Duration.Inf) "CouchbaseConfigValidator" should { "detect when bucket is not set" in { val config = ConfigFactory.parseString("""some.config.setting = 1""".stripMargin) EventFilter .error("Configuration for [test.bucket] must be set in application.conf ", occurrences = 1) .intercept { CouchbaseConfigValidator.validateBucket("test", config, log) } } "detect when bucket is set to null" in { val config = ConfigFactory.parseString("""testpath1.bucket = null""".stripMargin) EventFilter .error("Configuration for [testpath1.bucket] must be set in application.conf ", occurrences = 1) .intercept { CouchbaseConfigValidator.validateBucket("testpath1", config, log) } } "pass when bucket is specified" in { val config = ConfigFactory.parseString("""sample.path.bucket = bucketname""".stripMargin) // expect only one "another error" in the log EventFilter.error(occurrences = 1).intercept { CouchbaseConfigValidator.validateBucket("sample.path", config, log) log.error("another error") } } } }
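Note: EventFilter.intercept only observes log events when akka.testkit.TestEventListener is installed as a logger, which is exactly what the spec's config does. A condensed sketch of asserting on a logged error, assuming akka-testkit on the test classpath:

import akka.actor.ActorSystem
import akka.event.Logging
import akka.testkit.EventFilter
import com.typesafe.config.ConfigFactory

object EventFilterExample extends App {
  implicit val system: ActorSystem = ActorSystem("test",
    ConfigFactory.parseString("""akka.loggers = ["akka.testkit.TestEventListener"]"""))
  val log = Logging(system.eventStream, "under-test")

  EventFilter.error(message = "boom", occurrences = 1).intercept {
    log.error("boom") // passes: exactly one matching error event
  }
  system.terminate()
}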
Example 52
Source File: HttpService.scala From ddd-leaven-akka-v2 with MIT License | 5 votes |
package ecommerce.sales.app import akka.actor.{Actor, ActorLogging, Props} import akka.event.Logging import akka.http.scaladsl.Http import akka.util.Timeout import ecommerce.sales.ReservationOfficeId import io.github.lhotari.akka.http.health.HealthEndpoint.createDefaultHealthRoute import org.json4s.Formats import pl.newicom.dddd.serialization.JsonSerHints.fromConfig import pl.newicom.dddd.writefront.HttpCommandHandler import scala.concurrent.duration.FiniteDuration object HttpService { def props(interface: String, port: Int, askTimeout: FiniteDuration): Props = Props(new HttpService(interface, port)(askTimeout)) } class HttpService(interface: String, port: Int)(implicit val timeout: Timeout) extends Actor with SalesFrontConfiguration with HttpCommandHandler with ActorLogging { import context.dispatcher implicit val formats: Formats = fromConfig(config) Http(context.system).bindAndHandle(route, interface, port) log.info(s"Listening on $interface:$port") override def receive: Receive = Actor.emptyBehavior override def offices = Set(ReservationOfficeId) private def route = logRequestResult(("sales", Logging.InfoLevel)) { pathPrefix("ecommerce" / "sales") { createDefaultHealthRoute() ~ handle[ecommerce.sales.Command] } } }
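Note: logRequestResult(("sales", Logging.InfoLevel)) logs every request/response pair at INFO under the "sales" marker; the (String, LogLevel) tuple is one of the accepted LoggingMagnet shapes. A minimal route sketch:

import akka.event.Logging
import akka.http.scaladsl.server.Directives._
import akka.http.scaladsl.server.Route

object LoggedRoute {
  val route: Route = logRequestResult(("sales", Logging.InfoLevel)) {
    path("healthcheck") { complete("ok") }
  }
}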
Example 53
Source File: CassandraHealth.scala From nexus with Apache License 2.0 | 5 votes |
package ch.epfl.bluebrain.nexus.service.routes import akka.actor.{ExtendedActorSystem, Extension, ExtensionId, ExtensionIdProvider} import akka.event.Logging import akka.stream.alpakka.cassandra.CassandraSessionSettings import akka.stream.alpakka.cassandra.scaladsl.CassandraSessionRegistry import ch.epfl.bluebrain.nexus.sourcing.projections.Projections._ import scala.concurrent.{ExecutionContext, Future} trait CassandraHealth extends Extension { def check: Future[Boolean] } object CassandraHealth extends ExtensionId[CassandraHealth] with ExtensionIdProvider { override def lookup(): ExtensionId[_ <: Extension] = CassandraHealth override def createExtension(as: ExtendedActorSystem): CassandraHealth = { implicit val ec: ExecutionContext = as.dispatcher val log = Logging(as, "CassandraHealthCheck") val keyspace: String = journalConfig(as).getString("keyspace") val session = CassandraSessionRegistry.get(as).sessionFor(CassandraSessionSettings(cassandraDefaultConfigPath)) new CassandraHealth { private val query = s"SELECT now() FROM $keyspace.messages;" override def check: Future[Boolean] = { session.selectOne(query).map(_ => true).recover { case err => log.error(err, "Error while attempting to query for health check") false } } } } }
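Note: the health check maps any successful row to true and converts failures into a logged false rather than a failed Future. The same shape generalized over an arbitrary probe (names illustrative):

import akka.event.LoggingAdapter
import scala.concurrent.{ExecutionContext, Future}

object HealthProbe {
  def check(ping: () => Future[_], log: LoggingAdapter)(implicit ec: ExecutionContext): Future[Boolean] =
    ping().map(_ => true).recover { case err =>
      log.error(err, "Error while attempting the health check probe")
      false
    }
}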
Example 54
Source File: Main.scala From nexus with Apache License 2.0 | 5 votes |
package ch.epfl.bluebrain.nexus.storage import java.nio.file.Paths import java.time.Clock import akka.actor.ActorSystem import akka.event.{Logging, LoggingAdapter} import akka.http.scaladsl.Http import akka.http.scaladsl.server.Route import akka.util.Timeout import cats.effect.Effect import ch.epfl.bluebrain.nexus.storage.Storages.DiskStorage import ch.epfl.bluebrain.nexus.storage.attributes.AttributesCache import ch.epfl.bluebrain.nexus.storage.config.{AppConfig, Settings} import ch.epfl.bluebrain.nexus.storage.config.AppConfig._ import ch.epfl.bluebrain.nexus.storage.routes.Routes import com.typesafe.config.{Config, ConfigFactory} import kamon.Kamon import monix.eval.Task import monix.execution.Scheduler import scala.concurrent.duration._ import scala.concurrent.{Await, ExecutionContext, Future} import scala.util.{Failure, Success} //noinspection TypeAnnotation // $COVERAGE-OFF$ object Main { def loadConfig(): Config = { val cfg = sys.env.get("STORAGE_CONFIG_FILE") orElse sys.props.get("storage.config.file") map { str => val file = Paths.get(str).toAbsolutePath.toFile ConfigFactory.parseFile(file) } getOrElse ConfigFactory.empty() (cfg withFallback ConfigFactory.load()).resolve() } def setupMonitoring(config: Config): Unit = { if (sys.env.getOrElse("KAMON_ENABLED", "false").toBoolean) { Kamon.reconfigure(config) Kamon.loadModules() } } def shutdownMonitoring(): Unit = { if (sys.env.getOrElse("KAMON_ENABLED", "false").toBoolean) { Await.result(Kamon.stopModules(), 10.seconds) } } @SuppressWarnings(Array("UnusedMethodParameter")) def main(args: Array[String]): Unit = { val config = loadConfig() setupMonitoring(config) implicit val appConfig: AppConfig = Settings(config).appConfig implicit val as: ActorSystem = ActorSystem(appConfig.description.fullName, config) implicit val ec: ExecutionContext = as.dispatcher implicit val eff: Effect[Task] = Task.catsEffect(Scheduler.global) implicit val iamIdentities: IamIdentitiesClient[Task] = new IamIdentitiesClient[Task](appConfig.iam) implicit val timeout = Timeout(1.minute) implicit val clock = Clock.systemUTC val storages: Storages[Task, AkkaSource] = new DiskStorage(appConfig.storage, appConfig.digest, AttributesCache[Task, AkkaSource]) val logger: LoggingAdapter = Logging(as, getClass) logger.info("==== Cluster is Live ====") val routes: Route = Routes(storages) val httpBinding: Future[Http.ServerBinding] = { Http().bindAndHandle(routes, appConfig.http.interface, appConfig.http.port) } httpBinding onComplete { case Success(binding) => logger.info(s"Bound to ${binding.localAddress.getHostString}: ${binding.localAddress.getPort}") case Failure(th) => logger.error(th, "Failed to perform an http binding on {}:{}", appConfig.http.interface, appConfig.http.port) Await.result(as.terminate(), 10.seconds) } as.registerOnTermination { shutdownMonitoring() } // attempt to leave the cluster before shutting down val _ = sys.addShutdownHook { Await.result(as.terminate().map(_ => ()), 10.seconds) } } } // $COVERAGE-ON$
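Note: Main registers a JVM shutdown hook that blocks for a bounded time on ActorSystem.terminate, so the process exits promptly even if termination stalls. Reduced to its essentials:

import akka.actor.ActorSystem
import scala.concurrent.Await
import scala.concurrent.duration._

object ShutdownHookExample extends App {
  val system = ActorSystem("app")
  sys.addShutdownHook {
    Await.result(system.terminate(), 10.seconds) // bounded wait on JVM exit
  }
}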
Example 55
Source File: EventSourcedSupportFactory.scala From cloudstate with Apache License 2.0 | 5 votes |
package io.cloudstate.proxy.eventsourced import akka.NotUsed import akka.actor.{ActorRef, ActorSystem} import akka.cluster.sharding.ShardRegion.HashCodeMessageExtractor import akka.cluster.sharding.{ClusterSharding, ClusterShardingSettings} import akka.event.Logging import akka.grpc.GrpcClientSettings import akka.stream.Materializer import akka.stream.scaladsl.{Flow, Source} import akka.util.Timeout import com.google.protobuf.Descriptors.ServiceDescriptor import io.cloudstate.protocol.entity.{Entity, Metadata} import io.cloudstate.protocol.event_sourced.EventSourcedClient import io.cloudstate.proxy._ import io.cloudstate.proxy.entity.{EntityCommand, UserFunctionReply} import scala.concurrent.{ExecutionContext, Future} import scala.collection.JavaConverters._ class EventSourcedSupportFactory(system: ActorSystem, config: EntityDiscoveryManager.Configuration, grpcClientSettings: GrpcClientSettings, concurrencyEnforcer: ActorRef, statsCollector: ActorRef)(implicit ec: ExecutionContext, mat: Materializer) extends EntityTypeSupportFactory { private final val log = Logging.getLogger(system, this.getClass) private val eventSourcedClient = EventSourcedClient(grpcClientSettings)(system) override def buildEntityTypeSupport(entity: Entity, serviceDescriptor: ServiceDescriptor, methodDescriptors: Map[String, EntityMethodDescriptor]): EntityTypeSupport = { validate(serviceDescriptor, methodDescriptors) val stateManagerConfig = EventSourcedEntity.Configuration(entity.serviceName, entity.persistenceId, config.passivationTimeout, config.relayOutputBufferSize) log.debug("Starting EventSourcedEntity for {}", entity.persistenceId) val clusterSharding = ClusterSharding(system) val clusterShardingSettings = ClusterShardingSettings(system) val eventSourcedEntity = clusterSharding.start( typeName = entity.persistenceId, entityProps = EventSourcedEntitySupervisor.props(eventSourcedClient, stateManagerConfig, concurrencyEnforcer, statsCollector), settings = clusterShardingSettings, messageExtractor = new EntityIdExtractor(config.numberOfShards), allocationStrategy = new DynamicLeastShardAllocationStrategy(1, 10, 2, 0.0), handOffStopMessage = EventSourcedEntity.Stop ) new EventSourcedSupport(eventSourcedEntity, config.proxyParallelism, config.relayTimeout) } private def validate(serviceDescriptor: ServiceDescriptor, methodDescriptors: Map[String, EntityMethodDescriptor]): Unit = { val streamedMethods = methodDescriptors.values.filter(m => m.method.toProto.getClientStreaming || m.method.toProto.getServerStreaming) if (streamedMethods.nonEmpty) { val offendingMethods = streamedMethods.map(_.method.getName).mkString(",") throw EntityDiscoveryException( s"Event sourced entities do not support streamed methods, but ${serviceDescriptor.getFullName} has the following streamed methods: ${offendingMethods}" ) } val methodsWithoutKeys = methodDescriptors.values.filter(_.keyFieldsCount < 1) if (methodsWithoutKeys.nonEmpty) { val offendingMethods = methodsWithoutKeys.map(_.method.getName).mkString(",") throw new EntityDiscoveryException( s"Event sourced entities do not support methods whose parameters do not have at least one field marked as entity_key, " + s"but ${serviceDescriptor.getFullName} has the following methods without keys: ${offendingMethods}" ) } } } private class EventSourcedSupport(eventSourcedEntity: ActorRef, parallelism: Int, private implicit val relayTimeout: Timeout) extends EntityTypeSupport { import akka.pattern.ask override def handler(method: EntityMethodDescriptor, metadata: Metadata): Flow[EntityCommand, UserFunctionReply, NotUsed] = Flow[EntityCommand].mapAsync(parallelism)( command => (eventSourcedEntity ? EntityTypeSupport.mergeStreamLevelMetadata(metadata, command)) .mapTo[UserFunctionReply] ) override def handleUnary(command: EntityCommand): Future[UserFunctionReply] = (eventSourcedEntity ? command).mapTo[UserFunctionReply] } private final class EntityIdExtractor(shards: Int) extends HashCodeMessageExtractor(shards) { override final def entityId(message: Any): String = message match { case command: EntityCommand => command.entityId } }
Example 56
Source File: TimeResponseDirective.scala From graphcool-framework with Apache License 2.0 | 5 votes |
package cool.graph.metrics.extensions import akka.event.Logging.LogLevel import akka.event.{Logging, LoggingAdapter} import akka.http.scaladsl.model.HttpRequest import akka.http.scaladsl.server.RouteResult.{Complete, Rejected} import akka.http.scaladsl.server.directives.{DebuggingDirectives, LoggingMagnet} import cool.graph.metrics.{CustomTag, MetricsManager, TimerMetric} trait TimeResponseDirective { def captureResponseTimeFunction( loggingAdapter: LoggingAdapter, requestTimestamp: Long, level: LogLevel = Logging.InfoLevel )(req: HttpRequest)(res: Any): Unit = { res match { case Complete(resp) => val responseTimestamp: Long = System.nanoTime val elapsedTime: Long = (responseTimestamp - requestTimestamp) / 1000000 requestTimer.record(elapsedTime, Seq(resp.status.toString())) case Rejected(_) => } } def captureResponseTime(log: LoggingAdapter) = { val requestTimestamp = System.nanoTime captureResponseTimeFunction(log, requestTimestamp)(_) } val timeResponse = DebuggingDirectives.logRequestResult(LoggingMagnet(captureResponseTime(_))) } case class TimeResponseDirectiveImpl(metricsManager: MetricsManager) extends TimeResponseDirective { val requestTimer: TimerMetric = metricsManager.defineTimer("responseTime", CustomTag("status")) }
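Note: LoggingMagnet lets logRequestResult run any function of the LoggingAdapter against each request and its result. A hedged variant of the directive above that logs the elapsed time instead of feeding a metrics registry:

import akka.event.LoggingAdapter
import akka.http.scaladsl.model.HttpRequest
import akka.http.scaladsl.server.RouteResult
import akka.http.scaladsl.server.RouteResult.{Complete, Rejected}
import akka.http.scaladsl.server.directives.{DebuggingDirectives, LoggingMagnet}

object ResponseTiming {
  private def timed(log: LoggingAdapter)(req: HttpRequest): RouteResult => Unit = {
    val start = System.nanoTime() // captured when the request arrives
    {
      case Complete(resp) =>
        val elapsedMs = (System.nanoTime() - start) / 1000000
        log.info("{} {} -> {} in {} ms", req.method.name, req.uri.path, resp.status.intValue, elapsedMs)
      case Rejected(_) => ()
    }
  }

  val timeResponse = DebuggingDirectives.logRequestResult(LoggingMagnet(timed(_)))
}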
Example 57
Source File: CassandraHealthCheck.scala From akka-persistence-cassandra with Apache License 2.0 | 5 votes |
package akka.persistence.cassandra.healthcheck import akka.actor.ActorSystem import akka.event.Logging import akka.pattern.{ ask, AskTimeoutException } import akka.persistence.Persistence import akka.persistence.cassandra.PluginSettings import akka.persistence.cassandra.journal.CassandraJournal.HealthCheckQuery import akka.util.Timeout import scala.concurrent.{ ExecutionContextExecutor, Future } import scala.util.control.NonFatal final class CassandraHealthCheck(system: ActorSystem) extends (() => Future[Boolean]) { private val log = Logging.getLogger(system, getClass) private val settings = new PluginSettings(system, system.settings.config.getConfig("akka.persistence.cassandra")) private val healthCheckSettings = settings.healthCheckSettings private val journalPluginId = s"${healthCheckSettings.pluginLocation}.journal" private val journalRef = Persistence(system).journalFor(journalPluginId) private implicit val ec: ExecutionContextExecutor = system.dispatchers.lookup(s"$journalPluginId.plugin-dispatcher") private implicit val timeout: Timeout = healthCheckSettings.timeout override def apply(): Future[Boolean] = { (journalRef ? HealthCheckQuery).map(_ => true).recoverWith { case _: AskTimeoutException => log.warning("Failed to execute health check due to ask timeout") Future.successful(false) case NonFatal(e) => log.warning("Failed to execute health check due to: {}", e) Future.successful(false) } } }
Example 58
Source File: TagViewSequenceNumberScanner.scala From akka-persistence-cassandra with Apache License 2.0 | 5 votes |
package akka.persistence.cassandra.query import java.lang.{ Long => JLong } import java.util.UUID import akka.NotUsed import akka.annotation.InternalApi import akka.event.Logging import akka.persistence.cassandra.journal.CassandraJournal._ import akka.persistence.cassandra.journal.TimeBucket import akka.persistence.cassandra.formatOffset import akka.persistence.cassandra.query.TagViewSequenceNumberScanner.Session import akka.stream.Materializer import akka.stream.scaladsl.Source import com.datastax.oss.driver.api.core.cql.{ PreparedStatement, Row } import scala.concurrent.duration.{ Deadline, FiniteDuration } import scala.concurrent.{ ExecutionContext, Future } import akka.persistence.cassandra.BucketSize import akka.stream.alpakka.cassandra.scaladsl.CassandraSession private[akka] def scan( tag: String, fromOffset: UUID, toOffset: UUID, bucketSize: BucketSize, scanningPeriod: FiniteDuration, whichToKeep: (TagPidSequenceNr, TagPidSequenceNr) => TagPidSequenceNr) : Future[Map[PersistenceId, (TagPidSequenceNr, UUID)]] = { val deadline: Deadline = Deadline.now + scanningPeriod def doIt(): Future[Map[PersistenceId, (TagPidSequenceNr, UUID)]] = { // How many buckets is this spread across? val startBucket = TimeBucket(fromOffset, bucketSize) val endBucket = TimeBucket(toOffset, bucketSize) require(startBucket <= endBucket) if (log.isDebugEnabled) { log.debug( s"Scanning tag: $tag from: {}, to: {}. Bucket {} to {}", formatOffset(fromOffset), formatOffset(toOffset), startBucket, endBucket) } Source .unfold(startBucket)(current => { if (current <= endBucket) { Some((current.next(), current)) } else { None } }) .flatMapConcat(bucket => { log.debug("Scanning bucket {}", bucket) session.selectTagSequenceNrs(tag, bucket, fromOffset, toOffset) }) .map(row => (row.getString("persistence_id"), row.getLong("tag_pid_sequence_nr"), row.getUuid("timestamp"))) .runFold(Map.empty[Tag, (TagPidSequenceNr, UUID)]) { case (acc, (pid, tagPidSequenceNr, timestamp)) => val (newTagPidSequenceNr, newTimestamp) = acc.get(pid) match { case None => (tagPidSequenceNr, timestamp) case Some((currentTagPidSequenceNr, currentTimestamp)) => if (whichToKeep(tagPidSequenceNr, currentTagPidSequenceNr) == tagPidSequenceNr) (tagPidSequenceNr, timestamp) else (currentTagPidSequenceNr, currentTimestamp) } acc + (pid -> ((newTagPidSequenceNr, newTimestamp))) } .flatMap { result => if (deadline.hasTimeLeft()) { doIt() } else { Future.successful(result) } } } doIt() } }
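Note: the scanner repeats its pass until a Deadline expires, giving late-arriving tag writes a window to show up before the result is returned. The control flow reduced to a reusable helper (a sketch; names illustrative):

import scala.concurrent.duration._
import scala.concurrent.{ExecutionContext, Future}

object RepeatUntilDeadline {
  // Re-run step while the deadline has time left; return the last completed result.
  def scanUntil[T](deadline: Deadline)(step: () => Future[T])(implicit ec: ExecutionContext): Future[T] =
    step().flatMap { result =>
      if (deadline.hasTimeLeft()) scanUntil(deadline)(step)
      else Future.successful(result)
    }
}

Usage mirrors the doIt() loop above, e.g. scanUntil(500.millis.fromNow)(() => scanOnce()) with a hypothetical scanOnce returning a Future.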
Example 59
Source File: DeleteTagViewForPersistenceId.scala From akka-persistence-cassandra with Apache License 2.0 | 5 votes |
package akka.persistence.cassandra.reconciler import akka.actor.ActorSystem import akka.persistence.cassandra.PluginSettings import akka.Done import akka.event.Logging import akka.persistence.cassandra.query.scaladsl.CassandraReadJournal import akka.annotation.InternalApi import akka.persistence.query.NoOffset import akka.persistence.cassandra.journal.TimeBucket import akka.stream.scaladsl.Sink import scala.concurrent.Future @InternalApi private[akka] final class DeleteTagViewForPersistenceId( persistenceIds: Set[String], tag: String, system: ActorSystem, session: ReconciliationSession, settings: PluginSettings, queries: CassandraReadJournal) { private val log = Logging(system, s"DeleteTagView($tag)") private implicit val sys = system import system.dispatcher def execute(): Future[Done] = { queries .currentEventsByTagInternal(tag, NoOffset) .filter(persistenceIds contains _.persistentRepr.persistenceId) // Make the parallelism configurable? .mapAsync(1) { uuidPr => val bucket = TimeBucket(uuidPr.offset, settings.eventsByTagSettings.bucketSize) val timestamp = uuidPr.offset val persistenceId = uuidPr.persistentRepr.persistenceId val tagPidSequenceNr = uuidPr.tagPidSequenceNr log.debug("Issuing delete {} {} {} {}", persistenceId, bucket, timestamp, tagPidSequenceNr) session.deleteFromTagView(tag, bucket, timestamp, persistenceId, tagPidSequenceNr) } .runWith(Sink.ignore) .flatMap(_ => Future.traverse(persistenceIds) { pid => val progress = session.deleteTagProgress(tag, pid) val scanning = session.deleteTagScannning(pid) for { _ <- progress _ <- scanning } yield Done }) .map(_ => Done) } }
Example 60
Source File: BuildTagViewForPersistenceId.scala From akka-persistence-cassandra with Apache License 2.0 | 5 votes |
package akka.persistence.cassandra.reconciler import akka.actor.ActorSystem import akka.persistence.cassandra.PluginSettings import akka.Done import akka.persistence.cassandra.journal.TagWriter._ import scala.concurrent.duration._ import scala.concurrent.Future import akka.stream.scaladsl.Source import akka.actor.ExtendedActorSystem import akka.persistence.query.PersistenceQuery import akka.persistence.cassandra.query.scaladsl.CassandraReadJournal import akka.event.Logging import akka.persistence.cassandra.journal.CassandraTagRecovery import akka.persistence.cassandra.Extractors import akka.util.Timeout import akka.stream.OverflowStrategy import akka.stream.scaladsl.Sink import akka.annotation.InternalApi import akka.serialization.SerializationExtension @InternalApi private[akka] final class BuildTagViewForPersistenceId( persistenceId: String, system: ActorSystem, recovery: CassandraTagRecovery, settings: PluginSettings) { import system.dispatcher private implicit val sys = system private val log = Logging(system, classOf[BuildTagViewForPersistenceId]) private val serialization = SerializationExtension(system) private val queries: CassandraReadJournal = PersistenceQuery(system.asInstanceOf[ExtendedActorSystem]) .readJournalFor[CassandraReadJournal]("akka.persistence.cassandra.query") private implicit val flushTimeout = Timeout(30.seconds) def reconcile(flushEvery: Int = 1000): Future[Done] = { val recoveryPrep = for { tp <- recovery.lookupTagProgress(persistenceId) _ <- recovery.setTagProgress(persistenceId, tp) } yield tp Source .futureSource(recoveryPrep.map((tp: Map[String, TagProgress]) => { log.debug("[{}] Rebuilding tag view table from: [{}]", persistenceId, tp) queries .eventsByPersistenceId( persistenceId, 0, Long.MaxValue, Long.MaxValue, None, settings.journalSettings.readProfile, "BuildTagViewForPersistenceId", extractor = Extractors.rawEvent(settings.eventsByTagSettings.bucketSize, serialization, system)) .map(recovery.sendMissingTagWriteRaw(tp, actorRunning = false)) .buffer(flushEvery, OverflowStrategy.backpressure) .mapAsync(1)(_ => recovery.flush(flushTimeout)) })) .runWith(Sink.ignore) } }
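Here the adapter is built with Logging(system, classOf[...]), which gives a stable logger name regardless of how the instance was created. A minimal sketch (the class is a hypothetical stand-in):

import akka.actor.ActorSystem
import akka.event.Logging

class TagViewRebuilder // illustrative stand-in for the class above

object ClassSourceSketch extends App {
  val system = ActorSystem("sketch")
  // classOf gives a stable logger name, independent of any anonymous subclass
  val log = Logging(system, classOf[TagViewRebuilder])
  log.debug("[{}] Rebuilding tag view table from: [{}]", "pid-1", Map.empty)
  system.terminate()
}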
Example 61
Source File: CassandraEventUpdateSpec.scala From akka-persistence-cassandra with Apache License 2.0 | 5 votes |
package akka.persistence.cassandra.journal import java.util.UUID import scala.concurrent.Await import akka.Done import akka.event.Logging import akka.persistence.PersistentRepr import akka.persistence.cassandra.journal.CassandraJournal.Serialized import akka.persistence.cassandra.{ CassandraLifecycle, CassandraSpec, TestTaggingActor, _ } import akka.serialization.SerializationExtension import com.typesafe.config.ConfigFactory import scala.concurrent.ExecutionContext import scala.concurrent.Future import akka.actor.ExtendedActorSystem import akka.stream.alpakka.cassandra.CqlSessionProvider import akka.stream.alpakka.cassandra.scaladsl.CassandraSession object CassandraEventUpdateSpec { val config = ConfigFactory.parseString(""" """).withFallback(CassandraLifecycle.config) } class CassandraEventUpdateSpec extends CassandraSpec(CassandraEventUpdateSpec.config) { s => private[akka] val log = Logging(system, getClass) private val serialization = SerializationExtension(system) val updater = new CassandraEventUpdate { override private[akka] val log = s.log override private[akka] def settings: PluginSettings = PluginSettings(system) override private[akka] implicit val ec: ExecutionContext = system.dispatcher // use separate session, not shared via CassandraSessionRegistry because init is different private val sessionProvider = CqlSessionProvider( system.asInstanceOf[ExtendedActorSystem], system.settings.config.getConfig(PluginSettings.DefaultConfigPath)) override private[akka] val session: CassandraSession = new CassandraSession( system, sessionProvider, ec, log, systemName, init = _ => Future.successful(Done), onClose = () => ()) } "CassandraEventUpdate" must { "update the event in messages" in { val pid = nextPid val a = system.actorOf(TestTaggingActor.props(pid)) a ! "e-1" expectMsgType[TestTaggingActor.Ack.type] val eventsBefore = events(pid) eventsBefore.map(_.pr.payload) shouldEqual Seq("e-1") val originalEvent = eventsBefore.head val modifiedEvent = serialize(originalEvent.pr.withPayload("secrets"), originalEvent.offset, Set("ignored")) updater.updateEvent(modifiedEvent).futureValue shouldEqual Done eventPayloadsWithTags(pid) shouldEqual Seq(("secrets", Set())) } "update the event in tag_views" in { val pid = nextPid val b = system.actorOf(TestTaggingActor.props(pid, Set("red", "blue"))) b ! "e-1" expectMsgType[TestTaggingActor.Ack.type] val eventsBefore = events(pid).head val modifiedEvent = serialize(eventsBefore.pr.withPayload("hidden"), eventsBefore.offset, Set("ignored")) expectEventsForTag(tag = "red", "e-1") expectEventsForTag(tag = "blue", "e-1") updater.updateEvent(modifiedEvent).futureValue shouldEqual Done expectEventsForTag(tag = "red", "hidden") expectEventsForTag(tag = "blue", "hidden") } def serialize(pr: PersistentRepr, offset: UUID, tags: Set[String]): Serialized = { import system.dispatcher Await.result(serializeEvent(pr, tags, offset, Hour, serialization, system), remainingOrDefault) } } }
Example 62
Source File: Service.scala From reactive-microservices with MIT License | 5 votes |
import akka.actor.ActorSystem import akka.event.Logging import java.math.BigInteger import java.security.SecureRandom import scala.concurrent.{ExecutionContext, Future} class Service(repository: Repository)(implicit actorSystem: ActorSystem, ec: ExecutionContext) extends Config { def relogin(reloginRequest: ReloginRequest): Future[Option[Token]] = { repository.addMethodToValidTokenByValue(reloginRequest.tokenValue, reloginRequest.authMethod) } def login(loginRequest: LoginRequest): Future[Token] = { val newToken = createFreshToken(loginRequest.identityId, loginRequest.authMethod) repository.insertToken(newToken).map(_ => newToken) } def findAndRefreshToken(tokenValue: String): Future[Option[Token]] = { repository.findValidTokenByValue(tokenValue).map { tokenOption => tokenOption.map { token => val newToken = refreshToken(token) if (newToken != token) repository.updateTokenByValue(token.value, newToken).onFailure { case t => logger.error(t, "Token refreshment failed") } newToken } } } def logout(tokenValue: String): Unit = { repository.deleteTokenByValue(tokenValue).onFailure { case t => logger.error(t, "Token deletion failed") } } private def createFreshToken(identityId: Long, authMethod: String): Token = { Token(generateToken, System.currentTimeMillis() + tokenTtl, identityId, Set(authMethod)) } private def generateToken: String = new BigInteger(255, random).toString(32) private def refreshToken(token: Token): Token = token.copy(validTo = math.max(token.validTo, System.currentTimeMillis() + sessionTtl)) private val random = new SecureRandom() private val logger = Logging(actorSystem, getClass) }
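The failure handlers use the Throwable-first overload logger.error(t, message), which logs the message together with the stack trace. A compact sketch of the same overload (names are illustrative):

import akka.actor.ActorSystem
import akka.event.Logging

object ErrorWithCauseSketch extends App {
  val system = ActorSystem("sketch")
  val logger = Logging(system, getClass)
  try throw new IllegalStateException("backend unavailable")
  catch {
    // Passing the Throwable first logs the message together with the stack trace
    case t: Throwable => logger.error(t, "Token refreshment failed")
  }
  system.terminate()
}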
Example 63
Source File: ChatController.scala From Scala-Reactive-Programming with MIT License | 5 votes |
package controllers import java.net.URI import javax.inject._ import akka.actor.ActorSystem import akka.event.Logging import akka.stream.Materializer import akka.stream.scaladsl.{BroadcastHub, Flow, Keep, MergeHub, Source} import play.api.Logger import play.api.mvc._ import scala.concurrent.{ExecutionContext, Future} @Singleton class ChatController @Inject()(cc: ControllerComponents) (implicit actorSystem: ActorSystem, mat: Materializer, executionContext: ExecutionContext, webJarsUtil: org.webjars.play.WebJarsUtil) extends AbstractController(cc) with RequestMarkerContext { private type WSMessage = String private val logger = Logger(getClass) private implicit val logging = Logging(actorSystem.eventStream, logger.underlyingLogger.getName) private val (chatSink, chatSource) = { val source = MergeHub.source[WSMessage] .log("source") .recoverWithRetries(-1, { case _: Exception ⇒ Source.empty }) val sink = BroadcastHub.sink[WSMessage] source.toMat(sink)(Keep.both).run() } private val userFlow: Flow[WSMessage, WSMessage, _] = { Flow.fromSinkAndSource(chatSink, chatSource) } def index: Action[AnyContent] = Action { implicit request: RequestHeader => val webSocketUrl = routes.ChatController.chat().webSocketURL() logger.info(s"index: ") Ok(views.html.index(webSocketUrl)) } def chat(): WebSocket = { WebSocket.acceptOrResult[WSMessage, WSMessage] { case rh if sameOriginCheck(rh) => Future.successful(userFlow).map { flow => Right(flow) }.recover { case e: Exception => val msg = "Cannot create websocket" logger.error(msg, e) val result = InternalServerError(msg) Left(result) } case rejected => logger.error(s"Request ${rejected} failed same origin check") Future.successful { Left(Forbidden("forbidden")) } } } private def sameOriginCheck(implicit rh: RequestHeader): Boolean = { logger.debug("Checking the ORIGIN ") rh.headers.get("Origin") match { case Some(originValue) if originMatches(originValue) => logger.debug(s"originCheck: originValue = $originValue") true case Some(badOrigin) => logger.error(s"originCheck: rejecting request because Origin header value ${badOrigin} is not in the same origin") false case None => logger.error("originCheck: rejecting request because no Origin header found") false } } private def originMatches(origin: String): Boolean = { try { val url = new URI(origin) url.getHost == "localhost" && (url.getPort match { case 9000 | 19001 => true; case _ => false }) } catch { case e: Exception => false } } }
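This controller builds its adapter directly on the logging bus with Logging(actorSystem.eventStream, name), reusing an existing logger name; stream operators such as .log("source") can then pick the adapter up implicitly. A minimal sketch (the logger name is illustrative):

import akka.actor.ActorSystem
import akka.event.Logging

object EventStreamSourceSketch extends App {
  implicit val system = ActorSystem("sketch")
  // An adapter built on the event stream with an explicit logger name;
  // stream stages like .log("source") can use it as their implicit adapter
  implicit val logging = Logging(system.eventStream, "controllers.ChatController")
  logging.info("adapter ready")
  system.terminate()
}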
Example 64
Source File: Endpoints.scala From akka-http-microservice-templates with MIT License | 5 votes |
package io.github.gabfssilva.endpoints import java.lang.System.currentTimeMillis import akka.actor.ActorSystem import akka.event.{Logging, LoggingAdapter} import akka.http.scaladsl.model.HttpRequest import akka.http.scaladsl.server.Directives._ import akka.http.scaladsl.server.Route import akka.http.scaladsl.server.RouteResult.Complete import akka.http.scaladsl.server.directives.{DebuggingDirectives, LogEntry, LoggingMagnet} import akka.http.scaladsl.settings.RoutingSettings import akka.stream.{ActorMaterializer, Materializer} import scala.concurrent.ExecutionContext class Endpoints(greetingEndpoint: GreetingEndpoint, healthCheckEndpoint: HealthCheckEndpoint) { def routes(implicit sys: ActorSystem, mat: ActorMaterializer, ec: ExecutionContext) = loggableRoute { Route.seal { greetingEndpoint.greetingRoute ~ healthCheckEndpoint.healthCheckRoute } } def logRequestAndResponse(loggingAdapter: LoggingAdapter, before: Long)(req: HttpRequest)(res: Any): Unit = { val entry = res match { case Complete(resp) => val message = s"{path=${req.uri}, method=${req.method.value}, status=${resp.status.intValue()}, elapsedTime=${currentTimeMillis() - before}" LogEntry(message, Logging.InfoLevel) case other => LogEntry(other, Logging.InfoLevel) } entry.logTo(loggingAdapter) } def loggableRoute(route: Route)(implicit m: Materializer, ex: ExecutionContext, routingSettings: RoutingSettings): Route = { DebuggingDirectives.logRequestResult(LoggingMagnet(log => { val requestTimestamp = currentTimeMillis() logRequestAndResponse(log, requestTimestamp) }))(route) } }
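LogEntry pairs a message with an akka.event.Logging level, and DebuggingDirectives lifts a function producing such entries into a directive. A small sketch using the simpler logRequest variant (route and message are illustrative):

import akka.event.Logging
import akka.http.scaladsl.model.HttpRequest
import akka.http.scaladsl.server.Directives._
import akka.http.scaladsl.server.Route
import akka.http.scaladsl.server.directives.{DebuggingDirectives, LogEntry}

object LogEntrySketch {
  // One InfoLevel entry per request; the HttpRequest => LogEntry function
  // is lifted into a LoggingMagnet by an implicit conversion
  val route: Route =
    DebuggingDirectives.logRequest((req: HttpRequest) => LogEntry(s"request: ${req.uri}", Logging.InfoLevel)) {
      path("ping") {
        complete("pong")
      }
    }
}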
Example 65
Source File: Endpoints.scala From akka-http-microservice-templates with MIT License | 5 votes |
package endpoints import java.lang.System.currentTimeMillis import akka.actor.ActorSystem import akka.event.{Logging, LoggingAdapter} import akka.http.scaladsl.model.HttpRequest import akka.http.scaladsl.server.Directives._ import akka.http.scaladsl.server.RouteResult.Complete import akka.http.scaladsl.server._ import akka.http.scaladsl.server.directives._ import akka.http.scaladsl.settings.RoutingSettings import akka.stream.{ActorMaterializer, Materializer} import scala.concurrent.ExecutionContext class Endpoints(userEndpoint: UserEndpoint, healthCheckEndpoint: HealthCheckEndpoint) { def routes(implicit sys: ActorSystem, mat: ActorMaterializer, ec: ExecutionContext) = loggableRoute { Route.seal { userEndpoint.userRoutes ~ healthCheckEndpoint.healthCheckRoute } } def logRequestAndResponse(loggingAdapter: LoggingAdapter, before: Long)(req: HttpRequest)(res: Any): Unit = { val entry = res match { case Complete(resp) => val message = s"{path=${req.uri}, method=${req.method.value}, status=${resp.status.intValue()}, elapsedTime=${currentTimeMillis() - before}" LogEntry(message, Logging.InfoLevel) case other => LogEntry(other, Logging.InfoLevel) } entry.logTo(loggingAdapter) } def loggableRoute(route: Route)(implicit m: Materializer, ex: ExecutionContext, routingSettings: RoutingSettings): Route = { DebuggingDirectives.logRequestResult(LoggingMagnet(log => { val requestTimestamp = currentTimeMillis() logRequestAndResponse(log, requestTimestamp) }))(route) } }
Example 68
Source File: Node.scala From affinity with Apache License 2.0 | 5 votes |
package io.amient.affinity.core.cluster import java.util.concurrent.{CountDownLatch, TimeUnit, TimeoutException} import akka.actor.{Actor, Props} import akka.event.Logging import akka.util.Timeout import com.typesafe.config.{Config, ConfigFactory} import io.amient.affinity.core.ack import io.amient.affinity.core.actor.Controller._ import io.amient.affinity.core.actor.Gateway.{GatewayClusterStatus, GatewayConf} import io.amient.affinity.core.actor._ import io.amient.affinity.core.config._ import io.amient.affinity.{AffinityActorSystem, Conf} import scala.concurrent.duration._ import scala.concurrent.{Await, Future, Promise} import scala.language.{implicitConversions, postfixOps} import scala.reflect.ClassTag object Node { class NodeConf extends CfgStruct[NodeConf] { val Containers: CfgGroup[CfgIntList] = group("container", classOf[CfgIntList], false) .doc("Array of partitions assigned to this node, <ID> represents the Keyspace, e.g. assigning first four partitions of MyKeySpace: affinity.node.container.MyKeySpace = [0,1,2,3] ") val Gateway: GatewayConf = struct("gateway", new GatewayConf, false) val SuspendQueueMaxSize = integer("suspend.queue.max.size", 1000).doc("Size of the queue when the cluster enters suspended mode") val StartupTimeoutMs = longint("startup.timeout.ms", Integer.MAX_VALUE).doc("Maximum time a node can take to startup - this number must account for any potential state bootstrap") val ShutdownTimeoutMs = longint("shutdown.timeout.ms", 30000).doc("Maximum time a node can take to shutdown gracefully") val DataDir = filepath("data.dir", false).doc("Location under which any local state or registers will be kept - this is required if running in a distributed mode or when using persisted kv stores") val DataAutoAssign = bool("data.auto.assign", true, false).doc("Determines whether this node auto-balances data across its containers; if set to false the fixed list of container partitions will be used") val DataAutoDelete = bool("data.auto.delete", true, false).doc("If set to true, any unassigned partitions will be deleted from the local storage") } } class Node(config: Config) { def this(configResource: String) = this(ConfigFactory.parseResources(configResource).resolve) val conf = Conf(config) val startupTimeout = conf.Affi.Node.StartupTimeoutMs().toLong milliseconds val shutdownTimeout = conf.Affi.Node.ShutdownTimeoutMs().toLong milliseconds implicit val system = AffinityActorSystem.create(config) private val log = Logging.getLogger(system, this) private val controller = system.actorOf(Props(new Controller), name = "controller") private val httpGatewayPort = Promise[List[Int]]() private val clusterReady = new CountDownLatch(1) @volatile private var shuttingDown = false @volatile private var fatalError: Option[Throwable] = None import scala.concurrent.ExecutionContext.Implicits.global val systemEventsWatcher = system.actorOf(Props(new Actor { override def receive: Receive = { case GatewayClusterStatus(false) => clusterReady.countDown() case FatalErrorShutdown(e) => fatalError = Some(e) shutdown() } })) system.eventStream.subscribe(systemEventsWatcher, classOf[GatewayClusterStatus]) system.eventStream.subscribe(systemEventsWatcher, classOf[FatalErrorShutdown]) sys.addShutdownHook { if (!shuttingDown) { log.info("process killed - attempting graceful shutdown") fatalError = None shutdown() } Await.ready(system.terminate, shutdownTimeout) } def start[T <: Gateway](creator: => T)(implicit tag: ClassTag[T]): Future[List[Int]] = { controller ! StartRebalance() implicit val timeout = Timeout(startupTimeout) val result = controller ?? CreateGateway(Props(creator)) httpGatewayPort.completeWith(result) result } }
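The node obtains its adapter through the Java-style factory Logging.getLogger(system, this), which derives the log source from the runtime class of the instance. A minimal sketch (names are illustrative):

import akka.actor.ActorSystem
import akka.event.Logging

class NodeSketch(system: ActorSystem) {
  // getLogger derives the logger name from this instance's class
  private val log = Logging.getLogger(system, this)
  def start(): Unit = log.info("node starting")
}

object NodeSketchApp extends App {
  val system = ActorSystem("sketch")
  new NodeSketch(system).start()
  system.terminate()
}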
Example 69
Source File: TransactionalProducer.scala From affinity with Apache License 2.0 | 5 votes |
package io.amient.affinity.kafka import java.util.Properties import akka.actor.Actor import akka.actor.Status.{Failure, Success} import akka.event.Logging import com.typesafe.config.Config import io.amient.affinity.Conf import io.amient.affinity.core.actor.{TransactionAbort, TransactionBegin, TransactionCommit, TransactionalRecord} import io.amient.affinity.core.config.CfgStruct import io.amient.affinity.core.storage.StorageConf import io.amient.affinity.kafka.KafkaStorage.{KafkaConsumerConf, KafkaProducerConf} import org.apache.kafka.clients.producer.{Callback, KafkaProducer, ProducerRecord, RecordMetadata} import org.apache.kafka.common.serialization.ByteArraySerializer import scala.collection.JavaConverters._ object KafkaConf extends KafkaConf { override def apply(config: Config): KafkaConf = new KafkaConf().apply(config) } class KafkaConf extends CfgStruct[KafkaConf](classOf[StorageConf]) { val BootstrapServers = string("kafka.bootstrap.servers", true).doc("kafka connection string used for consumer and/or producer") val Producer = struct("kafka.producer", new KafkaProducerConf, false).doc("any settings that the underlying version of kafka producer client supports") val Consumer = struct("kafka.consumer", new KafkaConsumerConf, false).doc("any settings that the underlying version of kafka consumer client supports") } class TransactionalProducer extends Actor { val logger = Logging.getLogger(context.system, this) private[this] var producer: KafkaProducer[Array[Byte], Array[Byte]] = null val kafkaConf = KafkaConf(Conf(context.system.settings.config).Affi.Storage) val producerConfig = new Properties() { if (kafkaConf.Producer.isDefined) { val producerConfig = kafkaConf.Producer.toMap() if (producerConfig.containsKey("bootstrap.servers")) throw new IllegalArgumentException("bootstrap.servers cannot be overridden for KafkaStorage producer") if (producerConfig.containsKey("key.serializer")) throw new IllegalArgumentException("Binary kafka stream cannot use custom key.serializer") if (producerConfig.containsKey("value.serializer")) throw new IllegalArgumentException("Binary kafka stream cannot use custom value.serializer") producerConfig.entrySet.asScala.filter(_.getValue.isDefined).foreach { case (entry) => put(entry.getKey, entry.getValue.apply.toString) } } put("bootstrap.servers", kafkaConf.BootstrapServers()) put("value.serializer", classOf[ByteArraySerializer].getName) put("key.serializer", classOf[ByteArraySerializer].getName) } override def receive: Receive = { case req@TransactionBegin(transactionalId) => req(sender) ! { if (producer == null) { producerConfig.put("transactional.id", transactionalId) producer = new KafkaProducer[Array[Byte], Array[Byte]](producerConfig) logger.debug(s"Transactions.Init(transactional.id = $transactionalId)") producer.initTransactions() } logger.debug("Transactions.Begin()") producer.beginTransaction() } case TransactionalRecord(topic, key, value, timestamp, partition) => val replyto = sender val producerRecord = new ProducerRecord( topic, partition.map(new Integer(_)).getOrElse(null), timestamp.map(new java.lang.Long(_)).getOrElse(null), key, value) logger.debug(s"Transactions.Append(topic=$topic)") producer.send(producerRecord, new Callback { override def onCompletion(metadata: RecordMetadata, exception: Exception): Unit = { if (exception != null) { replyto ! Failure(exception) } else { replyto ! Success(metadata.offset()) } } }) case req@TransactionCommit() => req(sender) ! { logger.debug("Transactions.Commit()") producer.commitTransaction() } case req@TransactionAbort() => req(sender) ! { logger.debug("Transactions.Abort()") producer.abortTransaction() } } }
Example 70
Source File: CassandraHeath.scala From nexus-iam with Apache License 2.0 | 5 votes |
package ch.epfl.bluebrain.nexus.iam.routes import akka.Done import akka.actor.{ExtendedActorSystem, Extension, ExtensionId, ExtensionIdProvider} import akka.event.Logging import akka.persistence.cassandra.CassandraPluginConfig import akka.persistence.cassandra.session.scaladsl.CassandraSession import scala.concurrent.Future trait CassandraHeath extends Extension { def check: Future[Boolean] } object CassandraHeath extends ExtensionId[CassandraHeath] with ExtensionIdProvider { override def lookup(): ExtensionId[_ <: Extension] = CassandraHeath override def createExtension(as: ExtendedActorSystem): CassandraHeath = { implicit val ec = as.dispatcher val log = Logging(as, "CassandraHeathCheck") val config = new CassandraPluginConfig(as, as.settings.config.getConfig("cassandra-journal")) val (p, s) = (config.sessionProvider, config.sessionSettings) val session = new CassandraSession(as, p, s, ec, log, "health", _ => Future.successful(Done.done())) new CassandraHeath { private val query = s"SELECT now() FROM ${config.keyspace}.messages;" override def check: Future[Boolean] = { session.selectOne(query).map(_ => true).recover { case err => log.error("Error while attempting to query for health check", err) false } } } } }
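Because the health check is an Akka Extension, looking it up by its ExtensionId creates it once per actor system and caches it. A usage sketch, assuming the trait and object above are on the classpath:

import akka.actor.ActorSystem
import ch.epfl.bluebrain.nexus.iam.routes.CassandraHeath
import scala.concurrent.Future

object HealthCheckSketch extends App {
  implicit val system = ActorSystem("sketch")
  import system.dispatcher
  // ExtensionId.apply creates the extension on first use and caches it per system
  val healthy: Future[Boolean] = CassandraHeath(system).check
  healthy.foreach(ok => println(s"cassandra healthy: $ok"))
}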
Example 71
Source File: Remoting.scala From spark1.52 with Apache License 2.0 | 5 votes |
package ch8 import org.learningconcurrency._ import ch8._ import akka.actor.Actor import akka.actor.ActorIdentity import akka.actor.ActorSelection.toScala import akka.actor.Identify import akka.actor.Props import akka.actor.actorRef2Scala import akka.event.Logging object RemotingPongySystem extends App { val system = remotingSystem("PongyDimension", 24321) val pongy = system.actorOf(Props[Pongy], "pongy") Thread.sleep(15000) system.shutdown() } class Runner extends Actor { val log = Logging(context.system, this) val pingy = context.actorOf(Props[Pingy], "pingy") def receive = { case "start" => val path = context.actorSelection("akka.tcp://[email protected]:24321/user/pongy") path ! Identify(0) case ActorIdentity(0, Some(ref)) => pingy ! ref case ActorIdentity(0, None) => log.info("Something's wrong -- no pongy anywhere!") context.stop(self) case "pong" => log.info("got a pong from another dimension.") context.stop(self) } } object RemotingPingySystem extends App { val system = remotingSystem("PingyDimension", 24567) val runner = system.actorOf(Props[Runner], "runner") runner ! "start" Thread.sleep(5000) system.shutdown() }
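Inside an actor, Logging(context.system, this) uses the actor as the log source, so entries carry its path. A minimal sketch (actor and message are illustrative):

import akka.actor.{Actor, ActorSystem, Props}
import akka.event.Logging

class GreetingActor extends Actor {
  // With an Actor as log source, entries carry the actor's path
  val log = Logging(context.system, this)
  def receive = {
    case msg => log.info("got a message: {}", msg)
  }
}

object GreetingApp extends App {
  val system = ActorSystem("sketch")
  system.actorOf(Props[GreetingActor], "greeter") ! "pong"
}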
Example 72
Source File: DefaultJournalCassandraSession.scala From aecor with MIT License | 5 votes |
package akka.persistence.cassandra import akka.Done import akka.actor.{ ActorSystem, ExtendedActorSystem } import akka.event.Logging import akka.persistence.cassandra.Session.Init import akka.persistence.cassandra.session.CassandraSessionSettings import akka.persistence.cassandra.session.scaladsl.CassandraSession import cats.effect.{ ContextShift, Effect } import cats.implicits._ object DefaultJournalCassandraSession { def apply[F[_]: ContextShift]( system: ActorSystem, metricsCategory: String, init: Init[F], sessionProvider: Option[SessionProvider] = None )(implicit F: Effect[F]): F[CassandraSession] = F.delay { val log = Logging(system, classOf[CassandraSession]) val provider = sessionProvider.getOrElse( SessionProvider( system.asInstanceOf[ExtendedActorSystem], system.settings.config.getConfig("cassandra-journal") ) ) val settings = CassandraSessionSettings(system.settings.config.getConfig("cassandra-journal")) new CassandraSession(system, provider, settings, system.dispatcher, log, metricsCategory, { x => F.toIO(init(Session[F](x)).as(Done)).unsafeToFuture() }) } }
Example 73
Source File: Main.scala From distributed-cache-on-k8s-poc with MIT License | 5 votes |
import akka.actor.ActorSystem import akka.event.Logging import akka.http.scaladsl.Http import akka.stream.{ ActorMaterializer, ActorMaterializerSettings } import cluster.ClusterStateInformer import com.typesafe.config.{ Config, ConfigFactory, ConfigValueFactory } import http.Route import scala.util.{ Failure, Success } import scala.concurrent.ExecutionContext.Implicits.global object Main { def main(args: Array[String]): Unit = { val config: Config = { import scala.collection.JavaConverters._ val seedNodes = ClusterSetup.seedNodes() ConfigFactory.empty() .withValue("akka.cluster.seed-nodes", ConfigValueFactory.fromIterable(seedNodes.map(seedNode => s"akka.tcp://${ClusterSetup.actorSystemName()}@$seedNode").asJava)) .withValue("akka.remote.netty.tcp.hostname", ConfigValueFactory.fromAnyRef(ClusterSetup.podName() + "." + ClusterSetup.domain())) .withValue("akka.remote.netty.tcp.port", ConfigValueFactory.fromAnyRef(ClusterSetup.remoteBindingPort())) .withFallback(ConfigFactory.load()) .resolve() } implicit val system: ActorSystem = ActorSystem(ClusterSetup.actorSystemName(), config) val logging = Logging(system, "main") implicit val mat = ActorMaterializer(materializerSettings = Some(ActorMaterializerSettings(system))) val routes = new Route(system) Http().bindAndHandle(routes.routes, "0.0.0.0", 9000).onComplete { case Success(s) => logging.info("Successfully started") case Failure(f) => logging.error(f, "Server cannot be started!!!!") } system.actorOf(ClusterStateInformer.props(), "cluster-informer") } } object ClusterSetup { def seedNodes(): Iterable[String] = sys.env.get("AKKA_SEED_NODES").map(_.split(",")).get.toIterable def domain(): String = sys.env.getOrElse("AKKA_REMOTING_BIND_DOMAIN", throw new RuntimeException("No domain found.")) def podName(): String = sys.env.getOrElse("POD_NAME", throw new RuntimeException("No podname found.")) def remoteBindingPort(): String = sys.env.getOrElse("AKKA_REMOTING_BIND_PORT", throw new RuntimeException("No port found.")) def actorSystemName(): String = sys.env.getOrElse("AKKA_ACTOR_SYSTEM_NAME", throw new RuntimeException("No actorsystem name found.")) }
Example 74
Source File: CustomerApp.scala From reactive-application-development-scala with Apache License 2.0 | 5 votes |
package com.rarebooks.library import akka.actor.{ActorSelection, ActorSystem, Address, RootActorPath} import akka.event.Logging import scala.annotation.tailrec import scala.concurrent.Await import scala.concurrent.duration.{Duration, FiniteDuration, MILLISECONDS => Millis} import scala.io.StdIn object CustomerApp { protected def createCustomer(count: Int, odds: Int, tolerance: Int): Unit = { val selection: ActorSelection = system.actorSelection( RootActorPath(rareBooksAddress) / "user" / "rare-books") selection.resolveOne(resolveTimeout).onComplete { case scala.util.Success(rareBooks) => for (_ <- 1 to count) system.actorOf(Customer.props(rareBooks, odds, tolerance)) case scala.util.Failure(ex) => log.error(ex, ex.getMessage) } } }
Example 75
Source File: RareBooksApp.scala From reactive-application-development-scala with Apache License 2.0 | 5 votes |
package com.rarebooks.library import akka.actor.{ ActorRef, ActorSystem } import akka.event.Logging import scala.annotation.tailrec import scala.concurrent.Await import scala.concurrent.duration.Duration import scala.io.StdIn object RareBooksApp { protected def createRareBooks(): ActorRef = { system.actorOf(RareBooks.props, "rare-books") } def run(): Unit = { log.warning(f"{} running%nWaiting for customer requests.", getClass.getSimpleName) commandLoop() Await.ready(system.whenTerminated, Duration.Inf) } @tailrec private def commandLoop(): Unit = Command(StdIn.readLine()) match { case Command.Customer(count, odds, tolerance) => log.warning(s"Enter customer commands from the customer app prompt.") commandLoop() case Command.Quit => system.terminate() case Command.Unknown(command) => log.warning(s"Unknown command $command") commandLoop() } }
Example 76
Source File: FileMonitorActor.scala From graphql-gateway with Apache License 2.0 | 5 votes |
package sangria.gateway.file import java.nio.file.{NoSuchFileException, StandardWatchEventKinds} import akka.actor.{Actor, ActorRef, Cancellable, PoisonPill, Props} import akka.event.Logging import better.files._ import sangria.gateway.file.FileWatcher._ import scala.collection.mutable import scala.concurrent.duration.FiniteDuration class FileMonitorActor(paths: Seq[File], threshold: FiniteDuration, globs: Seq[String], cb: Vector[File] ⇒ Unit) extends Actor { import FileMonitorActor._ import context.dispatcher val log = Logging(context.system, this) var watchers: Seq[ActorRef] = _ val pendingFiles: mutable.HashSet[File] = mutable.HashSet[File]() var scheduled: Option[Cancellable] = None override def preStart(): Unit = { watchers = paths.map(_.newWatcher(recursive = true)) watchers.foreach { watcher ⇒ watcher ! when(events = StandardWatchEventKinds.ENTRY_CREATE, StandardWatchEventKinds.ENTRY_MODIFY, StandardWatchEventKinds.ENTRY_DELETE) { case (_, file) ⇒ self ! FileChange(file) } } } def receive = { case FileChange(file) ⇒ try { if (file.exists && !file.isDirectory && globs.exists(file.glob(_, includePath = false).nonEmpty)) { pendingFiles += file if (scheduled.isEmpty) scheduled = Some(context.system.scheduler.scheduleOnce(threshold, self, Threshold)) } } catch { case _: NoSuchFileException ⇒ // ignore, it's ok } case Threshold ⇒ val files = pendingFiles.toVector.sortBy(_.name) if (files.nonEmpty) cb(files) pendingFiles.clear() scheduled = None } } object FileMonitorActor { case class FileChange(file: File) case object Threshold def props(paths: Seq[File], threshold: FiniteDuration, globs: Seq[String], cb: Vector[File] ⇒ Unit) = Props(new FileMonitorActor(paths, threshold, globs, cb)) }
Example 77
Source File: FutureRetryUtilitySpec.scala From NSDb with Apache License 2.0 | 5 votes |
package io.radicalbit.nsdb.util import akka.actor.{ActorSystem, Scheduler, Status} import akka.event.{Logging, LoggingAdapter} import akka.testkit.{TestKit, TestProbe} import org.scalatest.{Matchers, WordSpecLike} import scala.collection.mutable import scala.concurrent.duration._ import scala.concurrent.{Await, Future} import scala.concurrent.ExecutionContext.Implicits.global class FutureRetryUtilitySpec extends TestKit(ActorSystem("MySpec")) with WordSpecLike with Matchers with FutureRetryUtility { implicit val schedule: Scheduler = system.scheduler implicit val logger: LoggingAdapter = Logging.getLogger(system, this) private final val delay: FiniteDuration = 2.seconds private final val retries: Int = 3 private def future(flag: Boolean) = if (flag) Future.successful(3) else Future.failed(new RuntimeException("Failure")) "retry function in FutureRetryUtility" must { "successfully returns whether, after retries, the future is eventually successful" in { Await.result(future(true).retry(delay, retries)(_ > 2), Duration.Inf) shouldBe 3 } "thrown an Exception whether, after retries, the future eventually returns an Exception" in { an[RuntimeException] shouldBe thrownBy(Await.result(future(false).retry(delay, retries)(_ => true), Duration.Inf)) } "consider the number of retries" in { val q = mutable.Queue(0) def future = { val nRetries = q.dequeue() if (nRetries < 2) { q.enqueue(nRetries + 1); Future.failed(new RuntimeException) } else { q.enqueue(nRetries + 1); Future.successful(nRetries) } } Await.result(future.retry(delay, retries)(_ > 2), Duration.Inf) shouldBe 3 } } "pipeTo function in FutureRetryUtility" must { "returns a successful future and send the content of it through pipe" in { val testProbe = TestProbe("actor-test") future(true).pipeTo(delay, retries, testProbe.testActor)() testProbe.expectMsg(3) } "return a failed future and send a status failure through pipe" in { val testProbe = TestProbe("actor-test") future(false).pipeTo(delay, retries, testProbe.testActor)() testProbe.expectMsgAllClassOf(classOf[Status.Failure]) } } }
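The spec exposes the adapter as an implicit value, which is how the retry utility can log without receiving the adapter as an explicit argument. A sketch of threading an implicit LoggingAdapter into a helper (the helper is hypothetical):

import akka.actor.ActorSystem
import akka.event.{Logging, LoggingAdapter}

object ImplicitAdapterSketch extends App {
  val system = ActorSystem("sketch")
  implicit val logger: LoggingAdapter = Logging.getLogger(system, this)

  // A utility can demand the adapter implicitly instead of as an explicit argument
  def retryNotice(attempt: Int)(implicit log: LoggingAdapter): Unit =
    log.warning("retrying, attempt {}", attempt)

  retryNotice(1)
  system.terminate()
}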
Example 78
Source File: NsdbNodeEndpoint.scala From NSDb with Apache License 2.0 | 5 votes |
package io.radicalbit.nsdb.cluster import akka.actor.{ActorRef, ActorSystem} import akka.event.{Logging, LoggingAdapter} import com.typesafe.config.Config import io.radicalbit.nsdb.cluster.endpoint.GrpcEndpoint import io.radicalbit.nsdb.security.NsdbSecurity import io.radicalbit.nsdb.web.{BitSerializer, CustomSerializers, WebResources} import org.json4s.{DefaultFormats, Formats} class NsdbNodeEndpoint(readCoordinator: ActorRef, writeCoordinator: ActorRef, metadataCoordinator: ActorRef, publisher: ActorRef)(override implicit val system: ActorSystem) extends WebResources with NsdbSecurity { override val config: Config = system.settings.config override implicit val logger: LoggingAdapter = Logging.getLogger(system, this) new GrpcEndpoint(readCoordinator = readCoordinator, writeCoordinator = writeCoordinator, metadataCoordinator = metadataCoordinator) implicit val formats: Formats = DefaultFormats ++ CustomSerializers.customSerializers + BitSerializer initWebEndpoint(writeCoordinator, readCoordinator, metadataCoordinator, publisher) }
Example 79
Source File: ServerSettingsTemplate.scala From akka-http-circe-json-template with Apache License 2.0 | 5 votes |
package com.vitorsvieira.http.config import akka.actor.ActorSystem import akka.event.{ LogSource, Logging } import akka.stream.ActorMaterializer import com.typesafe.config.{ Config, ConfigFactory } import scala.concurrent.ExecutionContextExecutor trait ServerSettingsTemplate { lazy private val config: Config = ConfigFactory.load() private val httpConfig: Config = config.getConfig("http") val httpInterface: String = httpConfig.getString("interface") val httpPort: Int = httpConfig.getInt("port") implicit val actorSystem: ActorSystem = ActorSystem("akka-http-circe-json") implicit val materializer: ActorMaterializer = ActorMaterializer() implicit val executionContext: ExecutionContextExecutor = actorSystem.dispatcher private implicit val logSource: LogSource[ServerSettingsTemplate] = (t: ServerSettingsTemplate) ⇒ t.getClass.getSimpleName private def logger(implicit logSource: LogSource[_ <: ServerSettingsTemplate]) = Logging(actorSystem, this.getClass) implicit val log = logger } object ServerSettingsTemplate extends ServerSettingsTemplate
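The template defines its own LogSource instance, the typeclass Akka consults to turn an arbitrary value into the source string attached to log entries. A minimal sketch of a custom LogSource (the Settings type is a hypothetical stand-in):

import akka.actor.ActorSystem
import akka.event.{Logging, LogSource}

class Settings

object LogSourceSketch extends App {
  // The typeclass instance decides what "source" string is attached to entries
  implicit val settingsLogSource: LogSource[Settings] =
    (_: Settings) => "server-settings"

  val system = ActorSystem("sketch")
  val log = Logging(system, new Settings)
  log.info("settings loaded")
  system.terminate()
}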
Example 80
Source File: TestNGRouteTest.scala From squbs with Apache License 2.0 | 5 votes |
package org.squbs.testkit.japi import akka.actor.ActorSystem import akka.event.Logging import akka.http.javadsl.model.HttpRequest import akka.http.javadsl.server.RouteResult import akka.http.javadsl.testkit.{RouteTest, TestRouteResult} import akka.stream.{ActorMaterializer, Materializer} import com.typesafe.config.{Config, ConfigFactory} import org.scalatest.testng.TestNGSuiteLike import org.testng.Assert import org.testng.annotations.{AfterClass, BeforeClass} import scala.concurrent.{Await, Future} import scala.concurrent.duration._ import scala.util.control.NonFatal trait TestNGRouteTestBase extends RouteTest with RouteDefinitionTest with TestNGSuiteLike { protected def systemResource: SystemResource implicit def system: ActorSystem = systemResource.system implicit def materializer: Materializer = systemResource.materializer override protected def createTestRouteResultAsync(request: HttpRequest, result: Future[RouteResult]): TestRouteResult = new TestRouteResult(result, awaitDuration)(system.dispatcher, materializer) { protected def assertEquals(expected: AnyRef, actual: AnyRef, message: String): Unit = reportDetails { Assert.assertEquals(actual, expected, message) } protected def assertEquals(expected: Int, actual: Int, message: String): Unit = Assert.assertEquals(actual, expected, message) protected def assertTrue(predicate: Boolean, message: String): Unit = Assert.assertTrue(predicate, message) protected def fail(message: String): Unit = { Assert.fail(message) throw new IllegalStateException("Assertion should have failed") } def reportDetails[T](block: ⇒ T): T = { try block catch { case NonFatal(t) ⇒ throw new AssertionError(t.getMessage + "\n" + " Request was: " + request + "\n" + " Route result was: " + result + "\n", t) } } } } abstract class TestNGRouteTest extends TestNGRouteTestBase { protected def additionalConfig: Config = ConfigFactory.empty() private[this] val _systemResource = new SystemResource(Logging.simpleName(getClass), additionalConfig) protected def systemResource: SystemResource = _systemResource @BeforeClass(alwaysRun=true) def setup(): Unit = { systemResource.before() } @AfterClass(alwaysRun=true) def teardown(): Unit = { systemResource.after() } } class SystemResource(name: String, additionalConfig: Config) { protected def config = additionalConfig.withFallback(ConfigFactory.load()) protected def createSystem(): ActorSystem = ActorSystem(name, config) protected def createMaterializer(system: ActorSystem): ActorMaterializer = ActorMaterializer()(system) implicit def system: ActorSystem = _system implicit def materializer: ActorMaterializer = _materializer private[this] var _system: ActorSystem = null private[this] var _materializer: ActorMaterializer = null def before(): Unit = { require((_system eq null) && (_materializer eq null)) _system = createSystem() _materializer = createMaterializer(_system) } def after(): Unit = { Await.result(_system.terminate(), 5.seconds) _system = null _materializer = null } }
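Logging.simpleName returns the unqualified class name, stripped of its package, which is why it is handy for naming test actor systems. A small sketch:

import akka.event.Logging

object SimpleNameSketch extends App {
  // Unqualified class name without the package prefix
  println(Logging.simpleName(getClass))          // e.g. "SimpleNameSketch$"
  println(Logging.simpleName(classOf[String]))   // "String"
}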
Example 81
Source File: Deduplicate.scala From squbs with Apache License 2.0 | 5 votes |
package org.squbs.streams import akka.event.Logging import akka.stream.ActorAttributes.SupervisionStrategy import akka.stream._ import akka.stream.stage.{GraphStage, GraphStageLogic, InHandler, OutHandler} import scala.compat.java8.FunctionConverters._ import scala.util.control.NonFatal object Deduplicate { def apply[T, U](key: T => U, duplicateCount: Long) = new Deduplicate[T, U](key, duplicateCount, new java.util.HashMap[U, MutableLong]()) def apply[T, U](key: T => U, duplicateCount: Long, registry: java.util.Map[U, MutableLong]) = new Deduplicate[T, U](key, duplicateCount, registry) def apply[T](duplicateCount: Long = Long.MaxValue, registry: java.util.Map[T, MutableLong] = new java.util.HashMap[T, MutableLong]()): Deduplicate[T, T] = Deduplicate(t => t, duplicateCount, registry) } case class MutableLong(var value: Long = 0L) { def increment() = { value += 1 value } }
Example 82
Source File: DemandSupplyMetrics.scala From squbs with Apache License 2.0 | 5 votes |
package org.squbs.streams import akka.actor.ActorSystem import akka.event.Logging import akka.stream._ import akka.stream.scaladsl.Flow import akka.stream.stage._ import com.codahale.metrics.MetricRegistry import org.squbs.metrics.MetricsExtension object DemandSupplyMetrics { class DemandSupplyMetricsStage[T](name: String)(implicit system: ActorSystem) extends GraphStage[FlowShape[T, T]] { val domain = MetricsExtension(system).Domain val metrics = MetricsExtension(system).metrics val in = Inlet[T](Logging.simpleName(this) + ".in") val out = Outlet[T](Logging.simpleName(this) + ".out") override val shape = FlowShape.of(in, out) // naming convention "domain:key-property-list" val upstreamCounter = MetricRegistry.name(domain, s"$name-upstream-counter") val downstreamCounter = MetricRegistry.name(domain, s"$name-downstream-counter") override def createLogic(inheritedAttributes: Attributes): GraphStageLogic = new GraphStageLogic(shape) { setHandler(in, new InHandler { override def onPush(): Unit = { val elem = grab(in) metrics.meter(upstreamCounter).mark push(out, elem) } }) setHandler(out, new OutHandler { override def onPull(): Unit = { metrics.meter(downstreamCounter).mark pull(in) } }) } }
Example 83
Source File: Main.scala From cloudflow with Apache License 2.0 | 5 votes |
package cloudflow.installer import akka.actor._ import akka.event.Logging import akka.event.LoggingAdapter import akka.stream._ import skuber._ import skuber.api.Configuration import skuber.apiextensions._ import scala.concurrent._ import scala.concurrent.duration._ object Main { def main(args: Array[String]): Unit = { if (!ResourceDirectory.exists()) { println("The Cloudflow installer could not locate the resource directory.") System.exit(1) } implicit val system = ActorSystem() implicit val log: LoggingAdapter = Logging(system, "Cloudflow Installer") try { implicit val mat = createMaterializer() implicit val ec = system.dispatcher implicit val settings = Settings(system) Diagnostics.logStartOperatorMessage(settings) val client = connectToKubernetes() installCRD(client) HealthChecks.serve(settings) Operator.handleEvents(client) } catch { case t: Throwable ⇒ log.error(t, "Unexpected error starting Cloudflow install operator, terminating.") system.registerOnTermination(exitWithFailure) system.terminate() } } private def createMaterializer()(implicit system: ActorSystem) = { val decider: Supervision.Decider = _ ⇒ Supervision.Stop ActorMaterializer(ActorMaterializerSettings(system).withSupervisionStrategy(decider)) } private def exitWithFailure(): Unit = System.exit(-1) private def connectToKubernetes()(implicit system: ActorSystem, mat: Materializer, log: LoggingAdapter) = { val conf = Configuration.defaultK8sConfig val client = k8sInit(conf).usingNamespace("") log.info(s"Connected to Kubernetes cluster: ${conf.currentContext.cluster.server}") client } private def installCRD(client: skuber.api.client.KubernetesClient)(implicit ec: ExecutionContext): Unit = { val crdTimeout = 20.seconds // TODO check if version is the same, if not, also create. import CloudflowInstance._ Await.ready( client.getOption[CustomResourceDefinition](CRD.name).map { result ⇒ result.fold(client.create(CRD)) { crd ⇒ if (crd.spec.version != CRD.spec.version) client.create(CRD) else Future.successful(crd) } }, crdTimeout ) } }
Example 84
Source File: EventSpec.scala From cloudflow with Apache License 2.0 | 5 votes |
package cloudflow.installer import akka.actor.ActorSystem import akka.event.Logging import akka.event.LoggingAdapter import akka.stream.ActorMaterializer import akka.stream.scaladsl._ import org.scalatest._ import scala.concurrent.Await import scala.concurrent.duration._ class EventSpec extends WordSpec with MustMatchers with GivenWhenThen with EitherValues with Inspectors { implicit val system: ActorSystem = ActorSystem("TestSystem") implicit val mat: ActorMaterializer = ActorMaterializer() implicit val ec = mat.executionContext implicit val log: LoggingAdapter = Logging(system, "Cloudflow Installer") implicit val settings = Settings(system) "Cloudflow event" should { "transform an install event to an install action" in { val instance = TestInstance.get val clusterFeatures = ClusterFeatures() val installEvent = InstallEvent(instance, None, instance.metadata.namespace, clusterFeatures) val future = Source(List(installEvent)).via(CloudflowEvent.toAction).runWith(Sink.headOption) val result = Await.result(future, 3.seconds) result must not be empty result.get mustBe a[CompositeAction] val action = result.get.asInstanceOf[CompositeAction] action.childActions must have size 5 action.childActions(0) mustBe a[Strimzi] action.childActions(1) mustBe a[SparkOperator] action.childActions(2) mustBe a[FlinkOperator] action.childActions(3) mustBe a[CloudflowOperatorManagedStrimzi] action.childActions(4) mustBe a[PatchOwnerReferenceOfSparkMutatingWebhookConfig] } "transform an un-install event to an un-install action" in { val instance = TestInstance.get val clusterFeatures = ClusterFeatures() val installEvent = UninstallEvent(instance, None, instance.metadata.namespace, clusterFeatures) val future = Source(List(installEvent)).via(CloudflowEvent.toAction).runWith(Sink.headOption) val result = Await.result(future, 3.seconds) result must not be empty result.get mustBe a[CompositeAction] val action = result.get.asInstanceOf[CompositeAction] action.childActions must have size 2 action.childActions(0) mustBe a[RemoveCloudflowClusterwideResources] action.childActions(1) mustBe a[RemoveCloudflowNamespacedResources] } "verify that detected cluster features are present" in { val instance = TestInstance.get val clusterFeatures = ClusterFeatures(hasSecurityContextConstraints = true) val installEvent = InstallEvent(instance, None, instance.metadata.namespace, clusterFeatures) val future = Source(List(installEvent)).via(CloudflowEvent.toAction).runWith(Sink.headOption) val result = Await.result(future, 3.seconds) result must not be empty result.get mustBe a[CompositeAction] val action = result.get.asInstanceOf[CompositeAction] action.childActions must have size 6 action.childActions(0) mustBe a[Strimzi] action.childActions(1) mustBe a[SparkOperator] action.childActions(2) mustBe a[FlinkOperator] action.childActions(3) mustBe a[AddSccToSparkServiceAccount] action.childActions(4) mustBe a[CloudflowOperatorManagedStrimzi] action.childActions(5) mustBe a[PatchOwnerReferenceOfSparkMutatingWebhookConfig] } } "transform a pre-requisite failure event to a no-operation action" in { val instance = TestInstance.get val failures = List(CloudflowInstance.ValidationFailure("The cluster does not have a storage class named 'test'")) val installEvent = PreRequisiteFailed(instance, failures) val future = Source(List(installEvent)).via(CloudflowEvent.toAction).runWith(Sink.headOption) val result = Await.result(future, 3.seconds) result must not be empty result.get mustBe a[UpdateCRStatusAction] val action = result.get.asInstanceOf[UpdateCRStatusAction] val caught = Await.result(action.execute(), 3.seconds) caught.stdErr mustBe Some("The cluster does not have a storage class named 'test'") } }
Example 85
Source File: AkkaBoss.scala From akka-kubernetes-tests with Apache License 2.0 | 5 votes |
package akka.kubernetes.sample import akka.actor.Actor import akka.cluster.Cluster import akka.event.Logging import akka.kubernetes.sample.AkkaBoss.{GoToJobCentre, JobSpec, WhatCanIDo} object AkkaBoss { case class WhatCanIDo(name: String) case class JobSpec(roles: Set[String]) case class GoToJobCentre(name: String) } class AkkaBoss(name: String) extends Actor { val log = Logging(this) val cluster = Cluster(context.system) log.info("The boss is up and running on Cluster Node [{}]", cluster.selfMember) val teamMembers = Map( "virtualvoid" -> Set("Any and everything"), "johan" -> Set("radiation", "streams"), "raboof" -> Set("Buy a house", "HTTP"), "ktoso" -> Set("Stop fruiting and come home"), "helena" -> Set("Stopped fruiting and came home", "Drink complexi tea"), "chbatey" -> Set("Anything but Cassandra", "Drink coffee") ) override def receive: Receive = { case "hello" => log.info("Boss to say hello to [{}]", sender()) sender() ! s"Hello from $name" case WhatCanIDo(n) => teamMembers.get(n) match { case Some(roles) => sender() ! JobSpec(roles) case None => sender() ! GoToJobCentre(n) } case msg => log.info("Boss just says hello, what is this? [{}]", msg) } }
Example 86
Source File: AkkaMember.scala From akka-kubernetes-tests with Apache License 2.0 | 5 votes |
package akka.kubernetes.sample import akka.actor.{Actor, Stash} import akka.cluster.sharding.ShardRegion import akka.cluster.singleton.{ClusterSingletonProxy, ClusterSingletonProxySettings} import akka.event.Logging import akka.kubernetes.sample.AkkaBoss.{GoToJobCentre, JobSpec, WhatCanIDo} import akka.kubernetes.sample.AkkaMember.Hello case object AkkaMember { case class Hello(name: String) val extractEntityId: ShardRegion.ExtractEntityId = { case msg @ Hello(id) ⇒ (id, msg) } val numberOfShards = 100 val extractShardId: ShardRegion.ExtractShardId = { case Hello(id) ⇒ math.abs(id.hashCode % numberOfShards).toString } } class AkkaMember() extends Actor with Stash { val bossProxy = context.system.actorOf( ClusterSingletonProxy.props(singletonManagerPath = "/user/boss", settings = ClusterSingletonProxySettings(context.system)) ) val name = self.path.name val log = Logging(this) override def preStart(): Unit = { // TODO retry log.info("/me good morning {}", name) bossProxy ! WhatCanIDo(name) } override def receive: Receive = { case JobSpec(roles) => log.info("I'm part of the team. I can do {}", roles) unstashAll() context.become(ready(roles)) case GoToJobCentre(n) => log.info("Seems I am not in the team :( I'll go fruit picking") context.stop(self) case _ => stash() } def ready(roles: Set[String]): Receive = { case Hello(_) => sender() ! s"hello from $name. What can I do for you? It better be in ${roles.mkString(", ")}" case _ => sender() ! "what?" } }
Example 87
Source File: StatsEndpoint.scala From akka-kubernetes-tests with Apache License 2.0 | 5 votes |
package akka.kubernetes.soak import akka.actor.{ActorRef, ActorSystem} import akka.event.Logging import akka.http.scaladsl.marshallers.sprayjson.SprayJsonSupport import akka.http.scaladsl.model.StatusCodes import akka.http.scaladsl.server.{Directives, Route} import spray.json.DefaultJsonProtocol import akka.pattern.ask import akka.util.Timeout import scala.concurrent.duration._ import scala.util.{Failure, Success} // collect your json format instances into a support trait: trait StatsJsonSupport extends SprayJsonSupport with DefaultJsonProtocol { implicit val testResultFormat = jsonFormat2(TestResult) implicit val testResultsFormat = jsonFormat7(TestResults) } class StatsEndpoint(system: ActorSystem, client: ActorRef) extends Directives with StatsJsonSupport { private implicit val askTimeout = Timeout(5.seconds) private val log = Logging(system, getClass) val route: Route = path("stats") { get { onComplete(client.ask(GetTestResults()).mapTo[TestResults]) { case Failure(t) => log.error(t, "Failed to get test results") complete(StatusCodes.InternalServerError) case Success(value) => complete(value) } } } }
Example 88
Source File: ClusterSoakSpec.scala From akka-kubernetes-tests with Apache License 2.0 | 5 votes |
package akka.cluster.soak import akka.actor.ActorSystem import akka.discovery.ServiceDiscovery.Resolved import akka.event.Logging import akka.http.scaladsl.Http import akka.http.scaladsl.model.{HttpRequest, StatusCodes} import akka.http.scaladsl.unmarshalling.Unmarshal import akka.kubernetes.soak.Tests.{ResponseTimeNanos, Target} import akka.kubernetes.soak.{StatsJsonSupport, TestResults} import akka.stream.ActorMaterializer import akka.stream.scaladsl.{Sink, Source} import org.scalatest.concurrent.ScalaFutures import org.scalatest.time.{Seconds, Span} import org.scalatest.{Matchers, WordSpec} import akka.util.PrettyDuration._ import scala.collection.immutable import scala.concurrent.Future import scala.concurrent.duration._ class ClusterSoakSpec(endpoints: Resolved)(implicit system: ActorSystem) extends WordSpec with StatsJsonSupport with ScalaFutures with Matchers { import system.dispatcher implicit val mat = ActorMaterializer() implicit override val patienceConfig = PatienceConfig(timeout = Span(30, Seconds), interval = Span(2, Seconds)) val log = Logging(system, getClass) "The Clustered service" should { "not have had any failures" in { val responses: immutable.Seq[TestResults] = Source(endpoints.addresses) .mapAsyncUnordered(10) { rt => log.info("Hitting {}", rt.host) val request = HttpRequest(uri = s"http://${rt.host}:${rt.port.getOrElse(8080)}/stats") for { response <- Http().singleRequest(request) entity <- response.entity.toStrict(1.second) results <- response.status match { case StatusCodes.OK => Unmarshal(entity).to[TestResults] case unexpected => Future.failed( new RuntimeException(s"Unexpected response code: $unexpected body: ${entity.data.utf8String}") ) } } yield results } .runWith(Sink.seq) .futureValue log.info("{} nodes tested", responses.size) val maxJoinTimes = responses.map(_.joiningTime).sorted.reverse.take(5).map(_.nanos.pretty) log.info("Max join times: {}", maxJoinTimes) val maxResponseTimePerNode: immutable.Seq[(Target, ResponseTimeNanos)] = responses.map(_.lastResult.responses.maxBy(_._2)) val averageResponseTimesPerNode = responses .map((eachNode: TestResults) => { val total = eachNode.lastResult.responses.map(_._2).sum.nanos val count = eachNode.lastResult.responses.size total / count }) .sorted .reverse log.info("All response times: {}", responses) log.info("Slowest response times across all node pings: {}", maxResponseTimePerNode.sortBy(_._2).reverse.take(5).map(_._2.nanos.pretty)) log.info("Slowest average response times across all node pings: {}", averageResponseTimesPerNode.take(5).map(_.pretty)) responses.filter(_.testsFailed != 0) shouldEqual Nil withClue("Response took longer than 2 seconds. Do some investigation") { responses.filter(_.lastResult.responses.exists(_._2.nanos > 2.seconds)) shouldEqual Nil } withClue("Found unreachable events") { responses.filter(_.memberUnreachableEvents != 0) shouldEqual Nil } withClue("Found downed events") { responses.filter(_.memberDownedEvents != 0) shouldEqual Nil } } } }
Example 89
Source File: BankAccountEventJSONSerializer.scala From akka-ddd-cqrs-es-example with MIT License | 5 votes |
package com.github.j5ik2o.bank.adaptor.serialization import akka.actor.ExtendedActorSystem import akka.event.{ Logging, LoggingAdapter } import akka.serialization.SerializerWithStringManifest import com.github.j5ik2o.bank.domain.model._ import org.slf4j.LoggerFactory import pureconfig._ object BankAccountEventJSONManifest { final val CREATE = BankAccountOpened.getClass.getName.stripSuffix("$") final val UPDATE = BankAccountEventUpdated.getClass.getName.stripSuffix("$") final val DEPOSIT = BankAccountDeposited.getClass.getName.stripSuffix("$") final val WITHDRAW = BankAccountWithdrawn.getClass.getName.stripSuffix("$") final val DESTROY = BankAccountClosed.getClass.getName.stripSuffix("$") } class BankAccountEventJSONSerializer(system: ExtendedActorSystem) extends SerializerWithStringManifest { import BankAccountCreatedJson._ import BankAccountEventJSONManifest._ import io.circe.generic.auto._ private val logger = LoggerFactory.getLogger(getClass) private val config = loadConfigOrThrow[BankAccountEventJSONSerializerConfig]( system.settings.config.getConfig("bank.interface.bank-account-event-json-serializer") ) private implicit val log: LoggingAdapter = Logging.getLogger(system, getClass) private val isDebugEnabled = config.isDebuged override def identifier: Int = 50 override def manifest(o: AnyRef): String = { val result = o.getClass.getName logger.debug(s"manifest: $result") result } override def toBinary(o: AnyRef): Array[Byte] = o match { case orig: BankAccountOpened => CirceJsonSerialization.toBinary(orig, isDebugEnabled) case orig: BankAccountEventUpdated => CirceJsonSerialization.toBinary(orig, isDebugEnabled) case orig: BankAccountDeposited => CirceJsonSerialization.toBinary(orig, isDebugEnabled) case orig: BankAccountWithdrawn => CirceJsonSerialization.toBinary(orig, isDebugEnabled) case orig: BankAccountClosed => CirceJsonSerialization.toBinary(orig, isDebugEnabled) } override def fromBinary(bytes: Array[Byte], manifest: String): AnyRef = { logger.debug(s"fromBinary: $manifest") manifest match { case CREATE => CirceJsonSerialization.fromBinary[BankAccountOpened, BankAccountCreatedJson](bytes, isDebugEnabled) case UPDATE => CirceJsonSerialization.fromBinary[BankAccountEventUpdated, BankAccountUpdatedJson](bytes, isDebugEnabled) case DEPOSIT => CirceJsonSerialization.fromBinary[BankAccountDeposited, BankAccountDepositedJson](bytes, isDebugEnabled) case WITHDRAW => CirceJsonSerialization.fromBinary[BankAccountWithdrawn, BankAccountWithdrawedJson](bytes, isDebugEnabled) case DESTROY => CirceJsonSerialization.fromBinary[BankAccountClosed, BankAccountDestroyedJson](bytes, isDebugEnabled) } } }
Example 90
Source File: RouteLogging.scala From akka-ddd-cqrs-es-example with MIT License | 5 votes |
package com.github.j5ik2o.bank.adaptor.controller

import akka.event.Logging
import akka.http.scaladsl.model.headers.{ Authorization, BasicHttpCredentials }
import akka.http.scaladsl.model.{ HttpRequest, HttpResponse }
import akka.http.scaladsl.server.{ Directive0, RouteResult }
import akka.http.scaladsl.server.directives.{ DebuggingDirectives, LogEntry }

trait RequestFormatter {
  def formatRequest(request: HttpRequest): LogEntry
}

trait RequestResultFormatter {
  def formatRequestResponse(request: HttpRequest): RouteResult => Option[LogEntry]
}

object DefaultRequestLoggingFormatter {

  implicit object requestFormatter extends RequestFormatter {
    override def formatRequest(request: HttpRequest): LogEntry = {
      LogEntry(s"${formatRequestToString(request)}/", Logging.InfoLevel)
    }
  }

  implicit object requestResultFormatter extends RequestResultFormatter {
    override def formatRequestResponse(request: HttpRequest): RouteResult => Option[LogEntry] = {
      case RouteResult.Complete(response) =>
        val req = formatRequestToString(request)
        val res = formatResponseToString(response)
        Some(LogEntry(s"$req/$res", Logging.InfoLevel))
      case _ =>
        val req = formatRequestToString(request)
        Some(LogEntry(req, Logging.InfoLevel))
    }
  }

  private def formatRequestToString(request: HttpRequest): String = {
    val protocol = request.protocol.value
    val method   = request.method.name()
    val path     = request.uri.toString()
    val headers = request.headers
      .collect {
        case Authorization(_: BasicHttpCredentials) => "authorization:Basic ***"
        case Authorization(_)                       => "authorization:***"
        case h                                      => s"'${h.lowercaseName()}':'${h.value()}'"
      }
      .mkString(", ")
    s"$protocol $method $path [$headers]"
  }

  private def formatResponseToString(response: HttpResponse): String = {
    val status  = response.status.value
    val headers = response.headers.map(h => s"'${h.lowercaseName()}':'${h.value()}'").mkString(", ")
    s"$status [$headers]"
  }
}

class RouteLogging()(implicit requestFormatter: RequestFormatter, requestResultFormatter: RequestResultFormatter) {
  val httpLogRequest: Directive0 = DebuggingDirectives.logRequest(requestFormatter.formatRequest _)
  val httpLogRequestResult: Directive0 =
    DebuggingDirectives.logRequestResult(requestResultFormatter.formatRequestResponse _)
}

object RouteLogging {
  def apply()(implicit requestFormatter: RequestFormatter,
              requestResultFormatter: RequestResultFormatter): RouteLogging = new RouteLogging()

  val default: RouteLogging =
    new RouteLogging()(DefaultRequestLoggingFormatter.requestFormatter,
                       DefaultRequestLoggingFormatter.requestResultFormatter)
}
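With the default formatters in implicit scope, wrapping a route is one call, since a Directive0 applies directly to an inner route. A minimal sketch (myRoute is hypothetical):

import akka.http.scaladsl.server.Directives._
import akka.http.scaladsl.server.Route

val myRoute: Route = path("ping") { get { complete("pong") } }
// Logs each request and its result through the formatters above.
val loggedRoute: Route = RouteLogging.default.httpLogRequestResult(myRoute)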
Example 91
Source File: LoggingReporter.scala From korolev with Apache License 2.0 | 5 votes |
package korolev.akka.util

import akka.actor.ActorSystem
import akka.event.{LogSource, Logging}
import korolev.effect.Reporter

final class LoggingReporter(actorSystem: ActorSystem) extends Reporter {

  private implicit val logSource: LogSource[LoggingReporter] = new LogSource[LoggingReporter] {
    def genString(t: LoggingReporter): String = "korolev"
  }

  private val log = Logging(actorSystem, this)

  def error(message: String, cause: Throwable): Unit = log.error(cause, message)
  def error(message: String): Unit = log.error(message)
  def warning(message: String, cause: Throwable): Unit = log.warning(s"$message: {}", cause)
  def warning(message: String): Unit = log.warning(message)
  def info(message: String): Unit = log.info(message)
}
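The interesting detail here is the implicit LogSource: Logging(actorSystem, this) uses it to derive the source name, so every message from this reporter is tagged "korolev" rather than the class name. The same mechanism works for any type; a minimal sketch with a hypothetical class:

import akka.actor.ActorSystem
import akka.event.{LogSource, Logging}

class PaymentService // hypothetical

object LogSourceDemo extends App {
  implicit val paymentLogSource: LogSource[PaymentService] = new LogSource[PaymentService] {
    def genString(t: PaymentService): String = "payments"
  }

  val system = ActorSystem("demo")
  val log = Logging(system, new PaymentService)
  log.info("this line is logged with source 'payments'")
}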
Example 92
Source File: Status.scala From nexus-kg with Apache License 2.0 | 5 votes |
package ch.epfl.bluebrain.nexus.kg.routes

import akka.Done
import akka.actor.ActorSystem
import akka.cluster.{Cluster, MemberStatus}
import akka.event.Logging
import akka.persistence.cassandra.CassandraPluginConfig
import akka.persistence.cassandra.session.scaladsl.CassandraSession
import ch.epfl.bluebrain.nexus.kg.config.AppConfig.PersistenceConfig
import monix.eval.Task

import scala.concurrent.Future

sealed trait Status {
  def check: Task[Boolean]
}

object Status {

  class CassandraStatus(implicit as: ActorSystem, persistence: PersistenceConfig) extends Status {
    implicit val ec = as.dispatcher

    private val log    = Logging(as, "CassandraHealthCheck")
    private val config = new CassandraPluginConfig(as, as.settings.config.getConfig(persistence.journalPlugin))
    private val (p, s) = (config.sessionProvider, config.sessionSettings)

    private val session =
      new CassandraSession(as, p, s, ec, log, "health", _ => Future.successful(Done.done()))

    private val query = s"SELECT now() FROM ${config.keyspace}.messages;"

    override def check: Task[Boolean] =
      Task.deferFuture(session.selectOne(query).map(_ => true).recover {
        case err =>
          log.error(err, "Error while attempting to query for health check")
          false
      })
  }

  class ClusterStatus(cluster: Cluster) extends Status {
    override def check: Task[Boolean] =
      Task.pure(
        !cluster.isTerminated &&
        cluster.state.leader.isDefined &&
        cluster.state.members.nonEmpty &&
        !cluster.state.members.exists(_.status != MemberStatus.Up) &&
        cluster.state.unreachable.isEmpty
      )
  }
}
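Since each check is a Task[Boolean], several of them compose into a single overall health check. A minimal sketch, assuming the Monix Task API:

import monix.eval.Task

// True only when every registered status check passes.
def overall(checks: List[Status]): Task[Boolean] =
  Task.sequence(checks.map(_.check)).map(_.forall(identity))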
Example 93
Source File: akkaStreams.scala From sangria-akka-streams with Apache License 2.0 | 5 votes |
package sangria.streaming

import scala.language.higherKinds

import akka.NotUsed
import akka.event.Logging
import akka.stream.ActorAttributes.SupervisionStrategy
import akka.stream._
import akka.stream.scaladsl.{Merge, Sink, Source}
import akka.stream.stage.{GraphStage, GraphStageLogic, InHandler, OutHandler}

import scala.concurrent.Future

object akkaStreams {
  type AkkaSource[+T] = Source[T, NotUsed]

  abstract class SimpleLinearGraphStage[T] extends GraphStage[FlowShape[T, T]] {
    val in  = Inlet[T](Logging.simpleName(this) + ".in")
    val out = Outlet[T](Logging.simpleName(this) + ".out")
    override val shape = FlowShape(in, out)
  }

  class AkkaStreamsSubscriptionStream(implicit materializer: Materializer) extends SubscriptionStream[AkkaSource] {
    def supported[T[_]](other: SubscriptionStream[T]) = other.isInstanceOf[AkkaStreamsSubscriptionStream]

    def map[A, B](source: AkkaSource[A])(fn: A => B) = source.map(fn)

    def singleFuture[T](value: Future[T]) = Source.fromFuture(value)

    def single[T](value: T) = Source.single(value)

    def mapFuture[A, B](source: AkkaSource[A])(fn: A => Future[B]) = source.mapAsync(1)(fn)

    def first[T](s: AkkaSource[T]) = s.runWith(Sink.head)

    def failed[T](e: Throwable) = Source.failed(e).asInstanceOf[AkkaSource[T]]

    def onComplete[Ctx, Res](result: AkkaSource[Res])(op: => Unit) =
      result
        .via(OnComplete(() => op))
        .recover { case e => op; throw e }
        .asInstanceOf[AkkaSource[Res]]

    def flatMapFuture[Ctx, Res, T](future: Future[T])(resultFn: T => AkkaSource[Res]) =
      Source.fromFuture(future).flatMapMerge(1, resultFn)

    def merge[T](streams: Vector[AkkaSource[T]]) = {
      if (streams.size > 1)
        Source.combine(streams(0), streams(1), streams.drop(2): _*)(Merge(_))
      else if (streams.nonEmpty)
        streams.head
      else
        throw new IllegalStateException("No streams produced!")
    }

    def recover[T](stream: AkkaSource[T])(fn: Throwable => T) =
      stream recover { case e => fn(e) }
  }

  implicit def akkaSubscriptionStream(implicit materializer: Materializer): SubscriptionStream[AkkaSource] =
    new AkkaStreamsSubscriptionStream

  implicit def akkaStreamIsValidSubscriptionStream[A[_, _], Ctx, Res, Out](
      implicit materializer: Materializer,
      ev1: ValidOutStreamType[Res, Out]): SubscriptionStreamLike[Source[A[Ctx, Res], NotUsed], A, Ctx, Res, Out] =
    new SubscriptionStreamLike[Source[A[Ctx, Res], NotUsed], A, Ctx, Res, Out] {
      type StreamSource[X] = AkkaSource[X]
      val subscriptionStream = new AkkaStreamsSubscriptionStream
    }

  private final case class OnComplete[T](op: () => Unit) extends SimpleLinearGraphStage[T] {
    override def toString: String = "OnComplete"

    override def createLogic(inheritedAttributes: Attributes): GraphStageLogic =
      new GraphStageLogic(shape) with OutHandler with InHandler {
        def decider =
          inheritedAttributes.get[SupervisionStrategy].map(_.decider).getOrElse(Supervision.stoppingDecider)

        override def onPush(): Unit = {
          push(out, grab(in))
        }

        override def onPull(): Unit = pull(in)

        override def onDownstreamFinish() = {
          op()
          super.onDownstreamFinish()
        }

        override def onUpstreamFinish() = {
          op()
          super.onUpstreamFinish()
        }

        setHandlers(in, out, this)
      }
  }
}
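Logging.simpleName, used for the inlet/outlet names above, returns the unqualified class name and accepts either an instance or a Class[_]. A minimal sketch with a hypothetical stage class:

import akka.event.Logging

final class TokenBucketStage // hypothetical

object SimpleNameDemo extends App {
  println(Logging.simpleName(new TokenBucketStage))      // prints "TokenBucketStage"
  println(Logging.simpleName(classOf[TokenBucketStage])) // also accepts a Class[_]
}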
Example 94
Source File: Boot.scala From akka-http-rest-api with MIT License | 5 votes |
package core

import _root_.authentication.{UserAuthService, UserAuthRepository}
import akka.actor.ActorSystem
import akka.event.Logging
import akka.http.scaladsl.Http
import akka.stream.ActorMaterializer
import com.typesafe.config.ConfigFactory
import core.config.{RedisConfig, DatabaseConfig, ServerConfig}
import redis.RedisClient
import slick.driver.PostgresDriver.api._
import token.TokenRepository
import user.{UserRepository, UserService}

object Boot extends App with ApiRouter with ServerConfig with DatabaseConfig with RedisConfig {

  override val config = ConfigFactory.load()

  override implicit val actorSystem = ActorSystem("rest-api-app")
  override implicit val executor = actorSystem.dispatcher
  override implicit val materializer = ActorMaterializer()

  override val logger = Logging(actorSystem, getClass)

  override implicit val redis = RedisClient(
    host = redisAuthHost,
    port = redisAuthPort,
    password = Option(redisAuthPassword),
    db = Option(redisAuthDb)
  )

  implicit val db = Database.forURL(url = dbUrl, user = dbUser, password = dbPassword, driver = "org.postgresql.Driver")

  lazy val tokenRepo = new TokenRepository
  lazy val userAuthRepo = new UserAuthRepository
  lazy val userRepo = new UserRepository

  override lazy val userAuthService = new UserAuthService(tokenRepo, userAuthRepo, userRepo)
  override lazy val userService = new UserService(tokenRepo, userAuthRepo, userRepo)

  Http().bindAndHandle(apiRoutes, serverInterface, serverPort)
}
Example 95
Source File: TestSpec.scala From reactive-programming with Apache License 2.0 | 5 votes |
package com.test

import java.io.IOException
import java.util.UUID

import akka.actor.{ ActorRef, ActorSystem, PoisonPill }
import akka.event.{ Logging, LoggingAdapter }
import akka.testkit.TestProbe
import akka.util.Timeout
import org.scalatest.concurrent.{ Eventually, ScalaFutures }
import org.scalatest.exceptions.TestFailedException
import org.scalatest._
import rx.lang.scala._

import scala.concurrent.duration._
import scala.concurrent.{ ExecutionContextExecutor, Future }
import scala.util.{ Random ⇒ Rnd, Try }

object Random {
  def apply(): Rnd = new Rnd()
}

trait TestSpec extends FlatSpec with Matchers with ScalaFutures with TryValues with OptionValues with Eventually with BeforeAndAfterAll {
  implicit val system: ActorSystem = ActorSystem("test")
  implicit val ec: ExecutionContextExecutor = system.dispatcher
  val log: LoggingAdapter = Logging(system, this.getClass)
  implicit val pc: PatienceConfig = PatienceConfig(timeout = 50.seconds)
  implicit val timeout = Timeout(50.seconds)
  // A probe is needed so that `cleanup` can watch the actors it poisons;
  // the original snippet referenced `probe` without defining it.
  val probe: TestProbe = TestProbe()

  override protected def afterAll(): Unit = {
    system.terminate()
  }

  def cleanup(actors: ActorRef*): Unit = {
    actors.foreach { (actor: ActorRef) ⇒
      actor ! PoisonPill
      probe watch actor
    }
  }

  implicit class PimpedByteArray(self: Array[Byte]) {
    def getString: String = new String(self)
  }

  implicit class PimpedFuture[T](self: Future[T]) {
    def toTry: Try[T] = Try(self.futureValue)
  }

  implicit class PimpedObservable[T](self: Observable[T]) {
    def waitFor: Unit = {
      self.toBlocking.toIterable.last
    }
  }

  implicit class MustBeWord[T](self: T) {
    def mustBe(pf: PartialFunction[T, Unit]): Unit =
      if (!pf.isDefinedAt(self)) throw new TestFailedException("Unexpected: " + self, 0)
  }

  object Socket {
    def apply() = new Socket
  }

  class Socket {
    def readFromMemory: Future[Array[Byte]] = Future {
      Thread.sleep(100) // sleep 100 millis
      "fromMemory".getBytes
    }

    def send(payload: Array[Byte], from: String, failed: Boolean): Future[Array[Byte]] =
      if (failed) Future.failed(new IOException(s"Network error: $from"))
      else {
        Future {
          Thread.sleep(250) // sleep 250 millis, not real life time, but hey
          s"${payload.getString}->$from".getBytes
        }
      }

    def sendToEurope(payload: Array[Byte], failed: Boolean = false): Future[Array[Byte]] =
      send(payload, "fromEurope", failed)

    def sendToUsa(payload: Array[Byte], failed: Boolean = false): Future[Array[Byte]] =
      send(payload, "fromUsa", failed)
  }
}
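A concrete spec then just mixes in the trait and inherits the ActorSystem, LoggingAdapter, and helper classes. A minimal sketch (the test body is hypothetical):

class SocketSpec extends TestSpec {
  "A Socket" should "read from memory" in {
    val socket = Socket()
    socket.readFromMemory.futureValue.getString shouldBe "fromMemory"
  }
}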
Example 96
Source File: RestPi.scala From apache-spark-test with Apache License 2.0 | 5 votes |
package com.github.dnvriend

import akka.actor.ActorSystem
import akka.event.{ Logging, LoggingAdapter }
import akka.http.scaladsl._
import akka.http.scaladsl.common.{ EntityStreamingSupport, JsonEntityStreamingSupport }
import akka.http.scaladsl.marshallers.sprayjson.SprayJsonSupport
import akka.http.scaladsl.server.{ Directives, Route }
import akka.stream.scaladsl.{ Flow, Source }
import akka.stream.{ ActorMaterializer, Materializer }
import akka.util.ByteString
import com.github.dnvriend.spark.CalculatePi
import org.apache.spark.SparkContext
import org.apache.spark.sql.SparkSession
import spray.json.DefaultJsonProtocol

import scala.concurrent.{ ExecutionContext, Future }

object RestPi extends App with Directives with SprayJsonSupport with DefaultJsonProtocol {
  implicit val system: ActorSystem = ActorSystem()
  implicit val mat: Materializer = ActorMaterializer()
  implicit val ec: ExecutionContext = system.dispatcher
  implicit val log: LoggingAdapter = Logging(system, this.getClass)

  val spark = SparkSession.builder()
    .config("spark.sql.warehouse.dir", "file:/tmp/spark-warehouse")
    .config("spark.scheduler.mode", "FAIR")
    .config("spark.sql.crossJoin.enabled", "true")
    .master("local") // run Spark locally on a single thread
    .appName("RestPi") // The appName parameter is a name for your application to show on the cluster UI.
    .getOrCreate()

  final case class Pi(pi: Double)

  implicit val piJsonFormat = jsonFormat1(Pi)

  val start = ByteString.empty
  val sep = ByteString("\n")
  val end = ByteString.empty

  implicit val jsonStreamingSupport: JsonEntityStreamingSupport = EntityStreamingSupport.json()
    .withFramingRenderer(Flow[ByteString].intersperse(start, sep, end))
    .withParallelMarshalling(parallelism = 8, unordered = true)

  def sparkContext: SparkContext = spark.newSession().sparkContext

  def calculatePi(num: Long = 1000000, slices: Int = 2): Future[Double] =
    Future(CalculatePi(sparkContext, num, slices)).map(count => slices.toDouble * count / (num - 1))

  val route: Route =
    pathEndOrSingleSlash {
      complete(calculatePi().map(Pi))
    } ~ path("pi" / LongNumber / IntNumber) { (num, slices) =>
      complete(calculatePi(num, slices).map(Pi))
    } ~ path("stream" / "pi" / LongNumber) { num =>
      complete(Source.fromFuture(calculatePi()).map(Pi)
        .flatMapConcat(Source.repeat).take(num))
    }

  Http().bindAndHandle(route, "0.0.0.0", 8008)

  sys.addShutdownHook {
    spark.stop()
    system.terminate()
  }
}
Example 97
Source File: CreateZipcodesSpark.scala From apache-spark-test with Apache License 2.0 | 5 votes |
package com.github.dnvriend

import akka.actor.ActorSystem
import akka.event.{ Logging, LoggingAdapter }
import akka.stream.{ ActorMaterializer, Materializer }
import org.apache.spark.sql.{ SaveMode, SparkSession }

import scala.concurrent.ExecutionContext

object CreateZipcodesSpark extends App {
  implicit val system: ActorSystem = ActorSystem()
  implicit val mat: Materializer = ActorMaterializer()
  implicit val ec: ExecutionContext = system.dispatcher
  implicit val log: LoggingAdapter = Logging(system, this.getClass)

  val spark = SparkSession.builder()
    .config("spark.sql.warehouse.dir", "file:/tmp/spark-warehouse")
    .config("spark.cores.max", "4")
    .config("spark.scheduler.mode", "FAIR")
    .config("spark.sql.crossJoin.enabled", "true")
    .master("local[*]") // use as many threads as cores
    .appName("CreateZipcodesSpark").getOrCreate()

  import spark.implicits._

  // define an RDD for the district range
  val districts = spark.sparkContext.parallelize(1000 to 9000).map(_.toString).toDS
  // create temp view
  districts.createOrReplaceTempView("districts")

  // define an RDD with a range for the letters
  val l1 = spark.sparkContext.parallelize('A' to 'Z').map(_.toString).toDS
  l1.createOrReplaceTempView("l1")

  // join the letters
  val letters = spark.sql("SELECT concat(a.value, b.value) letters from l1 a join l1 b")
  // define temp view
  letters.createOrReplaceTempView("letters")

  // define an RDD for the houses
  val houses = spark.sparkContext.makeRDD(1 to 399).toDS
  // create temp view
  houses.createOrReplaceTempView("houses")

  // join letters and houses
  val lettersWithHouseNr = spark.sql(
    """
      |SELECT CONCAT(letters, '-', nr) letterswithhousenr FROM letters
      |JOIN
      |(SELECT format_string("%03d", value) nr FROM houses)
    """.stripMargin
  )
  // create temp view
  lettersWithHouseNr.createOrReplaceTempView("lwh")

  // join the districts with the house numbers
  val tickets = spark.sql("SELECT concat(value, letterswithhousenr) value FROM districts JOIN lwh LIMIT 5000000")
  tickets.write.mode(SaveMode.Overwrite).parquet("/tmp/tickets_spark.parquet")

  shutdown

  def shutdown: Unit = {
    spark.stop()
    system.terminate()
  }

  sys.addShutdownHook(shutdown)
}
Example 98
Source File: HelloWorld.scala From apache-spark-test with Apache License 2.0 | 5 votes |
package com.github.dnvriend

import akka.actor.ActorSystem
import akka.event.{ Logging, LoggingAdapter }
import akka.stream.{ ActorMaterializer, Materializer }
import com.github.dnvriend.spark.CalculatePi
import org.apache.spark.sql.SparkSession

import scala.concurrent.{ ExecutionContext, Future }

object HelloWorld extends App {
  implicit val system: ActorSystem = ActorSystem()
  implicit val mat: Materializer = ActorMaterializer()
  implicit val ec: ExecutionContext = system.dispatcher
  implicit val log: LoggingAdapter = Logging(system, this.getClass)

  val n = 10000000

  // The first thing a Spark program must do is to create a SparkSession object,
  // which tells Spark how to access a cluster, or to run in local mode
  val spark = SparkSession.builder()
    .config("spark.sql.warehouse.dir", "file:/tmp/spark-warehouse")
    .config("spark.scheduler.mode", "FAIR")
    .config("spark.sql.crossJoin.enabled", "true")
    .master("local[*]") // use as many threads as cores
    .appName("Hello World") // The appName parameter is a name for your application to show on the cluster UI.
    .getOrCreate()

  for {
    count <- Future(CalculatePi(spark.sparkContext, n))
    _ <- system.terminate()
  } yield {
    val pi = 2.0 * count / (n - 1)
    println(s"Hello World, Pi = $pi")
    spark.stop()
  }

  sys.addShutdownHook {
    spark.stop()
    system.terminate()
  }
}
Example 99
Source File: Worker.scala From scala-tutorials with MIT License | 5 votes |
package com.baeldung.hello.akka

import akka.actor.{Actor, Props}
import akka.event.Logging
import com.lightbend.lagom.scaladsl.pubsub.{PubSubRegistry, TopicId}

object Worker {
  def props(pubSub: PubSubRegistry) = Props(new Worker(pubSub))
}

class Worker(pubSub: PubSubRegistry) extends Actor {

  private val log = Logging.getLogger(context.system, this)

  override def receive = {
    case job @ Job(id, task, payload) =>
      log.info("Working on job: {}", job)
      sender ! JobAccepted(id)
      val topic = pubSub.refFor(TopicId[JobStatus]("job-status"))
      topic.publish(JobStatus(job.jobId, "started"))
      // perform the work...
      topic.publish(JobStatus(job.jobId, "completed"))
  }
}
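On the consuming side the same registry hands out a subscriber Source for the topic, so status updates can be observed as a stream. A minimal sketch, assuming Lagom's PubSub API and the JobStatus type in scope:

import akka.stream.Materializer
import com.lightbend.lagom.scaladsl.pubsub.{PubSubRegistry, TopicId}

def watchJobStatus(pubSub: PubSubRegistry)(implicit mat: Materializer): Unit = {
  val topic = pubSub.refFor(TopicId[JobStatus]("job-status"))
  // subscriber is a Source[JobStatus, NotUsed]; print each update as it arrives.
  topic.subscriber.runForeach(status => println(status))
}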
Example 100
Source File: Web.scala From full-scala-stack with Apache License 2.0 | 5 votes |
package web

import akka.event.{ Logging, LoggingAdapter }
import akka.http.scaladsl.Http
import akka.stream.scaladsl._
import api.{ Api, Config }
import core.{ Core, CoreActors }

import scala.concurrent.{ ExecutionContext, Future }
import scala.util.control.NonFatal

// $COVERAGE-OFF$ This is actual code that we can't test, so we shouldn't report on it
trait Web extends Config {
  this: Api with CoreActors with Core =>

  val log: LoggingAdapter = Logging.getLogger(actorSystem, this)

  val serverSource: Source[Http.IncomingConnection, Future[Http.ServerBinding]] = {
    implicit def executionContext: ExecutionContext = actorSystem.dispatcher
    val host = config.getString("full-scala-stack.host")
    val port = config.getInt("full-scala-stack.port")
    Http()
      .bind(interface = host, port = port)
      .mapMaterializedValue { bind =>
        bind.foreach { server =>
          log.info(server.localAddress.toString)
        }
        bind
      }
  }

  val bindingFuture: Future[Http.ServerBinding] = serverSource
    .to(Sink.foreach { connection => // foreach materializes the source
      log.debug("Accepted new connection from " + connection.remoteAddress)
      // ... and then actually handle the connection
      try {
        connection.flow.joinMat(routes)(Keep.both).run()
        ()
      } catch {
        case NonFatal(e) =>
          log.error(e, "Could not materialize handling flow for {}", connection)
          throw e
      }
    })
    .run()
}
// $COVERAGE-ON$
Example 101
Source File: ParallelWork.scala From hyperspark with Apache License 2.0 | 5 votes |
package pfsp.parallel

import it.polimi.hyperh.solution.Solution
import it.polimi.hyperh.solution.EvaluatedSolution
import pfsp.problem.PfsProblem

import scala.util.Random

import akka.actor.Actor
import akka.actor.Props
import akka.event.Logging
import akka.actor.ActorRef
import akka.actor.ActorSystem
import akka.routing.RoundRobinPool
import pfsp.solution.PfsSolution
import pfsp.solution.PfsEvaluatedSolution

object ParallelWork extends App {
  override def main(args: Array[String]) {
    def calculate(p: PfsProblem, evOldSolution: PfsEvaluatedSolution, nrOfWorkers: Int, sizeOfNeighbourhood: Int) {
      // Create an Akka system
      val system = ActorSystem("ParallelSystem")
      // create the result listener, which will print the result and shutdown the system
      val listener = system.actorOf(Props[Listener], name = "listener")
      // create the master
      val master = system.actorOf(
        Props(new Master(p, evOldSolution, nrOfWorkers, sizeOfNeighbourhood, listener)),
        name = "master")
      // start the calculation
      master ! Calculate
    }

    val p = PfsProblem.fromResources("inst_ta001.txt")
    val permutationList = Random.shuffle(p.jobs.toList)
    val oldSolution = PfsSolution(permutationList)
    var evOldSolution = p.evaluate(oldSolution).asInstanceOf[PfsEvaluatedSolution]
    calculate(p, evOldSolution, 7, 300)
  }

  case object Calculate
  case class Work(p: PfsProblem, solution: PfsSolution, initEndTimesMatrix: Array[Array[Int]])
  case class SingleResult(evSolution: EvaluatedSolution)
  // The second field carries the elapsed time, so it is named accordingly
  // (the original called it startMillis although a duration is sent).
  case class FinalResult(evSolution: EvaluatedSolution, durationMillis: Long)
}

class Worker extends Actor {
  import ParallelWork._

  def receive = {
    case Work(p, solution, initEndTimesMatrix) =>
      val evSolution = p.evaluatePartialSolution(solution.permutation)
      sender ! SingleResult(evSolution)
  }
}

class Listener extends Actor {
  import ParallelWork._

  override def receive = {
    case FinalResult(evSolution, duration) =>
      println("bestSolution: " + evSolution + " millis: " + duration)
      context.system.shutdown()
  }
}

class Master(p: PfsProblem,
             evOldSolution: PfsEvaluatedSolution,
             nrOfWorkers: Int,
             sizeOfNeighbourhood: Int,
             listener: ActorRef) extends Actor {
  import ParallelWork._

  var nrOfResults: Int = 0
  val startMillis: Long = System.currentTimeMillis
  val initEndTimesMatrix = p.jobsInitialTimes()
  var bestSolution: EvaluatedSolution = evOldSolution

  val workerRouter = context.actorOf(
    Props[Worker].withRouter(RoundRobinPool(nrOfWorkers)),
    name = "workerRouter")

  override def receive = {
    case Calculate =>
      for (i <- 0 until sizeOfNeighbourhood)
        workerRouter ! Work(p, PfsSolution(Random.shuffle(p.jobs.toList)), initEndTimesMatrix)
    case SingleResult(evNewSolution) =>
      nrOfResults += 1
      bestSolution = List(evNewSolution, bestSolution).min
      if (nrOfResults == sizeOfNeighbourhood) {
        // Send the result to the listener
        listener ! FinalResult(bestSolution, System.currentTimeMillis - startMillis)
        // Stops this actor and all its supervised children
        context.stop(self)
      }
  }
}
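Note that this example imports akka.event.Logging but never creates an adapter; the Listener reports with println and uses the long-deprecated system.shutdown(). A minimal sketch of the same listener written with a logging adapter and the current termination API (LoggingListener is hypothetical):

import akka.actor.Actor
import akka.event.Logging

class LoggingListener extends Actor {
  import ParallelWork._
  private val log = Logging(context.system, this)

  override def receive = {
    case FinalResult(evSolution, durationMillis) =>
      log.info("bestSolution: {} millis: {}", evSolution, durationMillis)
      context.system.terminate() // replaces the deprecated shutdown()
  }
}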
Example 102
Source File: PluginCollection.scala From sumobot with Apache License 2.0 | 5 votes |
package com.sumologic.sumobot.plugins

import akka.actor.{ActorSystem, Props}
import akka.event.Logging
import com.sumologic.sumobot.core.Bootstrap
import com.typesafe.config.ConfigException

import scala.util.{Failure, Success, Try}

trait PluginCollection {

  protected def addPlugin(name: String, props: Props)(implicit system: ActorSystem): Unit = {
    lazy val log = Logging.getLogger(system, this)
    val property = s"plugins.$name.enabled"
    Try(system.settings.config.getBoolean(property)) match {
      case Success(true) =>
        system.actorOf(props, name)
      case Success(false) =>
        log.debug(s"Plugin $name is disabled.")
      case Failure(_: ConfigException.Missing) =>
        log.debug(s"Could not find $property. Enabling plugin by default.")
        system.actorOf(props, name)
      case Failure(other) =>
        throw other
    }
  }

  def setup(implicit system: ActorSystem): Unit
}
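A concrete collection implements setup and registers each plugin by name; addPlugin then consults plugins.<name>.enabled before starting the actor. A minimal sketch (EchoPlugin is hypothetical):

import akka.actor.{Actor, ActorSystem, Props}

class EchoPlugin extends Actor { // hypothetical plugin actor
  override def receive = { case msg => sender() ! msg }
}

object MyPlugins extends PluginCollection {
  def setup(implicit system: ActorSystem): Unit =
    addPlugin("echo", Props[EchoPlugin]) // honours plugins.echo.enabled
}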
Example 103
Source File: FetchWithCacheConfigClient.scala From izanami with Apache License 2.0 | 5 votes |
package izanami.configs

import java.util.concurrent.TimeUnit

import akka.actor.ActorSystem
import akka.event.Logging
import akka.http.scaladsl.util.FastFuture
import akka.stream.Materializer
import akka.util.Timeout
import com.google.common.cache.{Cache, CacheBuilder}
import izanami.Strategy.FetchWithCacheStrategy
import izanami.scaladsl._
import izanami._
import play.api.libs.json.Json

import scala.concurrent.Future
import scala.concurrent.duration.DurationInt
import scala.util.{Failure, Success}

object FetchWithCacheConfigClient {
  def apply(
      clientConfig: ClientConfig,
      fallback: Configs,
      underlyingStrategy: ConfigClient,
      cacheConfig: FetchWithCacheStrategy
  )(implicit izanamiDispatcher: IzanamiDispatcher,
    actorSystem: ActorSystem,
    materializer: Materializer): FetchWithCacheConfigClient =
    new FetchWithCacheConfigClient(clientConfig,
                                   fallback,
                                   underlyingStrategy,
                                   cacheConfig,
                                   underlyingStrategy.cudConfigClient)
}

private[configs] class FetchWithCacheConfigClient(
    clientConfig: ClientConfig,
    fallback: Configs,
    underlyingStrategy: ConfigClient,
    cacheConfig: FetchWithCacheStrategy,
    override val cudConfigClient: CUDConfigClient
)(implicit val izanamiDispatcher: IzanamiDispatcher,
  actorSystem: ActorSystem,
  val materializer: Materializer)
    extends ConfigClient {

  import actorSystem.dispatcher

  implicit val timeout = Timeout(10.second)

  private val logger = Logging(actorSystem, this.getClass.getName)

  private val cache: Cache[String, Seq[Config]] = CacheBuilder
    .newBuilder()
    .maximumSize(cacheConfig.maxElement)
    .expireAfterWrite(cacheConfig.duration.toMillis, TimeUnit.MILLISECONDS)
    .build[String, Seq[Config]]()

  override def configs(pattern: Seq[String]): Future[Configs] = {
    val convertedPattern =
      Option(pattern).map(_.map(_.replace(".", ":")).mkString(",")).getOrElse("*")
    Option(cache.getIfPresent(convertedPattern)) match {
      case Some(configs) => FastFuture.successful(Configs(configs))
      case None =>
        val futureConfigs = underlyingStrategy.configs(convertedPattern)
        futureConfigs.onComplete {
          case Success(c) => cache.put(convertedPattern, c.configs)
          case Failure(e) => logger.error(e, "Error fetching configs")
        }
        futureConfigs
    }
  }

  override def config(key: String) = {
    require(key != null, "key should not be null")
    val convertedKey: String = key.replace(".", ":")
    Option(cache.getIfPresent(convertedKey)) match {
      case Some(configs) =>
        FastFuture.successful(configs.find(_.id == convertedKey).map(_.value).getOrElse(Json.obj()))
      case None =>
        val futureConfig: Future[Configs] = underlyingStrategy.configs(convertedKey)
        futureConfig.onComplete {
          case Success(configs) => cache.put(convertedKey, configs.configs)
          case Failure(e)       => logger.error(e, "Error fetching configs")
        }
        futureConfig.map(
          _.configs
            .find(_.id == convertedKey)
            .map(c => c.value)
            .getOrElse(Json.obj())
        )
    }
  }

  override def configsSource(pattern: String) = underlyingStrategy.configsSource(pattern)

  override def configsStream(pattern: String) = underlyingStrategy.configsStream(pattern)
}
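Taken together, these examples create LoggingAdapters from several kinds of sources, each resolved through an implicit LogSource; a minimal sketch of the common variants side by side:

import akka.actor.ActorSystem
import akka.event.{Logging, LoggingAdapter}

object LoggingDemo extends App {
  val system = ActorSystem("demo")
  val byClass: LoggingAdapter   = Logging(system, getClass)         // keyed by class name
  val byString: LoggingAdapter  = Logging(system, "my-component")   // keyed by a plain string
  val viaFactory: LoggingAdapter = Logging.getLogger(system, this)  // Java-style factory method
  byString.info("hello from {}", "LoggingDemo")
}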