akka.event.LoggingAdapter Scala Examples
The following examples show how to use akka.event.LoggingAdapter.
Each example is taken from an open-source project; the line above each snippet names the source file, the project it comes from, and its license.
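Before the examples, a quick orientation: a LoggingAdapter is typically obtained from Logging(...) with an ActorSystem and a log source, which is the pattern most snippets below use. A minimal sketch (the system name and the messages are illustrative, not from any of the projects):

import akka.actor.ActorSystem
import akka.event.{ Logging, LoggingAdapter }

object LoggingAdapterSketch extends App {
  val system: ActorSystem = ActorSystem("sketch") // illustrative system name

  // a class-based log source, as in the ModelService and TestSpec examples below
  val log: LoggingAdapter = Logging(system, getClass)

  log.info("started")                                  // plain message
  log.warning("{} attempts left", 3)                   // template + argument, as used by FutureRetryUtility
  log.error(new RuntimeException("boom"), "it failed") // Throwable + message overload

  system.terminate()
}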
Example 1
Source File: ModelService.scala From reactive-machine-learning-systems with MIT License
package com.reactivemachinelearning

import akka.actor.ActorSystem
import akka.event.{Logging, LoggingAdapter}
import akka.http.scaladsl.Http
import akka.http.scaladsl.marshalling.ToResponseMarshallable
import akka.http.scaladsl.model.StatusCodes._
import akka.http.scaladsl.server.Directives._
import akka.stream.{ActorMaterializer, Materializer}
//import spray.json._
import spray.json.DefaultJsonProtocol

import scala.concurrent.{ExecutionContextExecutor, Future}

case class Prediction(id: Long, timestamp: Long, value: Double)

trait Protocols extends DefaultJsonProtocol {
  implicit val ipInfoFormat = jsonFormat3(Prediction.apply)
}

trait Service extends Protocols {
  implicit val system: ActorSystem
  implicit def executor: ExecutionContextExecutor
  implicit val materializer: Materializer

  val logger: LoggingAdapter

//  private def parseFeatures(features: String): Map[Long, Double] = {
//    features.parseJson.convertTo[Map[Long, Double]]
//  }

  def predict(features: String): Future[Prediction] = {
    Future(Prediction(123, 456, 0.5))
  }

  val routes = {
    logRequestResult("predictive-service") {
      pathPrefix("ip") {
        (get & path(Segment)) { features =>
          complete {
            predict(features).map[ToResponseMarshallable] {
//              case prediction: Prediction => prediction
              case _ => BadRequest
            }
          }
        }
      }
    }
  }
}

object PredictiveService extends App with Service {
  override implicit val system = ActorSystem()
  override implicit val executor = system.dispatcher
  override implicit val materializer = ActorMaterializer()

  override val logger = Logging(system, getClass)

  Http().bindAndHandle(routes, "0.0.0.0", 9000)
}
Example 2
Source File: FutureRetryUtility.scala From NSDb with Apache License 2.0
package io.radicalbit.nsdb.util

import akka.actor.{Actor, ActorRef, Scheduler, Status}
import akka.event.LoggingAdapter
import akka.pattern.after

import scala.concurrent.duration.FiniteDuration
import scala.concurrent.{ExecutionContext, Future}
import scala.util.{Failure, Success}

trait FutureRetryUtility {

  implicit class FutureRetry[T](f: => Future[T]) {
    def retry(delay: FiniteDuration, retries: Int)(wasSuccessful: T => Boolean)(
        implicit ec: ExecutionContext,
        s: Scheduler,
        log: LoggingAdapter): Future[T] =
      (for {
        a <- f
        result <- if (wasSuccessful(a) || retries < 1) Future(a)
                  else {
                    log.warning("{}. Retrying...", a)
                    after(delay, s)(retry(delay, retries - 1)(wasSuccessful))
                  }
      } yield result) recoverWith {
        case t if retries > 0 =>
          log.warning("{}. Retrying...", t)
          after(delay, s)(retry(delay, retries - 1)(wasSuccessful))
      }
  }

  implicit class PipeToFutureRetry[T](f: => Future[T]) {
    def pipeTo(delay: FiniteDuration, retries: Int, recipient: ActorRef)(
        wasSuccessful: T => Boolean = _ => true)(implicit ec: ExecutionContext,
                                                 s: Scheduler,
                                                 log: LoggingAdapter,
                                                 sender: ActorRef = Actor.noSender) =
      f.retry(delay, retries)(wasSuccessful) andThen {
        case Success(r) ⇒ recipient ! r
        case Failure(f) ⇒ recipient ! Status.Failure(f)
      }
  }
}
Example 3
Source File: EventSpec.scala From cloudflow with Apache License 2.0
package cloudflow.installer

import akka.actor.ActorSystem
import akka.event.Logging
import akka.event.LoggingAdapter
import akka.stream.ActorMaterializer
import akka.stream.scaladsl._
import org.scalatest._

import scala.concurrent.Await
import scala.concurrent.duration._

class EventSpec extends WordSpec with MustMatchers with GivenWhenThen with EitherValues with Inspectors {

  implicit val system: ActorSystem = ActorSystem("TestSystem")
  implicit val mat: ActorMaterializer = ActorMaterializer()
  implicit val ec = mat.executionContext
  implicit val log: LoggingAdapter = Logging(system, "Cloudflow Installer")
  implicit val settings = Settings(system)

  "Cloudflow event" should {

    "transform an install event to a install action" in {
      val instance = TestInstance.get
      val clusterFeatures = ClusterFeatures()
      val installEvent = InstallEvent(instance, None, instance.metadata.namespace, clusterFeatures)
      val future = Source(List(installEvent)).via(CloudflowEvent.toAction).runWith(Sink.headOption)
      val result = Await.result(future, 3.seconds)

      result must not be empty
      result.get mustBe a[CompositeAction]
      val action = result.get.asInstanceOf[CompositeAction]
      action.childActions must have size 5
      action.childActions(0) mustBe a[Strimzi]
      action.childActions(1) mustBe a[SparkOperator]
      action.childActions(2) mustBe a[FlinkOperator]
      action.childActions(3) mustBe a[CloudflowOperatorManagedStrimzi]
      action.childActions(4) mustBe a[PatchOwnerReferenceOfSparkMutatingWebhookConfig]
    }

    "transform an un-install event to an un-install action" in {
      val instance = TestInstance.get
      val clusterFeatures = ClusterFeatures()
      val installEvent = UninstallEvent(instance, None, instance.metadata.namespace, clusterFeatures)
      val future = Source(List(installEvent)).via(CloudflowEvent.toAction).runWith(Sink.headOption)
      val result = Await.result(future, 3.seconds)

      result must not be empty
      result.get mustBe a[CompositeAction]
      val action = result.get.asInstanceOf[CompositeAction]
      action.childActions must have size 2
      action.childActions(0) mustBe a[RemoveCloudflowClusterwideResources]
      action.childActions(1) mustBe a[RemoveCloudflowNamespacedResources]
    }

    "verify that detected cluster features are present" in {
      val instance = TestInstance.get
      val clusterFeatures = ClusterFeatures(hasSecurityContextConstraints = true)
      val installEvent = InstallEvent(instance, None, instance.metadata.namespace, clusterFeatures)
      val future = Source(List(installEvent)).via(CloudflowEvent.toAction).runWith(Sink.headOption)
      val result = Await.result(future, 3.seconds)

      result must not be empty
      result.get mustBe a[CompositeAction]
      val action = result.get.asInstanceOf[CompositeAction]
      action.childActions must have size 6
      action.childActions(0) mustBe a[Strimzi]
      action.childActions(1) mustBe a[SparkOperator]
      action.childActions(2) mustBe a[FlinkOperator]
      action.childActions(3) mustBe a[AddSccToSparkServiceAccount]
      action.childActions(4) mustBe a[CloudflowOperatorManagedStrimzi]
      action.childActions(5) mustBe a[PatchOwnerReferenceOfSparkMutatingWebhookConfig]
    }
  }

  "transform an pre-requisite failure event to an no operation action" in {
    val instance = TestInstance.get
    val failures = List(CloudflowInstance.ValidationFailure("The cluster does not have a storage class named 'test'"))
    val installEvent = PreRequisiteFailed(instance, failures)
    val future = Source(List(installEvent)).via(CloudflowEvent.toAction).runWith(Sink.headOption)
    val result = Await.result(future, 3.seconds)

    result must not be empty
    result.get mustBe a[UpdateCRStatusAction]
    val action = result.get.asInstanceOf[UpdateCRStatusAction]
    val caught = Await.result(action.execute(), 3.seconds)
    caught.stdErr mustBe Some("The cluster does not have a storage class named 'test'")
  }
}
Example 4
Source File: KubectlActionExecutor.scala From cloudflow with Apache License 2.0
package cloudflow.installer

import java.nio.file.NoSuchFileException

import akka.actor.ActorSystem
import akka.stream._

import scala.concurrent._

import akka.event.LoggingAdapter

trait ActionExecutor {
  def execute(action: Action)(implicit system: ActorSystem,
                              materializer: Materializer,
                              ec: ExecutionContext,
                              log: LoggingAdapter,
                              settings: Settings): Future[ActionResult]
}

case object KubectlActionExecutor extends ActionExecutor {
  override def execute(action: Action)(implicit system: ActorSystem,
                                       materializer: Materializer,
                                       ec: ExecutionContext,
                                       log: LoggingAdapter,
                                       settings: Settings): Future[ActionResult] =
    action.execute().recover {
      case actionFailure: ActionFailure => actionFailure
      case exception: NoSuchFileException =>
        ActionFailure(action, 1, Some(s"Cannot find file '${exception.getMessage}'"))
      case exception: Exception =>
        ActionFailure(action, 1, Some(exception.getMessage))
    }
}
Example 5
Source File: HealthChecks.scala From cloudflow with Apache License 2.0
package cloudflow.installer

import akka.actor._
import akka.event.LoggingAdapter
import akka.http.scaladsl._
import akka.http.scaladsl.model._
import akka.http.scaladsl.server.Directives._
import akka.stream._

import scala.concurrent._
import scala.util._

object HealthChecks {
  def serve(settings: Settings)(implicit system: ActorSystem,
                                mat: ActorMaterializer,
                                ec: ExecutionContext,
                                log: LoggingAdapter) =
    Http()
      .bindAndHandle(route, settings.api.bindInterface, settings.api.bindPort)
      .onComplete {
        case Success(serverBinding) ⇒
          log.info(s"Bound to ${serverBinding.localAddress}.")
        case Failure(e) ⇒
          log.error(e, s"Failed to bind.")
          system.terminate().foreach { _ ⇒
            println("Exiting, could not bind http.")
            sys.exit(-1)
          }
      }

  def route =
    // format: OFF
    path("robots.txt") {
      getFromResource("robots.txt")
    } ~
    pathPrefix("checks") {
      path("healthy") {
        complete(StatusCodes.OK)
      } ~
      path("ready") {
        complete(StatusCodes.OK)
      }
    }
    // format: ON
}
Example 6
Source File: Detector.scala From cloudflow with Apache License 2.0
package cloudflow.installer

import akka.event.LoggingAdapter
import java.util.concurrent.TimeUnit
import org.zeroturnaround.exec._

import scala.annotation.tailrec
import scala.collection.JavaConverters._
import scala.util.{ Failure, Success, Try }

sealed trait ClusterFeature
final case object Scc extends ClusterFeature
final case class StorageClasses(classes: Set[StorageClass]) extends ClusterFeature

final case class StorageClass(name: String, provisioner: String)

case class ClusterFeatures(
    storageClasses: Set[StorageClass] = Set.empty,
    hasSecurityContextConstraints: Boolean = false
) {

  private val set = {
    var s = Set.empty[ClusterFeature]
    if (storageClasses.nonEmpty) s += StorageClasses(storageClasses)
    if (hasSecurityContextConstraints) s += Scc
    s
  }

  def contains(feature: ClusterFeature) = set.contains(feature)

  def print()(implicit log: LoggingAdapter): Unit = {
    val header = s"""+${"-" * 80}+"""
    log.info(header)
    log.info("Features detected:")
    log.info("")
    if (hasSecurityContextConstraints) log.info("Scc")
    storageClasses.foreach {
      case StorageClass(name, provisioner) => log.info(s"Storage class: $name - $provisioner")
    }
    log.info(header)
  }
}

object Detector {
  def apply(): Detector = Detector(executor)

  def executor(commandLine: Array[String], log: LoggingAdapter, settings: Settings): Try[String] = {
    val command = s"${commandLine.mkString(" ")}"
    log.info(s"Executing command '$command'")
    Try(
      new ProcessExecutor()
        .command(commandLine.toList.asJava)
        .readOutput(true)
        .exitValues(0)
        .timeout(settings.executionTimeout, TimeUnit.SECONDS)
        .execute()
        .outputUTF8()
    )
  }
}

case class Detector(executor: (Array[String], LoggingAdapter, Settings) => Try[String]) {

  def detectClusterFeatures()(implicit log: LoggingAdapter, settings: Settings): ClusterFeatures =
    ClusterFeatures(getStorageClasses(), hasSecurityContextConstraints())

  def hasSecurityContextConstraints()(implicit log: LoggingAdapter, settings: Settings): Boolean =
    executor("oc get scc".split(" "), log, settings).isSuccess

  def getStorageClasses()(implicit log: LoggingAdapter, settings: Settings): Set[StorageClass] = {
    @tailrec
    def extractStorageClass(a: List[String], b: Set[StorageClass] = Set.empty): Set[StorageClass] =
      a match {
        case name :: provisioner :: _ :: tail => extractStorageClass(tail, b + StorageClass(name, provisioner))
        case nil @ _                          => b
      }

    executor("kubectl get sc --no-headers".split(" "), log, settings) match {
      case Success(result) =>
        if (result.startsWith("error:")) Set.empty
        else if (result.contains("No resources found")) Set.empty
        else extractStorageClass(result.replaceAll("\n", " ").split(" ").filter(s => s != "(default)" && s != "").toList)
      case Failure(ex) =>
        log.error(s"Failed to query storage classes, ${ex.getMessage()}")
        Set.empty
    }
  }
}
Example 7
Source File: CloudflowEvent.scala From cloudflow with Apache License 2.0
package cloudflow.installer

import akka.NotUsed
import akka.event.LoggingAdapter
import akka.stream.scaladsl._
import skuber._
import skuber.api.client._

sealed trait CloudflowEvent

case class InstallEvent(instance: CloudflowInstance.CR,
                        currentInstance: Option[CloudflowInstance.CR],
                        namespace: String,
                        availableClusterFeatures: ClusterFeatures)
    extends CloudflowEvent

case class UninstallEvent(instance: CloudflowInstance.CR,
                          currentInstance: Option[CloudflowInstance.CR],
                          namespace: String,
                          availableClusterFeatures: ClusterFeatures)
    extends CloudflowEvent

case class PreRequisiteFailed(instance: CloudflowInstance.CR, validationFailures: List[CloudflowInstance.ValidationFailure])
    extends CloudflowEvent

object CloudflowEvent {

  def fromWatchEvent[O <: ObjectResource]()(implicit log: LoggingAdapter, settings: Settings) =
    Flow[WatchEvent[CloudflowInstance.CR]]
      .statefulMapConcat { () ⇒
        var currentInstances = Map[String, WatchEvent[CloudflowInstance.CR]]()
        watchEvent ⇒ {
          val instance = watchEvent._object
          val namespace = instance.metadata.namespace
          val id = instance.metadata.name
          val currentInstance = currentInstances.get(id).map(_._object)

          val detector = Detector()
          val clusterFeatures = detector.detectClusterFeatures()
          clusterFeatures.print

          watchEvent._type match {
//            case EventType.DELETED ⇒
//              currentInstances = currentInstances - id
//              List(UninstallEvent(instance, currentInstance, namespace, clusterFeatures))
            case EventType.ADDED | EventType.MODIFIED ⇒
              if (currentInstances.get(id).forall { existingEvent ⇒
                    existingEvent._object.resourceVersion != watchEvent._object.resourceVersion &&
                    // the spec must change, otherwise it is not a deploy event (but likely a status update).
                    existingEvent._object.spec != watchEvent._object.spec
                  }) {
                currentInstances = currentInstances + (id -> watchEvent)
                val validationFailures = CloudflowInstance.validateClusterFeatures(instance, clusterFeatures)
                if (validationFailures.nonEmpty)
                  List(PreRequisiteFailed(instance, validationFailures))
                else
                  List(InstallEvent(instance, currentInstance, namespace, clusterFeatures))
              } else List.empty
            case _ => List.empty
          }
        }
      }

  def toAction[O <: ObjectResource](): Flow[CloudflowEvent, Action, NotUsed] =
    Flow[CloudflowEvent].map {
      case install: InstallEvent =>
        implicit val c = install.instance
        Actions(install.availableClusterFeatures).installCloudflow
      case uninstall: UninstallEvent =>
        implicit val c = uninstall.instance
        Actions(uninstall.availableClusterFeatures).uninstallCloudflow
      case failure: PreRequisiteFailed =>
        implicit val c = failure.instance
        UpdateCRStatusAction(failure.validationFailures)
    }
}
Example 8
Source File: Main.scala From cloudflow with Apache License 2.0
package cloudflow.installer

import akka.actor._
import akka.event.Logging
import akka.event.LoggingAdapter
import akka.stream._
import skuber._
import skuber.api.Configuration
import skuber.apiextensions._

import scala.concurrent._
import scala.concurrent.duration._

object Main {

  def main(args: Array[String]): Unit = {
    if (!ResourceDirectory.exists()) {
      println("The Cloudflow installer could not locate the resource directory.")
      System.exit(1)
    }

    implicit val system = ActorSystem()
    implicit val log: LoggingAdapter = Logging(system, "Cloudflow Installer")

    try {
      implicit val mat = createMaterializer()
      implicit val ec = system.dispatcher
      implicit val settings = Settings(system)

      Diagnostics.logStartOperatorMessage(settings)

      val client = connectToKubernetes()
      installCRD(client)

      HealthChecks.serve(settings)
      Operator.handleEvents(client)
    } catch {
      case t: Throwable ⇒
        log.error(t, "Unexpected error starting Cloudflow install operator, terminating.")
        system.registerOnTermination(exitWithFailure)
        system.terminate()
    }
  }

  private def createMaterializer()(implicit system: ActorSystem) = {
    val decider: Supervision.Decider = _ ⇒ Supervision.Stop
    ActorMaterializer(ActorMaterializerSettings(system).withSupervisionStrategy(decider))
  }

  private def exitWithFailure(): Unit = System.exit(-1)

  private def connectToKubernetes()(implicit system: ActorSystem, mat: Materializer, log: LoggingAdapter) = {
    val conf = Configuration.defaultK8sConfig
    val client = k8sInit(conf).usingNamespace("")
    log.info(s"Connected to Kubernetes cluster: ${conf.currentContext.cluster.server}")
    client
  }

  private def installCRD(client: skuber.api.client.KubernetesClient)(implicit ec: ExecutionContext): Unit = {
    val crdTimeout = 20.seconds
    // TODO check if version is the same, if not, also create.
    import CloudflowInstance._
    Await.ready(
      client.getOption[CustomResourceDefinition](CRD.name).map { result ⇒
        result.fold(client.create(CRD)) { crd ⇒
          if (crd.spec.version != CRD.spec.version) client.create(CRD)
          else Future.successful(crd)
        }
      },
      crdTimeout
    )
  }
}
Example 9
Source File: NsdbNodeEndpoint.scala From NSDb with Apache License 2.0
package io.radicalbit.nsdb.cluster

import akka.actor.{ActorRef, ActorSystem}
import akka.event.{Logging, LoggingAdapter}
import com.typesafe.config.Config
import io.radicalbit.nsdb.cluster.endpoint.GrpcEndpoint
import io.radicalbit.nsdb.security.NsdbSecurity
import io.radicalbit.nsdb.web.{BitSerializer, CustomSerializers, WebResources}
import org.json4s.{DefaultFormats, Formats}

class NsdbNodeEndpoint(readCoordinator: ActorRef,
                       writeCoordinator: ActorRef,
                       metadataCoordinator: ActorRef,
                       publisher: ActorRef)(override implicit val system: ActorSystem)
    extends WebResources
    with NsdbSecurity {

  override val config: Config = system.settings.config

  override implicit val logger: LoggingAdapter = Logging.getLogger(system, this)

  new GrpcEndpoint(readCoordinator = readCoordinator,
                   writeCoordinator = writeCoordinator,
                   metadataCoordinator = metadataCoordinator)

  implicit val formats: Formats = DefaultFormats ++ CustomSerializers.customSerializers + BitSerializer

  initWebEndpoint(writeCoordinator, readCoordinator, metadataCoordinator, publisher)
}
Example 10
Source File: FutureRetryUtilitySpec.scala From NSDb with Apache License 2.0
package io.radicalbit.nsdb.util

import akka.actor.{ActorSystem, Scheduler, Status}
import akka.event.{Logging, LoggingAdapter}
import akka.testkit.{TestKit, TestProbe}
import org.scalatest.{Matchers, WordSpecLike}

import scala.collection.mutable
import scala.concurrent.duration._
import scala.concurrent.{Await, Future}
import scala.concurrent.ExecutionContext.Implicits.global

class FutureRetryUtilitySpec
    extends TestKit(ActorSystem("MySpec"))
    with WordSpecLike
    with Matchers
    with FutureRetryUtility {

  implicit val schedule: Scheduler = system.scheduler
  implicit val logger: LoggingAdapter = Logging.getLogger(system, this)

  private final val delay: FiniteDuration = 2.seconds
  private final val retries: Int = 3

  private def future(flag: Boolean) =
    if (flag) Future.successful(3) else Future.failed(new RuntimeException("Failure"))

  "retry function in FutureRetryUtility" must {

    "successfully returns whether, after retries, the future is eventually successful" in {
      Await.result(future(true).retry(delay, retries)(_ > 2), Duration.Inf) shouldBe 3
    }

    "thrown an Exception whether, after retries, the future eventually returns an Exception" in {
      an[RuntimeException] shouldBe thrownBy(Await.result(future(false).retry(delay, retries)(_ => true), Duration.Inf))
    }

    "consider the number of retries" in {
      val q = mutable.Queue(0)
      def future = {
        val nRetries = q.dequeue()
        if (nRetries < 2) { q.enqueue(nRetries + 1); Future.failed(new RuntimeException) } else {
          q.enqueue(nRetries + 1); Future.successful(nRetries)
        }
      }
      Await.result(future.retry(delay, retries)(_ > 2), Duration.Inf) shouldBe 3
    }
  }

  "pipeTo function in FutureRetryUtility" must {

    "returns a successful future and send the content of it through pipe" in {
      val testProbe = TestProbe("actor-test")
      future(true).pipeTo(delay, retries, testProbe.testActor)()
      testProbe.expectMsg(3)
    }

    "return a failed future and send a status failure through pipe" in {
      val testProbe = TestProbe("actor-test")
      future(false).pipeTo(delay, retries, testProbe.testActor)()
      testProbe.expectMsgAllClassOf(classOf[Status.Failure])
    }
  }
}
Example 11
Source File: DetectorSpec.scala From cloudflow with Apache License 2.0
package cloudflow.installer

import akka.actor._
import org.scalatest._
import akka.event.LoggingAdapter

import scala.util.Try

class DetectorSpec extends WordSpec with MustMatchers {

  implicit val system = ActorSystem()
  implicit val settings = Settings(system)
  implicit val logging = system.log

  "Detector" should {

    "correctly create storage classes from input" in {
      def storageClassExecutor(commandLine: Array[String], log: LoggingAdapter, settings: Settings): Try[String] = {
        val _ = (commandLine, log, settings)
        Try("""gke-ssd kubernetes.io/gce-pd 2d
              |glusterfs-storage kubernetes.io/glusterfs 204d
              |gp2 (default) kubernetes.io/aws-ebs 204d
           """.stripMargin)
      }

      val detector = Detector(storageClassExecutor)
      val storageClasses = detector.getStorageClasses()

      storageClasses must have size 3
      storageClasses.find(_.name == "glusterfs-storage").get mustBe StorageClass("glusterfs-storage", "kubernetes.io/glusterfs")
      storageClasses.find(_.name == "gp2").get mustBe StorageClass("gp2", "kubernetes.io/aws-ebs")
      storageClasses.find(_.name == "gke-ssd").get mustBe StorageClass("gke-ssd", "kubernetes.io/gce-pd")
    }

    "not create storage classes, when no storage classes were found" in {
      def storageClassExecutor(commandLine: Array[String], log: LoggingAdapter, settings: Settings): Try[String] = {
        val _ = (commandLine, log, settings)
        Try("""No resources found in default namespace.
           """.stripMargin)
      }

      val detector = Detector(storageClassExecutor)
      val storageClasses = detector.getStorageClasses()

      storageClasses must have size 0
    }
  }
}
Example 12
Source File: Security.scala From NSDb with Apache License 2.0
package io.radicalbit.nsdb.security

import akka.event.LoggingAdapter
import com.typesafe.config.Config
import io.radicalbit.nsdb.common.exception.NsdbSecurityException
import io.radicalbit.nsdb.security.http.{EmptyAuthorization, NSDBAuthProvider}

import scala.util.{Failure, Success, Try}

// The extracted snippet starts mid-file: the enclosing trait declaration and the
// `config`, `logger`, `security` and `authProviderClassName` members were lost in
// extraction. They are reconstructed here; the trait name follows its use in
// NsdbNodeEndpoint (Example 9), and the configuration keys below are assumptions.
trait NsdbSecurity {

  def config: Config

  implicit def logger: LoggingAdapter

  // assumed: whether security is enabled and which provider class to load
  lazy val security: Boolean = Try(config.getBoolean("nsdb.security.enabled")).getOrElse(false)
  lazy val authProviderClassName: String = Try(config.getString("nsdb.security.auth-provider-class")).getOrElse("")

  lazy val authProvider: Try[NSDBAuthProvider] =
    if (!security) {
      logger.info("Security is not enabled")
      Success(new EmptyAuthorization)
    } else if (authProviderClassName != "") {
      logger.debug(s"Trying to load class $authProviderClassName")
      Try(Class.forName(authProviderClassName).asSubclass(classOf[NSDBAuthProvider]).newInstance)
    } else {
      Failure(new NsdbSecurityException("a valid classname must be provided if security is enabled"))
    }
}
Example 13
Source File: ApiResources.scala From NSDb with Apache License 2.0
package io.radicalbit.nsdb.web

import akka.actor.ActorRef
import akka.event.LoggingAdapter
import akka.http.scaladsl.server.Directives._
import akka.http.scaladsl.server._
import akka.util.Timeout
import com.typesafe.config.Config
import io.radicalbit.nsdb.common.configuration.NSDbConfig.HighLevel._
import io.radicalbit.nsdb.security.http.NSDBAuthProvider
import io.radicalbit.nsdb.web.routes._
import io.radicalbit.nsdb.web.swagger.SwaggerDocService
import org.json4s.Formats

import scala.concurrent.ExecutionContext

class ApiResources(val publisherActor: ActorRef,
                   val readCoordinator: ActorRef,
                   val writeCoordinator: ActorRef,
                   val metadataCoordinator: ActorRef,
                   val authenticationProvider: NSDBAuthProvider)(override implicit val timeout: Timeout,
                                                                 implicit val logger: LoggingAdapter,
                                                                 override implicit val ec: ExecutionContext,
                                                                 override implicit val formats: Formats)
    extends CommandApi
    with QueryApi
    with QueryValidationApi
    with DataApi {

  def healthCheckApi: Route = {
    pathPrefix("status") {
      (pathEnd & get) {
        complete("RUNNING")
      }
    }
  }

  def swagger =
    path("swagger") { getFromResource("swagger-ui/index.html") } ~
      getFromResourceDirectory("swagger-ui")

  def apiResources(config: Config)(implicit ec: ExecutionContext): Route =
    queryApi ~
      queryValidationApi ~
      dataApi ~
      healthCheckApi ~
      commandsApi ~
      swagger ~
      new SwaggerDocService(config.getString(HttpInterface), config.getInt(HttpPort)).routes
}
Example 14
Source File: WebResources.scala From NSDb with Apache License 2.0
package io.radicalbit.nsdb.web

import java.util.concurrent.TimeUnit

import akka.actor.ActorRef
import akka.event.LoggingAdapter
import akka.http.scaladsl.Http
import akka.http.scaladsl.server.Directives._
import akka.http.scaladsl.server.Route
import akka.stream.ActorMaterializer
import akka.util.Timeout
import com.typesafe.config.Config
import io.radicalbit.nsdb.common.configuration.NSDbConfig.HighLevel._
import io.radicalbit.nsdb.security.NsdbSecurity
import org.json4s.Formats

import scala.concurrent.duration._
import scala.concurrent.{Await, Future}
import scala.util.{Failure, Success}

trait WebResources extends WsResources with SSLSupport { this: NsdbSecurity =>

  import CORSSupport._
  import VersionHeader._

  implicit def formats: Formats

  def config: Config

  implicit lazy val materializer = ActorMaterializer()
  implicit lazy val dispatcher = system.dispatcher
  implicit lazy val httpTimeout: Timeout =
    Timeout(config.getDuration("nsdb.http-endpoint.timeout", TimeUnit.SECONDS), TimeUnit.SECONDS)

  def initWebEndpoint(writeCoordinator: ActorRef,
                      readCoordinator: ActorRef,
                      metadataCoordinator: ActorRef,
                      publisher: ActorRef)(implicit logger: LoggingAdapter) =
    authProvider match {
      case Success(provider) =>
        val api: Route = wsResources(publisher, provider) ~ new ApiResources(publisher,
                                                                             readCoordinator,
                                                                             writeCoordinator,
                                                                             metadataCoordinator,
                                                                             provider).apiResources(config)

        val httpExt = akka.http.scaladsl.Http()

        val http: Future[Http.ServerBinding] =
          if (isSSLEnabled) {
            val interface = config.getString(HttpInterface)
            val port = config.getInt(HttpsPort)
            logger.info(s"Cluster Apis started with https protocol at interface $interface on port $port")
            httpExt.bindAndHandle(withCors(withNSDbVersion(api)), interface, port, connectionContext = serverContext)
          } else {
            val interface = config.getString(HttpInterface)
            val port = config.getInt(HttpPort)
            logger.info(s"Cluster Apis started with http protocol at interface $interface and port $port")
            httpExt.bindAndHandle(withCors(withNSDbVersion(api)), interface, port)
          }

        scala.sys.addShutdownHook {
          http
            .flatMap(_.unbind())
            .onComplete { _ =>
              system.terminate()
            }
          Await.result(system.whenTerminated, 60 seconds)
        }
      case Failure(ex) =>
        logger.error("error on loading authorization provider", ex)
        System.exit(1)
    }
}
Example 15
Source File: QueryValidationApi.scala From NSDb with Apache License 2.0
package io.radicalbit.nsdb.web.routes

import akka.actor.ActorRef
import akka.event.LoggingAdapter
import akka.http.scaladsl.model.HttpResponse
import akka.http.scaladsl.model.StatusCodes._
import akka.http.scaladsl.server.Directives._
import akka.http.scaladsl.server.Route
import akka.pattern.ask
import akka.util.Timeout
import io.radicalbit.nsdb.common.statement.SelectSQLStatement
import io.radicalbit.nsdb.protocol.MessageProtocol.Commands.ValidateStatement
import io.radicalbit.nsdb.protocol.MessageProtocol.Events._
import io.radicalbit.nsdb.security.http.NSDBAuthProvider
import io.radicalbit.nsdb.security.model.Metric
import io.radicalbit.nsdb.sql.parser.SQLStatementParser
import io.radicalbit.nsdb.sql.parser.StatementParserResult._
import io.swagger.annotations._
import javax.ws.rs.Path
import org.json4s.Formats

import scala.annotation.meta.field
import scala.util.{Failure, Success}

@ApiModel(description = "Query Validation body")
case class QueryValidationBody(@(ApiModelProperty @field)(value = "database name ") db: String,
                               @(ApiModelProperty @field)(value = "namespace name ") namespace: String,
                               @(ApiModelProperty @field)(value = "metric name ") metric: String,
                               @(ApiModelProperty @field)(value = "sql query string") queryString: String)
    extends Metric

@Api(value = "/query/validate", produces = "application/json")
@Path("/query/validate")
trait QueryValidationApi {

  import io.radicalbit.nsdb.web.NSDbJson._

  def readCoordinator: ActorRef
  def authenticationProvider: NSDBAuthProvider

  implicit val timeout: Timeout
  implicit val formats: Formats

  @ApiOperation(value = "Perform query", nickname = "query", httpMethod = "POST", response = classOf[String])
  @ApiImplicitParams(
    Array(
      new ApiImplicitParam(name = "body",
                           value = "query definition",
                           required = true,
                           dataTypeClass = classOf[QueryValidationBody],
                           paramType = "body")
    ))
  @ApiResponses(
    Array(
      new ApiResponse(code = 200, message = "Query is valid"),
      new ApiResponse(code = 404, message = "Not found item reason"),
      new ApiResponse(code = 400, message = "statement is invalid")
    ))
  def queryValidationApi(implicit logger: LoggingAdapter): Route = {
    path("query" / "validate") {
      post {
        entity(as[QueryValidationBody]) { qb =>
          optionalHeaderValueByName(authenticationProvider.headerName) { header =>
            authenticationProvider.authorizeMetric(ent = qb, header = header, writePermission = false) {
              new SQLStatementParser().parse(qb.db, qb.namespace, qb.queryString) match {
                case SqlStatementParserSuccess(_, statement: SelectSQLStatement) =>
                  onComplete(readCoordinator ? ValidateStatement(statement)) {
                    case Success(SelectStatementValidated(_)) =>
                      complete(HttpResponse(OK))
                    case Success(SelectStatementValidationFailed(_, reason, MetricNotFound(_))) =>
                      complete(HttpResponse(NotFound, entity = reason))
                    case Success(SelectStatementValidationFailed(_, reason, _)) =>
                      complete(HttpResponse(BadRequest, entity = reason))
                    case Success(r) =>
                      logger.error("unknown response received {}", r)
                      complete(HttpResponse(InternalServerError, entity = "unknown response"))
                    case Failure(ex) =>
                      logger.error("", ex)
                      complete(HttpResponse(InternalServerError, entity = ex.getMessage))
                  }
                case SqlStatementParserSuccess(queryString, _) =>
                  complete(HttpResponse(BadRequest, entity = s"statement ${queryString} is not a select statement"))
                case SqlStatementParserFailure(queryString, _) =>
                  complete(HttpResponse(BadRequest, entity = s"statement ${queryString} is invalid"))
              }
            }
          }
        }
      }
    }
  }
}
Example 16
Source File: ChannelUtils.scala From akka-grpc with Apache License 2.0
package akka.grpc.internal

import java.util.concurrent.CompletionStage

import akka.Done
import akka.annotation.InternalApi
import akka.event.LoggingAdapter
import io.grpc.{ ConnectivityState, ManagedChannel }

import scala.compat.java8.FutureConverters._
import scala.concurrent.{ Future, Promise }

// The extracted snippet omits the enclosing declaration; an `object ChannelUtils`
// wrapper is assumed here so the method has a home. `ClientConnectionException` is
// defined elsewhere in this package.
private[akka] object ChannelUtils {

  @InternalApi
  private[akka] def monitorChannel(
      ready: Promise[Unit],
      done: Promise[Done],
      channel: ManagedChannel,
      maxConnectionAttempts: Option[Int],
      log: LoggingAdapter): Unit = {
    def monitor(currentState: ConnectivityState, connectionAttempts: Int): Unit = {
      log.debug(s"monitoring with state $currentState and connectionAttempts $connectionAttempts")
      val newAttemptOpt = currentState match {
        case ConnectivityState.TRANSIENT_FAILURE =>
          if (maxConnectionAttempts.contains(connectionAttempts + 1)) {
            val ex = new ClientConnectionException(s"Unable to establish connection after [$maxConnectionAttempts]")
            ready.tryFailure(ex) || done.tryFailure(ex)
            None
          } else Some(connectionAttempts + 1)
        case ConnectivityState.READY =>
          ready.trySuccess(())
          Some(0)
        case ConnectivityState.SHUTDOWN =>
          done.trySuccess(Done)
          None
        case ConnectivityState.IDLE | ConnectivityState.CONNECTING =>
          Some(connectionAttempts)
      }
      newAttemptOpt.foreach { attempts =>
        channel.notifyWhenStateChanged(currentState, () => monitor(channel.getState(false), attempts))
      }
    }
    monitor(channel.getState(false), 0)
  }
}
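For orientation, a rough sketch of how monitorChannel could be driven from calling code. This is illustrative only: the address, port, and attempt limit are invented, NoLogging stands in for a real adapter, and since the method is private[akka] the caller is placed in the same package.

package akka.grpc.internal

import akka.Done
import akka.event.NoLogging
import io.grpc.ManagedChannelBuilder

import scala.concurrent.Promise

object MonitorChannelSketch extends App {
  // hypothetical endpoint; plaintext only to keep the sketch short
  val channel = ManagedChannelBuilder.forAddress("localhost", 8443).usePlaintext().build()

  val ready = Promise[Unit]() // completed when the channel first becomes READY
  val done  = Promise[Done]() // completed on shutdown, or failed after too many attempts

  ChannelUtils.monitorChannel(ready, done, channel, maxConnectionAttempts = Some(5), NoLogging)
}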
Example 17
Source File: LogCollector.scala From shield with MIT License
package shield.actors.listeners

import akka.actor.{Actor, ActorLogging, ActorRef}
import akka.event.LoggingAdapter
import nl.grons.metrics.scala.{Meter, Timer}
import shield.actors.{RequestProcessorCompleted, RestartLogging}
import org.joda.time.format.ISODateTimeFormat
import shield.config.{DomainSettings, HttpServiceLocation, Settings}
import shield.metrics.Instrumented
import spray.http.{HttpHeader, HttpResponse}
import spray.json._

import scala.collection.mutable.ArrayBuffer
import scala.concurrent.duration._
import scala.util.{Failure, Success, Try}

case object FlushLogs
case object LogsFlushed
case class AccessLogs(buffer: Seq[JsObject])

object LogCollector {
  def handleResults(self: ActorRef, droppedMeter: Meter, log: LoggingAdapter, logCount: Int): PartialFunction[Try[HttpResponse], Unit] = {
    case Success(r) =>
      self ! LogsFlushed
      if (r.status.isFailure) {
        droppedMeter.mark(logCount)
        log.warning(s"Error forwarding access logs: ${r.entity.asString}")
      }
    case Failure(f) =>
      self ! LogsFlushed
      droppedMeter.mark(logCount)
      log.warning(s"Error forwarding access logs: $f")
  }
}

class LogCollector(id: String, domain: DomainSettings, forwarders: Seq[ActorRef], maxBufferSize: Int)
    extends Actor
    with ActorLogging
    with RestartLogging
    with Instrumented {

  import context.dispatcher

  val settings = Settings(context.system)
  val shieldHost = JsString(settings.DefaultServiceLocation.baseUrl.toString)

  var buffer = ArrayBuffer[JsObject]()

  val dateTimeFormat = ISODateTimeFormat.dateTime()

  val logSerializationTimer: Timer = metrics.timer("log-serialization")

  // todo: profiling optimization - 1% of CPU time is spent here while under load
  def logJson(r: RequestProcessorCompleted): JsObject = logSerializationTimer.time {
    JsObject(Map(
      // todo: profiling optimization: use seconds, and cache it per second
      "@timestamp" -> JsString(dateTimeFormat.print(System.currentTimeMillis() - r.overallTiming)),
      "method" -> JsString(r.completion.request.method.toString()),
      // todo: profiling optimization: uri.toString is used in several places - can we cache it?
      "request_headers" -> JsObject(extractHeaders(r.completion.request.headers, domain.loggedRequestHeaders)),
      "response_headers" -> JsObject(extractHeaders(r.completion.details.response.headers, domain.loggedResponseHeaders)),
      "path" -> JsString(r.completion.request.uri.toString()),
      "template" -> JsString(r.completion.details.template.path.toString),
      "responding_service" -> JsString(r.completion.details.serviceName),
      "responding_host" -> JsString(r.completion.details.serviceLocation.locationName),
      "shield_host" -> shieldHost,
      "overall_time" -> JsNumber(r.overallTiming),
      "middleware_time" -> JsObject(r.middlewareTiming.map {
        case (attr, timing) => attr -> JsNumber(timing)
      }),
      // todo: cache header name should be config driven
      "cache_status" -> JsString(r.completion.details.response.headers.find(_.lowercaseName == "x-cache").map(_.value).getOrElse("nocache")),
      "response_size" -> JsNumber(r.completion.details.response.entity.data.length),
      "response_status" -> JsNumber(r.completion.details.response.status.intValue)
    ))
  }

  val bufferSizeHistogram = metrics.histogram("bufferSizeOnFlush", id)

  var flushTimer = context.system.scheduler.scheduleOnce(100.millis, self, FlushLogs)

  def flushLogs() = {
    flushTimer.cancel()
    bufferSizeHistogram += buffer.length
    if (buffer.nonEmpty) {
      val msg = AccessLogs(buffer)
      forwarders.foreach { _ ! msg }
      buffer = ArrayBuffer()
    }
    flushTimer = context.system.scheduler.scheduleOnce(100.millis, self, FlushLogs)
  }

  def receive: Receive = {
    case r: RequestProcessorCompleted =>
      buffer += logJson(r)
      if (buffer.length >= maxBufferSize) {
        flushLogs()
      }
    case FlushLogs =>
      flushLogs()
  }

  def extractHeaders(headers: List[HttpHeader], toExtract: Set[String]): Map[String, JsString] =
    headers.filter(h => toExtract.contains(h.lowercaseName)).map(h => h.name -> JsString(h.value)).toMap
}
Example 18
Source File: Endpoints.scala From akka-http-microservice-templates with MIT License
package endpoints

import java.lang.System.currentTimeMillis

import akka.actor.ActorSystem
import akka.event.{Logging, LoggingAdapter}
import akka.http.scaladsl.model.HttpRequest
import akka.http.scaladsl.server.Directives._
import akka.http.scaladsl.server.RouteResult.Complete
import akka.http.scaladsl.server._
import akka.http.scaladsl.server.directives._
import akka.http.scaladsl.settings.RoutingSettings
import akka.stream.{ActorMaterializer, Materializer}

import scala.concurrent.ExecutionContext

class Endpoints(userEndpoint: UserEndpoint, healthCheckEndpoint: HealthCheckEndpoint) {

  def routes(implicit sys: ActorSystem, mat: ActorMaterializer, ec: ExecutionContext) =
    loggableRoute {
      Route.seal {
        userEndpoint.userRoutes ~ healthCheckEndpoint.healthCheckRoute
      }
    }

  def logRequestAndResponse(loggingAdapter: LoggingAdapter, before: Long)(req: HttpRequest)(res: Any): Unit = {
    val entry = res match {
      case Complete(resp) =>
        val message =
          s"{path=${req.uri}, method=${req.method.value}, status=${resp.status.intValue()}, elapsedTime=${currentTimeMillis() - before}"
        LogEntry(message, Logging.InfoLevel)
      case other =>
        LogEntry(other, Logging.InfoLevel)
    }
    entry.logTo(loggingAdapter)
  }

  def loggableRoute(route: Route)(implicit m: Materializer, ex: ExecutionContext, routingSettings: RoutingSettings): Route = {
    DebuggingDirectives.logRequestResult(LoggingMagnet(log => {
      val requestTimestamp = currentTimeMillis()
      logRequestAndResponse(log, requestTimestamp)
    }))(route)
  }
}
Example 19
Source File: AkkaHttpClient.scala From sttp with Apache License 2.0
package sttp.client.akkahttp

import akka.actor.ActorSystem
import akka.event.LoggingAdapter
import akka.http.scaladsl.model.ws.{Message, WebSocketRequest, WebSocketUpgradeResponse}
import akka.http.scaladsl.model.{HttpRequest, HttpResponse}
import akka.http.scaladsl.server.{ExceptionHandler, RejectionHandler, Route, RoutingLog}
import akka.http.scaladsl.settings.{ClientConnectionSettings, ConnectionPoolSettings, ParserSettings, RoutingSettings}
import akka.http.scaladsl.{Http, HttpsConnectionContext}
import akka.stream.Materializer
import akka.stream.scaladsl.Flow

import scala.concurrent.{ExecutionContext, ExecutionContextExecutor, Future}

trait AkkaHttpClient {
  def singleRequest(
      request: HttpRequest,
      settings: ConnectionPoolSettings
  ): Future[HttpResponse]

  def singleWebsocketRequest[WS_RESULT](
      request: WebSocketRequest,
      clientFlow: Flow[Message, Message, WS_RESULT],
      settings: ClientConnectionSettings
  )(implicit ec: ExecutionContext, mat: Materializer): Future[(WebSocketUpgradeResponse, WS_RESULT)]
}

object AkkaHttpClient {
  def default(
      system: ActorSystem,
      connectionContext: Option[HttpsConnectionContext],
      customLog: Option[LoggingAdapter]
  ): AkkaHttpClient =
    new AkkaHttpClient {
      private val http = Http()(system)

      override def singleRequest(
          request: HttpRequest,
          settings: ConnectionPoolSettings
      ): Future[HttpResponse] = {
        http.singleRequest(
          request,
          connectionContext.getOrElse(http.defaultClientHttpsContext),
          settings,
          customLog.getOrElse(system.log)
        )
      }

      override def singleWebsocketRequest[WS_RESULT](
          request: WebSocketRequest,
          clientFlow: Flow[Message, Message, WS_RESULT],
          settings: ClientConnectionSettings
      )(implicit ec: ExecutionContext, mat: Materializer): Future[(WebSocketUpgradeResponse, WS_RESULT)] = {
        val (wsResponse, wsResult) = http.singleWebSocketRequest(
          request,
          clientFlow,
          connectionContext.getOrElse(http.defaultClientHttpsContext),
          None,
          settings,
          customLog.getOrElse(system.log)
        )
        wsResponse.map((_, wsResult))
      }
    }

  def stubFromAsyncHandler(run: HttpRequest => Future[HttpResponse]): AkkaHttpClient =
    new AkkaHttpClient {
      def singleRequest(request: HttpRequest, settings: ConnectionPoolSettings): Future[HttpResponse] =
        run(request)

      override def singleWebsocketRequest[WS_RESULT](
          request: WebSocketRequest,
          clientFlow: Flow[Message, Message, WS_RESULT],
          settings: ClientConnectionSettings
      )(implicit ec: ExecutionContext, mat: Materializer): Future[(WebSocketUpgradeResponse, WS_RESULT)] =
        Future.failed(new RuntimeException("Websockets are not supported"))
    }

  def stubFromRoute(route: Route)(implicit routingSettings: RoutingSettings,
                                  parserSettings: ParserSettings,
                                  materializer: Materializer,
                                  routingLog: RoutingLog,
                                  executionContext: ExecutionContextExecutor = null,
                                  rejectionHandler: RejectionHandler = RejectionHandler.default,
                                  exceptionHandler: ExceptionHandler = null): AkkaHttpClient =
    stubFromAsyncHandler(Route.asyncHandler(route))
}
Example 20
Source File: CirceJsonSerialization.scala From akka-ddd-cqrs-es-example with MIT License
package com.github.j5ik2o.bank.adaptor.serialization

import java.nio.charset.StandardCharsets

import akka.event.LoggingAdapter
import io.circe._
import io.circe.parser._
import io.circe.syntax._

object StringToByteConversion {
  implicit class StringToByte(text: String) {
    def toUTF8Byte: Array[Byte] = text.getBytes(StandardCharsets.UTF_8)
  }
}

trait EventToJsonReprIso[Event, JsonRepr] {
  def convertTo(event: Event): JsonRepr
  def convertFrom(json: JsonRepr): Event
}

import com.github.j5ik2o.bank.adaptor.serialization.StringToByteConversion._

class CirceDeserializationException(message: String, cause: Throwable) extends Exception(message, cause)

object CirceJsonSerialization {

  def toBinary[Event, JsonRepr](
      orig: Event,
      isDebugEnabled: Boolean = false
  )(implicit iso: EventToJsonReprIso[Event, JsonRepr], encoder: Encoder[JsonRepr], log: LoggingAdapter): Array[Byte] = {
    val event = iso.convertTo(orig)
    val jsonString = event.asJson.noSpaces
    if (isDebugEnabled) log.debug(s"toBinary: jsonString = $jsonString")
    jsonString.toUTF8Byte
  }

  def fromBinary[Event, JsonRepr](
      bytes: Array[Byte],
      isDebugEnabled: Boolean = false
  )(implicit iso: EventToJsonReprIso[Event, JsonRepr], d: Decoder[JsonRepr], log: LoggingAdapter): Event = {
    val jsonString = new String(bytes, StandardCharsets.UTF_8)
    if (isDebugEnabled) log.debug(s"fromBinary: jsonString = $jsonString")
    val result = for {
      json       <- parse(jsonString).right
      resultJson <- json.as[JsonRepr].right
    } yield iso.convertFrom(resultJson)
    result match {
      case Left(failure) => throw new CirceDeserializationException(failure.getMessage, failure)
      case Right(event)  => event
    }
  }
}
Example 21
Source File: BankAccountEventJSONSerializer.scala From akka-ddd-cqrs-es-example with MIT License
package com.github.j5ik2o.bank.adaptor.serialization

import akka.actor.ExtendedActorSystem
import akka.event.{ Logging, LoggingAdapter }
import akka.serialization.SerializerWithStringManifest
import com.github.j5ik2o.bank.domain.model._
import org.slf4j.LoggerFactory
import pureconfig._

object BankAccountEventJSONManifest {
  final val CREATE   = BankAccountOpened.getClass.getName.stripSuffix("$")
  final val UPDATE   = BankAccountEventUpdated.getClass.getName.stripSuffix("$")
  final val DEPOSIT  = BankAccountDeposited.getClass.getName.stripSuffix("$")
  final val WITHDRAW = BankAccountWithdrawn.getClass.getName.stripSuffix("$")
  final val DESTROY  = BankAccountClosed.getClass.getName.stripSuffix("$")
}

class BankAccountEventJSONSerializer(system: ExtendedActorSystem) extends SerializerWithStringManifest {
  import BankAccountCreatedJson._
  import BankAccountEventJSONManifest._
  import io.circe.generic.auto._

  private val logger = LoggerFactory.getLogger(getClass)

  private val config = loadConfigOrThrow[BankAccountEventJSONSerializerConfig](
    system.settings.config.getConfig("bank.interface.bank-account-event-json-serializer")
  )

  private implicit val log: LoggingAdapter = Logging.getLogger(system, getClass)

  private val isDebugEnabled = config.isDebuged

  override def identifier: Int = 50

  override def manifest(o: AnyRef): String = {
    val result = o.getClass.getName
    logger.debug(s"manifest: $result")
    result
  }

  override def toBinary(o: AnyRef): Array[Byte] = o match {
    case orig: BankAccountOpened       => CirceJsonSerialization.toBinary(orig, isDebugEnabled)
    case orig: BankAccountEventUpdated => CirceJsonSerialization.toBinary(orig, isDebugEnabled)
    case orig: BankAccountDeposited    => CirceJsonSerialization.toBinary(orig, isDebugEnabled)
    case orig: BankAccountWithdrawn    => CirceJsonSerialization.toBinary(orig, isDebugEnabled)
    case orig: BankAccountClosed       => CirceJsonSerialization.toBinary(orig, isDebugEnabled)
  }

  override def fromBinary(bytes: Array[Byte], manifest: String): AnyRef = {
    logger.debug(s"fromBinary: $manifest")
    manifest match {
      case CREATE   => CirceJsonSerialization.fromBinary[BankAccountOpened, BankAccountCreatedJson](bytes, isDebugEnabled)
      case UPDATE   => CirceJsonSerialization.fromBinary[BankAccountEventUpdated, BankAccountUpdatedJson](bytes, isDebugEnabled)
      case DEPOSIT  => CirceJsonSerialization.fromBinary[BankAccountDeposited, BankAccountDepositedJson](bytes, isDebugEnabled)
      case WITHDRAW => CirceJsonSerialization.fromBinary[BankAccountWithdrawn, BankAccountWithdrawedJson](bytes, isDebugEnabled)
      case DESTROY  => CirceJsonSerialization.fromBinary[BankAccountClosed, BankAccountDestroyedJson](bytes, isDebugEnabled)
    }
  }
}
Example 22
Source File: MarathonCache.scala From vamp with Apache License 2.0
package io.vamp.container_driver.marathon

import akka.event.LoggingAdapter
import io.vamp.common.CacheStore

import scala.concurrent.Future
import scala.concurrent.duration.FiniteDuration

class MarathonCache(config: MarathonCacheConfig) {

  private lazy val cache = new CacheStore()

  def read[T](id: String, request: () ⇒ Future[T])(implicit logger: LoggingAdapter): Future[T] = {
    getOrPutIfAbsent("read", r(id), request)(config.readTimeToLivePeriod)
  }

  def write[T](operation: String, id: String, request: () ⇒ Future[T])(implicit logger: LoggingAdapter): Future[T] = {
    getOrPutIfAbsent(operation, w(id), request)(config.writeTimeToLivePeriod)
  }

  def readFailure[T](id: String)(implicit logger: LoggingAdapter): Unit = markFailure("read", r(id))

  def writeFailure[T](operation: String, id: String)(implicit logger: LoggingAdapter): Unit = markFailure(operation, w(id))

  def inCache(id: String): Boolean = cache.contains(r(id)) || cache.contains(w(id))

  def invalidate(id: String)(implicit logger: LoggingAdapter): Unit = {
    remove(r(id))
    remove(w(id))
  }

  def close(): Unit = cache.close()

  private def getOrPutIfAbsent[T](operation: String, key: String, put: () ⇒ T)(timeToLivePeriod: FiniteDuration)(implicit logger: LoggingAdapter): T =
    synchronized {
      get[T](key) match {
        case Some(result) if operation == result._1 ⇒
          logger.debug(s"cache get: $key")
          result._2
        case _ ⇒ this.put[T](operation, key, put)(timeToLivePeriod)._2
      }
    }

  private def markFailure[T](operation: String, id: String)(implicit logger: LoggingAdapter): Unit =
    get[T](id).foreach { value ⇒
      put[T](operation, id, () ⇒ value._2)(config.failureTimeToLivePeriod)
    }

  private def get[T](id: String): Option[(String, T)] = cache.get[(String, T)](id)

  private def put[T](operation: String, key: String, putValue: () ⇒ T)(timeToLivePeriod: FiniteDuration)(implicit logger: LoggingAdapter): (String, T) =
    synchronized {
      logger.info(s"cache put [${timeToLivePeriod.toSeconds} s]: $key")
      val value = operation → putValue()
      cache.put[(String, T)](key, value, timeToLivePeriod)
      value
    }

  private def remove(key: String)(implicit logger: LoggingAdapter): Unit = synchronized {
    logger.info(s"cache remove: $key")
    cache.remove(key)
  }

  @inline private def r(id: String): String = s"r$id"

  @inline private def w(id: String): String = s"w$id"
}
Example 23
Source File: MarathonAllAppCache.scala From vamp with Apache License 2.0
package io.vamp.container_driver.marathon

import akka.event.LoggingAdapter
import io.vamp.common.CacheStore
import io.vamp.common.http.HttpClient

import scala.concurrent.{ ExecutionContext, Future }

class MarathonAllAppCache {

  private val key = "apps"

  private lazy val cache = new CacheStore()

  private var config: MarathonCacheConfig = _

  def all(url: String, headers: List[(String, String)])(implicit httpClient: HttpClient, executionContext: ExecutionContext, logger: LoggingAdapter): Future[List[App]] =
    synchronized {
      cache.get[Future[List[App]]](key) match {
        case Some(result) ⇒
          logger.debug(s"cache get all apps")
          result.asInstanceOf[Future[List[App]]]
        case None ⇒
          logger.info(s"marathon sending get all request")
          val future = {
            httpClient
              .get[AppsResponse](s"$url?embed=apps.tasks&embed=apps.taskStats", headers, logError = false)
              .recover {
                case t: Throwable ⇒
                  logger.error(s"Error while getting apps ⇒ ${t.getMessage}", t)
                  cache.get[Future[List[App]]](key).foreach { value ⇒
                    cache.put[Future[List[App]]](key, value, config.failureTimeToLivePeriod)
                  }
                  AppsResponse(Nil)
              }
              .map(_.apps)
          }
          cache.put[Future[List[App]]](key, future, config.readTimeToLivePeriod)
          future
      }
    }

  def invalidate(implicit logger: LoggingAdapter): Unit = synchronized {
    logger.info(s"cache remove all apps")
    cache.remove(key)
  }

  def updateConfig(config: MarathonCacheConfig): Unit = {
    if (this.config == null) this.config = config
    else {
      if (this.config.readTimeToLivePeriod > config.readTimeToLivePeriod)
        this.config.copy(readTimeToLivePeriod = config.readTimeToLivePeriod)
      if (this.config.failureTimeToLivePeriod > config.failureTimeToLivePeriod)
        this.config.copy(failureTimeToLivePeriod = config.failureTimeToLivePeriod)
    }
  }
}
Example 24
Source File: MarathonSse.scala From vamp with Apache License 2.0
package io.vamp.container_driver.marathon

import akka.actor.ActorSystem
import akka.event.LoggingAdapter
import akka.http.scaladsl.model.sse.ServerSentEvent
import io.vamp.common.Namespace
import io.vamp.common.http.{ SseConnector, SseListener }
import org.json4s._
import org.json4s.native.JsonMethods._

import scala.util.Try

case class MarathonSse(config: MarathonClientConfig, namespace: Namespace, listener: (String, String) ⇒ Unit) extends SseListener {

  def open()(implicit system: ActorSystem, logger: LoggingAdapter): Unit = {
    logger.info(s"Subscribing to Marathon SSE stream: ${namespace.name}")
    SseConnector.open(s"${config.marathonUrl}/v2/events", config.headers, config.tlsCheck)(this)
  }

  def close()(implicit logger: LoggingAdapter): Unit = {
    logger.info(s"Unsubscribing from Marathon SSE stream: ${namespace.name}")
    SseConnector.close(this)
  }

  final override def onEvent(event: ServerSentEvent): Unit = {
    event.eventType.filter(_.startsWith("deployment")).foreach { _ ⇒
      Try(
        (parse(StringInput(event.data), useBigDecimalForDouble = true) \ "plan" \ "steps" \\ "app" \\ classOf[JString]).toSet
      ).foreach {
        _.foreach(id ⇒ listener(event.eventType.getOrElse(""), id))
      }
    }
  }
}
Example 25
Source File: SseConnector.scala From vamp with Apache License 2.0
package io.vamp.common.http

import akka.Done
import akka.actor.ActorSystem
import akka.event.LoggingAdapter
import akka.http.scaladsl.model.HttpHeader.ParsingResult.Ok
import akka.http.scaladsl.model.sse.ServerSentEvent
import akka.http.scaladsl.model.{ HttpHeader, HttpRequest, HttpResponse, Uri }
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.{ Sink, Source }
import io.vamp.common.http.EventSource.EventSource

import scala.collection.mutable
import scala.concurrent.Future
import scala.concurrent.duration.{ FiniteDuration, _ }
import scala.language.postfixOps
import scala.util.{ Failure, Success }

private case class SseConnectionConfig(url: String, headers: List[(String, String)], tlsCheck: Boolean)

private case class SseConnectionEntryValue(source: EventSource)

trait SseListener {
  def onEvent(event: ServerSentEvent): Unit
}

object SseConnector {

  private val retryDelay: FiniteDuration = 5 second
  private val listeners: mutable.Map[SseConnectionConfig, Set[SseListener]] = mutable.Map()
  private val connections: mutable.Map[SseConnectionConfig, Future[Done]] = mutable.Map()

  def open(url: String, headers: List[(String, String)] = Nil, tlsCheck: Boolean)(listener: SseListener)(implicit system: ActorSystem, logger: LoggingAdapter): Unit = synchronized {
    val config = SseConnectionConfig(url, headers, tlsCheck)
    implicit val materializer: ActorMaterializer = ActorMaterializer()

    listeners.update(config, listeners.getOrElse(config, Set()) + listener)

    connections.getOrElseUpdate(config, {
      logger.info(s"Opening SSE connection: $url")
      EventSource(Uri(url), send(config), None, retryDelay).takeWhile { event ⇒
        event.eventType.foreach(t ⇒ logger.info(s"SSE: $t"))
        val receivers = listeners.getOrElse(config, Set())
        receivers.foreach(_.onEvent(event))
        val continue = receivers.nonEmpty
        if (!continue) logger.info(s"Closing SSE connection: $url")
        continue
      }.runWith(Sink.ignore)
    })
  }

  def close(listener: SseListener): Unit = synchronized {
    listeners.transform((_, v) ⇒ v - listener)
  }

  private def send(config: SseConnectionConfig)(request: HttpRequest)(implicit system: ActorSystem, materializer: ActorMaterializer): Future[HttpResponse] = {
    val httpHeaders = config.headers.map { case (k, v) ⇒ HttpHeader.parse(k, v) } collect { case Ok(h, _) ⇒ h } filterNot request.headers.contains
    Source.single(request.withHeaders(request.headers ++ httpHeaders) → 1).via(HttpClient.pool[Any](config.url, config.tlsCheck)).map {
      case (Success(response: HttpResponse), _) ⇒ response
      case (Failure(f), _)                      ⇒ throw new RuntimeException(f.getMessage)
    }.runWith(Sink.head)
  }
}
Example 26
Source File: TestSpec.scala From reactive-programming with Apache License 2.0
package com.test

import java.io.IOException
import java.util.UUID

import akka.actor.{ ActorRef, ActorSystem, PoisonPill }
import akka.event.{ Logging, LoggingAdapter }
import akka.testkit.TestProbe
import akka.util.Timeout
import org.scalatest.concurrent.{ Eventually, ScalaFutures }
import org.scalatest.exceptions.TestFailedException
import org.scalatest._
import rx.lang.scala._

import scala.concurrent.duration._
import scala.concurrent.{ ExecutionContextExecutor, Future }
import scala.util.{ Random ⇒ Rnd, Try }

object Random {
  def apply(): Rnd = new Rnd()
}

trait TestSpec extends FlatSpec with Matchers with ScalaFutures with TryValues with OptionValues with Eventually with BeforeAndAfterAll {
  implicit val system: ActorSystem = ActorSystem("test")
  implicit val ec: ExecutionContextExecutor = system.dispatcher
  val log: LoggingAdapter = Logging(system, this.getClass)
  implicit val pc: PatienceConfig = PatienceConfig(timeout = 50.seconds)
  implicit val timeout = Timeout(50.seconds)

  // The extracted snippet uses `probe` without defining it; a TestProbe member is
  // assumed here so that `cleanup` compiles.
  val probe: TestProbe = TestProbe()

  override protected def afterAll(): Unit = {
    system.terminate()
  }

  def cleanup(actors: ActorRef*): Unit = {
    actors.foreach { (actor: ActorRef) ⇒
      actor ! PoisonPill
      probe watch actor
    }
  }

  implicit class PimpedByteArray(self: Array[Byte]) {
    def getString: String = new String(self)
  }

  implicit class PimpedFuture[T](self: Future[T]) {
    def toTry: Try[T] = Try(self.futureValue)
  }

  implicit class PimpedObservable[T](self: Observable[T]) {
    def waitFor: Unit = {
      self.toBlocking.toIterable.last
    }
  }

  implicit class MustBeWord[T](self: T) {
    def mustBe(pf: PartialFunction[T, Unit]): Unit =
      if (!pf.isDefinedAt(self)) throw new TestFailedException("Unexpected: " + self, 0)
  }

  object Socket {
    def apply() = new Socket
  }

  class Socket {
    def readFromMemory: Future[Array[Byte]] = Future {
      Thread.sleep(100) // sleep 100 millis
      "fromMemory".getBytes
    }

    def send(payload: Array[Byte], from: String, failed: Boolean): Future[Array[Byte]] =
      if (failed) Future.failed(new IOException(s"Network error: $from"))
      else {
        Future {
          Thread.sleep(250) // sleep 250 millis, not real life time, but hey
          s"${payload.getString}->$from".getBytes
        }
      }

    def sendToEurope(payload: Array[Byte], failed: Boolean = false): Future[Array[Byte]] =
      send(payload, "fromEurope", failed)

    def sendToUsa(payload: Array[Byte], failed: Boolean = false): Future[Array[Byte]] =
      send(payload, "fromUsa", failed)
  }
}
Example 27
Source File: RestPi.scala From apache-spark-test with Apache License 2.0
package com.github.dnvriend

import akka.actor.ActorSystem
import akka.event.{ Logging, LoggingAdapter }
import akka.http.scaladsl._
import akka.http.scaladsl.common.{ EntityStreamingSupport, JsonEntityStreamingSupport }
import akka.http.scaladsl.marshallers.sprayjson.SprayJsonSupport
import akka.http.scaladsl.server.{ Directives, Route }
import akka.stream.scaladsl.{ Flow, Source }
import akka.stream.{ ActorMaterializer, Materializer }
import akka.util.ByteString
import com.github.dnvriend.spark.CalculatePi
import org.apache.spark.SparkContext
import org.apache.spark.sql.SparkSession
import spray.json.DefaultJsonProtocol

import scala.concurrent.{ ExecutionContext, Future }

object RestPi extends App with Directives with SprayJsonSupport with DefaultJsonProtocol {
  implicit val system: ActorSystem = ActorSystem()
  implicit val mat: Materializer = ActorMaterializer()
  implicit val ec: ExecutionContext = system.dispatcher
  implicit val log: LoggingAdapter = Logging(system, this.getClass)

  val spark = SparkSession.builder()
    .config("spark.sql.warehouse.dir", "file:/tmp/spark-warehouse")
    .config("spark.scheduler.mode", "FAIR")
    .config("spark.sql.crossJoin.enabled", "true")
    .master("local") // use as many threads as cores
    .appName("RestPi") // The appName parameter is a name for your application to show on the cluster UI.
    .getOrCreate()

  final case class Pi(pi: Double)

  implicit val piJsonFormat = jsonFormat1(Pi)

  val start = ByteString.empty
  val sep = ByteString("\n")
  val end = ByteString.empty

  implicit val jsonStreamingSupport: JsonEntityStreamingSupport = EntityStreamingSupport.json()
    .withFramingRenderer(Flow[ByteString].intersperse(start, sep, end))
    .withParallelMarshalling(parallelism = 8, unordered = true)

  def sparkContext: SparkContext = spark.newSession().sparkContext

  def calculatePi(num: Long = 1000000, slices: Int = 2): Future[Double] =
    Future(CalculatePi(sparkContext, num, slices)).map(count => slices.toDouble * count / (num - 1))

  val route: Route =
    pathEndOrSingleSlash {
      complete(calculatePi().map(Pi))
    } ~ path("pi" / LongNumber / IntNumber) { (num, slices) =>
      complete(calculatePi(num, slices).map(Pi))
    } ~ path("stream" / "pi" / LongNumber) { num =>
      complete(Source.fromFuture(calculatePi()).map(Pi)
        .flatMapConcat(Source.repeat).take(num))
    }

  Http().bindAndHandle(route, "0.0.0.0", 8008)

  sys.addShutdownHook {
    spark.stop()
    system.terminate()
  }
}
Example 29
Source File: CreateZipcodesSpark.scala From apache-spark-test with Apache License 2.0 | 5 votes |
package com.github.dnvriend import akka.actor.ActorSystem import akka.event.{ Logging, LoggingAdapter } import akka.stream.{ ActorMaterializer, Materializer } import org.apache.spark.sql.{ SaveMode, SparkSession } import scala.concurrent.ExecutionContext object CreateZipcodesSpark extends App { implicit val system: ActorSystem = ActorSystem() implicit val mat: Materializer = ActorMaterializer() implicit val ec: ExecutionContext = system.dispatcher implicit val log: LoggingAdapter = Logging(system, this.getClass) val spark = SparkSession.builder() .config("spark.sql.warehouse.dir", "file:/tmp/spark-warehouse") .config("spark.cores.max", "4") .config("spark.scheduler.mode", "FAIR") .config("spark.sql.crossJoin.enabled", "true") .master("local[*]") // use as many threads as cores .appName("CreateZipcodesSpark").getOrCreate() import spark.implicits._ // define an RDD for the district range val districts = spark.sparkContext.parallelize(1000 to 9000).map(_.toString).toDS // create temp view districts.createOrReplaceTempView("districts") // define an RDD with a range for the letters val l1 = spark.sparkContext.parallelize('A' to 'Z').map(_.toString).toDS l1.createOrReplaceTempView("l1") // join the letters val letters = spark.sql("SELECT concat(a.value, b.value) letters from l1 a join l1 b") // define temp view letters.createOrReplaceTempView("letters") // define an RDD for the houses val houses = spark.sparkContext.makeRDD(1 to 399).toDS // create temp view houses.createOrReplaceTempView("houses") // join letters and houses val lettersWithHouseNr = spark.sql( """ |SELECT CONCAT(letters, '-', nr) letterswithhousenr FROM letters |JOIN |(SELECT format_string("%03d", value) nr FROM houses) """.stripMargin ) // create temp view lettersWithHouseNr.createOrReplaceTempView("lwh") // join the districts with the house numbers val tickets = spark.sql("SELECT concat(value, letterswithhousenr) value FROM districts JOIN lwh LIMIT 5000000") tickets.write.mode(SaveMode.Overwrite).parquet("/tmp/tickets_spark.parquet") shutdown def shutdown: Unit = { spark.stop() system.terminate() } sys.addShutdownHook(shutdown) }
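A small follow-up sketch (not in the original) that sanity-checks the generated file by reading it back through the same SparkSession, before shutdown is invoked:

// read the parquet back and peek at a few generated zipcodes
val written = spark.read.parquet("/tmp/tickets_spark.parquet")
println(s"generated ${written.count()} zipcodes") // expect up to 5000000 rows
written.show(5, truncate = false)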
Example 30
Source File: HelloWorld.scala From apache-spark-test with Apache License 2.0 | 5 votes |
package com.github.dnvriend import akka.actor.ActorSystem import akka.event.{ Logging, LoggingAdapter } import akka.stream.{ ActorMaterializer, Materializer } import com.github.dnvriend.spark.CalculatePi import org.apache.spark.sql.SparkSession import scala.concurrent.{ ExecutionContext, Future } object HelloWorld extends App { implicit val system: ActorSystem = ActorSystem() implicit val mat: Materializer = ActorMaterializer() implicit val ec: ExecutionContext = system.dispatcher implicit val log: LoggingAdapter = Logging(system, this.getClass) val n = 10000000 // The first thing a Spark program must do is to create a SparkSession object, // which tells Spark how to access a cluster, or to run in local mode val spark = SparkSession.builder() .config("spark.sql.warehouse.dir", "file:/tmp/spark-warehouse") .config("spark.scheduler.mode", "FAIR") .config("spark.sql.crossJoin.enabled", "true") .master("local[*]") // use as many threads as cores .appName("Hello World") // The appName parameter is a name for your application to show on the cluster UI. .getOrCreate() for { count <- Future(CalculatePi(spark.sparkContext, n)) _ <- system.terminate() } yield { val pi = 2.0 * count / (n - 1) println(s"Hello World, Pi = $pi") spark.stop() } sys.addShutdownHook { spark.stop() system.terminate() } }
Example 31
Source File: LogProgress.scala From apache-spark-test with Apache License 2.0 | 5 votes |
package com.github.dnvriend import akka.NotUsed import akka.event.LoggingAdapter import akka.stream.FlowShape import akka.stream.scaladsl.{ Broadcast, Flow, GraphDSL, Sink } import scala.compat.Platform import scala.collection.immutable._ object LogProgress { def flow[A](each: Long = 1000)(implicit log: LoggingAdapter = null): Flow[A, A, NotUsed] = Flow.fromGraph[A, A, NotUsed](GraphDSL.create() { implicit b => import GraphDSL.Implicits._ val logFlow = Flow[A].statefulMapConcat { () => var last = Platform.currentTime var num = 0L (x: A) => num += 1 if (num % each == 0) { val duration = Platform.currentTime - last val logOpt = Option(log) Option(log).foreach(_.info("[{} ms / {}]: {}", duration, each, num)) if (logOpt.isEmpty) println(s"[$duration ms / $each]: $num") last = Platform.currentTime } Iterable(x) } val bcast = b.add(Broadcast[A](2, eagerCancel = false)) bcast ~> logFlow ~> Sink.ignore FlowShape.of(bcast.in, bcast.out(1)) }) }
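A minimal usage sketch (names are illustrative) wiring the progress logger into a stream:

import akka.actor.ActorSystem
import akka.event.{ Logging, LoggingAdapter }
import akka.stream.{ ActorMaterializer, Materializer }
import akka.stream.scaladsl.{ Sink, Source }

implicit val system: ActorSystem = ActorSystem()
implicit val mat: Materializer = ActorMaterializer()
implicit val log: LoggingAdapter = Logging(system, "progress")

// logs "[<duration> ms / 100000]: <count>" every 100000 elements
Source(1 to 1000000)
  .via(LogProgress.flow[Int](each = 100000))
  .runWith(Sink.ignore)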
Example 32
Source File: SupervisorStrategies.scala From coral with Apache License 2.0 | 5 votes |
package io.coral.actors import akka.actor.OneForOneStrategy import akka.actor.SupervisorStrategy.Resume import akka.event.LoggingAdapter import io.coral.utils.Utils object SupervisorStrategies { def logAndContinue(log: LoggingAdapter) = OneForOneStrategy() { case e: Exception => { log.error(s"Caught exception: ${e.getMessage}") log.error(Utils.stackTraceToString(e)) log.info("Continue running due to supervisor strategy 'Resume'.") Resume } } }
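A hypothetical parent/child pair showing the strategy in use; a child that throws is logged and resumed rather than restarted:

import akka.actor.{ Actor, ActorLogging, Props, SupervisorStrategy }

class Worker extends Actor {
  def receive: Receive = {
    case "boom" => throw new IllegalStateException("boom") // triggers the strategy
    case msg    => sender() ! msg
  }
}

class Parent extends Actor with ActorLogging {
  override val supervisorStrategy: SupervisorStrategy = SupervisorStrategies.logAndContinue(log)
  private val child = context.actorOf(Props[Worker], "worker")
  def receive: Receive = { case msg => child forward msg }
}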
Example 33
Source File: AkkaHttpServerOptions.scala From tapir with Apache License 2.0 | 5 votes |
package sttp.tapir.server.akkahttp import java.io.File import akka.event.LoggingAdapter import akka.http.scaladsl.server.RequestContext import sttp.tapir.Defaults import sttp.tapir.server.{DecodeFailureHandler, LogRequestHandling, ServerDefaults} import scala.concurrent.Future case class AkkaHttpServerOptions( createFile: RequestContext => Future[File], decodeFailureHandler: DecodeFailureHandler, logRequestHandling: LogRequestHandling[LoggingAdapter => Unit] ) object AkkaHttpServerOptions { implicit lazy val default: AkkaHttpServerOptions = AkkaHttpServerOptions( defaultCreateFile, ServerDefaults.decodeFailureHandler, defaultLogRequestHandling ) lazy val defaultCreateFile: RequestContext => Future[File] = { _ => import scala.concurrent.ExecutionContext.Implicits.global Future(Defaults.createTempFile()) } lazy val defaultLogRequestHandling: LogRequestHandling[LoggingAdapter => Unit] = LogRequestHandling( doLogWhenHandled = debugLog, doLogAllDecodeFailures = debugLog, doLogLogicExceptions = (msg: String, ex: Throwable) => log => log.error(ex, msg), noLog = _ => () ) private def debugLog(msg: String, exOpt: Option[Throwable]): LoggingAdapter => Unit = exOpt match { case None => log => log.debug(msg) case Some(ex) => log => log.debug(s"$msg; exception: {}", ex) } }
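Since AkkaHttpServerOptions is a case class, the defaults can be tuned with copy; a sketch (assuming LogRequestHandling is also a case class, as its construction above suggests) that silences the per-request debug logging:

val quietOptions: AkkaHttpServerOptions =
  AkkaHttpServerOptions.default.copy(
    logRequestHandling = AkkaHttpServerOptions.defaultLogRequestHandling.copy(
      doLogWhenHandled = (_, _) => _ => (),      // drop handled-request debug lines
      doLogAllDecodeFailures = (_, _) => _ => () // drop decode-failure debug lines
    )
  )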
Example 34
Source File: Web.scala From full-scala-stack with Apache License 2.0 | 5 votes |
package web import akka.event.{ Logging, LoggingAdapter } import akka.http.scaladsl.Http import akka.stream.scaladsl._ import api.{ Api, Config } import core.{ Core, CoreActors } import scala.concurrent.{ ExecutionContext, Future } import scala.util.control.NonFatal // $COVERAGE-OFF$ This is actual code that we can't test, so we shouldn't report on it trait Web extends Config { this: Api with CoreActors with Core => val log: LoggingAdapter = Logging.getLogger(actorSystem, this) val serverSource: Source[Http.IncomingConnection, Future[Http.ServerBinding]] = { implicit def executionContext: ExecutionContext = actorSystem.dispatcher val host = config.getString("full-scala-stack.host") val port = config.getInt("full-scala-stack.port") Http() .bind(interface = host, port = port) .mapMaterializedValue { bind => bind.foreach { server => log.info(server.localAddress.toString) } bind } } val bindingFuture: Future[Http.ServerBinding] = serverSource .to(Sink.foreach { connection => // foreach materializes the source log.debug("Accepted new connection from " + connection.remoteAddress) // ... and then actually handle the connection try { connection.flow.joinMat(routes)(Keep.both).run() () } catch { case NonFatal(e) => log.error(e, "Could not materialize handling flow for {}", connection) throw e } }) .run() } // $COVERAGE-ON$
Example 35
Source File: MesosCommandBuilderTests.scala From mesos-actor with Apache License 2.0 | 5 votes |
package com.adobe.api.platform.runtime.mesos.mesos

import java.net.URI

import akka.actor.ActorSystem
import akka.event.LoggingAdapter
import com.adobe.api.platform.runtime.mesos.{CommandDef, CommandURIDef, DefaultCommandBuilder}
import org.junit.runner.RunWith
import org.scalatest.junit.JUnitRunner
import org.scalatest.{FlatSpec, Matchers}

@RunWith(classOf[JUnitRunner])
class MesosCommandBuilderTests extends FlatSpec with Matchers {
  behavior of "Mesos Default TaskBuilder"

  implicit val actorSystem: ActorSystem = ActorSystem("test-system")
  implicit val logger: LoggingAdapter = actorSystem.log

  it should "set URIs on a Command Proto from CommandDef" in {
    val uris = 0.to(3).map((i: Int) => {
      new CommandURIDef(new URI(f"http://$i.com"))
    })
    val command = CommandDef(uris = uris)
    val mesosCommandInfo = new DefaultCommandBuilder()(command)
    mesosCommandInfo.getUris(0).getValue shouldBe "http://0.com"
    mesosCommandInfo.getUris(1).getValue shouldBe "http://1.com"
    mesosCommandInfo.getUris(2).getValue shouldBe "http://2.com"
    mesosCommandInfo.getUris(3).getValue shouldBe "http://3.com"
  }

  it should "retain Options set on CommandDef when creating Command Proto" in {
    val uris = 0
      .to(0)
      .map((i: Int) => {
        new CommandURIDef(new URI(f"http://$i.com"), extract = false, cache = true, executable = true)
      })
    val command = CommandDef(uris = uris)
    val mesosCommandInfo = new DefaultCommandBuilder()(command)
    mesosCommandInfo.getUris(0).getExecutable shouldBe true
    mesosCommandInfo.getUris(0).getCache shouldBe true
    mesosCommandInfo.getUris(0).getExtract shouldBe false
  }

  it should "retain environment variables" in {
    val environment = Map("VAR1" -> "VAL1", "VAR2" -> "VAL2")
    val command = CommandDef(environment = environment)
    val mesosCommandInfo = new DefaultCommandBuilder()(command)
    mesosCommandInfo.getEnvironment.getVariables(0).getName shouldBe "VAR1"
    mesosCommandInfo.getEnvironment.getVariables(0).getValue shouldBe "VAL1"
    mesosCommandInfo.getEnvironment.getVariables(1).getName shouldBe "VAR2"
    mesosCommandInfo.getEnvironment.getVariables(1).getValue shouldBe "VAL2"
  }
}
Example 36
Source File: LogFromAkka.scala From kafka-journal with MIT License | 5 votes |
package com.evolutiongaming.kafka.journal import akka.event.LoggingAdapter import cats.effect.Sync import com.evolutiongaming.catshelper.Log object LogFromAkka { def apply[F[_] : Sync](log: LoggingAdapter): Log[F] = new Log[F] { def debug(msg: => String) = { Sync[F].delay { if (log.isDebugEnabled) log.debug(msg) } } def info(msg: => String) = { Sync[F].delay { if (log.isInfoEnabled) log.info(msg) } } def warn(msg: => String) = { Sync[F].delay { if (log.isWarningEnabled) log.warning(msg) } } def warn(msg: => String, cause: Throwable) = { Sync[F].delay { if (log.isWarningEnabled) log.warning(s"$msg: $cause") } } def error(msg: => String) = { Sync[F].delay { if (log.isErrorEnabled) log.error(msg) } } def error(msg: => String, cause: Throwable) = { Sync[F].delay { if (log.isErrorEnabled) log.error(cause, msg) } } } }
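A usage sketch (assuming cats-effect IO on the classpath) adapting an actor system's LoggingAdapter into the effectful Log:

import akka.actor.ActorSystem
import cats.effect.IO

val system = ActorSystem("journal")
val log: Log[IO] = LogFromAkka[IO](system.log)

// the call is suspended in IO and only logs if the level is enabled
log.info("journal started").unsafeRunSync()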
Example 37
Source File: DbService.scala From sns with Apache License 2.0 | 5 votes |
package me.snov.sns.service import java.nio.charset.StandardCharsets import java.nio.file.{Files, Paths, StandardOpenOption} import akka.event.LoggingAdapter import me.snov.sns.model.{Configuration, Subscription, Topic} import spray.json._ trait DbService { def load(): Option[Configuration] def save(configuration: Configuration) } class MemoryDbService extends DbService { override def load(): Option[Configuration] = { Some(Configuration(subscriptions= List[Subscription](), topics= List[Topic]())) } override def save(configuration: Configuration): Unit = {} } class FileDbService(dbFilePath: String)(implicit log: LoggingAdapter) extends DbService { val subscriptionsName = "subscriptions" val topicsName = "topics" val path = Paths.get(dbFilePath) def load(): Option[Configuration] = { if (Files.exists(path)) { log.debug("Loading DB") try { val configuration = read().parseJson.convertTo[Configuration] log.info("Loaded DB") return Some(configuration) } catch { case e: DeserializationException => log.error(e, "Unable to parse configuration") case e: RuntimeException => log.error(e,"Unable to load configuration") } } None } def save(configuration: Configuration) = { log.debug("Saving DB") write(configuration.toJson.prettyPrint) } private def write(contents: String) = { Files.write(path, contents.getBytes(StandardCharsets.UTF_8), StandardOpenOption.CREATE, StandardOpenOption.TRUNCATE_EXISTING) } private def read(): String = { new String(Files.readAllBytes(path)) } }
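A short sketch (the file path is illustrative) persisting and reloading an empty configuration with the file-backed implementation:

import akka.actor.ActorSystem
import akka.event.{Logging, LoggingAdapter}

implicit val system: ActorSystem = ActorSystem("sns")
implicit val log: LoggingAdapter = Logging(system, "db")

val db: DbService = new FileDbService("/tmp/sns-db.json")
db.save(Configuration(subscriptions = List.empty, topics = List.empty))
val restored: Option[Configuration] = db.load() // Some(...) once the file exists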
Example 38
Source File: Main.scala From sns with Apache License 2.0 | 5 votes |
package me.snov.sns import akka.actor.ActorSystem import akka.event.{Logging, LoggingAdapter} import akka.http.scaladsl.Http import akka.http.scaladsl.server.Directives._ import akka.http.scaladsl.server._ import akka.stream.ActorMaterializer import akka.util.Timeout import com.typesafe.config.ConfigFactory import me.snov.sns.actor._ import me.snov.sns.api._ import me.snov.sns.service.FileDbService import me.snov.sns.util.ToStrict import scala.concurrent.ExecutionContext import scala.concurrent.duration._ import scala.util.Properties object Main extends App with ToStrict { implicit val system = ActorSystem("sns") implicit val executor: ExecutionContext = system.dispatcher implicit val materializer: ActorMaterializer = ActorMaterializer() implicit val logger: LoggingAdapter = Logging(system, getClass) implicit val timeout = new Timeout(1.second) val config = ConfigFactory.load() val dbService = new FileDbService(Properties.envOrElse("DB_PATH", config.getString("db.path"))) val dbActor = system.actorOf(DbActor.props(dbService), name = "DbActor") val homeActor = system.actorOf(HomeActor.props, name = "HomeActor") val subscribeActor = system.actorOf(SubscribeActor.props(dbActor), name = "SubscribeActor") val publishActor = system.actorOf(PublishActor.props(subscribeActor), name = "PublishActor") val routes: Route = toStrict { TopicApi.route(subscribeActor) ~ SubscribeApi.route(subscribeActor) ~ PublishApi.route(publishActor) ~ HealthCheckApi.route ~ HomeApi.route(homeActor) } logger.info("SNS v{} is starting", getClass.getPackage.getImplementationVersion) Http().bindAndHandle( handler = logRequestResult("akka-http-sns")(routes), interface = Properties.envOrElse("HTTP_INTERFACE", config.getString("http.interface")), port = Properties.envOrElse("HTTP_PORT", config.getString("http.port")).toInt ) }
Example 39
Source File: JsonStreamingRoute.scala From akka-http-test with Apache License 2.0 | 5 votes |
package com.github.dnvriend.component.simpleserver.route import akka.event.LoggingAdapter import akka.http.scaladsl.common.{ EntityStreamingSupport, JsonEntityStreamingSupport } import akka.http.scaladsl.marshallers.sprayjson.SprayJsonSupport import akka.http.scaladsl.server.{ Directives, Route } import akka.stream.Materializer import akka.stream.scaladsl.Flow import akka.util.ByteString import com.github.dnvriend.component.repository.PersonRepository import com.github.dnvriend.component.simpleserver.dto.http.Person import com.github.dnvriend.component.simpleserver.marshaller.Marshallers import scala.concurrent.ExecutionContext object JsonStreamingRoute extends Directives with SprayJsonSupport with Marshallers { val start = ByteString.empty val sep = ByteString("\n") val end = ByteString.empty implicit val jsonStreamingSupport: JsonEntityStreamingSupport = EntityStreamingSupport.json() .withFramingRenderer(Flow[ByteString].intersperse(start, sep, end)) .withParallelMarshalling(parallelism = 8, unordered = true) def route(dao: PersonRepository)(implicit mat: Materializer, ec: ExecutionContext): Route = path("stream" / IntNumber) { numberOfPersons => (get & pathEnd) { complete(dao.people(numberOfPersons)) } } ~ (post & path("stream") & entity(asSourceOf[Person])) { people => val total = people.log("people").runFold(0) { case (c, _) => c + 1 } complete(total.map(n => s"Received $n number of person")) } }
Example 40
Source File: SimpleServer.scala From akka-http-test with Apache License 2.0 | 5 votes |
package com.github.dnvriend.component.simpleserver import javax.inject.Inject import akka.actor.ActorSystem import akka.event.{ Logging, LoggingAdapter } import akka.http.scaladsl._ import akka.pattern.CircuitBreaker import akka.stream.{ ActorMaterializer, Materializer } import com.github.dnvriend.component.repository.PersonRepository import com.github.dnvriend.component.simpleserver.route._ import com.google.inject.Singleton import play.api.Configuration import scala.concurrent.ExecutionContext import scala.concurrent.duration._ @Singleton class SimpleServer @Inject() (personDao: PersonRepository, cb: CircuitBreaker, interface: String, port: Int)(implicit system: ActorSystem, mat: Materializer, ec: ExecutionContext) { Http().bindAndHandle(SimpleServerRestRoutes.routes(personDao, cb), interface, port) } object SimpleServerLauncher extends App { implicit val system: ActorSystem = ActorSystem() implicit val mat: Materializer = ActorMaterializer() implicit val ec: ExecutionContext = system.dispatcher implicit val log: LoggingAdapter = Logging(system, this.getClass) val maxFailures: Int = 3 val callTimeout: FiniteDuration = 1.seconds val resetTimeout: FiniteDuration = 10.seconds val cb = new CircuitBreaker(system.scheduler, maxFailures, callTimeout, resetTimeout) val config: play.api.Configuration = Configuration(system.settings.config) sys.addShutdownHook { system.terminate() } new SimpleServer(new PersonRepository, cb, config.getString("http.interface").getOrElse("0.0.0.0"), config.getInt("http.port").getOrElse(8080)) }
Example 41
Source File: WeatherClient.scala From akka-http-test with Apache License 2.0 | 5 votes |
package com.github.dnvriend.component.webservices.weather import akka.NotUsed import akka.actor.ActorSystem import akka.event.LoggingAdapter import akka.http.scaladsl.model.{ HttpRequest, HttpResponse } import akka.stream.Materializer import akka.stream.scaladsl.Flow import com.github.dnvriend.component.webservices.generic.HttpClient import spray.json.DefaultJsonProtocol import scala.concurrent.{ ExecutionContext, Future } import scala.util.Try case class Wind(speed: Double, deg: Double) case class Main(temp: Double, temp_min: Double, temp_max: Double, pressure: Double, sea_level: Option[Double], grnd_level: Option[Double], humidity: Int) case class Cloud(all: Int) case class Weather(id: Int, main: String, description: String, icon: String) case class Sys(message: Double, country: String, sunrise: Long, sunset: Long) case class Coord(lon: Double, lat: Double) case class WeatherResult(coord: Coord, sys: Sys, weather: List[Weather], base: String, main: Main, wind: Wind, clouds: Cloud, dt: Long, id: Int, name: String, cod: Int) trait Marshallers extends DefaultJsonProtocol { implicit val windJsonFormat = jsonFormat2(Wind) implicit val mainJsonFormat = jsonFormat7(Main) implicit val cloudJsonFormat = jsonFormat1(Cloud) implicit val weatherJsonFormat = jsonFormat4(Weather) implicit val sysJsonFormat = jsonFormat4(Sys) implicit val coordJsonFormat = jsonFormat2(Coord) implicit val weatherResultJsonFormat = jsonFormat11(WeatherResult) } case class GetWeatherRequest(zip: String, country: String) trait OpenWeatherApi { def getWeather(zip: String, country: String): Future[Option[WeatherResult]] def getWeather[T](implicit system: ActorSystem, mat: Materializer, ec: ExecutionContext): Flow[(GetWeatherRequest, T), (Option[WeatherResult], T), NotUsed] } object OpenWeatherApi { import spray.json._ def apply()(implicit system: ActorSystem, mat: Materializer, ec: ExecutionContext, log: LoggingAdapter) = new OpenWeatherApiImpl def mapResponseToWeatherResult(json: String)(implicit reader: JsonReader[WeatherResult]): Option[WeatherResult] = Try(json.parseJson.convertTo[WeatherResult]).toOption def responseToString(resp: HttpResponse)(implicit system: ActorSystem, mat: Materializer, ec: ExecutionContext): Future[String] = HttpClient.responseToString(resp) def getWeatherRequestFlow[T]: Flow[(GetWeatherRequest, T), (HttpRequest, T), NotUsed] = Flow[(GetWeatherRequest, T)].map { case (request, id) => (HttpClient.mkGetRequest(s"/data/2.5/weather?zip=${request.zip},${request.country}"), id) } def mapResponseToWeatherResultFlow[T](implicit system: ActorSystem, mat: Materializer, ec: ExecutionContext, reader: JsonReader[WeatherResult]): Flow[(Try[HttpResponse], T), (Option[WeatherResult], T), NotUsed] = HttpClient.responseToString[T].map { case (json, id) => (mapResponseToWeatherResult(json), id) } } class OpenWeatherApiImpl()(implicit val system: ActorSystem, val ec: ExecutionContext, val mat: Materializer, val log: LoggingAdapter) extends OpenWeatherApi with Marshallers { import OpenWeatherApi._ private val client = HttpClient("weather") override def getWeather(zip: String, country: String): Future[Option[WeatherResult]] = client.get(s"/data/2.5/weather?zip=$zip,$country"). flatMap(responseToString) .map(mapResponseToWeatherResult) override def getWeather[T](implicit system: ActorSystem, mat: Materializer, ec: ExecutionContext): Flow[(GetWeatherRequest, T), (Option[WeatherResult], T), NotUsed] = getWeatherRequestFlow[T] .via(client.cachedHostConnectionFlow[T]) .via(mapResponseToWeatherResultFlow[T]) }
Example 42
Source File: PostcodeClient.scala From akka-http-test with Apache License 2.0 | 5 votes |
package com.github.dnvriend.component.webservices.postcode import akka.NotUsed import akka.actor.ActorSystem import akka.event.LoggingAdapter import akka.http.scaladsl.model.{ HttpRequest, HttpResponse } import akka.stream.Materializer import akka.stream.scaladsl.Flow import com.github.dnvriend.component.webservices.generic.HttpClient import spray.json.DefaultJsonProtocol import scala.concurrent.{ ExecutionContext, Future } import scala.util.Try import scala.util.matching.Regex case class Address( street: String, houseNumber: Int, houseNumberAddition: String, postcode: String, city: String, municipality: String, province: String, rdX: Option[Int], rdY: Option[Int], latitude: Double, longitude: Double, bagNumberDesignationId: String, bagAddressableObjectId: String, addressType: String, purposes: Option[List[String]], surfaceArea: Int, houseNumberAdditions: List[String] ) trait Marshallers extends DefaultJsonProtocol { implicit val addressJsonFormat = jsonFormat17(Address) } case class GetAddressRequest(zip: String, houseNumber: String) trait PostcodeClient { def address(postcode: String, houseNumber: Int): Future[Option[Address]] def address[T](implicit system: ActorSystem, mat: Materializer, ec: ExecutionContext): Flow[(GetAddressRequest, T), (Option[Address], T), NotUsed] } object PostcodeClient { import spray.json._ val ZipcodeWithoutSpacePattern: Regex = """([1-9][0-9]{3})([A-Za-z]{2})""".r val ZipcodeWithSpacePattern: Regex = """([1-9][0-9]{3})[\s]([A-Za-z]{2})""".r def mapToAddress(json: String)(implicit reader: JsonReader[Address]): Option[Address] = Try(json.parseJson.convertTo[Address]).toOption def responseToString(resp: HttpResponse)(implicit system: ActorSystem, mat: Materializer, ec: ExecutionContext): Future[String] = HttpClient.responseToString(resp) def getAddressRequestFlow[T]: Flow[(GetAddressRequest, T), (HttpRequest, T), NotUsed] = Flow[(GetAddressRequest, T)].map { case (request, id) => (HttpClient.mkGetRequest(s"/rest/addresses/${request.zip}/${request.houseNumber}/"), id) } def mapResponseToAddressFlow[T](implicit system: ActorSystem, mat: Materializer, ec: ExecutionContext, reader: JsonReader[Address]): Flow[(Try[HttpResponse], T), (Option[Address], T), NotUsed] = HttpClient.responseToString[T].map { case (json, id) => (mapToAddress(json), id) } def normalizeZipcode(zipcode: String): Option[String] = zipcode.toUpperCase match { case ZipcodeWithoutSpacePattern(numbers, letters) => Option(s"$numbers$letters") case ZipcodeWithSpacePattern(numbers, letters) => Option(s"$numbers$letters") case _ => None } def apply()(implicit system: ActorSystem, mat: Materializer, ec: ExecutionContext, log: LoggingAdapter) = new PostcodeClientImpl } class PostcodeClientImpl()(implicit val system: ActorSystem, val mat: Materializer, val ec: ExecutionContext, val log: LoggingAdapter) extends PostcodeClient with Marshallers { import PostcodeClient._ private val client = HttpClient("postcode") override def address(postcode: String, houseNumber: Int): Future[Option[Address]] = normalizeZipcode(postcode) match { case Some(zip) => client.get(s"/rest/addresses/$zip/$houseNumber/") .flatMap(responseToString).map(mapToAddress) case None => Future.successful(None) } override def address[T](implicit system: ActorSystem, mat: Materializer, ec: ExecutionContext): Flow[(GetAddressRequest, T), (Option[Address], T), NotUsed] = getAddressRequestFlow[T] .via(client.cachedHostConnectionFlow[T]) .via(mapResponseToAddressFlow[T]) }
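The zipcode normalizer above accepts both spaced and compact forms; for example:

PostcodeClient.normalizeZipcode("1234 ab") // Some("1234AB"): space removed, letters uppercased
PostcodeClient.normalizeZipcode("0123AB")  // None: the pattern rejects a leading zero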
Example 43
Source File: LowLevelServer.scala From akka-http-test with Apache License 2.0 | 5 votes |
package com.github.dnvriend.component.lowlevelserver import akka.NotUsed import akka.actor.{ ActorSystem, Props } import akka.event.{ Logging, LoggingAdapter } import akka.http.scaladsl.Http import akka.http.scaladsl.model._ import akka.pattern.ask import akka.stream.scaladsl.{ Flow, Sink, Source } import akka.stream.{ ActorMaterializer, Materializer } import akka.util.Timeout import com.github.dnvriend.component.lowlevelserver.dto.{ Person, PersonWithId } import com.github.dnvriend.component.lowlevelserver.marshaller.Marshaller import com.github.dnvriend.component.lowlevelserver.repository.PersonRepository import spray.json.{ DefaultJsonProtocol, _ } import scala.concurrent.duration._ import scala.concurrent.{ ExecutionContext, Future } class LowLevelServer(implicit val system: ActorSystem, mat: Materializer, ec: ExecutionContext, log: LoggingAdapter, timeout: Timeout) extends DefaultJsonProtocol with Marshaller { val personDb = system.actorOf(Props[PersonRepository]) def debug(t: Any)(implicit log: LoggingAdapter = null): Unit = if (Option(log).isEmpty) println(t) else log.debug(t.toString) def http200Okay(req: HttpRequest): HttpResponse = HttpResponse(StatusCodes.OK) def http200AsyncOkay(req: HttpRequest): Future[HttpResponse] = Future(http200Okay(req)) val http200OkayFlow: Flow[HttpRequest, HttpResponse, NotUsed] = Flow[HttpRequest].map { req => HttpResponse(StatusCodes.OK) } val serverSource: Source[Http.IncomingConnection, Future[Http.ServerBinding]] = Http().bind(interface = "localhost", port = 8080) val binding: Future[Http.ServerBinding] = serverSource.to(Sink.foreach { conn => // conn.handleWith(http200OkayFlow) // conn.handleWithSyncHandler(http200Okay) // conn.handleWithAsyncHandler(http200AsyncOkay, 8) conn.handleWithAsyncHandler(personRequestHandler) }).run() def personRequestHandler(req: HttpRequest): Future[HttpResponse] = req match { case HttpRequest(HttpMethods.GET, Uri.Path("/api/person"), _, _, _) => for { xs <- (personDb ? "findAll").mapTo[List[PersonWithId]] entity = HttpEntity(ContentTypes.`application/json`, xs.toJson.compactPrint) } yield HttpResponse(StatusCodes.OK, entity = entity) case HttpRequest(HttpMethods.POST, Uri.Path("/api/person"), _, ent, _) => for { strictEntity <- ent.toStrict(1.second) person <- (personDb ? strictEntity.data.utf8String.parseJson.convertTo[Person]).mapTo[PersonWithId] } yield HttpResponse(StatusCodes.OK, entity = person.toJson.compactPrint) case req => req.discardEntityBytes() Future.successful(HttpResponse(StatusCodes.NotFound)) } } object LowLevelServerLauncher extends App with DefaultJsonProtocol { // setting up some machinery implicit val system: ActorSystem = ActorSystem() implicit val mat: Materializer = ActorMaterializer() implicit val ec: ExecutionContext = system.dispatcher implicit val log: LoggingAdapter = Logging(system, this.getClass) implicit val timeout: Timeout = Timeout(10.seconds) new LowLevelServer() }
Example 44
Source File: ActorSystemSpec.scala From lagom with Apache License 2.0 | 5 votes |
package com.lightbend.lagom.persistence import java.lang.reflect.Modifier import akka.actor.ActorSystem import akka.actor.CoordinatedShutdown import akka.actor.setup.ActorSystemSetup import akka.event.Logging import akka.event.LoggingAdapter import akka.testkit.ImplicitSender import akka.testkit.TestKit import com.typesafe.config.Config import com.typesafe.config.ConfigFactory import org.scalactic.CanEqual import org.scalactic.TypeCheckedTripleEquals import org.scalatest.BeforeAndAfterAll import org.scalatest.matchers.should.Matchers import org.scalatest.wordspec.AnyWordSpecLike object ActorSystemSpec { // taken from akka-testkit's AkkaSpec private def testNameFromCallStack(classToStartFrom: Class[_]): String = { def isAbstractClass(className: String): Boolean = { try { Modifier.isAbstract(Class.forName(className).getModifiers) } catch { case _: Throwable => false // yes catch everything, best effort check } } val startFrom = classToStartFrom.getName val filteredStack = Thread.currentThread.getStackTrace.iterator .map(_.getClassName) // drop until we find the first occurrence of classToStartFrom .dropWhile(!_.startsWith(startFrom)) // then continue to the next entry after classToStartFrom that makes sense .dropWhile { case `startFrom` => true case str if str.startsWith(startFrom + "$") => true // lambdas inside startFrom etc case str if isAbstractClass(str) => true case _ => false } if (filteredStack.isEmpty) throw new IllegalArgumentException(s"Couldn't find [${classToStartFrom.getName}] in call stack") // sanitize for actor system name scrubActorSystemName(filteredStack.next()) } // taken from akka-testkit's AkkaSpec private def scrubActorSystemName(name: String): String = { name .replaceFirst("""^.*\.""", "") // drop package name .replaceAll("""\$\$?\w+""", "") // drop scala anonymous functions/classes .replaceAll("[^a-zA-Z_0-9]", "_") } } abstract class ActorSystemSpec(actorSystemFactory: () => ActorSystem) extends TestKit(actorSystemFactory()) with AnyWordSpecLike with Matchers with BeforeAndAfterAll with TypeCheckedTripleEquals with ImplicitSender { def this(testName: String, config: Config) = this(() => ActorSystem(testName, config)) def this(config: Config) = this(ActorSystemSpec.testNameFromCallStack(classOf[ActorSystemSpec]), config) def this(setup: ActorSystemSetup) = this(() => ActorSystem(ActorSystemSpec.testNameFromCallStack(classOf[ActorSystemSpec]), setup)) def this() = this(ConfigFactory.empty()) override def afterAll(): Unit = { shutdown() super.afterAll() } val log: LoggingAdapter = Logging(system, this.getClass) val coordinatedShutdown: CoordinatedShutdown = CoordinatedShutdown(system) // for ScalaTest === compare of Class objects implicit def classEqualityConstraint[A, B]: CanEqual[Class[A], Class[B]] = new CanEqual[Class[A], Class[B]] { def areEqual(a: Class[A], b: Class[B]) = a == b } }
Example 45
Source File: MesosTaskBuilderTests.scala From mesos-actor with Apache License 2.0 | 5 votes |
package com.adobe.api.platform.runtime.mesos.mesos

import akka.actor.ActorSystem
import akka.event.LoggingAdapter
import com.adobe.api.platform.runtime.mesos._
import org.apache.mesos.v1.Protos.ContainerInfo.DockerInfo.Network
import org.apache.mesos.v1.Protos.ContainerInfo.DockerInfo.PortMapping
import org.apache.mesos.v1.Protos.Resource
import org.apache.mesos.v1.Protos.Value
import org.junit.runner.RunWith
import org.scalatest.FlatSpec
import org.scalatest.Matchers
import org.scalatest.junit.JUnitRunner

@RunWith(classOf[JUnitRunner])
class MesosTaskBuilderTests extends FlatSpec with Matchers {
  behavior of "Mesos Default TaskBuilder"

  implicit val actorSystem: ActorSystem = ActorSystem("test-system")
  implicit val logger: LoggingAdapter = actorSystem.log

  it should "set TaskInfo properties from TaskDef" in {
    val offers = ProtobufUtil.getOffers("/offer1.json")
    val resources = Seq(
      Resource
        .newBuilder()
        .setName("cpus")
        .setType(Value.Type.SCALAR)
        .setScalar(Value.Scalar.newBuilder().setValue(0.1))
        .build())
    val portMappings = Seq(
      PortMapping
        .newBuilder()
        .setHostPort(31000)
        .setContainerPort(112233)
        .build())
    val parameters = Map(
      "dns" -> Set("1.2.3.4", "8.8.8.8"),
      "cap-drop" -> Set("NET_RAW", "NET_ADMIN"),
      "ulimit" -> Set("nofile=1024:1024"))
    val environment = Map("VAR1" -> "VAL1", "VAR2" -> "VAL2")
    val taskDef = TaskDef(
      "taskId",
      "taskName",
      "dockerImage:someTag",
      0.1,
      256,
      List(112233),
      healthCheckParams = Some(HealthCheckConfig(0, 1, 2, 5, gracePeriod = 30, maxConsecutiveFailures = 2)),
      true,
      User("usernet"),
      parameters,
      Some(CommandDef(environment = environment)))
    val taskInfo = new DefaultTaskBuilder()(taskDef, offers.getOffers(0), resources, portMappings)

    taskInfo.getTaskId.getValue shouldBe taskDef.taskId
    taskInfo.getName shouldBe taskDef.taskName
    taskInfo.getContainer.getDocker.getImage shouldBe taskDef.dockerImage
    taskInfo.getResources(0).getName shouldBe "cpus"
    taskInfo.getResources(0).getScalar.getValue shouldBe taskDef.cpus
    taskInfo.getContainer.getNetworkInfos(0).getName shouldBe "usernet"
    taskInfo.getContainer.getDocker.getNetwork shouldBe Network.USER
    taskInfo.getContainer.getDocker.getPortMappings(0).getContainerPort shouldBe taskDef.ports(0)
    taskInfo.getContainer.getDocker.getPortMappings(0).getHostPort shouldBe 31000
    taskInfo.getContainer.getDocker.getParameters(0).getKey shouldBe "dns"
    taskInfo.getContainer.getDocker.getParameters(0).getValue shouldBe "1.2.3.4"
    taskInfo.getContainer.getDocker.getParameters(1).getKey shouldBe "dns"
    taskInfo.getContainer.getDocker.getParameters(1).getValue shouldBe "8.8.8.8"
    taskInfo.getContainer.getDocker.getParameters(2).getKey shouldBe "cap-drop"
    taskInfo.getContainer.getDocker.getParameters(2).getValue shouldBe "NET_RAW"
    taskInfo.getContainer.getDocker.getParameters(3).getKey shouldBe "cap-drop"
    taskInfo.getContainer.getDocker.getParameters(3).getValue shouldBe "NET_ADMIN"
    taskInfo.getContainer.getDocker.getParameters(4).getKey shouldBe "ulimit"
    taskInfo.getContainer.getDocker.getParameters(4).getValue shouldBe "nofile=1024:1024"
    taskInfo.getCommand.getEnvironment.getVariables(0).getName shouldBe "VAR1"
    taskInfo.getCommand.getEnvironment.getVariables(0).getValue shouldBe "VAL1"
    taskInfo.getCommand.getEnvironment.getVariables(1).getName shouldBe "VAR2"
    taskInfo.getCommand.getEnvironment.getVariables(1).getValue shouldBe "VAL2"
    taskInfo.getHealthCheck.getDelaySeconds shouldBe 1
    taskInfo.getHealthCheck.getIntervalSeconds shouldBe 2
    taskInfo.getHealthCheck.getTimeoutSeconds shouldBe 5
    taskInfo.getHealthCheck.getGracePeriodSeconds shouldBe 30
    taskInfo.getHealthCheck.getConsecutiveFailures shouldBe 2
    taskInfo.getHealthCheck.getTcp.getPort shouldBe 112233
  }
}
Example 46
Source File: TaskBuilder.scala From mesos-actor with Apache License 2.0 | 5 votes |
package com.adobe.api.platform.runtime.mesos import akka.event.LoggingAdapter import org.apache.mesos.v1.Protos.ContainerInfo import org.apache.mesos.v1.Protos.ContainerInfo.DockerInfo import org.apache.mesos.v1.Protos.ContainerInfo.DockerInfo.PortMapping import org.apache.mesos.v1.Protos.HealthCheck import org.apache.mesos.v1.Protos.HealthCheck.TCPCheckInfo import org.apache.mesos.v1.Protos.NetworkInfo import org.apache.mesos.v1.Protos.Offer import org.apache.mesos.v1.Protos.Parameter import org.apache.mesos.v1.Protos.Resource import org.apache.mesos.v1.Protos.TaskID import org.apache.mesos.v1.Protos.TaskInfo import scala.collection.JavaConverters._ trait TaskBuilder { def commandBuilder: CommandBuilder def apply(reqs: TaskDef, offer: Offer, resources: Seq[Resource], portMappings: Seq[PortMapping])( implicit logger: LoggingAdapter): TaskInfo } class DefaultTaskBuilder extends TaskBuilder { val commandBuilder = new DefaultCommandBuilder() def apply(reqs: TaskDef, offer: Offer, resources: Seq[Resource], portMappings: Seq[PortMapping])( implicit logger: LoggingAdapter): TaskInfo = { val parameters = reqs.dockerRunParameters.flatMap { case (k, v) => v.map(pv => Parameter.newBuilder().setKey(k).setValue(pv).build()) }.asJava val dockerNetwork = reqs.network match { case _: User => DockerInfo.Network.USER case Host => DockerInfo.Network.HOST case Bridge => DockerInfo.Network.BRIDGE } //for case of user network, create a single NetworkInfo with the name val networkInfos = reqs.network match { case u: User => Seq[NetworkInfo]( NetworkInfo .newBuilder() .setName(u.name) .build()).asJava case _ => Seq[NetworkInfo]().asJava } val taskBuilder = TaskInfo.newBuilder .setName(reqs.taskName) .setTaskId(TaskID.newBuilder .setValue(reqs.taskId)) .setAgentId(offer.getAgentId) .setContainer( ContainerInfo.newBuilder .setType(ContainerInfo.Type.DOCKER) .addAllNetworkInfos(networkInfos) .setDocker( DockerInfo.newBuilder .setImage(reqs.dockerImage) .setNetwork(dockerNetwork) .addAllParameters(parameters) .addAllPortMappings(portMappings.asJava) .build()) .build()) .addAllResources(resources.asJava) reqs.commandDef.foreach(c => { taskBuilder.setCommand(commandBuilder(c)) }) reqs.healthCheckParams.foreach(hcp => { taskBuilder.setHealthCheck( HealthCheck .newBuilder() .setType(HealthCheck.Type.TCP) .setTcp(TCPCheckInfo .newBuilder() .setPort(reqs.ports(hcp.healthCheckPortIndex))) .setDelaySeconds(hcp.delay) .setIntervalSeconds(hcp.interval) .setTimeoutSeconds(hcp.timeout) .setGracePeriodSeconds(hcp.gracePeriod) .setConsecutiveFailures(hcp.maxConsecutiveFailures) .build()) }) taskBuilder.build() } }
Example 47
Source File: ProducerCommands.scala From reactive-kafka-microservice-template with Apache License 2.0 | 5 votes |
package com.omearac.http.routes import akka.actor.ActorRef import akka.event.LoggingAdapter import akka.http.scaladsl.model.StatusCodes import akka.http.scaladsl.server.Directives._ import akka.http.scaladsl.server.Route import akka.pattern.ask import akka.util.Timeout import com.omearac.producers.DataProducer.PublishMessages import com.omearac.shared.EventMessages.MessagesPublished import scala.concurrent.duration._ trait ProducerCommands { def log: LoggingAdapter def dataProducer: ActorRef val producerHttpCommands: Route = pathPrefix("data_producer"){ implicit val timeout = Timeout(10 seconds) path("produce" / IntNumber) { {numOfMessagesToProduce => get { onSuccess(dataProducer ? PublishMessages(numOfMessagesToProduce)) { case MessagesPublished(numberOfMessages) => complete(StatusCodes.OK, numberOfMessages + " messages Produced as Ordered, Boss!") case _ => complete(StatusCodes.InternalServerError) } } } } } }
Example 48
Source File: ConsumerCommands.scala From reactive-kafka-microservice-template with Apache License 2.0 | 5 votes |
package com.omearac.http.routes import akka.actor.ActorRef import akka.event.LoggingAdapter import akka.http.scaladsl.model.StatusCodes import akka.http.scaladsl.server.Directives._ import akka.http.scaladsl.server._ import akka.pattern.ask import akka.util.Timeout import com.omearac.consumers.DataConsumer.{ConsumerActorReply, ManuallyInitializeStream, ManuallyTerminateStream} import scala.concurrent.duration._ trait ConsumerCommands { def dataConsumer: ActorRef def eventConsumer: ActorRef def log: LoggingAdapter val dataConsumerHttpCommands: Route = pathPrefix("data_consumer") { implicit val timeout = Timeout(10 seconds) path("stop") { get { onSuccess(dataConsumer ? ManuallyTerminateStream) { case m: ConsumerActorReply => log.info(m.message); complete(StatusCodes.OK, m.message); case _ => complete(StatusCodes.InternalServerError) } } } ~ path("start") { get { onSuccess(dataConsumer ? ManuallyInitializeStream) { case m: ConsumerActorReply => log.info(m.message); complete(StatusCodes.OK, m.message) case _ => complete(StatusCodes.InternalServerError) } } } } val eventConsumerHttpCommands: Route = pathPrefix("event_consumer") { implicit val timeout = Timeout(10 seconds) path("stop") { get { onSuccess(eventConsumer ? ManuallyTerminateStream) { case m: ConsumerActorReply => log.info(m.message); complete(StatusCodes.OK, m.message); case _ => complete(StatusCodes.InternalServerError) } } } ~ path("start") { get { onSuccess(eventConsumer ? ManuallyInitializeStream) { case m: ConsumerActorReply => log.info(m.message); complete(StatusCodes.OK, m.message) case _ => complete(StatusCodes.InternalServerError) } } } } }
Example 49
Source File: StageLogging.scala From akka-stream-sqs with Apache License 2.0 | 5 votes |
package me.snov.akka.sqs.shape import akka.event.{LoggingAdapter, NoLogging} import akka.stream.ActorMaterializer import akka.stream.stage.GraphStageLogic private[sqs] trait StageLogging { self: GraphStageLogic => private var loggingAdapter: LoggingAdapter = _ def log: LoggingAdapter = { if (loggingAdapter eq null) { materializer match { case actorMaterializer: ActorMaterializer => loggingAdapter = akka.event.Logging(actorMaterializer.system, self.getClass) case _ => loggingAdapter = NoLogging } } loggingAdapter } }
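A hypothetical pass-through stage showing the trait in use; because StageLogging is private[sqs], the stage has to live in the same package:

package me.snov.akka.sqs.shape

import akka.stream.{Attributes, FlowShape, Inlet, Outlet}
import akka.stream.stage.{GraphStage, GraphStageLogic, InHandler, OutHandler}

// logs every element it forwards, using the LoggingAdapter provided by StageLogging
class LoggingPassThrough[A] extends GraphStage[FlowShape[A, A]] {
  val in: Inlet[A] = Inlet("LoggingPassThrough.in")
  val out: Outlet[A] = Outlet("LoggingPassThrough.out")
  override val shape: FlowShape[A, A] = FlowShape(in, out)

  override def createLogic(inheritedAttributes: Attributes): GraphStageLogic =
    new GraphStageLogic(shape) with StageLogging {
      setHandler(in, new InHandler {
        override def onPush(): Unit = {
          val elem = grab(in)
          log.debug("forwarding element: {}", elem)
          push(out, elem)
        }
      })
      setHandler(out, new OutHandler {
        override def onPull(): Unit = pull(in)
      })
    }
}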
Example 50
Source File: TestSpec.scala From intro-to-akka-streams with Apache License 2.0 | 5 votes |
package com.github.dnvriend.streams import akka.NotUsed import akka.actor.{ ActorRef, ActorSystem, PoisonPill } import akka.event.{ Logging, LoggingAdapter } import akka.stream.Materializer import akka.stream.scaladsl.Source import akka.stream.testkit.TestSubscriber import akka.stream.testkit.scaladsl.TestSink import akka.testkit.TestProbe import akka.util.Timeout import com.github.dnvriend.streams.util.ClasspathResources import org.scalatest._ import org.scalatest.concurrent.{ Eventually, ScalaFutures } import org.scalatestplus.play.guice.GuiceOneServerPerSuite import play.api.inject.BindingKey import play.api.libs.json.{ Format, Json } import play.api.test.WsTestClient import scala.collection.immutable._ import scala.concurrent.duration._ import scala.concurrent.{ ExecutionContext, Future } import scala.reflect.ClassTag import scala.util.Try object Person { implicit val format: Format[Person] = Json.format[Person] } final case class Person(firstName: String, age: Int) class TestSpec extends FlatSpec with Matchers with GivenWhenThen with OptionValues with TryValues with ScalaFutures with WsTestClient with BeforeAndAfterAll with BeforeAndAfterEach with Eventually with ClasspathResources with GuiceOneServerPerSuite { def getComponent[A: ClassTag] = app.injector.instanceOf[A] def getNamedComponent[A](name: String)(implicit ct: ClassTag[A]): A = app.injector.instanceOf[A](BindingKey(ct.runtimeClass.asInstanceOf[Class[A]]).qualifiedWith(name)) // set the port number of the HTTP server override lazy val port: Int = 8081 implicit val timeout: Timeout = 1.second implicit val pc: PatienceConfig = PatienceConfig(timeout = 30.seconds, interval = 300.millis) implicit val system: ActorSystem = getComponent[ActorSystem] implicit val ec: ExecutionContext = getComponent[ExecutionContext] implicit val mat: Materializer = getComponent[Materializer] val log: LoggingAdapter = Logging(system, this.getClass) // ================================== Supporting Operations ==================================== def id: String = java.util.UUID.randomUUID().toString implicit class FutureToTry[T](f: Future[T]) { def toTry: Try[T] = Try(f.futureValue) } implicit class SourceOps[A](src: Source[A, NotUsed]) { def testProbe(f: TestSubscriber.Probe[A] ⇒ Unit): Unit = f(src.runWith(TestSink.probe(system))) } def withIterator[T](start: Int = 0)(f: Source[Int, NotUsed] ⇒ T): T = f(Source.fromIterator(() ⇒ Iterator from start)) def fromCollection[A](xs: Iterable[A])(f: TestSubscriber.Probe[A] ⇒ Unit): Unit = f(Source(xs).runWith(TestSink.probe(system))) def killActors(refs: ActorRef*): Unit = { val tp = TestProbe() refs.foreach { ref ⇒ tp watch ref tp.send(ref, PoisonPill) tp.expectTerminated(ref) } } }
Example 51
Source File: ClientUtils.scala From akka-persistence-dynamodb with Apache License 2.0 | 5 votes |
package com.github.j5ik2o.akka.persistence.dynamodb.utils import akka.actor.DynamicAccess import akka.event.LoggingAdapter import com.amazonaws.services.dynamodbv2.{ AmazonDynamoDB, AmazonDynamoDBAsync } import com.github.j5ik2o.akka.persistence.dynamodb.config.PluginConfig import com.github.j5ik2o.akka.persistence.dynamodb.config.client.DynamoDBClientConfig import com.github.j5ik2o.reactive.aws.dynamodb.{ DynamoDbAsyncClient, DynamoDbSyncClient } import software.amazon.awssdk.services.dynamodb.{ DynamoDbAsyncClient => JavaDynamoDbAsyncClient, DynamoDbClient => JavaDynamoDbSyncClient } object ClientUtils { def createV2SyncClient( dynamicAccess: DynamicAccess, pluginConfig: PluginConfig )(f: JavaDynamoDbSyncClient => Unit): DynamoDbSyncClient = { val javaSyncClientV2 = V2DynamoDbClientBuilderUtils .setupSync( dynamicAccess, pluginConfig ).build() f(javaSyncClientV2) DynamoDbSyncClient(javaSyncClientV2) } def createV2AsyncClient( dynamicAccess: DynamicAccess, pluginConfig: PluginConfig )(f: JavaDynamoDbAsyncClient => Unit): DynamoDbAsyncClient = { val javaAsyncClientV2 = V2DynamoDbClientBuilderUtils .setupAsync( dynamicAccess, pluginConfig ).build() f(javaAsyncClientV2) DynamoDbAsyncClient(javaAsyncClientV2) } def createV1AsyncClient( dynamicAccess: DynamicAccess, pluginConfig: PluginConfig ): AmazonDynamoDBAsync = { V1DynamoDBClientBuilderUtils.setupAsync(dynamicAccess, pluginConfig).build() } def createV1SyncClient( dynamicAccess: DynamicAccess, configRootPath: String, pluginConfig: PluginConfig )( implicit log: LoggingAdapter ): AmazonDynamoDB = { if (pluginConfig.clientConfig.v1ClientConfig.dispatcherName.isEmpty) log.warning( s"Please set a dispatcher name defined by you to `${configRootPath}.dynamo-db-client.v1.dispatcher-name` if you are using the AWS-SDK API for blocking I/O" ) V1DynamoDBClientBuilderUtils.setupSync(dynamicAccess, pluginConfig).build() } def createV1DaxSyncClient( configRootPath: String, dynamoDBClientConfig: DynamoDBClientConfig )(implicit log: LoggingAdapter): AmazonDynamoDB = { if (dynamoDBClientConfig.v1DaxClientConfig.dispatcherName.isEmpty) log.warning( s"Please set a dispatcher name defined by you to `${configRootPath}.dynamo-db-client.v1-dax.dispatcher-name` if you are using the AWS-SDK API for blocking I/O" ) V1DaxClientBuilderUtils.setupSync(dynamoDBClientConfig).build() } def createV1DaxAsyncClient(dynamoDBClientConfig: DynamoDBClientConfig): AmazonDynamoDBAsync = { V1DaxClientBuilderUtils.setupAsync(dynamoDBClientConfig).build() } }
Example 52
Source File: Endpoints.scala From akka-http-microservice-templates with MIT License | 5 votes |
package endpoints

import java.lang.System.currentTimeMillis

import akka.actor.ActorSystem
import akka.event.{Logging, LoggingAdapter}
import akka.http.scaladsl.model.HttpRequest
import akka.http.scaladsl.server.Directives._
import akka.http.scaladsl.server.RouteResult.Complete
import akka.http.scaladsl.server._
import akka.http.scaladsl.server.directives._
import akka.http.scaladsl.settings.RoutingSettings
import akka.stream.{ActorMaterializer, Materializer}

import scala.concurrent.ExecutionContext

class Endpoints(userEndpoint: UserEndpoint, healthCheckEndpoint: HealthCheckEndpoint) {

  def routes(implicit sys: ActorSystem, mat: ActorMaterializer, ec: ExecutionContext) = loggableRoute {
    Route.seal {
      userEndpoint.userRoutes ~ healthCheckEndpoint.healthCheckRoute
    }
  }

  def logRequestAndResponse(loggingAdapter: LoggingAdapter, before: Long)(req: HttpRequest)(res: Any): Unit = {
    val entry = res match {
      case Complete(resp) =>
        val message = s"{path=${req.uri}, method=${req.method.value}, status=${resp.status.intValue()}, elapsedTime=${currentTimeMillis() - before}}"
        LogEntry(message, Logging.InfoLevel)
      case other =>
        LogEntry(other, Logging.InfoLevel)
    }
    entry.logTo(loggingAdapter)
  }

  def loggableRoute(route: Route)(implicit m: Materializer, ex: ExecutionContext, routingSettings: RoutingSettings): Route = {
    DebuggingDirectives.logRequestResult(LoggingMagnet(log => {
      val requestTimestamp = currentTimeMillis()
      logRequestAndResponse(log, requestTimestamp)
    }))(route)
  }
}
Example 53
Source File: CassandraKeyspaceConfig.scala From lagom with Apache License 2.0 | 5 votes |
package com.lightbend.lagom.internal.persistence.cassandra import akka.event.LoggingAdapter import com.typesafe.config.Config private[lagom] object CassandraKeyspaceConfig { def validateKeyspace(namespace: String, config: Config, log: LoggingAdapter): Unit = { if (log.isErrorEnabled) { val keyspacePath = s"$namespace.keyspace" if (!config.hasPath(keyspacePath)) { log.error("Configuration for [{}] must be set in application.conf ", keyspacePath) } } } }
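A usage sketch (the namespace path is illustrative); at startup the call logs an error when <namespace>.keyspace is absent from application.conf:

import akka.actor.ActorSystem
import akka.event.Logging

val system = ActorSystem("app")
val log = Logging(system, "cassandra-config")

CassandraKeyspaceConfig.validateKeyspace("my-service.cassandra", system.settings.config, log)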
Example 54
Source File: MultiNodeExpect.scala From lagom with Apache License 2.0 | 5 votes |
package com.lightbend.lagom.internal.cluster import akka.Done import akka.actor.ActorRef import akka.actor.ActorSystem import akka.actor.Scheduler import akka.annotation.ApiMayChange import akka.cluster.ddata.DistributedData import akka.cluster.ddata.Flag import akka.cluster.ddata.FlagKey import akka.cluster.ddata.Replicator.Get import akka.cluster.ddata.Replicator.GetSuccess import akka.cluster.ddata.Replicator.ReadLocal import akka.cluster.ddata.Replicator.Update import akka.cluster.ddata.Replicator.UpdateSuccess import akka.cluster.ddata.Replicator.WriteAll import akka.cluster.ddata.Replicator.WriteConsistency import akka.cluster.ddata.SelfUniqueAddress import akka.event.LoggingAdapter import akka.testkit.TestProbe import scala.concurrent.ExecutionContext import scala.concurrent.Future import scala.concurrent.duration._ import scala.reflect.ClassTag import akka.pattern.after import akka.pattern.ask import akka.util.Timeout import scala.util.control.NonFatal @ApiMayChange class MultiNodeExpect(probe: TestProbe)(implicit system: ActorSystem) { private implicit val scheduler: Scheduler = system.scheduler private implicit val executionContext: ExecutionContext = system.dispatcher val replicator: ActorRef = DistributedData(system).replicator implicit val node: SelfUniqueAddress = DistributedData(system).selfUniqueAddress def expectMsgType[T](expectationKey: String, max: FiniteDuration)(implicit t: ClassTag[T]): Future[Done] = { val eventualT = () => Future(errorAsRuntime(probe.expectMsgType[T](max))) doExpect(eventualT)(expectationKey, max) } // prevents Errors from turning into BoxedError when using `Future(f)` (where f throws Error) private def errorAsRuntime[T](f: => T): T = { try { f } catch { case NonFatal(t) => throw t case x: Throwable => throw new RuntimeException(x) } } private def doExpect[T](eventualT: () => Future[T])(expectationKey: String, max: FiniteDuration): Future[Done] = { val DataKey: FlagKey = FlagKey(expectationKey) val writeAll: WriteConsistency = WriteAll(max) implicit val timeout: Timeout = Timeout(max) val retryDelay = 3.second val fTimeout = after(max, scheduler)(Future.failed(new RuntimeException(s"timeout $max expired"))) // If the local expectation wins, it must notify others. val fLocalExpect: Future[Done] = eventualT() .map { _ => (replicator ? Update(DataKey, Flag.empty, writeAll)( _.switchOn )).mapTo[UpdateSuccess[Flag]] } .map(_ => Done) // if a remote expectation wins, we can move on. val poll: () => Future[Done] = () => (replicator ? Get(DataKey, ReadLocal)).map { case g @ GetSuccess(DataKey, _) if g.get(DataKey).enabled => Done case _ => throw new RuntimeException("Flag unset") } val fRemoteExpect: Future[Done] = retry( poll, retryDelay, Int.MaxValue // keep retrying, there's a timeout later ) Future .firstCompletedOf( Seq( fLocalExpect, fRemoteExpect, fTimeout ) ) } // From vklang's https://gist.github.com/viktorklang/9414163 def retry[T](op: () => Future[T], delay: FiniteDuration, retries: Int): Future[T] = op().recoverWith { case _ if retries > 0 => after(delay, scheduler)(retry(op, delay, retries - 1)) } }
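A sketch of the intended use on each node of a multi-node test (assumes an implicit ActorSystem with Distributed Data available): every node registers the same expectation key, and the first node whose probe sees the message flips the replicated flag and unblocks the rest:

import akka.Done
import akka.testkit.TestProbe
import scala.concurrent.{ Await, Future }
import scala.concurrent.duration._

val probe = TestProbe()
system.eventStream.subscribe(probe.ref, classOf[String])

val expectation: Future[Done] =
  new MultiNodeExpect(probe).expectMsgType[String]("saw-greeting", 15.seconds)
Await.result(expectation, 20.seconds)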
Example 55
Source File: HttpClusterBootstrapRoutes.scala From akka-management with Apache License 2.0 | 5 votes |
package akka.management.cluster.bootstrap.contactpoint

import scala.concurrent.duration._

import akka.actor.ActorSystem
import akka.cluster.Cluster
import akka.cluster.Member
import akka.event.Logging
import akka.event.LoggingAdapter
import akka.http.javadsl.server.directives.RouteAdapter
import akka.http.scaladsl.model.HttpRequest
import akka.http.scaladsl.model.Uri
import akka.http.scaladsl.server.Route
import akka.management.cluster.bootstrap.ClusterBootstrapSettings
import akka.management.cluster.bootstrap.contactpoint.HttpBootstrapJsonProtocol.ClusterMember
import akka.management.cluster.bootstrap.contactpoint.HttpBootstrapJsonProtocol.SeedNodes

final class HttpClusterBootstrapRoutes(settings: ClusterBootstrapSettings) extends HttpBootstrapJsonProtocol {
  import akka.http.scaladsl.server.Directives._

  private def routeGetSeedNodes: Route = extractClientIP { clientIp =>
    extractActorSystem { implicit system =>
      import akka.cluster.MemberStatus
      val cluster = Cluster(system)

      def memberToClusterMember(m: Member): ClusterMember =
        ClusterMember(m.uniqueAddress.address, m.uniqueAddress.longUid, m.status.toString, m.roles)

      val state = cluster.state

      // TODO shuffle the members so in a big deployment nodes start joining different ones and not all the same?
      val members = state.members
        .diff(state.unreachable)
        .filter(m => m.status == MemberStatus.up || m.status == MemberStatus.weaklyUp || m.status == MemberStatus.joining)
        .take(settings.contactPoint.httpMaxSeedNodesToExpose)
        .map(memberToClusterMember)

      val info = SeedNodes(cluster.selfMember.uniqueAddress.address, members)
      log.info(
        "Bootstrap request from {}: Contact Point returning {} seed-nodes [{}]",
        clientIp,
        members.size,
        members.map(_.node).mkString(", "))
      complete(info)
    }
  }

  // NOTE: the route wiring did not survive in this extract; the val below is a
  // reconstruction based on the "/bootstrap/seed-nodes" path used by
  // ClusterBootstrapRequests further down.
  val routes: Route =
    (get & path("bootstrap" / "seed-nodes"))(routeGetSeedNodes)

  def getRoutes: akka.http.javadsl.server.Route = RouteAdapter(routes)

  private def log(implicit sys: ActorSystem): LoggingAdapter = Logging(sys, classOf[HttpClusterBootstrapRoutes])
}

object ClusterBootstrapRequests {
  import akka.http.scaladsl.client.RequestBuilding._

  def bootstrapSeedNodes(baseUri: Uri): HttpRequest =
    Get(baseUri + "/bootstrap/seed-nodes")
}
Example 56
Source File: ActorSystemSpec.scala From akka-persistence-couchbase with Apache License 2.0 | 5 votes |
package com.lightbend.lagom.persistence import akka.actor.ActorSystem import akka.actor.setup.ActorSystemSetup import akka.event.{ Logging, LoggingAdapter } import akka.testkit.{ ImplicitSender, TestKit } import com.typesafe.config.{ Config, ConfigFactory } import org.scalactic.{ CanEqual, TypeCheckedTripleEquals } import org.scalatest.{ BeforeAndAfterAll, Matchers, WordSpecLike } object ActorSystemSpec { def getCallerName(clazz: Class[_]): String = { val s = (Thread.currentThread.getStackTrace map (_.getClassName) drop 1) .dropWhile(_ matches "(java.lang.Thread|.*ActorSystemSpec.?$)") val reduced = s.lastIndexWhere(_ == clazz.getName) match { case -1 ⇒ s case z ⇒ s drop (z + 1) } reduced.head.replaceFirst(""".*\.""", "").replaceAll("[^a-zA-Z_0-9]", "_") } } abstract class ActorSystemSpec(system: ActorSystem) extends TestKit(system) with WordSpecLike with Matchers with BeforeAndAfterAll with TypeCheckedTripleEquals with ImplicitSender { def this(testName: String, config: Config) = this(ActorSystem(testName, config)) def this(config: Config) = this(ActorSystemSpec.getCallerName(getClass), config) def this(setup: ActorSystemSetup) = this(ActorSystem(ActorSystemSpec.getCallerName(getClass), setup)) def this() = this(ConfigFactory.empty()) override protected def afterAll(): Unit = { shutdown() super.afterAll() } val log: LoggingAdapter = Logging(system, this.getClass) // for ScalaTest === compare of Class objects implicit def classEqualityConstraint[A, B]: CanEqual[Class[A], Class[B]] = new CanEqual[Class[A], Class[B]] { def areEqual(a: Class[A], b: Class[B]) = a == b } }
Example 57
Source File: TagSequenceNumbering.scala From akka-persistence-couchbase with Apache License 2.0 | 5 votes |
package akka.persistence.couchbase.internal

import java.util.concurrent.ConcurrentHashMap

import akka.annotation.InternalApi
import akka.event.LoggingAdapter
import akka.persistence.couchbase.internal.CouchbaseSchema.{Fields, Queries}
import com.couchbase.client.java.query.N1qlParams

import scala.concurrent.{ExecutionContext, Future}

// NOTE: only part of this trait survives in this extract; the declaration below is a
// reconstruction, and the referenced members (taggingPerPidSequenceNumbers, log,
// queryConsistency, withCouchbaseSession, highestTagSequenceNumberQuery, plus the
// PersistenceId/Tag aliases) are defined elsewhere in the original source.
@InternalApi
private[akka] trait TagSequenceNumbering {

  def evictSeqNrsFor(pid: PersistenceId): Unit = {
    val keys = taggingPerPidSequenceNumbers.keySet.iterator()
    while (keys.hasNext) {
      val key @ (keyPid, _) = keys.next()
      if (keyPid == pid) keys.remove()
    }
  }

  protected def currentTagSeqNrFromDb(pid: PersistenceId, tag: Tag): Future[Option[Long]] =
    withCouchbaseSession { session =>
      val query = highestTagSequenceNumberQuery(pid, tag, queryConsistency)
      log.debug("currentTagSeqNrFromDb: {}", query)
      session.singleResponseQuery(query).map {
        case Some(json) => Some(json.getLong(Fields.TagSeqNr))
        case None       => None
      }
    }
}
Example 58
Source File: StaleChannels.scala From eclair with Apache License 2.0 | 5 votes |
package fr.acinq.eclair.router

import akka.actor.ActorContext
import akka.event.LoggingAdapter
import fr.acinq.eclair.db.NetworkDb
import fr.acinq.eclair.router.Router.{ChannelDesc, Data, PublicChannel, hasChannels}
import fr.acinq.eclair.wire.{ChannelAnnouncement, ChannelUpdate}
import fr.acinq.eclair.{ShortChannelId, TxCoordinates}

import scala.collection.mutable
import scala.compat.Platform
import scala.concurrent.duration._

object StaleChannels {

  def handlePruneStaleChannels(d: Data, db: NetworkDb, currentBlockHeight: Long)(implicit ctx: ActorContext, log: LoggingAdapter): Data = {
    // first we select channels that we will prune
    val staleChannels = getStaleChannels(d.channels.values, currentBlockHeight)
    val staleChannelIds = staleChannels.map(_.ann.shortChannelId)
    // then we remove nodes that aren't tied to any channels anymore (and deduplicate them)
    val potentialStaleNodes = staleChannels.flatMap(c => Set(c.ann.nodeId1, c.ann.nodeId2)).toSet
    val channels1 = d.channels -- staleChannelIds
    // no need to iterate on all nodes, just on those that are affected by current pruning
    val staleNodes = potentialStaleNodes.filterNot(nodeId => hasChannels(nodeId, channels1.values))
    // let's clean the db and send the events
    db.removeChannels(staleChannelIds) // NB: this also removes channel updates
    // we keep track of recently pruned channels so we don't revalidate them (zombie churn)
    db.addToPruned(staleChannelIds)
    staleChannelIds.foreach { shortChannelId =>
      log.info("pruning shortChannelId={} (stale)", shortChannelId)
      ctx.system.eventStream.publish(ChannelLost(shortChannelId))
    }
    val staleChannelsToRemove = new mutable.ArrayBuffer[ChannelDesc]
    staleChannels.foreach(ca => {
      staleChannelsToRemove += ChannelDesc(ca.ann.shortChannelId, ca.ann.nodeId1, ca.ann.nodeId2)
      staleChannelsToRemove += ChannelDesc(ca.ann.shortChannelId, ca.ann.nodeId2, ca.ann.nodeId1)
    })
    val graph1 = d.graph.removeEdges(staleChannelsToRemove)
    staleNodes.foreach { nodeId =>
      log.info("pruning nodeId={} (stale)", nodeId)
      db.removeNode(nodeId)
      ctx.system.eventStream.publish(NodeLost(nodeId))
    }
    d.copy(nodes = d.nodes -- staleNodes, channels = channels1, graph = graph1)
  }

  def isStale(u: ChannelUpdate): Boolean = isStale(u.timestamp)

  def isStale(timestamp: Long): Boolean = {
    // BOLT 7: "nodes MAY prune channels should the timestamp of the latest channel_update be older than 2 weeks"
    // but we don't want to prune brand new channels for which we didn't yet receive a channel update
    val staleThresholdSeconds = (System.currentTimeMillis.milliseconds - 14.days).toSeconds
    timestamp < staleThresholdSeconds
  }

  def isAlmostStale(timestamp: Long): Boolean = {
    // we define almost stale as 2 weeks minus 4 days
    val staleThresholdSeconds = (System.currentTimeMillis.milliseconds - 10.days).toSeconds
    timestamp < staleThresholdSeconds
  }

  def isStale(channel: ChannelAnnouncement, update1_opt: Option[ChannelUpdate], update2_opt: Option[ChannelUpdate], currentBlockHeight: Long): Boolean = {
    // BOLT 7: "nodes MAY prune channels should the timestamp of the latest channel_update be older than 2 weeks (1209600 seconds)"
    // but we don't want to prune brand new channels for which we didn't yet receive a channel update, so we keep them as long as they are less than 2 weeks (2016 blocks) old
    val staleThresholdBlocks = currentBlockHeight - 2016
    val TxCoordinates(blockHeight, _, _) = ShortChannelId.coordinates(channel.shortChannelId)
    blockHeight < staleThresholdBlocks && update1_opt.forall(isStale) && update2_opt.forall(isStale)
  }

  def getStaleChannels(channels: Iterable[PublicChannel], currentBlockHeight: Long): Iterable[PublicChannel] =
    channels.filter(data => isStale(data.ann, data.update_1_opt, data.update_2_opt, currentBlockHeight))
}
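The pruning logic above is tied to eclair's router state, but the staleness rule itself reduces to a timestamp comparison. A minimal, self-contained sketch of just that rule (object and value names are made up for illustration):

import scala.concurrent.duration._

object StalenessSketch extends App {
  // BOLT 7: an update older than 2 weeks makes the channel prunable
  def isStale(timestampSeconds: Long): Boolean = {
    val staleThresholdSeconds = (System.currentTimeMillis.milliseconds - 14.days).toSeconds
    timestampSeconds < staleThresholdSeconds
  }

  val freshUpdate = System.currentTimeMillis / 1000
  val oldUpdate = freshUpdate - 15.days.toSeconds
  println(isStale(freshUpdate)) // false
  println(isStale(oldUpdate))   // true
}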
Example 59
Source File: ActorLazyLogging.scala From scala-commons with MIT License | 5 votes |
package com.avsystem.commons
package redis.util

import akka.actor.Actor
import akka.event.LoggingAdapter

trait ActorLazyLogging { self: Actor =>
  object log {
    val rawLog: LoggingAdapter = akka.event.Logging(context.system, self)

    def error(msg: => String, cause: Throwable = null): Unit =
      if (rawLog.isErrorEnabled) {
        if (cause == null) {
          rawLog.error(msg)
        } else {
          rawLog.error(cause, msg)
        }
      }

    def warning(msg: => String): Unit = macro macros.misc.LazyLoggingMacros.warningImpl

    def info(msg: => String): Unit = macro macros.misc.LazyLoggingMacros.infoImpl

    def debug(msg: => String): Unit = macro macros.misc.LazyLoggingMacros.debugImpl
  }
}
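The macro-based methods above expand, roughly, to a level check guarding the message construction. A plain-akka sketch of the same idea, without the macro machinery (Worker and Demo are hypothetical names):

import akka.actor.{Actor, ActorSystem, Props}
import akka.event.{Logging, LoggingAdapter}

class Worker extends Actor {
  val log: LoggingAdapter = Logging(context.system, this)

  def receive: Receive = {
    case msg =>
      // build the (potentially expensive) message only if the level is enabled,
      // which is what the macros above arrange automatically
      if (log.isDebugEnabled) log.debug(s"received $msg from ${sender()}")
  }
}

object Demo extends App {
  val system = ActorSystem("lazy-logging-demo")
  system.actorOf(Props[Worker]) ! "hello"
}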
Example 60
Source File: Main.scala From nexus with Apache License 2.0 | 5 votes |
package ch.epfl.bluebrain.nexus.storage

import java.nio.file.Paths
import java.time.Clock

import akka.actor.ActorSystem
import akka.event.{Logging, LoggingAdapter}
import akka.http.scaladsl.Http
import akka.http.scaladsl.server.Route
import akka.util.Timeout
import cats.effect.Effect
import ch.epfl.bluebrain.nexus.storage.Storages.DiskStorage
import ch.epfl.bluebrain.nexus.storage.attributes.AttributesCache
import ch.epfl.bluebrain.nexus.storage.config.{AppConfig, Settings}
import ch.epfl.bluebrain.nexus.storage.config.AppConfig._
import ch.epfl.bluebrain.nexus.storage.routes.Routes
import com.typesafe.config.{Config, ConfigFactory}
import kamon.Kamon
import monix.eval.Task
import monix.execution.Scheduler

import scala.concurrent.duration._
import scala.concurrent.{Await, ExecutionContext, Future}
import scala.util.{Failure, Success}

//noinspection TypeAnnotation
// $COVERAGE-OFF$
object Main {

  def loadConfig(): Config = {
    val cfg = sys.env.get("STORAGE_CONFIG_FILE") orElse sys.props.get("storage.config.file") map { str =>
      val file = Paths.get(str).toAbsolutePath.toFile
      ConfigFactory.parseFile(file)
    } getOrElse ConfigFactory.empty()
    (cfg withFallback ConfigFactory.load()).resolve()
  }

  def setupMonitoring(config: Config): Unit = {
    if (sys.env.getOrElse("KAMON_ENABLED", "false").toBoolean) {
      Kamon.reconfigure(config)
      Kamon.loadModules()
    }
  }

  def shutdownMonitoring(): Unit = {
    if (sys.env.getOrElse("KAMON_ENABLED", "false").toBoolean) {
      Await.result(Kamon.stopModules(), 10.seconds)
    }
  }

  @SuppressWarnings(Array("UnusedMethodParameter"))
  def main(args: Array[String]): Unit = {
    val config = loadConfig()
    setupMonitoring(config)

    implicit val appConfig: AppConfig = Settings(config).appConfig
    implicit val as: ActorSystem = ActorSystem(appConfig.description.fullName, config)
    implicit val ec: ExecutionContext = as.dispatcher
    implicit val eff: Effect[Task] = Task.catsEffect(Scheduler.global)
    implicit val iamIdentities: IamIdentitiesClient[Task] = new IamIdentitiesClient[Task](appConfig.iam)
    implicit val timeout = Timeout(1.minute)
    implicit val clock = Clock.systemUTC

    val storages: Storages[Task, AkkaSource] =
      new DiskStorage(appConfig.storage, appConfig.digest, AttributesCache[Task, AkkaSource])

    val logger: LoggingAdapter = Logging(as, getClass)
    logger.info("==== Cluster is Live ====")

    val routes: Route = Routes(storages)

    val httpBinding: Future[Http.ServerBinding] = {
      Http().bindAndHandle(routes, appConfig.http.interface, appConfig.http.port)
    }
    httpBinding onComplete {
      case Success(binding) =>
        logger.info(s"Bound to ${binding.localAddress.getHostString}:${binding.localAddress.getPort}")
      case Failure(th) =>
        logger.error(th, "Failed to perform an http binding on {}:{}", appConfig.http.interface, appConfig.http.port)
        Await.result(as.terminate(), 10.seconds)
    }

    as.registerOnTermination {
      shutdownMonitoring()
    }
    // attempt to leave the cluster before shutting down
    val _ = sys.addShutdownHook {
      Await.result(as.terminate().map(_ => ()), 10.seconds)
    }
  }
}
// $COVERAGE-ON$
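The bind-and-log part of this Main can be exercised on its own. A minimal sketch, assuming only akka-http and a route that always completes (all names here are made up):

import akka.actor.ActorSystem
import akka.event.{Logging, LoggingAdapter}
import akka.http.scaladsl.Http
import akka.http.scaladsl.server.Directives._
import akka.stream.ActorMaterializer

import scala.util.{Failure, Success}

object BindLoggingDemo extends App {
  implicit val system: ActorSystem = ActorSystem("bind-logging-demo")
  implicit val mat: ActorMaterializer = ActorMaterializer()
  import system.dispatcher

  val log: LoggingAdapter = Logging(system, getClass)

  // mirror the onComplete block above: info on success, error (with the cause) on failure
  Http().bindAndHandle(complete("ok"), "127.0.0.1", 8080).onComplete {
    case Success(binding) => log.info("Bound to {}", binding.localAddress)
    case Failure(th)      => log.error(th, "Failed to bind"); system.terminate()
  }
}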
Example 61
Source File: RecordProcessorFactoryImpl.scala From kinesis-stream with MIT License | 5 votes |
package px.kinesis.stream.consumer

import akka.NotUsed
import akka.event.LoggingAdapter
import akka.stream.scaladsl.{Keep, Sink, Source}
import akka.stream.{KillSwitch, Materializer, OverflowStrategy}
import px.kinesis.stream.consumer.checkpoint.CheckpointTracker
import software.amazon.kinesis.processor.{ShardRecordProcessor, ShardRecordProcessorFactory}

import scala.collection.immutable.Seq
import scala.concurrent.ExecutionContext

class RecordProcessorFactoryImpl(
  sink: Sink[Record, NotUsed],
  workerId: String,
  checkpointTracker: CheckpointTracker,
  killSwitch: KillSwitch
)(implicit am: Materializer, ec: ExecutionContext, logging: LoggingAdapter)
    extends ShardRecordProcessorFactory {

  override def shardRecordProcessor(): ShardRecordProcessor = {
    val queue = Source
      .queue[Seq[Record]](0, OverflowStrategy.backpressure)
      .mapConcat(identity)
      .toMat(sink)(Keep.left)
      .run()

    new RecordProcessorImpl(queue, checkpointTracker, killSwitch, workerId)
  }
}
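The factory above receives its LoggingAdapter implicitly, so wiring code declares the adapter once and every component picks it up. The same pattern in miniature (LoggedWorker and Wiring are hypothetical names):

import akka.actor.ActorSystem
import akka.event.{Logging, LoggingAdapter}

class LoggedWorker(name: String)(implicit log: LoggingAdapter) {
  def run(): Unit = log.info("worker {} started", name)
}

object Wiring extends App {
  val system = ActorSystem("wiring-demo")
  // declared once, threaded implicitly into every component constructed below
  implicit val log: LoggingAdapter = Logging(system, getClass)
  new LoggedWorker("a").run()
  new LoggedWorker("b").run()
  system.terminate()
}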
Example 62
Source File: TimeResponseDirective.scala From graphcool-framework with Apache License 2.0 | 5 votes |
package cool.graph.metrics.extensions

import akka.event.Logging.LogLevel
import akka.event.{Logging, LoggingAdapter}
import akka.http.scaladsl.model.HttpRequest
import akka.http.scaladsl.server.RouteResult.{Complete, Rejected}
import akka.http.scaladsl.server.directives.{DebuggingDirectives, LoggingMagnet}
import cool.graph.metrics.{CustomTag, MetricsManager, TimerMetric}

trait TimeResponseDirective {
  // abstract member supplied by the implementing class below; without it the trait would not compile on its own
  val requestTimer: TimerMetric

  def captureResponseTimeFunction(
      loggingAdapter: LoggingAdapter,
      requestTimestamp: Long,
      level: LogLevel = Logging.InfoLevel
  )(req: HttpRequest)(res: Any): Unit = {
    res match {
      case Complete(resp) =>
        val responseTimestamp: Long = System.nanoTime
        val elapsedTime: Long = (responseTimestamp - requestTimestamp) / 1000000
        requestTimer.record(elapsedTime, Seq(resp.status.toString()))
      case Rejected(_) =>
    }
  }

  def captureResponseTime(log: LoggingAdapter) = {
    val requestTimestamp = System.nanoTime
    captureResponseTimeFunction(log, requestTimestamp)(_)
  }

  val timeResponse = DebuggingDirectives.logRequestResult(LoggingMagnet(captureResponseTime(_)))
}

case class TimeResponseDirectiveImpl(metricsManager: MetricsManager) extends TimeResponseDirective {
  val requestTimer: TimerMetric = metricsManager.defineTimer("responseTime", CustomTag("status"))
}
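The same timing shape can report to the LoggingAdapter itself instead of a metrics backend, which makes it runnable without the cool.graph dependencies. A sketch, not part of the original project:

import akka.event.LoggingAdapter
import akka.http.scaladsl.model.HttpRequest
import akka.http.scaladsl.server.Route
import akka.http.scaladsl.server.RouteResult.{Complete, Rejected}
import akka.http.scaladsl.server.directives.{DebuggingDirectives, LoggingMagnet}

object ResponseTimeLogging {
  private def record(log: LoggingAdapter, requestTimestamp: Long)(req: HttpRequest)(res: Any): Unit =
    res match {
      case Complete(resp) =>
        val elapsedMs = (System.nanoTime - requestTimestamp) / 1000000
        log.info("{} {} -> {} in {} ms", req.method.value, req.uri.path, resp.status.intValue, elapsedMs)
      case Rejected(_) => // nothing to record for rejections
    }

  // System.nanoTime is captured per request, because the magnet function runs once per request
  def timed(route: Route): Route =
    DebuggingDirectives.logRequestResult(LoggingMagnet(log => record(log, System.nanoTime)(_)))(route)
}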
Example 63
Source File: DocSvr.scala From Raphtory with Apache License 2.0 | 5 votes |
package com.raphtory.core.clustersetup

import akka.actor.ActorSystem
import akka.actor.Address
import akka.actor.ExtendedActorSystem
import akka.cluster.Cluster
import akka.cluster.Member
import akka.event.LoggingAdapter
import akka.management.cluster.bootstrap.ClusterBootstrap
import akka.management.javadsl.AkkaManagement
import com.raphtory.core.clustersetup.util.ConfigUtils._
import com.raphtory.core.utils.Utils
import com.typesafe.config.Config
import com.typesafe.config.ConfigFactory
import com.typesafe.config.ConfigValueFactory

import scala.collection.JavaConversions
import scala.collection.JavaConversions._

trait DocSvr {

  def seedLoc: String

  implicit val system: ActorSystem
  val docker = System.getenv().getOrDefault("DOCKER", "false").trim.toBoolean

  val clusterSystemName: String = Utils.clusterSystemName
  val ssn: String = java.util.UUID.randomUUID.toString

  def printConfigInfo(config: Config, system: ActorSystem): Unit = {
    val log: LoggingAdapter = system.log

    val systemConfig: SystemConfig = config.parse()
    val bindAddress: SocketAddress = systemConfig.bindAddress
    val tcpAddress: SocketAddress = systemConfig.tcpAddress

    log.info(s"Created ActorSystem with ID: $ssn")
    log.info(s"Binding ActorSystem internally to address ${bindAddress.host}:${bindAddress.port}")
    log.info(s"Binding ActorSystem externally to host ${tcpAddress.host}:${tcpAddress.port}")
    log.info(s"Registering the following seeds to ActorSystem: ${systemConfig.seeds}")
    log.info(s"Registering the following roles to ActorSystem: ${systemConfig.roles}")

    // FIXME: This is bit unorthodox ...
    val akkaSystemUrl: Address = system.asInstanceOf[ExtendedActorSystem].provider.getDefaultAddress
    log.info(s"ActorSystem successfully initialised at the following Akka URL: $akkaSystemUrl")
  }
}
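printConfigInfo relies on the adapter every ActorSystem already carries (system.log) plus the ExtendedActorSystem cast to learn its own address. Both tricks in isolation (demo names are hypothetical):

import akka.actor.{ActorSystem, Address, ExtendedActorSystem}
import akka.event.LoggingAdapter

object ClusterInfoDemo extends App {
  val system = ActorSystem("cluster-info-demo")
  val log: LoggingAdapter = system.log // every ActorSystem carries a ready-made adapter

  val selfAddress: Address = system.asInstanceOf[ExtendedActorSystem].provider.getDefaultAddress
  log.info("ActorSystem reachable at {}", selfAddress)
  system.terminate()
}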
Example 64
Source File: CassandraStatements.scala From akka-persistence-cassandra with Apache License 2.0 | 5 votes |
package akka.persistence.cassandra

import scala.concurrent.ExecutionContext
import scala.concurrent.Future

import akka.Done
import akka.annotation.InternalApi
import akka.event.LoggingAdapter
import akka.persistence.cassandra.journal.CassandraJournalStatements
import akka.persistence.cassandra.snapshot.CassandraSnapshotStatements
import com.datastax.oss.driver.api.core.CqlSession
import com.datastax.oss.driver.api.core.cql.Row

@InternalApi
private[akka] class ColumnDefinitionCache {
  private def hasColumn(column: String, row: Row, cached: Option[Boolean], updateCache: Boolean => Unit): Boolean = {
    cached match {
      case Some(b) => b
      case None =>
        val b = row.getColumnDefinitions.contains(column)
        updateCache(b)
        b
    }
  }

  @volatile private var _hasMetaColumns: Option[Boolean] = None
  private val updateMetaColumnsCache: Boolean => Unit = b => _hasMetaColumns = Some(b)
  def hasMetaColumns(row: Row): Boolean =
    hasColumn("meta", row, _hasMetaColumns, updateMetaColumnsCache)

  @volatile private var _hasOldTagsColumns: Option[Boolean] = None
  private val updateOldTagsColumnsCache: Boolean => Unit = b => _hasOldTagsColumns = Some(b)
  def hasOldTagsColumns(row: Row): Boolean =
    hasColumn("tag1", row, _hasOldTagsColumns, updateOldTagsColumnsCache)

  @volatile private var _hasTagsColumn: Option[Boolean] = None
  private val updateTagsColumnCache: Boolean => Unit = b => _hasTagsColumn = Some(b)
  def hasTagsColumn(row: Row): Boolean =
    hasColumn("tags", row, _hasTagsColumn, updateTagsColumnCache)

  @volatile private var _hasMessageColumn: Option[Boolean] = None
  private val updateMessageColumnCache: Boolean => Unit = b => _hasMessageColumn = Some(b)
  def hasMessageColumn(row: Row): Boolean =
    hasColumn("message", row, _hasMessageColumn, updateMessageColumnCache)
}
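Stripped of the Cassandra specifics, each hasXxxColumn above is a compute-once flag published through a @volatile field, with a benign race (concurrent callers may compute the same value twice). The shape in isolation (FlagCache is a hypothetical name):

class FlagCache(compute: () => Boolean) {
  @volatile private var cached: Option[Boolean] = None

  def get: Boolean = cached match {
    case Some(b) => b
    case None =>
      val b = compute() // may run twice under contention; both runs yield the same value
      cached = Some(b)
      b
  }
}

object FlagCacheDemo extends App {
  def schemaLookup(column: String): Boolean = {
    println(s"looking up $column") // printed once: the second get is served from the cache
    column == "meta"
  }

  val hasMeta = new FlagCache(() => schemaLookup("meta"))
  println(hasMeta.get)
  println(hasMeta.get)
}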
Example 65
Source File: UpdatesQueue.scala From fotm-info with MIT License | 5 votes |
package info.fotm.crawler

import akka.event.{LoggingAdapter, NoLogging}
import info.fotm.util.ObservableStream

import scala.collection.mutable

class UpdatesQueue[T](maxSize: Int = -1)(implicit ordering: Ordering[T], log: LoggingAdapter = NoLogging)
  extends ObservableStream[(T, T)] {

  val history = mutable.TreeSet.empty[T] // type parameter required; bare .empty would not infer T

  def process(current: T): Unit = {
    if (history.add(current)) {
      val before = history.until(current)
      val after = history.from(current).tail

      val strBefore = before.toIndexedSeq.map(_ => "_").mkString
      val strAfter = after.toIndexedSeq.map(_ => "_").mkString

      val prev: Option[T] = before.lastOption
      prev.foreach { p =>
        log.debug("Signaling _X")
        publish(p, current)
      }

      val next = history.from(current).tail.headOption
      next.foreach { n =>
        log.debug("Signaling X_")
        publish(current, n)
      }

      if (maxSize != -1 && history.size > maxSize) {
        history -= history.head
      }

      log.debug(s"History queue (${history.size}): ${strBefore}X${strAfter}")
    } else {
      log.debug("Update already in history.")
    }
  }
}
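The neighbour lookup that drives publish can be seen with a bare sorted set: until gives the nearest smaller element, from(...).tail the nearest larger one, both in O(log n). A sketch (demo names invented):

import scala.collection.mutable

object NeighbourDemo extends App {
  val history = mutable.TreeSet.empty[Int]

  Seq(10, 30, 20).foreach { x =>
    history += x
    val prev = history.until(x).lastOption     // nearest smaller element
    val next = history.from(x).tail.headOption // nearest larger element
    println(s"inserted $x: prev=$prev next=$next")
  }
}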
Example 66
Source File: Endpoints.scala From akka-http-microservice-templates with MIT License | 5 votes |
package io.github.gabfssilva.endpoints

import java.lang.System.currentTimeMillis

import akka.actor.ActorSystem
import akka.event.{Logging, LoggingAdapter}
import akka.http.scaladsl.model.HttpRequest
import akka.http.scaladsl.server.Directives._
import akka.http.scaladsl.server.Route
import akka.http.scaladsl.server.RouteResult.Complete
import akka.http.scaladsl.server.directives.{DebuggingDirectives, LogEntry, LoggingMagnet}
import akka.http.scaladsl.settings.RoutingSettings
import akka.stream.{ActorMaterializer, Materializer}

import scala.concurrent.ExecutionContext

class Endpoints(greetingEndpoint: GreetingEndpoint, healthCheckEndpoint: HealthCheckEndpoint) {
  def routes(implicit sys: ActorSystem, mat: ActorMaterializer, ec: ExecutionContext) =
    loggableRoute {
      Route.seal {
        greetingEndpoint.greetingRoute ~ healthCheckEndpoint.healthCheckRoute
      }
    }

  def logRequestAndResponse(loggingAdapter: LoggingAdapter, before: Long)(req: HttpRequest)(res: Any): Unit = {
    val entry = res match {
      case Complete(resp) =>
        val message =
          s"{path=${req.uri}, method=${req.method.value}, status=${resp.status.intValue()}, elapsedTime=${currentTimeMillis() - before}}"
        LogEntry(message, Logging.InfoLevel)
      case other =>
        LogEntry(other, Logging.InfoLevel)
    }

    entry.logTo(loggingAdapter)
  }

  def loggableRoute(route: Route)(implicit m: Materializer, ex: ExecutionContext, routingSettings: RoutingSettings): Route = {
    DebuggingDirectives.logRequestResult(LoggingMagnet(log => {
      val requestTimestamp = currentTimeMillis()
      logRequestAndResponse(log, requestTimestamp)
    }))(route)
  }
}
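A common variant of logRequestAndResponse returns Option[LogEntry] and picks the level from the response status; akka-http accepts such a function through the same logRequestResult directive. A sketch, not from this project:

import akka.event.Logging
import akka.http.scaladsl.model.HttpRequest
import akka.http.scaladsl.server.RouteResult
import akka.http.scaladsl.server.RouteResult.Complete
import akka.http.scaladsl.server.directives.{DebuggingDirectives, LogEntry}

object AccessLog {
  def accessLogEntry(req: HttpRequest)(res: RouteResult): Option[LogEntry] = res match {
    case Complete(resp) if resp.status.intValue >= 500 =>
      Some(LogEntry(s"${req.method.value} ${req.uri.path} -> ${resp.status}", Logging.WarningLevel))
    case Complete(resp) =>
      Some(LogEntry(s"${req.method.value} ${req.uri.path} -> ${resp.status}", Logging.InfoLevel))
    case _ => None // rejections are left to other log lines
  }

  // wired in with the same directive used above
  val logged = DebuggingDirectives.logRequestResult(accessLogEntry _)
}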
Example 67
Source File: TestSpec.scala From akka-serialization-test with Apache License 2.0 | 5 votes |
package com.github.dnvriend

import akka.actor.{ ActorRef, ActorSystem, PoisonPill }
import akka.event.{ Logging, LoggingAdapter }
import akka.serialization.SerializationExtension
import akka.stream.{ ActorMaterializer, Materializer }
import akka.testkit.TestProbe
import akka.util.Timeout
import org.scalatest.concurrent.{ Eventually, ScalaFutures }
import org.scalatest.prop.PropertyChecks
import org.scalatest.{ BeforeAndAfterAll, FlatSpec, GivenWhenThen, Matchers }

import scala.concurrent.duration._
import scala.concurrent.{ ExecutionContext, Future }
import scala.util.Try

trait TestSpec extends FlatSpec
    with Matchers
    with GivenWhenThen
    with ScalaFutures
    with BeforeAndAfterAll
    with Eventually
    with PropertyChecks
    with AkkaPersistenceQueries
    with AkkaStreamUtils
    with InMemoryCleanup {

  implicit val timeout: Timeout = Timeout(10.seconds)
  implicit val system: ActorSystem = ActorSystem()
  implicit val ec: ExecutionContext = system.dispatcher
  implicit val mat: Materializer = ActorMaterializer()
  implicit val log: LoggingAdapter = Logging(system, this.getClass)
  implicit val pc: PatienceConfig = PatienceConfig(timeout = 50.seconds)

  val serialization = SerializationExtension(system)

  implicit class FutureToTry[T](f: Future[T]) {
    def toTry: Try[T] = Try(f.futureValue)
  }

  def killActors(actors: ActorRef*): Unit = {
    val probe = TestProbe()
    actors.foreach { actor ⇒
      probe watch actor
      actor ! PoisonPill
      probe expectTerminated actor
    }
  }

  override protected def afterAll(): Unit = {
    system.terminate()
    system.whenTerminated.toTry should be a 'success
  }
}
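Extending TestSpec requires the project-local mixins (AkkaPersistenceQueries, AkkaStreamUtils, InMemoryCleanup). A trimmed-down variant that keeps only what this snippet defines, to show the implicit log in a test (LoggingSpec is a hypothetical name):

import akka.actor.ActorSystem
import akka.event.{ Logging, LoggingAdapter }
import org.scalatest.{ BeforeAndAfterAll, FlatSpec, Matchers }

class LoggingSpec extends FlatSpec with Matchers with BeforeAndAfterAll {
  implicit val system: ActorSystem = ActorSystem()
  implicit val log: LoggingAdapter = Logging(system, this.getClass)

  "the adapter" should "be usable from a test" in {
    log.info("running {}", "LoggingSpec")
    succeed
  }

  override protected def afterAll(): Unit = system.terminate()
}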