akka.stream.ActorMaterializer Scala Examples
The following examples show how to use akka.stream.ActorMaterializer.
Each example is taken from an open-source project; the source file, project, and license are noted above the code.
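Before the project examples, here is a minimal, self-contained sketch of the typical setup (the object and value names are illustrative, not taken from any project below): an ActorMaterializer is created from an implicit ActorSystem and is the component that turns a stream blueprint into running actors. Note that in Akka 2.6+ ActorMaterializer is deprecated and a default materializer is provided by the ActorSystem itself; the examples on this page target the earlier Akka 2.x APIs.

import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.{Sink, Source}

object MinimalMaterializerExample extends App {
  implicit val system: ActorSystem = ActorSystem("example")
  // The materializer allocates the actors that will run the stream.
  implicit val materializer: ActorMaterializer = ActorMaterializer()
  import system.dispatcher

  Source(1 to 10)
    .map(_ * 2)
    .runWith(Sink.foreach(println))      // materialization happens at run/runWith
    .onComplete(_ => system.terminate()) // stop the system once the stream completes
}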
Example 1
Source File: IntegrationTest.scala (from kmq, Apache License 2.0)

package com.softwaremill.kmq.redelivery

import java.time.Duration
import java.util.Random

import akka.actor.ActorSystem
import akka.kafka.scaladsl.{Consumer, Producer}
import akka.kafka.{ConsumerSettings, ProducerMessage, ProducerSettings, Subscriptions}
import akka.stream.ActorMaterializer
import akka.testkit.TestKit
import com.softwaremill.kmq._
import com.softwaremill.kmq.redelivery.infrastructure.KafkaSpec
import org.apache.kafka.clients.consumer.ConsumerConfig
import org.apache.kafka.clients.producer.{ProducerConfig, ProducerRecord}
import org.apache.kafka.common.serialization.StringDeserializer
import org.scalatest.concurrent.Eventually
import org.scalatest.time.{Seconds, Span}
import org.scalatest.{BeforeAndAfterAll, FlatSpecLike, Matchers}

import scala.collection.mutable.ArrayBuffer

class IntegrationTest extends TestKit(ActorSystem("test-system")) with FlatSpecLike with KafkaSpec
  with BeforeAndAfterAll with Eventually with Matchers {

  implicit val materializer = ActorMaterializer()
  import system.dispatcher

  "KMQ" should "resend message if not committed" in {
    val bootstrapServer = s"localhost:${testKafkaConfig.kafkaPort}"
    val kmqConfig = new KmqConfig("queue", "markers", "kmq_client", "kmq_redelivery",
      Duration.ofSeconds(1).toMillis, 1000)

    val consumerSettings = ConsumerSettings(system, new StringDeserializer, new StringDeserializer)
      .withBootstrapServers(bootstrapServer)
      .withGroupId(kmqConfig.getMsgConsumerGroupId)
      .withProperty(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest")

    val markerProducerSettings = ProducerSettings(system,
      new MarkerKey.MarkerKeySerializer(), new MarkerValue.MarkerValueSerializer())
      .withBootstrapServers(bootstrapServer)
      .withProperty(ProducerConfig.PARTITIONER_CLASS_CONFIG, classOf[ParititionFromMarkerKey].getName)
    val markerProducer = markerProducerSettings.createKafkaProducer()

    val random = new Random()

    lazy val processedMessages = ArrayBuffer[String]()
    lazy val receivedMessages = ArrayBuffer[String]()

    val control = Consumer.committableSource(consumerSettings, Subscriptions.topics(kmqConfig.getMsgTopic)) // 1. get messages from topic
      .map { msg =>
        ProducerMessage.Message(
          new ProducerRecord[MarkerKey, MarkerValue](kmqConfig.getMarkerTopic,
            MarkerKey.fromRecord(msg.record), new StartMarker(kmqConfig.getMsgTimeoutMs)), msg)
      }
      .via(Producer.flow(markerProducerSettings, markerProducer)) // 2. write the "start" marker
      .map(_.message.passThrough)
      .mapAsync(1) { msg =>
        msg.committableOffset.commitScaladsl().map(_ => msg.record) // this should be batched
      }
      .map { msg =>
        receivedMessages += msg.value
        msg
      }
      .filter(_ => random.nextInt(5) != 0)
      .map { processedMessage =>
        processedMessages += processedMessage.value
        new ProducerRecord[MarkerKey, MarkerValue](kmqConfig.getMarkerTopic,
          MarkerKey.fromRecord(processedMessage), EndMarker.INSTANCE)
      }
      .to(Producer.plainSink(markerProducerSettings, markerProducer)) // 5. write "end" markers
      .run()

    val redeliveryHook = RedeliveryTracker.start(new KafkaClients(bootstrapServer), kmqConfig)

    val messages = (0 to 20).map(_.toString)
    messages.foreach(msg => sendToKafka(kmqConfig.getMsgTopic, msg))

    eventually {
      receivedMessages.size should be > processedMessages.size
      processedMessages.sortBy(_.toInt).distinct shouldBe messages
    }(PatienceConfig(timeout = Span(15, Seconds)), implicitly)

    redeliveryHook.close()
    control.shutdown()
  }

  override def afterAll(): Unit = {
    super.afterAll()
    TestKit.shutdownActorSystem(system)
  }
}
Example 2
Source File: ModelService.scala (from reactive-machine-learning-systems, MIT License)

package com.reactivemachinelearning

import akka.actor.ActorSystem
import akka.event.{Logging, LoggingAdapter}
import akka.http.scaladsl.Http
import akka.http.scaladsl.marshalling.ToResponseMarshallable
import akka.http.scaladsl.model.StatusCodes._
import akka.http.scaladsl.server.Directives._
import akka.stream.{ActorMaterializer, Materializer}
//import spray.json._
import spray.json.DefaultJsonProtocol

import scala.concurrent.{ExecutionContextExecutor, Future}

case class Prediction(id: Long, timestamp: Long, value: Double)

trait Protocols extends DefaultJsonProtocol {
  implicit val ipInfoFormat = jsonFormat3(Prediction.apply)
}

trait Service extends Protocols {
  implicit val system: ActorSystem

  implicit def executor: ExecutionContextExecutor

  implicit val materializer: Materializer

  val logger: LoggingAdapter

  // private def parseFeatures(features: String): Map[Long, Double] = {
  //   features.parseJson.convertTo[Map[Long, Double]]
  // }

  def predict(features: String): Future[Prediction] = {
    Future(Prediction(123, 456, 0.5))
  }

  val routes = {
    logRequestResult("predictive-service") {
      pathPrefix("ip") {
        (get & path(Segment)) { features =>
          complete {
            predict(features).map[ToResponseMarshallable] {
              // case prediction: Prediction => prediction
              case _ => BadRequest
            }
          }
        }
      }
    }
  }
}

object PredictiveService extends App with Service {
  override implicit val system = ActorSystem()
  override implicit val executor = system.dispatcher
  override implicit val materializer = ActorMaterializer()
  override val logger = Logging(system, getClass)

  Http().bindAndHandle(routes, "0.0.0.0", 9000)
}
Example 3
Source File: OAuthFailedSpec.scala (from kanadi, MIT License)

package org.zalando.kanadi

import akka.actor.ActorSystem
import akka.http.scaladsl.Http
import akka.stream.ActorMaterializer
import com.typesafe.config.ConfigFactory
import io.circe.Json
import org.mdedetrich.webmodels.{FlowId, OAuth2Token, OAuth2TokenProvider}
import org.specs2.Specification
import org.specs2.concurrent.ExecutionEnv
import org.specs2.execute.Skipped
import org.specs2.matcher.FutureMatchers
import org.specs2.specification.core.SpecStructure
import org.zalando.kanadi.api.{Events, Subscriptions}
import org.zalando.kanadi.models._

import scala.concurrent.Future
import scala.concurrent.duration._

class OAuthFailedSpec(implicit ec: ExecutionEnv) extends Specification with FutureMatchers with Config {
  val config = ConfigFactory.load()

  implicit val system = ActorSystem()
  implicit val http = Http()
  implicit val materializer = ActorMaterializer()

  val failingOauth2TokenProvider = Some(
    OAuth2TokenProvider(() => Future.successful(OAuth2Token("Failing token")))
  )

  val subscriptionsClient = Subscriptions(nakadiUri, failingOauth2TokenProvider)
  val eventsClient = Events(nakadiUri, failingOauth2TokenProvider)

  override def is: SpecStructure = s2"""
    Call to subscriptions list should fail with invalid token $oAuthCallSubscriptions
    Call to publishEvents should fail with invalid token      $oAuthPublishEvents
    """

  def oAuthCallSubscriptions = Skipped("No way for current Nakadi docker image to detect \"wrong\" tokens")

  def oAuthPublishEvents = Skipped("No way for current Nakadi docker image to detect \"wrong\" tokens")
}
Example 4
Source File: SubscriptionsSpec.scala (from kanadi, MIT License)

package org.zalando.kanadi

import java.util.UUID

import akka.actor.ActorSystem
import akka.http.scaladsl.Http
import akka.stream.ActorMaterializer
import com.typesafe.config.ConfigFactory
import org.mdedetrich.webmodels.FlowId
import org.specs2.Specification
import org.specs2.concurrent.ExecutionEnv
import org.specs2.specification.core.SpecStructure
import org.specs2.specification.{AfterAll, BeforeAll}
import org.zalando.kanadi.api.{Category, EventType, EventTypes, Events, Subscription, Subscriptions}
import org.zalando.kanadi.models.{EventTypeName, SubscriptionId}

import scala.collection.parallel.mutable
import scala.concurrent.duration._
import scala.concurrent.{Await, Future}

class SubscriptionsSpec(implicit ec: ExecutionEnv) extends Specification with Config with BeforeAll with AfterAll {
  override def is: SpecStructure = sequential ^ s2"""
    Create enough subscriptions to ensure that pagination is used $createEnoughSubscriptionsToUsePagination
    """

  val config = ConfigFactory.load()

  implicit val system = ActorSystem()
  implicit val http = Http()
  implicit val materializer = ActorMaterializer()

  val eventTypeName = EventTypeName(s"Kanadi-Test-Event-${UUID.randomUUID().toString}")
  val OwningApplication = "KANADI"
  val consumerGroup: String = UUID.randomUUID().toString

  val subscriptionsClient = Subscriptions(nakadiUri, None)
  val eventsClient = Events(nakadiUri, None)
  val eventsTypesClient = EventTypes(nakadiUri, None)
  val subscriptionIds: mutable.ParSet[SubscriptionId] = mutable.ParSet.empty

  eventTypeName.pp
  s"Consumer Group: $consumerGroup".pp

  def createEventType = eventsTypesClient.create(EventType(eventTypeName, OwningApplication, Category.Business))

  override def beforeAll = Await.result(createEventType, 10 seconds)

  override def afterAll = {
    Await.result(
      for {
        res1 <- Future.sequence(subscriptionIds.toList.map(s => subscriptionsClient.delete(s)))
        res2 <- eventsTypesClient.delete(eventTypeName)
      } yield (res1, res2),
      10 seconds
    )
    ()
  }

  def createEnoughSubscriptionsToUsePagination = (name: String) => {
    implicit val flowId: FlowId = Utils.randomFlowId()
    flowId.pp(name)

    val createdSubscriptions = Future.sequence(for {
      _ <- 1 to 22
      subscription = subscriptionsClient.create(
        Subscription(None, s"$OwningApplication-${UUID.randomUUID().toString}", Some(List(eventTypeName))))
    } yield {
      subscription.foreach { s =>
        subscriptionIds += s.id.get
      }
      subscription
    })

    val retrievedSubscriptions = (for {
      subscriptions <- createdSubscriptions
      retrievedSubscription = Future.sequence(subscriptions.map { subscription =>
        subscriptionsClient.createIfDoesntExist(subscription)
      })
    } yield retrievedSubscription).flatMap(a => a)

    Await.result(createdSubscriptions, 10 seconds) mustEqual Await.result(retrievedSubscriptions, 10 seconds)
  }
}
Example 5
Source File: GroupedAverage.scala (from streams-tests, Apache License 2.0)

package com.softwaremill.streams

import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.{Sink, Source}
import com.softwaremill.streams.util.Timed._

import scala.concurrent.Await
import scala.concurrent.duration._
import scalaz.stream.{Process, Process0}

trait GroupedAverage {
  def run(input: () => Iterator[Int]): Option[Double]
}

object AkkaStreamsGroupedAverage extends GroupedAverage {
  private lazy implicit val system = ActorSystem()

  def run(input: () => Iterator[Int]): Option[Double] = {
    implicit val mat = ActorMaterializer()

    val r = Source.fromIterator(input)
      .mapConcat(n => List(n, n + 1))
      .filter(_ % 17 != 0)
      .grouped(10)
      .map(group => group.sum / group.size.toDouble)
      .runWith(Sink.fold[Option[Double], Double](None)((_, el) => Some(el)))

    Await.result(r, 1.hour)
  }

  def shutdown() = {
    system.terminate()
  }
}

object ScalazStreamsGroupedAverage extends GroupedAverage {
  def run(input: () => Iterator[Int]): Option[Double] = {
    processFromIterator(input)
      .flatMap(n => Process(n, n + 1))
      .filter(_ % 17 != 0)
      .chunk(10)
      .map(group => group.sum / group.size.toDouble)
      .toSource.runLast.run
  }

  private def processFromIterator[T](input: () => Iterator[T]): Process0[T] = Process.suspend {
    val iterator = input()
    def go(): Process0[T] = {
      if (iterator.hasNext) {
        Process.emit(iterator.next()) ++ go()
      } else Process.halt
    }
    go()
  }
}

object GroupedAverageRunner extends App {
  val impls = List(AkkaStreamsGroupedAverage, ScalazStreamsGroupedAverage)
  val ranges = List(1000, 100000, 1000000, 10000000)

  val tests = for {
    impl <- impls
    range <- ranges
  } yield (
    s"${if (impl == ScalazStreamsGroupedAverage) "scalaz" else "akka"}, 1->$range",
    () => impl.run(() => Iterator.range(1, range + 1)).toString)

  runTests(tests, 3)

  AkkaStreamsGroupedAverage.shutdown()
}
Example 6
Source File: TransferTransformFile.scala (from streams-tests, Apache License 2.0)

package com.softwaremill.streams

import java.io.File

import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import akka.stream.io.Framing
import akka.stream.scaladsl.{FileIO, Keep}
import akka.util.ByteString
import com.softwaremill.streams.util.TestFiles
import com.softwaremill.streams.util.Timed._

import scala.concurrent.{Await, Future}
import scalaz.stream.{io, text}
import scala.concurrent.duration._

trait TransferTransformFile {
  def run(from: File, to: File): Long
}

object AkkaStreamsTransferTransformFile extends TransferTransformFile {
  private lazy implicit val system = ActorSystem()

  override def run(from: File, to: File) = {
    implicit val mat = ActorMaterializer()

    val r: Future[Long] = FileIO.fromFile(from)
      .via(Framing.delimiter(ByteString("\n"), 1048576))
      .map(_.utf8String)
      .filter(!_.contains("#!@"))
      .map(_.replace("*", "0"))
      .intersperse("\n")
      .map(ByteString(_))
      .toMat(FileIO.toFile(to))(Keep.right)
      .run()

    Await.result(r, 1.hour)
  }

  def shutdown() = {
    system.terminate()
  }
}

object ScalazStreamsTransferTransformFile extends TransferTransformFile {
  override def run(from: File, to: File) = {
    io.linesR(from.getAbsolutePath)
      .filter(!_.contains("#!@"))
      .map(_.replace("*", "0"))
      .intersperse("\n")
      .pipe(text.utf8Encode)
      .to(io.fileChunkW(to.getAbsolutePath))
      .run
      .run

    to.length()
  }
}

object TransferTransformFileRunner extends App {
  def runTransfer(ttf: TransferTransformFile, sizeMB: Int): String = {
    val output = File.createTempFile("fft", "txt")
    try {
      ttf.run(TestFiles.testFile(sizeMB), output).toString
    } finally output.delete()
  }

  val tests = List(
    (ScalazStreamsTransferTransformFile, 10),
    (ScalazStreamsTransferTransformFile, 100),
    (ScalazStreamsTransferTransformFile, 500),
    (AkkaStreamsTransferTransformFile, 10),
    (AkkaStreamsTransferTransformFile, 100),
    (AkkaStreamsTransferTransformFile, 500)
  )

  runTests(tests.map { case (ttf, sizeMB) =>
    (s"${if (ttf == ScalazStreamsTransferTransformFile) "scalaz" else "akka"}, $sizeMB MB",
      () => runTransfer(ttf, sizeMB))
  }, 3)

  AkkaStreamsTransferTransformFile.shutdown()
}
Example 7
Source File: SlowConsumer.scala (from streams-tests, Apache License 2.0)

package com.softwaremill.streams

import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.Source

import scala.concurrent.Await
import scala.concurrent.duration._
import scalaz.concurrent.{Strategy, Task}
import scalaz.stream.{Process, async, time}

object AkkaSlowConsumer extends App {
  implicit val system = ActorSystem()
  implicit val mat = ActorMaterializer()

  try {
    val future = Source.tick(0.millis, 100.millis, 1)
      .conflate(identity)(_ + _)
      .runForeach { el =>
        Thread.sleep(1000L)
        println(el)
      }

    Await.result(future, 1.hour)
  } finally system.terminate()
}

object ScalazSlowConsumer extends App {
  implicit val scheduler = Strategy.DefaultTimeoutScheduler

  val queue = async.boundedQueue[Int](10000)

  val enqueueProcess = time.awakeEvery(100.millis)
    .map(_ => 1)
    .to(queue.enqueue)

  val dequeueProcess = queue.dequeueAvailable
    .map(_.sum)
    .flatMap(el => Process.eval_(Task {
      Thread.sleep(1000L)
      println(el)
    }))

  (enqueueProcess merge dequeueProcess).run.run
}
Example 8
Source File: JsonRpcHttpsServer.scala (from mantis, Apache License 2.0)

package io.iohk.ethereum.jsonrpc.server

import java.io.{File, FileInputStream}
import java.security.{KeyStore, SecureRandom}
import javax.net.ssl.{KeyManagerFactory, SSLContext, TrustManagerFactory}

import akka.actor.ActorSystem
import akka.http.scaladsl.model.headers.HttpOriginRange
import akka.http.scaladsl.{ConnectionContext, Http}
import akka.stream.ActorMaterializer
import io.iohk.ethereum.jsonrpc.JsonRpcController
import io.iohk.ethereum.jsonrpc.server.JsonRpcHttpsServer.HttpsSetupResult
import io.iohk.ethereum.jsonrpc.server.JsonRpcServer.JsonRpcServerConfig
import io.iohk.ethereum.utils.Logger

import scala.concurrent.ExecutionContext.Implicits.global
import scala.io.Source
import scala.util.{Failure, Success, Try}

class JsonRpcHttpsServer(val jsonRpcController: JsonRpcController, config: JsonRpcServerConfig,
                         secureRandom: SecureRandom)(implicit val actorSystem: ActorSystem)
  extends JsonRpcServer with Logger {

  def run(): Unit = {
    implicit val materializer = ActorMaterializer()

    val maybeSslContext = validateCertificateFiles(config.certificateKeyStorePath,
      config.certificateKeyStoreType, config.certificatePasswordFile).flatMap {
      case (keystorePath, keystoreType, passwordFile) =>
        val passwordReader = Source.fromFile(passwordFile)
        try {
          val password = passwordReader.getLines().mkString
          obtainSSLContext(keystorePath, keystoreType, password)
        } finally {
          passwordReader.close()
        }
    }

    val maybeHttpsContext = maybeSslContext.map(sslContext => ConnectionContext.https(sslContext))

    maybeHttpsContext match {
      case Right(httpsContext) =>
        Http().setDefaultServerHttpContext(httpsContext)
        val bindingResultF = Http().bindAndHandle(route, config.interface, config.port,
          connectionContext = httpsContext)

        bindingResultF onComplete {
          case Success(serverBinding) => log.info(s"JSON RPC HTTPS server listening on ${serverBinding.localAddress}")
          case Failure(ex) => log.error("Cannot start JSON HTTPS RPC server", ex)
        }
      case Left(error) => log.error(s"Cannot start JSON HTTPS RPC server due to: $error")
    }
  }

  private def validateCertificateFiles(maybeKeystorePath: Option[String],
                                       maybeKeystoreType: Option[String],
                                       maybePasswordFile: Option[String]): HttpsSetupResult[(String, String, String)] =
    (maybeKeystorePath, maybeKeystoreType, maybePasswordFile) match {
      case (Some(keystorePath), Some(keystoreType), Some(passwordFile)) =>
        val keystoreDirMissing = !new File(keystorePath).isFile
        val passwordFileMissing = !new File(passwordFile).isFile
        if (keystoreDirMissing && passwordFileMissing)
          Left("Certificate keystore path and password file configured but files are missing")
        else if (keystoreDirMissing)
          Left("Certificate keystore path configured but file is missing")
        else if (passwordFileMissing)
          Left("Certificate password file configured but file is missing")
        else
          Right((keystorePath, keystoreType, passwordFile))
      case _ =>
        Left("HTTPS requires: certificate-keystore-path, certificate-keystore-type and certificate-password-file to be configured")
    }

  override def corsAllowedOrigins: HttpOriginRange = config.corsAllowedOrigins
}

object JsonRpcHttpsServer {
  type HttpsSetupResult[T] = Either[String, T]
}
Example 9
Source File: JsonRpcHttpServer.scala (from mantis, Apache License 2.0)

package io.iohk.ethereum.jsonrpc.server

import akka.actor.ActorSystem
import akka.http.scaladsl.Http
import akka.http.scaladsl.model.headers.HttpOriginRange
import akka.stream.ActorMaterializer
import io.iohk.ethereum.jsonrpc._
import io.iohk.ethereum.jsonrpc.server.JsonRpcServer.JsonRpcServerConfig
import io.iohk.ethereum.utils.Logger

import scala.concurrent.ExecutionContext.Implicits.global
import scala.util.{Failure, Success}

class JsonRpcHttpServer(val jsonRpcController: JsonRpcController, config: JsonRpcServerConfig)
                       (implicit val actorSystem: ActorSystem)
  extends JsonRpcServer with Logger {

  def run(): Unit = {
    implicit val materializer = ActorMaterializer()

    val bindingResultF = Http(actorSystem).bindAndHandle(route, config.interface, config.port)

    bindingResultF onComplete {
      case Success(serverBinding) => log.info(s"JSON RPC HTTP server listening on ${serverBinding.localAddress}")
      case Failure(ex) => log.error("Cannot start JSON HTTP RPC server", ex)
    }
  }

  override def corsAllowedOrigins: HttpOriginRange = config.corsAllowedOrigins
}
Example 10
Source File: TestSpec.scala (from akka-serialization-test, Apache License 2.0)

package com.github.dnvriend

import akka.actor.{ ActorRef, ActorSystem, PoisonPill }
import akka.event.{ Logging, LoggingAdapter }
import akka.serialization.SerializationExtension
import akka.stream.{ ActorMaterializer, Materializer }
import akka.testkit.TestProbe
import akka.util.Timeout
import org.scalatest.concurrent.{ Eventually, ScalaFutures }
import org.scalatest.prop.PropertyChecks
import org.scalatest.{ BeforeAndAfterAll, FlatSpec, GivenWhenThen, Matchers }

import scala.concurrent.duration._
import scala.concurrent.{ ExecutionContext, Future }
import scala.util.Try

trait TestSpec extends FlatSpec
  with Matchers
  with GivenWhenThen
  with ScalaFutures
  with BeforeAndAfterAll
  with Eventually
  with PropertyChecks
  with AkkaPersistenceQueries
  with AkkaStreamUtils
  with InMemoryCleanup {

  implicit val timeout: Timeout = Timeout(10.seconds)
  implicit val system: ActorSystem = ActorSystem()
  implicit val ec: ExecutionContext = system.dispatcher
  implicit val mat: Materializer = ActorMaterializer()
  implicit val log: LoggingAdapter = Logging(system, this.getClass)
  implicit val pc: PatienceConfig = PatienceConfig(timeout = 50.seconds)

  val serialization = SerializationExtension(system)

  implicit class FutureToTry[T](f: Future[T]) {
    def toTry: Try[T] = Try(f.futureValue)
  }

  def killActors(actors: ActorRef*): Unit = {
    val probe = TestProbe()
    actors.foreach { actor ⇒
      probe watch actor
      actor ! PoisonPill
      probe expectTerminated actor
    }
  }

  override protected def afterAll(): Unit = {
    system.terminate()
    system.whenTerminated.toTry should be a 'success
  }
}
Example 11
Source File: GraphOperation.scala (from incubator-s2graph, Apache License 2.0)

package org.apache.s2graph.counter.core.v2

import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import com.typesafe.config.Config
import org.apache.http.HttpStatus
import org.apache.s2graph.counter.config.S2CounterConfig
import org.apache.s2graph.counter.core.v2.ExactStorageGraph._
import org.asynchttpclient.DefaultAsyncHttpClientConfig
import org.slf4j.LoggerFactory
import play.api.libs.json.{JsObject, JsValue, Json}

import scala.concurrent.Await
import scala.concurrent.duration._

class GraphOperation(config: Config) {
  // using play-ws without play app
  implicit val materializer = ActorMaterializer.create(ActorSystem(getClass.getSimpleName))
  private val builder = new DefaultAsyncHttpClientConfig.Builder()
  private val wsClient = new play.api.libs.ws.ning.NingWSClient(builder.build)
  private val s2config = new S2CounterConfig(config)
  val s2graphUrl = s2config.GRAPH_URL
  private[counter] val log = LoggerFactory.getLogger(this.getClass)

  import scala.concurrent.ExecutionContext.Implicits.global

  def createLabel(json: JsValue): Boolean = {
    // fix counter label's schemaVersion
    val newJson = json.as[JsObject] ++ Json.obj("schemaVersion" -> "v2")
    val future = wsClient.url(s"$s2graphUrl/graphs/createLabel").post(newJson).map { resp =>
      resp.status match {
        case HttpStatus.SC_OK =>
          true
        case _ =>
          throw new RuntimeException(s"failed createLabel. errCode: ${resp.status} body: ${resp.body} query: $json")
      }
    }

    Await.result(future, 10 second)
  }

  def deleteLabel(label: String): Boolean = {
    val future = wsClient.url(s"$s2graphUrl/graphs/deleteLabel/$label").put("").map { resp =>
      resp.status match {
        case HttpStatus.SC_OK =>
          true
        case _ =>
          throw new RuntimeException(s"failed deleteLabel. errCode: ${resp.status} body: ${resp.body}")
      }
    }

    Await.result(future, 10 second)
  }
}
Example 12
Source File: Server.scala (from incubator-s2graph, Apache License 2.0)

package org.apache.s2graph.http

import java.time.Instant

import scala.language.postfixOps
import scala.concurrent.{Await, ExecutionContext, Future}
import scala.concurrent.duration.Duration
import scala.util.{Failure, Success}

import akka.actor.ActorSystem
import akka.http.scaladsl.Http
import akka.http.scaladsl.model.{ContentTypes, HttpEntity, HttpResponse, StatusCodes}
import akka.http.scaladsl.server.Route
import akka.http.scaladsl.server.Directives._
import akka.stream.ActorMaterializer
import com.typesafe.config.ConfigFactory
import org.apache.s2graph.core.S2Graph
import org.slf4j.LoggerFactory

object Server extends App
  with S2GraphTraversalRoute
  with S2GraphAdminRoute
  with S2GraphMutateRoute
  with S2GraphQLRoute {

  implicit val system: ActorSystem = ActorSystem("S2GraphHttpServer")
  implicit val materializer: ActorMaterializer = ActorMaterializer()
  implicit val executionContext: ExecutionContext = system.dispatcher

  val config = ConfigFactory.load()

  override val s2graph = new S2Graph(config)
  override val logger = LoggerFactory.getLogger(this.getClass)

  val port = sys.props.get("http.port").fold(8000)(_.toInt)
  val interface = sys.props.get("http.interface").fold("0.0.0.0")(identity)

  val startAt = System.currentTimeMillis()

  def uptime = System.currentTimeMillis() - startAt

  // note: closing brace of the JSON object restored; it was dropped in extraction
  def serverHealth =
    s"""{ "port": ${port}, "interface": "${interface}", "started_at": ${Instant.ofEpochMilli(startAt)}, "uptime": "${uptime} millis" }"""

  def health = HttpResponse(status = StatusCodes.OK, entity = HttpEntity(ContentTypes.`application/json`, serverHealth))

  // Allows you to determine routes to expose according to external settings.
  lazy val routes: Route = concat(
    pathPrefix("graphs")(traversalRoute),
    pathPrefix("mutate")(mutateRoute),
    pathPrefix("admin")(adminRoute),
    pathPrefix("graphql")(graphqlRoute),
    get(complete(health))
  )

  val binding: Future[Http.ServerBinding] = Http().bindAndHandle(routes, interface, port)
  binding.onComplete {
    case Success(bound) => logger.info(s"Server online at http://${bound.localAddress.getHostString}:${bound.localAddress.getPort}/")
    case Failure(e) => logger.error(s"Server could not start!", e)
  }

  scala.sys.addShutdownHook { () =>
    s2graph.shutdown()
    system.terminate()
    logger.info("System terminated")
  }

  Await.result(system.whenTerminated, Duration.Inf)
}
Example 13
Source File: ReadJournalDaoImpl.scala (from akka-persistence-dynamodb, Apache License 2.0)

package com.github.j5ik2o.akka.persistence.dynamodb.query.dao

import akka.NotUsed
import akka.actor.ActorSystem
import akka.persistence.PersistentRepr
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.{ Flow, Source }
import com.github.j5ik2o.akka.persistence.dynamodb.config.QueryPluginConfig
import com.github.j5ik2o.akka.persistence.dynamodb.journal.dao.{ DaoSupport, JournalRowReadDriver }
import com.github.j5ik2o.akka.persistence.dynamodb.journal.JournalRow
import com.github.j5ik2o.akka.persistence.dynamodb.metrics.MetricsReporter
import com.github.j5ik2o.akka.persistence.dynamodb.model.{ PersistenceId, SequenceNumber }
import com.github.j5ik2o.akka.persistence.dynamodb.serialization.FlowPersistentReprSerializer

import scala.collection.immutable.Set
import scala.concurrent.ExecutionContext
import scala.util.Try

class ReadJournalDaoImpl(
    queryProcessor: QueryProcessor,
    override protected val journalRowDriver: JournalRowReadDriver,
    pluginConfig: QueryPluginConfig,
    override val serializer: FlowPersistentReprSerializer[JournalRow],
    override protected val metricsReporter: Option[MetricsReporter]
)(implicit val ec: ExecutionContext, system: ActorSystem)
    extends ReadJournalDao
    with DaoSupport {

  implicit val mat = ActorMaterializer()

  override def allPersistenceIds(max: Long): Source[PersistenceId, NotUsed] =
    queryProcessor.allPersistenceIds(max)

  private def perfectlyMatchTag(tag: String, separator: String): Flow[JournalRow, JournalRow, NotUsed] =
    Flow[JournalRow].filter(_.tags.exists(tags => tags.split(separator).contains(tag)))

  override def eventsByTag(
      tag: String,
      offset: Long,
      maxOffset: Long,
      max: Long
  ): Source[Try[(PersistentRepr, Set[String], Long)], NotUsed] =
    eventsByTagAsJournalRow(tag, offset, maxOffset, max)
      .via(perfectlyMatchTag(tag, pluginConfig.tagSeparator))
      .via(serializer.deserializeFlowAsTry)

  override def eventsByTagAsJournalRow(
      tag: String,
      offset: Long,
      maxOffset: Long,
      max: Long
  ): Source[JournalRow, NotUsed] =
    queryProcessor.eventsByTagAsJournalRow(tag, offset, maxOffset, max)

  override def journalSequence(offset: Long, limit: Long): Source[Long, NotUsed] =
    queryProcessor.journalSequence(offset, limit)

  override def getMessagesAsJournalRow(
      persistenceId: PersistenceId,
      fromSequenceNr: SequenceNumber,
      toSequenceNr: SequenceNumber,
      max: Long,
      deleted: Option[Boolean]
  ): Source[JournalRow, NotUsed] =
    journalRowDriver.getJournalRows(persistenceId, fromSequenceNr, toSequenceNr, max, deleted)

  override def maxJournalSequence(): Source[Long, NotUsed] =
    Source.single(Long.MaxValue)
}
Example 14
Source File: DynamoDBSnapshotStore.scala (from akka-persistence-dynamodb, Apache License 2.0)

package com.github.j5ik2o.akka.persistence.dynamodb.snapshot

import akka.actor.ExtendedActorSystem
import akka.persistence.snapshot.SnapshotStore
import akka.persistence.{ SelectedSnapshot, SnapshotMetadata, SnapshotSelectionCriteria }
import akka.serialization.SerializationExtension
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.{ Sink, Source }
import com.github.j5ik2o.akka.persistence.dynamodb.config.SnapshotPluginConfig
import com.github.j5ik2o.akka.persistence.dynamodb.model.{ PersistenceId, SequenceNumber }
import com.github.j5ik2o.akka.persistence.dynamodb.snapshot.dao.{ SnapshotDao, SnapshotDaoImpl }
import com.github.j5ik2o.akka.persistence.dynamodb.utils.V2DynamoDbClientBuilderUtils
import com.github.j5ik2o.reactive.aws.dynamodb.DynamoDbAsyncClient
import com.typesafe.config.Config
import software.amazon.awssdk.services.dynamodb.{ DynamoDbAsyncClient => JavaDynamoDbAsyncClient }

import scala.concurrent.{ ExecutionContext, Future }

object DynamoDBSnapshotStore {

  def toSelectedSnapshot(tupled: (SnapshotMetadata, Any)): SelectedSnapshot = tupled match {
    case (meta: SnapshotMetadata, snapshot: Any) => SelectedSnapshot(meta, snapshot)
  }
}

class DynamoDBSnapshotStore(config: Config) extends SnapshotStore {
  import DynamoDBSnapshotStore._

  implicit val ec: ExecutionContext = context.dispatcher
  implicit val system: ExtendedActorSystem = context.system.asInstanceOf[ExtendedActorSystem]
  implicit val mat = ActorMaterializer()

  private val serialization = SerializationExtension(system)
  protected val pluginConfig: SnapshotPluginConfig = SnapshotPluginConfig.fromConfig(config)
  protected val javaClient: JavaDynamoDbAsyncClient =
    V2DynamoDbClientBuilderUtils.setupAsync(system.dynamicAccess, pluginConfig).build()
  protected val asyncClient: DynamoDbAsyncClient = DynamoDbAsyncClient(javaClient)
  protected val snapshotDao: SnapshotDao =
    new SnapshotDaoImpl(asyncClient, serialization, pluginConfig)

  override def loadAsync(
      persistenceId: String,
      criteria: SnapshotSelectionCriteria
  ): Future[Option[SelectedSnapshot]] = {
    val result = criteria match {
      case SnapshotSelectionCriteria(Long.MaxValue, Long.MaxValue, _, _) =>
        snapshotDao.latestSnapshot(PersistenceId(persistenceId))
      case SnapshotSelectionCriteria(Long.MaxValue, maxTimestamp, _, _) =>
        snapshotDao.snapshotForMaxTimestamp(PersistenceId(persistenceId), maxTimestamp)
      case SnapshotSelectionCriteria(maxSequenceNr, Long.MaxValue, _, _) =>
        snapshotDao.snapshotForMaxSequenceNr(PersistenceId(persistenceId), SequenceNumber(maxSequenceNr))
      case SnapshotSelectionCriteria(maxSequenceNr, maxTimestamp, _, _) =>
        snapshotDao.snapshotForMaxSequenceNrAndMaxTimestamp(
          PersistenceId(persistenceId),
          SequenceNumber(maxSequenceNr),
          maxTimestamp
        )
      case _ => Source.empty
    }
    result.map(_.map(toSelectedSnapshot)).runWith(Sink.head)
  }

  override def saveAsync(metadata: SnapshotMetadata, snapshot: Any): Future[Unit] =
    snapshotDao.save(metadata, snapshot).runWith(Sink.ignore).map(_ => ())

  override def deleteAsync(metadata: SnapshotMetadata): Future[Unit] =
    snapshotDao
      .delete(PersistenceId(metadata.persistenceId), SequenceNumber(metadata.sequenceNr)).map(_ => ()).runWith(
        Sink.ignore
      ).map(_ => ())

  override def deleteAsync(persistenceId: String, criteria: SnapshotSelectionCriteria): Future[Unit] = {
    val pid = PersistenceId(persistenceId)
    criteria match {
      case SnapshotSelectionCriteria(Long.MaxValue, Long.MaxValue, _, _) =>
        snapshotDao.deleteAllSnapshots(pid).runWith(Sink.ignore).map(_ => ())
      case SnapshotSelectionCriteria(Long.MaxValue, maxTimestamp, _, _) =>
        snapshotDao.deleteUpToMaxTimestamp(pid, maxTimestamp).runWith(Sink.ignore).map(_ => ())
      case SnapshotSelectionCriteria(maxSequenceNr, Long.MaxValue, _, _) =>
        snapshotDao
          .deleteUpToMaxSequenceNr(pid, SequenceNumber(maxSequenceNr)).runWith(Sink.ignore).map(_ => ())
      case SnapshotSelectionCriteria(maxSequenceNr, maxTimestamp, _, _) =>
        snapshotDao
          .deleteUpToMaxSequenceNrAndMaxTimestamp(pid, SequenceNumber(maxSequenceNr), maxTimestamp).runWith(
            Sink.ignore
          ).map(_ => ())
      case _ => Future.successful(())
    }
  }
}
Example 15
Source File: NominatimLookup.scala (from daf-semantics, Apache License 2.0)

package examples.nominatim

import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.Await
import scala.concurrent.duration.Duration

import play.api.libs.ws.ahc.AhcWSClient
import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import com.fasterxml.jackson.databind.ObjectMapper
import com.fasterxml.jackson.databind.JsonNode
import clients.HTTPClient

// SEE: Prefix.cc Lookup - http://prefix.cc/foaf.file.json
class NominatimLookup {

  val http = HTTPClient

  def start() {
    http.start()
  }

  def stop() {
    http.stop()
  }

  def nominatim(address: String) = {
    val url = "http://nominatim.openstreetmap.org/search"

    val parameters = Map(
      "q" -> address,
      "addressdetails" -> "1",
      "format" -> "json",
      "limit" -> "4",
      "addressdetails" -> "1",
      "dedupe" -> "1",
      "extratags" -> "1",
      "namedetails" -> "1").toList

    val ret = http.ws.url(url)
      .withQueryString(parameters: _*)
      .get()
      .map { response =>
        response.status match {
          case 200 => response.body
          case _ => "{}"
        }
      }

    ret
  }
}

object MainNominatimLookup extends App {

  import scala.collection.JavaConversions._
  import scala.collection.JavaConverters._

  val nominatim = new NominatimLookup
  nominatim.start()

  val json_mapper = new ObjectMapper
  val json_reader = json_mapper.reader()

  val result = Await.ready(nominatim.nominatim("135 pilkington avenue, birmingham"), Duration.Inf)
    .value.get.get

  val json_list: List[JsonNode] = json_reader.readTree(result).elements().toList

  // simulated output...
  if (json_list.size > 0) {
    println(s"RESULTS [${json_list.size}]")
    json_list
      .zipWithIndex
      .foreach {
        case (node, i) =>
          println(s"result ${i + 1}")
          println(node.get("place_id"))
          println(node.get("address").get("road").asText() + ", " + node.get("address").get("house_number").asText())
      }
  } else {
    println("cannot find results...")
  }

  nominatim.stop()
}
Example 16
Source File: TestingHttpApi.scala (from daf-semantics, Apache License 2.0)

package it.almawave.linkeddata.kb.http

import play.api.inject.guice.GuiceApplicationBuilder
import org.junit.Test
import org.junit.After
import play.api.Application
import org.junit.Before
import it.almawave.linkeddata.kb.utils.JSONHelper
import play.api.libs.ws.WSClient
import org.asynchttpclient.DefaultAsyncHttpClient
import play.api.libs.ws.ssl.SystemConfiguration
import akka.stream.ActorMaterializer
import play.api.libs.ws.ahc.AhcWSClient
import scala.concurrent.Await
import scala.concurrent.duration.Duration
import java.net.URL
import com.typesafe.config.ConfigFactory

class TestingHttpApi {

  var app: Application = null
  var conf = ConfigFactory.empty()
  var ws: WSClient = null
  var app_url = new URL("http://localhost:8080")

  @Test
  def testing_contexts() {
    // curl -X GET http://localhost:8999/kb/v1/prefixes/lookup?prefix=no_pref
    //   -H "accept: application/json"
    //   -H "content-type: application/json"

    val fut = ws.url(s"http://localhost:8999/kb/v1/prefixes/lookup")
      .withHeaders(("accept", "application/json"))
      .withHeaders(("content-type", "application/json"))
      .withFollowRedirects(true)
      .withQueryString(("prefix", "muapit"))
      .get()

    val results = Await.result(fut, Duration.Inf)
    println(results.body)
  }

  @Before
  def before() {
    app = GuiceApplicationBuilder()
      .build()

    conf = app.configuration.underlying

    // play.app.local.url
    // play.server.http.address
    // play.server.http.port

    println(JSONHelper.writeToString(conf.root().unwrapped()))

    app_url = new URL(conf.getString("app.local.url"))
    println(s"\n\nrunning at ${app_url}")

    val materializer = ActorMaterializer()(app.actorSystem)
    ws = AhcWSClient()(materializer)
  }

  @After
  def after() {
    ws.close()
    app.stop()
  }
}
Example 17
Source File: Akka.scala (from parquet4s, MIT License)

package com.github.mjakubowski84.parquet4s.indefinite

import akka.actor.ActorSystem
import akka.stream.{ActorMaterializer, Materializer}

import scala.concurrent.duration._
import scala.concurrent.{Await, ExecutionContext}

trait Akka {
  this: Logger =>

  implicit lazy val system: ActorSystem = ActorSystem()
  implicit lazy val materializer: Materializer = ActorMaterializer()
  implicit def executionContext: ExecutionContext = system.dispatcher

  def stopAkka(): Unit = {
    logger.info("Stopping Akka...")
    Await.ready(system.terminate(), 1.second)
  }
}
Example 18
Source File: WriteAndReadFilteredAkkaApp.scala (from parquet4s, MIT License)

package com.github.mjakubowski84.parquet4s.akka

import akka.actor.ActorSystem
import akka.stream.scaladsl.{Sink, Source}
import akka.stream.{ActorMaterializer, Materializer}
import com.github.mjakubowski84.parquet4s.{Col, ParquetReader, ParquetStreams}
import com.google.common.io.Files

import scala.concurrent.Future
import scala.util.Random

object WriteAndReadFilteredAkkaApp extends App {

  object Dict {
    val A = "A"
    val B = "B"
    val C = "C"
    val D = "D"

    val values: List[String] = List(A, B, C, D)
    def random: String = values(Random.nextInt(values.length))
  }

  case class Data(id: Int, dict: String)

  val count = 100
  val data = (1 to count).map { i => Data(id = i, dict = Dict.random) }
  val path = Files.createTempDir().getAbsolutePath

  implicit val system: ActorSystem = ActorSystem()
  implicit val materializer: Materializer = ActorMaterializer()

  import system.dispatcher

  val options = ParquetReader.Options()
  val printingSink = Sink.foreach(println)

  for {
    // write
    _ <- Source(data).runWith(ParquetStreams.toParquetSingleFile(s"$path/data.parquet"))
    // read filtered
    _ <- Future(println("""dict == "A""""))
    _ <- ParquetStreams.fromParquet[Data](path, options = options, filter = Col("dict") === Dict.A).runWith(printingSink)
    _ <- Future(println("""id >= 20 && id < 40"""))
    _ <- ParquetStreams.fromParquet[Data](path, options = options, filter = Col("id") >= 20 && Col("id") < 40).runWith(printingSink)
    // finish
    _ <- system.terminate()
  } yield ()
}
Example 19
Source File: WriteAndReadCustomTypeAkkaApp.scala (from parquet4s, MIT License)

package com.github.mjakubowski84.parquet4s.akka

import akka.actor.ActorSystem
import akka.stream.scaladsl.{Sink, Source}
import akka.stream.{ActorMaterializer, Materializer}
import com.github.mjakubowski84.parquet4s.CustomType._
import com.github.mjakubowski84.parquet4s.ParquetStreams
import com.google.common.io.Files

object WriteAndReadCustomTypeAkkaApp extends App {

  object Data {
    def generate(count: Int): Iterator[Data] = Iterator.range(1, count).map { i => Data(id = i, dict = Dict.random) }
  }
  case class Data(id: Long, dict: Dict.Type)

  val data = () => Data.generate(count = 100)
  val path = Files.createTempDir().getAbsolutePath

  implicit val system: ActorSystem = ActorSystem()
  implicit val materializer: Materializer = ActorMaterializer()

  import system.dispatcher

  for {
    // write
    _ <- Source.fromIterator(data).runWith(ParquetStreams.toParquetSingleFile(s"$path/data.parquet"))
    // read
    // hint: you can filter by dict using string value, for example: filter = Col("dict") === "A"
    _ <- ParquetStreams.fromParquet[Data](path).runWith(Sink.foreach(println))
    // finish
    _ <- system.terminate()
  } yield ()
}
Example 20
Source File: WriteAndReadAkkaApp.scala (from parquet4s, MIT License)

package com.github.mjakubowski84.parquet4s.akka

import akka.actor.ActorSystem
import akka.stream.scaladsl.{Sink, Source}
import akka.stream.{ActorMaterializer, Materializer}
import com.github.mjakubowski84.parquet4s.ParquetStreams
import com.google.common.io.Files

import scala.util.Random

object WriteAndReadAkkaApp extends App {

  case class Data(id: Int, text: String)

  val count = 100
  val data = (1 to count).map { i => Data(id = i, text = Random.nextString(4)) }
  val path = Files.createTempDir().getAbsolutePath

  implicit val system: ActorSystem = ActorSystem()
  implicit val materializer: Materializer = ActorMaterializer()

  import system.dispatcher

  for {
    // write
    _ <- Source(data).runWith(ParquetStreams.toParquetSingleFile(s"$path/data.parquet"))
    // read
    _ <- ParquetStreams.fromParquet[Data](path).runWith(Sink.foreach(println))
    // finish
    _ <- system.terminate()
  } yield ()
}
Example 21
Source File: NifiProcessorSpec.scala (from daf, BSD 3-Clause "New" or "Revised" License)

package it.gov.daf.ingestion.nifi

import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import com.typesafe.config.Config
import it.gov.daf.catalogmanager.MetaCatalog
import it.gov.daf.catalogmanager.json._
import it.gov.daf.ingestion.metacatalog.MetaCatalogProcessor
import org.scalatest.{AsyncFlatSpec, Matchers}
import play.api.libs.json._
import play.api.libs.ws.WSResponse
import play.api.libs.ws.ahc.AhcWSClient

import scala.concurrent.Future
import scala.io.Source

class NifiProcessorSpec extends AsyncFlatSpec with Matchers {

  "A Nifi Processor " should "create a nifi pipeline for a correct meta catalog entry" in {

    val in = this.getClass.getResourceAsStream("/data_test.json")
    val sMetaCatalog = Source.fromInputStream(in).getLines().mkString(" ")
    in.close()

    val parsed = Json.parse(sMetaCatalog)
    val metaCatalog: JsResult[MetaCatalog] = Json.fromJson[MetaCatalog](parsed)

    metaCatalog.isSuccess shouldBe true

    implicit val system: ActorSystem = ActorSystem()
    implicit val materializer: ActorMaterializer = ActorMaterializer()
    implicit val wsClient: AhcWSClient = AhcWSClient()
    implicit val config: Config = com.typesafe.config.ConfigFactory.load()
    implicit val ec = system.dispatcher

    def closeAll(): Unit = {
      system.terminate()
      materializer.shutdown()
      wsClient.close()
    }

    val fResult = NifiProcessor(metaCatalog.get).createDataFlow()

    fResult.map { response =>
      println(response)
      closeAll()
      true shouldBe true
    }
  }
}
Example 22
Source File: DatasetFunctionsSpec.scala (from daf, BSD 3-Clause "New" or "Revised" License)

package daf.dataset

import java.io.ByteArrayInputStream

import akka.stream.ActorMaterializer
import akka.stream.scaladsl.StreamConverters
import controllers.modules.TestAbstractModule
import daf.filesystem.MergeStrategy
import daf.instances.{ AkkaInstance, ConfigurationInstance }
import org.scalatest.{ BeforeAndAfterAll, MustMatchers, WordSpecLike }

import scala.concurrent.Await
import scala.concurrent.duration._
import scala.util.Random

class DatasetFunctionsSpec extends TestAbstractModule
  with WordSpecLike
  with MustMatchers
  with BeforeAndAfterAll
  with ConfigurationInstance
  with AkkaInstance {

  implicit lazy val executionContext = actorSystem.dispatchers.lookup("akka.actor.test-dispatcher")

  protected implicit lazy val materializer = ActorMaterializer.create { actorSystem }

  override def beforeAll() = {
    startAkka()
  }

  def data = (1 to 5)
    .map { i =>
      Random.alphanumeric.grouped(20).take(5).map { s => s"$i - ${s.mkString}" }.toStream :+ defaultSeparator
    }

  def stream = MergeStrategy.coalesced {
    data.map { iter =>
      new ByteArrayInputStream(
        iter.mkString(defaultSeparator).getBytes("UTF-8")
      )
    }
  }

  def source = StreamConverters.fromInputStream(() => stream, 5)

  "Source manipulation" must {

    "convert to a string source" in {
      Await.result(
        wrapDefault { asStringSource(source) }.runFold("") { _ + _ },
        5.seconds
      ).split(defaultSeparator).length must be { 25 }
    }

    "convert to a json source" in {
      Await.result(
        wrapJson { asStringSource(source) }.runFold("") { _ + _ },
        5.seconds
      ).split(jsonSeparator).length must be { 25 }
    }
  }
}
Example 23
Source File: DirManager.scala (from daf, BSD 3-Clause "New" or "Revised" License)

package it.gov.daf.catalogmanager.listeners

import java.net.URLEncoder

import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.{ FileIO, Source }
import net.caoticode.dirwatcher.FSListener
import play.api.libs.ws.WSClient
import play.api.libs.ws.ahc.AhcWSClient
import play.api.mvc.MultipartFormData.FilePart
import play.Logger

import scala.concurrent.Future

class DirManager() extends FSListener {

  import java.nio.file.Path
  import scala.concurrent.ExecutionContext.Implicits.global

  val logger = Logger.underlying()

  override def onCreate(ref: Path): Unit = {

    implicit val system = ActorSystem()
    implicit val materializer = ActorMaterializer()
    val wsClient = AhcWSClient()

    val name = ref.getParent.getFileName.toString
    println(name)

    val uri: Option[String] = IngestionUtils.datasetsNameUri.get(name)
    val logicalUri = URLEncoder.encode(uri.get, "UTF-8")
    logger.debug("logicalUri: " + logicalUri)

    call(wsClient)
      .andThen { case _ => wsClient.close() }
      .andThen { case _ => system.terminate() }

    def call(wsClient: WSClient): Future[Unit] = {
      wsClient.url("http://localhost:9001/ingestion-manager/v1/add-datasets/" + logicalUri)
        //.withHeaders("content-type" -> "multipart/form-data")
        .post(
          Source(FilePart("upfile", name, None, FileIO.fromPath(ref)) :: List())).map { response =>
            val statusText: String = response.statusText
            logger.debug(s"Got a response $statusText")
          }
    }

    logger.debug(s"created $ref")
  }

  override def onDelete(ref: Path): Unit = println(s"deleted $ref")
  override def onModify(ref: Path): Unit = println(s"modified $ref")
}
Example 24
Source File: CatalogControllersSpec.scala (from daf, BSD 3-Clause "New" or "Revised" License)

import java.io.IOException
import java.net.ServerSocket

import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import catalog_manager.yaml.MetaCatalog
import org.specs2.mutable.Specification
import play.api.Application
import play.api.http.Status
import play.api.routing.Router
import play.api.inject.guice.GuiceApplicationBuilder
import play.api.libs.json.{JsArray, JsValue, Json}
import play.api.libs.ws.WSResponse
import play.api.libs.ws.ahc.AhcWSClient
import play.api.test._
import it.gov.daf.catalogmanager
import it.gov.daf.catalogmanager.client.Catalog_managerClient

import scala.concurrent.duration.Duration
import scala.concurrent.{Await, Future}

class CatalogControllersSpec extends Specification {

  def application: Application = GuiceApplicationBuilder().build()

  import catalog_manager.yaml.BodyReads.MetaCatalogReads

  "The catalog-manager" should {

    "Call catalog-manager/v1/dataset-catalogs return ok status" in
      new WithServer(app = application, port = 9000) {
        WsTestClient.withClient { implicit client =>
          val response: WSResponse = Await.result[WSResponse](client.
            url(s"http://localhost:9001/catalog-manager/v1/dataset-catalogs").
            execute, Duration.Inf)
          println(response.status)
          response.status must be equalTo Status.OK
        }
      }

    "Call catalog-manager/v1/dataset-catalogs return a non empty list if" +
      "you have error maybe is necessaty to add data to db" in
      new WithServer(app = application, port = 9000) {
        WsTestClient.withClient { implicit client =>
          val response: WSResponse = Await.result[WSResponse](client.
            url(s"http://localhost:9001/catalog-manager/v1/dataset-catalogs").
            execute, Duration.Inf)
          println(response.status)
          println("ALE")
          println(response.body)
          val json: JsValue = Json.parse(response.body)
          json.as[JsArray].value.size must be greaterThan (0)
        }
      }

    "The catalog-manager" should {
      "Call catalog-manager/v1/dataset-catalogs/{logical_uri} return ok status" in
        new WithServer(app = application, port = 9000) {
          val logicalUri = "daf://dataset/std/standard/standard/uri_cultura/standard"
          val url = s"http://localhost:9001/catalog-manager/v1/dataset-catalogs/$logicalUri"
          println(url)
          WsTestClient.withClient { implicit client =>
            val response: WSResponse = Await.result[WSResponse](client.
              url(url).
              execute, Duration.Inf)
            println(response.status)
            response.status must be equalTo Status.OK
          }
        }
    }

    "The catalog-manager" should {
      "Call catalog-manager/v1/dataset-catalogs/{anything} return 401" in
        new WithServer(app = application, port = 9000) {
          val logicalUri = "anything"
          val url = s"http://localhost:9001/catalog-manager/v1/dataset-catalogs/$logicalUri"
          println(url)
          WsTestClient.withClient { implicit client =>
            val response: WSResponse = Await.result[WSResponse](client.
              url(url).
              execute, Duration.Inf)
            println(response.status)
            response.status must be equalTo 401
          }
        }
    }
  }
}
Example 25
Source File: Boot.scala (from akka-http-rest, MIT License)

package me.archdev.restapi

import akka.actor.ActorSystem
import akka.http.scaladsl.Http
import akka.stream.ActorMaterializer
import me.archdev.restapi.core.auth.{ AuthService, JdbcAuthDataStorage }
import me.archdev.restapi.core.profiles.{ JdbcUserProfileStorage, UserProfileService }
import me.archdev.restapi.http.HttpRoute
import me.archdev.restapi.utils.Config
import me.archdev.restapi.utils.db.{ DatabaseConnector, DatabaseMigrationManager }

import scala.concurrent.ExecutionContext

object Boot extends App {

  def startApplication() = {
    implicit val actorSystem = ActorSystem()
    implicit val executor: ExecutionContext = actorSystem.dispatcher
    implicit val materializer: ActorMaterializer = ActorMaterializer()

    val config = Config.load()

    new DatabaseMigrationManager(
      config.database.jdbcUrl,
      config.database.username,
      config.database.password
    ).migrateDatabaseSchema()

    val databaseConnector = new DatabaseConnector(
      config.database.jdbcUrl,
      config.database.username,
      config.database.password
    )

    val userProfileStorage = new JdbcUserProfileStorage(databaseConnector)
    val authDataStorage = new JdbcAuthDataStorage(databaseConnector)

    val usersService = new UserProfileService(userProfileStorage)
    val authService = new AuthService(authDataStorage, config.secretKey)
    val httpRoute = new HttpRoute(usersService, authService, config.secretKey)

    Http().bindAndHandle(httpRoute.route, config.http.host, config.http.port)
  }

  startApplication()
}
Example 26
Source File: LogCollector.scala (from pulse, Apache License 2.0)

package io.phdata.pulse.logcollector

import java.io.FileInputStream
import java.util.Properties
import java.util.concurrent.TimeUnit

import akka.actor.ActorSystem
import akka.http.scaladsl.Http
import akka.stream.{ ActorMaterializer, Materializer }
import com.typesafe.scalalogging.LazyLogging
import io.phdata.pulse.common.SolrService
import io.phdata.pulse.solr.SolrProvider
import org.apache.kudu.client.KuduClient.KuduClientBuilder

import scala.concurrent.duration.Duration
import scala.concurrent.{ Await, Future }
import scala.util.{ Failure, Success }

// The enclosing object declaration was lost in extraction; it is reconstructed
// here from the file name and the LazyLogging import.
object LogCollector extends LazyLogging {

  def main(args: Array[String]): Unit = {
    System.getProperty("java.security.auth.login.config") match {
      case null =>
        logger.info(
          "java.security.auth.login.config is not set, continuing without kerberos authentication")
      case _ =>
        KerberosContext.scheduleKerberosLogin(0, 9, TimeUnit.HOURS)
    }

    start(args)
  }

  private def start(args: Array[String]): Unit = {
    val cliParser = new LogCollectorCliParser(args)

    val solrService = SolrProvider.create(cliParser.zkHosts().split(",").toList)
    val solrStream = new SolrCloudStream(solrService)

    val kuduClient =
      cliParser.kuduMasters.toOption.map(masters =>
        KerberosContext.runPrivileged(new KuduClientBuilder(masters).build()))

    val kuduService =
      kuduClient.map(client => KerberosContext.runPrivileged(new KuduService(client)))

    val routes = new LogCollectorRoutes(solrStream, kuduService)

    cliParser.mode() match {
      case "kafka" =>
        kafka(solrService, cliParser.kafkaProps(), cliParser.topic())
      case _ =>
        http(cliParser.port(), routes)
    }
  }

  // Starts Http Service
  def http(port: Int, routes: LogCollectorRoutes): Future[Unit] = {
    implicit val actorSystem: ActorSystem = ActorSystem()
    implicit val ec = actorSystem.dispatchers.lookup("akka.actor.http-dispatcher")
    implicit val materializer: Materializer = ActorMaterializer.create(actorSystem)

    val httpServerFuture = Http().bindAndHandle(routes.routes, "0.0.0.0", port)(materializer) map { binding =>
      logger.info(s"Log Collector interface bound to: ${binding.localAddress}")
    }

    httpServerFuture.onComplete {
      case Success(v) => ()
      case Failure(ex) =>
        logger.error("HTTP server failed, exiting. ", ex)
        System.exit(1)
    }

    Await.ready(
      httpServerFuture,
      Duration.Inf
    )
  }

  // Starts Kafka Consumer
  def kafka(solrService: SolrService, kafkaProps: String, topic: String): Unit = {
    val solrCloudStream = new SolrCloudStream(solrService)

    val kafkaConsumer = new PulseKafkaConsumer(solrCloudStream)

    val kafkaConsumerProps = new Properties()
    kafkaConsumerProps.load(new FileInputStream(kafkaProps))

    kafkaConsumer.read(kafkaConsumerProps, topic)
  }
}
Example 27
Source File: App.scala (from avoin-voitto, MIT License)

package liigavoitto

import akka.actor.ActorSystem
import akka.http.scaladsl.Http
import akka.http.scaladsl.server.Directives
import akka.stream.ActorMaterializer
import liigavoitto.api.{ ApiRoutes, BaseRoutes }
import liigavoitto.util.Logging

import scala.util.Properties

object App extends Directives with ApiRoutes with Logging {

  implicit lazy val system = ActorSystem("liiga-voitto")

  lazy val port = Properties.envOrElse("APP_PORT", "45258").toInt

  def main(args: Array[String]) {
    implicit val executionContext = system.dispatcher
    implicit val fm = ActorMaterializer()

    Http().bindAndHandle(routes, "0.0.0.0", port)
    log.info(s"Server online at http://0.0.0.0:$port/")
  }

  val routes = BaseRoutes.baseRoutes ~ apiRoutes ~ localRoute
}
Example 28
Source File: ScoresApiSupport.scala (from avoin-voitto, MIT License)

package liigavoitto.scores

import akka.actor.ActorSystem
import akka.http.scaladsl.Http
import akka.http.scaladsl.model.HttpMethods._
import akka.http.scaladsl.model.{ HttpRequest, HttpResponse }
import akka.stream.ActorMaterializer
import liigavoitto.util.Logging
import org.joda.time.format.DateTimeFormat

import scala.concurrent.Await
import scala.concurrent.duration._
import scala.util.{ Failure, Properties, Success, Try }

trait ScoresApiSupport extends Logging {
  implicit val system: ActorSystem
  implicit val ec = system.dispatcher
  implicit val fm = ActorMaterializer()

  val oneHundredMegabytes = 100000000

  val apiUrl = Properties.envOrElse("SCORES_API_URL", "http://scores.api.yle.fi/v0/")
  val scoresAuth = Map[String, String](
    "app_id" -> Properties.envOrElse("SCORES_API_APP_ID", ""),
    "app_key" -> Properties.envOrElse("SCORES_API_APP_KEY", "")
  )

  val dateFormat = DateTimeFormat.forPattern("yyyy-MM-dd'T'HH:mm:ss")
  val timeout = 15.seconds

  protected def get(url: String) = {
    Try {
      val request = HttpRequest(GET, url)
      log.info("REQUEST: " + request)
      Http().singleRequest(request).map(r => getStr(r))
    } match {
      case Success(s) => s
      case Failure(e) =>
        log.warn(s"Failed to get $url: " + e.getMessage)
        e.printStackTrace()
        throw new RuntimeException("Failure: " + e)
    }
  }

  protected def getStr(r: HttpResponse) = {
    Try {
      val entity = Await.result(r.entity.withSizeLimit(oneHundredMegabytes).toStrict(timeout), timeout)
      entity.data.decodeString("UTF-8")
    } match {
      case Success(s) => s
      case Failure(e) => throw new RuntimeException(s"Scores api failure: " + e.getMessage)
    }
  }
}
Example 29
Source File: ApiRoutesSpec.scala (from avoin-voitto, MIT License)

package liigavoitto.api

import akka.http.scaladsl.model.StatusCodes
import akka.http.scaladsl.testkit.ScalatestRouteTest
import akka.stream.ActorMaterializer
import org.scalatest.{ Matchers, WordSpec }

class ApiRoutesSpec extends WordSpec with Matchers with ScalatestRouteTest with ApiRoutes {
  implicit val actorMaterializer: ActorMaterializer = ActorMaterializer()

  "SimpleRoute" should {
    "answer to GET requests to `/report/s24-123456`" in {
      Get("/report/s24-123456") ~> apiRoutes ~> check {
        status shouldBe StatusCodes.OK
      }
    }

    "handle reports in two languages" in {
      Get("/report/s24-123456?lang=sv") ~> apiRoutes ~> check {
        status shouldBe StatusCodes.OK
        responseAs[String] should include("lang: sv")
      }
    }
  }

  override val handler = new ApiHandler() {
    override def report(id: String, lang: String) = s"OK, $id. lang: $lang"
  }
}
Example 30
Source File: ClientSpec.scala (from seals, Apache License 2.0)

package com.example.lib

import scala.concurrent.Await
import scala.concurrent.duration._

import cats.effect.IO

import org.scalatest.matchers.should.Matchers
import org.scalatest.flatspec.AnyFlatSpec

import fs2.Stream

import akka.actor.ActorSystem
import akka.stream.{ Materializer, ActorMaterializer }

import Protocol.v1.{ Response, RandInt, Seeded }

class ClientSpec extends AnyFlatSpec with Matchers with com.example.lib.TcpTest {

  implicit lazy val sys: ActorSystem = ActorSystem("InteropSpec")
  implicit lazy val mat: Materializer = ActorMaterializer()

  protected override def ec = sys.dispatcher

  override def afterAll(): Unit = {
    super.afterAll()
    sys.terminate()
  }

  "Client" should "receive the correct response" in {
    val sem = cats.effect.concurrent.Semaphore[IO](0).unsafeRunSync()
    Stream(Server.serve(1237, sockGroup).drain, Stream.eval(sem.acquire))
      .parJoin(Int.MaxValue)
      .take(1)
      .compile
      .drain
      .unsafeRunAsync(_ => ())
    try {
      val resp = Await.result(Client.client(1237), 2.seconds)
      // constant, because we always seed with the same value:
      resp should === (Vector[Response](Seeded, RandInt(42)))
    } finally {
      sem.release.unsafeRunSync()
    }
  }
}
Example 31
Source File: AmqpSubscriberPerfSpec.scala From reliable-http-client with Apache License 2.0 | 5 votes |
package rhttpc.transport.amqp

import akka.Done
import akka.actor.{Actor, ActorSystem, Props}
import akka.http.scaladsl.Http
import akka.http.scaladsl.model.{HttpRequest, HttpResponse}
import akka.pattern._
import akka.stream.ActorMaterializer
import akka.testkit.{TestKit, TestProbe}
import dispatch.url
import org.scalatest.{BeforeAndAfterAll, FlatSpecLike, Ignore}
import rhttpc.transport.{Deserializer, InboundQueueData, OutboundQueueData, Serializer}

import scala.concurrent.duration._
import scala.concurrent.{Await, Future}
import scala.util.{Random, Try}

@Ignore
class AmqpSubscriberPerfSpec extends TestKit(ActorSystem("AmqpSubscriberPerfSpec")) with FlatSpecLike with BeforeAndAfterAll {
  import system.dispatcher

  implicit val materializer = ActorMaterializer()

  implicit def serializer[Msg] = new Serializer[Msg] {
    override def serialize(obj: Msg): String = obj.toString
  }

  implicit def deserializer[Msg] = new Deserializer[Msg] {
    override def deserialize(value: String): Try[Msg] = Try(value.asInstanceOf[Msg])
  }

  val queueName = "request"
  val outboundQueueData = OutboundQueueData(queueName, autoDelete = true, durability = false)
  val inboundQueueData = InboundQueueData(queueName, batchSize = 10, parallelConsumers = 10, autoDelete = true, durability = false)
  val count = 100

  private val interface = "localhost"
  private val port = 8081

  def handle(request: HttpRequest) = {
    val delay = 5 + Random.nextInt(10)
    after(delay.seconds, system.scheduler)(Future.successful(HttpResponse()))
  }

  it should "have a good throughput" in {
    val bound = Await.result(
      Http().bindAndHandleAsync(
        handle,
        interface,
        port
      ),
      5.seconds
    )
    val http = dispatch.Http()
    //  .configure(_.setMaxConnections(count)
    //    .setExecutorService(Executors.newFixedThreadPool(count)))

    val connection = Await.result(AmqpConnectionFactory.connect(system), 5 seconds)
    val transport = AmqpTransport(
      connection = connection
    )
    val publisher = transport.publisher[String](outboundQueueData)
    val probe = TestProbe()
    val actor = system.actorOf(Props(new Actor {
      override def receive: Receive = {
        case str: String =>
          http(url(s"http://$interface:$port") OK identity).map(_ => Done).pipeTo(self)(sender())
        case Done =>
          probe.ref ! Done
          sender() ! Done
      }
    }))
    val subscriber = transport.subscriber[String](inboundQueueData, actor)
    subscriber.start()

    try {
      measureMeanThroughput(count) {
        (1 to count).foreach { _ => publisher.publish("x") }
        probe.receiveWhile(10 minutes, messages = count) { case a => a }
      }
    } finally {
      Await.result(subscriber.stop(), 5.seconds)
      connection.close(5 * 1000)
      Await.result(bound.unbind(), 5.seconds)
    }
  }

  def measureMeanThroughput(count: Int)(consume: => Unit) = {
    val before = System.currentTimeMillis()
    consume
    val msgsPerSecond = count / ((System.currentTimeMillis() - before).toDouble / 1000)
    println(s"Throughput was: $msgsPerSecond msgs/sec")
  }

  override protected def afterAll(): Unit = {
    shutdown()
  }
}
Example 32
Source File: SampleApp.scala From reliable-http-client with Apache License 2.0 | 5 votes |
package rhttpc.sample

import akka.actor.ActorSystem
import akka.http.scaladsl.Http
import akka.http.scaladsl.model._
import akka.http.scaladsl.server._
import akka.pattern._
import akka.stream.ActorMaterializer
import akka.util.Timeout
import rhttpc.akkahttp.ReliableHttpClientFactory
import rhttpc.akkapersistence.{RecoverAllActors, RecoverableActorsManager, SendMsgToChild}
import rhttpc.client.subscription.ReplyFuture

import scala.concurrent.Await
import scala.concurrent.duration._
import scala.language.reflectiveCalls

object SampleApp extends App with Directives {
  implicit val system = ActorSystem("rhttpc-sample")
  implicit val materializer = ActorMaterializer()
  import system.dispatcher

  val rhttpc = Await.result(ReliableHttpClientFactory().withOwnAmqpConnection.inOutWithSubscriptions(), 10 seconds)

  val client = new DelayedEchoClient {
    override def requestResponse(msg: String): ReplyFuture = {
      rhttpc.send(HttpRequest().withUri("http://sampleecho:8082").withMethod(HttpMethods.POST).withEntity(msg))
    }
  }

  val manager = system.actorOf(RecoverableActorsManager.props(
    FooBarActor.persistenceCategory,
    id => FooBarActor.props(id, rhttpc.subscriptionManager, client)
  ), "foobar")

  Await.result((manager ? RecoverAllActors)(Timeout(20 seconds)), 15 seconds)

  rhttpc.start()

  val route =
    path("healthcheck") {
      get {
        complete("OK")
      }
    } ~
    path(Segment) { id =>
      (post & entity(as[String])) { msg =>
        complete {
          implicit val sendMsgTimeout = Timeout(5 seconds)
          (manager ? SendMsgToChild(id, SendMsg(msg))).map(_ => "OK")
        }
      } ~
      get {
        complete {
          implicit val currentStateTimeout = Timeout(5 seconds)
          (manager ? SendMsgToChild(id, CurrentState)).mapTo[FooBarState].map(_.toString)
        }
      }
    }

  Http().bindAndHandle(route, interface = "0.0.0.0", port = 8081).map { binding =>
    Runtime.getRuntime.addShutdownHook(new Thread {
      override def run(): Unit = {
        Await.result(rhttpc.stop(), 10 seconds)
      }
    })
  }
}
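The bindAndHandle calls used throughout these examples were deprecated in Akka HTTP 10.2 in favour of the newServerAt builder, which no longer takes a materializer. A minimal sketch of the newer bootstrap (assuming Akka HTTP 10.2+; the route is illustrative):

import akka.actor.ActorSystem
import akka.http.scaladsl.Http
import akka.http.scaladsl.server.Directives._

object ModernBinding extends App {
  implicit val system: ActorSystem = ActorSystem("binding")
  import system.dispatcher

  val route = path("healthcheck") { get { complete("OK") } }

  // Akka HTTP 10.2+ bootstrap; materialization uses the system materializer.
  Http().newServerAt("0.0.0.0", 8081).bind(route)
    .foreach(binding => println(s"Bound to ${binding.localAddress}"))
}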
Example 33
Source File: EchoApp.scala From reliable-http-client with Apache License 2.0 | 5 votes |
package rhttpc.echo

import akka.actor.ActorSystem
import akka.agent.Agent
import akka.http.scaladsl.Http
import akka.http.scaladsl.server._
import akka.pattern._
import akka.stream.ActorMaterializer

import scala.concurrent.Future
import scala.concurrent.duration._

object EchoApp extends App with Directives {
  implicit val system = ActorSystem("rhttpc-echo")
  implicit val materializer = ActorMaterializer()
  import system.dispatcher

  val retryAgent = Agent(Map.empty[String, Int])

  val route = (post & entity(as[String])) {
    case request@FailNTimesThanReplyWithMessage(failsCount, eventualMessage) =>
      complete {
        retryAgent.alter { currectRetryMap =>
          val current = currectRetryMap.getOrElse(request, 0)
          val next = current + 1
          if (next > failsCount) {
            currectRetryMap - request
          } else {
            currectRetryMap + (request -> next)
          }
        }.flatMap { retryMapAfterChange =>
          retryMapAfterChange.get(request) match {
            case Some(retry) => Future.failed(new Exception(s"Failed $retry time"))
            case None => Future.successful(eventualMessage)
          }
        }
      }
    case msg =>
      complete {
        system.log.debug(s"Got: $msg")
        after(5 seconds, system.scheduler) {
          system.log.debug(s"Reply with: $msg")
          Future.successful(msg)
        }
      }
  }

  Http().bindAndHandle(route, interface = "0.0.0.0", port = 8082)
}

object FailNTimesThanReplyWithMessage {
  private val Regex = "fail-(\\d*)-times-than-reply-with-(.*)".r("failsCount", "eventualMessage")

  def unapply(str: String): Option[(Int, String)] = str match {
    case Regex(failsCount, eventualMessage) => Some(failsCount.toInt, eventualMessage)
    case other => None
  }
}
Example 34
Source File: DemoApp.scala From sbt-reactive-app with Apache License 2.0 | 5 votes |
package foo

import akka.actor.{ Actor, ActorLogging, ActorSystem, Props }
import akka.cluster.ClusterEvent.ClusterDomainEvent
import akka.cluster.{ Cluster, ClusterEvent }
import akka.http.scaladsl.Http
import akka.http.scaladsl.model._
import akka.http.scaladsl.server.Directives._
import akka.management.AkkaManagement
import akka.management.cluster.bootstrap.ClusterBootstrap
import akka.stream.ActorMaterializer

object DemoApp extends App {

  implicit val system = ActorSystem("Appka")

  import system.log
  implicit val mat = ActorMaterializer()
  val cluster = Cluster(system)

  log.info(s"Started [$system], cluster.selfAddress = ${cluster.selfAddress}")
  log.info("something2")

  //#start-akka-management
  AkkaManagement(system).start()
  //#start-akka-management
  ClusterBootstrap(system).start()

  cluster.subscribe(
    system.actorOf(Props[ClusterWatcher]),
    ClusterEvent.InitialStateAsEvents,
    classOf[ClusterDomainEvent])

  // add real app routes here
  val routes =
    path("hello") {
      get {
        complete(
          HttpEntity(
            ContentTypes.`text/html(UTF-8)`,
            "<h1>Hello</h1>"))
      }
    }

  Http().bindAndHandle(routes, "0.0.0.0", 8080)

  Cluster(system).registerOnMemberUp({
    log.info("Cluster member is up!")
  })
}

class ClusterWatcher extends Actor with ActorLogging {
  val cluster = Cluster(context.system)

  override def receive = {
    case msg ⇒ log.info(s"Cluster ${cluster.selfAddress} >>> " + msg)
  }
}
Example 35
Source File: RequestFactoriesSpec.scala From hydra with Apache License 2.0 | 5 votes |
package hydra.ingest.bootstrap

import akka.actor.ActorSystem
import akka.http.scaladsl.model.HttpRequest
import akka.stream.ActorMaterializer
import akka.testkit.TestKit
import org.scalatest.concurrent.ScalaFutures
import org.scalatest.matchers.should.Matchers
import org.scalatest.funspec.AnyFunSpecLike
import org.scalatest.BeforeAndAfterAll

import scala.concurrent.duration._

class RequestFactoriesSpec
    extends TestKit(ActorSystem("RequestFactoriesSpec"))
    with Matchers
    with AnyFunSpecLike
    with BeforeAndAfterAll
    with ScalaFutures {

  override def afterAll = TestKit.shutdownActorSystem(
    system,
    verifySystemShutdown = true,
    duration = 10.seconds
  )

  import RequestFactories._

  describe("The RequestFactories") {
    it("build a Hydra request from an HTTP request") {
      val hr = HttpRequest(entity = "test")
      val hydraReq = createRequest("1", hr)
      whenReady(hydraReq) { r => r.payload shouldBe "test" }
    }
  }
}
Example 36
Source File: BootstrapEndpointActors.scala From hydra with Apache License 2.0 | 5 votes |
package hydra.kafka.endpoints

import akka.actor.{ActorRef, ActorSystem}
import akka.stream.{ActorMaterializer, Materializer}
import hydra.avro.registry.ConfluentSchemaRegistry
import hydra.common.config.ConfigSupport
import hydra.core.akka.SchemaRegistryActor
import hydra.kafka.services.{StreamsManagerActor, TopicBootstrapActor}
import hydra.kafka.util.KafkaUtils

import scala.concurrent.ExecutionContext

trait BootstrapEndpointActors extends ConfigSupport {

  implicit val system: ActorSystem

  private[kafka] val kafkaIngestor = system.actorSelection(path =
    applicationConfig.getString("kafka-ingestor-path")
  )

  private[kafka] val schemaRegistryActor =
    system.actorOf(SchemaRegistryActor.props(applicationConfig))

  private[kafka] val bootstrapKafkaConfig =
    applicationConfig.getConfig("bootstrap-config")

  private[kafka] val streamsManagerProps = StreamsManagerActor.props(
    bootstrapKafkaConfig,
    KafkaUtils.BootstrapServers,
    ConfluentSchemaRegistry.forConfig(applicationConfig).registryClient
  )

  val bootstrapActor: ActorRef = system.actorOf(
    TopicBootstrapActor.props(
      schemaRegistryActor,
      kafkaIngestor,
      streamsManagerProps,
      Some(bootstrapKafkaConfig)
    )
  )
}
Example 37
Source File: DurableEventLogs.scala From eventuate with Apache License 2.0 | 5 votes |
package com.rbmhtechnology.example.stream

//# durable-event-logs
import akka.actor.{ ActorRef, ActorSystem }
import akka.stream.{ ActorMaterializer, Materializer }

import com.rbmhtechnology.eventuate.log.leveldb.LeveldbEventLog
//#

trait DurableEventLogs {
  //# durable-event-logs
  implicit val system: ActorSystem = ActorSystem("example")
  implicit val materializer: Materializer = ActorMaterializer()

  val logAId = "A"
  val logBId = "B"
  val logCId = "C"

  val logA: ActorRef = createLog(logAId)
  val logB: ActorRef = createLog(logBId)
  val logC: ActorRef = createLog(logCId)

  def createLog(id: String): ActorRef =
    system.actorOf(LeveldbEventLog.props(id))
  //#
}
Example 38
Source File: AkkaUnitTestLike.scala From reactive-kinesis with Apache License 2.0 | 5 votes |
package com.weightwatchers.reactive.kinesis.common

import akka.actor.{ActorSystem, Scheduler}
import akka.stream.{ActorMaterializer, Materializer}
import akka.testkit.TestKitBase
import com.typesafe.config.{Config, ConfigFactory}
import org.scalatest.concurrent.ScalaFutures
import org.scalatest.{BeforeAndAfterAll, Suite}

import scala.concurrent.ExecutionContextExecutor

trait AkkaUnitTestLike extends TestKitBase with ScalaFutures with BeforeAndAfterAll {
  self: Suite =>

  implicit lazy val config: Config                = ConfigFactory.load("sample.conf")
  implicit lazy val system: ActorSystem           = ActorSystem(suiteName, config)
  implicit lazy val scheduler: Scheduler          = system.scheduler
  implicit lazy val mat: Materializer             = ActorMaterializer()
  implicit lazy val ctx: ExecutionContextExecutor = system.dispatcher

  abstract override def afterAll(): Unit = {
    super.afterAll()
    // intentionally shutdown the actor system last.
    system.terminate().futureValue
  }
}
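A usage sketch for the trait above (the spec class and its assertion are hypothetical): any ScalaTest suite can mix it in and get a lazily created system, materializer, scheduler and dispatcher for free, plus futureValue from ScalaFutures.

import akka.stream.scaladsl.{ Sink, Source }
import org.scalatest.flatspec.AnyFlatSpecLike
import org.scalatest.matchers.should.Matchers

class NumberStreamSpec extends AnyFlatSpecLike with Matchers with AkkaUnitTestLike {
  "a simple stream" should "run with the shared materializer" in {
    // futureValue comes from ScalaFutures, already mixed into AkkaUnitTestLike.
    Source(1 to 3).runWith(Sink.seq).futureValue shouldBe Seq(1, 2, 3)
  }
}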
Example 39
Source File: Main.scala From akka-api-gateway-example with MIT License | 5 votes |
package jp.co.dzl.example.akka.api

import akka.actor.ActorSystem
import akka.http.scaladsl.Http
import akka.stream.{ ActorMaterializer, Materializer }
import jp.co.dzl.example.akka.api.di.{ ServiceModule, HandlerModule, ConfigModule, AkkaModule }
import jp.co.dzl.example.akka.api.handler.RootHandler
import scaldi.{ Injector, Injectable }

import scala.concurrent.ExecutionContextExecutor

trait MainService extends Injectable {
  implicit val module: Injector = new AkkaModule :: new ConfigModule :: new HandlerModule :: new ServiceModule

  implicit val system: ActorSystem = inject[ActorSystem]
  implicit val executor: ExecutionContextExecutor = system.dispatcher
  implicit val materializer: Materializer = ActorMaterializer()

  val host = inject[String](identified by "http.listen.host")
  val port = inject[Int](identified by "http.listen.port")
  val handler = inject[RootHandler]
}

object Main extends App with MainService {
  Http().bindAndHandle(handler.routes, host, port)
}
Example 40
Source File: UsersHandler.scala From akka-api-gateway-example with MIT License | 5 votes |
package jp.co.dzl.example.akka.api.handler.v1.github

import akka.actor.ActorSystem
import akka.http.scaladsl.model._
import akka.http.scaladsl.server.Directives._
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.{ Sink, Source }
import jp.co.dzl.example.akka.api.handler.Handler
import jp.co.dzl.example.akka.api.service.GitHub

import scala.util.{ Failure, Success }

class UsersHandler(
    actorSystem: ActorSystem,
    github:      GitHub
) extends Handler {
  implicit val system = actorSystem
  implicit val executor = system.dispatcher
  implicit val materializer = ActorMaterializer()

  def routes = pathPrefix("v1" / "github") {
    path("users" / """^[a-zA-Z0-9\-]+$""".r) { login =>
      get {
        extractRequest { req =>
          val result = Source.single(HttpRequest(HttpMethods.GET, s"/users/$login"))
            .via(github.from(req))
            .via(github.send)
            .runWith(Sink.head)

          onComplete(result) {
            case Success(response) => complete(response)
            case Failure(error)    => complete(StatusCodes.ServiceUnavailable -> error.toString)
          }
        }
      }
    }
  }
}
Example 41
Source File: GitHubSpec.scala From akka-api-gateway-example with MIT License | 5 votes |
package jp.co.dzl.example.akka.api.service

import akka.actor.ActorSystem
import akka.http.scaladsl.model.headers.RawHeader
import akka.http.scaladsl.model.{ HttpMethods, HttpRequest, HttpResponse }
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.{ Flow, Source }
import akka.stream.testkit.scaladsl.TestSink
import org.scalamock.scalatest.MockFactory
import org.scalatest.concurrent.ScalaFutures
import org.scalatest.{ BeforeAndAfterAll, FlatSpec, Matchers }

import scala.concurrent.Await
import scala.concurrent.duration.Duration

class GitHubSpec extends FlatSpec with Matchers with ScalaFutures with BeforeAndAfterAll with MockFactory {
  implicit val system = ActorSystem("github-spec")
  implicit val executor = system.dispatcher
  implicit val materializer = ActorMaterializer()

  override protected def afterAll: Unit = {
    Await.result(system.terminate(), Duration.Inf)
  }

  "#from" should "merge original headers to github request" in {
    val github = new GitHubImpl("127.0.0.1", 8000, 5, mock[HttpClient])
    val request = HttpRequest(HttpMethods.GET, "/")
      .addHeader(RawHeader("host", "dummy"))
      .addHeader(RawHeader("timeout-access", "dummy"))

    val result = Source.single(HttpRequest(HttpMethods.GET, "/v1/github/users/xxxxxx"))
      .via(github.from(request))
      .runWith(TestSink.probe[HttpRequest])
      .request(1)
      .expectNext()

    result.headers.filter(_.lowercaseName() == "host") shouldBe empty
    result.headers.filter(_.lowercaseName() == "timeout-access") shouldBe empty
    result.headers.filter(_.lowercaseName() == "x-forwarded-host") shouldNot be(empty)
  }

  "#send" should "connect using http client" in {
    val httpResponse = HttpResponse()
    val httpClient = mock[HttpClient]
    (httpClient.connectionHttps _).expects(*, *, *).returning(Flow[HttpRequest].map(_ => httpResponse))

    val github = new GitHubImpl("127.0.0.1", 8000, 5, httpClient)
    val result = Source.single(HttpRequest(HttpMethods.GET, "/"))
      .via(github.send)
      .runWith(TestSink.probe[HttpResponse])
      .request(1)
      .expectNext()

    result shouldBe httpResponse
  }
}
Example 42
Source File: HostBalancerTest.scala From clickhouse-scala-client with GNU Lesser General Public License v3.0 | 5 votes |
package com.crobox.clickhouse.balancing

import akka.stream.{ActorMaterializer, Materializer}
import com.crobox.clickhouse.ClickhouseClientSpec
import com.crobox.clickhouse.internal.ClickhouseHostBuilder
import com.typesafe.config.ConfigFactory

import scala.concurrent.duration._

class HostBalancerTest extends ClickhouseClientSpec {

  it should "resolve to single host balancer" in {
    HostBalancer() match {
      case SingleHostBalancer(host) =>
        host shouldEqual ClickhouseHostBuilder.toHost("localhost", Some(8123))
    }
  }

  it should "resolve to multi host balancer" in {
    HostBalancer(Some(ConfigFactory.parseString("""
        | connection: {
        |   type: "balancing-hosts"
        |   hosts: [
        |     {
        |       host: "localhost",
        |       port: 8123
        |     }
        |   ]
        |   health-check {
        |     timeout = 1 second
        |     interval = 1 second
        |   }
        | }
        | """.stripMargin).withFallback(config.getConfig("crobox.clickhouse.client")))) match {
      case MultiHostBalancer(hosts, _) =>
        hosts.toSeq should contain theSameElementsInOrderAs Seq(ClickhouseHostBuilder.toHost("localhost", Some(8123)))
    }
  }

  it should "resolve to cluster aware host balancer" in {
    HostBalancer(Some(ConfigFactory.parseString("""
        | connection: {
        |   type: "cluster-aware"
        |   host: "localhost"
        |   port: 8123
        |   cluster: "cluster"
        |   scanning-interval = 1 second
        |   health-check {
        |     timeout = 1 second
        |     interval = 1 second
        |   }
        | }
        | """.stripMargin).withFallback(config.getConfig("crobox.clickhouse.client")))) match {
      case ClusterAwareHostBalancer(host, cluster, _, builtTimeout) =>
        host shouldEqual ClickhouseHostBuilder.toHost("localhost", Some(8123))
        cluster shouldBe "cluster"
        builtTimeout shouldBe (1 second)
    }
  }
}
Example 43
Source File: ClickhouseClientSpec.scala From clickhouse-scala-client with GNU Lesser General Public License v3.0 | 5 votes |
package com.crobox.clickhouse

import java.util.UUID

import akka.actor.ActorSystem
import akka.stream.{ActorMaterializer, Materializer}
import akka.testkit.TestKit
import com.typesafe.config.{Config, ConfigFactory}
import org.scalatest.BeforeAndAfterAll
import org.scalatest.concurrent.ScalaFutures
import org.scalatest.flatspec.AnyFlatSpecLike
import org.scalatest.matchers.should.Matchers

import scala.concurrent.duration._
import scala.concurrent.{Await, ExecutionContext}
import scala.util.Random

abstract class ClickhouseClientSpec(val config: Config = ConfigFactory.load())
    extends TestKit(ActorSystem("clickhouseClientTestSystem", config.getConfig("crobox.clickhouse.client")))
    with AnyFlatSpecLike
    with Matchers
    with BeforeAndAfterAll
    with ScalaFutures {

  implicit val materializer: Materializer = ActorMaterializer()
  implicit val ec: ExecutionContext       = system.dispatcher

  override implicit def patienceConfig: PatienceConfig = PatienceConfig(1.seconds, 50.millis)

  override protected def afterAll(): Unit = {
    try super.afterAll()
    finally Await.result(system.terminate(), 10.seconds)
  }

  def randomUUID: UUID = UUID.randomUUID

  def randomString: String = Random.alphanumeric.take(10).mkString

  def randomInt: Int = Random.nextInt(100000)
}
Example 44
Source File: ClickhouseClientAsyncSpec.scala From clickhouse-scala-client with GNU Lesser General Public License v3.0 | 5 votes |
package com.crobox.clickhouse

import akka.actor.{ActorRef, ActorSystem}
import akka.http.scaladsl.model.Uri
import akka.pattern.ask
import akka.stream.{ActorMaterializer, Materializer}
import akka.testkit.TestKit
import akka.util.Timeout
import akka.util.Timeout.durationToTimeout
import com.crobox.clickhouse.balancing.HostBalancer
import com.crobox.clickhouse.balancing.discovery.ConnectionManagerActor.GetConnection
import com.typesafe.config.{Config, ConfigFactory}
import org.scalatest._
import org.scalatest.flatspec.AsyncFlatSpecLike
import org.scalatest.matchers.should.Matchers

import scala.concurrent.{Await, Future}
import scala.concurrent.duration._

abstract class ClickhouseClientAsyncSpec(val config: Config = ConfigFactory.load())
    extends TestKit(ActorSystem("clickhouseClientAsyncTestSystem", config.getConfig("crobox.clickhouse.client")))
    with AsyncFlatSpecLike
    with Matchers
    with BeforeAndAfterAll
    with BeforeAndAfterEach {

  implicit val timeout: Timeout           = 5.second
  implicit val materializer: Materializer = ActorMaterializer()

  override protected def afterAll(): Unit = {
    try super.afterAll()
    finally Await.result(system.terminate(), 10.seconds)
  }

  def requestParallelHosts(balancer: HostBalancer, connections: Int = 10): Future[Seq[Uri]] =
    Future.sequence(
      (1 to connections)
        .map(_ => {
          balancer.nextHost
        })
    )

  def getConnections(manager: ActorRef, connections: Int = 10): Future[Seq[Uri]] =
    Future.sequence(
      (1 to connections)
        .map(_ => {
          (manager ? GetConnection()).mapTo[Uri]
        })
    )

  // TODO change this methods to custom matchers
  def returnsConnectionsInRoundRobinFashion(manager: ActorRef, expectedConnections: Set[Uri]): Future[Assertion] = {
    val RequestConnectionsPerHost = 100
    getConnections(manager, RequestConnectionsPerHost * expectedConnections.size)
      .map(connections => {
        expectedConnections.foreach(
          uri =>
            connections
              .count(_ == uri) shouldBe (RequestConnectionsPerHost +- RequestConnectionsPerHost / 10) //10% delta for warm-up phase
        )
        succeed
      })
  }
}
Example 45
Source File: StageLogging.scala From akka-stream-sqs with Apache License 2.0 | 5 votes |
package me.snov.akka.sqs.shape

import akka.event.{LoggingAdapter, NoLogging}
import akka.stream.ActorMaterializer
import akka.stream.stage.GraphStageLogic

private[sqs] trait StageLogging { self: GraphStageLogic =>
  private var loggingAdapter: LoggingAdapter = _

  def log: LoggingAdapter = {
    if (loggingAdapter eq null) {
      materializer match {
        case actorMaterializer: ActorMaterializer =>
          loggingAdapter = akka.event.Logging(actorMaterializer.system, self.getClass)
        case _ =>
          loggingAdapter = NoLogging
      }
    }

    loggingAdapter
  }
}
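Recent Akka versions make a hand-rolled trait like this unnecessary: akka.stream.stage.StageLogging (shipped with Akka Streams since the 2.5 line) gives a GraphStageLogic a log adapter directly. A minimal sketch of a toy stage using the built-in trait (the stage itself is illustrative):

import akka.stream.{ Attributes, Outlet, SourceShape }
import akka.stream.stage.{ GraphStage, GraphStageLogic, OutHandler, StageLogging }

// A toy single-element source whose logic mixes in the built-in StageLogging.
class OneShotSource extends GraphStage[SourceShape[String]] {
  val out: Outlet[String] = Outlet("OneShotSource.out")
  override val shape: SourceShape[String] = SourceShape(out)

  override def createLogic(inheritedAttributes: Attributes): GraphStageLogic =
    new GraphStageLogic(shape) with StageLogging {
      setHandler(out, new OutHandler {
        override def onPull(): Unit = {
          log.debug("downstream pulled, emitting one element")
          push(out, "hello")
          completeStage()
        }
      })
    }
}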
Example 46
Source File: TestHttpProxy.scala From akka-stream-sqs with Apache License 2.0 | 5 votes |
package me.snov.akka.sqs

import akka.actor.{ActorSystem, Terminated}
import akka.http.scaladsl.Http
import akka.http.scaladsl.server.{RequestContext, Route}
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.{Sink, Source}

import scala.concurrent.{Await, Future}
import scala.concurrent.duration._

class TestHttpProxy(
  interface: String = "localhost",
  port: Int,
  remoteHost: String = "localhost",
  remotePort: Int = 9324
) {

  implicit var system: ActorSystem = createActorSystem()

  private def createActorSystem() = ActorSystem("test-http-server")

  def start(): Unit = {
    implicit val materializer: ActorMaterializer = ActorMaterializer()
    implicit val executionContext = system.dispatcher

    val proxy = Route { context: RequestContext =>
      context.log.debug("Opening connection to %s:%d".format(remoteHost, remotePort))
      Source.single(context.request)
        .via(Http(system).outgoingConnection(remoteHost, remotePort))
        .runWith(Sink.head)
        .flatMap(context.complete(_))
    }

    Http().bindAndHandle(handler = proxy, interface = interface, port = port)
  }

  def stop(): Unit = {
    Await.ready(system.terminate(), 1.second)
  }

  def asyncStartAfter(d: FiniteDuration) = {
    system = createActorSystem()
    system.scheduler.scheduleOnce(d, new Runnable {
      override def run(): Unit = start()
    })(system.dispatcher)
  }
}
Example 47
Source File: RokkuS3Proxy.scala From rokku with Apache License 2.0 | 5 votes |
package com.ing.wbaa.rokku.proxy

import akka.Done
import akka.actor.ActorSystem
import akka.http.scaladsl.Http
import akka.http.scaladsl.server.Directives._
import akka.stream.ActorMaterializer
import com.ing.wbaa.rokku.proxy.api.{ AdminService, HealthService, PostRequestActions, ProxyServiceWithListAllBuckets }
import com.ing.wbaa.rokku.proxy.config.HttpSettings
import com.typesafe.scalalogging.LazyLogging

import scala.concurrent.{ ExecutionContext, Future }
import scala.util.{ Failure, Success }

trait RokkuS3Proxy extends LazyLogging with ProxyServiceWithListAllBuckets with PostRequestActions with HealthService with AdminService {

  protected[this] implicit def system: ActorSystem

  implicit val materializer: ActorMaterializer = ActorMaterializer()(system)

  protected[this] def httpSettings: HttpSettings

  protected[this] implicit val executionContext: ExecutionContext = system.dispatcher

  // The routes we serve.
  final val allRoutes = adminRoute ~ healthRoute ~ proxyServiceRoute

  // Details about the server binding.
  lazy val startup: Future[Http.ServerBinding] =
    Http(system).bindAndHandle(allRoutes, httpSettings.httpBind, httpSettings.httpPort)
      .andThen {
        case Success(binding) => logger.info(s"Proxy service started listening: ${binding.localAddress}")
        case Failure(reason)  => logger.error("Proxy service failed to start.", reason)
      }

  def shutdown(): Future[Done] = {
    startup.flatMap(_.unbind)
      .andThen {
        case Success(_)      => logger.info("Proxy service stopped.")
        case Failure(reason) => logger.error("Proxy service failed to stop.", reason)
      }
  }
}
Example 48
Source File: RequestHandlerS3Cache.scala From rokku with Apache License 2.0 | 5 votes |
package com.ing.wbaa.rokku.proxy.handler

import akka.http.scaladsl.model._
import akka.http.scaladsl.model.headers.RawHeader
import akka.stream.ActorMaterializer
import akka.util.ByteString
import com.ing.wbaa.rokku.proxy.cache.{ CacheRulesV1, HazelcastCacheWithConf }
import com.ing.wbaa.rokku.proxy.data.RequestId
import com.ing.wbaa.rokku.proxy.handler.parsers.CacheHelpers._
import com.ing.wbaa.rokku.proxy.handler.parsers.{ GetCacheValueObject, HeadCacheValueObject }
import com.ing.wbaa.rokku.proxy.handler.parsers.RequestParser.AWSRequestType

import scala.concurrent.Future
import scala.concurrent.duration._
import scala.util.{ Failure, Success }

trait RequestHandlerS3Cache extends HazelcastCacheWithConf with RequestHandlerS3 with CacheRulesV1 {

  private val logger = new LoggerHandlerWithId
  implicit val materializer: ActorMaterializer

  def awsRequestFromRequest(request: HttpRequest): AWSRequestType

  def isEligibleToBeCached(request: HttpRequest)(implicit id: RequestId): Boolean

  def isEligibleToBeInvalidated(request: HttpRequest)(implicit id: RequestId): Boolean

  override protected[this] def fireRequestToS3(request: HttpRequest)(implicit id: RequestId): Future[HttpResponse] = {
    if (storageS3Settings.isCacheEnabled) {
      if (isEligibleToBeCached(request)) {
        getObjectFromCacheOrStorage(request)
      } else {
        invalidateEntryIfObjectInCache(request)
        super.fireRequestToS3(request)
      }
    } else {
      super.fireRequestToS3(request)
    }
  }

  private def readFromStorageAndUpdateCache(request: HttpRequest)(implicit id: RequestId): Future[Unit] = {
    Future {
      val key = getKey(request)
      super.fireRequestToS3(request).flatMap { response =>
        if (isHead(request)) {
          response.entity.discardBytes()
          Future.successful(HeadCacheValueObject(response.headers, response.entity.contentLengthOption, response.status))
        } else if (isEligibleSize(response)) {
          //todo: add head request to reduce amount of get's
          response.entity.toStrict(getStrictCacheDownloadTimeoutInSeconds.seconds, getMaxEligibleCacheObjectSizeInBytes).flatMap { r =>
            r.dataBytes.runFold(ByteString.empty) { case (acc, b) => acc ++ b }
          }.map(bs => GetCacheValueObject(bs))
        } else {
          response.entity.discardBytes()
          Future.failed(new ObjectTooBigException())
        }
      }.onComplete {
        case Failure(exception: ObjectTooBigException) => logger.debug("Object too big to be stored in cache {}", key, exception)
        case Failure(exception) => logger.error("Cannot store object {} in cache", key, exception)
        case Success(headValue: HeadCacheValueObject) =>
          val value = processHeadersForCache(headValue.headers, headValue.contentLength, headValue.statusCode)
          logger.debug("head object cache value {} for key {}", value, key)
          putObject(key, ByteString(value))
        case Success(getValue: GetCacheValueObject) => putObject(key, getValue.data)
      }
    }
  }

  class ObjectTooBigException extends Exception
}
Example 49
Source File: FilterRecursiveMultiDelete.scala From rokku with Apache License 2.0 | 5 votes |
package com.ing.wbaa.rokku.proxy.handler

import akka.stream.ActorMaterializer
import akka.stream.alpakka.xml.scaladsl.XmlParsing
import akka.stream.alpakka.xml.{ EndElement, StartElement, TextEvent }
import akka.stream.scaladsl.{ Sink, Source }
import akka.util.ByteString

import scala.collection.immutable
import scala.collection.mutable.ListBuffer
import scala.concurrent.Future

object FilterRecursiveMultiDelete {

  def exctractMultideleteObjectsFlow(source: Source[ByteString, Any])(implicit materializer: ActorMaterializer): Future[Seq[String]] = {
    var isKeyTag = false

    source
      .via(XmlParsing.parser)
      .statefulMapConcat(() => {
        val keys = new ListBuffer[String]
        isKeyTag = false

        parseEvent =>
          parseEvent match {
            case e: StartElement if e.localName.startsWith("Delete") =>
              keys.clear()
              immutable.Seq.empty
            case e: StartElement if e.localName == "Key" =>
              isKeyTag = true
              immutable.Seq.empty
            case e: EndElement if e.localName == "Key" =>
              isKeyTag = false
              immutable.Seq.empty
            case e: TextEvent =>
              if (isKeyTag) keys.append(e.text)
              immutable.Seq.empty
            case e: EndElement if e.localName == "Delete" =>
              immutable.Seq(keys).flatten
            case _ =>
              immutable.Seq.empty
          }
      }).runWith(Sink.seq)
  }
}
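A quick usage sketch for the extractor above (the surrounding object and XML payload are illustrative): feed it any Source[ByteString, _] carrying an S3 multi-delete body and it folds the parsed keys into a single Future.

import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.Source
import akka.util.ByteString
import com.ing.wbaa.rokku.proxy.handler.FilterRecursiveMultiDelete._

object MultiDeleteUsage extends App {
  implicit val system: ActorSystem = ActorSystem("multi-delete-usage")
  implicit val mat: ActorMaterializer = ActorMaterializer()
  import system.dispatcher

  val body = Source.single(ByteString(
    "<Delete><Object><Key>user/file1</Key></Object><Object><Key>user/file2</Key></Object></Delete>"))

  exctractMultideleteObjectsFlow(body).foreach { keys =>
    println(keys) // Vector(user/file1, user/file2)
    system.terminate()
  }
}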
Example 50
Source File: HttpRequestRecorderItTest.scala From rokku with Apache License 2.0 | 5 votes |
package com.ing.wbaa.rokku.proxy.persistence

import akka.Done
import akka.actor.{ActorSystem, Props}
import akka.http.scaladsl.model.HttpRequest
import akka.http.scaladsl.model.Uri.{Authority, Host}
import akka.persistence.cassandra.query.scaladsl.CassandraReadJournal
import akka.persistence.query.PersistenceQuery
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.Sink
import com.amazonaws.services.s3.AmazonS3
import com.ing.wbaa.rokku.proxy.RokkuS3Proxy
import com.ing.wbaa.rokku.proxy.config.{HttpSettings, KafkaSettings, StorageS3Settings}
import com.ing.wbaa.rokku.proxy.data._
import com.ing.wbaa.rokku.proxy.handler.parsers.RequestParser
import com.ing.wbaa.rokku.proxy.handler.{FilterRecursiveListBucketHandler, RequestHandlerS3Cache}
import com.ing.wbaa.rokku.proxy.provider.{AuditLogProvider, MessageProviderKafka, SignatureProviderAws}
import com.ing.wbaa.rokku.proxy.queue.MemoryUserRequestQueue
import com.ing.wbaa.testkit.RokkuFixtures
import org.scalatest.Assertion
import org.scalatest.diagrams.Diagrams
import org.scalatest.wordspec.AsyncWordSpec

import scala.concurrent.duration._
import scala.concurrent.{Await, Future}

class HttpRequestRecorderItTest extends AsyncWordSpec with Diagrams with RokkuFixtures {
  implicit val testSystem: ActorSystem = ActorSystem.create("test-system")
  implicit val mat: ActorMaterializer = ActorMaterializer()

  val rokkuHttpSettings: HttpSettings = new HttpSettings(testSystem.settings.config) {
    override val httpPort: Int = 0
    override val httpBind: String = "127.0.0.1"
  }

  def withS3SdkToMockProxy(testCode: AmazonS3 => Assertion): Future[Assertion] = {
    val proxy: RokkuS3Proxy = new RokkuS3Proxy with RequestHandlerS3Cache with SignatureProviderAws
      with FilterRecursiveListBucketHandler with MessageProviderKafka with AuditLogProvider
      with MemoryUserRequestQueue with RequestParser {
      override implicit lazy val system: ActorSystem = testSystem
      override val httpSettings: HttpSettings = rokkuHttpSettings

      override def isUserAuthorizedForRequest(request: S3Request, user: User)(implicit id: RequestId): Boolean = true

      override def isUserAuthenticated(httpRequest: HttpRequest, awsSecretKey: AwsSecretKey)(implicit id: RequestId): Boolean = true

      override val storageS3Settings: StorageS3Settings = StorageS3Settings(testSystem)
      override val kafkaSettings: KafkaSettings = KafkaSettings(testSystem)

      override def areCredentialsActive(awsRequestCredential: AwsRequestCredential)(implicit id: RequestId): Future[Option[User]] =
        Future(Some(User(UserRawJson("userId", Some(Set("group")), "accesskey", "secretkey", None))))

      def createLineageFromRequest(httpRequest: HttpRequest, userSTS: User, userIPs: UserIps)(implicit id: RequestId): Future[Done] = Future.successful(Done)

      override protected def auditEnabled: Boolean = false

      override val requestPersistenceEnabled: Boolean = true
      override val configuredPersistenceId: String = "localhost-1"
    }
    proxy.startup.map { binding =>
      try testCode(getAmazonS3(
        authority = Authority(Host(binding.localAddress.getAddress), binding.localAddress.getPort)
      ))
      finally proxy.shutdown()
    }
  }

  private val CHECKER_PERSISTENCE_ID = "localhost-1"
  val requestRecorder = testSystem.actorOf(Props(classOf[HttpRequestRecorder]), CHECKER_PERSISTENCE_ID)

  val queries = PersistenceQuery(testSystem)
    .readJournalFor[CassandraReadJournal](CassandraReadJournal.Identifier)

  "S3 Proxy" should {
    s"with Request Recorder" that {
      "persists requests in Cassandra" in withS3SdkToMockProxy { sdk =>
        withBucket(sdk) { bucketName =>
          Thread.sleep(6000)
          val storedInCassandraF = queries.currentEventsByPersistenceId(CHECKER_PERSISTENCE_ID, 1L, Long.MaxValue)
            .map(_.event)
            .runWith(Sink.seq)
            .mapTo[Seq[ExecutedRequestEvt]]
          val r = Await.result(storedInCassandraF, 5.seconds).filter(_.httpRequest.getUri().toString.contains(bucketName))
          assert(r.size == 1)
          assert(r.head.userSTS.userName.value == "userId")
        }
      }
    }
  }
}
Example 51
Source File: AuthenticationProviderSTSItTest.scala From rokku with Apache License 2.0 | 5 votes |
package com.ing.wbaa.rokku.proxy.provider

import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import com.amazonaws.services.securitytoken.model.{AssumeRoleRequest, GetSessionTokenRequest}
import com.ing.wbaa.rokku.proxy.config.StsSettings
import com.ing.wbaa.rokku.proxy.data._
import com.ing.wbaa.testkit.awssdk.StsSdkHelpers
import com.ing.wbaa.testkit.oauth.OAuth2TokenRequest
import org.scalatest.Assertion
import org.scalatest.diagrams.Diagrams
import org.scalatest.wordspec.AsyncWordSpec

import scala.concurrent.{ExecutionContext, Future}

class AuthenticationProviderSTSItTest extends AsyncWordSpec with Diagrams
  with AuthenticationProviderSTS
  with StsSdkHelpers
  with OAuth2TokenRequest {
  override implicit val testSystem: ActorSystem = ActorSystem.create("test-system")
  override implicit val system: ActorSystem = testSystem
  override implicit val executionContext: ExecutionContext = testSystem.dispatcher
  override implicit val materializer: ActorMaterializer = ActorMaterializer()(testSystem)

  override val stsSettings: StsSettings = StsSettings(testSystem)

  implicit val requestId: RequestId = RequestId("test")

  private val validKeycloakCredentials = Map(
    "grant_type" -> "password",
    "username" -> "testuser",
    "password" -> "password",
    "client_id" -> "sts-rokku"
  )
  private val userOneKeycloakCredentials = Map(
    "grant_type" -> "password",
    "username" -> "userone",
    "password" -> "password",
    "client_id" -> "sts-rokku"
  )

  def withAwsCredentialsValidInSTS(testCode: AwsRequestCredential => Future[Assertion]): Future[Assertion] = {
    val stsSdk = getAmazonSTSSdk(StsSettings(testSystem).stsBaseUri)
    retrieveKeycloackToken(validKeycloakCredentials).flatMap { keycloakToken =>
      val cred = stsSdk.getSessionToken(new GetSessionTokenRequest()
        .withTokenCode(keycloakToken.access_token))
        .getCredentials
      testCode(AwsRequestCredential(AwsAccessKey(cred.getAccessKeyId), Some(AwsSessionToken(cred.getSessionToken))))
    }
  }

  def withAssumeRoleInSTS(testCode: AwsRequestCredential => Future[Assertion]): Future[Assertion] = {
    val stsSdk = getAmazonSTSSdk(StsSettings(testSystem).stsBaseUri)
    retrieveKeycloackToken(userOneKeycloakCredentials).flatMap { keycloakToken =>
      val assumeRoleReq = new AssumeRoleRequest().withTokenCode(keycloakToken.access_token)
      assumeRoleReq.setRoleArn("arn:aws:iam::account-id:role/admin")
      assumeRoleReq.setRoleSessionName("testRole")
      val cred = stsSdk.assumeRole(assumeRoleReq).getCredentials
      testCode(AwsRequestCredential(AwsAccessKey(cred.getAccessKeyId), Some(AwsSessionToken(cred.getSessionToken))))
    }
  }

  "Authentication Provider STS" should {
    "check authentication" that {
      "succeeds for valid credentials" in {
        withAwsCredentialsValidInSTS { awsCredential =>
          areCredentialsActive(awsCredential).map { userResult =>
            assert(userResult.map(_.userName).contains(UserName("testuser")))
            assert(userResult.map(_.userGroups).head.contains(UserGroup("testgroup")))
            assert(userResult.map(_.userGroups).head.contains(UserGroup("group3")))
            assert(userResult.map(_.userGroups).head.size == 2)
            assert(userResult.exists(_.accessKey.value.length == 32))
            assert(userResult.exists(_.secretKey.value.length == 32))
          }
        }
      }

      "fail when user is not authenticated" in {
        areCredentialsActive(AwsRequestCredential(AwsAccessKey("notauthenticated"), Some(AwsSessionToken("okSessionToken")))).map { userResult =>
          assert(userResult.isEmpty)
        }
      }

      "succeeds for valid role" in {
        withAssumeRoleInSTS { awsCredential =>
          areCredentialsActive(awsCredential).map { roleResult =>
            assert(roleResult.map(_.userRole).contains(UserAssumeRole("admin")))
            assert(roleResult.map(_.userGroups).contains(Set()))
            assert(roleResult.exists(_.accessKey.value.length == 32))
            assert(roleResult.exists(_.secretKey.value.length == 32))
          }
        }
      }
    }
  }
}
Example 52
Source File: FilterRecursiveListBucketHandlerSpec.scala From rokku with Apache License 2.0 | 5 votes |
package com.ing.wbaa.rokku.proxy.handler

import akka.NotUsed
import akka.actor.ActorSystem
import akka.http.scaladsl.model.{ HttpMethods, MediaTypes, RemoteAddress, Uri }
import akka.stream.scaladsl.{ Sink, Source }
import akka.stream.{ ActorMaterializer, Materializer }
import akka.util.ByteString
import com.ing.wbaa.rokku.proxy.data._
import org.scalatest.diagrams.Diagrams
import org.scalatest.wordspec.AsyncWordSpec

import scala.concurrent.ExecutionContext

class FilterRecursiveListBucketHandlerSpec extends AsyncWordSpec with Diagrams with FilterRecursiveListBucketHandler {

  implicit val system: ActorSystem = ActorSystem.create("test-system")
  override implicit val executionContext: ExecutionContext = system.dispatcher
  implicit val requestId: RequestId = RequestId("test")

  implicit def materializer: Materializer = ActorMaterializer()(system)

  def isUserAuthorizedForRequest(request: S3Request, user: User)(implicit id: RequestId): Boolean = {
    user match {
      case User(userName, _, _, _, _) if userName.value == "admin" => true
      case User(userName, _, _, _, _) if userName.value == "user1" =>
        request match {
          case S3Request(_, s3BucketPath, _, _, _, _, _) =>
            if (s3BucketPath.get.startsWith("/demobucket/user/user2")) false else true
        }
      case _ => true
    }
  }

  val listBucketXmlResponse: String = scala.io.Source.fromResource("listBucket.xml").mkString.stripMargin.trim

  val adminUser = User(UserRawJson("admin", Some(Set.empty[String]), "a", "s", None))
  val user1 = User(UserRawJson("user1", Some(Set.empty[String]), "a", "s", None))
  val s3Request = S3Request(AwsRequestCredential(AwsAccessKey(""), None), Uri.Path("/demobucket/user"), HttpMethods.GET, RemoteAddress.Unknown, HeaderIPs(), MediaTypes.`text/plain`)
  val data: Source[ByteString, NotUsed] = Source.single(ByteString.fromString(listBucketXmlResponse))

  "List bucket object response" should {
    "returns all objects to admin" in {
      data.via(filterRecursiveListObjects(adminUser, s3Request)).map(_.utf8String).runWith(Sink.seq).map(x => {
        assert(x.mkString.stripMargin.equals(listBucketXmlResponse))
      })
    }

    val filteredXml: String = scala.io.Source.fromResource("filteredListBucket.xml").mkString.stripMargin.trim
    "returns filtered object for user 1" in {
      data.via(filterRecursiveListObjects(user1, s3Request)).map(_.utf8String).runWith(Sink.seq).map(x => {
        assert(x.mkString.stripMargin.replaceAll("[\n\r\\s]", "")
          .equals(filteredXml.replaceAll("[\n\r\\s]", "")))
      })
    }
  }
}
Example 53
Source File: FilterRecursiveMultiDeleteSpec.scala From rokku with Apache License 2.0 | 5 votes |
package com.ing.wbaa.rokku.proxy.handler

import akka.NotUsed
import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.Source
import akka.util.ByteString
import com.ing.wbaa.rokku.proxy.handler.FilterRecursiveMultiDelete._
import org.scalatest.diagrams.Diagrams
import org.scalatest.wordspec.AsyncWordSpec

import scala.collection.mutable.ListBuffer
import scala.concurrent.ExecutionContext
import scala.util.Random

class FilterRecursiveMultiDeleteSpec extends AsyncWordSpec with Diagrams {

  implicit val system: ActorSystem = ActorSystem.create("test-system")
  override implicit val executionContext: ExecutionContext = system.dispatcher

  implicit def materializer: ActorMaterializer = ActorMaterializer()(system)

  val multiDeleteRequestXml: String = scala.io.Source.fromResource("multiDeleteRequest.xml").mkString.stripMargin.trim
  val multiDeleteRequestV4Xml: String = scala.io.Source.fromResource("multiDeleteRequestV4.xml").mkString.stripMargin.trim
  val multiPartComplete: String = scala.io.Source.fromResource("multipartUploadComplete.xml").mkString.stripMargin.trim
  val data: Source[ByteString, NotUsed] = Source.single(ByteString.fromString(multiDeleteRequestXml))
  val dataV4: Source[ByteString, NotUsed] = Source.single(ByteString.fromString(multiDeleteRequestV4Xml))
  val otherData: Source[ByteString, NotUsed] = Source.single(ByteString.fromString(multiPartComplete))

  val numberOfObjects = 1000

  "multiDelete request" should {
    "should be parsed to objects list" in {
      exctractMultideleteObjectsFlow(data).map { r =>
        assert(r.contains("testuser/file1"))
        assert(r.contains("testuser/file2"))
        assert(r.contains("testuser/file3"))
      }
    }

    "v4 should be parsed to objects list" in {
      exctractMultideleteObjectsFlow(dataV4).map { r =>
        assert(r.contains("testuser/issue"))
        assert(!r.contains("true"))
      }
    }

    "should return empty list" in {
      exctractMultideleteObjectsFlow(otherData).map(r => assert(r == Vector()))
    }

    "should return correct size for large xml objects" in {
      val rand = new Random()
      val doc = new ListBuffer[String]()
      for (c <- 1 to numberOfObjects)
        doc += s"<Object><Key>testuser/one/two/three/four/five/six/seven/eight/nine/ten/eleven/twelve/sub$c/${rand.alphanumeric.take(32).mkString}=${rand.alphanumeric.take(12).mkString}.txt</Key></Object>"

      exctractMultideleteObjectsFlow(Source.single(ByteString("<Delete>" + doc.mkString + "</Delete>"))).map { r =>
        assert(r.length == numberOfObjects)
      }
    }
  }
}
Example 54
Source File: DemoApp.scala From constructr-consul with Apache License 2.0 | 5 votes |
package com.tecsisa.constructr.coordination
package demo

import akka.actor.{ ActorRef, ActorSystem, Address }
import akka.http.scaladsl.Http
import akka.http.scaladsl.server.Directives
import akka.pattern.ask
import akka.stream.ActorMaterializer
import akka.util.Timeout
import com.typesafe.config.ConfigFactory

import scala.concurrent.duration.{ Duration, MILLISECONDS }

object DemoApp {

  val conf = ConfigFactory.load()
  val hostname = conf.getString("demo.hostname")
  val httpPort = conf.getInt("demo.port")

  def main(args: Array[String]): Unit = {
    // Create an Akka system
    implicit val system = ActorSystem("ConstructR-Consul")
    import system.dispatcher
    implicit val mat = ActorMaterializer()

    // Create an actor that handles cluster domain events
    val cluster = system.actorOf(SimpleClusterListener.props, SimpleClusterListener.Name)

    Http().bindAndHandle(route(cluster), hostname, httpPort)
  }

  private def route(cluster: ActorRef) = {
    import Directives._

    implicit val timeout = Timeout(
      Duration(
        conf.getDuration("demo.cluster-view-timeout").toMillis,
        MILLISECONDS
      )
    )

    path("member-nodes") {
      // List cluster nodes
      get {
        onSuccess(
          (cluster ? SimpleClusterListener.GetMemberNodes).mapTo[Set[Address]]
        )(addresses => complete(addresses.mkString("\n")))
      }
    }
  }
}
Example 55
Source File: DarwinService.scala From darwin with Apache License 2.0 | 5 votes |
package it.agilelab.darwin.server.rest

import akka.actor.ActorSystem
import akka.http.scaladsl.model.{HttpResponse, StatusCodes}
import akka.http.scaladsl.server.directives.DebuggingDirectives
import akka.http.scaladsl.server.{Directives, Route}
import akka.stream.ActorMaterializer
import akka.stream.Attributes.LogLevels
import it.agilelab.darwin.manager.AvroSchemaManager
import org.apache.avro.Schema

trait DarwinService extends Service with Directives with DebuggingDirectives with JsonSupport {

  val manager: AvroSchemaManager

  override def route: Route = logRequestResult(("darwin", LogLevels.Debug)) {
    get {
      path("schemas" / LongNumber.?) {
        case Some(id) =>
          manager.getSchema(id) match {
            case Some(schema) => complete(schema)
            case None => complete {
              HttpResponse(StatusCodes.NotFound)
            }
          }
        case None => complete(manager.getAll)
      }
    } ~ post {
      path("schemas" / PathEnd) {
        entity(as[Seq[Schema]]) { schemas =>
          complete {
            manager.registerAll(schemas).map(_._1)
          }
        }
      }
    }
  }
}

object DarwinService {
  def apply(asm: AvroSchemaManager)(implicit s: ActorSystem, m: ActorMaterializer): DarwinService = new DarwinService {
    override implicit val materializer: ActorMaterializer = m
    override implicit val system: ActorSystem = s
    override val manager: AvroSchemaManager = asm
  }
}
Example 56
Source File: HttpApp.scala From darwin with Apache License 2.0 | 5 votes |
package it.agilelab.darwin.server.rest

import java.util.concurrent.Executor

import akka.actor.ActorSystem
import akka.http.scaladsl.Http
import akka.http.scaladsl.server.RouteConcatenation
import akka.stream.ActorMaterializer
import com.typesafe.config.Config
import it.agilelab.darwin.common.Logging

import scala.concurrent.duration.Duration
import scala.concurrent.{Await, ExecutionContext, ExecutionContextExecutor}

class HttpApp(config: Config, services: Service*)
             (implicit system: ActorSystem, materializer: ActorMaterializer) extends Logging {

  def run(): Unit = {
    val interface = config.getString("interface")
    val port = config.getInt("port")

    val route = RouteConcatenation.concat(services.map(_.route): _*)

    log.info("Starting http server on {}:{}", interface, port)
    val eventuallyBinding = Http().bindAndHandle(route, interface, port)
    val binding = Await.result(eventuallyBinding, Duration.Inf)
    log.info("Started http server on {}:{}", interface, port)

    val shutdownThread = new Thread(new Runnable {
      override def run(): Unit = {
        implicit val ec: ExecutionContext = newSameThreadExecutor
        log.info("Received shutdown hook")

        val termination = for {
          _ <- binding.unbind()
          terminated <- system.terminate()
        } yield terminated

        Await.ready(termination, Duration.Inf)
        log.info("Shutdown")
      }
    })

    shutdownThread.setName("shutdown")
    Runtime.getRuntime.addShutdownHook(shutdownThread)
    log.info("registered shutdown hook")
  }

  private def newSameThreadExecutor: ExecutionContextExecutor =
    ExecutionContext.fromExecutor(new Executor {
      override def execute(command: Runnable): Unit = command.run()
    })
}

object HttpApp {
  def apply(config: Config, services: Service*)(implicit system: ActorSystem, materializer: ActorMaterializer): HttpApp =
    new HttpApp(config, services: _*)
}
Example 57
Source File: AccessTokenSpec.scala From akka-http-oauth2-client with Apache License 2.0 | 5 votes |
package com.github.dakatsuka.akka.http.oauth2.client

import akka.actor.ActorSystem
import akka.http.scaladsl.model.{ HttpEntity, HttpResponse, StatusCodes }
import akka.http.scaladsl.model.ContentTypes.`application/json`
import akka.stream.{ ActorMaterializer, Materializer }
import org.scalatest.concurrent.ScalaFutures
import org.scalatest.time.{ Millis, Seconds, Span }
import org.scalatest.{ BeforeAndAfterAll, DiagrammedAssertions, FlatSpec }

import scala.concurrent.{ Await, ExecutionContext }
import scala.concurrent.duration.Duration

class AccessTokenSpec extends FlatSpec with DiagrammedAssertions with ScalaFutures with BeforeAndAfterAll {
  implicit val system: ActorSystem = ActorSystem()
  implicit val ec: ExecutionContext = system.dispatcher
  implicit val materializer: Materializer = ActorMaterializer()
  implicit val defaultPatience: PatienceConfig = PatienceConfig(timeout = Span(5, Seconds), interval = Span(700, Millis))

  override def afterAll(): Unit = {
    Await.ready(system.terminate(), Duration.Inf)
  }

  behavior of "AccessToken"

  it should "apply from HttpResponse" in {
    val accessToken = "xxx"
    val tokenType = "bearer"
    val expiresIn = 86400
    val refreshToken = "yyy"

    val httpResponse = HttpResponse(
      status = StatusCodes.OK,
      headers = Nil,
      entity = HttpEntity(
        `application/json`,
        s"""
           |{
           |  "access_token": "$accessToken",
           |  "token_type": "$tokenType",
           |  "expires_in": $expiresIn,
           |  "refresh_token": "$refreshToken"
           |}
        """.stripMargin
      )
    )

    val result = AccessToken(httpResponse)

    whenReady(result) { token =>
      assert(token.accessToken == accessToken)
      assert(token.tokenType == tokenType)
      assert(token.expiresIn == expiresIn)
      assert(token.refreshToken.contains(refreshToken))
    }
  }
}
Example 58
Source File: JustinDB.scala From JustinDB with Apache License 2.0 | 5 votes |
package justin.db

import akka.actor.ActorSystem
import akka.cluster.Cluster
import akka.cluster.http.management.ClusterHttpManagement
import akka.http.scaladsl.Http
import akka.http.scaladsl.server.Directives._
import akka.stream.{ActorMaterializer, Materializer}
import buildinfo.BuildInfo
import com.typesafe.scalalogging.StrictLogging
import justin.db.actors.{StorageNodeActor, StorageNodeActorRef}
import justin.db.client.ActorRefStorageNodeClient
import justin.db.cluster.datacenter.Datacenter
import justin.db.consistenthashing.{NodeId, Ring}
import justin.db.replica.N
import justin.db.storage.PluggableStorageProtocol
import justin.db.storage.provider.StorageProvider
import justin.httpapi.{BuildInfoRouter, HealthCheckRouter, HttpRouter}

import scala.concurrent.duration._
import scala.concurrent.{Await, ExecutionContext, Promise}
import scala.language.reflectiveCalls

// $COVERAGE-OFF$
final class JustinDB

object JustinDB extends StrictLogging {

  private[this] def validConfiguration(justinDBConfig: JustinDBConfig): Unit = {
    require(justinDBConfig.replication.N > 0, "replication N factor can't be smaller or equal 0")
    require(justinDBConfig.ring.`members-count` > 0, "members-counter can't be smaller or equal 0")
    require(justinDBConfig.ring.partitions > 0, "ring partitions can't be smaller or equal 0")
    require(justinDBConfig.ring.partitions >= justinDBConfig.ring.`members-count`, "number of ring partitions can't be smaller than number of members-count")
    require(justinDBConfig.replication.N <= justinDBConfig.ring.`members-count`, "replication N factor can't be bigger than defined members-count number")
  }

  private[this] def initStorage(justinConfig: JustinDBConfig) = {
    val provider = StorageProvider.apply(justinConfig.storage.provider)
    logger.info("Storage provider: " + provider.name)
    provider.init
  }

  def init(justinConfig: JustinDBConfig)(implicit actorSystem: ActorSystem): JustinDB = {
    validConfiguration(justinConfig)

    val processOrchestrator = Promise[JustinDB]

    implicit val executor: ExecutionContext = actorSystem.dispatcher
    implicit val materializer: Materializer = ActorMaterializer()

    val storage: PluggableStorageProtocol = initStorage(justinConfig)

    val cluster = Cluster(actorSystem)

    cluster.registerOnMemberUp {
      // STORAGE ACTOR
      val storageNodeActorRef = StorageNodeActorRef {
        val nodeId = NodeId(justinConfig.`kubernetes-hostname`.split("-").last.toInt)
        val ring = Ring(justinConfig.ring.`members-count`, justinConfig.ring.partitions)
        val n = N(justinConfig.replication.N)
        val datacenter = Datacenter(justinConfig.dc.`self-data-center`)

        actorSystem.actorOf(
          props = StorageNodeActor.props(nodeId, datacenter, storage, ring, n),
          name = StorageNodeActor.name(nodeId, datacenter)
        )
      }

      // AKKA-MANAGEMENT
      ClusterHttpManagement(cluster).start().map { _ =>
        logger.info("Cluster HTTP-Management is ready!")
      }.recover { case ex => processOrchestrator.failure(ex) }

      // HTTP API
      val routes = logRequestResult(actorSystem.name) {
        new HttpRouter(new ActorRefStorageNodeClient(storageNodeActorRef)).routes ~
          new HealthCheckRouter().routes ~
          new BuildInfoRouter().routes(BuildInfo.toJson)
      }
      Http()
        .bindAndHandle(routes, justinConfig.http.interface, justinConfig.http.port)
        .map { binding => logger.info(s"HTTP server started at ${binding.localAddress}"); processOrchestrator.trySuccess(new JustinDB) }
        .recover { case ex => logger.error("Could not start HTTP server", ex); processOrchestrator.failure(ex) }
    }

    Await.result(processOrchestrator.future, 2.minutes)
  }
}
// $COVERAGE-ON$
Example 59
Source File: CodebaseAnalyzerStreamApp.scala From CodeAnalyzerTutorial with Apache License 2.0 | 5 votes |
package tutor

import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import akka.stream.scaladsl._
import com.typesafe.scalalogging.StrictLogging
import tutor.utils.BenchmarkUtil

import scala.collection.mutable.ArrayBuffer
import scala.concurrent.Future
import scala.util.{Failure, Success}

object CodebaseAnalyzerStreamApp extends App with DirectoryScanner with SourceCodeAnalyzer with ReportFormatter with StrictLogging {

  implicit val system = ActorSystem("CodebaseAnalyzer")
  implicit val materializer = ActorMaterializer()
  implicit val ec = system.dispatcher

  val path = args(0)
  val beginTime = BenchmarkUtil.recordStart(s"analyze $path with akka stream")
  val files = scan(path, PresetFilters.knownFileTypes, PresetFilters.ignoreFolders).iterator
  var errorProcessingFiles: ArrayBuffer[Throwable] = ArrayBuffer.empty

  val done = Source.fromIterator(() => files).mapAsync(8)(path => Future {
    processFile(path)
  }).fold(CodebaseInfo.empty) { (acc, trySourceCodeInfo) =>
    trySourceCodeInfo match {
      case Success(sourceCodeInfo) => acc + sourceCodeInfo
      case Failure(e) =>
        errorProcessingFiles += e
        acc
    }
  }.runForeach(codebaseInfo => {
    println(format(codebaseInfo))
    println(s"there are ${errorProcessingFiles.size} files failed to process.")
  })

  done.onComplete { _ =>
    BenchmarkUtil.recordElapse(s"analyze $path with akka stream", beginTime)
    system.terminate()
  }
}
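One design note on the example above: mapAsync(8) preserves the order in which files were scanned, which costs throughput when fast results must wait behind a slow predecessor. Since the fold is order-insensitive, mapAsyncUnordered is a drop-in alternative; a minimal sketch (the processing function is a stand-in for processFile):

import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.{ Sink, Source }

import scala.concurrent.Future

object UnorderedProcessing extends App {
  implicit val system: ActorSystem = ActorSystem("unordered")
  implicit val materializer: ActorMaterializer = ActorMaterializer()
  import system.dispatcher

  def expensiveStep(i: Int): Future[Int] = Future(i * i) // stand-in for processFile

  // Emits elements downstream as they finish, regardless of upstream order.
  Source(1 to 100)
    .mapAsyncUnordered(8)(expensiveStep)
    .runWith(Sink.fold(0)(_ + _))
    .foreach { total => println(total); system.terminate() }
}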
Example 60
Source File: SampleFramework.scala From mesos-actor with Apache License 2.0 | 5 votes |
package com.adobe.api.platform.runtime.mesos.sample

import akka.actor.ActorSystem
import akka.pattern.ask
import akka.stream.ActorMaterializer
import akka.util.Timeout
import com.adobe.api.platform.runtime.mesos._
import java.time.Instant
import java.util.UUID
import scala.concurrent.Await
import scala.concurrent.Future
import scala.concurrent.duration._

object SampleFramework {

  def main(args: Array[String]): Unit = {
    implicit val system = ActorSystem("sample-framework-system")
    implicit val mat = ActorMaterializer()
    implicit val log = system.log
    implicit val ec = system.dispatcher

    val taskLaunchTimeout = Timeout(15 seconds)
    val taskDeleteTimeout = Timeout(10 seconds)
    val subscribeTimeout = Timeout(5 seconds)
    val teardownTimeout = Timeout(5 seconds)

    val mesosClientActor = system.actorOf(
      MesosClient.props(
        () => "sample-" + UUID.randomUUID(),
        "sample-framework",
        "http://192.168.99.100:5050",
        "*",
        30.seconds,
        taskStore = new LocalTaskStore))

    mesosClientActor
      .ask(Subscribe)(subscribeTimeout)
      .mapTo[SubscribeComplete]
      .onComplete(complete => {
        log.info("subscribe completed successfully...")
      })

    var taskCount = 0
    def nextName() = {
      taskCount += 1
      s"sample-task-${Instant.now.getEpochSecond}-${taskCount}"
    }
    def nextId() = "sample-task-" + UUID.randomUUID()

    (1 to 3).foreach(_ => {
      val task = TaskDef(
        nextId(),
        nextName(),
        "trinitronx/python-simplehttpserver",
        0.1,
        24,
        List(8080, 8081),
        Some(HealthCheckConfig(0)),
        commandDef = Some(CommandDef()))
      val launched: Future[TaskState] = mesosClientActor.ask(SubmitTask(task))(taskLaunchTimeout).mapTo[TaskState]
      launched map {
        case taskDetails: Running => {
          val taskHost = taskDetails.hostname
          val taskPorts = taskDetails.hostports
          log.info(
            s"launched task id ${taskDetails.taskId} with state ${taskDetails.taskStatus.getState} on agent ${taskHost} listening on ports ${taskPorts}")

          //schedule delete in 10 seconds
          system.scheduler.scheduleOnce(10.seconds) {
            log.info(s"removing previously created task ${taskDetails.taskId}")
            mesosClientActor
              .ask(DeleteTask(taskDetails.taskId))(taskDeleteTimeout)
              .mapTo[Deleted]
              .map(deleted => {
                log.info(s"task killed ended with state ${deleted.taskStatus.getState}")
              })
          }
        }
        case s => log.error(s"failed to launch task; state is ${s}")
      } recover {
        case t => log.error(s"task launch failed ${t.getMessage}", t)
      }
    })

    system.scheduler.scheduleOnce(30.seconds) {
      val complete: Future[Any] = mesosClientActor.ask(Teardown)(teardownTimeout)
      Await.result(complete, 10.seconds)
      println("teardown completed!")
      system.terminate().map(_ => System.exit(0))
    }
  }
}
Example 61
Source File: OrderConsumer.scala From kafka-k8s-monitoring with MIT License | 5 votes |
package com.xebia.orders

import akka.Done
import akka.actor.ActorSystem
import akka.kafka.scaladsl._
import akka.kafka._
import akka.stream.{ActorMaterializer, Materializer}
import akka.stream.scaladsl.Sink
import org.apache.kafka.clients.consumer.ConsumerConfig
import org.apache.kafka.common.serialization._

object OrderConsumer extends App {

  private implicit val actorSystem = ActorSystem("orders")
  implicit val mat: Materializer = ActorMaterializer()
  implicit val ec = actorSystem.dispatcher

  val settings = ConsumerSettings(actorSystem, new StringDeserializer, new ByteArrayDeserializer)
    .withBootstrapServers("kafka:9092")
    .withGroupId("my-group")
    .withProperty(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest")

  Consumer.plainSource(settings, Subscriptions.topics("orders"))
    .map(_.value())
    .map(new String(_))
    .map(println)
    .runWith(Sink.ignore).onComplete { _ =>
      println("Stream is dead!")
      sys.exit(1)
    }
}
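plainSource above relies entirely on Kafka's auto-committed offsets, so messages can be lost if the app crashes mid-processing. For at-least-once semantics, Alpakka Kafka's committableSource combined with Committer.sink is the usual pattern; a sketch assuming Alpakka Kafka 1.0+ and the same topic and settings:

import akka.actor.ActorSystem
import akka.kafka.scaladsl.{ Committer, Consumer }
import akka.kafka.{ CommitterSettings, ConsumerSettings, Subscriptions }
import akka.stream.ActorMaterializer
import org.apache.kafka.common.serialization.{ ByteArrayDeserializer, StringDeserializer }

object AtLeastOnceOrderConsumer extends App {
  private implicit val system = ActorSystem("orders")
  implicit val mat = ActorMaterializer()

  val settings = ConsumerSettings(system, new StringDeserializer, new ByteArrayDeserializer)
    .withBootstrapServers("kafka:9092")
    .withGroupId("my-group")

  // Offsets are committed only after the message has been processed.
  Consumer
    .committableSource(settings, Subscriptions.topics("orders"))
    .map { msg =>
      println(new String(msg.record.value()))
      msg.committableOffset
    }
    .runWith(Committer.sink(CommitterSettings(system)))
}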
Example 62
Source File: StandaloneUserEventTests.scala From openwhisk with Apache License 2.0 | 5 votes |
package org.apache.openwhisk.standalone

import akka.stream.ActorMaterializer
import common.{FreePortFinder, WskProps}
import org.apache.openwhisk.common.UserEventTests
import org.junit.runner.RunWith
import org.scalatest.junit.JUnitRunner

@RunWith(classOf[JUnitRunner])
class StandaloneUserEventTests extends UserEventTests with StandaloneServerFixture {
  private implicit val materializer: ActorMaterializer = ActorMaterializer()

  private val kafkaPort = sys.props.get("whisk.kafka.port").map(_.toInt).getOrElse(FreePortFinder.freePort())

  protected override val customConfig = Some("""
      |include classpath("standalone.conf")
      |whisk {
      |  user-events {
      |    enabled = true
      |  }
      |}
    """.stripMargin)

  override protected def extraArgs: Seq[String] = Seq("--kafka", "--dev-mode", "--kafka-port", kafkaPort.toString)

  override implicit val wskprops = WskProps().copy(apihost = serverUrl)

  override def userEventsEnabled = true

  override def kafkaHosts = s"localhost:$kafkaPort"
}
Example 63
Source File: WhiskAdminCliTestBase.scala From openwhisk with Apache License 2.0 | 5 votes |
package org.apache.openwhisk.core.database

import akka.stream.ActorMaterializer
import common.{StreamLogging, WskActorSystem}
import org.rogach.scallop.throwError
import org.scalatest.concurrent.ScalaFutures
import org.scalatest.{BeforeAndAfterAll, BeforeAndAfterEach, FlatSpec, Matchers}
import org.apache.openwhisk.core.cli.{Conf, WhiskAdmin}
import org.apache.openwhisk.core.database.test.DbUtils
import org.apache.openwhisk.core.entity.WhiskAuthStore

import scala.util.Random

trait WhiskAdminCliTestBase
    extends FlatSpec
    with WskActorSystem
    with DbUtils
    with StreamLogging
    with BeforeAndAfterEach
    with BeforeAndAfterAll
    with ScalaFutures
    with Matchers {

  implicit val materializer = ActorMaterializer()

  // Bring the timeout used by ScalaFutures in sync with the one used by DbUtils
  implicit override val patienceConfig: PatienceConfig = PatienceConfig(timeout = dbOpTimeout)

  protected val authStore = WhiskAuthStore.datastore()

  // Ensure scallop does not exit upon validation failure
  throwError.value = true

  override def afterEach(): Unit = {
    cleanup()
  }

  override def afterAll(): Unit = {
    println("Shutting down store connections")
    authStore.shutdown()
    super.afterAll()
  }

  protected def randomString(len: Int = 5): String = Random.alphanumeric.take(len).mkString

  protected def resultOk(args: String*): String =
    WhiskAdmin(new Conf(args.toSeq))
      .executeCommand()
      .futureValue
      .right
      .get

  protected def resultNotOk(args: String*): String =
    WhiskAdmin(new Conf(args.toSeq))
      .executeCommand()
      .futureValue
      .left
      .get
      .message
}
Example 64
Source File: S3Minio.scala From openwhisk with Apache License 2.0 | 5 votes |
package org.apache.openwhisk.core.database.s3

import java.net.ServerSocket

import actionContainers.ActionContainer
import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import com.amazonaws.auth.{AWSStaticCredentialsProvider, BasicAWSCredentials}
import com.amazonaws.client.builder.AwsClientBuilder.EndpointConfiguration
import com.amazonaws.services.s3.AmazonS3ClientBuilder
import com.typesafe.config.ConfigFactory
import common.{SimpleExec, StreamLogging}
import org.scalatest.{BeforeAndAfterAll, FlatSpec}
import org.apache.openwhisk.common.{Logging, TransactionId}
import org.apache.openwhisk.core.database.{AttachmentStore, DocumentSerializer}

import scala.concurrent.duration._
import scala.reflect.ClassTag

trait S3Minio extends FlatSpec with BeforeAndAfterAll with StreamLogging {
  def makeS3Store[D <: DocumentSerializer: ClassTag]()(implicit actorSystem: ActorSystem,
                                                       logging: Logging,
                                                       materializer: ActorMaterializer): AttachmentStore = {
    val config = ConfigFactory.parseString(s"""
      |whisk {
      |  s3 {
      |    alpakka {
      |      aws {
      |        credentials {
      |          provider = static
      |          access-key-id = "$accessKey"
      |          secret-access-key = "$secretAccessKey"
      |        }
      |        region {
      |          provider = static
      |          default-region = us-west-2
      |        }
      |      }
      |      endpoint-url = "http://localhost:$port"
      |    }
      |    bucket = "$bucket"
      |    $prefixConfig
      |  }
      |}
      """.stripMargin).withFallback(ConfigFactory.load())
    S3AttachmentStoreProvider.makeStore[D](config)
  }

  private val accessKey = "TESTKEY"
  private val secretAccessKey = "TESTSECRET"
  private val port = freePort()
  private val bucket = "test-ow-travis"

  private def prefixConfig = {
    if (bucketPrefix.nonEmpty) s"prefix = $bucketPrefix" else ""
  }

  protected def bucketPrefix: String = ""

  override protected def beforeAll(): Unit = {
    super.beforeAll()
    dockerExec(
      s"run -d -e MINIO_ACCESS_KEY=$accessKey -e MINIO_SECRET_KEY=$secretAccessKey -p $port:9000 minio/minio server /data")
    println(s"Started minio on $port")
    createTestBucket()
  }

  override def afterAll(): Unit = {
    super.afterAll()
    val containerId = dockerExec("ps -q --filter ancestor=minio/minio")
    containerId.split("\n").map(_.trim).foreach(id => dockerExec(s"stop $id"))
    println(s"Stopped minio container")
  }

  def createTestBucket(): Unit = {
    val endpoint = new EndpointConfiguration(s"http://localhost:$port", "us-west-2")
    val client = AmazonS3ClientBuilder.standard
      .withPathStyleAccessEnabled(true)
      .withEndpointConfiguration(endpoint)
      .withCredentials(new AWSStaticCredentialsProvider(new BasicAWSCredentials(accessKey, secretAccessKey)))
      .build

    org.apache.openwhisk.utils.retry(client.createBucket(bucket), 6, Some(1.minute))
    println(s"Created bucket $bucket")
  }

  private def dockerExec(cmd: String): String = {
    implicit val tid: TransactionId = TransactionId.testing
    val command = s"${ActionContainer.dockerCmd} $cmd"
    val cmdSeq = command.split(" ").map(_.trim).filter(_.nonEmpty)
    val (out, err, code) = SimpleExec.syncRunCmd(cmdSeq)
    assert(code == 0, s"Error occurred for command '$command'. Exit code: $code, Error: $err")
    out
  }

  private def freePort(): Int = {
    val socket = new ServerSocket(0)
    try socket.getLocalPort
    finally if (socket != null) socket.close()
  }
}
Example 65
Source File: S3AttachmentStoreBehaviorBase.scala From openwhisk with Apache License 2.0 | 5 votes |
package org.apache.openwhisk.core.database.s3

import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import org.scalatest.FlatSpec
import org.apache.openwhisk.common.Logging
import org.apache.openwhisk.core.database.{AttachmentStore, DocumentSerializer}
import org.apache.openwhisk.core.database.memory.{MemoryArtifactStoreBehaviorBase, MemoryArtifactStoreProvider}
import org.apache.openwhisk.core.database.test.AttachmentStoreBehaviors
import org.apache.openwhisk.core.database.test.behavior.ArtifactStoreAttachmentBehaviors
import org.apache.openwhisk.core.entity.WhiskEntity

import scala.reflect.ClassTag
import scala.util.Random

trait S3AttachmentStoreBehaviorBase
    extends FlatSpec
    with MemoryArtifactStoreBehaviorBase
    with ArtifactStoreAttachmentBehaviors
    with AttachmentStoreBehaviors {

  override lazy val store = makeS3Store[WhiskEntity]

  override implicit val materializer: ActorMaterializer = ActorMaterializer()

  override val prefix = s"attachmentTCK_${Random.alphanumeric.take(4).mkString}"

  override protected def beforeAll(): Unit = {
    MemoryArtifactStoreProvider.purgeAll()
    super.beforeAll()
  }

  override def getAttachmentStore[D <: DocumentSerializer: ClassTag](): AttachmentStore =
    makeS3Store[D]()

  def makeS3Store[D <: DocumentSerializer: ClassTag]()(implicit actorSystem: ActorSystem,
                                                       logging: Logging,
                                                       materializer: ActorMaterializer): AttachmentStore
}
Example 66
Source File: S3Aws.scala From openwhisk with Apache License 2.0 | 5 votes |
package org.apache.openwhisk.core.database.s3

import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import com.typesafe.config.ConfigFactory
import org.scalatest.FlatSpec
import org.apache.openwhisk.common.Logging
import org.apache.openwhisk.core.database.{AttachmentStore, DocumentSerializer}

import scala.reflect.ClassTag

trait S3Aws extends FlatSpec {
  def cloudFrontConfig: String = ""

  def makeS3Store[D <: DocumentSerializer: ClassTag]()(implicit actorSystem: ActorSystem,
                                                       logging: Logging,
                                                       materializer: ActorMaterializer): AttachmentStore = {
    val config = ConfigFactory.parseString(s"""
      |whisk {
      |  s3 {
      |    alpakka {
      |      aws {
      |        credentials {
      |          provider = static
      |          access-key-id = "$accessKeyId"
      |          secret-access-key = "$secretAccessKey"
      |        }
      |        region {
      |          provider = static
      |          default-region = "$region"
      |        }
      |      }
      |    }
      |    bucket = "$bucket"
      |    $cloudFrontConfig
      |  }
      |}
      """.stripMargin).withFallback(ConfigFactory.load()).resolve()
    S3AttachmentStoreProvider.makeStore[D](config)
  }

  override protected def withFixture(test: NoArgTest) = {
    assume(
      secretAccessKey != null,
      "'AWS_SECRET_ACCESS_KEY' env not configured. Configure following " +
        "env variables for test to run. 'AWS_ACCESS_KEY_ID', 'AWS_SECRET_ACCESS_KEY', 'AWS_REGION'")
    require(accessKeyId != null, "'AWS_ACCESS_KEY_ID' env variable not set")
    require(region != null, "'AWS_REGION' env variable not set")
    super.withFixture(test)
  }

  val bucket = Option(System.getenv("AWS_BUCKET")).getOrElse("test-ow-travis")
  val accessKeyId = System.getenv("AWS_ACCESS_KEY_ID")
  val secretAccessKey = System.getenv("AWS_SECRET_ACCESS_KEY")
  val region = System.getenv("AWS_REGION")
}
Example 67
Source File: ActivationStoreBehaviorBase.scala From openwhisk with Apache License 2.0 | 5 votes |
package org.apache.openwhisk.core.database.test.behavior

import java.time.Instant

import akka.stream.ActorMaterializer
import common.{StreamLogging, WskActorSystem}
import org.apache.openwhisk.common.TransactionId
import org.apache.openwhisk.core.database.{ActivationStore, CacheChangeNotification, UserContext}
import org.apache.openwhisk.core.database.test.behavior.ArtifactStoreTestUtil.storeAvailable
import org.apache.openwhisk.core.entity._
import org.scalatest.concurrent.{IntegrationPatience, ScalaFutures}
import org.scalatest.{BeforeAndAfterEach, FlatSpec, Matchers, Outcome}

import scala.collection.mutable.ListBuffer
import scala.concurrent.Await
import scala.concurrent.duration.Duration
import scala.concurrent.duration.DurationInt
import scala.language.postfixOps
import scala.util.{Random, Try}

trait ActivationStoreBehaviorBase
    extends FlatSpec
    with ScalaFutures
    with Matchers
    with StreamLogging
    with WskActorSystem
    with IntegrationPatience
    with BeforeAndAfterEach {

  protected implicit val materializer: ActorMaterializer = ActorMaterializer()
  protected implicit val notifier: Option[CacheChangeNotification] = None

  def context: UserContext
  def activationStore: ActivationStore
  private val docsToDelete = ListBuffer[(UserContext, ActivationId)]()

  def storeType: String

  protected def transId() = TransactionId(Random.alphanumeric.take(32).mkString)

  override def afterEach(): Unit = {
    cleanup()
    stream.reset()
  }

  override protected def withFixture(test: NoArgTest): Outcome = {
    assume(storeAvailable(storeAvailableCheck), s"$storeType not configured or available")
    val outcome = super.withFixture(test)
    if (outcome.isFailed) {
      println(logLines.mkString("\n"))
    }
    outcome
  }

  protected def storeAvailableCheck: Try[Any] = Try(true)

  //~----------------------------------------< utility methods >

  protected def store(activation: WhiskActivation, context: UserContext)(
    implicit transid: TransactionId,
    notifier: Option[CacheChangeNotification]): DocInfo = {
    val doc = activationStore.store(activation, context).futureValue
    docsToDelete.append((context, ActivationId(activation.docid.asString)))
    doc
  }

  protected def newActivation(ns: String, actionName: String, start: Long): WhiskActivation = {
    WhiskActivation(
      EntityPath(ns),
      EntityName(actionName),
      Subject(),
      ActivationId.generate(),
      Instant.ofEpochMilli(start),
      Instant.ofEpochMilli(start + 1000))
  }

  def cleanup()(implicit timeout: Duration = 10 seconds): Unit = {
    implicit val tid: TransactionId = transId()
    docsToDelete.map { e =>
      Try {
        Await.result(activationStore.delete(e._2, e._1), timeout)
      }
    }
    docsToDelete.clear()
  }
}
Example 68
Source File: AttachmentSupportTests.scala From openwhisk with Apache License 2.0 | 5 votes |
package org.apache.openwhisk.core.database.test

import akka.http.scaladsl.model.Uri
import akka.stream.scaladsl.Source
import akka.stream.{ActorMaterializer, Materializer}
import akka.util.CompactByteString
import common.WskActorSystem
import org.junit.runner.RunWith
import org.scalatest.concurrent.ScalaFutures
import org.scalatest.junit.JUnitRunner
import org.scalatest.{FlatSpec, Matchers}
import org.apache.openwhisk.common.TransactionId
import org.apache.openwhisk.core.database.{AttachmentSupport, InliningConfig}
import org.apache.openwhisk.core.entity.WhiskEntity
import org.apache.openwhisk.core.entity.size._

@RunWith(classOf[JUnitRunner])
class AttachmentSupportTests extends FlatSpec with Matchers with ScalaFutures with WskActorSystem {

  behavior of "Attachment inlining"

  implicit val materializer: Materializer = ActorMaterializer()

  it should "not inline if maxInlineSize set to zero" in {
    val inliner = new AttachmentSupportTestMock(InliningConfig(maxInlineSize = 0.KB))
    val bs = CompactByteString("hello world")

    val bytesOrSource = inliner.inlineOrAttach(Source.single(bs)).futureValue
    val uri = inliner.uriOf(bytesOrSource, "foo")

    uri shouldBe Uri("test:foo")
  }

  class AttachmentSupportTestMock(val inliningConfig: InliningConfig) extends AttachmentSupport[WhiskEntity] {
    override protected[core] implicit val materializer: Materializer = ActorMaterializer()
    override protected def attachmentScheme: String = "test"
    override protected def executionContext = actorSystem.dispatcher
    override protected[database] def put(d: WhiskEntity)(implicit transid: TransactionId) = ???
  }
}
Example 69
Source File: NamespaceBlacklistTests.scala From openwhisk with Apache License 2.0 | 5 votes |
package org.apache.openwhisk.core.invoker.test

import akka.stream.ActorMaterializer
import common.{StreamLogging, WskActorSystem}
import org.junit.runner.RunWith
import org.scalatest.concurrent.{IntegrationPatience, ScalaFutures}
import org.scalatest.junit.JUnitRunner
import org.scalatest.{FlatSpec, Matchers}
import spray.json._
import org.apache.openwhisk.common.TransactionId
import org.apache.openwhisk.core.database.test.DbUtils
import org.apache.openwhisk.core.entity._
import org.apache.openwhisk.core.invoker.NamespaceBlacklist
import org.apache.openwhisk.utils.{retry => testRetry}

import scala.concurrent.duration._

@RunWith(classOf[JUnitRunner])
class NamespaceBlacklistTests
    extends FlatSpec
    with Matchers
    with DbUtils
    with ScalaFutures
    with IntegrationPatience
    with WskActorSystem
    with StreamLogging {

  behavior of "NamespaceBlacklist"

  implicit val materializer = ActorMaterializer()
  implicit val tid = TransactionId.testing

  val authStore = WhiskAuthStore.datastore()

  val limitsAndAuths = Seq(
    new LimitEntity(EntityName("testnamespace1"), UserLimits(invocationsPerMinute = Some(0))),
    new LimitEntity(EntityName("testnamespace2"), UserLimits(concurrentInvocations = Some(0))),
    new LimitEntity(
      EntityName("testnamespace3"),
      UserLimits(invocationsPerMinute = Some(1), concurrentInvocations = Some(1))))

  val uuid4 = UUID()
  val uuid5 = UUID()
  val ak4 = BasicAuthenticationAuthKey(uuid4, Secret())
  val ak5 = BasicAuthenticationAuthKey(uuid5, Secret())
  val ns4 = Namespace(EntityName("different1"), uuid4)
  val ns5 = Namespace(EntityName("different2"), uuid5)
  val blockedSubject = new ExtendedAuth(Subject(), Set(WhiskNamespace(ns4, ak4), WhiskNamespace(ns5, ak5)), true)

  val blockedNamespacesCount = 2 + blockedSubject.namespaces.size

  private def authToIdentities(auth: WhiskAuth): Set[Identity] = {
    auth.namespaces.map { ns =>
      Identity(auth.subject, ns.namespace, ns.authkey)
    }
  }

  private def limitToIdentity(limit: LimitEntity): Identity = {
    val namespace = limit.docid.id.dropRight("/limits".length)
    Identity(limit.subject, Namespace(EntityName(namespace), UUID()), BasicAuthenticationAuthKey(UUID(), Secret()))
  }

  override def beforeAll() = {
    limitsAndAuths foreach (put(authStore, _))
    put(authStore, blockedSubject)
    waitOnView(authStore, blockedNamespacesCount, NamespaceBlacklist.view)
  }

  override def afterAll() = {
    cleanup()
    super.afterAll()
  }

  it should "mark a namespace as blocked if limit is 0 in database or if one of its subjects is blocked" in {
    val blacklist = new NamespaceBlacklist(authStore)

    testRetry({
      blacklist.refreshBlacklist().futureValue should have size blockedNamespacesCount
    }, 60, Some(1.second))

    limitsAndAuths.map(limitToIdentity).map(blacklist.isBlacklisted) shouldBe Seq(true, true, false)
    authToIdentities(blockedSubject).toSeq.map(blacklist.isBlacklisted) shouldBe Seq(true, true)
  }

  class LimitEntity(name: EntityName, limits: UserLimits) extends WhiskAuth(Subject(), namespaces = Set.empty) {
    override def docid = DocId(s"${name.name}/limits")

    override def toJson = UserLimits.serdes.write(limits).asJsObject
  }

  class ExtendedAuth(subject: Subject, namespaces: Set[WhiskNamespace], blocked: Boolean)
      extends WhiskAuth(subject, namespaces) {
    override def toJson = JsObject(super.toJson.fields + ("blocked" -> JsBoolean(blocked)))
  }
}
Example 70
Source File: PoolingRestClient.scala From openwhisk with Apache License 2.0 | 5 votes |
package org.apache.openwhisk.http

import akka.actor.ActorSystem
import akka.http.scaladsl.Http
import akka.http.scaladsl.marshallers.sprayjson.SprayJsonSupport._
import akka.http.scaladsl.marshalling._
import akka.http.scaladsl.model._
import akka.http.scaladsl.settings.ConnectionPoolSettings
import akka.http.scaladsl.unmarshalling._
import akka.stream.{ActorMaterializer, OverflowStrategy, QueueOfferResult}
import akka.stream.scaladsl.{Flow, _}
import spray.json._

import scala.concurrent.{ExecutionContext, Future, Promise}
import scala.concurrent.duration._
import scala.util.{Failure, Success, Try}

// NOTE: the enclosing class declaration (with its materializer, connection-pool
// flow and queue-based `request` method) is not part of this excerpt.

  def requestJson[T: RootJsonReader](futureRequest: Future[HttpRequest]): Future[Either[StatusCode, T]] =
    request(futureRequest).flatMap { response =>
      if (response.status.isSuccess) {
        Unmarshal(response.entity.withoutSizeLimit).to[T].map(Right.apply)
      } else {
        Unmarshal(response.entity).to[String].flatMap { body =>
          val statusCode = response.status
          val reason = if (body.nonEmpty) s"${statusCode.reason} (details: $body)" else statusCode.reason
          val customStatusCode = StatusCodes
            .custom(intValue = statusCode.intValue, reason = reason, defaultMessage = statusCode.defaultMessage)
          // This is important, as it drains the entity stream.
          // Otherwise the connection stays open and the pool dries up.
          response.discardEntityBytes().future.map(_ => Left(customStatusCode))
        }
      }
    }

  def shutdown(): Future[Unit] = Future.successful(materializer.shutdown())
}

object PoolingRestClient {

  def mkRequest(method: HttpMethod,
                uri: Uri,
                body: Future[MessageEntity] = Future.successful(HttpEntity.Empty),
                headers: List[HttpHeader] = List.empty)(implicit ec: ExecutionContext): Future[HttpRequest] = {
    body.map { b =>
      HttpRequest(method, uri, headers, b)
    }
  }

  def mkJsonRequest(method: HttpMethod, uri: Uri, body: JsValue, headers: List[HttpHeader] = List.empty)(
    implicit ec: ExecutionContext): Future[HttpRequest] = {
    val b = Marshal(body).to[MessageEntity]
    mkRequest(method, uri, b, headers)
  }
}
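The excerpt above omits the class that `requestJson` lives in. The pattern it relies on is a host-connection-pool flow fed by a bounded queue, where each request travels with a Promise that is completed when the pooled flow emits its response. A rough sketch of that pattern (class name, constructor shape and queue size are assumptions for illustration, not the project's exact code):

import akka.actor.ActorSystem
import akka.http.scaladsl.Http
import akka.http.scaladsl.model.{HttpRequest, HttpResponse}
import akka.stream.scaladsl.{Keep, Sink, Source}
import akka.stream.{ActorMaterializer, OverflowStrategy, QueueOfferResult}
import scala.concurrent.{Future, Promise}
import scala.util.{Failure, Success}

class PooledClientSketch(host: String, port: Int, queueSize: Int)(implicit system: ActorSystem) {
  import system.dispatcher
  implicit val materializer = ActorMaterializer()

  // one pooled connection flow per target host; the Promise rides along as context
  private val pool = Http().cachedHostConnectionPool[Promise[HttpResponse]](host, port)

  private val queue = Source
    .queue[(HttpRequest, Promise[HttpResponse])](queueSize, OverflowStrategy.dropNew)
    .via(pool)
    .toMat(Sink.foreach {
      case (Success(response), p) => p.success(response)
      case (Failure(error), p)    => p.failure(error)
    })(Keep.left)
    .run()

  def request(futureRequest: Future[HttpRequest]): Future[HttpResponse] =
    futureRequest.flatMap { request =>
      val promise = Promise[HttpResponse]()
      queue.offer(request -> promise).flatMap {
        case QueueOfferResult.Enqueued => promise.future
        case _                         => Future.failed(new Exception("request queue is full or closed"))
      }
    }
}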
Example 71
Source File: ArtifactStoreProvider.scala From openwhisk with Apache License 2.0 | 5 votes |
package org.apache.openwhisk.core.database

import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import com.typesafe.config.ConfigFactory
import spray.json.RootJsonFormat
import org.apache.openwhisk.common.Logging
import org.apache.openwhisk.spi.{Spi, SpiLoader}
import org.apache.openwhisk.core.entity.DocumentReader

import scala.reflect.ClassTag

trait ArtifactStoreProvider extends Spi {
  def makeStore[D <: DocumentSerializer: ClassTag](useBatching: Boolean = false)(
    implicit jsonFormat: RootJsonFormat[D],
    docReader: DocumentReader,
    actorSystem: ActorSystem,
    logging: Logging,
    materializer: ActorMaterializer): ArtifactStore[D]

  protected def getAttachmentStore[D <: DocumentSerializer: ClassTag]()(
    implicit actorSystem: ActorSystem,
    logging: Logging,
    materializer: ActorMaterializer): Option[AttachmentStore] = {
    if (ConfigFactory.load().hasPath("whisk.spi.AttachmentStoreProvider")) {
      Some(SpiLoader.get[AttachmentStoreProvider].makeStore[D]())
    } else {
      None
    }
  }
}
Example 72
Source File: NoopActivationStore.scala From openwhisk with Apache License 2.0 | 5 votes |
package org.apache.openwhisk.core.database.memory

import java.time.Instant

import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import org.apache.openwhisk.common.{Logging, TransactionId, WhiskInstants}
import org.apache.openwhisk.core.database.{
  ActivationStore,
  ActivationStoreProvider,
  CacheChangeNotification,
  UserContext
}
import org.apache.openwhisk.core.entity.{ActivationId, DocInfo, EntityName, EntityPath, Subject, WhiskActivation}
import spray.json.{JsNumber, JsObject}

import scala.concurrent.Future

object NoopActivationStore extends ActivationStore with WhiskInstants {
  private val emptyInfo = DocInfo("foo")
  private val emptyCount = JsObject("activations" -> JsNumber(0))
  private val dummyActivation = WhiskActivation(
    EntityPath("testnamespace"),
    EntityName("activation"),
    Subject(),
    ActivationId.generate(),
    start = Instant.now.inMills,
    end = Instant.now.inMills)

  override def store(activation: WhiskActivation, context: UserContext)(
    implicit transid: TransactionId,
    notifier: Option[CacheChangeNotification]): Future[DocInfo] = Future.successful(emptyInfo)

  override def get(activationId: ActivationId, context: UserContext)(
    implicit transid: TransactionId): Future[WhiskActivation] = {
    val activation = dummyActivation.copy(activationId = activationId)
    Future.successful(activation)
  }

  override def delete(activationId: ActivationId, context: UserContext)(
    implicit transid: TransactionId,
    notifier: Option[CacheChangeNotification]): Future[Boolean] = Future.successful(true)

  override def countActivationsInNamespace(namespace: EntityPath,
                                           name: Option[EntityPath],
                                           skip: Int,
                                           since: Option[Instant],
                                           upto: Option[Instant],
                                           context: UserContext)(implicit transid: TransactionId): Future[JsObject] =
    Future.successful(emptyCount)

  override def listActivationsMatchingName(
    namespace: EntityPath,
    name: EntityPath,
    skip: Int,
    limit: Int,
    includeDocs: Boolean,
    since: Option[Instant],
    upto: Option[Instant],
    context: UserContext)(implicit transid: TransactionId): Future[Either[List[JsObject], List[WhiskActivation]]] =
    Future.successful(Right(List.empty))

  override def listActivationsInNamespace(
    namespace: EntityPath,
    skip: Int,
    limit: Int,
    includeDocs: Boolean,
    since: Option[Instant],
    upto: Option[Instant],
    context: UserContext)(implicit transid: TransactionId): Future[Either[List[JsObject], List[WhiskActivation]]] =
    Future.successful(Right(List.empty))
}

object NoopActivationStoreProvider extends ActivationStoreProvider {
  override def instance(actorSystem: ActorSystem, actorMaterializer: ActorMaterializer, logging: Logging) =
    NoopActivationStore
}
Example 73
Source File: MemoryAttachmentStore.scala From openwhisk with Apache License 2.0 | 5 votes |
package org.apache.openwhisk.core.database.memory

import akka.actor.ActorSystem
import akka.http.scaladsl.model.ContentType
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.{Keep, Sink, Source}
import akka.util.{ByteString, ByteStringBuilder}
import org.apache.openwhisk.common.LoggingMarkers.{
  DATABASE_ATTS_DELETE,
  DATABASE_ATT_DELETE,
  DATABASE_ATT_GET,
  DATABASE_ATT_SAVE
}
import org.apache.openwhisk.common.{Logging, TransactionId}
import org.apache.openwhisk.core.database.StoreUtils._
import org.apache.openwhisk.core.database._
import org.apache.openwhisk.core.entity.DocId

import scala.collection.concurrent.TrieMap
import scala.concurrent.{ExecutionContext, Future}
import scala.reflect.ClassTag

object MemoryAttachmentStoreProvider extends AttachmentStoreProvider {
  override def makeStore[D <: DocumentSerializer: ClassTag]()(implicit actorSystem: ActorSystem,
                                                              logging: Logging,
                                                              materializer: ActorMaterializer): AttachmentStore =
    new MemoryAttachmentStore(implicitly[ClassTag[D]].runtimeClass.getSimpleName.toLowerCase)
}

// NOTE: the MemoryAttachmentStore class declaration and the state its methods
// use (the `attachments` map, the `Attachment` holder and the `closed` flag)
// are not part of this excerpt; see the sketch after this example.

  override protected[core] def readAttachment[T](docId: DocId, name: String, sink: Sink[ByteString, Future[T]])(
    implicit transid: TransactionId): Future[T] = {

    val start = transid.started(
      this,
      DATABASE_ATT_GET,
      s"[ATT_GET] '$dbName' finding attachment '$name' of document 'id: $docId'")

    val f = attachments.get(attachmentKey(docId, name)) match {
      case Some(Attachment(bytes)) =>
        val r = Source.single(bytes).toMat(sink)(Keep.right).run
        r.map(t => {
          transid
            .finished(this, start, s"[ATT_GET] '$dbName' completed: found attachment '$name' of document '$docId'")
          t
        })
      case None =>
        transid.finished(
          this,
          start,
          s"[ATT_GET] '$dbName', retrieving attachment '$name' of document '$docId'; not found.")
        Future.failed(NoDocumentException("Not found on 'readAttachment'."))
    }

    reportFailure(
      f,
      start,
      failure => s"[ATT_GET] '$dbName' internal error, name: '$name', doc: '$docId', failure: '${failure.getMessage}'")
  }

  override protected[core] def deleteAttachments(docId: DocId)(implicit transid: TransactionId): Future[Boolean] = {
    val start = transid.started(this, DATABASE_ATTS_DELETE, s"[ATTS_DELETE] uploading attachment of document '$docId'")
    val prefix = docId + "/"
    attachments --= attachments.keySet.filter(_.startsWith(prefix))
    transid.finished(this, start, s"[ATTS_DELETE] completed: delete attachment of document '$docId'")
    Future.successful(true)
  }

  override protected[core] def deleteAttachment(docId: DocId, name: String)(
    implicit transid: TransactionId): Future[Boolean] = {
    val start = transid.started(this, DATABASE_ATT_DELETE, s"[ATT_DELETE] uploading attachment of document '$docId'")
    attachments.remove(attachmentKey(docId, name))
    transid.finished(this, start, s"[ATT_DELETE] completed: delete attachment of document '$docId'")
    Future.successful(true)
  }

  def attachmentCount: Int = attachments.size

  def isClosed = closed

  override def shutdown(): Unit = {
    closed = true
  }

  private def attachmentKey(docId: DocId, name: String) = s"${docId.id}/$name"
}
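A hedged sketch of the state the methods above operate on, inferred from their bodies (attachments live in a TrieMap keyed by "docId/name"); the exact declarations in the project may differ:

import akka.util.ByteString
import scala.collection.concurrent.TrieMap

object MemoryAttachmentState {
  // small holder for an attachment payload, matching the Attachment(bytes) pattern above
  final case class Attachment(bytes: ByteString)

  // payloads keyed by "docId/name", matching attachmentKey in the excerpt
  val attachments = new TrieMap[String, Attachment]

  @volatile var closed = false // flipped by shutdown(), read by isClosed
}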
Example 74
Source File: CouchDbStoreProvider.scala From openwhisk with Apache License 2.0 | 5 votes |
package org.apache.openwhisk.core.database

import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import spray.json.RootJsonFormat
import org.apache.openwhisk.common.Logging
import org.apache.openwhisk.core.ConfigKeys
import org.apache.openwhisk.core.entity.DocumentReader
import org.apache.openwhisk.core.entity.size._
import pureconfig._
import pureconfig.generic.auto._

import scala.reflect.ClassTag

case class CouchDbConfig(provider: String,
                         protocol: String,
                         host: String,
                         port: Int,
                         username: String,
                         password: String,
                         databases: Map[String, String]) {
  assume(Set(protocol, host, username, password).forall(_.nonEmpty), "At least one expected property is missing")

  def databaseFor[D](implicit tag: ClassTag[D]): String = {
    val entityType = tag.runtimeClass.getSimpleName
    databases.get(entityType) match {
      case Some(name) => name
      case None       => throw new IllegalArgumentException(s"Database name mapping not found for $entityType")
    }
  }
}

object CouchDbStoreProvider extends ArtifactStoreProvider {

  def makeStore[D <: DocumentSerializer: ClassTag](useBatching: Boolean)(
    implicit jsonFormat: RootJsonFormat[D],
    docReader: DocumentReader,
    actorSystem: ActorSystem,
    logging: Logging,
    materializer: ActorMaterializer): ArtifactStore[D] = makeArtifactStore(useBatching, getAttachmentStore())

  def makeArtifactStore[D <: DocumentSerializer: ClassTag](useBatching: Boolean,
                                                           attachmentStore: Option[AttachmentStore])(
    implicit jsonFormat: RootJsonFormat[D],
    docReader: DocumentReader,
    actorSystem: ActorSystem,
    logging: Logging,
    materializer: ActorMaterializer): ArtifactStore[D] = {
    val dbConfig = loadConfigOrThrow[CouchDbConfig](ConfigKeys.couchdb)
    require(
      dbConfig.provider == "Cloudant" || dbConfig.provider == "CouchDB",
      s"Unsupported db.provider: ${dbConfig.provider}")

    val inliningConfig = loadConfigOrThrow[InliningConfig](ConfigKeys.db)

    new CouchDbRestStore[D](
      dbConfig.protocol,
      dbConfig.host,
      dbConfig.port,
      dbConfig.username,
      dbConfig.password,
      dbConfig.databaseFor[D],
      useBatching,
      inliningConfig,
      attachmentStore)
  }
}
Example 75
Source File: YARNComponentActor.scala From openwhisk with Apache License 2.0 | 5 votes |
package org.apache.openwhisk.core.yarn

import akka.actor.{Actor, ActorSystem}
import akka.http.scaladsl.model.{HttpMethods, StatusCodes}
import akka.stream.ActorMaterializer
import org.apache.openwhisk.common.Logging
import org.apache.openwhisk.core.entity.ExecManifest.ImageName
import org.apache.openwhisk.core.yarn.YARNComponentActor.{CreateContainerAsync, RemoveContainer}
import spray.json.{JsArray, JsNumber, JsObject, JsString}

import scala.concurrent.ExecutionContext

object YARNComponentActor {
  case object CreateContainerAsync
  case class RemoveContainer(component_instance_name: String)
}

class YARNComponentActor(actorSystem: ActorSystem,
                         logging: Logging,
                         yarnConfig: YARNConfig,
                         serviceName: String,
                         imageName: ImageName)
    extends Actor {

  implicit val as: ActorSystem = actorSystem
  implicit val materializer: ActorMaterializer = ActorMaterializer()
  implicit val ec: ExecutionContext = actorSystem.dispatcher

  // Adding a container via the YARN REST API is actually done by flexing the
  // component's container pool to a certain size. This actor must track the
  // current containerCount in order to make the correct scale-up request.
  var containerCount: Int = 0

  def receive: PartialFunction[Any, Unit] = {
    case CreateContainerAsync =>
      sender ! createContainerAsync

    case RemoveContainer(component_instance_name) =>
      sender ! removeContainer(component_instance_name)

    case input =>
      throw new IllegalArgumentException("Unknown input: " + input)
      sender ! false // unreachable: the throw above always exits receive
  }

  def createContainerAsync(): Unit = {
    logging.info(this, s"Using YARN to create a container with image ${imageName.name}...")

    val body = JsObject("number_of_containers" -> JsNumber(containerCount + 1)).compactPrint
    val response = YARNRESTUtil.submitRequestWithAuth(
      yarnConfig.authType,
      HttpMethods.PUT,
      s"${yarnConfig.masterUrl}/app/v1/services/$serviceName/components/${imageName.name}",
      body)
    response match {
      case httpresponse(StatusCodes.OK, content) =>
        logging.info(this, s"Added container: ${imageName.name}. Response: $content")
        containerCount += 1
      case httpresponse(_, _) => YARNRESTUtil.handleYARNRESTError(logging)
    }
  }

  def removeContainer(component_instance_name: String): Unit = {
    logging.info(this, s"Removing ${imageName.name} container: $component_instance_name ")
    if (containerCount <= 0) {
      logging.warn(this, "Already at 0 containers")
    } else {
      val body = JsObject(
        "components" -> JsArray(
          JsObject(
            "name" -> JsString(imageName.name),
            "decommissioned_instances" -> JsArray(JsString(component_instance_name))))).compactPrint
      val response = YARNRESTUtil.submitRequestWithAuth(
        yarnConfig.authType,
        HttpMethods.PUT,
        s"${yarnConfig.masterUrl}/app/v1/services/$serviceName",
        body)
      response match {
        case httpresponse(StatusCodes.OK, content) =>
          logging.info(
            this,
            s"Successfully removed ${imageName.name} container: $component_instance_name. Response: $content")
          containerCount -= 1
        case httpresponse(_, _) => YARNRESTUtil.handleYARNRESTError(logging)
      }
    }
  }
}
Example 76
Source File: Main.scala From openwhisk with Apache License 2.0 | 5 votes |
package org.apache.openwhisk.core.monitoring.metrics

import akka.actor.ActorSystem
import akka.http.scaladsl.Http
import akka.stream.ActorMaterializer
import kamon.Kamon

import scala.concurrent.duration.DurationInt
import scala.concurrent.{Await, ExecutionContextExecutor, Future}

object Main {
  def main(args: Array[String]): Unit = {
    Kamon.init()
    implicit val system: ActorSystem = ActorSystem("events-actor-system")
    implicit val materializer: ActorMaterializer = ActorMaterializer()
    val binding = OpenWhiskEvents.start(system.settings.config)
    addShutdownHook(binding)
  }

  private def addShutdownHook(binding: Future[Http.ServerBinding])(implicit actorSystem: ActorSystem,
                                                                   materializer: ActorMaterializer): Unit = {
    implicit val ec: ExecutionContextExecutor = actorSystem.dispatcher
    sys.addShutdownHook {
      Await.result(binding.map(_.unbind()), 30.seconds)
      Await.result(actorSystem.whenTerminated, 30.seconds)
    }
  }
}
Example 77
Source File: OpenWhiskEvents.scala From openwhisk with Apache License 2.0 | 5 votes |
package org.apache.openwhisk.core.monitoring.metrics

import akka.actor.{ActorSystem, CoordinatedShutdown}
import akka.event.slf4j.SLF4JLogging
import akka.http.scaladsl.Http
import akka.kafka.ConsumerSettings
import akka.stream.ActorMaterializer
import com.typesafe.config.Config
import kamon.Kamon
import kamon.prometheus.PrometheusReporter
import org.apache.kafka.common.serialization.StringDeserializer
import pureconfig._
import pureconfig.generic.auto._

import scala.concurrent.duration.FiniteDuration
import scala.concurrent.{ExecutionContext, Future}

object OpenWhiskEvents extends SLF4JLogging {

  case class MetricConfig(port: Int,
                          enableKamon: Boolean,
                          ignoredNamespaces: Set[String],
                          renameTags: Map[String, String],
                          retry: RetryConfig)

  case class RetryConfig(minBackoff: FiniteDuration, maxBackoff: FiniteDuration, randomFactor: Double, maxRestarts: Int)

  def start(config: Config)(implicit system: ActorSystem,
                            materializer: ActorMaterializer): Future[Http.ServerBinding] = {
    implicit val ec: ExecutionContext = system.dispatcher

    val prometheusReporter = new PrometheusReporter()
    Kamon.registerModule("prometheus", prometheusReporter)
    Kamon.init(config)

    val metricConfig = loadConfigOrThrow[MetricConfig](config, "whisk.user-events")

    val prometheusRecorder = PrometheusRecorder(prometheusReporter, metricConfig)
    val recorders = if (metricConfig.enableKamon) Seq(prometheusRecorder, KamonRecorder) else Seq(prometheusRecorder)
    val eventConsumer = EventConsumer(eventConsumerSettings(defaultConsumerConfig(config)), recorders, metricConfig)

    CoordinatedShutdown(system).addTask(CoordinatedShutdown.PhaseBeforeServiceUnbind, "shutdownConsumer") { () =>
      eventConsumer.shutdown()
    }
    val port = metricConfig.port
    val api = new PrometheusEventsApi(eventConsumer, prometheusRecorder)
    val httpBinding = Http().bindAndHandle(api.routes, "0.0.0.0", port)
    httpBinding.foreach(_ => log.info(s"Started the http server on http://localhost:$port"))(system.dispatcher)
    httpBinding
  }

  def eventConsumerSettings(config: Config): ConsumerSettings[String, String] =
    ConsumerSettings(config, new StringDeserializer, new StringDeserializer)

  def defaultConsumerConfig(globalConfig: Config): Config = globalConfig.getConfig("akka.kafka.consumer")
}
Example 78
Source File: KafkaSpecBase.scala From openwhisk with Apache License 2.0 | 5 votes |
package org.apache.openwhisk.core.monitoring.metrics

import akka.kafka.testkit.scaladsl.{EmbeddedKafkaLike, ScalatestKafkaSpec}
import akka.stream.ActorMaterializer
import net.manub.embeddedkafka.EmbeddedKafka
import org.scalatest._
import org.scalatest.concurrent.{Eventually, IntegrationPatience, ScalaFutures}

import scala.concurrent.duration.{DurationInt, FiniteDuration}

abstract class KafkaSpecBase
    extends ScalatestKafkaSpec(6065)
    with Matchers
    with ScalaFutures
    with FlatSpecLike
    with EmbeddedKafka
    with EmbeddedKafkaLike
    with IntegrationPatience
    with Eventually
    with EventsTestHelper { this: Suite =>
  implicit val timeoutConfig: PatienceConfig = PatienceConfig(1.minute)
  implicit val materializer: ActorMaterializer = ActorMaterializer()
  override val sleepAfterProduce: FiniteDuration = 10.seconds
  override protected val topicCreationTimeout = 60.seconds
}
Example 79
Source File: EventsTestHelper.scala From openwhisk with Apache License 2.0 | 5 votes |
package org.apache.openwhisk.core.monitoring.metrics

import java.net.ServerSocket

import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import com.typesafe.config.Config
import org.apache.openwhisk.core.monitoring.metrics.OpenWhiskEvents.MetricConfig
import pureconfig._
import pureconfig.generic.auto._

trait EventsTestHelper {

  protected def createConsumer(kport: Int, globalConfig: Config, recorder: MetricRecorder)(
    implicit system: ActorSystem,
    materializer: ActorMaterializer) = {
    val settings = OpenWhiskEvents
      .eventConsumerSettings(OpenWhiskEvents.defaultConsumerConfig(globalConfig))
      .withBootstrapServers(s"localhost:$kport")
    val metricConfig = loadConfigOrThrow[MetricConfig](globalConfig, "user-events")
    EventConsumer(settings, Seq(recorder), metricConfig)
  }

  protected def freePort(): Int = {
    val socket = new ServerSocket(0)
    try socket.getLocalPort
    finally if (socket != null) socket.close()
  }
}
Example 80
Source File: LeanBalancer.scala From openwhisk with Apache License 2.0 | 5 votes |
package org.apache.openwhisk.core.loadBalancer

import akka.actor.{ActorRef, ActorSystem, Props}
import akka.stream.ActorMaterializer
import org.apache.openwhisk.common._
import org.apache.openwhisk.core.WhiskConfig._
import org.apache.openwhisk.core.connector._
import org.apache.openwhisk.core.containerpool.ContainerPoolConfig
import org.apache.openwhisk.core.entity.ControllerInstanceId
import org.apache.openwhisk.core.entity._
import org.apache.openwhisk.core.invoker.InvokerProvider
import org.apache.openwhisk.core.{ConfigKeys, WhiskConfig}
import org.apache.openwhisk.spi.SpiLoader
import org.apache.openwhisk.utils.ExecutionContextFactory
import pureconfig._
import pureconfig.generic.auto._
import org.apache.openwhisk.core.entity.size._

import scala.concurrent.Future

// NOTE: the LeanBalancer class declaration (and the members it uses below, such
// as config, invokerName, messageProducer and poolConfig) is not part of this excerpt.

  private def makeALocalThreadedInvoker(): Unit = {
    implicit val ec = ExecutionContextFactory.makeCachedThreadPoolExecutionContext()
    val limitConfig: ConcurrencyLimitConfig = loadConfigOrThrow[ConcurrencyLimitConfig](ConfigKeys.concurrencyLimit)
    SpiLoader.get[InvokerProvider].instance(config, invokerName, messageProducer, poolConfig, limitConfig)
  }

  makeALocalThreadedInvoker()

  override protected val invokerPool: ActorRef = actorSystem.actorOf(Props.empty)

  override protected def releaseInvoker(invoker: InvokerInstanceId, entry: ActivationEntry) = {
    // Currently do nothing
  }

  override protected def emitMetrics() = {
    super.emitMetrics()
  }
}

object LeanBalancer extends LoadBalancerProvider {

  override def instance(whiskConfig: WhiskConfig, instance: ControllerInstanceId)(
    implicit actorSystem: ActorSystem,
    logging: Logging,
    materializer: ActorMaterializer): LoadBalancer = {
    new LeanBalancer(whiskConfig, createFeedFactory(whiskConfig, instance), instance)
  }

  def requiredProperties = ExecManifest.requiredProperties ++ wskApiHost
}
Example 81
Source File: Main.scala From openwhisk with Apache License 2.0 | 5 votes |
package org.apache.openwhisk.core.database.cosmosdb.cache

import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import kamon.Kamon
import org.apache.openwhisk.common.{AkkaLogging, ConfigMXBean, Logging}
import org.apache.openwhisk.http.{BasicHttpService, BasicRasService}

object Main {
  def main(args: Array[String]): Unit = {
    implicit val system: ActorSystem = ActorSystem("cache-invalidator-actor-system")
    implicit val materializer: ActorMaterializer = ActorMaterializer()
    implicit val log: Logging = new AkkaLogging(akka.event.Logging.getLogger(system, this))

    ConfigMXBean.register()
    Kamon.init()
    val port = CacheInvalidatorConfig(system.settings.config).invalidatorConfig.port
    BasicHttpService.startHttpService(new BasicRasService {}.route, port, None)
    CacheInvalidator.start(system.settings.config)
    log.info(this, s"Started the server at http://localhost:$port")
  }
}
Example 82
Source File: KafkaEventProducer.scala From openwhisk with Apache License 2.0 | 5 votes |
package org.apache.openwhisk.core.database.cosmosdb.cache

import akka.Done
import akka.actor.ActorSystem
import akka.kafka.scaladsl.Producer
import akka.kafka.{ProducerMessage, ProducerSettings}
import akka.stream.scaladsl.{Keep, Sink, Source}
import akka.stream.{ActorMaterializer, OverflowStrategy, QueueOfferResult}
import org.apache.kafka.clients.consumer.ConsumerConfig
import org.apache.kafka.clients.producer.ProducerRecord
import org.apache.openwhisk.connector.kafka.KamonMetricsReporter

import scala.collection.immutable.Seq
import scala.concurrent.{ExecutionContext, Future, Promise}

case class KafkaEventProducer(
  settings: ProducerSettings[String, String],
  topic: String,
  eventProducerConfig: EventProducerConfig)(implicit system: ActorSystem, materializer: ActorMaterializer)
    extends EventProducer {
  private implicit val executionContext: ExecutionContext = system.dispatcher

  private val queue = Source
    .queue[(Seq[String], Promise[Done])](eventProducerConfig.bufferSize, OverflowStrategy.dropNew) // TODO: use backpressure
    .map {
      case (msgs, p) =>
        ProducerMessage.multi(msgs.map(newRecord), p)
    }
    .via(Producer.flexiFlow(producerSettings))
    .map {
      case ProducerMessage.MultiResult(_, passThrough) =>
        passThrough.success(Done)
      case _ => // only the multi mode is used above, so other result types need not be handled
    }
    .toMat(Sink.ignore)(Keep.left)
    .run

  override def send(msg: Seq[String]): Future[Done] = {
    val promise = Promise[Done]
    queue.offer(msg -> promise).flatMap {
      case QueueOfferResult.Enqueued    => promise.future
      case QueueOfferResult.Dropped     => Future.failed(new Exception("Kafka request queue is full."))
      case QueueOfferResult.QueueClosed => Future.failed(new Exception("Kafka request queue was closed."))
      case QueueOfferResult.Failure(f)  => Future.failed(f)
    }
  }

  def close(): Future[Done] = {
    queue.complete()
    queue.watchCompletion()
  }

  private def newRecord(msg: String) = new ProducerRecord[String, String](topic, "messages", msg)

  private def producerSettings =
    settings.withProperty(ConsumerConfig.METRIC_REPORTER_CLASSES_CONFIG, KamonMetricsReporter.name)
}
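Because the queue above is materialized once per producer, callers simply offer a batch and wait on its pass-through Promise, which only completes after the broker acknowledges the whole batch. A hedged usage sketch; the settings wiring follows the example, but the shape of the EventProducerConfig constructor is an assumption:

import akka.Done
import akka.actor.ActorSystem
import akka.kafka.ProducerSettings
import akka.stream.ActorMaterializer
import org.apache.kafka.common.serialization.StringSerializer
import scala.collection.immutable.Seq
import scala.util.{Failure, Success}

object KafkaEventProducerUsage extends App {
  implicit val system = ActorSystem("usage-sketch")
  implicit val materializer = ActorMaterializer()
  import system.dispatcher

  val settings = ProducerSettings(system, new StringSerializer, new StringSerializer)
    .withBootstrapServers("localhost:9092")

  // assumed stand-in: the project's EventProducerConfig exposes bufferSize,
  // but its full constructor is not shown in the excerpt
  val producer = KafkaEventProducer(settings, "cacheInvalidation", EventProducerConfig(100))

  producer.send(Seq("""{"instanceId":"cache-invalidator"}""")).onComplete {
    case Success(Done) => println("batch acknowledged by the broker")
    case Failure(t)    => println(s"send failed: ${t.getMessage}")
  }
}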
Example 83
Source File: CacheInvalidator.scala From openwhisk with Apache License 2.0 | 5 votes |
package org.apache.openwhisk.core.database.cosmosdb.cache

import akka.Done
import akka.actor.{ActorSystem, CoordinatedShutdown}
import akka.kafka.ProducerSettings
import akka.stream.ActorMaterializer
import com.google.common.base.Throwables
import com.typesafe.config.Config
import org.apache.kafka.common.serialization.StringSerializer
import org.apache.openwhisk.common.Logging
import org.apache.openwhisk.core.database.RemoteCacheInvalidation.cacheInvalidationTopic

import scala.concurrent.{ExecutionContext, Future}
import scala.util.{Failure, Success}

object CacheInvalidator {

  val instanceId = "cache-invalidator"
  val whisksCollection = "whisks"

  def start(globalConfig: Config)(implicit system: ActorSystem,
                                  materializer: ActorMaterializer,
                                  log: Logging): Future[Done] = {
    implicit val ec: ExecutionContext = system.dispatcher
    val config = CacheInvalidatorConfig(globalConfig)
    val producer =
      KafkaEventProducer(
        kafkaProducerSettings(defaultProducerConfig(globalConfig)),
        cacheInvalidationTopic,
        config.eventProducerConfig)
    val observer = new WhiskChangeEventObserver(config.invalidatorConfig, producer)
    val feedConsumer = new ChangeFeedConsumer(whisksCollection, config, observer)
    feedConsumer.isStarted.andThen {
      case Success(_) =>
        registerShutdownTasks(system, feedConsumer, producer)
        log.info(this, s"Started the Cache invalidator service. ClusterId [${config.invalidatorConfig.clusterId}]")
      case Failure(t) =>
        log.error(this, "Error occurred while starting the Consumer" + Throwables.getStackTraceAsString(t))
    }
  }

  private def registerShutdownTasks(system: ActorSystem,
                                    feedConsumer: ChangeFeedConsumer,
                                    producer: KafkaEventProducer)(implicit ec: ExecutionContext,
                                                                  log: Logging): Unit = {
    CoordinatedShutdown(system).addTask(CoordinatedShutdown.PhaseBeforeServiceUnbind, "closeFeedListeners") { () =>
      feedConsumer
        .close()
        .flatMap { _ =>
          producer.close().andThen {
            case Success(_) =>
              log.info(this, "Kafka producer successfully shutdown")
          }
        }
    }
  }

  def kafkaProducerSettings(config: Config): ProducerSettings[String, String] =
    ProducerSettings(config, new StringSerializer, new StringSerializer)

  def defaultProducerConfig(globalConfig: Config): Config = globalConfig.getConfig("akka.kafka.producer")
}
Example 84
Source File: RunServer.scala From mleap with Apache License 2.0 | 5 votes |
package ml.combust.mleap.grpc.server

import java.util.concurrent.{Executors, TimeUnit}

import akka.Done
import akka.actor.{ActorSystem, CoordinatedShutdown}
import akka.stream.{ActorMaterializer, Materializer}
import com.typesafe.config.Config
import com.typesafe.scalalogging.Logger
import io.grpc.ServerBuilder
import ml.combust.mleap.executor.MleapExecutor
import ml.combust.mleap.pb.MleapGrpc

import scala.concurrent.{ExecutionContext, Future}
import scala.language.existentials
import scala.util.{Failure, Success, Try}

class RunServer(config: Config)
               (implicit system: ActorSystem) {
  private val logger = Logger(classOf[RunServer])

  private var coordinator: Option[CoordinatedShutdown] = None

  def run(): Unit = {
    Try {
      logger.info("Starting MLeap gRPC Server")

      val coordinator = CoordinatedShutdown(system)
      this.coordinator = Some(coordinator)

      implicit val materializer: Materializer = ActorMaterializer()

      val grpcServerConfig = new GrpcServerConfig(config.getConfig("default"))
      val mleapExecutor = MleapExecutor(system)
      val port: Int = config.getInt("port")
      val threads: Option[Int] = if (config.hasPath("threads")) Some(config.getInt("threads")) else None
      val threadCount = threads.getOrElse {
        Math.min(Math.max(Runtime.getRuntime.availableProcessors() * 4, 32), 64)
      }

      logger.info(s"Creating thread pool for server with size $threadCount")
      val grpcThreadPool = Executors.newFixedThreadPool(threadCount)
      implicit val ec: ExecutionContext = ExecutionContext.fromExecutor(grpcThreadPool)

      coordinator.addTask(CoordinatedShutdown.PhaseServiceRequestsDone, "threadPoolShutdownNow") { () =>
        Future {
          logger.info("Shutting down gRPC thread pool")
          grpcThreadPool.shutdown()
          grpcThreadPool.awaitTermination(5, TimeUnit.SECONDS)

          Done
        }
      }

      logger.info(s"Creating executor service")
      val grpcService: GrpcServer = new GrpcServer(mleapExecutor, grpcServerConfig)
      val builder = ServerBuilder.forPort(port)
      builder.intercept(new ErrorInterceptor)
      builder.addService(MleapGrpc.bindService(grpcService, ec))
      val grpcServer = builder.build()

      logger.info(s"Starting server on port $port")
      grpcServer.start()

      coordinator.addTask(CoordinatedShutdown.PhaseServiceUnbind, "grpcServiceShutdown") { () =>
        Future {
          logger.info("Shutting down gRPC")
          grpcServer.shutdown()
          grpcServer.awaitTermination(10, TimeUnit.SECONDS)
          Done
        }(ExecutionContext.global)
      }

      coordinator.addTask(CoordinatedShutdown.PhaseServiceStop, "grpcServiceShutdownNow") { () =>
        Future {
          if (!grpcServer.isShutdown) {
            logger.info("Shutting down gRPC NOW!")

            grpcServer.shutdownNow()
            grpcServer.awaitTermination(5, TimeUnit.SECONDS)
          }

          Done
        }(ExecutionContext.global)
      }
    } match {
      case Success(_) =>
      case Failure(err) =>
        logger.error("Error encountered starting server", err)
        for (c <- this.coordinator) {
          c.run(CoordinatedShutdown.UnknownReason)
        }
        throw err
    }
  }
}
Example 85
Source File: GrpcSpec.scala From mleap with Apache License 2.0 | 5 votes |
package ml.combust.mleap.grpc.server

import akka.actor.ActorSystem
import akka.stream.{ActorMaterializer, Materializer}
import akka.testkit.TestKit
import io.grpc.{ManagedChannel, Server}
import ml.combust.mleap.executor.service.TransformService
import ml.combust.mleap.executor.testkit.TransformServiceSpec
import org.scalatest.{BeforeAndAfterAll, BeforeAndAfterEach}
import org.scalatest.concurrent.ScalaFutures

import scala.concurrent.duration._
import ml.combust.mleap.grpc.server.TestUtil._

class GrpcSpec extends TestKit(ActorSystem("grpc-server-test"))
  with TransformServiceSpec
  with BeforeAndAfterEach
  with BeforeAndAfterAll
  with ScalaFutures {

  private lazy val server = createServer(system)
  private lazy val channel = inProcessChannel
  private lazy val client = createClient(channel)

  override lazy val transformService: TransformService = {
    server
    client
  }

  override implicit def materializer: Materializer = ActorMaterializer()(system)

  override protected def afterAll(): Unit = {
    server.shutdown()
    channel.shutdown()
    TestKit.shutdownActorSystem(system, 5.seconds, verifySystemShutdown = true)
  }
}
Example 86
Source File: TestUtil.scala From mleap with Apache License 2.0 | 5 votes |
package ml.combust.mleap.grpc.server

import java.io.File
import java.net.URI

import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import com.typesafe.config.ConfigFactory
import io.grpc.{ManagedChannel, Server}
import io.grpc.inprocess.{InProcessChannelBuilder, InProcessServerBuilder}
import ml.combust.mleap.executor.MleapExecutor
import ml.combust.mleap.grpc.GrpcClient
import ml.combust.mleap.pb.MleapGrpc
import ml.combust.mleap.pb.MleapGrpc.MleapStub
import ml.combust.mleap.runtime.frame.DefaultLeapFrame
import ml.combust.mleap.runtime.serialization.FrameReader

import scala.concurrent.ExecutionContext
import ExecutionContext.Implicits.global
import scala.util.Try

object TestUtil {

  lazy val lrUri: URI = URI.create(getClass.getClassLoader.getResource("models/airbnb.model.lr.zip").toURI.toString)

  lazy val frame: Try[DefaultLeapFrame] =
    FrameReader().read(new File(getClass.getClassLoader.getResource("leap_frame/frame.airbnb.json").getFile))

  lazy val uniqueServerName: String = "in-process server for " + getClass

  def createServer(system: ActorSystem): Server = {
    val config = new GrpcServerConfig(ConfigFactory.load().getConfig("ml.combust.mleap.grpc.server.default"))
    val ssd = MleapGrpc.bindService(
      new GrpcServer(MleapExecutor(system), config)(global, ActorMaterializer.create(system)),
      global)
    val builder = InProcessServerBuilder.forName(uniqueServerName)
    builder.directExecutor().addService(ssd).intercept(new ErrorInterceptor)
    val server = builder.build
    server.start()
    server
  }

  def createClient(channel: ManagedChannel): GrpcClient = new GrpcClient(new MleapStub(channel))

  def inProcessChannel: ManagedChannel = InProcessChannelBuilder.forName(uniqueServerName).directExecutor.build
}
Example 87
Source File: LocalTransformServiceActor.scala From mleap with Apache License 2.0 | 5 votes |
package ml.combust.mleap.executor.service

import akka.actor.{Actor, ActorRef, Props, Status, Terminated}
import akka.stream.{ActorMaterializer, Materializer}
import ml.combust.mleap.executor.repository.RepositoryBundleLoader
import ml.combust.mleap.executor._
import ml.combust.mleap.executor.error.NotFoundException

import scala.util.{Failure, Success, Try}

object LocalTransformServiceActor {
  def props(loader: RepositoryBundleLoader,
            config: ExecutorConfig): Props = {
    Props(new LocalTransformServiceActor(loader, config))
  }

  object Messages {
    case object Close
  }
}

class LocalTransformServiceActor(loader: RepositoryBundleLoader,
                                 config: ExecutorConfig) extends Actor {
  import LocalTransformServiceActor.Messages

  private implicit val materializer: Materializer = ActorMaterializer()(context.system)

  private var lookup: Map[String, ActorRef] = Map()
  private var modelNameLookup: Map[ActorRef, String] = Map()

  override def postStop(): Unit = {
    for (child <- context.children) {
      context.unwatch(child)
      context.stop(child)
    }
  }

  override def receive: Receive = {
    case request: TransformFrameRequest => handleModelRequest(request)
    case request: GetBundleMetaRequest => handleModelRequest(request)
    case request: GetModelRequest => handleModelRequest(request)
    case request: CreateFrameStreamRequest => handleModelRequest(request)
    case request: CreateRowStreamRequest => handleModelRequest(request)
    case request: GetRowStreamRequest => handleModelRequest(request)
    case request: CreateFrameFlowRequest => handleModelRequest(request)
    case request: GetFrameStreamRequest => handleModelRequest(request)
    case request: CreateRowFlowRequest => handleModelRequest(request)
    case request: UnloadModelRequest => handleModelRequest(request)
    case request: LoadModelRequest => loadModel(request)
    case Messages.Close => context.stop(self)

    case Terminated(actor) => terminated(actor)
  }

  def handleModelRequest(request: ModelRequest): Unit = {
    lookup.get(request.modelName) match {
      case Some(actor) => actor.tell(request, sender)
      case None => sender ! Status.Failure(new NotFoundException(s"no model with name ${request.modelName}"))
    }
  }

  def loadModel(request: LoadModelRequest): Unit = {
    Try(context.actorOf(BundleActor.props(request, loader, config), request.modelName)) match {
      case Success(actor) =>
        lookup += (request.modelName -> actor)
        modelNameLookup += (actor -> request.modelName)
        context.watch(actor)
        actor.tell(request, sender)
      case Failure(err) => sender ! Status.Failure(err)
    }
  }

  private def terminated(ref: ActorRef): Unit = {
    val uri = modelNameLookup(ref)
    modelNameLookup -= ref
    lookup -= uri
  }
}
Example 88
Source File: MleapExecutorSpec.scala From mleap with Apache License 2.0 | 5 votes |
package ml.combust.mleap.executor

import akka.actor.ActorSystem
import akka.stream.{ActorMaterializer, Materializer}
import akka.testkit.TestKit
import ml.combust.mleap.executor.testkit.{TestUtil, TransformServiceSpec}
import org.scalatest.concurrent.ScalaFutures
import org.scalatest.BeforeAndAfterAll

import scala.concurrent.duration._

class MleapExecutorSpec extends TestKit(ActorSystem("MleapExecutorSpec"))
  with TransformServiceSpec
  with BeforeAndAfterAll
  with ScalaFutures {

  override lazy val transformService: MleapExecutor = MleapExecutor(system)
  private val frame = TestUtil.frame
  override implicit val materializer: Materializer = ActorMaterializer()(system)

  override protected def afterAll(): Unit = {
    TestKit.shutdownActorSystem(system, 5.seconds, verifySystemShutdown = true)
  }
}
Example 89
Source File: VehiclesPerBBoxActor.scala From BusFloatingData with Apache License 2.0 | 5 votes |
package de.nierbeck.floating.data.server.actors.rest

import akka.actor.Props
import akka.stream.ActorMaterializer
import com.datastax.driver.core.{DefaultPreparedStatement, PreparedStatement, ResultSet}
import de.nierbeck.floating.data.domain.{BoundingBox, Vehicle}
import de.nierbeck.floating.data.server._
import de.nierbeck.floating.data.server.actors.CassandraQuery
import de.nierbeck.floating.data.tiler.TileCalc

import scala.collection.JavaConverters._
import scala.concurrent.{ExecutionContext, Future}
import scala.util.Success

object SparkVehiclesPerBBoxActor {
  def props(): Props = Props(new SparkVehiclesPerBBoxActor())
}

object FlinkVehiclesPerBBoxActor {
  def props(): Props = Props(new FlinkVehiclesPerBBoxActor())
}

class SparkVehiclesPerBBoxActor extends VehiclesPerBBoxActor {
  override def selectTrajectoriesByBBox =
    session.prepare("SELECT * FROM streaming.vehicles_by_tileid WHERE tile_id = ? AND time_id IN ? AND time > ? ")
}

class FlinkVehiclesPerBBoxActor extends VehiclesPerBBoxActor {
  override def selectTrajectoriesByBBox =
    session.prepare("SELECT * FROM streaming.vehicles_by_tileid_flink WHERE tile_id = ? AND time_id IN ? AND time > ? ")
}

abstract class VehiclesPerBBoxActor extends CassandraQuery {

  implicit val executionContext = context.dispatcher
  implicit val actorMaterializer = ActorMaterializer()

  def selectTrajectoriesByBBox: PreparedStatement

  override def receive(): Receive = {
    case (boundingBox: BoundingBox, time: String) =>
      log.info("received a BBox query")
      val eventualVehicles = getVehiclesByBBox(boundingBox, time)
      log.info(s"X: ${eventualVehicles}")
      sender() ! eventualVehicles
    case _ => log.error("Wrong request")
  }

  def getVehiclesByBBox(boundingBox: BoundingBox, time: String)(
    implicit executionContext: ExecutionContext): Future[List[Vehicle]] = {

    log.info(s"Querying with bounding box: ${boundingBox}")

    val tileIds: Set[String] = TileCalc.convertBBoxToTileIDs(boundingBox)

    log.info(s"extracted ${tileIds.size} tileIds")

    val timing = time.toInt
    val timeStamp = new java.util.Date(System.currentTimeMillis() - (timing * 60 * 1000))
    val timeIdminusOne = TileCalc.transformTime(timeStamp).getTime
    val timeId = TileCalc.transformTime(new java.util.Date(System.currentTimeMillis())).getTime

    val timeList = new java.util.ArrayList(List(timeIdminusOne, timeId).asJavaCollection)

    log.info(s"timeId: ${timeIdminusOne},${timeId}")

    val futureResults: Set[Future[ResultSet]] =
      tileIds.map(tileId => session.executeAsync(selectTrajectoriesByBBox.bind(tileId, timeList, timeStamp)).toFuture)

    val futures: Set[Future[List[Vehicle]]] =
      futureResults.map(resultFuture =>
        resultFuture.map(resultSet =>
          resultSet.iterator().asScala.map(row => {
            Vehicle(
              row.getString("id"),
              Some(row.getTimestamp("time")),
              row.getDouble("latitude"),
              row.getDouble("longitude"),
              row.getInt("heading"),
              Some(row.getString("route_id")))
          }).toList))

    val futureVehicles: Future[List[Vehicle]] =
      Future
        .sequence(futures.map(futureToFutureTry(_)))
        .map(_.collect { case Success(x) => x })
        .map(set => set.toList.flatten)

    futureVehicles
  }
}
Example 90
Source File: HotSpotsActor.scala From BusFloatingData with Apache License 2.0 | 5 votes |
package de.nierbeck.floating.data.server.actors.rest

import akka.actor.Props
import akka.stream.ActorMaterializer
import com.datastax.driver.core.{PreparedStatement, ResultSet}
import de.nierbeck.floating.data.domain.{BoundingBox, VehicleCluster}
import de.nierbeck.floating.data.server._
import de.nierbeck.floating.data.server.actors.CassandraQuery
import de.nierbeck.floating.data.tiler.TileCalc

import scala.collection.JavaConverters._
import scala.concurrent.{ExecutionContext, Future}
import scala.util.Success

object HotSpotsSparkActor {
  def props(): Props = Props(new HotSpotsSparkActor())
}

object HotSpotsFlinkActor {
  def props(): Props = Props(new HotSpotsFlinkActor())
}

class HotSpotsSparkActor extends HotSpotsActor {
  override def selectHotSpotsByBoundingBox: PreparedStatement =
    session.prepare("SELECT * FROM streaming.vehiclecluster_by_tileid WHERE tile_id = ?")
}

class HotSpotsFlinkActor extends HotSpotsActor {
  override def selectHotSpotsByBoundingBox: PreparedStatement =
    session.prepare("SELECT * FROM streaming.vehiclecluster_by_tileid_flink WHERE tile_id = ?")
}

abstract class HotSpotsActor extends CassandraQuery {

  implicit val executionContext = context.dispatcher
  implicit val actorMaterializer = ActorMaterializer()

  def selectHotSpotsByBoundingBox: PreparedStatement

  override def receive: Receive = {
    case boundingBox: BoundingBox =>
      log.info("received a BBox query")
      sender() ! getHotSpotsByBBox(boundingBox)
    case _ => log.error("Wrong request")
  }

  def getHotSpotsByBBox(boundingBox: BoundingBox)(
    implicit executionContext: ExecutionContext): Future[List[VehicleCluster]] = {

    log.info(s"Querying with bounding box: ${boundingBox}")

    val tileIds: Set[String] = TileCalc.convertBBoxToTileIDs(boundingBox)

    log.info(s"extracted ${tileIds.size} tileIds")

    val futureResults: Set[Future[ResultSet]] =
      tileIds.map(tileId => session.executeAsync(selectHotSpotsByBoundingBox.bind(tileId)).toFuture)

    val futures: Set[Future[List[VehicleCluster]]] =
      futureResults.map(resultFuture =>
        resultFuture.map { resultSet =>
          resultSet.iterator().asScala.map { row =>
            // log.info("found vehicleCluster in db")
            val vehicle = VehicleCluster(
              row.getInt("id"),
              row.getLong("time_stamp"),
              row.getDouble("latitude"),
              row.getDouble("longitude"),
              row.getInt("amount"))
            // log.info(s"vehicleCluster: ${vehicle}")
            vehicle
          }.toList
        })

    val futureVehicleClusters: Future[List[VehicleCluster]] =
      Future
        .sequence(futures.map(futureToFutureTry(_)))
        .map(_.collect { case Success(x) => x })
        .map(set => set.toList.flatten)

    futureVehicleClusters
  }
}
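Both of the actors above rely on a futureToFutureTry helper imported from the project's package object (de.nierbeck.floating.data.server._), whose definition is not shown here. A sketch of the usual shape of such a helper: it lifts failures into Try so that Future.sequence keeps the successful tile queries instead of failing fast on the first error.

import scala.concurrent.{ExecutionContext, Future}
import scala.util.{Failure, Success, Try}

object FutureUtils {
  // turn a failing Future into a successful Future[Failure(...)]
  def futureToFutureTry[T](f: Future[T])(implicit ec: ExecutionContext): Future[Try[T]] =
    f.map(Success(_)).recover { case t => Failure(t) }
}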
Example 91
Source File: ServiceApp.scala From BusFloatingData with Apache License 2.0 | 5 votes |
package de.nierbeck.floating.data.server

import akka.actor.{ActorRef, ActorSystem, Props}
import akka.event.Logging
import akka.http.scaladsl.Http
import akka.http.scaladsl.model.HttpMethods._
import akka.http.scaladsl.model.ws.UpgradeToWebSocket
import akka.http.scaladsl.model.{HttpRequest, HttpResponse, Uri}
import akka.stream.ActorMaterializer
import de.nierbeck.floating.data.server.actors.websocket.{FLINK, RouterActor, SPARK, TiledVehiclesFromKafkaActor}

import scala.concurrent.Await
import scala.concurrent.duration.Duration
import scala.util.{Failure, Success}

object ServiceApp extends RestService {

  import ServiceConfig._
  import system.dispatcher

  implicit val system = ActorSystem("service-api-http")
  implicit val mat = ActorMaterializer()

  override val logger = Logging(system, getClass.getName)
  override val session = CassandraConnector.connect()

  def main(args: Array[String]): Unit = {

    val router: ActorRef = system.actorOf(Props[RouterActor], "router")
    val sparkKafkaConsumer: ActorRef = system.actorOf(
      TiledVehiclesFromKafkaActor.props(router, "tiledVehicles", SPARK), "Kafka-Consumer-Spark")
    val flinkKafkaConsumer: ActorRef = system.actorOf(
      TiledVehiclesFromKafkaActor.props(router, "flinkTiledVehicles", FLINK), "Kafka-Consumer-Flink")

    val requestHandler: HttpRequest => HttpResponse = {
      case req @ HttpRequest(GET, Uri.Path("/ws/vehicles"), _, _, _) =>
        req.header[UpgradeToWebSocket] match {
          case Some(upgrade) => upgrade.handleMessages(Flows.graphFlowWithStats(router))
          case None => HttpResponse(400, entity = "Not a valid websocket request!")
        }
      case _: HttpRequest => HttpResponse(404, entity = "Unknown resource!")
    }

    Http()
      .bindAndHandle(route(), serviceInterface, servicePort)
      .onComplete {
        case Success(_) => logger.info(s"Successfully bound to $serviceInterface:$servicePort")
        case Failure(e) => logger.error(s"Failed !!!! ${e.getMessage}")
      }

    Http()
      .bindAndHandleSync(requestHandler, serviceInterface, 8001)
      .onComplete {
        case Success(_) => logger.info(s"Successfully started Server to $serviceInterface:8001")
        case Failure(e) => logger.error(s"Failed !!!! ${e.getMessage}")
      }

    Await.ready(system.whenTerminated, Duration.Inf)
    CassandraConnector.close(session)
  }
}
Example 92
Source File: VisualMailboxMetricServer.scala From akka-visualmailbox with Apache License 2.0 | 5 votes |
package de.aktey.akka.visualmailbox

import akka.actor.ActorSystem
import akka.http.scaladsl.Http
import akka.http.scaladsl.Http.ServerBinding
import akka.io.Udp.{Bind, Bound, CommandFailed}
import akka.io.{IO, Udp}
import akka.pattern._
import akka.stream.ActorMaterializer
import akka.util.Timeout
import com.typesafe.config.ConfigFactory
import de.aktey.akka.visualmailbox.data.DataSourceEndpoint
import de.aktey.akka.visualmailbox.web.{Routing, WebConfig}

import scala.concurrent.duration._

object VisualMailboxMetricServer extends App {

  val allConfig = ConfigFactory.load()
  val config = VisualMailboxMetricClientConfig.fromConfig(allConfig)

  implicit val system = ActorSystem("visualmailbox-visualizer")
  implicit val materializer = ActorMaterializer()
  implicit val bindTimeout = Timeout(2.seconds)

  import system._

  val router = system.actorOf(MetricsRouter.props(), "router")
  val dataHandler = system.actorOf(DataSourceEndpoint.props(router), "data-sink")

  (IO(Udp) ? Bind(dataHandler, config.serverAddress)).map {
    case CommandFailed(cmd) => system.terminate()
    case Bound(address) => log.info(s"""{"type":"udp-bound","address":"$address"}""")
  }

  val webConfig = WebConfig.fromConfig(allConfig)

  Http()
    .bindAndHandle(Routing.root(MetricFlow.metricSource(router)), webConfig.host, webConfig.port)
    .foreach {
      case ServerBinding(address) => log.info(s"""{"type":"http-bound","address":"$address"}""")
    }
}
Example 93
Source File: RestActionTester.scala From naptime with Apache License 2.0 | 5 votes |
package org.coursera.naptime.actions

import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import akka.stream.Materializer
import org.coursera.naptime.NaptimeActionException
import org.coursera.naptime.QueryFields
import org.coursera.naptime.QueryIncludes
import org.coursera.naptime.RequestEvidence
import org.coursera.naptime.RequestPagination
import org.coursera.naptime.RestContext
import org.coursera.naptime.RestError
import org.coursera.naptime.RestResponse
import org.junit.After
import org.scalatest.concurrent.ScalaFutures
import org.scalatest.exceptions.TestFailedException
import play.api.test.FakeRequest

import scala.concurrent.ExecutionContext
import scala.util.Try

// The enclosing declaration was lost when this snippet was flattened; the
// trait header below is a minimal reconstruction (assumed from the imports
// and the dangling closing brace) so that the code is well-formed.
trait RestActionTester { this: ScalaFutures =>

  protected[this] implicit class RestActionTestOps[AuthType, BodyType, ResponseType](
      action: RestAction[_, AuthType, BodyType, _, _, ResponseType]) {

    def testAction(ctx: RestContext[AuthType, BodyType]): RestResponse[ResponseType] = {
      val updatedAuthEither = action.restAuthGenerator.apply(ctx.body).check(ctx.auth)

      updatedAuthEither match {
        case Left(error) => RestError(error)
        case Right(updatedAuth) =>
          val responseFuture = action.safeApply(ctx.copyWithAuth(updatedAuth)).recover {
            case e: NaptimeActionException => RestError(e)
          }

          Try(responseFuture.futureValue).recover {
            case e: TestFailedException => e.cause.map(throw _).getOrElse(throw e)
          }.get
      }
    }

    def testActionPassAuth(ctx: RestContext[AuthType, BodyType]): RestResponse[ResponseType] = {
      val responseFuture = action.safeApply(ctx).recover {
        case e: NaptimeActionException => RestError(e)
      }

      Try(responseFuture.futureValue).recover {
        case e: TestFailedException => e.cause.map(throw _).getOrElse(throw e)
      }.get
    }
  }
}
Example 94
Source File: ResourceTestImplicits.scala From naptime with Apache License 2.0 | 5 votes |
package org.coursera.naptime

import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import akka.stream.Materializer
import org.junit.After

import scala.concurrent.ExecutionContext

trait ResourceTestImplicits {
  private[this] val internalActorSystem: ActorSystem = ActorSystem("test")
  private[this] val internalExecutionContext: ExecutionContext = actorSystem.dispatcher
  private[this] val internalMaterializer: Materializer = ActorMaterializer()

  implicit protected def actorSystem: ActorSystem = internalActorSystem
  implicit protected def executionContext: ExecutionContext = internalExecutionContext
  implicit protected def materializer: Materializer = internalMaterializer

  @After
  def shutDownActorSystem(): Unit = {
    actorSystem.terminate()
  }
}
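A minimal usage sketch (the suite name and assertion are hypothetical): mixing ResourceTestImplicits into a JUnit-style test provides the actor system, execution context and materializer, and the @After hook tears the system down afterwards:

class MyResourceTest extends ResourceTestImplicits {
  @org.junit.Test
  def systemIsRunning(): Unit = {
    // the implicit actor system is live until shutDownActorSystem() runs
    assert(!actorSystem.whenTerminated.isCompleted)
  }
}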
Example 95
Source File: XmlrpcConnection.scala From xmlrpc with MIT License | 5 votes |
package xmlrpc

import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import akka.util.Timeout
import org.scalatest.FunSpec
import org.scalatest.concurrent.ScalaFutures
import org.scalatest.time.{Millis, Seconds, Span}
import xmlrpc.protocol.XmlrpcProtocol

import scala.concurrent.duration._
import scala.language.postfixOps
import scalaz.{Success, Failure}

class XmlrpcConnection extends FunSpec with ScalaFutures {
  // Xmlrpc imports
  import Xmlrpc._
  import XmlrpcProtocol._

  // Scalatest setup
  implicit val default: PatienceConfig = PatienceConfig(timeout = Span(5, Seconds), interval = Span(500, Millis))

  // Xmlrpc setup; the server is up, but it is a public one found on the Internet, not mine
  implicit val testServer = XmlrpcServer("http://betty.userland.com/RPC2")

  // Spray setup
  implicit val system = ActorSystem()
  implicit val ma = ActorMaterializer()
  implicit val timeout = Timeout(5 seconds)
  import system.dispatcher

  describe("The connection with a XML-RPC server") {
    it("should invoke the test method successfully in the server") {
      val invocation = invokeMethod[Int, String]("examples.getStateName", 41).underlying
      val responseMessage = "South Dakota"

      whenReady(invocation) {
        case Success(value) => assertResult(responseMessage) { value }
        case Failure(errors) => fail("Errors when deserializing\n" + errors)
      }
    }
  }
}
Example 96
Source File: AkkaHttpBackend.scala From drunk with Apache License 2.0 | 5 votes |
package com.github.jarlakxen.drunk.backend

import scala.collection.immutable
import scala.concurrent.{ExecutionContext, Future}

import akka.actor.ActorSystem
import akka.http.scaladsl.{Http, HttpExt}
import akka.http.scaladsl.model.{ContentTypes, HttpEntity, HttpHeader, HttpMethods, HttpRequest, Uri}
import akka.stream.ActorMaterializer

class AkkaHttpBackend private[AkkaHttpBackend] (
  uri: Uri,
  headers: immutable.Seq[HttpHeader],
  httpExt: HttpExt
)(override implicit val as: ActorSystem, override implicit val mat: ActorMaterializer)
    extends AkkaBackend {

  def send(body: String): Future[(Int, String)] = {
    implicit val ec: ExecutionContext = as.dispatcher

    val req = HttpRequest(HttpMethods.POST, uri, headers, HttpEntity(ContentTypes.`application/json`, body))

    val res = httpExt.singleRequest(req)

    res.flatMap { hr =>
      val code = hr.status.intValue()

      val charsetFromHeaders = encodingFromContentType(hr.entity.contentType.toString).getOrElse("utf-8")
      val decodedResponse = decodeResponse(hr)
      val stringBody = bodyToString(decodedResponse, charsetFromHeaders)

      if (code >= 200 && code < 300) {
        stringBody.map { body =>
          hr.discardEntityBytes()
          (code, body)
        }
      } else {
        stringBody.flatMap { body =>
          hr.discardEntityBytes()
          Future.failed(new RuntimeException(s"${uri.toString} return $code with body: $body"))
        }
      }
    }
  }
}

object AkkaHttpBackend {
  val ContentTypeHeader = "Content-Type"

  def apply(
    uri: Uri,
    headers: immutable.Seq[HttpHeader] = Nil,
    httpExt: Option[HttpExt] = None
  )(implicit as: ActorSystem, mat: ActorMaterializer): AkkaHttpBackend = {
    val http = httpExt.getOrElse { Http(as) }
    new AkkaHttpBackend(uri, headers, http)
  }
}
Example 97
Source File: AkkaConnectionBackend.scala From drunk with Apache License 2.0 | 5 votes |
package com.github.jarlakxen.drunk.backend

import akka.actor.ActorSystem
import akka.http.scaladsl.Http.OutgoingConnection
import akka.http.scaladsl.model._
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.{Flow, Sink, Source}

import scala.collection.immutable
import scala.concurrent.{ExecutionContext, Future}

class AkkaConnectionBackend private[AkkaConnectionBackend] (
  uri: Uri,
  flow: Flow[HttpRequest, HttpResponse, Future[OutgoingConnection]],
  headers: immutable.Seq[HttpHeader]
)(override implicit val as: ActorSystem, override implicit val mat: ActorMaterializer)
    extends AkkaBackend {

  def send(body: String): Future[(Int, String)] = {
    implicit val ec: ExecutionContext = as.dispatcher

    val req = HttpRequest(
      method = HttpMethods.POST,
      uri = uri,
      headers = headers,
      entity = HttpEntity(ContentTypes.`application/json`, body)
    )

    val res = Source.single(req).via(flow).runWith(Sink.head)

    res.flatMap { hr =>
      val code = hr.status.intValue()

      val charsetFromHeaders = encodingFromContentType(hr.entity.contentType.toString).getOrElse("utf-8")
      val decodedResponse = decodeResponse(hr)
      val stringBody = bodyToString(decodedResponse, charsetFromHeaders)

      if (code >= 200 && code < 300) {
        stringBody.map { body =>
          hr.discardEntityBytes()
          (code, body)
        }
      } else {
        stringBody.flatMap { body =>
          hr.discardEntityBytes()
          Future.failed(new RuntimeException(s"${uri.toString} return $code with body: $body"))
        }
      }
    }
  }
}

object AkkaConnectionBackend {

  def apply(
    uri: Uri,
    flow: Flow[HttpRequest, HttpResponse, Future[OutgoingConnection]],
    headers: immutable.Seq[HttpHeader] = Nil
  )(implicit as: ActorSystem, mat: ActorMaterializer): AkkaConnectionBackend =
    new AkkaConnectionBackend(uri, flow, headers)
}
Example 98
Source File: AkkaBackend.scala From drunk with Apache License 2.0 | 5 votes |
package com.github.jarlakxen.drunk.backend

import java.io.UnsupportedEncodingException

import akka.actor.ActorSystem
import akka.http.scaladsl.coding.{Deflate, Gzip, NoCoding}
import akka.http.scaladsl.model.HttpResponse
import akka.http.scaladsl.model.headers.HttpEncodings
import akka.stream.ActorMaterializer
import akka.util.ByteString

import scala.concurrent.{ExecutionContext, Future}

trait AkkaBackend {
  implicit val as: ActorSystem
  implicit val mat: ActorMaterializer

  def send(body: String): Future[(Int, String)]

  protected def encodingFromContentType(ct: String): Option[String] =
    ct.split(";").map(_.trim.toLowerCase).collectFirst {
      case s if s.startsWith("charset=") => s.substring(8)
    }

  protected def decodeResponse(response: HttpResponse): HttpResponse = {
    val decoder = response.encoding match {
      case HttpEncodings.gzip => Gzip
      case HttpEncodings.deflate => Deflate
      case HttpEncodings.identity => NoCoding
      case ce => throw new UnsupportedEncodingException(s"Unsupported encoding: $ce")
    }

    decoder.decodeMessage(response)
  }

  protected def bodyToString(hr: HttpResponse, charsetFromHeaders: String): Future[String] = {
    implicit val ec: ExecutionContext = as.dispatcher

    hr.entity.dataBytes
      .runFold(ByteString.empty)(_ ++ _)
      .map(_.decodeString(charsetFromHeaders))
  }
}
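Since AkkaBackend abstracts over nothing but send, a stub implementation is enough to exercise the decoding helpers; a minimal sketch (the class name is hypothetical), assuming an ActorSystem and ActorMaterializer can be supplied implicitly:

import scala.concurrent.Future
import akka.actor.ActorSystem
import akka.stream.ActorMaterializer

class EchoBackend(implicit val as: ActorSystem, val mat: ActorMaterializer) extends AkkaBackend {
  // echoes the request body back as a 200 response; no HTTP traffic involved
  def send(body: String): Future[(Int, String)] = Future.successful((200, body))
}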
Example 99
Source File: package.scala From drunk with Apache License 2.0 | 5 votes |
package com.github.jarlakxen

import java.net.InetSocketAddress
import java.nio.channels.ServerSocketChannel

import scala.concurrent._
import scala.concurrent.duration._

import akka.actor.ActorSystem
import akka.http.scaladsl.Http
import akka.http.scaladsl.Http.ServerBinding
import akka.http.scaladsl.model._
import akka.http.scaladsl.server.Route
import akka.stream.ActorMaterializer
import akka.testkit._
import org.scalatest.BeforeAndAfterAll

package object drunk {

  trait TestHttpServer extends BeforeAndAfterAll { this: Spec =>

    implicit val system: ActorSystem = ActorSystem("drunk-test")
    implicit def executor = system.dispatcher
    implicit val materializer = ActorMaterializer()

    private def temporaryServerAddress(interface: String = "127.0.0.1"): InetSocketAddress = {
      val serverSocket = ServerSocketChannel.open()
      try {
        serverSocket.socket.bind(new InetSocketAddress(interface, 0))
        val port = serverSocket.socket.getLocalPort
        new InetSocketAddress(interface, port)
      } finally serverSocket.close()
    }

    private def temporaryServerHostnameAndPort(interface: String = "127.0.0.1"): (String, Int) = {
      val socketAddress = temporaryServerAddress(interface)
      (socketAddress.getHostName, socketAddress.getPort)
    }

    val (host, port) = temporaryServerHostnameAndPort()

    override protected def beforeAll(): Unit =
      Http().bindAndHandle(serverRoutes, host, port).futureValue

    override protected def afterAll(): Unit = TestKit.shutdownActorSystem(system)

    def serverRoutes: Route
  }
}
Example 100
Source File: LowLevelServer.scala From akka-http-test with Apache License 2.0 | 5 votes |
package com.github.dnvriend.component.lowlevelserver

import akka.NotUsed
import akka.actor.{ ActorSystem, Props }
import akka.event.{ Logging, LoggingAdapter }
import akka.http.scaladsl.Http
import akka.http.scaladsl.model._
import akka.pattern.ask
import akka.stream.scaladsl.{ Flow, Sink, Source }
import akka.stream.{ ActorMaterializer, Materializer }
import akka.util.Timeout
import com.github.dnvriend.component.lowlevelserver.dto.{ Person, PersonWithId }
import com.github.dnvriend.component.lowlevelserver.marshaller.Marshaller
import com.github.dnvriend.component.lowlevelserver.repository.PersonRepository
import spray.json.{ DefaultJsonProtocol, _ }

import scala.concurrent.duration._
import scala.concurrent.{ ExecutionContext, Future }

class LowLevelServer(implicit val system: ActorSystem, mat: Materializer, ec: ExecutionContext, log: LoggingAdapter, timeout: Timeout) extends DefaultJsonProtocol with Marshaller {
  val personDb = system.actorOf(Props[PersonRepository])

  def debug(t: Any)(implicit log: LoggingAdapter = null): Unit =
    if (Option(log).isEmpty) println(t) else log.debug(t.toString)

  def http200Okay(req: HttpRequest): HttpResponse =
    HttpResponse(StatusCodes.OK)

  def http200AsyncOkay(req: HttpRequest): Future[HttpResponse] =
    Future(http200Okay(req))

  val http200OkayFlow: Flow[HttpRequest, HttpResponse, NotUsed] = Flow[HttpRequest].map { req =>
    HttpResponse(StatusCodes.OK)
  }

  val serverSource: Source[Http.IncomingConnection, Future[Http.ServerBinding]] =
    Http().bind(interface = "localhost", port = 8080)

  val binding: Future[Http.ServerBinding] = serverSource.to(Sink.foreach { conn =>
    //    conn.handleWith(http200OkayFlow)
    //    conn.handleWithSyncHandler(http200Okay)
    //    conn.handleWithAsyncHandler(http200AsyncOkay, 8)
    conn.handleWithAsyncHandler(personRequestHandler)
  }).run()

  def personRequestHandler(req: HttpRequest): Future[HttpResponse] = req match {
    case HttpRequest(HttpMethods.GET, Uri.Path("/api/person"), _, _, _) => for {
      xs <- (personDb ? "findAll").mapTo[List[PersonWithId]]
      entity = HttpEntity(ContentTypes.`application/json`, xs.toJson.compactPrint)
    } yield HttpResponse(StatusCodes.OK, entity = entity)
    case HttpRequest(HttpMethods.POST, Uri.Path("/api/person"), _, ent, _) => for {
      strictEntity <- ent.toStrict(1.second)
      person <- (personDb ? strictEntity.data.utf8String.parseJson.convertTo[Person]).mapTo[PersonWithId]
    } yield HttpResponse(StatusCodes.OK, entity = person.toJson.compactPrint)
    case req =>
      req.discardEntityBytes()
      Future.successful(HttpResponse(StatusCodes.NotFound))
  }
}

object LowLevelServerLauncher extends App with DefaultJsonProtocol {
  // setting up some machinery
  implicit val system: ActorSystem = ActorSystem()
  implicit val mat: Materializer = ActorMaterializer()
  implicit val ec: ExecutionContext = system.dispatcher
  implicit val log: LoggingAdapter = Logging(system, this.getClass)
  implicit val timeout: Timeout = Timeout(10.seconds)

  new LowLevelServer()
}
Example 101
Source File: SimpleServer.scala From akka-http-test with Apache License 2.0 | 5 votes |
package com.github.dnvriend.component.simpleserver

import javax.inject.Inject

import akka.actor.ActorSystem
import akka.event.{ Logging, LoggingAdapter }
import akka.http.scaladsl._
import akka.pattern.CircuitBreaker
import akka.stream.{ ActorMaterializer, Materializer }
import com.github.dnvriend.component.repository.PersonRepository
import com.github.dnvriend.component.simpleserver.route._
import com.google.inject.Singleton
import play.api.Configuration

import scala.concurrent.ExecutionContext
import scala.concurrent.duration._

@Singleton
class SimpleServer @Inject() (personDao: PersonRepository, cb: CircuitBreaker, interface: String, port: Int)(implicit system: ActorSystem, mat: Materializer, ec: ExecutionContext) {
  Http().bindAndHandle(SimpleServerRestRoutes.routes(personDao, cb), interface, port)
}

object SimpleServerLauncher extends App {
  implicit val system: ActorSystem = ActorSystem()
  implicit val mat: Materializer = ActorMaterializer()
  implicit val ec: ExecutionContext = system.dispatcher
  implicit val log: LoggingAdapter = Logging(system, this.getClass)
  val maxFailures: Int = 3
  val callTimeout: FiniteDuration = 1.seconds
  val resetTimeout: FiniteDuration = 10.seconds
  val cb = new CircuitBreaker(system.scheduler, maxFailures, callTimeout, resetTimeout)
  val config: play.api.Configuration = Configuration(system.settings.config)

  sys.addShutdownHook {
    system.terminate()
  }

  new SimpleServer(new PersonRepository, cb, config.getString("http.interface").getOrElse("0.0.0.0"), config.getInt("http.port").getOrElse(8080))
}
Example 102
Source File: AkkaMoneyScope.scala From money with Apache License 2.0 | 5 votes |
package com.comcast.money.akka

import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import akka.testkit.TestKit
import com.comcast.money.akka.SpanHandlerMatchers.clearHandlerChain
import com.typesafe.config.ConfigFactory
import org.scalatest.{ BeforeAndAfterAll, BeforeAndAfterEach, Matchers, WordSpecLike }

abstract class AkkaMoneyScope extends WordSpecLike with Matchers with BeforeAndAfterAll with BeforeAndAfterEach {

  val configString: String =
    """
      | money {
      |  handling = {
      |    async = false
      |    handlers = [
      |    {
      |      class = "com.comcast.money.akka.CollectingSpanHandler"
      |      log-level = "INFO"
      |    }]
      |  }
      | }""".stripMargin

  implicit val actorSystem: ActorSystem = ActorSystem("MoneyAkkaScope", ConfigFactory.parseString(configString))

  implicit val moneyExtension: MoneyExtension = MoneyExtension(actorSystem)

  implicit val materializer: ActorMaterializer = ActorMaterializer()

  override def afterAll(): Unit = TestKit.shutdownActorSystem(actorSystem)

  override def beforeEach(): Unit = clearHandlerChain
}
Example 103
Source File: Server.scala From scalachain with MIT License | 5 votes |
package com.elleflorio.scalachain

import akka.actor.{ActorRef, ActorSystem}
import akka.cluster.pubsub.DistributedPubSub
import akka.http.scaladsl.Http
import akka.http.scaladsl.server.Route
import akka.http.scaladsl.server.Directives._
import akka.stream.ActorMaterializer
import com.elleflorio.scalachain.actor.Node
import com.elleflorio.scalachain.api.NodeRoutes
import com.elleflorio.scalachain.cluster.ClusterManager
import com.typesafe.config.{Config, ConfigFactory}

import scala.concurrent.Await
import scala.concurrent.duration.Duration

object Server extends App with NodeRoutes {

  implicit val system: ActorSystem = ActorSystem("scalachain")
  implicit val materializer: ActorMaterializer = ActorMaterializer()

  val config: Config = ConfigFactory.load()
  val address = config.getString("http.ip")
  val port = config.getInt("http.port")
  val nodeId = config.getString("scalachain.node.id")

  lazy val routes: Route = statusRoutes ~ transactionRoutes ~ mineRoutes

  val clusterManager: ActorRef = system.actorOf(ClusterManager.props(nodeId), "clusterManager")
  val mediator: ActorRef = DistributedPubSub(system).mediator
  val node: ActorRef = system.actorOf(Node.props(nodeId, mediator), "node")

  Http().bindAndHandle(routes, address, port)
  println(s"Server online at http://$address:$port/")

  Await.result(system.whenTerminated, Duration.Inf)
}
Example 104
Source File: package.scala From healthchecks with MIT License | 5 votes |
package com.github.everpeace.healthchecks

import akka.actor.ActorSystem
import akka.http.scaladsl.Http
import akka.http.scaladsl.server.Directives._
import akka.http.scaladsl.server.Route
import akka.stream.ActorMaterializer

import scala.concurrent.{ExecutionContext, Future}

package object k8s {

  private def configPathRoot = "k8s_probe"

  private def config(system: ActorSystem, subPath: String = "") = {
    val path = if (subPath.nonEmpty) configPathRoot + "." + subPath else configPathRoot
    system.settings.config.getConfig(path)
  }

  def livenessProbe(
      checks: HealthCheck*
  )(implicit
    system: ActorSystem,
    ec: ExecutionContext
  ) = {
    LivenessProbe(checks.toList, config(system, "path").getString("liveness"), ec)
  }

  def readinessProbe(
      checks: HealthCheck*
  )(implicit
    system: ActorSystem,
    ec: ExecutionContext
  ) = {
    ReadinessProbe(checks.toList, config(system, "path").getString("readiness"), ec)
  }

  def bindAndHandleProbes(
      probe: K8sProbe,
      probes: K8sProbe*
  )(implicit
    system: ActorSystem,
    am: ActorMaterializer
  ): Future[Http.ServerBinding] = {
    val host = config(system).getString("host")
    val port = config(system).getInt("port")
    val routes = (probe +: probes).toList
      .map(_.toRoute)
      .reduce((r1: Route, r2: Route) => r1 ~ r2)
    Http(system).bindAndHandle(routes, host, port)
  }
}
Example 105
Source File: K8sProbesTest.scala From healthchecks with MIT License | 5 votes |
package com.chatwork.healthcheck.k8s

import akka.actor.ActorSystem
import akka.http.scaladsl.Http
import akka.http.scaladsl.model.{HttpRequest, StatusCodes}
import akka.stream.ActorMaterializer
import com.github.everpeace.healthchecks._
import com.github.everpeace.healthchecks.k8s._
import org.scalatest._

import scala.concurrent.duration._
import scala.concurrent.{Await, Future}

class K8sProbesTest extends FreeSpec with Matchers {

  private def fixture(probe: K8sProbe, probes: K8sProbe*) = new {}

  "K8sProbes" - {
    "should start successfully and return correct response" in {
      implicit val system = ActorSystem()
      implicit val am = ActorMaterializer()
      implicit val ec = system.dispatcher

      val probeBinding = bindAndHandleProbes(
        readinessProbe(healthCheck("readiness_check")(healthy)),
        livenessProbe(asyncHealthCheck("liveness_check")(Future(healthy)))
      )

      def requestToLivenessProbe =
        Http().singleRequest(HttpRequest(uri = "http://localhost:8086/live"))

      def requestToReadinessProbe =
        Http().singleRequest(HttpRequest(uri = "http://localhost:8086/ready"))

      val livenessResponse = Await.result(requestToLivenessProbe, 10 seconds)
      val readinessResponse = Await.result(requestToReadinessProbe, 10 seconds)

      livenessResponse.status shouldEqual StatusCodes.OK
      readinessResponse.status shouldEqual StatusCodes.OK

      system.terminate()
    }
  }
}
Example 106
Source File: SpartaMarathonComponent.scala From sparta with Apache License 2.0 | 5 votes |
package com.stratio.sparta.serving.core.marathon

import akka.actor.ActorSystem
import akka.http.scaladsl.model.HttpMethods._
import akka.stream.ActorMaterializer
import com.stratio.tikitakka.common.exceptions.ConfigurationException
import com.stratio.tikitakka.common.util.ConfigComponent
import com.stratio.tikitakka.updown.marathon.MarathonComponent

import SpartaMarathonComponent._

trait SpartaMarathonComponent extends MarathonComponent {

  override lazy val uri = ConfigComponent.getString(SpartaMarathonComponent.uriField).getOrElse {
    throw ConfigurationException("The marathon uri has not been set")
  }

  override lazy val apiVersion = ConfigComponent.getString(versionField, defaultApiVersion)
}

object SpartaMarathonComponent {

  // Property field constants
  val uriField = "sparta.marathon.tikitakka.marathon.uri"
  val versionField = "sparta.marathon.tikitakka.marathon.api.version"

  // Default property constants
  val defaultApiVersion = "v2"

  val upComponentMethod = POST
  val downComponentMethod = DELETE

  def apply(implicit _system: ActorSystem, _materializer: ActorMaterializer): SpartaMarathonComponent =
    new SpartaMarathonComponent {
      implicit val actorMaterializer: ActorMaterializer = _materializer
      implicit val system: ActorSystem = _system
    }
}
Example 107
Source File: Main.scala From sns with Apache License 2.0 | 5 votes |
package me.snov.sns

import akka.actor.ActorSystem
import akka.event.{Logging, LoggingAdapter}
import akka.http.scaladsl.Http
import akka.http.scaladsl.server.Directives._
import akka.http.scaladsl.server._
import akka.stream.ActorMaterializer
import akka.util.Timeout
import com.typesafe.config.ConfigFactory
import me.snov.sns.actor._
import me.snov.sns.api._
import me.snov.sns.service.FileDbService
import me.snov.sns.util.ToStrict

import scala.concurrent.ExecutionContext
import scala.concurrent.duration._
import scala.util.Properties

object Main extends App with ToStrict {
  implicit val system = ActorSystem("sns")
  implicit val executor: ExecutionContext = system.dispatcher
  implicit val materializer: ActorMaterializer = ActorMaterializer()
  implicit val logger: LoggingAdapter = Logging(system, getClass)
  implicit val timeout = new Timeout(1.second)

  val config = ConfigFactory.load()
  val dbService = new FileDbService(Properties.envOrElse("DB_PATH", config.getString("db.path")))

  val dbActor = system.actorOf(DbActor.props(dbService), name = "DbActor")
  val homeActor = system.actorOf(HomeActor.props, name = "HomeActor")
  val subscribeActor = system.actorOf(SubscribeActor.props(dbActor), name = "SubscribeActor")
  val publishActor = system.actorOf(PublishActor.props(subscribeActor), name = "PublishActor")

  val routes: Route =
    toStrict {
      TopicApi.route(subscribeActor) ~
      SubscribeApi.route(subscribeActor) ~
      PublishApi.route(publishActor) ~
      HealthCheckApi.route ~
      HomeApi.route(homeActor)
    }

  logger.info("SNS v{} is starting", getClass.getPackage.getImplementationVersion)

  Http().bindAndHandle(
    handler = logRequestResult("akka-http-sns")(routes),
    interface = Properties.envOrElse("HTTP_INTERFACE", config.getString("http.interface")),
    port = Properties.envOrElse("HTTP_PORT", config.getString("http.port")).toInt
  )
}
Example 108
Source File: ToStrict.scala From sns with Apache License 2.0 | 5 votes |
package me.snov.sns.util

import akka.http.scaladsl.server.Directives._
import akka.stream.ActorMaterializer

import scala.concurrent.ExecutionContext
import scala.concurrent.duration._

trait ToStrict {
  implicit val materializer: ActorMaterializer
  implicit val executor: ExecutionContext

  val toStrict = mapInnerRoute { innerRoute =>
    val timeout = 1.second
    extractRequest { req =>
      onSuccess(req.toStrict(timeout)) { strictReq =>
        mapRequest(_ => strictReq) {
          innerRoute
        }
      }
    }
  }
}
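A minimal sketch (the class and route are hypothetical) of mixing ToStrict into an API, so every request entity is read fully into memory before the inner route runs:

import akka.http.scaladsl.server.Directives._
import akka.http.scaladsl.server.Route
import akka.stream.ActorMaterializer
import scala.concurrent.ExecutionContext

class EchoApi(implicit val materializer: ActorMaterializer, val executor: ExecutionContext) extends ToStrict {
  // toStrict wraps the inner route, so the handler always sees a strict entity
  val route: Route = toStrict {
    path("echo") {
      post {
        entity(as[String]) { body => complete(body) }
      }
    }
  }
}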
Example 109
Source File: AkkaInterpExampleMain.scala From hammock with MIT License | 5 votes |
package example.interpret

import akka.actor.ActorSystem
import akka.http.scaladsl.{Http, HttpExt}
import akka.stream.ActorMaterializer
import cats.effect.{ContextShift, IO}
import example.repr.{GetResp, GetRespWithQueryString, Req, Resp}
import hammock.{Hammock, Method}
import hammock.marshalling._
import hammock.circe.implicits._
import io.circe.generic.auto._
import hammock.akka.AkkaInterpreter._

import scala.concurrent.ExecutionContext

object AkkaInterpExampleMain extends App {

  implicit val actorSystem: ActorSystem = ActorSystem()
  implicit val materializer: ActorMaterializer = ActorMaterializer()
  implicit val ec: ExecutionContext = ExecutionContext.Implicits.global
  implicit val cs: ContextShift[IO] = IO.contextShift(ec)
  implicit val client: HttpExt = Http(actorSystem)

  // getUri, getUriWithQueryString, postUri, putUri and deleteUri are defined
  // elsewhere in the example sources; they were elided from this snippet.

  //GET
  val getResp = Hammock
    .request(Method.GET, getUri, Map())
    .as[GetResp]
    .exec[IO]
    .unsafeRunSync

  println(s"GET::Response = $getResp")

  //GET with query string
  val getRespWithQueryString = Hammock
    .request(Method.GET, getUriWithQueryString, Map())
    .as[GetRespWithQueryString]
    .exec[IO]
    .unsafeRunSync

  println(s"GET with query string::Response = $getRespWithQueryString")

  //POST
  val postResp = Hammock
    .request(Method.POST, postUri, Map(), Some(Req("name", 4)))
    .as[Resp]
    .exec[IO]
    .unsafeRunSync

  println(s"POST::Response = $postResp")

  //PUT
  val putResp = Hammock
    .request(Method.PUT, putUri, Map(), Some(Req("name", 4)))
    .as[Resp]
    .exec[IO]
    .unsafeRunSync

  println(s"PUT::Response = $putResp")

  //DELETE
  val deleteResp = Hammock
    .request(Method.DELETE, deleteUri, Map(), Some(Req("name", 4)))
    .exec[IO]
    .unsafeRunSync

  println(s"DELETE::Response = $deleteResp")

  actorSystem.terminate()
}
Example 110
Source File: RestServices.scala From incubator-retired-gearpump with Apache License 2.0 | 5 votes |
package org.apache.gearpump.services

import scala.concurrent.Await
import scala.concurrent.duration._

import akka.actor.{ActorRef, ActorSystem}
import akka.http.scaladsl.model.StatusCodes._
import akka.http.scaladsl.server.Directives._
import akka.http.scaladsl.server.{Route, _}
import akka.stream.ActorMaterializer
import akka.util.Timeout
import org.apache.commons.lang.exception.ExceptionUtils

import org.apache.gearpump.jarstore.JarStoreClient
import org.apache.gearpump.util.{Constants, LogUtil}
// NOTE: This cannot be removed!!!
import org.apache.gearpump.services.util.UpickleUtil._

class RestServices(master: ActorRef, mat: ActorMaterializer, system: ActorSystem)
  extends RouteService {

  private val LOG = LogUtil.getLogger(getClass)

  implicit val timeout = Constants.FUTURE_TIMEOUT

  private val config = system.settings.config
  private val jarStoreClient = new JarStoreClient(config, system)

  private val securityEnabled = config.getBoolean(
    Constants.GEARPUMP_UI_SECURITY_AUTHENTICATION_ENABLED)

  private val supervisorPath = system.settings.config.getString(
    Constants.GEARPUMP_SERVICE_SUPERVISOR_PATH)

  private val myExceptionHandler: ExceptionHandler = ExceptionHandler {
    case ex: Throwable => {
      extractUri { uri =>
        LOG.error(s"Request to $uri could not be handled normally", ex)
        complete(InternalServerError, ExceptionUtils.getStackTrace(ex))
      }
    }
  }

  // Makes sure staticRoute is the final one, as it will try to lookup resource in local path
  // if there is no match in previous routes
  private val static = new StaticService(system, supervisorPath).route

  def supervisor: ActorRef = {
    if (supervisorPath == null || supervisorPath.isEmpty()) {
      null
    } else {
      val actorRef = system.actorSelection(supervisorPath).resolveOne()
      Await.result(actorRef, new Timeout(Duration.create(5, "seconds")).duration)
    }
  }

  override def route: Route = {
    if (securityEnabled) {
      val security = new SecurityService(services, system)
      handleExceptions(myExceptionHandler) {
        security.route ~ static
      }
    } else {
      handleExceptions(myExceptionHandler) {
        services.route ~ static
      }
    }
  }

  private def services: RouteService = {
    val admin = new AdminService(system)
    val masterService = new MasterService(master, jarStoreClient, system)
    val worker = new WorkerService(master, system)
    val app = new AppMasterService(master, jarStoreClient, system)
    val sup = new SupervisorService(master, supervisor, system)

    new RouteService {
      override def route: Route = {
        admin.route ~ sup.route ~ masterService.route ~ worker.route ~ app.route
      }
    }
  }
}
Example 111
Source File: MockOAuth2Server.scala From incubator-retired-gearpump with Apache License 2.0 | 5 votes |
package org.apache.gearpump.services.security.oauth2

import scala.concurrent.{Await, Future}

import akka.actor.ActorSystem
import akka.http.scaladsl.Http
import akka.http.scaladsl.Http.ServerBinding
import akka.http.scaladsl.model.{HttpRequest, HttpResponse}
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.Sink

import org.apache.gearpump.util.Util
// NOTE: This cannot be removed!!
import org.apache.gearpump.services.util.UpickleUtil._

class MockOAuth2Server(
    actorSystem: ActorSystem,
    var requestHandler: HttpRequest => HttpResponse) {

  implicit val system: ActorSystem = actorSystem
  implicit val materializer = ActorMaterializer()
  implicit val ec = system.dispatcher

  private var _port: Int = 0
  private var bindingFuture: Future[ServerBinding] = null

  def port: Int = _port

  def start(): Unit = {
    _port = Util.findFreePort().get

    val serverSource = Http().bind(interface = "127.0.0.1", port = _port)
    bindingFuture = {
      serverSource.to(Sink.foreach { connection =>
        connection handleWithSyncHandler requestHandler
      }).run()
    }
  }

  def stop(): Unit = {
    import scala.concurrent.duration._
    Await.result(bindingFuture.map(_.unbind()), 120.seconds)
  }
}
Example 112
Source File: Test6.scala From incubator-retired-gearpump with Apache License 2.0 | 5 votes |
package org.apache.gearpump.akkastream.example

import akka.actor.{Actor, ActorSystem, Props}
import akka.stream.scaladsl.Sink
import akka.stream.{ActorMaterializer, ActorMaterializerSettings}
import org.apache.gearpump.akkastream.GearpumpMaterializer
import org.apache.gearpump.akkastream.scaladsl.GearSource
import org.apache.gearpump.cluster.main.{ArgumentsParser, CLIOption}
import org.apache.gearpump.streaming.dsl.scalaapi.CollectionDataSource
import org.apache.gearpump.util.AkkaApp

import scala.concurrent.Await
import scala.concurrent.duration._

import org.apache.gearpump.akkastream.scaladsl.Implicits._

object Test6 extends AkkaApp with ArgumentsParser {
  // scalastyle:off println
  override val options: Array[(String, CLIOption[Any])] = Array(
    "gearpump" -> CLIOption[Boolean]("<boolean>", required = false, defaultValue = Some(false))
  )

  override def main(akkaConf: Config, args: Array[String]): Unit = {
    val config = parse(args)
    implicit val system = ActorSystem("Test6", akkaConf)
    implicit val materializer: ActorMaterializer = config.getBoolean("gearpump") match {
      case true =>
        GearpumpMaterializer()
      case false =>
        ActorMaterializer(
          ActorMaterializerSettings(system).withAutoFusing(false)
        )
    }
    val echo = system.actorOf(Props(Echo()))
    val sink = Sink.actorRef(echo, "COMPLETE")
    val sourceData = new CollectionDataSource(
      List(
        "this is a good start",
        "this is a good time",
        "time to start",
        "congratulations",
        "green plant",
        "blue sky")
    )
    val source = GearSource.from[String](sourceData)
    source.mapConcat({ line =>
      line.split(" ").toList
    }).groupBy2(x => x)
      .map(word => (word, 1))
      .reduce({ (a, b) =>
        (a._1, a._2 + b._2)
      })
      .log("word-count")
      .runWith(sink)

    Await.result(system.whenTerminated, 60.minutes)
  }

  case class Echo() extends Actor {
    def receive: Receive = {
      case any: AnyRef =>
        println("Confirm received: " + any)
    }
  }
  // scalastyle:on println
}
Example 113
Source File: Test2.scala From incubator-retired-gearpump with Apache License 2.0 | 5 votes |
package org.apache.gearpump.akkastream.example

import akka.actor.{Actor, ActorSystem, Props}
import akka.stream.scaladsl._
import akka.stream.{ActorMaterializer, ClosedShape}
import org.apache.gearpump.akkastream.GearpumpMaterializer
import org.apache.gearpump.akkastream.scaladsl.{GearSink, GearSource}
import org.apache.gearpump.cluster.main.ArgumentsParser
import org.apache.gearpump.util.AkkaApp

import scala.concurrent.Await
import scala.concurrent.duration._

object Test2 extends AkkaApp with ArgumentsParser {
  // scalastyle:off println
  override def main(akkaConf: Config, args: Array[String]): Unit = {
    val config = parse(args)
    implicit val system = ActorSystem("Test2", akkaConf)
    val gearpumpMaterializer = GearpumpMaterializer()

    val echo = system.actorOf(Props(new Echo()))
    val source = GearSource.bridge[String, String]
    val sink = GearSink.bridge[String, String]

    val flow = Flow[String].filter(_.startsWith("red")).map("I want to order item: " + _)
    val (entry, exit) = flow.runWith(source, sink)(gearpumpMaterializer)

    val actorMaterializer = ActorMaterializer()

    val externalSource = Source(
      List("red hat", "yellow sweater", "blue jack", "red apple", "green plant", "blue sky")
    )
    val externalSink = Sink.actorRef(echo, "COMPLETE")

    RunnableGraph.fromGraph(
      GraphDSL.create() { implicit b =>
        import GraphDSL.Implicits._
        externalSource ~> Sink.fromSubscriber(entry)
        Source.fromPublisher(exit) ~> externalSink
        ClosedShape
      }
    ).run()(actorMaterializer)

    Await.result(system.whenTerminated, 60.minutes)
  }

  class Echo extends Actor {
    def receive: Receive = {
      case any: AnyRef =>
        println("Confirm received: " + any)
    }
  }
  // scalastyle:on println
}
Example 114
Source File: Test3.scala From incubator-retired-gearpump with Apache License 2.0 | 5 votes |
package org.apache.gearpump.akkastream.example

import akka.actor.{Actor, ActorSystem, Props}
import akka.stream.{ActorMaterializer, ActorMaterializerSettings}
import org.apache.gearpump.akkastream.GearpumpMaterializer
import org.apache.gearpump.akkastream.scaladsl.GearSource
import akka.stream.scaladsl.Sink
import org.apache.gearpump.cluster.main.{ArgumentsParser, CLIOption}
import org.apache.gearpump.streaming.dsl.scalaapi.CollectionDataSource
import org.apache.gearpump.util.AkkaApp

import scala.concurrent.Await
import scala.concurrent.duration._

object Test3 extends AkkaApp with ArgumentsParser {
  // scalastyle:off println
  override val options: Array[(String, CLIOption[Any])] = Array(
    "gearpump" -> CLIOption[Boolean]("<boolean>", required = false, defaultValue = Some(false))
  )

  override def main(akkaConf: Config, args: Array[String]): Unit = {
    val config = parse(args)
    implicit val system = ActorSystem("Test3", akkaConf)
    implicit val materializer: ActorMaterializer = config.getBoolean("gearpump") match {
      case true =>
        GearpumpMaterializer()
      case false =>
        ActorMaterializer(
          ActorMaterializerSettings(system).withAutoFusing(false)
        )
    }

    val echo = system.actorOf(Props(new Echo()))
    val sink = Sink.actorRef(echo, "COMPLETE")
    val sourceData = new CollectionDataSource(
      List("red hat", "yellow sweater", "blue jack", "red apple", "green plant", "blue sky"))
    val source = GearSource.from[String](sourceData)
    source.filter(_.startsWith("red")).map("I want to order item: " + _).runWith(sink)

    Await.result(system.whenTerminated, 60.minutes)
  }

  class Echo extends Actor {
    def receive: Receive = {
      case any: AnyRef =>
        println("Confirm received: " + any)
    }
  }
  // scalastyle:on println
}
Example 115
Source File: Test9.scala From incubator-retired-gearpump with Apache License 2.0 | 5 votes |
package org.apache.gearpump.akkastream.example

import akka.NotUsed
import akka.actor.{Actor, ActorSystem, Props}
import akka.stream.{ActorMaterializer, ActorMaterializerSettings, ClosedShape}
import akka.stream.scaladsl._
import org.apache.gearpump.akkastream.GearpumpMaterializer
import org.apache.gearpump.cluster.main.{ArgumentsParser, CLIOption}
import org.apache.gearpump.util.AkkaApp

import scala.concurrent.Await
import scala.concurrent.duration._

object Test9 extends AkkaApp with ArgumentsParser {
  // scalastyle:off println
  override val options: Array[(String, CLIOption[Any])] = Array(
    "gearpump" -> CLIOption[Boolean]("<boolean>", required = false, defaultValue = Some(false))
  )

  override def main(akkaConf: Config, args: Array[String]): Unit = {
    val config = parse(args)
    implicit val system = ActorSystem("Test9", akkaConf)
    implicit val materializer: ActorMaterializer = config.getBoolean("gearpump") match {
      case true =>
        GearpumpMaterializer()
      case false =>
        ActorMaterializer(
          ActorMaterializerSettings(system).withAutoFusing(false)
        )
    }
    implicit val ec = system.dispatcher

    val sinkActor = system.actorOf(Props(new SinkActor()))
    val source = Source((1 to 5))
    val sink = Sink.actorRef(sinkActor, "COMPLETE")
    val flowA: Flow[Int, Int, NotUsed] = Flow[Int].map { x =>
      println(s"processing broadcasted element : $x in flowA"); x
    }
    val flowB: Flow[Int, Int, NotUsed] = Flow[Int].map { x =>
      println(s"processing broadcasted element : $x in flowB"); x
    }

    val graph = RunnableGraph.fromGraph(GraphDSL.create() { implicit b =>
      import GraphDSL.Implicits._
      val broadcast = b.add(Broadcast[Int](2))
      val merge = b.add(Merge[Int](2))
      source ~> broadcast
      broadcast ~> flowA ~> merge
      broadcast ~> flowB ~> merge
      merge ~> sink
      ClosedShape
    })

    graph.run()

    Await.result(system.whenTerminated, 60.minutes)
  }

  class SinkActor extends Actor {
    def receive: Receive = {
      case any: AnyRef =>
        println("Confirm received: " + any)
    }
  }
  // scalastyle:on println
}
Example 116
Source File: Test8.scala From incubator-retired-gearpump with Apache License 2.0 | 5 votes |
package org.apache.gearpump.akkastream.example

import akka.NotUsed
import akka.actor.ActorSystem
import akka.stream.{ActorMaterializer, ActorMaterializerSettings}
import akka.stream.scaladsl._
import org.apache.gearpump.akkastream.GearpumpMaterializer
import org.apache.gearpump.cluster.main.{ArgumentsParser, CLIOption}
import org.apache.gearpump.util.AkkaApp

import scala.concurrent.{Await, Future}
import scala.concurrent.duration._

object Test8 extends AkkaApp with ArgumentsParser {
  // scalastyle:off println
  override val options: Array[(String, CLIOption[Any])] = Array(
    "gearpump" -> CLIOption[Boolean]("<boolean>", required = false, defaultValue = Some(false))
  )

  override def main(akkaConf: Config, args: Array[String]): Unit = {
    val config = parse(args)
    implicit val system = ActorSystem("Test8", akkaConf)
    implicit val materializer: ActorMaterializer = config.getBoolean("gearpump") match {
      case true =>
        GearpumpMaterializer()
      case false =>
        ActorMaterializer(
          ActorMaterializerSettings(system).withAutoFusing(false)
        )
    }
    implicit val ec = system.dispatcher

    // Source gives 1 to 100 elements
    val source: Source[Int, NotUsed] = Source(Stream.from(1).take(100))
    val sink: Sink[Int, Future[Int]] = Sink.fold[Int, Int](0)(_ + _)

    val result: Future[Int] = source.runWith(sink)
    result.map(sum => {
      println(s"Sum of stream elements => $sum")
    })

    Await.result(system.whenTerminated, 60.minutes)
  }
  // scalastyle:on println
}
Example 117
Source File: OrderServiceApp.scala From 006877 with MIT License | 5 votes |
package aia.integration

import scala.concurrent.Future

import akka.actor.{ ActorSystem, Actor, Props }
import akka.event.Logging
import akka.util.Timeout

import akka.http.scaladsl.Http
import akka.http.scaladsl.Http.ServerBinding
import akka.http.scaladsl.server.Directives._
import akka.stream.ActorMaterializer

import com.typesafe.config.{ Config, ConfigFactory }

object OrderServiceApp extends App with RequestTimeout {
  val config = ConfigFactory.load()
  val host = config.getString("http.host")
  val port = config.getInt("http.port")

  implicit val system = ActorSystem()
  implicit val ec = system.dispatcher

  val processOrders = system.actorOf(
    Props(new ProcessOrders), "process-orders"
  )

  val api = new OrderServiceApi(system, requestTimeout(config), processOrders).routes

  implicit val materializer = ActorMaterializer()
  val bindingFuture: Future[ServerBinding] =
    Http().bindAndHandle(api, host, port)

  val log = Logging(system.eventStream, "order-service")
  bindingFuture.map { serverBinding =>
    log.info(s"Bound to ${serverBinding.localAddress} ")
  }.failed.foreach {
    case ex: Exception =>
      log.error(ex, "Failed to bind to {}:{}!", host, port)
      system.terminate()
  }
}

trait RequestTimeout {
  import scala.concurrent.duration._
  def requestTimeout(config: Config): Timeout = {
    val t = config.getString("akka.http.server.request-timeout")
    val d = Duration(t)
    FiniteDuration(d.length, d.unit)
  }
}
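The RequestTimeout trait reused across these apps simply re-parses Akka HTTP's request-timeout setting into an akka.util.Timeout. A small usage sketch (the object name and config value are made up):

import com.typesafe.config.ConfigFactory

object RequestTimeoutDemo extends App with RequestTimeout {
  val cfg = ConfigFactory.parseString("akka.http.server.request-timeout = 20s")
  println(requestTimeout(cfg)) // Timeout(20 seconds)
}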
Example 118
Source File: PaymentHistory.scala From 006877 with MIT License | 5 votes |
package aia.persistence

import akka.actor._
import akka.persistence.query.PersistenceQuery
import akka.persistence.query.journal.leveldb.scaladsl.LeveldbReadJournal
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.Sink

object PaymentHistory {
  def props(shopperId: Long) = Props(new PaymentHistory(shopperId))
  def name(shopperId: Long) = s"payment_history_${shopperId}"

  case object GetHistory

  case class History(items: List[Item] = Nil) {
    def paid(paidItems: List[Item]) = {
      History(paidItems ++ items)
    }
  }
}

class PaymentHistory(shopperId: Long) extends Actor with ActorLogging {
  import PaymentHistory._

  val queries = PersistenceQuery(context.system)
    .readJournalFor[LeveldbReadJournal](LeveldbReadJournal.Identifier)

  implicit val materializer = ActorMaterializer()
  queries.eventsByPersistenceId(Wallet.name(shopperId)).runWith(Sink.actorRef(self, None))

  var history = History()

  def receive = {
    case Wallet.Paid(items, _) => history = history.paid(items)
    case GetHistory => sender() ! history
  }
}
Example 119
Source File: ShoppersServiceSupport.scala From 006877 with MIT License | 5 votes |
package aia.persistence.rest

import com.typesafe.config.Config

import scala.concurrent.Future

import akka.actor._
import akka.event.Logging
import akka.util.Timeout

import akka.http.scaladsl.Http
import akka.http.scaladsl.Http.ServerBinding
import akka.stream.ActorMaterializer

import aia.persistence._

trait ShoppersServiceSupport extends RequestTimeout {
  def startService(shoppers: ActorRef)(implicit system: ActorSystem) = {
    val config = system.settings.config
    val settings = Settings(system)
    val host = settings.http.host
    val port = settings.http.port

    implicit val ec = system.dispatcher // bindAndHandle requires an implicit ExecutionContext

    val api = new ShoppersService(shoppers, system, requestTimeout(config)).routes // the RestApi provides a Route

    implicit val materializer = ActorMaterializer()
    val bindingFuture: Future[ServerBinding] =
      Http().bindAndHandle(api, host, port)

    val log = Logging(system.eventStream, "shoppers")
    bindingFuture.map { serverBinding =>
      log.info(s"Shoppers API bound to ${serverBinding.localAddress} ")
    }.failed.foreach {
      case ex: Exception =>
        log.error(ex, "Failed to bind to {}:{}!", host, port)
        system.terminate()
    }
  }
}

trait RequestTimeout {
  import scala.concurrent.duration._
  def requestTimeout(config: Config): Timeout = {
    val t = config.getString("akka.http.server.request-timeout")
    val d = Duration(t)
    FiniteDuration(d.length, d.unit)
  }
}
Example 120
Source File: CalculatorHistory.scala From 006877 with MIT License | 5 votes |
package aia.persistence.calculator

import akka.actor._
import akka.persistence.query.PersistenceQuery
import akka.persistence.query.journal.leveldb.scaladsl.LeveldbReadJournal
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.Sink

object CalculatorHistory {
  def props = Props(new CalculatorHistory)
  def name = "calculator-history"

  case object GetHistory

  case class History(added: Int = 0, subtracted: Int = 0, divided: Int = 0, multiplied: Int = 0) {
    def incrementAdded = copy(added = added + 1)
    def incrementSubtracted = copy(subtracted = subtracted + 1)
    def incrementDivided = copy(divided = divided + 1)
    def incrementMultiplied = copy(multiplied = multiplied + 1)
  }
}

class CalculatorHistory extends Actor {
  import Calculator._
  import CalculatorHistory._

  val queries = PersistenceQuery(context.system)
    .readJournalFor[LeveldbReadJournal](LeveldbReadJournal.Identifier)

  implicit val materializer = ActorMaterializer()
  queries.eventsByPersistenceId(Calculator.name).runWith(Sink.actorRef(self, None))

  var history = History()

  def receive = {
    case _: Added => history = history.incrementAdded
    case _: Subtracted => history = history.incrementSubtracted
    case _: Divided => history = history.incrementDivided
    case _: Multiplied => history = history.incrementMultiplied
    case GetHistory => sender() ! history
  }
}
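Because History is an immutable case class, each increment returns a new value rather than mutating counters in place; a quick illustration (values made up):

val h = CalculatorHistory.History().incrementAdded.incrementAdded.incrementDivided
// h == History(added = 2, subtracted = 0, divided = 1, multiplied = 0)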
Example 121
Source File: Main.scala From 006877 with MIT License | 5 votes |
package com.goticks

import scala.concurrent.Future
import scala.util.{Failure, Success}

import akka.actor.{ ActorSystem, Actor, Props }
import akka.event.Logging
import akka.util.Timeout

import akka.http.scaladsl.Http
import akka.http.scaladsl.Http.ServerBinding
import akka.http.scaladsl.server.Directives._
import akka.stream.ActorMaterializer

import com.typesafe.config.{ Config, ConfigFactory }

object Main extends App with RequestTimeout {
  val config = ConfigFactory.load()
  val host = config.getString("http.host") // get the host and port from the configuration
  val port = config.getInt("http.port")

  implicit val system = ActorSystem()
  implicit val ec = system.dispatcher // bindAndHandle is asynchronous and needs an implicit ExecutionContext

  val api = new RestApi(system, requestTimeout(config)).routes // the RestApi provides the HTTP routes

  implicit val materializer = ActorMaterializer()
  val bindingFuture: Future[ServerBinding] =
    Http().bindAndHandle(api, host, port) // start the HTTP server with the RestApi routes

  val log = Logging(system.eventStream, "go-ticks")
  bindingFuture.map { serverBinding =>
    log.info(s"RestApi bound to ${serverBinding.localAddress} ")
  }.onComplete {
    case Success(v) =>
    case Failure(ex) =>
      log.error(ex, "Failed to bind to {}:{}!", host, port)
      system.terminate()
  }
}

trait RequestTimeout {
  import scala.concurrent.duration._
  def requestTimeout(config: Config): Timeout = {
    val t = config.getString("akka.http.server.request-timeout")
    val d = Duration(t)
    FiniteDuration(d.length, d.unit)
  }
}
Example 122
Source File: Startup.scala From 006877 with MIT License | 5 votes |
package com.goticks

import scala.concurrent.Future
import scala.util.{Failure, Success}

import akka.actor.ActorSystem
import akka.event.Logging

import akka.http.scaladsl.Http
import akka.http.scaladsl.Http.ServerBinding
import akka.http.scaladsl.server.Route
import akka.stream.ActorMaterializer

trait Startup extends RequestTimeout {
  def startup(api: Route)(implicit system: ActorSystem) = {
    val host = system.settings.config.getString("http.host") // get the host and port from the configuration
    val port = system.settings.config.getInt("http.port")
    startHttpServer(api, host, port)
  }

  def startHttpServer(api: Route, host: String, port: Int)
      (implicit system: ActorSystem) = {
    implicit val ec = system.dispatcher // bindAndHandle requires an implicit ExecutionContext
    implicit val materializer = ActorMaterializer()
    val bindingFuture: Future[ServerBinding] =
      Http().bindAndHandle(api, host, port) // start the HTTP server

    val log = Logging(system.eventStream, "go-ticks")
    bindingFuture.map { serverBinding =>
      log.info(s"RestApi bound to ${serverBinding.localAddress} ")
    }.onComplete {
      case Success(v) =>
      case Failure(ex) =>
        log.error(ex, "Failed to bind to {}:{}!", host, port)
        system.terminate()
    }
  }
}
Example 123
Source File: ResumingEventFilter.scala From 006877 with MIT License | 5 votes |
package aia.stream

import java.nio.file.{ Path, Paths }
import java.nio.file.StandardOpenOption
import java.nio.file.StandardOpenOption._

import scala.concurrent.Future

import akka.NotUsed
import akka.actor.ActorSystem
import akka.stream.{ ActorMaterializer, IOResult }
import akka.util.ByteString

import spray.json._
import com.typesafe.config.{ Config, ConfigFactory }

object ResumingEventFilter extends App with EventMarshalling {
  val config = ConfigFactory.load()
  val maxLine = config.getInt("log-stream-processor.max-line")

  if (args.length != 3) {
    System.err.println("Provide args: input-file output-file state")
    System.exit(1)
  }

  val inputFile = FileArg.shellExpanded(args(0))
  val outputFile = FileArg.shellExpanded(args(1))

  val filterState = args(2) match {
    case State(state) => state
    case unknown =>
      System.err.println(s"Unknown state $unknown, exiting.")
      System.exit(1)
  }

  import akka.stream.scaladsl._

  val source: Source[ByteString, Future[IOResult]] =
    FileIO.fromPath(inputFile)

  val sink: Sink[ByteString, Future[IOResult]] =
    FileIO.toPath(outputFile, Set(CREATE, WRITE, APPEND))

  val frame: Flow[ByteString, String, NotUsed] =
    Framing.delimiter(ByteString("\n"), maxLine)
      .map(_.decodeString("UTF8"))

  import akka.stream.ActorAttributes
  import akka.stream.Supervision

  import LogStreamProcessor.LogParseException

  val decider: Supervision.Decider = {
    case _: LogParseException => Supervision.Resume
    case _ => Supervision.Stop
  }

  val parse: Flow[String, Event, NotUsed] =
    Flow[String].map(LogStreamProcessor.parseLineEx)
      .collect { case Some(e) => e }
      .withAttributes(ActorAttributes.supervisionStrategy(decider))

  val filter: Flow[Event, Event, NotUsed] =
    Flow[Event].filter(_.state == filterState)

  val serialize: Flow[Event, ByteString, NotUsed] =
    Flow[Event].map(event => ByteString(event.toJson.compactPrint))

  implicit val system = ActorSystem()
  implicit val ec = system.dispatcher

  val graphDecider: Supervision.Decider = {
    case _: LogParseException => Supervision.Resume
    case _ => Supervision.Stop
  }

  import akka.stream.ActorMaterializerSettings
  implicit val materializer = ActorMaterializer(
    ActorMaterializerSettings(system)
      .withSupervisionStrategy(graphDecider)
  )

  val composedFlow: Flow[ByteString, ByteString, NotUsed] =
    frame.via(parse)
      .via(filter)
      .via(serialize)

  val runnableGraph: RunnableGraph[Future[IOResult]] =
    source.via(composedFlow).toMat(sink)(Keep.right)

  runnableGraph.run().foreach { result =>
    println(s"Wrote ${result.count} bytes to '$outputFile'.")
    system.terminate()
  }
}
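The Resume decider above is what lets the stream skip unparsable log lines instead of failing the whole run. A self-contained sketch of the same idea on a toy stream (all names here are illustrative, not from the book's sources):

import akka.actor.ActorSystem
import akka.stream.{ActorAttributes, ActorMaterializer, Supervision}
import akka.stream.scaladsl.{Sink, Source}

object SupervisionSketch extends App {
  implicit val system = ActorSystem()
  implicit val materializer = ActorMaterializer()
  import system.dispatcher

  val decider: Supervision.Decider = {
    case _: NumberFormatException => Supervision.Resume
    case _ => Supervision.Stop
  }

  Source(List("1", "two", "3"))
    .map(_.toInt) // throws NumberFormatException on "two"
    .withAttributes(ActorAttributes.supervisionStrategy(decider))
    .runWith(Sink.seq)
    .foreach { xs =>
      println(xs) // Vector(1, 3): the failing element was dropped, not fatal
      system.terminate()
    }
}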
Example 124
Source File: BidiEventFilter.scala From 006877 with MIT License | 5 votes |
package aia.stream

import java.nio.file.{ Path, Paths }
import java.nio.file.StandardOpenOption
import java.nio.file.StandardOpenOption._

import scala.concurrent.Future

import akka.NotUsed
import akka.actor.ActorSystem
import akka.stream.{ ActorMaterializer, IOResult }
import akka.stream.scaladsl._
import akka.stream.scaladsl.JsonFraming
import akka.util.ByteString

import spray.json._
import com.typesafe.config.{ Config, ConfigFactory }

object BidiEventFilter extends App with EventMarshalling {
  val config = ConfigFactory.load()
  val maxLine = config.getInt("log-stream-processor.max-line")
  val maxJsonObject = config.getInt("log-stream-processor.max-json-object")

  if (args.length != 5) {
    System.err.println("Provide args: input-format output-format input-file output-file state")
    System.exit(1)
  }

  val inputFile = FileArg.shellExpanded(args(2))
  val outputFile = FileArg.shellExpanded(args(3))
  val filterState = args(4) match {
    case State(state) => state
    case unknown =>
      System.err.println(s"Unknown state $unknown, exiting.")
      System.exit(1)
  }

  val inFlow: Flow[ByteString, Event, NotUsed] =
    if (args(0).toLowerCase == "json") {
      JsonFraming.objectScanner(maxJsonObject)
        .map(_.decodeString("UTF8").parseJson.convertTo[Event])
    } else {
      Framing.delimiter(ByteString("\n"), maxLine)
        .map(_.decodeString("UTF8"))
        .map(LogStreamProcessor.parseLineEx)
        .collect { case Some(event) => event }
    }

  val outFlow: Flow[Event, ByteString, NotUsed] =
    if (args(1).toLowerCase == "json") {
      Flow[Event].map(event => ByteString(event.toJson.compactPrint))
    } else {
      Flow[Event].map { event =>
        ByteString(LogStreamProcessor.logLine(event))
      }
    }

  val bidiFlow = BidiFlow.fromFlows(inFlow, outFlow)

  val source: Source[ByteString, Future[IOResult]] =
    FileIO.fromPath(inputFile)

  val sink: Sink[ByteString, Future[IOResult]] =
    FileIO.toPath(outputFile, Set(CREATE, WRITE, APPEND))

  val filter: Flow[Event, Event, NotUsed] =
    Flow[Event].filter(_.state == filterState)

  val flow = bidiFlow.join(filter)

  val runnableGraph: RunnableGraph[Future[IOResult]] =
    source.via(flow).toMat(sink)(Keep.right)

  implicit val system = ActorSystem()
  implicit val ec = system.dispatcher
  implicit val materializer = ActorMaterializer()

  runnableGraph.run().foreach { result =>
    println(s"Wrote ${result.count} bytes to '$outputFile'.")
    system.terminate()
  }
}
Example 125
Source File: GenerateLogFile.scala From 006877 with MIT License | 5 votes |
package aia.stream

import java.nio.file.{ Path, Paths }
import java.nio.file.StandardOpenOption
import java.nio.file.StandardOpenOption._
import java.time.ZonedDateTime
import java.time.format.DateTimeFormatter

import scala.concurrent.Future

import akka.actor.ActorSystem
import akka.stream.{ ActorMaterializer, IOResult }
import akka.stream.scaladsl._
import akka.util.ByteString

object GenerateLogFile extends App {
  val filePath = args(0)
  val numberOfLines = args(1).toInt
  val rnd = new java.util.Random()
  val sink = FileIO.toPath(FileArg.shellExpanded(filePath), Set(CREATE, WRITE, APPEND))

  def line(i: Int) = {
    val host = "my-host"
    val service = "my-service"
    val time = ZonedDateTime.now.format(DateTimeFormatter.ISO_INSTANT)
    val state =
      if (i % 10 == 0) "warning"
      else if (i % 101 == 0) "error"
      else if (i % 1002 == 0) "critical"
      else "ok"
    val description = "Some description of what has happened."
    val tag = "tag"
    val metric = rnd.nextDouble() * 100
    s"$host | $service | $state | $time | $description | $tag | $metric \n"
  }

  val graph = Source.fromIterator { () =>
    Iterator.tabulate(numberOfLines)(line)
  }.map(l => ByteString(l)).toMat(sink)(Keep.right)

  implicit val system = ActorSystem()
  implicit val ec = system.dispatcher
  implicit val materializer = ActorMaterializer()

  graph.run().foreach { result =>
    println(s"Wrote ${result.count} bytes to '$filePath'.")
    system.terminate()
  }
}
Example 126
Source File: FanLogsApp.scala From 006877 with MIT License | 5 votes |
package aia.stream

import java.nio.file.{ Files, FileSystems, Path }

import scala.concurrent.Future
import scala.concurrent.duration._

import akka.NotUsed
import akka.actor.{ ActorSystem, Actor, Props }
import akka.event.Logging
import akka.stream.{ ActorMaterializer, ActorMaterializerSettings, Supervision }
import akka.http.scaladsl.Http
import akka.http.scaladsl.Http.ServerBinding
import akka.http.scaladsl.server.Directives._
import com.typesafe.config.{ Config, ConfigFactory }

object FanLogsApp extends App {
  val config = ConfigFactory.load()
  val host = config.getString("http.host")
  val port = config.getInt("http.port")

  val logsDir = {
    val dir = config.getString("log-stream-processor.logs-dir")
    Files.createDirectories(FileSystems.getDefault.getPath(dir))
  }
  val maxLine = config.getInt("log-stream-processor.max-line")
  val maxJsObject = config.getInt("log-stream-processor.max-json-object")

  implicit val system = ActorSystem()
  implicit val ec = system.dispatcher

  val decider: Supervision.Decider = {
    case _: LogStreamProcessor.LogParseException => Supervision.Resume
    case _                                       => Supervision.Stop
  }

  implicit val materializer = ActorMaterializer(
    ActorMaterializerSettings(system)
      .withSupervisionStrategy(decider)
  )

  val api = new FanLogsApi(logsDir, maxLine, maxJsObject).routes

  val bindingFuture: Future[ServerBinding] =
    Http().bindAndHandle(api, host, port)

  val log = Logging(system.eventStream, "fan-logs")
  bindingFuture.map { serverBinding =>
    log.info(s"Bound to ${serverBinding.localAddress} ")
  }.onFailure {
    case ex: Exception =>
      log.error(ex, "Failed to bind to {}:{}!", host, port)
      system.terminate()
  }
}
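The Resume decider above is what keeps the service alive when a single log line fails to parse: the offending element is dropped and the stream continues. A minimal, self-contained sketch of the same mechanism (the ArithmeticException and the sample numbers are illustrative, not taken from the project):

import akka.actor.ActorSystem
import akka.stream.{ ActorMaterializer, ActorMaterializerSettings, Supervision }
import akka.stream.scaladsl.{ Sink, Source }

object SupervisionDemo extends App {
  implicit val system = ActorSystem()

  val decider: Supervision.Decider = {
    case _: ArithmeticException => Supervision.Resume // drop the element, keep going
    case _                      => Supervision.Stop
  }

  implicit val materializer = ActorMaterializer(
    ActorMaterializerSettings(system).withSupervisionStrategy(decider))

  // 100 / 0 throws, that element is dropped, and 100, 50, 25 are printed
  Source(List(1, 2, 0, 4))
    .map(100 / _)
    .runWith(Sink.foreach(println))
}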
Example 127
Source File: LogsApp.scala From 006877 with MIT License | 5 votes |
package aia.stream

import java.nio.file.{ Files, FileSystems, Path }

import scala.concurrent.Future
import scala.concurrent.duration._

import akka.NotUsed
import akka.actor.{ ActorSystem, Actor, Props }
import akka.event.Logging
import akka.stream.{ ActorMaterializer, ActorMaterializerSettings, Supervision }
import akka.http.scaladsl.Http
import akka.http.scaladsl.Http.ServerBinding
import akka.http.scaladsl.server.Directives._
import com.typesafe.config.{ Config, ConfigFactory }

object LogsApp extends App {
  val config = ConfigFactory.load()
  val host = config.getString("http.host")
  val port = config.getInt("http.port")

  val logsDir = {
    val dir = config.getString("log-stream-processor.logs-dir")
    Files.createDirectories(FileSystems.getDefault.getPath(dir))
  }
  val maxLine = config.getInt("log-stream-processor.max-line")

  implicit val system = ActorSystem()
  implicit val ec = system.dispatcher

  val decider: Supervision.Decider = {
    case _: LogStreamProcessor.LogParseException => Supervision.Stop
    case _                                       => Supervision.Stop
  }

  implicit val materializer = ActorMaterializer(
    ActorMaterializerSettings(system)
      .withSupervisionStrategy(decider)
  )

  val api = new LogsApi(logsDir, maxLine).routes

  val bindingFuture: Future[ServerBinding] =
    Http().bindAndHandle(api, host, port)

  val log = Logging(system.eventStream, "logs")
  bindingFuture.map { serverBinding =>
    log.info(s"Bound to ${serverBinding.localAddress} ")
  }.onFailure {
    case ex: Exception =>
      log.error(ex, "Failed to bind to {}:{}!", host, port)
      system.terminate()
  }
}
Example 128
Source File: LogStreamProcessorApp.scala From 006877 with MIT License | 5 votes |
package aia.stream

import java.nio.file.{ Files, FileSystems, Path }

import scala.concurrent.Future
import scala.concurrent.duration._

import akka.NotUsed
import akka.actor.{ ActorSystem, Actor, Props }
import akka.event.Logging
import akka.stream.{ ActorMaterializer, ActorMaterializerSettings, Supervision }
import akka.http.scaladsl.Http
import akka.http.scaladsl.Http.ServerBinding
import akka.http.scaladsl.server.Directives._
import com.typesafe.config.{ Config, ConfigFactory }

object LogStreamProcessorApp extends App {
  val config = ConfigFactory.load()
  val host = config.getString("http.host")
  val port = config.getInt("http.port")

  val logsDir = {
    val dir = config.getString("log-stream-processor.logs-dir")
    Files.createDirectories(FileSystems.getDefault.getPath(dir))
  }
  val notificationsDir = {
    val dir = config.getString("log-stream-processor.notifications-dir")
    Files.createDirectories(FileSystems.getDefault.getPath(dir))
  }
  val metricsDir = {
    val dir = config.getString("log-stream-processor.metrics-dir")
    Files.createDirectories(FileSystems.getDefault.getPath(dir))
  }
  val maxLine = config.getInt("log-stream-processor.max-line")
  val maxJsObject = config.getInt("log-stream-processor.max-json-object")

  implicit val system = ActorSystem()
  implicit val ec = system.dispatcher

  val decider: Supervision.Decider = {
    case _: LogStreamProcessor.LogParseException => Supervision.Resume
    case _                                       => Supervision.Stop
  }

  implicit val materializer = ActorMaterializer(
    ActorMaterializerSettings(system)
      .withSupervisionStrategy(decider)
  )

  val api = new LogStreamProcessorApi(logsDir, notificationsDir, metricsDir, maxLine, maxJsObject).routes

  val bindingFuture: Future[ServerBinding] =
    Http().bindAndHandle(api, host, port)

  val log = Logging(system.eventStream, "processor")
  bindingFuture.map { serverBinding =>
    log.info(s"Bound to ${serverBinding.localAddress} ")
  }.onFailure {
    case ex: Exception =>
      log.error(ex, "Failed to bind to {}:{}!", host, port)
      system.terminate()
  }
}
Example 129
Source File: ContentNegLogsApp.scala From 006877 with MIT License | 5 votes |
package aia.stream

import java.nio.file.{ Files, FileSystems, Path }

import scala.concurrent.Future
import scala.concurrent.duration._

import akka.NotUsed
import akka.actor.{ ActorSystem, Actor, Props }
import akka.event.Logging
import akka.stream.{ ActorMaterializer, ActorMaterializerSettings, Supervision }
import akka.http.scaladsl.Http
import akka.http.scaladsl.Http.ServerBinding
import akka.http.scaladsl.server.Directives._
import com.typesafe.config.{ Config, ConfigFactory }

object ContentNegLogsApp extends App {
  val config = ConfigFactory.load()
  val host = config.getString("http.host")
  val port = config.getInt("http.port")

  val logsDir = {
    val dir = config.getString("log-stream-processor.logs-dir")
    Files.createDirectories(FileSystems.getDefault.getPath(dir))
  }
  val maxLine = config.getInt("log-stream-processor.max-line")
  val maxJsObject = config.getInt("log-stream-processor.max-json-object")

  implicit val system = ActorSystem()
  implicit val ec = system.dispatcher

  val decider: Supervision.Decider = {
    case _: LogStreamProcessor.LogParseException => Supervision.Stop
    case _                                       => Supervision.Stop
  }

  implicit val materializer = ActorMaterializer(
    ActorMaterializerSettings(system)
      .withSupervisionStrategy(decider)
  )

  val api = new ContentNegLogsApi(logsDir, maxLine, maxJsObject).routes

  val bindingFuture: Future[ServerBinding] =
    Http().bindAndHandle(api, host, port)

  val log = Logging(system.eventStream, "content-neg-logs")
  bindingFuture.map { serverBinding =>
    log.info(s"Bound to ${serverBinding.localAddress} ")
  }.onFailure {
    case ex: Exception =>
      log.error(ex, "Failed to bind to {}:{}!", host, port)
      system.terminate()
  }
}
Example 130
Source File: LogJson.scala From 006877 with MIT License | 5 votes |
package aia.stream

import java.nio.file.{ Files, Path }
import java.io.File
import java.time.ZonedDateTime

import scala.concurrent.duration._
import scala.concurrent.ExecutionContext
import scala.concurrent.Future
import scala.util.{ Success, Failure }

import akka.Done
import akka.actor._
import akka.util.ByteString
import akka.stream.{ ActorAttributes, ActorMaterializer, IOResult }
import akka.stream.scaladsl.JsonFraming
import akka.stream.scaladsl.{ FileIO, BidiFlow, Flow, Framing, Keep, Sink, Source }
import akka.http.scaladsl.marshallers.sprayjson.SprayJsonSupport._
import akka.http.scaladsl.marshalling.Marshal
import akka.http.scaladsl.model._
import akka.http.scaladsl.server.Directives._
import akka.http.scaladsl.server._
import spray.json._

object LogJson extends EventMarshalling
    with NotificationMarshalling
    with MetricMarshalling {

  def textInFlow(maxLine: Int) = {
    Framing.delimiter(ByteString("\n"), maxLine)
      .map(_.decodeString("UTF8"))
      .map(LogStreamProcessor.parseLineEx)
      .collect { case Some(e) => e }
  }

  def jsonInFlow(maxJsonObject: Int) = {
    JsonFraming.objectScanner(maxJsonObject)
      .map(_.decodeString("UTF8").parseJson.convertTo[Event])
  }

  def jsonFramed(maxJsonObject: Int) =
    JsonFraming.objectScanner(maxJsonObject)

  val jsonOutFlow = Flow[Event].map { event =>
    ByteString(event.toJson.compactPrint)
  }

  val notifyOutFlow = Flow[Summary].map { ws =>
    ByteString(ws.toJson.compactPrint)
  }

  val metricOutFlow = Flow[Metric].map { m =>
    ByteString(m.toJson.compactPrint)
  }

  val textOutFlow = Flow[Event].map { event =>
    ByteString(LogStreamProcessor.logLine(event))
  }

  def logToJson(maxLine: Int) = {
    BidiFlow.fromFlows(textInFlow(maxLine), jsonOutFlow)
  }

  def jsonToLog(maxJsonObject: Int) = {
    BidiFlow.fromFlows(jsonInFlow(maxJsonObject), textOutFlow)
  }

  def logToJsonFlow(maxLine: Int) = {
    logToJson(maxLine).join(Flow[Event])
  }

  def jsonToLogFlow(maxJsonObject: Int) = {
    jsonToLog(maxJsonObject).join(Flow[Event])
  }
}
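A hedged usage sketch of the joined flows above, assuming LogStreamProcessor.parseLineEx accepts the pipe-delimited format that GenerateLogFile writes; the maxLine value is illustrative:

import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.{ Sink, Source }
import akka.util.ByteString

object LogJsonDemo extends App {
  implicit val system = ActorSystem()
  implicit val materializer = ActorMaterializer()

  val line = "my-host | my-service | ok | 2017-01-01T00:00:00Z | A description. | tag | 1.0\n"

  // one log line in, one compact JSON object out
  Source.single(ByteString(line))
    .via(LogJson.logToJsonFlow(maxLine = 1024))
    .runWith(Sink.foreach(bs => println(bs.utf8String)))
}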
Example 131
Source File: RestService.scala From introduction-to-akkahttp with Apache License 2.0 | 5 votes |
package com.shashank.akkahttp.project

import java.util.concurrent.ConcurrentHashMap

import akka.actor.ActorSystem
import akka.http.scaladsl.server.Directives._
import akka.stream.ActorMaterializer
import akka.http.scaladsl.marshallers.sprayjson.SprayJsonSupport._
import com.shashank.akkahttp.project.Models.{LoadRequest, ServiceJsonProtoocol}
import spray.json.JsArray

import scala.collection.JavaConverters._
import spray.json.{DefaultJsonProtocol, JsArray, pimpAny}
import spray.json.DefaultJsonProtocol._
import org.apache.spark.sql.SparkSession
import org.apache.spark.sql._

trait RestService {
  implicit val system: ActorSystem
  implicit val materializer: ActorMaterializer
  implicit val sparkSession: SparkSession
  val datasetMap = new ConcurrentHashMap[String, Dataset[Row]]()

  import ServiceJsonProtoocol._

  val route =
    pathSingleSlash {
      get {
        complete {
          "welcome to rest service"
        }
      }
    } ~
    path("load") {
      post {
        entity(as[LoadRequest]) { loadRequest =>
          complete {
            val id = "" + System.nanoTime()
            val dataset = sparkSession.read.format("csv")
              .option("header", "true")
              .load(loadRequest.path)
            datasetMap.put(id, dataset)
            id
          }
        }
      }
    } ~
    path("view" / """[\w[0-9]-_]+""".r) { id =>
      get {
        complete {
          val dataset = datasetMap.get(id)
          dataset.take(10).map(row => row.toString())
        }
      }
    }
}
Example 132
Source File: RestServer.scala From introduction-to-akkahttp with Apache License 2.0 | 5 votes |
package com.shashank.akkahttp.project

import akka.actor.ActorSystem
import akka.http.scaladsl.Http
import akka.stream.ActorMaterializer
import org.apache.spark.sql.SparkSession

class RestServer(implicit val system: ActorSystem,
                 implicit val materializer: ActorMaterializer,
                 implicit val sparkSession: SparkSession) extends RestService {
  def startServer(address: String, port: Int) = {
    Http().bindAndHandle(route, address, port)
  }
}

object RestServer {
  def main(args: Array[String]) {
    implicit val actorSystem = ActorSystem("rest-server")
    implicit val materializer = ActorMaterializer()
    implicit val sparkSession: SparkSession = SparkSession.builder().master("local").
      appName("Rest Server context").getOrCreate()
    val server = new RestServer()

    server.startServer("localhost", 8080)
    println("running server at localhost 8080")
  }
}
Example 133
Source File: TestKit.scala From introduction-to-akkahttp with Apache License 2.0 | 5 votes |
package com.shashank.akkahttp.basic.routing

import akka.actor.ActorSystem
import akka.http.scaladsl.model.HttpMethods._
import akka.http.scaladsl.model.HttpRequest
import akka.http.scaladsl.server.Directives._
import akka.stream.{ActorMaterializer, Materializer}
import org.scalatest.{ Matchers, WordSpec }
import akka.http.scaladsl.testkit.ScalatestRouteTest

object TestKit extends WordSpec with Matchers with ScalatestRouteTest {
  def main(args: Array[String]) {
    val route =
      path("welcome") {
        get {
          complete {
            "welcome to rest service"
          }
        }
      } ~
      path("demo") {
        get {
          complete {
            "welcome to demonstration"
          }
        }
      }

    val getRequest = HttpRequest(GET, "/welcome")

    getRequest ~> route ~> check {
      status.intValue shouldEqual 200
      entityAs[String] shouldEqual "welcome to rest service"
    }

    system.terminate()
  }
}
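Note that ScalatestRouteTest supplies the implicit ActorSystem and materializer, which is why this object creates neither; the ~> test DSL evaluates the route in process, so no port is bound and no HTTP server is started.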
Example 134
Source File: RoutingDSL.scala From introduction-to-akkahttp with Apache License 2.0 | 5 votes |
package com.shashank.akkahttp.basic.routing

import akka.actor.ActorSystem
import akka.http.scaladsl.Http
import akka.http.scaladsl.model.{HttpResponse, StatusCodes}
import akka.http.scaladsl.server.Directives._
import akka.http.scaladsl.server._
import akka.stream.{ActorMaterializer, Materializer}

object RoutingDSL {
  def main(args: Array[String]) {
    implicit val sys = ActorSystem("IntroductionToAkkaHttp")
    implicit val mat: Materializer = ActorMaterializer()

    val route =
      path("welcome") {
        get {
          complete {
            "welcome to rest service"
          }
        }
      } ~
      path("demo") {
        get {
          complete {
            "welcome to demonstration"
          }
        }
      }

    Http().bindAndHandle(route, "localhost", 8090)
  }
}
Example 135
Source File: Failure.scala From introduction-to-akkahttp with Apache License 2.0 | 5 votes |
package com.shashank.akkahttp.basic.routing

import akka.actor.ActorSystem
import akka.http.scaladsl.model.StatusCodes
import akka.http.scaladsl.Http
import akka.http.scaladsl.model.HttpResponse
import akka.http.scaladsl.server.Directives._
import akka.http.scaladsl.server.ExceptionHandler
import akka.stream.{ActorMaterializer, Materializer}

object Failure {
  def main(args: Array[String]) {
    implicit val sys = ActorSystem("IntroductionToAkkaHttp")
    implicit val mat: Materializer = ActorMaterializer()

    implicit def myExceptionHandler = ExceptionHandler {
      case _: ArithmeticException =>
        complete(HttpResponse(StatusCodes.BadRequest, entity = "Bad numbers, bad result!!!"))
      case e: Throwable =>
        println(e.getMessage)
        println(e.getStackTraceString)
        complete(HttpResponse(StatusCodes.BadRequest, entity = e.getMessage))
    }

    val route =
      path("welcome") {
        get {
          complete {
            "welcome to rest service"
          }
        }
      } ~
      path("demo") {
        get {
          complete {
            100 / 0 // deliberately throws ArithmeticException to exercise the handler
            "welcome to demonstration"
          }
        }
      }

    Http().bindAndHandle(route, "localhost", 8090)
  }
}
Example 136
Source File: UnMarshalling.scala From introduction-to-akkahttp with Apache License 2.0 | 5 votes |
package com.shashank.akkahttp.basic.routing

import akka.actor.ActorSystem
import akka.http.scaladsl.marshalling.Marshal
import akka.http.scaladsl.model.{HttpMethods, HttpRequest, HttpResponse, MessageEntity}
import akka.http.scaladsl.unmarshalling.Unmarshal
import akka.stream.{ActorMaterializer, Materializer}
import akka.util.ByteString

import scala.concurrent.Await
import scala.concurrent.duration._
import scala.concurrent.ExecutionContext.Implicits.global
import spray.json._

object UnMarshalling {
  def main(args: Array[String]) {
    implicit val sys = ActorSystem("IntroductionToAkkaHttp")
    implicit val mat: Materializer = ActorMaterializer()

    //type FromStringUnmarshaller[T] = Unmarshaller[String, T]
    val intFuture = Unmarshal("42").to[Int]
    val int = Await.result(intFuture, 1.second)
    println("int unmarshalling " + int)

    //type FromStringUnmarshaller[T] = Unmarshaller[String, T]
    val boolFuture = Unmarshal("off").to[Boolean]
    val bool = Await.result(boolFuture, 1.second)
    println("off unmarshalling " + bool)

    //type ToEntityMarshaller[T] = Marshaller[T, MessageEntity]
    val string = "Yeah"
    val entityFuture = Marshal(string).to[MessageEntity]
    val entity = Await.result(entityFuture, 1.second) // don't block in non-test code!
    println(entity)

    //type ToResponseMarshaller[T] = Marshaller[T, HttpResponse]
    val errorMsg = "Not found, pal!"
    val responseFuture = Marshal(404 -> errorMsg).to[HttpResponse]
    val response = Await.result(responseFuture, 1.second)
    println(response)

    //type FromEntityUnmarshaller[T] = Unmarshaller[HttpEntity, T]
    val jsonByteString = ByteString("""{"name":"Hello"}""")
    val httpRequest = HttpRequest(HttpMethods.POST, entity = jsonByteString)
    val jsonDataUnmarshalledFuture = Unmarshal(httpRequest).to[String]
    val jsonDataUnmarshalled = Await.result(jsonDataUnmarshalledFuture, 1.second)
    println(jsonDataUnmarshalled)

    sys.terminate()
  }
}
Example 137
Source File: Rejection.scala From introduction-to-akkahttp with Apache License 2.0 | 5 votes |
package com.shashank.akkahttp.basic.routing

import akka.actor.ActorSystem
import akka.http.scaladsl.Http
import akka.http.scaladsl.model.{HttpResponse, StatusCodes}
import akka.http.scaladsl.server.Directives._
import akka.http.scaladsl.server._
import akka.stream.{ActorMaterializer, Materializer}

object Rejection {
  def main(args: Array[String]) {
    implicit val sys = ActorSystem("IntroductionToAkkaHttp")
    implicit val mat: Materializer = ActorMaterializer()

    implicit def myRejectionHandler =
      RejectionHandler.newBuilder().handle {
        case MissingCookieRejection(cookieName) =>
          complete(HttpResponse(StatusCodes.BadRequest, entity = "No cookies, no service!!!"))
      }.handleNotFound {
        complete((StatusCodes.NotFound, "Not here!"))
      }.result()

    val route =
      path("welcome") {
        get {
          complete {
            "welcome to rest service"
          }
        }
      } ~
      path("demo") {
        get {
          complete {
            "welcome to demonstration"
          }
        }
      } ~
      path("wrong") {
        reject {
          ValidationRejection("Invalid path", None)
        }
      }

    Http().bindAndHandle(route, "localhost", 8090)
  }
}
Example 138
Source File: ConnectionLevel.scala From introduction-to-akkahttp with Apache License 2.0 | 5 votes |
package com.shashank.akkahttp.basic.client

import akka.actor.ActorSystem
import akka.http.javadsl.settings.ClientConnectionSettings
import akka.http.scaladsl.Http
import akka.http.scaladsl.model._
import akka.stream.ActorMaterializer
import akka.stream.scaladsl._

import scala.concurrent.{Await, Future}
import scala.concurrent.duration._

object ConnectionLevel {
  def main(args: Array[String]) {
    implicit val sys = ActorSystem("IntroductionToAkkaHttp")
    implicit val mat = ActorMaterializer()

    val connectionFlow = Http().outgoingConnection("localhost", 8090)

    val responseFuture = Source.single(HttpRequest(uri = "/welcome"))
      .via(connectionFlow)
      .runWith(Sink.head)

    val response = Await.result(responseFuture, 10 seconds)
    response.entity.dataBytes.map(_.utf8String).runForeach(println)

    sys.terminate()
  }
}
Example 139
Source File: ReverseProxy.scala From introduction-to-akkahttp with Apache License 2.0 | 5 votes |
package com.shashank.akkahttp.basic.serving

import akka.actor.ActorSystem
import akka.http.scaladsl.Http
import akka.http.scaladsl.model._
import akka.http.scaladsl.model.headers.{Host, `Access-Control-Allow-Origin`}
import akka.stream.scaladsl.Flow
import akka.stream.{ActorMaterializer, Materializer}

object ReverseProxy {
  def main(args: Array[String]) {
    implicit val sys = ActorSystem("IntroductionToAkkaHttp")
    implicit val mat: Materializer = ActorMaterializer()

    val redirectHost = "localhost"
    val redirectPort = 8090

    val requestFlow = Flow.fromFunction[HttpRequest, HttpRequest]( request => {
      request
        .withUri(request.uri.withAuthority(redirectHost, redirectPort))
        .mapHeaders(headers => headers.filterNot(_.lowercaseName() == Host.lowercaseName))
        .addHeader(Host(redirectHost, redirectPort))
    })

    val outgoingConnection = Http().outgoingConnection(redirectHost, redirectPort)

    val responseFlow = Flow.fromFunction[HttpResponse, HttpResponse]( response => {
      response.withHeaders(`Access-Control-Allow-Origin`.*)
    })

    Http().bindAndHandle(requestFlow via outgoingConnection via responseFlow, "localhost", 8080)
  }
}
Example 140
Source File: FriendJournalReader.scala From Akka-Cookbook with MIT License | 5 votes |
package com.packt.chapter6

import akka.actor.ActorSystem
import akka.persistence.Recovery
import akka.persistence.query.PersistenceQuery
import akka.persistence.query.journal.leveldb.scaladsl.LeveldbReadJournal
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.Sink

import scala.concurrent.duration._

object FriendJournalReader extends App {
  implicit val system = ActorSystem()
  import system.dispatcher
  implicit val mat = ActorMaterializer()(system)
  val queries = PersistenceQuery(system).readJournalFor[LeveldbReadJournal](LeveldbReadJournal.Identifier)

  val laura = system.actorOf(FriendActor.props("Laura", Recovery()))
  val maria = system.actorOf(FriendActor.props("Maria", Recovery()))
  laura ! AddFriend(Friend("Hector"))
  laura ! AddFriend(Friend("Nancy"))
  maria ! AddFriend(Friend("Oliver"))
  maria ! AddFriend(Friend("Steve"))
  system.scheduler.scheduleOnce(5 second, maria, AddFriend(Friend("Steve")))
  system.scheduler.scheduleOnce(10 second, maria, RemoveFriend(Friend("Oliver")))
  Thread.sleep(2000)

  queries.allPersistenceIds().map(id => system.log.info(s"Id received [$id]")).to(Sink.ignore).run()
  queries.eventsByPersistenceId("Laura").map(e => log(e.persistenceId, e.event)).to(Sink.ignore).run()
  queries.eventsByPersistenceId("Maria").map(e => log(e.persistenceId, e.event)).to(Sink.ignore).run()

  def log(id: String, evt: Any) = system.log.info(s"Id [$id] Event [$evt]")
}
Example 141
Source File: EncodingDecodingClientApplication.scala From Akka-Cookbook with MIT License | 5 votes |
package com.packt.chapter9

import akka.actor.ActorSystem
import akka.http.scaladsl.Http
import akka.http.scaladsl.coding.{Encoder, Gzip, NoCoding}
import akka.http.scaladsl.model._
import akka.http.scaladsl.model.headers._
import akka.http.scaladsl.model.headers.HttpEncodings._
import akka.http.scaladsl.model.HttpMethods._
import headers.HttpEncodings
import akka.stream.ActorMaterializer
import akka.util.ByteString

import scala.concurrent.duration._
import scala.concurrent.Future
import scala.util.{Failure, Success}

object EncodingDecodingClientApplication extends App {
  implicit val system = ActorSystem()
  implicit val materializer = ActorMaterializer()
  import system.dispatcher

  val http = Http()
  val uriServer = "http://localhost:8088/"

  val requests = Seq(
    HttpRequest(POST, uriServer, List(`Accept-Encoding`(gzip)), HttpEntity("Hello!")),
    HttpRequest(POST, uriServer, List(`Content-Encoding`(gzip), `Accept-Encoding`(gzip)),
      HttpEntity(compress("Hello compressed!", Gzip)))
  )

  Future.traverse(requests)(http.singleRequest(_).map(decodeResponse)) andThen {
    case Success(responses) =>
      responses.foreach(response =>
        response.entity.toStrict(5 seconds).map(_.data.decodeString("UTF-8")).andThen {
          case Success(content) => println(s"Response: $content")
          case _ =>
        })
    case Failure(e) => println(s"request failed $e")
  }

  private def decodeResponse(response: HttpResponse) = {
    val decoder = response.encoding match {
      case HttpEncodings.gzip     => Gzip
      case HttpEncodings.identity => NoCoding
    }
    decoder.decode(response)
  }

  private def compress(input: String, encoder: Encoder): ByteString =
    encoder.encode(ByteString(input))
}
Example 142
Source File: UploadingFileClient.scala From Akka-Cookbook with MIT License | 5 votes |
package com.packt.chapter9

import java.nio.file.Paths

import akka.actor.ActorSystem
import akka.http.scaladsl.Http
import akka.http.scaladsl.model.HttpMethods._
import akka.http.scaladsl.model._
import akka.stream.ActorMaterializer

import scala.concurrent.Future
import scala.concurrent.duration._
import scala.util.{Failure, Success}

object UploadingFileClient extends App {
  implicit val system = ActorSystem()
  implicit val materializer = ActorMaterializer()
  import system.dispatcher

  val http = Http()
  val entity = Multipart.FormData.fromPath(
    "file",
    ContentTypes.`text/plain(UTF-8)`,
    Paths.get("./src/main/resources/testfile.txt")
  ).toEntity()
  val uris = Seq(
    "http://localhost:8088/regularupload",
    "http://localhost:8088/streamupload"
  )
  val requests = uris.map(uri => HttpRequest(POST, uri, Nil, entity))

  Future.traverse(requests)(http.singleRequest(_)) andThen {
    case Success(responses) =>
      responses.foreach(response =>
        response.entity.toStrict(5 seconds).map(_.data.utf8String).andThen {
          case Success(content) => println(s"Response: $content")
          case _ =>
        })
    case Failure(e) => println(s"request failed $e")
  }
}
Example 143
Source File: RequestLevelClientAPIApplication.scala From Akka-Cookbook with MIT License | 5 votes |
package com.packt.chapter9

import akka.actor.ActorSystem
import akka.http.scaladsl.Http
import akka.http.scaladsl.model.HttpRequest
import akka.stream.ActorMaterializer

import scala.concurrent.duration._
import scala.util.Success

object RequestLevelClientAPIApplication extends App {
  implicit val system = ActorSystem()
  implicit val materializer = ActorMaterializer()
  implicit val executionContext = system.dispatcher

  val akkaToolkitRequest = HttpRequest(uri = "https://api.github.com/repos/akka/akka-http")
  val responseFuture = Http().singleRequest(akkaToolkitRequest)

  responseFuture.andThen {
    case Success(response) =>
      response.entity.toStrict(5 seconds).map(_.data.decodeString("UTF-8")).andThen {
        case Success(json) =>
          val pattern = """.*"open_issues":(.*?),.*""".r
          pattern.findAllIn(json).matchData foreach { m =>
            println(s"There are ${m.group(1)} open issues in Akka Http.")
            materializer.shutdown()
            system.terminate()
          }
        case _ =>
      }
    case _ => println(s"request failed")
  }
}
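The request-level API is the most convenient client style: Http().singleRequest dispatches through the shared, cached host connection pool. Reading the entity with toStrict within a timeout, as done here, ensures the response body is fully consumed, which is what frees the pooled connection for reuse.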
Example 144
Source File: HostLevelClientAPIApplication.scala From Akka-Cookbook with MIT License | 5 votes |
package com.packt.chapter9

import akka.actor.ActorSystem
import akka.http.scaladsl.Http
import akka.http.scaladsl.model.HttpRequest
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.{Sink, Source}

import scala.concurrent.duration._
import scala.util.{Failure, Success}

object HostLevelClientAPIApplication extends App {
  implicit val system = ActorSystem()
  implicit val materializer = ActorMaterializer()
  implicit val executionContext = system.dispatcher

  val poolClientFlow = Http().cachedHostConnectionPoolHttps[String]("api.github.com")
  val akkaToolkitRequest = HttpRequest(uri = "/repos/akka/akka-http") -> """.*"open_issues":(.*?),.*"""
  val responseFuture = Source.single(akkaToolkitRequest).via(poolClientFlow).runWith(Sink.head)

  responseFuture.andThen {
    case Success(result) =>
      val (tryResponse, regex) = result
      tryResponse match {
        case Success(response) =>
          response.entity.toStrict(5 seconds).map(_.data.decodeString("UTF-8")).andThen {
            case Success(json) =>
              val pattern = regex.r
              pattern.findAllIn(json).matchData foreach { m =>
                println(s"There are ${m.group(1)} open issues in Akka Http.")
                materializer.shutdown()
                system.terminate()
              }
            case _ =>
          }
        case _ => println("request failed")
      }
    case _ => println("request failed")
  }
}
Example 145
Source File: ConnectionLevelClientAPIApplication.scala From Akka-Cookbook with MIT License | 5 votes |
package com.packt.chapter9

import akka.actor.ActorSystem
import akka.http.scaladsl.Http
import akka.http.scaladsl.model.HttpRequest
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.{Sink, Source}

import scala.util.{Failure, Success}
import scala.concurrent.duration._

object ConnectionLevelClientAPIApplication extends App {
  implicit val system = ActorSystem()
  implicit val materializer = ActorMaterializer()
  implicit val executionContext = system.dispatcher

  val connectionFlow = Http().outgoingConnectionHttps("api.github.com")
  val akkaToolkitRequest = HttpRequest(uri = "/repos/akka/akka-http")
  val responseFuture = Source.single(akkaToolkitRequest).via(connectionFlow).runWith(Sink.head)

  responseFuture.andThen {
    case Success(response) =>
      response.entity.toStrict(5 seconds).map(_.data.decodeString("UTF-8")).andThen {
        case Success(json) =>
          val pattern = """.*"open_issues":(.*?),.*""".r
          pattern.findAllIn(json).matchData foreach { m =>
            println(s"There are ${m.group(1)} open issues in Akka Http.")
            materializer.shutdown()
            system.terminate()
          }
        case _ =>
      }
    case _ => println("request failed")
  }
}
Example 146
Source File: SimpleStreamsApplication.scala From Akka-Cookbook with MIT License | 5 votes |
package com.packt.chapter8

import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.{Sink, Source}

object SimpleStreamsApplication extends App {
  implicit val actorSystem = ActorSystem("SimpleStream")
  implicit val actorMaterializer = ActorMaterializer()

  val fileList = List(
    "src/main/resources/testfile1.text",
    "src/main/resources/testfile2.txt",
    "src/main/resources/testfile3.txt")

  val stream = Source(fileList)
    .map(new java.io.File(_))
    .filter(_.exists())
    .filter(_.length() != 0)
    .to(Sink.foreach(f => println(s"Absolute path: ${f.getAbsolutePath}")))

  stream.run()
}
Example 147
Source File: ComposingStreamsApplication.scala From Akka-Cookbook with MIT License | 5 votes |
package com.packt.chapter8

import java.io.File

import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.{Sink, Source}

object ComposingStreamsApplication extends App {
  implicit val actorSystem = ActorSystem("SimpleStream")
  implicit val actorMaterializer = ActorMaterializer()

  val fileList = List(
    "src/main/resources/testfile1.text",
    "src/main/resources/testfile2.txt",
    "src/main/resources/testfile3.txt")

  val stream = Source(fileList)
    .map(new File(_))
    .filter(_.exists())
    .filter(_.length() != 0)
    .to(Sink.foreach(f => println(s"Absolute path: ${f.getAbsolutePath}")))

  stream.run()
}
Example 148
Source File: HandlingErrorsApplication.scala From Akka-Cookbook with MIT License | 5 votes |
package com.packt.chapter8

import akka.actor.ActorSystem
import akka.stream.{ActorAttributes, ActorMaterializer, ActorMaterializerSettings, Supervision}
import akka.stream.scaladsl._

object HandlingErrorsApplication extends App {
  implicit val actorSystem = ActorSystem("HandlingErrors")

  val streamDecider: Supervision.Decider = {
    case e: IndexOutOfBoundsException =>
      println("Dropping element because of IndexOutOfBoundsException. Resuming.")
      Supervision.Resume
    case _ => Supervision.Stop
  }

  val flowDecider: Supervision.Decider = {
    case e: IllegalArgumentException =>
      println("Dropping element because of IllegalArgumentException. Restarting.")
      Supervision.Restart
    case _ => Supervision.Stop
  }

  val actorMaterializerSettings =
    ActorMaterializerSettings(actorSystem).withSupervisionStrategy(streamDecider)
  implicit val actorMaterializer = ActorMaterializer(actorMaterializerSettings)

  val words = List("Handling", "Errors", "In", "Akka", "Streams", "")

  val flow = Flow[String].map(word => {
    if(word.length == 0) throw new IllegalArgumentException("Empty words are not allowed")
    word
  }).withAttributes(ActorAttributes.supervisionStrategy(flowDecider))

  // "In" is too short for array(2), which triggers the stream-level decider
  Source(words).via(flow).map(array => array(2)).to(Sink.foreach(println)).run()
}
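The three directives differ in how they treat operator state: Resume drops the failing element and keeps whatever state the stage has accumulated, Restart drops the element and recreates the stage (clearing its state), and Stop fails the whole stream. In this example the empty word trips the flow-level Restart decider, while the too-short word trips the stream-level Resume decider.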
Example 149
Source File: TransformingStreamsApplication.scala From Akka-Cookbook with MIT License | 5 votes |
package com.packt.chapter8

import java.nio.file.Paths

import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import akka.stream.scaladsl._

object TransformingStreamsApplication extends App {
  implicit val actorSystem = ActorSystem("TransformingStream")
  implicit val actorMaterializer = ActorMaterializer()

  val MaxGroups = 1000

  val path = Paths.get("src/main/resources/gzipped-file.gz")

  val stream = FileIO.fromPath(path)
    .via(Compression.gunzip())
    .map(_.utf8String.toUpperCase)
    .mapConcat(_.split(" ").toList)
    .collect {
      case w if w.nonEmpty =>
        w.replaceAll("""[\p{Punct}&&[^.]]""", "").replaceAll(System.lineSeparator(), "")
    }
    .groupBy(MaxGroups, identity)
    .map(_ -> 1)
    .reduce((l, r) => (l._1, l._2 + r._2))
    .mergeSubstreams
    .to(Sink.foreach(println))

  stream.run()
}
Example 150
Source File: IntegratingWithActorsApplication.scala From Akka-Cookbook with MIT License | 5 votes |
package com.packt.chapter8

import akka.actor.{ActorSystem, Props}
import akka.stream.{ActorMaterializer, OverflowStrategy}
import akka.stream.scaladsl._
import akka.pattern.ask
import akka.util.Timeout
import com.packt.chapter8.SinkActor.{AckSinkActor, CompletedSinkActor, InitSinkActor}

import scala.concurrent.duration._

object IntegratingWithActorsApplication extends App {
  implicit val actorSystem = ActorSystem("IntegratingWithActors")
  implicit val actorMaterializer = ActorMaterializer()

  implicit val askTimeout = Timeout(5 seconds)
  val stringCleaner = actorSystem.actorOf(Props[StringCleanerActor])
  val sinkActor = actorSystem.actorOf(Props[SinkActor])

  val source = Source.queue[String](100, OverflowStrategy.backpressure)
  val sink = Sink.actorRefWithAck[String](sinkActor, InitSinkActor, AckSinkActor, CompletedSinkActor)

  val queue = source
    .mapAsync(parallelism = 5)(elem => (stringCleaner ? elem).mapTo[String])
    .to(sink)
    .run()

  actorSystem.actorOf(SourceActor.props(queue))
}
Example 151
Source File: ProcessingKafkaApplication.scala From Akka-Cookbook with MIT License | 5 votes |
package com.packt.chapter8

import akka.actor.ActorSystem
import akka.kafka.scaladsl.{Consumer, Producer}
import akka.kafka.{ConsumerSettings, ProducerSettings, Subscriptions}
import akka.stream.{ActorMaterializer, ClosedShape}
import akka.stream.scaladsl.{Flow, GraphDSL, RunnableGraph, Sink, Source}
import org.apache.kafka.clients.consumer.{ConsumerConfig, ConsumerRecord}
import org.apache.kafka.clients.producer.ProducerRecord
import org.apache.kafka.common.TopicPartition
import org.apache.kafka.common.serialization.{ByteArrayDeserializer, ByteArraySerializer, StringDeserializer, StringSerializer}

import scala.concurrent.duration._

object ProcessingKafkaApplication extends App {
  implicit val actorSystem = ActorSystem("SimpleStream")
  implicit val actorMaterializer = ActorMaterializer()

  val bootstrapServers = "localhost:9092"
  val kafkaTopic = "akka_streams_topic"
  val partition = 0
  val subscription = Subscriptions.assignment(new TopicPartition(kafkaTopic, partition))

  val consumerSettings = ConsumerSettings(actorSystem, new ByteArrayDeserializer, new StringDeserializer)
    .withBootstrapServers(bootstrapServers)
    .withGroupId("akka_streams_group")
    .withProperty(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest")

  val producerSettings = ProducerSettings(actorSystem, new ByteArraySerializer, new StringSerializer)
    .withBootstrapServers(bootstrapServers)

  val runnableGraph = RunnableGraph.fromGraph(GraphDSL.create() { implicit builder =>
    import GraphDSL.Implicits._

    val tickSource = Source.tick(0 seconds, 5 seconds, "Hello from Akka Streams using Kafka!")
    val kafkaSource = Consumer.plainSource(consumerSettings, subscription)
    val kafkaSink = Producer.plainSink(producerSettings)
    val printlnSink = Sink.foreach(println)

    val mapToProducerRecord = Flow[String].map(elem => new ProducerRecord[Array[Byte], String](kafkaTopic, elem))
    val mapFromConsumerRecord = Flow[ConsumerRecord[Array[Byte], String]].map(record => record.value())

    tickSource  ~> mapToProducerRecord   ~> kafkaSink
    kafkaSource ~> mapFromConsumerRecord ~> printlnSink

    ClosedShape
  })

  runnableGraph.run()
}
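Running this example requires a Kafka broker reachable at localhost:9092. The graph feeds itself: the tick source publishes a greeting to akka_streams_topic every five seconds, and the consumer half of the same graph reads the records back and prints them.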
Example 152
Source File: ProcessingRabbitMQApplication.scala From Akka-Cookbook with MIT License | 5 votes |
package com.packt.chapter8

import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import akka.stream.alpakka.amqp._
import akka.stream.alpakka.amqp.scaladsl.{AmqpSink, AmqpSource}
import akka.util.ByteString

object ProcessingRabbitMQApplication extends App {
  implicit val actorSystem = ActorSystem("SimpleStream")
  implicit val actorMaterializer = ActorMaterializer()

  val consumerQueueName = "akka_streams_consumer_queue"
  val consumerQueueDeclaration = QueueDeclaration(consumerQueueName)
  val sourceDeclarations = Seq(consumerQueueDeclaration)

  val exchangeName = "akka_streams_exchange"
  val exchangeDeclaration = ExchangeDeclaration(exchangeName, "direct")
  val destinationQueueName = "akka_streams_destination_queue"
  val destinationQueueDeclaration = QueueDeclaration(destinationQueueName)
  val bindingDeclaration = BindingDeclaration(destinationQueueName, exchangeName)
  val sinkDeclarations = Seq(exchangeDeclaration, destinationQueueDeclaration, bindingDeclaration)

  val credentials = AmqpCredentials("guest", "guest")
  val connectionSetting = AmqpConnectionDetails("127.0.0.1", 5672, Some(credentials))
  val amqpSourceConfig = NamedQueueSourceSettings(connectionSetting, consumerQueueName, sourceDeclarations)
  val rabbitMQSource = AmqpSource(amqpSourceConfig, 1000)
  val amqpSinkConfig = AmqpSinkSettings(connectionSetting, Some(exchangeName), None, sinkDeclarations)
  val rabbitMQSink = AmqpSink(amqpSinkConfig)

  val stream = rabbitMQSource
    .map(incomingMessage => {
      val upperCased = incomingMessage.bytes.utf8String.toUpperCase
      OutgoingMessage(bytes = ByteString(upperCased),
        immediate = false,
        mandatory = false,
        props = None)
    })
    .to(rabbitMQSink)

  stream.run()
}
Example 153
Source File: ModularizingStreamsApplication.scala From Akka-Cookbook with MIT License | 5 votes |
package com.packt.chapter8

import java.nio.file.Paths

import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import akka.stream.scaladsl._
import akka.util.ByteString

object ModularizingStreamsApplication extends App {
  implicit val actorSystem = ActorSystem("TransformingStream")
  implicit val actorMaterializer = ActorMaterializer()

  val MaxGroups = 1000

  val path = Paths.get("src/main/resources/gzipped-file.gz")

  // the building-block stages used below (source, gunzip, utf8UppercaseMapper,
  // splitter, punctuationMapper, filterEmptyElements, wordCountFlow, printlnSink)
  // are defined elsewhere in the project and are assumed to be in scope
  val streamUppercase = source
    .via(gunzip)
    .via(utf8UppercaseMapper)
    .via(splitter)
    .via(punctuationMapper)
    .via(filterEmptyElements)
    .via(wordCountFlow)
    .to(printlnSink)

  val streamLowercase = source
    .via(gunzip)
    .via(utf8LowercaseMapper)
    .via(splitter)
    .via(punctuationMapper)
    .via(filterEmptyElements)
    .via(wordCountFlow)
    .to(printlnSink)

  streamUppercase.run()
  streamLowercase.run()

  // val sourceGunzip = source.via(gunzip)
  // val reusableProcessingFlow = Flow[String].via(splitter)
  //   .via(punctuationMapper)
  //   .via(filterEmptyElements)
  //   .via(wordCountFlow)
  //
  // val streamLowercase = sourceGunzip
  //   .via(utf8LowercaseMapper)
  //   .via(reusableProcessingFlow)
  //   .to(printlnSink)
}
Example 154
Source File: PipeliningParallelizing.scala From Akka-Cookbook with MIT License | 5 votes |
package com.packt.chapter8

import akka.NotUsed
import akka.actor.ActorSystem
import akka.stream.{ActorMaterializer, FlowShape}
import akka.stream.scaladsl.{Balance, Flow, GraphDSL, Merge, Sink, Source}

import scala.util.Random

trait PipeliningParallelizing extends App {
  implicit val actorSystem = ActorSystem("PipeliningParallelizing")
  implicit val actorMaterializer = ActorMaterializer()

  case class Wash(id: Int)
  case class Dry(id: Int)
  case class Done(id: Int)

  val tasks = (1 to 5).map(Wash)

  def washStage = Flow[Wash].map(wash => {
    val sleepTime = Random.nextInt(3) * 1000
    println(s"Washing ${wash.id}. It will take $sleepTime milliseconds.")
    Thread.sleep(sleepTime)
    Dry(wash.id)
  })

  def dryStage = Flow[Dry].map(dry => {
    val sleepTime = Random.nextInt(3) * 1000
    println(s"Drying ${dry.id}. It will take $sleepTime milliseconds.")
    Thread.sleep(sleepTime)
    Done(dry.id)
  })

  val parallelStage = Flow.fromGraph(GraphDSL.create() { implicit builder =>
    import GraphDSL.Implicits._

    val dispatchLaundry = builder.add(Balance[Wash](3))
    val mergeLaundry = builder.add(Merge[Done](3))

    dispatchLaundry.out(0) ~> washStage.async ~> dryStage.async ~> mergeLaundry.in(0)
    dispatchLaundry.out(1) ~> washStage.async ~> dryStage.async ~> mergeLaundry.in(1)
    dispatchLaundry.out(2) ~> washStage.async ~> dryStage.async ~> mergeLaundry.in(2)

    FlowShape(dispatchLaundry.in, mergeLaundry.out)
  })

  def runGraph(testingFlow: Flow[Wash, Done, NotUsed]) =
    Source(tasks).via(testingFlow).to(Sink.foreach(println)).run()
}
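The trait deliberately leaves the flow choice to concrete applications. Hypothetical runner objects (the names are illustrative, not taken from the book) could exercise both variants; .async is what inserts the asynchronous boundaries that let the wash and dry stages, or the three balanced lanes, run concurrently:

object PipeliningApplication extends PipeliningParallelizing {
  // wash and dry pipelined across asynchronous boundaries
  runGraph(washStage.async.via(dryStage.async))
}

object ParallelizingApplication extends PipeliningParallelizing {
  // three parallel wash/dry lanes behind Balance and Merge
  runGraph(parallelStage)
}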
Example 155
Source File: WorkingIOStreamsApplication.scala From Akka-Cookbook with MIT License | 5 votes |
package com.packt.chapter8

import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.Tcp.{IncomingConnection, ServerBinding}
import akka.stream.scaladsl._
import akka.util.ByteString

import scala.concurrent.Future

object WorkingIOStreamsApplication extends App {
  implicit val actorSystem = ActorSystem("WorkingIOStreams")
  implicit val actorMaterializer = ActorMaterializer()

  val MaxGroups = 1000

  val connections = Tcp().bind("127.0.0.1", 1234)
  connections.runForeach(connection => connection.handleWith(wordCount))

  val wordCount = Flow[ByteString].map(_.utf8String.toUpperCase)
    .mapConcat(_.split(" ").toList)
    .collect {
      case w if w.nonEmpty =>
        w.replaceAll("""[\p{Punct}&&[^.]]""", "").replaceAll(System.lineSeparator(), "")
    }
    .groupBy(MaxGroups, identity)
    .map(_ -> 1)
    .reduce((l, r) => (l._1, l._2 + r._2))
    .mergeSubstreams
    .map(x => ByteString(s"[${x._1} => ${x._2}]\n"))
}
Example 156
Source File: SetSessionScala.scala From akka-http-session with Apache License 2.0 | 5 votes |
package com.softwaremill.example.session

import akka.actor.ActorSystem
import akka.http.scaladsl.Http
import akka.http.scaladsl.server.Directives._
import akka.stream.ActorMaterializer
import com.softwaremill.session.CsrfDirectives._
import com.softwaremill.session.CsrfOptions._
import com.softwaremill.session.SessionDirectives._
import com.softwaremill.session.SessionOptions._
import com.softwaremill.session._
import com.typesafe.scalalogging.StrictLogging

import scala.io.StdIn

object SetSessionScala extends App with StrictLogging {
  implicit val system = ActorSystem("example")
  implicit val materializer = ActorMaterializer()

  import system.dispatcher

  val sessionConfig = SessionConfig.default(
    "c05ll3lesrinf39t7mc5h6un6r0c69lgfno69dsak3vabeqamouq4328cuaekros401ajdpkh60rrtpd8ro24rbuqmgtnd1ebag6ljnb65i8a55d482ok7o0nch0bfbe")
  implicit val sessionManager = new SessionManager[MyScalaSession](sessionConfig)
  implicit val refreshTokenStorage = new InMemoryRefreshTokenStorage[MyScalaSession] {
    def log(msg: String) = logger.info(msg)
  }

  def mySetSession(v: MyScalaSession) = setSession(refreshable, usingCookies, v)

  val myRequiredSession = requiredSession(refreshable, usingCookies)
  val myInvalidateSession = invalidateSession(refreshable, usingCookies)

  val routes =
    randomTokenCsrfProtection(checkHeader) {
      pathPrefix("api") {
        path("do_login") {
          post {
            entity(as[String]) { body =>
              logger.info(s"Logging in $body")
              mySetSession(MyScalaSession(body)) {
                setNewCsrfToken(checkHeader) { ctx =>
                  ctx.complete("ok")
                }
              }
            }
          }
        }
      }
    }

  val bindingFuture = Http().bindAndHandle(routes, "localhost", 8080)

  println("Server started, press enter to stop. Visit http://localhost:8080 to see the demo.")
  StdIn.readLine()

  import system.dispatcher

  bindingFuture
    .flatMap(_.unbind())
    .onComplete { _ =>
      system.terminate()
      println("Server stopped")
    }
}
Example 157
Source File: SessionInvalidationScala.scala From akka-http-session with Apache License 2.0 | 5 votes |
package com.softwaremill.example.session

import akka.actor.ActorSystem
import akka.http.scaladsl.Http
import akka.http.scaladsl.server.Directives._
import akka.stream.ActorMaterializer
import com.softwaremill.session.SessionDirectives._
import com.softwaremill.session.SessionOptions._
import com.softwaremill.session._
import com.typesafe.scalalogging.StrictLogging

import scala.io.StdIn

object SessionInvalidationScala extends App with StrictLogging {
  implicit val system = ActorSystem("example")
  implicit val materializer = ActorMaterializer()

  import system.dispatcher

  val sessionConfig = SessionConfig.default(
    "c05ll3lesrinf39t7mc5h6un6r0c69lgfno69dsak3vabeqamouq4328cuaekros401ajdpkh60rrtpd8ro24rbuqmgtnd1ebag6ljnb65i8a55d482ok7o0nch0bfbe")
  implicit val sessionManager = new SessionManager[MyScalaSession](sessionConfig)
  implicit val refreshTokenStorage = new InMemoryRefreshTokenStorage[MyScalaSession] {
    def log(msg: String) = logger.info(msg)
  }

  def mySetSession(v: MyScalaSession) = setSession(refreshable, usingCookies, v)

  val myRequiredSession = requiredSession(refreshable, usingCookies)
  val myInvalidateSession = invalidateSession(refreshable, usingCookies)

  val routes =
    path("logout") {
      post {
        myRequiredSession { session =>
          myInvalidateSession { ctx =>
            logger.info(s"Logging out $session")
            ctx.complete("ok")
          }
        }
      }
    }

  val bindingFuture = Http().bindAndHandle(routes, "localhost", 8080)

  println("Server started, press enter to stop. Visit http://localhost:8080 to see the demo.")
  StdIn.readLine()

  import system.dispatcher

  bindingFuture
    .flatMap(_.unbind())
    .onComplete { _ =>
      system.terminate()
      println("Server stopped")
    }
}
Example 158
Source File: ScalaExample.scala From akka-http-session with Apache License 2.0 | 5 votes |
package com.softwaremill.example

import akka.actor.ActorSystem
import akka.http.scaladsl.Http
import akka.http.scaladsl.model.StatusCodes._
import akka.http.scaladsl.server.Directives._
import akka.stream.ActorMaterializer
import com.softwaremill.example.session.MyScalaSession
import com.softwaremill.session.CsrfDirectives._
import com.softwaremill.session.CsrfOptions._
import com.softwaremill.session.SessionDirectives._
import com.softwaremill.session.SessionOptions._
import com.softwaremill.session._
import com.typesafe.scalalogging.StrictLogging

import scala.io.StdIn

object Example extends App with StrictLogging {
  implicit val system = ActorSystem("example")
  implicit val materializer = ActorMaterializer()

  import system.dispatcher

  val sessionConfig = SessionConfig.default(
    "c05ll3lesrinf39t7mc5h6un6r0c69lgfno69dsak3vabeqamouq4328cuaekros401ajdpkh60rrtpd8ro24rbuqmgtnd1ebag6ljnb65i8a55d482ok7o0nch0bfbe")
  implicit val sessionManager = new SessionManager[MyScalaSession](sessionConfig)
  implicit val refreshTokenStorage = new InMemoryRefreshTokenStorage[MyScalaSession] {
    def log(msg: String) = logger.info(msg)
  }

  def mySetSession(v: MyScalaSession) = setSession(refreshable, usingCookies, v)

  val myRequiredSession = requiredSession(refreshable, usingCookies)
  val myInvalidateSession = invalidateSession(refreshable, usingCookies)

  val routes =
    path("") {
      redirect("/site/index.html", Found)
    } ~
    randomTokenCsrfProtection(checkHeader) {
      pathPrefix("api") {
        path("do_login") {
          post {
            entity(as[String]) { body =>
              logger.info(s"Logging in $body")
              mySetSession(MyScalaSession(body)) {
                setNewCsrfToken(checkHeader) { ctx =>
                  ctx.complete("ok")
                }
              }
            }
          }
        } ~
        // This should be protected and accessible only when logged in
        path("do_logout") {
          post {
            myRequiredSession { session =>
              myInvalidateSession { ctx =>
                logger.info(s"Logging out $session")
                ctx.complete("ok")
              }
            }
          }
        } ~
        // This should be protected and accessible only when logged in
        path("current_login") {
          get {
            myRequiredSession { session => ctx =>
              logger.info("Current session: " + session)
              ctx.complete(session.username)
            }
          }
        }
      } ~
      pathPrefix("site") {
        getFromResourceDirectory("")
      }
    }

  val bindingFuture = Http().bindAndHandle(routes, "localhost", 8080)

  println("Server started, press enter to stop. Visit http://localhost:8080 to see the demo.")
  StdIn.readLine()

  import system.dispatcher

  bindingFuture
    .flatMap(_.unbind())
    .onComplete { _ =>
      system.terminate()
      println("Server stopped")
    }
}
Example 159
Source File: Demo.scala From chordial with BSD 3-Clause "New" or "Revised" License | 5 votes |
package com.tristanpenman.chordial.demo

import akka.actor.ActorSystem
import akka.http.scaladsl.Http
import akka.http.scaladsl.model.ws.TextMessage
import akka.stream.scaladsl._
import akka.stream.{ActorAttributes, ActorMaterializer, OverflowStrategy, Supervision}
import akka.util.Timeout
import com.tristanpenman.chordial.core.Event
import com.tristanpenman.chordial.core.Event._

import scala.concurrent.Await
import scala.concurrent.duration._

object Demo extends App {
  implicit val system = ActorSystem("chordial-demo")
  implicit val mat = ActorMaterializer()
  implicit val ec = system.dispatcher

  implicit val timeout: Timeout = 3.seconds

  // Generate IDs ranging from 0 to 63 (inclusive) so that when visualising the network,
  // each node represents a ~5.625 degree arc on the ring
  private val keyspaceBits = 6

  // Create an actor that is responsible for creating and terminating nodes, while ensuring
  // that nodes are assigned unique IDs in the Chord key-space
  private val governor = system.actorOf(Governor.props(keyspaceBits), "Governor")

  // Create an actor that will log events published by nodes
  private val eventWriter = system.actorOf(EventWriter.props, "EventWriter")

  // Subscribe the EventWriter actor to events published by nodes
  system.eventStream.subscribe(eventWriter, classOf[Event])

  val (listener, eventsSource) =
    Source
      .actorRef[Event](Int.MaxValue, OverflowStrategy.fail)
      .map {
        case FingerReset(nodeId: Long, index: Int) =>
          s"""{ "type": "FingerReset", "nodeId": $nodeId, "index": $index }"""
        case FingerUpdated(nodeId: Long, index: Int, fingerId: Long) =>
          s"""{ "type": "FingerUpdated", "nodeId": $nodeId, "index": $index, "fingerId": $fingerId }"""
        case NodeCreated(nodeId, successorId) =>
          s"""{ "type": "NodeCreated", "nodeId": $nodeId, "successorId": $successorId }"""
        case NodeShuttingDown(nodeId) =>
          s"""{ "type": "NodeDeleted", "nodeId": $nodeId }"""
        case PredecessorReset(nodeId) =>
          s"""{ "type": "PredecessorReset", "nodeId": $nodeId }"""
        case PredecessorUpdated(nodeId, predecessorId) =>
          s"""{ "type": "PredecessorUpdated", "nodeId": $nodeId, "predecessorId": $predecessorId }"""
        case SuccessorListUpdated(nodeId, primarySuccessorId, _) =>
          s"""{ "type": "SuccessorUpdated", "nodeId": $nodeId, "successorId": $primarySuccessorId }"""
      }
      .map(TextMessage(_))
      .withAttributes(ActorAttributes.supervisionStrategy(Supervision.resumingDecider))
      .toMat(BroadcastHub.sink[TextMessage](bufferSize = 16))(Keep.both)
      .run()

  system.eventStream.subscribe(listener, classOf[Event])

  Http().bindAndHandle(WebSocketWorker(governor, eventsSource), "0.0.0.0", 4567)

  Await.result(system.whenTerminated, Duration.Inf)
}
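The Keep.both at the end is what makes this work: Source.actorRef materializes the ActorRef that gets subscribed to the event stream, and BroadcastHub.sink materializes a Source that can be attached to any number of consumers, so every websocket client handed eventsSource sees the same JSON event feed.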
Example 160
Source File: Launcher.scala From udash-demos with GNU General Public License v3.0 | 5 votes |
package io.udash.demos.rest

import akka.actor.ActorSystem
import akka.http.scaladsl.Http
import akka.stream.ActorMaterializer
import io.udash.demos.rest.api.PhoneBookWebService
import io.udash.logging.CrossLogging

import scala.io.StdIn

object Launcher extends CrossLogging {
  def main(args: Array[String]): Unit = {
    implicit val system = ActorSystem("my-system")
    implicit val materializer = ActorMaterializer()
    // needed for the future flatMap/onComplete in the end
    implicit val executionContext = system.dispatcher

    val service = new PhoneBookWebService
    val bindingFuture = Http().bindAndHandle(service.route, "localhost", 8080)

    logger.info(s"Server online at http://localhost:8080/\nPress Enter to stop...")

    StdIn.readLine() // let it run until user presses return
    bindingFuture
      .flatMap(_.unbind()) // trigger unbinding from the port
      .onComplete(_ => system.terminate()) // and shutdown when done
  }
}
Example 161
Source File: AppContext.scala From ws_to_kafka with MIT License | 5 votes |
package com.pkinsky

import akka.actor.ActorSystem
import akka.stream.{ActorMaterializer, Materializer}
import com.softwaremill.react.kafka.ReactiveKafka

import scala.concurrent.duration._
import scala.concurrent.ExecutionContext
import scala.language.postfixOps

trait AppContext {
  // implicit context: actor system, execution context, materializer
  implicit val system: ActorSystem = ActorSystem()
  implicit val ec: ExecutionContext = system.dispatcher
  implicit val mat: Materializer = ActorMaterializer()

  // kafka setup
  val kafkaConf = KafkaServiceConf("192.168.99.100:9092")
  val kafkaClient: ReactiveKafka = new ReactiveKafka()
  val kafka = new KafkaService(kafkaClient, kafkaConf)

  // constants
  val eventTopic = "event_topic_newer" // kafka topic
  val port = 9000 // server port

  def awaitTermination() = {
    System.console().readLine() // wait for enter
    println(s"shutting down because enter was pressed")
    system.shutdown()
    system.awaitTermination(30 seconds)
    System.exit(0)
  }
}
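system.shutdown() and awaitTermination are the pre-2.4 Akka termination API, so this example targets an older Akka stack; on current versions the equivalent is system.terminate() followed by Await.result(system.whenTerminated, 30.seconds).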
Example 162
Source File: MarathonApiServiceDiscovery.scala From akka-management with Apache License 2.0 | 5 votes |
package akka.discovery.marathon

import java.net.InetAddress

import akka.actor.ActorSystem
import akka.discovery._
import akka.http.scaladsl._
import akka.http.scaladsl.model._
import akka.http.scaladsl.unmarshalling.Unmarshal
import akka.stream.ActorMaterializer

import scala.collection.immutable.Seq
import scala.concurrent.Future
import scala.concurrent.duration.FiniteDuration
import scala.util.Try

import AppList._
import JsonFormat._
import akka.annotation.ApiMayChange
import akka.discovery.ServiceDiscovery.{ Resolved, ResolvedTarget }
import akka.event.Logging

@ApiMayChange
object MarathonApiServiceDiscovery {
  // companion helpers such as targets(appList, portName), which turn the
  // Marathon app list into ResolvedTargets, live here in the full source;
  // their bodies are elided in this listing
}

@ApiMayChange
class MarathonApiServiceDiscovery(system: ActorSystem) extends ServiceDiscovery {
  import MarathonApiServiceDiscovery._
  import system.dispatcher

  private val log = Logging(system, getClass)
  private val http = Http()(system)
  private val settings = Settings(system)
  private implicit val mat: ActorMaterializer = ActorMaterializer()(system)

  override def lookup(lookup: Lookup, resolveTimeout: FiniteDuration): Future[Resolved] = {
    val uri = Uri(settings.appApiUrl).withQuery(
      Uri.Query(
        "embed" -> "apps.tasks",
        "embed" -> "apps.deployments",
        "label" -> settings.appLabelQuery.format(lookup.serviceName)))

    val request = HttpRequest(uri = uri)

    log.info("Requesting seed nodes by: {}", request.uri)

    val portName = lookup.portName match {
      case Some(name) => name
      case None       => settings.appPortName
    }

    for {
      response <- http.singleRequest(request)
      entity <- response.entity.toStrict(resolveTimeout)
      appList <- {
        log.debug("Marathon API entity: [{}]", entity.data.utf8String)
        val unmarshalled = Unmarshal(entity).to[AppList]

        unmarshalled.failed.foreach { _ =>
          log.error(
            "Failed to unmarshal Marathon API response status [{}], entity: [{}], uri: [{}]",
            response.status.value,
            entity.data.utf8String,
            uri)
        }
        unmarshalled
      }
    } yield Resolved(lookup.serviceName, targets(appList, portName))
  }
}
Example 163
Source File: DemoApp.scala From akka-management with Apache License 2.0 | 5 votes |
package akka.cluster.bootstrap

import akka.actor.ActorSystem
import akka.cluster.{ Cluster, MemberStatus }
import akka.http.scaladsl.Http
import akka.http.scaladsl.model._
import akka.http.scaladsl.server.Directives._
import akka.management.scaladsl.AkkaManagement
import akka.management.cluster.bootstrap.ClusterBootstrap
import akka.stream.ActorMaterializer

object DemoApp extends App {

  implicit val system = ActorSystem("my-system")
  implicit val materializer = ActorMaterializer()

  val cluster = Cluster(system)

  def isReady() = {
    val selfNow = cluster.selfMember
    selfNow.status == MemberStatus.Up
  }

  def isHealthy() = {
    isReady()
  }

  val route = concat(
    path("ping")(complete("pong!")),
    path("healthy")(complete(if (isHealthy()) StatusCodes.OK else StatusCodes.ServiceUnavailable)),
    path("ready")(complete(if (isReady()) StatusCodes.OK else StatusCodes.ServiceUnavailable)))

  AkkaManagement(system).start()
  ClusterBootstrap(system).start()

  Http().bindAndHandle(
    route,
    sys.env.get("HOST").getOrElse("127.0.0.1"),
    sys.env.get("PORT_HTTP").map(_.toInt).getOrElse(8080))
}
Example 164
Source File: DemoApp.scala From akka-management with Apache License 2.0 | 5 votes |
package akka.cluster.bootstrap

import akka.actor.{ Actor, ActorLogging, ActorSystem, Props }
import akka.cluster.ClusterEvent.ClusterDomainEvent
import akka.cluster.{ Cluster, ClusterEvent }
import akka.http.scaladsl.Http
import akka.http.scaladsl.model._
import akka.http.scaladsl.server.Directives._
import akka.management.cluster.bootstrap.ClusterBootstrap
//#start-akka-management
import akka.management.scaladsl.AkkaManagement
//#start-akka-management
import akka.stream.ActorMaterializer

object DemoApp extends App {

  implicit val system = ActorSystem("Appka")

  import system.log
  implicit val mat = ActorMaterializer()
  val cluster = Cluster(system)

  log.info(s"Started [$system], cluster.selfAddress = ${cluster.selfAddress}")

  //#start-akka-management
  AkkaManagement(system).start()
  //#start-akka-management
  ClusterBootstrap(system).start()

  cluster.subscribe(
    system.actorOf(Props[ClusterWatcher]),
    ClusterEvent.InitialStateAsEvents,
    classOf[ClusterDomainEvent]
  )

  // add real app routes here
  val routes =
    path("hello") {
      get {
        complete(
          HttpEntity(
            ContentTypes.`text/html(UTF-8)`,
            "<h1>Hello</h1>"
          )
        )
      }
    }

  Http().bindAndHandle(routes, "0.0.0.0", 8080)

  Cluster(system).registerOnMemberUp({
    log.info("Cluster member is up!")
  })
}

class ClusterWatcher extends Actor with ActorLogging {
  val cluster = Cluster(context.system)

  override def receive = {
    case msg => log.info(s"Cluster ${cluster.selfAddress} >>> " + msg)
  }
}
Example 165
Source File: MarathonApiDockerDemoApp.scala From akka-management with Apache License 2.0 | 5 votes |
package akka.cluster.bootstrap

import akka.actor.ActorSystem
import akka.cluster.{ Cluster, MemberStatus }
import akka.http.scaladsl.Http
import akka.http.scaladsl.model._
import akka.http.scaladsl.server.Directives._
import akka.management.cluster.bootstrap.ClusterBootstrap
import akka.management.scaladsl.AkkaManagement
import akka.stream.ActorMaterializer

object MarathonApiDockerDemoApp extends App {
  implicit val system = ActorSystem("my-system")
  implicit val materializer = ActorMaterializer()

  val cluster = Cluster(system)

  def isReady() = {
    val selfNow = cluster.selfMember
    selfNow.status == MemberStatus.Up
  }

  def isHealthy() = {
    isReady()
  }

  val route = concat(
    path("ping")(complete("pong!")),
    path("healthy")(complete(if (isHealthy()) StatusCodes.OK else StatusCodes.ServiceUnavailable)),
    path("ready")(complete(if (isReady()) StatusCodes.OK else StatusCodes.ServiceUnavailable))
  )

  AkkaManagement(system).start()
  ClusterBootstrap(system).start()

  Http().bindAndHandle(
    route,
    sys.env.get("HOST").getOrElse("127.0.0.1"),
    sys.env.get("PORT_HTTP").map(_.toInt).getOrElse(8080))
}
Example 166
Source File: ClusterApp.scala From akka-management with Apache License 2.0 | 5 votes |
package akka.cluster.bootstrap

import akka.actor.{ Actor, ActorLogging, ActorSystem, PoisonPill, Props }
import akka.cluster.ClusterEvent.ClusterDomainEvent
import akka.cluster.singleton.{ ClusterSingletonManager, ClusterSingletonManagerSettings }
import akka.cluster.{ Cluster, ClusterEvent }
import akka.http.scaladsl.Http
import akka.http.scaladsl.model._
import akka.http.scaladsl.server.Directives._
import akka.management.cluster.bootstrap.ClusterBootstrap
import akka.management.scaladsl.AkkaManagement
import akka.stream.ActorMaterializer

object ClusterApp {

  def main(args: Array[String]): Unit = {
    implicit val system = ActorSystem()
    implicit val materializer = ActorMaterializer()
    implicit val executionContext = system.dispatcher
    val cluster = Cluster(system)

    system.log.info("Starting Akka Management")
    AkkaManagement(system).start()
    ClusterBootstrap(system).start()

    system.actorOf(
      ClusterSingletonManager.props(
        Props[NoisySingleton],
        PoisonPill,
        ClusterSingletonManagerSettings(system)
      )
    )

    Cluster(system).subscribe(
      system.actorOf(Props[ClusterWatcher]),
      ClusterEvent.InitialStateAsEvents,
      classOf[ClusterDomainEvent]
    )

    // add real app routes here
    val routes =
      path("hello") {
        get {
          complete(
            HttpEntity(ContentTypes.`text/html(UTF-8)`, "<h1>Hello</h1>")
          )
        }
      }

    Http().bindAndHandle(routes, "0.0.0.0", 8080)

    system.log.info(
      s"Server online at http://localhost:8080/\nPress RETURN to stop..."
    )

    cluster.registerOnMemberUp(() => {
      system.log.info("Cluster member is up!")
    })
  }

  class ClusterWatcher extends Actor with ActorLogging {
    val cluster = Cluster(context.system)

    override def receive = {
      case msg => log.info(s"Cluster ${cluster.selfAddress} >>> " + msg)
    }
  }
}
Example 167
Source File: DemoApp.scala From akka-management with Apache License 2.0 | 5 votes |
package akka.cluster.bootstrap

import akka.actor.{ Actor, ActorLogging, ActorSystem, Props }
import akka.cluster.ClusterEvent.ClusterDomainEvent
import akka.cluster.{ Cluster, ClusterEvent }
import akka.http.scaladsl.Http
import akka.management.scaladsl.AkkaManagement
import akka.management.cluster.bootstrap.ClusterBootstrap
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.Sink
import akka.stream.scaladsl.Source
import com.typesafe.config.ConfigFactory

object DemoApp extends App {

  implicit val system = ActorSystem("simple")

  import system.log
  import system.dispatcher
  implicit val mat = ActorMaterializer()
  val cluster = Cluster(system)

  log.info("Started [{}], cluster.selfAddress = {}", system, cluster.selfAddress)

  AkkaManagement(system).start()
  ClusterBootstrap(system).start()

  cluster
    .subscribe(system.actorOf(Props[ClusterWatcher]), ClusterEvent.InitialStateAsEvents, classOf[ClusterDomainEvent])

  import akka.http.scaladsl.server.Directives._
  Http().bindAndHandle(complete("Hello world"), "0.0.0.0", 8080)
}

class ClusterWatcher extends Actor with ActorLogging {
  val cluster = Cluster(context.system)

  override def receive = {
    case msg => log.info("Cluster {} >>> {}", msg, cluster.selfAddress)
  }
}
Example 168
Source File: MultiDcSpec.scala From akka-management with Apache License 2.0 | 5 votes |
package akka.management.cluster

import akka.actor.ActorSystem
import akka.http.scaladsl.Http
import akka.http.scaladsl.model.{ HttpRequest, StatusCodes }
import akka.http.scaladsl.unmarshalling.Unmarshal
import akka.management.scaladsl.ManagementRouteProviderSettings
import akka.stream.ActorMaterializer
import akka.testkit.SocketUtil
import com.typesafe.config.ConfigFactory
import org.scalatest.{ Matchers, WordSpec }
import org.scalatest.concurrent.{ Eventually, ScalaFutures }
import org.scalatest.time.{ Millis, Seconds, Span }

class MultiDcSpec
    extends WordSpec
    with Matchers
    with ScalaFutures
    with ClusterHttpManagementJsonProtocol
    with Eventually {

  implicit val patience: PatienceConfig = PatienceConfig(timeout = Span(10, Seconds), interval = Span(50, Millis))

  val config = ConfigFactory.parseString(
    """
      |akka.actor.provider = "cluster"
      |akka.remote.log-remote-lifecycle-events = off
      |akka.remote.netty.tcp.hostname = "127.0.0.1"
      |#akka.loglevel = DEBUG
    """.stripMargin
  )

  "Http cluster management" must {
    "allow multiple DCs" in {
      val Vector(httpPortA, portA, portB) = SocketUtil.temporaryServerAddresses(3, "127.0.0.1").map(_.getPort)
      val dcA = ConfigFactory.parseString(
        s"""
           |akka.management.http.hostname = "127.0.0.1"
           |akka.management.http.port = $httpPortA
           |akka.cluster.seed-nodes = ["akka.tcp://MultiDcSystem@127.0.0.1:$portA"]
           |akka.cluster.multi-data-center.self-data-center = "DC-A"
           |akka.remote.netty.tcp.port = $portA
         """.stripMargin
      )
      val dcB = ConfigFactory.parseString(
        s"""
           |akka.cluster.seed-nodes = ["akka.tcp://MultiDcSystem@127.0.0.1:$portA"]
           |akka.cluster.multi-data-center.self-data-center = "DC-B"
           |akka.remote.netty.tcp.port = $portB
         """.stripMargin
      )

      implicit val dcASystem = ActorSystem("MultiDcSystem", config.withFallback(dcA))
      val dcBSystem = ActorSystem("MultiDcSystem", config.withFallback(dcB))
      implicit val materializer = ActorMaterializer()

      val routeSettings =
        ManagementRouteProviderSettings(selfBaseUri = s"http://127.0.0.1:$httpPortA", readOnly = false)

      try {
        Http()
          .bindAndHandle(ClusterHttpManagementRouteProvider(dcASystem).routes(routeSettings), "127.0.0.1", httpPortA)
          .futureValue

        eventually {
          val response =
            Http().singleRequest(HttpRequest(uri = s"http://127.0.0.1:$httpPortA/cluster/members")).futureValue
          response.status should equal(StatusCodes.OK)
          val members = Unmarshal(response.entity).to[ClusterMembers].futureValue
          members.members.size should equal(2)
          members.members.map(_.status) should equal(Set("Up"))
        }
      } finally {
        dcASystem.terminate()
        dcBSystem.terminate()
      }
    }
  }
}
Example 169
Source File: AkkaJsonHandler.scala From chronicler with Apache License 2.0 | 5 votes |
package com.github.fsanaulla.chronicler.akka.shared.handlers

import akka.http.scaladsl.model.HttpResponse
import akka.stream.ActorMaterializer
import com.github.fsanaulla.chronicler.akka.shared.implicits.futureFunctor
import com.github.fsanaulla.chronicler.core.alias.ErrorOr
import com.github.fsanaulla.chronicler.core.components.JsonHandler
import org.typelevel.jawn.ast.JValue

import scala.concurrent.{ExecutionContext, Future}

final class AkkaJsonHandler(unm: AkkaBodyUnmarshaller)(
    implicit ex: ExecutionContext,
    mat: ActorMaterializer)
  extends JsonHandler[Future, HttpResponse] {

  override def responseBody(response: HttpResponse): Future[ErrorOr[JValue]] =
    unm(response.entity)

  override def responseHeader(response: HttpResponse): Seq[(String, String)] =
    response.headers.map(hd => hd.name() -> hd.value())

  override def responseCode(response: HttpResponse): Int =
    response.status.intValue()
}
Example 170
Source File: AkkaManagementClient.scala From chronicler with Apache License 2.0 | 5 votes |
package com.github.fsanaulla.chronicler.akka.management

import _root_.akka.actor.ActorSystem
import _root_.akka.http.scaladsl.HttpsConnectionContext
import akka.http.scaladsl.model.{HttpResponse, RequestEntity, Uri}
import akka.stream.ActorMaterializer
import com.github.fsanaulla.chronicler.akka.shared.InfluxAkkaClient
import com.github.fsanaulla.chronicler.akka.shared.handlers._
import com.github.fsanaulla.chronicler.core.ManagementClient
import com.github.fsanaulla.chronicler.core.alias.ErrorOr
import com.github.fsanaulla.chronicler.core.model._

import scala.concurrent.{ExecutionContext, Future}

final class AkkaManagementClient(
    host: String,
    port: Int,
    credentials: Option[InfluxCredentials],
    httpsContext: Option[HttpsConnectionContext],
    terminateActorSystem: Boolean
  )(implicit val ex: ExecutionContext,
    val system: ActorSystem,
    val F: Functor[Future],
    val FK: FunctionK[Future, Future])
  extends InfluxAkkaClient(terminateActorSystem, httpsContext)
  with ManagementClient[Future, Future, HttpResponse, Uri, RequestEntity] {

  implicit val mat: ActorMaterializer = ActorMaterializer()
  implicit val qb: AkkaQueryBuilder = new AkkaQueryBuilder(schema, host, port, credentials)
  implicit val jh: AkkaJsonHandler = new AkkaJsonHandler(new AkkaBodyUnmarshaller(false))
  implicit val re: AkkaRequestExecutor = new AkkaRequestExecutor(ctx)
  implicit val rh: AkkaResponseHandler = new AkkaResponseHandler(jh)

  override def ping: Future[ErrorOr[InfluxDBInfo]] = {
    re.get(qb.buildQuery("/ping"), compressed = false)
      .flatMap(rh.pingResult)
  }
}
Example 171
Source File: AkkaIOClient.scala From chronicler with Apache License 2.0 | 5 votes |
package com.github.fsanaulla.chronicler.akka.io

import akka.actor.ActorSystem
import akka.http.scaladsl.HttpsConnectionContext
import akka.http.scaladsl.model.{HttpResponse, RequestEntity, Uri}
import akka.stream.ActorMaterializer
import com.github.fsanaulla.chronicler.akka.shared.InfluxAkkaClient
import com.github.fsanaulla.chronicler.akka.shared.handlers._
import com.github.fsanaulla.chronicler.akka.shared.implicits._
import com.github.fsanaulla.chronicler.core.IOClient
import com.github.fsanaulla.chronicler.core.alias.ErrorOr
import com.github.fsanaulla.chronicler.core.model.{InfluxCredentials, InfluxDBInfo}

import scala.concurrent.{ExecutionContext, Future}
import scala.reflect.ClassTag

final class AkkaIOClient(
    host: String,
    port: Int,
    credentials: Option[InfluxCredentials],
    compress: Boolean,
    httpsContext: Option[HttpsConnectionContext],
    terminateActorSystem: Boolean
  )(implicit ex: ExecutionContext, system: ActorSystem)
  extends InfluxAkkaClient(terminateActorSystem, httpsContext)
  with IOClient[Future, Future, HttpResponse, Uri, RequestEntity] {

  implicit val mat: ActorMaterializer = ActorMaterializer()
  implicit val bb: AkkaBodyBuilder = new AkkaBodyBuilder()
  implicit val qb: AkkaQueryBuilder = new AkkaQueryBuilder(schema, host, port, credentials)
  implicit val jh: AkkaJsonHandler = new AkkaJsonHandler(new AkkaBodyUnmarshaller(compress))
  implicit val re: AkkaRequestExecutor = new AkkaRequestExecutor(ctx)
  implicit val rh: AkkaResponseHandler = new AkkaResponseHandler(jh)

  override def database(dbName: String): AkkaDatabaseApi =
    new AkkaDatabaseApi(dbName, compress)

  override def measurement[A: ClassTag](
      dbName: String,
      measurementName: String
  ): AkkaMeasurementApi[A] =
    new AkkaMeasurementApi[A](dbName, measurementName, compress)

  override def ping: Future[ErrorOr[InfluxDBInfo]] = {
    re.get(qb.buildQuery("/ping", Nil), compressed = false)
      .flatMap(rh.pingResult)
  }
}
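Construction is normally hidden behind chronicler's factory entry points, but wiring the class directly looks roughly like this (a sketch built only from the constructor and methods above; host, port and database name are placeholders):

import akka.actor.ActorSystem
import com.github.fsanaulla.chronicler.akka.io.AkkaIOClient
import scala.concurrent.ExecutionContext

object InfluxExample extends App {
  implicit val system: ActorSystem = ActorSystem("chronicler")
  implicit val ec: ExecutionContext = system.dispatcher

  // parameters mirror the constructor signature shown above
  val io = new AkkaIOClient("localhost", 8086, None, compress = false, None, terminateActorSystem = true)

  // ping checks connectivity; database(...) returns the per-database API
  io.ping.foreach(info => println(s"InfluxDB: $info"))
  val db = io.database("mydb")
}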
Example 172
Source File: CouchbaseClusteredPersistentEntitySpec.scala From akka-persistence-couchbase with Apache License 2.0 | 5 votes |
package com.lightbend.lagom.scaladsl.persistence.couchbase

import java.io.File

import akka.actor.{ActorSystem, CoordinatedShutdown}
import akka.persistence.couchbase.CouchbaseClusterConnection
import akka.stream.{ActorMaterializer, Materializer}
import com.lightbend.lagom.internal.persistence.couchbase.TestConfig
import com.lightbend.lagom.internal.persistence.testkit.AwaitPersistenceInit.awaitPersistenceInit
import com.lightbend.lagom.scaladsl.api.ServiceLocator
import com.lightbend.lagom.scaladsl.api.ServiceLocator.NoServiceLocator
import com.lightbend.lagom.scaladsl.persistence.multinode.{
  AbstractClusteredPersistentEntityConfig,
  AbstractClusteredPersistentEntitySpec
}
import com.lightbend.lagom.scaladsl.persistence.{ReadSideProcessor, TestEntity}
import com.lightbend.lagom.scaladsl.playjson.JsonSerializerRegistry
import com.typesafe.config.Config
import play.api.{Configuration, Environment, Mode}
import play.api.inject.DefaultApplicationLifecycle

import scala.concurrent.{ExecutionContext, Future}

object CouchbaseClusteredPersistentEntityConfig extends AbstractClusteredPersistentEntityConfig {
  override def additionalCommonConfig(databasePort: Int): Config =
    TestConfig.persistenceConfig
}

class CouchbaseClusteredPersistentEntitySpecMultiJvmNode1 extends CouchbaseClusteredPersistentEntitySpec
class CouchbaseClusteredPersistentEntitySpecMultiJvmNode2 extends CouchbaseClusteredPersistentEntitySpec
class CouchbaseClusteredPersistentEntitySpecMultiJvmNode3 extends CouchbaseClusteredPersistentEntitySpec

class CouchbaseClusteredPersistentEntitySpec
    extends AbstractClusteredPersistentEntitySpec(CouchbaseClusteredPersistentEntityConfig) {
  import com.lightbend.lagom.scaladsl.persistence.couchbase.CouchbaseClusteredPersistentEntityConfig._

  override protected def atStartup(): Unit = {
    runOn(node1) {
      CouchbaseClusterConnection.connect().cleanUp().close()
      awaitPersistenceInit(system)
    }
    enterBarrier("couchbase-started")
    super.atStartup()
  }

  lazy val defaultApplicationLifecycle = new DefaultApplicationLifecycle

  override lazy val components: CouchbasePersistenceComponents =
    new CouchbasePersistenceComponents {
      override def actorSystem: ActorSystem = system
      override def executionContext: ExecutionContext = system.dispatcher
      override def materializer: Materializer = ActorMaterializer()(system)
      override def configuration: Configuration = Configuration(system.settings.config)
      override def serviceLocator: ServiceLocator = NoServiceLocator
      override def environment: Environment = Environment(new File("."), getClass.getClassLoader, Mode.Test)
      override def jsonSerializerRegistry: JsonSerializerRegistry = ???
      override def coordinatedShutdown: CoordinatedShutdown = CoordinatedShutdown(system)
    }

  def testEntityReadSide = new TestEntityReadSide(components.actorSystem, components.couchbase)

  override protected def readSideProcessor: () => ReadSideProcessor[TestEntity.Evt] =
    () => new TestEntityReadSide.TestEntityReadSideProcessor(system, components.couchbaseReadSide)

  override protected def getAppendCount(id: String): Future[Long] =
    testEntityReadSide.getAppendCount(id)
}
Example 173
Source File: AbstractCouchbaseSpec.scala From akka-persistence-couchbase with Apache License 2.0 | 5 votes |
package akka.persistence.couchbase.scaladsl

import akka.actor.{ActorRef, ActorSystem}
import akka.persistence.couchbase.{CouchbaseBucketSetup, TestActor}
import akka.persistence.query.PersistenceQuery
import akka.stream.{ActorMaterializer, Materializer}
import akka.testkit.{TestKit, TestProbe, WithLogCapturing}
import com.typesafe.config.{Config, ConfigFactory}
import org.scalatest.concurrent.ScalaFutures
import org.scalatest.{BeforeAndAfterAll, Matchers, WordSpecLike}

import scala.concurrent.duration._

abstract class AbstractCouchbaseSpec(testName: String, config: Config)
    extends TestKit(
      ActorSystem(testName, config.withFallback(ConfigFactory.load()))
    )
    with WordSpecLike
    with BeforeAndAfterAll
    with Matchers
    with ScalaFutures
    with CouchbaseBucketSetup
    with WithLogCapturing {

  def this(testName: String) =
    this(
      testName,
      ConfigFactory.parseString("""
          couchbase-journal.read {
            page-size = 10
          }
          akka.loggers = ["akka.testkit.SilenceAllTestEventListener"]
          akka.loglevel=debug
        """)
    )

  var idCounter = 0
  def nextPersistenceId(): String = {
    idCounter += 1
    val id = Integer.toString(idCounter, 24)
    id.toString
  }

  // provides a unique persistence-id per test case and some initial persisted events
  protected trait Setup {
    lazy val probe = TestProbe()
    implicit def sender: ActorRef = probe.ref

    // note must be a def or lazy val or else it doesn't work (init order)
    def initialPersistedEvents: Int = 0

    def startPersistentActor(initialEvents: Int): (String, ActorRef) = {
      val pid = nextPersistenceId()
      system.log.debug("Starting actor with pid {}, and writing {} initial events", pid, initialPersistedEvents)
      val persistentActor = system.actorOf(TestActor.props(pid))
      if (initialEvents > 0) {
        for (i <- 1 to initialEvents) {
          persistentActor ! s"$pid-$i"
          probe.expectMsg(s"$pid-$i-done")
        }
      }
      (pid, persistentActor)
    }

    val (pid, persistentActor) = startPersistentActor(initialPersistedEvents)

    // no guarantee we can immediately read our own writes
    def readingOurOwnWrites[A](f: => A): A =
      awaitAssert(f, readOurOwnWritesTimeout, interval = 250.millis) // no need to bombard the db with retries
  }

  protected val noMsgTimeout = 100.millis
  protected val readOurOwnWritesTimeout = 10.seconds
  override implicit val patienceConfig: PatienceConfig = PatienceConfig(readOurOwnWritesTimeout)

  implicit val materializer: Materializer = ActorMaterializer()

  // #read-journal-access
  lazy val queries: CouchbaseReadJournal =
    PersistenceQuery(system).readJournalFor[CouchbaseReadJournal](CouchbaseReadJournal.Identifier)
  // #read-journal-access

  protected override def afterAll(): Unit = {
    super.afterAll()
    shutdown(system)
  }
}
Example 174
Source File: CouchbaseSnapshotSpec.scala From akka-persistence-couchbase with Apache License 2.0 | 5 votes |
package akka.persistence.couchbase

import akka.actor.{ActorSystem, PoisonPill}
import akka.persistence.couchbase.TestActor.{GetLastRecoveredEvent, SaveSnapshot}
import akka.stream.ActorMaterializer
import akka.testkit.{TestKit, TestProbe, WithLogCapturing}
import com.typesafe.config.ConfigFactory
import org.scalatest.{BeforeAndAfterAll, BeforeAndAfterEach, Matchers, WordSpecLike}

import scala.concurrent.duration._

class CouchbaseSnapshotSpec
    extends TestKit(
      ActorSystem(
        "CouchbaseSnapshotSpec",
        ConfigFactory.parseString("""
            |akka.loggers = ["akka.testkit.SilenceAllTestEventListener"]
          """.stripMargin).withFallback(ConfigFactory.load())
      )
    )
    with WordSpecLike
    with BeforeAndAfterAll
    with Matchers
    with CouchbaseBucketSetup
    with BeforeAndAfterEach
    with WithLogCapturing {

  protected override def afterAll(): Unit = {
    super.afterAll()
    shutdown(system)
  }

  val waitTime = 100.millis
  implicit val materializer = ActorMaterializer()

  "entity" should {
    "recover" in {
      val senderProbe = TestProbe()
      implicit val sender = senderProbe.ref

      {
        val pa1 = system.actorOf(TestActor.props("p1"))
        pa1 ! "p1-evt-1"
        senderProbe.expectMsg("p1-evt-1-done")
        senderProbe.watch(pa1)
        pa1 ! PoisonPill
        senderProbe.expectTerminated(pa1)
      }
      {
        val pa1 = system.actorOf(TestActor.props("p1"))
        pa1 ! GetLastRecoveredEvent
        senderProbe.expectMsg("p1-evt-1")
      }
    }

    "recover after snapshot" in {
      val senderProbe = TestProbe()
      implicit val sender = senderProbe.ref

      {
        val pa1 = system.actorOf(TestActor.props("p2"))
        pa1 ! "p2-evt-1"
        senderProbe.expectMsg("p2-evt-1-done")
        pa1 ! SaveSnapshot
        senderProbe.expectMsgType[Long]
        senderProbe.watch(pa1)
        pa1 ! PoisonPill
        senderProbe.expectTerminated(pa1)
      }
      {
        val pa1 = system.actorOf(TestActor.props("p2"))
        pa1 ! GetLastRecoveredEvent
        senderProbe.expectMsg("p2-evt-1")
      }
    }
  }
}
Example 175
Source File: StreamSpec.scala From akka-stream-eventsourcing with Apache License 2.0 | 5 votes |
package com.github.krasserm.ases

import akka.stream.ActorMaterializer
import akka.stream.scaladsl.{Flow, Keep}
import akka.stream.testkit.scaladsl.{TestSink, TestSource}
import akka.stream.testkit.{TestPublisher, TestSubscriber}
import akka.testkit.TestKit
import org.scalatest.{BeforeAndAfterAll, Suite}

import scala.collection.immutable.Seq

trait StreamSpec extends BeforeAndAfterAll { this: TestKit with Suite =>
  implicit val materializer = ActorMaterializer()

  val emitterId = "emitter"

  override def afterAll(): Unit = {
    materializer.shutdown()
    TestKit.shutdownActorSystem(system)
    super.afterAll()
  }

  def probes[I, O, M](flow: Flow[I, O, M]): (TestPublisher.Probe[I], TestSubscriber.Probe[O]) =
    TestSource.probe[I].viaMat(flow)(Keep.left).toMat(TestSink.probe[O])(Keep.both).run()

  def durables[A](emitted: Seq[Emitted[A]], offset: Int = 0): Seq[Durable[A]] =
    emitted.zipWithIndex.map { case (e, i) => e.durable(i + offset) }
}
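As a quick illustration (our own sketch, not from the project), a spec mixing in this trait can drive any Flow through the probes helper; the flow and spec name here are placeholders:

import akka.actor.ActorSystem
import akka.stream.scaladsl.Flow
import akka.testkit.TestKit
import com.github.krasserm.ases.StreamSpec
import org.scalatest.WordSpecLike

class UpperCaseSpec extends TestKit(ActorSystem("test")) with WordSpecLike with StreamSpec {
  "a mapping flow" must {
    "be drivable via probes" in {
      // probes wires a TestSource and TestSink around the flow under test
      val (pub, sub) = probes(Flow[String].map(_.toUpperCase))
      pub.sendNext("a")
      sub.requestNext("A") // request one element and assert it equals "A"
    }
  }
}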
Example 176
Source File: PubSubSinkIT.scala From akka-cloudpubsub with Apache License 2.0 | 5 votes |
package com.qubit.pubsub.akka

import akka.NotUsed
import akka.actor.ActorSystem
import akka.stream.scaladsl.{Keep, Sink}
import akka.stream.testkit.scaladsl.TestSource
import akka.stream.{ActorMaterializer, Attributes, Graph, SinkShape}
import com.google.common.base.Charsets
import com.qubit.pubsub.PubSubIntegrationTest
import com.qubit.pubsub.akka.attributes.{
  PubSubClientAttribute,
  PubSubStageBufferSizeAttribute
}
import com.qubit.pubsub.client.PubSubMessage
import org.scalatest.{BeforeAndAfterAll, FunSuite, Matchers}

import scala.concurrent.Await
import scala.concurrent.duration._
import scala.util.Try

class PubSubSinkIT
    extends FunSuite
    with Matchers
    with BeforeAndAfterAll
    with PubSubIntegrationTest {

  implicit val actorSystem = ActorSystem("pubsub-stream-test")
  implicit val materializer = ActorMaterializer()

  override def testName = "pubsubsink"

  override def beforeAll(): Unit = {
    Await.ready(client.createTopic(testTopic), timeout)
    Await
      .ready(client.createSubscription(testSubscription, testTopic), timeout)
  }

  override def afterAll(): Unit = {
    actorSystem.terminate()
    Await.ready(client.deleteSubscription(testSubscription), timeout)
    Await.ready(client.deleteTopic(testTopic), timeout)
  }

  test("PubSubSink success") {
    val sinkGraph: Graph[SinkShape[PubSubMessage], NotUsed] =
      new PubSubSink(testTopic, 1.second)
    val sinkAttributes = Attributes(
      List(PubSubClientAttribute(client), PubSubStageBufferSizeAttribute(30)))
    val pubsubSink = Sink.fromGraph(sinkGraph).withAttributes(sinkAttributes)

    val (pub, _) = TestSource
      .probe[Array[Byte]]
      .map(PubSubMessage(_))
      .toMat(pubsubSink)(Keep.both)
      .run()

    Range(0, 100)
      .map(i => s"xxx$i".getBytes(Charsets.UTF_8))
      .foreach(pub.sendNext)
    pub.sendComplete()

    // wait for buffers to flush
    Try(Thread.sleep(1000))

    val output = Await.result(client.pull(testSubscription, 100), timeout)
    client.ack(testSubscription, output.map(m => m.ackId))

    output should not be (null)
    output should have size (100)
    output
      .map(m => new String(m.payload.payload, Charsets.UTF_8))
      .forall(_.startsWith("xxx")) should be(true)
  }
}
Example 177
Source File: PubSubSourceIT.scala From akka-cloudpubsub with Apache License 2.0 | 5 votes |
package com.qubit.pubsub.akka

import akka.NotUsed
import akka.actor.ActorSystem
import akka.stream.scaladsl.Source
import akka.stream.testkit.scaladsl.TestSink
import akka.stream.{ActorMaterializer, Attributes, Graph, SourceShape}
import com.google.common.base.Charsets
import com.qubit.pubsub.PubSubIntegrationTest
import com.qubit.pubsub.akka.attributes.{
  PubSubClientAttribute,
  PubSubStageBufferSizeAttribute
}
import com.qubit.pubsub.client.PubSubMessage
import org.scalatest.{BeforeAndAfterAll, FunSuite, Matchers}

import scala.concurrent.Await
import scala.concurrent.duration._

class PubSubSourceIT
    extends FunSuite
    with Matchers
    with BeforeAndAfterAll
    with PubSubIntegrationTest {

  implicit val actorSystem = ActorSystem("pubsub-stream-test")
  implicit val materializer = ActorMaterializer()

  override def testName = "pubsubsource"

  override def beforeAll(): Unit = {
    Await.ready(client.createTopic(testTopic), timeout)
    Await
      .ready(client.createSubscription(testSubscription, testTopic), timeout)
  }

  override def afterAll(): Unit = {
    actorSystem.terminate()
    Await.ready(client.deleteSubscription(testSubscription), timeout)
    Await.ready(client.deleteTopic(testTopic), timeout)
  }

  test("PubSubSource success") {
    val data = Range(0, 100)
      .map(i => s"msg$i".getBytes(Charsets.UTF_8))
      .map(PubSubMessage(_))
    Await.ready(client.publish(testTopic, data), timeout)

    val sourceGraph: Graph[SourceShape[PubSubMessage], NotUsed] =
      new PubSubSource(testSubscription, 1.millisecond)
    val sourceAttributes = Attributes(
      List(PubSubClientAttribute(client), PubSubStageBufferSizeAttribute(30)))
    val pubsubSource =
      Source.fromGraph(sourceGraph).withAttributes(sourceAttributes)

    val msgList = pubsubSource
      .runWith(TestSink.probe[PubSubMessage])
      .request(100)
      .expectNextN(100)

    msgList should not be (null)
    msgList should have size (100)
    msgList
      .map(m => new String(m.payload, Charsets.UTF_8))
      .forall(_.startsWith("msg")) should be(true)
  }
}
Example 178
Source File: Example1.scala From tepkin with Apache License 2.0 | 5 votes |
package net.fehmicansaglam.tepkin.examples

import akka.stream.ActorMaterializer
import akka.stream.scaladsl.Source
import akka.util.Timeout
import net.fehmicansaglam.bson.BsonDocument
import net.fehmicansaglam.bson.BsonDsl._
import net.fehmicansaglam.tepkin.MongoClient

import scala.collection.immutable.Iterable
import scala.concurrent.Await
import scala.concurrent.duration._

object Example1 extends App {
  val begin = System.currentTimeMillis()

  // Connect to Mongo client
  val client = MongoClient("mongodb://localhost")

  // Use client's execution context for async operations
  import client.{context, ec}

  // Obtain reference to database "tepkin" using client
  val db = client("tepkin")

  // Obtain reference to the collection "collection1" using database
  val collection1 = db("collection1")

  // Obtain reference to the collection "collection2" using database
  val collection2 = db("collection2")

  implicit val timeout: Timeout = 30.seconds
  implicit val mat = ActorMaterializer()

  // Batch document source
  def documents(n: Int): Source[List[BsonDocument], akka.NotUsed] = Source {
    Iterable.tabulate(n) { _ =>
      (1 to 1000).map(i => $document("name" := s"fehmi$i")).toList
    }
  }

  // Insert 3M documents and then read them all.
  val futureResult = for {
    delete1 <- collection1.drop()
    delete2 <- collection2.drop()
    insert1 <- collection1.insertFromSource(documents(1000)).runForeach(_ => ())
    insert2 <- collection2.insertFromSource(documents(2000)).runForeach(_ => ())
    source1 = collection1.find(BsonDocument.empty, batchMultiplier = 10000)
    source2 = collection2.find(BsonDocument.empty, batchMultiplier = 10000)
    fold1 = source1.runFold(0) { (total, documents) => total + documents.size }
    fold2 = source2.runFold(0) { (total, documents) => total + documents.size }
    result1 <- fold1
    result2 <- fold2
  } yield (result1, result2)

  val result = Await.result(futureResult, 90.seconds)
  println(s"collection1: ${result._1}")
  println(s"collection2: ${result._2}")
  println(s"Elapsed: ${System.currentTimeMillis() - begin}ms")

  // Drop created collections
  Await.ready(collection1.drop(), 10.seconds)
  Await.ready(collection2.drop(), 10.seconds)

  client.shutdown()
}
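Note that each element emitted by documents(n) is itself a batch of 1000 documents, so the two inserts above write 1,000,000 + 2,000,000 = 3M documents in total, matching the comment in the code. The same batching shape with plain integers, as a standalone sketch:

import akka.stream.scaladsl.Source
import scala.collection.immutable.Iterable

object BatchSketch {
  // emits List(0,1,2,3), List(4,5,6,7), List(8,9,10,11) -- one List per stream element
  val batches: Source[List[Int], akka.NotUsed] =
    Source(Iterable.tabulate(3)(batch => List.tabulate(4)(i => batch * 4 + i)))
}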
Example 179
Source File: SinkExample.scala From tepkin with Apache License 2.0 | 5 votes |
package net.fehmicansaglam.tepkin.examples

import akka.stream.ActorMaterializer
import akka.stream.scaladsl.Source
import net.fehmicansaglam.bson.BsonDsl._
import net.fehmicansaglam.bson.{BsonDocument, Bulk}
import net.fehmicansaglam.tepkin.MongoClient

import scala.collection.immutable.Iterable

object SinkExample extends App {
  // Connect to Mongo client
  val client = MongoClient("mongodb://localhost")
  import client.context

  // Obtain reference to database "tepkin" using client
  val db = client("tepkin")

  // Obtain reference to the collection "collection1" using database
  val collection1 = db("collection1")

  // Obtain reference to the collection "collection2" using database
  val collection2 = db("collection2")

  implicit val mat = ActorMaterializer()

  // Batch document source
  def documents(n: Int): Source[List[BsonDocument], akka.NotUsed] = Source {
    Iterable.tabulate(n) { _ =>
      (1 to 1000).map(i => $document("name" := s"fehmi$i")).toList
    }
  }

  val ref1 = documents(1000).map(Bulk).runWith(collection1.sink())
  val ref2 = documents(2000).map(Bulk).runWith(collection2.sink())

  client.shutdown(ref1, ref2)
}
Example 180
Source File: MongoDatabaseSpec.scala From tepkin with Apache License 2.0 | 5 votes |
package net.fehmicansaglam.tepkin

import akka.stream.ActorMaterializer
import akka.util.Timeout
import net.fehmicansaglam.bson.BsonDocument
import org.scalatest._
import org.scalatest.concurrent.ScalaFutures

import scala.concurrent.duration._

class MongoDatabaseSpec
    extends FlatSpec
    with Matchers
    with ScalaFutures
    with OptionValues
    with BeforeAndAfter
    with BeforeAndAfterAll {

  override implicit val patienceConfig =
    PatienceConfig(timeout = 30.seconds, interval = 1.seconds)

  val client = MongoClient("mongodb://localhost")
  val db = client("tepkin")

  import client.{context, ec}

  implicit val timeout: Timeout = 30.seconds

  override protected def afterAll() = client.shutdown()

  "A MongoDatabase" should "list collections" in {
    implicit val mat = ActorMaterializer()

    val result = for {
      source <- db.listCollections()
      collections <- source.runFold(List.empty[BsonDocument])(_ ++ _)
    } yield collections

    whenReady(result) { collections =>
      // Logger comes from the project's logging utilities; its import was elided in this listing
      Logger.debug(s"$collections")
    }
  }
}
Example 181
Source File: WSClientProvider.scala From http-verbs with Apache License 2.0 | 5 votes |
package uk.gov.hmrc.play.connectors

import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import play.api.libs.ws.WSClient
import play.api.libs.ws.ahc.{AhcConfigBuilder, AhcWSClient}

trait WSClientProvider {
  implicit val client: WSClient
}

trait DefaultWSClientProvider extends WSClientProvider {
  val builder = new AhcConfigBuilder()
  val ahcBuilder = builder.configure()
  val ahcConfig = ahcBuilder.build()

  implicit val system = ActorSystem()
  implicit val materializer = ActorMaterializer()

  implicit val client = new AhcWSClient(ahcConfig)
}
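Mixing the default provider into a connector is enough to get a ready WSClient (a sketch; the PingConnector class and URL are ours, not part of the library):

import play.api.libs.ws.WSResponse
import uk.gov.hmrc.play.connectors.DefaultWSClientProvider
import scala.concurrent.Future

class PingConnector extends DefaultWSClientProvider {
  // `client` is supplied by the mixed-in trait
  def ping(url: String): Future[WSResponse] = client.url(url).get()
}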
Example 182
Source File: Boot.scala From eclair with Apache License 2.0 | 5 votes |
package fr.acinq.eclair

import java.io.File

import akka.actor.ActorSystem
import akka.http.scaladsl.Http
import akka.stream.{ActorMaterializer, BindFailedException}
import fr.acinq.eclair.api.Service
import grizzled.slf4j.Logging
import kamon.Kamon

import scala.concurrent.ExecutionContext
import scala.util.{Failure, Success}

// NOTE: the enclosing object declaration was elided in this listing; only the
// methods below were extracted from Boot.scala.

def startApiServiceIfEnabled(kit: Kit)(implicit system: ActorSystem, ec: ExecutionContext) = {
  val config = system.settings.config.getConfig("eclair")
  if (config.getBoolean("api.enabled")) {
    logger.info(s"json API enabled on port=${config.getInt("api.port")}")
    implicit val materializer = ActorMaterializer()
    val apiPassword = config.getString("api.password") match {
      case "" => throw EmptyAPIPasswordException
      case valid => valid
    }
    val apiRoute = new Service {
      override val actorSystem = system
      override val mat = materializer
      override val password = apiPassword
      override val eclairApi: Eclair = new EclairImpl(kit)
    }.route
    Http().bindAndHandle(apiRoute, config.getString("api.binding-ip"), config.getInt("api.port")).recover {
      case _: BindFailedException => onError(TCPBindException(config.getInt("api.port")))
    }
  } else {
    logger.info("json API disabled")
  }
}

def onError(t: Throwable): Unit = {
  val errorMsg = if (t.getMessage != null) t.getMessage else t.getClass.getSimpleName
  System.err.println(s"fatal error: $errorMsg")
  logger.error(s"fatal error: $errorMsg", t)
  System.exit(1)
}
} // closes the elided enclosing object
Example 183
Source File: WSClientContext.scala From cluster-broccoli with Apache License 2.0 | 5 votes |
package de.frosner.broccoli.test.contexts

import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import org.specs2.execute.{AsResult, Result}
import org.specs2.specification.ForEach
import org.specs2.specification.mutable.ExecutionEnvironment
import play.api.libs.ws.WSClient
import play.api.libs.ws.ahc.AhcWSClient

trait WSClientContext extends ForEach[WSClient] {
  self: ExecutionEnvironment =>

  override protected def foreach[R: AsResult](f: (WSClient) => R): Result = {
    implicit val actorSystem = ActorSystem("nomad-http-client")
    try {
      implicit val materializer = ActorMaterializer()
      val client: WSClient = AhcWSClient()
      try AsResult(f(client))
      finally client.close()
    } finally {
      actorSystem.terminate()
    }
  }
}
Example 184
Source File: HttpClient.scala From heimdallr with Apache License 2.0 | 5 votes |
package chat

import scala.util.{Failure, Success, Try}
import akka.actor._
import akka.http.scaladsl.Http
import akka.http.scaladsl.model._
import akka.http.scaladsl.model.headers.RawHeader
import akka.stream.ActorMaterializer
import akka.util.ByteString
import scala.concurrent.{ExecutionContext, Future}
import chat.HttpClient._
import UserActor.CustomResponse

object HttpClient {
  case class HttpClientGet(event: String, path: String)
  case class HttpClientPost(event: String, path: String, token: String, jsonBody: String)
  case class HttpClientResponseSuccess(event: String, resHttp: HttpResponse, recipient: ActorRef)
  case class HttpClientResponseFailure(event: String, reason: String, recipient: ActorRef)
}

class HttpClient()(implicit system: ActorSystem,
                   mat: ActorMaterializer,
                   dispatcher: ExecutionContext)
  extends Actor with ActorLogging {

  def pipeToSelf(event: String, future: Future[HttpResponse], recipient: ActorRef): Future[HttpResponse] = {
    future andThen {
      case Success(r) => self ! HttpClientResponseSuccess(event, r, recipient)
      case Failure(f) => self ! HttpClientResponseFailure(event, f.toString, recipient)
    }
  }

  def post(event: String, path: String, token: String, jsonBody: String, recipient: ActorRef) = {
    val objectEntity = HttpEntity(ContentTypes.`application/json`, jsonBody)
    val responseFuture: Future[HttpResponse] =
      Http().singleRequest(
        HttpRequest(
          method = HttpMethods.POST,
          uri = path,
          entity = objectEntity
        ).withHeaders(
          RawHeader("Authorization", "Token " + token)
        )
      )
    pipeToSelf(event, responseFuture, recipient)
  }

  def get(event: String, path: String, recipient: ActorRef) = {
    val responseFuture: Future[HttpResponse] =
      Http().singleRequest(
        HttpRequest(
          method = HttpMethods.GET,
          uri = path
        )
      )
    pipeToSelf(event, responseFuture, recipient)
  }

  def receive = {
    case HttpClientGet(event, path) =>
      get(event, path, sender)

    case HttpClientPost(event, path, token, jsonBody) =>
      post(event, path, token, jsonBody, sender)

    // connection success
    case HttpClientResponseSuccess(event, resp, recipient) =>
      resp match {
        case HttpResponse(StatusCodes.OK, headers, entity, _) =>
          entity.dataBytes.runFold(ByteString(""))(_ ++ _).foreach { body =>
            log.info("Got response, body: " + body.utf8String)
            recipient ! CustomResponse(event, 200, body.utf8String)
          }
        case resp @ HttpResponse(code, _, _, _) =>
          log.info("Request failed, response code: " + code)
          resp.discardEntityBytes()
          recipient ! CustomResponse(event, code.intValue(), s"Request failed, response code: $code")
      }

    // connection failure
    case HttpClientResponseFailure(event, resp, recipient) =>
      log.info("Request failed, reason: " + resp)
      recipient ! CustomResponse(event, 599, s"Request failed, response code: ${resp}")

    case x =>
      log.info(s"HttpClient Request failed: ${x}")
  }

  override def preStart(): Unit = {
  }

  override def preRestart(reason: Throwable, message: Option[Any]): Unit = {
    preStart()
  }

  override def postRestart(reason: Throwable): Unit = {
    log.info(reason.toString)
  }

  override def postStop(): Unit = {
  }
}
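A caller interacts with this actor purely through messages. A minimal sketch of a requesting actor (the URL and event name are placeholders; CustomResponse(event, code, body) mirrors the replies sent above, and we assume UserActor lives in the chat package as the import suggests):

import akka.actor.{Actor, ActorLogging, ActorRef}
import chat.HttpClient.HttpClientGet
import chat.UserActor.CustomResponse

// Hypothetical caller: fires a GET on startup and logs the reply message.
class HttpCaller(httpClient: ActorRef) extends Actor with ActorLogging {
  override def preStart(): Unit =
    httpClient ! HttpClientGet("user-lookup", "http://localhost:8080/users/42")

  def receive = {
    case CustomResponse(event, code, body) =>
      log.info(s"$event returned $code: $body")
  }
}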
Example 185
Source File: AdminService.scala From heimdallr with Apache License 2.0 | 5 votes |
package chat.admin

import scala.concurrent.ExecutionContext
import akka.actor._
import akka.stream.ActorMaterializer
import akka.http.scaladsl.model.{ContentTypes, HttpEntity}
import akka.http.scaladsl.server.Directives._
import chat.{HealthyService, WebService}

class AdminService(healthy: HealthyService)
                  (implicit system: ActorSystem,
                   mat: ActorMaterializer,
                   dispatcher: ExecutionContext)
  extends WebService with CommonApi {

  private var chatSuper: ActorRef = null
  private val servicePort = 8090
  private val serviceRoute = //<- adjustable depended on client url
    get {
      pathPrefix("health") {
        path("up") {
          healthy.start()
          httpRespJson("200 OK")
        } ~
        path("down") {
          healthy.stop()
          httpRespJson("200 OK")
        } ~
        path("view") {
          var result: String = ""
          if (chatSuper != null) {
            chatSuper ! "akka://heimdallr/user/*"
            chatSuper ! "akka://heimdallr/user/cs/*"
            result = "200 OK"
          } else {
            result = "ChatSupervisor ActorRef is NULL"
          }
          httpRespJson(result)
        }
      }
    }

  def httpRespJson(body: String) = {
    complete(HttpEntity(ContentTypes.`application/json`, body + "\r\n"))
  }

  def setChatSupervisorActorRef(actorRef: ActorRef) = {
    chatSuper = actorRef
  }

  def start(): Unit = {
    log.debug("Admin Server starting ...")
    serviceBind(this.getClass.getSimpleName, serviceRoute, servicePort)
  }

  def stop(): Unit = {
    serviceUnbind(this.getClass.getSimpleName)
  }
}
Example 186
Source File: HealthyService.scala From heimdallr with Apache License 2.0 | 5 votes |
package chat

import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import akka.http.scaladsl.server.Directives._

class HealthyService()(implicit system: ActorSystem, mat: ActorMaterializer)
  extends WebService {

  private val servicePort = 8099
  private val serviceRoute = //<- adjustable depended on client url
    get {
      pathEndOrSingleSlash {
        complete("Welcome to Heimdallr")
      }
    }

  def start(): Unit = {
    log.info("Healthy Service starting ...")
    serviceBind(this.getClass.getSimpleName, serviceRoute, servicePort)
  }

  def stop(): Unit = {
    serviceUnbind(this.getClass.getSimpleName)
  }
}
Example 187
Source File: ChatSupervisor.scala From heimdallr with Apache License 2.0 | 5 votes |
package chat

import akka.actor._
import akka.actor.SupervisorStrategy._
import scala.concurrent.ExecutionContext
import scala.concurrent.duration._
import org.json4s._
import org.json4s.{DefaultFormats, JValue}
import java.util.concurrent.TimeUnit
import EventConstants._
import akka.stream.ActorMaterializer

// NOTE: the enclosing ChatSupervisor class declaration (including its envType
// field and getChatRoomActorRef member) was elided in this listing; only the
// methods below were extracted.

def createNewChatRoom(number: Int): ActorRef = {
  var chatroom: ActorRef = null

  try {
    // creates new ChatRoomActor and returns as an ActorRef
    chatroom = context.actorOf(Props(new ChatRoomActor(number, envType)), s"${number}")
    ChatRooms.chatRooms += number -> chatroom
  } catch {
    case e: Exception =>
      log.info(s"FIXME: Create new chat room(${number}) => " + e)
      self ! CreateChatRoom(number)
  }

  chatroom
}

def removeChatRoom(chatRoomID: Int): Unit = {
  this.synchronized {
    ChatRooms.chatRooms.remove(chatRoomID)
  }
}

override def receive: Receive = {
  case CreateChatRoom(chatRoomID) =>
    getChatRoomActorRef(chatRoomID)

  case RemoveChatRoom(chatRoomID) =>
    removeChatRoom(chatRoomID)

  case RegChatUser(chatRoomID, userActor) =>
    userActor ! JoinRoom(getChatRoomActorRef(chatRoomID))

  case RegProps(props, name) =>
    context.actorOf(props, name)

  case HeimdallrError =>
    throw new ArithmeticException()

  case HeimdallrChatStatus =>
    log.info("Heimdallr ChatSupervisor Running ...")

  // *** supervisor ! "akka://heimdallr/user/{Valid ActorName}"
  case path: String =>
    log.debug(s"checking path => $path")
    context.actorSelection(path) ! Identify(path)

  case ActorIdentity(path, Some(ref)) =>
    log.debug(s"found actor $ref on $path")

  // *** supervisor ! "/user/{Invalid ActorName}"
  case ActorIdentity(path, None) =>
    log.debug(s"could not find an actor on $path")

  case Terminated(user) =>
    log.info("Receive Terminated Event of ChatRoomActor")

  case x =>
    log.warning("ChatSupervisor Unknown message : " + x)
}
} // closes the elided enclosing class
Example 188
Source File: HeimdallrService.scala From heimdallr with Apache License 2.0 | 5 votes |
package chat

import scala.concurrent.ExecutionContext
import akka.actor._
import akka.stream.ActorMaterializer
import com.typesafe.config.ConfigFactory
import chat.admin.AdminService

object Heimdallr extends App {
  override def main(args: Array[String]): Unit = {
    implicit val system = ActorSystem("heimdallr", ConfigFactory.load())
    implicit val materializer: ActorMaterializer = ActorMaterializer()
    implicit val executionContext: ExecutionContext = system.dispatcher

    val env = argsValidation(system, args)
    val hs = new HealthyService
    val as = new AdminService(hs)
    val ws = new ChatService(env, as)

    hs.start()
    as.start()
    ws.start()
  }

  def argsValidation(system: ActorSystem, args: Array[String]) = {
    val env = args.length match {
      case 1 =>
        args(0) match {
          case "live" => args(0)
          case "standby" => args(0)
          case "dev" => "development"
          case "development" => args(0)
          case _ => bye(system)
        }
      case _ => bye(system)
    }
    env
  }

  def bye(system: ActorSystem) = {
    println("\nUsage : ")
    println("  - sbt \"run [live|standby|development or dev]\"\n")
    system.terminate()
    null
  }
}
Example 189
Source File: LeagueProjection.scala From eventsourcing-intro with Apache License 2.0 | 5 votes |
package eu.reactivesystems.league.impl

import akka.actor.{Actor, ActorLogging, Props, Status}
import akka.pattern.pipe
import akka.persistence.cassandra.query.scaladsl.CassandraReadJournal
import akka.persistence.query.{EventEnvelope2, PersistenceQuery}
import akka.stream.ActorMaterializer
import akka.stream.scaladsl._
import com.lightbend.lagom.scaladsl.persistence.jdbc.JdbcSession

class LeagueProjection(jdbcSession: JdbcSession) extends Actor with ActorLogging {

  import DBOperations._

  override def receive: Receive = {
    case Status.Failure(ex) =>
      log.error(ex, "read side generation terminated")
      context.stop(self)
  }

  override def preStart(): Unit = {
    val materializer = ActorMaterializer.create(context.system)
    val readJournal = PersistenceQuery
      .get(context.system)
      .readJournalFor[CassandraReadJournal](CassandraReadJournal.Identifier)

    import context.dispatcher
    val result = getOffset(jdbcSession)
      .flatMap(offset =>
        readJournal
          .eventsByTag(LeagueEvent.Tag.tag, offset)
          .mapAsync(1)(e => projectEvent(e))
          .runWith(Sink.ignore)(materializer))

    result pipeTo self
    ()
  }

  private def projectEvent(event: EventEnvelope2) =
    event.event match {
      case ClubRegistered(club) => addClub(jdbcSession, event.offset, club)
      case GamePlayed(game) => addGame(jdbcSession, event.offset, game)
      case ResultRevoked(game) => revokeResult(jdbcSession, event.offset, game)
    }
}

object LeagueProjection {
  val readSideId = "leagueProjection"

  def props(jdbcSession: JdbcSession) = Props(new LeagueProjection(jdbcSession))
}
Example 190
Source File: LeagueServiceImpl.scala From eventsourcing-intro with Apache License 2.0 | 5 votes |
package eu.reactivesystems.league.impl

import akka.Done
import akka.actor.ActorSystem
import akka.http.scaladsl.Http
import akka.stream.ActorMaterializer
import eu.reactivesystems.league.api.{Club, Game, LeagueService}

import scala.concurrent.Future
import scala.io.StdIn

object LeagueServiceImpl extends LeagueService {

  implicit val system = ActorSystem("league-actorsystem")
  implicit val materializer = ActorMaterializer()
  implicit val executionContext = system.dispatcher

  override def addClub(leagueId: String, club: Club): Future[Done] =
    // get sharded instance
    // send message using ask
    // process result
    Future.successful(Done)

  override def addGame(leagueId: String, game: Game): Future[Done] =
    Future.successful(Done)

  override def changeGame(leagueId: String, game: Game): Future[Done] =
    Future.successful(Done)

  def main(args: Array[String]): Unit = {
    val port = system.settings.config.getInt("eu.reactivesystems.league.http.port")
    val bindingFuture = Http().bindAndHandle(routes, "localhost", port)

    println(s"Server online at http://localhost:$port/\nPress RETURN to stop...")
    StdIn.readLine() // let it run until user presses return

    bindingFuture
      .flatMap(_.unbind()) // trigger unbinding from the port
      .onComplete(_ => system.terminate()) // and shutdown when done
  }
}
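The commented-out steps in addClub outline the usual ask-pattern flow against a sharded entity. A hedged sketch of what that could look like (the AddClubCommand protocol and the shardRegion ref are our assumptions for illustration, not part of this example):

import akka.Done
import akka.actor.ActorRef
import akka.pattern.ask
import akka.util.Timeout
import eu.reactivesystems.league.api.Club
import scala.concurrent.{ExecutionContext, Future}
import scala.concurrent.duration._

object LeagueAskSketch {
  // hypothetical command understood by the sharded league entity
  final case class AddClubCommand(leagueId: String, club: Club)

  def addClubViaAsk(shardRegion: ActorRef, leagueId: String, club: Club)(
      implicit ec: ExecutionContext): Future[Done] = {
    implicit val timeout: Timeout = Timeout(5.seconds)
    // send the command using ask, then map the entity's reply to Done
    (shardRegion ? AddClubCommand(leagueId, club)).map(_ => Done)
  }
}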
Example 191
Source File: FrontendNodeApp.scala From akka-exchange with Apache License 2.0 | 5 votes |
package com.boldradius.akka_exchange.frontend

import akka.http.scaladsl.Http
import akka.http.scaladsl.server.Directives._
import akka.stream.ActorMaterializer
import com.boldradius.akka_exchange.util.ExchangeNodeBootable

object FrontendNodeApp extends ExchangeNodeBootable {
  import net.ceedubs.ficus.Ficus._

  implicit val materializer = ActorMaterializer()

  val route =
    path("offers") {
      get {
        complete {
          "Here's some data... or would be if we had data."
        }
      }
    }

  val httpPort = config.as[Int]("akka-exchange.cluster.frontend.port")
  // todo - make me based on local address so we don't need special config that locks out multiple http nodes
  val httpAddr = config.as[String]("akka-exchange.cluster.frontend.address")

  Http().bindAndHandle(route, httpAddr, httpPort)
}
// vim: set ts=2 sw=2 sts=2 et:
Example 192
Source File: ServerMain.scala From scastie with Apache License 2.0 | 5 votes |
package com.olegych.scastie.web

import com.olegych.scastie.web.routes._
import com.olegych.scastie.web.oauth2._
import com.olegych.scastie.balancer._
import com.olegych.scastie.util.ScastieFileUtil

import akka.http.scaladsl._
import server.Directives._

import ch.megard.akka.http.cors.scaladsl.CorsDirectives._

import com.typesafe.config.ConfigFactory
import com.typesafe.scalalogging.Logger

import akka.actor.{ActorSystem, Props}
import akka.stream.ActorMaterializer

import scala.concurrent.duration._
import scala.concurrent.Await

object ServerMain {
  def main(args: Array[String]): Unit = {
    val logger = Logger("ServerMain")

    val port =
      if (args.isEmpty) 9000
      else args.head.toInt

    val config2 = ConfigFactory.load().getConfig("akka.remote.netty.tcp")
    println("akka tcp config")
    println(config2.getString("hostname"))
    println(config2.getInt("port"))

    val config = ConfigFactory.load().getConfig("com.olegych.scastie.web")
    val production = config.getBoolean("production")

    if (production) {
      ScastieFileUtil.writeRunningPid()
    }

    implicit val system: ActorSystem = ActorSystem("Web")
    import system.dispatcher
    implicit val materializer: ActorMaterializer = ActorMaterializer()

    val github = new Github
    val session = new GithubUserSession(system)
    val userDirectives = new UserDirectives(session)

    val progressActor = system.actorOf(
      Props[ProgressActor],
      name = "ProgressActor"
    )

    val statusActor = system.actorOf(
      StatusActor.props,
      name = "StatusActor"
    )

    val dispatchActor = system.actorOf(
      Props(new DispatchActor(progressActor, statusActor)),
      name = "DispatchActor"
    )

    val routes = concat(
      cors()(
        pathPrefix("api")(
          concat(
            new ApiRoutes(dispatchActor, userDirectives).routes,
            new ProgressRoutes(progressActor).routes,
            new DownloadRoutes(dispatchActor).routes,
            new StatusRoutes(statusActor, userDirectives).routes,
            new ScalaJsRoutes(dispatchActor).routes
          )
        )
      ),
      new OAuth2Routes(github, session).routes,
      cors()(
        concat(
          new ScalaLangRoutes(dispatchActor, userDirectives).routes,
          new FrontPageRoutes(production).routes
        )
      )
    )

    Await.result(Http().bindAndHandle(routes, "0.0.0.0", port), 1.seconds)

    logger.info(s"Scastie started (port: $port)")

    // scala.io.StdIn.readLine("press enter to stop server")
    // system.terminate()

    Await.result(system.whenTerminated, Duration.Inf)

    ()
  }
}
Example 193
Source File: Github.scala From scastie with Apache License 2.0 | 5 votes |
package com.olegych.scastie.web.oauth2

import com.olegych.scastie.web.PlayJsonSupport

import akka.http.scaladsl._
import akka.http.scaladsl.model._
import HttpMethods.POST
import headers._
import Uri._
import unmarshalling.Unmarshal

import akka.actor.ActorSystem
import akka.stream.ActorMaterializer

import com.olegych.scastie.api.User

import scala.concurrent.Future
import com.typesafe.config.ConfigFactory
import play.api.libs.json.{OFormat, Reads}

case class AccessToken(access_token: String)

class Github(implicit system: ActorSystem, materializer: ActorMaterializer) extends PlayJsonSupport {
  import system.dispatcher
  import play.api.libs.json._

  implicit val formatUser: OFormat[User] = Json.format[User]
  implicit val readAccessToken: Reads[AccessToken] = Json.reads[AccessToken]

  private val config =
    ConfigFactory.load().getConfig("com.olegych.scastie.web.oauth2")
  val clientId: String = config.getString("client-id")
  private val clientSecret = config.getString("client-secret")
  private val redirectUri = config.getString("uri") + "/callback"

  def getUserWithToken(token: String): Future[User] = info(token)

  def getUserWithOauth2(code: String): Future[User] = {
    def access = {
      Http()
        .singleRequest(
          HttpRequest(
            method = POST,
            uri = Uri("https://github.com/login/oauth/access_token").withQuery(
              Query(
                "client_id" -> clientId,
                "client_secret" -> clientSecret,
                "code" -> code,
                "redirect_uri" -> redirectUri
              )
            ),
            headers = List(Accept(MediaTypes.`application/json`))
          )
        )
        .flatMap(
          response => Unmarshal(response).to[AccessToken].map(_.access_token)
        )
    }

    access.flatMap(info)
  }

  private def info(token: String): Future[User] = {
    def fetchGithub(path: Path, query: Query = Query.Empty) = {
      HttpRequest(
        uri = Uri(s"https://api.github.com").withPath(path).withQuery(query),
        headers = List(Authorization(GenericHttpCredentials("token", token)))
      )
    }

    Http()
      .singleRequest(fetchGithub(Path.Empty / "user"))
      .flatMap(response => Unmarshal(response).to[User])
  }
}
Example 194
Source File: Conseil.scala From Conseil with Apache License 2.0 | 5 votes |
package tech.cryptonomic.conseil.api

import akka.actor.ActorSystem
import akka.http.scaladsl.Http
import akka.stream.ActorMaterializer
import com.typesafe.scalalogging.LazyLogging
import de.heikoseeberger.akkahttpcirce.FailFastCirceSupport
import tech.cryptonomic.conseil.api.config.{ConseilAppConfig, ConseilConfiguration}
import tech.cryptonomic.conseil.api.util.Retry.retry
import tech.cryptonomic.conseil.common.config.Platforms.PlatformsConfiguration
import tech.cryptonomic.conseil.common.config._

import scala.concurrent.duration._
import scala.concurrent.{Await, ExecutionContext, ExecutionContextExecutor}
import scala.language.postfixOps
import scala.util.Failure

object Conseil extends App with LazyLogging with ConseilAppConfig with FailFastCirceSupport with ConseilMainOutput {

  loadApplicationConfiguration(args) match {
    case Left(errors) =>
    //nothing to do
    case Right(config) =>
      implicit val system: ActorSystem = ActorSystem("conseil-system")
      implicit val materializer: ActorMaterializer = ActorMaterializer()
      implicit val executionContext: ExecutionContextExecutor = system.dispatcher

      val retries = if (config.failFast.on) Some(0) else None

      val serverBinding =
        retry(maxRetry = retries, deadline = Some(config.server.startupDeadline fromNow))(ConseilApi.create(config)).andThen {
          case Failure(error) =>
            logger.error(
              "The server was not started correctly, I failed to create the required Metadata service",
              error
            )
            Await.ready(system.terminate(), 10.seconds)
        }.flatMap(
          runServer(_, config.server, config.platforms, config.verbose)
        )

      sys.addShutdownHook {
        serverBinding
          .flatMap(_.unbind().andThen { case _ => logger.info("Server stopped...") })
          .andThen { case _ => system.terminate() }
          .onComplete(_ => logger.info("We're done here, nothing else to see"))
      }
  }

  def runServer(
      api: ConseilApi,
      server: ConseilConfiguration,
      platforms: PlatformsConfiguration,
      verbose: VerboseOutput
  )(implicit executionContext: ExecutionContext, system: ActorSystem, mat: ActorMaterializer) = {
    val bindingFuture = Http().bindAndHandle(api.route, server.hostname, server.port)
    displayInfo(server)
    if (verbose.on) displayConfiguration(platforms)
    bindingFuture
  }
}
Example 195
Source File: CrdtsClient.scala From cloudstate with Apache License 2.0 | 5 votes |
package io.cloudstate.samples

import akka.actor.ActorSystem
import akka.grpc.GrpcClientSettings
import akka.stream.scaladsl.{Keep, Sink}
import akka.stream.{ActorMaterializer, KillSwitches}
import com.example.crdts.crdt_example._

import scala.concurrent.duration._
import scala.concurrent.{Await, Future}

class CrdtsClient(hostname: String, port: Int, hostnameOverride: Option[String], sys: ActorSystem) {
  def this(hostname: String, port: Int, hostnameOverride: Option[String] = None) =
    this(hostname, port, hostnameOverride, ActorSystem())

  private implicit val system = sys
  private implicit val materializer = ActorMaterializer()
  import sys.dispatcher

  val settings = {
    val s = GrpcClientSettings.connectToServiceAt(hostname, port).withTls(false)
    hostnameOverride.fold(s)(host => s.withChannelBuilderOverrides(_.overrideAuthority(host)))
  }
  println(s"Connecting to $hostname:$port")
  val service = CrdtExampleClient(settings)

  def shutdown(): Unit = {
    await(service.close())
    await(system.terminate())
  }

  def await[T](future: Future[T]): T = Await.result(future, 10.seconds)

  def getGCounter(id: String) = await(service.getGCounter(Get(id))).value

  def incrementGCounter(id: String, value: Long) =
    await(service.incrementGCounter(UpdateCounter(id, value))).value

  def getPNCounter(id: String) = await(service.getPNCounter(Get(id))).value

  def updatePNCounter(id: String, value: Long) =
    await(service.updatePNCounter(UpdateCounter(id, value))).value

  def getGSet(id: String) = await(service.getGSet(Get(id))).items

  def mutateGSet(id: String, values: Seq[SomeValue]) =
    await(service.mutateGSet(MutateSet(add = values))).size

  def getORSet(id: String) = await(service.getORSet(Get(id))).items

  def mutateORSet(id: String, add: Seq[SomeValue] = Nil, remove: Seq[SomeValue] = Nil, clear: Boolean = false) =
    await(service.mutateORSet(MutateSet(key = id, add = add, remove = remove, clear = clear))).size

  def connect(id: String) =
    service.connect(User(id)).viaMat(KillSwitches.single)(Keep.right).to(Sink.ignore).run()

  def monitor(monitorId: String, id: String) =
    service
      .monitor(User(id))
      .viaMat(KillSwitches.single)(Keep.right)
      .to(
        Sink.foreach(
          status =>
            println(
              s"Monitor $monitorId saw user $id go " + (if (status.online) "online" else "offline")
            )
        )
      )
      .run()
}
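Typical use from a main method, assuming a Cloudstate proxy reachable on localhost:9000 (a sketch built only from the methods above; the counter name is a placeholder):

import io.cloudstate.samples.CrdtsClient

object CrdtsClientDemo extends App {
  val client = new CrdtsClient("localhost", 9000, None)
  try {
    client.incrementGCounter("hits", 3)
    println(s"hits = ${client.getGCounter("hits")}")
  } finally {
    client.shutdown()
  }
}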
Example 196
Source File: ShoppingCartClient.scala From cloudstate with Apache License 2.0 | 5 votes |
package io.cloudstate.samples

import akka.actor.ActorSystem
import akka.grpc.GrpcClientSettings
import akka.stream.ActorMaterializer
import com.example.shoppingcart.shoppingcart._

import scala.concurrent.duration._
import scala.concurrent.{Await, Future}
import scala.util.control.NonFatal

object ShoppingCartClient {
  def main(args: Array[String]): Unit = {

    val client = new ShoppingCartClient("localhost", 9000, None)

    val userId = "viktor"
    val productId = "1337"
    val productName = "h4x0r"

    try {
      println(client.getCart(userId))
      for (_ <- 1 to 8) {
        client.addItem(userId, productId, productName, 1)
      }
      println(client.getCart(userId))
      client.removeItem(userId, productId)
      println(client.getCart(userId))
    } catch {
      case NonFatal(e) => e.printStackTrace()
    } finally {
      try {
        client.shutdown()
      } finally {
        System.exit(0)
      }
    }
  }
}

class ShoppingCartClient(hostname: String, port: Int, hostnameOverride: Option[String], sys: ActorSystem) {
  def this(hostname: String, port: Int, hostnameOverride: Option[String] = None) =
    this(hostname, port, hostnameOverride, ActorSystem())

  private implicit val system = sys
  private implicit val materializer = ActorMaterializer()
  import sys.dispatcher

  val settings = {
    val s = GrpcClientSettings.connectToServiceAt(hostname, port).withTls(false)
    hostnameOverride.fold(s)(host => s.withChannelBuilderOverrides(_.overrideAuthority(host)))
  }
  println(s"Connecting to $hostname:$port")
  val service = com.example.shoppingcart.shoppingcart.ShoppingCartClient(settings)

  def shutdown(): Unit = {
    await(service.close())
    await(system.terminate())
  }

  def await[T](future: Future[T]): T = Await.result(future, 10.seconds)

  def getCart(userId: String) = await(service.getCart(GetShoppingCart(userId)))

  def addItem(userId: String, productId: String, name: String, quantity: Int) =
    await(service.addItem(AddLineItem(userId, productId, name, quantity)))

  def removeItem(userId: String, productId: String) =
    await(service.removeItem(RemoveLineItem(userId, productId)))
}
Example 197
Source File: Main.scala From quiz-management-service with Apache License 2.0 | 5 votes |
package com.danielasfregola.quiz.management

import scala.concurrent.duration._
import akka.actor._
import akka.http.scaladsl.Http
import akka.stream.ActorMaterializer
import akka.util.Timeout
import com.typesafe.config.ConfigFactory

object Main extends App with RestInterface {
  val config = ConfigFactory.load()
  val host = config.getString("http.host")
  val port = config.getInt("http.port")

  implicit val system = ActorSystem("quiz-management-service")
  implicit val materializer = ActorMaterializer()

  implicit val executionContext = system.dispatcher
  implicit val timeout = Timeout(10 seconds)

  val api = routes

  Http().bindAndHandle(handler = api, interface = host, port = port) map { binding =>
    println(s"REST interface bound to ${binding.localAddress}")
  } recover {
    case ex =>
      println(s"REST interface could not bind to $host:$port", ex.getMessage)
  }
}
Example 198
Source File: DownloadParentPoms.scala From scaladex with BSD 3-Clause "New" or "Revised" License | 5 votes |
package ch.epfl.scala.index
package data
package maven

import java.nio.file.{Files, Path}

import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import ch.epfl.scala.index.data.download.PlayWsDownloader
import org.slf4j.LoggerFactory
import play.api.libs.ws.{WSClient, WSRequest, WSResponse}

import scala.util.Failure

class DownloadParentPoms(repository: LocalPomRepository, paths: DataPaths, tmp: Option[Path] = None)(
    implicit val system: ActorSystem,
    implicit val materializer: ActorMaterializer
) extends PlayWsDownloader {

  private val log = LoggerFactory.getLogger(getClass)

  assert(
    repository == LocalPomRepository.MavenCentral || repository == LocalPomRepository.Bintray
  )

  val parentPomsPath = paths.parentPoms(repository)

  val pomReader = tmp match {
    case Some(path) => PomsReader.tmp(paths, path)
    case None => PomsReader(repository, paths)
  }

  // NOTE: in the original source the statements below appear to live inside
  // `def run(lastFailedToDownload: Int = 0): Unit`; that method header was
  // elided in this listing, which is why `lastFailedToDownload`, `downloadRequest`,
  // `processResponse` and the recursive `run(...)` call are unbound here.
  val parentPomsToDownload: Set[Dependency] =
    pomReader
      .load()
      .collect { case Failure(m: MissingParentPom) => m.dep }
      .toSet

  log.debug(s"to download: ${parentPomsToDownload.size}")
  log.debug(s"last failed: $lastFailedToDownload")

  if (parentPomsToDownload.size > lastFailedToDownload) {
    val downloaded = download[Dependency, Int]("Download parent POMs",
                                               parentPomsToDownload,
                                               downloadRequest,
                                               processResponse,
                                               parallelism = 32)
    val failedDownloads = downloaded.sum

    log.warn(s"failed downloads: $failedDownloads")

    if (0 < failedDownloads && parentPomsToDownload.size != failedDownloads) {
      run(failedDownloads) // grand-parent poms, etc
    }
  }
}
Example 199
Source File: BintrayDownloadPoms.scala From scaladex with BSD 3-Clause "New" or "Revised" License | 5 votes |
package ch.epfl.scala.index.data
package bintray

import download.PlayWsDownloader

import java.nio.charset.StandardCharsets
import java.nio.file.{Files, Path}

import play.api.libs.ws.{WSClient, WSRequest, WSResponse}
import play.api.libs.ws.ahc.AhcWSClient

import akka.actor.ActorSystem
import akka.stream.ActorMaterializer

import org.slf4j.LoggerFactory

class BintrayDownloadPoms(paths: DataPaths)(
    implicit val system: ActorSystem,
    implicit val materializer: ActorMaterializer
) extends PlayWsDownloader {

  private val log = LoggerFactory.getLogger(getClass)

  private val bintrayPomBase = paths.poms(LocalPomRepository.Bintray)

  // searchesBySha1, downloadRequest and processPomDownload are defined
  // elsewhere in the original file; this listing only keeps the entry point.
  def run(): Unit = {
    download[BintraySearch, Unit]("Downloading POMs",
                                  searchesBySha1,
                                  downloadRequest,
                                  processPomDownload,
                                  parallelism = 32)
    ()
  }
}
Example 200
Source File: PublishActor.scala From scaladex with BSD 3-Clause "New" or "Revised" License | 5 votes |
package ch.epfl.scala.index
package server
package routes
package api
package impl

import data.DataPaths

import akka.actor.{Actor, ActorSystem}
import akka.stream.ActorMaterializer
import ch.epfl.scala.index.search.DataRepository

import scala.concurrent.Await
import scala.concurrent.duration._

class PublishActor(paths: DataPaths,
                   dataRepository: DataRepository,
                   implicit val system: ActorSystem,
                   implicit val materializer: ActorMaterializer)
    extends Actor {

  private val publishProcess = new impl.PublishProcess(paths, dataRepository)

  def receive = {
    case publishData: PublishData => {
      // TODO be non-blocking, by stashing incoming messages until
      // the publish process has completed
      sender ! Await.result(publishProcess.writeFiles(publishData), 1.minute)
    }
  }
}