akka.stream.scaladsl.Sink Scala Examples

The following examples show how to use akka.stream.scaladsl.Sink. Each example comes from an open-source project; the source file, project, and license are noted above it.
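
Before the project examples, here is a minimal, self-contained sketch of common Sink usage (the names and values are illustrative, not taken from any project below):

import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.{Sink, Source}

import scala.concurrent.Future

object SinkBasics extends App {
  implicit val system: ActorSystem = ActorSystem("sink-basics")
  implicit val materializer: ActorMaterializer = ActorMaterializer()
  import system.dispatcher

  // Sink.fold materializes a Future of the accumulated value.
  val sum: Future[Int] = Source(1 to 10).runWith(Sink.fold(0)(_ + _))

  // Sink.seq collects every element into a Seq.
  val all: Future[Seq[Int]] = Source(1 to 10).runWith(Sink.seq)

  for (s <- sum; xs <- all) {
    println(s"sum=$s, count=${xs.size}") // prints: sum=55, count=10
    system.terminate()
  }
}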
Example 1
Source File: EvalFlowSuite.scala    From iep-apps   with Apache License 2.0
package com.netflix.atlas.stream

import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.Sink
import akka.stream.scaladsl.Source
import com.netflix.atlas.akka.DiagnosticMessage
import com.netflix.atlas.eval.stream.Evaluator.DataSource
import com.netflix.atlas.eval.stream.Evaluator.DataSources
import com.netflix.atlas.eval.stream.Evaluator.MessageEnvelope
import com.netflix.spectator.api.NoopRegistry
import com.typesafe.config.ConfigFactory
import org.scalatest.funsuite.AnyFunSuite

import scala.concurrent.Await
import scala.concurrent.duration.Duration

class EvalFlowSuite extends AnyFunSuite {
  private implicit val system = ActorSystem(getClass.getSimpleName)
  private implicit val mat = ActorMaterializer()
  private val config = ConfigFactory.load
  private val registry = new NoopRegistry()
  private val validateNoop: DataSource => Unit = _ => ()

  private val dataSourceStr =
    """[{"id":"abc", "step": 10, "uri":"http://local-dev/api/v1/graph?q=name,a,:eq"}]"""

  test("register and get message") {

    val evalService = new EvalService(config, registry, null, system) {
      override def updateDataSources(streamId: String, dataSources: DataSources): Unit = {
        val handler = getStreamInfo(streamId).handler
        handler.offer(new MessageEnvelope("mockId", DiagnosticMessage.info("mockMsg")))
        handler.complete()
      }
    }

    val evalFlow = EvalFlow.createEvalFlow(evalService, DataSourceValidator(10, validateNoop))

    val future = Source
      .single(dataSourceStr)
      .via(evalFlow)
      .filter(envelope => envelope.getId != "_") //filter out heartbeat
      .runWith(Sink.head)
    val messageEnvelope = Await.result(future, Duration.Inf)

    assert(messageEnvelope.getId === "mockId")
  }
} 
Example 2
Source File: UserProjection.scala    From whirlwind-tour-akka-typed   with Apache License 2.0
package de.heikoseeberger.wtat

import akka.actor.Scheduler
import akka.actor.typed.{ ActorRef, Behavior }
import akka.actor.typed.scaladsl.Actor
import akka.actor.typed.scaladsl.AskPattern.Askable
import akka.cluster.Cluster
import akka.cluster.ddata.{ ORSet, ORSetKey }
import akka.cluster.ddata.Replicator.WriteLocal
import akka.cluster.ddata.typed.scaladsl.{ DistributedData, Replicator }
import akka.persistence.query.EventEnvelope
import akka.persistence.query.scaladsl.EventsByPersistenceIdQuery
import akka.stream.Materializer
import akka.stream.scaladsl.Sink
import akka.util.Timeout
import cats.instances.string._
import cats.syntax.eq._
import org.apache.logging.log4j.scala.Logging
import scala.concurrent.duration.FiniteDuration

object UserProjection extends Logging {
  import akka.actor.typed.scaladsl.adapter._

  sealed trait Command
  final case object Stop                              extends Command
  private final case object HandleEventStreamComplete extends Command

  abstract class EventStreamCompleteException
      extends IllegalStateException("Event stream completed unexpectedly!")
  private final case object EventStreamCompleteException extends EventStreamCompleteException

  final val Name = "user-projection"

  final val usersKey: ORSetKey[User] =
    ORSetKey("users")

  def apply(readJournal: EventsByPersistenceIdQuery,
            userView: ActorRef[UserView.Command],
            askTimeout: FiniteDuration)(implicit mat: Materializer): Behavior[Command] =
    Actor.deferred { context =>
      implicit val c: Cluster   = Cluster(context.system.toUntyped)
      implicit val s: Scheduler = context.system.scheduler
      implicit val t: Timeout   = askTimeout
      val replicator            = DistributedData(context.system).replicator
      val self                  = context.self

      readJournal
        .eventsByPersistenceId(UserRepository.Name, 0, Long.MaxValue)
        .collect { case EventEnvelope(_, _, _, event: UserRepository.Event) => event }
        .mapAsync(1) {
          case UserRepository.UserAdded(user) =>
            replicator ? Replicator.Update(usersKey, ORSet.empty[User], WriteLocal)(_ + user)

          case UserRepository.UserRemoved(username) =>
            replicator ? Replicator.Update(usersKey, ORSet.empty[User], WriteLocal) { users =>
              users.elements.find(_.username.value === username).fold(users)(users - _)
            }
        }
        .runWith(Sink.onComplete(_ => self ! HandleEventStreamComplete))
      logger.debug("Running event stream")

      Actor.immutable {
        case (_, Stop)                      => Actor.stopped
        case (_, HandleEventStreamComplete) => throw EventStreamCompleteException
      }
    }
} 
Example 3
Source File: SQLQuerySpec.scala    From scruid   with Apache License 2.0
package ing.wbaa.druid

import java.time.{ LocalDateTime, ZonedDateTime }

import akka.stream.scaladsl.Sink
import ing.wbaa.druid.SQL._
import ing.wbaa.druid.client.CirceDecoders
import io.circe.generic.auto._
import org.scalatest.concurrent.ScalaFutures
import org.scalatest.matchers.should.Matchers
import org.scalatest.time.{ Millis, Seconds, Span }
import org.scalatest.wordspec.AnyWordSpec

class SQLQuerySpec extends AnyWordSpec with Matchers with ScalaFutures with CirceDecoders {
  implicit override val patienceConfig =
    PatienceConfig(timeout = Span(20, Seconds), interval = Span(5, Millis))
  private val totalNumberOfEntries  = 39244
  private val usOnlyNumberOfEntries = 528

  implicit val config = DruidConfig()
  implicit val mat    = config.client.actorMaterializer

  case class Result(hourTime: ZonedDateTime, count: Int)

  "SQL query" should {

    val sqlQuery: SQLQuery = dsql"""
      |SELECT FLOOR(__time to HOUR) AS hourTime, count(*) AS "count"
      |FROM wikipedia
      |WHERE "__time" BETWEEN TIMESTAMP '2015-09-12 00:00:00' AND TIMESTAMP '2015-09-13 00:00:00'
      |GROUP BY 1
      |""".stripMargin

    "successfully be interpreted by Druid" in {
      val resultsF = sqlQuery.execute()
      whenReady(resultsF) { response =>
        response.list[Result].map(_.count).sum shouldBe totalNumberOfEntries
      }
    }

    "support streaming" in {
      val resultsF = sqlQuery.streamAs[Result]().runWith(Sink.seq)

      whenReady(resultsF) { results =>
        results.map(_.count).sum shouldBe totalNumberOfEntries
      }
    }
  }

  "SQL parameterized query" should {

    val fromDateTime   = LocalDateTime.of(2015, 9, 12, 0, 0, 0, 0)
    val untilDateTime  = fromDateTime.plusDays(1)
    val countryIsoCode = "US"

    val sqlQuery: SQLQuery =
      dsql"""
      |SELECT FLOOR(__time to HOUR) AS hourTime, count(*) AS "count"
      |FROM wikipedia
      |WHERE "__time" BETWEEN ${fromDateTime} AND ${untilDateTime} AND countryIsoCode = ${countryIsoCode}
      |GROUP BY 1
      |""".stripMargin

    "be expressed as a parameterized query with three parameters" in {
      sqlQuery.query.count(_ == '?') shouldBe 3
      sqlQuery.parameters.size shouldBe 3

      sqlQuery.parameters(0) shouldBe SQLQueryParameter(SQLQueryParameterType.Timestamp,
                                                        "2015-09-12 00:00:00")
      sqlQuery.parameters(1) shouldBe SQLQueryParameter(SQLQueryParameterType.Timestamp,
                                                        "2015-09-13 00:00:00")
      sqlQuery.parameters(2) shouldBe SQLQueryParameter(SQLQueryParameterType.Varchar, "US")
    }

    "successfully be interpreted by Druid" in {
      val resultsF = sqlQuery.execute()
      whenReady(resultsF) { response =>
        response.list[Result].map(_.count).sum shouldBe usOnlyNumberOfEntries
      }
    }

    "support streaming" in {
      val resultsF = sqlQuery.streamAs[Result]().runWith(Sink.seq)

      whenReady(resultsF) { results =>
        results.map(_.count).sum shouldBe usOnlyNumberOfEntries
      }

    }

  }
} 
Example 4
Source File: ExperimentVariantEventTest.scala    From izanami   with Apache License 2.0
package domains.abtesting
import java.time.LocalDateTime
import java.time.temporal.ChronoUnit

import akka.NotUsed
import akka.actor.ActorSystem
import akka.stream.scaladsl.{Flow, Sink, Source}
import domains.Key
import domains.abtesting.events._
import org.scalatest.concurrent.{IntegrationPatience, ScalaFutures}
import test.IzanamiSpec

class ExperimentVariantEventTest extends IzanamiSpec with ScalaFutures with IntegrationPatience {

  "ExperimentVariantEvent" must {
    "aggregate event" in {

      implicit val system: ActorSystem = ActorSystem()

      val variantId = "vId"
      val variant   = Variant(variantId, "None", None, Traffic(0), None)
      val flow: Flow[ExperimentVariantEvent, VariantResult, NotUsed] =
        ExperimentVariantEvent.eventAggregation("experiment.id", 1, ChronoUnit.HOURS)

      val firstDate = LocalDateTime.now().minus(5, ChronoUnit.HOURS)

      val experimentKey = Key(s"experiment:id")
      def experimentVariantEventKey(counter: Int): ExperimentVariantEventKey =
        ExperimentVariantEventKey(experimentKey, variantId, s"client:id:$counter", "namespace", s"$counter")
      def clientId(i: Int): String    = s"client:id:$i"
      def date(i: Int): LocalDateTime = firstDate.plus(15 * i, ChronoUnit.MINUTES)

      val source = (1 to 20)
        .flatMap { counter =>
          val d   = date(counter)
          val key = experimentVariantEventKey(counter)

          counter match {
            case i if i % 2 > 0 =>
              List(ExperimentVariantDisplayed(key, experimentKey, clientId(i), variant, d, 0, variantId))
            case i =>
              List(
                ExperimentVariantDisplayed(key, experimentKey, clientId(i), variant, d, 0, variantId),
                ExperimentVariantWon(key, experimentKey, clientId(i), variant, d, 0, variantId)
              )
          }
        }

      val expectedEvents = Seq(
        ExperimentResultEvent(experimentKey, variant, date(1), 0.0, "vId"),
        ExperimentResultEvent(experimentKey, variant, date(5), 40.0, "vId"),
        ExperimentResultEvent(experimentKey, variant, date(9), 44.44444444444444, "vId"),
        ExperimentResultEvent(experimentKey, variant, date(13), 46.15384615384615, "vId"),
        ExperimentResultEvent(experimentKey, variant, date(17), 47.05882352941177, "vId")
      )

      val evts      = Source(source).via(flow).runWith(Sink.seq).futureValue
      val allEvents = evts.flatMap(_.events)

      allEvents must be(expectedEvents)
    }
  }

} 
Example 5
Source File: SearchController.scala    From izanami   with Apache License 2.0
package controllers

import akka.actor.ActorSystem
import akka.stream.scaladsl.{GraphDSL, Interleave, Sink, Source}
import akka.stream.{ActorMaterializer, SourceShape}
import controllers.actions.SecuredAuthContext
import domains.abtesting.ExperimentService
import domains.config.ConfigService
import domains.feature.FeatureService
import domains.script.GlobalScriptService
import domains.configuration.GlobalContext
import play.api.libs.json.{JsArray, JsValue, Json}
import play.api.mvc.{AbstractController, ActionBuilder, AnyContent, ControllerComponents}
import store.Query
import zio.{Runtime, ZIO}
import libs.http.HttpContext

class SearchController(AuthAction: ActionBuilder[SecuredAuthContext, AnyContent], cc: ControllerComponents)(
    implicit system: ActorSystem,
    R: HttpContext[GlobalContext]
) extends AbstractController(cc) {

  import libs.http._

  def search(pattern: String, features: Boolean, configs: Boolean, experiments: Boolean, scripts: Boolean) =
    AuthAction.asyncTask[GlobalContext] { ctx =>
      val query: Query = Query.oneOf(ctx.authorizedPatterns).and(pattern.split(",").toList)

      for {
        featuresRes <- if (features)
                        FeatureService
                          .findByQuery(query, 1, 10)
                          .map(_.results.map(value => Json.obj("type" -> "features", "id" -> Json.toJson(value.id))))
                          .map(value => Source(value.toList))
                      else ZIO.succeed(Source.empty[JsValue])

        configsRes <- if (configs)
                       ConfigService
                         .findByQuery(query, 1, 10)
                         .map(
                           _.results.map(value => Json.obj("type" -> "configurations", "id" -> Json.toJson(value.id)))
                         )
                         .map(value => Source(value.toList))
                     else ZIO.succeed(Source.empty[JsValue])

        experimentsRes <- if (experiments)
                           ExperimentService
                             .findByQuery(query, 1, 10)
                             .map(
                               _.results.map(value => Json.obj("type" -> "experiments", "id" -> Json.toJson(value.id)))
                             )
                             .map(value => Source(value.toList))
                         else ZIO.succeed(Source.empty[JsValue])

        scriptsRes <- if (scripts)
                       GlobalScriptService
                         .findByQuery(query, 1, 10)
                         .map(_.results.map(value => Json.obj("type" -> "scripts", "id" -> Json.toJson(value.id))))
                         .map(value => Source(value.toList))
                     else ZIO.succeed(Source.empty[JsValue])

        res <- ZIO.fromFuture { implicit ec =>
                val all = Source.fromGraph(GraphDSL.create() { implicit builder =>
                  import GraphDSL.Implicits._

                  val interleave = builder.add(Interleave[JsValue](4, 1))
                  featuresRes ~> interleave.in(0)
                  configsRes ~> interleave.in(1)
                  experimentsRes ~> interleave.in(2)
                  scriptsRes ~> interleave.in(3)

                  SourceShape(interleave.out)
                })
                all.take(10).runWith(Sink.seq) map { jsons =>
                  Ok(JsArray(jsons))
                }
              }
      } yield res
    }

} 
Example 6
Source File: InitIza.scala    From izanami   with Apache License 2.0
package experiments

import akka.actor.ActorSystem
import akka.http.scaladsl.Http
import akka.http.scaladsl.model._
import akka.http.scaladsl.model.headers.RawHeader
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.{Sink, Source}
import akka.util.ByteString

import scala.collection.immutable
import scala.concurrent.Future

object InitIza extends App {

  implicit val system: ActorSystem             = ActorSystem()
  implicit val materializer: ActorMaterializer = ActorMaterializer()
  import system.dispatcher

  private val http = Http()

  private val features = "http://localhost:9000/api/features"
  //private val features = "http://izanami-perfs.cleverapps.io/api/features"

  Source(0 to 2000)
    .mapAsyncUnordered(10) { postFeature }
    .alsoTo(Sink.foreach {
      case (c, s) if c == StatusCodes.Created =>
      case (c, s) =>
        println(s"Oups $c $s")
    })
    .runWith(Sink.ignore)
    .onComplete { _ =>
      println("Done")
    }

  private def postFeature(i: Int): Future[(StatusCode, String)] = {

    val headers: immutable.Seq[HttpHeader] = immutable.Seq(
      RawHeader("Izanami-Client-Id", "xxxx"),
      RawHeader("Izanami-Client-Secret", "xxxx")
    )

    val body =
      s"""
        | {
        |   "id": "a:key:$i",
        |   "enabled": true,
        |   "activationStrategy": "NO_STRATEGY"
        | }
      """.stripMargin

    http
      .singleRequest(
        HttpRequest(
          HttpMethods.POST,
          Uri(features),
          headers = headers,
          entity = HttpEntity.Strict(ContentTypes.`application/json`, ByteString(body))
        )
      )
      .flatMap {
        case HttpResponse(code, _, entity, _) =>
          entity.dataBytes.map(_.utf8String).runFold("")((acc, chunk) => acc + chunk).map(s => (code, s))
      }
  }

} 
Example 7
Source File: AkkaStreamOps.scala    From phobos   with Apache License 2.0
package ru.tinkoff.phobos.ops

import akka.NotUsed
import akka.stream.scaladsl.{Flow, Keep, Sink}
import javax.xml.stream.XMLStreamConstants
import ru.tinkoff.phobos.decoding._
import scala.concurrent.Future

private[phobos] trait AkkaStreamOps {

  // decodingFlow[A](charset), used by the methods below, is defined in this trait as well; elided from this excerpt.
  def decodingFlowUnsafe[A: XmlDecoder](charset: String = "UTF-8"): Flow[Array[Byte], A, NotUsed] =
    decodingFlow(charset).map(_.fold(throw _, identity))

  def decodingSink[A: XmlDecoder](charset: String = "UTF-8"): Sink[Array[Byte], Future[Either[DecodingError, A]]] =
    decodingFlow(charset).toMat(Sink.head)(Keep.right)

  def decodingSinkUnsafe[A: XmlDecoder](charset: String = "UTF-8"): Sink[Array[Byte], Future[A]] =
    decodingFlowUnsafe(charset).toMat(Sink.head)(Keep.right)
}

private[phobos] case class SinkDecoderState[A](
    xmlStreamReader: XmlStreamReader,
    cursor: Cursor,
    elementDecoder: ElementDecoder[A]
) {
  def withEncoder(that: ElementDecoder[A]): SinkDecoderState[A] = copy(elementDecoder = that)
}

private[phobos] object SinkDecoderState {

  def initial[A](xmlDecoder: XmlDecoder[A], charset: String): SinkDecoderState[A] = {
    val sr: XmlStreamReader = XmlDecoder.createStreamReader(charset)
    val cursor              = new Cursor(sr)
    SinkDecoderState(
      xmlStreamReader = sr,
      cursor = cursor,
      elementDecoder = xmlDecoder.elementdecoder
    )
  }
} 
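
A hedged usage sketch for the sinks above, assuming phobos exposes these operations publicly and an implicit XmlDecoder for the target type is in scope; Order and decodeFirst are hypothetical names:

import akka.NotUsed
import akka.stream.Materializer
import akka.stream.scaladsl.Source
import ru.tinkoff.phobos.decoding.{DecodingError, XmlDecoder}

import scala.concurrent.Future

// Hypothetical element type; a real XmlDecoder[Order] would be derived separately.
final case class Order(id: String)

// Feeds XML byte chunks into decodingSink and yields the decoded element
// (or a DecodingError) once the stream completes.
def decodeFirst(xmlChunks: Source[Array[Byte], NotUsed])(
    implicit mat: Materializer,
    decoder: XmlDecoder[Order]
): Future[Either[DecodingError, Order]] =
  xmlChunks.runWith(decodingSink[Order]()) // decodingSink from AkkaStreamOps above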
Example 8
Source File: UseCase.scala    From Fast-Data-Processing-Systems-with-SMACK-Stack   with MIT License
import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.{Sink, Source}
import com.softwaremill.react.kafka.KafkaMessages._
import org.apache.kafka.common.serialization.{StringSerializer, StringDeserializer}
import com.softwaremill.react.kafka.{ProducerMessage, ConsumerProperties, ProducerProperties, ReactiveKafka}
import org.reactivestreams.{ Publisher, Subscriber }

implicit val actorSystem = ActorSystem("ReactiveKafka")
implicit val materializer = ActorMaterializer()

val kafka = new ReactiveKafka()
val publisher: Publisher[StringConsumerRecord] = kafka.consume(ConsumerProperties(
 bootstrapServers = "localhost:9092",
 topic = "lowercaseStrings",
 groupId = "groupName",
 valueDeserializer = new StringDeserializer()
))

val subscriber: Subscriber[StringProducerMessage] = kafka.publish(ProducerProperties(
  bootstrapServers = "localhost:9092",
  topic = "uppercaseStrings",
  valueSerializer = new StringSerializer()
))

Source.fromPublisher(publisher).map(m => ProducerMessage(m.value().toUpperCase))
  .to(Sink.fromSubscriber(subscriber)).run() 
Example 9
Source File: Client.scala    From twitter4s   with Apache License 2.0
package com.danielasfregola.twitter4s.http.clients

import akka.actor.ActorSystem
import akka.http.scaladsl.model.{HttpRequest, HttpResponse}
import akka.stream.Materializer
import akka.stream.scaladsl.{Sink, Source}
import com.danielasfregola.twitter4s.http.oauth.OAuth1Provider

import scala.concurrent.Future

trait Client extends OAuthClient {

  val withLogRequest = false
  val withLogRequestResponse = true

  def oauthProvider: OAuth1Provider

  protected def sendAndReceive[T](request: HttpRequest, f: HttpResponse => Future[T])(
      implicit system: ActorSystem,
      materializer: Materializer): Future[T] = {
    implicit val r: HttpRequest = request
    val requestStartTime = System.currentTimeMillis

    if (withLogRequest) logRequest

    Source
      .single(request)
      .via(connection)
      .mapAsync(1)(implicit response => unmarshal(requestStartTime, f))
      .runWith(Sink.head)
  }

} 
Example 10
Source File: TFServingModelServer.scala    From model-serving-tutorial   with Apache License 2.0
package com.lightbend.modelserving.tensorflowserving

import akka.actor.typed.scaladsl.Behaviors
import akka.actor.typed.scaladsl.adapter._
import akka.actor.typed.{ActorRef, ActorSystem}
import akka.http.scaladsl.Http
import akka.kafka.scaladsl.Consumer
import akka.kafka.{ConsumerSettings, Subscriptions}
import akka.stream.scaladsl.Sink
import akka.stream.typed.scaladsl.{ActorFlow, ActorMaterializer}
import akka.util.Timeout
import com.lightbend.modelserving.configuration.ModelServingConfiguration
import com.lightbend.modelserving.model.ServingResult
import com.lightbend.modelserving.winemodel.DataRecord
import org.apache.kafka.clients.consumer.ConsumerConfig
import org.apache.kafka.common.serialization.ByteArrayDeserializer

import scala.concurrent.duration._
import scala.util.Success


object TFServingModelServer {

  import ModelServingConfiguration._

  // Initialization

  implicit val modelServer = ActorSystem(
    Behaviors.setup[TFModelServerActor](
      context => new TFModelServerBehaviour(context)), "ModelServing")

  implicit val materializer = ActorMaterializer()
  implicit val executionContext = modelServer.executionContext
  implicit val askTimeout = Timeout(30.seconds)

  // Configuration properties for the Kafka topic.
  val dataSettings = ConsumerSettings(modelServer.toUntyped, new ByteArrayDeserializer, new ByteArrayDeserializer)
    .withBootstrapServers(KAFKA_BROKER)
    .withGroupId(DATA_GROUP)
    .withProperty(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest")

  def main(args: Array[String]): Unit = {

    println(s"Akka application that uses TensorFlow Serving, brokers $KAFKA_BROKER")

    // Data stream processing
    Consumer.atMostOnceSource(dataSettings, Subscriptions.topics(DATA_TOPIC))
      .map(record => DataRecord.wineFromByteArray(record.value)).collect { case Success(a) => a }
      .via(ActorFlow.ask(1)(modelServer)((elem, replyTo : ActorRef[Option[ServingResult[Double]]]) => new ServeData(replyTo, elem)))
      .collect{ case Some(result) => result}
      .runWith(Sink.foreach(result =>
        println(s"Model served in ${System.currentTimeMillis() - result.submissionTs} ms, with result ${result.result} " +
          s"(model ${result.name}, data type ${result.dataType})")))
    // Rest Server
    startRest(modelServer)
  }

  def startRest(modelServerManager: ActorSystem[TFModelServerActor]): Unit = {

    implicit val timeout = Timeout(10.seconds)
    implicit val system = modelServerManager.toUntyped

    val host = "0.0.0.0"
    val port = MODELSERVING_PORT
    val routes = TFQueriesAkkaHttpResource.storeRoutes(modelServerManager)(modelServerManager.scheduler)

    val _ = Http().bindAndHandle(routes, host, port) map
      { binding =>
        println(s"Starting models observer on port ${binding.localAddress}") } recover {
      case ex =>
        println(s"Models observer could not bind to $host:$port - ${ex.getMessage}")
    }
  }
} 
Example 11
Source File: SocketWordCountTest.scala    From apache-spark-test   with Apache License 2.0
package com.github.dnvriend.spark.sstreaming

import akka.Done
import akka.stream.scaladsl.Tcp._
import akka.stream.scaladsl.{ Flow, Sink, Source, Tcp }
import akka.util.ByteString
import com.github.dnvriend.TestSpec
import org.scalatest.Ignore

import scala.collection.immutable._
import scala.concurrent.Future
import scala.concurrent.duration._

@Ignore
class SocketWordCountTest extends TestSpec {
  def withSocketServer(xs: Seq[String])(f: Future[Done] => Unit): Unit = {
    val connections: Source[IncomingConnection, Future[ServerBinding]] = Tcp().bind("127.0.0.1", 9999)
    val socketServer = connections.runForeach { connection =>
      println(s"New connection from: ${connection.remoteAddress}")
      val src = Source.cycle(() => xs.iterator).map(txt => ByteString(txt) ++ ByteString("\n"))
        .flatMapConcat(msg => Source.tick(0.seconds, 200.millis, msg))
      val echo = Flow.fromSinkAndSource(Sink.ignore, src)
      connection.handleWith(echo)
    }
    f(socketServer)
  }

  it should "a running word count of text data received via a TCP server" in withSparkSession { spark =>
    withSocketServer(List("apache spark")) { socketServer =>
      import spark.implicits._

      val lines = spark.readStream
        .format("socket")
        .option("host", "localhost")
        .option("port", 9999)
        .load()

      // Split the lines into words
      val words = lines.as[String].flatMap(_.split(" "))

      // Generate running word count
      val wordCounts = words.groupBy("value").count()

      // Start running the query that prints the running counts to the console
      val query = wordCounts.writeStream
        .outputMode("complete")
        .format("console")
        .start()

      query.awaitTermination(10.seconds)
    }
  }
} 
Example 12
Source File: LogProgress.scala    From apache-spark-test   with Apache License 2.0
package com.github.dnvriend

import akka.NotUsed
import akka.event.LoggingAdapter
import akka.stream.FlowShape
import akka.stream.scaladsl.{ Broadcast, Flow, GraphDSL, Sink }

import scala.compat.Platform
import scala.collection.immutable._

object LogProgress {
  def flow[A](each: Long = 1000)(implicit log: LoggingAdapter = null): Flow[A, A, NotUsed] = Flow.fromGraph[A, A, NotUsed](GraphDSL.create() { implicit b =>
    import GraphDSL.Implicits._
    val logFlow = Flow[A].statefulMapConcat { () =>
      var last = Platform.currentTime
      var num = 0L
      (x: A) =>
        num += 1
        if (num % each == 0) {
          val duration = Platform.currentTime - last
          val logOpt = Option(log)
          logOpt.foreach(_.info("[{} ms / {}]: {}", duration, each, num))
          if (logOpt.isEmpty) println(s"[$duration ms / $each]: $num")
          last = Platform.currentTime
        }
        Iterable(x)
    }
    val bcast = b.add(Broadcast[A](2, eagerCancel = false))
    bcast ~> logFlow ~> Sink.ignore
    FlowShape.of(bcast.in, bcast.out(1))
  })
} 
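
A hedged usage sketch of the flow above: it assumes a running ActorSystem and obtains the implicit LoggingAdapter via akka.event.Logging (when the adapter is null, progress is printed to stdout instead).

import akka.actor.ActorSystem
import akka.event.{Logging, LoggingAdapter}
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.{Sink, Source}

object LogProgressExample extends App {
  implicit val system: ActorSystem = ActorSystem("log-progress")
  implicit val mat: ActorMaterializer = ActorMaterializer()
  implicit val log: LoggingAdapter = Logging(system, "progress")

  // Logs timing every 1000 elements while passing every element through unchanged.
  Source(1 to 10000)
    .via(LogProgress.flow[Int](each = 1000))
    .runWith(Sink.ignore)
    .onComplete(_ => system.terminate())(system.dispatcher)
}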
Example 13
Source File: ReadJournalSource.scala    From apache-spark-test   with Apache License 2.0
package akka.persistence.jdbc.spark.sql.execution.streaming

import akka.actor.{ ActorSystem, ExtendedActorSystem }
import akka.persistence.query.PersistenceQuery
import akka.persistence.query.scaladsl.{ CurrentEventsByPersistenceIdQuery, CurrentEventsByTagQuery, CurrentPersistenceIdsQuery, ReadJournal }
import akka.stream.scaladsl.Sink
import akka.stream.scaladsl.extension.{ Sink => Snk }
import akka.stream.{ ActorMaterializer, Materializer }
import org.apache.spark.sql._
import org.apache.spark.sql.execution.streaming.{ LongOffset, Offset, Source }
import org.apache.spark.sql.types.StructType

import scala.collection.immutable._
import scala.concurrent.duration.{ FiniteDuration, _ }
import scala.concurrent.{ Await, ExecutionContext, Future }

trait ReadJournalSource {
  _: Source =>
  def readJournalPluginId: String
  def sqlContext: SQLContext

  // some machinery
  implicit val system: ActorSystem = ActorSystem()
  implicit val mat: Materializer = ActorMaterializer()
  implicit val ec: ExecutionContext = system.dispatcher

  // read journal, only interested in the Current queries, as Spark isn't asynchronous
  lazy val readJournal = PersistenceQuery(system).readJournalFor(readJournalPluginId)
    .asInstanceOf[ReadJournal with CurrentPersistenceIdsQuery with CurrentEventsByPersistenceIdQuery with CurrentEventsByTagQuery]

  implicit class FutureOps[A](f: Future[A])(implicit ec: ExecutionContext, timeout: FiniteDuration = null) {
    def futureValue: A = Await.result(f, Option(timeout).getOrElse(10.seconds))
  }

  def maxPersistenceIds: Long =
    readJournal.currentPersistenceIds().runWith(Snk.count).futureValue

  def persistenceIds(start: Long, end: Long) =
    readJournal.currentPersistenceIds().drop(start).take(end).runWith(Sink.seq).futureValue

  def maxEventsByPersistenceId(pid: String): Long =
    readJournal.currentEventsByPersistenceId(pid, 0, Long.MaxValue).runWith(Snk.count).futureValue

  def eventsByPersistenceId(pid: String, start: Long, end: Long, eventMapperFQCN: String): Seq[Row] = {
    readJournal.currentEventsByPersistenceId(pid, start, end)
      .map(env => getMapper(eventMapperFQCN).get.row(env, sqlContext)).runWith(Sink.seq).futureValue
  }

  implicit def mapToDataFrame(rows: Seq[Row]): DataFrame = {
    import scala.collection.JavaConversions._
    sqlContext.createDataFrame(rows, schema)
  }

  def getStartEnd(_start: Option[Offset], _end: Offset): (Long, Long) = (_start, _end) match {
    case (Some(LongOffset(start)), LongOffset(end)) => (start, end)
    case (None, LongOffset(end))                    => (0L, end)
  }

  def getMapper(eventMapperFQCN: String): Option[EventMapper] =
    system.asInstanceOf[ExtendedActorSystem].dynamicAccess.createInstanceFor[EventMapper](eventMapperFQCN, List.empty)
      .recover { case cause => cause.printStackTrace(); null }.toOption

  override def stop(): Unit = {
    println("Stopping jdbc read journal")
    system.terminate()
  }
} 
Example 14
Source File: SparkImplicits.scala    From apache-spark-test   with Apache License 2.0
package com.github.dnvriend.spark.datasources

import java.util.Properties

import akka.NotUsed
import akka.stream.Materializer
import akka.stream.scaladsl.{ Sink, Source }
import org.apache.spark.sql._
import org.apache.spark.sql.streaming.DataStreamReader

import scala.collection.immutable._
import scala.concurrent.duration.{ FiniteDuration, _ }
import scala.concurrent.{ Await, Future }
import scala.reflect.runtime.universe._
import slick.driver.PostgresDriver.api._

object SparkImplicits {
  implicit class DataSourceOps(dfr: DataFrameReader) {
    def helloworld(path: String): DataFrame = dfr.format("helloworld").load(path)
    def person(path: String): DataFrame = dfr.format("person").load(path)
    def jdbc(table: String)(implicit jdbcOptions: Map[String, String]): DataFrame =
      dfr.format("jdbc").options(jdbcOptions ++ Map("dbtable" -> table)).load()
  }

  implicit class DataStreamReaderOps(dsr: DataStreamReader) {
    def currentPersistenceIds(path: String = "jdbc-read-journal"): DataFrame = dsr.format("current-persistence-id").load(path)
    def eventsByPersistenceId(path: String = "jdbc-read-journal"): DataFrame = dsr.format("current-events-by-persistence-id").load(path)
  }

  implicit class DataFrameWriterOps[T](dfw: DataFrameWriter[T]) {
    
    def ignore = dfw.mode(SaveMode.Ignore)

    def jdbc(table: String)(implicit jdbcOptions: Map[String, String]) = {
      val properties = jdbcOptions.foldLeft(new Properties) { case (prop, (k, v)) => prop.put(k, v); prop }
      dfw.jdbc(jdbcOptions("url"), table, properties)
      // does not (yet) work see: https://issues.apache.org/jira/browse/SPARK-7646
      // dfw.format("jdbc").mode(SaveMode.Overwrite).options(jdbcOptions ++ Map("dbtable" -> table))
    }
  }

  trait DataFrameQueryGenerator[A] {
    def upsert: String
  }

  implicit class DatasetOps(df: DataFrame) {
    def withSession[A](db: Database)(f: Session => A): A = {
      val session = db.createSession()
      try f(session) finally session.close()
    }

    def withStatement[A](db: Database)(f: java.sql.Statement => A): A =
      withSession(db)(session ⇒ session.withStatement()(f))

    def upsert[A](table: String)(implicit db: Database, dfq: DataFrameQueryGenerator[A]): DataFrame = withStatement(db) { stmt =>
      stmt.executeUpdate(dfq.upsert)
      df
    }
  }

  implicit class SparkSessionOps(spark: SparkSession) {
    def fromFuture[A <: Product: TypeTag](data: Future[Seq[A]])(implicit _timeout: FiniteDuration = null): DataFrame =
      spark.createDataFrame(Await.result(data, Option(_timeout).getOrElse(15.minutes)))

    def fromSource[A <: Product: TypeTag](data: Source[A, NotUsed])(implicit _timeout: FiniteDuration = null, mat: Materializer): DataFrame =
      fromFuture(data.runWith(Sink.seq))
  }
} 
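
A hedged usage sketch of SparkSessionOps.fromSource above, assuming an active SparkSession and an implicit Materializer; Person is a hypothetical record type:

import akka.stream.Materializer
import akka.stream.scaladsl.Source
import com.github.dnvriend.spark.datasources.SparkImplicits._
import org.apache.spark.sql.{DataFrame, SparkSession}

// Hypothetical record type for the illustration; must be a Product with a TypeTag.
case class Person(name: String, age: Int)

object FromSourceExample {
  // Drains the Akka Streams source (Sink.seq under the hood), then builds a DataFrame.
  def peopleDf(spark: SparkSession)(implicit mat: Materializer): DataFrame =
    spark.fromSource(Source(List(Person("alice", 42), Person("bob", 7))))
}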
Example 15
Source File: LocalFilePersistService.scala    From iep-apps   with Apache License 2.0
package com.netflix.atlas.persistence

import akka.Done
import akka.NotUsed
import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.Flow
import akka.stream.scaladsl.Keep
import akka.stream.scaladsl.RestartFlow
import akka.stream.scaladsl.Sink
import com.netflix.atlas.akka.StreamOps
import com.netflix.atlas.akka.StreamOps.SourceQueue
import com.netflix.atlas.core.model.Datapoint
import com.netflix.iep.service.AbstractService
import com.netflix.spectator.api.Registry
import com.typesafe.config.Config
import com.typesafe.scalalogging.StrictLogging
import javax.inject.Inject
import javax.inject.Singleton

import scala.concurrent.Await
import scala.concurrent.Future
import scala.concurrent.duration.Duration

@Singleton
class LocalFilePersistService @Inject()(
  val config: Config,
  val registry: Registry,
  // S3CopyService is actually NOT used by this service, it is here just to guarantee that the
  // shutdown callback (stopImpl) of this service is invoked before S3CopyService's
  val s3CopyService: S3CopyService,
  implicit val system: ActorSystem
) extends AbstractService
    with StrictLogging {
  implicit val ec = scala.concurrent.ExecutionContext.global
  implicit val mat = ActorMaterializer()

  private val queueSize = config.getInt("atlas.persistence.queue-size")

  private val fileConfig = config.getConfig("atlas.persistence.local-file")
  private val dataDir = fileConfig.getString("data-dir")
  private val maxRecords = fileConfig.getLong("max-records")
  private val maxDurationMs = fileConfig.getDuration("max-duration").toMillis
  private val maxLateDurationMs = fileConfig.getDuration("max-late-duration").toMillis
  private val rollingConf = RollingConfig(maxRecords, maxDurationMs, maxLateDurationMs)

  require(queueSize > 0)
  require(maxRecords > 0)
  require(maxDurationMs > 0)

  private var queue: SourceQueue[Datapoint] = _
  private var flowComplete: Future[Done] = _

  override def startImpl(): Unit = {
    logger.info("Starting service")
    val (q, f) = StreamOps
      .blockingQueue[Datapoint](registry, "LocalFilePersistService", queueSize)
      .via(getRollingFileFlow)
      .toMat(Sink.ignore)(Keep.both)
      .run
    queue = q
    flowComplete = f
  }

  private def getRollingFileFlow(): Flow[Datapoint, NotUsed, NotUsed] = {
    import scala.concurrent.duration._
    RestartFlow.withBackoff(
      minBackoff = 1.second,
      maxBackoff = 3.seconds,
      randomFactor = 0,
      maxRestarts = -1
    ) { () =>
      Flow.fromGraph(
        new RollingFileFlow(dataDir, rollingConf, registry)
      )
    }
  }

  // This service should stop the Akka flow when application is shutdown gracefully, and let
  // S3CopyService do the cleanup. It should trigger:
  //   1. stop taking more data points (monitor droppedQueueClosed)
  //   2. close current file writer so that last file is ready to copy to s3
  override def stopImpl(): Unit = {
    logger.info("Stopping service")
    queue.complete()
    Await.result(flowComplete, Duration.Inf)
    logger.info("Stopped service")
  }

  def persist(dp: Datapoint): Unit = {
    queue.offer(dp)
  }
} 
Example 16
Source File: VoiceUDPHandler.scala    From AckCord   with MIT License
package ackcord.voice

import java.net.InetSocketAddress

import scala.concurrent.duration._
import scala.util.{Failure, Success}

import ackcord.data.{RawSnowflake, UserId}
import akka.NotUsed
import akka.actor.typed._
import akka.actor.typed.scaladsl._
import akka.stream.OverflowStrategy
import akka.stream.scaladsl.{Keep, Sink, Source, SourceQueueWithComplete}
import akka.util.ByteString
import org.slf4j.Logger

object VoiceUDPHandler {

  def apply(
      address: String,
      port: Int,
      ssrc: Int,
      serverId: RawSnowflake,
      userId: UserId,
      soundProducer: Source[ByteString, NotUsed],
      soundConsumer: Sink[AudioAPIMessage, NotUsed],
      parent: ActorRef[VoiceHandler.Command]
  ): Behavior[Command] =
    Behaviors
      .supervise(
        Behaviors.setup[Command] { ctx =>
          implicit val system: ActorSystem[Nothing] = ctx.system

          val ((queue, futIp), watchDone) = soundProducer
            .viaMat(
              VoiceUDPFlow
                .flow(
                  new InetSocketAddress(address, port),
                  ssrc,
                  serverId,
                  userId,
                  Source.queue[Option[ByteString]](0, OverflowStrategy.dropBuffer)
                )
                .watchTermination()(Keep.both)
            )(Keep.right)
            .to(soundConsumer)
            .run()

          ctx.pipeToSelf(futIp) {
            case Success(value) => IPDiscoveryResult(value)
            case Failure(e)     => SendExeption(e)
          }
          ctx.pipeToSelf(watchDone)(_ => ConnectionDied)

          handle(ctx, ctx.log, ssrc, queue, parent)
        }
      )
      .onFailure(
        SupervisorStrategy
          .restartWithBackoff(100.millis, 5.seconds, 1D)
          .withResetBackoffAfter(10.seconds)
          .withMaxRestarts(5)
      )

  def handle(
      ctx: ActorContext[Command],
      log: Logger,
      ssrc: Int,
      queue: SourceQueueWithComplete[Option[ByteString]],
      parent: ActorRef[VoiceHandler.Command]
  ): Behavior[Command] = Behaviors.receiveMessage {
    case SendExeption(e) => throw e
    case ConnectionDied  => Behaviors.stopped
    case Shutdown =>
      queue.complete()
      Behaviors.same
    case IPDiscoveryResult(VoiceUDPFlow.FoundIP(localAddress, localPort)) =>
      parent ! VoiceHandler.GotLocalIP(localAddress, localPort)
      Behaviors.same
    case SetSecretKey(key) =>
      queue.offer(key)
      Behaviors.same
  }

  sealed trait Command

  case object Shutdown extends Command

  private case class SendExeption(e: Throwable)                       extends Command
  private case object ConnectionDied                                  extends Command
  private case class IPDiscoveryResult(foundIP: VoiceUDPFlow.FoundIP) extends Command
  private[voice] case class SetSecretKey(key: Option[ByteString])     extends Command
} 
Example 17
Source File: UpickleCustomizationSupportSpec.scala    From akka-http-json   with Apache License 2.0
package de.heikoseeberger.akkahttpupickle

import akka.actor.ActorSystem
import akka.http.scaladsl.marshalling.Marshal
import akka.http.scaladsl.model.ContentTypes.{ `application/json`, `text/plain(UTF-8)` }
import akka.http.scaladsl.model._
import akka.http.scaladsl.unmarshalling.Unmarshaller.UnsupportedContentTypeException
import akka.http.scaladsl.unmarshalling.{ Unmarshal, Unmarshaller }
import akka.stream.scaladsl.{ Sink, Source }
import org.scalatest.BeforeAndAfterAll
import org.scalatest.matchers.should.Matchers
import org.scalatest.wordspec.AsyncWordSpec
import upickle.AttributeTagged
import upickle.core.Visitor

import scala.concurrent.Await
import scala.concurrent.duration.DurationInt

final class UpickleCustomizationSupportSpec
    extends AsyncWordSpec
    with Matchers
    with BeforeAndAfterAll {

  private implicit val system = ActorSystem()

  object FooApi extends AttributeTagged {
    override implicit val IntWriter: FooApi.Writer[Int] = new Writer[Int] {
      override def write0[V](out: Visitor[_, V], v: Int): V = out.visitString("foo", -1)
    }
  }
  object UpickleFoo extends UpickleCustomizationSupport {
    override type Api = FooApi.type
    override def api: FooApi.type = FooApi
  }

  import UpickleFoo._

  "UpickleCustomizationSupport" should {
    "support custom configuration" in {
      Marshal(123)
        .to[RequestEntity]
        .flatMap(Unmarshal(_).to[String])
        .map(_ shouldBe "foo")
    }
  }

  override protected def afterAll() = {
    Await.ready(system.terminate(), 42.seconds)
    super.afterAll()
  }
} 
Example 18
Source File: UpickleSupportSpec.scala    From akka-http-json   with Apache License 2.0
package de.heikoseeberger.akkahttpupickle

import akka.actor.ActorSystem
import akka.http.scaladsl.marshalling.Marshal
import akka.http.scaladsl.model.ContentTypes.{ `application/json`, `text/plain(UTF-8)` }
import akka.http.scaladsl.model._
import akka.http.scaladsl.unmarshalling.Unmarshaller.UnsupportedContentTypeException
import akka.http.scaladsl.unmarshalling.{ Unmarshal, Unmarshaller }
import akka.stream.scaladsl.{ Sink, Source }
import org.scalatest.BeforeAndAfterAll

import scala.concurrent.Await
import scala.concurrent.duration.DurationInt
import upickle.default.{ ReadWriter, macroRW }
import org.scalatest.matchers.should.Matchers
import org.scalatest.wordspec.AsyncWordSpec

object UpickleSupportSpec {

  final object Foo {
    implicit val rw: ReadWriter[Foo] = macroRW
  }

  final case class Foo(bar: String) {
    require(bar startsWith "bar", "bar must start with 'bar'!")
  }
}

final class UpickleSupportSpec extends AsyncWordSpec with Matchers with BeforeAndAfterAll {
  import UpickleSupport._
  import UpickleSupportSpec._

  private implicit val system = ActorSystem()

  "UpickleSupport" should {
    "enable marshalling and unmarshalling of case classes" in {
      val foo = Foo("bar")
      Marshal(foo)
        .to[RequestEntity]
        .flatMap(Unmarshal(_).to[Foo])
        .map(_ shouldBe foo)
    }

    "enable streamed marshalling and unmarshalling for json arrays" in {
      val foos = (0 to 100).map(i => Foo(s"bar-$i")).toList

      Marshal(Source(foos))
        .to[RequestEntity]
        .flatMap(entity => Unmarshal(entity).to[SourceOf[Foo]])
        .flatMap(_.runWith(Sink.seq))
        .map(_ shouldBe foos)
    }

    "provide proper error messages for requirement errors" in {
      val entity = HttpEntity(MediaTypes.`application/json`, """{ "bar": "baz" }""")
      Unmarshal(entity)
        .to[Foo]
        .failed
        .map(_ should have message "requirement failed: bar must start with 'bar'!")
    }

    "fail with NoContentException when unmarshalling empty entities" in {
      val entity = HttpEntity.empty(`application/json`)
      Unmarshal(entity)
        .to[Foo]
        .failed
        .map(_ shouldBe Unmarshaller.NoContentException)
    }

    "fail with UnsupportedContentTypeException when Content-Type is not `application/json`" in {
      val entity = HttpEntity("""{ "bar": "bar" }""")
      Unmarshal(entity)
        .to[Foo]
        .failed
        .map(
          _ shouldBe UnsupportedContentTypeException(Some(`text/plain(UTF-8)`), `application/json`)
        )
    }

    "allow unmarshalling with passed in Content-Types" in {
      val foo = Foo("bar")
      val `application/json-home` =
        MediaType.applicationWithFixedCharset("json-home", HttpCharsets.`UTF-8`, "json-home")

      final object CustomUpickleSupport extends UpickleSupport {
        override def unmarshallerContentTypes = List(`application/json`, `application/json-home`)
      }
      import CustomUpickleSupport._

      val entity = HttpEntity(`application/json-home`, """{ "bar": "bar" }""")
      Unmarshal(entity).to[Foo].map(_ shouldBe foo)
    }
  }

  override protected def afterAll() = {
    Await.ready(system.terminate(), 42.seconds)
    super.afterAll()
  }
} 
Example 19
Source File: JsoniterScalaSupportSpec.scala    From akka-http-json   with Apache License 2.0
package de.heikoseeberger.akkahttpjsoniterscala

import akka.actor.ActorSystem
import akka.http.scaladsl.marshalling.Marshal
import akka.http.scaladsl.model.ContentTypes.{ `application/json`, `text/plain(UTF-8)` }
import akka.http.scaladsl.model._
import akka.http.scaladsl.unmarshalling.Unmarshaller.UnsupportedContentTypeException
import akka.http.scaladsl.unmarshalling.{ Unmarshal, Unmarshaller }
import akka.stream.scaladsl.{ Sink, Source }
import com.github.plokhotnyuk.jsoniter_scala.core.JsonValueCodec
import com.github.plokhotnyuk.jsoniter_scala.macros._
import org.scalatest.BeforeAndAfterAll

import scala.concurrent.Await
import scala.concurrent.duration.DurationInt
import org.scalatest.matchers.should.Matchers
import org.scalatest.wordspec.AsyncWordSpec

object JsoniterScalaSupportSpec {

  final case class Foo(bar: String) {
    require(bar startsWith "bar", "bar must start with 'bar'!")
  }
}

final class JsoniterScalaSupportSpec extends AsyncWordSpec with Matchers with BeforeAndAfterAll {
  import JsoniterScalaSupport._
  import JsoniterScalaSupportSpec._

  private implicit val system: ActorSystem        = ActorSystem()
  private implicit val codec: JsonValueCodec[Foo] = JsonCodecMaker.make[Foo](CodecMakerConfig)

  "JsoniterScalaSupport" should {
    "should enable marshalling and unmarshalling" in {
      val foo = Foo("bar")
      Marshal(foo)
        .to[RequestEntity]
        .flatMap(Unmarshal(_).to[Foo])
        .map(_ shouldBe foo)
    }

    "enable streamed marshalling and unmarshalling for json arrays" in {
      val foos = (0 to 100).map(i => Foo(s"bar-$i")).toList

      Marshal(Source(foos))
        .to[RequestEntity]
        .flatMap(entity => Unmarshal(entity).to[SourceOf[Foo]])
        .flatMap(_.runWith(Sink.seq))
        .map(_ shouldBe foos)
    }

    "provide proper error messages for requirement errors" in {
      val entity = HttpEntity(MediaTypes.`application/json`, """{ "bar": "baz" }""")
      Unmarshal(entity)
        .to[Foo]
        .failed
        .map(_ should have message "requirement failed: bar must start with 'bar'!")
    }

    "fail with NoContentException when unmarshalling empty entities" in {
      val entity = HttpEntity.empty(`application/json`)
      Unmarshal(entity)
        .to[Foo]
        .failed
        .map(_ shouldBe Unmarshaller.NoContentException)
    }

    "fail with UnsupportedContentTypeException when Content-Type is not `application/json`" in {
      val entity = HttpEntity("""{ "bar": "bar" }""")
      Unmarshal(entity)
        .to[Foo]
        .failed
        .map(
          _ shouldBe UnsupportedContentTypeException(Some(`text/plain(UTF-8)`), `application/json`)
        )
    }

    "allow unmarshalling with passed in Content-Types" in {
      val foo = Foo("bar")
      val `application/json-home` =
        MediaType.applicationWithFixedCharset("json-home", HttpCharsets.`UTF-8`, "json-home")

      final object CustomJsoniterScalaSupport extends JsoniterScalaSupport {
        override def unmarshallerContentTypes: List[ContentTypeRange] =
          List(`application/json`, `application/json-home`)
      }
      import CustomJsoniterScalaSupport._

      val entity = HttpEntity(`application/json-home`, """{ "bar": "bar" }""")
      Unmarshal(entity).to[Foo].map(_ shouldBe foo)
    }
  }

  override protected def afterAll(): Unit = {
    Await.ready(system.terminate(), 42.seconds)
    super.afterAll()
  }
} 
Example 20
Source File: BatchCommitConsumer.scala    From kafka-scala-api   with Apache License 2.0
package com.example.consumer

import akka.Done
import akka.kafka.ConsumerMessage.CommittableOffsetBatch
import akka.kafka.Subscriptions
import akka.kafka.scaladsl.Consumer
import akka.stream.scaladsl.Sink
import com.example._

import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.Future

object BatchCommitConsumer extends App {

  val done = Consumer.committableSource(consumerSettings, Subscriptions.topics(topic))
    .mapAsync(1) { msg =>
      println(s"BatchCommittableConsumer consume: $msg")
      Future.successful(Done).map(_ => msg.committableOffset)
    }
    .batch(max = 20, first => CommittableOffsetBatch.empty.updated(first)) { (batch, elem) =>
      batch.updated(elem)
    }
    .mapAsync(3)(_.commitScaladsl())
    .runWith(Sink.ignore)
} 
Example 21
Source File: PlainSourceConsumer.scala    From kafka-scala-api   with Apache License 2.0
package com.example.consumer

import java.util.concurrent.atomic.AtomicLong

import akka.Done
import akka.kafka.Subscriptions
import akka.kafka.scaladsl.Consumer
import akka.stream.scaladsl.Sink
import com.example._
import org.apache.kafka.clients.consumer.ConsumerRecord
import org.apache.kafka.common.TopicPartition

import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.Future

object PlainSourceConsumer extends App {

  val db = new DB
  db.loadOffset().foreach { fromOffset =>
    val partition = 0
    val subscription = Subscriptions.assignmentWithOffset(
      new TopicPartition(topic, partition) -> fromOffset
    )

    val done =
      Consumer.plainSource(consumerSettings, subscription)
        .mapAsync(1)(db.save)
        .runWith(Sink.ignore)
  }

}

//Zookeeper or DB storage mock
class DB {

  private val offset = new AtomicLong(2)

  def save(record: ConsumerRecord[Array[Byte], String]): Future[Done] = {
    println(s"DB.save: ${record.value}")
    offset.set(record.offset)
    Future.successful(Done)
  }

  def loadOffset(): Future[Long] =
    Future.successful(offset.get)

  def update(data: String): Future[Done] = {
    println(s"DB.update: $data")
    Future.successful(Done)
  }
} 
Example 22
Source File: CommitConsumerToFlowProducer.scala    From kafka-scala-api   with Apache License 2.0
package com.example.integration

import akka.kafka.scaladsl.{Consumer, Producer}
import akka.kafka.{ProducerMessage, Subscriptions}
import akka.stream.scaladsl.Sink
import com.example._
import org.apache.kafka.clients.producer.ProducerRecord

object CommitConsumerToFlowProducer extends App {

  
  val done = Consumer.committableSource(consumerSettings, Subscriptions.topics(topic1))
    .map { msg =>
      println(s"topic1 -> topic2: $msg")
      ProducerMessage.Message(new ProducerRecord[Array[Byte], String](
        topic2,
        msg.record.value
      ), msg.committableOffset)
    }
    .via(Producer.flow(producerSettings))
    .mapAsync(producerSettings.parallelism) { result =>
      result.message.passThrough.commitScaladsl()
    }
    .runWith(Sink.ignore)
} 
Example 23
Source File: FlowProducer.scala    From kafka-scala-api   with Apache License 2.0
package com.example.producer

import akka.kafka.ProducerMessage
import akka.kafka.scaladsl.Producer
import akka.stream.scaladsl.{Sink, Source}
import org.apache.kafka.clients.producer.ProducerRecord
import com.example._

object FlowProducer extends App {

  val done = Source(100 to 111)
    .map { n =>
      val partition = 1
      ProducerMessage.Message(new ProducerRecord[Array[Byte], String](
        topic , partition, null, n.toString
      ), n)
    }
    .via(Producer.flow(producerSettings))
    .map { result =>
      val record = result.message.record
      println(s"${record.topic}/${record.partition} ${result.offset}: ${record.value}" +
        s"(${result.message.passThrough})")
      result
    }
    .runWith(Sink.ignore)
} 
Example 24
Source File: akkaStreams.scala    From sangria-akka-streams   with Apache License 2.0
package sangria.streaming

import scala.language.higherKinds
import akka.NotUsed
import akka.event.Logging
import akka.stream.ActorAttributes.SupervisionStrategy
import akka.stream._
import akka.stream.scaladsl.{Merge, Sink, Source}
import akka.stream.stage.{GraphStage, GraphStageLogic, InHandler, OutHandler}

import scala.concurrent.Future

object akkaStreams {
  type AkkaSource[+T] = Source[T, NotUsed]

  abstract class SimpleLinearGraphStage[T] extends GraphStage[FlowShape[T, T]] {
    val in = Inlet[T](Logging.simpleName(this) + ".in")
    val out = Outlet[T](Logging.simpleName(this) + ".out")
    override val shape = FlowShape(in, out)
  }

  class AkkaStreamsSubscriptionStream(implicit materializer: Materializer) extends SubscriptionStream[AkkaSource] {
    def supported[T[_]](other: SubscriptionStream[T]) = other.isInstanceOf[AkkaStreamsSubscriptionStream]

    def map[A, B](source: AkkaSource[A])(fn: A => B) = source.map(fn)

    def singleFuture[T](value: Future[T]) = Source.fromFuture(value)

    def single[T](value: T) = Source.single(value)

    def mapFuture[A, B](source: AkkaSource[A])(fn: A => Future[B]) =
      source.mapAsync(1)(fn)

    def first[T](s: AkkaSource[T]) = s.runWith(Sink.head)

    def failed[T](e: Throwable) = Source.failed(e).asInstanceOf[AkkaSource[T]]

    def onComplete[Ctx, Res](result: AkkaSource[Res])(op: => Unit) =
      result
        .via(OnComplete(() => op))
        .recover {case e => op; throw e}
        .asInstanceOf[AkkaSource[Res]]

    def flatMapFuture[Ctx, Res, T](future: Future[T])(resultFn: T => AkkaSource[Res]) =
      Source.fromFuture(future).flatMapMerge(1, resultFn)

    def merge[T](streams: Vector[AkkaSource[T]]) = {
      if (streams.size > 1)
        Source.combine(streams(0), streams(1), streams.drop(2): _*)(Merge(_))
      else if (streams.nonEmpty)
        streams.head
      else
        throw new IllegalStateException("No streams produced!")
    }

    def recover[T](stream: AkkaSource[T])(fn: Throwable => T) =
      stream recover {case e => fn(e)}
  }

  implicit def akkaSubscriptionStream(implicit materializer: Materializer): SubscriptionStream[AkkaSource] = new AkkaStreamsSubscriptionStream

  implicit def akkaStreamIsValidSubscriptionStream[A[_, _], Ctx, Res, Out](implicit materializer: Materializer, ev1: ValidOutStreamType[Res, Out]): SubscriptionStreamLike[Source[A[Ctx, Res], NotUsed], A, Ctx, Res, Out] =
    new SubscriptionStreamLike[Source[A[Ctx, Res], NotUsed], A, Ctx, Res, Out] {
      type StreamSource[X] = AkkaSource[X]
      val subscriptionStream = new AkkaStreamsSubscriptionStream
    }

  private final case class OnComplete[T](op: () => Unit) extends SimpleLinearGraphStage[T] {
    override def toString: String = "OnComplete"

    override def createLogic(inheritedAttributes: Attributes): GraphStageLogic =
      new GraphStageLogic(shape) with OutHandler with InHandler {
        def decider = inheritedAttributes.get[SupervisionStrategy].map(_.decider).getOrElse(Supervision.stoppingDecider)

        override def onPush(): Unit = {
          push(out, grab(in))
        }

        override def onPull(): Unit = pull(in)

        override def onDownstreamFinish() = {
          op()
          super.onDownstreamFinish()
        }

        override def onUpstreamFinish() = {
          op()
          super.onUpstreamFinish()
        }

        setHandlers(in, out, this)
      }
  }
} 
Example 25
Source File: Cache.scala    From AckCord   with MIT License
package ackcord

import scala.collection.immutable

import ackcord.gateway.GatewayMessage
import akka.actor.typed.ActorSystem
import akka.stream.scaladsl.{Sink, Source}
import akka.{NotUsed, actor => classic}

object Cache { // companion object (the Cache case class itself is elided from this excerpt)
  def create(
      cacheProcessor: MemoryCacheSnapshot.CacheProcessor = MemoryCacheSnapshot.defaultCacheProcessor,
      parallelism: Int = 4
  )(implicit system: ActorSystem[Nothing]): Cache = {
    val (publish, subscribe)               = CacheStreams.cacheStreams(cacheProcessor)
    val (gatewayPublish, gatewaySubscribe) = CacheStreams.gatewayEvents[Any]

    //Keep it drained if nothing else is using it
    subscribe.runWith(Sink.ignore)

    Cache(publish, subscribe, gatewayPublish, gatewaySubscribe, parallelism)
  }
} 
Example 26
Source File: CacheStreams.scala    From AckCord   with MIT License
package ackcord

import scala.collection.mutable

import ackcord.cachehandlers.CacheSnapshotBuilder
import ackcord.gateway.GatewayEvent.ReadyData
import ackcord.gateway.GatewayMessage
import ackcord.requests.SupervisionStreams
import akka.NotUsed
import akka.actor.typed.ActorSystem
import akka.stream.scaladsl.{BroadcastHub, Flow, Keep, MergeHub, Sink, Source}
import org.slf4j.Logger

object CacheStreams {

  // The cacheStreams and gatewayEvents factories used in the previous example live here too; elided from this excerpt.
  def cacheUpdater(
      cacheProcessor: MemoryCacheSnapshot.CacheProcessor
  )(implicit system: ActorSystem[Nothing]): Flow[CacheEvent, (CacheEvent, CacheState), NotUsed] =
    Flow[CacheEvent].statefulMapConcat { () =>
      var state: CacheState    = null
      implicit val log: Logger = system.log

      //We only handle events when we are ready to, and we have received the ready event.
      def isReady: Boolean = state != null

      {
        case readyEvent @ APIMessageCacheUpdate(_: ReadyData, _, _, _, _) =>
          val builder = new CacheSnapshotBuilder(
            0,
            null, //The event will populate this,
            mutable.Map.empty,
            mutable.Map.empty,
            mutable.Map.empty,
            mutable.Map.empty,
            mutable.Map.empty,
            mutable.Map.empty,
            mutable.Map.empty,
            mutable.Map.empty,
            cacheProcessor
          )

          readyEvent.process(builder)

          val snapshot = builder.toImmutable
          state = CacheState(snapshot, snapshot)
          List(readyEvent -> state)
        case handlerEvent: CacheEvent if isReady =>
          val builder = CacheSnapshotBuilder(state.current)
          handlerEvent.process(builder)

          state = state.update(builder.toImmutable)
          List(handlerEvent -> state)
        case _ if !isReady =>
          log.error("Received event before ready")
          Nil
      }
    }
} 
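statefulMapConcat does the heavy lifting in cacheUpdater: the outer function runs once per materialization and owns the mutable state, while the inner function runs per element. A standalone sketch of the pattern:

import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.{Sink, Source}

object StatefulSketch extends App {
  implicit val system = ActorSystem("stateful-sketch")
  implicit val materializer = ActorMaterializer()

  // Pairs each element with a running total, the way cacheUpdater
  // pairs each CacheEvent with the evolving CacheState.
  Source(List(1, 2, 3, 4))
    .statefulMapConcat { () =>
      var total = 0 // created fresh for every materialization
      elem => {
        total += elem
        List(elem -> total)
      }
    }
    .runWith(Sink.foreach(println)) // (1,1) (2,3) (3,6) (4,10)
}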
Example 27
Source File: SupervisionStreams.scala    From AckCord   with MIT License 5 votes vote down vote up
package ackcord.requests

import akka.actor.typed.ActorSystem
import akka.stream.scaladsl.{Flow, RunnableGraph, Sink, Source}
import akka.stream.{ActorAttributes, Attributes, Supervision}

object SupervisionStreams {

  def addLogAndContinueFunction[G](addAttributes: Attributes => G)(implicit system: ActorSystem[Nothing]): G =
    addAttributes(ActorAttributes.supervisionStrategy {
      case _: RetryFailedRequestException[_] => Supervision.Stop
      case e =>
        system.log.error("Unhandled exception in stream", e)
        Supervision.Resume
    })

  def logAndContinue[M](graph: RunnableGraph[M])(implicit system: ActorSystem[Nothing]): RunnableGraph[M] =
    addLogAndContinueFunction(graph.addAttributes)

  def logAndContinue[Out, Mat](source: Source[Out, Mat])(implicit system: ActorSystem[Nothing]): Source[Out, Mat] =
    addLogAndContinueFunction(source.addAttributes)

  def logAndContinue[In, Out, Mat](
      flow: Flow[In, Out, Mat]
  )(implicit system: ActorSystem[Nothing]): Flow[In, Out, Mat] =
    addLogAndContinueFunction(flow.addAttributes)

  def logAndContinue[In, Mat](sink: Sink[In, Mat])(implicit system: ActorSystem[Nothing]): Sink[In, Mat] =
    addLogAndContinueFunction(sink.addAttributes)
} 
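A hedged usage sketch: ActorAttributes.supervisionStrategy decides per throwable whether the stream stops or keeps going, which is all addLogAndContinueFunction installs:

import akka.actor.ActorSystem
import akka.stream.{ActorAttributes, ActorMaterializer, Supervision}
import akka.stream.scaladsl.{Sink, Source}

object SupervisionSketch extends App {
  implicit val system = ActorSystem("supervision-sketch")
  implicit val materializer = ActorMaterializer()

  Source(1 to 5)
    .map(n => if (n == 3) throw new RuntimeException("boom") else n)
    // Drop the failing element and continue, as logAndContinue does.
    .withAttributes(ActorAttributes.supervisionStrategy(_ => Supervision.Resume))
    .runWith(Sink.foreach(println)) // prints 1 2 4 5
}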
Example 28
Source File: StreamInstances.scala    From AckCord   with MIT License 5 votes vote down vote up
package ackcord.util

import akka.NotUsed
import akka.stream.scaladsl.{Flow, Merge, Sink, Source}
import cats.{Alternative, Contravariant, Functor, MonadError, StackSafeMonad}

object StreamInstances {

  type SourceRequest[A] = Source[A, NotUsed]

  implicit val sourceInstance: MonadError[SourceRequest, Throwable] with Alternative[SourceRequest] =
    new MonadError[SourceRequest, Throwable] with Alternative[SourceRequest] with StackSafeMonad[SourceRequest] {

      override def empty[A]: SourceRequest[A] = Source.empty[A]

      override def pure[A](x: A): SourceRequest[A] = Source.single(x)

      override def map[A, B](fa: SourceRequest[A])(f: A => B): SourceRequest[B] = fa.map(f)

      override def flatMap[A, B](fa: SourceRequest[A])(f: A => SourceRequest[B]): SourceRequest[B] =
        fa.flatMapConcat[B, NotUsed](f)

      override def product[A, B](fa: SourceRequest[A], fb: SourceRequest[B]): SourceRequest[(A, B)] = fa.zip(fb)

      override def combineK[A](x: SourceRequest[A], y: SourceRequest[A]): SourceRequest[A] =
        Source.combine(x, y)(Merge.apply(_))

      override def raiseError[A](e: Throwable): SourceRequest[A] = Source.failed(e)
      override def handleErrorWith[A](fa: SourceRequest[A])(f: Throwable => SourceRequest[A]): SourceRequest[A] =
        fa.recoverWithRetries[A](
          5,
          {
            case e: Throwable => f(e).mapMaterializedValue(_ => NotUsed)
          }
        )
    }

  implicit def flowInstance[In, Mat]: Functor[Flow[In, *, Mat]] = new Functor[Flow[In, *, Mat]] {
    override def map[A, B](fa: Flow[In, A, Mat])(f: A => B): Flow[In, B, Mat] = fa.map(f)
  }

  implicit def sinkInstance[Mat]: Contravariant[Sink[*, Mat]] = new Contravariant[Sink[*, Mat]] {
    override def contramap[A, B](fa: Sink[A, Mat])(f: B => A): Sink[B, Mat] = fa.contramap(f)
  }

  //Enables for-comprehension syntax on Source; can be brittle
  implicit class SourceFlatmap[A, M1](private val source: Source[A, M1]) extends AnyVal {
    def flatMap[B, M2](f: A => Source[B, M2]): Source[B, M1] = source.flatMapConcat(f)
  }
} 
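With SourceFlatmap in scope, flatMapConcat chains read as for-comprehensions. A sketch, assuming the StreamInstances object above is on the classpath:

import akka.NotUsed
import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.{Sink, Source}
import ackcord.util.StreamInstances.SourceFlatmap

object InstancesSketch extends App {
  implicit val system = ActorSystem("instances-sketch")
  implicit val materializer = ActorMaterializer()

  // flatMapConcat written as a for-comprehension via SourceFlatmap.
  val pairs: Source[(Int, String), NotUsed] = for {
    n <- Source(List(1, 2))
    s <- Source(List("a", "b"))
  } yield (n, s)

  pairs.runWith(Sink.foreach(println)) // (1,a) (1,b) (2,a) (2,b)
}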
Example 29
Source File: EventRegistration.scala    From AckCord   with MIT License 5 votes vote down vote up
package ackcord

import scala.concurrent.Future

import akka.Done
import akka.stream.{KillSwitches, UniqueKillSwitch}
import akka.stream.scaladsl.{Keep, RunnableGraph, Sink, Source}

case class EventRegistration[Mat](materialized: Mat, onDone: Future[Done], killSwitch: UniqueKillSwitch) {

  def stop(): Unit = killSwitch.shutdown()
}
object EventRegistration {
  def toSink[A, M](source: Source[A, M]): RunnableGraph[EventRegistration[M]] =
    source.viaMat(KillSwitches.single)(Keep.both).toMat(Sink.ignore) {
      case ((m, killSwitch), done) => EventRegistration(m, done, killSwitch)
    }

  def withRegistration[A, M](source: Source[A, M]): Source[A, EventRegistration[M]] =
    source.viaMat(KillSwitches.single)(Keep.both).watchTermination() {
      case ((m, killSwitch), done) => EventRegistration(m, done, killSwitch)
    }
} 
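EventRegistration is a thin bundle around a kill switch plus the stream's completion future; the equivalent wiring spelled out with plain Akka Streams pieces:

import scala.concurrent.duration._

import akka.actor.ActorSystem
import akka.stream.{ActorMaterializer, KillSwitches}
import akka.stream.scaladsl.{Keep, Sink, Source}

object KillSwitchSketch extends App {
  implicit val system = ActorSystem("killswitch-sketch")
  implicit val materializer = ActorMaterializer()
  import system.dispatcher

  // The same wiring EventRegistration.toSink performs, inlined.
  val (killSwitch, done) = Source
    .tick(0.seconds, 100.millis, "tick")
    .viaMat(KillSwitches.single)(Keep.right)
    .toMat(Sink.foreach(println))(Keep.both)
    .run()

  done.foreach(_ => println("stream done"))
  Thread.sleep(500)
  killSwitch.shutdown() // what EventRegistration.stop() delegates to
}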
Example 30
Source File: Test15.scala    From incubator-retired-gearpump   with Apache License 2.0 5 votes vote down vote up
package org.apache.gearpump.akkastream.example

import akka.actor.ActorSystem
import akka.stream._
import akka.stream.scaladsl.{Balance, Broadcast, Flow, GraphDSL, Merge, RunnableGraph, Sink, Source}
import org.apache.gearpump.akkastream.GearpumpMaterializer
import org.apache.gearpump.cluster.main.{ArgumentsParser, CLIOption}
import org.apache.gearpump.util.AkkaApp

import scala.concurrent.Await
import scala.concurrent.duration._


object Test15 extends AkkaApp with ArgumentsParser {
  // scalastyle:off println
  override val options: Array[(String, CLIOption[Any])] = Array(
    "gearpump" -> CLIOption[Boolean]("<boolean>", required = false, defaultValue = Some(false))
  )

  override def main(akkaConf: Config, args: Array[String]): Unit = {
    val config = parse(args)
    implicit val system = ActorSystem("Test15", akkaConf)
    implicit val materializer: ActorMaterializer = config.getBoolean("gearpump") match {
      case true =>
        GearpumpMaterializer()
      case false =>
        ActorMaterializer(
          ActorMaterializerSettings(system).withAutoFusing(false)
        )
    }
    import akka.stream.scaladsl.GraphDSL.Implicits._
    RunnableGraph.fromGraph(GraphDSL.create() { implicit builder =>
      val A = builder.add(Source.single(0)).out
      val B = builder.add(Broadcast[Int](2))
      val C = builder.add(Merge[Int](2).named("C"))
      val D = builder.add(Flow[Int].map(_ + 1).named("D"))
      val E = builder.add(Balance[Int](2).named("E"))
      val F = builder.add(Merge[Int](2).named("F"))
      val G = builder.add(Sink.foreach(println).named("G")).in

      C <~ F
      A ~> B ~> C ~> F
      B ~> D ~> E ~> F
      E ~> G

      ClosedShape
    }).run()

    Await.result(system.whenTerminated, 60.minutes)
  }
  // scalastyle:on println
} 
Example 31
Source File: Test7.scala    From incubator-retired-gearpump   with Apache License 2.0 5 votes vote down vote up
package org.apache.gearpump.akkastream.example

import akka.actor.ActorSystem
import akka.stream.scaladsl.{Broadcast, Merge, Sink, Source}
import org.apache.gearpump.akkastream.GearpumpMaterializer
import org.apache.gearpump.cluster.main.ArgumentsParser
import org.apache.gearpump.util.AkkaApp

import scala.concurrent.Await
import scala.concurrent.duration._




object Test7 extends AkkaApp with ArgumentsParser {
  // scalastyle:off println
  override def main(akkaConf: Config, args: Array[String]): Unit = {
    implicit val system = ActorSystem("Test7", akkaConf)
    implicit val materializer = GearpumpMaterializer()
    implicit val ec = system.dispatcher
 
    val sourceA = Source(List(1))
    val sourceB = Source(List(2))
    val mergedSource = Source.combine(sourceA, sourceB)(Merge(_))

    val sinkA = Sink.foreach[Int](x => println(s"In SinkA : $x"))
    val sinkB = Sink.foreach[Int](x => println(s"In SinkB : $x"))
    val sink = Sink.combine(sinkA, sinkB)(Broadcast[Int](_))
    mergedSource.runWith(sink)

    Await.result(system.whenTerminated, 60.minutes)
  }
  // scalastyle:on println
} 
Example 32
Source File: Test3.scala    From incubator-retired-gearpump   with Apache License 2.0 5 votes vote down vote up
package org.apache.gearpump.akkastream.example

import akka.actor.{Actor, ActorSystem, Props}
import akka.stream.{ActorMaterializer, ActorMaterializerSettings}
import org.apache.gearpump.akkastream.GearpumpMaterializer
import org.apache.gearpump.akkastream.scaladsl.GearSource
import akka.stream.scaladsl.Sink
import org.apache.gearpump.cluster.main.{ArgumentsParser, CLIOption}
import org.apache.gearpump.streaming.dsl.scalaapi.CollectionDataSource
import org.apache.gearpump.util.AkkaApp

import scala.concurrent.Await
import scala.concurrent.duration._


object Test3 extends AkkaApp with ArgumentsParser {
  // scalastyle:off println
  override val options: Array[(String, CLIOption[Any])] = Array(
    "gearpump" -> CLIOption[Boolean]("<boolean>", required = false, defaultValue = Some(false))
  )

  override def main(akkaConf: Config, args: Array[String]): Unit = {
    val config = parse(args)
    implicit val system = ActorSystem("Test3", akkaConf)
    implicit val materializer: ActorMaterializer = config.getBoolean("gearpump") match {
      case true =>
        GearpumpMaterializer()
      case false =>
        ActorMaterializer(
          ActorMaterializerSettings(system).withAutoFusing(false)
        )
    }
    val echo = system.actorOf(Props(new Echo()))
    val sink = Sink.actorRef(echo, "COMPLETE")
    val sourceData = new CollectionDataSource(
      List("red hat", "yellow sweater", "blue jack", "red apple", "green plant", "blue sky"))
    val source = GearSource.from[String](sourceData)
    source.filter(_.startsWith("red")).map("I want to order item: " + _).runWith(sink)

    Await.result(system.whenTerminated, 60.minutes)
  }

  class Echo extends Actor {
    def receive: Receive = {
      case any: AnyRef =>
        println("Confirm received: " + any)
    }
  }
  // scalastyle:on println
} 
Example 33
Source File: PaymentHistory.scala    From 006877   with MIT License 5 votes vote down vote up
package aia.persistence

import akka.actor._

import akka.persistence.query.PersistenceQuery
import akka.persistence.query.journal.leveldb.scaladsl.LeveldbReadJournal
 
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.Sink

object PaymentHistory {
  def props(shopperId: Long) = Props(new PaymentHistory(shopperId))
  def name(shopperId: Long) = s"payment_history_${shopperId}"

  case object GetHistory

  case class History(items: List[Item] = Nil) {
    def paid(paidItems: List[Item]) = {
      History(paidItems ++ items)
    }
  }
}

class PaymentHistory(shopperId: Long) extends Actor
    with ActorLogging {
  import PaymentHistory._

  val queries = PersistenceQuery(context.system).readJournalFor[LeveldbReadJournal](
    LeveldbReadJournal.Identifier)
  implicit val materializer = ActorMaterializer()
  queries.eventsByPersistenceId(Wallet.name(shopperId)).runWith(Sink.actorRef(self, None))

  var history = History()

  def receive = {
    case Wallet.Paid(items, _) => history = history.paid(items)
    case GetHistory => sender() ! history
  }
} 
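Sink.actorRef delivers every stream element to the actor as a plain message and sends a designated completion message when the stream ends (here None). A generic sketch outside the persistence-query context:

import akka.actor.{Actor, ActorSystem, Props}
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.{Sink, Source}

object ActorRefSinkSketch extends App {
  implicit val system = ActorSystem()
  implicit val materializer = ActorMaterializer()

  class Printer extends Actor {
    def receive = {
      case "done" => println("stream completed"); context.system.terminate()
      case msg    => println(s"got: $msg")
    }
  }

  val printer = system.actorOf(Props(new Printer))
  // "done" plays the role of the None completion marker above.
  Source(1 to 3).runWith(Sink.actorRef(printer, onCompleteMessage = "done"))
}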
Example 34
Source File: CalculatorHistory.scala    From 006877   with MIT License 5 votes vote down vote up
package aia.persistence.calculator

import akka.actor._

import akka.persistence.query.PersistenceQuery
import akka.persistence.query.journal.leveldb.scaladsl.LeveldbReadJournal
 
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.Sink

object CalculatorHistory {
  def props = Props(new CalculatorHistory)
  def name = "calculator-history"
  case object GetHistory
  case class History(added: Int = 0, subtracted: Int = 0, divided: Int = 0, multiplied: Int = 0) {
    def incrementAdded = copy(added = added + 1)
    def incrementSubtracted = copy(subtracted = subtracted + 1)
    def incrementDivided = copy(divided = divided + 1)
    def incrementMultiplied = copy(multiplied = multiplied + 1)
  }
}

class CalculatorHistory extends Actor {
  import Calculator._
  import CalculatorHistory._

  val queries = PersistenceQuery(context.system).readJournalFor[LeveldbReadJournal](
    LeveldbReadJournal.Identifier)
  implicit val materializer = ActorMaterializer()
  queries.eventsByPersistenceId(Calculator.name).runWith(Sink.actorRef(self, None))

  var history = History()
  
  def receive = {
    case _ : Added => history = history.incrementAdded
    case _ : Subtracted => history = history.incrementSubtracted
    case _ : Divided => history = history.incrementDivided
    case _ : Multiplied => history = history.incrementMultiplied
    case GetHistory => sender() ! history
  }
} 
Example 35
Source File: LogJson.scala    From 006877   with MIT License 5 votes vote down vote up
package aia.stream

import java.nio.file.{ Files, Path }
import java.io.File
import java.time.ZonedDateTime

import scala.concurrent.duration._
import scala.concurrent.ExecutionContext
import scala.concurrent.Future
import scala.util.{ Success, Failure }

import akka.Done
import akka.actor._
import akka.util.ByteString

import akka.stream.{ ActorAttributes, ActorMaterializer, IOResult }
import akka.stream.scaladsl.JsonFraming
import akka.stream.scaladsl.{ FileIO, BidiFlow, Flow, Framing, Keep, Sink, Source }

import akka.http.scaladsl.marshallers.sprayjson.SprayJsonSupport._
import akka.http.scaladsl.marshalling.Marshal
import akka.http.scaladsl.model._
import akka.http.scaladsl.server.Directives._
import akka.http.scaladsl.server._
import spray.json._

object LogJson extends EventMarshalling 
    with NotificationMarshalling 
    with MetricMarshalling {
  def textInFlow(maxLine: Int) = {
    Framing.delimiter(ByteString("\n"), maxLine)
    .map(_.decodeString("UTF8"))
    .map(LogStreamProcessor.parseLineEx)
    .collect { case Some(e) => e }
  }

  def jsonInFlow(maxJsonObject: Int) = {
    JsonFraming.objectScanner(maxJsonObject) 
      .map(_.decodeString("UTF8").parseJson.convertTo[Event])
  }

  def jsonFramed(maxJsonObject: Int) =
    JsonFraming.objectScanner(maxJsonObject) 

  val jsonOutFlow = Flow[Event].map { event => 
    ByteString(event.toJson.compactPrint)
  }

  val notifyOutFlow = Flow[Summary].map { ws => 
    ByteString(ws.toJson.compactPrint)
  }

  val metricOutFlow = Flow[Metric].map { m => 
    ByteString(m.toJson.compactPrint)
  }

  val textOutFlow = Flow[Event].map{ event => 
    ByteString(LogStreamProcessor.logLine(event))
  }

  def logToJson(maxLine: Int) = {
    BidiFlow.fromFlows(textInFlow(maxLine), jsonOutFlow)
  }

  def jsonToLog(maxJsonObject: Int) = {
    BidiFlow.fromFlows(jsonInFlow(maxJsonObject), textOutFlow)
  }

  def logToJsonFlow(maxLine: Int) = {
    logToJson(maxLine).join(Flow[Event])
  }

  def jsonToLogFlow(maxJsonObject: Int) = {
    jsonToLog(maxJsonObject).join(Flow[Event])
  }
} 
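BidiFlow.fromFlows pairs an inbound (decoding) flow with an outbound (encoding) flow, and join closes the loop with a processing flow, which is what logToJsonFlow and jsonToLogFlow do. A toy sketch of the same shape with strings and ints:

import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.{BidiFlow, Flow, Sink, Source}

object BidiSketch extends App {
  implicit val system = ActorSystem("bidi-sketch")
  implicit val materializer = ActorMaterializer()

  // Inbound half decodes, outbound half encodes.
  val codec = BidiFlow.fromFlows(
    Flow[String].map(_.trim.toInt),
    Flow[Int].map(n => s"result: $n")
  )

  // join plugs the processing flow between the two halves.
  val pipeline = codec.join(Flow[Int].map(_ * 2))

  Source(List("1", "2", "3")).via(pipeline).runWith(Sink.foreach(println))
}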
Example 36
Source File: GrpcAkkaStreamsServerCalls.scala    From grpcakkastream   with MIT License 5 votes vote down vote up
package grpc.akkastreams

import akka.stream.Materializer
import akka.stream.scaladsl.{Flow, Sink, Source}
import io.grpc.ServerCallHandler
import io.grpc.stub.{CallStreamObserver, ServerCalls, StreamObserver}

import scala.concurrent.Await
import scala.concurrent.duration.Duration
import scala.util.{Failure, Success}

object GrpcAkkaStreamsServerCalls {

  def unaryCall[I, O](service: Flow[I, O, _])(
    implicit mat: Materializer
  ): ServerCallHandler[I, O] = ServerCalls.asyncUnaryCall(
    new ServerCalls.UnaryMethod[I, O] {
      override def invoke(request: I, responseObserver: StreamObserver[O]) =
        Source
          .single(request)
          .via(service)
          .runForeach(responseObserver.onNext)
          .onComplete {
            case Success(_) => responseObserver.onCompleted()
            case Failure(t) => responseObserver.onError(t)
          }(mat.executionContext)
    }
  )

  def serverStreamingCall[I, O](service: Flow[I, O, _])(
    implicit mat: Materializer
  ): ServerCallHandler[I, O] =
    ServerCalls.asyncServerStreamingCall(
      new ServerCalls.ServerStreamingMethod[I, O] {
        override def invoke(request: I, responseObserver: StreamObserver[O]) =
          Source
            .single(request)
            .via(service)
            .runWith(Sink.fromGraph(new GrpcSinkStage[O](
              responseObserver.asInstanceOf[CallStreamObserver[O]]
            )))
      }
    )

  def clientStreamingCall[I, O](service: Flow[I, O, _])(
    implicit mat: Materializer
  ): ServerCallHandler[I, O] = ServerCalls.asyncClientStreamingCall(
    new ServerCalls.ClientStreamingMethod[I, O] {
      override def invoke(responseObserver: StreamObserver[O]): StreamObserver[I] =
      // blocks until the GraphStage is fully initialized
        Await.result(
          Source
            .fromGraph(new GrpcSourceStage[I, O](
              responseObserver.asInstanceOf[CallStreamObserver[O]]
            ))
            .via(service)
            .to(Sink.fromGraph(new GrpcSinkStage[O](
              responseObserver.asInstanceOf[CallStreamObserver[O]]
            ))).run(),
          Duration.Inf
        )
    }
  )

  def bidiStreamingCall[I, O](service: Flow[I, O, _])(
    implicit mat: Materializer
  ): ServerCallHandler[I, O] = ServerCalls.asyncBidiStreamingCall(
    new ServerCalls.BidiStreamingMethod[I, O] {
      override def invoke(responseObserver: StreamObserver[O]): StreamObserver[I] =
      // blocks until the GraphStage is fully initialized
        Await.result(
          Source
            .fromGraph(new GrpcSourceStage[I, O](
              responseObserver.asInstanceOf[CallStreamObserver[O]]
            ))
            .via(service)
            .to(Sink.fromGraph(new GrpcSinkStage[O](
              responseObserver.asInstanceOf[CallStreamObserver[O]]
            ))).run(),
          Duration.Inf
        )
    }
  )
} 
Example 37
Source File: FriendJournalReader.scala    From Akka-Cookbook   with MIT License 5 votes vote down vote up
package com.packt.chapter6

import akka.actor.ActorSystem
import akka.persistence.Recovery
import akka.persistence.query.PersistenceQuery
import akka.persistence.query.journal.leveldb.scaladsl.LeveldbReadJournal
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.Sink
import scala.concurrent.duration._

object FriendJournalReader extends App {
  implicit val system = ActorSystem()
  import system.dispatcher
  implicit val mat = ActorMaterializer()(system)
  val queries = PersistenceQuery(system).readJournalFor[LeveldbReadJournal](LeveldbReadJournal.Identifier)

  val laura = system.actorOf(FriendActor.props("Laura", Recovery()))
  val maria = system.actorOf(FriendActor.props("Maria", Recovery()))
  laura ! AddFriend(Friend("Hector"))
  laura ! AddFriend(Friend("Nancy"))
  maria ! AddFriend(Friend("Oliver"))
  maria ! AddFriend(Friend("Steve"))
  system.scheduler.scheduleOnce(5 second, maria, AddFriend(Friend("Steve")))
  system.scheduler.scheduleOnce(10 second, maria, RemoveFriend(Friend("Oliver")))
  Thread.sleep(2000)

  queries.allPersistenceIds().map(id => system.log.info(s"Id received [$id]")).to(Sink.ignore).run()
  queries.eventsByPersistenceId("Laura").map(e => log(e.persistenceId, e.event)).to(Sink.ignore).run()
  queries.eventsByPersistenceId("Maria").map(e => log(e.persistenceId, e.event)).to(Sink.ignore).run()

  def log(id: String, evt: Any) = system.log.info(s"Id [$id] Event [$evt]")
} 
Example 38
Source File: HostLevelClientAPIApplication.scala    From Akka-Cookbook   with MIT License 5 votes vote down vote up
package com.packt.chapter9

import akka.actor.ActorSystem
import akka.http.scaladsl.Http
import akka.http.scaladsl.model.HttpRequest
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.{Sink, Source}

import scala.concurrent.duration._
import scala.util.{Failure, Success}

object HostLevelClientAPIApplication extends App {
  implicit val system = ActorSystem()
  implicit val materializer = ActorMaterializer()
  implicit val executionContext = system.dispatcher

  val poolClientFlow = Http().cachedHostConnectionPoolHttps[String]("api.github.com")
  val akkaToolkitRequest = HttpRequest(uri = "/repos/akka/akka-http") -> """.*"open_issues":(.*?),.*"""
  val responseFuture = Source.single(akkaToolkitRequest).via(poolClientFlow).runWith(Sink.head)

  responseFuture.andThen {
    case Success(result) =>
      val (tryResponse, regex) = result
      tryResponse match {
        case Success(response) =>
          response.entity.toStrict(5 seconds).map(_.data.decodeString("UTF-8")).andThen {
            case Success(json) =>
              val pattern = regex.r
              pattern.findAllIn(json).matchData foreach { m =>
                println(s"There are ${m.group(1)} open issues in Akka Http.")
                materializer.shutdown()
                system.terminate()
              }
            case _ =>
          }
        case _ => println("request failed")
      }
    case _ => println("request failed")
  }
} 
Example 39
Source File: ConnectionLevelClientAPIApplication.scala    From Akka-Cookbook   with MIT License 5 votes vote down vote up
package com.packt.chapter9

import akka.actor.ActorSystem
import akka.http.scaladsl.Http
import akka.http.scaladsl.model.HttpRequest
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.{Sink, Source}

import scala.util.{Failure, Success}
import scala.concurrent.duration._

object ConnectionLevelClientAPIApplication extends App {

  implicit val system = ActorSystem()
  implicit val materializer = ActorMaterializer()
  implicit val executionContext = system.dispatcher

  val connectionFlow = Http().outgoingConnectionHttps("api.github.com")
  val akkaToolkitRequest = HttpRequest(uri = "/repos/akka/akka-http")

  val responseFuture = Source.single(akkaToolkitRequest).via(connectionFlow).runWith(Sink.head)

  responseFuture.andThen {
    case Success(response) =>
      response.entity.toStrict(5 seconds).map(_.data.decodeString("UTF-8")).andThen {
        case Success(json) =>
          val pattern = """.*"open_issues":(.*?),.*""".r
          pattern.findAllIn(json).matchData foreach { m =>
            println(s"There are ${m.group(1)} open issues in Akka Http.")
            materializer.shutdown()
            system.terminate()
          }
        case _ =>
      }
    case _ => println("request failed")
  }
} 
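Both client examples above wire a connection flow by hand; for one-off calls Akka HTTP also offers the request-level API. A hedged sketch using Http().singleRequest:

import akka.actor.ActorSystem
import akka.http.scaladsl.Http
import akka.http.scaladsl.model.HttpRequest
import akka.stream.ActorMaterializer

object RequestLevelClientSketch extends App {
  implicit val system = ActorSystem()
  implicit val materializer = ActorMaterializer()
  import system.dispatcher

  Http()
    .singleRequest(HttpRequest(uri = "https://api.github.com/repos/akka/akka-http"))
    .foreach { response =>
      println(s"Status: ${response.status}")
      response.discardEntityBytes() // always consume or discard the entity
      system.terminate()
    }
}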
Example 40
Source File: SimpleStreamsApplication.scala    From Akka-Cookbook   with MIT License 5 votes vote down vote up
package com.packt.chapter8

import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.{Sink, Source}

object SimpleStreamsApplication extends App {

  implicit val actorSystem = ActorSystem("SimpleStream")
  implicit val actorMaterializer = ActorMaterializer()

  val fileList = List(
    "src/main/resources/testfile1.text",
    "src/main/resources/testfile2.txt",
    "src/main/resources/testfile3.txt")

  val stream = Source(fileList)
    .map(new java.io.File(_))
    .filter(_.exists())
    .filter(_.length() != 0)
    .to(Sink.foreach(f => println(s"Absolute path: ${f.getAbsolutePath}")))

  stream.run()
} 
Example 41
Source File: ComposingStreamsApplication.scala    From Akka-Cookbook   with MIT License 5 votes vote down vote up
package com.packt.chapter8

import java.io.File

import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.{Sink, Source}

object ComposingStreamsApplication extends App {

  implicit val actorSystem = ActorSystem("SimpleStream")
  implicit val actorMaterializer = ActorMaterializer()

  val fileList = List(
    "src/main/resources/testfile1.text",
    "src/main/resources/testfile2.txt",
    "src/main/resources/testfile3.txt")

  val stream = Source(fileList)
    .map(new File(_))
    .filter(_.exists())
    .filter(_.length() != 0)
    .to(Sink.foreach(f => println(s"Absolute path: ${f.getAbsolutePath}")))

  stream.run()
} 
Example 42
Source File: WorkingWithGraphsApplication.scala    From Akka-Cookbook   with MIT License 5 votes vote down vote up
package com.packt.chapter8

import akka.actor.ActorSystem
import akka.stream._
import akka.stream.scaladsl.{Balance, Broadcast, Flow, GraphDSL, Merge, RunnableGraph, Sink, Source}
import scala.concurrent.duration._
import scala.util.Random


object WorkingWithGraphsApplication extends App {

  implicit val actorSystem = ActorSystem("WorkingWithGraphs")
  implicit val actorMaterializer = ActorMaterializer()

  trait MobileMsg {
    def id = Random.nextInt(1000)
    def toGenMsg(origin: String) = GenericMsg(id, origin)
  }
  class AndroidMsg extends MobileMsg
  class IosMsg extends MobileMsg
  case class GenericMsg(id: Int, origin: String)

  val graph = RunnableGraph.fromGraph(GraphDSL.create() { implicit builder =>
    import GraphDSL.Implicits._

    //Sources
    val androidNotification = Source.tick(2 seconds, 500 millis, new AndroidMsg)
    val iOSNotification = Source.tick(700 millis, 600 millis, new IosMsg)

    //Flow
    val groupAndroid = Flow[AndroidMsg].map(_.toGenMsg("ANDROID")).groupedWithin(5, 5 seconds).async
    val groupIos = Flow[IosMsg].map(_.toGenMsg("IOS")).groupedWithin(5, 5 seconds).async
    def counter = Flow[Seq[GenericMsg]].via(new StatefulCounterFlow())
    def mapper = Flow[Seq[GenericMsg]].mapConcat(_.toList)

    //Junctions
    val aBroadcast = builder.add(Broadcast[Seq[GenericMsg]](2))
    val iBroadcast = builder.add(Broadcast[Seq[GenericMsg]](2))
    val balancer = builder.add(Balance[Seq[GenericMsg]](2))
    val notificationMerge = builder.add(Merge[Seq[GenericMsg]](2))
    val genericNotificationMerge = builder.add(Merge[GenericMsg](2))

    def counterSink(s: String) = Sink.foreach[Int](x => println(s"$s: [$x]"))

    //Graph
    androidNotification ~> groupAndroid ~> aBroadcast ~> counter ~> counterSink("Android")
                                           aBroadcast ~> notificationMerge
                                           iBroadcast ~> notificationMerge
    iOSNotification     ~> groupIos     ~> iBroadcast ~> counter ~> counterSink("Ios")

    notificationMerge ~> balancer ~> mapper.async ~> genericNotificationMerge
                         balancer ~> mapper.async ~> genericNotificationMerge

    genericNotificationMerge ~> Sink.foreach(println)

    ClosedShape
  })

  graph.run()
} 
Example 43
Source File: ProcessingKafkaApplication.scala    From Akka-Cookbook   with MIT License 5 votes vote down vote up
package com.packt.chapter8

import akka.actor.ActorSystem
import akka.kafka.scaladsl.{Consumer, Producer}
import akka.kafka.{ConsumerSettings, ProducerSettings, Subscriptions}
import akka.stream.{ActorMaterializer, ClosedShape}
import akka.stream.scaladsl.{Flow, GraphDSL, RunnableGraph, Sink, Source}
import org.apache.kafka.clients.consumer.{ConsumerConfig, ConsumerRecord}
import org.apache.kafka.clients.producer.ProducerRecord
import org.apache.kafka.common.TopicPartition
import org.apache.kafka.common.serialization.{ByteArrayDeserializer, ByteArraySerializer, StringDeserializer, StringSerializer}

import scala.concurrent.duration._

object ProcessingKafkaApplication extends App {
  implicit val actorSystem = ActorSystem("SimpleStream")
  implicit val actorMaterializer = ActorMaterializer()

  val bootstrapServers = "localhost:9092"
  val kafkaTopic = "akka_streams_topic"
  val partition = 0
  val subscription = Subscriptions.assignment(new TopicPartition(kafkaTopic, partition))

  val consumerSettings = ConsumerSettings(actorSystem, new ByteArrayDeserializer, new StringDeserializer)
    .withBootstrapServers(bootstrapServers)
    .withGroupId("akka_streams_group")
    .withProperty(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest")

  val producerSettings = ProducerSettings(actorSystem, new ByteArraySerializer, new StringSerializer)
    .withBootstrapServers(bootstrapServers)

  val runnableGraph = RunnableGraph.fromGraph(GraphDSL.create() { implicit builder =>
    import GraphDSL.Implicits._

    val tickSource = Source.tick(0 seconds, 5 seconds, "Hello from Akka Streams using Kafka!")
    val kafkaSource = Consumer.plainSource(consumerSettings, subscription)
    val kafkaSink = Producer.plainSink(producerSettings)
    val printlnSink = Sink.foreach(println)

    val mapToProducerRecord = Flow[String].map(elem => new ProducerRecord[Array[Byte], String](kafkaTopic, elem))
    val mapFromConsumerRecord = Flow[ConsumerRecord[Array[Byte], String]].map(record => record.value())

    tickSource  ~> mapToProducerRecord   ~> kafkaSink
    kafkaSource ~> mapFromConsumerRecord ~> printlnSink

    ClosedShape
  })

  runnableGraph.run()
} 
Example 44
Source File: PipeliningParallelizing.scala    From Akka-Cookbook   with MIT License 5 votes vote down vote up
package com.packt.chapter8

import akka.NotUsed
import akka.actor.ActorSystem
import akka.stream.{ActorMaterializer, FlowShape}
import akka.stream.scaladsl.{Balance, Flow, GraphDSL, Merge, Sink, Source}

import scala.util.Random

trait PipeliningParallelizing extends App {

  implicit val actorSystem = ActorSystem("PipeliningParallelizing")
  implicit val actorMaterializer = ActorMaterializer()

  case class Wash(id: Int)
  case class Dry(id: Int)
  case class Done(id: Int)

  val tasks = (1 to 5).map(Wash)

  def washStage = Flow[Wash].map(wash => {
    val sleepTime = Random.nextInt(3) * 1000
    println(s"Washing ${wash.id}. It will take $sleepTime milliseconds.")
    Thread.sleep(sleepTime)
    Dry(wash.id)
  })

  def dryStage = Flow[Dry].map(dry => {
    val sleepTime = Random.nextInt(3) * 1000
    println(s"Drying ${dry.id}. It will take $sleepTime milliseconds.")
    Thread.sleep(sleepTime)
    Done(dry.id)
  })

  val parallelStage = Flow.fromGraph(GraphDSL.create() { implicit builder =>
    import GraphDSL.Implicits._

    val dispatchLaundry = builder.add(Balance[Wash](3))
    val mergeLaundry = builder.add(Merge[Done](3))

    dispatchLaundry.out(0) ~> washStage.async ~> dryStage.async ~> mergeLaundry.in(0)
    dispatchLaundry.out(1) ~> washStage.async ~> dryStage.async ~> mergeLaundry.in(1)
    dispatchLaundry.out(2) ~> washStage.async ~> dryStage.async ~> mergeLaundry.in(2)

    FlowShape(dispatchLaundry.in, mergeLaundry.out)
  })

  def runGraph(testingFlow: Flow[Wash, Done, NotUsed]) = Source(tasks).via(testingFlow).to(Sink.foreach(println)).run()
} 
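The Balance/Merge fan-out above is the manual route to parallelism; when the work fits in a Future, mapAsync achieves a similar effect with far less wiring. A hedged alternative sketch:

import scala.concurrent.Future

import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.{Sink, Source}

object MapAsyncSketch extends App {
  implicit val system = ActorSystem("mapasync-sketch")
  implicit val materializer = ActorMaterializer()
  import system.dispatcher

  // Up to three "washes" in flight at once, with element order preserved.
  Source(1 to 5)
    .mapAsync(3)(id => Future { Thread.sleep(100); s"done $id" })
    .runWith(Sink.foreach(println))
}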
Example 45
Source File: ReadSideTestDriver.scala    From lagom   with Apache License 2.0 5 votes vote down vote up
package com.lightbend.lagom.scaladsl.testkit

import akka.Done
import akka.persistence.query.Offset
import akka.stream.Materializer
import akka.stream.scaladsl.Sink
import akka.stream.scaladsl.Source
import com.lightbend.lagom.scaladsl.persistence.ReadSideProcessor.ReadSideHandler
import com.lightbend.lagom.scaladsl.persistence.AggregateEvent
import com.lightbend.lagom.scaladsl.persistence.EventStreamElement
import com.lightbend.lagom.scaladsl.persistence.ReadSide
import com.lightbend.lagom.scaladsl.persistence.ReadSideProcessor

import scala.concurrent.ExecutionContext
import scala.concurrent.Future

class ReadSideTestDriver(implicit val materializer: Materializer, ec: ExecutionContext) extends ReadSide {
  private var processors = Map.empty[Class[_], Seq[Future[(ReadSideHandler[_], Offset)]]]

  override def register[Event <: AggregateEvent[Event]](processorFactory: => ReadSideProcessor[Event]): Unit = {
    val processor = processorFactory
    val eventTags = processor.aggregateTags
    val handler   = processor.buildHandler()
    val future = for {
      _      <- handler.globalPrepare()
      offset <- handler.prepare(eventTags.head)
    } yield {
      handler -> offset
    }
    synchronized {
      val handlers = processors.getOrElse(eventTags.head.eventType, Nil)
      processors += (eventTags.head.eventType -> (handlers :+ future))
    }
  }

  def feed[Event <: AggregateEvent[Event]](entityId: String, event: Event, offset: Offset): Future[Done] = {
    processors.get(event.aggregateTag.eventType) match {
      case None => sys.error(s"No processor registered for Event ${event.aggregateTag.eventType.getCanonicalName}")
      case Some(handlerFutures) =>
        for {
          handlers <- Future.sequence(handlerFutures)
          _ <- Future.sequence(handlers.map {
            case (handler: ReadSideHandler[Event], _) =>
              Source
                .single(new EventStreamElement(entityId, event, offset))
                .via(handler.handle())
                .runWith(Sink.ignore)
          })
        } yield {
          Done
        }
    }
  }
} 
Example 46
Source File: InternalSubscriberStub.scala    From lagom   with Apache License 2.0 5 votes vote down vote up
package com.lightbend.lagom.internal.testkit

import akka.Done
import akka.actor.ActorRef
import akka.stream.Materializer
import akka.stream.OverflowStrategy
import akka.stream.scaladsl.Flow
import akka.stream.scaladsl.Keep
import akka.stream.scaladsl.Sink
import akka.stream.scaladsl.Source

import scala.concurrent.Future
import scala.language.higherKinds

private[lagom] class InternalSubscriberStub[Payload, Message[_]](
    groupId: String,
    topicBuffer: ActorRef
)(implicit materializer: Materializer) {
  def mostOnceSource: Source[Message[Payload], _] = {
    Source
      .actorRef[Message[Payload]](1024, OverflowStrategy.fail)
      .prependMat(Source.empty)(subscribeToBuffer)
  }

  def leastOnce(flow: Flow[Message[Payload], Done, _]): Future[Done] = {
    mostOnceSource
      .via(flow)
      .toMat(Sink.ignore)(Keep.right[Any, Future[Done]])
      .run()
  }

  private def subscribeToBuffer[R](ref: ActorRef, t: R) = {
    topicBuffer.tell(TopicBufferActor.SubscribeToBuffer(groupId, ref), ActorRef.noSender)
    t
  }
} 
Example 47
Source File: YetAnotherAkkaClient.scala    From telegram   with Apache License 2.0 5 votes vote down vote up
package com.bot4s.telegram.clients

import akka.actor.ActorSystem
import akka.http.scaladsl.Http
import akka.http.scaladsl.marshalling.Marshal
import akka.http.scaladsl.model.Uri.Path
import akka.http.scaladsl.model._
import akka.http.scaladsl.unmarshalling.Unmarshal
import akka.stream.Materializer
import akka.stream.scaladsl.{Sink, Source}
import cats.instances.future._
import com.bot4s.telegram.api.RequestHandler
import com.bot4s.telegram.methods.{Request, Response}
import io.circe.{Decoder, Encoder}
import slogging.StrictLogging
import com.bot4s.telegram.marshalling.responseDecoder

import scala.concurrent.{ExecutionContext, Future}

class YetAnotherAkkaClient(token: String, telegramHost: String = "api.telegram.org")
                          (implicit system: ActorSystem, materializer: Materializer, ec: ExecutionContext)
  extends RequestHandler[Future] with StrictLogging {

  private val flow = Http().outgoingConnectionHttps(telegramHost)

  import com.bot4s.telegram.marshalling.AkkaHttpMarshalling._

  override def sendRequest[R, T <: Request[_]](request: T)(implicit encT: Encoder[T], decR: Decoder[R]): Future[R] = {
    Source.fromFuture(
      Marshal(request).to[RequestEntity]
        .map {
          re =>
            HttpRequest(HttpMethods.POST, Uri(path = Path(s"/bot$token/" + request.methodName)), entity = re)
        })
      .via(flow)
      .mapAsync(1)(r => Unmarshal(r.entity).to[Response[R]])
      .runWith(Sink.head)
      .map(processApiResponse[R])
  }
} 
Example 48
Source File: LoadTest.scala    From ws_to_kafka   with MIT License 5 votes vote down vote up
package com.pkinsky

import java.util.concurrent.atomic.AtomicInteger

import akka.http.scaladsl.model.ws.{InvalidUpgradeResponse, WebsocketUpgradeResponse, WebsocketRequest, TextMessage}
import akka.http.scaladsl.Http
import akka.http.scaladsl.model.Uri
import akka.stream.ThrottleMode
import akka.stream.scaladsl.{Keep, Sink, RunnableGraph, Source}
import play.api.libs.json.Json

import scala.concurrent.{Future, Await}
import scala.concurrent.duration._
import scala.language.postfixOps

object LoadTest extends App with AppContext {
  val clients = 256
  val eventsPerClient = 256

  val eventsSent = new AtomicInteger(0)

  def testData(clientId: String): Source[Event, Unit] =
    Source.unfoldInf(1) { n =>
      val event = Event(s"msg number $n", clientId, System.currentTimeMillis())
      (n + 1, event)
    }.take(eventsPerClient).throttle(1, 100 millis, 1, ThrottleMode.Shaping)

  def wsClient(clientId: String): RunnableGraph[Future[WebsocketUpgradeResponse]] =
    testData(clientId).map(e => TextMessage.Strict(Json.toJson(e).toString))
      .map { x => eventsSent.incrementAndGet(); x }
      .viaMat(Http().websocketClientFlow(WebsocketRequest(Uri(s"ws://localhost:$port/ws"))))(Keep.right).to(Sink.ignore)

  //set up websocket connections
  (1 to clients).foreach { id =>
    wsClient(s"client $id").run()
  }

  //watch kafka for messages sent via websocket
  val kafkaConsumerGraph: RunnableGraph[Future[Seq[Event]]] =
    kafka.consume[Event](eventTopic, "group_new")
      .take(clients * eventsPerClient).takeWithin(2 minutes)
      .toMat(Sink.seq)(Keep.right)

  val res = Await.result(kafkaConsumerGraph.run, 5 minutes)
  println(s"sent ${eventsSent.get()} events total")
  println(s"res size: ${res.length}")
} 
Example 49
Source File: KafkaService.scala    From ws_to_kafka   with MIT License 5 votes vote down vote up
package com.pkinsky


import akka.actor.ActorSystem
import akka.stream.scaladsl.{Source, Flow, Sink}
import com.softwaremill.react.kafka.{ConsumerProperties, ProducerProperties, ProducerMessage, ReactiveKafka}
import org.apache.kafka.common.serialization.{Deserializer, Serializer}
import play.api.libs.json.{Json, Reads, Writes}

case class KafkaServiceConf(bootstrapServers: String)

class KafkaService(kafkaClient: ReactiveKafka, conf: KafkaServiceConf) {
  
  def consume[T](topic: String, groupId: String)(implicit writes: Reads[T], actorSystem: ActorSystem): Source[T, Unit] =
    Source.fromPublisher(kafkaClient.consume(
      ConsumerProperties(
        bootstrapServers = conf.bootstrapServers, // IP and port of local Kafka instance
        topic = topic, // topic to consume messages from
        groupId = groupId, // consumer group
        valueDeserializer = KafkaService.deserializer[T]
      )
    )).map(_.value())
}


object KafkaService {
  def serializer[T: Writes] = new Serializer[T] {
    override def serialize(topic: String, data: T): Array[Byte] = {
      val js = Json.toJson(data)
      js.toString().getBytes("UTF-8")
    }

    override def configure(configs: java.util.Map[String, _], isKey: Boolean): Unit = ()
    override def close(): Unit = ()
  }

  def deserializer[T: Reads] = new Deserializer[T] {
    override def deserialize(topic: String, data: Array[Byte]): T = {
      val s = new String(data, "UTF-8")
      Json.fromJson(Json.parse(s)).get //throw exception on error ¯\_(ツ)_/¯ (consider returning JsResult[T])
    }

    override def configure(configs: java.util.Map[String, _], isKey: Boolean): Unit = ()
    override def close(): Unit = ()
  }
} 
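The comment in deserializer flags the .get as a failure point; a hedged variant that returns the parse result and lets the consumer decide (a hypothetical helper, not part of the project above):

import org.apache.kafka.common.serialization.Deserializer
import play.api.libs.json.{JsResult, Json, Reads}

object SafeKafkaCodecs {
  // Same shape as KafkaService.deserializer, but malformed payloads
  // surface as JsError instead of a thrown exception.
  def safeDeserializer[T: Reads]: Deserializer[JsResult[T]] = new Deserializer[JsResult[T]] {
    override def deserialize(topic: String, data: Array[Byte]): JsResult[T] =
      Json.fromJson[T](Json.parse(new String(data, "UTF-8")))

    override def configure(configs: java.util.Map[String, _], isKey: Boolean): Unit = ()
    override def close(): Unit = ()
  }
}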
Example 50
Source File: DemoApp.scala    From akka-management   with Apache License 2.0 5 votes vote down vote up
package akka.cluster.bootstrap

import akka.actor.{ Actor, ActorLogging, ActorSystem, Props }
import akka.cluster.ClusterEvent.ClusterDomainEvent
import akka.cluster.{ Cluster, ClusterEvent }
import akka.http.scaladsl.Http
import akka.management.scaladsl.AkkaManagement
import akka.management.cluster.bootstrap.ClusterBootstrap
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.Sink
import akka.stream.scaladsl.Source
import com.typesafe.config.ConfigFactory

object DemoApp extends App {

  implicit val system = ActorSystem("simple")

  import system.log
  import system.dispatcher
  implicit val mat = ActorMaterializer()
  val cluster = Cluster(system)

  log.info("Started [{}], cluster.selfAddress = {}", system, cluster.selfAddress)

  AkkaManagement(system).start()
  ClusterBootstrap(system).start()

  cluster
    .subscribe(system.actorOf(Props[ClusterWatcher]), ClusterEvent.InitialStateAsEvents, classOf[ClusterDomainEvent])

  import akka.http.scaladsl.server.Directives._
  Http().bindAndHandle(complete("Hello world"), "0.0.0.0", 8080)

}

class ClusterWatcher extends Actor with ActorLogging {
  val cluster = Cluster(context.system)

  override def receive = {
    case msg => log.info("Cluster {} >>> {}", msg, cluster.selfAddress)
  }
} 
Example 51
Source File: AkkaPersistenceEventLogSpec.scala    From akka-stream-eventsourcing   with Apache License 2.0 5 votes vote down vote up
package com.github.krasserm.ases.log

import akka.actor.ActorSystem
import akka.stream.scaladsl.{Sink, Source}
import akka.testkit.TestKit
import com.github.krasserm.ases._
import org.scalatest.concurrent.ScalaFutures
import org.scalatest.{Matchers, WordSpecLike}

import scala.collection.immutable.Seq

class AkkaPersistenceEventLogSpec extends TestKit(ActorSystem("test")) with WordSpecLike with Matchers with ScalaFutures with StreamSpec {
  val akkaPersistenceEventLog: AkkaPersistenceEventLog = new AkkaPersistenceEventLog(journalId = "akka.persistence.journal.inmem")

  "An Akka Persistence event log" must {
    "provide a sink for writing events and a source for delivering replayed events" in {
      val persistenceId = "1"
      val events = Seq("a", "b", "c").map(Emitted(_, emitterId))
      val expected = durables(events, offset = 1).map(Delivered(_)) :+ Recovered

      Source(events).runWith(akkaPersistenceEventLog.sink(persistenceId)).futureValue
      akkaPersistenceEventLog.source[String](persistenceId).runWith(Sink.seq).futureValue should be(expected)
    }
    "provide a flow with an input port for writing events and and output port for delivering replayed and live events" in {
      val persistenceId = "2"
      val events1 = Seq("a", "b", "c").map(Emitted(_, emitterId))
      val events2 = Seq("d", "e", "f").map(Emitted(_, emitterId))
      val expected = (durables(events1, offset = 1).map(Delivered(_)) :+ Recovered) ++ durables(events2, offset = 4).map(Delivered(_))

      Source(events1).runWith(akkaPersistenceEventLog.sink(persistenceId)).futureValue
      Source(events2).via(akkaPersistenceEventLog.flow(persistenceId)).runWith(Sink.seq).futureValue should be(expected)
    }
    "provide a source that only delivers events of compatible types" in {
      val persistenceId = "3"
      val events = Seq("a", "b", 1, 2).map(Emitted(_, emitterId))
      val expected = durables(events, offset = 1).drop(2).map(Delivered(_)) :+ Recovered

      Source(events).runWith(akkaPersistenceEventLog.sink(persistenceId)).futureValue
      akkaPersistenceEventLog.source[Int](persistenceId).runWith(Sink.seq).futureValue should be(expected)
    }
  }
} 
Example 52
Source File: KafkaEventLogSpec.scala    From akka-stream-eventsourcing   with Apache License 2.0 5 votes vote down vote up
package com.github.krasserm.ases.log

import akka.actor.ActorSystem
import akka.stream.scaladsl.{Sink, Source}
import akka.testkit.TestKit
import com.github.krasserm.ases._
import org.apache.kafka.common.TopicPartition
import org.scalatest.concurrent.ScalaFutures
import org.scalatest.time.{Millis, Seconds, Span}
import org.scalatest.{Matchers, WordSpecLike}

import scala.collection.immutable.Seq

class KafkaEventLogSpec extends TestKit(ActorSystem("test")) with WordSpecLike with Matchers with ScalaFutures with StreamSpec with KafkaSpec {
  implicit val pc = PatienceConfig(timeout = Span(5, Seconds), interval = Span(10, Millis))

  val kafkaEventLog: KafkaEventLog = new KafkaEventLog(host, port)

  "A Kafka event log" must {
    "provide a sink for writing events and a source for delivering replayed events" in {
      val topicPartition = new TopicPartition("p-1", 0)
      val events = Seq("a", "b", "c").map(Emitted(_, emitterId))
      val expected = durables(events).map(Delivered(_)) :+ Recovered

      Source(events).runWith(kafkaEventLog.sink(topicPartition)).futureValue
      kafkaEventLog.source[String](topicPartition).take(4).runWith(Sink.seq).futureValue should be(expected)
    }
    "provide a flow with an input port for writing events and and output port for delivering replayed and live events" in {
      val topicPartition = new TopicPartition("p-2", 0)
      val events1 = Seq("a", "b", "c").map(Emitted(_, emitterId))
      val events2 = Seq("d", "e", "f").map(Emitted(_, emitterId))
      val expected = (durables(events1).map(Delivered(_)) :+ Recovered) ++ durables(events2, offset = 3).map(Delivered(_))

      Source(events1).runWith(kafkaEventLog.sink(topicPartition)).futureValue
      Source(events2).via(kafkaEventLog.flow(topicPartition)).take(7).runWith(Sink.seq).futureValue should be(expected)
    }
    "provide a source that only delivers events of compatible types" in {
      val topicPartition = new TopicPartition("p-3", 0)
      val events = Seq("a", "b", 1, 2).map(Emitted(_, emitterId))
      val expected = durables(events).drop(2).map(Delivered(_)) :+ Recovered

      Source(events).runWith(kafkaEventLog.sink(topicPartition)).futureValue
      kafkaEventLog.source[Int](topicPartition).take(3).runWith(Sink.seq).futureValue should be(expected)
    }
  }
} 
Example 53
Source File: RequestRoutingSpec.scala    From akka-stream-eventsourcing   with Apache License 2.0 5 votes vote down vote up
package com.github.krasserm.ases

import akka.NotUsed
import akka.actor.ActorSystem
import akka.stream.scaladsl.{Flow, Sink, Source}
import akka.testkit.TestKit
import com.github.krasserm.ases.log.AkkaPersistenceEventLog
import org.scalatest.concurrent.ScalaFutures
import org.scalatest.{Matchers, WordSpecLike}
import scala.collection.immutable.Seq

object RequestRoutingSpec {
  import EventSourcing._

  sealed trait Request {
    def aggregateId: String
  }
  case class GetState(aggregateId: String) extends Request              // Query
  case class Increment(aggregateId: String, delta: Int) extends Request // Command
  case class Incremented(aggregateId: String, delta: Int)               // Event
  case class Response(aggregateId: String, state: Int)

  val requestHandler: RequestHandler[Int, Incremented, Request, Response] = {
    case (s, GetState(aggregateId))     => respond(Response(aggregateId, s))
    case (_, Increment(aggregateId, d)) => emit(Seq(Incremented(aggregateId, d)), Response(aggregateId, _))
  }

  val eventHandler: EventHandler[Int, Incremented] =
    (s, e) => s + e.delta
}

class RequestRoutingSpec extends TestKit(ActorSystem("test")) with WordSpecLike with Matchers with ScalaFutures with StreamSpec {
  import RequestRoutingSpec._

  val akkaPersistenceEventLog: AkkaPersistenceEventLog =
    new log.AkkaPersistenceEventLog(journalId = "akka.persistence.journal.inmem")

  def processor(aggregateId: String): Flow[Request, Response, NotUsed] =
    EventSourcing(aggregateId, 0, requestHandler, eventHandler).join(akkaPersistenceEventLog.flow(aggregateId))

  def router: Flow[Request, Response, NotUsed] =
    Router(_.aggregateId, processor)

  "A request router" when {
    "configured to route based on aggregate id" must {
      "dynamically create a request processor for each aggregate id" in {
        val aggregateId1 = "a1"
        val aggregateId2 = "a2"

        val (pub, sub) = probes(router)

        pub.sendNext(Increment(aggregateId1, 3))
        sub.requestNext(Response(aggregateId1, 3))

        pub.sendNext(Increment(aggregateId2, 1))
        sub.requestNext(Response(aggregateId2, 1))

        pub.sendNext(Increment(aggregateId1, 2))
        sub.requestNext(Response(aggregateId1, 5))

        pub.sendNext(Increment(aggregateId2, -4))
        sub.requestNext(Response(aggregateId2, -3))
      }
      "handle single command using Source.single" in {
        val request = Increment("a3", 3)
        val expected = Response("a3", 3)
        Source.single(request)
          .via(router)
          .runWith(Sink.head)
          .futureValue should be(expected)
      }
      "handle single command using Source.apply(Seq)" in {
        val request = Increment("a4", 3)
        val expected = Response("a4", 3)
        Source(Seq(request))
          .via(router)
          .runWith(Sink.head)
          .futureValue should be(expected)
      }
      "handle multiple commands" in {
        Source(Seq(Increment("a5", 1), Increment("a5", 2), Increment("a5", 3)))
          .via(router)
          .runWith(Sink.seq)
          .futureValue should be(Seq(Response("a5", 1), Response("a5", 3), Response("a5", 6)))
      }
    }
  }
} 
Example 54
Source File: EventCollaborationSpec.scala    From akka-stream-eventsourcing   with Apache License 2.0 5 votes vote down vote up
package com.github.krasserm.ases

import akka.NotUsed
import akka.actor.ActorSystem
import akka.stream.scaladsl.{Flow, Sink}
import akka.testkit.TestKit
import com.github.krasserm.ases.log.{KafkaEventLog, KafkaSpec}
import org.apache.kafka.common.TopicPartition
import org.scalatest.concurrent.ScalaFutures
import org.scalatest.time.{Millis, Seconds, Span}
import org.scalatest.{Matchers, WordSpecLike}

import scala.collection.immutable.Seq

class EventCollaborationSpec extends TestKit(ActorSystem("test")) with WordSpecLike with Matchers with ScalaFutures with StreamSpec with KafkaSpec {
  import EventSourcingSpec._

  implicit val pc = PatienceConfig(timeout = Span(5, Seconds), interval = Span(10, Millis))

  val emitterId1 = "processor1"
  val emitterId2 = "processor2"

  val kafkaEventLog: KafkaEventLog =
    new log.KafkaEventLog(host, port)

  def processor(emitterId: String, topicPartition: TopicPartition): Flow[Request, Response, NotUsed] =
    EventSourcing(emitterId, 0, requestHandler, eventHandler).join(kafkaEventLog.flow(topicPartition))

  "A group of EventSourcing stages" when {
    "joined with a shared event log" can {
      "collaborate via publish-subscribe" in {
        val topicPartition = new TopicPartition("p-1", 0)    // shared topic partition
        val (pub1, sub1) = probes(processor(emitterId1, topicPartition)) // processor 1
        val (pub2, sub2) = probes(processor(emitterId2, topicPartition)) // processor 2

        pub1.sendNext(Increment(3))
        // Both processors receive event but
        // only processor 1 creates response
        sub1.requestNext(Response(3))

        pub2.sendNext(Increment(-4))
        // Both processors receive event but
        // only processor 2 creates response
        sub2.requestNext(Response(-1))

        // consume and verify events emitted by both processors
        kafkaEventLog.source[Incremented](topicPartition).via(log.replayed).map {
          case Durable(event, eid, _, sequenceNr) => (event, eid, sequenceNr)
        }.runWith(Sink.seq).futureValue should be(Seq(
          (Incremented(3), emitterId1, 0L),
          (Incremented(-4), emitterId2, 1L)
        ))
      }
    }
  }
} 
Example 55
Source File: PubSubSinkIT.scala    From akka-cloudpubsub   with Apache License 2.0 5 votes vote down vote up
package com.qubit.pubsub.akka

import akka.NotUsed
import akka.actor.ActorSystem
import akka.stream.scaladsl.{Keep, Sink}
import akka.stream.testkit.scaladsl.TestSource
import akka.stream.{ActorMaterializer, Attributes, Graph, SinkShape}
import com.google.common.base.Charsets
import com.qubit.pubsub.PubSubIntegrationTest
import com.qubit.pubsub.akka.attributes.{
  PubSubClientAttribute,
  PubSubStageBufferSizeAttribute
}
import com.qubit.pubsub.client.PubSubMessage
import org.scalatest.{BeforeAndAfterAll, FunSuite, Matchers}

import scala.concurrent.Await
import scala.concurrent.duration._
import scala.util.Try

class PubSubSinkIT
    extends FunSuite
    with Matchers
    with BeforeAndAfterAll
    with PubSubIntegrationTest {

  implicit val actorSystem = ActorSystem("pubsub-stream-test")
  implicit val materializer = ActorMaterializer()

  override def testName = "pubsubsink"

  override def beforeAll(): Unit = {
    Await.ready(client.createTopic(testTopic), timeout)
    Await
      .ready(client.createSubscription(testSubscription, testTopic), timeout)
  }

  override def afterAll(): Unit = {
    actorSystem.terminate()
    Await.ready(client.deleteSubscription(testSubscription), timeout)
    Await.ready(client.deleteTopic(testTopic), timeout)
  }

  test("PubSubSink success") {
    val sinkGraph: Graph[SinkShape[PubSubMessage], NotUsed] =
      new PubSubSink(testTopic, 1.second)
    val sinkAttributes = Attributes(
      List(PubSubClientAttribute(client), PubSubStageBufferSizeAttribute(30)))
    val pubsubSink = Sink.fromGraph(sinkGraph).withAttributes(sinkAttributes)

    val (pub, _) = TestSource
      .probe[Array[Byte]]
      .map(PubSubMessage(_))
      .toMat(pubsubSink)(Keep.both)
      .run()

    Range(0, 100)
      .map(i => s"xxx$i".getBytes(Charsets.UTF_8))
      .foreach(pub.sendNext)
    pub.sendComplete()

    // wait for buffers to flush
    Try(Thread.sleep(1000))

    val output = Await.result(client.pull(testSubscription, 100), timeout)
    client.ack(testSubscription, output.map(m => m.ackId))

    output should not be (null)
    output should have size (100)
    output
      .map(m => new String(m.payload.payload, Charsets.UTF_8))
      .forall(_.startsWith("xxx")) should be(true)
  }
} 
Example 56
Source File: elasticsearchExtensionsSpec.scala    From akka-stream-extensions   with Apache License 2.0 5 votes vote down vote up
package com.mfglabs.stream
package extensions.elasticsearch

import akka.actor.ActorSystem
import akka.stream._
import akka.stream.scaladsl.Sink

import org.elasticsearch.index.query.QueryBuilders
import org.scalatest.concurrent.ScalaFutures
import org.scalatest.time.{Millis, Minutes, Span}
import org.scalatest.{BeforeAndAfterAll, Matchers, FlatSpec}

import scala.concurrent.duration._
import scala.util.Try

import org.elasticsearch.common.settings.Settings
import org.elasticsearch.node.Node

class ElasticExtensionsSpec extends FlatSpec with Matchers with ScalaFutures with BeforeAndAfterAll {
  implicit override val patienceConfig = PatienceConfig(timeout = Span(1, Minutes), interval = Span(100, Millis))

  implicit val as = ActorSystem()
  implicit val fm = ActorMaterializer()
  implicit val blockingEc = ExecutionContextForBlockingOps(scala.concurrent.ExecutionContext.Implicits.global)

  val settings = Settings.builder()
    .put("path.data", "target/elasticsearch-data")
    .put("path.home", "/")
    .put("transport.type", "local")
    .put("http.enabled", false)
    .build();

  lazy val node = new Node(settings).start();
  implicit lazy val client = node.client()

  val index = "test"
  val `type` = "type"

  "EsStream" should "execute a query a get the result as a stream" in {
    Try(client.admin.indices().prepareDelete(index).get())

    val toIndex = for (i <- 1 to 5002) yield (i, s"""{i: $i}""")
    toIndex.foreach { case (i, json) =>
      client.prepareIndex(index, `type`).setSource(json).setId(i.toString).get()
    }

    client.admin.indices.prepareRefresh(index).get() // to be sure that the data is indexed

    val res = EsStream.queryAsStream(QueryBuilders.matchAllQuery(), index, `type`, 1 minutes, 50)
      .runWith(Sink.seq)
      .futureValue

    res.sorted shouldEqual toIndex.map(_._2).sorted
  }

  override def afterAll(): Unit = {
    client.close()
    node.close()
  }

} 
Example 57
Source File: CleaningPipelineSpec.scala    From tap   with Apache License 2.0 5 votes vote down vote up
package io.heta.tap.pipelines

import akka.stream.scaladsl.{Flow, Keep, Sink, Source}
import io.heta.tap.UnitSpec

import scala.concurrent.Await
import scala.concurrent.duration._



class CleaningPipelineSpec extends UnitSpec {

  import io.heta.tap.pipelines.materialize.PipelineContext._

  val cleaning = new Cleaning


  def testSource(input:String) = Source.single(input)
  val testSink = Flow[String].toMat(Sink.head[String])(Keep.right)

  "revealInvisible" should "replace whitespace characters with visible characters" in {

    import cleaning.White._
    val input = s"1${sp}2${nb}3${nl}4${cr}5\u001e6\u00807"
    val future = testSource(input) via cleaning.Pipeline.revealInvisible runWith testSink
    val result = Await.result(future, 3 seconds)
    assert(result=="1·2·3¬4¬5�6�7")
  }

  "simplify" should "replace quotes and hyphens with single byte versions" in {

    import cleaning.Quote._
    val input = s"1${singleCurlyLeft}2${singleCurlyRight}3${doubleCurlyLeft}4${doubleCurlyRight}5${cleaning.Hyphen.rgx_hyphens}6"
    val future = testSource(input) via cleaning.Pipeline.simplify runWith testSink
    val result = Await.result(future, 3 seconds)
    assert(result=="1'2'3\"4\"5-|-|-|-|-|-|-|-6")
  }

  "lengthPreserve" should "replace control characters while preserving length" in {
    import cleaning.White._
    val input = s"1${sp}2${nb}3${nl}4${cr}5\u001e6\u00807"
    val future = testSource(input) via cleaning.Pipeline.lengthPreserve runWith testSink
    val result = Await.result(future, 3 seconds)
    assert(result=="1 2 3\n4\n5�6�7" && result.length==input.length)
  }

  "utfMinimal" should "strip control characters, and reduce whitespace" in {
    import cleaning.White._
    val input = s"1${sp}${nb}3${nl}${cr}5\u001e6\u00807"
    val future = testSource(input) via cleaning.Pipeline.utfMinimal runWith testSink
    val result = Await.result(future, 3 seconds)
    assert(result=="1 3\n567")
  }

  "utfSimplify" should "replace hyphens and quotes, strip controls and reduce whitespace" in {
    import cleaning.Quote._
    import cleaning.White._
    val input = s"1${sp}${nb}3${nl}${cr}5\u001e6\u00807${singleCurlyLeft}8${singleCurlyRight}9${doubleCurlyLeft}10${doubleCurlyRight}11${cleaning.Hyphen.rgx_hyphens}12"
    val future = testSource(input) via cleaning.Pipeline.utfSimplify runWith testSink
    val result = Await.result(future, 3 seconds)
    assert(result=="1 3\n567'8'9\"10\"11-|-|-|-|-|-|-|-12")
  }

//  "asciiOnly" should "replace or strip all non-ascii characters" in {
//    import cleaning.Quote._
//    import cleaning.White._
//    val input = s"1${sp}${nb}3${nl}${cr}56\u00807${singleCurlyLeft}8${singleCurlyRight}9${doubleCurlyLeft}10${doubleCurlyRight}11${cleaning.Hyphen.rgx_hyphens}12"
//    val future = testSource(input) via cleaning.Pipeline.asciiOnly runWith testSink
//    val result = Await.result(future, 3 seconds)
//    assert(result=="1 3\r\n567891011|||||||12")
//  }

} 
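Note: testSink above keeps the materialized Future of Sink.head via Keep.right, so each test can await the single transformed element. A minimal sketch of that wiring (illustrative names):

import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.{Flow, Keep, Sink, Source}

import scala.concurrent.Await
import scala.concurrent.duration._

object KeepRightSketch extends App {
  implicit val system = ActorSystem("keep-right-sketch")
  implicit val mat = ActorMaterializer()

  // A Flow composed with a Sink, keeping the sink's materialized Future[String].
  val upperHead = Flow[String].map(_.toUpperCase).toMat(Sink.head[String])(Keep.right)
  println(Await.result(Source.single("abc").runWith(upperHead), 3.seconds)) // ABC
  system.terminate()
}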
Example 58
Source File: WebService.scala    From heimdallr   with Apache License 2.0 5 votes vote down vote up
package chat

import scala.concurrent.ExecutionContext.Implicits._
import scala.util.{Failure,Success}
import akka.actor.ActorSystem
import akka.stream.Materializer
import akka.http.scaladsl.Http
import akka.http.scaladsl.Http.{ ServerBinding }
import akka.http.scaladsl.model.{ HttpRequest, HttpResponse, Uri }
import akka.stream.scaladsl.{ Flow, Sink, Source }
import org.slf4j.LoggerFactory

trait WebService {
  val log = LoggerFactory.getLogger("total")
  private var binding: scala.concurrent.Future[ServerBinding] = null

  def serviceBind(serviceName: String, bindRoute: Flow[HttpRequest, HttpResponse, Any], bindPort: Int)
                 (implicit actorSystem: ActorSystem, materializer: Materializer): Unit = {
    binding = Http().bindAndHandle(bindRoute,"0.0.0.0", bindPort)

    // Log whether the bind succeeded or failed
    binding.onComplete {
      case Success(binding) =>
        val localAddress = binding.localAddress
        log.info(s"${serviceName} is listening on ${localAddress.getAddress}:${localAddress.getPort}")

      case Failure(e) =>
        log.error(s"${serviceName} Binding failed with ${e.getMessage}")
    }
  }

  def serviceUnbind(serviceName: String) = {
    if (binding != null) {
      binding
        .flatMap(_.unbind())
        .onComplete(_ => log.info(s"${serviceName} listening port unbinding ... "))
    } else {
      log.info(s"${serviceName} Unbinding Failed !")
    }
  }
} 
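A hypothetical usage sketch of the WebService trait above (DemoService and the trivial request-to-response flow are illustrative, not part of heimdallr):

import akka.actor.ActorSystem
import akka.http.scaladsl.model.{HttpRequest, HttpResponse}
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.Flow
import chat.WebService

object DemoService extends App with WebService {
  implicit val system = ActorSystem("demo")
  implicit val materializer = ActorMaterializer()

  // Answer every request with a fixed 200 OK.
  serviceBind("demo-service", Flow[HttpRequest].map(_ => HttpResponse(entity = "ok")), 8080)
}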
Example 59
Source File: AttributesComputation.scala    From nexus   with Apache License 2.0 5 votes vote down vote up
package ch.epfl.bluebrain.nexus.storage.attributes

import java.nio.file.{Files, Path}
import java.security.MessageDigest

import akka.http.scaladsl.model.HttpCharsets.`UTF-8`
import akka.http.scaladsl.model.MediaTypes.{`application/octet-stream`, `application/x-tar`}
import akka.http.scaladsl.model.{ContentType, MediaType, MediaTypes}
import akka.stream.Materializer
import akka.stream.scaladsl.{Keep, Sink}
import akka.util.ByteString
import cats.effect.Effect
import cats.implicits._
import ch.epfl.bluebrain.nexus.storage.File.{Digest, FileAttributes}
import ch.epfl.bluebrain.nexus.storage.StorageError.InternalError
import ch.epfl.bluebrain.nexus.storage._
import org.apache.commons.io.FilenameUtils

import scala.concurrent.{ExecutionContext, Future}
import scala.util.{Failure, Success, Try}

trait AttributesComputation[F[_], Source] {

  // Single abstract method: compute the attributes of the resource at 'path'
  // using the given digest algorithm. The implicit instance below implements
  // it with a lambda (SAM conversion).
  def apply(path: Path, algorithm: String): F[FileAttributes]
}

object AttributesComputation {

  implicit def akkaAttributes[F[_]](implicit
      ec: ExecutionContext,
      mt: Materializer,
      F: Effect[F]
  ): AttributesComputation[F, AkkaSource] =
    (path: Path, algorithm: String) => {
      if (!Files.exists(path)) F.raiseError(InternalError(s"Path not found '$path'"))
      else
        Try(MessageDigest.getInstance(algorithm)) match {
          case Success(msgDigest) =>
            val isDir  = Files.isDirectory(path)
            val source = if (isDir) folderSource(path) else fileSource(path)
            source
              .alsoToMat(sinkSize)(Keep.right)
              .toMat(sinkDigest(msgDigest)) { (bytesF, digestF) =>
                (bytesF, digestF).mapN {
                  case (bytes, digest) => FileAttributes(path.toAkkaUri, bytes, digest, detectMediaType(path, isDir))
                }
              }
              .run()
              .to[F]
          case Failure(_)         => F.raiseError(InternalError(s"Invalid algorithm '$algorithm'."))
        }

    }
} 
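The excerpt above references sinkSize, sinkDigest and detectMediaType, which live elsewhere in the file (e.g. the companion object). A plausible sketch of the two sinks using Sink.fold (an assumption; the real project wraps the final digest differently):

import akka.stream.scaladsl.Sink
import akka.util.ByteString
import java.security.MessageDigest
import scala.concurrent.Future

// Total number of bytes seen by the stream.
val sinkSize: Sink[ByteString, Future[Long]] =
  Sink.fold(0L)((acc: Long, bytes: ByteString) => acc + bytes.size)

// Feed every chunk into the MessageDigest, returning it when the stream ends.
def sinkDigest(md: MessageDigest): Sink[ByteString, Future[MessageDigest]] =
  Sink.fold(md) { (digest: MessageDigest, bytes: ByteString) =>
    digest.update(bytes.asByteBuffer)
    digest
  }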
Example 60
Source File: Demo.scala    From toketi-iothubreact   with MIT License 5 votes vote down vote up
// Copyright (c) Microsoft. All rights reserved.

package C_Throughput

import akka.stream.scaladsl.Sink
import com.microsoft.azure.iot.iothubreact.MessageFromDevice
import com.microsoft.azure.iot.iothubreact.ResumeOnError._
import com.microsoft.azure.iot.iothubreact.scaladsl._

import scala.concurrent.duration._
import scala.language.postfixOps


object Demo extends App {

  val showStatsEvery = 1 second

  // Messages throughput monitoring sink
  val monitor = Sink.foreach[MessageFromDevice] {
    m ⇒ {
      Monitoring.total += 1

      val partition = m.runtimeInfo.partitionInfo.partitionNumber.get
      Monitoring.totals(partition) += 1
      Monitoring.remain(partition) = if (m.runtimeInfo.partitionInfo.lastSequenceNumber.isEmpty) 0
                                     else m.runtimeInfo.partitionInfo.lastSequenceNumber.get - m.sequenceNumber
    }
  }

  // Start processing the stream
  IoTHub().source
    .to(monitor)
    .run()

  // Print statistics at some interval
  Monitoring.printStatisticsWithFrequency(showStatsEvery)
} 
Example 61
Source File: Demo.scala    From toketi-iothubreact   with MIT License 5 votes vote down vote up
// Copyright (c) Microsoft. All rights reserved.

package D_Throttling

import akka.stream.ThrottleMode
import akka.stream.scaladsl.{Flow, Sink}
import com.microsoft.azure.iot.iothubreact.MessageFromDevice
import com.microsoft.azure.iot.iothubreact.scaladsl._
import com.microsoft.azure.iot.iothubreact.ResumeOnError._

import scala.concurrent.duration._
import scala.language.postfixOps

object Demo extends App {

  val maxSpeed = 100

  // Sink combining throttling and monitoring
  lazy val throttleAndMonitor = Flow[MessageFromDevice]
    .alsoTo(throttler)
    .to(monitor)

  // Stream throttling sink
  val throttler = Flow[MessageFromDevice]
    .throttle(maxSpeed, 1.second, maxSpeed / 10, ThrottleMode.Shaping)
    .to(Sink.ignore)

  // Messages throughput monitoring sink
  val monitor = Sink.foreach[MessageFromDevice] {
    m ⇒ {
      Monitoring.total += 1
      Monitoring.totals(m.runtimeInfo.partitionInfo.partitionNumber.get) += 1
    }
  }

  println(s"Streaming messages at ${maxSpeed} msg/sec")

  IoTHub().source
    .to(throttleAndMonitor)
    .run()

  // Print statistics at some interval
  Monitoring.printStatisticsWithFrequency(1 second)
} 
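Note: alsoTo above fans every element out to the throttler while the main path continues to the monitor; since the throttler backpressures, the whole stream is capped at maxSpeed. A minimal sketch of alsoTo fan-out:

import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.{Sink, Source}

object AlsoToSketch extends App {
  implicit val system = ActorSystem("also-to-sketch")
  implicit val mat = ActorMaterializer()

  Source(1 to 3)
    .alsoTo(Sink.foreach(i => println(s"side: $i")))  // side channel
    .runWith(Sink.foreach(i => println(s"main: $i"))) // primary sink
}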
Example 62
Source File: Demo.scala    From toketi-iothubreact   with MIT License 5 votes vote down vote up
// Copyright (c) Microsoft. All rights reserved.

package E_Checkpoints

import akka.stream.scaladsl.Sink
import com.microsoft.azure.iot.iothubreact.{MessageFromDevice, SourceOptions}
import com.microsoft.azure.iot.iothubreact.ResumeOnError._
import com.microsoft.azure.iot.iothubreact.filters.Device
import com.microsoft.azure.iot.iothubreact.scaladsl._


object Demo extends App {

  val console = Sink.foreach[MessageFromDevice] {
    t ⇒ println(s"Message from ${t.deviceId} - Time: ${t.received}")
  }

  // Stream using checkpointing
  IoTHub().source(SourceOptions().saveOffsetsOnPull)
    .filter(Device("device1000"))
    .to(console)
    .run()
} 
Example 63
Source File: Watcher.scala    From cloudstate   with Apache License 2.0 5 votes vote down vote up
package io.cloudstate.operator

import akka.{Done, NotUsed}
import akka.stream.{KillSwitch, KillSwitches, Materializer}
import akka.stream.scaladsl.{Flow, Keep, RestartSource, Sink, Source}
import play.api.libs.json.Format
import skuber.{ListResource, ObjectResource, ResourceDefinition}
import skuber.api.client.{EventType, KubernetesClient, WatchEvent}

import scala.concurrent.duration._
import skuber.json.format._

import scala.concurrent.ExecutionContext

object Watcher {

  private implicit def listResourceFormat[Resource <: ObjectResource: Format]: Format[ListResource[Resource]] =
    ListResourceFormat(implicitly[Format[Resource]])

  def watch[Resource <: ObjectResource: Format: ResourceDefinition](
      client: KubernetesClient,
      handler: Flow[WatchEvent[Resource], _, _]
  )(implicit ec: ExecutionContext, mat: Materializer): KillSwitch =
    // Summary of what we want our event loop to look like:
    // * We start by listing all the resources, and process them.
    // * Then we start watching from the resourceVersion that we got in our list, so we get all updates.
    // * But we also want to periodically recheck all resources, since sometimes there are race conditions
    //   between operators handling dependent resources (eg, if you deploy a journal and a service that uses
    //   it at the same time), so we only run the watch for a maximum of that time (eg, 5 minutes), before
    //   restarting.
    // * Also, if errors are encountered, we don't want to continually restart in a hot loop, so we use the
    //   RestartSource to restart with backoff.
    RestartSource
      .onFailuresWithBackoff(2.seconds, 20.seconds, 0.2) { () =>
        val source = Source
          .repeat(NotUsed)
          .flatMapConcat { _ =>
            Source
              .fromFutureSource(
                client
                  .list[ListResource[Resource]]()
                  .map { resources =>
                    val watch = client
                      .watchAllContinuously[Resource](sinceResourceVersion = Some(resources.resourceVersion))

                    Source(resources)
                      .map(WatchEvent(EventType.MODIFIED, _))
                      .concat(watch)
                  }
              )
              .takeWithin(5.minutes)
          }

        source.via(handler)
      }
      .viaMat(KillSwitches.single)(Keep.right)
      .to(Sink.ignore)
      .run()

  def watchSingle[Resource <: ObjectResource: Format: ResourceDefinition](
      client: KubernetesClient,
      resourceName: String,
      handler: Flow[WatchEvent[Resource], _, _]
  )(implicit ec: ExecutionContext, mat: Materializer): KillSwitch =
    RestartSource
      .onFailuresWithBackoff(2.seconds, 20.seconds, 0.2) { () =>
        val source = Source
          .repeat(NotUsed)
          .flatMapConcat { _ =>
            Source
              .fromFutureSource(
                client.getOption[Resource](resourceName).map {
                  case Some(resource) =>
                    val watch =
                      client.watchContinuously[Resource](resourceName,
                                                         sinceResourceVersion = Some(resource.resourceVersion))
                    Source
                      .single(resource)
                      .map(WatchEvent(EventType.MODIFIED, _))
                      .concat(watch)
                  case None =>
                    throw new RuntimeException(
                      s"Resource $resourceName not found in namespace ${client.namespaceName}!"
                    )
                }
              )
              .takeWithin(5.minutes)
          }

        source.via(handler)
      }
      .viaMat(KillSwitches.single)(Keep.right)
      .to(Sink.ignore)
      .run()
} 
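A self-contained sketch of the restart-with-backoff plus kill-switch pattern the watcher is built on (a tick source stands in for the Kubernetes watch):

import akka.actor.ActorSystem
import akka.stream.{ActorMaterializer, KillSwitches}
import akka.stream.scaladsl.{Keep, RestartSource, Sink, Source}

import scala.concurrent.duration._

object RestartSketch extends App {
  implicit val system = ActorSystem("restart-sketch")
  implicit val mat = ActorMaterializer()

  val killSwitch = RestartSource
    .onFailuresWithBackoff(minBackoff = 2.seconds, maxBackoff = 20.seconds, randomFactor = 0.2) { () =>
      Source.tick(0.seconds, 1.second, "event")
    }
    .viaMat(KillSwitches.single)(Keep.right)
    .to(Sink.foreach(println))
    .run()

  Thread.sleep(3500)
  killSwitch.shutdown() // tears down the restartable stream as a whole
  system.terminate()
}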
Example 64
Source File: CrdtsClient.scala    From cloudstate   with Apache License 2.0 5 votes vote down vote up
package io.cloudstate.samples

import akka.actor.ActorSystem
import akka.grpc.GrpcClientSettings
import akka.stream.scaladsl.{Keep, Sink}
import akka.stream.{ActorMaterializer, KillSwitches}
import com.example.crdts.crdt_example._

import scala.concurrent.duration._
import scala.concurrent.{Await, Future}


class CrdtsClient(hostname: String, port: Int, hostnameOverride: Option[String], sys: ActorSystem) {
  def this(hostname: String, port: Int, hostnameOverride: Option[String] = None) =
    this(hostname, port, hostnameOverride, ActorSystem())
  private implicit val system = sys
  private implicit val materializer = ActorMaterializer()
  import sys.dispatcher

  val settings = {
    val s = GrpcClientSettings.connectToServiceAt(hostname, port).withTls(false)
    hostnameOverride.fold(s)(host => s.withChannelBuilderOverrides(_.overrideAuthority(host)))
  }
  println(s"Connecting to $hostname:$port")
  val service = CrdtExampleClient(settings)

  def shutdown(): Unit = {
    await(service.close())
    await(system.terminate())
  }

  def await[T](future: Future[T]): T = Await.result(future, 10.seconds)

  def getGCounter(id: String) = await(service.getGCounter(Get(id))).value

  def incrementGCounter(id: String, value: Long) = await(service.incrementGCounter(UpdateCounter(id, value))).value

  def getPNCounter(id: String) = await(service.getPNCounter(Get(id))).value

  def updatePNCounter(id: String, value: Long) = await(service.updatePNCounter(UpdateCounter(id, value))).value

  def getGSet(id: String) = await(service.getGSet(Get(id))).items

  def mutateGSet(id: String, values: Seq[SomeValue]) = await(service.mutateGSet(MutateSet(add = values))).size

  def getORSet(id: String) = await(service.getORSet(Get(id))).items

  def mutateORSet(id: String, add: Seq[SomeValue] = Nil, remove: Seq[SomeValue] = Nil, clear: Boolean = false) =
    await(service.mutateORSet(MutateSet(key = id, add = add, remove = remove, clear = clear))).size

  def connect(id: String) =
    service.connect(User(id)).viaMat(KillSwitches.single)(Keep.right).to(Sink.ignore).run()

  def monitor(monitorId: String, id: String) =
    service
      .monitor(User(id))
      .viaMat(KillSwitches.single)(Keep.right)
      .to(
        Sink.foreach(
          status =>
            println(
              s"Monitor $monitorId saw user $id go " + (if (status.online) "online"
                                                        else "offline")
            )
        )
      )
      .run()
} 
Example 65
Source File: Main.scala    From kinesis-stream   with MIT License 5 votes vote down vote up
import akka.Done
import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.{Keep, RunnableGraph, Sink}
import px.kinesis.stream.consumer

import scala.concurrent.Future

object Main extends App {

  implicit val system = ActorSystem("kinesis-source")
  implicit val ec = system.dispatcher
  implicit val mat = ActorMaterializer()

  // A simple consumer that will print to the console for now
  val console = Sink.foreach[String](println)

  val runnableGraph: RunnableGraph[Future[Done]] =
    consumer
      .source("test-stream", "test-app")
      .via(consumer.commitFlow(parallelism = 2))
      .map(r => r.data.utf8String)
      .toMat(console)(Keep.left)

  val done = runnableGraph.run()
  done.onComplete(_ => {
    println("Shutdown completed")
    system.terminate()
  })

} 
Example 66
Source File: Producer.scala    From kinesis-stream   with MIT License 5 votes vote down vote up
import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.{Sink, Source}
import akka.util.ByteString
import com.amazonaws.auth.DefaultAWSCredentialsProviderChain
import com.amazonaws.services.kinesis.producer.KinesisProducerConfiguration
import com.contxt.kinesis.ScalaKinesisProducer

object Producer extends App {

  implicit val system = ActorSystem("kinesis-producer")
  implicit val ec = system.dispatcher
  implicit val mat = ActorMaterializer()

  val producer = ScalaKinesisProducer(
    "activity-test",
    new KinesisProducerConfiguration()
      .setRegion("us-east-1")
      .setCredentialsProvider(new DefaultAWSCredentialsProviderChain))

  Source(1 to 10)
    .map(i => (i.toString, ByteString(s"Data: $i")))
    .mapAsync(1) {
      case (key, data) => producer.send(key, data.toByteBuffer)
    }
    .runWith(Sink.foreach(r =>
      println(s"${r.getShardId}-${r.getSequenceNumber.takeRight(10)}")))
    .onComplete {
      case _ => system.terminate()
    }
} 
Example 67
Source File: RecordProcessorFactoryImpl.scala    From kinesis-stream   with MIT License 5 votes vote down vote up
package px.kinesis.stream.consumer

import akka.NotUsed
import akka.event.LoggingAdapter
import akka.stream.scaladsl.{Keep, Sink, Source}
import akka.stream.{KillSwitch, Materializer, OverflowStrategy}
import px.kinesis.stream.consumer.checkpoint.CheckpointTracker
import software.amazon.kinesis.processor.{ShardRecordProcessor, ShardRecordProcessorFactory}

import scala.collection.immutable.Seq
import scala.concurrent.ExecutionContext

class RecordProcessorFactoryImpl(
  sink: Sink[Record, NotUsed],
  workerId: String,
  checkpointTracker: CheckpointTracker,
  killSwitch: KillSwitch
)(implicit am: Materializer, ec: ExecutionContext, logging: LoggingAdapter) extends ShardRecordProcessorFactory {
  override def shardRecordProcessor(): ShardRecordProcessor = {
    val queue = Source
      .queue[Seq[Record]](0, OverflowStrategy.backpressure)
      .mapConcat(identity)
      .toMat(sink)(Keep.left)
      .run()

    new RecordProcessorImpl(queue, checkpointTracker, killSwitch, workerId)
  }
} 
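Note: bufferSize = 0 with OverflowStrategy.backpressure means each offer completes only once downstream demands the element, which is how the record processor gets throttled by the sink. A minimal sketch of the Source.queue pattern:

import akka.actor.ActorSystem
import akka.stream.{ActorMaterializer, OverflowStrategy}
import akka.stream.scaladsl.{Keep, Sink, Source}

object QueueSketch extends App {
  implicit val system = ActorSystem("queue-sketch")
  implicit val mat = ActorMaterializer()
  import system.dispatcher

  val queue = Source
    .queue[Int](bufferSize = 0, OverflowStrategy.backpressure)
    .toMat(Sink.foreach(i => println(s"consumed: $i")))(Keep.left)
    .run()

  // offer returns a Future of the enqueue result (Enqueued, Dropped, ...).
  queue.offer(42).foreach(result => println(s"offer: $result"))
  Thread.sleep(500)
  system.terminate()
}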
Example 68
Source File: HatDataEventRouter.scala    From HAT2.0   with GNU Affero General Public License v3.0 5 votes vote down vote up
package org.hatdex.hat.api.service.monitoring

import javax.inject.{ Inject, Named }

import akka.{ Done, NotUsed }
import akka.actor.{ ActorRef, ActorSystem }
import akka.stream.scaladsl.{ Sink, Source }
import akka.stream.{ ActorMaterializer, OverflowStrategy }

import scala.concurrent.duration._

trait HatDataEventRouter {
  def init(): Done
}

class HatDataEventRouterImpl @Inject() (
    dataEventBus: HatDataEventBus,
    @Named("hatDataStatsProcessor") statsProcessor: ActorRef,
    implicit val actorSystem: ActorSystem) extends HatDataEventRouter {

  private implicit val materializer = ActorMaterializer()

  init()

  def init(): Done = {
    // Inbound/outbound data stats are reported via a buffering stage to control load and network traffic
    dataEventBus.subscribe(buffer(statsProcessor), classOf[HatDataEventBus.DataCreatedEvent])
    dataEventBus.subscribe(buffer(statsProcessor), classOf[HatDataEventBus.RichDataRetrievedEvent])
    // Data Debit Events are dispatched without buffering
    dataEventBus.subscribe(statsProcessor, classOf[HatDataEventBus.RichDataDebitEvent])
    dataEventBus.subscribe(statsProcessor, classOf[HatDataEventBus.DataDebitEvent])
    Done
  }

  
  // Buffer events and forward them to 'target' in batches of 'batch' elements,
  // or whenever 'period' elapses, whichever comes first.
  private def buffer(target: ActorRef, batch: Int = 100, period: FiniteDuration = 60.seconds): ActorRef =
    Source.actorRef(bufferSize = 1000, OverflowStrategy.dropNew)
      .groupedWithin(batch, period)
      .to(Sink.actorRef(target, NotUsed))
      .run()

} 
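Note: groupedWithin batches by element count or by time, whichever limit is reached first, so stats reach the processor at most once per period and at most batch elements at a time. Sketch:

import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.{Sink, Source}

import scala.concurrent.duration._

object GroupedWithinSketch extends App {
  implicit val system = ActorSystem("grouped-within-sketch")
  implicit val mat = ActorMaterializer()

  Source(1 to 10)
    .groupedWithin(3, 1.second)     // emits Vector(1, 2, 3), Vector(4, 5, 6), ...
    .runWith(Sink.foreach(println))
}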
Example 69
Source File: AkkaHttpLambdaHandler.scala    From scala-server-lambda   with MIT License 5 votes vote down vote up
package io.github.howardjohn.lambda.akka

import akka.actor.ActorSystem
import akka.http.scaladsl.model.HttpHeader.ParsingResult
import akka.http.scaladsl.model._
import akka.http.scaladsl.server.Route
import akka.http.scaladsl.unmarshalling.Unmarshal
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.{Keep, Sink, Source}
import io.github.howardjohn.lambda.ProxyEncoding._
import io.github.howardjohn.lambda.{LambdaHandler, ProxyEncoding}

import scala.concurrent.duration.Duration
import scala.concurrent.{Await, ExecutionContext, Future}

class AkkaHttpLambdaHandler(route: Route)(
  implicit system: ActorSystem,
  materializer: ActorMaterializer,
  ec: ExecutionContext
) extends LambdaHandler {
  import AkkaHttpLambdaHandler._

  override def handleRequest(request: ProxyRequest): ProxyResponse =
    Await.result(runRequest(proxyToAkkaRequest(request)), Duration.Inf)

  private def runRequest(request: HttpRequest): Future[ProxyResponse] = {
    val source = Source.single(request)
    val sink = Sink.head[HttpResponse]
    source
      .via(route)
      .toMat(sink)(Keep.right)
      .run()
      .flatMap(asProxyResponse)
  }

  private def proxyToAkkaRequest(request: ProxyRequest): HttpRequest =
    new HttpRequest(
      method = parseHttpMethod(request.httpMethod),
      uri = Uri(ProxyEncoding.reconstructPath(request)),
      headers = parseRequestHeaders(request.headers.getOrElse(Map.empty)),
      entity = parseEntity(request.headers.getOrElse(Map.empty), request.body),
      protocol = HttpProtocols.`HTTP/1.1`
    )

  private def parseEntity(headers: Map[String, String], body: Option[String]): MessageEntity = {
    val defaultContentType = ContentTypes.`text/plain(UTF-8)`
    val contentType = ContentType
      .parse(headers.getOrElse("Content-Type", defaultContentType.value))
      .getOrElse(defaultContentType)

    body match {
      case Some(b) => HttpEntity(contentType, b.getBytes)
      case None => HttpEntity.empty(contentType)
    }
  }

  private def asProxyResponse(resp: HttpResponse): Future[ProxyResponse] =
    Unmarshal(resp.entity)
      .to[String]
      .map { body =>
        ProxyResponse(
          resp.status.intValue(),
          resp.headers.map(h => h.name -> h.value).toMap,
          body
        )
      }
}

private object AkkaHttpLambdaHandler {
  private def parseRequestHeaders(headers: Map[String, String]): List[HttpHeader] =
    headers.map {
      case (k, v) =>
        HttpHeader.parse(k, v) match {
          case ParsingResult.Ok(header, _) => header
          case ParsingResult.Error(err) => throw new RuntimeException(s"Failed to parse header $k:$v with error $err.")
        }
    }.toList

  private def parseHttpMethod(method: String) = method.toUpperCase match {
    case "CONNECT" => HttpMethods.CONNECT
    case "DELETE" => HttpMethods.DELETE
    case "GET" => HttpMethods.GET
    case "HEAD" => HttpMethods.HEAD
    case "OPTIONS" => HttpMethods.OPTIONS
    case "PATCH" => HttpMethods.PATCH
    case "POST" => HttpMethods.POST
    case "PUT" => HttpMethods.PUT
    case "TRACE" => HttpMethods.TRACE
    case other => HttpMethod.custom(other)
  }
} 
Example 70
Source File: RequestRunner.scala    From aws-spi-akka-http   with Apache License 2.0 5 votes vote down vote up
package com.github.matsluni.akkahttpspi

import java.util.concurrent.CompletableFuture

import akka.actor.ActorSystem
import akka.http.scaladsl.Http
import akka.http.scaladsl.model.HttpRequest
import akka.http.scaladsl.settings.ConnectionPoolSettings
import akka.stream.Materializer
import akka.stream.scaladsl.{Keep, Sink}
import org.slf4j.LoggerFactory
import software.amazon.awssdk.http.SdkHttpFullResponse
import software.amazon.awssdk.http.async.SdkAsyncHttpResponseHandler

import scala.compat.java8.FutureConverters
import scala.concurrent.ExecutionContext
import scala.collection.JavaConverters._

class RequestRunner(connectionPoolSettings: ConnectionPoolSettings)(implicit sys: ActorSystem,
                                                          ec: ExecutionContext,
                                                          mat: Materializer) {
  val logger = LoggerFactory.getLogger(this.getClass)

  def run(httpRequest: HttpRequest,
          handler: SdkAsyncHttpResponseHandler): CompletableFuture[Void] = {
    val result = Http()
      .singleRequest(httpRequest, settings = connectionPoolSettings)
      .flatMap { response =>
        val sdkResponse = SdkHttpFullResponse.builder()
          .headers(response.headers.groupBy(_.name()).map{ case (k, v) => k -> v.map(_.value()).asJava }.asJava)
          .statusCode(response.status.intValue())
          .statusText(response.status.reason)
          .build

        handler.onHeaders(sdkResponse)

        val (complete, publisher) = response
          .entity
          .dataBytes
          .map(_.asByteBuffer)
          .alsoToMat(Sink.ignore)(Keep.right)
          .toMat(Sink.asPublisher(fanout = false))(Keep.both)
          .run()

        handler.onStream(publisher)

        complete
      }

    result.failed.foreach(handler.onError)
    FutureConverters.toJava(result.map(_ => null: Void)).toCompletableFuture
  }
} 
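Note: the alsoToMat/toMat combination above materializes two values from a single stream: a completion Future from Sink.ignore and a Reactive Streams Publisher from Sink.asPublisher. Minimal sketch:

import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.{Keep, Sink, Source}

object TwoMaterializedValuesSketch extends App {
  implicit val system = ActorSystem("two-mats-sketch")
  implicit val mat = ActorMaterializer()

  val (done, publisher) = Source(1 to 3)
    .alsoToMat(Sink.ignore)(Keep.right)                 // Future[Done]
    .toMat(Sink.asPublisher(fanout = false))(Keep.both) // (Future[Done], Publisher[Int])
    .run()
  // 'publisher' can be handed to any Reactive Streams subscriber; 'done'
  // completes when the stream itself finishes.
}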
Example 71
Source File: ScanAndScrollSourceTest.scala    From elasticsearch-client   with Apache License 2.0 5 votes vote down vote up
package com.sumologic.elasticsearch.akkahelpers

import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.{Sink, Source}
import com.sumologic.elasticsearch.restlastic.RestlasticSearchClient.ReturnTypes._
import com.sumologic.elasticsearch.restlastic.ScrollClient
import com.sumologic.elasticsearch.restlastic.dsl.Dsl
import com.sumologic.elasticsearch.restlastic.dsl.Dsl._
import org.json4s.Extraction._
import org.json4s._
import org.junit.runner.RunWith
import org.scalatest.concurrent.ScalaFutures
import org.scalatest.{Matchers, WordSpec}
import org.scalatestplus.junit.JUnitRunner

import scala.concurrent.{ExecutionContext, Future}

@RunWith(classOf[JUnitRunner])
class ScanAndScrollSourceTest extends WordSpec with Matchers with ScalaFutures {
  val resultMaps: List[Map[String, AnyRef]] = List(Map("a" -> "1"), Map("a" -> "2"), Map("a" -> "3"))
  implicit val formats = org.json4s.DefaultFormats
  implicit val system = ActorSystem("test")
  implicit val materializer = ActorMaterializer()

  def searchResponseFromMap(map: Map[String, AnyRef]) = {
    val raw = RawSearchResponse(Hits(List(ElasticJsonDocument("index", "type", "id", Some(0.1f), decompose(map).asInstanceOf[JObject], highlight = None, inner_hits = None)), 1))
    SearchResponse(raw, "{}")
  }

  "ScanAndScrollSource" should {
    val index = Index("index")
    val tpe = Type("tpe")
    val queryRoot = new QueryRoot(MatchAll)

    "Read to the end of a source" in {
      val searchResponses = resultMaps.map(searchResponseFromMap)
      val client = new MockScrollClient(searchResponses)
      val source = Source.actorPublisher[SearchResponse](ScanAndScrollSource.props(index, tpe, queryRoot, client, sizeOpt = Some(5)))
      val fut = source
        .map(_.sourceAsMap)
        .grouped(10)
        .runWith(Sink.head)
      whenReady(fut) { resp =>
        resp.flatten should be(resultMaps)
      }
    }
  }
}


class MockScrollClient(results: List[SearchResponse]) extends ScrollClient {
  var id = 1
  var started = false
  var resultsQueue = results
  override val indexExecutionCtx: ExecutionContext = ExecutionContext.Implicits.global

  override def startScrollRequestIndices(indices: Seq[Dsl.Index],
                                         tpe: Dsl.Type,
                                         query: Dsl.QueryRoot,
                                         resultWindowOpt: Option[String] = None,
                                         fromOpt: Option[Int] = None,
                                         sizeOpt: Option[Int] = None,
                                         preference: Option[String] = None): Future[(ScrollId, SearchResponse)] = {
    if (!started) {
      started = true
      processRequest()
    } else {
      Future.failed(new RuntimeException("Scroll already started"))
    }
  }

  override def scroll(scrollId: ScrollId, resultWindowOpt: Option[String] = None): Future[(ScrollId, SearchResponse)] = {
    if (scrollId.id.toInt == id) {
      processRequest()
    } else {
      Future.failed(new RuntimeException("Invalid id"))
    }

  }

  private def processRequest(): Future[(ScrollId, SearchResponse)] = {
    id += 1
    resultsQueue match {
      case head :: rest =>
        resultsQueue = rest
        Future.successful((ScrollId(id.toString), head))
      case Nil =>
        Future.successful((ScrollId(id.toString), SearchResponse.empty))
    }
  }
} 
Example 72
Source File: DeleteTagViewForPersistenceId.scala    From akka-persistence-cassandra   with Apache License 2.0 5 votes vote down vote up
package akka.persistence.cassandra.reconciler

import akka.actor.ActorSystem
import akka.persistence.cassandra.PluginSettings
import akka.Done
import akka.event.Logging
import akka.persistence.cassandra.query.scaladsl.CassandraReadJournal
import akka.annotation.InternalApi
import akka.persistence.query.NoOffset
import akka.persistence.cassandra.journal.TimeBucket
import akka.stream.scaladsl.Sink
import scala.concurrent.Future


// Deletes all tag view rows (plus the tag progress and tag scanning entries)
// for the given persistence ids and tag.
@InternalApi
private[akka] final class DeleteTagViewForPersistenceId(
    persistenceIds: Set[String],
    tag: String,
    system: ActorSystem,
    session: ReconciliationSession,
    settings: PluginSettings,
    queries: CassandraReadJournal) {
  private val log = Logging(system, s"DeleteTagView($tag)")
  private implicit val sys = system
  import system.dispatcher

  def execute(): Future[Done] = {
    queries
      .currentEventsByTagInternal(tag, NoOffset)
      .filter(persistenceIds contains _.persistentRepr.persistenceId)
      // Make the parallelism configurable?
      .mapAsync(1) { uuidPr =>
        val bucket = TimeBucket(uuidPr.offset, settings.eventsByTagSettings.bucketSize)
        val timestamp = uuidPr.offset
        val persistenceId = uuidPr.persistentRepr.persistenceId
        val tagPidSequenceNr = uuidPr.tagPidSequenceNr
        log.debug("Issuing delete {} {} {} {}", persistenceId, bucket, timestamp, tagPidSequenceNr)
        session.deleteFromTagView(tag, bucket, timestamp, persistenceId, tagPidSequenceNr)
      }
      .runWith(Sink.ignore)
      .flatMap(_ =>
        Future.traverse(persistenceIds) { pid =>
          val progress = session.deleteTagProgress(tag, pid)
          val scanning = session.deleteTagScannning(pid)
          for {
            _ <- progress
            _ <- scanning
          } yield Done
        })
      .map(_ => Done)
  }

} 
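Note: mapAsync(1) above permits only one outstanding Future, so the deletes run strictly one at a time; raising the parallelism (as the comment suggests) would issue several concurrently while preserving output order. Sketch:

import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.{Sink, Source}

import scala.concurrent.Future

object MapAsyncSketch extends App {
  implicit val system = ActorSystem("map-async-sketch")
  implicit val mat = ActorMaterializer()
  import system.dispatcher

  Source(1 to 5)
    .mapAsync(1)(i => Future(i * 2)) // at most one Future in flight
    .runWith(Sink.foreach(println))
}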
Example 73
Source File: BuildTagViewForPersistenceId.scala    From akka-persistence-cassandra   with Apache License 2.0 5 votes vote down vote up
package akka.persistence.cassandra.reconciler

import akka.actor.ActorSystem
import akka.persistence.cassandra.PluginSettings
import akka.Done
import akka.persistence.cassandra.journal.TagWriter._
import scala.concurrent.duration._
import scala.concurrent.Future
import akka.stream.scaladsl.Source
import akka.actor.ExtendedActorSystem
import akka.persistence.query.PersistenceQuery
import akka.persistence.cassandra.query.scaladsl.CassandraReadJournal
import akka.event.Logging
import akka.persistence.cassandra.journal.CassandraTagRecovery
import akka.persistence.cassandra.Extractors
import akka.util.Timeout
import akka.stream.OverflowStrategy
import akka.stream.scaladsl.Sink
import akka.annotation.InternalApi
import akka.serialization.SerializationExtension


// Rebuilds the tag view table for a single persistence id by replaying its
// events and re-issuing any missing tag writes.
@InternalApi
private[akka] final class BuildTagViewForPersisetceId(
    persistenceId: String,
    system: ActorSystem,
    recovery: CassandraTagRecovery,
    settings: PluginSettings) {

  import system.dispatcher

  private implicit val sys = system
  private val log = Logging(system, classOf[BuildTagViewForPersisetceId])
  private val serialization = SerializationExtension(system)

  private val queries: CassandraReadJournal =
    PersistenceQuery(system.asInstanceOf[ExtendedActorSystem])
      .readJournalFor[CassandraReadJournal]("akka.persistence.cassandra.query")

  private implicit val flushTimeout = Timeout(30.seconds)

  def reconcile(flushEvery: Int = 1000): Future[Done] = {

    val recoveryPrep = for {
      tp <- recovery.lookupTagProgress(persistenceId)
      _ <- recovery.setTagProgress(persistenceId, tp)
    } yield tp

    Source
      .futureSource(recoveryPrep.map((tp: Map[String, TagProgress]) => {
        log.debug("[{}] Rebuilding tag view table from: [{}]", persistenceId, tp)
        queries
          .eventsByPersistenceId(
            persistenceId,
            0,
            Long.MaxValue,
            Long.MaxValue,
            None,
            settings.journalSettings.readProfile,
            "BuildTagViewForPersistenceId",
            extractor = Extractors.rawEvent(settings.eventsByTagSettings.bucketSize, serialization, system))
          .map(recovery.sendMissingTagWriteRaw(tp, actorRunning = false))
          .buffer(flushEvery, OverflowStrategy.backpressure)
          .mapAsync(1)(_ => recovery.flush(flushTimeout))
      }))
      .runWith(Sink.ignore)

  }

} 
Example 74
Source File: RebuildAllPersisetceIdsSpec.scala    From akka-persistence-cassandra   with Apache License 2.0 5 votes vote down vote up
package akka.persistence.cassandra.reconciler

import akka.persistence.cassandra.CassandraSpec
import akka.stream.scaladsl.Sink
import org.scalatest.concurrent.Eventually

class RebuildAllPersisetceIdsSpec extends CassandraSpec with Eventually {

  "RebuildAllPersisetceIds" should {

    val tag1 = "tag1"
    val pid1 = "pid1"
    val pid2 = "pid2"
    val pid3 = "pid3"
    val pid4 = "pid4"
    val pid5 = "pid5"

    "build from messages table" in {
      writeEventsFor(tag1, pid1, 2)
      writeEventsFor(tag1, pid2, 1)
      writeEventsFor(tag1, pid3, 5)

      val reconciliation = new Reconciliation(system)
      reconciliation.rebuildAllPersistenceIds().futureValue

      queries
        .currentPersistenceIds()
        .runWith(Sink.seq)
        .futureValue
        .toSet
        .filterNot(_.startsWith("persistenceInit")) should ===(Set(pid1, pid2, pid3))

      // add some more
      writeEventsFor(tag1, pid4, 2)
      writeEventsFor(tag1, pid5, 4)

      reconciliation.rebuildAllPersistenceIds().futureValue

      queries
        .currentPersistenceIds()
        .runWith(Sink.seq)
        .futureValue
        .toSet
        .filterNot(_.startsWith("persistenceInit")) should ===(Set(pid1, pid2, pid3, pid4, pid5))
    }
  }
} 
Example 75
Source File: TagQuerySpec.scala    From akka-persistence-cassandra   with Apache License 2.0 5 votes vote down vote up
package akka.persistence.cassandra.reconciler

import akka.persistence.cassandra.CassandraSpec
import akka.stream.scaladsl.Sink
import org.scalatest.concurrent.Eventually

class TagQuerySpec extends CassandraSpec with Eventually {

  private lazy val reconciliation = new Reconciliation(system)

  "Tag querying" should {
    "return distinct tags for all tags" in {
      val pid1 = "pid1"
      val pid2 = "pid2"
      val tag1 = "tag1"
      val tag2 = "tag2"
      val tag3 = "tag3"
      reconciliation.allTags().runWith(Sink.seq).futureValue shouldEqual Nil
      writeEventsFor(Set(tag1, tag2), pid1, 3)
      writeEventsFor(Set(tag2, tag3), pid2, 3)
      eventually {
        val allTags = reconciliation.allTags().runWith(Sink.seq).futureValue
        allTags.size shouldEqual 3
        allTags.toSet shouldEqual Set(tag1, tag2, tag3)
      }
    }

    "return tags only if that pid has used them" in {
      val pid1 = "p11"
      val pid2 = "p12"
      val tag1 = "tag11"
      val tag2 = "tag12"
      val tag3 = "tag13"
      writeEventsFor(tag1, pid1, 3)
      writeEventsFor(Set(tag2, tag3), pid2, 3)
      eventually {
        val tags = reconciliation.tagsForPersistenceId(pid2).futureValue
        tags.size shouldEqual 2
        tags.toSet shouldEqual Set(tag2, tag3)
      }
    }
  }

} 
Example 76
Source File: Gateway.scala    From reactive-microservices   with MIT License 5 votes vote down vote up
import akka.actor.ActorSystem
import akka.http.scaladsl.Http
import akka.http.scaladsl.client.RequestBuilding
import akka.http.scaladsl.marshallers.sprayjson.SprayJsonSupport._
import akka.http.scaladsl.model.StatusCodes._
import akka.http.scaladsl.model.{HttpRequest, HttpResponse}
import akka.http.scaladsl.unmarshalling.Unmarshal
import akka.stream.FlowMaterializer
import akka.stream.scaladsl.{Sink, Source}
import java.io.IOException
import scala.concurrent.{ExecutionContext, Future}

case class InternalLoginRequest(identityId: Long, authMethod: String = "codecard")
case class InternalReloginRequest(tokenValue: String, authMethod: String = "codecard")

class Gateway(implicit actorSystem: ActorSystem, materializer: FlowMaterializer, ec: ExecutionContext)
  extends JsonProtocols with Config {

  private val identityManagerConnectionFlow = Http().outgoingConnection(identityManagerHost, identityManagerPort)
  private val tokenManagerConnectionFlow = Http().outgoingConnection(tokenManagerHost, tokenManagerPort)

  private def requestIdentityManager(request: HttpRequest): Future[HttpResponse] = {
    Source.single(request).via(identityManagerConnectionFlow).runWith(Sink.head)
  }

  private def requestTokenManager(request: HttpRequest): Future[HttpResponse] = {
    Source.single(request).via(tokenManagerConnectionFlow).runWith(Sink.head)
  }

  def requestToken(tokenValue: String): Future[Either[String, Token]] = {
    requestTokenManager(RequestBuilding.Get(s"/tokens/$tokenValue")).flatMap { response =>
      response.status match {
        case Success(_) => Unmarshal(response.entity).to[Token].map(Right(_))
        case NotFound => Future.successful(Left("Token expired or not found"))
        case _ => Future.failed(new IOException(s"Token request failed with status ${response.status} and error ${response.entity}"))
      }
    }
  }

  def requestNewIdentity(): Future[Identity] = {
    requestIdentityManager(RequestBuilding.Post("/identities")).flatMap { response =>
      response.status match {
        case Success(_) => Unmarshal(response.entity).to[Identity]
        case _ => Future.failed(new IOException(s"Identity request failed with status ${response.status} and error ${response.entity}"))
      }
    }
  }

  def requestLogin(identityId: Long): Future[Token] = {
    val loginRequest = InternalLoginRequest(identityId)
    requestTokenManager(RequestBuilding.Post("/tokens", loginRequest)).flatMap { response =>
      response.status match {
        case Success(_) => Unmarshal(response.entity).to[Token]
        case _ => Future.failed(new IOException(s"Login request failed with status ${response.status} and error ${response.entity}"))
      }
    }
  }

  def requestRelogin(tokenValue: String): Future[Option[Token]] = {
    requestTokenManager(RequestBuilding.Patch("/tokens", InternalReloginRequest(tokenValue))).flatMap { response =>
      response.status match {
        case Success(_) => Unmarshal(response.entity).to[Token].map(Option(_))
        case NotFound => Future.successful(None)
        case _ => Future.failed(new IOException(s"Relogin request failed with status ${response.status} and error ${response.entity}"))
      }
    }
  }
} 
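Note: every helper above uses the same one-shot pattern: push a single request through a fresh connection flow and take the first response with Sink.head. A generic sketch of that pattern (host and port are illustrative):

import akka.actor.ActorSystem
import akka.http.scaladsl.Http
import akka.http.scaladsl.model.{HttpRequest, HttpResponse}
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.{Sink, Source}

import scala.concurrent.Future

object SingleRequestSketch {
  def singleRequest(host: String, port: Int, request: HttpRequest)(
      implicit system: ActorSystem, mat: ActorMaterializer): Future[HttpResponse] =
    Source.single(request).via(Http().outgoingConnection(host, port)).runWith(Sink.head)
}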
Example 77
Source File: Gateway.scala    From reactive-microservices   with MIT License 5 votes vote down vote up
import akka.actor.ActorSystem
import akka.http.scaladsl.Http
import akka.http.scaladsl.client.RequestBuilding
import akka.http.scaladsl.marshallers.sprayjson.SprayJsonSupport._
import akka.http.scaladsl.model.StatusCodes._
import akka.http.scaladsl.model.{HttpRequest, HttpResponse}
import akka.http.scaladsl.unmarshalling.Unmarshal
import akka.stream.FlowMaterializer
import akka.stream.scaladsl.{Sink, Source}
import java.io.IOException
import scala.concurrent.{ExecutionContext, Future}

case class InternalLoginRequest(identityId: Long, authMethod: String = "password")
case class InternalReloginRequest(tokenValue: String, authMethod: String = "password")

class Gateway(implicit actorSystem: ActorSystem, materializer: FlowMaterializer, ec: ExecutionContext)
  extends JsonProtocols with Config {

  private val identityManagerConnectionFlow = Http().outgoingConnection(identityManagerHost, identityManagerPort)
  private val tokenManagerConnectionFlow = Http().outgoingConnection(tokenManagerHost, tokenManagerPort)

  private def requestIdentityManager(request: HttpRequest): Future[HttpResponse] = {
    Source.single(request).via(identityManagerConnectionFlow).runWith(Sink.head)
  }

  private def requestTokenManager(request: HttpRequest): Future[HttpResponse] = {
    Source.single(request).via(tokenManagerConnectionFlow).runWith(Sink.head)
  }

  def requestToken(tokenValue: String): Future[Either[String, Token]] = {
    requestTokenManager(RequestBuilding.Get(s"/tokens/$tokenValue")).flatMap { response =>
      response.status match {
        case Success(_) => Unmarshal(response.entity).to[Token].map(Right(_))
        case NotFound => Future.successful(Left("Token expired or not found"))
        case _ => Future.failed(new IOException(s"Token request failed with status ${response.status} and error ${response.entity}"))
      }
    }
  }

  def requestNewIdentity(): Future[Identity] = {
    requestIdentityManager(RequestBuilding.Post("/identities")).flatMap { response =>
      response.status match {
        case Success(_) => Unmarshal(response.entity).to[Identity]
        case _ => Future.failed(new IOException(s"Identity request failed with status ${response.status} and error ${response.entity}"))
      }
    }
  }

  def requestLogin(identityId: Long): Future[Token] = {
    val loginRequest = InternalLoginRequest(identityId)
    requestTokenManager(RequestBuilding.Post("/tokens", loginRequest)).flatMap { response =>
      response.status match {
        case Success(_) => Unmarshal(response.entity).to[Token]
        case _ => Future.failed(new IOException(s"Login request failed with status ${response.status} and error ${response.entity}"))
      }
    }
  }

  def requestRelogin(tokenValue: String): Future[Option[Token]] = {
    requestTokenManager(RequestBuilding.Patch("/tokens", InternalReloginRequest(tokenValue))).flatMap { response =>
      response.status match {
        case Success(_) => Unmarshal(response.entity).to[Token].map(Option(_))
        case NotFound => Future.successful(None)
        case _ => Future.failed(new IOException(s"Relogin request failed with status ${response.status} and error ${response.entity}"))
      }
    }
  }
} 
Example 78
Source File: SessionManager.scala    From reactive-microservices   with MIT License 5 votes vote down vote up
import akka.actor.ActorSystem
import akka.http.scaladsl.Http
import akka.http.scaladsl.client.RequestBuilding
import akka.http.scaladsl.model.{HttpResponse, HttpRequest}
import akka.http.scaladsl.server.Directives._
import akka.stream.ActorFlowMaterializer
import akka.stream.scaladsl.{Sink, Source}
import com.typesafe.config.ConfigFactory
import scala.concurrent.Future

object SessionManager extends App {
  val config = ConfigFactory.load()
  val interface = config.getString("http.interface")
  val port = config.getInt("http.port")
  val tokenManagerHost = config.getString("services.token-manager.host")
  val tokenManagerPort = config.getInt("services.token-manager.port")

  implicit val actorSystem = ActorSystem()
  implicit val materializer = ActorFlowMaterializer()
  implicit val dispatcher = actorSystem.dispatcher

  val tokenManagerConnectionFlow = Http().outgoingConnection(tokenManagerHost, tokenManagerPort)

  def requestTokenManager(request: HttpRequest): Future[HttpResponse] = {
    Source.single(request).via(tokenManagerConnectionFlow).runWith(Sink.head)
  }

  Http().bindAndHandle(interface = interface, port = port, handler = {
    logRequestResult("session-manager") {
      path("session") {
        headerValueByName("Auth-Token") { tokenValue =>
          pathEndOrSingleSlash {
            get {
              complete {
                requestTokenManager(RequestBuilding.Get(s"/tokens/$tokenValue"))
              }
            } ~
            delete {
              complete {
                requestTokenManager(RequestBuilding.Delete(s"/tokens/$tokenValue"))
              }
            }
          }
        }
      }
    }
  })
} 
Example 79
Source File: Gateway.scala    From reactive-microservices   with MIT License 5 votes vote down vote up
import akka.actor.ActorSystem
import akka.http.scaladsl.Http
import akka.http.scaladsl.client.RequestBuilding
import akka.http.scaladsl.marshallers.sprayjson.SprayJsonSupport._
import akka.http.scaladsl.model.StatusCodes._
import akka.http.scaladsl.model.{HttpRequest, HttpResponse}
import akka.http.scaladsl.unmarshalling.Unmarshal
import akka.stream.FlowMaterializer
import akka.stream.scaladsl.{Sink, Source}
import com.restfb.DefaultFacebookClient
import com.restfb.types.User
import java.io.IOException
import scala.concurrent.{blocking, ExecutionContext, Future}
import scala.util.Try

case class InternalLoginRequest(identityId: Long, authMethod: String = "fb")
case class InternalReloginRequest(tokenValue: String, authMethod: String = "fb")

class Gateway(implicit actorSystem: ActorSystem, materializer: FlowMaterializer, ec: ExecutionContext)
  extends JsonProtocols with Config {

  private val identityManagerConnectionFlow = Http().outgoingConnection(identityManagerHost, identityManagerPort)
  private val tokenManagerConnectionFlow = Http().outgoingConnection(tokenManagerHost, tokenManagerPort)

  private def requestIdentityManager(request: HttpRequest): Future[HttpResponse] = {
    Source.single(request).via(identityManagerConnectionFlow).runWith(Sink.head)
  }

  private def requestTokenManager(request: HttpRequest): Future[HttpResponse] = {
    Source.single(request).via(tokenManagerConnectionFlow).runWith(Sink.head)
  }

  def requestToken(tokenValue: String): Future[Either[String, Token]] = {
    requestTokenManager(RequestBuilding.Get(s"/tokens/$tokenValue")).flatMap { response =>
      response.status match {
        case Success(_) => Unmarshal(response.entity).to[Token].map(Right(_))
        case NotFound => Future.successful(Left("Token expired or not found"))
        case _ => Future.failed(new IOException(s"Token request failed with status ${response.status} and error ${response.entity}"))
      }
    }
  }

  def requestNewIdentity(): Future[Identity] = {
    requestIdentityManager(RequestBuilding.Post("/identities")).flatMap { response =>
      response.status match {
        case Success(_) => Unmarshal(response.entity).to[Identity]
        case _ => Future.failed(new IOException(s"Identity request failed with status ${response.status} and error ${response.entity}"))
      }
    }
  }

  def requestLogin(identityId: Long): Future[Token] = {
    val loginRequest = InternalLoginRequest(identityId)
    requestTokenManager(RequestBuilding.Post("/tokens", loginRequest)).flatMap { response =>
      response.status match {
        case Success(_) => Unmarshal(response.entity).to[Token]
        case _ => Future.failed(new IOException(s"Login request failed with status ${response.status} and error ${response.entity}"))
      }
    }
  }

  def requestRelogin(tokenValue: String): Future[Option[Token]] = {
    requestTokenManager(RequestBuilding.Patch("/tokens", InternalReloginRequest(tokenValue))).flatMap { response =>
      response.status match {
        case Success(_) => Unmarshal(response.entity).to[Token].map(Option(_))
        case NotFound => Future.successful(None)
        case _ => Future.failed(new IOException(s"Relogin request failed with status ${response.status} and error ${response.entity}"))
      }
    }
  }

  def getFbUserDetails(accessToken: String): Try[User] = {
    Try {
      blocking {
        val client = new DefaultFacebookClient(accessToken)
        client.fetchObject("me", classOf[User])
      }
    }
  }
} 
Example 80
Source File: AkkaStreamsMergeHubApp.scala    From Scala-Reactive-Programming   with MIT License 5 votes vote down vote up
package com.packt.publishing.dynamic.akka.streams

import akka.NotUsed
import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.{MergeHub, RunnableGraph, Sink, Source}

object AkkaStreamsMergeHubApp extends App{

  implicit val actorSystem = ActorSystem("MergeHubSystem")
  implicit val materializer = ActorMaterializer()

  val consumer = Sink.foreach(println)
  val mergeHub = MergeHub.source[String](perProducerBufferSize = 16)
  val runnableGraph: RunnableGraph[Sink[String, NotUsed]] = mergeHub.to(consumer)
  val toConsumer: Sink[String, NotUsed] = runnableGraph.run()

  Source.single("Hello!").runWith(toConsumer)
  Source.single("MergeHub!").runWith(toConsumer)
  Source.single("World!").runWith(toConsumer)
  Thread.sleep(500)
  actorSystem.terminate
} 
Example 81
Source File: AkkaStreamsHelloWorldApp3.scala    From Scala-Reactive-Programming   with MIT License 5 votes vote down vote up
package com.packt.publishing.akka.streams.hello

import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.{Flow, Keep, RunnableGraph, Sink, Source}
import akka.{Done, NotUsed}

import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.Future
import scala.util.{Failure, Success}

object AkkaStreamsHelloWorldApp3 extends App{

  implicit val actorSystem = ActorSystem("HelloWorldSystem")
  implicit val materializer = ActorMaterializer()

  val helloWorldSource:Source[String,NotUsed] = Source.single("Akka Streams Hello World")
  val helloWorldSink: Sink[String,Future[Done]] = Sink.foreach(println)
  val helloWorldFlow:Flow[String,String,NotUsed] = Flow[String].map(str => str.toUpperCase)

  val helloWorldGraph:RunnableGraph[NotUsed] = helloWorldSource
                                                  .via(helloWorldFlow)
                                                  .to(helloWorldSink)

  val helloWorldGraph2:RunnableGraph[Future[Done]] = helloWorldSource
                                                  .via(helloWorldFlow)
                                                  .toMat(helloWorldSink)(Keep.right)

  helloWorldGraph.run

  val helloWorldMaterializedValue: Future[Done] = helloWorldGraph2.run
  helloWorldMaterializedValue.onComplete{
    case Success(Done) =>
      println("HelloWorld Stream ran succssfully.")
    case Failure(exception) =>
      println(s"HelloWorld Stream ran into an issue: ${exception}.")
  }

  actorSystem.terminate
} 
Example 82
Source File: IngesterMain.scala    From CM-Well   with Apache License 2.0 5 votes vote down vote up
package cmwell.tools.data.ingester

import java.io.FileInputStream
import java.util.zip.GZIPInputStream

import akka.stream.scaladsl.Sink
import cmwell.tools.data.utils.akka.stats.IngesterStats
//import cmwell.tools.data.sparql.SparqlProcessorMain.Opts.opt
import cmwell.tools.data.utils.ArgsManipulations._
import cmwell.tools.data.utils.akka.Implicits._
import cmwell.tools.data.utils.akka._
import cmwell.tools.data.utils.ops._
import com.typesafe.scalalogging.LazyLogging
import org.rogach.scallop.ScallopConf

import scala.concurrent.ExecutionContext.Implicits.global

object IngesterMain extends App with LazyLogging {
  object Opts extends ScallopConf(args) {
    version(s"cm-well ingester ${getVersionFromManifest()} (c) 2015")

    val host = opt[String]("host", descr = "cm-well host name", required = true)
    val format = opt[String]("format", descr = "input format (e.g. ntriples, nquads, jsonld)", required = true)
    val file = opt[String]("file", descr = "input file path", default = None)
    val gzip = opt[Boolean]("gzip", descr = "is input file gzipped", default = Some(false))
    val token = opt[String]("token", descr = "cm-well write permission token", default = None)
    val replaceMode =
      opt[Boolean]("with-replace-mode", descr = "replace-mode parameter in cm-well", default = Some(false))
    val force = opt[Boolean]("force", descr = "force parameter in cm-well", default = Some(false))
    val priority = opt[Boolean]("priority", default = Some(false), descr = "ingest data in priority mode")
    val numConnections = opt[Int]("num-connections", descr = "number of http connections to open")

    dependsOnAll(gzip, List(file))
    verify()
  }

  val start = System.currentTimeMillis()

  var totalIngestedBytes = 0L
  var ingestedBytesInWindow = 0
  var ingestedInfotonsInWindow = 0
  var totalIngestedInfotons = 0L
  var totalFailedInfotons = 0L
  var lastTime = start
  var nextPrint = 0L
  var lastMessageSize = 0
  val windowSizeMillis = 1000

  val formatter = java.text.NumberFormat.getNumberInstance

  // resize akka http connection pool
  Opts.numConnections.toOption.map { numConnections =>
    System.setProperty("akka.http.host-connection-pool.max-connections", numConnections.toString)
  }

  val inputStream = if (Opts.file.isSupplied) {
    val inputFile = new FileInputStream(Opts.file())
    if (Opts.gzip()) {
      new GZIPInputStream(inputFile)
    } else {
      inputFile
    }
  } else {
    System.in
  }

  val result = Ingester
    .fromInputStream(
      baseUrl = formatHost(Opts.host()),
      format = Opts.format(),
      writeToken = Opts.token.toOption,
      replaceMode = Opts.replaceMode(),
      force = Opts.force(),
      isPriority = Opts.priority(),
      in = inputStream
    )
    .via(IngesterStats(isStderr = true))
    .runWith(Sink.ignore)

  // actor system is still alive, will be destroyed when finished
  result.onComplete { x =>
    System.err.println("\n")
    System.err.println(s"finished: $x")
    cleanup()
  }
} 
Example 83
Source File: SparqlUtils.scala    From CM-Well   with Apache License 2.0 5 votes vote down vote up
package cmwell.tools.data.sparql.japi

import java.io.InputStream

import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.{Sink, StreamConverters}
import cmwell.tools.data.sparql.SparqlProcessor
import cmwell.tools.data.utils.akka.{concatByteStrings, endl}
import cmwell.tools.data.utils.chunkers.GroupChunker
import scala.concurrent.ExecutionContext.Implicits.global

import scala.concurrent.duration.FiniteDuration

object SparqlUtils {

  def createJavaStreamFromPaths(baseUrl: String,
                                parallelism: Int = 4,
                                isNeedWrapping: Boolean = true,
                                sparqlQuery: String,
                                in: InputStream) = {

    implicit val system = ActorSystem("reactive-sparql-processor")
    implicit val mat = ActorMaterializer()

    SparqlProcessor
      .createSourceFromPathsInputStream(
        baseUrl = baseUrl,
        spQueryParamsBuilder = (p: Seq[String], v: Map[String,String], q: Boolean) => "sp.pid=" + p.head.substring(p.head.lastIndexOf('-') + 1),
        parallelism = parallelism,
        isNeedWrapping = isNeedWrapping,
        sparqlQuery = sparqlQuery,
        in = in
      )
      .map { case (data, _) => data }
      .via(GroupChunker(GroupChunker.formatToGroupExtractor("ntriples")))
      .map(concatByteStrings(_, endl))
      .runWith(StreamConverters.asJavaStream())
  }

  def createJavaOutputStreamFromPaths(baseUrl: String,
                                      parallelism: Int = 4,
                                      isNeedWrapping: Boolean = true,
                                      sparqlQuery: String,
                                      in: InputStream,
                                      timeout: FiniteDuration) = {
    implicit val system = ActorSystem("reactive-sparql-processor")
    implicit val mat = ActorMaterializer()

    SparqlProcessor
      .createSourceFromPathsInputStream(
        baseUrl = baseUrl,
        spQueryParamsBuilder = (p: Seq[String], v: Map[String,String], q: Boolean) => "sp.pid=" + p.head.substring(p.head.lastIndexOf('-') + 1),
        parallelism = parallelism,
        isNeedWrapping = isNeedWrapping,
        sparqlQuery = sparqlQuery,
        in = in
      )
      .map { case (data, _) => data }
      .via(GroupChunker(GroupChunker.formatToGroupExtractor("ntriples")))
      .map(concatByteStrings(_, endl))
      .runWith(StreamConverters.asInputStream(timeout))
  }
} 
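Note: StreamConverters is the bridge between Akka Streams and blocking Java IO: asJavaStream() exposes the elements as a java.util.stream.Stream, while asInputStream(timeout) exposes the bytes as a blocking InputStream. Minimal sketch of the latter:

import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.{Source, StreamConverters}
import akka.util.ByteString

import scala.concurrent.duration._

object StreamConvertersSketch extends App {
  implicit val system = ActorSystem("stream-converters-sketch")
  implicit val mat = ActorMaterializer()

  val in = Source.single(ByteString("hello")).runWith(StreamConverters.asInputStream(3.seconds))
  println(scala.io.Source.fromInputStream(in).mkString) // hello
  system.terminate()
}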
Example 84
Source File: IngesterUtils.scala    From CM-Well   with Apache License 2.0 5 votes vote down vote up
package cmwell.tools.data.ingester.japi

import java.io._

import akka.Done
import akka.actor.ActorSystem
import akka.stream.scaladsl.Sink
import akka.stream.ActorMaterializer
import cmwell.tools.data.ingester.Ingester
import cmwell.tools.data.utils.akka._

import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.Future


object IngesterUtils {

  def fromInputStream(host: String,
                      format: String,
                      writeToken: String,
                      in: InputStream,
                      onFinish: Runnable): Future[Done] = {

    implicit val system = ActorSystem("reactive-downloader")
    implicit val mat = ActorMaterializer()

    Ingester
      .fromInputStream(
        baseUrl = host,
        format = format,
        writeToken = Option(writeToken),
        in = in
      )
      .runWith(Sink.ignore)
      .andThen { case _ => cleanup() }
      .andThen { case _ => onFinish.run() }
  }
} 
Example 85
Source File: DownloaderUtils.scala    From CM-Well   with Apache License 2.0
package cmwell.tools.data.downloader.streams.japi

import java.io.InputStream

import akka.Done
import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.Sink
import cmwell.tools.data.downloader.streams.Downloader
import cmwell.tools.data.utils.akka._

import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.Future


// object declaration restored here; the listing omitted the enclosing wrapper
object DownloaderUtils {

  def fromQuery(host: String,
                path: String,
                params: String,
                qp: String,
                format: String,
                op: String,
                length: Option[Int],
                recursive: Boolean,
                onFinish: Runnable): Future[Done] = {

    implicit val system = ActorSystem("reactive-downloader")
    implicit val mat = ActorMaterializer()

    Downloader
      .downloadFromQuery(
        baseUrl = host,
        path = path,
        params = params,
        qp = qp,
        format = format,
        op = op,
        length = length,
        recursive = recursive,
        // scalastyle:off
        outputHandler = println
        // scalastyle:on
      )
      .andThen { case _ => cleanup() }
      .andThen { case _ => onFinish.run() }
  }

  def fromUuidInputStream(host: String, format: String, op: String, in: InputStream, onFinish: Runnable) = {

    implicit val system = ActorSystem("reactive-downloader")
    implicit val mat = ActorMaterializer()

    Downloader
      .downloadFromUuidInputStream(
        baseUrl = host,
        format = format,
        // scalastyle:off
        outputHandler = println,
        // scalastyle:on
        in = in
      )
      .andThen { case _ => cleanup() }
      .andThen { case _ => onFinish.run() }
  }
} 
Example 86
Source File: TsvRetrieverFromFile.scala    From CM-Well   with Apache License 2.0
package cmwell.dc.stream

import java.io.{BufferedWriter, File, FileWriter}

import akka.actor.ActorSystem
import akka.stream.{KillSwitch, KillSwitches, Materializer}
import akka.stream.Supervision.Decider
import akka.stream.contrib.SourceGen
import akka.stream.scaladsl.{Flow, Keep, Sink, Source}
import akka.util.ByteString
import cmwell.dc.LazyLogging
import cmwell.dc.stream.MessagesTypesAndExceptions.{DcInfo, InfotonData}
import cmwell.dc.stream.TsvRetriever.{logger, TsvFlowOutput}
import cmwell.util.resource._

import scala.concurrent.Future
import scala.util.{Failure, Success}
import scala.concurrent.ExecutionContext.Implicits.global


object TsvRetrieverFromFile extends LazyLogging {

  def apply(dcInfo: DcInfo)(implicit mat: Materializer,
                            system: ActorSystem): Source[InfotonData, (KillSwitch, Future[Seq[Option[String]]])] = {
    val persistFile = dcInfo.tsvFile.get + ".persist"

    def appendToPersistFile(str: String): Unit = {
      val bw = new BufferedWriter(new FileWriter(persistFile, true))
      bw.write(str)
      bw.close()
    }

    val linesToDrop = dcInfo.positionKey.fold {
      if (!new File(persistFile).exists) 0L
      else using(scala.io.Source.fromFile(persistFile))(_.getLines.toList.last.toLong)
    }(pos => pos.toLong)
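    // recover below injects a sentinel InfotonData(null, null, -1) on failure, so the
    // scan stage can persist the last good line count before the stream terminates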
    val positionKeySink = Flow[InfotonData]
      .recover {
        case _: Throwable => InfotonData(null, null, -1)
      }
      .scan(linesToDrop) {
        case (count, InfotonData(null, null, -1)) => {
          appendToPersistFile("crash at: " + count + "\n" + count.toString + "\n")
          count
        }
        case (count, _) => {
          val newCount = count + 1
          if (newCount % 10000 == 0) appendToPersistFile(newCount.toString + "\n")
          newCount
        }
      }
      .toMat(Sink.last)(
        (_, right) =>
          right.map { count =>
            appendToPersistFile(count.toString + "\n")
            Seq.fill(2)(Option(count.toString))
        }
      )

    Source
      .fromIterator(() => scala.io.Source.fromFile(dcInfo.tsvFile.get).getLines())
      .drop {
        logger.info(s"Dropping $linesToDrop initial lines from file ${dcInfo.tsvFile.get} for sync ${dcInfo.key}")
        linesToDrop
      }
      .viaMat(KillSwitches.single)(Keep.right)
      .map(line => TsvRetriever.parseTSVAndCreateInfotonDataFromIt(ByteString(line)))
      .alsoToMat(positionKeySink)(Keep.both)
  }
} 
Example 87
Source File: HttpUtil.scala    From CM-Well   with Apache License 2.0
package cmwell.analytics.util

import akka.actor.ActorSystem
import akka.http.scaladsl.Http
import akka.http.scaladsl.model.RequestEntityAcceptance.Tolerated
import akka.http.scaladsl.model.{HttpMethod, HttpRequest, HttpResponse}
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.Sink
import akka.util.ByteString
import com.fasterxml.jackson.databind.{JsonNode, ObjectMapper}
import com.typesafe.config.ConfigFactory

import scala.concurrent.duration._
import scala.concurrent.{Await, ExecutionContextExecutor, Future}

object HttpUtil {

  private val mapper = new ObjectMapper()

  private val config = ConfigFactory.load
  private val ReadTimeout = FiniteDuration(config.getDuration("extract-index-from-es.read-timeout").toMillis, MILLISECONDS)

  // Elasticsearch uses the POST verb in some places where the request is actually idempotent.
  // Requests that use POST but are known to be idempotent can use this method.
  // The presence of any non-idempotent request in-flight causes Akka to not retry, and that tends to result in
  // entire downloads failing more often.
  val SAFE_POST = HttpMethod(
    value = "POST",
    isSafe = true,
    isIdempotent = true,
    requestEntityAcceptance = Tolerated)

  def resultAsync(request: HttpRequest,
                  action: String)
                 (implicit system: ActorSystem,
                  executionContext: ExecutionContextExecutor,
                  actorMaterializer: ActorMaterializer): Future[ByteString] =
    Http().singleRequest(request).map {

      case HttpResponse(status, _, entity, _) if status.isSuccess =>
        entity.dataBytes
          .fold(ByteString.empty)(_ ++ _)
          .runWith(Sink.head)

      case HttpResponse(status, _, entity, _) =>
        val message = Await.result(entity.toStrict(10.seconds).map(_.data), 10.seconds).utf8String
        throw new RuntimeException(s"HTTP request for $action failed. Status code: $status, message:$message")
    }
      .flatMap(identity)

  def result(request: HttpRequest,
             action: String,
             timeout: FiniteDuration = ReadTimeout)
            (implicit system: ActorSystem,
             executionContext: ExecutionContextExecutor,
             actorMaterializer: ActorMaterializer): ByteString =
    Await.result(resultAsync(request, action), timeout)

  def jsonResult(request: HttpRequest,
                 action: String,
                 timeout: FiniteDuration = ReadTimeout)
                (implicit system: ActorSystem,
                 executionContext: ExecutionContextExecutor,
                 actorMaterializer: ActorMaterializer): JsonNode =
    mapper.readTree(result(request, action, timeout).utf8String)

  def jsonResultAsync(request: HttpRequest,
                      action: String)
                     (implicit system: ActorSystem,
                      executionContext: ExecutionContextExecutor,
                      actorMaterializer: ActorMaterializer): Future[JsonNode] =
    resultAsync(request, action).map((bytes: ByteString) => mapper.readTree(bytes.utf8String))
} 
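As a quick illustration of the SAFE_POST comment above, a hedged sketch of an idempotent Elasticsearch search call (the host and index are placeholders, and the implicits mirror what jsonResult expects):

import akka.actor.ActorSystem
import akka.http.scaladsl.model._
import akka.stream.ActorMaterializer
import com.fasterxml.jackson.databind.JsonNode
import scala.concurrent.ExecutionContextExecutor

// Hypothetical search call; localhost:9200 and my-index are placeholders.
def matchAll()(implicit system: ActorSystem,
               ec: ExecutionContextExecutor,
               mat: ActorMaterializer): JsonNode = {
  val request = HttpRequest(
    method = HttpUtil.SAFE_POST,
    uri = "http://localhost:9200/my-index/_search",
    entity = HttpEntity(ContentTypes.`application/json`, """{"query":{"match_all":{}}}"""))
  HttpUtil.jsonResult(request, action = "search my-index") // blocks for up to ReadTimeout
}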
Example 88
Source File: GlobalSettingsActor.scala    From akka-viz   with MIT License
package akkaviz.events

import akka.actor._
import akka.stream.{ActorMaterializer, OverflowStrategy}
import akka.stream.scaladsl.{Sink, Source}
import akkaviz.config.Config
import akkaviz.events.GlobalSettingsActor.{DisableThroughput, EnableThroughput, GetDelay}
import akkaviz.events.types.{ThroughputMeasurement, BackendEvent, ReceiveDelaySet}

import scala.concurrent.duration._


class GlobalSettingsActor extends Actor with ActorLogging {
  private[this] var eventPublisher: Option[ActorRef] = None
  private[this] var throughputSrcRef: Option[ActorRef] = None

  implicit val mat = ActorMaterializer()

  override def receive: Receive = {
    case publisher: ActorRef =>
      eventPublisher = Some(publisher)
      self ! DisableThroughput // todo get from config (could be on by default)

    case EnableThroughput =>
      val src = Source.actorRef[BackendEvent](Config.bufferSize, OverflowStrategy.dropHead)
        .mapMaterializedValue { ref =>
          throughputSrcRef = Some(ref) // keep the ref so DisableThroughput can stop the stream
          EventSystem.subscribe(ref)
        }
      val sink = Sink.foreach[ThroughputMeasurement](ev => EventSystem.report(ev))
      src.via(ThroughputMeasurementFlow(1.second)).to(sink).run()

    case DisableThroughput =>
      throughputSrcRef.foreach { ref =>
        ref ! PoisonPill
        throughputSrcRef = None
      }

  }
}

object GlobalSettingsActor {

  case object GetDelay
  case object EnableThroughput
  case object DisableThroughput
} 
Example 89
Source File: ProtocolSerializationSupportTest.scala    From akka-viz   with MIT License
package akkaviz.server

import akka.actor.ActorSystem
import akka.http.scaladsl.model.ws.BinaryMessage
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.{Sink, Source}
import akka.util.ByteString
import akkaviz.protocol.{IO, Killed, SetEnabled}
import org.scalatest.concurrent.ScalaFutures
import org.scalatest.{FunSuite, Matchers}

import scala.concurrent.duration._

class ProtocolSerializationSupportTest extends FunSuite with ScalaFutures with Matchers {

  private[this] implicit val system = ActorSystem()
  private[this] implicit val materializer = ActorMaterializer()

  override implicit val patienceConfig: PatienceConfig = PatienceConfig(10.seconds)

  import ProtocolSerializationSupport._

  test("websocketMessageToClientMessage") {
    val msg = SetEnabled(true)
    val wsMessage = BinaryMessage(ByteString(IO.write(msg)))
    val res = Source.single(wsMessage).via(websocketMessageToClientMessage).runWith(Sink.head)
    whenReady(res) {
      _ shouldBe msg
    }
  }

  test("protocolServerMessageToByteString") {
    val msg = Killed("ref")
    val res = Source.single(msg).via(protocolServerMessageToByteString).runWith(Sink.head)
    whenReady(res) {
      serialized =>
        IO.readServer(serialized.asByteBuffer) shouldBe msg
    }
  }

} 
Example 90
Source File: ThroughputMeasurementFlowTest.scala    From akka-viz   with MIT License
package akkaviz.events

import akka.actor.{ActorRef, ActorSystem}
import akka.pattern
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.{Keep, Sink, Source}
import akka.testkit.{TestActorRef, TestKit}
import akkaviz.events.types.{BackendEvent, ReceivedWithId, ThroughputMeasurement}
import org.scalatest.concurrent.ScalaFutures
import org.scalatest.{Matchers, WordSpecLike}

import scala.concurrent.Future

class ThroughputMeasurementFlowTest extends TestKit(ActorSystem("FlowTestSystem"))
    with WordSpecLike with Matchers with ScalaFutures {

  import scala.concurrent.duration._

  implicit val materializer = ActorMaterializer()(system)

  val firstRef = TestActorRef[SomeActor](new SomeActor, "first")
  val secondRef = TestActorRef[SomeActor](new SomeActor, "second")

  override implicit val patienceConfig = PatienceConfig(timeout = 5.seconds)

  "ThroughputMeasurementFlow" should {

    "not emit any measurements if there are no Received events" in {
      val src = Source.empty[BackendEvent]
      val sink: Sink[BackendEvent, Future[List[BackendEvent]]] = Sink.fold(List.empty[BackendEvent])((list, ev) => ev :: list)

      val materialized = ThroughputMeasurementFlow(1.second).runWith(src, sink)._2

      whenReady(materialized) { r =>
        r should be('empty)
      }
    }

    "emit proper measured value for one message" in {
      val src = Source.single(ReceivedWithId(1, ActorRef.noSender, firstRef, "sup", true))
      val mat = src.via(ThroughputMeasurementFlow(1.second))
        .toMat(Sink.head[ThroughputMeasurement])(Keep.right).run()

      whenReady(mat) { measurement =>
        measurement.actorRef should equal(firstRef)
        measurement.msgsPerSecond should equal(1.0)
      }
    }

    "emit measured value for one message and 0 for actors which didn't receive anything" in {
      import system.dispatcher
      val src = Source(List(
        ReceivedWithId(1, ActorRef.noSender, firstRef, "sup", true),
        ReceivedWithId(2, ActorRef.noSender, secondRef, "sup", true)
      )).concat(Source.fromFuture(pattern.after(2.seconds, system.scheduler) {
        Future.successful(ReceivedWithId(3, ActorRef.noSender, firstRef, "sup", true))
      }))

      val mat = src.via(ThroughputMeasurementFlow(1.second))
        .toMat(Sink.fold(List.empty[ThroughputMeasurement]) { (list, ev) => ev :: list })(Keep.right).run()

      whenReady(mat) { measurements =>
        val measurementsFor = measurements.groupBy(_.actorRef)
        measurementsFor(firstRef).map(_.msgsPerSecond) should not contain 0.0
        measurementsFor(secondRef).sortBy(_.timestamp).map(_.msgsPerSecond) should contain inOrder (1.0, 0.0)
      }
    }
  }
} 
Example 91
Source File: AkkaStreamProcess.scala    From aecor   with MIT License
package aecor.example.process

import aecor.util.effect._
import akka.stream.scaladsl.{ Keep, Sink, Source }
import akka.stream.{ KillSwitches, Materializer }
import cats.effect.Async
import cats.implicits._

object AkkaStreamProcess {
  final class Builder[F[_]] {
    def apply[M](source: Source[Unit, M],
                 materializer: Materializer)(implicit F: Async[F]): F[Unit] =
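      // bracket semantics: acquire materializes the stream with a kill switch, use waits
      // for the stream to terminate, and release shuts the kill switch down, so the
      // stream is stopped even if the surrounding F computation is cancelled or fails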
      F.bracket(
        F.delay(
          source
            .viaMat(KillSwitches.single)(Keep.right)
            .toMat(Sink.ignore)(Keep.both)
            .run()(materializer)
        )
      )(x => F.fromFuture(x._2).void)(x => F.delay(x._1.shutdown()))

  }
  def apply[F[_]]: Builder[F] = new Builder[F]
} 
Example 92
Source File: Fs2AkkaStreamInterop.scala    From aecor   with MIT License
package aecor.example.common

import akka.stream.Materializer
import akka.stream.scaladsl.{ Keep, Sink, Source }
import cats.effect.ConcurrentEffect
import fs2.Stream
import fs2.interop.reactivestreams._
import cats.implicits._

object Fs2AkkaStreamInterop {
  implicit final class SourceToStream[A, Mat](val self: Source[A, Mat]) extends AnyVal {
    def materializeToStream[F[_]](
      materializer: Materializer
    )(implicit F: ConcurrentEffect[F]): F[(Mat, Stream[F, A])] = F.delay {
      val (mat, publisher) = self.toMat(Sink.asPublisher(false))(Keep.both).run()(materializer)
      (mat, publisher.toStream[F])
    }
    def toStream[F[_]](materializer: Materializer)(implicit F: ConcurrentEffect[F]): Stream[F, A] =
      Stream.force(materializeToStream[F](materializer).map(_._2))
  }
} 
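A hedged usage sketch of the syntax above with cats-effect IO (the implicit ContextShift[IO], which makes ConcurrentEffect[IO] available, and the materializer are assumptions):

import akka.stream.Materializer
import akka.stream.scaladsl.Source
import cats.effect.{ContextShift, IO}
import aecor.example.common.Fs2AkkaStreamInterop._

def firstTen(implicit cs: ContextShift[IO], mat: Materializer): IO[List[Int]] =
  Source(1 to 100).toStream[IO](mat).take(10).compile.toList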
Example 93
Source File: DefaultScheduleEventJournal.scala    From aecor   with MIT License
package aecor.schedule.process

import java.util.UUID

import aecor.data.{ Committable, ConsumerId, EntityEvent, EventTag }
import aecor.runtime.akkapersistence.readside.CommittableEventJournalQuery
import aecor.schedule.{ ScheduleBucketId, ScheduleEvent }
import aecor.util.effect._
import akka.stream.Materializer
import akka.stream.scaladsl.{ Keep, Sink }
import cats.effect.Effect
import cats.implicits._

object DefaultScheduleEventJournal {
  def apply[F[_]: Effect](
    consumerId: ConsumerId,
    parallelism: Int,
    aggregateJournal: CommittableEventJournalQuery[F, UUID, ScheduleBucketId, ScheduleEvent],
    eventTag: EventTag
  )(implicit materializer: Materializer): DefaultScheduleEventJournal[F] =
    new DefaultScheduleEventJournal(consumerId, parallelism, aggregateJournal, eventTag)
}

final class DefaultScheduleEventJournal[F[_]: Effect](
  consumerId: ConsumerId,
  parallelism: Int,
  aggregateJournal: CommittableEventJournalQuery[F, UUID, ScheduleBucketId, ScheduleEvent],
  eventTag: EventTag
)(implicit materializer: Materializer)
    extends ScheduleEventJournal[F] {
  override def processNewEvents(
    f: EntityEvent[ScheduleBucketId, ScheduleEvent] => F[Unit]
  ): F[Unit] =
    Effect[F].fromFuture {
      aggregateJournal
        .currentEventsByTag(eventTag, consumerId)
        .mapAsync(parallelism)(_.map(_.event).traverse(f).unsafeToFuture())
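        // fold(Committable.unit)(Keep.right) keeps only the most recent Committable,
        // so the single commit below acknowledges everything processed before it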
        .fold(Committable.unit[F])(Keep.right)
        .mapAsync(1)(_.commit.unsafeToFuture())
        .runWith(Sink.ignore)
    }.void
} 
Example 94
Source File: AkkaStreamProcess.scala    From aecor   with MIT License
package aecor.distributedprocessing

import aecor.distributedprocessing.DistributedProcessing._
import aecor.util.effect._
import akka.stream.scaladsl.{ Keep, Sink, Source }
import akka.stream.{ KillSwitches, Materializer }
import cats.effect.Async
import cats.implicits._

object AkkaStreamProcess {
  final class Builder[F[_]] {
    def apply[M](source: Source[Unit, M])(implicit F: Async[F],
                                          materializer: Materializer): Process[F] =
      Process(run = F.delay {
        val (killSwitch, terminated) = source
          .viaMat(KillSwitches.single)(Keep.right)
          .toMat(Sink.ignore)(Keep.both)
          .run()
        RunningProcess(F.fromFuture(terminated).void, F.delay(killSwitch.shutdown()))
      })
  }
  def apply[F[_]]: Builder[F] = new Builder[F]
} 
Example 95
Source File: AkkaPersistenceRuntimeSpec.scala    From aecor   with MIT License
package aecor.tests

import aecor.data.Tagging
import aecor.runtime.akkapersistence.{ AkkaPersistenceRuntime, CassandraJournalAdapter }
import aecor.tests.e2e._
import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.Sink
import akka.testkit.TestKit
import cats.effect.IO
import cats.implicits._
import com.typesafe.config.{ Config, ConfigFactory }
import org.scalatest.matchers.should.Matchers
import org.scalatest.concurrent.ScalaFutures
import org.scalatest.funsuite.AnyFunSuiteLike

import scala.concurrent.duration._

object AkkaPersistenceRuntimeSpec {
  def conf: Config = ConfigFactory.parseString(s"""
        akka {
          cluster {
            seed-nodes = [
              "akka.tcp://[email protected]:52000"
            ]
          }
          actor.provider = cluster
          remote {
            netty.tcp {
              hostname = 127.0.0.1
              port = 52000
              bind.hostname = "0.0.0.0"
              bind.port = 52000
            }
          }
        }
        aecor.generic-akka-runtime.idle-timeout = 1s
     """).withFallback(CassandraLifecycle.config).withFallback(ConfigFactory.load())
}

class AkkaPersistenceRuntimeSpec
    extends TestKit(ActorSystem("test", AkkaPersistenceRuntimeSpec.conf))
    with AnyFunSuiteLike
    with Matchers
    with ScalaFutures
    with CassandraLifecycle {

  override def systemName = system.name

  override implicit val patienceConfig = PatienceConfig(30.seconds, 150.millis)

  val timer = IO.timer(system.dispatcher)
  implicit val contextShift = IO.contextShift(system.dispatcher)
  override def afterAll(): Unit = {
    TestKit.shutdownActorSystem(system)
    super.afterAll()
  }

  val runtime = AkkaPersistenceRuntime(system, CassandraJournalAdapter(system))

  test("Runtime should work") {
    val deployCounters: IO[CounterId => Counter[IO]] =
      runtime.deploy(
        "Counter",
        CounterBehavior.instance[IO],
        Tagging.const[CounterId](CounterEvent.tag)
      )
    val program = for {
      counters <- deployCounters
      first = counters(CounterId("1"))
      second = counters(CounterId("2"))
      _ <- first.increment
      _ <- second.increment
      _2 <- second.value
      _ <- first.decrement
      _1 <- first.value
      afterPassivation <- timer.sleep(2.seconds) >> second.value
    } yield (_1, _2, afterPassivation)

    program.unsafeRunSync() shouldEqual ((0L, 1L, 1L))
  }
  test("Journal should work") {
    implicit val materializer = ActorMaterializer()
    val journal = runtime.journal[CounterId, CounterEvent]
    val entries = journal.currentEventsByTag(CounterEvent.tag, None).runWith(Sink.seq).futureValue

    val map = entries.map(_.event).groupBy(_.entityKey)
    map(CounterId("1")).size shouldBe 2
    map(CounterId("2")).size shouldBe 1
  }
} 
Example 96
Source File: WSApi.scala    From mist   with Apache License 2.0
package io.hydrosphere.mist.master.interfaces.http

import akka.http.scaladsl.model.ws._
import akka.http.scaladsl.server.{Directives, Route}
import akka.stream.ActorAttributes.supervisionStrategy
import akka.stream.Supervision.resumingDecider
import akka.stream.{ActorAttributes, Supervision}
import akka.stream.scaladsl.{Flow, Sink}
import io.hydrosphere.mist.master.EventsStreamer
import io.hydrosphere.mist.master.Messages.StatusMessages._
import io.hydrosphere.mist.master.interfaces.JsonCodecs

import scala.concurrent.duration._
import spray.json._

import scala.language.postfixOps


class WSApi(streamer: EventsStreamer)(implicit val keepAliveTimeout: FiniteDuration) {

  import Directives._
  import JsonCodecs._

  val route: Route = {
    pathPrefix("v2" / "api"/ "ws" ) { parameter('withLogs ? false)  { withLogs =>
      path("all") {
        get {
          handleWebSocketMessagesWithKeepAlive(allEventsWsFlow(withLogs))
        }
      } ~
      path("jobs" / Segment) { jobId =>
        get {
          handleWebSocketMessagesWithKeepAlive(jobWsFlow(jobId, withLogs))
        }
      }
    }}
  }

  private def handleWebSocketMessagesWithKeepAlive(handler: Flow[Message, Message, akka.NotUsed]): Route =
    handleWebSocketMessages(handler
      .withAttributes(supervisionStrategy(resumingDecider))
      .keepAlive(
        keepAliveTimeout,
        () => TextMessage.Strict(KeepAliveEvent.asInstanceOf[SystemEvent].toJson.toString())
      ))


  private def jobWsFlow(id: String, withLogs: Boolean): Flow[Message, Message, akka.NotUsed] = {
    val source = streamer.eventsSource()
      .filter({
        case e: UpdateStatusEvent => e.id == id
        case e: ReceivedLogs if withLogs => e.id == id
        case _ => false
      })
      .map(toWsMessage)

    val sink = Sink.ignore // incoming client messages are ignored; this socket only pushes events out
    Flow.fromSinkAndSource(sink, source)
  }

  private def allEventsWsFlow(withLogs: Boolean): Flow[Message, Message, akka.NotUsed] = {
    val source = streamer.eventsSource()
      .filter({
        case _: ReceivedLogs => withLogs
        case _ => true
      })
      .map(toWsMessage)

    val sink = Sink.ignore
    Flow.fromSinkAndSource(sink, source)
  }

  private def toWsMessage(e: SystemEvent): Message = TextMessage.Strict(e.toJson.toString())
} 
Example 97
Source File: LogStreamsSpec.scala    From mist   with Apache License 2.0
package io.hydrosphere.mist.master.logging

import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.{Keep, Sink, Source}
import akka.testkit.TestKit
import io.hydrosphere.mist.core.MockitoSugar
import io.hydrosphere.mist.core.logging.LogEvent
import io.hydrosphere.mist.master.FilteredException
import org.mockito.Mockito.verify
import org.scalatest.{FunSpecLike, Matchers}

import scala.concurrent.duration._
import scala.concurrent.{Await, Future}

class LogStreamsSpec extends TestKit(ActorSystem("log-service-test"))
  with FunSpecLike
  with MockitoSugar
  with Matchers {

  implicit val materializer = ActorMaterializer()

  it("should store events") {
    val writer = mock[LogsWriter]
    when(writer.write(any[String], any[Seq[LogEvent]]))
      .thenReturn(Future.successful(LogUpdate("jobId", Seq.empty, 1)))

    val out = Source.single(LogEvent.mkDebug("id", "message"))
      .via(LogStreams.storeFlow(writer))
      .take(1)
      .toMat(Sink.seq)(Keep.right).run()

    val updates = Await.result(out, Duration.Inf)

    updates.size shouldBe 1
    verify(writer).write(any[String], any[Seq[LogEvent]])
  }

  it("should ignore errors") {
    val event = LogEvent.mkDebug("id", "message")
    val writer = mock[LogsWriter]
    when(writer.write(any[String], any[Seq[LogEvent]]))
      .thenSuccess(LogUpdate("id", Seq(event), 1))
      .thenFailure(FilteredException())
      .thenSuccess(LogUpdate("id", Seq(event), 1))
      .thenFailure(FilteredException())
      .thenSuccess(LogUpdate("id", Seq(event), 1))

    val in = (1 to 5).map(i => LogEvent.mkDebug(s"job-$i", "message"))
    val future = Source(in)
      .via(LogStreams.storeFlow(writer))
      .take(3)
      .toMat(Sink.seq)(Keep.right).run()


    val updates = Await.result(future, Duration.Inf)
    updates.flatMap(_.events).size shouldBe 3
  }

} 
Example 98
Source File: EventStreamerSpec.scala    From mist   with Apache License 2.0
package io.hydrosphere.mist.master

import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.Sink
import akka.testkit.TestKit
import io.hydrosphere.mist.master.Messages.StatusMessages.StartedEvent
import org.scalatest.{FunSpecLike, Matchers}

import scala.concurrent.Await
import scala.concurrent.duration.Duration

class EventStreamerSpec extends TestKit(ActorSystem("streamer"))
  with FunSpecLike
  with Matchers {

  implicit val materializer = ActorMaterializer()

  it("should broadcast events") {
    val streamer = EventsStreamer(system)

    val f = streamer.eventsSource()
      .take(2)
      .runWith(Sink.seq)

    streamer.push(StartedEvent("1", 1))
    streamer.push(StartedEvent("2", 1))
    streamer.push(StartedEvent("3", 1))

    val events = Await.result(f, Duration.Inf)
    events should contain allOf (
      StartedEvent("1", 1),
      StartedEvent("2", 1)
    )
  }

} 
Example 99
Source File: GreeterServiceImpl.scala    From akka-grpc   with Apache License 2.0
//#full-service-impl
package example.myapp.helloworld

import scala.concurrent.Future

import akka.NotUsed
import akka.stream.Materializer
import akka.stream.scaladsl.Sink
import akka.stream.scaladsl.Source

import example.myapp.helloworld.grpc._

class GreeterServiceImpl(implicit mat: Materializer) extends GreeterService {
  import mat.executionContext

  override def sayHello(in: HelloRequest): Future[HelloReply] = {
    println(s"sayHello to ${in.name}")
    Future.successful(HelloReply(s"Hello, ${in.name}"))
  }

  override def itKeepsTalking(in: Source[HelloRequest, NotUsed]): Future[HelloReply] = {
    println(s"sayHello to in stream...")
    in.runWith(Sink.seq).map(elements => HelloReply(s"Hello, ${elements.map(_.name).mkString(", ")}"))
  }

  override def itKeepsReplying(in: HelloRequest): Source[HelloReply, NotUsed] = {
    println(s"sayHello to ${in.name} with stream of chars...")
    Source(s"Hello, ${in.name}".toList).map(character => HelloReply(character.toString))
  }

  override def streamHellos(in: Source[HelloRequest, NotUsed]): Source[HelloReply, NotUsed] = {
    println(s"sayHello to stream...")
    in.map(request => HelloReply(s"Hello, ${request.name}"))
  }
}
//#full-service-impl 
Example 100
Source File: PowerGreeterServiceImpl.scala    From akka-grpc   with Apache License 2.0
//#full-service-impl
package example.myapp.helloworld

import akka.NotUsed
import akka.grpc.scaladsl.Metadata
import akka.stream.Materializer
import akka.stream.scaladsl.{ Sink, Source }
import example.myapp.helloworld.grpc._

import scala.concurrent.Future

class PowerGreeterServiceImpl(materializer: Materializer) extends GreeterServicePowerApi {
  import materializer.executionContext
  private implicit val mat: Materializer = materializer

  override def sayHello(in: HelloRequest, metadata: Metadata): Future[HelloReply] = {
    val greetee = authTaggedName(in, metadata)
    println(s"sayHello to $greetee")
    Future.successful(HelloReply(s"Hello, $greetee"))
  }

  override def itKeepsTalking(in: Source[HelloRequest, NotUsed], metadata: Metadata): Future[HelloReply] = {
    println(s"sayHello to in stream...")
    in.runWith(Sink.seq)
      .map(elements => HelloReply(s"Hello, ${elements.map(authTaggedName(_, metadata)).mkString(", ")}"))
  }

  override def itKeepsReplying(in: HelloRequest, metadata: Metadata): Source[HelloReply, NotUsed] = {
    val greetee = authTaggedName(in, metadata)
    println(s"sayHello to $greetee with stream of chars...")
    Source(s"Hello, $greetee".toList).map(character => HelloReply(character.toString))
  }

  override def streamHellos(in: Source[HelloRequest, NotUsed], metadata: Metadata): Source[HelloReply, NotUsed] = {
    println(s"sayHello to stream...")
    in.map(request => HelloReply(s"Hello, ${authTaggedName(request, metadata)}"))
  }

  // Bare-bones just for GRPC metadata demonstration purposes
  private def isAuthenticated(metadata: Metadata): Boolean =
    metadata.getText("authorization").nonEmpty

  private def authTaggedName(in: HelloRequest, metadata: Metadata): String = {
    val authenticated = isAuthenticated(metadata)
    s"${in.name} (${if (!authenticated) "not " else ""}authenticated)"
  }
}
//#full-service-impl 
Example 101
Source File: ErrorReportingSpec.scala    From akka-grpc   with Apache License 2.0
package example.myapp.helloworld

import akka.actor.{ ActorSystem, ClassicActorSystemProvider }
import akka.grpc.internal.GrpcProtocolNative
import akka.http.scaladsl.model.HttpEntity.{ Chunked, LastChunk }
import akka.http.scaladsl.model._
import akka.http.scaladsl.model.headers.RawHeader
import akka.http.scaladsl.{ Http, HttpConnectionContext }
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.Sink
import example.myapp.helloworld.grpc.{ GreeterService, GreeterServiceHandler }
import io.grpc.Status
import org.junit.runner.RunWith
import org.scalatest.BeforeAndAfterAll
import org.scalatest.concurrent.ScalaFutures
import org.scalatest.matchers.should.Matchers
import org.scalatest.time.Span
import org.scalatest.wordspec.AnyWordSpec
import org.scalatestplus.junit.JUnitRunner

import scala.concurrent.Await
import scala.concurrent.duration._

@RunWith(classOf[JUnitRunner])
class ErrorReportingSpec extends AnyWordSpec with Matchers with ScalaFutures with BeforeAndAfterAll {

  override implicit val patienceConfig = PatienceConfig(5.seconds, Span(100, org.scalatest.time.Millis))

  implicit val system: ActorSystem = ActorSystem()

  implicit val mat = ActorMaterializer()

  "A gRPC server" should {

    val binding = Http()
      .bindAndHandleAsync(
        GreeterServiceHandler(new GreeterServiceImpl())(system.asInstanceOf[ClassicActorSystemProvider]),
        interface = "127.0.0.1",
        port = 0,
        connectionContext = HttpConnectionContext())
      .futureValue

    "respond with an 'unimplemented' gRPC error status when calling an unknown method" in {
      val request = HttpRequest(
        method = HttpMethods.POST,
        entity = HttpEntity.empty(GrpcProtocolNative.contentType),
        uri = s"http://localhost:${binding.localAddress.getPort}/${GreeterService.name}/UnknownMethod")
      val response = Http().singleRequest(request).futureValue

      response.status should be(StatusCodes.OK)
      allHeaders(response) should contain(RawHeader("grpc-status", Status.Code.UNIMPLEMENTED.value().toString))
    }

    "respond with an 'invalid argument' gRPC error status when calling an method without a request body" in {
      val request = HttpRequest(
        method = HttpMethods.POST,
        entity = HttpEntity.empty(GrpcProtocolNative.contentType),
        uri = s"http://localhost:${binding.localAddress.getPort}/${GreeterService.name}/SayHello")
      val response = Http().singleRequest(request).futureValue

      response.status should be(StatusCodes.OK)
      allHeaders(response) should contain(RawHeader("grpc-status", Status.Code.INVALID_ARGUMENT.value().toString))
    }

    def allHeaders(response: HttpResponse) =
      response.entity match {
        case Chunked(_, chunks) =>
          chunks.runWith(Sink.last).futureValue match {
            case LastChunk(_, trailingHeaders) => response.headers ++ trailingHeaders
            case _                             => response.headers
          }
        case _ =>
          response.headers
      }
  }

  override def afterAll: Unit =
    Await.result(system.terminate(), 5.seconds)
} 
Example 102
Source File: ErrorReportingSpec.scala    From akka-grpc   with Apache License 2.0
package example.myapp.helloworld

import akka.actor.ActorSystem
import akka.grpc.internal.GrpcProtocolNative
import akka.http.scaladsl.Http
import akka.http.scaladsl.model._
import akka.http.scaladsl.model.HttpEntity.{ Chunked, LastChunk }
import akka.http.scaladsl.model.headers.RawHeader
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.Sink
import example.myapp.helloworld.grpc.{ GreeterService, GreeterServiceHandlerFactory }
import io.grpc.Status
import org.scalatest.concurrent.ScalaFutures
import org.scalatest.time.Span
import org.scalatest.BeforeAndAfterAll
import org.scalatest.matchers.should.Matchers
import org.scalatest.wordspec.AnyWordSpec

import scala.concurrent.Await
import scala.concurrent.duration._

class ErrorReportingSpec extends AnyWordSpec with Matchers with ScalaFutures with BeforeAndAfterAll {
  implicit val sys = ActorSystem()
  override implicit val patienceConfig = PatienceConfig(5.seconds, Span(100, org.scalatest.time.Millis))

  "A gRPC server" should {
    implicit val mat = ActorMaterializer()

    val handler = GreeterServiceHandlerFactory.create(new GreeterServiceImpl(mat), sys)
    val binding = {
      import akka.http.javadsl.{ ConnectHttp, Http }

      Http(sys).bindAndHandleAsync(handler, ConnectHttp.toHost("127.0.0.1", 0), mat).toCompletableFuture.get
    }

    "respond with an 'unimplemented' gRPC error status when calling an unknown method" in {
      val request = HttpRequest(
        method = HttpMethods.POST,
        entity = HttpEntity.empty(GrpcProtocolNative.contentType),
        uri = s"http://localhost:${binding.localAddress.getPort}/${GreeterService.name}/UnknownMethod")
      val response = Http().singleRequest(request).futureValue

      response.status should be(StatusCodes.OK)
      allHeaders(response) should contain(RawHeader("grpc-status", Status.Code.UNIMPLEMENTED.value().toString))
    }

    "respond with an 'invalid argument' gRPC error status when calling an method without a request body" in {
      val request = HttpRequest(
        method = HttpMethods.POST,
        entity = HttpEntity.empty(GrpcProtocolNative.contentType),
        uri = s"http://localhost:${binding.localAddress.getPort}/${GreeterService.name}/SayHello")
      val response = Http().singleRequest(request).futureValue

      response.status should be(StatusCodes.OK)
      allHeaders(response) should contain(RawHeader("grpc-status", Status.Code.INVALID_ARGUMENT.value().toString))
    }

    def allHeaders(response: HttpResponse) =
      response.entity match {
        case Chunked(_, chunks) =>
          chunks.runWith(Sink.last).futureValue match {
            case LastChunk(_, trailingHeaders) => response.headers ++ trailingHeaders
            case _                             => response.headers
          }
        case _ =>
          response.headers
      }
  }

  override def afterAll: Unit =
    Await.result(sys.terminate(), 5.seconds)
} 
Example 103
Source File: GrpcMarshallingSpec.scala    From akka-grpc   with Apache License 2.0
package akka.grpc.scaladsl

import akka.actor.ActorSystem
import akka.grpc.internal.{ AbstractGrpcProtocol, GrpcProtocolNative, Gzip }
import akka.grpc.scaladsl.headers.`Message-Encoding`
import akka.http.scaladsl.model.{ HttpEntity, HttpRequest }
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.Sink
import io.grpc.{ Status, StatusException }
import io.grpc.testing.integration.messages.{ BoolValue, SimpleRequest }
import io.grpc.testing.integration.test.TestService
import org.scalatest.matchers.should.Matchers
import org.scalatest.wordspec.AnyWordSpec

import scala.collection.immutable
import scala.concurrent.{ Await, Future }
import scala.concurrent.duration._

class GrpcMarshallingSpec extends AnyWordSpec with Matchers {
  "The scaladsl GrpcMarshalling" should {
    val message = SimpleRequest(responseCompressed = Some(BoolValue(true)))
    implicit val serializer = TestService.Serializers.SimpleRequestSerializer
    implicit val system = ActorSystem()
    implicit val mat = ActorMaterializer()
    val awaitTimeout = 10.seconds
    val zippedBytes =
      AbstractGrpcProtocol.encodeFrameData(
        AbstractGrpcProtocol.fieldType(Gzip),
        Gzip.compress(serializer.serialize(message)))

    "correctly unmarshal a zipped object" in {
      val request = HttpRequest(
        headers = immutable.Seq(`Message-Encoding`("gzip")),
        entity = HttpEntity.Strict(GrpcProtocolNative.contentType, zippedBytes))

      val marshalled = Await.result(GrpcMarshalling.unmarshal(request), 10.seconds)
      marshalled.responseCompressed should be(Some(BoolValue(true)))
    }

    "correctly unmarshal a zipped stream" in {
      val request = HttpRequest(
        headers = immutable.Seq(`Message-Encoding`("gzip")),
        entity = HttpEntity.Strict(GrpcProtocolNative.contentType, zippedBytes ++ zippedBytes))

      val stream = Await.result(GrpcMarshalling.unmarshalStream(request), 10.seconds)
      val items = Await.result(stream.runWith(Sink.seq), 10.seconds)
      items(0).responseCompressed should be(Some(BoolValue(true)))
      items(1).responseCompressed should be(Some(BoolValue(true)))
    }

    // https://github.com/grpc/grpc/blob/master/doc/compression.md#compression-method-asymmetry-between-peers
    // test case 6
    "fail with INTERNAL when the compressed bit is on but the encoding is identity" in {
      val request = HttpRequest(
        headers = immutable.Seq(`Message-Encoding`("identity")),
        entity = HttpEntity.Strict(GrpcProtocolNative.contentType, zippedBytes))

      assertFailure(GrpcMarshalling.unmarshal(request), Status.Code.INTERNAL, "encoding")
    }

    // https://github.com/grpc/grpc/blob/master/doc/compression.md#compression-method-asymmetry-between-peers
    // test case 6
    "fail with INTERNAL when the compressed bit is on but the encoding is missing" in {
      val request = HttpRequest(entity = HttpEntity.Strict(GrpcProtocolNative.contentType, zippedBytes))

      assertFailure(GrpcMarshalling.unmarshal(request), Status.Code.INTERNAL, "encoding")
    }

    def assertFailure(failure: Future[_], expectedStatusCode: Status.Code, expectedMessageFragment: String): Unit = {
      val e = Await.result(failure.failed, awaitTimeout).asInstanceOf[StatusException]
      e.getStatus.getCode should be(expectedStatusCode)
      e.getStatus.getDescription should include(expectedMessageFragment)
    }
  }
} 
Example 104
Source File: ChatClient.scala    From akka-http-scala-js-websocket-chat   with MIT License
package example.akkawschat.cli

import scala.concurrent.Future

import akka.actor.ActorSystem

import akka.stream.scaladsl.{ Keep, Source, Sink, Flow }

import akka.http.scaladsl.Http
import akka.http.scaladsl.model.Uri
import akka.http.scaladsl.model.ws._

import upickle.default._

import shared.Protocol

object ChatClient {
  def connect[T](endpoint: Uri, handler: Flow[Protocol.Message, String, T])(implicit system: ActorSystem): Future[T] = {
    val wsFlow: Flow[Message, Message, T] =
      Flow[Message]
        .collect {
          case TextMessage.Strict(msg) ⇒ read[Protocol.Message](msg)
        }
        .viaMat(handler)(Keep.right)
        .map(TextMessage(_))

    val (fut, t) = Http().singleWebSocketRequest(WebSocketRequest(endpoint), wsFlow)
    fut.map {
      case _: ValidUpgrade                         ⇒ t
      case InvalidUpgradeResponse(_, cause)        ⇒ throw new RuntimeException(s"Connection to chat at $endpoint failed with $cause")
    }(system.dispatcher)
  }

  def connect[T](endpoint: Uri, in: Sink[Protocol.Message, Any], out: Source[String, Any])(implicit system: ActorSystem): Future[Unit] =
    connect(endpoint, Flow.fromSinkAndSource(in, out)).map(_ ⇒ ())(system.dispatcher)

  def connect[T](endpoint: Uri, onMessage: Protocol.Message ⇒ Unit, out: Source[String, Any])(implicit system: ActorSystem): Future[Unit] =
    connect(endpoint, Sink.foreach(onMessage), out)
} 
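A minimal connect sketch reusing the imports above (the endpoint URI and the single outgoing message are placeholder assumptions):

implicit val system = ActorSystem("chat-cli")

ChatClient.connect(
  Uri("ws://localhost:8080/chat?name=cli"),
  (msg: Protocol.Message) => println(msg),
  Source.single("hello from the CLI"))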
Example 105
Source File: AkkaHttpServerTemplate.scala    From akka-http-circe-json-template   with Apache License 2.0
package com.vitorsvieira.http.server

import akka.http.scaladsl.Http
import akka.http.scaladsl.Http.{ IncomingConnection, ServerBinding }
import akka.http.scaladsl.server.Route._
import akka.stream.scaladsl.{ Sink, Source }
import com.vitorsvieira.http.config.ServerSettingsTemplate
import com.vitorsvieira.http.routes.AkkaHttpRoutesTemplate

import scala.concurrent.Future
import scala.io.StdIn

object AkkaHttpServerTemplate extends App {

  import ServerSettingsTemplate._

  val server: Source[IncomingConnection, Future[ServerBinding]] =
    Http(actorSystem).bind(httpInterface, httpPort)

  log.info(s"\nAkka HTTP Server - Version ${actorSystem.settings.ConfigVersion} - running at http://$httpInterface:$httpPort/")

  val handler: Future[ServerBinding] =
    server
      .to(
        Sink.foreach {
          connection ⇒
            connection.handleWithAsyncHandler(asyncHandler(AkkaHttpRoutesTemplate.availableRoutes))
        }
      )
      .run()

  handler.failed.foreach { case ex: Exception ⇒ log.error(ex, "Failed to bind to {}:{}", httpInterface, httpPort) }

  StdIn.readLine(s"\nPress RETURN to stop...")

  handler
    .flatMap(binding ⇒ binding.unbind())
    .onComplete(_ ⇒ actorSystem.terminate())
} 
Example 106
Source File: TestConsumer.scala    From asura   with MIT License
package asura.kafka.consumer

import akka.actor.ActorSystem
import akka.kafka.scaladsl.Consumer
import akka.kafka.{ConsumerSettings, Subscriptions}
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.Sink
import com.typesafe.scalalogging.StrictLogging
import org.apache.kafka.common.serialization.StringDeserializer

object TestConsumer extends StrictLogging {

  def main(args: Array[String]): Unit = {

    logger.info("Start consumer")

    implicit val system = ActorSystem("consumer")
    implicit val materializer = ActorMaterializer()
    implicit val ec = system.dispatcher

    val consumerSettings = ConsumerSettings(system, new StringDeserializer, new StringDeserializer)
      .withGroupId("test-group1")

    val done = Consumer
      .plainSource(consumerSettings, Subscriptions.topics("test-topic"))
      .runWith(Sink.foreach(record =>
        logger.info(s"topic:${record.topic()}, partition:${record.partition()}, offset:${record.offset()}, key:${record.key()}, value: ${record.value()}"))
      )
    done onComplete {
      case scala.util.Success(_) => logger.info("Done"); system.terminate()
      case scala.util.Failure(err) => logger.error(err.toString); system.terminate()
    }
  }
} 
Example 107
Source File: TestAvroConsumer.scala    From asura   with MIT License
package asura.kafka.consumer

import akka.actor.ActorSystem
import akka.kafka.scaladsl.Consumer
import akka.kafka.{ConsumerSettings, Subscriptions}
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.{Keep, Sink}
import asura.kafka.avro.SampleAvroClass
import com.typesafe.scalalogging.StrictLogging
import io.confluent.kafka.serializers.{AbstractKafkaAvroSerDeConfig, KafkaAvroDeserializer, KafkaAvroDeserializerConfig}
import org.apache.kafka.clients.consumer.ConsumerConfig
import org.apache.kafka.common.serialization._

import scala.collection.JavaConverters._

object TestAvroConsumer extends StrictLogging {

  def main(args: Array[String]): Unit = {

    implicit val system = ActorSystem("consumer")
    implicit val materializer = ActorMaterializer()
    implicit val ec = system.dispatcher

    val schemaRegistryUrl = ""
    val bootstrapServers = ""
    val topic = ""
    val group = ""

    val kafkaAvroSerDeConfig = Map[String, Any](
      AbstractKafkaAvroSerDeConfig.SCHEMA_REGISTRY_URL_CONFIG -> schemaRegistryUrl,
      KafkaAvroDeserializerConfig.SPECIFIC_AVRO_READER_CONFIG -> true.toString
    )
    val consumerSettings: ConsumerSettings[String, SampleAvroClass] = {
      val kafkaAvroDeserializer = new KafkaAvroDeserializer()
      kafkaAvroDeserializer.configure(kafkaAvroSerDeConfig.asJava, false)
      val deserializer = kafkaAvroDeserializer.asInstanceOf[Deserializer[SampleAvroClass]]

      ConsumerSettings(system, new StringDeserializer, deserializer)
        .withBootstrapServers(bootstrapServers)
        .withGroupId(group)
        .withProperty(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest")
    }

    val samples = (1 to 3)
    val (control, result) = Consumer
      .plainSource(consumerSettings, Subscriptions.topics(topic))
      .take(samples.size.toLong)
      .map(_.value())
      .toMat(Sink.seq)(Keep.both)
      .run()

    control.shutdown()
    result
      .map(records => records.foreach(record => logger.info(record.toString)))
      .onComplete(_ => system.terminate()) // shut the actor system down once the records are logged
  }
} 
Example 108
Source File: WebSocketMessageHandler.scala    From asura   with MIT License
package asura.core.actor.flow

import akka.NotUsed
import akka.actor.{ActorRef, PoisonPill}
import akka.http.scaladsl.model.ws.{Message, TextMessage}
import akka.stream.OverflowStrategy
import akka.stream.scaladsl.{Flow, Sink, Source}
import asura.common.actor.{ActorEvent, SenderMessage}
import asura.common.exceptions.InvalidStatusException
import asura.core.CoreConfig
import asura.core.util.JacksonSupport

import scala.concurrent.duration._

object WebSocketMessageHandler {

  val DEFAULT_BUFFER_SIZE = CoreConfig.DEFAULT_WS_ACTOR_BUFFER_SIZE
  val KEEP_ALIVE_INTERVAL = 2

  def newHandleFlow[T <: AnyRef](workActor: ActorRef, msgClass: Class[T]): Flow[Message, Message, NotUsed] = {
    val incomingMessages: Sink[Message, NotUsed] =
      Flow[Message].map {
        case TextMessage.Strict(text) => JacksonSupport.parse(text, msgClass)
        case _ => throw InvalidStatusException("Unsupported message type")
      }.to(Sink.actorRef[T](workActor, PoisonPill))
    val outgoingMessages: Source[Message, NotUsed] =
      Source.actorRef[ActorEvent](DEFAULT_BUFFER_SIZE, OverflowStrategy.dropHead)
        .mapMaterializedValue { outActor =>
          workActor ! SenderMessage(outActor)
          NotUsed
        }
        .map(result => TextMessage(JacksonSupport.stringify(result)))
        .keepAlive(KEEP_ALIVE_INTERVAL.seconds, () => TextMessage.Strict(""))
    Flow.fromSinkAndSource(incomingMessages, outgoingMessages)
  }

  def newHandleStringFlow[T <: AnyRef](workActor: ActorRef, msgClass: Class[T]): Flow[Message, Message, NotUsed] = {
    val incomingMessages: Sink[Message, NotUsed] =
      Flow[Message].map {
        case TextMessage.Strict(text) => JacksonSupport.parse(text, msgClass)
        case _ => throw InvalidStatusException("Unsupported message type")
      }.to(Sink.actorRef[T](workActor, PoisonPill))
    val outgoingMessages: Source[Message, NotUsed] =
      Source.actorRef[String](DEFAULT_BUFFER_SIZE, OverflowStrategy.dropHead)
        .mapMaterializedValue { outActor =>
          workActor ! SenderMessage(outActor)
          NotUsed
        }
        .map(result => TextMessage(result))
        .keepAlive(KEEP_ALIVE_INTERVAL.seconds, () => TextMessage.Strict(""))
    Flow.fromSinkAndSource(incomingMessages, outgoingMessages)
  }

  def stringToActorEventFlow[T <: AnyRef](workActor: ActorRef, msgClass: Class[T]): Flow[String, String, NotUsed] = {
    val incomingMessages: Sink[String, NotUsed] =
      Flow[String].map {
        case text: String => JacksonSupport.parse(text, msgClass)
      }.to(Sink.actorRef[T](workActor, PoisonPill))
    val outgoingMessages: Source[String, NotUsed] =
      Source.actorRef[ActorEvent](DEFAULT_BUFFER_SIZE, OverflowStrategy.dropHead)
        .mapMaterializedValue { outActor =>
          workActor ! SenderMessage(outActor)
          NotUsed
        }
        .map(result => JacksonSupport.stringify(result))
        .keepAlive(KEEP_ALIVE_INTERVAL.seconds, () => "")
    Flow.fromSinkAndSource(incomingMessages, outgoingMessages)
  }

  def stringToActorEventFlow[T <: AnyRef](workActor: ActorRef): Flow[String, String, NotUsed] = {
    val incomingMessages: Sink[String, NotUsed] =
      Flow[String].to(Sink.actorRef[String](workActor, PoisonPill))
    val outgoingMessages: Source[String, NotUsed] =
      Source.actorRef[ActorEvent](DEFAULT_BUFFER_SIZE, OverflowStrategy.dropHead)
        .mapMaterializedValue { outActor =>
          workActor ! SenderMessage(outActor)
          NotUsed
        }
        .map(result => JacksonSupport.stringify(result))
        .keepAlive(KEEP_ALIVE_INTERVAL.seconds, () => "")
    Flow.fromSinkAndSource(incomingMessages, outgoingMessages)
  }
} 
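A hedged sketch of wiring one of these flows into an akka-http route; ClientCommand, EchoActor, and the route itself are hypothetical and not part of asura:

import akka.actor.{Actor, ActorSystem, Props}
import akka.http.scaladsl.server.Directives._

final case class ClientCommand(action: String) // hypothetical incoming message type

class EchoActor extends Actor { // hypothetical worker
  def receive = { case msg => println(msg) }
}

implicit val system = ActorSystem("ws-demo")
val workActor = system.actorOf(Props[EchoActor])

val wsRoute =
  path("ws") {
    handleWebSocketMessages(WebSocketMessageHandler.newHandleFlow(workActor, classOf[ClientCommand]))
  }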
Example 109
Source File: ClientFlowHttpsSpec.scala    From squbs   with Apache License 2.0
package org.squbs.httpclient

import java.io.InputStream
import java.security.{KeyStore, SecureRandom}
import javax.net.ssl.{KeyManagerFactory, SSLContext, TrustManagerFactory}

import akka.actor.ActorSystem
import akka.http.scaladsl.model._
import akka.http.scaladsl.{ConnectionContext, Http}
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.{Sink, Source}
import akka.util.ByteString
import com.typesafe.config.ConfigFactory
import org.scalatest.{AsyncFlatSpec, BeforeAndAfterAll, Matchers}
import org.squbs.resolver.ResolverRegistry
import org.squbs.testkit.Timeouts._

import scala.concurrent.{Await, Future}
import scala.util.{Success, Try}

object ClientFlowHttpsSpec {

  val config = ConfigFactory.parseString(
    """
      |helloHttps {
      |  type = squbs.httpclient
      |  akka.ssl-config.loose.disableHostnameVerification = true
      |}
    """.stripMargin)

  implicit val system = ActorSystem("ClientFlowHttpsSpec", config)
  implicit val materializer = ActorMaterializer()
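
  // The snippet relies on an sslContext helper that the listing omits; a plausible
  // reconstruction from the imports above (assumes the JKS files are on the classpath):
  def sslContext(storeResource: String, password: String): SSLContext = {
    val keyStore = KeyStore.getInstance("JKS")
    val in: InputStream = getClass.getClassLoader.getResourceAsStream(storeResource)
    keyStore.load(in, password.toCharArray)
    val kmf = KeyManagerFactory.getInstance(KeyManagerFactory.getDefaultAlgorithm)
    kmf.init(keyStore, password.toCharArray)
    val tmf = TrustManagerFactory.getInstance(TrustManagerFactory.getDefaultAlgorithm)
    tmf.init(keyStore)
    val context = SSLContext.getInstance("TLS")
    context.init(kmf.getKeyManagers, tmf.getTrustManagers, new SecureRandom)
    context
  }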

  ResolverRegistry(system).register[HttpEndpoint]("LocalhostHttpsEndpointResolver") { (name, _) =>
    name match {
      case "helloHttps" =>
        Some(HttpEndpoint(s"https://localhost:$port", Some(sslContext("exampletrust.jks", "changeit")), None))
      case _ => None
    }
  }

  import akka.http.scaladsl.server.Directives._
  import system.dispatcher

  val route =
    path("hello") {
      get {
        complete(HttpEntity(ContentTypes.`text/html(UTF-8)`, "Hello World!"))
      }
    }

  val serverBinding = Await.result(Http().bindAndHandle(route, "localhost", 0,
    ConnectionContext.https(sslContext("example.com.jks", "changeit"))), awaitMax)
  val port = serverBinding.localAddress.getPort
}

class ClientFlowHttpsSpec  extends AsyncFlatSpec with Matchers with BeforeAndAfterAll {

  import ClientFlowHttpsSpec._

  override def afterAll: Unit = {
    serverBinding.unbind() map {_ => system.terminate()}
  }

  it should "make a call to Hello Service" in {
    val clientFlow = ClientFlow[Int]("helloHttps")
    val responseFuture: Future[(Try[HttpResponse], Int)] =
      Source.single(HttpRequest(uri = "/hello") -> 42)
        .via(clientFlow)
        .runWith(Sink.head)

    val (Success(response), _) = Await.result(responseFuture, awaitMax)
    response.status should be (StatusCodes.OK)
    val entity = response.entity.dataBytes.runFold(ByteString(""))(_ ++ _) map(_.utf8String)
    entity map { e => e shouldEqual "Hello World!" }
  }
} 
Example 110
Source File: ClientFlowIdleTimeoutSpec.scala    From squbs   with Apache License 2.0
package org.squbs.httpclient

import akka.actor.ActorSystem
import akka.http.scaladsl.Http
import akka.http.scaladsl.model._
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.{Sink, Source, TcpIdleTimeoutException}
import com.typesafe.config.ConfigFactory
import org.scalatest.{AsyncFlatSpec, BeforeAndAfterAll, Matchers}
import org.squbs.resolver.ResolverRegistry
import org.squbs.testkit.Timeouts.awaitMax

import scala.concurrent.{Await, Promise}
import scala.util.{Failure, Success}

object ClientFlowIdleTimeoutSpec {

  val config = ConfigFactory.parseString(
    """
      |akka {
      |  loggers = [
      |    "akka.event.Logging$DefaultLogger"
      |  ]
      |
      |  loglevel = "DEBUG"
      |
      |  http {
      |    server {
      |      idle-timeout = 240 s
      |      request-timeout = 120 s
      |    }
      |
      |    client.idle-timeout = 1 s
      |
      |    host-connection-pool.max-retries = 0
      |  }
      |}
    """.stripMargin)

  implicit val system = ActorSystem("ClientFlowIdleTimeoutSpec", config)
  implicit val materializer = ActorMaterializer()

  ResolverRegistry(system).register[HttpEndpoint]("LocalhostEndpointResolver") { (svcName, _) => svcName match {
    case "slow" => Some(HttpEndpoint(s"http://localhost:$port"))
    case _ => None
  }}

  import akka.http.scaladsl.server.Directives._
  import system.dispatcher

  val route =
    path("slow") {
      get {
        val promise = Promise[String]()
        // Never completing the promise
        onComplete(promise.future) {
          case Success(value) => complete(HttpEntity(ContentTypes.`text/html(UTF-8)`, "Slow...!"))
          case Failure(ex)    => complete(HttpEntity(ContentTypes.`text/html(UTF-8)`, "Slow failed...!"))
        }
      }
    }

  val serverBinding = Await.result(Http().bindAndHandle(route, "localhost", 0), awaitMax)
  val port = serverBinding.localAddress.getPort
}

class ClientFlowIdleTimeoutSpec  extends AsyncFlatSpec with Matchers with BeforeAndAfterAll {

  import ClientFlowIdleTimeoutSpec._

  override def afterAll: Unit = {
    serverBinding.unbind() map {_ => system.terminate()}
  }

  it should "drop the connection after idle-timeout and resume the stream with new connections" in {
    val clientFlow = ClientFlow[Int]("slow")

    val result =
      Source(1 to 10)
        .map(HttpRequest(uri = "/slow") -> _)
        .via(clientFlow)
        .runWith(Sink.seq)

    result map { r =>
      val failures = r.map(_._1).filter(_.isFailure).map(_.failed)
      failures should have size 10
      failures.forall(_.get.isInstanceOf[TcpIdleTimeoutException]) shouldBe true
      r.map(_._2) should contain theSameElementsAs(1 to 10)
    }
  }
} 
Example 111
Source File: ProperShutdownStream.scala    From squbs   with Apache License 2.0
package org.squbs.stream
import java.util.concurrent.atomic.AtomicLong

import akka.Done
import akka.actor.ActorRef
import akka.stream.ClosedShape
import akka.stream.ThrottleMode.Shaping
import akka.stream.scaladsl.{Flow, GraphDSL, Keep, RunnableGraph, Sink, Source}

import scala.concurrent.Future
import scala.concurrent.duration._
import scala.language.postfixOps

object ProperShutdownStream {
  val genCount = new AtomicLong(0L)
}

class ProperShutdownStream extends PerpetualStream[(ActorRef, Future[Long])] {
  import ProperShutdownStream._
  import org.squbs.unicomplex.Timeouts._

  override def stopTimeout = awaitMax

  def generator = Iterator.iterate(0){ p => if (p == Int.MaxValue) 0 else p + 1 } map { v =>
    genCount.incrementAndGet()
    v
  }

  val managedSource = LifecycleManaged().source(Source fromIterator generator _)

  val throttle = Flow[Int].throttle(5000, 1 second, 1000, Shaping)

  val counter = Flow[Int].map { _ => 1L }.reduce { _ + _ }.toMat(Sink.head)(Keep.right)

  override def streamGraph = RunnableGraph.fromGraph(GraphDSL.create(managedSource, counter)((a, b) => (a._2, b)) {
    implicit builder =>
    (source, sink) =>
      import GraphDSL.Implicits._
      source ~> throttle ~> sink
      ClosedShape
  })

  override def receive = {
    case NotifyWhenDone =>
      val (_, fCount) = matValue

      // Send back the future directly here, don't map the future. The map will likely happen after ActorSystem
      // shutdown so we cannot use context.dispatcher as execution context for the map as it won't be there when
      // the map is supposed to happen.
      sender() ! fCount
  }

  override def shutdown() = {
    super.shutdown()
    import context.dispatcher
    val (actorRef, fCount) = matValue
    val fStopped = gracefulStop(actorRef, awaitMax)
    for { _ <- fCount; _ <- fStopped } yield Done
  }
} 
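
A hypothetical caller could retrieve the materialized count by asking the stream actor; a minimal sketch, assuming streamActor resolves to this ProperShutdownStream instance and awaitMax is a suitable ask timeout:

import akka.pattern.ask
import akka.util.Timeout

implicit val timeout: Timeout = Timeout(awaitMax)
val countF: Future[Long] = (streamActor ? NotifyWhenDone).mapTo[Future[Long]].flatten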
Example 112
Source File: KillSwitchStream.scala    From squbs   with Apache License 2.0 5 votes vote down vote up
package org.squbs.stream
import java.util.concurrent.atomic.AtomicLong

import akka.stream.ClosedShape
import akka.stream.ThrottleMode.Shaping
import akka.stream.scaladsl.{Flow, GraphDSL, Keep, RunnableGraph, Sink, Source}

import scala.concurrent.Future
import scala.concurrent.duration._
import scala.language.postfixOps

object KillSwitchStream {
  val genCount = new AtomicLong(0L)
}

class KillSwitchStream extends PerpetualStream[Future[Long]] {
  import KillSwitchStream._
  import org.squbs.unicomplex.Timeouts._

  override def stopTimeout = awaitMax

  def generator = Iterator.iterate(0){ p => if (p == Int.MaxValue) 0 else p + 1 } map { v =>
    genCount.incrementAndGet()
    v
  }

  val source = Source.fromIterator(generator _)

  val throttle = Flow[Int].throttle(5000, 1 second, 1000, Shaping)

  val counter = Flow[Int].map { _ => 1L }.reduce { _ + _ }.toMat(Sink.head)(Keep.right)

  override def streamGraph = RunnableGraph.fromGraph(GraphDSL.create(counter) {
    implicit builder =>
      sink =>
        import GraphDSL.Implicits._
        source ~> killSwitch.flow[Int] ~> throttle ~> sink
        ClosedShape
  })

  override def receive = {
    case NotifyWhenDone =>

      // Send back the future directly here, don't map the future. The map will likely happen after ActorSystem
      // shutdown so we cannot use context.dispatcher as execution context for the map as it won't be there when
      // the map is supposed to happen.
      sender() ! matValue
  }
} 
Example 113
Source File: KillSwitchWithChildActorStream.scala    From squbs   with Apache License 2.0 5 votes vote down vote up
package org.squbs.stream
import java.util.concurrent.atomic.AtomicLong

import akka.actor.{Actor, Props}
import akka.stream.ClosedShape
import akka.stream.ThrottleMode.Shaping
import akka.stream.scaladsl.{Flow, GraphDSL, Keep, RunnableGraph, Sink, Source}

import scala.concurrent.Future
import scala.concurrent.duration._
import scala.language.postfixOps

object KillSwitchWithChildActorStream {
  val genCount = new AtomicLong(0L)
}

class DummyChildActor extends Actor {
  def receive = PartialFunction.empty
}

class KillSwitchWithChildActorStream extends PerpetualStream[Future[Long]] {
  import KillSwitchWithChildActorStream._
  import org.squbs.unicomplex.Timeouts._

  val dummyChildActor = context.actorOf(Props[DummyChildActor])

  override def stopTimeout = awaitMax

  def generator = Iterator.iterate(0){ p => if (p == Int.MaxValue) 0 else p + 1 } map { v =>
    genCount.incrementAndGet()
    v
  }

  val source = Source.fromIterator(generator _)

  val throttle = Flow[Int].throttle(5000, 1 second, 1000, Shaping)

  val counter = Flow[Int].map { _ => 1L }.reduce { _ + _ }.toMat(Sink.head)(Keep.right)

  override def streamGraph = RunnableGraph.fromGraph(GraphDSL.create(counter) {
    implicit builder =>
      sink =>
        import GraphDSL.Implicits._
        source ~> killSwitch.flow[Int] ~> throttle ~> sink
        ClosedShape
  })

  override def receive = {
    case NotifyWhenDone =>

      // Send back the future directly here, don't map the future. The map will likely happen after ActorSystem
      // shutdown so we cannot use context.dispatcher as execution context for the map as it won't be there when
      // the map is supposed to happen.
      sender() ! matValue
  }

  override def shutdown() = {
    val f = super.shutdown()
    defaultMidActorStop(Seq(dummyChildActor))
    f
  }
} 
Example 114
Source File: KillSwitchMatStream.scala    From squbs   with Apache License 2.0 5 votes vote down vote up
package org.squbs.stream

import java.util.concurrent.atomic.AtomicLong

import akka.stream.{ClosedShape, KillSwitch, KillSwitches}
import akka.stream.ThrottleMode.Shaping
import akka.stream.scaladsl.{Flow, GraphDSL, Keep, RunnableGraph, Sink, Source}

import scala.concurrent.Future
import scala.concurrent.duration._
import scala.language.postfixOps

object KillSwitchMatStream {
  val genCount = new AtomicLong(0L)
}

class KillSwitchMatStream extends PerpetualStream[(KillSwitch, Future[Long])] {
  import KillSwitchMatStream._
  import org.squbs.unicomplex.Timeouts._

  override def stopTimeout = awaitMax

  def generator = Iterator.iterate(0){ p => if (p == Int.MaxValue) 0 else p + 1 } map { v =>
    genCount.incrementAndGet()
    v
  }

  val source = Source.fromIterator(generator _)

  val throttle = Flow[Int].throttle(5000, 1 second, 1000, Shaping)

  val counter = Flow[Int].map { _ => 1L }.reduce { _ + _ }.toMat(Sink.head)(Keep.right)

  override def streamGraph = RunnableGraph.fromGraph(GraphDSL.create(KillSwitches.single[Int], counter)((_, _)) {
    implicit builder =>
      (kill, sink) =>
        import GraphDSL.Implicits._
        source ~> kill ~> throttle ~> sink
        ClosedShape
  })

  override def receive = {
    case NotifyWhenDone =>

      // Send back the future directly here, don't map the future. The map will likely happen after ActorSystem
      // shutdown so we cannot use context.dispatcher as execution context for the map as it won't be there when
      // the map is supposed to happen.
      sender() ! matValue._2
  }
} 
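
Since matValue here pairs the KillSwitch with the count future, the stream can also be stopped manually from within the stream class; a minimal sketch (an illustration, not part of the original):

val (switch, countF) = matValue
switch.shutdown() // completes the stream at the KillSwitches.single stage
// countF then completes with the number of elements counted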
Example 115
Source File: JavaFlowSvcSpec.scala    From squbs   with Apache License 2.0 5 votes vote down vote up
package org.squbs.unicomplex

import akka.actor.ActorSystem
import akka.http.scaladsl.model.HttpEntity.Chunked
import akka.http.scaladsl.model.{ContentTypes, HttpEntity, StatusCodes}
import akka.pattern._
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.{Keep, Sink, Source}
import akka.testkit.TestKit
import com.typesafe.config.ConfigFactory
import org.scalatest.{AsyncFlatSpecLike, BeforeAndAfterAll, Matchers}
import org.squbs.lifecycle.GracefulStop
import org.squbs.unicomplex.Timeouts._

object JavaFlowSvcSpec {

  val classPaths = Array(getClass.getClassLoader.getResource("classpaths/JavaFlowSvc").getPath)

  val config = ConfigFactory.parseString(
    s"""
       |default-listener.bind-port = 0
       |squbs {
       |  actorsystem-name = JavaFlowSvcSpec
       |  ${JMX.prefixConfig} = true
       |}
    """.stripMargin
  )

  val boot = UnicomplexBoot(config)
    .createUsing {(name, config) => ActorSystem(name, config)}
    .scanComponents(classPaths)
    .initExtensions.start()
}

class JavaFlowSvcSpec extends TestKit(
  JavaFlowSvcSpec.boot.actorSystem) with AsyncFlatSpecLike with BeforeAndAfterAll with Matchers {

  implicit val am = ActorMaterializer()

  val portBindingsF = (Unicomplex(system).uniActor ? PortBindings).mapTo[Map[String, Int]]
  val portF = portBindingsF map { bindings => bindings("default-listener") }

  override def afterAll(): Unit = {
    Unicomplex(system).uniActor ! GracefulStop
  }

  it should "handle a normal request" in {
    for {
      port <- portF
      response <- entityAsString(s"http://127.0.0.1:$port/javaflowsvc/ping")
    } yield {
      response shouldBe "pong"
    }
  }

  it should "handle a chunked request and be able to provide a chunked response" in {
    val requestChunks = Source.single("Hi this is a test")
      .mapConcat { s => s.split(' ').toList }
      .map(HttpEntity.ChunkStreamPart(_))

    for {
      port <- portF
      response <- post(s"http://127.0.0.1:$port/javaflowsvc/chunks",
                       Chunked(ContentTypes.`text/plain(UTF-8)`, requestChunks))
      responseString <- response.entity.dataBytes.map(_.utf8String).toMat(Sink.fold("") { _ + _})(Keep.right).run()
    } yield {
      response.entity shouldBe 'chunked
      responseString should be("Received 5 chunks and 13 bytes.\r\nThis is the last chunk!")
    }
  }

  it should "get an InternalServerError with blank response if Flow collapses" in {
    for {
      port <- portF
      errResp <- get(s"http://127.0.0.1:$port/javaflowsvc/throwit")
      respEntity <- errResp.entity.toStrict(awaitMax)
    } yield {
      errResp.status shouldBe StatusCodes.InternalServerError
      respEntity.data.utf8String shouldBe 'empty
    }
  }
} 
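
The entityAsString, get and post helpers used above come from surrounding test utilities that are not shown; a minimal entityAsString sketch, assuming plain Akka HTTP:

import akka.http.scaladsl.Http
import akka.http.scaladsl.model.HttpRequest
import scala.concurrent.Future

def entityAsString(uri: String)(implicit system: ActorSystem, mat: ActorMaterializer): Future[String] = {
  import system.dispatcher
  Http().singleRequest(HttpRequest(uri = uri))
    .flatMap(_.entity.dataBytes.runFold("")(_ + _.utf8String))
}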
Example 116
Source File: BoundedOrderingSpec.scala    From squbs   with Apache License 2.0 5 votes vote down vote up
package org.squbs.streams

import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.{Keep, Sink, Source}
import org.scalatest.concurrent.ScalaFutures
import org.scalatest.{FlatSpec, Matchers}

class BoundedOrderingSpec extends FlatSpec with Matchers with ScalaFutures {

  implicit val system = ActorSystem("OrderingStateSpec")
  implicit val mat = ActorMaterializer()

  it should "require waitFor > 0" in {
    an [IllegalArgumentException] should be thrownBy BoundedOrdering[Int, Int](maxBounded = 0, 1, _ + 1, identity)
  }

  it should "retain order of a stream" in {
    val boundedOrdering = BoundedOrdering[Int, Int](maxBounded = 5, 1, _ + 1, identity)
    val input = List(1, 2, 3, 4, 5)
    val output = Source(input).via(boundedOrdering).toMat(Sink.seq)(Keep.right).run()
    output.futureValue should contain theSameElementsInOrderAs input
  }

  it should "re-order the stream completely within the ordering range" in {
    val boundedOrdering = BoundedOrdering[Int, Int](maxBounded = 5, 1, _ + 1, identity)
    val input = List(2, 3, 4, 1, 5, 7, 8, 6, 9, 10)
    val output = Source(input).via(boundedOrdering).toMat(Sink.seq)(Keep.right).run()
    output.futureValue should contain theSameElementsInOrderAs input.sorted
  }

  it should "re-order the stream incompletely outside of the ordering range" in {
    val boundedOrdering = BoundedOrdering[Int, Int](maxBounded = 5, 1, _ + 1, identity)
    val input = List(1, 3, 4, 5, 6, 7, 8, 9, 2, 10)
    val output = Source(input).via(boundedOrdering).toMat(Sink.seq)(Keep.right).run()
    output.futureValue should contain theSameElementsInOrderAs input
  }

  it should "ignore the missing element and keep the stream moving" in {
    val boundedOrdering = BoundedOrdering[Int, Int](maxBounded = 5, 1, _ + 1, identity)
    val input = List(1, 3, 4, 5, 6, 7, 8, 9, 10, 11)
    val output = Source(input).via(boundedOrdering).toMat(Sink.seq)(Keep.right).run()
    output.futureValue should contain theSameElementsInOrderAs input
  }

  it should "re-order the stream with identifier type different from message type" in {
    case class Element(id: Long, content: String)
    val boundedOrdering = BoundedOrdering[Element, Long](maxBounded = 5, 1L, _ + 1L, _.id)
    val input = List(Element(1, "one"), Element(3, "three"), Element(5, "five"), Element(2, "two"), Element(6, "six"),
      Element(7, "seven"), Element(8, "eight"), Element(9, "nine"), Element(10, "ten"), Element(4, "four"))
    val wisb = List(Element(1, "one"), Element(2, "two"), Element(3, "three"), Element(5, "five"), Element(6, "six"),
      Element(7, "seven"), Element(8, "eight"), Element(9, "nine"), Element(10, "ten"), Element(4, "four"))

    val output = Source(input).via(boundedOrdering).toMat(Sink.seq)(Keep.right).run()
    output.futureValue should contain theSameElementsInOrderAs wisb
  }

  it should "re-order the stream using custom id ordering" in {
    case class Element(id: String, content: String)
    implicit val order: Ordering[String] = Ordering.by(_.toInt)
    val boundedOrdering = BoundedOrdering[Element, String](maxBounded = 5, "2", s => (s.toInt + 2).toString, _.id)
    val input = List(Element("2", "one"), Element("6", "three"), Element("10", "five"), Element("4", "two"),
      Element("12", "six"), Element("14", "seven"), Element("16", "eight"), Element("18", "nine"),
      Element("20", "ten"), Element("8", "four"))
    val wisb = List(Element("2", "one"), Element("4", "two"), Element("6", "three"), Element("10", "five"),
      Element("12", "six"), Element("14", "seven"), Element("16", "eight"), Element("18", "nine"),
      Element("20", "ten"), Element("8", "four"))

    val output = Source(input).via(boundedOrdering).toMat(Sink.seq)(Keep.right).run()
    output.futureValue should contain theSameElementsInOrderAs wisb
  }
} 
Example 117
Source File: JsonSupport.scala    From akka-stream-json   with Apache License 2.0 5 votes vote down vote up
package de.knutwalker.akka.http

import de.knutwalker.akka.stream.JsonStreamParser

import akka.http.scaladsl.model.HttpEntity
import akka.http.scaladsl.model.MediaTypes.`application/json`
import akka.http.scaladsl.unmarshalling.{ FromEntityUnmarshaller, Unmarshaller }
import akka.http.scaladsl.util.FastFuture
import akka.stream.scaladsl.Sink
import akka.stream.stage.{ GraphStageLogic, GraphStageWithMaterializedValue, InHandler }
import akka.stream.{ AbruptStageTerminationException, Attributes, Inlet, SinkShape }

import jawn.Facade

import scala.concurrent.{ Future, Promise }
import java.util.NoSuchElementException

object JsonSupport extends JsonSupport {
  private def firstElementSink[J <: AnyRef]: Sink[J, Future[J]] =
    Sink.fromGraph(new FirstElementSinkStage[J])

  private final class FirstElementSinkStage[J <: AnyRef] extends GraphStageWithMaterializedValue[SinkShape[J], Future[J]] {
    private[this] val in: Inlet[J] = Inlet("firstElement.in")

    override val shape: SinkShape[J] = SinkShape.of(in)
    override protected def initialAttributes: Attributes = Attributes.name("firstElement")

    override def createLogicAndMaterializedValue(inheritedAttributes: Attributes): (GraphStageLogic, Future[J]) = {
      val p: Promise[J] = Promise()
      (new GraphStageLogic(shape) with InHandler {
        private[this] var element: J = null.asInstanceOf[J]

        override def preStart(): Unit = pull(in)

        def onPush(): Unit = {
          if (element eq null) {
            element = grab(in)
          }
          pull(in)
        }

        override def onUpstreamFinish(): Unit = {
          val el = element
          element = null.asInstanceOf[J]
          if (el ne null) {
            p.trySuccess(el)
          } else {
            p.tryFailure(new NoSuchElementException("No complete json entity consumed"))
          }
          completeStage()
        }

        override def onUpstreamFailure(ex: Throwable): Unit = {
          element = null.asInstanceOf[J]
          p.tryFailure(ex)
          failStage(ex)
        }

        override def postStop(): Unit = {
          if (!p.isCompleted) {
            p.failure(new AbruptStageTerminationException(this))
            ()
          }
        }

        setHandler(in, this)
      }, p.future)
    }

    override def toString: String = "FirstElementSinkStage"
  }
}

trait JsonSupport {

  implicit def jsonUnmarshaller[J <: AnyRef : Facade]: FromEntityUnmarshaller[J] =
    Unmarshaller.withMaterializer[HttpEntity, J](_ => implicit mat => {
      case HttpEntity.Strict(_, data) => FastFuture(JsonStreamParser.parse[J](data))
      case entity                     => entity.dataBytes.via(JsonStreamParser[J]).runWith(JsonSupport.firstElementSink[J])
    }).forContentTypes(`application/json`)
} 
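
A hypothetical route wiring for the unmarshaller above, assuming the jawn-ast module provides the Facade (the names below are illustrative only):

import akka.http.scaladsl.server.Directives._
import jawn.ast.{ JValue, JawnFacade }

implicit val facade: jawn.Facade[JValue] = JawnFacade

val route = post {
  entity(as[JValue]) { json =>
    complete(json.toString)
  }
}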
Example 118
Source File: ReportPrinter.scala    From cloudflow   with Apache License 2.0 5 votes vote down vote up
package com.example

import akka.stream.scaladsl.Sink

import cloudflow.streamlets._
import cloudflow.streamlets.avro._

import cloudflow.akkastream._
import cloudflow.akkastream.scaladsl._

object ReportPrinter extends AkkaStreamlet {
  // 1. Create inlets and outlets
  val inlet = AvroInlet[Report]("report-in")

  // 2. Define the shape of the streamlet
  val shape = StreamletShape.withInlets(inlet)

  // 3. Override createLogic to provide StreamletLogic
  def createLogic = new RunnableGraphStreamletLogic() {
    def format(report: Report) = s"${report.name}\n\n${report.description}"
    def runnableGraph =
      plainSource(inlet)
        .to(Sink.foreach(report ⇒ println(format(report))))
  }
} 
Example 119
Source File: SttpBackendStubAkkaTests.scala    From sttp   with Apache License 2.0 5 votes vote down vote up
package sttp.client.akkahttp

import akka.actor.ActorSystem
import akka.http.scaladsl.model.ws.{Message, TextMessage}
import akka.stream.OverflowStrategy
import akka.stream.scaladsl.{Flow, Keep, Sink, Source}
import org.scalatest.BeforeAndAfterAll
import org.scalatest.concurrent.ScalaFutures
import org.scalatest.flatspec.AnyFlatSpec
import org.scalatest.matchers.should.Matchers
import sttp.client._
import sttp.model.Headers

import scala.concurrent.{Await, Future}
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.duration._

class SttpBackendStubAkkaTests extends AnyFlatSpec with Matchers with ScalaFutures with BeforeAndAfterAll {

  implicit val system: ActorSystem = ActorSystem()

  override protected def afterAll(): Unit = {
    Await.result(system.terminate().map(_ => ()), 5.seconds)
  }

  "backend stub" should "cycle through responses using a single sent request" in {
    // given
    implicit val backend = AkkaHttpBackend.stub
      .whenRequestMatches(_ => true)
      .thenRespondCyclic("a", "b", "c")

    // when
    def r = basicRequest.get(uri"http://example.org/a/b/c").send().futureValue

    // then
    r.body shouldBe Right("a")
    r.body shouldBe Right("b")
    r.body shouldBe Right("c")
    r.body shouldBe Right("a")
  }

  it should "use given flow as web socket handler" in {
    // This test is an example how can we test client flow.
    // We check behavior of client when connected to echo server.
    // Client responsibility was to send two messages to the server and collect received messages.
    val useHandler: Flow[Message, Message, Future[Seq[Message]]] => Future[Seq[Message]] = clientFlow => {
      val ((outQueue, clientReceivedMessages), inQueue) = Source
        .queue(1, OverflowStrategy.fail)
        .viaMat(clientFlow)(Keep.both)
        .toMat(Sink.queue())(Keep.both)
        .run()

      def echoMsg(): Future[Unit] =
        inQueue.pull().flatMap {
          case None =>
            echoMsg()
          case Some(msg) =>
            outQueue.offer(TextMessage(s"echo: " + msg.asTextMessage.getStrictText)).map(_ => ())
        }

      (for {
        _ <- outQueue.offer(TextMessage("Hi!"))
        _ <- echoMsg()
        _ <- echoMsg()
        _ = outQueue.complete()
        _ <- outQueue.watchCompletion()
      } yield ()).flatMap(_ => clientReceivedMessages)
    }

    val clientFlow: Flow[Message, Message, Future[Seq[Message]]] = {
      Flow.fromSinkAndSourceMat(
        Sink.seq[Message],
        Source((1 to 2).map(i => TextMessage(s"test$i")))
      )(Keep.left)
    }

    implicit val b = AkkaHttpBackend.stub
      .whenRequestMatches(_ => true)
      .thenHandleOpenWebSocket(Headers(List.empty), useHandler)

    val receivedMessages = basicRequest
      .get(uri"wss://echo.websocket.org")
      .openWebsocket(clientFlow)
      .flatMap(_.result)
      .futureValue
      .toList

    receivedMessages shouldBe List("Hi!", "echo: test1", "echo: test2").map(TextMessage(_))
  }
} 
Example 120
Source File: EventProcessor.scala    From akka-kubernetes-tests   with Apache License 2.0 5 votes vote down vote up
package akka.kubernetes.couchbase

import akka.actor.{Actor, ActorLogging, Props}
import akka.persistence.couchbase.UUIDs
import akka.persistence.couchbase.scaladsl.CouchbaseReadJournal
import akka.persistence.query._
import akka.stream.{ActorMaterializer, KillSwitches, Materializer}
import akka.stream.scaladsl.{RestartSource, Sink}

import scala.concurrent.ExecutionContext
import scala.concurrent.duration._

object EventProcessor {
  def props: Props =
    Props(new EventProcessor)
}

class EventProcessor extends Actor with ActorLogging {

  private val settings = Settings(context.system)
  private val eventProcessorId = settings.eventProcessorSettings.id
  private val tag = self.path.name
  private implicit val ec: ExecutionContext = context.dispatcher
  private implicit val materializer: Materializer = ActorMaterializer()(context.system)
  private val query =
    PersistenceQuery(context.system).readJournalFor[CouchbaseReadJournal](CouchbaseReadJournal.Identifier)
  private val killSwitch = KillSwitches.shared("eventProcessorSwitch")
  override val log = super.log // eager initialization because used from inside stream

  override def preStart(): Unit = {
    super.preStart()
    log.info("Starting event processor for tag: {}", tag)
    runQueryStream()
  }

  override def postStop(): Unit = {
    super.postStop()
    killSwitch.shutdown()
  }

  def receive = {
    case KeepAlive.Ping =>
      sender() ! KeepAlive.Pong
      log.debug(
        s"Event processor(${self.path.name}) @ ${context.system.settings.config
          .getString("akka.remote.artery.canonical.hostname")}:${context.system.settings.config.getInt("akka.remote.artery.canonical.port")}"
      )

    case message =>
      log.error("Got unexpected message: {}", message)
  }

  private def runQueryStream(): Unit =
    RestartSource
      .withBackoff(minBackoff = 500.millis, maxBackoff = 20.seconds, randomFactor = 0.1) { () =>
        // TODO offsets, this just starts from the beginning each time
        query
          .eventsByTag(tag, NoOffset)
          .map { eventEnvelope: EventEnvelope =>
            val now = System.currentTimeMillis()
            val publishTime = eventEnvelope.offset match {
              case t: TimeBasedUUID => UUIDs.timestampFrom(t)
            }
            val delay = now - publishTime
            log.info(s"#Eventprocessor($tag) got $eventEnvelope. Event is {} ms delayed", delay) // You would write to Kafka here
            eventEnvelope.offset
          }
      }
      .via(killSwitch.flow)
      .runWith(Sink.ignore)

} 
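
The TODO above notes that each restart replays the tag from the beginning; a minimal in-memory offset-tracking sketch (an assumption, not the project's actual approach, and lost on actor restart):

@volatile private var lastOffset: Offset = NoOffset

// inside runQueryStream, replacing the NoOffset query:
query
  .eventsByTag(tag, lastOffset)
  .map { env =>
    lastOffset = env.offset // the next restart resumes from here
    env
  }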
Example 121
Source File: ClusterSoakSpec.scala    From akka-kubernetes-tests   with Apache License 2.0 5 votes vote down vote up
package akka.cluster.soak
import akka.actor.ActorSystem
import akka.discovery.ServiceDiscovery.Resolved
import akka.event.Logging
import akka.http.scaladsl.Http
import akka.http.scaladsl.model.{HttpRequest, StatusCodes}
import akka.http.scaladsl.unmarshalling.Unmarshal
import akka.kubernetes.soak.Tests.{ResponseTimeNanos, Target}
import akka.kubernetes.soak.{StatsJsonSupport, TestResults}
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.{Sink, Source}
import org.scalatest.concurrent.ScalaFutures
import org.scalatest.time.{Seconds, Span}
import org.scalatest.{Matchers, WordSpec}
import akka.util.PrettyDuration._

import scala.collection.immutable
import scala.concurrent.Future
import scala.concurrent.duration._

class ClusterSoakSpec(endpoints: Resolved)(implicit system: ActorSystem)
    extends WordSpec
    with StatsJsonSupport
    with ScalaFutures
    with Matchers {

  import system.dispatcher
  implicit val mat = ActorMaterializer()
  implicit override val patienceConfig = PatienceConfig(timeout = Span(30, Seconds), interval = Span(2, Seconds))
  val log = Logging(system, getClass)

  "The Clustered service" should {

    "not have had any failures" in {

      val responses: immutable.Seq[TestResults] = Source(endpoints.addresses)
        .mapAsyncUnordered(10) { rt =>
          log.info("Hitting {}", rt.host)
          val request = HttpRequest(uri = s"http://${rt.host}:${rt.port.getOrElse(8080)}/stats")
          for {
            response <- Http().singleRequest(request)
            entity <- response.entity.toStrict(1.second)
            results <- response.status match {
              case StatusCodes.OK =>
                Unmarshal(entity).to[TestResults]
              case unexpected =>
                Future.failed(
                  new RuntimeException(s"Unexpected response code: $unexpected body: ${entity.data.utf8String}")
                )
            }
          } yield results
        }
        .runWith(Sink.seq)
        .futureValue

      log.info("{} nodes tested", responses.size)

      val maxJoinTimes =
        responses.map(_.joiningTime).sorted.reverse.take(5).map(_.nanos.pretty)

      log.info("Max join times: {}", maxJoinTimes)

      val maxResponseTimePerNode: immutable.Seq[(Target, ResponseTimeNanos)] =
        responses.map(_.lastResult.responses.maxBy(_._2))

      val averageResponseTimesPerNode = responses
        .map((eachNode: TestResults) => {
          val total = eachNode.lastResult.responses.map(_._2).sum.nanos
          val count = eachNode.lastResult.responses.size
          total / count
        })
        .sorted
        .reverse

      log.info("All response times: {}", responses)
      log.info("Slowest response times across all node pings: {}",
               maxResponseTimePerNode.sortBy(_._2).reverse.take(5).map(_._2.nanos.pretty))
      log.info("Slowest average response times across all node pings: {}",
               averageResponseTimesPerNode.take(5).map(_.pretty))

      responses.filter(_.testsFailed != 0) shouldEqual Nil

      withClue("Response took longer than 2 seconds. Do some investigation") {
        responses.filter(_.lastResult.responses.exists(_._2.nanos > 2.seconds)) shouldEqual Nil
      }

      withClue("Found unreachable events") {
        responses.filter(_.memberUnreachableEvents != 0) shouldEqual Nil
      }

      withClue("Found downed events") {
        responses.filter(_.memberDownedEvents != 0) shouldEqual Nil
      }
    }
  }

} 
Example 122
Source File: BankAccountReadModelUseCase.scala    From akka-ddd-cqrs-es-example   with MIT License 5 votes vote down vote up
package com.github.j5ik2o.bank.useCase

import akka.actor.ActorSystem
import akka.stream.scaladsl.{ Flow, Keep, Sink, Source, SourceQueueWithComplete }
import akka.stream.{ ActorMaterializer, OverflowStrategy }
import akka.{ Done, NotUsed }
import com.github.j5ik2o.bank.domain.model._
import com.github.j5ik2o.bank.useCase.BankAccountAggregateUseCase.Protocol.{
  ResolveBankAccountEventsRequest,
  ResolveBankAccountEventsResponse
}
import com.github.j5ik2o.bank.useCase.port.{ BankAccountReadModelFlows, JournalReader }
import pureconfig._

import scala.concurrent.{ ExecutionContext, ExecutionContextExecutor, Future, Promise }

class BankAccountReadModelUseCase(bankAccountReadModelFlows: BankAccountReadModelFlows, journalReader: JournalReader)(
    implicit val system: ActorSystem
) extends UseCaseSupport {

  import UseCaseSupport._

  private val config = loadConfigOrThrow[BankAccountAggregateUseCaseConfig]("bank.use-case.bank-account-use-case")

  private val bufferSize: Int = config.bufferSize

  private implicit val mat: ActorMaterializer       = ActorMaterializer()
  private implicit val ec: ExecutionContextExecutor = system.dispatcher

  def resolveBankAccountEventsById(
      request: ResolveBankAccountEventsRequest
  )(implicit ec: ExecutionContext): Future[ResolveBankAccountEventsResponse] =
    offerToQueue(resolveBankAccountEventQueue)(request, Promise())

  private lazy val resolveBankAccountEventQueue
    : SourceQueueWithComplete[(ResolveBankAccountEventsRequest, Promise[ResolveBankAccountEventsResponse])] =
    Source
      .queue[(ResolveBankAccountEventsRequest, Promise[ResolveBankAccountEventsResponse])](bufferSize,
                                                                                           OverflowStrategy.dropNew)
      .via(bankAccountReadModelFlows.resolveBankAccountEventByIdFlow.zipPromise)
      .toMat(completePromiseSink)(Keep.left)
      .run()

  private val projectionFlow: Flow[(BankAccountEvent, Long), Int, NotUsed] =
    Flow[(BankAccountEvent, Long)].flatMapConcat {
      case (event: BankAccountOpened, sequenceNr: Long) =>
        Source
          .single((event.bankAccountId, event.name.value, sequenceNr, event.occurredAt))
          .via(bankAccountReadModelFlows.openBankAccountFlow)
      case (event: BankAccountEventUpdated, sequenceNr: Long) =>
        Source
          .single((event.bankAccountId, event.name.value, sequenceNr, event.occurredAt))
          .via(bankAccountReadModelFlows.updateAccountFlow)
      case (event: BankAccountDeposited, sequenceNr: Long) =>
        Source
          .single((event.bankAccountId, event.deposit, sequenceNr, event.occurredAt))
          .via(bankAccountReadModelFlows.depositBankAccountFlow)
      case (event: BankAccountWithdrawn, sequenceNr: Long) =>
        Source
          .single((event.bankAccountId, event.withdraw, sequenceNr, event.occurredAt))
          .via(bankAccountReadModelFlows.withdrawBankAccountFlow)
      case (event: BankAccountClosed, sequenceNr: Long) =>
        Source
          .single((event.bankAccountId, sequenceNr, event.occurredAt))
          .via(bankAccountReadModelFlows.closeBankAccountFlow)
    }

  def execute(): Future[Done] = {
    bankAccountReadModelFlows.resolveLastSeqNrSource
      .flatMapConcat { lastSeqNr =>
        journalReader.eventsByTagSource(classOf[BankAccountEvent].getName, lastSeqNr + 1)
      }
      .map { eventBody =>
        (eventBody.event.asInstanceOf[BankAccountEvent], eventBody.sequenceNr)
      }
      .via(projectionFlow)
      .toMat(Sink.ignore)(Keep.right)
      .run()

  }
} 
Example 123
Source File: UseCaseSupport.scala    From akka-ddd-cqrs-es-example   with MIT License 5 votes vote down vote up
package com.github.j5ik2o.bank.useCase

import akka.{ Done, NotUsed }
import akka.stream.{ FlowShape, QueueOfferResult }
import akka.stream.scaladsl.{ Flow, GraphDSL, Sink, SourceQueueWithComplete, Unzip, Zip }

import scala.concurrent.{ ExecutionContext, Future, Promise }

object UseCaseSupport {
  implicit class FlowOps[A, B](val self: Flow[A, B, NotUsed]) extends AnyVal {
    def zipPromise: Flow[(A, Promise[B]), (B, Promise[B]), NotUsed] =
      Flow
        .fromGraph(GraphDSL.create() { implicit b =>
          import GraphDSL.Implicits._
          val unzip = b.add(Unzip[A, Promise[B]])
          val zip   = b.add(Zip[B, Promise[B]])
          unzip.out0 ~> self ~> zip.in0
          unzip.out1 ~> zip.in1
          FlowShape(unzip.in, zip.out)
        })
  }
}

trait UseCaseSupport {

  protected def offerToQueue[A, B](
      sourceQueue: SourceQueueWithComplete[(A, Promise[B])]
  )(request: A, promise: Promise[B])(implicit ec: ExecutionContext): Future[B] = {
    sourceQueue.offer((request, promise)).flatMap {
      case QueueOfferResult.Enqueued =>
        promise.future
      case QueueOfferResult.Failure(t) =>
        Future.failed(new Exception("Failed to offer request", t))
      case QueueOfferResult.Dropped =>
        Future.failed(
          new Exception(
            s"Failed to enqueue resolve request, the queue buffer was full, please check the bank.interface.buffer-size setting"
          )
        )
      case QueueOfferResult.QueueClosed =>
        Future.failed(new Exception("Failed to enqueue request batch write, the queue was closed"))
    }
  }

  protected def completePromiseSink[T]: Sink[(T, Promise[T]), Future[Done]] = Sink.foreach {
    case (response, promise) =>
      promise.success(response)
  }

} 
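
A minimal sketch of the request/promise round trip these helpers enable, with Request and Response as hypothetical placeholder types:

trait Request
trait Response

class MyUseCase extends UseCaseSupport {
  def handle(queue: SourceQueueWithComplete[(Request, Promise[Response])])(request: Request)(
      implicit ec: ExecutionContext): Future[Response] =
    offerToQueue(queue)(request, Promise())
}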
Example 124
Source File: ExecExamples.scala    From skuber   with Apache License 2.0 5 votes vote down vote up
package skuber.examples.exec

import akka.{Done, NotUsed}
import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.{Sink, Source}
import skuber._

import scala.concurrent.{Await, Future, Promise}
import scala.concurrent.duration.Duration.Inf
import skuber.json.format._

object ExecExamples extends App {

  implicit val system = ActorSystem()
  implicit val materializer = ActorMaterializer()
  implicit val dispatcher = system.dispatcher

  val k8s = k8sInit

  println("Executing commands in pods ==>")

  val podName = "sleep"
  val containerName = "sleep"
  val sleepContainer = Container(name = containerName, image = "busybox", command = List("sh", "-c", "trap exit TERM; sleep 99999 & wait"))
  val sleepPod = Pod(podName, Pod.Spec().addContainer(sleepContainer))

  val terminalReady: Promise[Unit] = Promise()

  // Just print stdout and signal when the terminal gets ready
  val sink: Sink[String, Future[Done]] = Sink.foreach {
    case s =>
      print(s)
      if (s.startsWith("/ #")) {
        terminalReady.success(())
      }
  }

  // Execute `ps aux` when the terminal gets ready
  val source: Source[String, NotUsed] = Source.fromFuture(terminalReady.future.map { _ =>
    "ps aux\n"
  })

  // Wait for a while to ensure outputs
  def close: Promise[Unit] = {
    val promise = Promise[Unit]()
    Future {
      Thread.sleep(1000)
      promise.success(())
    }
    promise
  }

  val fut = for {
    // Create the sleep pod if not present
    _ <- k8s.getOption[Pod](podName).flatMap {
      case Some(pod) => Future.successful(())
      case None =>
        k8s.create(sleepPod).map { _ =>
          Thread.sleep(3000)
        }
    }
    // Simulate kubectl exec
    _ <- {
      println("`kubectl exec ps aux`")
      k8s.exec(podName, Seq("ps", "aux"), maybeStdout = Some(sink), maybeClose = Some(close))
    }
    // Simulate kubectl exec -it
    _ <- {
      println("`kubectl -it exec sh` -> `ps aux`")
      k8s.exec(podName, Seq("sh"), maybeStdout = Some(sink), maybeStdin = Some(source), tty = true, maybeClose = Some(close))
    }
  } yield ()

  // Clean up
  fut.onComplete { _ =>
    println("\nFinishing up")
    k8s.delete[Pod]("sleep")
    k8s.close
    system.terminate().foreach { f =>
      System.exit(0)
    }
  }

  Await.result(fut, Inf)
} 
Example 125
Source File: WatchExamples.scala    From skuber   with Apache License 2.0 5 votes vote down vote up
package skuber.examples.watch

import skuber._
import skuber.json.format._
import skuber.K8SWatchEvent
import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.Sink



object WatchExamples extends App {

  implicit val system = ActorSystem("watch")
  implicit val materializer = ActorMaterializer()
  implicit val dispatcher = system.dispatcher
  implicit val k8s = k8sInit

  def watchFrontEndScaling = {

    val frontendReplicaCountMonitor = Sink.foreach[K8SWatchEvent[ReplicationController]] { frontendEvent =>
      println("Current frontend replicas: " + frontendEvent._object.status.get.replicas)
    }
    for {
      frontendRC <- k8s.get[ReplicationController]("frontend")
      frontendRCWatch <- k8s.watch(frontendRC)
      done <- frontendRCWatch.runWith(frontendReplicaCountMonitor)
    } yield done
  }
  
  def watchPodPhases = {

    val podPhaseMonitor = Sink.foreach[K8SWatchEvent[Pod]] { podEvent =>
      val pod = podEvent._object
      val phase = pod.status flatMap { _.phase }
      println(podEvent._type + " => Pod '" + pod.name + "' .. phase = " + phase.getOrElse("<None>"))
    }

    for {
      currPodList <- k8s.list[PodList]()
      latestPodVersion = currPodList.metadata.map { _.resourceVersion }
      currPodsWatch <- k8s.watchAll[Pod](sinceResourceVersion = latestPodVersion) // ignore historic events
      done <- currPodsWatch.runWith(podPhaseMonitor)
    } yield done
  }

  // Note: run appropriate kubectl commands (like 'run') or an example like guestbook to see events being output
  watchPodPhases
  watchFrontEndScaling

  Thread.sleep(1200000) // watch for a lengthy time before closing the session
  k8s.close
  system.terminate().foreach { f =>
    System.exit(0)
  }
} 
Example 126
Source File: FullStream.scala    From elastic-indexer4s   with MIT License 5 votes vote down vote up
package com.yannick_cw.elastic_indexer4s.indexing_logic

import akka.NotUsed
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.{Flow, Keep, Sink, Source}
import com.yannick_cw.elastic_indexer4s.Index_results.{IndexError, StageSucceeded, StageSuccess}
import com.typesafe.scalalogging.LazyLogging

import scala.concurrent.duration.FiniteDuration
import scala.concurrent.{ExecutionContext, Future}
import scala.util.control.NonFatal

object FullStream extends LazyLogging {

  private def countAndLogSink[A](logPer: FiniteDuration): Sink[A, Future[Int]] =
    Flow[A]
      .groupedWithin(Int.MaxValue, logPer)
      .map(_.length)
      .map { elementsPerTime =>
        logger.info(s"Indexed $elementsPerTime elements last $logPer")
        elementsPerTime
      }
      .toMat(Sink.reduce[Int](_ + _))(Keep.right)

  def run[A](source: Source[A, NotUsed], sink: Sink[A, Future[Unit]], logSpeedInterval: FiniteDuration)(
      implicit materializer: ActorMaterializer,
      ex: ExecutionContext): Future[Either[IndexError, StageSucceeded]] =
    (for {
      count <- source
        .alsoToMat(countAndLogSink(logSpeedInterval))(Keep.right)
        .toMat(sink)(Keep.both)
        .mapMaterializedValue { case (fCount, fDone) => fDone.flatMap(_ => fCount) }
        .run()
    } yield Right(StageSuccess(s"Indexed $count documents successfully")))
      .recover {
        case NonFatal(t) =>
          Left(IndexError("Writing documents failed.", Some(t)))
      }
} 
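
A minimal usage sketch, assuming an implicit ActorSystem is in scope (the sink only needs to materialize a Future[Unit]):

implicit val materializer = ActorMaterializer()
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.duration._

val result: Future[Either[IndexError, StageSucceeded]] =
  FullStream.run(
    Source.repeat("doc").take(1000),
    Sink.ignore.mapMaterializedValue(_.map(_ => ())), // adapt Future[Done] to Future[Unit]
    logSpeedInterval = 10.seconds
  )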
Example 127
Source File: ElasticWriter.scala    From elastic-indexer4s   with MIT License 5 votes vote down vote up
package com.yannick_cw.elastic_indexer4s.elasticsearch

import akka.actor.ActorSystem
import akka.stream.scaladsl.Sink
import com.sksamuel.elastic4s.http.ElasticDsl._
import com.sksamuel.elastic4s.http.bulk.BulkResponseItem
import com.sksamuel.elastic4s.streams.ReactiveElastic._
import com.sksamuel.elastic4s.streams.{BulkIndexingSubscriber, RequestBuilder, ResponseListener}
import com.yannick_cw.elastic_indexer4s.Index_results.{IndexError, StageSucceeded, StageSuccess}
import com.yannick_cw.elastic_indexer4s.elasticsearch.elasic_config.ElasticWriteConfig

import scala.concurrent.{ExecutionContext, Future, Promise}
import scala.util.Try
import scala.util.control.NonFatal

class ElasticWriter(esConf: ElasticWriteConfig)(implicit system: ActorSystem, ex: ExecutionContext) {

  import esConf._

  //promise that is passed to the error and completion function of the elastic subscriber
  private val elasticFinishPromise: Promise[Unit] = Promise[Unit]()

  private def esSubscriber[A: RequestBuilder]: BulkIndexingSubscriber[A] = client.subscriber[A](
    batchSize = writeBatchSize,
    completionFn = { () =>
      Try(elasticFinishPromise.success(())); ()
    },
    errorFn = { t: Throwable =>
      Try(elasticFinishPromise.failure(t)); ()
    },
    listener = new ResponseListener[A] {
      override def onAck(resp: BulkResponseItem, original: A): Unit = ()

      override def onFailure(resp: BulkResponseItem, original: A): Unit =
        //todo not yet sure if this could break too early
        Try(elasticFinishPromise.failure(new Exception("Failed indexing with: " + resp.error)))
    },
    concurrentRequests = writeConcurrentRequest,
    maxAttempts = writeMaxAttempts
  )

  def esSink[A: RequestBuilder]: Sink[A, Future[Unit]] =
    Sink
      .fromSubscriber(esSubscriber[A])
      .mapMaterializedValue(_ => elasticFinishPromise.future)

  private def tryIndexCreation: Try[Future[Either[IndexError, StageSucceeded]]] =
    Try(
      client
        .execute(
          mappingSetting.fold(
            typed =>
              createIndex(indexName)
                .mappings(typed.mappings)
                .analysis(typed.analyzer)
                .shards(typed.shards)
                .replicas(typed.replicas),
            unsafe =>
              createIndex(indexName)
                .source(unsafe.source.spaces2)
          )
        )
        .map(response =>
          response.fold[Either[IndexError, StageSucceeded]](
            Left(IndexError(s"Index creation failed with error: ${response.error}")))(_ =>
            Right(StageSuccess(s"Index $indexName was created")))))

  def createNewIndex: Future[Either[IndexError, StageSucceeded]] =
    Future
      .fromTry(tryIndexCreation)
      .flatten
      .recover {
        case NonFatal(t) =>
          Left(IndexError("Index creation failed.", Some(t)))
      }
}

object ElasticWriter {
  def apply(esConf: ElasticWriteConfig)(implicit system: ActorSystem, ex: ExecutionContext): ElasticWriter =
    new ElasticWriter(esConf)
} 
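
A hypothetical wiring of the sink, assuming an implicit RequestBuilder[MyDocument] is in scope and documents is an immutable collection of MyDocument (both placeholder names):

val writer = ElasticWriter(esConf)
val indexed: Future[Unit] = Source(documents).runWith(writer.esSink[MyDocument])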
Example 128
Source File: FullStreamSpec.scala    From elastic-indexer4s   with MIT License 5 votes vote down vote up
package com.yannick_cw.elastic_indexer4s

import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.{Sink, Source}
import com.yannick_cw.elastic_indexer4s.Index_results.{IndexError, StageSuccess}
import com.yannick_cw.elastic_indexer4s.indexing_logic.FullStream
import com.yannick_cw.elastic_indexer4s.specs.AsyncSpec

import scala.concurrent.Future
import scala.concurrent.duration._

class FullStreamSpec extends AsyncSpec {
  implicit val system       = ActorSystem()
  implicit val materializer = ActorMaterializer()

  "The FullStream" should {
    "write all elements to the sink and log the total count" in {
      val source = Source.repeat("test").take(987)
      val sink   = Sink.ignore.mapMaterializedValue(_.map(_ => ()))

      FullStream
        .run(source, sink, 10 seconds)
        .map(res => res.right.value shouldBe StageSuccess(s"Indexed 987 documents successfully"))
    }

    "fail if an exception is thrown during processing" in {
      val source      = Source.repeat("test").take(987)
      val failingSink = Sink.ignore.mapMaterializedValue(_ => Future.failed(new IllegalArgumentException))

      FullStream.run(source, failingSink, 10 seconds).map(res => res.left.value shouldBe an[IndexError])
    }
  }

  override protected def afterAll(): Unit = {
    materializer.shutdown()
    system.terminate()
  }
} 
Example 129
Source File: WebSocketRoute.scala    From vamp   with Apache License 2.0 5 votes vote down vote up
package io.vamp.http_api.ws

import java.util.UUID

import akka.actor.PoisonPill
import akka.http.scaladsl.model.HttpRequest
import akka.http.scaladsl.model.ws.{ Message, TextMessage }
import akka.http.scaladsl.server.Route
import akka.stream._
import akka.stream.scaladsl.{ Flow, Sink, Source }
import akka.util.Timeout
import io.vamp.common.akka.IoC._
import io.vamp.common.http.{ HttpApiDirectives, HttpApiHandlers, TerminateFlowStage }
import io.vamp.common.{ Config, Namespace }
import io.vamp.http_api.ws.WebSocketActor.{ SessionClosed, SessionEvent, SessionOpened, SessionRequest }
import io.vamp.http_api.{ AbstractRoute, LogDirective }

import scala.concurrent.Future

trait WebSocketRoute extends AbstractRoute with WebSocketMarshaller with HttpApiHandlers {
  this: HttpApiDirectives with LogDirective ⇒

  implicit def materializer: Materializer

  private lazy val limit = Config.int("vamp.http-api.websocket.stream-limit")

  protected def websocketApiHandler(implicit namespace: Namespace, timeout: Timeout): Route

  def websocketRoutes(implicit namespace: Namespace, timeout: Timeout) = {
    pathEndOrSingleSlash {
      get {
        extractRequest { request ⇒
          handleWebSocketMessages {
            websocket(request)
          }
        }
      }
    }
  }

  protected def filterWebSocketOutput(message: AnyRef)(implicit namespace: Namespace, timeout: Timeout): Future[Boolean] = Future.successful(true)

  private def apiHandler(implicit namespace: Namespace, timeout: Timeout) = Route.asyncHandler(log {
    websocketApiHandler
  })

  private def websocket(origin: HttpRequest)(implicit namespace: Namespace, timeout: Timeout): Flow[AnyRef, Message, Any] = {
    val id = UUID.randomUUID()

    val in = Flow[AnyRef].collect {
      case TextMessage.Strict(message)  ⇒ Future.successful(message)
      case TextMessage.Streamed(stream) ⇒ stream.limit(limit()).completionTimeout(timeout.duration).runFold("")(_ + _)
    }.mapAsync(parallelism = 3)(identity)
      .mapConcat(unmarshall)
      .map(SessionRequest(apiHandler, id, origin, _))
      .to(Sink.actorRef[SessionEvent](actorFor[WebSocketActor], SessionClosed(id)))

    val out = Source.actorRef[AnyRef](16, OverflowStrategy.dropHead)
      .mapMaterializedValue(actorFor[WebSocketActor] ! SessionOpened(id, _))
      .via(new TerminateFlowStage[AnyRef](_ == PoisonPill))
      .mapAsync(parallelism = 3)(message ⇒ filterWebSocketOutput(message).map(f ⇒ f → message))
      .collect { case (true, m) ⇒ m }
      .map(message ⇒ TextMessage.Strict(marshall(message)))

    Flow.fromSinkAndSource(in, out)
  }
} 
Example 130
Source File: SseConnector.scala    From vamp   with Apache License 2.0 5 votes vote down vote up
package io.vamp.common.http

import akka.Done
import akka.actor.ActorSystem
import akka.event.LoggingAdapter
import akka.http.scaladsl.model.HttpHeader.ParsingResult.Ok
import akka.http.scaladsl.model.sse.ServerSentEvent
import akka.http.scaladsl.model.{ HttpHeader, HttpRequest, HttpResponse, Uri }
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.{ Sink, Source }
import io.vamp.common.http.EventSource.EventSource

import scala.collection.mutable
import scala.concurrent.Future
import scala.concurrent.duration.{ FiniteDuration, _ }
import scala.language.postfixOps
import scala.util.{ Failure, Success }

private case class SseConnectionConfig(url: String, headers: List[(String, String)], tlsCheck: Boolean)

private case class SseConnectionEntryValue(source: EventSource)

trait SseListener {
  def onEvent(event: ServerSentEvent): Unit
}

object SseConnector {

  private val retryDelay: FiniteDuration = 5 second
  private val listeners: mutable.Map[SseConnectionConfig, Set[SseListener]] = mutable.Map()
  private val connections: mutable.Map[SseConnectionConfig, Future[Done]] = mutable.Map()

  def open(url: String, headers: List[(String, String)] = Nil, tlsCheck: Boolean)(listener: SseListener)(implicit system: ActorSystem, logger: LoggingAdapter): Unit = synchronized {
    val config = SseConnectionConfig(url, headers, tlsCheck)
    implicit val materializer: ActorMaterializer = ActorMaterializer()

    listeners.update(config, listeners.getOrElse(config, Set()) + listener)

    connections.getOrElseUpdate(config, {
      logger.info(s"Opening SSE connection: $url")
      EventSource(Uri(url), send(config), None, retryDelay).takeWhile { event ⇒
        event.eventType.foreach(t ⇒ logger.info(s"SSE: $t"))
        val receivers = listeners.getOrElse(config, Set())
        receivers.foreach(_.onEvent(event))
        val continue = receivers.nonEmpty
        if (!continue) logger.info(s"Closing SSE connection: $url")
        continue
      }.runWith(Sink.ignore)
    })
  }

  def close(listener: SseListener): Unit = synchronized {
    listeners.transform((_, v) ⇒ v - listener)
  }

  private def send(config: SseConnectionConfig)(request: HttpRequest)(implicit system: ActorSystem, materializer: ActorMaterializer): Future[HttpResponse] = {
    val httpHeaders = config.headers.map { case (k, v) ⇒ HttpHeader.parse(k, v) } collect { case Ok(h, _) ⇒ h } filterNot request.headers.contains
    Source.single(request.withHeaders(request.headers ++ httpHeaders) → 1).via(HttpClient.pool[Any](config.url, config.tlsCheck)).map {
      case (Success(response: HttpResponse), _) ⇒ response
      case (Failure(f), _)                      ⇒ throw new RuntimeException(f.getMessage)
    }.runWith(Sink.head)
  }
} 
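
A minimal listener-registration sketch, assuming an implicit ActorSystem and LoggingAdapter are in scope (the URL is illustrative):

val listener = new SseListener {
  def onEvent(event: ServerSentEvent): Unit = println(event.data)
}

SseConnector.open("http://example.com/events", tlsCheck = false)(listener)
// later, when no longer interested:
SseConnector.close(listener)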
Example 131
Source File: MergeHubDemo.scala    From fusion-data   with Apache License 2.0 5 votes vote down vote up
package example.akkastream.dynamichub

import akka.NotUsed
import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.{ MergeHub, RunnableGraph, Sink, Source }
import com.typesafe.scalalogging.StrictLogging

import scala.io.StdIn

object MergeHubDemo extends App with StrictLogging {
  implicit val system = ActorSystem()
  implicit val mat = ActorMaterializer()

  // A simple consumer that will print to the console for now
  val consumer = Sink.foreach[String](v => logger.info(s"consumer: $v"))

  // Attach a MergeHub Source to the consumer. This will materialize to a
  // corresponding Sink.
  val runnableGraph: RunnableGraph[Sink[String, NotUsed]] =
    MergeHub.source[String](perProducerBufferSize = 16).to(consumer)

  // By running/materializing the consumer we get back a Sink, and hence
  // now have access to feed elements into it. This Sink can be materialized
  // any number of times, and every element that enters the Sink will
  // be consumed by our consumer.
  val toConsumer: Sink[String, NotUsed] = runnableGraph.run()

  // Feeding two independent sources into the hub.
  Source.single("Hello!").runWith(toConsumer)
  Source.single("Hub!").runWith(toConsumer)

  StdIn.readLine()
  system.terminate()
} 
Example 132
Source File: SimplePublishSubscribe.scala    From fusion-data   with Apache License 2.0 5 votes vote down vote up
package example.akkastream.dynamichub

import akka.actor.ActorSystem
import akka.stream.{ ActorMaterializer, KillSwitches, UniqueKillSwitch }
import akka.stream.scaladsl.{ BroadcastHub, Flow, Keep, MergeHub, Sink, Source }
import com.typesafe.scalalogging.StrictLogging

import scala.io.StdIn
import scala.concurrent.duration._

object SimplePublishSubscribe extends App with StrictLogging {
  implicit val system = ActorSystem()
  implicit val mat = ActorMaterializer()
  import system.dispatcher

  val (sink, source) =
    MergeHub.source[String](perProducerBufferSize = 16).toMat(BroadcastHub.sink(bufferSize = 256))(Keep.both).run()

  source.runWith(Sink.ignore)

  val busFlow: Flow[String, String, UniqueKillSwitch] = Flow
    .fromSinkAndSource(sink, source)
    .joinMat(KillSwitches.singleBidi[String, String])(Keep.right)
    .backpressureTimeout(3.seconds)

  val switch: UniqueKillSwitch =
    Source.repeat("Hello world!").viaMat(busFlow)(Keep.right).to(Sink.foreach(v => logger.info(s"switch: $v"))).run()

  Thread.sleep(200)
  switch.shutdown()

  StdIn.readLine()
  system.terminate()
} 
Example 133
Source File: BufferProblem.scala    From fusion-data   with Apache License 2.0 5 votes vote down vote up
package example.akkastream.buffer
import akka.actor.ActorSystem
import akka.stream.scaladsl.{ GraphDSL, RunnableGraph, Sink, Source, ZipWith }
import akka.stream.{ ActorMaterializer, Attributes, ClosedShape }

import scala.concurrent.duration._
import scala.io.StdIn

object BufferProblem extends App {
  implicit val system = ActorSystem()
  implicit val mat = ActorMaterializer()

  case class Tick()

  val g = RunnableGraph.fromGraph(GraphDSL.create() { implicit b =>
    import akka.stream.scaladsl.GraphDSL.Implicits._

    // this is the asynchronous stage in this graph
    val zipper =
      b.add(ZipWith[Tick, Int, Int]((tick, count) => count).async.addAttributes(Attributes.inputBuffer(1, 1)))
    // With the default buffer settings, only 1 would be printed
    //    val zipper = b.add(ZipWith[Tick, Int, Int]((tick, count) => count).async)

    Source.tick(initialDelay = 3.second, interval = 3.second, Tick()) ~> zipper.in0

    Source
      .tick(initialDelay = 1.second, interval = 1.second, "message!")
      .conflateWithSeed(seed = (_) => 1)((count, _) => count + 1) ~> zipper.in1

    zipper.out ~> Sink.foreach(println)
    ClosedShape
  })

  g.run()

  StdIn.readLine()
  system.terminate()
} 
Example 134
Source File: BufferExample.scala    From fusion-data   with Apache License 2.0 5 votes vote down vote up
package example.akkastream.buffer

import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.{ Sink, Source }

import scala.io.StdIn

object BufferExample extends App {
  implicit val system = ActorSystem()
  implicit val mat = ActorMaterializer()

  Source(1 to 3)
    .map { i =>
      println(s"A: $i"); i
    }
    .async
    .map { i =>
      println(s"B: $i"); i
    }
    .async
    .map { i =>
      println(s"C: $i"); i
    }
    .async
    .runWith(Sink.ignore)

  Thread.sleep(1000)
  println("------------------------------------")
  Source(1 to 3)
    .map { i =>
      println(s"A: $i"); i
    }
    .map { i =>
      println(s"B: $i"); i
    }
    .map { i =>
      println(s"C: $i"); i
    }
    .runWith(Sink.ignore)

  StdIn.readLine()
  system.terminate()
} 
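
The interleaving in the first run comes from the buffers that each async boundary introduces (16 elements by default); a sketch of shrinking that buffer, assuming the same pipeline:

import akka.stream.Attributes

Source(1 to 3)
  .map { i => println(s"A: $i"); i }
  .async
  .addAttributes(Attributes.inputBuffer(initial = 1, max = 1)) // minimal buffering
  .map { i => println(s"B: $i"); i }
  .runWith(Sink.ignore)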
Example 135
Source File: PartialGraph.scala    From fusion-data   with Apache License 2.0 5 votes vote down vote up
package example.akkastream.basic

import akka.actor.ActorSystem
import akka.stream.scaladsl.{ GraphDSL, RunnableGraph, Sink, Source, ZipWith }
import akka.stream.{ ActorMaterializer, ClosedShape, UniformFanInShape }

import scala.concurrent.Await
import scala.concurrent.duration._
import scala.io.StdIn

object PartialGraph extends App {
  implicit val system = ActorSystem()
  implicit val mat = ActorMaterializer()

  val pickMaxOfThree = GraphDSL.create() { implicit b =>
    import GraphDSL.Implicits._

    // The last type parameter of ZipWith is the output type.
    val zip1 = b.add(ZipWith[Int, Int, Int](math.max))
    val zip2 = b.add(ZipWith[Int, Int, Int](math.max))
    zip1.out ~> zip2.in0
    UniformFanInShape(zip2.out, zip1.in0, zip1.in1, zip2.in1)
  }

  val resultSink = Sink.head[Int]

  val g = RunnableGraph.fromGraph(GraphDSL.create(resultSink) { implicit b => sink =>
    import GraphDSL.Implicits._

    val pm3 = b.add(pickMaxOfThree)

    Source.single(4) ~> pm3.in(0)
    Source.single(2) ~> pm3.in(1)
    Source.single(3) ~> pm3.in(2)
    pm3.out ~> sink.in

    ClosedShape
  })

  val result = Await.result(g.run, 300.millis)
  println(s"result: $result")

  StdIn.readLine()
  system.terminate()
} 
Example 136
Source File: ActorMaterializerLifecycleDemo.scala    From fusion-data   with Apache License 2.0 5 votes vote down vote up
package example.akkastream.basic

import akka.actor.{ Actor, ActorSystem, Props }
import akka.stream.{ ActorMaterializer, Materializer }
import akka.stream.scaladsl.Sink
import akka.stream.scaladsl.Source

import scala.io.StdIn
import scala.util.{ Failure, Success }


class RunWithItself extends Actor {
  implicit val mat = ActorMaterializer()

  Source.maybe.runWith(Sink.onComplete {
    case Success(done) => println(s"$self Complated: $done")
    case Failure(e)    => println(s"$self Failed: ${e.getMessage}")
  })

  override def receive: Receive = {
    case "boom" => context.stop(self)
  }
}

class RunForever(implicit val mat: Materializer) extends Actor {
  Source.maybe.runWith(Sink.onComplete {
    case Success(done) => println(s"$self Complated: $done")
    case Failure(e)    => println(s"$self Failed: ${e.getMessage}")
  })

  override def receive: Receive = {
    case "boom" => context.stop(self)
  }
}

object ActorMaterializerLifecycleDemo extends App {
  implicit val system = ActorSystem()
  implicit val mat = ActorMaterializer()

  system.actorOf(Props[RunWithItself], "with-itself") ! "boom"
  val runForever = system.actorOf(Props(new RunForever), "run-forever")
  //  Thread.sleep(100)
  //  mat.shutdown()
  //  Thread.sleep(200)
  runForever ! "boom"

  StdIn.readLine()
  system.terminate()
} 
Example 137
Source File: Graph2.scala    From fusion-data   with Apache License 2.0 5 votes vote down vote up
package example.akkastream.basic

import akka.actor.ActorSystem
import akka.stream.{ ActorMaterializer, ClosedShape }
import akka.stream.scaladsl.{ Broadcast, Flow, GraphDSL, RunnableGraph, Sink, Source }

import scala.io.StdIn

object Graph2 extends App {
  implicit val system = ActorSystem()
  implicit val mat = ActorMaterializer()
  import system.dispatcher

  val topHeadSink = Sink.head[Int]
  val bottomHeadSink = Sink.head[Int]
  val sharedDoubler = Flow[Int].map(_ * 2)

  val g = RunnableGraph.fromGraph(GraphDSL.create(topHeadSink, bottomHeadSink)((_, _)) {
    implicit builder => (topHS, bottomHS) =>
      import GraphDSL.Implicits._

      val broadcast = builder.add(Broadcast[Int](2))
      Source.single(1) ~> broadcast.in

      broadcast ~> sharedDoubler ~> topHS.in
      broadcast ~> sharedDoubler ~> bottomHS.in

      ClosedShape
  })

  val (topF, bottomF) = g.run()
  topF.foreach(v => println(s"top is $v"))
  bottomF.foreach(v => println(s"bottom is $v"))

  StdIn.readLine()
  system.terminate()
} 
Example 138
Source File: SimplifiedAPI.scala    From fusion-data   with Apache License 2.0 5 votes vote down vote up
package example.akkastream.basic

import akka.actor.{ Actor, ActorSystem, Props }
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.{ Broadcast, Merge, Sink, Source }

import scala.io.StdIn

class Remotely extends Actor {
  override def receive: Receive = {
    case value => println(s"receive: $value")
  }
}

object SimplifiedAPI extends App {
  implicit val system = ActorSystem()
  implicit val mat = ActorMaterializer()
  import system.dispatcher

  val merged = Source.combine(Source(List(1)), Source(List(2)))(Merge(_))
  val mergedResult = merged.runWith(Sink.fold(0)(_ + _))
  mergedResult.foreach(println)

  val sendRemotely =
    Sink.actorRef(system.actorOf(Props[Remotely], "remotely"), "Done")
  val localProcessing = Sink.foreach[Int](v => println(s"foreach($v)"))
  Source(List(0, 1, 1)).runWith(Sink.combine(sendRemotely, localProcessing)(strategy => Broadcast[Int](strategy)))

  StdIn.readLine()
  system.terminate()
} 
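Sink.combine with a Broadcast strategy fans a single stream out to several sinks; for just two consumers, alsoTo expresses the same wiring inline. A hedged sketch with a fresh print sink:

// Hedged alternative to Sink.combine: attach a second sink as a side channel.
val sideSink = Sink.foreach[Int](v => println(s"alsoTo($v)"))
Source(List(0, 1, 1)).alsoTo(sideSink).runWith(Sink.ignore)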
Example 139
Source File: PartialGraph2.scala    From fusion-data   with Apache License 2.0 5 votes vote down vote up
package example.akkastream.basic

import akka.NotUsed
import akka.actor.ActorSystem
import akka.stream.scaladsl.{ Broadcast, Flow, GraphDSL, Sink, Source, Zip }
import akka.stream.{ ActorMaterializer, FlowShape, SourceShape }

import scala.io.StdIn

object PartialGraph2 extends App {
  implicit val system = ActorSystem()
  implicit val mat = ActorMaterializer()
  import system.dispatcher

  val pairs: Source[(Int, Int), NotUsed] = Source.fromGraph(GraphDSL.create() { implicit b =>
    import GraphDSL.Implicits._

    // prepare graph elements
    val zip = b.add(Zip[Int, Int]())

    def ints = Source.fromIterator(() => Iterator.from(1))

    // connect the graph
    ints.filter(_ % 2 != 0) ~> zip.in0
    ints.filter(_ % 2 == 0) ~> zip.in1

    // expose port
    SourceShape(zip.out)
  })

  val firstPair = pairs.runWith(Sink.head)
  firstPair.foreach(println)

  val pairUpWithToString = Flow.fromGraph(GraphDSL.create() { implicit b =>
    import GraphDSL.Implicits._
    val broadcast = b.add(Broadcast[Int](2))
    val zip = b.add(Zip[Int, String]())

    broadcast.out(0) ~> zip.in0
    broadcast.out(1).map(_.toString) ~> zip.in1

    FlowShape(broadcast.in, zip.out)
  })

  Source(List(1)).via(pairUpWithToString).runWith(Sink.head).foreach(println)

  StdIn.readLine()
  system.terminate()
} 
Example 140
Source File: GraphComponent.scala    From fusion-data   with Apache License 2.0 5 votes vote down vote up
package example.akkastream.basic

import akka.actor.ActorSystem
import akka.stream.FanInShape.{ Init, Name }
import akka.stream._
import akka.stream.scaladsl.{ Balance, Flow, GraphDSL, Merge, MergePreferred, RunnableGraph, Sink, Source }

import scala.collection.immutable
import scala.io.StdIn

case class PriorityWorkerPoolShape[In, Out](jobsIn: Inlet[In], priorityJobsIn: Inlet[In], resultsOut: Outlet[Out])
    extends Shape {
  override def inlets: immutable.Seq[Inlet[_]] = jobsIn :: priorityJobsIn :: Nil

  override def outlets: immutable.Seq[Outlet[_]] = resultsOut :: Nil

  override def deepCopy(): Shape =
    PriorityWorkerPoolShape(jobsIn.carbonCopy(), priorityJobsIn.carbonCopy(), resultsOut.carbonCopy())
}

case class PriorityWorkerPoolShape2[In, Out](_init: Init[Out] = Name("PriorityWorkerPoolShape2"))
    extends FanInShape[Out](_init) {
  override protected def construct(init: Init[Out]): FanInShape[Out] =
    PriorityWorkerPoolShape2(init)

  val jobsIn: Inlet[In] = newInlet[In]("jobsIn")
  val priorityJobsIn: Inlet[In] = newInlet[In]("priorityJobsIn")
  // An Outlet[Out] named "out" will be created automatically
}

object PriorityWorkerPool {
  def apply[In, Out](worker: Flow[In, Out, Any], workerCount: Int) =
    GraphDSL.create() { implicit b =>
      import GraphDSL.Implicits._

      val priorityMerge = b.add(MergePreferred[In](1))
      val balance = b.add(Balance[In](workerCount))
      val resultsMerge = b.add(Merge[Out](workerCount))

      for (i <- 0 until workerCount)
        balance.out(i) ~> worker ~> resultsMerge.in(i)

      // Feed the balancer from the merge of priority and ordinary jobs
      priorityMerge ~> balance

      PriorityWorkerPoolShape(priorityMerge.in(0), priorityMerge.preferred, resultsMerge.out)
    }
}

object GraphComponent extends App {
  implicit val system = ActorSystem()
  implicit val mat = ActorMaterializer()
  import system.dispatcher

  val worker1 = Flow[String].map("step 1 " + _)
  val worker2 = Flow[String].map("step 2 " + _)

  val g = RunnableGraph.fromGraph(GraphDSL.create() { implicit b =>
    import GraphDSL.Implicits._

    val priorityPool1 = b.add(PriorityWorkerPool(worker1, 4))
    val priorityPool2 = b.add(PriorityWorkerPool(worker2, 2))

    Source(1 to 10).map("job: " + _) ~> priorityPool1.jobsIn
    Source(1 to 10).map("priority job: " + _) ~> priorityPool1.priorityJobsIn

    priorityPool1.resultsOut ~> priorityPool2.jobsIn
    Source(1 to 10).map("one-step, priority " + _) ~> priorityPool2.priorityJobsIn

    priorityPool2.resultsOut ~> Sink.foreach(println)
    ClosedShape
  })

  g.run()

  StdIn.readLine()
  system.terminate()
} 
Example 141
Source File: Graph1.scala    From fusion-data   with Apache License 2.0 5 votes vote down vote up
package example.akkastream.basic

import akka.NotUsed
import akka.actor.ActorSystem
import akka.stream.{ ActorMaterializer, ClosedShape }
import akka.stream.scaladsl.{ Broadcast, Flow, GraphDSL, Merge, RunnableGraph, Sink, Source }

import scala.collection.immutable
import scala.io.StdIn

object Graph1 extends App {
  implicit val system = ActorSystem()
  implicit val mat = ActorMaterializer()

  val graph = g(1 to 2)

  graph.run()

  StdIn.readLine()
  system.terminate()

  def g(data: immutable.Iterable[Int]) =
    RunnableGraph.fromGraph(GraphDSL.create() { implicit b: GraphDSL.Builder[NotUsed] =>
      import GraphDSL.Implicits._
      val in = Source(data)
      val out = Sink.foreach(println)

      val bcast = b.add(Broadcast[Int](2))
      val merge = b.add(Merge[Int](2))

      val f1, f2, f3, f4 = Flow[Int].map(_ + 10)

      in ~> f1 ~> bcast ~> f2 ~> merge ~> f3 ~> out
      bcast ~> f4 ~> merge

      ClosedShape
    })
} 
Example 142
Source File: EchoDemo.scala    From fusion-data   with Apache License 2.0 5 votes vote down vote up
package example.akkastream.streamio

import akka.NotUsed
import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.{ Flow, Framing, Sink, Source, Tcp }
import akka.util.ByteString
import example.akkastream.streamio.EchoServer.system

import scala.concurrent.Future
import scala.io.StdIn

object EchoServer extends App {
  implicit val system = ActorSystem()
  implicit val mat = ActorMaterializer()

  val connections = Tcp().bind("localhost", 8888)
  connections.runForeach { connection =>
    println(s"New connection from: ${connection.remoteAddress}")

    val echo: Flow[ByteString, ByteString, NotUsed] = Flow[ByteString]
      .via(Framing.delimiter(ByteString("\n"), 256, true))
      .map(_.utf8String)
      .map(_ + "!!!\n")
      .map(ByteString(_))

    connection.handleWith(echo)
  }

  StdIn.readLine()
  system.terminate()
}

object EchoClient extends App {
  implicit val system = ActorSystem()
  implicit val mat = ActorMaterializer()

  val connection = Tcp().outgoingConnection("localhost", 8888)

  val replParser =
    Flow[String].takeWhile(_ != "q").concat(Source.single("BYE")).map { elem =>
      println(s"send msg: $elem")
      ByteString(s"$elem\n")
    }

  val repl = Flow[ByteString]
    .via(Framing.delimiter(ByteString("\n"), maximumFrameLength = 256, allowTruncation = true))
    .map(_.utf8String)
    .map(text => println("Server: " + text))
    .map(_ => StdIn.readLine("> "))
    .via(replParser)

  val connected: Future[Tcp.OutgoingConnection] = connection.join(repl).run()

  //  StdIn.readLine()
  //  system.terminate()
}

object EchoDemo {} 
Example 143
Source File: PartialGraph.scala    From fusion-data   with Apache License 2.0 5 votes vote down vote up
package example.akkastream.graph

import akka.actor.ActorSystem
import akka.stream.scaladsl.{ Balance, Broadcast, Flow, GraphDSL, Keep, Merge, RunnableGraph, Sink, Source }
import akka.stream.{ ActorMaterializer, FlowShape, SourceShape }

import scala.concurrent.Future
import scala.io.StdIn

object PartialGraph extends App {
  implicit val system = ActorSystem()
  implicit val mat = ActorMaterializer()
  import system.dispatcher

  def partial =
    GraphDSL
      .create() { implicit b =>
        import GraphDSL.Implicits._

        val B = b.add(Broadcast[Int](2))
        val C = b.add(Merge[Int](2))
        val D = Flow[Int].map(_ + 1)
        val E = b.add(Balance[Int](2))
        val F = b.add(Merge[Int](2))

        C <~ F
        B ~> C ~> F
        B ~> D ~> E ~> F

        FlowShape(B.in, E.out(1))
      }
      .named("partial")

  // Convert partial from a FlowShape into a Flow, gaining access to the fluent stream DSL (e.g. the .filter() method)
  val flow = Flow.fromGraph(partial)

  val source = Source.fromGraph(GraphDSL.create() { implicit b =>
    import GraphDSL.Implicits._
    val merge = b.add(Merge[Int](2))
    Source.single(0) ~> merge
    Source(List(2, 3, 4)) ~> merge
    SourceShape(merge.out)
  })

  val sink: Sink[Int, Future[Int]] = Flow[Int].map(_ * 2).drop(10).named("nestedFlow").toMat(Sink.head)(Keep.right)

  val closed: RunnableGraph[Future[Int]] =
    source.via(flow.filter(_ > 1)).toMat(sink)(Keep.right)

  closed.run().foreach(println)

  StdIn.readLine()
  system.terminate()
} 
Example 144
Source File: MaterializeValue.scala    From fusion-data   with Apache License 2.0 5 votes vote down vote up
package example.akkastream.graph

import akka.NotUsed
import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.{ Flow, Keep, RunnableGraph, Sink, Source, Tcp }
import akka.util.ByteString

import scala.concurrent.{ Future, Promise }

object MaterializeValue {
  implicit val system = ActorSystem()
  implicit val mat = ActorMaterializer()
  import system.dispatcher

  case class MyClass(private val p: Promise[Option[Int]], conn: Tcp.OutgoingConnection) extends AutoCloseable {
    override def close(): Unit = p.trySuccess(None)
  }

  // Materializes to Promise[Option[Int]]
  val source: Source[Int, Promise[Option[Int]]] = Source.maybe[Int]

  // Materializes to NotUsed
  val flow1: Flow[Int, Int, NotUsed] = Flow[Int].take(100)

  // Materializes to Promise[Int]
  val nestedSource
      : Source[Int, Promise[Option[Int]]] = source.viaMat(flow1)(Keep.left).named("nestedSource") // viaMat === via()(Keep.left)
  //  val nestedSource2: Source[Int, NotUsed] = source.viaMat(flow1)(Keep.right)

  // Materializes to NotUsed
  val flow2: Flow[Int, ByteString, NotUsed] =
    Flow[Int].map(i => ByteString(i.toString))

  // Materializes to Future[Tcp.OutgoingConnection]   (Keep.right)
  val flow3: Flow[ByteString, ByteString, Future[Tcp.OutgoingConnection]] =
    Tcp().outgoingConnection("localhost", 8080)

  val nestedFlow: Flow[Int, ByteString, Future[Tcp.OutgoingConnection]] =
    flow2.viaMat(flow3)(Keep.right)

  val nestedFlow2: Flow[Int, ByteString, NotUsed] =
    flow2.viaMat(flow3)(Keep.left) // flow2.via(flow3)
  val nestedFlow3: Flow[Int, ByteString, (NotUsed, Future[Tcp.OutgoingConnection])] =
    flow2.viaMat(flow3)(Keep.both)

  // Materializes to Future[String]   (Keep.right)
  val sink: Sink[ByteString, Future[String]] =
    Sink.fold[String, ByteString]("")(_ + _.utf8String)

  val nestedSink: Sink[Int, (Future[Tcp.OutgoingConnection], Future[String])] =
    nestedFlow.toMat(sink)(Keep.both)

  def f(p: Promise[Option[Int]], rest: (Future[Tcp.OutgoingConnection], Future[String])): Future[MyClass] = {
    val connFuture = rest._1
    connFuture.map(outConn => MyClass(p, outConn))
  }

  // Materializes to Future[MyClass]
  val runnableGraph: RunnableGraph[Future[MyClass]] =
    nestedSource.toMat(nestedSink)(f)

  val r: RunnableGraph[Promise[Option[Int]]] =
    nestedSource.toMat(nestedSink)(Keep.left)

  val r2: RunnableGraph[(Future[Tcp.OutgoingConnection], Future[String])] =
    nestedSource.toMat(nestedSink)(Keep.right)
} 
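The custom combine function f above is one way to shape a materialized value; the same result can be reached by keeping both values and mapping afterwards with RunnableGraph.mapMaterializedValue. A hedged sketch using the definitions above:

// Equivalent to runnableGraph, derived from the Keep.both tuple instead of passing f to toMat.
val runnableGraph2: RunnableGraph[Future[MyClass]] =
  nestedSource
    .toMat(nestedSink)(Keep.both)
    .mapMaterializedValue { case (p, rest) => f(p, rest) }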
Example 145
Source File: EtlGraphImpl.scala    From fusion-data   with Apache License 2.0 5 votes vote down vote up
package mass.rdp.etl.graph

import akka.NotUsed
import akka.stream.scaladsl.{ Sink, Source }
import com.typesafe.scalalogging.StrictLogging
import javax.script.SimpleBindings
import mass.connector.Connector
import mass.connector.sql._
import mass.core.event.{ EventData, EventDataSimple }
import mass.core.script.ScriptManager
import mass.rdp.RdpSystem
import mass.rdp.etl.{ EtlResult, EtlWorkflowExecution, SqlEtlResult }

import scala.collection.immutable
import scala.concurrent.{ Future, Promise }
import scala.util.{ Failure, Success }

case class EtlGraphImpl(graphSetting: EtlGraphSetting) extends EtlGraph with StrictLogging {
  override def run(connectors: immutable.Seq[Connector], rdpSystem: RdpSystem): EtlWorkflowExecution = {
    implicit val ec = rdpSystem.materializer.system.dispatcher
    implicit val mat = rdpSystem.materializer

    def getConnector(name: String): Connector =
      connectors.find(_.name == name) orElse
      rdpSystem.connectorSystem.getConnector(name) getOrElse
      (throw new EtlGraphException(s"connector ref: $name does not exist"))

    val promise = Promise[EtlResult]()

    val source = dataSource(getConnector(graphSource.connector.ref), rdpSystem)
    val sink = dataSink(getConnector(graphSink.connector.ref), rdpSystem)

    graphFlows
      .foldLeft(source)((s, etlFlow) =>
        s.map { event =>
          val engine = ScriptManager.scriptJavascript
          val bindings = new SimpleBindings()
          bindings.put("event", event.asInstanceOf[EventDataSql])
          val data = engine.eval(etlFlow.script.content.get, bindings)

          // TODO Optionally notify an online monitoring system from here
          logger.debug(s"engine: $engine, event: $event, result data: $data")

          EventDataSimple(data)
        })
      .runWith(sink)
      .onComplete {
        case Success(result) => promise.success(SqlEtlResult(result))
        case Failure(e)      => promise.failure(e)
      }

    new EtlWorkflowExecution(promise, () => ())
  }

  private def dataSource(connector: Connector, rdpSystem: RdpSystem): Source[EventData, NotUsed] =
    rdpSystem.streamFactories.get(connector.`type`.toString) match {
      case Some(b) => b.buildSource(connector, graphSource)
      case _       => throw new EtlGraphException(s"Unknown connector: $connector")
    }

  private def dataSink(connector: Connector, rdpSystem: RdpSystem): Sink[EventData, Future[JdbcSinkResult]] =
    rdpSystem.streamFactories.get(connector.`type`.toString) match {
      case Some(b) => b.buildSink(connector, graphSink)
      case _       => throw new EtlGraphException(s"Unknown connector: $connector")
    }
} 
Example 146
Source File: EtlStreamFactory.scala    From fusion-data   with Apache License 2.0 5 votes vote down vote up
package mass.rdp.etl.graph

import java.sql.PreparedStatement

import akka.NotUsed
import akka.stream.scaladsl.{ Sink, Source }
import fusion.jdbc.util.JdbcUtils
import mass.connector.Connector
import mass.connector.sql._
import mass.core.event.{ EventData, EventDataSimple }

import scala.concurrent.Future

trait EtlStreamFactory {
  def `type`: String

  def buildSource(c: Connector, s: EtlSource): Source[EventDataSql, NotUsed]

  def buildSink(c: Connector, s: EtlSink): Sink[EventData, Future[JdbcSinkResult]]
}

class EtlStreamJdbcFactory extends EtlStreamFactory {
  override def `type`: String = "jdbc"

  override def buildSource(c: Connector, s: EtlSource): Source[EventDataSql, NotUsed] =
    JdbcSource(s.script.content.get, Nil, 1000)(c.asInstanceOf[SQLConnector].dataSource)
      .via(JdbcFlow.flowJdbcResultSet)
      .map(jrs => EventDataSql(jrs))

  def buildSink(c: Connector, s: EtlSink): Sink[EventData, Future[JdbcSinkResult]] = {
    def action(event: EventData, stmt: PreparedStatement): Unit = {
      val args: Iterable[Any] = event match {
        case _: EventDataSimple         => event.data.asInstanceOf[Iterable[Any]]
        case eventDataSql: EventDataSql => eventDataSql.data.values
        case _                          => throw new EtlGraphException(s"Invalid EventData: $event.")
      }
      JdbcUtils.setStatementParameters(stmt, args)
    }
    JdbcSink[EventData](conn => conn.prepareStatement(s.script.content.get), action, 1000)(
      c.asInstanceOf[SQLConnector].dataSource)
  }
} 
Example 147
Source File: ExtensionExample.scala    From korolev   with Apache License 2.0 5 votes vote down vote up
import akka.stream.OverflowStrategy
import akka.stream.scaladsl.{Sink, Source}
import korolev._
import korolev.akka._
import korolev.server._
import korolev.state.javaSerialization._

import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.Future

object ExtensionExample extends SimpleAkkaHttpKorolevApp {

  private val ctx = Context[Future, List[String], String]

  import ctx._

  private val (queue, queueSource) = Source
    .queue[String](10, OverflowStrategy.fail)
    .preMaterialize()

  private val topicListener = Extension.pure[Future, List[String], String] { access =>
    val queueSink = queueSource.runWith(Sink.queue[String])
    def aux(): Future[Unit] = queueSink.pull() flatMap {
      case Some(message) => access
        .transition(_ :+ message)
        .flatMap(_ => aux())
      case None =>
        Future.unit
    }
    aux()
    Extension.Handlers[Future, List[String], String](
      onMessage = message => queue.offer(message).map(_ => ()),
      onDestroy = () => Future.successful(queueSink.cancel())
    )
  }

  private def onSubmit(access: Access) = {
    for {
      sessionId <- access.sessionId
      name <- access.valueOf(nameElement)
      text <- access.valueOf(textElement)
      userName =
        if (name.trim.isEmpty) s"Anonymous #${sessionId.hashCode().toHexString}"
        else name
      _ <-
        if (text.trim.isEmpty) Future.unit
        else access.publish(s"$userName: $text")
      _ <- access.property(textElement).set("value", "")
    } yield ()
  }

  private val nameElement = elementId()
  private val textElement = elementId()

  private val config = KorolevServiceConfig[Future, List[String], String](
    stateLoader = StateLoader.default(Nil),
    extensions = List(topicListener),
    document = { message =>

      import levsha.dsl._
      import html._

      optimize {
        Html(
          body(
            div(
              backgroundColor @= "yellow",
              padding @= "10px",
              border @= "1px solid black",
              "This is a chat. Open this app in few browser tabs or on few different computers"
            ),
            div(
              marginTop @= "10px",
              padding @= "10px",
              height @= "250px",
              backgroundColor @= "#eeeeee",
              message map { x =>
                div(x)
              }
            ),
            form(
              marginTop @= "10px",
              input(`type` := "text", placeholder := "Name", nameElement),
              input(`type` := "text", placeholder := "Message", textElement),
              button("Sent"),
              event("submit")(onSubmit)
            )
          )
        )
      }
    }
  )

  val service: AkkaHttpService =
    akkaHttpService(config)
} 
Example 148
Source File: Converters.scala    From korolev   with Apache License 2.0 5 votes vote down vote up
package korolev.akka

import akka.NotUsed
import akka.stream.OverflowStrategy
import akka.stream.scaladsl.{Sink, Source}
import korolev.akka.util.{KorolevStreamPublisher, KorolevStreamSubscriber}
import korolev.effect.{Effect, Stream}
import org.reactivestreams.Publisher

object Converters {

  implicit final class SinkCompanionOps(value: Sink.type) {
    def korolevStream[F[_]: Effect, T]: Sink[T, Stream[F, T]] = {
      val subscriber = new KorolevStreamSubscriber[F, T]()
      Sink
        .fromSubscriber(subscriber)
        .mapMaterializedValue(_ => subscriber)
    }
  }

  implicit final class StreamCompanionOps(value: Stream.type) {
    def fromPublisher[F[_]: Effect, T](publisher: Publisher[T]): Stream[F, T] = {
      val result = new KorolevStreamSubscriber[F, T]()
      publisher.subscribe(result)
      result
    }
  }

  implicit final class KorolevStreamsOps[F[_]: Effect, T](stream: Stream[F, T]) {

    
    def asPublisher(fanout: Boolean = false): Publisher[T] =
      new KorolevStreamPublisher(stream, fanout)

    def asAkkaSource: Source[T, NotUsed] = {
      val publisher = new KorolevStreamPublisher(stream, fanout = false)
      Source
        .fromPublisher(publisher)
        .buffer(10, OverflowStrategy.backpressure) // FIXME should work without this line. Looks like bug in akka-streams
    }
  }
} 
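A hedged usage sketch for these converters (assuming an implicit ActorSystem/Materializer is in scope and that an Effect[Future] instance is derivable from an ExecutionContext, as the korolev-akka module expects):

import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.Future
import akka.stream.scaladsl.{Sink, Source}
import korolev.akka.Converters._
import korolev.effect.Stream

// Run an Akka source into a korolev Stream via the SinkCompanionOps extension.
val korolevStream: Stream[Future, Int] =
  Source(1 to 3).runWith(Sink.korolevStream[Future, Int])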
Example 149
Source File: CommandRegistration.scala    From AckCord   with MIT License 5 votes vote down vote up
package ackcord.commands

import scala.concurrent.Future

import akka.Done
import akka.stream.scaladsl.{Keep, RunnableGraph, Sink, Source}
import akka.stream.{KillSwitches, UniqueKillSwitch}

case class CommandRegistration[Mat](materialized: Mat, onDone: Future[Done], killSwitch: UniqueKillSwitch) {

  def stop(): Unit = killSwitch.shutdown()
}
object CommandRegistration {
  def toSink[A, M](source: Source[A, M]): RunnableGraph[CommandRegistration[M]] =
    source.viaMat(KillSwitches.single)(Keep.both).toMat(Sink.ignore) {
      case ((m, killSwitch), done) => CommandRegistration(m, done, killSwitch)
    }

  def withRegistration[A, M](source: Source[A, M]): Source[A, CommandRegistration[M]] =
    source.viaMat(KillSwitches.single)(Keep.both).watchTermination() {
      case ((m, killSwitch), done) => CommandRegistration(m, done, killSwitch)
    }
} 
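A hedged usage sketch (illustrative names): materializing the graph returned by toSink yields a registration whose kill switch stops the stream, after which onDone completes.

import akka.NotUsed
import akka.stream.scaladsl.Source
import scala.concurrent.ExecutionContext.Implicits.global

// Requires an implicit Materializer (or classic ActorSystem) for run().
val registration: CommandRegistration[NotUsed] =
  CommandRegistration.toSink(Source.repeat("command")).run()

registration.stop()                                      // shuts down the kill switch ...
registration.onDone.foreach(_ => println("stream done")) // ... which completes onDone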
Example 150
Source File: MusicCommands.scala    From AckCord   with MIT License 5 votes vote down vote up
package ackcord.examplecore.music

import ackcord._
import ackcord.commands.{CommandBuilder, CommandController, NamedCommand, VoiceGuildMemberCommandMessage}
import ackcord.data.{GuildId, TextChannel}
import ackcord.examplecore.music.MusicHandler.{NextTrack, QueueUrl, StopMusic, TogglePause}
import akka.NotUsed
import akka.actor.typed.scaladsl.AskPattern._
import akka.actor.typed.{ActorRef, ActorSystem}
import akka.stream.scaladsl.{Flow, Keep, Sink}
import akka.stream.typed.scaladsl.ActorFlow
import akka.util.Timeout

class MusicCommands(requests: Requests, guildId: GuildId, musicHandler: ActorRef[MusicHandler.Command])(
    implicit timeout: Timeout,
    system: ActorSystem[Nothing]
) extends CommandController(requests) {

  val VoiceCommand: CommandBuilder[VoiceGuildMemberCommandMessage, NotUsed] =
    GuildVoiceCommand.andThen(CommandBuilder.inOneGuild(guildId))

  val queue: NamedCommand[String] =
    VoiceCommand.named("&", Seq("q", "queue")).parsing[String].withSideEffects { m =>
      musicHandler.ask[MusicHandler.CommandAck.type](QueueUrl(m.parsed, m.textChannel, m.voiceChannel.id, _))
    }

  private def simpleCommand(
      aliases: Seq[String],
      mapper: (TextChannel, ActorRef[MusicHandler.CommandAck.type]) => MusicHandler.MusicHandlerEvents
  ): NamedCommand[NotUsed] = {
    VoiceCommand.andThen(CommandBuilder.inOneGuild(guildId)).named("&", aliases, mustMention = true).toSink {
      Flow[VoiceGuildMemberCommandMessage[NotUsed]]
        .map(_.textChannel)
        .via(ActorFlow.ask(requests.parallelism)(musicHandler)(mapper))
        .toMat(Sink.ignore)(Keep.none)
    }
  }

  val stop: NamedCommand[NotUsed] = simpleCommand(Seq("s", "stop"), StopMusic.apply)

  val next: NamedCommand[NotUsed] = simpleCommand(Seq("n", "next"), NextTrack.apply)

  val pause: NamedCommand[NotUsed] = simpleCommand(Seq("p", "pause"), TogglePause.apply)
} 
Example 151
Source File: ClientUtil.scala    From daml   with Apache License 2.0 5 votes vote down vote up
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0
package com.daml.quickstart.iou

import java.util.UUID

import akka.stream.Materializer
import akka.stream.scaladsl.{Sink, Source}
import akka.{Done, NotUsed}
import com.daml.ledger.api.domain.LedgerId
import com.daml.ledger.api.refinements.ApiTypes.{ApplicationId, WorkflowId}
import com.daml.ledger.api.v1.command_submission_service.SubmitRequest
import com.daml.ledger.api.v1.commands.{Command, Commands}
import com.daml.ledger.api.v1.ledger_offset.LedgerOffset
import com.daml.ledger.api.v1.transaction.Transaction
import com.daml.ledger.api.v1.transaction_filter.{Filters, TransactionFilter}
import com.daml.ledger.client.LedgerClient
import com.daml.quickstart.iou.FutureUtil.toFuture
import com.google.protobuf.empty.Empty

import scala.concurrent.{ExecutionContext, Future}

class ClientUtil(
    client: LedgerClient,
    applicationId: ApplicationId,
) {

  import ClientUtil._

  private val ledgerId = client.ledgerId
  private val packageClient = client.packageClient
  private val commandClient = client.commandClient
  private val transactionClient = client.transactionClient

  def listPackages(implicit ec: ExecutionContext): Future[Set[String]] =
    packageClient.listPackages().map(_.packageIds.toSet)

  def ledgerEnd(implicit ec: ExecutionContext): Future[LedgerOffset] =
    transactionClient.getLedgerEnd().flatMap(response => toFuture(response.offset))

  def submitCommand(party: String, workflowId: WorkflowId, cmd: Command.Command): Future[Empty] = {
    val commands = Commands(
      ledgerId = LedgerId.unwrap(ledgerId),
      workflowId = WorkflowId.unwrap(workflowId),
      applicationId = ApplicationId.unwrap(applicationId),
      commandId = uniqueId,
      party = party,
      commands = Seq(Command(cmd)),
    )

    commandClient.submitSingleCommand(SubmitRequest(Some(commands), None))
  }

  def nextTransaction(party: String, offset: LedgerOffset)(
      implicit mat: Materializer): Future[Transaction] =
    transactionClient
      .getTransactions(offset, None, transactionFilter(party))
      .take(1L)
      .runWith(Sink.head)

  def subscribe(party: String, offset: LedgerOffset, max: Option[Long])(f: Transaction => Unit)(
      implicit mat: Materializer): Future[Done] = {
    val source: Source[Transaction, NotUsed] =
      transactionClient.getTransactions(offset, None, transactionFilter(party))
    max.fold(source)(n => source.take(n)) runForeach f
  }

  override lazy val toString: String = s"ClientUtil{ledgerId=$ledgerId}"
}

object ClientUtil {
  def transactionFilter(parties: String*): TransactionFilter =
    TransactionFilter(parties.map((_, Filters.defaultInstance)).toMap)

  def uniqueId: String = UUID.randomUUID.toString

  def workflowIdFromParty(p: String): WorkflowId =
    WorkflowId(s"$p Workflow")
} 
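A hedged usage sketch (hypothetical helper, not part of the original file): combining ledgerEnd with nextTransaction waits for the first transaction for a party submitted from this point on.

import akka.stream.Materializer
import com.daml.ledger.api.v1.transaction.Transaction
import scala.concurrent.{ExecutionContext, Future}

def firstTransactionAfterNow(util: ClientUtil, party: String)(
    implicit ec: ExecutionContext,
    mat: Materializer): Future[Transaction] =
  util.ledgerEnd.flatMap(offset => util.nextTransaction(party, offset))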
Example 152
Source File: ClientUtil.scala    From daml   with Apache License 2.0 5 votes vote down vote up
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0
package com.daml.quickstart.iou

import java.util.UUID

import akka.stream.Materializer
import akka.stream.scaladsl.{Sink, Source}
import akka.{Done, NotUsed}
import com.daml.ledger.api.refinements.ApiTypes.{ApplicationId, WorkflowId}
import com.daml.ledger.api.v1.command_submission_service.SubmitRequest
import com.daml.ledger.api.v1.commands.Commands
import com.daml.ledger.api.v1.ledger_offset.LedgerOffset
import com.daml.ledger.api.v1.transaction.Transaction
import com.daml.ledger.api.v1.transaction_filter.{Filters, TransactionFilter}
import com.daml.ledger.client.LedgerClient
import com.daml.ledger.client.binding.{Primitive => P}
import com.daml.quickstart.iou.FutureUtil.toFuture
import com.google.protobuf.empty.Empty

import scalaz.syntax.tag._

import scala.concurrent.{ExecutionContext, Future}

class ClientUtil(
    client: LedgerClient,
    applicationId: ApplicationId,
) {

  import ClientUtil._

  private val ledgerId = client.ledgerId
  private val commandClient = client.commandClient
  private val transactionClient = client.transactionClient

  def ledgerEnd(implicit ec: ExecutionContext): Future[LedgerOffset] =
    transactionClient.getLedgerEnd().flatMap(response => toFuture(response.offset))

  def submitCommand[T](
      sender: P.Party,
      workflowId: WorkflowId,
      command: P.Update[P.ContractId[T]]): Future[Empty] = {
    commandClient.submitSingleCommand(submitRequest(sender, workflowId, command))
  }

  def submitRequest[T](
      party: P.Party,
      workflowId: WorkflowId,
      seq: P.Update[P.ContractId[T]]*): SubmitRequest = {
    val commands = Commands(
      ledgerId = ledgerId.unwrap,
      workflowId = WorkflowId.unwrap(workflowId),
      applicationId = ApplicationId.unwrap(applicationId),
      commandId = uniqueId,
      party = P.Party.unwrap(party),
      commands = seq.map(_.command)
    )
    SubmitRequest(Some(commands), None)
  }

  def nextTransaction(party: P.Party, offset: LedgerOffset)(
      implicit mat: Materializer): Future[Transaction] =
    transactionClient
      .getTransactions(offset, None, transactionFilter(party))
      .take(1L)
      .runWith(Sink.head)

  def subscribe(party: P.Party, offset: LedgerOffset, max: Option[Long])(f: Transaction => Unit)(
      implicit mat: Materializer): Future[Done] = {
    val source: Source[Transaction, NotUsed] =
      transactionClient.getTransactions(offset, None, transactionFilter(party))
    max.fold(source)(n => source.take(n)) runForeach f
  }

  override lazy val toString: String = s"ClientUtil{ledgerId=$ledgerId}"
}

object ClientUtil {
  def transactionFilter(ps: P.Party*): TransactionFilter =
    TransactionFilter(P.Party.unsubst(ps).map((_, Filters.defaultInstance)).toMap)

  def uniqueId: String = UUID.randomUUID.toString

  def workflowIdFromParty(p: P.Party): WorkflowId =
    WorkflowId(s"${P.Party.unwrap(p): String} Workflow")
} 
Example 154
Source File: AcsBench.scala    From daml   with Apache License 2.0 5 votes vote down vote up
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.platform.sandbox.perf

import java.io.File

import akka.stream.scaladsl.Sink
import com.daml.bazeltools.BazelRunfiles._
import com.daml.ledger.api.domain
import com.daml.ledger.api.testing.utils.MockMessages
import com.daml.ledger.api.v1.active_contracts_service.GetActiveContractsResponse
import com.daml.ledger.api.v1.command_service.SubmitAndWaitRequest
import com.daml.ledger.api.v1.event.CreatedEvent
import com.daml.ledger.api.v1.value.Identifier
import com.daml.ledger.client.services.acs.ActiveContractSetClient
import com.daml.platform.sandbox.services.TestCommands
import org.openjdk.jmh.annotations.{Benchmark, Level, Setup}

class AcsBenchState extends PerfBenchState with DummyCommands with InfAwait {

  def commandCount = 10000L

  @Setup(Level.Invocation)
  def submitCommands(): Unit = {
    await(
      dummyCreates(ledger.ledgerId)
        .take(commandCount)
        .mapAsync(100)(ledger.commandService.submitAndWait)
        .runWith(Sink.ignore)(mat))
    ()
  }
}

class AcsBench extends TestCommands with InfAwait {

  override protected def darFile: File =
    new File(rlocation("ledger/test-common/model-tests.dar"))

  private def generateCommand(
      sequenceNumber: Int,
      contractId: String,
      ledgerId: domain.LedgerId,
      template: Identifier): SubmitAndWaitRequest = {
    buildRequest(
      ledgerId = ledgerId,
      commandId = s"command-id-exercise-$sequenceNumber",
      commands = Seq(exerciseWithUnit(template, contractId, "DummyChoice1")),
      appId = "app1"
    ).toSync
  }

  private def extractContractId(
      response: GetActiveContractsResponse,
      template: Identifier): Option[String] = {
    val events = response.activeContracts.toSet
    events.collectFirst {
      case CreatedEvent(contractId, _, Some(id), _, _, _, _, _, _) if id == template => contractId
    }
  }

  private def getContractIds(
      state: PerfBenchState,
      template: Identifier,
      ledgerId: domain.LedgerId) =
    new ActiveContractSetClient(ledgerId, state.ledger.acsService)(state.esf)
      .getActiveContracts(MockMessages.transactionFilter)
      .map(extractContractId(_, template))

  @Benchmark
  def consumeAcs(state: AcsBenchState): Unit = {
    val ledgerId = state.ledger.ledgerId
    val template = templateIds.dummy
    await(
      getContractIds(state, template, ledgerId).zipWithIndex
        .collect {
          case (Some(contractId), i) =>
            generateCommand(i.toInt, contractId, ledgerId, template)
        }
        .mapAsync(100)(state.ledger.commandService.submitAndWait)
        .runWith(Sink.ignore)(state.mat))
    ()

  }

} 
Example 155
Source File: SimpleBench.scala    From daml   with Apache License 2.0 5 votes vote down vote up
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.platform.sandbox.perf

import java.io.File

import akka.stream.scaladsl.Sink
import com.daml.bazeltools.BazelRunfiles._
import org.openjdk.jmh.annotations.Benchmark

class SimpleBenchState extends PerfBenchState with DummyCommands with InfAwait

class SimpleBench extends DummyCommands with InfAwait {

  override protected def darFile: File =
    new File(rlocation("ledger/test-common/model-tests.dar"))

  @Benchmark
  def ingest10kCommands(state: SimpleBenchState): Unit = {
    val commandCount = 10000L
    await(
      dummyCreates(state.ledger.ledgerId)
        .take(commandCount)
        .mapAsync(100)(state.ledger.commandService.submitAndWait _)
        .runWith(Sink.ignore)(state.mat))
    ()
  }
} 
Example 156
Source File: StaticTime.scala    From daml   with Apache License 2.0 5 votes vote down vote up
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.ledger.client.services.testing.time

import java.time.Instant
import java.util.concurrent.atomic.AtomicReference

import akka.stream.scaladsl.{Broadcast, Flow, GraphDSL, RunnableGraph, Sink}
import akka.stream.{ClosedShape, KillSwitches, Materializer, UniqueKillSwitch}
import com.daml.api.util.{TimeProvider, TimestampConversion}
import com.daml.api.util.TimestampConversion._
import com.daml.grpc.adapter.ExecutionSequencerFactory
import com.daml.grpc.adapter.client.akka.ClientAdapter
import com.daml.dec.DirectExecutionContext
import com.daml.ledger.api.v1.testing.time_service.{GetTimeRequest, SetTimeRequest}
import com.daml.ledger.api.v1.testing.time_service.TimeServiceGrpc.{TimeService, TimeServiceStub}
import com.daml.ledger.client.LedgerClient

import scala.concurrent.{ExecutionContext, Future}

class StaticTime(
    timeService: TimeService,
    clock: AtomicReference[Instant],
    killSwitch: UniqueKillSwitch,
    ledgerId: String)
    extends TimeProvider
    with AutoCloseable {

  def getCurrentTime: Instant = clock.get

  def timeRequest(instant: Instant) =
    SetTimeRequest(
      ledgerId,
      Some(TimestampConversion.fromInstant(getCurrentTime)),
      Some(TimestampConversion.fromInstant(instant)))

  def setTime(instant: Instant)(implicit ec: ExecutionContext): Future[Unit] = {
    timeService.setTime(timeRequest(instant)).map { _ =>
      val _ = StaticTime.advanceClock(clock, instant)
    }
  }

  override def close(): Unit = killSwitch.shutdown()
}

object StaticTime {
  def advanceClock(clock: AtomicReference[Instant], instant: Instant): Instant = {
    clock.updateAndGet {
      case current if instant isAfter current => instant
      case current => current
    }
  }

  def updatedVia(timeService: TimeServiceStub, ledgerId: String, token: Option[String] = None)(
      implicit m: Materializer,
      esf: ExecutionSequencerFactory): Future[StaticTime] = {
    val clockRef = new AtomicReference[Instant](Instant.EPOCH)
    val killSwitchExternal = KillSwitches.single[Instant]
    val sinkExternal = Sink.head[Instant]

    RunnableGraph
      .fromGraph {
        GraphDSL.create(killSwitchExternal, sinkExternal) {
          case (killSwitch, futureOfFirstElem) =>
            // We serve this in a future which completes when the first element has passed through.
            // Thus we make sure that the object we serve already received time data from the ledger.
            futureOfFirstElem.map(_ => new StaticTime(timeService, clockRef, killSwitch, ledgerId))(
              DirectExecutionContext)
        } { implicit b => (killSwitch, sinkHead) =>
          import GraphDSL.Implicits._
          val instantSource = b.add(
            ClientAdapter
              .serverStreaming(
                GetTimeRequest(ledgerId),
                LedgerClient.stub(timeService, token).getTime)
              .map(r => toInstant(r.getCurrentTime)))

          val updateClock = b.add(Flow[Instant].map { i =>
            advanceClock(clockRef, i)
            i
          })

          val broadcastTimes = b.add(Broadcast[Instant](2))

          val ignore = b.add(Sink.ignore)

          // format: OFF
          instantSource ~> killSwitch ~> updateClock ~> broadcastTimes.in
                                                        broadcastTimes.out(0) ~> sinkHead
                                                        broadcastTimes.out(1) ~> ignore
          // format: ON

          ClosedShape
        }
      }
      .run()
  }

} 
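A hedged usage sketch (hypothetical helper): obtain a StaticTime through updatedVia, then advance the ledger clock by one second; setTime updates the service, and the stream above propagates the new time into the local clock.

import scala.concurrent.{ExecutionContext, Future}

// Assumes the imports of the file above for TimeServiceStub, Materializer
// and ExecutionSequencerFactory.
def advanceOneSecond(stub: TimeServiceStub, ledgerId: String)(
    implicit m: Materializer,
    esf: ExecutionSequencerFactory,
    ec: ExecutionContext): Future[Unit] =
  StaticTime.updatedVia(stub, ledgerId).flatMap { time =>
    time.setTime(time.getCurrentTime.plusSeconds(1L))
  }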
Example 157
Source File: ExtractSingleMaterializedValueTest.scala    From daml   with Apache License 2.0 5 votes vote down vote up
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.util.akkastreams

import akka.stream.scaladsl.{Keep, Sink, Source}
import com.daml.ledger.api.testing.utils.AkkaBeforeAndAfterAll
import org.scalatest.concurrent.ScalaFutures
import org.scalatest.{Matchers, WordSpec}

import scala.util.Random

class ExtractSingleMaterializedValueTest
    extends WordSpec
    with Matchers
    with ScalaFutures
    with AkkaBeforeAndAfterAll {

  private val discriminator = { i: Int =>
    if (i < 0) Some(i) else None
  }

  private val elemsThatPassThrough = 0.to(10).toVector

  ExtractMaterializedValue.getClass.getSimpleName when {

    "there's a single valid value" should {
      "extract it" in {
        val elemToExtract = -1

        val elements = elemToExtract +: elemsThatPassThrough
        val (extractedF, restF) = processElements(Random.shuffle(elements))

        whenReady(extractedF)(_ shouldEqual elemToExtract)
        whenReady(restF)(_ should contain theSameElementsAs elements)
      }
    }

    "there are multiple valid values" should {
      "extract the first matching element" in {
        val elemToExtract = -1
        val otherCandidateShuffledIn = -2

        val elements = elemToExtract +: Random.shuffle(
          otherCandidateShuffledIn +: elemsThatPassThrough)
        val (extractedF, restF) = processElements(elements)

        whenReady(extractedF)(_ shouldEqual elemToExtract)
        whenReady(restF)(_ should contain theSameElementsAs elements)
      }
    }

    "there are no valid values" should {
      "fail the materialized future, but let the stream continue otherwise" in {

        val (extractedF, restF) =
          processElements(Random.shuffle(elemsThatPassThrough))

        whenReady(extractedF.failed)(_ shouldBe a[RuntimeException])
        whenReady(restF)(_.sorted shouldEqual elemsThatPassThrough)
      }
    }

  }

  private def processElements(elements: Iterable[Int]) = {
    Source
      .fromIterator(() => elements.iterator)
      .viaMat(ExtractMaterializedValue(discriminator))(Keep.right)
      .toMat(Sink.seq)(Keep.both)
      .run()
  }
} 
Example 158
Source File: LedgerEntriesSpec.scala    From daml   with Apache License 2.0 5 votes vote down vote up
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.platform.sandbox.stores.ledger.inmemory

import akka.stream.ThrottleMode
import akka.stream.scaladsl.{Flow, Keep, Sink, Source}
import com.daml.ledger.participant.state.v1.Offset
import com.daml.ledger.api.testing.utils.AkkaBeforeAndAfterAll
import org.scalatest.{AsyncWordSpec, Inspectors, Matchers}

import scala.concurrent.Future
import scala.concurrent.duration._
import scala.util.Random

class LedgerEntriesSpec
    extends AsyncWordSpec
    with Matchers
    with AkkaBeforeAndAfterAll
    with Inspectors {

  case class Error(msg: String)

  case class Transaction(content: String)

  val NO_OF_MESSAGES = 10000
  val NO_OF_SUBSCRIBERS = 50

  private def genTransactions() = (1 to NO_OF_MESSAGES).map { i =>
    if (Random.nextBoolean())
      Right(Transaction(i.toString))
    else
      Left(Error(i.toString))
  }

  "LedgerEntries" should {

    "store new blocks and a late subscriber can read them" in {
      val ledger = new LedgerEntries[Either[Error, Transaction]](_.toString)
      val transactions = genTransactions()

      transactions.foreach(t => ledger.publish(t))

      val sink =
        Flow[(Offset, Either[Error, Transaction])]
          .take(NO_OF_MESSAGES.toLong)
          .toMat(Sink.seq)(Keep.right)

      val blocksF = ledger.getSource(None, None).runWith(sink)

      blocksF.map { blocks =>
        val readTransactions = blocks.collect { case (_, transaction) => transaction }
        readTransactions shouldEqual transactions
      }
    }

    "store new blocks while multiple subscribers are reading them with different pace" in {
      val transactions = genTransactions()

      val ledger = new LedgerEntries[Either[Error, Transaction]](_.toString)

      val publishRate = NO_OF_MESSAGES / 10

      val blocksInStream =
        Source(transactions)
          .throttle(publishRate, 100.milliseconds, publishRate, ThrottleMode.shaping)
          .to(Sink.foreach { t =>
            ledger.publish(t)
            ()
          })

      def subscribe() = {
        val subscribeRate = NO_OF_MESSAGES / (Random.nextInt(100) + 1)
        ledger
          .getSource(None, None)
          .runWith(
            Flow[(Offset, Either[Error, Transaction])]
              .throttle(subscribeRate, 100.milliseconds, subscribeRate, ThrottleMode.shaping)
              .take(NO_OF_MESSAGES.toLong)
              .toMat(Sink.seq)(Keep.right)
          )
      }

      val readBlocksF = Future.sequence((1 to NO_OF_SUBSCRIBERS).map(_ => subscribe()))
      blocksInStream.run()

      readBlocksF.map { readBlocksForAll =>
        forAll(readBlocksForAll) { readBlocks =>
          val readTransactions = readBlocks.collect { case (_, transaction) => transaction }
          readTransactions shouldEqual transactions
        }
      }
    }
  }
} 
Example 159
Source File: ScenarioLoadingITDivulgence.scala    From daml   with Apache License 2.0 5 votes vote down vote up
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.platform.sandbox

import akka.stream.scaladsl.Sink
import com.daml.ledger.api.domain.LedgerId
import com.daml.ledger.api.testing.utils.{SuiteResourceManagementAroundEach, MockMessages => M}
import com.daml.ledger.api.v1.active_contracts_service.ActiveContractsServiceGrpc
import com.daml.ledger.api.v1.transaction_filter._
import com.daml.ledger.client.services.acs.ActiveContractSetClient
import com.daml.dec.DirectExecutionContext
import com.daml.platform.sandbox.services.{SandboxFixture, TestCommands}
import org.scalatest.concurrent.ScalaFutures
import org.scalatest.time.{Millis, Span}
import org.scalatest.{Matchers, WordSpec}

@SuppressWarnings(Array("org.wartremover.warts.StringPlusAny"))
class ScenarioLoadingITDivulgence
    extends WordSpec
    with Matchers
    with ScalaFutures
    with TestCommands
    with SandboxFixture
    with SuiteResourceManagementAroundEach {

  override def scenario: Option[String] = Some("Test:testDivulgenceSuccess")

  private def newACClient(ledgerId: LedgerId) =
    new ActiveContractSetClient(ledgerId, ActiveContractsServiceGrpc.stub(channel))

  override implicit def patienceConfig: PatienceConfig =
    PatienceConfig(scaled(Span(15000, Millis)), scaled(Span(150, Millis)))

  private val allTemplatesForParty = M.transactionFilter

  private def getSnapshot(transactionFilter: TransactionFilter = allTemplatesForParty) =
    newACClient(ledgerId())
      .getActiveContracts(transactionFilter)
      .runWith(Sink.seq)

  implicit val ec = DirectExecutionContext

  "ScenarioLoading" when {
    "running a divulgence scenario" should {
      "not fail" in {
        // The testDivulgenceSuccess scenario uses divulgence
        // This test checks whether the scenario completes without failing
        whenReady(getSnapshot()) { resp =>
          resp.size should equal(1)
        }
      }
    }
  }

} 
Example 160
Source File: WallClockTimeIT.scala    From daml   with Apache License 2.0 5 votes vote down vote up
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.platform.sandbox.services.time

import java.time.Instant

import akka.stream.scaladsl.Sink
import com.daml.api.util.TimestampConversion.fromInstant
import com.daml.grpc.GrpcException
import com.daml.grpc.adapter.client.akka.ClientAdapter
import com.daml.ledger.api.testing.utils.SuiteResourceManagementAroundAll
import com.daml.ledger.api.v1.testing.time_service.{GetTimeRequest, SetTimeRequest, TimeServiceGrpc}
import com.daml.platform.sandbox.config.SandboxConfig
import com.daml.platform.sandbox.services.SandboxFixture
import com.daml.platform.services.time.TimeProviderType
import org.scalatest.concurrent.{AsyncTimeLimitedTests, ScalaFutures}
import org.scalatest.time.Span
import org.scalatest.time.SpanSugar.convertIntToGrainOfTime
import org.scalatest.{AsyncWordSpec, Matchers}
import scalaz.syntax.tag.ToTagOps

final class WallClockTimeIT
    extends AsyncWordSpec
    with SandboxFixture
    with SuiteResourceManagementAroundAll
    with AsyncTimeLimitedTests
    with ScalaFutures
    with Matchers {

  override val timeLimit: Span = 15.seconds

  override protected def config: SandboxConfig = super.config.copy(
    timeProviderType = Some(TimeProviderType.WallClock),
  )

  private val unimplemented: PartialFunction[Any, Unit] = {
    case GrpcException.UNIMPLEMENTED() => ()
  }

  "Time Service" when {
    "server is not in static mode" should {
      "not have getTime available" in {
        ClientAdapter
          .serverStreaming(GetTimeRequest(ledgerId().unwrap), TimeServiceGrpc.stub(channel).getTime)
          .take(1)
          .runWith(Sink.head)
          .failed
          .map(_ should matchPattern(unimplemented))
      }

      "not have setTime available" in {
        TimeServiceGrpc
          .stub(channel)
          .setTime(
            SetTimeRequest(
              ledgerId().unwrap,
              Some(fromInstant(Instant.EPOCH)),
              Some(fromInstant(Instant.EPOCH.plusSeconds(1)))
            ))
          .failed
          .map(_ should matchPattern(unimplemented))
      }
    }
  }
} 
Example 161
Source File: GroupContiguousSpec.scala    From daml   with Apache License 2.0 5 votes vote down vote up
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.platform.store.dao.events

import akka.stream.scaladsl.{Sink, Source}
import com.daml.ledger.api.testing.utils.AkkaBeforeAndAfterAll
import org.scalatest.concurrent.ScalaFutures
import org.scalatest.prop.PropertyChecks
import org.scalatest.{AsyncFlatSpec, Matchers}

final class GroupContiguousSpec
    extends AsyncFlatSpec
    with Matchers
    with PropertyChecks
    with ScalaFutures
    with AkkaBeforeAndAfterAll {

  behavior of "groupContiguous"

  override def spanScaleFactor: Double = 10 // Give some extra slack on CI

  it should "be equivalent to grouping on inputs with an ordered key" in forAll {
    pairs: List[(Int, String)] =>
      val sortedPairs = pairs.sortBy(_._1)
      val grouped = groupContiguous(Source(sortedPairs))(by = _._1)
      whenReady(grouped.runWith(Sink.seq[Vector[(Int, String)]])) {
        _ should contain theSameElementsAs pairs.groupBy(_._1).values
      }
  }

  it should "be equivalent to grouping on inputs with a contiguous key" in {
    val pairsWithContiguousKeys = List(1 -> "baz", 0 -> "foo", 0 -> "bar", 0 -> "quux")
    val grouped = groupContiguous(Source(pairsWithContiguousKeys))(by = _._1)
    whenReady(grouped.runWith(Sink.seq[Vector[(Int, String)]])) {
      _.map(_.toSet) should contain theSameElementsAs pairsWithContiguousKeys
        .groupBy(_._1)
        .map(_._2.toSet)
    }
  }

  it should "behave as expected when grouping inputs without a contiguous key" in {
    val pairs = List(0 -> "foo", 0 -> "bar", 1 -> "baz", 0 -> "quux")
    val grouped = groupContiguous(Source(pairs))(by = _._1)
    whenReady(grouped.runWith(Sink.seq[Vector[(Int, String)]])) {
      _.map(_.toSet) should contain theSameElementsAs Vector(
        Set(0 -> "foo", 0 -> "bar"),
        Set(1 -> "baz"),
        Set(0 -> "quux"),
      )
    }
  }

} 
Example 162
Source File: DropRepeatedSpec.scala    From daml   with Apache License 2.0 5 votes vote down vote up
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.platform.server.api

import akka.actor.ActorSystem
import akka.pattern.pipe
import akka.stream.Materializer
import akka.stream.scaladsl.{Sink, Source}
import akka.testkit.{TestKit, TestProbe}
import org.scalatest.{BeforeAndAfterAll, Matchers, WordSpecLike}

import scala.collection.immutable
import scala.concurrent.ExecutionContext

final class DropRepeatedSpec
    extends TestKit(ActorSystem(classOf[DropRepeatedSpec].getSimpleName))
    with WordSpecLike
    with Matchers
    with BeforeAndAfterAll {

  private[this] implicit val materializer: Materializer = Materializer(system)
  private[this] implicit val executionContext: ExecutionContext = materializer.executionContext

  override def afterAll: Unit = {
    TestKit.shutdownActorSystem(system)
  }

  "DropRepeated" should {
    "drop repeated elements" in {
      val probe = TestProbe()
      val input = immutable.Seq(1, 2, 2, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 5)

      val _ = Source(input)
        .via(DropRepeated())
        .runWith(Sink.seq)
        .pipeTo(probe.ref)
        .failed
        .foreach(fail(_))

      probe.expectMsg(Vector(1, 2, 3, 4, 5))
    }

    "does not drop duplicate elements that are not repeated" in {
      val probe = TestProbe()
      val input = immutable.Seq(1, 1, 2, 2, 1, 1, 2, 2)

      val _ = Source(input)
        .via(DropRepeated())
        .runWith(Sink.seq)
        .pipeTo(probe.ref)
        .failed
        .foreach(fail(_))

      probe.expectMsg(Vector(1, 2, 1, 2))
    }
  }
} 
Example 163
Source File: DispatcherTest.scala    From daml   with Apache License 2.0 5 votes vote down vote up
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.platform.akkastreams.dispatcher

import java.util.concurrent.atomic.AtomicReference

import akka.NotUsed
import akka.stream.scaladsl.{Keep, Sink}
import com.daml.ledger.api.testing.utils.AkkaBeforeAndAfterAll
import com.daml.platform.akkastreams.dispatcher.SubSource.OneAfterAnother
import org.scalatest.concurrent.ScalaFutures
import org.scalatest.time.{Milliseconds, Seconds, Span}
import org.scalatest.{Matchers, WordSpec}

import scala.concurrent.{ExecutionContextExecutor, Future}

//TODO: merge/review the tests we have around the Dispatcher!
class DispatcherTest extends WordSpec with AkkaBeforeAndAfterAll with Matchers with ScalaFutures {

  override implicit def patienceConfig: PatienceConfig =
    PatienceConfig(scaled(Span(10, Seconds)), scaled(Span(250, Milliseconds)))

  "A Dispatcher" should {
    "not race when creating new subscriptions" in {
      // The test setup here is a little different from the above tests,
      // because we wanted to be specific about emitted pairs and use of Thread.sleep.

      implicit val ec: ExecutionContextExecutor = materializer.executionContext

      val elements = new AtomicReference(Map.empty[Int, Int])
      def readElement(i: Int): Future[Int] = Future {
        Thread.sleep(10) // In a previous version of Dispatcher, this sleep caused a race condition.
        elements.get()(i)
      }
      def readSuccessor(i: Int): Int = i + 1

      // compromise between catching flakes and not taking too long
      0 until 25 foreach { _ =>
        val d = Dispatcher("test", 0, 0)

        // Verify that the results are what we expected
        val subscriptions = 1 until 10 map { i =>
          elements.updateAndGet(m => m + (i -> i))
          d.signalNewHead(i)
          d.startingAt(i - 1, OneAfterAnother(readSuccessor, readElement))
            .toMat(Sink.collection)(Keep.right[NotUsed, Future[Seq[(Int, Int)]]])
            .run()
        }

        d.close()

        subscriptions.zip(1 until 10) foreach {
          case (f, i) =>
            whenReady(f) { vals =>
              vals.map(_._1) should contain theSameElementsAs (i to 9)
              vals.map(_._2) should contain theSameElementsAs (i until 10)
            }
        }
      }
    }
  }
} 
Example 164
Source File: SignalDispatcherTest.scala    From daml   with Apache License 2.0 5 votes vote down vote up
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.platform.akkastreams.dispatcher

import java.lang

import akka.stream.scaladsl.Sink
import akka.stream.testkit.scaladsl.TestSink
import com.daml.ledger.api.testing.utils.AkkaBeforeAndAfterAll
import org.awaitility.Awaitility.await
import org.awaitility.Duration
import org.scalatest.concurrent.{AsyncTimeLimitedTests, ScaledTimeSpans}
import org.scalatest.time.Span
import org.scalatest.time.SpanSugar._
import org.scalatest.{FutureOutcome, Matchers, fixture}

class SignalDispatcherTest
    extends fixture.AsyncWordSpec
    with Matchers
    with AkkaBeforeAndAfterAll
    with ScaledTimeSpans
    with AsyncTimeLimitedTests {

  "SignalDispatcher" should {

    "send a signal on subscription if requested" in { sut =>
      sut.subscribe(true).runWith(Sink.head).map(_ => succeed)
    }

    "not send a signal on subscription if not requested" in { sut =>
      val s = sut.subscribe(false).runWith(TestSink.probe[SignalDispatcher.Signal])
      s.request(1L)
      s.expectNoMessage(1.second)
      succeed
    }

    "output a signal when it arrives" in { sut =>
      val result = sut.subscribe(false).runWith(Sink.head).map(_ => succeed)
      sut.signal()
      result
    }

    "output multiple signals when they arrive" in { sut =>
      val count = 10
      val result = sut.subscribe(false).take(count.toLong).runWith(Sink.seq).map(_ => succeed)
      1.to(count).foreach(_ => sut.signal())
      result
    }

    "remove queues from its state when the stream terminates behind them" in { sut =>
      val s = sut.subscribe(true).runWith(TestSink.probe[SignalDispatcher.Signal])
      s.request(1L)
      s.expectNext(SignalDispatcher.Signal)
      sut.getRunningState should have size 1L
      s.cancel()
      await("Cancellation handling")
        .atMost(Duration.TEN_SECONDS)
        .until(() => new lang.Boolean(sut.getRunningState.isEmpty))
      sut.getRunningState shouldBe empty
    }

    "remove queues from its state when closed" in { sut =>
      val s = sut.subscribe(true).runWith(TestSink.probe[SignalDispatcher.Signal])
      s.request(1L)
      s.expectNext(SignalDispatcher.Signal)
      sut.getRunningState should have size 1L
      sut.close()
      assertThrows[IllegalStateException](sut.getRunningState)
      assertThrows[IllegalStateException](sut.signal())
      s.expectComplete()
      succeed
    }
  }
  override def withFixture(test: OneArgAsyncTest): FutureOutcome =
    test.apply(SignalDispatcher())
  override type FixtureParam = SignalDispatcher
  override def timeLimit: Span = scaled(10.seconds)
} 
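
The request/expectNext cycle used above is the standard akka-stream-testkit handshake: a TestSink probe only receives elements after demand has been signalled explicitly. A minimal sketch of the same handshake against a plain source (the names here are illustrative, not part of the daml code):

import akka.actor.ActorSystem
import akka.stream.scaladsl.Source
import akka.stream.testkit.scaladsl.TestSink

object TestSinkSketch extends App {
  implicit val system: ActorSystem = ActorSystem("sketch")

  val probe = Source(List("a", "b")).runWith(TestSink.probe[String])

  probe.request(2L) // no elements flow until demand is signalled
  probe.expectNext("a")
  probe.expectNext("b")
  probe.expectComplete()
  system.terminate()
}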
Example 165
Source File: BatchingQueue.scala    From daml   with Apache License 2.0
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.ledger.participant.state.kvutils.api

import java.io.Closeable
import java.util.concurrent.atomic.AtomicBoolean

import akka.stream.scaladsl.{Sink, Source, SourceQueueWithComplete}
import akka.stream.{Materializer, OverflowStrategy, QueueOfferResult}
import com.daml.ledger.participant.state.kvutils.DamlKvutils.DamlSubmissionBatch
import com.daml.ledger.participant.state.v1.SubmissionResult

import scala.concurrent.Future
import scala.concurrent.duration._

object BatchingQueue {
  type CommitBatchFunction =
    Seq[DamlSubmissionBatch.CorrelatedSubmission] => Future[Unit]
}

// Reconstructed from the usage below; the original declarations were elided
// by the example extractor.
trait BatchingQueue {
  def run(commitBatch: BatchingQueue.CommitBatchFunction)(
      implicit materializer: Materializer): RunningBatchingQueueHandle
}

trait RunningBatchingQueueHandle extends Closeable {
  def alive: Boolean
  def offer(submission: DamlSubmissionBatch.CorrelatedSubmission): Future[SubmissionResult]
}

case class DefaultBatchingQueue(
    maxQueueSize: Int,
    maxBatchSizeBytes: Long,
    maxWaitDuration: FiniteDuration,
    maxConcurrentCommits: Int
) extends BatchingQueue {
  private val queue: Source[
    Seq[DamlSubmissionBatch.CorrelatedSubmission],
    SourceQueueWithComplete[DamlSubmissionBatch.CorrelatedSubmission]] =
    Source
      .queue(maxQueueSize, OverflowStrategy.dropNew)
      .groupedWeightedWithin(maxBatchSizeBytes, maxWaitDuration)(
        (cs: DamlSubmissionBatch.CorrelatedSubmission) => cs.getSubmission.size.toLong)

  def run(commitBatch: Seq[DamlSubmissionBatch.CorrelatedSubmission] => Future[Unit])(
      implicit materializer: Materializer): RunningBatchingQueueHandle = {
    val materializedQueue = queue
      .mapAsync(maxConcurrentCommits)(commitBatch)
      .to(Sink.ignore)
      .run()

    val queueAlive = new AtomicBoolean(true)
    materializedQueue.watchCompletion.foreach { _ =>
      queueAlive.set(false)
    }(materializer.executionContext)

    new RunningBatchingQueueHandle {
      override def alive: Boolean = queueAlive.get()

      override def offer(
          submission: DamlSubmissionBatch.CorrelatedSubmission): Future[SubmissionResult] = {
        materializedQueue
          .offer(submission)
          .map {
            case QueueOfferResult.Enqueued => SubmissionResult.Acknowledged
            case QueueOfferResult.Dropped => SubmissionResult.Overloaded
            case f: QueueOfferResult.Failure => SubmissionResult.InternalError(f.toString)
            case QueueOfferResult.QueueClosed =>
              SubmissionResult.InternalError("DefaultBatchingQueue.queue is closed")
          }(materializer.executionContext)
      }

      override def close(): Unit = {
        materializedQueue.complete()
      }
    }
  }
} 
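
The queue above combines three stream features: Source.queue for bounded buffering, groupedWeightedWithin for size- or time-triggered batching, and mapAsync for bounded commit parallelism. A stand-alone analogue with Strings standing in for CorrelatedSubmission (a sketch, not the daml code):

import akka.actor.ActorSystem
import akka.stream.OverflowStrategy
import akka.stream.scaladsl.{Sink, Source}

import scala.concurrent.Future
import scala.concurrent.duration._

object BatchingSketch extends App {
  implicit val system: ActorSystem = ActorSystem("sketch")
  import system.dispatcher

  def commit(batch: Seq[String]): Future[Unit] =
    Future(println(s"committing batch of ${batch.size}"))

  val queue = Source
    .queue[String](128, OverflowStrategy.dropNew)              // bounded buffer
    .groupedWeightedWithin(1024L, 100.millis)(_.length.toLong) // batch by weight or timeout
    .mapAsync(4)(commit)                                       // at most 4 commits in flight
    .to(Sink.ignore)
    .run()

  queue.offer("submission-1")
  queue.offer("submission-2")
  queue.complete()
}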
Example 166
Source File: Consume.scala    From daml   with Apache License 2.0
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.http

import akka.stream.scaladsl.Sink
import org.scalactic.source
import scalaz.{~>, Free, Functor}

import scala.concurrent.{ExecutionContext, Future}

// Restored object declaration; the Consume ADT (Listen, Drain, Emit), the
// FCC free-monad alias and the Description type are defined in the original
// file and are assumed below.
object Consume {
  def interpret[T, V](steps: FCC[T, V])(implicit ec: ExecutionContext): Sink[T, Future[V]] =
    Sink
      .foldAsync(steps) { (steps, t: T) =>
        // step through steps until performing exactly one listen,
        // then step through any further steps until encountering
        // either the end or the next listen
        def go(steps: FCC[T, V], listened: Boolean): Future[FCC[T, V]] =
          steps.resume fold ({
            case listen @ Listen(f, _) =>
              if (listened) Future successful (Free roll listen) else go(f(t), true)
            case drain: Drain[s, T, FCC[T, V]] =>
              Future successful Free.roll {
                if (listened) drain
                else drain.copy(init = drain.next(drain.init, t))
              }
            case Emit(run) => run flatMap (go(_, listened))
          }, v =>
            if (listened) Future successful (Free point v)
            else
              Future.failed(new IllegalStateException(
                s"unexpected element $t, script already terminated with $v")))
        go(steps, false)
      }
      .mapMaterializedValue(_.flatMap(_.foldMap(Lambda[Consume[T, ?] ~> Future] {
        case Listen(_, desc) =>
          Future.failed(new IllegalStateException(
            s"${describe(desc)}: script terminated early, expected another value"))
        case Drain(init, _, out) => Future(out(init))
        case Emit(run) => run
      })))

  implicit def `consume functor`[T](implicit ec: ExecutionContext): Functor[Consume[T, ?]] =
    new Functor[Consume[T, ?]] {
      override def map[A, B](fa: Consume[T, A])(f: A => B): Consume[T, B] = fa match {
        case Listen(g, desc) => Listen(g andThen f, desc)
        case Drain(init, next, out) => Drain(init, next, out andThen f)
        case Emit(run) => Emit(run map f)
      }
    }

  private def describe(d: Description) = s"${d.fileName}:${d.lineNumber}"

  implicit final class `Consume Ops`[T, V](private val steps: FCC[T, V]) extends AnyVal {
    def withFilter(p: V => Boolean)(implicit pos: source.Position): Free[Consume[T, ?], V] =
      steps flatMap { v =>
        if (p(v)) Free point v
        else
          Free liftF Emit(
            Future failed new IllegalStateException(
              s"${describe(pos)}: script cancelled by match error on $v"))
      }
  }

  def syntax[T]: Syntax[T] = new Syntax

  final class Syntax[T] {
    def readOne(implicit pos: source.Position): FCC[T, T] = Free liftF Listen(identity, pos)
    def drain: FCC[T, Seq[T]] =
      Free liftF Drain(Nil, (acc: List[T], t) => t :: acc, (_: List[T]).reverse)
    def liftF[V](run: Future[V]): FCC[T, V] = Free liftF Emit(run)
    def point[V](v: V): FCC[T, V] = Free point v
  }
} 
Example 167
Source File: ServerAdapter.scala    From daml   with Apache License 2.0
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.grpc.adapter.server.akka

import akka.stream.scaladsl.Sink
import com.daml.grpc.adapter.ExecutionSequencerFactory
import com.daml.grpc.adapter.server.rs.ServerSubscriber
import io.grpc.stub.{ServerCallStreamObserver, StreamObserver}

import scala.concurrent.{Future, Promise}

object ServerAdapter {

  def toSink[Resp](streamObserver: StreamObserver[Resp])(
      implicit executionSequencerFactory: ExecutionSequencerFactory): Sink[Resp, Future[Unit]] = {
    val subscriber =
      new ServerSubscriber[Resp](
        streamObserver.asInstanceOf[ServerCallStreamObserver[Resp]],
        executionSequencerFactory.getExecutionSequencer)
    Sink
      .fromSubscriber(subscriber)
      .mapMaterializedValue(_ => {
        val promise = Promise[Unit]()
        subscriber.completionFuture.handle[Unit]((_, throwable) => {
          if (throwable == null) promise.success(()) else promise.failure(throwable)
          ()
        })
        promise.future
      })
  }

} 
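
The materialized Future[Unit] completes (or fails) when the gRPC subscriber terminates, so a service implementation can simply run its response source into the sink. A sketch of the call shape, with Resp and mkResp as placeholders for a generated protobuf message type:

import akka.stream.Materializer
import akka.stream.scaladsl.Source
import com.daml.grpc.adapter.ExecutionSequencerFactory
import com.daml.grpc.adapter.server.akka.ServerAdapter
import io.grpc.stub.StreamObserver

import scala.concurrent.Future

object ServerAdapterUsageSketch {
  // Stream n responses into the observer handed to a server-streaming
  // gRPC method; Resp and mkResp stand in for a generated protobuf type.
  def streamResponses[Resp](n: Int, mkResp: Int => Resp, observer: StreamObserver[Resp])(
      implicit esf: ExecutionSequencerFactory,
      mat: Materializer): Future[Unit] =
    Source(1 to n).map(mkResp).runWith(ServerAdapter.toSink(observer))
}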
Example 168
Source File: ServerStreamingBenchmark.scala    From daml   with Apache License 2.0
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.grpc.adapter

import akka.Done
import akka.stream.scaladsl.Sink
import com.daml.grpc.adapter.client.akka.ClientAdapter
import com.daml.grpc.adapter.operation.AkkaServiceFixture
import com.daml.ledger.api.perf.util.AkkaStreamPerformanceTest
import com.daml.ledger.api.testing.utils.Resource
import com.daml.platform.hello.{HelloRequest, HelloServiceGrpc}
import io.grpc.ManagedChannel
import org.scalameter.api.Gen
import org.scalameter.picklers.noPickler._

import scala.concurrent.Future
import java.net.InetSocketAddress
import java.util.concurrent.TimeUnit

object ServerStreamingBenchmark extends AkkaStreamPerformanceTest {

  override type ResourceType = () => ManagedChannel

  @transient override protected lazy val resource: Resource[() => ManagedChannel] =
    AkkaServiceFixture.getResource(Some(new InetSocketAddress(0))).map(_._2.channel)

  private val sizes = for {
    totalElements <- Gen.range("numResponses")(50000, 100000, 50000)
    clients <- Gen.enumeration("numClients")(1, 10)
    callsPerClient <- Gen.enumeration("numCalls")(1, 10)
  } yield (totalElements, clients, callsPerClient)

  performance of "Akka-Stream server" config (daConfig: _*) in {
    measure method "server streaming" in {
      using(sizes).withLifecycleManagement() in {
        case (totalElements, clients, callsPerClient) =>
          val eventualDones = for {
            (channel, schedulerPool) <- 1
              .to(clients)
              .map(i => resource.value() -> new AkkaExecutionSequencerPool(s"client-$i")(system))
            _ <- 1.to(callsPerClient)
          } yield {
            serverStreamingCall(totalElements / clients / callsPerClient, channel)(schedulerPool)
              .map(_ => channel -> schedulerPool)
          }
          val eventualTuples = Future.sequence(eventualDones)
          await(eventualTuples).foreach {
            case (channel, pool) =>
              channel.shutdown()
              channel.awaitTermination(5, TimeUnit.SECONDS)
              pool.close()
          }

      }
    }
  }

  private def serverStreamingCall(streamedElements: Int, managedChannel: ManagedChannel)(
      implicit
      executionSequencerFactory: ExecutionSequencerFactory): Future[Done] = {
    ClientAdapter
      .serverStreaming(
        HelloRequest(streamedElements),
        HelloServiceGrpc.stub(managedChannel).serverStreaming)
      .runWith(Sink.ignore)(materializer)
  }
} 
Example 169
Source File: AkkaClientCompatibilityCheck.scala    From daml   with Apache License 2.0
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.grpc.adapter.operation

import akka.actor.ActorSystem
import akka.stream.scaladsl.Sink
import akka.stream.{Materializer, ThrottleMode}
import com.daml.grpc.adapter.ExecutionSequencerFactory
import com.daml.grpc.adapter.client.ResultAssertions
import com.daml.grpc.adapter.client.akka.ClientAdapter
import com.daml.platform.hello.HelloRequest
import com.daml.platform.hello.HelloServiceGrpc.HelloServiceStub
import io.grpc.{ClientCall, MethodDescriptor}
import org.scalatest.concurrent.ScalaFutures
import org.scalatest.{Matchers, WordSpec}

import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.duration._

trait AkkaClientCompatibilityCheck {
  self: WordSpec with Matchers with ScalaFutures with ResultAssertions =>

  implicit protected def system: ActorSystem

  implicit protected def materializer: Materializer

  implicit protected def esf: ExecutionSequencerFactory

  def akkaClientCompatible(helloStub: => HelloServiceStub): Unit = {

    def getCall[Req, Resp](call: MethodDescriptor[Req, Resp]): ClientCall[Req, Resp] =
      helloStub.getChannel.newCall(call, helloStub.getCallOptions)

    "respond with the correct number of elements and correct content in 1-* setup" in {
      val elemsF = ClientAdapter
        .serverStreaming(HelloRequest(elemCount), helloStub.serverStreaming)
        .runWith(Sink.seq)

      whenReady(elemsF)(assertElementsAreInOrder(elemCount.toLong))
    }

    "tolerate rematerialization of the same response source in 1-* setup" in {
      val source = ClientAdapter
        .serverStreaming(HelloRequest(elemCount), helloStub.serverStreaming)
      val elemsF1 = source.runWith(Sink.seq)
      val elemsF2 = source.runWith(Sink.seq)

      whenReady(for {
        elems1 <- elemsF1
        elems2 <- elemsF2
      } yield elems1 -> elems2)({
        case (elems1, elems2) =>
          val check = assertElementsAreInOrder(elemCount.toLong) _
          check(elems1)
          check(elems2)
      })
    }

    "respond with the correct number of elements and correct content in 1-* setup when back-pressured" in {
      val elemsF = ClientAdapter
        .serverStreaming(HelloRequest(elemCount), helloStub.serverStreaming)
        .throttle(100, 1.second, 16, ThrottleMode.shaping)
        .runWith(Sink.seq)

      whenReady(elemsF)(assertElementsAreInOrder(elemCount.toLong))
    }

    "handle cancellation in 1-* setup" in {
      val elemsF = ClientAdapter
        .serverStreaming(HelloRequest(elemCount), helloStub.serverStreaming)
        .take(halfCount.toLong)
        .runWith(Sink.seq)

      whenReady(elemsF)(assertElementsAreInOrder(halfCount.toLong))
    }

  }
} 
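
The throttle stage used in the back-pressure test above is the simplest way to slow a consumer: with ThrottleMode.shaping it delays demand instead of failing the stream, so the producer is forced to respect back-pressure end to end. A minimal sketch:

import akka.actor.ActorSystem
import akka.stream.ThrottleMode
import akka.stream.scaladsl.{Sink, Source}

import scala.concurrent.duration._

object ThrottleSketch extends App {
  implicit val system: ActorSystem = ActorSystem("sketch")
  import system.dispatcher

  // At most 10 elements per 100 millis, burst of 1; upstream only
  // produces as fast as the throttle lets demand through.
  Source(1 to 50)
    .throttle(10, 100.millis, 1, ThrottleMode.shaping)
    .runWith(Sink.foreach(println))
    .onComplete(_ => system.terminate())
}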
Example 170
Source File: AkkaClientWithReferenceServiceSpecBase.scala    From daml   with Apache License 2.0
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.grpc.adapter.operation

import akka.stream.scaladsl.Sink
import com.daml.grpc.adapter.client.ReferenceClientCompatibilityCheck
import com.daml.grpc.adapter.client.akka.ClientAdapter
import com.daml.grpc.adapter.{ExecutionSequencerFactory, TestExecutionSequencerFactory}
import com.daml.ledger.api.testing.utils.AkkaBeforeAndAfterAll
import com.daml.platform.hello.HelloRequest
import io.grpc.StatusRuntimeException
import org.scalatest.concurrent.ScalaFutures
import org.scalatest.{BeforeAndAfterAll, Matchers, WordSpec}

import java.net.SocketAddress

abstract class AkkaClientWithReferenceServiceSpecBase(
    override protected val socketAddress: Option[SocketAddress])
    extends WordSpec
    with Matchers
    with BeforeAndAfterAll
    with AkkaBeforeAndAfterAll
    with ScalaFutures
    with ReferenceClientCompatibilityCheck
    with AkkaClientCompatibilityCheck
    with ReferenceServiceFixture {

  protected implicit val esf: ExecutionSequencerFactory = TestExecutionSequencerFactory.instance

  "Akka client" when {

    "testing with reference service" should {
      behave like akkaClientCompatible(clientStub)
    }

    "handle request errors when server streaming" in {
      val elemsF = ClientAdapter
        .serverStreaming(HelloRequest(-1), clientStub.serverStreaming)
        .runWith(Sink.ignore)

      whenReady(elemsF.failed)(_ shouldBe a[StatusRuntimeException])
    }

  }
} 
Example 171
Source File: AkkaTest.scala    From daml   with Apache License 2.0
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.ledger.client.testing

import java.util
import java.util.concurrent.{Executors, ScheduledExecutorService}

import akka.NotUsed
import akka.actor.{ActorSystem, Scheduler}
import akka.stream.scaladsl.{Sink, Source}
import akka.stream.Materializer
import akka.util.ByteString
import com.daml.grpc.adapter.{ExecutionSequencerFactory, SingleThreadExecutionSequencerPool}
import com.typesafe.config.{Config, ConfigFactory, ConfigValueFactory}
import com.typesafe.scalalogging.LazyLogging
import org.scalatest.{BeforeAndAfterAll, Suite}

import scala.concurrent.duration._
import scala.concurrent.{Await, ExecutionContextExecutor, Future}
import scala.util.control.NonFatal

trait AkkaTest extends BeforeAndAfterAll with LazyLogging { self: Suite =>
  // TestEventListener is needed for log testing
  private val loggers =
    util.Arrays.asList("akka.event.slf4j.Slf4jLogger", "akka.testkit.TestEventListener")
  protected implicit val sysConfig: Config = ConfigFactory
    .load()
    .withValue("akka.loggers", ConfigValueFactory.fromIterable(loggers))
    .withValue("akka.logger-startup-timeout", ConfigValueFactory.fromAnyRef("30s"))
    .withValue("akka.stdout-loglevel", ConfigValueFactory.fromAnyRef("INFO"))
  protected implicit val system: ActorSystem = ActorSystem("test", sysConfig)
  protected implicit val ec: ExecutionContextExecutor =
    system.dispatchers.lookup("test-dispatcher")
  protected implicit val scheduler: Scheduler = system.scheduler
  protected implicit val schedulerService: ScheduledExecutorService =
    Executors.newSingleThreadScheduledExecutor()
  protected implicit val materializer: Materializer = Materializer(system)
  protected implicit val esf: ExecutionSequencerFactory =
    new SingleThreadExecutionSequencerPool("testSequencerPool")
  protected val timeout: FiniteDuration = 2.minutes
  protected val shortTimeout: FiniteDuration = 5.seconds

  protected def await[T](fun: => Future[T]): T = Await.result(fun, timeout)

  protected def awaitShort[T](fun: => Future[T]): T = Await.result(fun, shortTimeout)

  protected def drain(source: Source[ByteString, NotUsed]): ByteString = {
    val futureResult: Future[ByteString] = source.runFold(ByteString.empty) { (a, b) =>
      a.concat(b)
    }
    awaitShort(futureResult)
  }

  protected def drain[A, B](source: Source[A, B]): Seq[A] = {
    val futureResult: Future[Seq[A]] = source.runWith(Sink.seq)
    awaitShort(futureResult)
  }

  override protected def afterAll(): Unit = {
    try {
      val _ = await(system.terminate())
    } catch {
      case NonFatal(_) => ()
    }
    schedulerService.shutdownNow()
    super.afterAll()
  }
} 
Example 172
Source File: ClientUtil.scala    From daml   with Apache License 2.0
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0
package com.daml.quickstart.iou

import java.util.UUID

import akka.stream.Materializer
import akka.stream.scaladsl.{Sink, Source}
import akka.{Done, NotUsed}
import com.daml.ledger.api.domain.LedgerId
import com.daml.ledger.api.refinements.ApiTypes.{ApplicationId, WorkflowId}
import com.daml.ledger.api.v1.command_submission_service.SubmitRequest
import com.daml.ledger.api.v1.commands.{Command, Commands}
import com.daml.ledger.api.v1.ledger_offset.LedgerOffset
import com.daml.ledger.api.v1.transaction.Transaction
import com.daml.ledger.api.v1.transaction_filter.{Filters, TransactionFilter}
import com.daml.ledger.client.LedgerClient
import com.daml.quickstart.iou.FutureUtil.toFuture
import com.google.protobuf.empty.Empty

import scala.concurrent.{ExecutionContext, Future}

class ClientUtil(
    client: LedgerClient,
    applicationId: ApplicationId,
) {

  import ClientUtil._

  private val ledgerId = client.ledgerId
  private val packageClient = client.packageClient
  private val commandClient = client.commandClient
  private val transactionClient = client.transactionClient

  def listPackages(implicit ec: ExecutionContext): Future[Set[String]] =
    packageClient.listPackages().map(_.packageIds.toSet)

  def ledgerEnd(implicit ec: ExecutionContext): Future[LedgerOffset] =
    transactionClient.getLedgerEnd().flatMap(response => toFuture(response.offset))

  def submitCommand(party: String, workflowId: WorkflowId, cmd: Command.Command): Future[Empty] = {
    val commands = Commands(
      ledgerId = LedgerId.unwrap(ledgerId),
      workflowId = WorkflowId.unwrap(workflowId),
      applicationId = ApplicationId.unwrap(applicationId),
      commandId = uniqueId,
      party = party,
      commands = Seq(Command(cmd)),
    )

    commandClient.submitSingleCommand(SubmitRequest(Some(commands), None))
  }

  def nextTransaction(party: String, offset: LedgerOffset)(
      implicit mat: Materializer): Future[Transaction] =
    transactionClient
      .getTransactions(offset, None, transactionFilter(party))
      .take(1L)
      .runWith(Sink.head)

  def subscribe(party: String, offset: LedgerOffset, max: Option[Long])(f: Transaction => Unit)(
      implicit mat: Materializer): Future[Done] = {
    val source: Source[Transaction, NotUsed] =
      transactionClient.getTransactions(offset, None, transactionFilter(party))
    max.fold(source)(n => source.take(n)) runForeach f
  }

  override lazy val toString: String = s"ClientUtil{ledgerId=$ledgerId}"
}

object ClientUtil {
  def transactionFilter(parties: String*): TransactionFilter =
    TransactionFilter(parties.map((_, Filters.defaultInstance)).toMap)

  def uniqueId: String = UUID.randomUUID.toString

  def workflowIdFromParty(p: String): WorkflowId =
    WorkflowId(s"$p Workflow")
} 
Example 173
Source File: ClientUtil.scala    From daml   with Apache License 2.0
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0
package com.daml.quickstart.iou

import java.util.UUID

import akka.stream.Materializer
import akka.stream.scaladsl.{Sink, Source}
import akka.{Done, NotUsed}
import com.daml.ledger.api.refinements.ApiTypes.{ApplicationId, WorkflowId}
import com.daml.ledger.api.v1.command_submission_service.SubmitRequest
import com.daml.ledger.api.v1.commands.Commands
import com.daml.ledger.api.v1.ledger_offset.LedgerOffset
import com.daml.ledger.api.v1.transaction.Transaction
import com.daml.ledger.api.v1.transaction_filter.{Filters, TransactionFilter}
import com.daml.ledger.client.LedgerClient
import com.daml.ledger.client.binding.{Primitive => P}
import com.daml.quickstart.iou.FutureUtil.toFuture
import com.google.protobuf.empty.Empty

import scalaz.syntax.tag._

import scala.concurrent.{ExecutionContext, Future}

class ClientUtil(
    client: LedgerClient,
    applicationId: ApplicationId,
) {

  import ClientUtil._

  private val ledgerId = client.ledgerId
  private val commandClient = client.commandClient
  private val transactionClient = client.transactionClient

  def ledgerEnd(implicit ec: ExecutionContext): Future[LedgerOffset] =
    transactionClient.getLedgerEnd().flatMap(response => toFuture(response.offset))

  def submitCommand[T](
      sender: P.Party,
      workflowId: WorkflowId,
      command: P.Update[P.ContractId[T]]): Future[Empty] = {
    commandClient.submitSingleCommand(submitRequest(sender, workflowId, command))
  }

  def submitRequest[T](
      party: P.Party,
      workflowId: WorkflowId,
      seq: P.Update[P.ContractId[T]]*): SubmitRequest = {
    val commands = Commands(
      ledgerId = ledgerId.unwrap,
      workflowId = WorkflowId.unwrap(workflowId),
      applicationId = ApplicationId.unwrap(applicationId),
      commandId = uniqueId,
      party = P.Party.unwrap(party),
      commands = seq.map(_.command)
    )
    SubmitRequest(Some(commands), None)
  }

  def nextTransaction(party: P.Party, offset: LedgerOffset)(
      implicit mat: Materializer): Future[Transaction] =
    transactionClient
      .getTransactions(offset, None, transactionFilter(party))
      .take(1L)
      .runWith(Sink.head)

  def subscribe(party: P.Party, offset: LedgerOffset, max: Option[Long])(f: Transaction => Unit)(
      implicit mat: Materializer): Future[Done] = {
    val source: Source[Transaction, NotUsed] =
      transactionClient.getTransactions(offset, None, transactionFilter(party))
    max.fold(source)(n => source.take(n)) runForeach f
  }

  override lazy val toString: String = s"ClientUtil{ledgerId=$ledgerId}"
}

object ClientUtil {
  def transactionFilter(ps: P.Party*): TransactionFilter =
    TransactionFilter(P.Party.unsubst(ps).map((_, Filters.defaultInstance)).toMap)

  def uniqueId: String = UUID.randomUUID.toString

  def workflowIdFromParty(p: P.Party): WorkflowId =
    WorkflowId(s"${P.Party.unwrap(p): String} Workflow")
} 
Example 174
Source File: AkkaResourceOwnerSpec.scala    From daml   with Apache License 2.0
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.resources.akka

import akka.actor.{Actor, ActorSystem, Props}
import akka.stream.Materializer
import akka.stream.scaladsl.{Keep, Sink, Source}
import akka.{Done, NotUsed}
import com.daml.resources.ResourceOwner
import org.scalatest.{AsyncWordSpec, Matchers}

import scala.concurrent.{Future, Promise}

class AkkaResourceOwnerSpec extends AsyncWordSpec with Matchers {
  "a function returning an ActorSystem" should {
    "convert to a ResourceOwner" in {
      val testPromise = Promise[Int]()
      class TestActor extends Actor {
        @SuppressWarnings(Array("org.wartremover.warts.Any"))
        override def receive: Receive = {
          case value: Int => testPromise.success(value)
          case value => testPromise.failure(new IllegalArgumentException(s"$value"))
        }
      }

      val resource = for {
        actorSystem <- AkkaResourceOwner
          .forActorSystem(() => ActorSystem("TestActorSystem"))
          .acquire()
        actor <- ResourceOwner
          .successful(actorSystem.actorOf(Props(new TestActor)))
          .acquire()
      } yield (actorSystem, actor)

      for {
        resourceFuture <- resource.asFuture
        (actorSystem, actor) = resourceFuture
        _ = actor ! 7
        result <- testPromise.future
        _ <- resource.release()
      } yield {
        result should be(7)
        an[IllegalStateException] should be thrownBy actorSystem.actorOf(Props(new TestActor))
      }
    }
  }

  "a function returning a Materializer" should {
    "convert to a ResourceOwner" in {
      val resource = for {
        actorSystem <- AkkaResourceOwner
          .forActorSystem(() => ActorSystem("TestActorSystem"))
          .acquire()
        materializer <- AkkaResourceOwner.forMaterializer(() => Materializer(actorSystem)).acquire()
      } yield materializer

      for {
        materializer <- resource.asFuture
        numbers <- Source(1 to 10)
          .toMat(Sink.seq)(Keep.right[NotUsed, Future[Seq[Int]]])
          .run()(materializer)
        _ <- resource.release()
      } yield {
        numbers should be(1 to 10)
        an[IllegalStateException] should be thrownBy Source
          .single(0)
          .toMat(Sink.ignore)(Keep.right[NotUsed, Future[Done]])
          .run()(materializer)
      }
    }
  }
} 
Example 175
Source File: GroupedAverage.scala    From streams-tests   with Apache License 2.0
package com.softwaremill.streams

import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.{Sink, Source}
import com.softwaremill.streams.util.Timed._

import scala.concurrent.Await
import scala.concurrent.duration._
import scalaz.stream.{Process, Process0}

trait GroupedAverage {
  def run(input: () => Iterator[Int]): Option[Double]
}

object AkkaStreamsGroupedAverage extends GroupedAverage {
  private lazy implicit val system = ActorSystem()

  def run(input: () => Iterator[Int]): Option[Double] = {
    implicit val mat = ActorMaterializer()

    val r = Source.fromIterator(input)
      .mapConcat(n => List(n, n+1))
      .filter(_ % 17 != 0)
      .grouped(10)
      .map(group => group.sum / group.size.toDouble)
      .runWith(Sink.fold[Option[Double], Double](None)((_, el) => Some(el)))

    Await.result(r, 1.hour)
  }

  def shutdown() = {
    system.terminate()
  }
}

object ScalazStreamsGroupedAverage extends GroupedAverage {
  def run(input: () => Iterator[Int]): Option[Double] = {
    processFromIterator(input)
      .flatMap(n => Process(n, n+1))
      .filter(_ % 17 != 0)
      .chunk(10)
      .map(group => group.sum / group.size.toDouble)
      .toSource.runLast.run
  }

  private def processFromIterator[T](input: () => Iterator[T]): Process0[T] = Process.suspend {
    val iterator = input()
    def go(): Process0[T] = {
      if (iterator.hasNext) {
        Process.emit(iterator.next()) ++ go()
      } else Process.halt
    }
    go()
  }
}

object GroupedAverageRunner extends App {
  val impls = List(AkkaStreamsGroupedAverage, ScalazStreamsGroupedAverage)
  val ranges = List(1000, 100000, 1000000, 10000000)

  val tests = for {
    impl <- impls
    range <- ranges
  } yield (
      s"${if (impl == ScalazStreamsGroupedAverage) "scalaz" else "akka"}, 1->$range",
      () => impl.run(() => Iterator.range(1, range+1)).toString)

  runTests(tests, 3)

  AkkaStreamsGroupedAverage.shutdown()
} 
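
The Sink.fold used above, Sink.fold[Option[Double], Double](None)((_, el) => Some(el)), simply remembers the last emitted average. Akka Streams ships Sink.lastOption for exactly this; a sketch of the equivalent (a rewrite suggestion, not part of the benchmark project):

import akka.actor.ActorSystem
import akka.stream.scaladsl.{Sink, Source}

import scala.concurrent.Await
import scala.concurrent.duration._

object LastOptionSketch extends App {
  implicit val system: ActorSystem = ActorSystem("sketch")

  val r = Source(1 to 100)
    .grouped(10)
    .map(group => group.sum / group.size.toDouble)
    .runWith(Sink.lastOption) // Some(last average); None for an empty stream

  println(Await.result(r, 1.minute)) // Some(95.5)
  system.terminate()
}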
Example 176
Source File: PersonTest.scala    From akka-serialization-test   with Apache License 2.0
package com.github.dnvriend.domain

import akka.actor.{ ActorRef, Props }
import akka.pattern.ask
import akka.persistence.query.EventEnvelope
import akka.stream.scaladsl.{ Sink, Source }
import akka.testkit.TestProbe
import com.github.dnvriend.TestSpec
import com.github.dnvriend.domain.Person._
import com.github.dnvriend.persistence.ProtobufReader
import proto.person.Command._

class PersonTest extends TestSpec {

  import com.github.dnvriend.persistence.ProtobufFormats._

  def withPerson(id: String)(f: ActorRef ⇒ TestProbe ⇒ Unit): Unit = {
    val tp = TestProbe()
    val ref = system.actorOf(Props(new Person(id)))
    try f(ref)(tp) finally killActors(ref)
  }

  "Person" should "register a name" in {
    withPerson("p1") { ref ⇒ tp ⇒
      Source(List(RegisterNameCommand("dennis", "vriend")))
        .mapAsync(1)(ref ? _).runWith(Sink.ignore).futureValue
    }

    withPerson("p1") { ref ⇒ tp ⇒
      Source(List(RegisterNameCommand("dennis", "vriend")))
        .mapAsync(1)(ref ? _).runWith(Sink.ignore).futureValue
    }

    // note that the persistence-query does not use the deserializer
    // so the protobuf must be deserialized inline
    eventsForPersistenceIdSource("p1").collect {
      case EventEnvelope(_, _, _, proto: NameRegisteredMessage) ⇒
        implicitly[ProtobufReader[NameRegisteredEvent]].read(proto)
    }.testProbe { tp ⇒
      tp.request(Int.MaxValue)
      tp.expectNext(NameRegisteredEvent("dennis", "vriend"))
      tp.expectNext(NameRegisteredEvent("dennis", "vriend"))
      tp.expectComplete()
    }
  }

  it should "update its name and surname" in {
    withPerson("p2") { ref ⇒ tp ⇒
      Source(List(RegisterNameCommand("dennis", "vriend"), ChangeNameCommand("jimi"), ChangeSurnameCommand("hendrix")))
        .mapAsync(1)(ref ? _).runWith(Sink.ignore).futureValue
    }

    eventsForPersistenceIdSource("p2").collect {
      case EventEnvelope(_, _, _, proto: NameRegisteredMessage) ⇒
        implicitly[ProtobufReader[NameRegisteredEvent]].read(proto)
      case EventEnvelope(_, _, _, proto: NameChangedMessage) ⇒
        implicitly[ProtobufReader[NameChangedEvent]].read(proto)
      case EventEnvelope(_, _, _, proto: SurnameChangedMessage) ⇒
        implicitly[ProtobufReader[SurnameChangedEvent]].read(proto)
    }.testProbe { tp ⇒
      tp.request(Int.MaxValue)
      tp.expectNext(NameRegisteredEvent("dennis", "vriend"))
      tp.expectNext(NameChangedEvent("jimi"))
      tp.expectNext(SurnameChangedEvent("hendrix"))
      tp.expectComplete()
    }
  }
} 
Example 177
Source File: AlbumTest.scala    From akka-serialization-test   with Apache License 2.0

package com.github.dnvriend.domain

import java.time.Duration

import akka.pattern.ask
import akka.stream.scaladsl.{ Sink, Source }
import com.github.dnvriend.TestSpec
import com.github.dnvriend.domain.Music._
import com.github.dnvriend.repository.AlbumRepository

class AlbumTest extends TestSpec {

  "Album" should "register a title" in {
    val album = AlbumRepository.forId("album-1")
    val xs = List(ChangeAlbumTitle("Dark side of the Moon"))
    Source(xs).mapAsync(1)(album ? _).runWith(Sink.ignore).futureValue

    eventsForPersistenceIdSource("album-1").map(_.event).testProbe { tp ⇒
      tp.request(Int.MaxValue)
      tp.expectNextN(xs.map(cmd ⇒ TitleChanged(cmd.title)))
      tp.expectComplete()
    }
    killActors(album)
  }

  it should "update its title and year and songs" in {
    val album = AlbumRepository.forId("album-2")
    val xs = List(
      ChangeAlbumTitle("Dark side of the Moon"),
      ChangeAlbumYear(1973),
      AddSong(Song("Money", Duration.ofSeconds(390))),
      AddSong(Song("Redemption Song", Duration.ofSeconds(227))),
      RemoveSong(Song("Redemption Song", Duration.ofSeconds(227)))
    )

    val expectedEvents = xs.map {
      case ChangeAlbumTitle(title) ⇒ TitleChanged(title)
      case ChangeAlbumYear(year)   ⇒ YearChanged(year)
      case AddSong(song)           ⇒ SongAdded(song)
      case RemoveSong(song)        ⇒ SongRemoved(song)
    }

    Source(xs).mapAsync(1)(album ? _).runWith(Sink.ignore).futureValue

    eventsForPersistenceIdSource("album-2").map(_.event).testProbe { tp ⇒
      tp.request(Int.MaxValue)
      tp.expectNextN(expectedEvents)
      tp.expectComplete()
    }
  }
} 
Example 178
Source File: Client.scala    From opencensus-scala   with Apache License 2.0
package io.opencensus.scala.examples.akka.http

import akka.actor.ActorSystem
import akka.http.scaladsl.Http
import akka.http.scaladsl.model.HttpRequest
import akka.stream.scaladsl.{Sink, Source}
import io.opencensus.scala.akka.http.TracingClient
import org.slf4j.bridge.SLF4JBridgeHandler

import scala.concurrent.duration._
import scala.concurrent.{Await, Future}
import scala.util.{Failure, Success}

object Client extends App {
  // Forward java.util.Logging to slf4j
  SLF4JBridgeHandler.removeHandlersForRootLogger()
  SLF4JBridgeHandler.install()

  implicit val system: ActorSystem = ActorSystem()
  import system.dispatcher

  def await[T](f: Future[T]) = Await.result(f, 3.seconds)

  // Request level client
  val pipeline = Http().singleRequest(_: HttpRequest)
  val r1 = await {
    TracingClient
      .traceRequest(pipeline)(HttpRequest(uri = "http://localhost:8080"))
      .flatMap(_.entity.toStrict(1.second))
      .map(_.data.utf8String)
  }
  println(r1)

  // Host level client
  val pool     = Http().cachedHostConnectionPool[Unit]("localhost", 8080)
  val hostFlow = TracingClient.traceRequestForPool(pool)

  val r2 = await {
    Source
      .single(HttpRequest(uri = "/"))
      .map((_, ()))
      .via(hostFlow)
      .map(_._1)
      .flatMapConcat {
        case Success(response) => response.entity.dataBytes
        case Failure(e)        => throw e
      }
      .map(_.utf8String)
      .runWith(Sink.head)
  }
  println(r2)

  // Connection level client
  val connection     = Http().outgoingConnection("localhost", 8080)
  val connectionFlow = TracingClient.traceRequestForConnection(connection)

  val r3 = await {
    Source
      .single(HttpRequest(uri = "/"))
      .via(connectionFlow)
      .flatMapConcat(_.entity.dataBytes)
      .map(_.utf8String)
      .runWith(Sink.head)
  }
  println(r3)
} 
Example 179
Source File: WsConnection.scala    From matcher   with MIT License
package com.wavesplatform.dex.api.ws.connection

import java.util.concurrent.ConcurrentLinkedQueue

import akka.Done
import akka.actor.{ActorRef, ActorSystem, Status}
import akka.http.scaladsl.Http
import akka.http.scaladsl.model.ws.{BinaryMessage, Message, TextMessage, WebSocketRequest}
import akka.stream.scaladsl.{Flow, Sink, Source}
import akka.stream.{CompletionStrategy, Materializer, OverflowStrategy}
import com.wavesplatform.dex.api.ws.protocol.{WsClientMessage, WsMessage, WsPingOrPong, WsServerMessage}
import com.wavesplatform.dex.domain.utils.ScorexLogging
import play.api.libs.json.Json

import scala.collection.JavaConverters._
import scala.concurrent.Future
import scala.concurrent.duration._
import scala.util.{Failure, Success, Try}

class WsConnection(uri: String, keepAlive: Boolean = true)(implicit system: ActorSystem, materializer: Materializer) extends ScorexLogging {

  log.info(s"""Connecting to Matcher WS API:
            |         URI = $uri
            |  Keep alive = $keepAlive""".stripMargin)

  import materializer.executionContext

  private val wsHandlerRef = system.actorOf(TestWsHandlerActor props keepAlive)

  protected def stringifyClientMessage(cm: WsClientMessage): TextMessage.Strict =
    WsMessage.toStrictTextMessage(cm)(WsClientMessage.wsClientMessageWrites)

  // From test to server
  private val source: Source[TextMessage.Strict, ActorRef] = {
    val completionMatcher: PartialFunction[Any, CompletionStrategy] = { case akka.actor.Status.Success(_) => CompletionStrategy.draining }
    val failureMatcher: PartialFunction[Any, Throwable]             = { case Status.Failure(cause)        => cause }

    Source
      .actorRef[WsClientMessage](completionMatcher, failureMatcher, 10, OverflowStrategy.fail)
      .map(stringifyClientMessage)
      .mapMaterializedValue { source =>
        wsHandlerRef.tell(TestWsHandlerActor.AssignSourceRef, source)
        source
      }
  }

  private val messagesBuffer: ConcurrentLinkedQueue[WsServerMessage] = new ConcurrentLinkedQueue[WsServerMessage]()

  // From server to test
  private val sink: Sink[Message, Future[Done]] = Sink.foreach {
    case tm: TextMessage =>
      for {
        strictText <- tm.toStrict(1.second).map(_.getStrictText)
        clientMessage <- {
          log.trace(s"Got $strictText")
          Try { Json.parse(strictText).as[WsServerMessage] } match {
            case Failure(exception) => Future.failed(exception)
            case Success(x) => {
              messagesBuffer.add(x)
              if (keepAlive) x match {
                case value: WsPingOrPong => wsHandlerRef ! value
                case _                   =>
              }
              Future.successful(x)
            }
          }
        }
      } yield clientMessage

    case bm: BinaryMessage =>
      bm.dataStream.runWith(Sink.ignore)
      Future.failed { new IllegalArgumentException("Binary messages are not supported") }
  }

  private val flow: Flow[Message, TextMessage.Strict, Future[Done]] = Flow.fromSinkAndSourceCoupled(sink, source).watchTermination() {
    case (_, f) =>
      f.onComplete {
        case Success(_) => log.info(s"WebSocket connection to $uri successfully closed")
        case Failure(e) => log.error(s"WebSocket connection to $uri closed with an error", e)
      }(materializer.executionContext)
      f
  }

  val (connectionResponse, closed) = Http().singleWebSocketRequest(WebSocketRequest(uri), flow)

  val connectionOpenedTs: Long                   = System.currentTimeMillis
  val connectionClosedTs: Future[Long]           = closed.map(_ => System.currentTimeMillis)
  val connectionLifetime: Future[FiniteDuration] = connectionClosedTs.map(cc => FiniteDuration(cc - connectionOpenedTs, MILLISECONDS))

  def messages: List[WsServerMessage] = messagesBuffer.iterator().asScala.toList
  def clearMessages(): Unit           = messagesBuffer.clear()

  def send(message: WsClientMessage): Unit = wsHandlerRef ! TestWsHandlerActor.SendToServer(message)

  def close(): Unit     = if (!isClosed) wsHandlerRef ! TestWsHandlerActor.CloseConnection
  def isClosed: Boolean = closed.isCompleted
} 
Example 180
Source File: WsConnection.scala    From matcher   with MIT License
package com.wavesplatform.dex.load.ws

import akka.Done
import akka.actor.{ActorRef, ActorSystem, Status}
import akka.http.scaladsl.Http
import akka.http.scaladsl.model.ws.{BinaryMessage, Message, TextMessage, WebSocketRequest}
import akka.stream.scaladsl.{Flow, Sink, Source}
import akka.stream.{CompletionStrategy, Materializer, OverflowStrategy}
import com.wavesplatform.dex.api.ws.connection.TestWsHandlerActor
import com.wavesplatform.dex.api.ws.protocol.{WsClientMessage, WsMessage, WsServerMessage}
import com.wavesplatform.dex.domain.utils.ScorexLogging
import play.api.libs.json.Json

import scala.concurrent.Future
import scala.concurrent.duration.DurationInt
import scala.util.{Failure, Success, Try}

class WsConnection(uri: String, receive: WsServerMessage => Option[WsClientMessage])(implicit system: ActorSystem) extends ScorexLogging {

  import system.dispatcher
  private implicit val materializer = Materializer(system)
  private val wsHandlerRef          = system.actorOf(TestWsHandlerActor.props(keepAlive = true))

  log.info(s"Connecting to Matcher WS API: $uri")

  protected def stringifyClientMessage(cm: WsClientMessage): TextMessage.Strict =
    WsMessage.toStrictTextMessage(cm)(WsClientMessage.wsClientMessageWrites)

  // To server
  private val source: Source[TextMessage.Strict, ActorRef] = {
    val completionMatcher: PartialFunction[Any, CompletionStrategy] = { case akka.actor.Status.Success(_) => CompletionStrategy.draining }
    val failureMatcher: PartialFunction[Any, Throwable]             = { case Status.Failure(cause)        => cause }

    Source
      .actorRef[WsClientMessage](completionMatcher, failureMatcher, 10, OverflowStrategy.fail)
      .map(stringifyClientMessage)
      .mapMaterializedValue { source =>
        wsHandlerRef.tell(TestWsHandlerActor.AssignSourceRef, source)
        source
      }
  }

  // To client
  private val sink: Sink[Message, Future[Done]] = Sink.foreach {
    case tm: TextMessage => // TODO move to tests
      for {
        strictText <- tm.toStrict(1.second).map(_.getStrictText)
        clientMessage <- {
          log.trace(s"Got $strictText")
          Try { Json.parse(strictText).as[WsServerMessage] } match {
            case Failure(exception) => Future.failed(exception)
            case Success(x)         => Future.successful { receive(x).foreach(wsHandlerRef ! _) }
          }
        }
      } yield clientMessage

    case bm: BinaryMessage =>
      bm.dataStream.runWith(Sink.ignore)
      Future.failed { new IllegalArgumentException("Binary messages are not supported") }
  }

  private val flow: Flow[Message, TextMessage.Strict, Future[Done]] = Flow.fromSinkAndSourceCoupled(sink, source).watchTermination() {
    case (_, f) =>
      f.onComplete {
        case Success(_) => log.info(s"WebSocket connection to $uri successfully closed")
        case Failure(e) => log.error(s"WebSocket connection to $uri closed with an error", e)
      }(materializer.executionContext)
      f
  }

  val (connectionResponse, closed) = Http().singleWebSocketRequest(WebSocketRequest(uri), flow)

  def send(message: WsClientMessage): Unit = wsHandlerRef ! TestWsHandlerActor.SendToServer(message)

  def isClosed: Boolean = closed.isCompleted
  def close(): Future[Done] = {
    if (!isClosed) wsHandlerRef ! TestWsHandlerActor.CloseConnection
    closed
  }
} 
Example 181
Source File: DynamoDBSnapshotStore.scala    From akka-persistence-dynamodb   with Apache License 2.0
package com.github.j5ik2o.akka.persistence.dynamodb.snapshot

import akka.actor.ExtendedActorSystem
import akka.persistence.snapshot.SnapshotStore
import akka.persistence.{ SelectedSnapshot, SnapshotMetadata, SnapshotSelectionCriteria }
import akka.serialization.SerializationExtension
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.{ Sink, Source }
import com.github.j5ik2o.akka.persistence.dynamodb.config.SnapshotPluginConfig
import com.github.j5ik2o.akka.persistence.dynamodb.model.{ PersistenceId, SequenceNumber }
import com.github.j5ik2o.akka.persistence.dynamodb.snapshot.dao.{ SnapshotDao, SnapshotDaoImpl }
import com.github.j5ik2o.akka.persistence.dynamodb.utils.V2DynamoDbClientBuilderUtils
import com.github.j5ik2o.reactive.aws.dynamodb.DynamoDbAsyncClient
import com.typesafe.config.Config
import software.amazon.awssdk.services.dynamodb.{ DynamoDbAsyncClient => JavaDynamoDbAsyncClient }

import scala.concurrent.{ ExecutionContext, Future }

object DynamoDBSnapshotStore {

  def toSelectedSnapshot(tupled: (SnapshotMetadata, Any)): SelectedSnapshot = tupled match {
    case (meta: SnapshotMetadata, snapshot: Any) => SelectedSnapshot(meta, snapshot)
  }
}

class DynamoDBSnapshotStore(config: Config) extends SnapshotStore {
  import DynamoDBSnapshotStore._

  implicit val ec: ExecutionContext        = context.dispatcher
  implicit val system: ExtendedActorSystem = context.system.asInstanceOf[ExtendedActorSystem]
  implicit val mat                         = ActorMaterializer()

  private val serialization                        = SerializationExtension(system)
  protected val pluginConfig: SnapshotPluginConfig = SnapshotPluginConfig.fromConfig(config)

  protected val javaClient: JavaDynamoDbAsyncClient =
    V2DynamoDbClientBuilderUtils.setupAsync(system.dynamicAccess, pluginConfig).build()
  protected val asyncClient: DynamoDbAsyncClient = DynamoDbAsyncClient(javaClient)

  protected val snapshotDao: SnapshotDao =
    new SnapshotDaoImpl(asyncClient, serialization, pluginConfig)

  override def loadAsync(
      persistenceId: String,
      criteria: SnapshotSelectionCriteria
  ): Future[Option[SelectedSnapshot]] = {
    val result = criteria match {
      case SnapshotSelectionCriteria(Long.MaxValue, Long.MaxValue, _, _) =>
        snapshotDao.latestSnapshot(PersistenceId(persistenceId))
      case SnapshotSelectionCriteria(Long.MaxValue, maxTimestamp, _, _) =>
        snapshotDao.snapshotForMaxTimestamp(PersistenceId(persistenceId), maxTimestamp)
      case SnapshotSelectionCriteria(maxSequenceNr, Long.MaxValue, _, _) =>
        snapshotDao.snapshotForMaxSequenceNr(PersistenceId(persistenceId), SequenceNumber(maxSequenceNr))
      case SnapshotSelectionCriteria(maxSequenceNr, maxTimestamp, _, _) =>
        snapshotDao.snapshotForMaxSequenceNrAndMaxTimestamp(
          PersistenceId(persistenceId),
          SequenceNumber(maxSequenceNr),
          maxTimestamp
        )
      case _ => Source.empty
    }
    result.map(_.map(toSelectedSnapshot)).runWith(Sink.head)
  }

  override def saveAsync(metadata: SnapshotMetadata, snapshot: Any): Future[Unit] =
    snapshotDao.save(metadata, snapshot).runWith(Sink.ignore).map(_ => ())

  override def deleteAsync(metadata: SnapshotMetadata): Future[Unit] =
    snapshotDao
      .delete(PersistenceId(metadata.persistenceId), SequenceNumber(metadata.sequenceNr)).map(_ => ()).runWith(
        Sink.ignore
      ).map(_ => ())

  override def deleteAsync(persistenceId: String, criteria: SnapshotSelectionCriteria): Future[Unit] = {
    val pid = PersistenceId(persistenceId)
    criteria match {
      case SnapshotSelectionCriteria(Long.MaxValue, Long.MaxValue, _, _) =>
        snapshotDao.deleteAllSnapshots(pid).runWith(Sink.ignore).map(_ => ())
      case SnapshotSelectionCriteria(Long.MaxValue, maxTimestamp, _, _) =>
        snapshotDao.deleteUpToMaxTimestamp(pid, maxTimestamp).runWith(Sink.ignore).map(_ => ())
      case SnapshotSelectionCriteria(maxSequenceNr, Long.MaxValue, _, _) =>
        snapshotDao
          .deleteUpToMaxSequenceNr(pid, SequenceNumber(maxSequenceNr)).runWith(Sink.ignore).map(_ => ())
      case SnapshotSelectionCriteria(maxSequenceNr, maxTimestamp, _, _) =>
        snapshotDao
          .deleteUpToMaxSequenceNrAndMaxTimestamp(pid, SequenceNumber(maxSequenceNr), maxTimestamp).runWith(
            Sink.ignore
          ).map(_ => ())
      case _ => Future.successful(())
    }
  }

} 
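
One subtlety in loadAsync above: Sink.head fails its materialized future with a NoSuchElementException when the stream is empty, which is exactly what the Source.empty fallback branch would produce; Sink.headOption yields a Future[Option[...]] instead. A sketch of the difference:

import akka.actor.ActorSystem
import akka.stream.scaladsl.{Sink, Source}

import scala.concurrent.Await
import scala.concurrent.duration._
import scala.util.Try

object HeadVsHeadOptionSketch extends App {
  implicit val system: ActorSystem = ActorSystem("sketch")

  // Sink.head on an empty source fails the future...
  println(Try(Await.result(Source.empty[Int].runWith(Sink.head), 1.second)))
  // Failure(java.util.NoSuchElementException)

  // ...while Sink.headOption completes with None.
  println(Await.result(Source.empty[Int].runWith(Sink.headOption), 1.second)) // None
  system.terminate()
}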
Example 182
Source File: FoldResourceSink.scala    From gfc-aws-s3   with Apache License 2.0
package com.gilt.gfc.aws.s3.akka

import akka.stream.stage._
import akka.stream._
import akka.stream.scaladsl.Sink

import scala.concurrent.{Future, Promise}
import scala.util.Try

class FoldResourceSink[TState, TItem, Mat](
  open: () => TState,
  onEach: (TState, TItem) => (TState),
  close: TState => Mat,
  onFailure: (Throwable, TState) => Unit
) extends GraphStageWithMaterializedValue[SinkShape[TItem], Future[Mat]] {

  private val in = Inlet[TItem]("Resource.Sink")
  override val shape: Shape = SinkShape(in)

  class FoldResourceSinkLogic(materializedPromise: Promise[Mat]) extends GraphStageLogic(shape) with InHandler {
    var state: TState = _

    override def preStart(): Unit = {
      state = open()
      pull(in)
    }

    def onPush(): Unit = {
      val value = grab(in)
      try {
        state = onEach(state, value)
        pull(in)
      } catch {
        case ex: Throwable => fail(ex)
      }
    }

    override def onUpstreamFinish(): Unit = {
      val materializedValue = Try(close(state))
      materializedPromise.complete(materializedValue)
    }

    override def onUpstreamFailure(ex: Throwable): Unit = {
      fail(ex)
    }

    private def fail(ex: Throwable) = {
      onFailure(ex, state)
      materializedPromise.tryFailure(ex)
      failStage(ex)
    }

    setHandler(in, this)
  }

  def createLogicAndMaterializedValue(inheritedAttributes: Attributes): (GraphStageLogic, Future[Mat]) = {

    val completePromise = Promise[Mat]()

    val stageLogic = new FoldResourceSinkLogic(completePromise)

    stageLogic -> completePromise.future
  }
}

object FoldResourceSink {
  implicit class SinkExtension(val sink: Sink.type) extends AnyVal {

    def foldResource[TState, TItem, Mat](
      open: () => TState,
      onEach: (TState, TItem) => (TState),
      close: TState => Mat,
      onFailure: (Throwable, TState) => Unit = (ex: Throwable, f: TState) => ()
    ): FoldResourceSink[TState, TItem, Mat] = {

      new FoldResourceSink(open, onEach, close, onFailure)

    }
  }
} 
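
A usage sketch of the Sink.foldResource extension above: open a PrintWriter, append each element, and close the writer when the stream completes. The file-writing use case is illustrative, not from the project:

import java.io.PrintWriter
import java.nio.file.Files

import akka.actor.ActorSystem
import akka.stream.scaladsl.{Sink, Source}
import com.gilt.gfc.aws.s3.akka.FoldResourceSink.SinkExtension

object FoldResourceSketch extends App {
  implicit val system: ActorSystem = ActorSystem("sketch")
  import system.dispatcher

  val path = Files.createTempFile("fold-resource", ".txt")

  val sink = Sink.foldResource[PrintWriter, String, Unit](
    open = () => new PrintWriter(path.toFile),
    onEach = (w, line) => { w.println(line); w },
    close = w => w.close(),
    onFailure = (_, w) => w.close()
  )

  // Materializes a Future[Unit] that completes after close() has run.
  Source(List("a", "b", "c")).runWith(sink)
    .foreach(_ => system.terminate())
}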
Example 183
Source File: ActorRefWithAckTest.scala    From intro-to-akka-streams   with Apache License 2.0
package com.github.dnvriend.streams.sink

import akka.actor.{ Actor, ActorRef, Props }
import akka.stream.scaladsl.{ Sink, Source }
import akka.stream.testkit.TestPublisher
import akka.stream.testkit.scaladsl.TestSource
import akka.testkit.TestProbe
import com.github.dnvriend.streams.TestSpec
import scala.concurrent.duration._

import scala.reflect.ClassTag

// see: https://github.com/akka/akka/blob/4acc1cca6a27be0ff80f801de3640f91343dce94/akka-stream-tests/src/test/scala/akka/stream/scaladsl/ActorRefBackpressureSinkSpec.scala
object ActorRefWithAckTest {
  final val InitMessage = "start"
  final val CompleteMessage = "done"
  final val AckMessage = "ack"

  class Forwarder(ref: ActorRef) extends Actor {
    def receive = {
      case msg @ `InitMessage` ⇒
        sender() ! AckMessage
        ref forward msg
      case msg @ `CompleteMessage` ⇒
        ref forward msg
      case msg ⇒
        sender() ! AckMessage
        ref forward msg
    }
  }
}

class ActorRefWithAckTest extends TestSpec {
  import ActorRefWithAckTest._
  def createActor[A: ClassTag](testProbeRef: ActorRef): ActorRef =
    system.actorOf(Props(implicitly[ClassTag[A]].runtimeClass, testProbeRef))

  def withForwarder(xs: Int*)(f: TestProbe ⇒ Unit): Unit = {
    val tp = TestProbe()
    val ref = createActor[Forwarder](tp.ref)
    Source(xs.toList).runWith(Sink.actorRefWithAck(ref, InitMessage, AckMessage, CompleteMessage))
    try f(tp) finally killActors(ref)
  }

  def withTestPublisher[A](f: (TestPublisher.Probe[A], TestProbe, ActorRef) ⇒ Unit): Unit = {
    val tp = TestProbe()
    val ref = createActor[Forwarder](tp.ref)
    val pub: TestPublisher.Probe[A] = TestSource.probe[A].to(Sink.actorRefWithAck(ref, InitMessage, AckMessage, CompleteMessage)).run()
    try f(pub, tp, ref) finally killActors(ref)
  }

  it should "send the elements to the ActorRef" in {
    // which means that the forwarder actor that acts as a sink
    // will initially receive an InitMessage
    // next it will receive each `payload` element, here 1, 2 and 3,
    // finally the forwarder will receive the CompleteMessage, stating that
    // the producer completes the stream because there are no more elements (a finite stream)
    withForwarder(1, 2, 3) { tp ⇒
      tp.expectMsg(InitMessage)
      tp.expectMsg(1)
      tp.expectMsg(2)
      tp.expectMsg(3)
      tp.expectMsg(CompleteMessage)
      tp.expectNoMsg(100.millis)
    }
  }

  it should "send the elements to the ActorRef manually 1, 2 and 3" in {
    withTestPublisher[Int] { (pub, tp, _) ⇒
      pub.sendNext(1)
      tp.expectMsg(InitMessage)
      tp.expectMsg(1)

      pub.sendNext(2)
      tp.expectMsg(2)

      pub.sendNext(3)
      tp.expectMsg(3)

      pub.sendComplete()
      tp.expectMsg(CompleteMessage)
      tp.expectNoMsg(100.millis)
    }
  }

  it should "cancel stream when actor terminates" in {
    withTestPublisher[Int] { (pub, tp, ref) ⇒
      pub.sendNext(1)
      tp.expectMsg(InitMessage)
      tp.expectMsg(1)
      killActors(ref)
      pub.expectCancellation()
    }
  }
} 
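
The protocol behind Sink.actorRefWithAck is: the actor receives the init message, then one element at a time, and must answer each with the ack message before the next element is sent; the complete message ends the exchange. A minimal receiving actor (a sketch; the Forwarder above plays the same role in the tests):

import akka.actor.{Actor, ActorSystem, Props}
import akka.stream.scaladsl.{Sink, Source}

object AckSketch extends App {
  implicit val system: ActorSystem = ActorSystem("sketch")

  class Printer extends Actor {
    def receive = {
      case "start" => sender() ! "ack"               // ack the init message
      case "done"  => context.stop(self)             // stream completed
      case msg     => println(msg); sender() ! "ack" // ack every element
    }
  }

  val ref = system.actorOf(Props(new Printer))
  Source(1 to 3).runWith(Sink.actorRefWithAck(ref, "start", "ack", "done"))
}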
Example 184
Source File: ActorSubscriberTest.scala    From intro-to-akka-streams   with Apache License 2.0
package com.github.dnvriend.streams.sink

import akka.Done
import akka.actor.Actor.Receive
import akka.actor.{ ActorRef, Props }
import akka.event.LoggingReceive
import akka.stream.actor.ActorSubscriberMessage.{ OnComplete, OnError, OnNext }
import akka.stream.actor.{ ActorSubscriber, OneByOneRequestStrategy, RequestStrategy }
import akka.stream.scaladsl.{ Sink, Source }
import akka.stream.testkit.TestPublisher
import akka.stream.testkit.scaladsl.TestSource
import akka.testkit.TestProbe
import com.github.dnvriend.streams.TestSpec
import com.github.dnvriend.streams.sink.ActorSubscriberTest.TestActorSubscriber

import scala.concurrent.Future
import scala.reflect.ClassTag

object ActorSubscriberTest {
  final val OnNextMessage = "onNext"
  final val OnCompleteMessage = "onComplete"
  final val OnErrorMessage = "onError"

  class TestActorSubscriber(ref: ActorRef) extends ActorSubscriber {
    override protected val requestStrategy: RequestStrategy = OneByOneRequestStrategy
    override def receive: Receive = LoggingReceive {
      case OnNext(msg)    ⇒ ref ! OnNextMessage
      case OnComplete     ⇒ ref ! OnCompleteMessage
      case OnError(cause) ⇒ ref ! OnErrorMessage
    }
  }
}

//class ActorSubscriberTest extends TestSpec {
//  def withForwarder(xs: Int*)(f: TestProbe ⇒ Unit): Unit = {
//    val tp = TestProbe()
//    // Sink.actorSubscriber materializes the ActorRef of the subscriber actor
//    val ref = Source(xs.toList)
//      .runWith(Sink.actorSubscriber(Props(new TestActorSubscriber(tp.ref))))
//    try f(tp) finally killActors(ref)
//  }
//} 
Example 185
Source File: QueueSourceTest.scala    From intro-to-akka-streams   with Apache License 2.0
package com.github.dnvriend.streams.source

import akka.stream.OverflowStrategy
import akka.stream.scaladsl.{ Keep, Sink, Source, SourceQueueWithComplete }
import com.github.dnvriend.streams.TestSpec

import scala.collection.immutable._
import scala.concurrent.Future

class QueueSourceTest extends TestSpec {
  it should "queue a b and c and return Seq(a, b, c)" in {
    val (queue: SourceQueueWithComplete[String], xs: Future[Seq[String]]) =
      Source.queue[String](Int.MaxValue, OverflowStrategy.backpressure).toMat(Sink.seq)(Keep.both).run()

    queue.offer("a").toTry should be a 'success // offer 'a' to stream
    queue.offer("b").toTry should be a 'success // b
    queue.offer("c").toTry should be a 'success // and c

    // complete the queue
    queue.complete()
    queue.watchCompletion().toTry should be a 'success

    // get the results of the stream
    xs.futureValue shouldEqual Seq("a", "b", "c")
    xs.futureValue should not equal Seq("c", "b", "a")
  }
} 
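
The toTry and futureValue calls above come from the project's TestSpec helpers (ScalaTest's ScalaFutures plus, presumably, a Future-to-Try conversion); they are not part of the akka-streams API. Stripped of those helpers, the same round trip can be written with plain Await, roughly as follows (a sketch assuming an implicit materializer in scope):

import scala.concurrent.Await
import scala.concurrent.duration._
import akka.stream.OverflowStrategy
import akka.stream.scaladsl.{ Keep, Sink, Source }

val (queue, xs) = Source.queue[String](Int.MaxValue, OverflowStrategy.backpressure)
  .toMat(Sink.seq)(Keep.both)
  .run()

queue.offer("a")
queue.offer("b")
queue.offer("c")
queue.complete()

// Sink.seq's future completes once the queue is completed and drained.
assert(Await.result(xs, 3.seconds) == Seq("a", "b", "c"))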
Example 186
Source File: FailedSource.scala    From intro-to-akka-streams   with Apache License 2.0 5 votes vote down vote up
package com.github.dnvriend.streams.source

import akka.Done
import akka.stream.OverflowStrategy
import akka.stream.scaladsl.{ Keep, Merge, Sink, Source, SourceQueueWithComplete }
import com.github.dnvriend.streams.TestSpec

import scala.concurrent.Future
import scala.concurrent.duration._
import scala.collection.immutable._

class FailedSource extends TestSpec {
  it should "fail the stream" in {
    Source.failed[Int](new RuntimeException("test error")).testProbe { tp ⇒
      tp.request(Long.MaxValue)
      tp.expectError()
    }
  }

  it should "complete a stream" in {
    val (queue: SourceQueueWithComplete[Int], done: Future[Done]) = Source.queue[Int](1, OverflowStrategy.dropNew)
      .toMat(Sink.ignore)(Keep.both).run
    queue.complete()
    done.toTry should be a 'success
  }

  it should "complete a stream normally" in {
    val (queue: SourceQueueWithComplete[String], done: Future[Seq[String]]) = Source.queue[String](1, OverflowStrategy.dropNew).flatMapConcat {
      case "stop" ⇒ Source.failed(new RuntimeException("test error"))
      case str    ⇒ Source.single(str)
    }.toMat(Sink.seq)(Keep.both).run

    Thread.sleep(3000)
    queue.offer("foo").futureValue
    queue.offer("bar").futureValue
    queue.complete()
    done.futureValue shouldBe List("foo", "bar")
  }

  it should "force stop a stream with an error" in {
    val (queue: SourceQueueWithComplete[String], done: Future[Seq[String]]) = Source.queue[String](1, OverflowStrategy.dropNew).flatMapConcat {
      case "stop" ⇒ Source.failed(new RuntimeException("test error"))
      case str    ⇒ Source.single(str)
    }.toMat(Sink.seq)(Keep.both).run

    Thread.sleep(3000)
    queue.offer("stop").futureValue
    done.toTry should be a 'failure
  }

} 
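
A failed source does not have to end in a failed future: recover maps the error to one final element and then completes the stream, so downstream sees a normal completion. A small sketch (implicit materializer assumed):

import akka.stream.scaladsl.{ Sink, Source }

// The future succeeds with Seq(-1) instead of failing with the RuntimeException.
val recovered = Source.failed[Int](new RuntimeException("test error"))
  .recover { case _: RuntimeException => -1 }
  .runWith(Sink.seq)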
Example 187
Source File: StreamMaterializationTest.scala    From intro-to-akka-streams   with Apache License 2.0 5 votes vote down vote up
package com.github.dnvriend.streams.streammaterialization

import akka.Done
import akka.stream.scaladsl.Sink
import com.github.dnvriend.streams.TestSpec

class StreamMaterializationTest extends TestSpec {

  "Stream Materialization" should "be triggered using runFold" in {
    withIterator() { src ⇒
      src.take(10)
        .runFold(0) { (c, _) ⇒ c + 1 }
        .futureValue shouldBe 10
    }
  }

  it should "be triggered using runWith" in {
    withIterator() { src ⇒
      src.take(10)
        .runForeach(_ ⇒ ())
        .futureValue shouldBe Done
    }
  }

  it should "be triggered using runWith (which takes a sink shape)" in {
    withIterator() { src ⇒
      src.take(10)
        .runWith(Sink.foreach(_ ⇒ ()))
        .futureValue shouldBe Done
    }
  }
} 
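
All three variants materialize the same blueprint: runFold and runForeach are convenience shorthands, and runWith(sink) is itself shorthand for attaching the sink and keeping its materialized value. A sketch of the equivalence:

import akka.stream.scaladsl.{ Keep, Sink, Source }

val src = Source(1 to 10)

// These two lines produce the same stream and the same Future[Int]:
val sum1 = src.runWith(Sink.fold[Int, Int](0)(_ + _))
val sum2 = src.toMat(Sink.fold[Int, Int](0)(_ + _))(Keep.right).run()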
Example 188
Source File: FileIOTest.scala    From intro-to-akka-streams   with Apache License 2.0 5 votes vote down vote up
package com.github.dnvriend.streams.io

import akka.stream.scaladsl.{ Sink, Source }
import com.github.dnvriend.streams.TestSpec

class FileIOTest extends TestSpec {
  trait Foo
  case class ImportStarted(fileName: String, processId: String) extends Foo
  case class ImportFinished(a: String = "") extends Foo
  case class ImportFailed(t: Throwable) extends Foo
  case class NestedType2(a: String = "") extends Foo
  case class NestedType1(b: String = "") extends Foo
  case class RootType(c: String = "") extends Foo

  case class ImportFileCommand(processId: String = "abcdefg", fileName: String = "fileName.xml")
  it should "import" in {
    // import process

    def unmarshaller(fileName: String, processId: String) =
      Source(List(ImportStarted(fileName, processId), NestedType2(), NestedType1(), RootType(), ImportFinished()))

    Source(List.fill(1)(ImportFileCommand()))
      .flatMapConcat { cmd ⇒
        unmarshaller(cmd.fileName, cmd.processId)
          .map {
            //            case _: NestedType2 ⇒ throw new RuntimeException("error")
            case e ⇒ e
          }
      }
      .recover {
        case t: Throwable ⇒ ImportFailed(t)
      }
      .runWith(Sink.seq).futureValue should matchPattern {
        case Seq(ImportStarted("fileName.xml", "abcdefg"), NestedType2(_), ImportFailed(_))                                ⇒
        case Seq(ImportStarted("fileName.xml", "abcdefg"), NestedType2(_), NestedType1(_), RootType(_), ImportFinished(_)) ⇒
      }
  }
} 
Example 189
Source File: MessageSink.scala    From parquet4s   with MIT License 5 votes vote down vote up
package com.github.mjakubowski84.parquet4s.indefinite

import java.sql.Timestamp
import java.util.UUID

import akka.Done
import akka.kafka.CommitterSettings
import akka.kafka.ConsumerMessage.CommittableOffsetBatch
import akka.kafka.scaladsl.Committer
import akka.stream.scaladsl.{Flow, Keep, Sink}
import com.github.mjakubowski84.parquet4s.{ChunkPathBuilder, ParquetStreams, ParquetWriter}
import com.google.common.io.Files
import org.apache.hadoop.fs.Path
import org.apache.parquet.hadoop.metadata.CompressionCodecName

import scala.concurrent.Future
import scala.concurrent.duration._

object MessageSink {

  case class Data(timestamp: Timestamp, word: String)

  val MaxChunkSize: Int = 128
  val ChunkWriteTimeWindow: FiniteDuration = 10.seconds
  val WriteDirectoryName: String = "messages"

}

trait MessageSink {

  this: Akka =>

  import MessageSink._
  import MessageSource._

  protected val baseWritePath: String = new Path(Files.createTempDir().getAbsolutePath, WriteDirectoryName).toString

  private val writerOptions = ParquetWriter.Options(compressionCodecName = CompressionCodecName.SNAPPY)

  private lazy val committerSink = Flow.apply[Seq[Message]].map { messages =>
    CommittableOffsetBatch(messages.map(_.committableOffset))
  }.toMat(Committer.sink(CommitterSettings(system)))(Keep.right)

  def chunkPath: ChunkPathBuilder[Message] = {
    case (basePath, chunk) =>
      val lastElementDateTime = new Timestamp(chunk.last.record.timestamp()).toLocalDateTime
      val year = lastElementDateTime.getYear
      val month = lastElementDateTime.getMonthValue
      val day = lastElementDateTime.getDayOfMonth
      val uuid = UUID.randomUUID()

      basePath.suffix(s"/$year/$month/$day/part-$uuid.parquet")
  }

  lazy val messageSink: Sink[Message, Future[Done]] = ParquetStreams.toParquetIndefinite(
    path = baseWritePath,
    maxChunkSize = MaxChunkSize,
    chunkWriteTimeWindow = ChunkWriteTimeWindow,
    buildChunkPath = chunkPath,
    preWriteTransformation = { message: Message =>
      Data(
        timestamp = new Timestamp(message.record.timestamp()),
        word = message.record.value()
      )
    },
    postWriteSink = committerSink,
    options = writerOptions
  )

} 
Example 190
Source File: WriteAndReadFilteredAkkaApp.scala    From parquet4s   with MIT License 5 votes vote down vote up
package com.github.mjakubowski84.parquet4s.akka

import akka.actor.ActorSystem
import akka.stream.scaladsl.{Sink, Source}
import akka.stream.{ActorMaterializer, Materializer}
import com.github.mjakubowski84.parquet4s.{Col, ParquetReader, ParquetStreams}
import com.google.common.io.Files

import scala.concurrent.Future
import scala.util.Random

object WriteAndReadFilteredAkkaApp extends App {

  object Dict {
    val A = "A"
    val B = "B"
    val C = "C"
    val D = "D"

    val values: List[String] = List(A, B, C, D)
    def random: String = values(Random.nextInt(values.length))
  }

  case class Data(id: Int, dict: String)

  val count = 100
  val data = (1 to count).map { i => Data(id = i, dict = Dict.random) }
  val path = Files.createTempDir().getAbsolutePath

  implicit val system: ActorSystem = ActorSystem()
  implicit val materializer: Materializer = ActorMaterializer()
  import system.dispatcher

  val options = ParquetReader.Options()
  val printingSink = Sink.foreach(println)

  for {
    // write
    _ <- Source(data).runWith(ParquetStreams.toParquetSingleFile(s"$path/data.parquet"))
    // read filtered
    _ <- Future(println("""dict == "A""""))
    _ <- ParquetStreams.fromParquet[Data](path, options = options, filter = Col("dict") === Dict.A).runWith(printingSink)
    _ <- Future(println("""id >= 20 && id < 40"""))
    _ <- ParquetStreams.fromParquet[Data](path, options = options, filter = Col("id") >= 20 && Col("id") < 40).runWith(printingSink)
    // finish
    _ <- system.terminate()
  } yield ()

} 
Example 191
Source File: WriteAndReadCustomTypeAkkaApp.scala    From parquet4s   with MIT License 5 votes vote down vote up
package com.github.mjakubowski84.parquet4s.akka

import akka.actor.ActorSystem
import akka.stream.scaladsl.{Sink, Source}
import akka.stream.{ActorMaterializer, Materializer}
import com.github.mjakubowski84.parquet4s.CustomType._
import com.github.mjakubowski84.parquet4s.ParquetStreams
import com.google.common.io.Files

object WriteAndReadCustomTypeAkkaApp extends App {

  object Data {
    def generate(count: Int): Iterator[Data] = Iterator.range(1, count).map { i => Data(id = i, dict = Dict.random) }
  }
  case class Data(id: Long, dict: Dict.Type)

  val data = () => Data.generate(count = 100)
  val path = Files.createTempDir().getAbsolutePath

  implicit val system: ActorSystem = ActorSystem()
  implicit val materializer: Materializer = ActorMaterializer()
  import system.dispatcher

  for {
    // write
    _ <- Source.fromIterator(data).runWith(ParquetStreams.toParquetSingleFile(s"$path/data.parquet"))
    // read
    // hint: you can filter by dict using string value, for example: filter = Col("dict") === "A"
    _ <- ParquetStreams.fromParquet[Data](path).runWith(Sink.foreach(println))
    // finish
    _ <- system.terminate()
  } yield ()

} 
Example 192
Source File: WriteAndReadAkkaApp.scala    From parquet4s   with MIT License 5 votes vote down vote up
package com.github.mjakubowski84.parquet4s.akka

import akka.actor.ActorSystem
import akka.stream.scaladsl.{Sink, Source}
import akka.stream.{ActorMaterializer, Materializer}
import com.github.mjakubowski84.parquet4s.ParquetStreams
import com.google.common.io.Files

import scala.util.Random

object WriteAndReadAkkaApp extends App {

  case class Data(id: Int, text: String)

  val count = 100
  val data = (1 to count).map { i => Data(id = i, text = Random.nextString(4)) }
  val path = Files.createTempDir().getAbsolutePath

  implicit val system: ActorSystem = ActorSystem()
  implicit val materializer: Materializer = ActorMaterializer()
  import system.dispatcher

  for {
    // write
    _ <- Source(data).runWith(ParquetStreams.toParquetSingleFile(s"$path/data.parquet"))
    // read
    _ <- ParquetStreams.fromParquet[Data](path).runWith(Sink.foreach(println))
    // finish
    _ <- system.terminate()
  } yield ()

} 
Example 193
Source File: UnorderedParallelParquetSink.scala    From parquet4s   with MIT License 5 votes vote down vote up
package com.github.mjakubowski84.parquet4s

import java.util.UUID

import akka.Done
import akka.stream.scaladsl.{Flow, Keep, Sink}
import org.apache.hadoop.fs.Path
import org.apache.parquet.schema.MessageType
import org.slf4j.{Logger, LoggerFactory}

import scala.concurrent.Future

private[parquet4s] object UnorderedParallelParquetSink extends IOOps {

  protected val logger: Logger = LoggerFactory.getLogger(this.getClass)

  def apply[T: ParquetRecordEncoder : ParquetSchemaResolver](path: Path,
                                                             parallelism: Int,
                                                             options: ParquetWriter.Options = ParquetWriter.Options()
                                                            ): Sink[T, Future[Done]] = {
    val schema = ParquetSchemaResolver.resolveSchema[T]
    val valueCodecConfiguration = options.toValueCodecConfiguration

    validateWritePath(path, options)

    def encode(data: T): RowParquetRecord = ParquetRecordEncoder.encode[T](data, valueCodecConfiguration)

    Flow[T]
      .zipWithIndex
      .groupBy(parallelism, elemAndIndex => Math.floorMod(elemAndIndex._2, parallelism))
      .map(elemAndIndex => encode(elemAndIndex._1))
      .fold(UnorderedChunk(path, schema, options))(_.write(_))
      .map(_.close())
      .async
      .mergeSubstreamsWithParallelism(parallelism)
      .toMat(Sink.ignore)(Keep.right)
  }

  private trait UnorderedChunk {

    def write(record: RowParquetRecord): UnorderedChunk

    def close(): Unit

  }

  private object UnorderedChunk {

    def apply(basePath: Path,
              schema: MessageType,
              options: ParquetWriter.Options): UnorderedChunk = new PendingUnorderedChunk(basePath, schema, options)

    private[UnorderedChunk] class PendingUnorderedChunk(basePath: Path,
                                        schema: MessageType,
                                        options: ParquetWriter.Options) extends UnorderedChunk {
      override def write(record: RowParquetRecord): UnorderedChunk = {
        val chunkPath = Path.mergePaths(basePath, new Path(s"/part-${UUID.randomUUID()}.parquet"))
        val writer = ParquetWriter.internalWriter(chunkPath, schema, options)
        writer.write(record)
        new StartedUnorderedChunk(chunkPath, writer, acc = 1)
      }

      override def close(): Unit = ()
    }

    private[UnorderedChunk] class StartedUnorderedChunk(chunkPath: Path,
                                        writer: ParquetWriter.InternalWriter,
                                        acc: Long
                                       ) extends UnorderedChunk {
      override def write(record: RowParquetRecord): UnorderedChunk = {
        writer.write(record)
        new StartedUnorderedChunk(chunkPath, writer, acc = acc + 1)
      }

      override def close(): Unit = {
        if (logger.isDebugEnabled) logger.debug(s"$acc records were successfully written to $chunkPath")
        writer.close()
      }
    }
  }

} 
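
The parallelism above comes from the groupBy stage: zipWithIndex pairs every element with its position, and Math.floorMod(index, parallelism) fans elements out round-robin over the substreams, each of which folds records into its own file. The partitioning step in isolation looks roughly like this (parallelism of 4 assumed):

import akka.stream.scaladsl.Flow

// Substream i receives the elements whose index satisfies index % 4 == i.
val partitioned = Flow[String].zipWithIndex
  .groupBy(4, elemAndIndex => Math.floorMod(elemAndIndex._2, 4L))
  .map(_._1)        // drop the index again
  .mergeSubstreams  // merged back here only for illustration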
Example 194
Source File: IndefiniteStreamParquetSink.scala    From parquet4s   with MIT License 5 votes vote down vote up
package com.github.mjakubowski84.parquet4s
import akka.stream.FlowShape
import akka.stream.scaladsl.{Broadcast, Flow, GraphDSL, Keep, Sink, ZipWith}
import com.github.mjakubowski84.parquet4s.ParquetWriter.ParquetWriterFactory
import org.apache.hadoop.fs.Path
import org.slf4j.{Logger, LoggerFactory}

import scala.concurrent.duration.FiniteDuration


private[parquet4s] object IndefiniteStreamParquetSink extends IOOps {

  protected val logger: Logger = LoggerFactory.getLogger(this.getClass)

  def apply[In, ToWrite: ParquetWriterFactory, Mat](path: Path,
                                                    maxChunkSize: Int,
                                                    chunkWriteTimeWindow: FiniteDuration,
                                                    buildChunkPath: ChunkPathBuilder[In] = ChunkPathBuilder.default,
                                                    preWriteTransformation: In => ToWrite = identity[In] _,
                                                    postWriteSink: Sink[Seq[In], Mat] = Sink.ignore,
                                                    options: ParquetWriter.Options = ParquetWriter.Options()
                                            ): Sink[In, Mat] = {
    validateWritePath(path, options)

    val internalFlow = Flow.fromGraph(GraphDSL.create() { implicit b =>
      import GraphDSL.Implicits._
    
      val inChunkFlow = b.add(Flow[In].groupedWithin(maxChunkSize, chunkWriteTimeWindow))
      val broadcastChunks = b.add(Broadcast[Seq[In]](outputPorts = 2))
      val writeFlow = Flow[Seq[In]].map { chunk =>
        val toWrite = chunk.map(preWriteTransformation)
        val chunkPath = buildChunkPath(path, chunk)
        if (logger.isDebugEnabled()) logger.debug(s"Writing ${toWrite.size} records to $chunkPath")
        ParquetWriter.writeAndClose(chunkPath.toString, toWrite, options)
      }
      val zip = b.add(ZipWith[Seq[In], Unit, Seq[In]]((chunk, _) => chunk))
      
      inChunkFlow ~> broadcastChunks ~> writeFlow ~> zip.in1
                     broadcastChunks ~> zip.in0

      FlowShape(inChunkFlow.in, zip.out)               
    })

    internalFlow.toMat(postWriteSink)(Keep.right)
  }

} 
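
The Broadcast/ZipWith pair above is a general "run a side effect, then pass the original element on" shape: the zip emits a chunk only after both the pass-through copy and the write result have arrived, so postWriteSink never sees a chunk before it is on disk. The same shape reduced to its essentials, as a sketch:

import akka.NotUsed
import akka.stream.FlowShape
import akka.stream.scaladsl.{ Broadcast, Flow, GraphDSL, ZipWith }

def passThroughAfter[A](sideEffect: A => Unit): Flow[A, A, NotUsed] =
  Flow.fromGraph(GraphDSL.create() { implicit b =>
    import GraphDSL.Implicits._

    val broadcast = b.add(Broadcast[A](outputPorts = 2))
    val doIt      = Flow[A].map(a => sideEffect(a))       // Unit result, used only for ordering
    val zip       = b.add(ZipWith[A, Unit, A]((a, _) => a))

    broadcast ~> doIt ~> zip.in1
    broadcast        ~>  zip.in0

    FlowShape(broadcast.in, zip.out)
  })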
Example 195
Source File: SingleFileParquetSink.scala    From parquet4s   with MIT License 5 votes vote down vote up
package com.github.mjakubowski84.parquet4s

import akka.Done
import akka.stream.scaladsl.{Flow, Keep, Sink}
import org.apache.hadoop.fs.Path
import org.slf4j.{Logger, LoggerFactory}

import scala.concurrent.Future

private[parquet4s] object SingleFileParquetSink {

  protected val logger: Logger = LoggerFactory.getLogger(this.getClass)

  def apply[T: ParquetRecordEncoder : ParquetSchemaResolver](path: Path,
                                                             options: ParquetWriter.Options = ParquetWriter.Options()
                                                            ): Sink[T, Future[Done]] = {
    val schema = ParquetSchemaResolver.resolveSchema[T]
    val writer = ParquetWriter.internalWriter(path, schema, options)
    val valueCodecConfiguration = options.toValueCodecConfiguration
    val isDebugEnabled = logger.isDebugEnabled

    def encode(data: T): RowParquetRecord = ParquetRecordEncoder.encode[T](data, valueCodecConfiguration)

    Flow[T]
      .map(encode)
      .fold(0) { case (acc, record) => writer.write(record); acc + 1}
      .map { count =>
        if (isDebugEnabled) logger.debug(s"$count records were successfully written to $path")
        writer.close()
      }
      .toMat(Sink.ignore)(Keep.right)
  }

} 
Example 196
Source File: SequentialFileSplittingParquetSink.scala    From parquet4s   with MIT License 5 votes vote down vote up
package com.github.mjakubowski84.parquet4s

import akka.Done
import akka.stream.scaladsl.{Flow, Keep, Sink}
import org.apache.hadoop.fs.Path
import org.apache.parquet.schema.MessageType
import org.slf4j.{Logger, LoggerFactory}

import scala.concurrent.Future

private[parquet4s] object SequentialFileSplittingParquetSink extends IOOps {

  protected val logger: Logger = LoggerFactory.getLogger(this.getClass)

  def apply[T: ParquetRecordEncoder : ParquetSchemaResolver](path: Path,
                                                             maxRecordsPerFile: Long,
                                                             options: ParquetWriter.Options = ParquetWriter.Options()
                                                            ): Sink[T, Future[Done]] = {
    val schema = ParquetSchemaResolver.resolveSchema[T]
    val valueCodecConfiguration = options.toValueCodecConfiguration

    validateWritePath(path, options)

    def encode(data: T): RowParquetRecord = ParquetRecordEncoder.encode[T](data, valueCodecConfiguration)

    Flow[T]
      .zipWithIndex
      .map { case (elem, index) => OrderedChunkElem(encode(elem), index) }
      .fold(OrderedChunk(path, schema, maxRecordsPerFile, options))(_.write(_))
      .map(_.close())
      .toMat(Sink.ignore)(Keep.right)
  }

  private case class OrderedChunkElem(record: RowParquetRecord, index: Long) {
    def isSplit(maxRecordsPerFile: Long): Boolean = index % maxRecordsPerFile == 0
  }

  private trait OrderedChunk {
    def write(elem: OrderedChunkElem): OrderedChunk
    def close(): Unit
  }

  private object OrderedChunk {

    def apply(basePath: Path,
              schema: MessageType,
              maxRecordsPerFile: Long,
              options: ParquetWriter.Options): OrderedChunk = new PendingOrderedChunk(basePath, schema, maxRecordsPerFile, options)


    private[OrderedChunk] class PendingOrderedChunk(basePath: Path,
                                                    schema: MessageType,
                                                    maxRecordsPerFile: Long,
                                                    options: ParquetWriter.Options) extends OrderedChunk {
      override def write(elem: OrderedChunkElem): OrderedChunk = {
        val chunkNumber: Int = Math.floorDiv(elem.index, maxRecordsPerFile).toInt
        val chunkPath = Path.mergePaths(basePath, new Path(chunkFileName(chunkNumber)))
        val writer = ParquetWriter.internalWriter(chunkPath, schema, options)
        writer.write(elem.record)
        new StartedOrderedChunk(basePath, schema, maxRecordsPerFile, options, chunkPath, writer, acc = 1)
      }

      override def close(): Unit = ()

      private def chunkFileName(chunkNumber: Int): String = f"/part-$chunkNumber%05d.parquet"
    }

    private[OrderedChunk] class StartedOrderedChunk(basePath: Path,
                                                    schema: MessageType,
                                                    maxRecordsPerFile: Long,
                                                    options: ParquetWriter.Options,
                                                    chunkPath: Path,
                                                    writer: ParquetWriter.InternalWriter,
                                                    acc: Long) extends OrderedChunk {
      override def write(elem: OrderedChunkElem): OrderedChunk = {
        if (elem.isSplit(maxRecordsPerFile)) {
          this.close()
          new PendingOrderedChunk(basePath, schema, maxRecordsPerFile, options).write(elem)
        } else {
          writer.write(elem.record)
          new StartedOrderedChunk(basePath, schema, maxRecordsPerFile, options, chunkPath, writer, acc = acc + 1)
        }
      }

      override def close(): Unit = {
        if (logger.isDebugEnabled) logger.debug(s"$acc records were successfully written to $chunkPath")
        writer.close()
      }
    }
  }

} 
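
File boundaries are decided purely by arithmetic on the element index: OrderedChunkElem.isSplit fires whenever index % maxRecordsPerFile == 0, and Math.floorDiv(index, maxRecordsPerFile) names the file. A quick check of that arithmetic with maxRecordsPerFile = 3:

val maxRecordsPerFile = 3L
val chunkNumbers = (0L until 10L).map(i => Math.floorDiv(i, maxRecordsPerFile))
// chunkNumbers == Vector(0, 0, 0, 1, 1, 1, 2, 2, 2, 3)
// -> ten records end up in four files, part-00000 through part-00003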
Example 197
Source File: ReliableHttpProxyFactory.scala    From reliable-http-client   with Apache License 2.0 5 votes vote down vote up
package rhttpc.akkahttp.proxy

import akka.NotUsed
import akka.actor._
import akka.http.scaladsl.Http
import akka.http.scaladsl.model.{HttpEntity, HttpRequest, HttpResponse}
import akka.stream.Materializer
import akka.stream.scaladsl.{Flow, Sink, Source}
import org.slf4j.LoggerFactory
import rhttpc.client.protocol.{Correlated, Request}
import rhttpc.client.proxy._

import scala.concurrent.duration._
import scala.concurrent.{ExecutionContext, Future}
import scala.util.control.NonFatal
import scala.util.{Failure, Success}

object ReliableHttpProxyFactory {

  private lazy val logger = LoggerFactory.getLogger(getClass)

  def send(successRecognizer: SuccessHttpResponseRecognizer, batchSize: Int, parallelConsumers: Int)
          (request: Request[HttpRequest])
          (implicit actorSystem: ActorSystem, materialize: Materializer): Future[HttpResponse] = {
    import actorSystem.dispatcher
    send(prepareHttpFlow(batchSize * parallelConsumers), successRecognizer)(request.correlated)
  }

  private def prepareHttpFlow(parallelism: Int)
                             (implicit actorSystem: ActorSystem, materialize: Materializer):
    Flow[(HttpRequest, String), HttpResponse, NotUsed] = {

    import actorSystem.dispatcher
    Http().superPool[String]().mapAsync(parallelism) {
      case (tryResponse, id) =>
        tryResponse match {
          case Success(response) =>
            response.toStrict(1.minute)
          case Failure(ex) =>
            Future.failed(ex)
        }
    }
  }

  private def send(httpFlow: Flow[(HttpRequest, String), HttpResponse, Any], successRecognizer: SuccessHttpResponseRecognizer)
                  (corr: Correlated[HttpRequest])
                  (implicit ec: ExecutionContext, materialize: Materializer): Future[HttpResponse] = {
    import collection.JavaConverters._
    logger.debug(
      s"""Sending request for ${corr.correlationId} to ${corr.msg.getUri()}. Headers:
         |${corr.msg.getHeaders().asScala.toSeq.map(h => "  " + h.name() + ": " + h.value()).mkString("\n")}
         |Body:
         |${corr.msg.entity.asInstanceOf[HttpEntity.Strict].data.utf8String}""".stripMargin
    )
    val logResp = logResponse(corr) _
    val responseFuture = Source.single((corr.msg, corr.correlationId)).via(httpFlow).runWith(Sink.head)
    responseFuture.onComplete {
      case Failure(ex) =>
        logger.error(s"Got failure for ${corr.correlationId} to ${corr.msg.getUri()}", ex)
      case Success(_) =>
    }
    for {
      response <- responseFuture
      transformedToFailureIfNeed <- {
        if (successRecognizer.isSuccess(response)) {
          logResp(response, "success response")
          Future.successful(response)
        } else {
          logResp(response, "response recognized as non-success")
          Future.failed(NonSuccessResponse)
        }
      }
    } yield transformedToFailureIfNeed
  }

  private def logResponse(corr: Correlated[HttpRequest])
                         (response: HttpResponse, additionalInfo: String): Unit = {
    import collection.JavaConverters._
    logger.debug(
      s"""Got $additionalInfo for ${corr.correlationId} to ${corr.msg.getUri()}. Status: ${response.status.value}. Headers:
         |${response.getHeaders().asScala.toSeq.map(h => "  " + h.name() + ": " + h.value()).mkString("\n")}
         |Body:
         |${response.entity.asInstanceOf[HttpEntity.Strict].data.utf8String}""".stripMargin
    )
  }

} 
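
For reference, Http().superPool[T]() keeps one connection pool per target host and carries an arbitrary correlation value of type T alongside each request, emitting (Try[HttpResponse], T) pairs; that is why the code above pairs each HttpRequest with its correlationId. A minimal stand-alone sketch (the URL is illustrative):

import akka.actor.ActorSystem
import akka.http.scaladsl.Http
import akka.http.scaladsl.model.HttpRequest
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.{ Sink, Source }

object SuperPoolSketch extends App {
  implicit val system: ActorSystem = ActorSystem("super-pool-sketch")
  implicit val mat: ActorMaterializer = ActorMaterializer()

  // The request URI must be absolute so the pool knows which host to connect to.
  Source.single(HttpRequest(uri = "http://example.com/") -> "correlation-1")
    .via(Http().superPool[String]())
    .runWith(Sink.foreach { case (tryResponse, id) => println(s"$id -> $tryResponse") })
}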
Example 198
Source File: HTTPResponseStream.scala    From akka_streams_tutorial   with MIT License 5 votes vote down vote up
package akkahttp

import akka.NotUsed
import akka.actor.ActorSystem
import akka.http.scaladsl.Http
import akka.http.scaladsl.common.{EntityStreamingSupport, JsonEntityStreamingSupport}
import akka.http.scaladsl.marshallers.sprayjson.SprayJsonSupport
import akka.http.scaladsl.model._
import akka.http.scaladsl.server.Directives.{complete, get, logRequestResult, path, _}
import akka.http.scaladsl.server.Route
import akka.http.scaladsl.unmarshalling.Unmarshal
import akka.stream.ThrottleMode
import akka.stream.scaladsl.{Flow, Sink, Source}
import com.typesafe.config.ConfigFactory
import spray.json.{ DefaultJsonProtocol, RootJsonFormat }

import scala.concurrent.Future
import scala.concurrent.duration._
import scala.util.{Failure, Success}


object HTTPResponseStream extends App with DefaultJsonProtocol with SprayJsonSupport {
  implicit val system = ActorSystem("HTTPResponseStream")
  implicit val executionContext = system.dispatcher

  //JSON Protocol and streaming support
  final case class ExamplePerson(name: String)

  implicit def examplePersonFormat: RootJsonFormat[ExamplePerson] = jsonFormat1(ExamplePerson.apply)

  implicit val jsonStreamingSupport: JsonEntityStreamingSupport = EntityStreamingSupport.json()

  val (address, port) = ("127.0.0.1", 8080)
  server(address, port)
  client(address, port)

  def client(address: String, port: Int): Unit = {
    val requestParallelism = ConfigFactory.load.getInt("akka.http.host-connection-pool.max-connections")

    val requests: Source[HttpRequest, NotUsed] = Source
      .fromIterator(() =>
        Range(0, requestParallelism).map(i => HttpRequest(uri = Uri(s"http://$address:$port/download/$i"))).iterator
      )

    // Run singleRequest and completely consume response elements
    def runRequestDownload(req: HttpRequest) =
      Http()
        .singleRequest(req)
        .flatMap { response =>
          val unmarshalled: Future[Source[ExamplePerson, NotUsed]] = Unmarshal(response).to[Source[ExamplePerson, NotUsed]]
          val source: Source[ExamplePerson, Future[NotUsed]] = Source.futureSource(unmarshalled)
          source.via(processorFlow).runWith(printSink)
        }

    requests
      .mapAsync(requestParallelism)(runRequestDownload)
      .runWith(Sink.ignore)
  }


  val printSink = Sink.foreach[ExamplePerson] { each: ExamplePerson => println(s"Client processed element: $each") }

  val processorFlow: Flow[ExamplePerson, ExamplePerson, NotUsed] = Flow[ExamplePerson].map {
    each: ExamplePerson => {
      //println(s"Process: $each")
      each
    }
  }


  def server(address: String, port: Int): Unit = {

    def routes: Route = logRequestResult("httpecho") {
      path("download" / Segment) { id: String =>
        get {
          println(s"Server received request with id: $id, stream response...")
          extractRequest { r: HttpRequest =>
            val finishedWriting = r.discardEntityBytes().future
            onComplete(finishedWriting) { done =>
              //Limit response by appending eg .take(5)
              val responseStream: Stream[ExamplePerson] = Stream.continually(ExamplePerson(s"request:$id"))
              complete(Source(responseStream).throttle(1, 1.second, 1, ThrottleMode.shaping))
            }
          }
        }
      }
    }

    val bindingFuture = Http().bindAndHandle(routes, address, port)
    bindingFuture.onComplete {
      case Success(b) =>
        println("Server started, listening on: " + b.localAddress)
      case Failure(e) =>
        println(s"Server could not bind to: $address:$port. Exception message: ${e.getMessage}")
        system.terminate()
    }
  }
} 
Example 199
Source File: PublishToSourceQueueFromMultipleThreads.scala    From akka_streams_tutorial   with MIT License 5 votes vote down vote up
package sample.stream

import akka.actor.ActorSystem
import akka.stream.Supervision.Decider
import akka.stream._
import akka.stream.scaladsl.{Flow, Sink, Source, SourceQueueWithComplete}
import akka.{Done, NotUsed}
import org.slf4j.{Logger, LoggerFactory}

import scala.concurrent.Future
import scala.concurrent.duration._
import scala.util.{Failure, Success}


object PublishToSourceQueueFromMultipleThreads extends App {
  val logger: Logger = LoggerFactory.getLogger(this.getClass)
  implicit val system = ActorSystem("PublishToSourceQueueFromMultipleThreads")
  implicit val ec = system.dispatcher

  val bufferSize = 100
  // As of Akka 2.6.x there is a thread-safe implementation of SourceQueue
  val maxConcurrentOffers = 1000
  val numberOfPublishingClients = 1000

  val slowSink: Sink[Seq[Int], NotUsed] =
    Flow[Seq[Int]]
      .delay(2.seconds, DelayOverflowStrategy.backpressure)
      .to(Sink.foreach(e => logger.info(s"Reached sink: $e")))

  val sourceQueue: SourceQueueWithComplete[Int] = Source
    .queue[Int](bufferSize, OverflowStrategy.backpressure, maxConcurrentOffers)
    .groupedWithin(10, 1.seconds)
    .to(slowSink)
    .run

  val doneConsuming: Future[Done] = sourceQueue.watchCompletion()
  signalWhen(doneConsuming, "consuming") //never completes

  simulatePublishingFromMultipleThreads()

  // Before 2.6.x a stream had to be used to throttle and control the backpressure
  //simulatePublishingClientsFromStream()

  // Decide at the stream level, because OverflowStrategy.backpressure
  // on the sourceQueue causes an IllegalStateException when offer is called
  // while a previous offer is still pending. Handling this at the stream level
  // allows the stream to be restarted
  private def simulatePublishingClientsFromStream() = {

    val decider: Decider = {
      case _: IllegalStateException => println("Got backpressure signal for offered element, restart..."); Supervision.Restart
      case _ => Supervision.Stop
    }

    val donePublishing: Future[Done] = Source(1 to numberOfPublishingClients)
      .mapAsync(10)(offerToSourceQueue) //throttle
      .withAttributes(ActorAttributes.supervisionStrategy(decider))
      .runWith(Sink.ignore)
    signalWhen(donePublishing, "publishing")
  }

  private def simulatePublishingFromMultipleThreads() = (1 to numberOfPublishingClients).par.foreach(offerToSourceQueue)

  private def offerToSourceQueue(each: Int) = {
    sourceQueue.offer(each).map {
      case QueueOfferResult.Enqueued => logger.info(s"enqueued $each")
      case QueueOfferResult.Dropped => logger.info(s"dropped $each")
      case QueueOfferResult.Failure(ex) => logger.info(s"Offer failed: $ex")
      case QueueOfferResult.QueueClosed => logger.info("Source Queue closed")
    }
  }

  private def signalWhen(done: Future[Done], operation: String) = {
    done.onComplete {
      case Success(b) =>
        logger.info(s"Finished: $operation")
      case Failure(e) =>
        logger.info(s"Failure: $e About to terminate...")
        system.terminate()
    }
  }
} 
Example 200
Source File: PublishToBlockingResource.scala    From akka_streams_tutorial   with MIT License 5 votes vote down vote up
package sample.stream

import java.util.concurrent.{ArrayBlockingQueue, BlockingQueue}

import akka.NotUsed
import akka.actor.ActorSystem
import akka.stream.DelayOverflowStrategy
import akka.stream.scaladsl.{Flow, Sink, Source}

import scala.concurrent.duration._
import scala.util.Failure



object PublishToBlockingResource extends App {
  implicit val system = ActorSystem("PublishToBlockingResource")
  implicit val ec = system.dispatcher

  val slowSink: Sink[Seq[Int], NotUsed] =
    Flow[Seq[Int]]
      .delay(1.seconds, DelayOverflowStrategy.backpressure)
      .to(Sink.foreach(e => println(s"Reached sink: $e")))

  val blockingResource: BlockingQueue[Int] = new ArrayBlockingQueue[Int](100)

  //Start a new `Source` from some (third party) blocking resource which can be opened, read and closed
  val source: Source[Int, NotUsed] =
    Source.unfoldResource[Int, BlockingQueue[Int]](
      () => blockingResource,                   //open
      (q: BlockingQueue[Int]) => Some(q.take()),//read
      (_: BlockingQueue[Int]) => {})            //close

  val done = source
    .groupedWithin(10, 2.seconds)
    .watchTermination()((_, done) => done.onComplete {
      case Failure(err) =>
        println(s"Flow failed: $err")
      case each => println(s"Server flow terminated: $each")
    })
    .runWith(slowSink)

  //simulate n processes that publish to the queue in a blocking fashion
  (1 to 1000).par.foreach(value => blockingResource.put(value))
}
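
unfoldResource completes the stream when the read function returns None, which the example above never does (q.take() blocks until an element arrives). A variant that can shut down cleanly uses a sentinel element; the -1 poison pill below is an assumption for illustration, not part of the original:

import java.util.concurrent.{ ArrayBlockingQueue, BlockingQueue }

import akka.NotUsed
import akka.stream.scaladsl.Source

val queue: BlockingQueue[Int] = new ArrayBlockingQueue[Int](100)

// Returning None completes the source, so publishing -1 ends the stream.
val finiteSource: Source[Int, NotUsed] =
  Source.unfoldResource[Int, BlockingQueue[Int]](
    () => queue,                          // open
    q => Some(q.take()).filter(_ != -1),  // read; None on the poison pill
    _ => ())                              // close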