akka.actor.ActorSystem Scala Examples

The following examples show how to use akka.actor.ActorSystem. The original project and source file are noted above each example.
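Most examples below follow the same lifecycle: create an ActorSystem, run actors or streams on it, then terminate it so the JVM can exit. Here is a minimal sketch of that pattern (the object and system names are arbitrary placeholders, not taken from any of the projects below):

import akka.actor.ActorSystem

object MinimalLifecycle extends App {
  // Creating the system starts its dispatcher threads; they keep the JVM alive
  implicit val system: ActorSystem = ActorSystem("minimal")

  // ... create actors with system.actorOf(...) or run streams here ...

  // Explicitly shut the system down so the JVM can exit
  system.terminate()
}
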
Example 1
Source File: KafkaTopicInfo.scala    From matcher   with MIT License
package tools

import java.io.File

import akka.actor.ActorSystem
import com.typesafe.config.ConfigFactory
import com.wavesplatform.dex.queue.KafkaMatcherQueue.eventDeserializer
import com.wavesplatform.dex.queue.{QueueEvent, QueueEventWithMeta}
import com.wavesplatform.dex.settings.toConfigOps
import org.apache.kafka.clients.consumer.KafkaConsumer
import org.apache.kafka.common.TopicPartition
import org.apache.kafka.common.serialization.StringDeserializer

import scala.collection.JavaConverters._
import scala.concurrent.duration.DurationInt

object KafkaTopicInfo extends App {
  implicit val system: ActorSystem = ActorSystem()

  val configFile = new File(args(0))
  val topic      = args(1)
  val from       = args(2).toLong
  val max        = args(3).toInt

  println(s"""configFile: ${configFile.getAbsolutePath}
             |topic: $topic
             |from: $from
             |max: $max""".stripMargin)

  val requestTimeout = java.time.Duration.ofNanos(5.seconds.toNanos)

  val config = ConfigFactory
    .parseString("""waves.dex.events-queue.kafka.consumer.client {
                   |  client.id = "kafka-topics-info"
                   |  enable.auto.commit = false
                   |  auto.offset.reset = earliest
                   |}
                   |
                   |""".stripMargin)
    .withFallback {
      ConfigFactory
        .parseFile(configFile)
        .withFallback(ConfigFactory.defaultApplication())
        .withFallback(ConfigFactory.defaultReference())
        .resolve()
        .getConfig("waves.dex.events-queue.kafka")
    }

  val consumer = new KafkaConsumer[String, QueueEvent](
    config.getConfig("waves.dex.events-queue.kafka.consumer.client").toProperties,
    new StringDeserializer,
    eventDeserializer
  )

  try {
    val topicPartition  = new TopicPartition(topic, 0)
    val topicPartitions = java.util.Collections.singletonList(topicPartition)
    consumer.assign(topicPartitions)

    {
      val r = consumer.partitionsFor(topic, requestTimeout)
      println(s"Partitions:\n${r.asScala.mkString("\n")}")
    }

    {
      val r = consumer.endOffsets(topicPartitions, requestTimeout)
      println(s"End offsets for $topicPartition: ${r.asScala.mkString(", ")}")
    }

    consumer.seek(topicPartition, from)

    val pollDuration = java.time.Duration.ofNanos(1.seconds.toNanos)
    val lastOffset    = from + max
    var continue      = true
    while (continue) {
      println(s"Reading from Kafka")

      val xs = consumer.poll(pollDuration).asScala.toVector
      xs.foreach { msg =>
        println(QueueEventWithMeta(msg.offset(), msg.timestamp(), msg.value()))
      }

      xs.lastOption.foreach { x =>
        if (x.offset() == lastOffset) continue = false
      }
    }
  } finally {
    consumer.close()
  }
} 
Example 2
Source File: AbstractWebServer.scala    From ohara   with Apache License 2.0
package oharastream.ohara.shabondi.common

import akka.Done
import akka.actor.ActorSystem
import akka.event.Logging
import akka.http.scaladsl.Http
import akka.http.scaladsl.Http.ServerBinding
import akka.http.scaladsl.server.{Directives, Route}
import akka.http.scaladsl.settings.ServerSettings
import oharastream.ohara.common.util.Releasable

import scala.concurrent._
import scala.concurrent.duration.Duration
import scala.io.StdIn
import scala.util.{Failure, Success}


private[shabondi] abstract class AbstractWebServer extends Directives with Releasable {
  implicit protected val actorSystem: ActorSystem = ActorSystem(Logging.simpleName(this).replaceAll("\\$", ""))

  protected def routes: Route

  protected def postBinding(binding: ServerBinding): Unit = {
    val hostname = binding.localAddress.getHostName
    val port     = binding.localAddress.getPort
    actorSystem.log.info(s"Server online at http://$hostname:$port/")
  }

  protected def postBindingFailure(cause: Throwable): Unit = {
    actorSystem.log.error(cause, s"Error starting the server ${cause.getMessage}")
  }

  protected def waitForShutdownSignal()(implicit ec: ExecutionContext): Future[Done] = {
    val promise = Promise[Done]()
    sys.addShutdownHook {
      promise.trySuccess(Done)
    }
    Future {
      blocking {
        if (StdIn.readLine("Press <RETURN> to stop Shabondi WebServer...\n") != null)
          promise.trySuccess(Done)
      }
    }
    promise.future
  }

  protected def postServerShutdown(): Unit = actorSystem.log.info("Shutting down the server")

  def start(bindInterface: String, port: Int): Unit = {
    start(bindInterface, port, ServerSettings(actorSystem))
  }

  def start(bindInterface: String, port: Int, settings: ServerSettings): Unit = {
    implicit val executionContext: ExecutionContextExecutor = actorSystem.dispatcher

    val bindingFuture: Future[Http.ServerBinding] = Http().bindAndHandle(
      handler = routes,
      interface = bindInterface,
      port = port,
      settings = settings
    )

    bindingFuture.onComplete {
      case Success(binding) =>
        postBinding(binding)
      case Failure(cause) =>
        postBindingFailure(cause)
    }

    Await.ready(
      bindingFuture.flatMap(_ => waitForShutdownSignal()),
      Duration.Inf
    )

    bindingFuture
      .flatMap(_.unbind())
      .onComplete { _ =>
        postServerShutdown()
        actorSystem.terminate()
      }
  }

  override def close(): Unit = actorSystem.terminate()
} 
Example 3
Source File: Components.scala    From gbf-raidfinder   with MIT License
package walfie.gbf.raidfinder.server

import akka.actor.ActorSystem
import akka.stream.Materializer
import com.trueaccord.scalapb.json.JsonFormat
import monix.execution.Scheduler
import play.api.BuiltInComponents
import play.api.http.{ContentTypes, DefaultHttpErrorHandler}
import play.api.libs.json.Json
import play.api.Mode.Mode
import play.api.mvc._
import play.api.routing.Router
import play.api.routing.sird._
import play.core.server._
import play.filters.cors.{CORSConfig, CORSFilter}
import play.filters.gzip.GzipFilterComponents
import scala.concurrent.duration.FiniteDuration
import scala.concurrent.Future
import walfie.gbf.raidfinder.protocol.{RaidBossesResponse, BinaryProtobuf}
import walfie.gbf.raidfinder.RaidFinder
import walfie.gbf.raidfinder.server.controller._
import walfie.gbf.raidfinder.server.syntax.ProtocolConverters.RaidBossDomainOps

class Components(
  raidFinder:                 RaidFinder[BinaryProtobuf],
  translator:                 BossNameTranslator,
  port:                       Int,
  mode:                       Mode,
  websocketKeepAliveInterval: FiniteDuration,
  metricsCollector:           MetricsCollector
) extends NettyServerComponents
  with BuiltInComponents with GzipFilterComponents with Controller {

  override lazy val serverConfig = ServerConfig(port = Some(port), mode = mode)

  private val corsFilter = new CORSFilter(corsConfig = CORSConfig().withAnyOriginAllowed)
  override lazy val httpFilters = List(gzipFilter, corsFilter)

  lazy val websocketController = new WebsocketController(
    raidFinder, translator, websocketKeepAliveInterval, metricsCollector
  )(actorSystem, materializer, Scheduler.Implicits.global)

  // The charset isn't necessary, but without it, Chrome displays Japanese incorrectly
  // if you try to view the JSON directly.
  // https://bugs.chromium.org/p/chromium/issues/detail?id=438464
  private val ContentTypeJsonWithUtf8 = "application/json; charset=utf-8"

  lazy val router = Router.from {
    case GET(p"/") =>
      controllers.Assets.at(path = "/public", "index.html")

    case GET(p"/api/bosses.json" ? q_s"name=$names") =>
      val bosses = if (names.nonEmpty) {
        val knownBossesMap = raidFinder.getKnownBosses
        names.collect(knownBossesMap)
      } else raidFinder.getKnownBosses.values

      val responseProtobuf = RaidBossesResponse(
        raidBosses = bosses.map(_.toProtocol(translator)).toSeq
      )
      val responseJson = JsonFormat.toJsonString(responseProtobuf)
      Action(Ok(responseJson).as(ContentTypeJsonWithUtf8))

    case GET(p"/api/metrics.json") =>
      val activeUsers = metricsCollector.getActiveWebSocketCount()
      val json = Json.obj("activeUsers" -> activeUsers)
      Action(Ok(json))

    case GET(p"/ws/raids" ? q_o"keepAlive=${ bool(keepAlive) }") =>
      websocketController.raids(keepAlive = keepAlive.getOrElse(false))

    case GET(p"/$file*") =>
      controllers.Assets.at(path = "/public", file = file)
  }

  override lazy val httpErrorHandler = new ErrorHandler

  override def serverStopHook = () => Future.successful {
    actorSystem.terminate()
  }
} 
Example 4
Source File: IntegrationTest.scala    From kmq   with Apache License 2.0
package com.softwaremill.kmq.redelivery

import java.time.Duration
import java.util.Random

import akka.actor.ActorSystem
import akka.kafka.scaladsl.{Consumer, Producer}
import akka.kafka.{ConsumerSettings, ProducerMessage, ProducerSettings, Subscriptions}
import akka.stream.ActorMaterializer
import akka.testkit.TestKit
import com.softwaremill.kmq._
import com.softwaremill.kmq.redelivery.infrastructure.KafkaSpec
import org.apache.kafka.clients.consumer.ConsumerConfig
import org.apache.kafka.clients.producer.{ProducerConfig, ProducerRecord}
import org.apache.kafka.common.serialization.StringDeserializer
import org.scalatest.concurrent.Eventually
import org.scalatest.time.{Seconds, Span}
import org.scalatest.{BeforeAndAfterAll, FlatSpecLike, Matchers}

import scala.collection.mutable.ArrayBuffer

class IntegrationTest extends TestKit(ActorSystem("test-system")) with FlatSpecLike with KafkaSpec with BeforeAndAfterAll with Eventually with Matchers {

  implicit val materializer = ActorMaterializer()
  import system.dispatcher

  "KMQ" should "resend message if not committed" in {
    val bootstrapServer = s"localhost:${testKafkaConfig.kafkaPort}"
    val kmqConfig = new KmqConfig("queue", "markers", "kmq_client", "kmq_redelivery",
      Duration.ofSeconds(1).toMillis, 1000)

    val consumerSettings = ConsumerSettings(system, new StringDeserializer, new StringDeserializer)
      .withBootstrapServers(bootstrapServer)
      .withGroupId(kmqConfig.getMsgConsumerGroupId)
      .withProperty(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest")

    val markerProducerSettings = ProducerSettings(system,
      new MarkerKey.MarkerKeySerializer(), new MarkerValue.MarkerValueSerializer())
      .withBootstrapServers(bootstrapServer)
      .withProperty(ProducerConfig.PARTITIONER_CLASS_CONFIG, classOf[ParititionFromMarkerKey].getName)
    val markerProducer = markerProducerSettings.createKafkaProducer()

    val random = new Random()

    lazy val processedMessages = ArrayBuffer[String]()
    lazy val receivedMessages = ArrayBuffer[String]()

    val control = Consumer.committableSource(consumerSettings, Subscriptions.topics(kmqConfig.getMsgTopic)) // 1. get messages from topic
      .map { msg =>
        ProducerMessage.Message(
          new ProducerRecord[MarkerKey, MarkerValue](kmqConfig.getMarkerTopic, MarkerKey.fromRecord(msg.record), new StartMarker(kmqConfig.getMsgTimeoutMs)), msg)
      }
      .via(Producer.flow(markerProducerSettings, markerProducer)) // 2. write the "start" marker
      .map(_.message.passThrough)
      .mapAsync(1) { msg =>
        msg.committableOffset.commitScaladsl().map(_ => msg.record) // this should be batched
      }
      .map { msg =>
        receivedMessages += msg.value
        msg
      }
      .filter(_ => random.nextInt(5) != 0)
      .map { processedMessage =>
        processedMessages += processedMessage.value
        new ProducerRecord[MarkerKey, MarkerValue](kmqConfig.getMarkerTopic, MarkerKey.fromRecord(processedMessage), EndMarker.INSTANCE)
      }
      .to(Producer.plainSink(markerProducerSettings, markerProducer)) // 5. write "end" markers
      .run()

    val redeliveryHook = RedeliveryTracker.start(new KafkaClients(bootstrapServer), kmqConfig)

    val messages = (0 to 20).map(_.toString)
    messages.foreach(msg => sendToKafka(kmqConfig.getMsgTopic, msg))

    eventually {
      receivedMessages.size should be > processedMessages.size
      processedMessages.sortBy(_.toInt).distinct shouldBe messages
    }(PatienceConfig(timeout = Span(15, Seconds)), implicitly)

    redeliveryHook.close()
    control.shutdown()
  }

  override def afterAll(): Unit = {
    super.afterAll()
    TestKit.shutdownActorSystem(system)
  }
} 
Example 5
Source File: ModelService.scala    From reactive-machine-learning-systems   with MIT License
package com.reactivemachinelearning

import akka.actor.ActorSystem
import akka.event.{Logging, LoggingAdapter}
import akka.http.scaladsl.Http
import akka.http.scaladsl.marshalling.ToResponseMarshallable
import akka.http.scaladsl.model.StatusCodes._
import akka.http.scaladsl.server.Directives._
import akka.stream.{ActorMaterializer, Materializer}
//import spray.json._
import spray.json.DefaultJsonProtocol

import scala.concurrent.{ExecutionContextExecutor, Future}

case class Prediction(id: Long, timestamp: Long, value: Double)

trait Protocols extends DefaultJsonProtocol {
  implicit val ipInfoFormat = jsonFormat3(Prediction.apply)
}

trait Service extends Protocols {
  implicit val system: ActorSystem

  implicit def executor: ExecutionContextExecutor

  implicit val materializer: Materializer

  val logger: LoggingAdapter

//  private def parseFeatures(features: String): Map[Long, Double] = {
//    features.parseJson.convertTo[Map[Long, Double]]
//  }

  def predict(features: String): Future[Prediction] = {
    Future(Prediction(123, 456, 0.5))
  }

  val routes = {
    logRequestResult("predictive-service") {
      pathPrefix("ip") {
        (get & path(Segment)) { features =>
          complete {
            predict(features).map[ToResponseMarshallable] {
//              case prediction: Prediction => prediction
              case _ => BadRequest
            }
          }
        }
      }
    }
  }
}

object PredictiveService extends App with Service {
  override implicit val system = ActorSystem()
  override implicit val executor = system.dispatcher
  override implicit val materializer = ActorMaterializer()

  override val logger = Logging(system, getClass)

  Http().bindAndHandle(routes, "0.0.0.0", 9000)
} 
Example 6
Source File: TestAkka.scala    From DataXServer   with Apache License 2.0
package org.tianlangstudio.data.hamal.akka

import akka.actor.{Props, ActorSystem, Actor}
import akka.actor.Actor.Receive

/**
 * Created by zhuhq on 2016/5/5.
 */
object TestAkka extends App {
  val system = ActorSystem("test")
  val actor = system.actorOf(Props(classOf[TestAkka]))
  for(i <- 0 to 10) {
    actor ! Remove()
    actor ! Add()
  }
}
class TestAkka extends Actor {
  override def receive: Receive = {
    case Remove() =>
      println("remove begin")
      Thread.sleep((1000 * math.ceil(math.random * 10)).toLong) // random sleep of 1 to 10 seconds
      println("remove end")
    case Add() =>
      println("add begin")
      Thread.sleep((1000 * math.ceil(math.random * 10)).toLong) // random sleep of 1 to 10 seconds
      println("add end")

  }
}
case class Remove()
case class Add() 
Example 7
Source File: ReliableClientWithSubscriptionActorSpec.scala    From reliable-http-client   with Apache License 2.0
package rhttpc.client

import akka.actor.{ActorRef, ActorSystem, Props}
import akka.testkit._
import org.scalatest.Matchers
import rhttpc.client.subscription._

import scala.concurrent.ExecutionContext

class ReliableClientWithSubscriptionActorSpec
  extends TestKit(ActorSystem("ReliableClientWithSubscriptionActorSpec"))
  with ReliableClientBaseSpec
  with ImplicitSender
  with Matchers {

  it should "got success reply" in { fixture =>
    val replyMock = TestProbe()
    val actor = system.actorOf(MockSubscriptionActor.props(fixture.client, replyMock.ref))
    actor ! SendRequest
    fixture.transport.publicationPromise.success(Unit)
    expectMsg(Unit)

    fixture.transport.replySubscriptionPromise.success("bar")
    replyMock.expectMsg("bar")
  }

  it should "got subscription aborted" in { fixture =>
    val replyMock = TestProbe()
    val actor = system.actorOf(MockSubscriptionActor.props(fixture.client, replyMock.ref))
    actor ! SendRequest
    fixture.transport.publicationPromise.failure(FailedAcknowledge)
    expectMsgAllClassOf(classOf[RequestAborted])
  }

}

private class MockSubscriptionActor(client: InOutReliableClient[String], replyMock: ActorRef)
                                   (implicit ec: ExecutionContext) extends PublicationListener {
  override def receive: Receive = {
    case SendRequest =>
      client.send("foo") pipeTo this
  }

  override def subscriptionPromiseRegistered(sub: SubscriptionOnResponse): Unit = {
    context.become(waitingOnSubscriptionCommand(sender()))
  }

  private def waitingOnSubscriptionCommand(originalSender: ActorRef): Receive = {
    case RequestPublished(sub) =>
      client.subscriptionManager.confirmOrRegister(sub, self) // FIXME
      originalSender ! Unit
      context.become(waitingOnReply)
    case a: RequestAborted =>
      originalSender ! a
      context.stop(self)
  }

  private def waitingOnReply: Receive = {
    case MessageFromSubscription(reply, sub) =>
      replyMock ! reply
      context.stop(self)
  }
}

object MockSubscriptionActor {
  def props(client: InOutReliableClient[String], replyMock: ActorRef)(implicit ec: ExecutionContext): Props = Props(new MockSubscriptionActor(client, replyMock))
}

case object SendRequest 
Example 8
Source File: HTTPResponseStream.scala    From akka_streams_tutorial   with MIT License
package akkahttp

import akka.NotUsed
import akka.actor.ActorSystem
import akka.http.scaladsl.Http
import akka.http.scaladsl.common.{EntityStreamingSupport, JsonEntityStreamingSupport}
import akka.http.scaladsl.marshallers.sprayjson.SprayJsonSupport
import akka.http.scaladsl.model._
import akka.http.scaladsl.server.Directives.{complete, get, logRequestResult, path, _}
import akka.http.scaladsl.server.Route
import akka.http.scaladsl.unmarshalling.Unmarshal
import akka.stream.ThrottleMode
import akka.stream.scaladsl.{Flow, Sink, Source}
import com.typesafe.config.ConfigFactory
import spray.json.DefaultJsonProtocol

import scala.concurrent.Future
import scala.concurrent.duration._
import scala.util.{Failure, Success}


object HTTPResponseStream extends App with DefaultJsonProtocol with SprayJsonSupport {
  implicit val system = ActorSystem("HTTPResponseStream")
  implicit val executionContext = system.dispatcher

  //JSON Protocol and streaming support
  final case class ExamplePerson(name: String)

  implicit def examplePersonFormat = jsonFormat1(ExamplePerson.apply)

  implicit val jsonStreamingSupport: JsonEntityStreamingSupport = EntityStreamingSupport.json()

  val (address, port) = ("127.0.0.1", 8080)
  server(address, port)
  client(address, port)

  def client(address: String, port: Int): Unit = {
    val requestParallelism = ConfigFactory.load.getInt("akka.http.host-connection-pool.max-connections")

    val requests: Source[HttpRequest, NotUsed] = Source
      .fromIterator(() =>
        Range(0, requestParallelism).map(i => HttpRequest(uri = Uri(s"http://$address:$port/download/$i"))).iterator
      )

    // Run singleRequest and completely consume response elements
    def runRequestDownload(req: HttpRequest) =
      Http()
        .singleRequest(req)
        .flatMap { response =>
          val unmarshalled: Future[Source[ExamplePerson, NotUsed]] = Unmarshal(response).to[Source[ExamplePerson, NotUsed]]
          val source: Source[ExamplePerson, Future[NotUsed]] = Source.futureSource(unmarshalled)
          source.via(processorFlow).runWith(printSink)
        }

    requests
      .mapAsync(requestParallelism)(runRequestDownload)
      .runWith(Sink.ignore)
  }


  val printSink = Sink.foreach[ExamplePerson] { each: ExamplePerson => println(s"Client processed element: $each") }

  val processorFlow: Flow[ExamplePerson, ExamplePerson, NotUsed] = Flow[ExamplePerson].map {
    each: ExamplePerson => {
      //println(s"Process: $each")
      each
    }
  }


  def server(address: String, port: Int): Unit = {

    def routes: Route = logRequestResult("httpecho") {
      path("download" / Segment) { id: String =>
        get {
          println(s"Server received request with id: $id, stream response...")
          extractRequest { r: HttpRequest =>
            val finishedWriting = r.discardEntityBytes().future
            onComplete(finishedWriting) { done =>
              //Limit response by appending eg .take(5)
              val responseStream: Stream[ExamplePerson] = Stream.continually(ExamplePerson(s"request:$id"))
              complete(Source(responseStream).throttle(1, 1.second, 1, ThrottleMode.shaping))
            }
          }
        }
      }
    }

    val bindingFuture = Http().bindAndHandle(routes, address, port)
    bindingFuture.onComplete {
      case Success(b) =>
        println("Server started, listening on: " + b.localAddress)
      case Failure(e) =>
        println(s"Server could not bind to: $address:$port. Exception message: ${e.getMessage}")
        system.terminate()
    }
  }
} 
Example 9
Source File: SampleRoutes.scala    From akka_streams_tutorial   with MIT License
package akkahttp

import java.io.File

import akka.actor.ActorSystem
import akka.http.scaladsl.Http
import akka.http.scaladsl.server.Directives._
import akka.http.scaladsl.server.Route
import org.slf4j.{Logger, LoggerFactory}

import scala.concurrent.Await
import scala.concurrent.duration._
import scala.sys.process.Process
import scala.util.{Failure, Success}


object SampleRoutes extends App {
  val logger: Logger = LoggerFactory.getLogger(this.getClass)
  implicit val system = ActorSystem("SampleRoutes")
  implicit val executionContext = system.dispatcher


  def getFromBrowsableDir: Route = {
    val dirToBrowse = File.separator + "tmp"

    // pathPrefix allows loading dirs and files recursively
    pathPrefix("entries") {
      getFromBrowseableDirectory(dirToBrowse)
    }
  }

  def parseFormData: Route = path("post") {
    formFields('color, 'age.as[Int]) { (color, age) =>
      complete(s"The color is '$color' and the age is $age")
    }
  }

  def routes: Route = {
    getFromBrowsableDir ~ parseFormData
  }

  val bindingFuture = Http().bindAndHandle(routes, "127.0.0.1", 8000)

  bindingFuture.onComplete {
    case Success(b) =>
      println("Server started, listening on: " + b.localAddress)
    case Failure(e) =>
      println(s"Server could not bind to... Exception message: ${e.getMessage}")
      system.terminate()
  }

  def browserClient() = {
    val os = System.getProperty("os.name").toLowerCase
    if (os == "mac os x") Process("open ./src/main/resources/SampleRoutes.html").!
  }

  browserClient()

  sys.addShutdownHook {
    println("About to shutdown...")
    val fut = bindingFuture.map(serverBinding => serverBinding.terminate(hardDeadline = 3.seconds))
    println("Waiting for connections to terminate...")
    val onceAllConnectionsTerminated = Await.result(fut, 10.seconds)
    println("Connections terminated")
    onceAllConnectionsTerminated.flatMap { _ => system.terminate()
    }
  }
} 
Example 10
package sample.stream

import akka.actor.ActorSystem
import akka.stream.Supervision.Decider
import akka.stream._
import akka.stream.scaladsl.{Flow, Sink, Source, SourceQueueWithComplete}
import akka.{Done, NotUsed}
import org.slf4j.{Logger, LoggerFactory}

import scala.concurrent.Future
import scala.concurrent.duration._
import scala.util.{Failure, Success}


object PublishToSourceQueueFromMultipleThreads extends App {
  val logger: Logger = LoggerFactory.getLogger(this.getClass)
  implicit val system = ActorSystem("PublishToSourceQueueFromMultipleThreads")
  implicit val ec = system.dispatcher

  val bufferSize = 100
  // As of akka 2.6.x there is a thread safe implementation for SourceQueue
  val maxConcurrentOffers = 1000
  val numberOfPublishingClients = 1000

  val slowSink: Sink[Seq[Int], NotUsed] =
    Flow[Seq[Int]]
      .delay(2.seconds, DelayOverflowStrategy.backpressure)
      .to(Sink.foreach(e => logger.info(s"Reached sink: $e")))

  val sourceQueue: SourceQueueWithComplete[Int] = Source
    .queue[Int](bufferSize, OverflowStrategy.backpressure, maxConcurrentOffers)
    .groupedWithin(10, 1.seconds)
    .to(slowSink)
    .run

  val doneConsuming: Future[Done] = sourceQueue.watchCompletion()
  signalWhen(doneConsuming, "consuming") //never completes

  simulatePublishingFromMultipleThreads()

  // Before 2.6.x a stream had to be used to throttle and control the backpressure
  //simulatePublishingClientsFromStream()

  // Decide on the stream level, because the OverflowStrategy.backpressure
  // on the sourceQueue causes an IllegalStateException
  // Handling this on the stream level allows to restart the stream
  private def simulatePublishingClientsFromStream() = {

    val decider: Decider = {
      case _: IllegalStateException => println("Got backpressure signal for offered element, restart..."); Supervision.Restart
      case _ => Supervision.Stop
    }

    val donePublishing: Future[Done] = Source(1 to numberOfPublishingClients)
      .mapAsync(10)(offerToSourceQueue) //throttle
      .withAttributes(ActorAttributes.supervisionStrategy(decider))
      .runWith(Sink.ignore)
    signalWhen(donePublishing, "publishing")
  }

  private def simulatePublishingFromMultipleThreads() = (1 to numberOfPublishingClients).par.foreach(offerToSourceQueue)

  private def offerToSourceQueue(each: Int) = {
    sourceQueue.offer(each).map {
      case QueueOfferResult.Enqueued => logger.info(s"enqueued $each")
      case QueueOfferResult.Dropped => logger.info(s"dropped $each")
      case QueueOfferResult.Failure(ex) => logger.info(s"Offer failed: $ex")
      case QueueOfferResult.QueueClosed => logger.info("Source Queue closed")
    }
  }

  private def signalWhen(done: Future[Done], operation: String) = {
    done.onComplete {
      case Success(b) =>
        logger.info(s"Finished: $operation")
      case Failure(e) =>
        logger.info(s"Failure: $e About to terminate...")
        system.terminate()
    }
  }
} 
Example 11
Source File: CalculateMedian.scala    From akka_streams_tutorial   with MIT License
package sample.stream

import java.util.concurrent.ThreadLocalRandom

import akka.actor.ActorSystem
import akka.stream.ThrottleMode
import akka.stream.scaladsl.Source

import scala.annotation.tailrec
import scala.concurrent.duration._


//noinspection LanguageFeature
object CalculateMedian {
  implicit val system = ActorSystem("CalculateMedian")
  implicit val ec = system.dispatcher

  def main(args: Array[String]) = {
    val maxRandomNumber = 100
    val source = Source.fromIterator(() => Iterator.continually(ThreadLocalRandom.current().nextDouble(maxRandomNumber)))

    source
      .throttle(1, 10.millis, 1, ThrottleMode.shaping)
      .groupedWithin(100, 1.second)
      //.map{each => println(each); each}
      .map(each => medianOfMedians(each.toArray))
      .runForeach(result => println(s"Median of Median (grouped by 5) over the last 100 elements: $result"))
      .onComplete(_ => system.terminate())
  }

  @tailrec def findKMedian(arr: Array[Double], k: Int)(implicit choosePivot: Array[Double] => Double): Double = {
    val a = choosePivot(arr)
    val (s, b) = arr partition (a >)
    if (s.length == k) a
    // The following test is used to avoid infinite repetition
    else if (s.isEmpty) {
      val (s, b) = arr partition (a ==)
      if (s.length > k) a
      else findKMedian(b, k - s.length)
    } else if (s.length < k) findKMedian(b, k - s.length)
    else findKMedian(s, k)
  }

  def medianUpTo5(five: Array[Double]): Double = {
    def order2(a: Array[Double], i: Int, j: Int) = {
      if (a(i) > a(j)) {
        val t = a(i); a(i) = a(j); a(j) = t
      }
    }

    def pairs(a: Array[Double], i: Int, j: Int, k: Int, l: Int) = {
      if (a(i) < a(k)) {
        order2(a, j, k); a(j)
      }
      else {
        order2(a, i, l); a(i)
      }
    }

    if (five.length < 2) {
      return five(0)
    }
    order2(five, 0, 1)
    if (five.length < 4)
      return if (five.length == 2 || five(2) < five(0)) five(0)
      else if (five(2) > five(1)) five(1)
      else five(2)
    order2(five, 2, 3)
    if (five.length < 5) pairs(five, 0, 1, 2, 3)
    else if (five(0) < five(2)) {
      order2(five, 1, 4); pairs(five, 1, 4, 2, 3)
    }
    else {
      order2(five, 3, 4); pairs(five, 0, 1, 3, 4)
    }
  }

  def medianOfMedians(arr: Array[Double]): Double = {
    val medians = arr.grouped(5).map(medianUpTo5).toArray
    if (medians.length <= 5) medianUpTo5(medians)
    else medianOfMedians(medians)
  }
} 
Example 12
Source File: PublishToBlockingResource.scala    From akka_streams_tutorial   with MIT License
package sample.stream

import java.util.concurrent.{ArrayBlockingQueue, BlockingQueue}

import akka.NotUsed
import akka.actor.ActorSystem
import akka.stream.DelayOverflowStrategy
import akka.stream.scaladsl.{Flow, Sink, Source}

import scala.concurrent.duration._
import scala.util.Failure



object PublishToBlockingResource extends App {
  implicit val system = ActorSystem("PublishToBlockingResource")
  implicit val ec = system.dispatcher

  val slowSink: Sink[Seq[Int], NotUsed] =
    Flow[Seq[Int]]
      .delay(1.seconds, DelayOverflowStrategy.backpressure)
      .to(Sink.foreach(e => println(s"Reached sink: $e")))

  val blockingResource: BlockingQueue[Int] = new ArrayBlockingQueue[Int](100)

  //Start a new `Source` from some (third party) blocking resource which can be opened, read and closed
  val source: Source[Int, NotUsed] =
    Source.unfoldResource[Int, BlockingQueue[Int]](
      () => blockingResource,                   //open
      (q: BlockingQueue[Int]) => Some(q.take()),//read
      (_: BlockingQueue[Int]) => {})            //close

  val done = source
    .groupedWithin(10, 2.seconds)
    .watchTermination()((_, done) => done.onComplete {
      case Failure(err) =>
        println(s"Flow failed: $err")
      case each => println(s"Server flow terminated: $each")
    })
    .runWith(slowSink)

  //simulate n process that publish in blocking fashion to the queue
  (1 to 1000).par.foreach(value => blockingResource.put(value))
} 
Example 13
Source File: TweetExample.scala    From akka_streams_tutorial   with MIT License
package sample.stream

import java.time.{Instant, ZoneId}

import akka.NotUsed
import akka.actor.{ActorSystem, Cancellable}
import akka.stream.DelayOverflowStrategy
import akka.stream.scaladsl.{Flow, MergePrioritized, Sink, Source}
import org.apache.commons.lang3.exception.ExceptionUtils
import org.slf4j.{Logger, LoggerFactory}

import scala.concurrent.duration._
import scala.util.{Failure, Success}



object TweetExample extends App {
  implicit val system = ActorSystem("TweetExample")
  implicit val ec = system.dispatcher
  val logger: Logger = LoggerFactory.getLogger(this.getClass)

  final case class Author(handle: String)

  final case class Hashtag(name: String)

  final case class Tweet(author: Author, timestamp: Long, body: String) {
    def hashtags: Set[Hashtag] =
      body.split(" ").collect { case t if t.startsWith("#") => Hashtag(t) }.toSet

    override def toString = {
      val localDateTime = Instant.ofEpochMilli(timestamp).atZone(ZoneId.systemDefault()).toLocalDateTime
      s"$localDateTime - ${author.handle} tweeted: ${body.take(5)}..."
    }
  }

  val akkaTag = Hashtag("#akka")

  val tweetsLowPrio: Source[Tweet, Cancellable] = Source.tick(1.second, 200.millis, NotUsed).map(_ => Tweet(Author("LowPrio"), System.currentTimeMillis, "#other #akka aBody"))
  val tweetsHighPrio: Source[Tweet, Cancellable] = Source.tick(2.second, 1.second, NotUsed).map(_ => Tweet(Author("HighPrio"), System.currentTimeMillis, "#akka #other aBody"))
  val tweetsVeryHighPrio: Source[Tweet, Cancellable] = Source.tick(2.second, 1.second, NotUsed).map(_ => Tweet(Author("VeryHighPrio"), System.currentTimeMillis, "#akka #other aBody"))

  val limitedTweets: Source[Tweet, NotUsed] = Source.combine(tweetsLowPrio, tweetsHighPrio, tweetsVeryHighPrio)(_ => MergePrioritized(List(1, 10, 100))).take(20)

  val processingFlow = Flow[Tweet]
    .filter(_.hashtags.contains(akkaTag))
    .wireTap(each => logger.info(s"$each"))

  val slowDownstream  =
    Flow[Tweet]
      .delay(5.seconds, DelayOverflowStrategy.backpressure)

  val processedTweets =
    limitedTweets
      .via(processingFlow)
      .via(slowDownstream)
      .runWith(Sink.seq)

  processedTweets.onComplete {
    case Success(results) =>
      logger.info(s"Successfully processed: ${results.size} tweets")
      system.terminate()
    case Failure(exception) =>
      logger.info(s"The stream failed with: ${ExceptionUtils.getRootCause(exception)}")
      system.terminate()
  }
} 
Example 14
Source File: MergeHubWithDynamicSources.scala    From akka_streams_tutorial   with MIT License
package sample.stream

import akka.NotUsed
import akka.actor.ActorSystem
import akka.stream.DelayOverflowStrategy
import akka.stream.scaladsl.{Flow, MergeHub, RunnableGraph, Sink, Source}

import scala.concurrent.duration._


object MergeHubWithDynamicSources {
  implicit val system = ActorSystem("MergeHubWithDynamicSources")
  implicit val ec = system.dispatcher

  def main(args: Array[String]): Unit = {

    val slowSink: Sink[Seq[String], NotUsed] =
      Flow[Seq[String]]
        .delay(1.seconds, DelayOverflowStrategy.backpressure)
        .to(Sink.foreach(e => println(s"Reached Sink: $e")))

    // Attach a MergeHub Source to the consumer. This will materialize to a corresponding Sink
    val runnableGraph: RunnableGraph[Sink[String, NotUsed]] =
      MergeHub.source[String](perProducerBufferSize = 16)
        .groupedWithin(10, 2.seconds)
        .to(slowSink)

    // By running/materializing the graph we get back a Sink, and hence now have access to feed elements into it
    // This Sink can then be materialized any number of times, and every element that enters the Sink will be consumed by our consumer
    val toConsumer: Sink[String, NotUsed] = runnableGraph.run()

    def fastSource(sourceId: Int, toConsumer: Sink[String, NotUsed]) = {
      Source(1 to 10)
        .map{each => println(s"Produced: $sourceId.$each"); s"$sourceId.$each"}
        .runWith(toConsumer)
    }

    // Add dynamic producer sources. If the consumer cannot keep up, then ALL of the producers are backpressured
    (1 to 10).par.foreach(each => fastSource(each, toConsumer))
  }
} 
Example 15
Source File: PartitionHubWithDynamicSinks.scala    From akka_streams_tutorial   with MIT License
package sample.stream

import akka.NotUsed
import akka.actor.ActorSystem
import akka.stream.ThrottleMode
import akka.stream.scaladsl.{Keep, PartitionHub, RunnableGraph, Source}

import scala.concurrent.duration._


object PartitionHubWithDynamicSinks {
  implicit val system = ActorSystem()
  implicit val ec = system.dispatcher

  def main(args: Array[String]): Unit = {

    val producer = Source.tick(1.second, 100.millis, "message").zipWith(Source(1 to 100))((a, b) => s"$a-$b")

    // A new instance of the partitioner functions and its state is created for each materialization of the PartitionHub
    def partitionRoundRobin(): (PartitionHub.ConsumerInfo, String) => Long = {
      var i = -1L

      (info, elem) => {
        i += 1
        info.consumerIdByIdx((i % info.size).toInt)
      }
    }

    def partitionToFastestConsumer(): (PartitionHub.ConsumerInfo, String) => Long = {
      (info: PartitionHub.ConsumerInfo, each: String) => info.consumerIds.minBy(id => info.queueSize(id))
    }

    // Attach a PartitionHub Sink to the producer. This will materialize to a corresponding Source
    // We need to use toMat and Keep.right since by default the materialized value to the left is used
    val runnableGraph: RunnableGraph[Source[String, NotUsed]] =
      producer.toMat(PartitionHub.statefulSink(
        //Switch the partitioning function
        () => partitionRoundRobin(),
        //() => partitionToFastestConsumer(),
        startAfterNrOfConsumers = 1, bufferSize = 1))(Keep.right)

    // By running/materializing the producer, we get back a Source, which
    // gives us access to the elements published by the producer.
    val fromProducer: Source[String, NotUsed] = runnableGraph.run()

    // Attach three dynamic fan-out sinks to the PartitionHub
    fromProducer.runForeach(msg => println("fast consumer1 received: " + msg))
    fromProducer.throttle(100, 1.millis, 10, ThrottleMode.Shaping)
      .runForeach(msg => println("slow consumer2 received: " + msg))
    fromProducer.throttle(100, 2.millis, 10, ThrottleMode.Shaping)
      .runForeach(msg => println("really slow consumer3 received: " + msg))
  }
} 
Example 16
Source File: AsyncExecution.scala    From akka_streams_tutorial   with MIT License
package sample.stream

import akka.Done
import akka.actor.ActorSystem
import akka.stream.ActorAttributes
import akka.stream.scaladsl.{Flow, Sink, Source}
import org.slf4j.{Logger, LoggerFactory}

import scala.concurrent.Future
import scala.util.{Failure, Success}


object AsyncExecution extends App {
  val logger: Logger = LoggerFactory.getLogger(this.getClass)
  implicit val system = ActorSystem("AsyncExecution")
  implicit val ec = system.dispatcher

  def stage(name: String) =
    Flow[Int]
      .wireTap(index => logger.info(s"Stage $name processing element $index by ${Thread.currentThread().getName}"))

  def stageBlocking(name: String) =
    Flow[Int]
      .wireTap(index => logger.info(s"Stage $name processing element $index by ${Thread.currentThread().getName}"))
      .wireTap(_ => Thread.sleep(5000))
      .withAttributes(ActorAttributes.dispatcher("custom-dispatcher-for-blocking"))

  def sinkBlocking: Sink[Int, Future[Done]] =
    Sink.foreach { index: Int =>
      Thread.sleep(2000)
      logger.info(s"Slow sink processing element $index by ${Thread.currentThread().getName}")
     }
      //Adding a custom dispatcher creates an async boundary
      //see discussion in: https://discuss.lightbend.com/t/how-can-i-make-sure-that-fileio-frompath-is-picking-up-my-dispatcher/6528/4
      .withAttributes(ActorAttributes.dispatcher("custom-dispatcher-for-blocking"))


  val done = Source(1 to 10)
    .via(stage("A")).async
    //When activated instead of alsoTo(sinkBlocking): elements for stage C are held up by stage B
    //.via(stageBlocking("B")).async
    .alsoTo(sinkBlocking).async
    .via(stage("C")).async
    .runWith(Sink.ignore)

  //With alsoTo(sinkBlocking) the stages A and C signal "done" too early and thus would terminate the whole stream
  //The reason for this is the custom dispatcher in sinkBlocking
  //terminateWhen(done)

  def terminateWhen(done: Future[_]) = {
    done.onComplete {
      case Success(_) =>
        println("Flow Success. About to terminate...")
        system.terminate()
      case Failure(e) =>
        println(s"Flow Failure: $e. About to terminate...")
        system.terminate()
    }
  }
} 
Example 17
Source File: BasicTransformation.scala    From akka_streams_tutorial   with MIT License
package sample.stream

import akka.actor.ActorSystem
import akka.stream.scaladsl.{Flow, Sink, Source}


object BasicTransformation {
  implicit val system = ActorSystem("BasicTransformation")
  import system.dispatcher

  def main(args: Array[String]): Unit = {
    val text =
      """|Lorem Ipsum is simply dummy text of the printing and typesetting industry.
         |Lorem Ipsum has been the industry's standard dummy text ever since the 1500s,
         |when an unknown printer took a galley of type and scrambled it to make a type
         |specimen book.""".stripMargin

    val source = Source.fromIterator(() => text.split("\\s").iterator)
    val sink = Sink.foreach[String](println)
    val flow = Flow[String].map(x => x.toUpperCase)
    val result = source.via(flow).runWith(sink)
    result.onComplete(_ => system.terminate())
  }
} 
Example 18
package sample.stream

import java.time.{Instant, ZoneId, ZonedDateTime}

import akka.NotUsed
import akka.actor.ActorSystem
import akka.stream.scaladsl._
import akka.stream.{DelayOverflowStrategy, ThrottleMode}

import scala.concurrent.duration._
import scala.util.Failure

case class SourceEvent(id: Integer)

case class DomainEvent(id: Integer, timeDate: ZonedDateTime)



object SlowConsumerDropsElementsOnFastProducer extends App {
  implicit val system = ActorSystem("SlowConsumerDropsElementsOnFastProducer")
  implicit val ec = system.dispatcher

  val fastSource: Source[SourceEvent, NotUsed] =
    Source(1 to 500)
      .throttle(10, 1.second, 1, ThrottleMode.shaping)
      .map { i =>
        println(s"Producing event: $i")
        SourceEvent(i)
      }

  val droppyStream: Flow[SourceEvent, SourceEvent, NotUsed] =
  //Conflate is "rate aware": it combines/aggregates elements from upstream while the downstream backpressures
  //The reducer function here keeps the freshest element, which in effect makes this a simple dropping operation
    Flow[SourceEvent]
      .conflate((lastEvent, newEvent) => newEvent)

  val enrichWithTimestamp: Flow[SourceEvent, DomainEvent, NotUsed] =
    Flow[SourceEvent]
      .map { e =>
        val instant = Instant.ofEpochMilli(System.currentTimeMillis())
        val zonedDateTimeUTC: ZonedDateTime = ZonedDateTime.ofInstant(instant, ZoneId.of("UTC"))
        DomainEvent(e.id, zonedDateTimeUTC)
      }

  val terminationHook: Flow[DomainEvent, DomainEvent, Unit] = Flow[DomainEvent]
    .watchTermination() { (_, done) =>
      done.onComplete {
        case Failure(err) => println(s"Flow failed: $err")
        case _ => system.terminate(); println(s"Flow terminated")
      }
    }

  val slowSink: Sink[DomainEvent, NotUsed] =
    Flow[DomainEvent]
      //.buffer(100, OverflowStrategy.backpressure)
      .delay(10.seconds, DelayOverflowStrategy.backpressure)
      .to(Sink.foreach(e => println(s"Reached Sink: $e")))

  fastSource
    .via(droppyStream)
    .via(enrichWithTimestamp)
    .via(terminationHook)
    .runWith(slowSink)
} 
Example 19
Source File: TcpEcho.scala    From akka_streams_tutorial   with MIT License
package sample.stream

import akka.actor.ActorSystem
import akka.stream.scaladsl.{Flow, Framing, Keep, Sink, Source, Tcp}
import akka.util.ByteString

import scala.concurrent.Future
import scala.util.{Failure, Success}


object TcpEcho extends App {
  val systemServer = ActorSystem("TcpEchoServer")
  val systemClient = ActorSystem("TcpEchoClient")

  var serverBinding: Future[Tcp.ServerBinding] = _

  if (args.isEmpty) {
    val (address, port) = ("127.0.0.1", 6000)
    serverBinding = server(systemServer, address, port)
    (1 to 10).par.foreach(each => client(each, systemClient, address, port))
  } else {
    val (address, port) =
      if (args.length == 3) (args(1), args(2).toInt)
      else ("127.0.0.1", 6000)
    if (args(0) == "server") {
      serverBinding = server(systemServer, address, port)
    } else if (args(0) == "client") {
      client(1, systemClient, address, port)
    }
  }

  def server(system: ActorSystem, address: String, port: Int): Future[Tcp.ServerBinding] = {
    implicit val sys = system
    implicit val ec = system.dispatcher

    val handler = Sink.foreach[Tcp.IncomingConnection] { connection =>

      // parse incoming commands and append !
      val commandParser = Flow[String].takeWhile(_ != "BYE").map(_ + "!")

      val welcomeMsg = s"Welcome to: ${connection.localAddress}, you are: ${connection.remoteAddress}!"
      val welcomeSource = Source.single(welcomeMsg)

      val serverEchoFlow = Flow[ByteString]
        .via(Framing.delimiter( //chunk the inputs up into actual lines of text
          ByteString("\n"),
          maximumFrameLength = 256,
          allowTruncation = true))
        .map(_.utf8String)
        .via(commandParser)
        .merge(welcomeSource) // merge the initial banner after parser
        .map(_ + "\n")
        .map(ByteString(_))
        .watchTermination()((_, done) => done.onComplete {
        case Failure(err) =>
          println(s"Server flow failed: $err")
        case _ => println(s"Server flow terminated for client: ${connection.remoteAddress}")
      })
      connection.handleWith(serverEchoFlow)
    }
    
    val connections = Tcp().bind(interface = address, port = port)
    val binding = connections.watchTermination()(Keep.left).to(handler).run()

    binding.onComplete {
      case Success(b) =>
        println("Server started, listening on: " + b.localAddress)
      case Failure(e) =>
        println(s"Server could not bind to: $address:$port: ${e.getMessage}")
        system.terminate()
    }

    binding
  }

  def client(id: Int, system: ActorSystem, address: String, port: Int): Unit = {
    implicit val sys = system
    implicit val ec = system.dispatcher

    val connection: Flow[ByteString, ByteString, Future[Tcp.OutgoingConnection]] = Tcp().outgoingConnection(address, port)
    val testInput = ('a' to 'z').map(ByteString(_)) ++ Seq(ByteString("BYE"))
    val source =  Source(testInput).via(connection)
    val closed = source.runForeach(each => println(s"Client: $id received echo: ${each.utf8String}"))
    closed.onComplete(each => println(s"Client: $id closed: $each"))
  }
} 
Example 20
Source File: WaitForThreeFlowsToComplete.scala    From akka_streams_tutorial   with MIT License
package sample.stream

import java.nio.file.Paths

import akka.actor.ActorSystem
import akka.stream._
import akka.stream.scaladsl._
import akka.util.ByteString
import org.slf4j.{Logger, LoggerFactory}

import scala.concurrent._
import scala.concurrent.duration._


object WaitForThreeFlowsToComplete extends App {
  val logger: Logger = LoggerFactory.getLogger(this.getClass)
  implicit val system = ActorSystem("WaitForThreeFlowsToComplete")
  implicit val ec = system.dispatcher

  def lineSink(filename: String): Sink[String, Future[IOResult]] =
    Flow[String]
      .map(s => ByteString(s + "\n"))
      .wireTap(_ => logger.info(s"Add line to file: $filename"))
      .toMat(FileIO.toPath(Paths.get(filename)))(Keep.right) //retain to the Future[IOResult]
      .withAttributes(ActorAttributes.dispatcher("custom-dispatcher-for-blocking"))

  val origSource = Source(1 to 10)
  //scan (= transform) the source
  val factorialsSource = origSource.scan(BigInt(1))((acc, next) => acc * next)

  val fastFlow = origSource.runForeach(i => logger.info(s"Reached sink: $i"))

  val slowFlow1 = factorialsSource
    .map(_.toString)
    .runWith(lineSink("factorial1.txt"))

  val slowFlow2 = factorialsSource
    .zipWith(Source(0 to 10))((num, idx) => s"$idx! = $num")
    .throttle(1, 1.second, 1, ThrottleMode.shaping)
    .runWith(lineSink("factorial2.txt"))

  val allDone = for {
    fastFlowDone <- fastFlow
    slowFlow1Done <- slowFlow1
    slowFlow2Done <- slowFlow2
  } yield (fastFlowDone, slowFlow1Done, slowFlow2Done)

  allDone.onComplete { results =>
    logger.info(s"Resulting futures from flows: $results - about to terminate")
    system.terminate()
  }
} 
Example 21
Source File: AlsoTo.scala    From akka_streams_tutorial   with MIT License
package sample.stream_divert

import akka.actor.ActorSystem
import akka.event.Logging
import akka.stream.Attributes
import akka.stream.scaladsl.{Flow, Sink, Source}



object AlsoTo extends App {
  implicit val system = ActorSystem("AlsoTo")
  implicit val executionContext = system.dispatcher
  implicit val adapter = Logging(system, this.getClass)

  val source = Source(1 to 10)

  val sink = Sink.foreach { x: Int => adapter.log(Logging.InfoLevel, s" --> Element: $x reached sink") }

  def sinkBlocking = Sink.foreach { x: Int =>
    Thread.sleep(1000)
    adapter.log(Logging.InfoLevel, s" --> Element: $x logged in alsoTo sinkBlocking by ${Thread.currentThread().getName}")
  }

  val flow = Flow[Int]
    .log("before alsoTo")
    .alsoTo(sinkBlocking)
    .log("after alsoTo")
    .withAttributes(
      Attributes.logLevels(
        onElement = Logging.InfoLevel,
        onFinish = Logging.InfoLevel,
        onFailure = Logging.DebugLevel
      ))

  val done = source.via(flow).runWith(sink)
  done.onComplete(_ => system.terminate())
} 
Example 22
Source File: DivertTo.scala    From akka_streams_tutorial   with MIT License
package sample.stream_divert

import akka.NotUsed
import akka.actor.ActorSystem
import akka.stream.scaladsl.{Flow, Sink, Source}

import scala.concurrent.Future
import scala.util.{Failure, Success}


object DivertTo extends App {
  implicit val system = ActorSystem("DivertTo")
  implicit val executionContext = system.dispatcher

  val source = Source(1 to 10)

  val sink = Sink.foreach[Either[Valid[Int], Invalid[Int]]](each => println(s"Reached sink: ${each.left.get}"))

  val errorSink = Flow[Invalid[Int]]
    .map(each => println(s"Reached errorSink: $each"))
    .to(Sink.ignore)

  val flow: Flow[Int, Either[Valid[Int], Invalid[Int]], NotUsed] = Flow[Int]
    .map { x =>
      if (x % 2 == 0) Left(Valid(x))
      else Right(Invalid(x, Some(new Exception("Is odd"))))
    }
    .map {
      //Drawback of this approach: Pattern matching on all downstream operations
      case left@Left(_) => businessLogicOn(left)
      case right@Right(_) => right
    }
    .map {
      case left@Left(_) => left
      case right@Right(_) => right
    }
    //Divert invalid elements
    //contramap: apply "right.get" to each incoming upstream element *before* it is passed to the errorSink
    .divertTo(errorSink.contramap(_.right.get), _.isRight)

  private def businessLogicOn(left: Left[Valid[Int], Invalid[Int]]) = {
    if (left.value.payload > 5) left
    else Right(Invalid(left.value.payload, Some(new Exception("Is smaller than 5"))))
  }

  val done = source.via(flow).runWith(sink)
  terminateWhen(done)


  def terminateWhen(done: Future[_]) = {
    done.onComplete {
      case Success(_) =>
        println("Flow Success. About to terminate...")
        system.terminate()
      case Failure(e) =>
        println(s"Flow Failure: $e. About to terminate...")
        system.terminate()
    }
  }
}

case class Valid[T](payload: T)

case class Invalid[T](payload: T, cause: Option[Throwable]) 
Example 23
Source File: SplitAfter.scala    From akka_streams_tutorial   with MIT License
package sample.stream_shared_state

import java.time.{Instant, LocalDateTime, ZoneOffset}

import akka.Done
import akka.actor.ActorSystem
import akka.stream.scaladsl.{Sink, Source}
import org.slf4j.{Logger, LoggerFactory}

import scala.collection.immutable._
import scala.concurrent.Future
import scala.concurrent.duration._
import scala.util.{Failure, Success}


object SplitAfter extends App {
  val logger: Logger = LoggerFactory.getLogger(this.getClass)
  implicit val system = ActorSystem("SplitAfter")
  implicit val executionContext = system.dispatcher

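  // Note: statefulMapConcat (used below) takes a factory (() => ...) that is
  // invoked once per stream materialization, so each run gets its own fresh state.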
  private def hasSecondChanged: () => Seq[(Int, Instant)] => Iterable[(Instant, Boolean)] = {
    () => {
      slidingElements => {
        if (slidingElements.size == 2) {
          val current = slidingElements.head
          val next = slidingElements.tail.head
          val currentBucket = LocalDateTime.ofInstant(current._2, ZoneOffset.UTC).withNano(0)
          val nextBucket = LocalDateTime.ofInstant(next._2, ZoneOffset.UTC).withNano(0)
          List((current._2, currentBucket != nextBucket))
        } else {
          val current = slidingElements.head
          List((current._2, false))
        }
      }
    }
  }

  val done: Future[Done] = Source(1 to 100)
    .throttle(1, 100.millis)
    .map(elem => (elem, Instant.now()))
    .sliding(2)                           // allows to compare this element with the next element
    .statefulMapConcat(hasSecondChanged)  // stateful decision
    .splitAfter(_._2)                     // split when second has changed
    .map(_._1)                            // proceed with payload
    .fold(0)((acc, _) => acc + 1)   // count elements per substream
    .mergeSubstreams
    .runWith(Sink.foreach(each => println(s"Elements in group: $each")))

  terminateWhen(done)


  def terminateWhen(done: Future[_]) = {
    done.onComplete {
      case Success(_) =>
        println("Flow Success. About to terminate...")
        system.terminate()
      case Failure(e) =>
        println(s"Flow Failure: $e. About to terminate...")
        system.terminate()
    }
  }
} 
Example 24
Source File: ParametrizedFlow.scala    From akka_streams_tutorial   with MIT License
package sample.stream_shared_state

import akka.Done
import akka.actor.{ActorSystem, Cancellable}
import akka.stream.scaladsl.{Flow, GraphDSL, Keep, Sink, Source, SourceQueueWithComplete, Zip}
import akka.stream.{FlowShape, OverflowStrategy}

import scala.collection.immutable
import scala.concurrent.Future
import scala.concurrent.duration._
import scala.util.{Failure, Success}



object ParametrizedFlow extends App {
  val service = ParameterizedFlowService

  Thread.sleep(5000)
  service.update(1.0)

  Thread.sleep(2000)
  service.update(1.5)
  Thread.sleep(2000)
  service.cancel()
  Thread.sleep(2000)

  println(service.result())
}

object ParameterizedFlowService {
  implicit val system = ActorSystem("ParameterizedFlowService")
  implicit val executionContext = system.dispatcher

  def update(element: Double): Unit = flow._1._2.offer(element)

  def cancel(): Boolean = flow._1._1.cancel()

  def result(): Future[Seq[Double]] = flow._2

  val fun = (flowValue: Int, paramValue: Double) => flowValue * paramValue
  val flow: ((Cancellable, SourceQueueWithComplete[Double]), Future[immutable.Seq[Double]]) =
    Source.tick(0.seconds, 500.millis, 10)
      .viaMat(createParamFlow(1, OverflowStrategy.dropBuffer, 0.5)(fun))(Keep.both)
      .wireTap(x => println(x))
      .toMat(Sink.seq)(Keep.both)
      .run()

  val done: Future[Done] = flow._1._2.watchCompletion()
  terminateWhen(done)

  private def createParamFlow[A, P, O](bufferSize: Int, overflowStrategy: OverflowStrategy, initialParam: P)(fun: (A, P) => O) =
    Flow.fromGraph(GraphDSL.create(Source.queue[P](bufferSize, overflowStrategy)) { implicit builder =>
      queue =>
        import GraphDSL.Implicits._
        val zip = builder.add(Zip[A, P]())
        //Interesting use of the extrapolate operator
        //based on https://doc.akka.io/docs/akka/current/stream/stream-rate.html#understanding-extrapolate-and-expand
        val extra = builder.add(Flow[P].extrapolate(Iterator.continually(_), Some(initialParam)))
        val map = builder.add(Flow[(A, P)].map(r => fun(r._1, r._2)))

        queue ~> extra ~> zip.in1
        zip.out ~> map
        FlowShape(zip.in0, map.out)
    })

  private def terminateWhen(done: Future[_]) = {
    done.onComplete {
      case Success(_) =>
        println("Flow Success. About to terminate...")
        system.terminate()
      case Failure(e) =>
        println(s"Flow Failure: $e. About to terminate...")
        system.terminate()
    }
  }
} 
Example 25
Source File: Blacklist.scala    From akka_streams_tutorial   with MIT License
package sample.stream_shared_state

import akka.actor.ActorSystem
import akka.stream._
import akka.stream.scaladsl.{Keep, Sink, Source}
import akka.stream.stage._

import scala.concurrent.duration._



object Blacklist extends App {
  implicit val system = ActorSystem("Blacklist")

  val initBlacklist = Set.empty[String]

  val service: StateService[Set[String]] =
    Source.repeat("yes")
      .throttle(1, 1.second, 10, ThrottleMode.shaping)
      .viaMat(new ZipWithState(initBlacklist))(Keep.right)
      .filterNot { case (blacklist: Set[String], elem: String) => blacklist(elem) }
      .to(Sink.foreach(each => println(each._2)))
      .run()

  println("Starting with empty blacklist on a list of 'yes' elements -> elements are passing")

  Thread.sleep(2000)
  println("Inject new blacklist with value: 'yes' -> elements are filtered")
  service.update(Set("yes"))

  Thread.sleep(5000)
  println("Inject new blacklist with value: 'no' -> elements are passing again")
  service.update(Set("no"))
}


trait StateService[A] {
  def update(state: A): Unit
}

class StateServiceCallback[A](callback: AsyncCallback[A]) extends StateService[A] {
  override def update(state: A): Unit = callback.invoke(state)
}

class ZipWithState[S, I](initState: S) extends GraphStageWithMaterializedValue[FlowShape[I, (S, I)], StateService[S]] {
  val in = Inlet[I]("ZipWithState.in")
  val out = Outlet[(S, I)]("ZipWithState.out")

  override val shape: FlowShape[I, (S, I)] = FlowShape.of(in, out)

  override def createLogicAndMaterializedValue(inheritedAttributes: Attributes): (GraphStageLogic, StateService[S]) = {
    val logic = new GraphStageLogic(shape) {
      private[this] var state: S = initState
      val updateStateCallback: AsyncCallback[S] =
        getAsyncCallback[S] {
          state = _
        }

      setHandler(in, new InHandler {
        override def onPush(): Unit = {
          push(out, (state, grab(in)))
        }
      })

      setHandler(out, new OutHandler {
        override def onPull(): Unit = {
          pull(in)
        }
      })
    }

    (logic, new StateServiceCallback(logic.updateStateCallback))
  }
} 
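Since ZipWithState is generic in both the state type S and the element type I, the same stage can distribute any kind of shared state. A hypothetical fragment, reusing the imports and ActorSystem of the example above, with an Int threshold instead of a blacklist:

  // hypothetical: share a mutable Int threshold instead of a Set[String]
  val thresholdService: StateService[Int] =
    Source(1 to 100)
      .throttle(10, 1.second, 10, ThrottleMode.shaping)
      .viaMat(new ZipWithState[Int, Int](0))(Keep.right)
      .filter { case (threshold, elem) => elem >= threshold }
      .to(Sink.foreach(each => println(each._2)))
      .run()

  thresholdService.update(50) // from now on only elements >= 50 pass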
Example 26
Source File: ConflateWithSeed.scala    From akka_streams_tutorial   with MIT License 5 votes vote down vote up
package sample.stream_shared_state

import akka.actor.ActorSystem
import akka.stream.scaladsl.{Flow, Source}
import org.slf4j.{Logger, LoggerFactory}

import scala.collection._
import scala.concurrent.duration._
import scala.util.Random


object ConflateWithSeed extends App {
  val logger: Logger = LoggerFactory.getLogger(this.getClass)
  implicit val system = ActorSystem("ConflateWithSeed")
  implicit val executionContext = system.dispatcher

  def seed(i: Int): mutable.LinkedHashMap[Int, Int] = mutable.LinkedHashMap[Int, Int](i -> 1)

  def aggregate(state: mutable.LinkedHashMap[Int, Int], i: Int): mutable.LinkedHashMap[Int, Int] = {
    logger.info(s"Got: $i")
    state.put(i, state.getOrElseUpdate(i, 0) + 1)
    state
  }

  // lazyFlow is not really needed here, but nice to know that it exists
  // conflateWithSeed invokes the seed method every time, so it
  // is safe to materialize this flow multiple times
  val lazyFlow = Flow.lazyFlow(() =>
    Flow[Int]
      .map(_ => Random.nextInt(100))
      .conflateWithSeed(seed)(aggregate)
  )
  Source(1 to 10)
    .via(lazyFlow)
    .throttle(1, 1.second) //simulate slow sink
    .runForeach(each => logger.info(s"1st reached sink: $each"))

//  Source(1 to 10)
//    .via(lazyFlow)
//    .throttle(1, 1.second) //simulate slow sink
//    .runForeach(each => logger.info(s"2nd reached sink: $each"))
} 
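The batching effect of conflateWithSeed is easiest to see in isolation: it only aggregates while the downstream is backpressured. A minimal sketch, assuming the same imports, implicit system and logger as above:

  // minimal sketch: the slow sink forces conflateWithSeed to batch elements
  // into Vectors; with a fast sink every emitted Vector would have size 1
  Source(1 to 20)
    .conflateWithSeed(Vector(_))(_ :+ _)
    .throttle(1, 1.second) // simulate slow sink
    .runForeach(batch => logger.info(s"Batch: $batch"))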
Example 27
Source File: SplitWhen.scala    From akka_streams_tutorial   with MIT License 5 votes vote down vote up
package sample.stream_shared_state

import java.nio.file.Paths

import akka.NotUsed
import akka.actor.ActorSystem
import akka.stream.IOResult
import akka.stream.scaladsl.{FileIO, Flow, Framing, Keep, Sink, Source}
import akka.util.ByteString
import org.slf4j.{Logger, LoggerFactory}

import scala.concurrent.Future
import scala.util.{Failure, Success}


object SplitWhen extends App {
  val logger: Logger = LoggerFactory.getLogger(this.getClass)
  implicit val system = ActorSystem("SplitWhen")
  implicit val executionContext = system.dispatcher

  val nonLinearCapacityFactor = 100 //raise to see how it scales
  val filename = "splitWhen.csv"

  def genResourceFile() = {
    logger.info(s"Writing resource file: $filename...")

    def fileSink(filename: String): Sink[String, Future[IOResult]] =
      Flow[String]
        .map(s => ByteString(s + "\n"))
        .toMat(FileIO.toPath(Paths.get(filename)))(Keep.right)

    Source.fromIterator(() => (1 to nonLinearCapacityFactor).toList.combinations(2))
      .map(each => s"${each.head},${each.last}")
      .runWith(fileSink(filename))
  }

  val sourceOfLines = FileIO.fromPath(Paths.get(filename))
    .via(Framing.delimiter(ByteString("\n"), maximumFrameLength = 1024, allowTruncation = true)
      .map(_.utf8String))

  val csvToRecord: Flow[String, Record, NotUsed] = Flow[String]
    .map(_.split(",").map(_.trim))
    .map(stringArrayToRecord)

  val terminationHook: Flow[Record, Record, Unit] = Flow[Record]
    .watchTermination() { (_, done) =>
      done.onComplete {
        case Failure(err) => logger.info(s"Flow failed: $err")
        case _ => logger.info("Flow terminated"); system.terminate()
      }
    }

  val printSink = Sink.foreach[Vector[Record]](each => println(s"Reached sink: $each"))

  private def stringArrayToRecord(cols: Array[String]) = Record(cols(0), cols(1))

  private def hasKeyChanged = {
    () => {
      var lastRecordKey: Option[String] = None
      currentRecord: Record =>
        lastRecordKey match {
          // stable identifier pattern: matches when the key is unchanged, or on the first record
          case Some(currentRecord.key) | None =>
            lastRecordKey = Some(currentRecord.key)
            List((currentRecord, false))
          case _ => // key changed
            lastRecordKey = Some(currentRecord.key)
            List((currentRecord, true))
        }
    }
  }

  genResourceFile().onComplete {
    case Success(_) =>
      logger.info(s"Start processing...")
      sourceOfLines
        .via(csvToRecord)
        .via(terminationHook)
        .statefulMapConcat(hasKeyChanged)   // stateful decision
        .splitWhen(_._2)                    // split when key has changed
        .map(_._1)                          // proceed with payload
        .fold(Vector.empty[Record])(_ :+ _) // sum payload
        .mergeSubstreams                    // better performance, but why?
        .runWith(printSink)
    case Failure(exception) => logger.info(s"Exception: $exception")
  }

  case class Record(key: String, value: String)
} 
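The statefulMapConcat/splitWhen pairing above can be reduced to a minimal form without the CSV plumbing. A sketch under the same imports and implicit system, marking each element with a changed-flag and splitting on it:

  // minimal sketch: group consecutive equal elements into one collection per substream
  Source(List("a", "a", "b", "b", "a"))
    .statefulMapConcat { () =>
      var last: Option[String] = None
      (elem: String) => {
        val changed = last.exists(_ != elem)
        last = Some(elem)
        List((elem, changed))
      }
    }
    .splitWhen(_._2)   // open a new substream when the element differs from its predecessor
    .map(_._1)
    .fold(Vector.empty[String])(_ :+ _)
    .mergeSubstreams
    .runForeach(group => println(s"Group: $group")) // Vector(a, a), Vector(b, b), Vector(a)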
Example 28
Source File: SimulateWindTurbines.scala    From akka_streams_tutorial   with MIT License 5 votes vote down vote up
package sample.stream_actor

import akka.actor.ActorSystem
import akka.pattern.{BackoffOpts, BackoffSupervisor}
import akka.stream.ThrottleMode
import akka.stream.scaladsl.{Sink, Source}

import scala.concurrent.duration._


object SimulateWindTurbines extends App {
  implicit val system = ActorSystem()

  val endpoint = "ws://127.0.0.1:8080"
  val numberOfTurbines = 5
  Source(1 to numberOfTurbines)
    .throttle(
      elements = 100, //number of elements to be taken from bucket
      per = 1.second,
      maximumBurst = 100, //capacity of bucket
      mode = ThrottleMode.shaping
    )
    .map { _ =>
      val id = java.util.UUID.randomUUID.toString

      val supervisor = BackoffSupervisor.props(
        BackoffOpts.onFailure(
          WindTurbineSimulator.props(id, endpoint),
          childName = id,
          minBackoff = 1.second,
          maxBackoff = 30.seconds,
          randomFactor = 0.2
        ))

      system.actorOf(supervisor, name = s"$id-backoff-supervisor")
    }
    .runWith(Sink.ignore)
} 
Example 29
Source File: WebSocketClient.scala    From akka_streams_tutorial   with MIT License 5 votes vote down vote up
package sample.stream_actor

import akka.actor.{ActorRef, ActorSystem}
import akka.http.scaladsl.Http
import akka.http.scaladsl.model.StatusCodes
import akka.http.scaladsl.model.ws._
import akka.stream.scaladsl.{Flow, GraphDSL, Keep, Sink, Source}
import akka.stream.{FlowShape, SourceShape}
import sample.stream_actor.WindTurbineSimulator._

import scala.concurrent.duration._
import scala.concurrent.{ExecutionContext, Future}
import scala.util.{Failure, Success}

object WebSocketClient {
  def apply(id: String, endpoint: String, windTurbineSimulator: ActorRef)
           (implicit
            system: ActorSystem,
            executionContext: ExecutionContext) = {
    new WebSocketClient(id, endpoint, windTurbineSimulator)(system, executionContext)
  }
}

class WebSocketClient(id: String, endpoint: String, windTurbineSimulator: ActorRef)
                     (implicit
                      system: ActorSystem,
                      executionContext: ExecutionContext) {


  val webSocketFlow: Flow[Message, Message, Future[WebSocketUpgradeResponse]] = {
    val websocketUri = s"$endpoint/measurements/$id"
    Http().webSocketClientFlow(WebSocketRequest(websocketUri))
  }

  val outgoing = GraphDSL.create() { implicit builder =>
    val data = WindTurbineData(id)

    val flow = builder.add {
      Source.tick(1.second, 100.millis, ()) // valve for the WindTurbineData frequency
        .map(_ => TextMessage(data.getNext))
    }

    SourceShape(flow.out)
  }

  val incoming = GraphDSL.create() { implicit builder =>
    val flow = builder.add {
      Flow[Message]
        .collect {
          case TextMessage.Strict(text) =>
            Future.successful(text)
          case TextMessage.Streamed(textStream) =>
            textStream.runFold("")(_ + _)
              .flatMap(Future.successful)
        }
        .mapAsync(1)(identity)
        .map(each => println(s"Client received msg: $each"))
    }

    FlowShape(flow.in, flow.out)
  }

  val (upgradeResponse, closed) = Source.fromGraph(outgoing)
    .viaMat(webSocketFlow)(Keep.right) // keep the materialized Future[WebSocketUpgradeResponse]
    .via(incoming)
    .toMat(Sink.ignore)(Keep.both) // also keep the Future[Done]
    .run()


  val connected =
    upgradeResponse.map { upgrade =>
      upgrade.response.status match {
        case StatusCodes.SwitchingProtocols => windTurbineSimulator ! Upgraded
        case statusCode => windTurbineSimulator ! FailedUpgrade(statusCode)
      }
    }

  connected.onComplete {
    case Success(_) => windTurbineSimulator ! Connected
    case Failure(ex) => windTurbineSimulator ! ConnectionFailure(ex)
  }

  closed.onComplete {
    case Success(_)  => windTurbineSimulator ! Terminated
    case Failure(ex) => windTurbineSimulator ! ConnectionFailure(ex)
  }
} 
Example 30
Source File: WritePrimes.scala    From akka_streams_tutorial   with MIT License 5 votes vote down vote up
package sample.graphdsl

import java.nio.file.Paths
import java.util.concurrent.ThreadLocalRandom

import akka.actor.ActorSystem
import akka.stream._
import akka.stream.scaladsl._
import akka.util.ByteString

import scala.concurrent.duration._
import scala.util.{Failure, Success}


object WritePrimes extends App {
  implicit val system = ActorSystem("WritePrimes")
  implicit val ec = system.dispatcher

  val maxRandomNumberSize = 100
  val primeSource: Source[Int, akka.NotUsed] =
    Source.fromIterator(() => Iterator.continually(ThreadLocalRandom.current().nextInt(maxRandomNumberSize)))
      .take(100)
      .filter(rnd => isPrime(rnd))
      // neighbor +2 is also prime?
      .filter(prime => isPrime(prime + 2))

  val fileSink = FileIO.toPath(Paths.get("target/primes.txt"))
  val slowSink = Flow[Int]
    .throttle(1, 1.second, 1, ThrottleMode.shaping)
    .map(i => ByteString(i.toString))
    .toMat(fileSink)((_, bytesWritten) => bytesWritten)
  val consoleSink = Sink.foreach[Int](each => println(s"Reached console sink: $each"))

  // Additional processing flow, to show the nature of the composition
  val sharedDoubler = Flow[Int].map(_ * 2)

  // send primes to both sinks using graph API
  val graph = GraphDSL.create(slowSink, consoleSink)((x, _) => x) { implicit builder =>
    (slow, console) =>
      import GraphDSL.Implicits._
      val broadcastSplitter = builder.add(Broadcast[Int](2)) // the splitter - like a Unix tee
      primeSource ~> broadcastSplitter ~> sharedDoubler ~> slow // connect source to splitter, other side to slow sink (via sharedDoubler)
      broadcastSplitter ~> sharedDoubler ~> console // connect other side of splitter to console sink (via sharedDoubler)
      ClosedShape
  }
  val materialized = RunnableGraph.fromGraph(graph).run()

  materialized.onComplete {
    case Success(_) =>
      system.terminate()
    case Failure(e) =>
      println(s"Failure: ${e.getMessage}")
      system.terminate()
  }

  def isPrime(n: Int): Boolean = {
    if (n <= 1) false
    else if (n == 2) true
    else !(2 until n).exists(x => n % x == 0)
  }
}
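For a plain two-way fan-out the GraphDSL is not strictly necessary; alsoTo attaches the side sink inline. A sketch of equivalent wiring, with the doubler applied once before the fan-out rather than per branch:

  // sketch: the same fan-out without GraphDSL, via alsoTo
  val bytesWritten = primeSource
    .via(sharedDoubler)
    .alsoTo(consoleSink) // side-channel copy of every element
    .runWith(slowSink)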
Example 31
Source File: DistributeAndMerge.scala    From akka_streams_tutorial   with MIT License 5 votes vote down vote up
package sample.graphdsl

import akka.NotUsed
import akka.actor.ActorSystem
import akka.stream._
import akka.stream.scaladsl._

import scala.concurrent.Future
import scala.util.hashing.MurmurHash3



object DistributeAndMerge extends App {
  implicit val system = ActorSystem("DistributeAndMerge")
  implicit val ec = system.dispatcher

  // assumed stand-in for the async call used by the stream below,
  // which the truncated listing references but does not show
  def sampleAsyncCall(x: Int): Future[Int] = Future(x * 10)

  def hashingDistribution[A, B](numBuckets: Int,
                                parallelism: Int,
                                hash: A => Int,
                                fn: A => Future[B]): Flow[A, B, NotUsed] = {
    Flow.fromGraph(GraphDSL.create() { implicit builder =>
      import GraphDSL.Implicits._
      val numPorts = numBuckets
      val partitioner =
        builder.add(Partition[A](outputPorts = numPorts, partitioner = a => math.abs(hash(a)) % numPorts))
      val merger = builder.add(Merge[B](inputPorts = numPorts, eagerComplete = false))

      Range(0, numPorts).foreach { eachPort =>
        partitioner.out(eachPort) ~> Flow[A].mapAsync(parallelism)(fn) ~> merger.in(eachPort)
      }

      FlowShape(partitioner.in, merger.out)
    })
  }

  Source(1 to 10)
    .via(
      hashingDistribution[Int, Int](
        numBuckets = 3,
        parallelism = 2,
        hash = element => MurmurHash3.stringHash(element.toString), //Hashing function: String => Int
        fn = sampleAsyncCall
      )
    )
    .runWith(Sink.foreach(each => println(s"Reached sink: $each")))
    .onComplete(_ => system.terminate())
} 
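The Partition/Merge graph buys per-bucket ordering at the price of the graph boilerplate; when ordering does not matter, mapAsyncUnordered gives the same bounded parallelism in a single operator. A sketch under that assumption, reusing the system and sampleAsyncCall above:

  // sketch: unordered alternative when per-bucket ordering is not required
  Source(1 to 10)
    .mapAsyncUnordered(parallelism = 6)(sampleAsyncCall)
    .runWith(Sink.foreach(each => println(s"Reached sink: $each")))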
Example 32
Source File: Hl7TcpClient.scala    From akka_streams_tutorial   with MIT License 5 votes vote down vote up
package alpakka.tcp_to_websockets.hl7mllp

import akka.actor.ActorSystem
import akka.stream.scaladsl.{Sink, Source, Tcp}
import akka.util.ByteString
import ca.uhn.hl7v2.AcknowledgmentCode
import org.slf4j.{Logger, LoggerFactory}

import scala.concurrent.Future
import scala.concurrent.duration._

object Hl7TcpClient extends App with MllpProtocol {
  val logger: Logger = LoggerFactory.getLogger(this.getClass)
  val system = ActorSystem("Hl7TcpClient")

  val (address, port) = ("127.0.0.1", 6160)

  //(1 to 1).par.foreach(each => localStreamingMessageClient(each, 1000, system, address, port))
  (1 to 1).par.foreach(each => localSingleMessageClient(each, 100, system, address, port))


  def localSingleMessageClient(clientname: Int, numberOfMessages: Int, system: ActorSystem, address: String, port: Int): Unit = {
    implicit val sys = system
    implicit val ec = system.dispatcher

    val connection = Tcp().outgoingConnection(address, port)

    def sendAndReceive(i: Int): Future[Int] = {
      val traceID = s"$clientname-${i.toString}"
      val source = Source.single(ByteString(encodeMllp(generateTestMessage(traceID)))).via(connection)
      val closed = source.runForeach(each =>
        if (isNACK(each)) {
          logger.info(s"Client: $clientname-$i received NACK: ${printable(each.utf8String)}")
          throw new RuntimeException("NACK")
        } else {
          logger.info(s"Client: $clientname-$i received ACK: ${printable(each.utf8String)}")
        }
      ).recoverWith {
        case _: RuntimeException => {
          logger.info(s"About to retry for: $clientname-$i...")
          sendAndReceive(i)
        }
        case e: Throwable => Future.failed(e)
      }
      closed.onComplete(each => logger.debug(s"Client: $clientname-$i closed: $each"))
      Future(i)
    }

    Source(1 to numberOfMessages)
      .throttle(1, 1.second)
      .mapAsync(1)(i => sendAndReceive(i))
      .runWith(Sink.ignore)
  }

  def localStreamingMessageClient(id: Int, numberOfMessages: Int, system: ActorSystem, address: String, port: Int): Unit = {
    implicit val sys = system
    implicit val ec = system.dispatcher

    val connection = Tcp().outgoingConnection(address, port)

    val hl7MllpMessages = (1 to numberOfMessages).map(each => ByteString(encodeMllp(generateTestMessage(each.toString))))
    val source = Source(hl7MllpMessages).throttle(10, 1.second).via(connection)
    val closed = source.runForeach(each => logger.info(s"Client: $id received echo: ${printable(each.utf8String)}"))
    closed.onComplete(each => logger.info(s"Client: $id closed: $each"))
  }

  private def generateTestMessage(senderTraceID: String) = {
    // For now put the senderTraceID into the "sender lab" field to follow the messages across the workflow
    val message = new StringBuilder
    message ++= s"MSH|^~\\&|$senderTraceID|MCM|LABADT|MCM|198808181126|SECURITY|ADT^A01|1234|P|2.5.1|"
    message ++= CARRIAGE_RETURN
    message ++= "EVN|A01|198808181123||"
    message ++= CARRIAGE_RETURN
    message ++= "PID|||PATID1234^5^M11^ADT1^MR^MCM~123456789^^^USSSA^SS||EVERYMAN^ADAM^A^III||19610615|M||C|1200 N ELM STREET^^GREENSBORO^NC^27401-1020"
    message ++= CARRIAGE_RETURN
    message ++= "NK1|1|JONES^BARBARA^K|SPO^Spouse^HL70063|171 ZOBERLEIN^^ISHPEMING^MI^49849^|"
    message ++= CARRIAGE_RETURN
    message ++= "PV1|1|I|2000^2012^01||||004777^LEBAUER^SIDNEY^J.|||SUR||||9|A0|"
    message ++= CARRIAGE_RETURN
    message.toString()
  }

  private def isNACK(message: ByteString): Boolean = {
    message.utf8String.contains(AcknowledgmentCode.AE.name()) ||
      message.utf8String.contains(AcknowledgmentCode.AR.name()) ||
      message.utf8String.contains(AcknowledgmentCode.CE.name()) ||
      message.utf8String.contains(AcknowledgmentCode.CR.name())
  }
} 
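encodeMllp and printable come from the project's MllpProtocol trait, which is not shown here. The MLLP envelope itself is small enough to sketch: per the HL7 MLLP specification, a message is wrapped in a vertical-tab start byte and a file-separator/carriage-return trailer (hypothetical standalone version, names assumed):

object MllpSketch {
  // MLLP framing: <VT>(0x0B) + payload + <FS>(0x1C) + <CR>(0x0D)
  val StartBlock = '\u000b'     // <VT>
  val EndBlock = '\u001c'       // <FS>
  val CarriageReturn = '\r'     // <CR>

  def encodeMllp(message: String): String =
    s"$StartBlock$message$EndBlock$CarriageReturn"
}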
Example 33
Source File: WebsocketServer.scala    From akka_streams_tutorial   with MIT License 5 votes vote down vote up
package alpakka.env

import akka.actor.ActorSystem
import akka.http.scaladsl.Http
import akka.http.scaladsl.model.ws._
import akka.http.scaladsl.server.Directives._
import akka.http.scaladsl.server.Route
import akka.http.scaladsl.server.directives.WebSocketDirectives
import akka.stream.scaladsl.{Flow, Sink, Source}

import scala.concurrent.Await
import scala.concurrent.duration._
import scala.language.postfixOps
import scala.util.{Failure, Success}


object WebsocketServer extends App with WebSocketDirectives {
  implicit val system = ActorSystem("WebsocketServer")
  implicit val executionContext = system.dispatcher

  val (address, port) = ("127.0.0.1", 6002)
  server(address, port)

  def server(address: String, port: Int) = {

    def echoFlow: Flow[Message, Message, Any] =
      Flow[Message].mapConcat {
        case tm: TextMessage =>
          println(s"Server received: $tm")
          TextMessage(Source.single("Echo: ") ++ tm.textStream) :: Nil
        case bm: BinaryMessage =>
          // ignore binary messages but drain content to avoid the stream being clogged
          bm.dataStream.runWith(Sink.ignore)
          Nil
      }

    val websocketRoute: Route =
      path("echo") {
        handleWebSocketMessages(echoFlow)
      }

    val bindingFuture = Http().bindAndHandle(websocketRoute, address, port)
    bindingFuture.onComplete {
      case Success(b) =>
        println("Server started, listening on: " + b.localAddress)
      case Failure(e) =>
        println(s"Server could not bind to $address:$port. Exception message: ${e.getMessage}")
        system.terminate()
    }

    sys.addShutdownHook {
      println("About to shutdown...")
      val fut = bindingFuture.map(serverBinding => serverBinding.terminate(hardDeadline = 3.seconds))
      println("Waiting for connections to terminate...")
      val onceAllConnectionsTerminated = Await.result(fut, 10.seconds)
      println("Connections terminated")
      onceAllConnectionsTerminated.flatMap(_ => system.terminate())
    }
  }
} 
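A throwaway client for the /echo route can be built with singleWebSocketRequest. A hypothetical fragment reusing the imports and implicit system above (Source.maybe keeps the connection open indefinitely):

  // hypothetical manual test client for the /echo route above
  val clientFlow: Flow[Message, Message, akka.NotUsed] =
    Flow.fromSinkAndSource(
      Sink.foreach[Message](msg => println(s"Client received: $msg")),
      Source.single(TextMessage("ping")).concat(Source.maybe[Message]))

  Http().singleWebSocketRequest(WebSocketRequest(s"ws://$address:$port/echo"), clientFlow)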
Example 34
Source File: XmlProcessing.scala    From akka_streams_tutorial   with MIT License 5 votes vote down vote up
package alpakka.xml

import java.nio.file.Paths
import java.util.Base64

import akka.actor.ActorSystem
import akka.stream.alpakka.xml.scaladsl.XmlParsing
import akka.stream.alpakka.xml.{EndElement, ParseEvent, StartElement, TextEvent}
import akka.stream.scaladsl.{FileIO, Sink, Source}
import akka.util.ByteString

import scala.collection.immutable
import scala.concurrent.Future
import scala.util.{Failure, Success}



object XmlProcessing extends App {
  implicit val system = ActorSystem("XmlProcessing")
  implicit val executionContext = system.dispatcher

  val resultFileName = "testfile_result.jpg"

  val done = FileIO.fromPath(Paths.get("./src/main/resources/xml_with_base64_embedded.xml"))
    .via(XmlParsing.parser)
    .statefulMapConcat(() => {

      // state
      val stringBuilder: StringBuilder = StringBuilder.newBuilder
      var counter: Int = 0

      // aggregation function
      parseEvent: ParseEvent =>
        parseEvent match {
          case s: StartElement if s.attributes.contains("mediaType") =>
            stringBuilder.clear()
            val mediaType = s.attributes.head._2
            println("mediaType: " + mediaType)
            immutable.Seq(mediaType)
          case s: EndElement if s.localName == "embeddedDoc" =>
            val text = stringBuilder.toString
            println("File content: " + text) //large embedded files are read into memory
            Source.single(ByteString(text))
              .map(each => ByteString(Base64.getMimeDecoder.decode(each.toByteBuffer)))
              .runWith(FileIO.toPath(Paths.get(s"$counter-$resultFileName")))
            counter = counter + 1
            immutable.Seq(text)
          case t: TextEvent =>
            stringBuilder.append(t.text)
            immutable.Seq.empty
          case _ =>
            immutable.Seq.empty
        }
    })
    .runWith(Sink.ignore)

  terminateWhen(done)


  def terminateWhen(done: Future[_]) = {
    done.onComplete {
      case Success(_) =>
        println("Flow Success. About to terminate...")
        system.terminate()
      case Failure(e) =>
        println(s"Flow Failure: $e. About to terminate...")
        system.terminate()
    }
  }
} 
Example 35
Source File: JMSTextMessageProducerClient.scala    From akka_streams_tutorial   with MIT License 5 votes vote down vote up
package alpakka.jms

import akka.Done
import akka.actor.ActorSystem
import akka.stream.ThrottleMode
import akka.stream.alpakka.jms.scaladsl.JmsProducer
import akka.stream.alpakka.jms.{JmsProducerSettings, JmsTextMessage}
import akka.stream.scaladsl.{Sink, Source}
import com.typesafe.config.Config
import javax.jms.ConnectionFactory
import org.apache.activemq.ActiveMQConnectionFactory
import org.slf4j.{Logger, LoggerFactory}

import scala.concurrent.Future
import scala.concurrent.duration._

object JMSTextMessageProducerClient {
  val logger: Logger = LoggerFactory.getLogger(this.getClass)
  implicit val system = ActorSystem("JMSTextMessageProducerClient")
  implicit val ec = system.dispatcher

  //The "failover:" part in the brokerURL instructs ActiveMQ to reconnect on network failure
  //This does not interfere with the new 1.0-M2 implementation
  val connectionFactory: ConnectionFactory = new ActiveMQConnectionFactory("artemis", "simetraehcapa", "failover:tcp://127.0.0.1:21616")


  def main(args: Array[String]): Unit = {
    jmsTextMessageProducerClient(connectionFactory)
  }

  private def jmsTextMessageProducerClient(connectionFactory: ConnectionFactory) = {
    val producerConfig: Config = system.settings.config.getConfig(JmsProducerSettings.configPath)
    val jmsProducerSink: Sink[JmsTextMessage, Future[Done]] = JmsProducer.sink(
      JmsProducerSettings(producerConfig, connectionFactory).withQueue("test-queue")
    )

    Source(1 to 2000000)
      .throttle(1, 1.second, 1, ThrottleMode.shaping)
      .wireTap(number => logger.info(s"SEND Msg with TRACE_ID: $number"))
      .map { number =>
        JmsTextMessage(s"Payload: ${number.toString}")
          .withProperty("TRACE_ID", number)
      }
      .runWith(jmsProducerSink)
  }
} 
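The consuming side is not part of this example. A hedged sketch of what it could look like with Alpakka's JmsConsumer, assuming a consumer config path and queue name that mirror the producer:

  // hypothetical matching consumer, mirroring the producer settings above
  import akka.stream.alpakka.jms.JmsConsumerSettings
  import akka.stream.alpakka.jms.scaladsl.JmsConsumer

  val consumerConfig: Config = system.settings.config.getConfig(JmsConsumerSettings.configPath)
  val jmsSource = JmsConsumer.textSource(
    JmsConsumerSettings(consumerConfig, connectionFactory).withQueue("test-queue")
  )

  jmsSource
    .wireTap(text => logger.info(s"RECEIVED Msg: $text"))
    .runWith(Sink.ignore)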
Example 36
Source File: FileIOEcho.scala    From akka_streams_tutorial   with MIT License 5 votes vote down vote up
package alpakka.file

import java.nio.file.Paths

import akka.actor.ActorSystem
import akka.stream.IOResult
import akka.stream.scaladsl.FileIO

import scala.concurrent.Future
import scala.util.{Failure, Success}


object FileIOEcho extends App {
  implicit val system = ActorSystem("FileIOEcho")
  implicit val executionContext = system.dispatcher

  val sourceFileName = "./src/main/resources/testfile.jpg"
  val encFileName = "testfile.enc"
  val resultFileName = "testfile_result.jpg"

  val sourceOrig = FileIO.fromPath(Paths.get(sourceFileName), 3000)
  val sinkEnc = FileIO.toPath(Paths.get(encFileName))

  val doneEnc = sourceOrig
    //.wireTap(each => println(s"Chunk enc: $each"))
    .map(each => each.encodeBase64)
    .runWith(sinkEnc)

  doneEnc.onComplete {
    case Success(_) =>
      val sourceEnc = FileIO.fromPath(Paths.get(encFileName))
      val sinkDec = FileIO.toPath(Paths.get(resultFileName))

      val doneDec = sourceEnc
        //.wireTap(each => println(s"Chunk dec: $each"))
        .map(each => each.decodeBase64)
        .runWith(sinkDec)
      terminateWhen(doneDec)
    case Failure(ex) => println(s"Exception: $ex")
  }

  def terminateWhen(done: Future[IOResult]) = {
    done.onComplete {
      case Success(_) =>
        println(s"Flow Success. Written file: $resultFileName About to terminate...")
        system.terminate()
      case Failure(e) =>
        println(s"Flow Failure: $e. About to terminate...")
        system.terminate()
    }
  }
} 
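The chunk size of 3000 passed to FileIO.fromPath is not arbitrary: Base64 turns 3 input bytes into 4 output characters, so only a chunk size divisible by 3 lets every chunk be encoded independently without intermediate padding. A quick check of that invariant:

  // 3 input bytes -> 4 Base64 chars; a chunk size divisible by 3 guarantees
  // no intermediate '=' padding when chunks are encoded independently
  val chunkSize = 3000
  require(chunkSize % 3 == 0, "chunk size must be a multiple of 3 for chunked Base64 encoding")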
Example 37
Source File: WordCountProducer.scala    From akka_streams_tutorial   with MIT License 5 votes vote down vote up
package alpakka.kafka

import java.util
import java.util.concurrent.ThreadLocalRandom

import akka.actor.ActorSystem
import akka.kafka.ProducerMessage.Message
import akka.kafka.ProducerSettings
import akka.kafka.scaladsl.Producer
import akka.stream.ThrottleMode
import akka.stream.scaladsl.{Keep, Sink, Source}
import akka.{Done, NotUsed}
import org.apache.kafka.clients.producer.{Partitioner, ProducerRecord}
import org.apache.kafka.common.errors.{NetworkException, UnknownTopicOrPartitionException}
import org.apache.kafka.common.serialization.StringSerializer
import org.apache.kafka.common.{Cluster, PartitionInfo}

import scala.concurrent.Future
import scala.concurrent.duration._


class CustomPartitioner extends Partitioner {
  override def partition(topic: String, key: Any, keyBytes: Array[Byte], value: Any, valueBytes: Array[Byte], cluster: Cluster): Int = {
    val partitionInfoList: util.List[PartitionInfo] = cluster.availablePartitionsForTopic(topic)
    val partitionCount = partitionInfoList.size
    val fakeNewsPartition = 0

    //println("CustomPartitioner received key: " + key + " and value: " + value)

    if (value.toString.contains(WordCountProducer.fakeNewsKeyword)) {
      //println("CustomPartitioner send message: " + value + " to fakeNewsPartition")
      fakeNewsPartition
    }
    else ThreadLocalRandom.current.nextInt(1, partitionCount) // random partition, excluding the fakeNews partition 0
  }

  override def close(): Unit = {
    println("CustomPartitioner: " + Thread.currentThread + " received close")
  }

  override def configure(configs: util.Map[String, _]): Unit = {
    println("CustomPartitioner received configure with configuration: " + configs)
  }
}

object CustomPartitioner {
  private def deserialize[V](objectData: Array[Byte]): V = org.apache.commons.lang3.SerializationUtils.deserialize(objectData).asInstanceOf[V]
} 
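The partitioner only takes effect once it is registered on the producer. With Alpakka Kafka that is a single property on the ProducerSettings; a sketch assuming an ActorSystem named system and a local broker, as elsewhere in this tutorial:

  // sketch: wiring the CustomPartitioner into Alpakka Kafka producer settings
  import org.apache.kafka.clients.producer.ProducerConfig

  val producerSettings = ProducerSettings(system, new StringSerializer, new StringSerializer)
    .withBootstrapServers("localhost:9092")
    .withProperty(ProducerConfig.PARTITIONER_CLASS_CONFIG, classOf[CustomPartitioner].getName)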
Example 38
Source File: WordCountConsumer.scala    From akka_streams_tutorial   with MIT License 5 votes vote down vote up
package alpakka.kafka

import akka.Done
import akka.actor.{ActorSystem, Props}
import akka.kafka.scaladsl.Consumer.DrainingControl
import akka.kafka.scaladsl.{Committer, Consumer}
import akka.kafka.{CommitterSettings, ConsumerSettings, Subscriptions}
import akka.stream.scaladsl.Sink
import akka.util.Timeout
import alpakka.kafka.TotalFake.{IncrementMessage, IncrementWord}
import org.apache.kafka.clients.consumer.ConsumerConfig
import org.apache.kafka.common.serialization.{LongDeserializer, StringDeserializer}

import scala.concurrent.Future
import scala.concurrent.duration._


object WordCountConsumer extends App {
  implicit val system = ActorSystem("WordCountConsumer")
  implicit val ec = system.dispatcher

  val total = system.actorOf(Props[TotalFake], "totalFake")

  val committerSettings = CommitterSettings(system).withMaxBatch(1)

  def createConsumerSettings(group: String): ConsumerSettings[String, java.lang.Long] = {
    ConsumerSettings(system, new StringDeserializer , new LongDeserializer)
      .withBootstrapServers("localhost:9092")
      .withGroupId(group)
      //Define consumer behavior upon starting to read a partition for which it does not have a committed offset or if the committed offset it has is invalid
      .withProperty(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest")
  }

  def createAndRunConsumerWordCount(id: String) = {
    Consumer.committableSource(createConsumerSettings("wordcount consumer group"), Subscriptions.topics("wordcount-output"))
      .mapAsync(1) { msg =>
        //println(s"$id - Offset: ${msg.record.offset()} - Partition: ${msg.record.partition()} Consume msg with key: ${msg.record.key()} and value: ${msg.record.value()}")
        if (msg.record.key().equalsIgnoreCase("fakeNews")) { //hardcoded because WordCountProducer.fakeNewsKeyword does not work
          import akka.pattern.ask
          implicit val askTimeout: Timeout = Timeout(3.seconds)
          (total ? IncrementWord(msg.record.value.toInt, id))
            .mapTo[Done]
            .map(_ => msg.committableOffset)
        } else {
          Future(msg).map(_ => msg.committableOffset)
        }
      }
      .via(Committer.flow(committerSettings))
      .toMat(Sink.seq)(DrainingControl.apply)
      .run()
  }

  def createAndRunConsumerMessageCount(id: String) = {
    Consumer.committableSource(createConsumerSettings("messagecount consumer group"), Subscriptions.topics("messagecount-output"))
      .mapAsync(1) { msg =>
        //println(s"$id - Offset: ${msg.record.offset()} - Partition: ${msg.record.partition()} Consume msg with key: ${msg.record.key()} and value: ${msg.record.value()}")
        import akka.pattern.ask
        implicit val askTimeout: Timeout = Timeout(3.seconds)
        (total ? IncrementMessage(msg.record.value.toInt, id))
          .mapTo[Done]
          .map(_ => msg.committableOffset)
      }
      .via(Committer.flow(committerSettings))
      .toMat(Sink.seq)(DrainingControl.apply)
      .run()
  }

  val drainingControlW1 = createAndRunConsumerWordCount("W.1")
  val drainingControlW2 = createAndRunConsumerWordCount("W.2")
  val drainingControlM = createAndRunConsumerMessageCount("M")


  sys.addShutdownHook {
    println("Got control-c cmd from shell, about to shutdown...")
    drainingControlW1.drainAndShutdown()
    drainingControlW2.drainAndShutdown()
    drainingControlM.drainAndShutdown()
  }
} 
Example 39
Source File: TotalTweetsScheduler.scala    From redrock   with Apache License 2.0 5 votes vote down vote up
package com.restapi

import java.io.{File, FileInputStream}

import akka.actor.{ActorRef, Actor, ActorSystem, Props}
import akka.io.IO
import org.slf4j.LoggerFactory
import play.api.libs.json.Json
import spray.can.Http
import akka.pattern.ask
import spray.http.DateTime
import scala.concurrent.duration._
import akka.util.Timeout
import scala.concurrent.ExecutionContext.Implicits.global
import org.apache.commons.codec.digest.DigestUtils
import scala.io.Source

case object GetTotalTweetsScheduler

object CurrentTotalTweets {
  @volatile
  var totalTweets: Long = 0
}

class ExecuterTotalTweetsES(delay: FiniteDuration, interval: FiniteDuration) extends Actor {
  context.system.scheduler.schedule(delay, interval) {
    getTotalTweetsES
  }

  val logger = LoggerFactory.getLogger(this.getClass)

  override def receive: Actor.Receive = {
    case GetTotalTweetsScheduler => {
      logger.info(s"Getting Total of Tweets. Begin: ${CurrentTotalTweets.totalTweets}")
    }
    case _ => // just ignore any messages
  }

  def getTotalTweetsES: Unit = {
    val elasticsearchRequests = new GetElasticsearchResponse(0, Array[String](), Array[String](),
      LoadConf.restConf.getString("searchParam.defaulStartDatetime"),
      LoadConf.restConf.getString("searchParam.defaultEndDatetime"),
      LoadConf.esConf.getString("decahoseIndexName"))
    val totalTweetsResponse = Json.parse(elasticsearchRequests.getTotalTweetsESResponse())
    logger.info(s"Getting Total of Tweets. Current: ${CurrentTotalTweets.totalTweets}")
    CurrentTotalTweets.totalTweets = (totalTweetsResponse \ "hits" \ "total").as[Long]
    logger.info(s"Total users updated. New: ${CurrentTotalTweets.totalTweets}")
  }
} 
Example 40
Source File: Application.scala    From redrock   with Apache License 2.0 5 votes vote down vote up
package com.restapi

import akka.actor.{ActorSystem, Props}
import akka.io.IO
import spray.can.Http
import akka.pattern.ask
import scala.concurrent.duration._
import akka.util.Timeout
import org.slf4j.LoggerFactory


object Application extends App {
  val logger = LoggerFactory.getLogger(this.getClass)
  
  // we need an ActorSystem to host our application in
  implicit val system = ActorSystem(LoadConf.restConf.getString("actor"))
  // create and start our service actor
  val service = system.actorOf(Props[MyServiceActor], LoadConf.restConf.getString("name"))
  val sessionTimeout = system.actorOf(Props[SessionTimeoutActor])

  val sessionTable = system.actorOf(Props(classOf[SimpleSession], sessionTimeout,
    LoadConf.accessConf.getInt("delay") seconds,
    LoadConf.accessConf.getInt("timeout-interval") seconds))
  sessionTable ! InitSessionTable

  val sessionLoader = system.actorOf(Props(classOf[LoadSessionActor], sessionTable,
    LoadConf.accessConf.getInt("delay") seconds,
    LoadConf.accessConf.getInt("check-interval") seconds))
  sessionLoader ! InitFileMd5Sum

  val schedTotalTweets = system.actorOf(Props(classOf[ExecuterTotalTweetsES],
    LoadConf.restConf.getInt("totalTweetsScheduler.delay") seconds,
    LoadConf.restConf.getInt("totalTweetsScheduler.reapeatEvery") seconds))
  schedTotalTweets ! GetTotalTweetsScheduler

  implicit val timeout = Timeout(800.seconds)
  IO(Http) ? Http.Bind(service, interface = "0.0.0.0", port = LoadConf.restConf.getInt("port"))

  logger.info( s"""Application: ${LoadConf.globalConf.getString("appName")} running version: ${LoadConf.globalConf.getString("appVersion")}""".stripMargin)
} 
Example 41
Source File: HelloAkka.scala    From sbt-reactive-app   with Apache License 2.0 5 votes vote down vote up
package hello.akka

import akka.cluster.Cluster
import akka.cluster.ClusterEvent._
import akka.actor.{ Actor, ActorSystem, Props }
import akka.discovery._
import com.typesafe.config.ConfigFactory

final case class Greet(name: String)

class GreeterActor extends Actor {
  val cluster = Cluster(context.system)

  override def preStart = {
    cluster.subscribe(self, initialStateMode = InitialStateAsEvents,
      classOf[MemberEvent], classOf[UnreachableMember])
  }

  override def postStop = {
    cluster.unsubscribe(self)
  }

  def receive = {
    case Greet(name) =>
      println(s"Hello, $name")
    case MemberUp(member) =>
      println(s"Member up: $member")
    case MemberRemoved(member, previousStatus) =>
      println(s"Member down: $member")
    case _: MemberEvent => // ignore
  }
}

object HelloAkka {
  def main(args: Array[String]) = {
    startup()
  }

  def startup() = {
    val system = ActorSystem("ClusterSystem")
    val discovery = ServiceDiscovery(system).discovery
    val actor = system.actorOf(Props[GreeterActor], name = "GreeterActor")

    actor ! Greet("[unnamed]")
  }
} 
Example 42
Source File: DemoApp.scala    From sbt-reactive-app   with Apache License 2.0 5 votes vote down vote up
package foo

import akka.actor.{ Actor, ActorLogging, ActorSystem, Props }
import akka.cluster.ClusterEvent.ClusterDomainEvent
import akka.cluster.{ Cluster, ClusterEvent }
import akka.http.scaladsl.Http
import akka.http.scaladsl.model._
import akka.http.scaladsl.server.Directives._
import akka.management.AkkaManagement
import akka.management.cluster.bootstrap.ClusterBootstrap
import akka.stream.ActorMaterializer

object DemoApp extends App {

  implicit val system = ActorSystem("Appka")

  import system.log
  implicit val mat = ActorMaterializer()
  val cluster = Cluster(system)

  log.info(s"Started [$system], cluster.selfAddress = ${cluster.selfAddress}")
  log.info("something2")
  //#start-akka-management
  AkkaManagement(system).start()
  //#start-akka-management
  ClusterBootstrap(system).start()

  cluster.subscribe(
    system.actorOf(Props[ClusterWatcher]),
    ClusterEvent.InitialStateAsEvents,
    classOf[ClusterDomainEvent])

  // add real app routes here
  val routes =
    path("hello") {
      get {
        complete(
          HttpEntity(
            ContentTypes.`text/html(UTF-8)`,
            "<h1>Hello</h1>"))
      }
    }
  Http().bindAndHandle(routes, "0.0.0.0", 8080)

  Cluster(system).registerOnMemberUp({
    log.info("Cluster member is up!")
  })

}

class ClusterWatcher extends Actor with ActorLogging {
  val cluster = Cluster(context.system)

  override def receive = {
    case msg => log.info(s"Cluster ${cluster.selfAddress} >>> $msg")
  }
} 
Example 43
Source File: Timing.scala    From logging   with Apache License 2.0 5 votes vote down vote up
package demo.test

import java.net.InetAddress
import akka.actor.ActorSystem
import com.persist.logging._
import logging_demo.BuildInfo
import scala.concurrent.duration._
import scala.language.postfixOps
import scala.concurrent.{Future, Await}
import scala.concurrent.ExecutionContext.Implicits._

case class TimeClass() extends ClassLogging with Timing {
  def b(id: RequestId): Unit = {
    Time(id, "bbb") {
      Thread.sleep(100)
    }
  }

  def a(id: RequestId): Unit = {
    Time(id, "top") {
      Time(id, "aaa1") {
        Thread.sleep(200)
      }
      b(id)
      Time(id, "aaa2") {
        Thread.sleep(300)
      }
      b(id)
    }
  }
}

case class FutureClass() extends ClassLogging with Timing {
  def demo(id: RequestId): Future[Int] = {
    val token = time.start(id, "top")
    val f1 = Future {
      Time(id, "f1") {
        Thread.sleep(100)
        100
      }
    }
    val f2 = f1.map { i =>
      val result = Time(id, "f2") {
        Thread.sleep(200)
        Time(id, "f2a") {
           i * 2
        }
      }
      result
    }
    val f3 = f2.recover{ case ex:Throwable =>
      log.error("Timing test failed", ex)
      -1
    }
    f3.map {  i =>
      time.end(id, "top", token)
      i
    }
  }
}

object Timing {

  case class F() extends ClassLogging {
    val fc = FutureClass()
    val f = fc.demo(RequestId())
    val i = Await.result(f, 3 seconds)
    log.info(Map("@msg" -> "Future result", "val" -> i))
  }

  def main(args: Array[String]) {
    val system = ActorSystem("test")

    val host = InetAddress.getLocalHost.getHostName
    val loggingSystem = LoggingSystem(system, BuildInfo.name, BuildInfo.version, host)

    val tc = new TimeClass()
    tc.a(RequestId())
    tc.a(RequestId())

    F()


    Await.result(loggingSystem.stop, 30 seconds)
    Await.result(system.terminate(), 20 seconds)
  }
} 
Example 44
Source File: ActorDemo.scala    From logging   with Apache License 2.0 5 votes vote down vote up
package demo.test

import java.net.InetAddress
import akka.actor.{Props, Actor, ActorSystem}
import com.persist.logging._
import logging_demo.BuildInfo
import scala.concurrent.duration._
import scala.language.postfixOps
import scala.concurrent.Await


object DemoActor {
  def props() = Props(new DemoActor())
}

class DemoActor() extends Actor with ActorLogging {
  println(this.getClass.getSimpleName)

  def receive = {
    case "foo" => log.info("Saw foo")
    case "done" => context.stop(self)
    case x: Any => log.error(Map("@msg" -> "Unexpected actor message",
      "message" -> x.toString))
  }
}

case class ActorDemo(system: ActorSystem) {
  def demo(): Unit = {
    val a = system.actorOf(DemoActor.props(), name = "Demo")
    a ! "foo"
    a ! "bar"
    a ! "done"
  }
}

object ActorDemo {
  def main(args: Array[String]) {
    val system = ActorSystem("test")

    val host = InetAddress.getLocalHost.getHostName
    val loggingSystem = LoggingSystem(system, BuildInfo.name, BuildInfo.version, host)

    val act = ActorDemo(system)
    act.demo()

    Await.result(loggingSystem.stop, 30 seconds)
    Await.result(system.terminate(), 20 seconds)
  }
} 
Example 45
Source File: Exceptions.scala    From logging   with Apache License 2.0 5 votes vote down vote up
package demo.test

import java.net.InetAddress
import akka.actor.ActorSystem
import com.persist.logging._
import logging_demo.BuildInfo
import scala.concurrent.Await
import scala.concurrent.duration._
import scala.language.postfixOps

case class MyException(msg: RichMsg) extends RichException(msg)

case class ExceptionClass() extends ClassLogging {
  def demo(): Unit = {
    log.error("Test", new Exception("Bad Code"))
    log.warn("Rich", RichException(Map("@msg" -> "Fail", "count" -> 23)))
    log.error("Special", MyException(Map("@msg" -> "Fail", "count" -> 23)))
    try {
      throw MyException(Map("@msg" -> "Die", "name" -> "abc"))
    } catch {
      case ex: Exception => log.error("Caught exception", ex)
    }
  }
}

object Exceptions {
  def main(args: Array[String]) {
    val system = ActorSystem("test")

    val host = InetAddress.getLocalHost.getHostName
    val loggingSystem = LoggingSystem(system, BuildInfo.name, BuildInfo.version, host)

    val ec = new ExceptionClass()
    ec.demo()

    Await.result(loggingSystem.stop, 30 seconds)
    Await.result(system.terminate(), 20 seconds)
  }
} 
Example 46
Source File: OtherApis.scala    From logging   with Apache License 2.0 5 votes vote down vote up
package demo.test

import java.net.InetAddress
import akka.actor.{Props, Actor, ActorSystem}
import com.persist.logging._
import logging_demo.BuildInfo
import scala.concurrent.duration._
import scala.language.postfixOps
import scala.concurrent.Await
import org.slf4j.LoggerFactory

case class Slf4jDemo() {
  val slf4jlog = LoggerFactory.getLogger(classOf[Slf4jDemo])

  def demo(): Unit = {
    slf4jlog.warn("slf4j")
  }
}

object AkkaActor {
  def props() = Props(new AkkaActor())
}

class AkkaActor() extends Actor with akka.actor.ActorLogging {
  def receive = {
    case "foo" => log.warning("Saw foo")
    case "done" => context.stop(self)
    case x: Any => log.error(s"Unexpected actor message: ${x}")
  }
}

case class AkkaDemo(system: ActorSystem) {
  def demo(): Unit = {
    val a = system.actorOf(AkkaActor.props(), name="Demo")
    a ! "foo"
    a ! "bar"
    a ! "done"
  }

}

object OtherApis {
  def main(args: Array[String]) {
    val system = ActorSystem("test")

    val host = InetAddress.getLocalHost.getHostName
    val loggingSystem = LoggingSystem(system, BuildInfo.name, BuildInfo.version, host)

    val slf = Slf4jDemo()
    slf.demo()

    val act = AkkaDemo(system)
    act.demo()

    Await.result(loggingSystem.stop, 30 seconds)
    Await.result(system.terminate(), 20 seconds)
  }
} 
Example 47
Source File: Appender.scala    From logging   with Apache License 2.0 5 votes vote down vote up
package demo.test

import java.net.InetAddress
import akka.actor.{ActorRefFactory, ActorSystem}
import com.persist.logging._
import logging_demo.BuildInfo
import scala.concurrent.Await
import scala.concurrent.duration._
import scala.language.postfixOps
import scala.concurrent.Future

case class AppenderClass() extends ClassLogging {

  def demo(): Unit = {
    log.info("Test")
    log.error("Foo failed")
    log.warn(Map("@msg" -> "fail", "value" -> 23))
  }
}

object FlatAppender extends LogAppenderBuilder {
  def apply(factory: ActorRefFactory, stdHeaders: Map[String, RichMsg])
  = new FlatAppender(factory, stdHeaders)
}

class FlatAppender(factory: ActorRefFactory, stdHeaders: Map[String, RichMsg]) extends LogAppender {

  def append(msg: Map[String, RichMsg], category: String) {
    if (category == "common") {
      val level = msg.get("@severity") match {
        case Some(s: String) => s
        case _ => "???"
      }
      val time = msg.get("@timestamp") match {
        case Some(s: String) => s
        case _ => "???"
      }
      val message = richToString(msg.getOrElse("msg","???"))
      println(s"$time\t$level\t$message")
    }
  }

  def finish(): Future[Unit] = Future.successful(())

  def stop(): Future[Unit] = Future.successful(())
}

object Appender {
  def main(args: Array[String]) {
    val system = ActorSystem("test")

    val host = InetAddress.getLocalHost.getHostName
    val loggingSystem = LoggingSystem(system, BuildInfo.name,
      BuildInfo.version, host, appenderBuilders = Seq(FileAppender, FlatAppender))

    val sc = new AppenderClass()
    sc.demo()

    Await.result(loggingSystem.stop, 30 seconds)
    Await.result(system.terminate(), 20 seconds)
  }
} 
Example 48
Source File: Alternative.scala    From logging   with Apache License 2.0 5 votes vote down vote up
package demo.test

import java.net.InetAddress
import akka.actor.ActorSystem
import com.persist.logging._
import logging_demo.BuildInfo
import scala.concurrent.duration._
import scala.language.postfixOps
import scala.concurrent.Await

case class AltClass() extends ClassLogging {

  def demo(): Unit = {
    log.alternative("foo", Map("message"->"test"))
    log.alternative("foo", Map("a" -> "x", "b" -> false, "c" -> 65))
    log.alternative("bar", Map("message"->"btest"))
  }
}

object Alternative {
  def main(args: Array[String]) {
    val system = ActorSystem("test")

    val host = InetAddress.getLocalHost.getHostName
    val loggingSystem = LoggingSystem(system, BuildInfo.name, BuildInfo.version, host)

    val alt = new AltClass()
    alt.demo()

    Await.result(loggingSystem.stop, 30 seconds)
    Await.result(system.terminate(), 20 seconds)
  }
} 
Example 49
Source File: RequestIds.scala    From logging   with Apache License 2.0 5 votes vote down vote up
package demo.test

import java.net.InetAddress
import akka.actor.ActorSystem
import com.persist.logging._
import logging_demo.BuildInfo
import scala.concurrent.duration._
import scala.language.postfixOps
import scala.concurrent.Await

case class RequestB() extends ClassLogging {

  def demo(id: AnyId): Unit = {
    log.trace("In B", id = id)
    log info("BBB", id = id)
  }
}

case class RequestC() extends ClassLogging {
  def demo(id: AnyId): Unit = {
    log.trace("In C", id = id)
    log.info("CCC", id = id)
  }
}

case class RequestA() extends ClassLogging {
  val b = RequestB()
  val c = RequestC()

  def demo(id: AnyId): Unit = {
    log.trace("Enter A", id = id)
    b.demo(id)
    log.info("AAA", id = id)
    c.demo(id)
    log.trace("Exit A", id = id)
  }
}

object RequestIds {
  def main(args: Array[String]) {
    val system = ActorSystem("test")

    val host = InetAddress.getLocalHost.getHostName
    val loggingSystem = LoggingSystem(system, BuildInfo.name, BuildInfo.version, host)

    val a = new RequestA()
    a.demo(noId)
    a.demo(RequestId())
    a.demo(RequestId(level = Some(LoggingLevels.TRACE)))

    Await.result(loggingSystem.stop, 30 seconds)
    Await.result(system.terminate(), 20 seconds)
  }
} 
Example 50
Source File: Simple.scala    From logging   with Apache License 2.0 5 votes vote down vote up
package demo.test

import java.net.InetAddress
import akka.actor.ActorSystem
import com.persist.logging._
import logging_demo.BuildInfo
import scala.concurrent.duration._
import scala.language.postfixOps
import scala.concurrent.Await

case class SimpleClass() extends ClassLogging {

  def demo(): Unit = {
    log.info("Test")
    log.error("Foo failed")
    log.warn(Map("@msg" -> "fail", "value" -> 23))
  }
}

object Simple {
  def main(args: Array[String]) {
    val system = ActorSystem("test")

    val host = InetAddress.getLocalHost.getHostName
    val loggingSystem = LoggingSystem(system, BuildInfo.name, BuildInfo.version, host)

    val sc = new SimpleClass()
    sc.demo()

    Await.result(loggingSystem.stop, 30 seconds)
    Await.result(system.terminate(), 20 seconds)
  }
} 
Example 51
Source File: Filter.scala    From logging   with Apache License 2.0 5 votes vote down vote up
package demo.test

import java.net.InetAddress
import akka.actor.ActorSystem
import com.persist.logging._
import logging_demo.BuildInfo
import scala.concurrent.duration._
import scala.language.postfixOps
import scala.concurrent.Await

case class Class1() extends ClassLogging {
  def demo(name: String): Unit = {
    log.debug(name)
    log.warn(name)
  }
}

case class Class2() extends ClassLogging {
  def demo(name: String): Unit = {
    log.debug(name)
    log.warn(name)
  }
}

object Filter {

  import LoggingLevels._


  def filter(fields: Map[String, RichMsg], level: Level): Boolean = {
    val cls = fields.get("class") match {
      case Some(s: String) => s
      case _ => ""
    }
    if (cls == "demo.test.Class1") {
      level >= DEBUG
    } else {
      level >= WARN
    }
  }

  def main(args: Array[String]) {
    val system = ActorSystem("test")

    val host = InetAddress.getLocalHost.getHostName
    val loggingSystem = LoggingSystem(system, BuildInfo.name, BuildInfo.version, host)

    val c1 = new Class1()
    val c2 = new Class2()

    c1.demo("no filter")
    c2.demo("no filter")

    // Add filter and change level
    val oldLevel = loggingSystem.getLevel.current
    loggingSystem.setFilter(Some(filter))
    loggingSystem.setLevel(DEBUG)

    c1.demo("filter")
    c2.demo("filter")

    // Reset it back
    loggingSystem.setLevel(oldLevel)
    loggingSystem.setFilter(None)

    Await.result(loggingSystem.stop, 30 seconds)
    Await.result(system.terminate(), 20 seconds)
  }
} 
Example 52
Source File: StdOutAppender.scala    From logging   with Apache License 2.0 5 votes vote down vote up
package com.persist.logging

import akka.actor.{ActorContext, ActorRefFactory, ActorSystem}
import com.persist.JsonOps._
import com.persist.logging.LoggingLevels.Level

import scala.concurrent.Future


  def stop(): Future[Unit] = {
    if (summary) {
      val cats = if (categories.size == 0) emptyJsonObject else JsonObject("alts" -> categories)
      val levs = if (levels.size == 0) emptyJsonObject else JsonObject("levels" -> levels)
      val knds = if (kinds.size == 0) emptyJsonObject else JsonObject("kinds" -> kinds)
      val txt = Pretty(levs ++ cats ++ knds, width = width)
      val colorTxt = if (color) {
        s"${Console.BLUE}$txt${Console.RESET}"
      } else {
        txt
      }
      println(colorTxt)
    }
    Future.successful(())
  }
} 
Example 53
Source File: KafkaAppender.scala    From logging   with Apache License 2.0 5 votes vote down vote up
package com.persist.logging.kafka

import akka.actor.{ActorContext, ActorRefFactory, ActorSystem}
import com.persist.logging.{ClassLogging, LogAppender, LogAppenderBuilder, RichMsg}
import com.persist.JsonOps._
import java.util.Properties
import com.persist.logging._
import com.persist.logging.LoggingLevels.Level

import org.apache.kafka.clients.producer._

import scala.concurrent.Future

class CB extends Callback() with ClassLogging {
  var timeoutCnt = 0
  var sawTimeouts = false

  override def onCompletion(metadata: RecordMetadata, exception: Exception): Unit = {
    exception match {
      case ex: org.apache.kafka.common.errors.`TimeoutException` =>
        if (!sawTimeouts) log.error(JsonObject("fail" -> "KAFKA", "error" -> "Kafka timing out"))
        sawTimeouts = true
        timeoutCnt += 1
      case _ =>
        if (sawTimeouts) log.error(JsonObject("fail" -> "KAFKA", "error" -> "messages lost", "lost" -> timeoutCnt))
        sawTimeouts = false
        timeoutCnt = 0
    }
  }

  def finish(blockMs: Int) {
    Thread.sleep(blockMs * 3) // wait for all messages to complete
    if (timeoutCnt > 0) log.error(JsonObject("fail" -> "KAFKA", "error" -> "messages lost", "lost" -> timeoutCnt))
  }
}

object KafkaAppender extends LogAppenderBuilder {
  override def apply(factory: ActorRefFactory, standardHeaders: Map[String, RichMsg]): LogAppender =
    new KafkaAppender(factory, standardHeaders)
}

class KafkaAppender(factory: ActorRefFactory, standardHeaders: Map[String, RichMsg]) extends LogAppender {
  val cb = new CB()
  private[this] val system = factory match {
    case context: ActorContext => context.system
    case s: ActorSystem => s
  }
  private[this] implicit val executionContext = factory.dispatcher
  private[this] val config = system.settings.config.getConfig("com.persist.logging.appenders.kafka")
  private[this] val fullHeaders = config.getBoolean("fullHeaders")
  private[this] val sort = config.getBoolean("sorted")
  private[this] val logLevelLimit = Level(config.getString("logLevelLimit"))

  private[this] val topic = config.getString("topic")
  private[this] val bootstrapServers = config.getString("bootstrapServers")
  private[this] val acks = config.getString("acks")
  private[this] val retries = config.getInt("retries")
  private[this] val batchSize = config.getInt("batchSize")
  private[this] val blockMs = config.getInt("blockMs")
  private[this] val bufferMemory = config.getInt("bufferMemory")


  val props = new Properties()
  props.put("bootstrap.servers", bootstrapServers)
  props.put("acks", acks)
  props.put("retries", new Integer(retries))
  props.put("batch.size", new Integer(batchSize))
  //props.put("request.timeout.ms", new Integer(timeoutMs))
  props.put("max.block.ms", new Integer(blockMs))
  //props.put("linger.ms", new Integer(lingerMs))
  props.put("buffer.memory", new Integer(bufferMemory))
  props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer")
  props.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer")

  private[this] val producer: Producer[String, String] = new KafkaProducer(props)

  private def checkLevel(baseMsg: Map[String, RichMsg]): Boolean = {
    val level = jgetString(baseMsg, "@severity")
    Level(level) >= logLevelLimit
  }

  override def append(baseMsg: Map[String, RichMsg], category: String): Unit = {
    if (category != "common" || checkLevel(baseMsg)) {
      val msg = if (fullHeaders) standardHeaders ++ baseMsg else baseMsg
      val txt = Compact(msg, safe = true, sort = sort)
      Future {
        producer.send(new ProducerRecord[String, String](topic, txt), cb)
      }
    }
  }

  override def finish(): Future[Unit] = {
    cb.finish(blockMs)
    Future.successful(())
  }

  override def stop(): Future[Unit] = {
    producer.close()
    Future.successful(())
  }
} 
Example 54
Source File: TestKafka.scala    From logging   with Apache License 2.0 5 votes vote down vote up
package com.persist.logging.test

import java.net.InetAddress
import akka.actor.ActorSystem
import com.persist.logging.kafka.KafkaAppender
import com.persist.logging._
import kafka_logging_demo.BuildInfo
import scala.language.postfixOps
import scala.concurrent.duration._
import scala.concurrent.Await

case class TestKafka() extends ClassLogging {
  def send: Unit = {
    for (i <- 1 to 5) {
      log.warn(Map("msg" -> "test", "i" -> i))
      Thread.sleep(500)
    }
  }
}

object TestKafka {
  def main(args: Array[String]): Unit = {
    val system = ActorSystem("test")

    val host = InetAddress.getLocalHost.getHostName
    val loggingSystem = LoggingSystem(system, BuildInfo.name,
      BuildInfo.version, host, appenderBuilders = Seq(StdOutAppender, KafkaAppender))

    val tc = TestKafka()
    tc.send

    //Thread.sleep(60000)

    Await.result(loggingSystem.stop, 30 seconds)
    Await.result(system.terminate(), 20 seconds)
  }
} 
Example 55
Source File: RepositoryTest.scala    From akka-http-slick-sample   with MIT License 5 votes vote down vote up
package net.softler.data

import java.util.UUID

import akka.actor.ActorSystem
import akka.testkit.TestKit
import net.softler.data.model.User
import net.softler.data.persistence.{H2, UserComponent}
import org.scalatest._
import org.scalatest.concurrent.ScalaFutures

import scala.concurrent.ExecutionContext
import scala.concurrent.duration._
import scala.language.postfixOps


class RepositoryTest
    extends TestKit(ActorSystem("test-system"))
    with FlatSpecLike
    with Matchers
    with ScalaFutures
    with UserComponent
    with H2 {

  implicit val ec: ExecutionContext = system.dispatcher

  val repo: UserRepository = new UserRepository

  private val testId = UUID.fromString("00000000-0000-0000-0000-000000000000")

  override implicit def patienceConfig: PatienceConfig = PatienceConfig(5 seconds)

  "The generic repository" should "handle a in memory database" in {
    whenReady(repo.all)(_.size shouldBe 3)
  }

  it should "retrieve a user by id" in {
    whenReady(repo.byId(testId)) { user =>
      user.get.login shouldBe "tom"
    }
  }

  it should "update a single entity" in {
    val testEntity: User = repo.byId(testId).futureValue.get

    val result = repo.update(testEntity).futureValue

    result shouldBe 1
  }

  it should "delete a single user by id" in {
    whenReady(repo.delete(testId)) { result =>
      result shouldBe true
    }

    whenReady(repo.byId(testId)) { user =>
      user shouldBe None
    }

    whenReady(repo.all)(_.size shouldBe 2)
  }
} 
Example 56
Source File: Step4_SecondaryPersistenceSpec.scala    From Principles-of-Reactive-Programming   with GNU General Public License v3.0 5 votes vote down vote up
package kvstore

import akka.testkit.TestKit
import akka.testkit.ImplicitSender
import org.scalatest.BeforeAndAfterAll
import org.scalatest.Matchers
import org.scalatest.FunSuiteLike
import akka.actor.ActorSystem
import akka.testkit.TestProbe
import scala.concurrent.duration._
import Arbiter._
import Persistence._
import org.scalactic.ConversionCheckedTripleEquals

class Step4_SecondaryPersistenceSpec extends TestKit(ActorSystem("Step4SecondaryPersistenceSpec"))
    with FunSuiteLike
    with BeforeAndAfterAll
    with Matchers
    with ConversionCheckedTripleEquals
    with ImplicitSender
    with Tools {

  override def afterAll(): Unit = {
    system.shutdown()
  }

  test("case1: Secondary should not acknowledge snapshots until persisted") {
    import Replicator._

    val arbiter = TestProbe()
    val persistence = TestProbe()
    val replicator = TestProbe()
    val secondary = system.actorOf(Replica.props(arbiter.ref, probeProps(persistence)), "case1-secondary")
    val client = session(secondary)

    arbiter.expectMsg(Join)
    arbiter.send(secondary, JoinedSecondary)

    client.get("k1") should ===(None)

    replicator.send(secondary, Snapshot("k1", Some("v1"), 0L))
    val persistId = persistence.expectMsgPF() {
      case Persist("k1", Some("v1"), id) => id
    }

    withClue("secondary replica should already serve the received update while waiting for persistence: ") {
      client.get("k1") should ===(Some("v1"))
    }

    replicator.expectNoMsg(500.milliseconds)

    persistence.reply(Persisted("k1", persistId))
    replicator.expectMsg(SnapshotAck("k1", 0L))
    client.get("k1") should ===(Some("v1"))
  }

  test("case2: Secondary should retry persistence in every 100 milliseconds") {
    import Replicator._

    val arbiter = TestProbe()
    val persistence = TestProbe()
    val replicator = TestProbe()
    val secondary = system.actorOf(Replica.props(arbiter.ref, probeProps(persistence)), "case2-secondary")
    val client = session(secondary)

    arbiter.expectMsg(Join)
    arbiter.send(secondary, JoinedSecondary)

    client.get("k1") should ===(None)

    replicator.send(secondary, Snapshot("k1", Some("v1"), 0L))
    val persistId = persistence.expectMsgPF() {
      case Persist("k1", Some("v1"), id) => id
    }

    withClue("secondary replica should already serve the received update while waiting for persistence: ") {
      client.get("k1") should ===(Some("v1"))
    }

    // Persistence should be retried
    persistence.expectMsg(200.milliseconds, Persist("k1", Some("v1"), persistId))
    persistence.expectMsg(200.milliseconds, Persist("k1", Some("v1"), persistId))

    replicator.expectNoMsg(500.milliseconds)

    persistence.reply(Persisted("k1", persistId))
    replicator.expectMsg(SnapshotAck("k1", 0L))
    client.get("k1") should ===(Some("v1"))
  }

} 
Example 57
Source File: Tools.scala    From Principles-of-Reactive-Programming   with GNU General Public License v3.0 5 votes vote down vote up
package kvstore

import akka.actor.ActorSystem
import scala.concurrent.duration.FiniteDuration
import akka.testkit.TestProbe
import akka.actor.{ ActorRef, Actor }
import org.scalatest.Matchers
import org.scalatest.FunSuiteLike
import akka.actor.Props
import akka.testkit.TestKit
import akka.testkit.ImplicitSender
import scala.concurrent.duration._

object Tools {
  class TestRefWrappingActor(val probe: TestProbe) extends Actor {
    def receive = { case msg => probe.ref forward msg }
  }
}


trait Tools { this: TestKit with FunSuiteLike with Matchers with ImplicitSender =>

  import Arbiter._
  import Tools._

  def probeProps(probe: TestProbe): Props = Props(classOf[TestRefWrappingActor], probe)

  class Session(val probe: TestProbe, val replica: ActorRef) {
    import Replica._

    @volatile private var seq = 0L
    private def nextSeq: Long = {
      val next = seq
      seq += 1
      next
    }

    @volatile private var referenceMap = Map.empty[String, String]

    def waitAck(s: Long): Unit = probe.expectMsg(OperationAck(s))

    def waitFailed(s: Long): Unit = probe.expectMsg(OperationFailed(s))

    def set(key: String, value: String): Long = {
      referenceMap += key -> value
      val s = nextSeq
      probe.send(replica, Insert(key, value, s))
      s
    }

    def setAcked(key: String, value: String): Unit = waitAck(set(key, value))

    def remove(key: String): Long = {
      referenceMap -= key
      val s = nextSeq
      probe.send(replica, Remove(key, s))
      s
    }

    def removeAcked(key: String): Unit = waitAck(remove(key))

    def getAndVerify(key: String): Unit = {
      val s = nextSeq
      probe.send(replica, Get(key, s))
      probe.expectMsg(GetResult(key, referenceMap.get(key), s))
    }

    def get(key: String): Option[String] = {
      val s = nextSeq
      probe.send(replica, Get(key, s))
      probe.expectMsgType[GetResult].valueOption
    }

    def nothingHappens(duration: FiniteDuration): Unit = probe.expectNoMsg(duration)
  }

  def session(replica: ActorRef)(implicit system: ActorSystem) = new Session(TestProbe(), replica)


} 
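For reference, a sketch of how the Session helper above is typically driven from a spec that mixes in Tools (here `replica` stands for whatever ActorRef is under test; the implicit ActorSystem comes from TestKit):

val client = session(replica)       // fresh TestProbe per session
val seq    = client.set("k1", "v1") // sends Insert("k1", "v1", seq)
client.waitAck(seq)                 // expects OperationAck(seq)
client.getAndVerify("k1")           // Get must reflect the local reference map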
Example 58
Source File: IntegrationSpec.scala    From Principles-of-Reactive-Programming   with GNU General Public License v3.0 5 votes vote down vote up
package kvstore

import akka.actor.{ Actor, Props, ActorRef, ActorSystem }
import akka.testkit.{ TestProbe, ImplicitSender, TestKit }
import org.scalatest.{ BeforeAndAfterAll, FlatSpec, Matchers }
import scala.concurrent.duration._
import org.scalatest.FunSuiteLike
import org.scalactic.ConversionCheckedTripleEquals

class IntegrationSpec(_system: ActorSystem) extends TestKit(_system)
    with FunSuiteLike
    with Matchers
    with BeforeAndAfterAll
    with ConversionCheckedTripleEquals
    with ImplicitSender
    with Tools {

  import Replica._
  import Replicator._
  import Arbiter._

  def this() = this(ActorSystem("ReplicatorSpec"))

  override def afterAll: Unit = system.shutdown()

  
}
Example 59
Source File: Step6_NewSecondarySpec.scala    From Principles-of-Reactive-Programming   with GNU General Public License v3.0 5 votes vote down vote up
package kvstore

import akka.testkit.TestKit
import akka.testkit.ImplicitSender
import org.scalatest.BeforeAndAfterAll
import org.scalatest.Matchers
import org.scalatest.FunSuiteLike
import akka.actor.ActorSystem
import akka.testkit.TestProbe
import Arbiter._
import Replicator._
import org.scalactic.ConversionCheckedTripleEquals

class Step6_NewSecondarySpec extends TestKit(ActorSystem("Step6NewSecondarySpec"))
  with FunSuiteLike
  with BeforeAndAfterAll
  with Matchers
  with ConversionCheckedTripleEquals
  with ImplicitSender
  with Tools {

  override def afterAll(): Unit = {
    system.shutdown()
  }

  test("case1: Primary must start replication to new replicas") {
    val arbiter = TestProbe()
    val primary = system.actorOf(Replica.props(arbiter.ref, Persistence.props(flaky = false)), "case1-primary")
    val user = session(primary)
    val secondary = TestProbe()

    arbiter.expectMsg(Join)
    arbiter.send(primary, JoinedPrimary)

    user.setAcked("k1", "v1")
    arbiter.send(primary, Replicas(Set(primary, secondary.ref)))

    secondary.expectMsg(Snapshot("k1", Some("v1"), 0L))
    secondary.reply(SnapshotAck("k1", 0L))

    val ack1 = user.set("k1", "v2")
    secondary.expectMsg(Snapshot("k1", Some("v2"), 1L))
    secondary.reply(SnapshotAck("k1", 1L))
    user.waitAck(ack1)

    val ack2 = user.remove("k1")
    secondary.expectMsg(Snapshot("k1", None, 2L))
    secondary.reply(SnapshotAck("k1", 2L))
    user.waitAck(ack2)
  }

  test("case2: Primary must stop replication to removed replicas and stop Replicator") {
    val arbiter = TestProbe()
    val primary = system.actorOf(Replica.props(arbiter.ref, Persistence.props(flaky = false)), "case2-primary")
    val user = session(primary)
    val secondary = TestProbe()

    arbiter.expectMsg(Join)
    arbiter.send(primary, JoinedPrimary)
    arbiter.send(primary, Replicas(Set(primary, secondary.ref)))

    val ack1 = user.set("k1", "v1")
    secondary.expectMsg(Snapshot("k1", Some("v1"), 0L))
    val replicator = secondary.lastSender
    secondary.reply(SnapshotAck("k1", 0L))
    user.waitAck(ack1)

    watch(replicator)
    arbiter.send(primary, Replicas(Set(primary)))
    expectTerminated(replicator)
  }

  test("case3: Primary must stop replication to removed replicas and waive their outstanding acknowledgements") {
    val arbiter = TestProbe()
    val primary = system.actorOf(Replica.props(arbiter.ref, Persistence.props(flaky = false)), "case3-primary")
    val user = session(primary)
    val secondary = TestProbe()

    arbiter.expectMsg(Join)
    arbiter.send(primary, JoinedPrimary)
    arbiter.send(primary, Replicas(Set(primary, secondary.ref)))

    val ack1 = user.set("k1", "v1")
    secondary.expectMsg(Snapshot("k1", Some("v1"), 0L))
    secondary.reply(SnapshotAck("k1", 0L))
    user.waitAck(ack1)

    val ack2 = user.set("k1", "v2")
    secondary.expectMsg(Snapshot("k1", Some("v2"), 1L))
    arbiter.send(primary, Replicas(Set(primary)))
    user.waitAck(ack2)
  }

} 
Example 60
Source File: Step1_PrimarySpec.scala    From Principles-of-Reactive-Programming   with GNU General Public License v3.0 5 votes vote down vote up
package kvstore

import akka.testkit.TestKit
import akka.actor.ActorSystem
import org.scalatest.FunSuiteLike
import org.scalatest.BeforeAndAfterAll
import org.scalatest.Matchers
import akka.testkit.ImplicitSender
import akka.testkit.TestProbe
import scala.concurrent.duration._
import kvstore.Persistence.{ Persisted, Persist }
import kvstore.Replica.OperationFailed
import kvstore.Replicator.{ Snapshot }
import scala.util.Random
import scala.util.control.NonFatal
import org.scalactic.ConversionCheckedTripleEquals

class Step1_PrimarySpec extends TestKit(ActorSystem("Step1PrimarySpec"))
    with FunSuiteLike
    with BeforeAndAfterAll
    with Matchers
    with ConversionCheckedTripleEquals
    with ImplicitSender
    with Tools {

  override def afterAll(): Unit = {
    system.shutdown()
  }

  import Arbiter._

  test("case1: Primary (in isolation) should properly register itself to the provided Arbiter") {
    val arbiter = TestProbe()
    system.actorOf(Replica.props(arbiter.ref, Persistence.props(flaky = false)), "case1-primary")
    
    arbiter.expectMsg(Join)
  }

  test("case2: Primary (in isolation) should react properly to Insert, Remove, Get") {
    val arbiter = TestProbe()
    val primary = system.actorOf(Replica.props(arbiter.ref, Persistence.props(flaky = false)), "case2-primary")
    val client = session(primary)

    arbiter.expectMsg(Join)
    arbiter.send(primary, JoinedPrimary)

    client.getAndVerify("k1")
    client.setAcked("k1", "v1")
    client.getAndVerify("k1")
    client.getAndVerify("k2")
    client.setAcked("k2", "v2")
    client.getAndVerify("k2")
    client.removeAcked("k1")
    client.getAndVerify("k1")
  }

  
} 
Example 61
Source File: Step3_ReplicatorSpec.scala    From Principles-of-Reactive-Programming   with GNU General Public License v3.0 5 votes vote down vote up
package kvstore

import akka.testkit.{ TestProbe, TestKit, ImplicitSender }
import org.scalatest.BeforeAndAfterAll
import org.scalatest.Matchers
import org.scalatest.FunSuiteLike
import akka.actor.ActorSystem
import scala.concurrent.duration._
import kvstore.Arbiter.{ JoinedSecondary, Join }
import kvstore.Persistence.{ Persisted, Persist }
import kvstore.Replicator.{ SnapshotAck, Snapshot, Replicate }
import org.scalactic.ConversionCheckedTripleEquals

class Step3_ReplicatorSpec extends TestKit(ActorSystem("Step3ReplicatorSpec"))
    with FunSuiteLike
    with BeforeAndAfterAll
    with Matchers
    with ConversionCheckedTripleEquals
    with ImplicitSender
    with Tools {

  override def afterAll(): Unit = {
    system.shutdown()
  }

  test("case1: Replicator should send snapshots when asked to replicate") {
    val secondary = TestProbe()
    val replicator = system.actorOf(Replicator.props(secondary.ref), "case1-replicator")

    replicator ! Replicate("k1", Some("v1"), 0L)
    secondary.expectMsg(Snapshot("k1", Some("v1"), 0L))
    secondary.ignoreMsg({ case Snapshot(_, _, 0L) => true })
    secondary.reply(SnapshotAck("k1", 0L))

    replicator ! Replicate("k1", Some("v2"), 1L)
    secondary.expectMsg(Snapshot("k1", Some("v2"), 1L))
    secondary.ignoreMsg({ case Snapshot(_, _, 1L) => true })
    secondary.reply(SnapshotAck("k1", 1L))

    replicator ! Replicate("k2", Some("v1"), 2L)
    secondary.expectMsg(Snapshot("k2", Some("v1"), 2L))
    secondary.ignoreMsg({ case Snapshot(_, _, 2L) => true })
    secondary.reply(SnapshotAck("k2", 2L))

    replicator ! Replicate("k1", None, 3L)
    secondary.expectMsg(Snapshot("k1", None, 3L))
    secondary.reply(SnapshotAck("k1", 3L))
  }

  test("case2: Replicator should retry until acknowledged by secondary") {
    val secondary = TestProbe()
    val replicator = system.actorOf(Replicator.props(secondary.ref), "case2-replicator")

    replicator ! Replicate("k1", Some("v1"), 0L)
    secondary.expectMsg(Snapshot("k1", Some("v1"), 0L))
    secondary.expectMsg(300.milliseconds, Snapshot("k1", Some("v1"), 0L))
    secondary.expectMsg(300.milliseconds, Snapshot("k1", Some("v1"), 0L))

    secondary.reply(SnapshotAck("k1", 0L))
  }

} 
Example 62
Source File: SidechainDeliveryTracker.scala    From Sidechains-SDK   with MIT License 5 votes vote down vote up
package com.horizen.network

import akka.actor.{ActorRef, ActorSystem}
import scorex.core.network.ModifiersStatus.{Received, Requested}
import scorex.core.network.{ConnectedPeer, DeliveryTracker, ModifiersStatus}
import scorex.util.ModifierId

import scala.concurrent.duration.FiniteDuration

class SidechainDeliveryTracker(system: ActorSystem,
                               deliveryTimeout: FiniteDuration,
                               maxDeliveryChecks: Int,
                               nvsRef: ActorRef)
  extends DeliveryTracker(system, deliveryTimeout, maxDeliveryChecks, nvsRef) {

  def peerInfo(id: ModifierId): Option[ConnectedPeer] = {
    val modifierStatus: ModifiersStatus = status(id)
    modifierStatus match {
      case Requested =>
        requested.get(id).flatMap(_.peer)
      case Received =>
        received.get(id)
      case _ =>
        None
    }
  }
} 
Example 63
Source File: SidechainTransactionActor.scala    From Sidechains-SDK   with MIT License 5 votes vote down vote up
package com.horizen.api.http

import akka.actor.{Actor, ActorRef, ActorSystem, Props}
import com.horizen.SidechainTypes
import com.horizen.api.http.SidechainTransactionActor.ReceivableMessages.BroadcastTransaction
import scorex.core.NodeViewHolder.ReceivableMessages.LocallyGeneratedTransaction
import scorex.core.network.NodeViewSynchronizer.ReceivableMessages.{FailedTransaction, SuccessfulTransaction}
import scorex.util.{ModifierId, ScorexLogging}

import scala.collection.concurrent.TrieMap
import scala.concurrent.{ExecutionContext, Promise}

class SidechainTransactionActor[T <: SidechainTypes#SCBT](sidechainNodeViewHolderRef: ActorRef)(implicit ec: ExecutionContext)
  extends Actor with ScorexLogging {

  private var transactionMap: TrieMap[String, Promise[ModifierId]] = TrieMap()

  override def preStart(): Unit = {
    context.system.eventStream.subscribe(self, classOf[SuccessfulTransaction[T]])
    context.system.eventStream.subscribe(self, classOf[FailedTransaction])
  }

  protected def broadcastTransaction: Receive = {
    case BroadcastTransaction(transaction) =>
      val promise = Promise[ModifierId]
      val future = promise.future
      transactionMap(transaction.id) = promise
      sender() ! future

      sidechainNodeViewHolderRef ! LocallyGeneratedTransaction[SidechainTypes#SCBT](transaction)
  }

  protected def sidechainNodeViewHolderEvents: Receive = {
    case SuccessfulTransaction(transaction) =>
      transactionMap.remove(transaction.id) match {
        case Some(promise) => promise.success(transaction.id)
        case None =>
      }
    case FailedTransaction(transactionId, throwable, _) =>
      transactionMap.remove(transactionId) match {
        case Some(promise) => promise.failure(throwable)
        case None =>
      }
  }

  override def receive: Receive = {
    broadcastTransaction orElse
    sidechainNodeViewHolderEvents orElse {
      case message: Any => log.error("SidechainTransactionActor received strange message: " + message)
    }
  }
}

object SidechainTransactionActor {

  object ReceivableMessages {

    case class BroadcastTransaction[T <: SidechainTypes#SCBT](transaction: T)

  }

}

object SidechainTransactionActorRef {
  def props(sidechainNodeViewHolderRef: ActorRef)
           (implicit ec: ExecutionContext): Props =
    Props(new SidechainTransactionActor(sidechainNodeViewHolderRef))

  def apply(sidechainNodeViewHolderRef: ActorRef)
           (implicit system: ActorSystem, ec: ExecutionContext): ActorRef =
    system.actorOf(props(sidechainNodeViewHolderRef))
} 
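One subtlety in SidechainTransactionActor above: the reply to BroadcastTransaction is itself a Future[ModifierId], so callers using the ask pattern receive a nested future that must be flattened. A hedged usage sketch (the actor ref and transaction values are illustrative):

import akka.pattern.ask
import akka.util.Timeout
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.Future
import scala.concurrent.duration._

implicit val timeout: Timeout = Timeout(5.seconds)
val txId: Future[ModifierId] =
  (transactionActorRef ? BroadcastTransaction(tx))
    .mapTo[Future[ModifierId]]
    .flatMap(identity)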
Example 64
Source File: SidechainNodeViewHolderTest.scala    From Sidechains-SDK   with MIT License 5 votes vote down vote up
package com.horizen.actors

import java.util.concurrent.TimeUnit

import akka.actor.{ActorRef, ActorSystem}
import akka.pattern.ask
import akka.testkit.TestKit
import akka.util.Timeout
import com.horizen.SidechainNodeViewHolder.ReceivableMessages.GetDataFromCurrentSidechainNodeView
import com.horizen.fixtures.SidechainNodeViewHolderFixture
import com.horizen.node.SidechainNodeView
import org.scalatest.{BeforeAndAfterAll, FunSuiteLike}

import scala.concurrent._
import scala.concurrent.duration._
import org.scalatest._
import org.junit.runner.RunWith
import org.scalatest.junit.JUnitRunner


@RunWith(classOf[JUnitRunner])
class SidechainNodeViewHolderTest extends Suites(
  new SidechainNodeViewHolderTest1,
  new SidechainNodeViewHolderTest2
)

@RunWith(classOf[JUnitRunner])
class SidechainNodeViewHolderTest1
  extends TestKit(ActorSystem("testsystem"))
  with FunSuiteLike
  with BeforeAndAfterAll
  with SidechainNodeViewHolderFixture
{

  implicit val timeout = Timeout(5, TimeUnit.SECONDS)

  override def afterAll: Unit = {
    //info("Actor system is shutting down...")
    TestKit.shutdownActorSystem(system)
  }

  test ("Test1") {
    def f(v: SidechainNodeView) = v
    val sidechainNodeViewHolderRef: ActorRef = getSidechainNodeViewHolderRef
    val nodeView = (sidechainNodeViewHolderRef ? GetDataFromCurrentSidechainNodeView(f))
      .mapTo[SidechainNodeView]

    assert(Await.result(nodeView, 5 seconds) != null)
  }

  test("Test2") {
  }

}

@RunWith(classOf[JUnitRunner])
class SidechainNodeViewHolderTest2
  extends TestKit(ActorSystem("testSystem"))
  with FeatureSpecLike
  with BeforeAndAfterAll
  with Matchers
  with SidechainNodeViewHolderFixture
{

  implicit val timeout = Timeout(5, TimeUnit.SECONDS)

  override def afterAll: Unit = {
    //info("Actor system is shutting down...")
    TestKit.shutdownActorSystem(system)
  }

  feature("Actor1") {
    scenario("Scenario 1"){
      system should not be(null)

      def f(v: SidechainNodeView) = v
      val sidechainNodeViewHolderRef: ActorRef = getSidechainNodeViewHolderRef
      val nodeView = (sidechainNodeViewHolderRef ? GetDataFromCurrentSidechainNodeView(f))
        .mapTo[SidechainNodeView]

      Await.result(nodeView, 5 seconds) should not be(null)

    }
  }
} 
Example 65
Source File: MockedSidechainNodeViewHolderFixture.scala    From Sidechains-SDK   with MIT License 5 votes vote down vote up
package com.horizen.fixtures

import akka.actor.{ActorRef, ActorSystem, Props}
import com.horizen._
import org.mockito.Mockito
import org.scalatest.mockito.MockitoSugar
import scorex.core.settings.{NetworkSettings, ScorexSettings}

class MockedSidechainNodeViewHolder(sidechainSettings: SidechainSettings,
                                    history: SidechainHistory,
                                    state: SidechainState,
                                    wallet: SidechainWallet,
                                    mempool: SidechainMemoryPool)
  extends SidechainNodeViewHolder(sidechainSettings, null, null, null, null, null, null, null, null, null, null, null, null) {

  override def restoreState(): Option[(HIS, MS, VL, MP)] = {
    Some(history, state, wallet, mempool)
  }
}


trait MockedSidechainNodeViewHolderFixture extends MockitoSugar {
  def getMockedSidechainNodeViewHolderRef(history: SidechainHistory, state: SidechainState, wallet: SidechainWallet, mempool: SidechainMemoryPool)
                                         (implicit actorSystem: ActorSystem): ActorRef = {
    val sidechainSettings = mock[SidechainSettings]
    val scorexSettings = mock[ScorexSettings]
    val networkSettings = mock[NetworkSettings]
    Mockito.when(sidechainSettings.scorexSettings)
      .thenAnswer(answer => scorexSettings)
    Mockito.when(scorexSettings.network)
      .thenAnswer(answer => networkSettings)
    Mockito.when(networkSettings.maxModifiersCacheSize)
      .thenAnswer(answer => 10)

    actorSystem.actorOf(Props(new MockedSidechainNodeViewHolder(sidechainSettings, history, state, wallet, mempool)))
  }
} 
Example 66
Source File: CompositeHttpService.scala    From EncryCore   with GNU General Public License v3.0 5 votes vote down vote up
package encry.api.http

import akka.actor.ActorSystem
import akka.http.scaladsl.model.StatusCodes
import akka.http.scaladsl.server.Directives._
import akka.http.scaladsl.server.Route
import encry.api.http.routes.SwaggerRoute
import encry.settings.RESTApiSettings

case class CompositeHttpService(system: ActorSystem,
                                routes: Seq[ApiRoute],
                                settings: RESTApiSettings,
                                swaggerConf: String) {

  implicit val actorSystem: ActorSystem = system

  val redirectToSwagger: Route = redirect("/swagger", StatusCodes.PermanentRedirect)

  @SuppressWarnings(Array("org.wartremover.warts.TraversableOps"))
  val compositeRoute: Route = routes.map(_.route).reduce(_ ~ _) ~
    path("swagger") { getFromResource("swagger-ui/index.html") } ~
    getFromResourceDirectory("swagger-ui") ~
    SwaggerRoute.routes ~
    redirectToSwagger
} 
Example 67
Source File: BlackListTests.scala    From EncryCore   with GNU General Public License v3.0 5 votes vote down vote up
package encry.network

import java.net.{InetAddress, InetSocketAddress}

import akka.actor.ActorSystem
import akka.testkit.{TestActorRef, TestProbe}
import encry.modifiers.InstanceFactory
import encry.network.BlackList.BanReason._
import encry.network.PeerConnectionHandler.{ConnectedPeer, Outgoing}
import encry.network.PeerConnectionHandler.ReceivableMessages.CloseConnection
import encry.network.PeersKeeper.BanPeer
import encry.settings.TestNetSettings
import org.encryfoundation.common.network.BasicMessagesRepo.Handshake
import org.scalatest.{BeforeAndAfterAll, Matchers, OneInstancePerTest, WordSpecLike}
import scala.concurrent.duration._

class BlackListTests extends WordSpecLike
  with Matchers
  with BeforeAndAfterAll
  with InstanceFactory
  with OneInstancePerTest
  with TestNetSettings {

  implicit val system: ActorSystem = ActorSystem()

  override def afterAll(): Unit = system.terminate()

  val knowPeersSettings = testNetSettings.copy(
    network = settings.network.copy(
      knownPeers = List(new InetSocketAddress("172.16.11.11", 9001)),
      connectOnlyWithKnownPeers = Some(true)
    ),
    blackList = settings.blackList.copy(
      banTime = 2 seconds,
      cleanupTime = 3 seconds
    ))

  
  "Peers keeper" should {
    "handle ban peer message correctly" in {
      val peersKeeper: TestActorRef[PeersKeeper] = TestActorRef[PeersKeeper](PeersKeeper.props(knowPeersSettings, TestProbe().ref, TestProbe().ref))
      val address: InetSocketAddress = new InetSocketAddress("0.0.0.0", 9000)
      val peerHandler: TestProbe = TestProbe()
      val connectedPeer: ConnectedPeer = ConnectedPeer(
        address,
        peerHandler.ref,
        Outgoing,
        Handshake(protocolToBytes(knowPeersSettings.network.appVersion), "test node", Some(address), System.currentTimeMillis())
      )
      peersKeeper ! BanPeer(connectedPeer, SpamSender)
      peerHandler.expectMsg(CloseConnection)
      peersKeeper.underlyingActor.blackList.contains(address.getAddress) shouldBe true
    }
    "cleanup black list by scheduler correctly" in {
      val peersKeeper: TestActorRef[PeersKeeper] = TestActorRef[PeersKeeper](PeersKeeper.props(knowPeersSettings, TestProbe().ref, TestProbe().ref))
      val address: InetSocketAddress = new InetSocketAddress("0.0.0.0", 9000)
      val peerHandler: TestProbe = TestProbe()
      val connectedPeer: ConnectedPeer = ConnectedPeer(
        address,
        peerHandler.ref,
        Outgoing,
        Handshake(protocolToBytes(knowPeersSettings.network.appVersion), "test node", Some(address), System.currentTimeMillis())
      )
      peersKeeper ! BanPeer(connectedPeer, SentPeersMessageWithoutRequest)
      Thread.sleep(6000)
      peersKeeper.underlyingActor.blackList.contains(address.getAddress) shouldBe false
    }
    "don't remove peer from black list before ban time expired" in {
      val peersKeeper: TestActorRef[PeersKeeper] = TestActorRef[PeersKeeper](PeersKeeper.props(knowPeersSettings, TestProbe().ref, TestProbe().ref))
      val address: InetSocketAddress = new InetSocketAddress("0.0.0.0", 9000)
      val peerHandler: TestProbe = TestProbe()
      val connectedPeer: ConnectedPeer = ConnectedPeer(
        address,
        peerHandler.ref,
        Outgoing,
        Handshake(protocolToBytes(knowPeersSettings.network.appVersion), "test node", Some(address), System.currentTimeMillis())
      )
      Thread.sleep(4000)
      peersKeeper ! BanPeer(connectedPeer, CorruptedSerializedBytes)
      Thread.sleep(2000)
      peersKeeper.underlyingActor.blackList.contains(address.getAddress) shouldBe true
    }
  }
} 
Example 68
Source File: DMUtils.scala    From EncryCore   with GNU General Public License v3.0 5 votes vote down vote up
package encry.network.DeliveryManagerTests

import java.net.InetSocketAddress
import akka.actor.ActorSystem
import akka.testkit.{TestActorRef, TestProbe}
import encry.local.miner.Miner.{DisableMining, StartMining}
import encry.modifiers.InstanceFactory
import encry.network.DeliveryManager
import encry.network.DeliveryManager.FullBlockChainIsSynced
import encry.network.NodeViewSynchronizer.ReceivableMessages.UpdatedHistory
import encry.network.PeerConnectionHandler.{ConnectedPeer, Incoming}
import encry.settings.EncryAppSettings
import encry.view.history.History
import org.encryfoundation.common.modifiers.history.Block
import org.encryfoundation.common.network.BasicMessagesRepo.Handshake
import org.encryfoundation.common.utils.TaggedTypes.ModifierId
import scala.collection.mutable
import scala.collection.mutable.WrappedArray

object DMUtils extends InstanceFactory {

  def initialiseDeliveryManager(isBlockChainSynced: Boolean,
                                isMining: Boolean,
                                settings: EncryAppSettings)
                               (implicit actorSystem: ActorSystem): (TestActorRef[DeliveryManager], History) = {
    val history: History = generateDummyHistory(settings)
    val deliveryManager: TestActorRef[DeliveryManager] =
      TestActorRef[DeliveryManager](DeliveryManager
        .props(None, TestProbe().ref, TestProbe().ref, TestProbe().ref, TestProbe().ref, TestProbe().ref, settings))
    deliveryManager ! UpdatedHistory(history)
    if (isMining) deliveryManager ! StartMining
    else deliveryManager ! DisableMining
    if (isBlockChainSynced) deliveryManager ! FullBlockChainIsSynced
    (deliveryManager, history)
  }

  def generateBlocks(qty: Int, history: History): (History, List[Block]) =
    (0 until qty).foldLeft((history, List.empty[Block])) {
      case ((prevHistory, blocks), _) =>
        val block: Block = generateNextBlock(prevHistory)
        prevHistory.append(block.header)
        prevHistory.append(block.payload)
        val a = prevHistory.reportModifierIsValid(block)
        (a, blocks :+ block)
    }

  def toKey(id: ModifierId): WrappedArray.ofByte = new mutable.WrappedArray.ofByte(id)

  def createPeer(port: Int,
                 host: String,
                 settings: EncryAppSettings)(implicit system: ActorSystem): (InetSocketAddress, ConnectedPeer) = {
    val address = new InetSocketAddress(host, port)
    val peer: ConnectedPeer = ConnectedPeer(address, TestProbe().ref, Incoming,
      Handshake(protocolToBytes(settings.network.appVersion), host, Some(address), System.currentTimeMillis()))
    (address, peer)
  }
} 
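Taken together, these helpers wire up a DeliveryManager test fixture; a hedged sketch of a typical call sequence (the `settings` value is assumed to come from the suite's own fixtures):

implicit val system: ActorSystem = ActorSystem()

val (deliveryManager, history) = DMUtils.initialiseDeliveryManager(isBlockChainSynced = true, isMining = false, settings)
val (updatedHistory, blocks)   = DMUtils.generateBlocks(qty = 5, history)
val (address, peer)            = DMUtils.createPeer(9001, "127.0.0.1", settings)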
Example 69
Source File: MemoryPoolTests.scala    From EncryCore   with GNU General Public License v3.0 5 votes vote down vote up
package encry.view.mempool

import akka.actor.ActorSystem
import akka.testkit.{ TestActorRef, TestProbe }
import com.typesafe.scalalogging.StrictLogging
import encry.modifiers.InstanceFactory
import encry.settings.{ EncryAppSettings, TestNetSettings }
import encry.utils.NetworkTimeProvider
import encry.view.mempool.MemoryPool.{ NewTransaction, TransactionsForMiner }
import org.scalatest.{ BeforeAndAfterAll, Matchers, OneInstancePerTest, WordSpecLike }

import scala.concurrent.duration._

class MemoryPoolTests
    extends WordSpecLike
    with Matchers
    with InstanceFactory
    with BeforeAndAfterAll
    with OneInstancePerTest
    with TestNetSettings
    with StrictLogging {

  implicit val system: ActorSystem = ActorSystem()

  override def afterAll(): Unit = system.terminate()

  val timeProvider: NetworkTimeProvider = new NetworkTimeProvider(testNetSettings.ntp)

  "MemoryPool" should {
    "add new unique transactions" in {
      val mempool                = MemoryPoolStorage.empty(testNetSettings, timeProvider)
      val transactions           = genValidPaymentTxs(10)
      val (newMempool, validTxs) = mempool.validateTransactions(transactions)
      newMempool.size shouldBe 10
      validTxs.map(_.encodedId).forall(transactions.map(_.encodedId).contains) shouldBe true
    }
    "reject not unique transactions" in {
      val mempool                          = MemoryPoolStorage.empty(testNetSettings, timeProvider)
      val transactions                     = genValidPaymentTxs(10)
      val (newMempool, validTxs)           = mempool.validateTransactions(transactions)
      val (newMempoolAgain, validTxsAgain) = newMempool.validateTransactions(validTxs)
      newMempoolAgain.size shouldBe 10
      validTxsAgain.size shouldBe 0
    }
    "mempoolMaxCapacity works correct" in {
      val mempool                = MemoryPoolStorage.empty(testNetSettings, timeProvider)
      val transactions           = genValidPaymentTxs(11)
      val (newMempool, validTxs) = mempool.validateTransactions(transactions)
      newMempool.size shouldBe 10
      validTxs.size shouldBe 10
    }
    "getTransactionsForMiner works fine" in {
      val mempool         = MemoryPoolStorage.empty(testNetSettings, timeProvider)
      val transactions    = (0 until 10).map(k => coinbaseAt(k))
      val (newMempool, _) = mempool.validateTransactions(transactions)
      val (uPool, txs)    = newMempool.getTransactionsForMiner
      uPool.size shouldBe 0
      txs.map(_.encodedId).forall(transactions.map(_.encodedId).contains) shouldBe true
      transactions.map(_.encodedId).forall(txs.map(_.encodedId).contains) shouldBe true
    }
  }
  "Mempool actor" should {
    "send transactions to miner" in {
      val miner1 = TestProbe()
      val mempool1: TestActorRef[MemoryPool] =
        TestActorRef[MemoryPool](MemoryPool.props(testNetSettings, timeProvider, miner1.ref, Some(TestProbe().ref)))
      val transactions1 = (0 until 4).map { k =>
        val a = coinbaseAt(k)
        a
      }
      transactions1.foreach(mempool1 ! NewTransaction(_))
      mempool1.underlyingActor.memoryPool.size shouldBe 4
      logger.info(s"generated: ${transactions1.map(_.encodedId)}")
      miner1.expectMsg(20.seconds, TransactionsForMiner(transactions1))
    }
  }
} 
Example 70
Source File: BaseSpec.scala    From process   with Apache License 2.0 5 votes vote down vote up
package processframework

import akka.actor.ActorSystem

import org.scalatest._
import org.scalatest.concurrent.Eventually

import akka.testkit.{ ImplicitSender, TestKit }

abstract class BaseSpec extends TestKit(ActorSystem(getClass.getSimpleName.stripSuffix("$")))
    with WordSpecLike
    with Suite
    with Matchers
    with BeforeAndAfterAll
    with BeforeAndAfterEach
    with ImplicitSender
    with Eventually {

  override def afterAll() {
    TestKit.shutdownActorSystem(system)
  }
} 
Example 71
Source File: ProcessTest.scala    From process   with Apache License 2.0 5 votes vote down vote up
package processframework

import java.lang

import akka.actor.{ ActorContext, ActorRef, ActorSystem, Props }
import akka.testkit.{ ImplicitSender, TestKit, TestProbe }
import org.scalatest._
import org.scalatest.concurrent.Eventually

import scala.concurrent.duration._

object ProcessTest {
  case object Start
  case object Response
  case class Command(i: Int)
  case object Completed extends Process.Event

  class MockStep(service: ActorRef, retryInt: Duration)(implicit val context: ActorContext) extends ProcessStep[Int] {
    override val retryInterval = retryInt
    def execute()(implicit process: akka.actor.ActorRef) = { state ⇒
      service ! Command(state)
    }
    def receiveCommand = {
      case Response ⇒
        Completed
    }
    def updateState = {
      case Completed ⇒ state ⇒ markDone(state + 1)
    }
  }

  class Process1(service: ActorRef, retryInterval: Duration) extends Process[Int] {
    import context.dispatcher
    var state = 0
    val process = new MockStep(service, retryInterval)

    def receive = {
      case Start ⇒
        process.run()
    }
  }
}

class ProcessTest extends BaseSpec {
  import ProcessTest._

  "Process" should {
    "have a happy flow" in {
      val service = TestProbe()
      val process = system.actorOf(Props(new Process1(service.ref, Duration.Inf)), "Process1")
      process ! processframework.Process.GetState
      expectMsg(0)
      process ! Start

      service.expectMsg(Command(0))
      service.reply(Response)

      eventually {
        process ! processframework.Process.GetState
        expectMsg(1)
      }
      process ! Start
      expectNoMsg(250 millis)
      process ! processframework.Process.GetState
      expectMsg(1)
    }

    "does not retry by default" in {
      val service = TestProbe()
      val process = system.actorOf(Props(new Process1(service.ref, Duration.Inf)), "Process2")
      process ! processframework.Process.GetState
      expectMsg(0)
      process ! Start

      service.expectMsg(Command(0))
      expectNoMsg()
    }

    "retries execution until succeeded" in {
      val service = TestProbe()
      val process = system.actorOf(Props(new Process1(service.ref, 150 millis)), "Process3")
      process ! processframework.Process.GetState
      expectMsg(0)
      process ! Start

      service.expectMsg(Command(0))
      service.expectMsg(1000.millis, Command(0))
      service.expectMsg(1000.millis, Command(0))
      service.reply(Response)
      expectNoMsg()
    }
  }
} 
Example 72
Source File: HttpServerApp.scala    From DataXServer   with Apache License 2.0 5 votes vote down vote up
package org.tianlangstudio.data.hamal.server.http

import akka.actor.ActorSystem
import org.tianlangstudio.data.hamal.common.Logging
import org.tianlangstudio.data.hamal.core.{Constants, HamalConf}
import org.tianlangstudio.data.hamal.core.handler.{ITaskHandler, LocalServerHandler}


object HttpServerApp extends App with Logging {

  def start(taskHandler: ITaskHandler, hamalConf: HamalConf)(implicit system: ActorSystem): Unit = {
    val httpServer = new HttpServer(taskHandler, hamalConf)
    sys.addShutdownHook {
      httpServer.stop()
    }
  }
  val concurrence = if (args.nonEmpty) {
    args(0).toInt
  } else {
    3
  }
  // Use a LocalServerHandler to execute tasks concurrently on multiple threads
  implicit val system = ActorSystem(Constants.NAME_HTTP_SERVER)
  start(new LocalServerHandler(concurrence), new HamalConf())
  while (true) {
    Thread.sleep(10000)
  }
} 
Example 73
Source File: AkkaUtils.scala    From DataXServer   with Apache License 2.0 5 votes vote down vote up
package org.tianlangstudio.data.hamal.yarn.util

import akka.actor.{ActorSystem, ExtendedActorSystem}
import com.typesafe.config.ConfigFactory
import org.apache.log4j.{Level, Logger}
import org.tianlangstudio.data.hamal.core.{Constants, HamalConf}


object AkkaUtils {

  // Assumed ceiling for the Akka frame size; the original constant declaration
  // is elided in this excerpt.
  val AKKA_MAX_FRAME_SIZE_IN_MB: Int = Int.MaxValue / 1024 / 1024

  def maxFrameSizeBytes(conf: HamalConf): Int = {
    val frameSizeInMB = conf.getInt("datax.akka.frameSize", 128)
    if (frameSizeInMB > AKKA_MAX_FRAME_SIZE_IN_MB) {
      throw new IllegalArgumentException(
        s"spark.akka.frameSize should not be greater than $AKKA_MAX_FRAME_SIZE_IN_MB MB")
    }
    frameSizeInMB * 1024 * 1024
  }


  def protocol(actorSystem: ActorSystem): String = {
    val akkaConf = actorSystem.settings.config
    val sslProp = "akka.remote.netty.tcp.enable-ssl"
    protocol(akkaConf.hasPath(sslProp) && akkaConf.getBoolean(sslProp))
  }

  def protocol(ssl: Boolean = false): String = {
    if (ssl) {
      "akka.ssl.tcp"
    } else {
      "akka.tcp"
    }
  }

  def address(
      protocol: String,
      systemName: String,
      host: String,
      port: Int,
      actorName: String): String =
    address(protocol, systemName, s"$host:$port", actorName)

  def address(
      protocol: String,
      systemName: String,
      hostPort: String,
      actorName: String): String =
    s"$protocol://$systemName@$hostPort/user/$actorName"
} 
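With the object wrapper reconstructed above, the helpers compose into a full remote actor path (the values here are illustrative):

val path = AkkaUtils.address(AkkaUtils.protocol(ssl = false), "server", "10.0.0.1", 2552, "worker")
// yields "akka.tcp://server@10.0.0.1:2552/user/worker"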
Example 74
Source File: CouchbasePluginSpec.scala    From akka-persistence-couchbase   with Apache License 2.0 5 votes vote down vote up
package akka.persistence.couchbase.support

import akka.actor.ActorSystem
import akka.persistence.couchbase.{CouchbaseExtension, LoggingConfig}
import com.typesafe.config.ConfigFactory
import org.scalatest.{BeforeAndAfter, BeforeAndAfterAll, Suite}

import scala.concurrent.Await
import scala.concurrent.duration._

object CouchbasePluginSpec {

  val config = ConfigFactory.parseString(
    """
      |akka {
      |  persistence {
      |    journal {
      |      plugin = "couchbase-journal"
      |    }
      |
      |    snapshot-store {
      |      plugin =  "couchbase-snapshot-store"
      |    }
      |
      |    journal-plugin-fallback {
      |      replay-filter {
      |        mode = warn
      |      }
      |    }
      |  }
      |
      |  test.single-expect-default = 10s
      |  loglevel = WARNING
      |  log-dead-letters = 0
      |  log-dead-letters-during-shutdown = off
      |}
      |
      |couchbase-replay {
      |
      |  batchSize = "4"
      |}
    """.stripMargin)
}

trait CouchbasePluginSpec
  extends Suite
    with BeforeAndAfter
    with BeforeAndAfterAll {

  System.setProperty("java.util.logging.config.class", classOf[LoggingConfig].getName)

  def system: ActorSystem

  def couchbase = CouchbaseExtension(system)

  before {
    assert(couchbase.journalBucket.bucketManager.flush())
    assert(couchbase.snapshotStoreBucket.bucketManager.flush())
  }

  override protected def afterAll(): Unit = {
    Await.result(system.terminate(), 10.seconds)
    super.afterAll()
  }
} 
Example 75
Source File: ActorWaitHelper.scala    From wookiee   with Apache License 2.0 5 votes vote down vote up
package com.webtrends.harness.utils

import java.util.concurrent.TimeUnit

import akka.actor.{Actor, ActorRef, ActorSystem, Props}
import akka.util.Timeout

import scala.concurrent.Await

object ActorWaitHelper {
  // Will wait until an actor has come up before returning its ActorRef
  def awaitActor(props: Props, system: ActorSystem, actorName: Option[String] = None)(implicit timeout: Timeout = Timeout(5, TimeUnit.SECONDS)): ActorRef = {
    val actor = actorName match {
      case Some(name) => system.actorOf(props, name)
      case None => system.actorOf(props)
    }
    awaitActorRef(actor, system)
  }

  // Will wait until an actor has come up before returning its ActorRef
  def awaitActorRef(actor: ActorRef, system: ActorSystem)(implicit timeout: Timeout = Timeout(5, TimeUnit.SECONDS)): ActorRef = {
    Await.result(system.actorSelection(actor.path).resolveOne(), timeout.duration)
    actor
  }
}

trait ActorWaitHelper { this: Actor =>
  // Will wait until an actor has come up before returning its ActorRef
  def awaitActor(props: Props, actorName: Option[String] = None)(implicit timeout: Timeout = Timeout(5, TimeUnit.SECONDS)): ActorRef =
    ActorWaitHelper.awaitActor(props, context.system, actorName)(timeout)
} 
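A small usage sketch of the helper above; `EchoActor` is an illustrative stand-in for any actor class:

import akka.actor.{Actor, ActorRef, ActorSystem, Props}

class EchoActor extends Actor {
  def receive: Receive = { case msg => sender() ! msg }
}

implicit val system: ActorSystem = ActorSystem("wait-demo")
// Blocks (up to the implicit 5s default timeout) until the actor is resolvable, then returns its ref
val echo: ActorRef = ActorWaitHelper.awaitActor(Props[EchoActor], system, Some("echo"))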
Example 76
Source File: Harness.scala    From wookiee   with Apache License 2.0 5 votes vote down vote up
package com.webtrends.harness.app

import akka.actor.{ActorRef, ActorSystem, Props, UnhandledMessage}
import akka.pattern._
import com.typesafe.config.Config
import com.webtrends.harness.UnhandledEventListener
import com.webtrends.harness.app.HarnessActor.ShutdownSystem
import com.webtrends.harness.logging.Logger

import scala.concurrent.{Await, Future}
import scala.concurrent.duration._
import scala.util.Failure
import scala.util.Success


object Harness {

  // NOTE: the rest of this object (the `system` reference, `externalLogger`,
  // and `shutdownActorSystem`) is elided in this excerpt.

  def addShutdownHook(): Unit = {
    Runtime.getRuntime.addShutdownHook(new Thread(new Runnable {
      def run() = {
        system match {
          case Some(sys) =>
            sys.log.debug("The shutdown hook has been called")
            shutdownActorSystem(block = true) {
              externalLogger.info("Wookiee Shut Down, Thanks for Coming!")
            }
          case _ =>
        }
      }
    }))
  }
} 
Example 77
Source File: HarnessActorSystem.scala    From wookiee   with Apache License 2.0 5 votes vote down vote up
package com.webtrends.harness.app

import akka.actor.ActorSystem
import com.typesafe.config.{Config, ConfigFactory}
import com.webtrends.harness.component.ComponentManager
import com.webtrends.harness.logging.Logger
import com.webtrends.harness.service.ServiceManager

object HarnessActorSystem {

  lazy val loader = HarnessClassLoader(Thread.currentThread.getContextClassLoader)
  private val externalLogger = Logger.getLogger(this.getClass)

  def apply(config: Option[Config] = None): ActorSystem = {
    ActorSystem.create("server", getConfig(config), loader)
  }

  def getConfig(config: Option[Config]): Config = {
    val sysConfig = {
      if (config.isDefined) {
        config.get
      } else {
        val baseConfig = ConfigFactory.load(loader, "conf/application.conf")
        ConfigFactory.load(loader).withFallback(baseConfig).getConfig("wookiee-system")
      }
    }

    ComponentManager.loadComponentJars(sysConfig, loader)
    ConfigFactory.load // result discarded; presumably re-loads config now that component JARs are on the classpath

    externalLogger.debug("Loading the service configs")
    val configs = ServiceManager.loadConfigs(sysConfig)
    if (configs.nonEmpty) externalLogger.info(s"${configs.size} service config(s) have been loaded: ${configs.mkString(", ")}")

    externalLogger.debug("Loading the component configs")
    val compConfigs = ComponentManager.loadComponentInfo(sysConfig)
    if (compConfigs.nonEmpty) externalLogger.info(s"${compConfigs.size} component config(s) have been loaded: ${compConfigs.mkString(", ")}\nIf 0 could be due to config loaded from component JARs.")

    val allConfigs = configs ++ compConfigs

    // Build the hierarchy
    val conf = if (allConfigs.isEmpty) sysConfig
      else allConfigs.reduce(_.withFallback(_)).withFallback(sysConfig)
    conf.resolve()
  }
} 
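The config assembly above relies on Typesafe Config's fallback chain: each withFallback layers a less specific config underneath, so keys in the more specific config win. A self-contained illustration:

import com.typesafe.config.ConfigFactory

val specific = ConfigFactory.parseString("a = 1")
val fallback = ConfigFactory.parseString("a = 2\nb = 3")
val merged   = specific.withFallback(fallback).resolve()
// merged.getInt("a") == 1 (specific wins); merged.getInt("b") == 3 (filled from fallback)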
Example 78
Source File: LoggerSpec.scala    From wookiee   with Apache License 2.0 5 votes vote down vote up
package com.webtrends.harness.logging

import akka.actor.ActorSystem
import akka.testkit.{TestKit, TestProbe}
import ch.qos.logback.classic.Level
import com.webtrends.harness.TestKitSpecificationWithJUnit
import org.slf4j.LoggerFactory

class LoggerSpec extends TestKitSpecificationWithJUnit(ActorSystem("harness")) with LoggingAdapter {

  val probe = new TestProbe(system)
  val appender = setupAppender()
  sequential

  "logging" should {
    "allow for logging that is received by a mediator actor using Scala string interpolation" in {
      Logger.registerMediator(probe.ref)
      val logger = Logger("test")
      val x = 0
      logger.trace(s"testing ${x}123...")

      val msg = Trace(LoggerFactory getLogger "test", "testing 0123...", None, None, Nil, None)
      Logger.unregisterMediator(probe.ref)
      probe.expectMsgClass(classOf[Trace]) must be equalTo msg
    }

    "allow for logging that is received by a mediator actor using Java string interpolation" in {
      Logger.registerMediator(probe.ref)
      val logger = Logger("test")
      logger.debug("testing {}123...", 0)

      val msg = Debug(LoggerFactory getLogger "test", "testing {}123...", None, None, Seq(0), None)
      Logger.unregisterMediator(probe.ref)
      probe.expectMsgClass(classOf[Debug]) must be equalTo msg
    }

    "allow for logging that is handle directly by the underlying logging framework using Scala string interpolation" in {
      val logger = Logger("test")
      val x = 0
      logger.info(s"testing ${x}123...")
      appender.lastMessage.get must be equalTo "testing 0123..."
    }

    "allow for logging that is handle directly by the underlying logging framework using Java string interpolation" in {
      val logger = Logger("test")
      logger.warn("testing {}123...", 0)
      appender.lastMessage.get must be equalTo "testing 0123..."
    }

    "allow for logging that is handle directly by the underlying logging framework using Scala string interpolation and handles a Throwable" in {
      val logger = Logger("test")
      logger.error("testing {}123...", 0)
      appender.lastMessage.get must be equalTo "testing 0123..."
    }

    "don't log if try succeeds" in {
      val logger = Logger("test")
      logger.error("testing {}123...", 0)
      tryAndLogError({ true })
      appender.lastMessage.get must be equalTo "testing 0123..."
    }

    "do log if try fails" in {
      val logger = Logger("test")
      logger.error("testing {}123...", 0)
      tryAndLogError({ 5 / 0 })
      appender.lastMessage.get must be equalTo "/ by zero"
    }
  }

  step {
    TestKit.shutdownActorSystem(system)
  }

  private def setupAppender(): TestingAppender = {
    val root = LoggerFactory.getLogger(org.slf4j.Logger.ROOT_LOGGER_NAME).asInstanceOf[ch.qos.logback.classic.Logger]
    root.setLevel(Level.ALL)
    val appender = new TestingAppender()
    appender.start()
    root.addAppender(appender)
    appender
  }
} 
Example 79
Source File: LoggingActorSpec.scala    From wookiee   with Apache License 2.0 5 votes vote down vote up
package com.webtrends.harness.logging

import akka.actor.{ActorSystem, Props}
import akka.event.Logging.{InitializeLogger, LoggerInitialized}
import akka.testkit.{TestKit, TestProbe}
import com.typesafe.config.ConfigFactory
import com.webtrends.harness.TestKitSpecificationWithJUnit

class LoggingActorSpec extends TestKitSpecificationWithJUnit(ActorSystem("test", ConfigFactory.parseString( """logging.use-actor=off"""))) {

  val logger = system.actorOf(Props[LoggingActor])

  "Logging" should {
    "test logging initialization" in {
      val probe = TestProbe()
      probe.send(logger, InitializeLogger(null))
      LoggerInitialized must beEqualTo(probe.expectMsg(LoggerInitialized))
    }
  }

  step {
    TestKit.shutdownActorSystem(system)
  }
} 
Example 80
Source File: ServiceSpec.scala    From wookiee   with Apache License 2.0 5 votes vote down vote up
package com.webtrends.harness

import akka.actor.ActorSystem
import akka.testkit.{TestActorRef, TestKit, TestProbe}
import com.webtrends.harness.health.{ComponentState, HealthComponent}
import com.webtrends.harness.service.messages._
import com.webtrends.harness.service.meta.ServiceMetaDetails
import org.specs2.mutable.SpecificationLike

case class TestClass(name: String, value: Int)


class ServiceSpec extends TestKit(ActorSystem("harness")) with SpecificationLike {

  val act = TestActorRef(new TestService)
  //val httpAct = TestActorRef(new TestHttpService)

  "services " should {

    " be able to be loaded and pinged" in {
      val probe = TestProbe()
      probe.send(act, Ping)
      Pong must beEqualTo(probe.expectMsg(Pong))
    }

    " be able to be loaded and sent a ready message" in {
      val probe = TestProbe()
      probe.send(act, Ready)
      Ready must beEqualTo(probe.expectMsg(Ready))
    }

    " be able to be loaded and checked" in {
      val probe = TestProbe()
      probe.send(act, CheckHealth)
      val comp = HealthComponent("testservice", ComponentState.NORMAL, "test")
      comp.addComponent(HealthComponent("childcomponent", ComponentState.DEGRADED, "test"))

      comp must beEqualTo(probe.expectMsg(comp))
    }

    //todo only HttpService should be able to do this
    
  }

  step {
    TestKit.shutdownActorSystem(system)
  }
} 
Example 81
Source File: ConfigSpec.scala    From wookiee   with Apache License 2.0 5 votes vote down vote up
package com.webtrends.harness

import java.io.{BufferedWriter, File, FileWriter}
import java.util.concurrent.TimeUnit

import akka.actor.{Actor, ActorSystem, Props}
import akka.testkit.TestProbe
import com.typesafe.config.ConfigFactory
import com.webtrends.harness.app.HarnessActor.ConfigChange
import com.webtrends.harness.config.ConfigWatcherActor
import com.webtrends.harness.health.{ComponentState, HealthComponent}
import com.webtrends.harness.service.messages.CheckHealth
import org.specs2.mutable.SpecificationWithJUnit

import scala.concurrent.ExecutionContextExecutor
import scala.concurrent.duration.FiniteDuration
import scala.reflect.io.{Directory, Path}

class ConfigSpec extends SpecificationWithJUnit {
  implicit val dur = FiniteDuration(2, TimeUnit.SECONDS)
  new File("services/test/conf").mkdirs()
  implicit val sys = ActorSystem("system", ConfigFactory.parseString( """
    akka.actor.provider = "akka.actor.LocalActorRefProvider"
    services { path = "services" }
    """).withFallback(ConfigFactory.load))

  implicit val ec: ExecutionContextExecutor =  sys.dispatcher

  val probe = TestProbe()
  val parent = sys.actorOf(Props(new Actor {
    val child = context.actorOf(ConfigWatcherActor.props, "child")
    def receive = {
      case x if sender == child => probe.ref forward x
      case x => child forward x
    }
  }))

  sequential

  "config " should {
    "be in good health" in {
      probe.send(parent, CheckHealth)
      val msg = probe.expectMsgClass(classOf[HealthComponent])
      msg.state equals ComponentState.NORMAL
    }

    "detect changes in config" in {
      val file = new File("services/test/conf/test.conf")
      val bw = new BufferedWriter(new FileWriter(file))
      bw.write("test = \"value\"")
      bw.close()
      val msg = probe.expectMsgClass(classOf[ConfigChange])
      msg.isInstanceOf[ConfigChange]
    }
  }

  step {
    sys.terminate().onComplete { _ =>
        Directory(Path(new File("services"))).deleteRecursively()
    }
  }
} 
Example 82
Source File: InternalHttpSpec.scala    From wookiee   with Apache License 2.0 5 votes vote down vote up
package com.webtrends.harness.http

import java.net.{HttpURLConnection, URL}
import java.util.concurrent.TimeUnit
import akka.actor.{Props, ActorSystem}
import akka.testkit.TestKit
import akka.util.Timeout
import com.webtrends.harness.TestKitSpecificationWithJUnit
import com.webtrends.harness.service.messages.CheckHealth
import scala.concurrent.Await
import akka.pattern.ask
import scala.concurrent.duration.FiniteDuration

class InternalHttpSpec extends TestKitSpecificationWithJUnit(ActorSystem("test")) with InternalHttpClient {
  val port = 8123
  val path = "http://127.0.0.1:" + port + "/"
  val httpActor = system.actorOf(Props(classOf[SimpleHttpServer], port))

  // We need to make sure the httpActor has started up before trying to connect.
  implicit val timeout = Timeout(FiniteDuration(5, TimeUnit.SECONDS))
  Await.result(httpActor ? CheckHealth, timeout.duration)

  "Test handlers" should {
    "handle the get path /ping" in {
      val url = new URL(path + "ping")
      val conn = url.openConnection().asInstanceOf[HttpURLConnection]
      val resp = getResponseContent(conn)

      resp.status mustEqual "200"
      resp.content.length must be > 0
      resp.content.substring(0, 5) mustEqual "pong:"
    }
  }

  step {
    TestKit.shutdownActorSystem(system)
  }

} 
Example 83
Source File: BaseSpecTest.scala    From wookiee   with Apache License 2.0 5 votes vote down vote up
package com.webtrends.harness.service.test

import akka.actor.ActorSystem
import ch.qos.logback.classic.Level
import com.typesafe.config.{Config, ConfigFactory}
import com.webtrends.harness.component.Component
import com.webtrends.harness.service.Service
import org.specs2.mutable.SpecificationLike
import org.scalatest.{MustMatchers, WordSpecLike}

import scala.concurrent.duration._

trait BaseWookieeTest {
  def config:Config = ConfigFactory.empty()
  def componentMap:Option[Map[String, Class[_<:Component]]] = None
  def servicesMap:Option[Map[String, Class[_<:Service]]] = None
  def logLevel: Level = Level.INFO
  def startupWait: FiniteDuration = 15 seconds

  TestHarness(config, servicesMap, componentMap, logLevel, startupWait)
  Thread.sleep(1000)
  implicit val system: ActorSystem = TestHarness.system.get
}

trait BaseWookieeSpecTest extends BaseWookieeTest with SpecificationLike
trait BaseWookieeScalaTest extends BaseWookieeTest with WordSpecLike with MustMatchers 
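A sketch of a concrete spec built on the traits above (the config key and assertion are illustrative):

import com.typesafe.config.{Config, ConfigFactory}

class HarnessBootSpec extends BaseWookieeScalaTest {
  override def config: Config = ConfigFactory.parseString("wookiee-system { }")

  "the test harness" must {
    "expose a running ActorSystem" in {
      system.name must not be empty
    }
  }
}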
Example 84
Source File: ActorWaitSpec.scala    From wookiee   with Apache License 2.0 5 votes vote down vote up
package com.webtrends.harness

import java.util.concurrent.TimeUnit

import akka.actor.{Actor, ActorSystem, PoisonPill, Props}
import akka.pattern.ask
import akka.testkit.TestKit
import akka.util.Timeout
import com.webtrends.harness.utils.ActorWaitHelper
import org.specs2.mutable.SpecificationLike

import scala.concurrent.Await
import scala.concurrent.duration.Duration

class WaitedOnActor extends Actor with ActorWaitHelper {
  def receive: Receive = {
    case "message" => sender ! "waitedResponse"
  }
}

class WaitActor extends Actor with ActorWaitHelper {
  implicit val timeout = Timeout(5000, TimeUnit.MILLISECONDS)
  val waited = awaitActor(Props[WaitedOnActor])

  def receive: Receive = {
    case "message" => sender ! "response"
    case "waited" => sender ! Await.result((waited ? "message").mapTo[String], Duration(5, "seconds"))
  }
}

class ActorWaitSpec extends TestKit(ActorSystem("wait-spec")) with SpecificationLike {
  implicit val timeout = Timeout(5000, TimeUnit.MILLISECONDS)
  val waitActor = ActorWaitHelper.awaitActor(Props[WaitActor], system)

  sequential

  "ActorWaitSpec" should {
    "await the WaitActor successfully " in {
      Await.result((waitActor ? "message").mapTo[String], Duration(5, "seconds")) must beEqualTo("response")
    }

    "the WaitActor's awaited actor must have come up " in {
      Await.result((waitActor ? "waited").mapTo[String], Duration(5, "seconds")) must beEqualTo("waitedResponse")
    }
  }

  step {
    waitActor ! PoisonPill
  }
} 
Example 85
Source File: Routes.scala    From hydra   with Apache License 2.0 5 votes vote down vote up
package hydra.ingest.modules

import akka.actor.ActorSystem
import akka.http.scaladsl.server.directives.RouteDirectives
import akka.http.scaladsl.server.{Route, RouteConcatenation}
import cats.effect.Sync
import hydra.common.config.ConfigSupport
import hydra.common.util.{ActorUtils, Futurable}
import hydra.ingest.app.AppConfig.AppConfig
import hydra.ingest.http._
import hydra.kafka.consumer.KafkaConsumerProxy
import hydra.kafka.endpoints.{BootstrapEndpoint, BootstrapEndpointV2, TopicMetadataEndpoint, TopicsEndpoint}
import hydra.kafka.util.KafkaUtils.TopicDetails

import scala.concurrent.ExecutionContext

final class Routes[F[_]: Sync: Futurable] private(programs: Programs[F], algebras: Algebras[F], cfg: AppConfig)
                                                 (implicit system: ActorSystem) extends RouteConcatenation with ConfigSupport {

  private implicit val ec: ExecutionContext = system.dispatcher
  private val bootstrapEndpointV2 = if (cfg.v2MetadataTopicConfig.createV2TopicsEnabled) {
    val topicDetails =
      TopicDetails(
        cfg.createTopicConfig.defaultNumPartions,
        cfg.createTopicConfig.defaultReplicationFactor
      )
    new BootstrapEndpointV2(programs.createTopic, topicDetails).route
  } else {
    RouteDirectives.reject
  }

  lazy val routes: F[Route] = Sync[F].delay {
    import ConfigSupport._

    //TODO: remove this lookup
    val consumerPath = applicationConfig
      .getStringOpt("actors.kafka.consumer_proxy.path")
      .getOrElse(
        s"/user/service/${ActorUtils.actorName(classOf[KafkaConsumerProxy])}"
      )

    val consumerProxy = system.actorSelection(consumerPath)

    new SchemasEndpoint().route ~
      new BootstrapEndpoint(system).route ~
      new TopicMetadataEndpoint(consumerProxy, algebras.metadata).route ~
      new IngestorRegistryEndpoint().route ~
      new IngestionWebSocketEndpoint().route ~
      new IngestionEndpoint(cfg.ingestConfig.alternateIngestEnabled,
                            programs.ingestionFlow,
                            programs.ingestionFlowV2,
                            cfg.ingestConfig.useOldIngestIfUAContains).route ~
      new TopicsEndpoint(consumerProxy)(system.dispatcher).route ~
      HealthEndpoint.route ~
      bootstrapEndpointV2
  }
}

object Routes {
  def make[F[_]: Sync: Futurable](programs: Programs[F], algebras: Algebras[F], config: AppConfig)
                           (implicit system: ActorSystem): F[Routes[F]] = Sync[F].delay(new Routes[F](programs, algebras, config))
} 
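
Since Routes.make and the routes themselves are wrapped in F, a caller allocates the router effectfully and then binds it to a port. Below is a minimal sketch with cats-effect IO; the host, the port, the surrounding wiring of programs/algebras/cfg, and the implicit Futurable[IO] instance are all assumptions, not part of the example above:

import akka.actor.ActorSystem
import akka.http.scaladsl.Http
import akka.stream.ActorMaterializer
import cats.effect.{ContextShift, IO}

// Sketch only: assumes an implicit Futurable[IO] instance is in scope, as in the app's Main,
// and that `programs`, `algebras`, and `cfg` are built elsewhere.
def serve(programs: Programs[IO], algebras: Algebras[IO], cfg: AppConfig)
         (implicit system: ActorSystem): IO[Http.ServerBinding] = {
  implicit val mat: ActorMaterializer = ActorMaterializer()
  implicit val cs: ContextShift[IO] = IO.contextShift(system.dispatcher)
  for {
    router  <- Routes.make[IO](programs, algebras, cfg)
    route   <- router.routes
    binding <- IO.fromFuture(IO(Http().bindAndHandle(route, "0.0.0.0", 8088)))
  } yield binding
}
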
Example 86
Source File: IngestorRegistryEndpoint.scala    From hydra   with Apache License 2.0
package hydra.ingest.http

import akka.actor.ActorSystem
import akka.http.scaladsl.server.Route
import akka.pattern.ask
import akka.util.Timeout
import hydra.common.config.ConfigSupport
import ConfigSupport._
import hydra.core.http.RouteSupport
import hydra.ingest.bootstrap.HydraIngestorRegistryClient
import hydra.ingest.services.IngestorRegistry.{FindAll, LookupResult}

import scala.concurrent.duration.{FiniteDuration, _}


class IngestorRegistryEndpoint()(implicit system:ActorSystem) extends RouteSupport
    with HydraIngestJsonSupport
    with ConfigSupport {

  private val registryLookupTimeout = applicationConfig
    .getDurationOpt("ingest.service-lookup.timeout")
    .getOrElse(5.seconds)

  lazy val registry = HydraIngestorRegistryClient(applicationConfig).registry

  private implicit val timeout = Timeout(registryLookupTimeout)

  override val route: Route =
    path("ingestors" ~ Slash.?) {
      get {
        onSuccess(registry ? FindAll) {
          case response: LookupResult => complete(response.ingestors)
        }
      }
    }
} 
Example 87
Source File: HydraIngestorRegistryClient.scala    From hydra   with Apache License 2.0
package hydra.ingest.bootstrap

import akka.actor.{ActorSelection, ActorSystem}
import akka.pattern.ask
import akka.util.Timeout
import com.typesafe.config.Config
import hydra.common.util.ActorUtils
import hydra.ingest.services.IngestorRegistry
import hydra.ingest.services.IngestorRegistry.{FindByName, LookupResult}

import scala.concurrent.Future


class HydraIngestorRegistryClient(registryPath: String)(
    implicit val system: ActorSystem
) {

  lazy val registry: ActorSelection = system.actorSelection(registryPath)

  def lookupIngestor(
      name: String
  )(implicit timeout: Timeout): Future[LookupResult] = {
    (registry ? FindByName(name)).mapTo[LookupResult]
  }
}

object HydraIngestorRegistryClient {

  import hydra.common.config.ConfigSupport._

  def registryPath(config: Config) =
    config
      .getStringOpt("ingest.ingestor-registry.path")
      .getOrElse(
        s"/user/service/${ActorUtils.actorName(classOf[IngestorRegistry])}"
      )

  def apply(
      config: Config
  )(implicit system: ActorSystem): HydraIngestorRegistryClient = {
    new HydraIngestorRegistryClient(registryPath(config))(system)
  }
} 
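
Callers construct the client from configuration and query it with an ask timeout in scope. A small sketch; the ingestor name is illustrative:

import akka.actor.ActorSystem
import akka.util.Timeout
import com.typesafe.config.ConfigFactory

import scala.concurrent.duration._

implicit val system: ActorSystem = ActorSystem()
implicit val timeout: Timeout = Timeout(5.seconds)
import system.dispatcher

val client = HydraIngestorRegistryClient(ConfigFactory.load())
client.lookupIngestor("test_ingestor").foreach { result =>
  println(s"Found ${result.ingestors.size} ingestor(s)")
}
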
Example 88
Source File: MockEndpoint.scala    From hydra   with Apache License 2.0
package hydra.ingest.http.mock

import akka.actor.ActorSystem
import akka.http.scaladsl.model.StatusCodes._
import akka.http.scaladsl.server.Directives._
import akka.http.scaladsl.server.{ExceptionHandler, Route}
import hydra.ingest.http.SchemasEndpoint
import io.confluent.kafka.schemaregistry.client.rest.exceptions.RestClientException

import scala.concurrent.{ExecutionContext, Future}

class MockEndpoint(
    implicit system: ActorSystem,
    implicit val e: ExecutionContext
) {

  def throwRestClientException(
      statusCode: Int,
      errorCode: Int,
      errorMessage: String
  ): Future[Any] = {
    throw new RestClientException(errorMessage, statusCode, errorCode)
  }

  val schemaRouteExceptionHandler: ExceptionHandler =
    new SchemasEndpoint().excptHandler

  def route: Route = {
    pathPrefix("throwRestClientException") {
      handleExceptions(schemaRouteExceptionHandler) {
        get {
          parameters('statusCode, 'errorCode, 'errorMessage) {
            (statusCode, errorCode, errorMessage) =>
              pathEndOrSingleSlash {
                onSuccess(
                  throwRestClientException(
                    statusCode.toInt,
                    errorCode.toInt,
                    errorMessage
                  )
                ) { _ => complete(OK) }
              }
          }
        }
      }
    }
  }
} 
Example 89
Source File: IngestorRegistrarSpec.scala    From hydra   with Apache License 2.0
package hydra.ingest.services

import java.util.concurrent.TimeUnit

import akka.actor.{ActorSystem, Props}
import akka.pattern.ask
import akka.testkit.{ImplicitSender, TestKit}
import akka.util.Timeout
import hydra.common.util.ActorUtils
import hydra.ingest.services.IngestorRegistrar.UnregisterAll
import hydra.ingest.services.IngestorRegistry.{
  FindAll,
  FindByName,
  LookupResult
}
import hydra.ingest.test.TestIngestor
import org.scalatest.concurrent.{Eventually, ScalaFutures}
import org.scalatest.time.{Seconds, Span}
import org.scalatest.matchers.should.Matchers
import org.scalatest.funspec.AnyFunSpecLike
import org.scalatest.BeforeAndAfterAll

import scala.concurrent.duration._


class IngestorRegistrarSpec
    extends TestKit(ActorSystem("IngestorRegistrarSpec"))
    with Matchers
    with AnyFunSpecLike
    with ImplicitSender
    with ScalaFutures
    with BeforeAndAfterAll
    with Eventually {

  override def afterAll =
    TestKit.shutdownActorSystem(system, verifySystemShutdown = true)

  implicit override val patienceConfig =
    PatienceConfig(timeout = Span(10, Seconds), interval = Span(1, Seconds))

  val registry = system.actorOf(Props[IngestorRegistry], "ingestor_registry")

  val act = system.actorOf(Props[IngestorRegistrar])

  implicit val timeout = Timeout(3, TimeUnit.SECONDS)

  describe("The ingestor registrar actor") {
    it("registers from classpath on bootstrap") {
      eventually {
        whenReady(
          (registry ? FindByName(ActorUtils.actorName(classOf[TestIngestor])))
            .mapTo[LookupResult]
        ) { i =>
          i.ingestors.size shouldBe 1
          i.ingestors(0).name shouldBe ActorUtils.actorName(
            classOf[TestIngestor]
          )
        }
      }
    }

    it("unregisters") {
      act ! UnregisterAll
      eventually {
        whenReady((registry ? FindAll).mapTo[LookupResult]) { i =>
          i.ingestors.size shouldBe 0
        }
      }
    }
  }
} 
Example 90
Source File: IngestionSocketActorSpec.scala    From hydra   with Apache License 2.0
package hydra.ingest.services

import org.scalatest.matchers.should.Matchers
import org.scalatest.flatspec.AnyFlatSpecLike
import org.scalatest.BeforeAndAfterAll
import akka.actor.ActorSystem
import akka.actor.Props
import akka.testkit.TestProbe

class IngestionSocketActorSpec
    extends AnyFlatSpecLike
    with Matchers
    with BeforeAndAfterAll {

  private implicit val system: ActorSystem = ActorSystem()

  override def afterAll(): Unit = {
    system.terminate()
  }

  private def getIngestActorRef = system.actorOf(Props[IngestionSocketActor])

  it should "ack the init message in waiting state" in {
    val ingestActor = getIngestActorRef
    val probe = TestProbe()
    ingestActor.tell(SocketInit, probe.ref)
    probe.expectMsg(SocketAck)
  }

  it should "ack the init message in initialized state" in {
    val ingestActor = getIngestActorRef
    val probe = TestProbe()
    ingestActor ! SocketStarted(probe.ref)
    ingestActor.tell(SocketInit, probe.ref)
    probe.expectMsg(SocketAck)
  }

  private def testIngestionMessageAck(ingestionMessages: IncomingMessage*) = {
    it should s"ack the incoming messages of form: $ingestionMessages" in {
      val ingestActor = getIngestActorRef
      val probe = TestProbe()
      ingestActor ! SocketStarted(probe.ref)
      ingestActor.tell(SocketInit, probe.ref)
      probe.expectMsg(SocketAck)
      ingestionMessages.foreach { ingestionMessage =>
        ingestActor.tell(ingestionMessage, probe.ref)
        probe.expectMsgClass(classOf[SimpleOutgoingMessage])
        probe.expectMsg(SocketAck)
      }
    }
  }

  testIngestionMessageAck(IncomingMessage("-c HELP"))
  testIngestionMessageAck(IncomingMessage("-c SET hydra-ack = replicated"))
  testIngestionMessageAck(IncomingMessage("-c WHAT"))

} 
Example 91
Source File: RequestFactoriesSpec.scala    From hydra   with Apache License 2.0
package hydra.ingest.bootstrap

import akka.actor.ActorSystem
import akka.http.scaladsl.model.HttpRequest
import akka.stream.ActorMaterializer
import akka.testkit.TestKit
import org.scalatest.concurrent.ScalaFutures
import org.scalatest.matchers.should.Matchers
import org.scalatest.funspec.AnyFunSpecLike
import org.scalatest.BeforeAndAfterAll

import scala.concurrent.duration._

class RequestFactoriesSpec
    extends TestKit(ActorSystem("RequestFactoriesSpec"))
    with Matchers
    with AnyFunSpecLike
    with BeforeAndAfterAll
    with ScalaFutures {

  override def afterAll =
    TestKit.shutdownActorSystem(
      system,
      verifySystemShutdown = true,
      duration = 10.seconds
    )

  import RequestFactories._

  describe("The RequestFactories") {
    it("build a Hydra request from an HTTP request") {
      val hr = HttpRequest(entity = "test")
      val hydraReq = createRequest("1", hr)
      whenReady(hydraReq) { r => r.payload shouldBe "test" }
    }
  }
} 
Example 92
Source File: HydraIngestorRegistrySpec.scala    From hydra   with Apache License 2.0
package hydra.ingest.bootstrap

import akka.actor.{ActorSystem, Props}
import akka.testkit.{ImplicitSender, TestKit}
import com.typesafe.config.ConfigFactory
import hydra.common.util.ActorUtils
import hydra.core.bootstrap.ReflectionsWrapper
import hydra.ingest.IngestorInfo
import hydra.ingest.services.IngestorRegistry
import hydra.ingest.services.IngestorRegistry.RegisterWithClass
import hydra.ingest.test.TestIngestor
import org.scalatest.concurrent.ScalaFutures
import org.scalatest.matchers.should.Matchers
import org.scalatest.funspec.AnyFunSpecLike
import org.scalatest.BeforeAndAfterAll

import scala.concurrent.duration._

class HydraIngestorRegistrySpec
    extends TestKit(ActorSystem("HydraIngestorRegistrySpec"))
    with Matchers
    with AnyFunSpecLike
    with BeforeAndAfterAll
    with ImplicitSender
    with ScalaFutures {

  override def afterAll =
    TestKit.shutdownActorSystem(
      system,
      verifySystemShutdown = true,
      duration = 10.seconds
    )

  val testRegistry =
    system.actorOf(Props[IngestorRegistry], "ingestor_registry")

  val cfg = ConfigFactory.parseString(
    "ingest.ingestor-registry.path=/user/ingestor_registry"
  )
  val registry = HydraIngestorRegistryClient(cfg)

  implicit val actorRefFactory = system

  ReflectionsWrapper.rescan()

  registry.registry ! RegisterWithClass(classOf[TestIngestor], "global")
  expectMsgType[IngestorInfo]

  describe("The Ingestor Registry") {
    it("uses the default registry if no config") {
      val path = HydraIngestorRegistryClient.registryPath(ConfigFactory.empty())
      path shouldBe s"/user/service/${ActorUtils.actorName(classOf[IngestorRegistry])}"
    }

    it("looks up an ingestor") {
      implicit val timeout = akka.util.Timeout(10.seconds)
      whenReady(registry.lookupIngestor("test_ingestor")) { i =>
        i.ingestors.size shouldBe 1
        i.ingestors(0).name shouldBe "test_ingestor"
        i.ingestors(0).path shouldBe testRegistry.path / "test_ingestor"
      }
    }
  }
} 
Example 93
Source File: RabbitIngestorSpec.scala    From hydra   with Apache License 2.0
package hydra.rabbit

import akka.actor.{ActorSystem, Props}
import akka.testkit.TestActors.ForwardActor
import akka.testkit.{ImplicitSender, TestKit, TestProbe}
import hydra.core.ingest.HydraRequest
import hydra.core.protocol._
import hydra.core.transport.{AckStrategy, HydraRecord}
import org.scalatest.matchers.should.Matchers
import org.scalatest.funspec.AnyFunSpecLike
import org.scalatest.BeforeAndAfterAll

import scala.concurrent.duration._

class RabbitIngestorSpec
    extends TestKit(ActorSystem("rabbit-ingestor-spec"))
    with Matchers
    with AnyFunSpecLike
    with ImplicitSender
    with BeforeAndAfterAll {

  val ingestor = system.actorOf(Props[RabbitIngestor])

  val probe = TestProbe()

  val rabbitTransport =
    system.actorOf(Props(new ForwardActor(probe.ref)), "rabbit_transport")

  override def afterAll =
    TestKit.shutdownActorSystem(system, verifySystemShutdown = true)

  describe("When using the rabbit ingestor") {
    it("Joins if exchange provided") {
      val request = HydraRequest(
        "123",
        "{'name': 'test'}",
        None,
        Map(RabbitRecord.HYDRA_RABBIT_EXCHANGE -> "test.exchange")
      )
      ingestor ! Publish(request)
      expectMsg(10.seconds, Join)
    }

    it("Joins if queue provided") {
      val request = HydraRequest(
        "123",
        "{'name': 'test'}",
        None,
        Map(RabbitRecord.HYDRA_RABBIT_QUEUE -> "test.queue")
      )
      ingestor ! Publish(request)
      expectMsg(10.seconds, Join)
    }

    it("Ignores") {
      val request = HydraRequest("123", "test string")
      ingestor ! Publish(request)
      expectMsg(10.seconds, Ignore)
    }

    it("transports") {
      ingestor ! Ingest(
        TestRecord("test", "test", "", AckStrategy.NoAck),
        AckStrategy.NoAck
      )
      probe.expectMsg(
        Produce(
          TestRecord("test", "test", "", AckStrategy.NoAck),
          self,
          AckStrategy.NoAck
        )
      )
    }
  }
}

case class TestRecord(
    destination: String,
    payload: String,
    key: String,
    ackStrategy: AckStrategy
) extends HydraRecord[String, String] 
Example 94
Source File: BootstrapEndpointActors.scala    From hydra   with Apache License 2.0
package hydra.kafka.endpoints

import akka.actor.{ActorRef, ActorSystem}
import akka.stream.{ActorMaterializer, Materializer}
import hydra.avro.registry.ConfluentSchemaRegistry
import hydra.common.config.ConfigSupport
import hydra.core.akka.SchemaRegistryActor
import hydra.kafka.services.{StreamsManagerActor, TopicBootstrapActor}
import hydra.kafka.util.KafkaUtils

import scala.concurrent.ExecutionContext

trait BootstrapEndpointActors extends ConfigSupport {

  implicit val system: ActorSystem

  private[kafka] val kafkaIngestor = system.actorSelection(path =
    applicationConfig.getString("kafka-ingestor-path")
  )

  private[kafka] val schemaRegistryActor =
    system.actorOf(SchemaRegistryActor.props(applicationConfig))

  private[kafka] val bootstrapKafkaConfig =
    applicationConfig.getConfig("bootstrap-config")

  private[kafka] val streamsManagerProps = StreamsManagerActor.props(
    bootstrapKafkaConfig,
    KafkaUtils.BootstrapServers,
    ConfluentSchemaRegistry.forConfig(applicationConfig).registryClient
  )

  val bootstrapActor: ActorRef = system.actorOf(
    TopicBootstrapActor.props(
      schemaRegistryActor,
      kafkaIngestor,
      streamsManagerProps,
      Some(bootstrapKafkaConfig)
    )
  )

} 
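
Because the trait leaves system abstract, a concrete endpoint supplies it when mixing the trait in. A hypothetical sketch:

import akka.actor.ActorSystem

// MyBootstrapEndpoint is hypothetical; in the real project this trait backs BootstrapEndpoint.
class MyBootstrapEndpoint(implicit val system: ActorSystem) extends BootstrapEndpointActors {
  // bootstrapActor is now available to back a route that forwards topic-bootstrap requests
}
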
Example 95
Source File: KafkaMetrics.scala    From hydra   with Apache License 2.0
package hydra.kafka.transport

import akka.actor.ActorSystem
import com.typesafe.config.Config
import hydra.common.config.ConfigSupport
import hydra.kafka.producer.KafkaRecordMetadata
import hydra.kafka.util.KafkaUtils
import org.apache.kafka.clients.producer.ProducerRecord
import spray.json.DefaultJsonProtocol

trait KafkaMetrics {
  def saveMetrics(record: KafkaRecordMetadata): Unit

  def close(): Unit = {}
}

// $COVERAGE-OFF$
object NoOpMetrics extends KafkaMetrics {
  def saveMetrics(record: KafkaRecordMetadata): Unit = {}
}

// $COVERAGE-ON$

class PublishMetrics(topic: String)(implicit system: ActorSystem)
    extends KafkaMetrics
    with DefaultJsonProtocol
    with ConfigSupport {

  import spray.json._

  import KafkaRecordMetadata._

  private val producer = KafkaUtils
    .producerSettings[String, String]("string", rootConfig)
    .withProperty("client.id", "hydra.kafka.metrics")
    .createKafkaProducer()

  def saveMetrics(record: KafkaRecordMetadata) = {
    val payload = record.toJson.compactPrint
    producer.send(new ProducerRecord(topic, record.destination, payload))
  }

  override def close(): Unit = {
    producer.close()
  }

}

object KafkaMetrics {

  import ConfigSupport._

  def apply(config: Config)(implicit system: ActorSystem): KafkaMetrics = {
    val metricsEnabled =
      config.getBooleanOpt("transports.kafka.metrics.enabled").getOrElse(false)

    val metricsTopic = config
      .getStringOpt("transports.kafka.metrics.topic")
      .getOrElse("HydraKafkaError")

    if (metricsEnabled) new PublishMetrics(metricsTopic) else NoOpMetrics
  }
} 
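
The factory is driven purely by configuration, as the KafkaMetricsSpec in Example 98 below exercises. A short sketch; note that constructing PublishMetrics opens a real Kafka producer, so brokers must be reachable:

import akka.actor.ActorSystem
import com.typesafe.config.ConfigFactory

implicit val system: ActorSystem = ActorSystem("metrics")

// Metrics disabled (the default): the factory returns the no-op implementation.
val noOp = KafkaMetrics(ConfigFactory.empty()) // NoOpMetrics

// Metrics enabled: records are published to the configured topic.
val publishing = KafkaMetrics(ConfigFactory.parseString(
  """transports.kafka.metrics.enabled = true
    |transports.kafka.metrics.topic = metrics_topic""".stripMargin))
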
Example 96
Source File: KafkaAdminAlgebraSpec.scala    From hydra   with Apache License 2.0
package hydra.kafka.algebras

import akka.actor.ActorSystem
import cats.effect.{ContextShift, IO}
import cats.implicits._
import hydra.kafka.util.KafkaUtils.TopicDetails
import net.manub.embeddedkafka.{EmbeddedKafka, EmbeddedKafkaConfig}
import org.scalatest.BeforeAndAfterAll
import org.scalatest.matchers.should.Matchers
import org.scalatest.wordspec.AnyWordSpecLike

import scala.concurrent.ExecutionContext

final class KafkaAdminAlgebraSpec
    extends AnyWordSpecLike
    with Matchers
    with BeforeAndAfterAll
    with EmbeddedKafka {

  private val port = 8023

  implicit private val kafkaConfig: EmbeddedKafkaConfig =
    EmbeddedKafkaConfig(kafkaPort = port, zooKeeperPort = 3027)

  implicit private val contextShift: ContextShift[IO] =
    IO.contextShift(ExecutionContext.global)

  implicit private val system: ActorSystem = ActorSystem(
    "kafka-client-spec-system"
  )

  override def beforeAll(): Unit = {
    super.beforeAll()
    EmbeddedKafka.start()
  }

  override def afterAll(): Unit = {
    super.afterAll()
    EmbeddedKafka.stop()
  }

  (for {
    live <- KafkaAdminAlgebra
      .live[IO](s"localhost:$port")
    test <- KafkaAdminAlgebra.test[IO]
  } yield {
    runTests(live)
    runTests(test, isTest = true)
  }).unsafeRunSync()

  private def runTests(kafkaClient: KafkaAdminAlgebra[IO], isTest: Boolean = false): Unit = {
    (if (isTest) "KafkaAdmin#test" else "KafkaAdmin#live") must {
      "create a topic" in {
        val topicName = "Topic1"
        val topicDetails = TopicDetails(3, 1.toShort)
        (kafkaClient.createTopic(topicName, topicDetails) *> kafkaClient
          .describeTopic(topicName)
          .map {
            case Some(topic) =>
              topic.name shouldBe topicName
              topic.numberPartitions shouldBe topicDetails.numPartitions
            case None => fail("Found None when a Topic was Expected")
          }).unsafeRunSync()
      }

      "list all topics" in {
        kafkaClient.getTopicNames.unsafeRunSync() shouldBe List("Topic1")
      }

      "delete a topic" in {
        val topicToDelete = "topic_to_delete"
        (for {
          _ <- kafkaClient.createTopic(topicToDelete, TopicDetails(1, 1))
          _ <- kafkaClient.deleteTopic(topicToDelete)
          maybeTopic <- kafkaClient.describeTopic(topicToDelete)
        } yield maybeTopic should not be defined).unsafeRunSync()
      }
    }
  }
} 
Example 97
Source File: KafkaConsumerProxySpec.scala    From hydra   with Apache License 2.0
package hydra.kafka.consumer

import akka.actor.{ActorSystem, Props}
import akka.testkit.{ImplicitSender, TestKit}
import hydra.kafka.consumer.KafkaConsumerProxy._
import net.manub.embeddedkafka.{EmbeddedKafka, EmbeddedKafkaConfig}
import org.apache.kafka.common.TopicPartition
import org.scalatest.matchers.should.Matchers
import org.scalatest.funspec.AnyFunSpecLike
import org.scalatest.BeforeAndAfterAll

import scala.concurrent.duration._


class KafkaConsumerProxySpec
    extends TestKit(ActorSystem("test"))
    with Matchers
    with AnyFunSpecLike
    with BeforeAndAfterAll
    with ImplicitSender {

  implicit val config =
    EmbeddedKafkaConfig(kafkaPort = 8092, zooKeeperPort = 3181)

  override def beforeAll() = {
    super.beforeAll()
    EmbeddedKafka.start()
    EmbeddedKafka.createCustomTopic("test-consumer1")
    EmbeddedKafka.createCustomTopic("test-consumer2")
  }

  override def afterAll() = {
    super.afterAll()
    EmbeddedKafka.stop()
    TestKit.shutdownActorSystem(system, verifySystemShutdown = true)
  }

  lazy val kafkaProxy = system.actorOf(Props[KafkaConsumerProxy])

  describe("When using KafkaConsumerProxy") {
    it("gets latest offsets for a topic") {
      kafkaProxy ! GetLatestOffsets("test-consumer1")
      expectMsg(
        10.seconds,
        LatestOffsetsResponse(
          "test-consumer1",
          Map(new TopicPartition("test-consumer1", 0) -> 0L)
        )
      )
    }

    it("lists topics") {
      kafkaProxy ! ListTopics
      expectMsgPF(10.seconds) {
        case ListTopicsResponse(topics) =>
          topics.keys should contain allOf ("test-consumer1", "test-consumer2")
      }
    }

    it("gets partition info") {
      kafkaProxy ! GetPartitionInfo("test-consumer2")
      expectMsgPF(10.seconds) {
        case PartitionInfoResponse(topic, response) =>
          topic shouldBe "test-consumer2"
          response.map(p => p.partition()) shouldBe Seq(0)
      }
    }

    it("handles errors") {
      kafkaProxy ! GetPartitionInfo("test-consumer-unknown")
      expectMsgPF(10.seconds) {
        case PartitionInfoResponse(topic, response) =>
          response(0).leader().idString shouldBe "0"
          topic should startWith("test-consumer-unknown")
      }
    }
  }
} 
Example 98
Source File: KafkaMetricsSpec.scala    From hydra   with Apache License 2.0
package hydra.kafka.transport

import akka.actor.ActorSystem
import akka.testkit.TestKit
import com.typesafe.config.ConfigFactory
import hydra.core.transport.AckStrategy
import hydra.kafka.producer.KafkaRecordMetadata
import net.manub.embeddedkafka.{EmbeddedKafka, EmbeddedKafkaConfig}
import org.scalatest.matchers.should.Matchers
import org.scalatest.funspec.AnyFunSpecLike
import org.scalatest.BeforeAndAfterAll
import spray.json.DefaultJsonProtocol


class KafkaMetricsSpec
    extends TestKit(ActorSystem("hydra"))
    with Matchers
    with AnyFunSpecLike
    with BeforeAndAfterAll
    with DefaultJsonProtocol {

  import KafkaRecordMetadata._

  implicit val config = EmbeddedKafkaConfig(
    kafkaPort = 8092,
    zooKeeperPort = 3181,
    customBrokerProperties = Map(
      "auto.create.topics.enable" -> "false",
      "offsets.topic.replication.factor" -> "1"
    )
  )

  override def afterAll() = {
    super.afterAll()
    EmbeddedKafka.stop()
    TestKit.shutdownActorSystem(system, verifySystemShutdown = true)
  }

  override def beforeAll() = {
    super.beforeAll()
    EmbeddedKafka.start()
    EmbeddedKafka.createCustomTopic("metrics_topic")
  }

  describe("When using the KafkaMetrics object") {

    it("uses the NoOpMetrics") {
      KafkaMetrics(ConfigFactory.empty()) shouldBe NoOpMetrics
      KafkaMetrics(
        ConfigFactory.parseString("transports.kafka.metrics.enabled=false")
      ) shouldBe NoOpMetrics
    }

    it("uses the PublishMetrics") {
      import spray.json._
      val cfg = ConfigFactory.parseString(s"""
           | transports.kafka.metrics.topic = metrics_topic
           | transports.kafka.metrics.enabled=true""".stripMargin)
      val pm = KafkaMetrics(cfg)
      pm shouldBe a[PublishMetrics]
      val kmd = KafkaRecordMetadata(1, 1, "topic", 1, 1, AckStrategy.NoAck)
      pm.saveMetrics(kmd)
      EmbeddedKafka
        .consumeFirstStringMessageFrom("metrics_topic")
        .parseJson shouldBe kmd.toJson

    }
  }
} 
Example 99
Source File: LoggingAdapterSpec.scala    From hydra   with Apache License 2.0
package hydra.common.logging

import akka.actor.{Actor, ActorSystem}
import akka.testkit.{TestActorRef, TestKit, TestProbe}
import org.scalatest.matchers.should.Matchers
import org.scalatest.funspec.AnyFunSpecLike
import org.scalatest.BeforeAndAfterAll


class LoggingAdapterSpec
    extends TestKit(ActorSystem("test"))
    with Matchers
    with AnyFunSpecLike
    with BeforeAndAfterAll {

  override def afterAll = TestKit.shutdownActorSystem(system)

  describe("The logging adapter") {

    it("allows an actor to use the logger") {

      val act = TestActorRef(new Actor with ActorLoggingAdapter {
        override def receive = {
          case _ => log.info("got it"); sender ! "got it"
        }
      }, "logger-test")

      act.underlyingActor.log.getName shouldBe "akka.testkit.TestActorRef"

      // Send a message and make sure we get a response back
      val probe = TestProbe()
      probe.send(act, "test")
      probe.expectMsgType[String] shouldBe "got it"
    }
  }
} 
Example 100
Source File: ActorConfigSupportSpec.scala    From hydra   with Apache License 2.0
package hydra.common.config

import akka.actor.ActorSystem
import akka.testkit.{TestActorRef, TestKit}
import hydra.common.testing.DummyActor
import org.scalatest.matchers.should.Matchers
import org.scalatest.funspec.AnyFunSpecLike
import org.scalatest.BeforeAndAfterAll


class ActorConfigSupportSpec
    extends TestKit(ActorSystem("test"))
    with Matchers
    with AnyFunSpecLike
    with BeforeAndAfterAll
    with ConfigSupport {

  val dummy = TestActorRef[DummyActor]

  override def afterAll = TestKit.shutdownActorSystem(system)

  describe("When mixing the trait in an actor") {
    it("has the correct actor name") {
      dummy.underlyingActor.thisActorName shouldBe "dummy_actor"
    }

    it("has the correct actor config") {
      dummy.underlyingActor.actorConfig shouldBe rootConfig.getConfig(
        "hydraTest.actors.dummy_actor"
      )
    }

  }
} 
Example 101
Source File: TransportOpsSpec.scala    From hydra   with Apache License 2.0
package hydra.core.ingest

import akka.actor.{ActorRef, ActorSystem, Props}
import akka.testkit.TestActors.ForwardActor
import akka.testkit.{ImplicitSender, TestKit, TestProbe}
import com.pluralsight.hydra.reflect.DoNotScan
import hydra.core.akka.ActorInitializationException
import hydra.core.protocol.{IngestorError, Produce}
import hydra.core.test.TestRecordFactory
import hydra.core.transport.AckStrategy.NoAck
import org.scalatest.concurrent.ScalaFutures
import org.scalatest.matchers.should.Matchers
import org.scalatest.funspec.AnyFunSpecLike
import org.scalatest.BeforeAndAfterAll

import scala.concurrent.Await
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.duration._


class TransportOpsSpec
    extends TestKit(ActorSystem("test"))
    with Matchers
    with AnyFunSpecLike
    with BeforeAndAfterAll
    with ImplicitSender
    with ScalaFutures {

  override def afterAll() = TestKit.shutdownActorSystem(system)

  val supervisor = TestProbe()

  val tm = TestProbe()

  val transport =
    system.actorOf(Props(new ForwardActor(tm.ref)), "test-transport")

  describe("TransportOps") {
    it("looks up a transport") {
      val t =
        system.actorOf(Props(classOf[TestTransportIngestor], supervisor.ref))
      t ! "hello"
      expectMsg("hi!")
    }

    it("won't initialize if transport can't be found") {
      val t = system.actorOf(Props[TestTransportIngestorError])
      t ! "hello"
      expectNoMessage()
    }

    it("transports a record") {
      val req = HydraRequest("123", "test-produce")
      val t =
        system.actorOf(Props(classOf[TestTransportIngestor], supervisor.ref))
      t ! req
      whenReady(TestRecordFactory.build(req))(r =>
        tm.expectMsg(Produce(r, self, NoAck))
      )
    }
  }
}

@DoNotScan
class TestTransportIngestor(supervisor: ActorRef)
    extends Ingestor
    with TransportOps {

  override val recordFactory = TestRecordFactory

  override def initTimeout = 500 millis

  ingest {
    case "hello" => sender ! "hi!"
    case req: HydraRequest =>
      val record = Await.result(TestRecordFactory.build(req), 3.seconds)
      transport(record, NoAck)
  }

  override def transportName = "test-transport"
}

class TestTransportIngestorError extends Ingestor with TransportOps {
  override val recordFactory = TestRecordFactory

  override def transportName = "test-transport-unknown"
} 
Example 102
Source File: TransportCallbackSpec.scala    From hydra   with Apache License 2.0
package hydra.core.transport

import akka.actor.ActorSystem
import akka.testkit.{ImplicitSender, TestKit, TestProbe}
import hydra.core.protocol.{RecordNotProduced, RecordProduced}
import hydra.core.test.{TestRecord, TestRecordMetadata}
import hydra.core.transport.Transport.{Confirm, TransportError}
import org.scalatest.matchers.should.Matchers
import org.scalatest.funspec.AnyFunSpecLike
import org.scalatest.BeforeAndAfterAll
import scala.concurrent.duration._

class TransportCallbackSpec
    extends TestKit(ActorSystem("test"))
    with Matchers
    with AnyFunSpecLike
    with BeforeAndAfterAll
    with ImplicitSender {

  private val ingestor = TestProbe()
  private val supervisor = TestProbe()

  override def afterAll(): Unit = {
    super.afterAll()
    TestKit.shutdownActorSystem(system, verifySystemShutdown = true)
  }

  describe("Transports Acks") {
    it("handles empty callbacks") {
      NoCallback.onCompletion(
        -1,
        None,
        Some(new IllegalArgumentException("test"))
      )
      ingestor.expectNoMessage(3 seconds)
      supervisor.expectNoMessage(3 seconds)
    }

    it("handles simple/transport only callbacks") {
      val probe = TestProbe()
      new TransportSupervisorCallback(probe.ref)
        .onCompletion(-11, None, Some(new IllegalArgumentException("test")))
      ingestor.expectNoMessage(3 seconds)
      supervisor.expectNoMessage(3 seconds)
      probe.expectMsg(TransportError(-11))

      new TransportSupervisorCallback(probe.ref).onCompletion(
        -11,
        Some(TestRecordMetadata(1, 0, "", AckStrategy.NoAck)),
        None
      )
      ingestor.expectNoMessage(3 seconds)
      supervisor.expectNoMessage(3 seconds)
      probe.expectMsg(Confirm(-11))
    }

    it("handles ingestor callbacks") {
      val rec = TestRecord("OK", "1", "test", AckStrategy.NoAck)
      val transport = TestProbe()
      val cb = new IngestorCallback[String, String](
        rec,
        ingestor.ref,
        supervisor.ref,
        transport.ref
      )

      cb.onCompletion(
        1,
        Some(TestRecordMetadata(1, 0, "", AckStrategy.NoAck)),
        None
      )
      ingestor.expectMsgPF() {
        case RecordProduced(md, sup) =>
          sup shouldBe supervisor.ref
          md shouldBe a[TestRecordMetadata]
      }
      transport.expectMsg(Confirm(1))

      cb.onCompletion(1, None, Some(new IllegalArgumentException("test")))
      ingestor.expectMsgPF() {
        case RecordNotProduced(r, e, s) =>
          r shouldBe rec
          e.getMessage shouldBe "test"
          s shouldBe supervisor.ref
      }
      transport.expectMsg(TransportError(1))
    }
  }
} 
Example 103
Source File: ComposeReceiveSpec.scala    From hydra   with Apache License 2.0
package hydra.core.akka

import akka.actor.{Actor, ActorSystem, Props}
import akka.testkit.{ImplicitSender, TestKit}
import org.scalatest.BeforeAndAfterAll
import org.scalatest.matchers.should.Matchers
import org.scalatest.flatspec.AnyFlatSpecLike

class ComposeReceiveSpec
    extends TestKit(ActorSystem("test"))
    with Matchers
    with AnyFlatSpecLike
    with BeforeAndAfterAll
    with ImplicitSender {

  override def afterAll =
    TestKit.shutdownActorSystem(system, verifySystemShutdown = true)

  "The ComposingReceiveTrait" should "compose" in {
    system.actorOf(Props[TestBaseActor]) ! "foo"
    expectMsg("bar")

    system.actorOf(Props[TestComposeActor]) ! "foo"
    expectMsg("new-bar")
  }

}

trait TestBase extends Actor with ComposingReceive {

  override def baseReceive = {
    case "foo" => sender ! "bar"
  }
}

class TestBaseActor extends TestBase {
  compose(Actor.emptyBehavior)
}

class TestComposeActor extends TestBase {
  compose {
    case "foo" => sender ! "new-bar"
  }
} 
Example 104
Source File: BaseAkkaSpec.scala    From incubator-retired-iota   with Apache License 2.0
package org.apache.iota.fey

import java.nio.file.Paths

import akka.actor.{ActorIdentity, ActorRef, ActorSystem, Identify, Props}
import akka.testkit.{EventFilter, TestEvent, TestProbe}
import com.typesafe.config.ConfigFactory
import org.scalatest.BeforeAndAfterAll
import play.api.libs.json._

import scala.concurrent.duration.{DurationInt, FiniteDuration}
import scala.concurrent.Await

class BaseAkkaSpec extends BaseSpec with BeforeAndAfterAll with LoggingTest{

  //Load default configuration for Fey when running tests
  resetCapturedLogs()
  CONFIG.loadUserConfiguration(Paths.get(TestSetup.configTest.toURI()).toFile().getAbsolutePath)
  TestSetup.setup()

  val systemName = "FEY-TEST"
  implicit val system = ActorSystem(systemName, ConfigFactory.parseString("""akka.loggers = ["akka.testkit.TestEventListener"]"""))
  system.eventStream.publish(TestEvent.Mute(EventFilter.debug()))
  system.eventStream.publish(TestEvent.Mute(EventFilter.info()))
  system.eventStream.publish(TestEvent.Mute(EventFilter.warning()))
  system.eventStream.publish(TestEvent.Mute(EventFilter.error()))

  val globalIdentifierName = "GLOBAL-IDENTIFIER"
  val globalIdentifierRef = system.actorOf(Props[IdentifyFeyActors],globalIdentifierName)

  override protected def afterAll(): Unit = {
    //Force reload of GenericActor's jar
    Utils.loadedJars.remove("fey-test-actor.jar")
    Monitor.events.removeAllNodes()
    Await.ready(system.terminate(), 20.seconds)
  }

  implicit class TestProbeOps(probe: TestProbe) {

    def expectActor(path: String, max: FiniteDuration = 3.seconds): ActorRef = {
      probe.within(max) {
        var actor = null: ActorRef
        probe.awaitAssert {
          (probe.system actorSelection path).tell(Identify(path), probe.ref)
          probe.expectMsgPF(100 milliseconds) {
            case ActorIdentity(`path`, Some(ref)) => actor = ref
          }
        }
        actor
      }
    }

    def expectActorInSystem(path: String, lookInSystem: ActorSystem, max: FiniteDuration = 3.seconds): ActorRef = {
      probe.within(max) {
        var actor = null: ActorRef
        probe.awaitAssert {
          (lookInSystem actorSelection path).tell(Identify(path), probe.ref)
          probe.expectMsgPF(100 milliseconds) {
            case ActorIdentity(`path`, Some(ref)) => actor = ref
          }
        }
        actor
      }
    }

    def verifyActorTermination(actor: ActorRef)(implicit system: ActorSystem): Unit = {
      val watcher = TestProbe()
      watcher.watch(actor)
      watcher.expectTerminated(actor)
    }

    def notExpectActor(path: String, max: FiniteDuration = 3.seconds): Unit = {
      probe.within(max) {
        probe.awaitAssert {
          (probe.system actorSelection path).tell(Identify(path), probe.ref)
          probe.expectMsgPF(100 milliseconds) {
            case ActorIdentity(`path`, None) =>
          }
        }
      }
    }

    def isThreadRunning(threadName: String): Boolean = {
      Thread.getAllStackTraces.keySet().toArray
        .map(_.asInstanceOf[Thread])
        .find(_.getName == threadName)
        .exists(_.isAlive)
    }
  }

  //Utils Functions
  def getJSValueFromString(json: String): JsValue = {
    Json.parse(json)
  }

} 
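
A spec built on this base can resolve actors by path through the TestProbeOps helpers. A minimal sketch; the spec name is illustrative, and BaseSpec is assumed to supply the test-framework syntax:

import akka.testkit.TestProbe

class IdentifyFeyActorsSpec extends BaseAkkaSpec {
  val probe = TestProbe()
  // Resolve the global identifier actor that BaseAkkaSpec creates on startup.
  val identifier = probe.expectActor(s"/user/$globalIdentifierName")
}
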
Example 105
Source File: TOCApplicationLoader.scala    From play-table-of-contents   with MIT License
import akka.actor.ActorSystem
import com.typesafe.config.ConfigFactory
import context.MyExecutionContext
import play.api.routing.Router
import play.api.{Application, ApplicationLoader, BuiltInComponentsFromContext}
import com.softwaremill.macwire._
import router.Routes

class TOCApplicationLoader extends ApplicationLoader {

  def load(context: ApplicationLoader.Context): Application = {
    val exeContext = new MyExecutionContext(ActorSystem("tocActor", ConfigFactory.load()))
    new TOCComponents(exeContext, context).application
  }
}

class TOCComponents(ec: MyExecutionContext, context: ApplicationLoader.Context)
  extends BuiltInComponentsFromContext(context)
  with play.filters.HttpFiltersComponents
  with _root_.controllers.AssetsComponents {

  lazy val tableOfContentController = wire[controllers.TableOfContentController]
  // add the prefix string in local scope for the Routes constructor
  val prefix: String = "/"
  lazy val router: Router = wire[Routes]
} 
Example 106
Source File: ProgressSourceSpec.scala    From eventuate   with Apache License 2.0
package com.rbmhtechnology.eventuate.adapter.stream

import akka.actor.ActorSystem
import akka.pattern.AskTimeoutException
import akka.stream.scaladsl._
import akka.stream.testkit.TestSubscriber
import akka.stream.testkit.scaladsl.TestSink
import akka.stream._
import akka.testkit._

import com.rbmhtechnology.eventuate.ReplicationProtocol._
import com.rbmhtechnology.eventuate.VectorTime
import com.typesafe.config.ConfigFactory

import org.scalatest._

object ProgressSourceSpec {
  val SrcLogId = "A"

  val config = ConfigFactory.parseString("eventuate.log.read-timeout = 500ms")
}

class ProgressSourceSpec extends TestKit(ActorSystem("test", ProgressSourceSpec.config)) with WordSpecLike with Matchers with BeforeAndAfterAll with BeforeAndAfterEach {
  import ProgressSourceSpec._

  implicit val materializer: Materializer =
    ActorMaterializer()

  private var log: TestProbe = _
  private var snk: TestSubscriber.Probe[Long] = _

  override def beforeEach(): Unit = {
    log = TestProbe()
    snk = Source.fromGraph(ProgressSource(SrcLogId, log.ref)).toMat(TestSink.probe[Long])(Keep.right).run()
  }

  override def afterAll(): Unit = {
    TestKit.shutdownActorSystem(system)
    super.afterAll()
  }

  "A ProgressSource" must {
    "complete after emitting a stored replication progress" in {
      snk.request(1)
      log.expectMsg(GetReplicationProgress(SrcLogId))
      log.sender() ! GetReplicationProgressSuccess(SrcLogId, 17, VectorTime.Zero)
      snk.expectNext(17)
      snk.expectComplete()
    }
    "fail if replication progress reading fails" in {
      snk.request(1)
      log.expectMsg(GetReplicationProgress(SrcLogId))
      log.sender() ! GetReplicationProgressFailure(TestException)
      snk.expectError(TestException)
    }
    "fail on replication progress reading timeout" in {
      snk.request(1)
      log.expectMsg(GetReplicationProgress(SrcLogId))
      snk.expectError should be(an[AskTimeoutException])
    }
  }
} 
Example 107
Source File: DurableEventLogs.scala    From eventuate   with Apache License 2.0
package com.rbmhtechnology.example.stream

//# durable-event-logs
import akka.actor.{ ActorRef, ActorSystem }
import akka.stream.{ ActorMaterializer, Materializer }
import com.rbmhtechnology.eventuate.log.leveldb.LeveldbEventLog

//#
trait DurableEventLogs {
  //# durable-event-logs
  implicit val system: ActorSystem = ActorSystem("example")
  implicit val materializer: Materializer = ActorMaterializer()

  val logAId = "A"
  val logBId = "B"
  val logCId = "C"

  val logA: ActorRef = createLog(logAId)
  val logB: ActorRef = createLog(logBId)
  val logC: ActorRef = createLog(logCId)

  def createLog(id: String): ActorRef =
    system.actorOf(LeveldbEventLog.props(id))
  //#
} 
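
A quick way to exercise these logs is the EventLogWriter that Example 112 further down also uses. A minimal sketch:

import com.rbmhtechnology.eventuate.log.EventLogWriter

import scala.concurrent.Await
import scala.concurrent.duration._

object DurableEventLogsExample extends App with DurableEventLogs {
  val writer = new EventLogWriter("writer-1", logA)
  // write returns a Future of the written DurableEvents
  val written = Await.result(writer.write(Seq("event-1", "event-2")), 10.seconds)
  written.foreach(e => println(s"${e.localSequenceNr}: ${e.payload}"))
  system.terminate()
}
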
Example 108
Source File: SparkBatchAdapter.scala    From eventuate   with Apache License 2.0
package com.rbmhtechnology.eventuate.adapter.spark

import akka.actor.ActorSystem
import akka.serialization.SerializationExtension

import com.datastax.spark.connector._
import com.datastax.spark.connector.types._
import com.rbmhtechnology.eventuate.DurableEvent
import com.rbmhtechnology.eventuate.log.cassandra.CassandraEventLogSettings
import com.typesafe.config._

import org.apache.spark.SparkContext
import org.apache.spark.rdd.RDD


// The class declaration was lost in extraction; it is reconstructed here (assumed
// signature) from the calls below, which expose Cassandra-backed event logs as Spark RDDs.
class SparkBatchAdapter(val context: SparkContext, config: Config) {

  val cassandraSettings = new CassandraEventLogSettings(config)

  // Assumed: the DurableEvent converter defined below was registered in the elided
  // code, so that .as(...) can deserialize the raw event bytes.
  TypeConverter.registerConverter(new DurableEventConverter(config))

  def eventBatch(logId: String, fromSequenceNr: Long = 1L): RDD[DurableEvent] = {
    context.cassandraTable(cassandraSettings.keyspace, s"${cassandraSettings.tablePrefix}_$logId")
      .select("event").where(s"sequence_nr >= $fromSequenceNr").as((event: DurableEvent) => event)
  }
}

private class DurableEventConverter(config: Config) extends TypeConverter[DurableEvent] {
  import scala.reflect.runtime.universe._

  val converter = implicitly[TypeConverter[Array[Byte]]]

  // --------------------------------------
  //  FIXME: how to shutdown actor system?
  // --------------------------------------

  @transient lazy val system = ActorSystem("TypeConverter", config)
  @transient lazy val serial = SerializationExtension(system)

  def targetTypeTag = implicitly[TypeTag[DurableEvent]]
  def convertPF = {
    case obj => deserialize(converter.convert(obj))
  }

  def deserialize(bytes: Array[Byte]): DurableEvent =
    serial.deserialize(bytes, classOf[DurableEvent]).get
} 
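
Given the reconstructed constructor above, batch-reading a log might look like the following sketch; the Spark master, connection host, and log id are assumptions:

import com.typesafe.config.ConfigFactory
import org.apache.spark.{SparkConf, SparkContext}

val sparkConf = new SparkConf()
  .setAppName("eventuate-batch")
  .setMaster("local[2]")
  .set("spark.cassandra.connection.host", "127.0.0.1")

val sparkContext = new SparkContext(sparkConf)
val adapter = new SparkBatchAdapter(sparkContext, ConfigFactory.load())

// All events of log "logA" from sequence number 1, as an RDD[DurableEvent]
val events = adapter.eventBatch("logA", fromSequenceNr = 1L)
println(events.count())
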
Example 109
Source File: EventLogPartitioningSpecCassandra.scala    From eventuate   with Apache License 2.0
package com.rbmhtechnology.eventuate.log

import akka.actor.ActorSystem
import akka.testkit.{ TestKit, TestProbe }

import com.rbmhtechnology.eventuate.EventsourcingProtocol._
import com.rbmhtechnology.eventuate.SingleLocationSpecCassandra
import com.typesafe.config._

import scala.collection.immutable.Seq

object EventLogPartitioningSpecCassandra {
  val config: Config = ConfigFactory.parseString(
    """
      |akka.loglevel = "ERROR"
      |akka.test.single-expect-default = 20s
      |
      |eventuate.snapshot.filesystem.dir = target/test-snapshot
      |
      |eventuate.log.write-batch-size = 3
      |eventuate.log.cassandra.partition-size = 5
      |eventuate.log.cassandra.default-port = 9142
    """.stripMargin)
}

class EventLogPartitioningSpecCassandra extends TestKit(ActorSystem("test", EventLogPartitioningSpecCassandra.config)) with EventLogSpecSupport with SingleLocationSpecCassandra {
  import EventLogSpec._

  def replay(fromSequenceNr: Long): Seq[(Any, Long)] = {
    log.tell(Replay(fromSequenceNr, None, 0), replyToProbe.ref)
    replyToProbe.expectMsgClass(classOf[ReplaySuccess]).events.map { event =>
      (event.payload, event.localSequenceNr)
    }
  }

  "A Cassandra event log" must {
    "fill a partition with a single batch" in {
      writeEmittedEvents(List(event("a"), event("b"), event("c"), event("d"), event("e")))
      replay(1L) should be(List(("a", 1L), ("b", 2L), ("c", 3L), ("d", 4L), ("e", 5L)))
      replay(4L) should be(List(("d", 4L), ("e", 5L)))
      replay(5L) should be(List(("e", 5L)))
      replay(6L) should be(List())
    }
    "fill a partition with more than one batch" in {
      writeEmittedEvents(List(event("a"), event("b"), event("c")))
      writeEmittedEvents(List(event("d"), event("e")))
      replay(1L) should be(List(("a", 1L), ("b", 2L), ("c", 3L), ("d", 4L), ("e", 5L)))
      replay(5L) should be(List(("e", 5L)))
      replay(6L) should be(List())
    }
    "switch to the next partition if the current partition is full" in {
      writeEmittedEvents(List(event("a"), event("b"), event("c"), event("d"), event("e")))
      writeEmittedEvents(List(event("f"), event("g")))
      replay(1L) should be(List(("a", 1L), ("b", 2L), ("c", 3L), ("d", 4L), ("e", 5L), ("f", 6L), ("g", 7L)))
      replay(5L) should be(List(("e", 5L), ("f", 6L), ("g", 7L)))
      replay(6L) should be(List(("f", 6L), ("g", 7L)))
    }
    "switch to the next partition if the current partition isn't full but doesn't provide enough remaining space for a batch" in {
      val eventsA = List(event("a"), event("b"), event("c"), event("d"))
      val eventsB = List(event("f"), event("g"))

      log ! Write(eventsA, system.deadLetters, replyToProbe.ref, 0, 0)
      log ! Write(eventsB, system.deadLetters, replyToProbe.ref, 0, 0)

      val expectedA = eventsA.zipWithIndex.map {
        case (event, idx) => event.copy(vectorTimestamp = timestamp(1L + idx), processId = logId, localLogId = logId, localSequenceNr = 1L + idx)
      }

      val expectedB = eventsB.zipWithIndex.map {
        case (event, idx) => event.copy(vectorTimestamp = timestamp(6L + idx), processId = logId, localLogId = logId, localSequenceNr = 6L + idx)
      }

      replyToProbe.expectMsg(WriteSuccess(expectedA, 0, 0))
      replyToProbe.expectMsg(WriteSuccess(expectedB, 0, 0))

      replay(1L) should be(List(("a", 1L), ("b", 2L), ("c", 3L), ("d", 4L), ("f", 6L), ("g", 7L)))
      replay(5L) should be(List(("f", 6L), ("g", 7L)))
      replay(6L) should be(List(("f", 6L), ("g", 7L)))
    }
    "reject batches larger than the maximum partition size" in {
      val events = Vector(event("a"), event("b"), event("c"), event("d"), event("e"), event("f"))
      log ! Write(events, system.deadLetters, replyToProbe.ref, 0, 0)
      replyToProbe.expectMsgClass(classOf[WriteFailure])
    }
  }
} 
Example 110
Source File: LocationSpecsCassandra.scala    From eventuate   with Apache License 2.0
package com.rbmhtechnology.eventuate

import akka.actor.{ Props, ActorSystem }
import akka.testkit.TestKit

// --------------------------------------------------------------------------
//  Provider-specific single-location specs
// --------------------------------------------------------------------------

class EventsourcedProcessorIntegrationSpecCassandra extends TestKit(ActorSystem("test")) with EventsourcedProcessorIntegrationSpec with SingleLocationSpecCassandra {
  override def beforeEach(): Unit = {
    super.beforeEach()
    init()
  }
}

class EventsourcedActorIntegrationSpecCassandra extends TestKit(ActorSystem("test")) with EventsourcedActorIntegrationSpec with SingleLocationSpecCassandra {
  override def batching = false
}

class PersistOnEventIntegrationSpecCassandra extends TestKit(ActorSystem("test")) with PersistOnEventIntegrationSpec with SingleLocationSpecCassandra
class EventsourcedActorThroughputSpecCassandra extends TestKit(ActorSystem("test")) with EventsourcedActorThroughputSpec with SingleLocationSpecCassandra

// --------------------------------------------------------------------------
//  Provider-specific multi-location specs
// --------------------------------------------------------------------------

class EventsourcedActorCausalitySpecCassandra extends EventsourcedActorCausalitySpec with MultiLocationSpecCassandra {
  override val logFactory: String => Props = id => SingleLocationSpecCassandra.TestEventLog.props(id, batching = true, aggregateIndexing = true)
}

class ReplicationIntegrationSpecCassandra extends ReplicationIntegrationSpec with MultiLocationSpecCassandra {
  def customPort = 2554
}

class ReplicationCycleSpecCassandra extends ReplicationCycleSpec with MultiLocationSpecCassandra 
Example 111
Source File: AkkaSerializationMessageCodec.scala    From eventuate   with Apache License 2.0
package com.rbmhtechnology.eventuate.adapter.vertx

import akka.actor.{ ActorSystem, ExtendedActorSystem, Extension, ExtensionId, ExtensionIdProvider }
import com.rbmhtechnology.eventuate.serializer.CommonFormats.PayloadFormat
import com.rbmhtechnology.eventuate.serializer.DelegatingPayloadSerializer
import io.vertx.core.buffer.Buffer
import io.vertx.core.eventbus.MessageCodec

object AkkaSerializationMessageCodec {
  val Name = "akka-serialization-message-codec"

  def apply(name: String)(implicit system: ActorSystem): MessageCodec[AnyRef, AnyRef] =
    new AkkaSerializationMessageCodec(name)

  def apply(clazz: Class[_])(implicit system: ActorSystem): MessageCodec[AnyRef, AnyRef] =
    new AkkaSerializationMessageCodec(s"${AkkaSerializationMessageCodec.Name}-${clazz.getName}")
}

class AkkaSerializationMessageCodec(override val name: String)(implicit system: ActorSystem) extends MessageCodec[AnyRef, AnyRef] {

  val serializer = PayloadSerializationExtension(system)

  override def transform(o: AnyRef): AnyRef =
    o

  override def encodeToWire(buffer: Buffer, o: AnyRef): Unit = {
    val payload = serializer.toBinary(o)
    buffer.appendInt(payload.length)
    buffer.appendBytes(payload)
  }

  override def decodeFromWire(pos: Int, buffer: Buffer): AnyRef = {
    val payloadLength = buffer.getInt(pos)
    val payload = buffer.getBytes(pos + Integer.BYTES, pos + Integer.BYTES + payloadLength)
    serializer.fromBinary(payload).asInstanceOf[AnyRef]
  }

  override def systemCodecID(): Byte = -1
}

object PayloadSerializationExtension extends ExtensionId[PayloadSerializationExtension] with ExtensionIdProvider {

  override def lookup = PayloadSerializationExtension

  override def createExtension(system: ExtendedActorSystem): PayloadSerializationExtension =
    new PayloadSerializationExtension(system)

  override def get(system: ActorSystem): PayloadSerializationExtension =
    super.get(system)
}

class PayloadSerializationExtension(system: ExtendedActorSystem) extends Extension {

  val serializer = new DelegatingPayloadSerializer(system)

  def toBinary(o: AnyRef): Array[Byte] =
    serializer.payloadFormatBuilder(o).build().toByteArray

  def fromBinary(b: Array[Byte]): Any =
    serializer.payload(PayloadFormat.parseFrom(b))
} 
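
A codec must be registered with the event bus before use, and is then selected by name per message. A sketch; the codec name, address, and message type are illustrative:

import akka.actor.ActorSystem
import io.vertx.core.Vertx
import io.vertx.core.eventbus.DeliveryOptions

object CodecExample extends App {
  implicit val system: ActorSystem = ActorSystem("codec-example")
  val vertx = Vertx.vertx()

  // Register once per event bus, then reference the codec by name when sending.
  vertx.eventBus().registerCodec(AkkaSerializationMessageCodec("my-codec"))

  case class Greeting(msg: String)
  vertx.eventBus().send("greetings", Greeting("hello"), new DeliveryOptions().setCodecName("my-codec"))
}
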
Example 112
Source File: VertxAdapterSpec.scala    From eventuate   with Apache License 2.0
package com.rbmhtechnology.eventuate.adapter.vertx

import akka.actor.ActorSystem
import akka.testkit.TestKit
import com.rbmhtechnology.eventuate.adapter.vertx.api.{ EventProducer, VertxAdapterConfig }
import com.rbmhtechnology.eventuate.log.EventLogWriter
import com.rbmhtechnology.eventuate.log.leveldb.LeveldbEventLog
import com.rbmhtechnology.eventuate.utilities._
import com.rbmhtechnology.eventuate.{ LocationCleanupLeveldb, ReplicationEndpoint }
import com.typesafe.config.Config
import org.scalatest.{ BeforeAndAfterAll, MustMatchers, WordSpecLike }

import scala.collection.immutable.Seq

object VertxAdapterSpec {
  case class Event(id: String)

  val Config = TestConfig.withReplayBatchSize(10)
}

class VertxAdapterSpec extends TestKit(ActorSystem("test", VertxAdapterSpec.Config))
  with WordSpecLike with MustMatchers with BeforeAndAfterAll with StopSystemAfterAll with LocationCleanupLeveldb
  with VertxEnvironment with VertxEventBusProbes {

  import VertxAdapterSpec._
  import utilities._

  val logName = "logA"
  val adapterId = "adapter1"
  var storage: ActorStorageProvider = _
  var endpoint: ReplicationEndpoint = _

  override def config: Config = VertxAdapterSpec.Config

  override def beforeAll(): Unit = {
    super.beforeAll()
    storage = new ActorStorageProvider(adapterId)
    endpoint = new ReplicationEndpoint(id = "1", logNames = Set(logName), logFactory = logId => LeveldbEventLog.props(logId), connections = Set())
  }

  "A VertxAdapter" must {
    "read events from an inbound log and deliver them to the Vert.x eventbus" in {
      val log = endpoint.logs(logName)
      val adapterConfig = VertxAdapterConfig()
        .addProducer(EventProducer.fromLog(log)
          .publishTo {
            case _ => endpoint1.address
          }
          .as("adapter1"))
        .registerDefaultCodecFor(classOf[Event])

      val vertxAdapter = VertxAdapter(adapterConfig, vertx, storage)
      val logWriter = new EventLogWriter("w1", endpoint.logs(logName))

      endpoint.activate()
      vertxAdapter.start()

      logWriter.write(Seq(Event("1"))).await.head

      storage.expectRead(replySequenceNr = 0)
      storage.expectWrite(sequenceNr = 1)

      endpoint1.probe.expectVertxMsg(body = Event("1"))

      logWriter.write(Seq(Event("2"))).await

      storage.expectWrite(sequenceNr = 2)

      endpoint1.probe.expectVertxMsg(body = Event("2"))

      logWriter.write(Seq(Event("3"), Event("4"))).await

      storage.expectWriteAnyOf(sequenceNrs = Seq(3, 4))

      endpoint1.probe.expectVertxMsg(body = Event("3"))
      endpoint1.probe.expectVertxMsg(body = Event("4"))
    }
  }
} 
Example 113
Source File: ActorStorageProvider.scala    From eventuate   with Apache License 2.0
package com.rbmhtechnology.eventuate.adapter.vertx

import akka.actor.{ ActorSystem, Status }
import akka.pattern.ask
import akka.testkit.TestProbe
import akka.util.Timeout
import com.rbmhtechnology.eventuate.adapter.vertx.api.StorageProvider

import scala.concurrent.duration._
import scala.concurrent.{ ExecutionContext, Future }

class ActorStorageProvider(defaultId: String)(implicit system: ActorSystem) extends StorageProvider {
  implicit val timeout = Timeout(20.seconds)

  val probe = TestProbe()

  override def readProgress(id: String)(implicit executionContext: ExecutionContext): Future[Long] =
    probe.ref.ask(read(id)).mapTo[Long]

  override def writeProgress(id: String, sequenceNr: Long)(implicit executionContext: ExecutionContext): Future[Long] =
    probe.ref.ask(write(id, sequenceNr)).mapTo[Long]

  def expectRead(replySequenceNr: Long, id: String = defaultId): Unit = {
    probe.expectMsg(read(id))
    probe.reply(replySequenceNr)
  }

  def expectWrite(sequenceNr: Long, id: String = defaultId): Unit = {
    probe.expectMsg(write(id, sequenceNr))
    probe.reply(sequenceNr)
  }

  def expectWriteAndFail(sequenceNr: Long, failure: Throwable, id: String = defaultId): Unit = {
    probe.expectMsg(write(id, sequenceNr))
    probe.reply(Status.Failure(failure))
  }

  def expectWriteAnyOf(sequenceNrs: Seq[Long], id: String = defaultId): Unit = {
    probe.expectMsgAnyOf(sequenceNrs.map(write(id, _)): _*)
    probe.reply(sequenceNrs.max)
  }

  def expectNoMsg(duration: FiniteDuration): Unit = {
    probe.expectNoMsg(duration)
  }

  private def read(id: String): String =
    s"read[$id]"

  private def write(id: String, sequenceNr: Long): String =
    s"write[$id]-$sequenceNr"
} 
Example 114
Source File: VertxEventBusProbes.scala    From eventuate   with Apache License 2.0
package com.rbmhtechnology.eventuate.adapter.vertx

import akka.actor.ActorSystem
import akka.testkit.{ TestKit, TestProbe }
import com.rbmhtechnology.eventuate.adapter.vertx.utilities.EventBusMessage
import io.vertx.core.eventbus.Message
import org.scalatest.{ BeforeAndAfterEach, Suite }

trait VertxEventBusProbes extends BeforeAndAfterEach {
  this: TestKit with Suite with VertxEnvironment =>

  import VertxHandlerConverters._

  var endpoint1: EventBusEndpoint = _
  var endpoint2: EventBusEndpoint = _

  override def beforeEach(): Unit = {
    super.beforeEach()

    endpoint1 = EventBusEndpoint.withId("1")
    endpoint2 = EventBusEndpoint.withId("2")
  }

  def eventBusProbe(endpoint: String): TestProbe = {
    val probe = TestProbe()
    val handler = (m: Message[String]) => probe.ref ! EventBusMessage(m.body(), m, endpoint)
    vertx.eventBus().consumer[String](endpoint, handler.asVertxHandler)
    probe
  }

  object EventBusEndpoint {
    def apply(address: String): EventBusEndpoint =
      new EventBusEndpoint(address, eventBusProbe(address))

    def withId(id: String): EventBusEndpoint =
      apply(endpointAddress(id))
  }

  case class EventBusEndpoint(address: String, probe: TestProbe)
} 
Example 115
Source File: LocationSpecsLeveldb.scala    From eventuate   with Apache License 2.0
package com.rbmhtechnology.eventuate

import akka.actor.{ Props, ActorSystem }
import akka.testkit.TestKit

// --------------------------------------------------------------------------
//  Provider-specific single-location specs
// --------------------------------------------------------------------------

class EventsourcedProcessorIntegrationSpecLeveldb extends TestKit(ActorSystem("test")) with EventsourcedProcessorIntegrationSpec with SingleLocationSpecLeveldb {
  override def beforeEach(): Unit = {
    super.beforeEach()
    init()
  }
}

class EventsourcedActorIntegrationSpecLeveldb extends TestKit(ActorSystem("test")) with EventsourcedActorIntegrationSpec with SingleLocationSpecLeveldb {
  override def batching = false
}

class PersistOnEventIntegrationSpecLeveldb extends TestKit(ActorSystem("test")) with PersistOnEventIntegrationSpec with SingleLocationSpecLeveldb
class EventsourcedActorThroughputSpecLeveldb extends TestKit(ActorSystem("test")) with EventsourcedActorThroughputSpec with SingleLocationSpecLeveldb

// --------------------------------------------------------------------------
//  Provider-specific multi-location specs
// --------------------------------------------------------------------------

class EventsourcedActorCausalitySpecLeveldb extends EventsourcedActorCausalitySpec with MultiLocationSpecLeveldb {
  override val logFactory: String => Props = id => SingleLocationSpecLeveldb.TestEventLog.props(id, batching = true)
}

class ReplicationIntegrationSpecLeveldb extends ReplicationIntegrationSpec with MultiLocationSpecLeveldb {
  def customPort = 2553
}

class ReplicationCycleSpecLeveldb extends ReplicationCycleSpec with MultiLocationSpecLeveldb 
Example 116
Source File: AkkaUnitTestLike.scala    From reactive-kinesis   with Apache License 2.0
package com.weightwatchers.reactive.kinesis.common

import akka.actor.{ActorSystem, Scheduler}
import akka.stream.{ActorMaterializer, Materializer}
import akka.testkit.TestKitBase
import com.typesafe.config.{Config, ConfigFactory}
import org.scalatest.concurrent.ScalaFutures
import org.scalatest.{BeforeAndAfterAll, Suite}

import scala.concurrent.ExecutionContextExecutor


trait AkkaUnitTestLike extends TestKitBase with ScalaFutures with BeforeAndAfterAll {
  self: Suite =>

  implicit lazy val config: Config                = ConfigFactory.load("sample.conf")
  implicit lazy val system: ActorSystem           = ActorSystem(suiteName, config)
  implicit lazy val scheduler: Scheduler          = system.scheduler
  implicit lazy val mat: Materializer             = ActorMaterializer()
  implicit lazy val ctx: ExecutionContextExecutor = system.dispatcher

  abstract override def afterAll(): Unit = {
    super.afterAll()
    // intentionally shutdown the actor system last.
    system.terminate().futureValue
  }
} 
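Because the trait carries its own lazy config, system, materializer, and dispatcher, a spec only has to mix it in. A minimal sketch, assuming a ScalaTest AnyWordSpecLike style (an assumption, not taken from the original project):

import akka.stream.scaladsl.{Sink, Source}
import org.scalatest.wordspec.AnyWordSpecLike

class SumSpec extends AnyWordSpecLike with AkkaUnitTestLike {
  "a stream" should {
    "run on the shared materializer" in {
      val sum = Source(1 to 3).runWith(Sink.fold(0)(_ + _))
      assert(sum.futureValue == 6) // futureValue comes from ScalaFutures
    }
  }
}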
Example 117
Source File: Main.scala    From akka-api-gateway-example   with MIT License
package jp.co.dzl.example.akka.api

import akka.actor.ActorSystem
import akka.http.scaladsl.Http
import akka.stream.{ ActorMaterializer, Materializer }
import jp.co.dzl.example.akka.api.di.{ ServiceModule, HandlerModule, ConfigModule, AkkaModule }
import jp.co.dzl.example.akka.api.handler.RootHandler
import scaldi.{ Injector, Injectable }

import scala.concurrent.ExecutionContextExecutor

trait MainService extends Injectable {
  implicit val module: Injector =
    new AkkaModule :: new ConfigModule :: new HandlerModule :: new ServiceModule

  implicit val system: ActorSystem = inject[ActorSystem]
  implicit val executor: ExecutionContextExecutor = system.dispatcher
  implicit val materializer: Materializer = ActorMaterializer()

  val host = inject[String](identified by "http.listen.host")
  val port = inject[Int](identified by "http.listen.port")
  val handler = inject[RootHandler]
}

object Main extends App with MainService {
  Http().bindAndHandle(handler.routes, host, port)
} 
Example 118
Source File: ServiceModule.scala    From akka-api-gateway-example   with MIT License
package jp.co.dzl.example.akka.api.di

import akka.actor.ActorSystem
import jp.co.dzl.example.akka.api.service.{ HttpClientImpl, HttpClient, GitHubImpl, GitHub }
import scaldi.Module

class ServiceModule extends Module {
  bind[HttpClient] to new HttpClientImpl(
    actorSystem = inject[ActorSystem]
  )

  bind[GitHub] to new GitHubImpl(
    host = inject[String](identified by "services.github.host"),
    port = inject[Int](identified by "services.github.port"),
    timeout = inject[Int](identified by "services.github.timeout"),
    httpClient = inject[HttpClient]
  )
} 
Example 119
Source File: HandlerModule.scala    From akka-api-gateway-example   with MIT License
package jp.co.dzl.example.akka.api.di

import akka.actor.ActorSystem
import jp.co.dzl.example.akka.api.handler.{ RootHandler, Handler }
import jp.co.dzl.example.akka.api.handler.v1.github.UsersHandler
import jp.co.dzl.example.akka.api.service.GitHub
import scaldi.Module

class HandlerModule extends Module {
  bind[UsersHandler] to new UsersHandler(
    actorSystem = inject[ActorSystem],
    github = inject[GitHub]
  )

  bind[List[Handler]] to List(
    inject[UsersHandler]
  )

  bind[RootHandler] to new RootHandler(
    handlers = inject[List[Handler]]
  )
} 
Example 120
Source File: UsersHandler.scala    From akka-api-gateway-example   with MIT License
package jp.co.dzl.example.akka.api.handler.v1.github

import akka.actor.ActorSystem
import akka.http.scaladsl.model._
import akka.http.scaladsl.server.Directives._
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.{ Sink, Source }
import jp.co.dzl.example.akka.api.handler.Handler
import jp.co.dzl.example.akka.api.service.GitHub

import scala.util.{ Failure, Success }

class UsersHandler(
    actorSystem: ActorSystem,
    github:      GitHub
) extends Handler {
  implicit val system = actorSystem
  implicit val executor = system.dispatcher
  implicit val materializer = ActorMaterializer()

  def routes = pathPrefix("v1" / "github") {
    path("users" / """^[a-zA-Z0-9\-]+$""".r) { login =>
      get {
        extractRequest { req =>
          val result = Source.single(HttpRequest(HttpMethods.GET, s"/users/$login"))
            .via(github.from(req))
            .via(github.send)
            .runWith(Sink.head)

          onComplete(result) {
            case Success(response) => complete(response)
            case Failure(error)    => complete(StatusCodes.ServiceUnavailable -> error.toString)
          }
        }
      }
    }
  }
} 
Example 121
Source File: HttpClientSpec.scala    From akka-api-gateway-example   with MIT License
package jp.co.dzl.example.akka.api.service

import akka.actor.ActorSystem
import akka.stream.scaladsl.Flow
import org.scalatest.{ BeforeAndAfterAll, Matchers, FlatSpec }

import scala.concurrent.Await
import scala.concurrent.duration.Duration

class HttpClientSpec extends FlatSpec with Matchers with BeforeAndAfterAll {
  implicit val system = ActorSystem("http-client-spec")
  implicit val executor = system.dispatcher

  override protected def afterAll: Unit = {
    Await.result(system.terminate(), Duration.Inf)
  }

  "#conectionHttps" should "return outgoing connection flow" in {
    val httpClient = new HttpClientImpl(system)
    val connection = httpClient.connectionHttps("127.0.0.1", 8000, 5)

    connection shouldBe a[Flow[_, _, _]]
  }
} 
Example 122
Source File: GitHubSpec.scala    From akka-api-gateway-example   with MIT License
package jp.co.dzl.example.akka.api.service

import akka.actor.ActorSystem
import akka.http.scaladsl.model.headers.RawHeader
import akka.http.scaladsl.model.{ HttpMethods, HttpRequest, HttpResponse }
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.{ Flow, Source }
import akka.stream.testkit.scaladsl.TestSink
import org.scalamock.scalatest.MockFactory
import org.scalatest.concurrent.ScalaFutures
import org.scalatest.{ BeforeAndAfterAll, FlatSpec, Matchers }

import scala.concurrent.Await
import scala.concurrent.duration.Duration

class GitHubSpec extends FlatSpec with Matchers with ScalaFutures with BeforeAndAfterAll with MockFactory {
  implicit val system = ActorSystem("github-spec")
  implicit val executor = system.dispatcher
  implicit val materializer = ActorMaterializer()

  override protected def afterAll: Unit = {
    Await.result(system.terminate(), Duration.Inf)
  }

  "#from" should "merge original headers to github request" in {
    val github = new GitHubImpl("127.0.0.1", 8000, 5, mock[HttpClient])
    val request = HttpRequest(HttpMethods.GET, "/")
      .addHeader(RawHeader("host", "dummy"))
      .addHeader(RawHeader("timeout-access", "dummy"))

    val result = Source.single(HttpRequest(HttpMethods.GET, "/v1/github/users/xxxxxx"))
      .via(github.from(request))
      .runWith(TestSink.probe[HttpRequest])
      .request(1)
      .expectNext()

    result.headers.filter(_.lowercaseName() == "host") shouldBe empty
    result.headers.filter(_.lowercaseName() == "timeout-access") shouldBe empty
    result.headers.filter(_.lowercaseName() == "x-forwarded-host") shouldNot be(empty)
  }

  "#send" should "connect using http client" in {
    val httpResponse = HttpResponse()
    val httpClient = mock[HttpClient]
    (httpClient.connectionHttps _).expects(*, *, *).returning(Flow[HttpRequest].map(_ => httpResponse))

    val github = new GitHubImpl("127.0.0.1", 8000, 5, httpClient)
    val result = Source.single(HttpRequest(HttpMethods.GET, "/"))
      .via(github.send)
      .runWith(TestSink.probe[HttpResponse])
      .request(1)
      .expectNext()

    result shouldBe httpResponse
  }
} 
Example 123
Source File: ClickhouseHostHealth.scala    From clickhouse-scala-client   with GNU Lesser General Public License v3.0
package com.crobox.clickhouse.balancing.discovery.health

import akka.NotUsed
import akka.actor.{ActorSystem, Cancellable}
import akka.http.scaladsl.Http
import akka.http.scaladsl.model._
import akka.http.scaladsl.settings.ConnectionPoolSettings
import akka.http.scaladsl.unmarshalling.Unmarshaller
import akka.stream.Materializer
import akka.stream.scaladsl.{Flow, Source}
import com.crobox.clickhouse.internal.ClickhouseResponseParser

import scala.concurrent.duration._
import scala.concurrent.{ExecutionContext, Future}
import scala.util.{Failure, Success, Try}

object ClickhouseHostHealth extends ClickhouseResponseParser {

  sealed trait ClickhouseHostStatus {
    val host: Uri
    val code: String
  }

  case class Alive(host: Uri) extends ClickhouseHostStatus { override val code: String = "ok" }

  case class Dead(host: Uri, reason: Throwable) extends ClickhouseHostStatus { override val code: String = "nok" }

  
  def healthFlow(host: Uri)(
      implicit system: ActorSystem,
      materializer: Materializer,
      executionContext: ExecutionContext
  ): Source[ClickhouseHostStatus, Cancellable] = {
    val healthCheckInterval: FiniteDuration =
      system.settings.config
        .getDuration("connection.health-check.interval")
        .getSeconds.seconds
    val healthCheckTimeout: FiniteDuration =
      system.settings.config
        .getDuration("connection.health-check.timeout")
        .getSeconds.seconds

    val healthCachedPool = Http(system).cachedHostConnectionPool[Int](
      host.authority.host.address(),
      host.effectivePort,
      settings = ConnectionPoolSettings(system)
        .withMaxConnections(1)
        .withMinConnections(1)
        .withMaxOpenRequests(2)
        .withMaxRetries(3)
        .withUpdatedConnectionSettings(
          _.withIdleTimeout(healthCheckTimeout + healthCheckInterval).withConnectingTimeout(healthCheckTimeout)
        )
    )
    Source
      .tick(0.milliseconds, healthCheckInterval, 0)
      .map(tick => {
        (HttpRequest(method = HttpMethods.GET, uri = host), tick)
      })
      .via(healthCachedPool)
      .via(parsingFlow(host))
  }

  private[health] def parsingFlow[T](
      host: Uri
  )(implicit ec: ExecutionContext, mat: Materializer): Flow[(Try[HttpResponse], T), ClickhouseHostStatus, NotUsed] =
    Flow[(Try[HttpResponse], T)].mapAsync(1) {
      case (Success(response @ akka.http.scaladsl.model.HttpResponse(StatusCodes.OK, _, _, _)), _) =>
        Unmarshaller.stringUnmarshaller(decodeResponse(response).entity)
          .map(splitResponse)
          .map(
            stringResponse =>
              if (stringResponse.equals(Seq("Ok."))) {
                Alive(host)
              } else {
                Dead(host, new IllegalArgumentException(s"Got wrong result $stringResponse"))
            }
          )
      case (Success(response), _) =>
        Future.successful(Dead(host, new IllegalArgumentException(s"Got response with status code ${response.status}")))
      case (Failure(ex), _) =>
        Future.successful(Dead(host, ex))
    }

} 
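healthFlow ticks at the configured interval, sends a GET to the host's root endpoint, and maps each response to Alive or Dead. A minimal sketch of materializing it, assuming the connection.health-check.* entries exist in the system's config and the host below is a reachable ClickHouse instance:

import akka.actor.ActorSystem
import akka.http.scaladsl.model.Uri
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.Sink

implicit val system: ActorSystem = ActorSystem("health-demo")
implicit val mat: ActorMaterializer = ActorMaterializer()
import system.dispatcher

val cancellable = ClickhouseHostHealth
  .healthFlow(Uri("http://localhost:8123"))
  .to(Sink.foreach(status => println(s"${status.host} is ${status.code}")))
  .run()
// cancellable.cancel() stops the periodic checks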
Example 124
Source File: ClusterConnectionFlow.scala    From clickhouse-scala-client   with GNU Lesser General Public License v3.0
package com.crobox.clickhouse.balancing.discovery.cluster

import akka.actor.{ActorSystem, Cancellable}
import akka.http.scaladsl.Http
import akka.http.scaladsl.model.Uri
import akka.http.scaladsl.settings.ConnectionPoolSettings
import akka.stream.Materializer
import akka.stream.scaladsl.Source
import com.crobox.clickhouse.balancing.discovery.ConnectionManagerActor.Connections
import com.crobox.clickhouse.internal.QuerySettings.ReadQueries
import com.crobox.clickhouse.internal.{ClickhouseHostBuilder, ClickhouseQueryBuilder, ClickhouseResponseParser, QuerySettings}
import com.typesafe.scalalogging.LazyLogging

import scala.concurrent.duration._
import scala.concurrent.{ExecutionContext, Future}

private[clickhouse] object ClusterConnectionFlow
    extends ClickhouseQueryBuilder
    with ClickhouseResponseParser
    with LazyLogging {

  def clusterConnectionsFlow(
      targetHost: => Future[Uri],
      scanningInterval: FiniteDuration,
      cluster: String
  )(implicit system: ActorSystem,
    materializer: Materializer,
    ec: ExecutionContext): Source[Connections, Cancellable] = {
    val http                   = Http(system)
    val settings = ConnectionPoolSettings(system)
      .withMaxConnections(1)
      .withMinConnections(1)
      .withMaxOpenRequests(2)
      .withMaxRetries(3)
      .withUpdatedConnectionSettings(
        _.withIdleTimeout(scanningInterval.plus(1.second))
      )
    Source
      .tick(0.millis, scanningInterval, {})
      .mapAsync(1)(_ => targetHost)
      .mapAsync(1)(host => {
        val query = s"SELECT host_address FROM system.clusters WHERE cluster='$cluster'"
        val request =
          toRequest(host, query, None, QuerySettings(readOnly = ReadQueries, idempotent = Some(true)), None)(
            system.settings.config
          )
        processClickhouseResponse(http.singleRequest(request, settings = settings), query, host, None)
          .map(splitResponse)
          .map(_.toSet.filter(_.nonEmpty))
          .map(result => {
            if (result.isEmpty) {
              throw new IllegalArgumentException(
                s"Could not determine clickhouse cluster hosts for cluster $cluster and host $host. " +
                s"This could indicate that you are trying to use the cluster balancer to connect to a non cluster based clickhouse server. " +
                s"Please use the `SingleHostQueryBalancer` in that case."
              )
            }
            Connections(result.map(ClickhouseHostBuilder.toHost(_, Some(8123))))
          })
      })
  }
} 
Example 125
Source File: HostBalancer.scala    From clickhouse-scala-client   with GNU Lesser General Public License v3.0
package com.crobox.clickhouse.balancing

import akka.actor.ActorSystem
import akka.http.scaladsl.model._
import akka.stream.Materializer
import com.crobox.clickhouse.balancing.Connection.{BalancingHosts, ClusterAware, ConnectionType, SingleHost}
import com.crobox.clickhouse.balancing.discovery.ConnectionManagerActor
import com.crobox.clickhouse.balancing.discovery.health.ClickhouseHostHealth
import com.crobox.clickhouse.internal.ClickhouseHostBuilder
import com.typesafe.config.Config
import com.typesafe.scalalogging.LazyLogging

import scala.collection.JavaConverters._
import scala.concurrent.duration._
import scala.concurrent.{ExecutionContext, Future}

trait HostBalancer extends LazyLogging {

  def nextHost: Future[Uri]

}

object HostBalancer extends ClickhouseHostBuilder {

  def apply(
      optionalConfig: Option[Config] = None
  )(implicit system: ActorSystem, materializer: Materializer, ec: ExecutionContext): HostBalancer = {
    val config = optionalConfig.getOrElse(system.settings.config)
    val connectionConfig = config.getConfig("connection")
    val connectionType           = ConnectionType(connectionConfig.getString("type"))
    val connectionHostFromConfig = extractHost(connectionConfig)
    connectionType match {
      case SingleHost => SingleHostBalancer(connectionHostFromConfig)
      case BalancingHosts =>
        val manager = system.actorOf(
          ConnectionManagerActor
            .props(ClickhouseHostHealth.healthFlow(_))
        )
        MultiHostBalancer(connectionConfig
                            .getConfigList("hosts")
                            .asScala
                            .toSet
                            .map((config: Config) => extractHost(config)),
                          manager)
      case ClusterAware =>
        val manager = system.actorOf(
          ConnectionManagerActor.props(ClickhouseHostHealth.healthFlow(_))
        )
        ClusterAwareHostBalancer(
          connectionHostFromConfig,
          connectionConfig.getString("cluster"),
          manager,
          connectionConfig.getDuration("scanning-interval").getSeconds.seconds
        )(system,
          config.getDuration("host-retrieval-timeout").getSeconds.seconds,
          ec,
          materializer)
    }
  }

  def extractHost(connectionConfig: Config): Uri =
    toHost(connectionConfig.getString("host"),
           if (connectionConfig.hasPath("port")) Option(connectionConfig.getInt("port")) else None)
} 
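Everything apply reads can be summarized as one configuration block. The key names below are taken from the code above; the exact value strings accepted for type live in ConnectionType and are an assumption here:

import com.typesafe.config.ConfigFactory

val clusterAwareConfig = ConfigFactory.parseString("""
  connection {
    type = "cluster-aware"   // assumed value; see ConnectionType for the accepted names
    host = "localhost"
    port = 8123
    cluster = "my_cluster"
    scanning-interval = 10s
  }
  host-retrieval-timeout = 1s
""")

Passing this as HostBalancer(Some(clusterAwareConfig)) (with the implicit system, materializer, and execution context in scope) selects the ClusterAware branch above.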
Example 126
Source File: ClusterAwareHostBalancer.scala    From clickhouse-scala-client   with GNU Lesser General Public License v3.0
package com.crobox.clickhouse.balancing

import akka.actor.{ActorRef, ActorSystem}
import akka.http.scaladsl.model.Uri
import akka.pattern.ask
import akka.stream.scaladsl.Sink
import akka.stream.{ActorAttributes, Materializer, Supervision}
import akka.util.Timeout
import com.crobox.clickhouse.balancing.discovery.ConnectionManagerActor.{GetConnection, LogDeadConnections}
import com.crobox.clickhouse.balancing.discovery.cluster.ClusterConnectionFlow

import scala.concurrent.duration._
import scala.concurrent.{ExecutionContext, Future}


case class ClusterAwareHostBalancer(host: Uri,
                                    cluster: String = "cluster",
                                    manager: ActorRef,
                                    scanningInterval: FiniteDuration)(
    implicit system: ActorSystem,
    connectionRetrievalTimeout: Timeout,
    ec: ExecutionContext,
    materializer: Materializer
) extends HostBalancer {

  ClusterConnectionFlow
    .clusterConnectionsFlow(Future.successful(host), scanningInterval, cluster)
    .withAttributes(
      ActorAttributes.supervisionStrategy({
        case ex: IllegalArgumentException =>
          logger.error("Failed resolving hosts for cluster, stopping the flow.", ex)
          Supervision.stop
        case ex =>
          logger.error("Failed resolving hosts for cluster, resuming.", ex)
          Supervision.Resume
      })
    )
    .runWith(Sink.actorRef(manager, LogDeadConnections))

  override def nextHost: Future[Uri] =
    (manager ? GetConnection()).mapTo[Uri]
} 
Example 127
Source File: ClickhouseClientSpec.scala    From clickhouse-scala-client   with GNU Lesser General Public License v3.0
package com.crobox.clickhouse

import java.util.UUID

import akka.actor.ActorSystem
import akka.stream.{ActorMaterializer, Materializer}
import akka.testkit.TestKit
import com.typesafe.config.{Config, ConfigFactory}
import org.scalatest.BeforeAndAfterAll
import org.scalatest.concurrent.ScalaFutures
import org.scalatest.flatspec.AnyFlatSpecLike
import org.scalatest.matchers.should.Matchers

import scala.concurrent.duration._
import scala.concurrent.{Await, ExecutionContext}
import scala.util.Random

abstract class ClickhouseClientSpec(val config: Config = ConfigFactory.load())
    extends TestKit(ActorSystem("clickhouseClientTestSystem", config.getConfig("crobox.clickhouse.client")))
    with AnyFlatSpecLike
    with Matchers
    with BeforeAndAfterAll
    with ScalaFutures {

  implicit val materializer: Materializer = ActorMaterializer()
  implicit val ec: ExecutionContext = system.dispatcher

  override implicit def patienceConfig: PatienceConfig  = PatienceConfig(1.seconds, 50.millis)

  override protected def afterAll(): Unit = {
    try super.afterAll()
    finally Await.result(system.terminate(), 10.seconds)
  }

  def randomUUID: UUID =
    UUID.randomUUID

  def randomString: String =
    Random.alphanumeric.take(10).mkString

  def randomInt: Int =
    Random.nextInt(100000)
} 
Example 128
Source File: ClickhouseClientAsyncSpec.scala    From clickhouse-scala-client   with GNU Lesser General Public License v3.0
package com.crobox.clickhouse

import akka.actor.{ActorRef, ActorSystem}
import akka.http.scaladsl.model.Uri
import akka.pattern.ask
import akka.stream.{ActorMaterializer, Materializer}
import akka.testkit.TestKit
import akka.util.Timeout
import akka.util.Timeout.durationToTimeout
import com.crobox.clickhouse.balancing.HostBalancer
import com.crobox.clickhouse.balancing.discovery.ConnectionManagerActor.GetConnection
import com.typesafe.config.{Config, ConfigFactory}
import org.scalatest._

import scala.concurrent.{Await, Future}
import scala.concurrent.duration._
import org.scalatest.flatspec.AsyncFlatSpecLike
import org.scalatest.matchers.should.Matchers

abstract class ClickhouseClientAsyncSpec(val config: Config = ConfigFactory.load())
    extends TestKit(ActorSystem("clickhouseClientAsyncTestSystem", config.getConfig("crobox.clickhouse.client")))
    with AsyncFlatSpecLike
    with Matchers
    with BeforeAndAfterAll
    with BeforeAndAfterEach {

  implicit val timeout: Timeout = 5.second
  implicit val materializer: Materializer = ActorMaterializer()

  override protected def afterAll(): Unit = {
    try super.afterAll()
    finally Await.result(system.terminate(), 10.seconds)
  }

  def requestParallelHosts(balancer: HostBalancer, connections: Int = 10): Future[Seq[Uri]] =
    Future.sequence(
      (1 to connections)
        .map(_ => {
          balancer.nextHost
        })
    )

  def getConnections(manager: ActorRef, connections: Int = 10): Future[Seq[Uri]] =
    Future.sequence(
      (1 to connections)
        .map(_ => {
          (manager ? GetConnection()).mapTo[Uri]
        })
    )

  //  TODO change these methods to custom matchers
  def returnsConnectionsInRoundRobinFashion(manager: ActorRef, expectedConnections: Set[Uri]): Future[Assertion] = {
    val RequestConnectionsPerHost = 100
    getConnections(manager, RequestConnectionsPerHost * expectedConnections.size)
      .map(connections => {
        expectedConnections.foreach(
          uri =>
            connections
              .count(_ == uri) shouldBe (RequestConnectionsPerHost +- RequestConnectionsPerHost / 10) //10% delta for warm-up phase
        )
        succeed
      })
  }

} 
Example 129
Source File: SqsSettings.scala    From akka-stream-sqs   with Apache License 2.0
package me.snov.akka.sqs.client

import akka.actor.ActorSystem
import com.amazonaws.ClientConfiguration
import com.amazonaws.auth.{AWSCredentialsProvider, DefaultAWSCredentialsProviderChain}
import com.amazonaws.client.builder.AwsClientBuilder.EndpointConfiguration
import com.amazonaws.services.sqs.AmazonSQSAsync
import com.typesafe.config.Config
import collection.JavaConverters._

object SqsSettings {
  private val defaultAWSCredentialsProvider = new DefaultAWSCredentialsProviderChain()
  private val defaultAWSClientConfiguration = new ClientConfiguration()
  private val defaultMaxNumberOfMessages = 10
  private val defaultWaitTimeSeconds = 10
  private val configurationRoot = "akka-stream-sqs"

  def apply(
             queueUrl: String,
             maxNumberOfMessages: Int = defaultMaxNumberOfMessages,
             waitTimeSeconds: Int = defaultWaitTimeSeconds,
             awsCredentialsProvider: Option[AWSCredentialsProvider] = None,
             awsClientConfiguration: Option[ClientConfiguration] = None,
             awsClient: Option[AmazonSQSAsync] = None,
             endpoint: Option[EndpointConfiguration] = None,
             visibilityTimeout: Option[Int] = None,
             messageAttributes: Seq[String] = List()
           ): SqsSettings =
    new SqsSettings(
      queueUrl = queueUrl,
      maxNumberOfMessages = maxNumberOfMessages,
      waitTimeSeconds = waitTimeSeconds,
      awsClient = awsClient,
      endpoint = endpoint,
      awsCredentialsProvider = awsCredentialsProvider.getOrElse(defaultAWSCredentialsProvider),
      awsClientConfiguration = awsClientConfiguration.getOrElse(defaultAWSClientConfiguration),
      visibilityTimeout = visibilityTimeout,
      messageAttributes = messageAttributes
    )

  def apply(system: ActorSystem): SqsSettings = apply(system, None, None)

  def apply(
             system: ActorSystem,
             awsCredentialsProvider: Option[AWSCredentialsProvider],
             awsClientConfiguration: Option[ClientConfiguration]
           ): SqsSettings =
    apply(system.settings.config.getConfig(configurationRoot), awsCredentialsProvider, awsClientConfiguration)

  def apply(config: Config): SqsSettings = apply(config, None, None)

  def apply(
             config: Config,
             awsCredentialsProvider: Option[AWSCredentialsProvider],
             awsClientConfiguration: Option[ClientConfiguration]
           ): SqsSettings = {
    apply(
      queueUrl = config.getString("queue-url"),
      maxNumberOfMessages = if (config.hasPath("max-number-of-messages")) config.getInt("max-number-of-messages") else defaultMaxNumberOfMessages,
      waitTimeSeconds = if (config.hasPath("wait-time-seconds")) config.getInt("wait-time-seconds") else defaultWaitTimeSeconds,
      awsCredentialsProvider = awsCredentialsProvider,
      awsClientConfiguration = awsClientConfiguration,
      endpoint = if (config.hasPath("endpoint") && config.hasPath("region")) Some(new EndpointConfiguration(config.getString("endpoint"), config.getString("region"))) else None,
      visibilityTimeout = if (config.hasPath("visibility-timeout")) Some(config.getInt("visibility-timeout")) else None,
      messageAttributes = if (config.hasPath("message-attributes")) config.getStringList("message-attributes").asScala else List()
    )
  }
}

case class SqsSettings(queueUrl: String,
                       maxNumberOfMessages: Int,
                       waitTimeSeconds: Int,
                       awsClient: Option[AmazonSQSAsync],
                       endpoint: Option[EndpointConfiguration],
                       awsCredentialsProvider: AWSCredentialsProvider,
                       awsClientConfiguration: ClientConfiguration,
                       visibilityTimeout: Option[Int],
                       messageAttributes: Seq[String]) 
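Settings can be assembled programmatically or read from the akka-stream-sqs section of the actor system's configuration. A short sketch, where the queue URL is an assumption:

import akka.actor.ActorSystem

implicit val system: ActorSystem = ActorSystem("sqs-demo")

// programmatic: only the queue URL is required, everything else defaults
val byHand = SqsSettings(queueUrl = "http://localhost:9324/queue/demo", waitTimeSeconds = 20)

// from configuration: reads akka-stream-sqs.queue-url and friends, which
// must therefore be present in the loaded config
val fromConfig = SqsSettings(system)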
Example 130
Source File: TestHttpProxy.scala    From akka-stream-sqs   with Apache License 2.0
package me.snov.akka.sqs

import akka.actor.{ActorSystem, Terminated}
import akka.http.scaladsl.Http
import akka.http.scaladsl.server.{RequestContext, Route}
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.{Sink, Source}

import scala.concurrent.{Await, Future}
import scala.concurrent.duration._

class TestHttpProxy(
                     interface: String = "localhost",
                     port: Int,
                     remoteHost: String = "localhost",
                     remotePort: Int = 9324
                   ) {

  implicit var system: ActorSystem = createActorSystem()

  private def createActorSystem() = ActorSystem("test-http-server")

  def start(): Unit = {
    implicit val materializer: ActorMaterializer = ActorMaterializer()
    implicit val executionContext = system.dispatcher

    val proxy = Route { context: RequestContext =>
      context.log.debug("Opening connection to %s:%d".format(remoteHost, remotePort))
      Source.single(context.request)
        .via(Http(system).outgoingConnection(remoteHost, remotePort))
        .runWith(Sink.head)
        .flatMap(context.complete(_))
    }

    Http().bindAndHandle(handler = proxy, interface = interface, port = port)
  }

  def stop(): Unit = {
    Await.ready(system.terminate(), 1.second)
  }

  def asyncStartAfter(d: FiniteDuration) = {
    system = createActorSystem()
    system.scheduler.scheduleOnce(d, new Runnable {
      override def run(): Unit = start()
    })(system.dispatcher)
  }
} 
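The proxy relays every request over a fresh outgoing connection, which makes it handy for simulating a broker outage in integration tests. A usage sketch, with assumed port numbers:

import scala.concurrent.duration._

val proxy = new TestHttpProxy(port = 9325) // relays localhost:9325 -> localhost:9324
proxy.start()
// ... exercise the client against localhost:9325 ...
proxy.stop()                     // terminates the actor system, dropping connections
proxy.asyncStartAfter(2.seconds) // come back later to test reconnect behaviour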
Example 131
Source File: RoleLeaderAutoDowningRoles.scala    From akka-cluster-custom-downing   with Apache License 2.0
package tanukki.akka.cluster.autodown

import akka.actor.{ActorSystem, Address, Props}
import akka.cluster.{Cluster, DowningProvider}
import com.typesafe.config.Config

import scala.collection.JavaConverters._
import scala.concurrent.duration.{FiniteDuration, _}

final class RoleLeaderAutoDowningRoles(system: ActorSystem) extends DowningProvider {

  private[this] val cluster = Cluster(system)

  private val config: Config = system.settings.config

  override def downRemovalMargin: FiniteDuration = {
    val key = "custom-downing.down-removal-margin"
    config.getString(key) match {
      case "off" => Duration.Zero
      case _     => Duration(config.getDuration(key, MILLISECONDS), MILLISECONDS)
    }
  }

  override def downingActorProps: Option[Props] = {
    val stableAfter = system.settings.config.getDuration("custom-downing.stable-after").toMillis millis
    val leaderRole = system.settings.config.getString("custom-downing.role-leader-auto-downing-roles.leader-role")
    val roles = system.settings.config.getStringList("custom-downing.role-leader-auto-downing-roles.target-roles").asScala.toSet
    if (roles.isEmpty) None else Some(RoleLeaderAutoDownRoles.props(leaderRole, roles, stableAfter))
  }
}


private[autodown] object RoleLeaderAutoDownRoles {
  def props(leaderRole: String, targetRoles: Set[String], autoDownUnreachableAfter: FiniteDuration): Props = Props(classOf[RoleLeaderAutoDownRoles], leaderRole, targetRoles, autoDownUnreachableAfter)
}

private[autodown] class RoleLeaderAutoDownRoles(leaderRole: String, targetRoles: Set[String], autoDownUnreachableAfter: FiniteDuration)
  extends RoleLeaderAutoDownRolesBase(leaderRole, targetRoles, autoDownUnreachableAfter) with ClusterCustomDowning {

  override def down(node: Address): Unit = {
    log.info("RoleLeader is auto-downing unreachable node [{}]", node)
    cluster.down(node)
  }
} 
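A DowningProvider is activated through configuration rather than code. The keys this class reads, sketched as one block (akka.cluster.downing-provider-class is the standard Akka hook; the role names are assumptions):

import com.typesafe.config.ConfigFactory

val downingConf = ConfigFactory.parseString("""
  akka.cluster.downing-provider-class = "tanukki.akka.cluster.autodown.RoleLeaderAutoDowningRoles"
  custom-downing {
    stable-after = 20s
    down-removal-margin = 20s
    role-leader-auto-downing-roles {
      leader-role = "backend"
      target-roles = ["worker"]
    }
  }
""")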
Example 132
Source File: OldestAutoDowning.scala    From akka-cluster-custom-downing   with Apache License 2.0
package tanukki.akka.cluster.autodown

import akka.ConfigurationException
import akka.actor.{ActorSystem, Address, Props}
import akka.cluster.{Cluster, DowningProvider}
import com.typesafe.config.Config

import scala.concurrent.Await
import scala.concurrent.duration._

class OldestAutoDowning(system: ActorSystem) extends DowningProvider {

  private[this] val cluster = Cluster(system)

  private val config: Config = system.settings.config

  override def downRemovalMargin: FiniteDuration = {
    val key = "custom-downing.down-removal-margin"
    config.getString(key) match {
      case "off" => Duration.Zero
      case _     => Duration(config.getDuration(key, MILLISECONDS), MILLISECONDS)
    }
  }

  override def downingActorProps: Option[Props] = {
    val stableAfter = system.settings.config.getDuration("custom-downing.stable-after").toMillis millis
    val oldestMemberRole = {
      val r = system.settings.config.getString("custom-downing.oldest-auto-downing.oldest-member-role")
      if (r.isEmpty) None else Some(r)
    }
    val downIfAlone = system.settings.config.getBoolean("custom-downing.oldest-auto-downing.down-if-alone")
    val shutdownActorSystem = system.settings.config.getBoolean("custom-downing.oldest-auto-downing.shutdown-actor-system-on-resolution")
    if (stableAfter == Duration.Zero && downIfAlone) throw new ConfigurationException("If you set down-if-alone=true, stable-after timeout must be greater than zero.")
    else {
      Some(OldestAutoDown.props(oldestMemberRole, downIfAlone, shutdownActorSystem, stableAfter))
    }
  }
}

private[autodown] object OldestAutoDown {
  def props(oldestMemberRole: Option[String], downIfAlone: Boolean, shutdownActorSystem: Boolean, autoDownUnreachableAfter: FiniteDuration): Props =
    Props(classOf[OldestAutoDown], oldestMemberRole, downIfAlone, shutdownActorSystem, autoDownUnreachableAfter)
}

private[autodown] class OldestAutoDown(oldestMemberRole: Option[String], downIfAlone: Boolean, shutdownActorSystem: Boolean, autoDownUnreachableAfter: FiniteDuration)
  extends OldestAutoDownBase(oldestMemberRole, downIfAlone, autoDownUnreachableAfter) with ClusterCustomDowning {

  override def down(node: Address): Unit = {
    log.info("Oldest is auto-downing unreachable node [{}]", node)
    cluster.down(node)
  }

  override def shutdownSelf(): Unit = {
    if (shutdownActorSystem) {
      Await.result(context.system.terminate(), 10 seconds)
    } else {
      throw new SplitBrainResolvedError("OldestAutoDowning")
    }
  }
} 
Example 133
Source File: ClientTest.scala    From bitcoin-s-spv-node   with MIT License
package org.bitcoins.spvnode.networking

import java.net.{InetSocketAddress, ServerSocket}

import akka.actor.ActorSystem
import akka.io.{Inet, Tcp}
import akka.testkit.{ImplicitSender, TestActorRef, TestKit, TestProbe}
import org.bitcoins.core.config.TestNet3
import org.bitcoins.core.util.{BitcoinSLogger, BitcoinSUtil}
import org.bitcoins.spvnode.messages.control.VersionMessage
import org.bitcoins.spvnode.messages.{NetworkPayload, VersionMessage}
import org.bitcoins.spvnode.util.BitcoinSpvNodeUtil
import org.scalatest.{BeforeAndAfter, BeforeAndAfterAll, FlatSpecLike, MustMatchers}

import scala.concurrent.duration._
import scala.util.Try

class ClientTest extends TestKit(ActorSystem("ClientTest")) with FlatSpecLike
  with MustMatchers with ImplicitSender
  with BeforeAndAfter with BeforeAndAfterAll with BitcoinSLogger {

  "Client" must "connect to a node on the bitcoin network, " +
    "send a version message to a peer on the network and receive a version message back, then close that connection" in {
    val probe = TestProbe()

    val client = TestActorRef(Client.props,probe.ref)

    val remote = new InetSocketAddress(TestNet3.dnsSeeds(0), TestNet3.port)
    val randomPort = 23521
    //random port
    client ! Tcp.Connect(remote, Some(new InetSocketAddress(randomPort)))

    //val bound : Tcp.Bound = probe.expectMsgType[Tcp.Bound]
    val conn : Tcp.Connected = probe.expectMsgType[Tcp.Connected]

    //make sure the socket is currently bound
    Try(new ServerSocket(randomPort)).isSuccess must be (false)
    client ! Tcp.Abort
    val confirmedClosed = probe.expectMsg(Tcp.Aborted)

    //make sure the port is now available
    val boundSocket = Try(new ServerSocket(randomPort))
    boundSocket.isSuccess must be (true)

    boundSocket.get.close()

  }

  it must "bind connect to two nodes on one port" in {
    //NOTE: if this test case fails, it is most likely because one of the two DNS seeds
    //below is offline
    val remote1 = new InetSocketAddress(TestNet3.dnsSeeds(0), TestNet3.port)
    val remote2 = new InetSocketAddress(TestNet3.dnsSeeds(2), TestNet3.port)

    val probe1 = TestProbe()
    val probe2 = TestProbe()


    val client1 = TestActorRef(Client.props, probe1.ref)
    val client2 = TestActorRef(Client.props, probe2.ref)

    val local1 = new InetSocketAddress(TestNet3.port)
    val options = List(Inet.SO.ReuseAddress(true))
    client1 ! Tcp.Connect(remote1,Some(local1),options)


    probe1.expectMsgType[Tcp.Connected]
    client1 ! Tcp.Abort

    val local2 = new InetSocketAddress(TestNet3.port)
    client2 ! Tcp.Connect(remote2,Some(local2),options)
    probe2.expectMsgType[Tcp.Connected](5.seconds)
    client2 ! Tcp.Abort
  }

  override def afterAll: Unit = {
    TestKit.shutdownActorSystem(system)
  }


} 
Example 134
Source File: BlockActorTest.scala    From bitcoin-s-spv-node   with MIT License
package org.bitcoins.spvnode.networking

import akka.actor.ActorSystem
import akka.testkit.{ImplicitSender, TestActorRef, TestKit, TestProbe}
import org.bitcoins.core.crypto.DoubleSha256Digest
import org.bitcoins.core.protocol.blockchain.BlockHeader
import org.bitcoins.core.util.{BitcoinSLogger, BitcoinSUtil}
import org.bitcoins.spvnode.messages.BlockMessage
import org.scalatest.{BeforeAndAfter, BeforeAndAfterAll, FlatSpecLike, MustMatchers}

import scala.concurrent.duration.DurationInt


class BlockActorTest extends TestKit(ActorSystem("BlockActorTest")) with FlatSpecLike
  with MustMatchers with ImplicitSender
  with BeforeAndAfter with BeforeAndAfterAll with BitcoinSLogger  {

  def blockActor = TestActorRef(BlockActor.props,self)
  val blockHash = DoubleSha256Digest(BitcoinSUtil.flipEndianness("00000000b873e79784647a6c82962c70d228557d24a747ea4d1b8bbe878e1206"))

  "BlockActor" must "be able to send a GetBlocksMessage then receive that block back" in {
    blockActor ! blockHash
    val blockMsg = expectMsgType[BlockMessage](10.seconds)
    blockMsg.block.blockHeader.hash must be (blockHash)

  }


  it must "be able to request a block from it's block header" in {
    val blockHeader = BlockHeader("0100000043497fd7f826957108f4a30fd9cec3aeba79972084e90ead01ea330900000000bac8b0fa927c0ac8234287e33c5f74d38d354820e24756ad709d7038fc5f31f020e7494dffff001d03e4b672")
    blockActor ! blockHeader
    val blockMsg = expectMsgType[BlockMessage](10.seconds)
    blockMsg.block.blockHeader.hash must be (blockHash)
  }


  override def afterAll = {
    TestKit.shutdownActorSystem(system)
  }
} 
Example 135
Source File: Main.scala    From typebus   with MIT License
package io.surfkit.typebus.gen

import akka.actor.ActorSystem
import io.surfkit.typebus.module.Service
import io.surfkit.typebus.event.ServiceIdentifier
import io.surfkit.typebus.cli._
import io.surfkit.typebus.bus.akka.{AkkaBusConsumer, AkkaBusProducer}

import scala.concurrent.duration._


object Main extends App {

  implicit val system = ActorSystem("codegen")
  lazy val serviceIdentity = ServiceIdentifier("gen-code-service")

  // only want to activate and join cluster in certain cases
  //ZookeeperClusterSeed(system).join()
  lazy val producer = new AkkaBusProducer(serviceIdentity, system)
  lazy val service = new Service(serviceIdentity, producer){
  }
  lazy val consumer = new AkkaBusConsumer(service, producer, system)

  println("\n\n***********\n\n")
  CommandParser.runCli

  Thread.currentThread().join()
} 
Example 136
Source File: Client.scala    From typebus   with MIT License
package io.surfkit.typebus.client

import io.surfkit.typebus.{ByteStreamReader, ByteStreamWriter}
import io.surfkit.typebus.event.EventMeta
import io.surfkit.typebus.bus.Publisher
import io.surfkit.typebus.actors.GatherActor
import io.surfkit.typebus.event._
import scala.concurrent.Future
import scala.concurrent.duration._
import akka.actor.{ActorSystem, Props}
import akka.pattern.ask // needed for the `gather ? ...` call below
import scala.reflect.ClassTag
import akka.util.Timeout
import java.util.UUID

  def wire[T : ClassTag, U : ClassTag](x: T, eventMeta: Option[EventMeta] = None)(implicit timeout:Timeout = Timeout(4 seconds), w:ByteStreamWriter[T], r: ByteStreamReader[U]) :Future[Either[ServiceException,U]]= {
    val tType = scala.reflect.classTag[T].runtimeClass.getCanonicalName
    val uType = scala.reflect.classTag[U].runtimeClass.getCanonicalName
    val gather = system.actorOf(Props(new GatherActor[T, U](serviceIdentifier, publisher, timeout, w, r)))
    val meta = eventMeta.map(_.copy(eventId = UUID.randomUUID().toString, eventType = EventType.parse(x.getClass.getCanonicalName))).getOrElse{
      EventMeta(
        eventId = UUID.randomUUID().toString,
        eventType = EventType.parse(x.getClass.getCanonicalName),
        directReply = None,
        correlationId = None
      )
    }
    (gather ? GatherActor.Request(x)).map{
      case x: U => Right(x.asInstanceOf[U])
      case y: ServiceException => Left(y)
    }.recoverWith{
      case t: Throwable =>
        publisher.produceErrorReport(t, meta, s"FAILED RPC call ${tType} => Future[${uType}] failed with exception '${t.getMessage}'")(system)
        Future.failed(t)
    }
  }
} 
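wire is a typed request/response over the bus: it spawns a GatherActor, publishes the request, and resolves to Either[ServiceException, U]. A sketch of a call site; the client value and the Ping/Pong types are assumptions, since the enclosing client class is elided above:

case class Ping(msg: String)
case class Pong(msg: String)

// given a constructed typebus client with implicit readers/writers in scope:
// client.wire[Ping, Pong](Ping("hello")).foreach {
//   case Right(pong) => println(s"reply: ${pong.msg}")
//   case Left(err)   => println(s"service error: $err")
// }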
Example 137
Source File: Forwarding.scala    From typebus   with MIT License
package io.surfkit.typebus.client

import java.util.UUID

import akka.actor.ActorSystem
import io.surfkit.typebus.ByteStreamWriter
import io.surfkit.typebus.bus.Publisher
import io.surfkit.typebus.event._
import scala.reflect.ClassTag

trait Forwarding {

  def forward[T : ClassTag](publisher: Publisher, x: T, caller: RpcClient, correlationId: Option[String] = None, settings: Map[String, String] = Map.empty)(implicit w:ByteStreamWriter[T], system: ActorSystem) : Unit= {
    val tType = scala.reflect.classTag[T].runtimeClass.getCanonicalName
    val meta =
      EventMeta(
        eventId = UUID.randomUUID().toString,
        eventType = EventType.parse(tType),
        directReply = Some(caller),
        correlationId = correlationId,
        extra = settings
      )
    publisher.publish(PublishedEvent(
      meta = meta,
      payload = w.write(x)
    ))
  }
} 
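forward republishes the payload with directReply pointing at the original caller, so the downstream service answers the caller directly rather than the relay. A sketch, where the publisher, caller, and payload type are assumptions from the surrounding typebus setup:

case class SearchRequest(q: String)

object Relay extends Forwarding
// with an implicit ByteStreamWriter[SearchRequest] and ActorSystem in scope:
// Relay.forward(publisher, SearchRequest("akka"), caller, correlationId = Some("req-42"))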
Example 138
Source File: Boot.scala    From mqtt-mongo   with MIT License
package com.izmailoff.mm.service

import akka.actor.ActorSystem
import com.izmailoff.mm.mongo.MongoDbProviderImpl
import com.izmailoff.mm.mqtt.MqttIntermediary

object Boot
  extends App
  with MqttMongoService
  with MqttIntermediary
  with MongoDbProviderImpl {

  val system = ActorSystem("mqtt-mongo-system")
  import system.log
  val banner =
    """
      | __  __  ___ _____ _____     __  __
      ||  \/  |/ _ \_   _|_   _|   |  \/  | ___  _ __   __ _  ___
      || |\/| | | | || |   | |_____| |\/| |/ _ \| '_ \ / _` |/ _ \
      || |  | | |_| || |   | |_____| |  | | (_) | | | | (_| | (_) |
      ||_|  |_|\__\_\|_|   |_|     |_|  |_|\___/|_| |_|\__, |\___/
      |                                                |___/
      |
    """.stripMargin
  log.info(banner)

  val pubSubIntermediary = startMqttIntermediary()

  val messageConsumer = startMqttConsumer(pubSubIntermediary)

  log.info("APPLICATION STARTED!")
} 
Example 139
Source File: MqttIntermediary.scala    From mqtt-mongo   with MIT License
package com.izmailoff.mm.mqtt

import akka.actor.{ActorRef, ActorSystem, Props}
import com.izmailoff.mm.config.GlobalAppConfig.Application.MqttBroker
import com.sandinh.paho.akka.MqttPubSub
import com.sandinh.paho.akka.MqttPubSub.PSConfig
import com.izmailoff.mm.util.StringUtils._

trait MqttIntermediary
  extends MqttIntermediaryComponent {

  def system: ActorSystem

  def startMqttIntermediary(): ActorRef =
    system.actorOf(Props(classOf[MqttPubSub], PSConfig(
      brokerUrl = MqttBroker.url,
      userName = emptyToNull(MqttBroker.userName),
      password = emptyToNull(MqttBroker.password),
      stashTimeToLive = MqttBroker.stashTimeToLive,
      stashCapacity = MqttBroker.stashCapacity,
      reconnectDelayMin = MqttBroker.reconnectDelayMin,
      reconnectDelayMax = MqttBroker.reconnectDelayMax
    )), name = "MqttIntermediary")
}

trait MqttIntermediaryComponent {

  def startMqttIntermediary(): ActorRef
} 
Example 140
Source File: ServiceSpec.scala    From mqtt-mongo   with MIT License
package com.izmailoff.mm.service

import akka.actor.ActorSystem
import akka.testkit.{TestProbe, DefaultTimeout, ImplicitSender, TestKit}
import com.izmailoff.mm.config.GlobalAppConfig
import com.sandinh.paho.akka.MqttPubSub.{Subscribe, SubscribeAck, Message}
import org.scalatest.{BeforeAndAfterAll, Matchers, WordSpecLike}
import scala.concurrent.duration._
import scala.collection.JavaConversions._


class ServiceSpec
  extends TestKit(ActorSystem("test-mqtt-mongo-system", GlobalAppConfig.config))
  with DefaultTimeout
  with ImplicitSender
  with WordSpecLike
  with Matchers
  with BeforeAndAfterAll
  with TestMqttMongoServiceImpl
  with TestHelpers {

  override def afterAll {
    shutdown()
  }

  "Subscription between MQTT Broker and Consumer" should {
    "get established when consumer is started" in {
      val mqttBroker = startMqttIntermediary()
      val probe = TestProbe()
      val mqttConsumer = startMqttConsumer(probe.ref)

      probe.expectMsg(Subscribe(testTopic, mqttConsumer))
      probe.forward(mqttBroker, Subscribe(testTopic, probe.ref))
      probe.expectMsg(SubscribeAck(Subscribe(testTopic, probe.ref)))
      probe.forward(mqttConsumer, SubscribeAck(Subscribe(testTopic, mqttConsumer)))
      probe.expectNoMsg()
    }
  }

  "Sending a message to MQTT Broker" should {
    "forward it to MQTT Consumer and get saved in DB in proper JSON format" in {
      val collection = getCollectionName(testTopic).head
      db.getCollection(collection).count() should be(0)
      val mqttBroker = startMqttIntermediary()
      val mqttConsumer = startMqttConsumer(mqttBroker)
      expectNoMsg(1 second)

      mqttBroker ! new Message(testTopic, "test content".getBytes)
      mqttBroker ! new Message(testTopic, """{ "field1" : "str val", "field2" : 123 }""".getBytes)
      expectNoMsg(1 second)

      db.getCollection(collection).count() should be(2)
      val allDocsDb = db.getCollection(collection).find().iterator.toList
      allDocsDb.exists { d =>
        val fields: Map[Any, Any] = d.toMap.toMap
        fields.size == 2 &&
          fields("payload") == "test content"
      } should be(true)
      allDocsDb.exists { d =>
        val fields: Map[Any, Any] = d.toMap.toMap
        fields.size == 3 &&
          fields("field1") == "str val" &&
          fields("field2") == 123
      } should be(true)
    }
  }


} 
Example 141
Source File: TipValidationTest.scala    From bitcoin-s   with MIT License
package org.bitcoins.chain.validation

import akka.actor.ActorSystem
import org.bitcoins.chain.models.{BlockHeaderDAO, BlockHeaderDbHelper}
import org.bitcoins.chain.pow.Pow
import org.bitcoins.core.protocol.blockchain.BlockHeader
import org.bitcoins.testkit.chain.{BlockHeaderHelper, ChainDbUnitTest}
import org.scalatest.{Assertion, FutureOutcome}

class TipValidationTest extends ChainDbUnitTest {
  import org.bitcoins.chain.blockchain.Blockchain
  import org.bitcoins.chain.config.ChainAppConfig

  override type FixtureParam = BlockHeaderDAO

  // we're working with mainnet data
  implicit override lazy val appConfig: ChainAppConfig = mainnetAppConfig

  override def withFixture(test: OneArgAsyncTest): FutureOutcome =
    withBlockHeaderDAO(test)

  implicit override val system: ActorSystem = ActorSystem("TipValidationTest")

  behavior of "TipValidation"

  //blocks 566,092 and 566,093
  val newValidTip = BlockHeaderHelper.header1
  val currentTipDb = BlockHeaderHelper.header2Db
  val blockchain = Blockchain.fromHeaders(Vector(currentTipDb))

  it must "connect two blocks with that are valid" in { bhDAO =>
    val newValidTipDb =
      BlockHeaderDbHelper.fromBlockHeader(
        566093,
        currentTipDb.chainWork + Pow.getBlockProof(newValidTip),
        newValidTip)
    val expected = TipUpdateResult.Success(newValidTipDb)

    runTest(newValidTip, expected, blockchain)
  }

  it must "fail to connect two blocks that do not reference prev block hash correctly" in {
    bhDAO =>
      val badPrevHash = BlockHeaderHelper.badPrevHash

      val expected = TipUpdateResult.BadPreviousBlockHash(badPrevHash)

      runTest(badPrevHash, expected, blockchain)
  }

  it must "fail to connect two blocks with two different POW requirements at the wrong interval" in {
    bhDAO =>
      val badPOW = BlockHeaderHelper.badNBits
      val expected = TipUpdateResult.BadPOW(badPOW)
      runTest(badPOW, expected, blockchain)
  }

  it must "fail to connect two blocks with a bad nonce" in { bhDAO =>
    val badNonce = BlockHeaderHelper.badNonce
    val expected = TipUpdateResult.BadNonce(badNonce)
    runTest(badNonce, expected, blockchain)
  }

  private def runTest(
      header: BlockHeader,
      expected: TipUpdateResult,
      blockchain: Blockchain): Assertion = {
    val result = TipValidation.checkNewTip(header, blockchain)
    assert(result == expected)
  }
} 
Example 142
Source File: ChainAppConfigTest.scala    From bitcoin-s   with MIT License
package org.bitcoins.chain.config

import java.nio.file.Files

import akka.actor.ActorSystem
import ch.qos.logback.classic.Level
import com.typesafe.config.ConfigFactory
import org.bitcoins.core.config.{MainNet, RegTest, TestNet3}
import org.bitcoins.testkit.chain.ChainUnitTest
import org.bitcoins.testkit.util.FileUtil
import org.scalatest.FutureOutcome

class ChainAppConfigTest extends ChainUnitTest {
  val tempDir = Files.createTempDirectory("bitcoin-s")
  val config = ChainAppConfig(directory = tempDir, useLogbackConf = false)

  //if we don't turn off logging here, isInitF a few lines down will
  //produce some nasty error logs since we are testing initialization
  //of the chain project
  val chainAppConfig = appConfig.withOverrides(
    ConfigFactory.parseString("bitcoin-s.logging.level=OFF"))

  behavior of "ChainAppConfig"

  override def withFixture(test: OneArgAsyncTest): FutureOutcome =
    withChainFixture(test)

  it must "initialize our chain project" in { _ =>
    val isInitF = chainAppConfig.isInitialized()

    for {
      isInit <- isInitF
      _ = assert(!isInit)
      _ <- chainAppConfig.initialize()
      isInitAgain <- chainAppConfig.isInitialized()
    } yield assert(isInitAgain)
  }

  it must "be overridable" in { _ =>
    assert(config.network == RegTest)

    val otherConf = ConfigFactory.parseString("bitcoin-s.network = testnet3")
    val withOther: ChainAppConfig = config.withOverrides(otherConf)
    assert(withOther.network == TestNet3)

    val mainnetConf = ConfigFactory.parseString("bitcoin-s.network = mainnet")
    val mainnet: ChainAppConfig = withOther.withOverrides(mainnetConf)
    assert(mainnet.network == MainNet)
  }

  it must "be overridable with multiple levels" in { _ =>
    val testnet = ConfigFactory.parseString("bitcoin-s.network = testnet3")
    val mainnet = ConfigFactory.parseString("bitcoin-s.network = mainnet")
    val overriden: ChainAppConfig = config.withOverrides(testnet, mainnet)
    assert(overriden.network == MainNet)

  }

  it must "have user data directory configuration take precedence" in { _ =>
    val tempDir = Files.createTempDirectory("bitcoin-s")
    val tempFile = Files.createFile(tempDir.resolve("bitcoin-s.conf"))
    val confStr = """
                    | bitcoin-s {
                    |   network = testnet3
                    |   
                    |   logging {
                    |     level = off
                    |
                    |     p2p = warn
                    |   }
                    | }
    """.stripMargin
    val _ = Files.write(tempFile, confStr.getBytes())

    val appConfig = ChainAppConfig(directory = tempDir, useLogbackConf = false)

    assert(appConfig.datadir == tempDir.resolve("testnet3"))
    assert(appConfig.network == TestNet3)
    assert(appConfig.logLevel == Level.OFF)
    assert(appConfig.p2pLogLevel == Level.WARN)
  }

  override def afterAll: Unit = {

    FileUtil.deleteTmpDir(chainAppConfig.baseDatadir)
  }
} 
Example 143
Source File: BitcoindChainHandlerViaZmqTest.scala    From bitcoin-s   with MIT License
package org.bitcoins.chain.blockchain

import akka.actor.ActorSystem
import org.bitcoins.rpc.util.RpcUtil
import org.bitcoins.testkit.chain.{ChainDbUnitTest, ChainUnitTest}
import org.bitcoins.testkit.chain.fixture.BitcoindChainHandlerViaZmq
import org.scalatest.FutureOutcome

class BitcoindChainHandlerViaZmqTest extends ChainDbUnitTest {

  override type FixtureParam = BitcoindChainHandlerViaZmq

  implicit override val system: ActorSystem = ActorSystem("ChainUnitTest")

  override def withFixture(test: OneArgAsyncTest): FutureOutcome =
    withBitcoindChainHandlerViaZmq(test)

  behavior of "BitcoindChainHandlerViaZmq"

  it must "peer with bitcoind via zmq and have blockchain info relayed" in {
    bitcoindChainHandler: BitcoindChainHandlerViaZmq =>
      val bitcoind = bitcoindChainHandler.bitcoindRpc

      val chainHandler = bitcoindChainHandler.chainHandler

      for {
        _ <-
          chainHandler.getBlockCount
            .map(count => assert(count == 0))
        address <- bitcoind.getNewAddress
        hash +: _ <- bitcoind.generateToAddress(1, address)
        _ <- {
          //test case is totally async since we
          //can't monitor processing flow for zmq
          //so we just need to await until we
          //have fully processed the header
          RpcUtil.awaitConditionF(() =>
            chainHandler.getHeader(hash).map(_.isDefined))
        }

        header <- chainHandler.getHeader(hash)
      } yield assert(header.get.hashBE == hash)
  }

} 
Example 144
Source File: BlockchainTest.scala    From bitcoin-s   with MIT License
package org.bitcoins.chain.blockchain

import akka.actor.ActorSystem
import org.bitcoins.chain.models.BlockHeaderDb
import org.bitcoins.testkit.chain.fixture.ChainFixture
import org.bitcoins.testkit.chain.{BlockHeaderHelper, ChainUnitTest}
import org.scalatest.FutureOutcome

import scala.collection.mutable

class BlockchainTest extends ChainUnitTest {
  override type FixtureParam = ChainFixture

  override def withFixture(test: OneArgAsyncTest): FutureOutcome =
    withChainFixture(test)

  implicit override val system: ActorSystem = ActorSystem("BlockchainTest")

  behavior of "Blockchain"

  it must "connect a new header to the current tip of a blockchain" inFixtured {
    case ChainFixture.Empty =>
      val blockchain = Blockchain.fromHeaders(
        headers = Vector(ChainUnitTest.genesisHeaderDb)
      )

      val newHeader =
        BlockHeaderHelper.buildNextHeader(ChainUnitTest.genesisHeaderDb)

      val connectTip =
        Blockchain.connectTip(header = newHeader.blockHeader, blockchain)

      connectTip match {
        case ConnectTipResult.ExtendChain(_, newChain) =>
          assert(newHeader == newChain.tip)

        case fail @ (_: ConnectTipResult.Reorg | _: ConnectTipResult.BadTip) =>
          assert(false)
      }
  }

  it must "reconstruct a blockchain given a child header correctly" inFixtured {
    case ChainFixture.Empty =>
      val accum = new mutable.ArrayBuffer[BlockHeaderDb](5)
      accum.+=(ChainUnitTest.genesisHeaderDb)
      //generate 4 headers
      0.until(4).foreach { _ =>
        val newHeader = BlockHeaderHelper.buildNextHeader(accum.last)
        accum.+=(newHeader)
      }

      //now given the last header, and the other headers we should reconstruct the blockchain
      val headers = accum.dropRight(1).toVector
      val tip = accum.last

      val reconstructed = Blockchain.reconstructFromHeaders(childHeader = tip,
                                                            ancestors = headers)

      assert(reconstructed.length == 1)
      val chain = reconstructed.head
      assert(chain.toVector.length == 5)
      assert(chain.tip == accum.last)
      assert(chain.last == ChainUnitTest.genesisHeaderDb)
      assert(chain.toVector == accum.reverse.toVector)
  }

  it must "fail to reconstruct a blockchain if we do not have validly connected headers" inFixtured {
    case ChainFixture.Empty =>
      val missingHeader =
        BlockHeaderHelper.buildNextHeader(ChainUnitTest.genesisHeaderDb)

      val thirdHeader = BlockHeaderHelper.buildNextHeader(missingHeader)

      val reconstructed =
        Blockchain.reconstructFromHeaders(thirdHeader,
                                          Vector(ChainUnitTest.genesisHeaderDb))

      assert(reconstructed.isEmpty)
  }
} 
Example 145
Source File: BitcoindV16RpcClient.scala    From bitcoin-s   with MIT License 5 votes vote down vote up
package org.bitcoins.rpc.client.v16

import akka.actor.ActorSystem
import org.bitcoins.commons.jsonmodels.bitcoind.{
  RpcOpts,
  SignRawTransactionResult
}
import org.bitcoins.commons.serializers.JsonSerializers._
import org.bitcoins.commons.serializers.JsonWriters._
import org.bitcoins.core.api.ChainQueryApi
import org.bitcoins.core.protocol.transaction.Transaction
import org.bitcoins.core.script.crypto.HashType
import org.bitcoins.crypto.ECPrivateKey
import org.bitcoins.rpc.client.common.{BitcoindRpcClient, BitcoindVersion}
import org.bitcoins.rpc.config.BitcoindInstance
import play.api.libs.json._

import scala.concurrent.Future
import scala.util.Try


object BitcoindV16RpcClient { // companion object; the class definition is omitted from this listing

  def withActorSystem(instance: BitcoindInstance)(implicit
      system: ActorSystem): BitcoindV16RpcClient =
    new BitcoindV16RpcClient(instance)(system)

  def fromUnknownVersion(
      rpcClient: BitcoindRpcClient): Try[BitcoindV16RpcClient] =
    Try {
      new BitcoindV16RpcClient(rpcClient.instance)(rpcClient.system)
    }
} 
Example 146
Source File: BitcoindV18RpcClient.scala    From bitcoin-s   with MIT License 5 votes vote down vote up
package org.bitcoins.rpc.client.v18

import akka.actor.ActorSystem
import org.bitcoins.commons.jsonmodels.bitcoind.{
  RpcOpts,
  SignRawTransactionResult
}
import org.bitcoins.commons.serializers.JsonSerializers._
import org.bitcoins.commons.serializers.JsonWriters._
import org.bitcoins.core.api.ChainQueryApi
import org.bitcoins.core.protocol.transaction.Transaction
import org.bitcoins.core.script.crypto.HashType
import org.bitcoins.crypto.ECPrivateKey
import org.bitcoins.rpc.client.common.{
  BitcoindRpcClient,
  BitcoindVersion,
  DescriptorRpc,
  PsbtRpc
}
import org.bitcoins.rpc.config.BitcoindInstance
import play.api.libs.json._

import scala.concurrent.Future
import scala.util.Try


object BitcoindV18RpcClient { // companion object; the class definition is omitted from this listing

  def withActorSystem(instance: BitcoindInstance)(implicit
      system: ActorSystem): BitcoindV18RpcClient =
    new BitcoindV18RpcClient(instance)(system)

  def fromUnknownVersion(
      rpcClient: BitcoindRpcClient): Try[BitcoindV18RpcClient] =
    Try {
      new BitcoindV18RpcClient(rpcClient.instance)(rpcClient.system)
    }

} 
Example 147
Source File: RpcUtil.scala    From bitcoin-s   with MIT License 5 votes vote down vote up
package org.bitcoins.rpc.util

import java.net.ServerSocket

import akka.actor.ActorSystem
import org.bitcoins.rpc.client.common.BitcoindRpcClient

import scala.annotation.tailrec
import scala.concurrent.Future
import scala.concurrent.duration.FiniteDuration
import scala.concurrent.duration.DurationInt
import scala.util.{Failure, Random, Success, Try}

abstract class RpcUtil extends AsyncUtil {

  def awaitServerShutdown(
      server: BitcoindRpcClient,
      duration: FiniteDuration = 300.milliseconds,
      maxTries: Int = 50)(implicit system: ActorSystem): Future[Unit] = {
    retryUntilSatisfiedF(() => server.isStoppedF, duration, maxTries)
  }

  
  // Picks a random port in the non-privileged range and verifies it is free
  // by briefly binding a ServerSocket to it; retries recursively on failure.
  @tailrec
  final def randomPort: Int = {
    val MAX = 65535 // max tcp port number
    val MIN = 1025 // lowest port not requiring sudo
    val port = Math.abs(Random.nextInt(MAX - MIN) + (MIN + 1))
    val attempt = Try {
      val socket = new ServerSocket(port)
      socket.close()
      socket.getLocalPort
    }

    attempt match {
      case Success(value) => value
      case Failure(_)     => randomPort
    }
  }
}

object RpcUtil extends RpcUtil 
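
The retryUntilSatisfiedF and awaitConditionF helpers used throughout these examples are inherited from AsyncUtil, which is not part of this listing. A minimal sketch of such a scheduler-driven polling loop, assuming only akka.actor.ActorSystem and standard Futures (the names below are illustrative, not the bitcoin-s API):

import akka.actor.ActorSystem

import scala.concurrent.duration.FiniteDuration
import scala.concurrent.{Future, Promise}
import scala.util.Success

object PollingSketch {

  // Re-evaluates conditionF every `interval` until it yields true,
  // failing after `maxTries` attempts have been exhausted.
  def pollUntilTrue(
      conditionF: () => Future[Boolean],
      interval: FiniteDuration,
      maxTries: Int)(implicit system: ActorSystem): Future[Unit] = {
    import system.dispatcher
    val done = Promise[Unit]()

    def attempt(remaining: Int): Unit =
      conditionF().onComplete {
        case Success(true) => done.success(())
        case _ if remaining <= 0 =>
          done.failure(new RuntimeException("Condition was never satisfied"))
        case _ =>
          system.scheduler.scheduleOnce(interval)(attempt(remaining - 1))
      }

    attempt(maxTries)
    done.future
  }
}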
Example 148
Source File: FutureUtilTest.scala    From bitcoin-s   with MIT License 5 votes vote down vote up
package org.bitcoins.core.util

import akka.actor.ActorSystem
import org.bitcoins.testkit.util.BitcoinSAsyncTest
import org.scalatest.compatible.Assertion

import scala.concurrent._

class FutureUtilTest extends BitcoinSAsyncTest with BitcoinSLogger {
  it must "execute futures sequentially in the correct order" in {

    val actorSystem = ActorSystem()
    implicit val ec = actorSystem.dispatcher
    val scheduler = actorSystem.scheduler

    val assertionP = Promise[Assertion]()
    val assertionF = assertionP.future

    val promise1 = Promise[Unit]()
    val promise2 = Promise[Unit]()

    val future1 = promise1.future
    val future2 = promise2.future

    future1.onComplete { _ =>
      if (future2.isCompleted) {
        assertionP.failure(new Error(s"future2 completed before future1"))
      }
    }

    future2.onComplete { _ =>
      if (!future1.isCompleted) {
        assertionP.failure(
          new Error(s"future1 was not complete by future2 completing"))
      } else {
        assertionP.success(succeed)
      }
    }

    val futs = FutureUtil.sequentially(List(1, 2)) {
      case 1 =>
        promise1.success(())
        Future.successful(1)
      case 2 =>
        promise2.success(())
        Future.successful(2)

    }

    futs.map(xs => assert(List(1, 2) == xs)).flatMap(_ => assertionF)
  }
} 
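
The test above exercises FutureUtil.sequentially, whose implementation is not shown in this listing. A plausible sketch of such a combinator (an assumption, not the bitcoin-s source): each element's Future is started only after the previous one has completed, by threading the futures through a foldLeft.

import scala.concurrent.{ExecutionContext, Future}

object SequentiallySketch {

  // Runs f over items strictly one at a time, collecting results in order.
  def sequentially[T, U](items: Seq[T])(f: T => Future[U])(implicit
      ec: ExecutionContext): Future[List[U]] =
    items.foldLeft(Future.successful(List.empty[U])) { (accF, item) =>
      for {
        acc <- accF
        u <- f(item) // starts only once every earlier future has finished
      } yield acc :+ u
    }
}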
Example 149
Source File: Server.scala    From bitcoin-s   with MIT License 5 votes vote down vote up
package org.bitcoins.server

import akka.actor.ActorSystem
import akka.event.Logging
import akka.http.scaladsl._
import akka.http.scaladsl.model._
import akka.http.scaladsl.server.Directives._
import akka.http.scaladsl.server._
import akka.http.scaladsl.server.directives.DebuggingDirectives
import de.heikoseeberger.akkahttpupickle.UpickleSupport._
import org.bitcoins.db.AppConfig
import upickle.{default => up}

import scala.concurrent.Future

case class Server(
    conf: AppConfig,
    handlers: Seq[ServerRoute],
    rpcport: Int = 9999)(implicit system: ActorSystem)
    extends HttpLogger {
  implicit private val config: AppConfig = conf

  import system.dispatcher

  
  // Wraps a successful result in the JSON envelope shared by all responses.
  def httpSuccess[T](body: T)(implicit
      writer: up.Writer[T]): HttpEntity.Strict = {
    val response = Response(result = Some(up.writeJs(body)))
    HttpEntity(
      ContentTypes.`application/json`,
      up.write(response.toJsonMap)
    )
  }

  def httpError(
      msg: String,
      status: StatusCode = StatusCodes.InternalServerError): HttpResponse = {

    val entity = {
      val response = Response(error = Some(msg))
      HttpEntity(
        ContentTypes.`application/json`,
        up.write(response.toJsonMap)
      )
    }

    HttpResponse(status = status, entity = entity)
  }
} 
Example 150
Source File: NodeRoutes.scala    From bitcoin-s   with MIT License 5 votes vote down vote up
package org.bitcoins.server

import akka.actor.ActorSystem
import akka.http.scaladsl.server.Directives._
import akka.http.scaladsl.server._
import org.bitcoins.node.Node

import scala.util.{Failure, Success}

case class NodeRoutes(node: Node)(implicit system: ActorSystem)
    extends ServerRoute {
  import system.dispatcher

  def handleCommand: PartialFunction[ServerCommand, StandardRoute] = {
    case ServerCommand("getpeers", _) =>
      complete {
        Server.httpSuccess("TODO implement getpeers")
      }

    case ServerCommand("stop", _) =>
      complete {
        val nodeStopping = node.stop().map { _ =>
          Server.httpSuccess("Node shutting down")
        }
        system.terminate()
        nodeStopping
      }

    case ServerCommand("sendrawtransaction", arr) =>
      SendRawTransaction.fromJsArr(arr) match {
        case Failure(exception) =>
          reject(ValidationRejection("failure", Some(exception)))
        case Success(SendRawTransaction(tx)) =>
          complete {
            node.broadcastTransaction(tx).map { _ =>
              Server.httpSuccess(s"${tx.txIdBE.hex}")
            }
          }
      }
  }
} 
Example 151
Source File: ChainRoutes.scala    From bitcoin-s   with MIT License 5 votes vote down vote up
package org.bitcoins.server

import akka.actor.ActorSystem
import akka.http.scaladsl.server.Directives._
import akka.http.scaladsl.server._
import org.bitcoins.chain.api.ChainApi
import org.bitcoins.commons.serializers.Picklers._

case class ChainRoutes(chain: ChainApi)(implicit system: ActorSystem)
    extends ServerRoute {
  import system.dispatcher

  def handleCommand: PartialFunction[ServerCommand, StandardRoute] = {
    case ServerCommand("getblockcount", _) =>
      complete {
        chain.getBlockCount.map { count =>
          Server.httpSuccess(count)
        }
      }
    case ServerCommand("getfiltercount", _) =>
      complete {
        chain.getFilterCount().map { count =>
          Server.httpSuccess(count)
        }
      }
    case ServerCommand("getfilterheadercount", _) =>
      complete {
        chain.getFilterHeaderCount().map { count =>
          Server.httpSuccess(count)
        }
      }
    case ServerCommand("getbestblockhash", _) =>
      complete {
        chain.getBestBlockHash.map { hash =>
          Server.httpSuccess(hash)
        }
      }
  }

} 
Example 152
Source File: CoreRoutes.scala    From bitcoin-s   with MIT License 5 votes vote down vote up
package org.bitcoins.server

import akka.actor.ActorSystem
import akka.http.scaladsl.server.Directives._
import akka.http.scaladsl.server._
import org.bitcoins.core.api.CoreApi

import scala.util.{Failure, Success}

case class CoreRoutes(core: CoreApi)(implicit system: ActorSystem)
    extends ServerRoute {
  import system.dispatcher

  def handleCommand: PartialFunction[ServerCommand, StandardRoute] = {
    case ServerCommand("finalizepsbt", arr) =>
      FinalizePSBT.fromJsArr(arr) match {
        case Success(FinalizePSBT(psbt)) =>
          complete {
            core
              .finalizePSBT(psbt)
              .map(finalized => Server.httpSuccess(finalized.base64))
          }
        case Failure(exception) =>
          reject(ValidationRejection("failure", Some(exception)))
      }

    case ServerCommand("extractfrompsbt", arr) =>
      ExtractFromPSBT.fromJsArr(arr) match {
        case Success(ExtractFromPSBT(psbt)) =>
          complete {
            core
              .extractFromPSBT(psbt)
              .map(tx => Server.httpSuccess(tx.hex))
          }
        case Failure(exception) =>
          reject(ValidationRejection("failure", Some(exception)))
      }

    case ServerCommand("converttopsbt", arr) =>
      ConvertToPSBT.fromJsArr(arr) match {
        case Success(ConvertToPSBT(tx)) =>
          complete {
            core
              .convertToPSBT(tx)
              .map(psbt => Server.httpSuccess(psbt.base64))
          }
        case Failure(exception) =>
          reject(ValidationRejection("failure", Some(exception)))
      }

    case ServerCommand("combinepsbts", arr) =>
      CombinePSBTs.fromJsArr(arr) match {
        case Success(CombinePSBTs(psbts)) =>
          complete {
            core
              .combinePSBTs(psbts)
              .map(psbt => Server.httpSuccess(psbt.base64))
          }
        case Failure(exception) =>
          reject(ValidationRejection("failure", Some(exception)))
      }

    case ServerCommand("joinpsbts", arr) =>
      JoinPSBTs.fromJsArr(arr) match {
        case Success(JoinPSBTs(psbts)) =>
          complete {
            core
              .joinPSBTs(psbts)
              .map(psbt => Server.httpSuccess(psbt.base64))
          }
        case Failure(exception) =>
          reject(ValidationRejection("failure", Some(exception)))
      }

    case ServerCommand("decoderawtransaction", arr) =>
      DecodeRawTransaction.fromJsArr(arr) match {
        case Failure(exception) =>
          reject(ValidationRejection("failure", Some(exception)))
        case Success(DecodeRawTransaction(tx)) =>
          complete {
            val jsonStr = SerializedTransaction.decodeRawTransaction(tx)
            Server.httpSuccess(jsonStr)
          }
      }
  }
} 
Example 153
Source File: BitcoinerLiveFeeRateProvider.scala    From bitcoin-s   with MIT License 5 votes vote down vote up
package org.bitcoins.feeprovider

import akka.actor.ActorSystem
import akka.http.scaladsl.model.Uri
import org.bitcoins.commons.jsonmodels.wallet.BitcoinerLiveResult
import org.bitcoins.commons.serializers.JsonSerializers._
import org.bitcoins.core.wallet.fee.SatoshisPerVirtualByte
import play.api.libs.json.{JsError, JsSuccess, Json}

import scala.util.{Failure, Success, Try}

case class BitcoinerLiveFeeRateProvider(minutes: Int)(implicit
    override val system: ActorSystem)
    extends CachedHttpFeeRateProvider {

  private val bitcoinerLiveValidMinutes =
    Vector(30, 60, 120, 180, 360, 720, 1440)
  require(
    bitcoinerLiveValidMinutes.contains(minutes),
    s"$minutes is not a valid selection, must be from $bitcoinerLiveValidMinutes")

  override val uri: Uri =
    Uri("https://bitcoiner.live/api/fees/estimates/latest")

  override def converter(str: String): Try[SatoshisPerVirtualByte] = {
    val json = Json.parse(str)
    json.validate[BitcoinerLiveResult] match {
      case JsSuccess(response, _) =>
        Success(response.estimates(minutes).sat_per_vbyte)
      case JsError(error) =>
        Failure(
          new RuntimeException(
            s"Unexpected error when parsing response: $error"))
    }
  }
} 
Example 154
Source File: HttpFeeRateProvider.scala    From bitcoin-s   with MIT License 5 votes vote down vote up
package org.bitcoins.feeprovider

import java.time.{Duration, Instant}

import akka.actor.ActorSystem
import akka.http.scaladsl.Http
import akka.http.scaladsl.model.{HttpRequest, Uri}
import akka.util.ByteString
import org.bitcoins.core.api.FeeRateApi
import org.bitcoins.core.util.TimeUtil
import org.bitcoins.core.wallet.fee.FeeUnit

import scala.concurrent.{ExecutionContextExecutor, Future}
import scala.util.Try

object HttpFeeRateProvider {

  def makeApiCall(uri: Uri)(implicit system: ActorSystem): Future[String] = {
    implicit val ec: ExecutionContextExecutor = system.dispatcher
    Http()
      .singleRequest(HttpRequest(uri = uri))
      .flatMap(response =>
        response.entity.dataBytes
          .runFold(ByteString.empty)(_ ++ _)
          .map(payload => payload.decodeString(ByteString.UTF_8)))
  }
}

abstract class HttpFeeRateProvider extends FeeRateApi {
  implicit protected val system: ActorSystem

  protected def uri: Uri

  protected def converter(str: String): Try[FeeUnit]

  def getFeeRate: Future[FeeUnit] = {
    HttpFeeRateProvider
      .makeApiCall(uri)
      .flatMap(ret => Future.fromTry(converter(ret)))(system.dispatcher)
  }
}

abstract class CachedHttpFeeRateProvider extends HttpFeeRateProvider {

  private var cachedFeeRateOpt: Option[(FeeUnit, Instant)] = None

  val cacheDuration: Duration = Duration.ofMinutes(5)

  private def updateFeeRate(): Future[FeeUnit] = {
    implicit val ec: ExecutionContextExecutor = system.dispatcher
    super.getFeeRate.map { feeRate =>
      cachedFeeRateOpt = Some((feeRate, TimeUtil.now))
      feeRate
    }
  }

  override def getFeeRate: Future[FeeUnit] = {
    cachedFeeRateOpt match {
      case None =>
        updateFeeRate()
      case Some((cachedFeeRate, time)) =>
        val now = TimeUtil.now
        if (time.plus(cacheDuration).isAfter(now)) {
          // the cached value has not expired yet, so serve it
          Future.successful(cachedFeeRate)
        } else {
          updateFeeRate()
        }
    }
  }
} 
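
Putting the two previous examples together, a short usage sketch: instantiating the BitcoinerLiveFeeRateProvider from Example 153 and querying it through the cached getFeeRate shown above (the surrounding setup is illustrative):

import akka.actor.ActorSystem
import org.bitcoins.feeprovider.BitcoinerLiveFeeRateProvider

object FeeRateExample extends App {
  implicit val system: ActorSystem = ActorSystem("fee-rate-example")
  import system.dispatcher

  // 60 is one of the confirmation targets the provider accepts.
  val provider = BitcoinerLiveFeeRateProvider(minutes = 60)

  // The first call hits the HTTP API; calls within the next five minutes
  // are answered from the cache.
  provider.getFeeRate.foreach { rate =>
    println(s"Current fee rate: $rate")
    system.terminate()
  }
}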
Example 155
Source File: EclairRpcTestUtilTest.scala    From bitcoin-s   with MIT License 5 votes vote down vote up
package org.bitcoins.eclair.rpc

import akka.actor.ActorSystem
import org.bitcoins.eclair.rpc.client.EclairRpcClient
import org.bitcoins.testkit.eclair.rpc.EclairRpcTestUtil
import org.bitcoins.testkit.util.BitcoinSAsyncTest

class EclairRpcTestUtilTest extends BitcoinSAsyncTest {

  implicit private val actorSystem: ActorSystem =
    ActorSystem("EclairRpcTestUtilTest")

  private lazy val bitcoindRpcF =
    for {
      cli <- EclairRpcTestUtil.startedBitcoindRpcClient()
      address <- cli.getNewAddress
      blocks <- cli.generateToAddress(200, address)
    } yield cli

  private val clients =
    Vector.newBuilder[EclairRpcClient]

  override def afterAll: Unit = {
    clients.result().foreach(EclairRpcTestUtil.shutdown)
    super.afterAll()
  }

  behavior of "EclairRpcTestUtilTest"

  it must "spawn four nodes and create a channel link between them" in {
    val nodes4F = bitcoindRpcF.flatMap { bitcoindRpc =>
      val nodes = EclairRpcTestUtil.createNodeLink(bitcoindRpc)

      nodes.map { n4 =>
        clients ++= List(n4.c1, n4.c2, n4.c3, n4.c4)
      }

      nodes
    }

    nodes4F.flatMap { n4 =>
      val first = n4.c1
      val second = n4.c2
      val third = n4.c3
      val fourth = n4.c4

      for {
        nodeInfoFirst <- first.getInfo
        channelsFirst <- first.channels()
        nodeInfoSecond <- second.getInfo
        channelsSecond <- second.channels()
        nodeInfoThird <- third.getInfo
        channelsThird <- third.channels()
        nodeInfoFourth <- fourth.getInfo
        channelsFourth <- fourth.channels()
      } yield {
        assert(channelsFirst.length == 1)
        assert(channelsFirst.exists(_.nodeId == nodeInfoSecond.nodeId))

        assert(channelsSecond.length == 2)
        assert(channelsSecond.exists(_.nodeId == nodeInfoFirst.nodeId))
        assert(channelsSecond.exists(_.nodeId == nodeInfoThird.nodeId))

        assert(channelsThird.length == 2)
        assert(channelsThird.exists(_.nodeId == nodeInfoSecond.nodeId))
        assert(channelsThird.exists(_.nodeId == nodeInfoFourth.nodeId))

        assert(channelsFourth.length == 1)
        assert(channelsFourth.exists(_.nodeId == nodeInfoThird.nodeId))
      }
    }

  }
} 
Example 156
Source File: NeutrinoNode.scala    From bitcoin-s   with MIT License 5 votes vote down vote up
package org.bitcoins.node

import akka.actor.ActorSystem
import org.bitcoins.chain.config.ChainAppConfig
import org.bitcoins.core.api.ChainQueryApi.FilterResponse
import org.bitcoins.core.protocol.BlockStamp
import org.bitcoins.node.config.NodeAppConfig
import org.bitcoins.node.models.Peer

import scala.concurrent.Future

case class NeutrinoNode(
    nodePeer: Peer,
    nodeConfig: NodeAppConfig,
    chainConfig: ChainAppConfig,
    actorSystem: ActorSystem)
    extends Node {
  require(
    nodeConfig.isNeutrinoEnabled,
    s"We need our Neutrino mode enabled to be able to construct a Neutrino node!")

  implicit override def system: ActorSystem = actorSystem

  implicit override def nodeAppConfig: NodeAppConfig = nodeConfig

  implicit override def chainAppConfig: ChainAppConfig = chainConfig

  override val peer: Peer = nodePeer

  override def start(): Future[Node] = {
    val res = for {
      node <- super.start()
      chainApi <- chainApiFromDb()
      bestHash <- chainApi.getBestBlockHash()
      peerMsgSender <- peerMsgSenderF
      _ <- peerMsgSender.sendGetCompactFilterCheckPointMessage(
        stopHash = bestHash.flip)
    } yield {
      node
    }

    res.failed.foreach(logger.error("Cannot start Neutrino node", _))

    res
  }

  
  // Delegates block-stamp height lookups to the chain project's ChainApi.
  override def getHeightByBlockStamp(blockStamp: BlockStamp): Future[Int] =
    chainApiFromDb().flatMap(_.getHeightByBlockStamp(blockStamp))

  override def getFiltersBetweenHeights(
      startHeight: Int,
      endHeight: Int): Future[Vector[FilterResponse]] =
    chainApiFromDb().flatMap(_.getFiltersBetweenHeights(startHeight, endHeight))

} 
Example 157
Source File: SpvNode.scala    From bitcoin-s   with MIT License 5 votes vote down vote up
package org.bitcoins.node

import akka.actor.ActorSystem
import org.bitcoins.chain.config.ChainAppConfig
import org.bitcoins.core.api.ChainQueryApi.FilterResponse
import org.bitcoins.core.bloom.BloomFilter
import org.bitcoins.core.protocol.transaction.Transaction
import org.bitcoins.core.protocol.{BitcoinAddress, BlockStamp}
import org.bitcoins.core.util.Mutable
import org.bitcoins.node.config.NodeAppConfig
import org.bitcoins.node.models.Peer

import scala.concurrent.Future

case class SpvNode(
    nodePeer: Peer,
    nodeConfig: NodeAppConfig,
    chainConfig: ChainAppConfig,
    actorSystem: ActorSystem)
    extends Node {
  require(nodeConfig.isSPVEnabled,
          s"We need our SPV mode enabled to be able to construct a SPV node!")

  implicit override def system: ActorSystem = actorSystem

  implicit override def nodeAppConfig: NodeAppConfig = nodeConfig

  implicit override def chainAppConfig: ChainAppConfig = chainConfig

  override val peer: Peer = nodePeer

  private val _bloomFilter = new Mutable(BloomFilter.empty)

  def bloomFilter: BloomFilter = _bloomFilter.atomicGet

  def setBloomFilter(bloom: BloomFilter): SpvNode = {
    _bloomFilter.atomicSet(bloom)
    this
  }

  
  // Compact filters are not available in SPV mode, so filter queries fail.
  override def getFilterCount: Future[Int] =
    Future.failed(new RuntimeException(cfErrMsg))

  override def getFiltersBetweenHeights(
      startHeight: Int,
      endHeight: Int): Future[Vector[FilterResponse]] =
    Future.failed(new RuntimeException(cfErrMsg))
} 
Example 158
Source File: NodeAppConfig.scala    From bitcoin-s   with MIT License 5 votes vote down vote up
package org.bitcoins.node.config

import java.nio.file.Path

import akka.actor.ActorSystem
import com.typesafe.config.Config
import org.bitcoins.chain.config.ChainAppConfig
import org.bitcoins.core.util.FutureUtil
import org.bitcoins.db.{AppConfig, AppConfigFactory, JdbcProfileComponent}
import org.bitcoins.node.{NeutrinoNode, Node, SpvNode}
import org.bitcoins.node.db.NodeDbManagement
import org.bitcoins.node.models.Peer

import scala.concurrent.{ExecutionContext, Future}


object NodeAppConfig { // companion object; the config class definition is omitted from this listing

  def createNode(peer: Peer)(implicit
      nodeConf: NodeAppConfig,
      chainConf: ChainAppConfig,
      system: ActorSystem): Future[Node] = {
    if (nodeConf.isSPVEnabled) {
      Future.successful(SpvNode(peer, nodeConf, chainConf, system))
    } else if (nodeConf.isNeutrinoEnabled) {
      Future.successful(NeutrinoNode(peer, nodeConf, chainConf, system))
    } else {
      Future.failed(
        new RuntimeException("Neither Neutrino nor SPV mode is enabled."))
    }
  }
} 
Example 159
Source File: TestRpcUtil.scala    From bitcoin-s   with MIT License 5 votes vote down vote up
package org.bitcoins.testkit.rpc

import akka.actor.ActorSystem
import org.bitcoins.testkit.async.TestAsyncUtil

import scala.concurrent.Future
import scala.concurrent.duration.FiniteDuration

abstract class TestRpcUtil extends org.bitcoins.rpc.util.RpcUtil {

  override protected def retryUntilSatisfiedWithCounter(
      conditionF: () => Future[Boolean],
      duration: FiniteDuration,
      counter: Int,
      maxTries: Int,
      stackTrace: Array[StackTraceElement])(implicit
      system: ActorSystem): Future[Unit] = {
    val retryF = super
      .retryUntilSatisfiedWithCounter(conditionF,
                                      duration,
                                      counter,
                                      maxTries,
                                      stackTrace)

    TestAsyncUtil.transformRetryToTestFailure(retryF)(system.dispatcher)
  }
}

object TestRpcUtil extends TestRpcUtil 
Example 160
Source File: FundWalletUtil.scala    From bitcoin-s   with MIT License 5 votes vote down vote up
package org.bitcoins.testkit.wallet

import akka.actor.ActorSystem
import com.typesafe.config.Config
import org.bitcoins.core.api.{ChainQueryApi, NodeApi}
import org.bitcoins.core.currency.CurrencyUnit
import org.bitcoins.core.hd.HDAccount
import org.bitcoins.core.protocol.BitcoinAddress
import org.bitcoins.core.protocol.transaction.TransactionOutput
import org.bitcoins.rpc.client.common.BitcoindRpcClient
import org.bitcoins.server.BitcoinSAppConfig
import org.bitcoins.testkit.util.TransactionTestUtil
import org.bitcoins.testkit.wallet.FundWalletUtil.FundedWallet
import org.bitcoins.wallet.Wallet

import scala.concurrent.{ExecutionContext, Future}

trait FundWalletUtil {

  def fundAccountForWallet(
      amts: Vector[CurrencyUnit],
      account: HDAccount,
      wallet: Wallet)(implicit ec: ExecutionContext): Future[Wallet] = {

    val addressesF: Future[Vector[BitcoinAddress]] = Future.sequence {
      Vector.fill(3)(wallet.getNewAddress(account))
    }

    //construct three txs that send money to these addresses
    //these are "fictional" transactions in the sense that the
    //outpoints do not exist on a blockchain anywhere
    val txsF = for {
      addresses <- addressesF
    } yield {
      addresses.zip(amts).map {
        case (addr, amt) =>
          val output =
            TransactionOutput(value = amt, scriptPubKey = addr.scriptPubKey)
          TransactionTestUtil.buildTransactionTo(output)
      }
    }

    val fundedWalletF =
      txsF.flatMap(txs => wallet.processTransactions(txs, None))

    fundedWalletF.map(_.asInstanceOf[Wallet])
  }

  def fundAccountForWalletWithBitcoind(
      amts: Vector[CurrencyUnit],
      account: HDAccount,
      wallet: Wallet,
      bitcoind: BitcoindRpcClient)(implicit
      ec: ExecutionContext): Future[Wallet] = {

    val addressesF: Future[Vector[BitcoinAddress]] = Future.sequence {
      Vector.fill(3)(wallet.getNewAddress(account))
    }

    val txAndHashF = for {
      addresses <- addressesF
      addressAmountMap = addresses.zip(amts).toMap
      txId <- bitcoind.sendMany(addressAmountMap)
      tx <- bitcoind.getRawTransactionRaw(txId)
      hashes <- bitcoind.getNewAddress.flatMap(bitcoind.generateToAddress(6, _))
    } yield (tx, hashes.head)

    val fundedWalletF =
      txAndHashF.map(txAndHash =>
        wallet.processTransaction(txAndHash._1, Some(txAndHash._2)))

    fundedWalletF.flatMap(_.map(_.asInstanceOf[Wallet]))
  }

  
  // Creates a wallet with two accounts and funds the default account.
  def createFundedWallet(
      nodeApi: NodeApi,
      chainQueryApi: ChainQueryApi,
      bip39PasswordOpt: Option[String],
      extraConfig: Option[Config] = None)(implicit
      config: BitcoinSAppConfig,
      system: ActorSystem): Future[FundedWallet] = {

    import system.dispatcher
    for {
      wallet <- BitcoinSWalletTest.createWallet2Accounts(
        nodeApi = nodeApi,
        chainQueryApi = chainQueryApi,
        bip39PasswordOpt = bip39PasswordOpt,
        extraConfig = extraConfig)
      funded <- FundWalletUtil.fundWallet(wallet)
    } yield funded
  }
} 
Example 161
Source File: TestAsyncUtil.scala    From bitcoin-s   with MIT License 5 votes vote down vote up
package org.bitcoins.testkit.async

import akka.actor.ActorSystem
import org.scalatest.exceptions.{StackDepthException, TestFailedException}

import scala.concurrent.{ExecutionContext, Future}
import scala.concurrent.duration.FiniteDuration

abstract class TestAsyncUtil
    extends org.bitcoins.rpc.util.AsyncUtil
    with Serializable {

  override protected def retryUntilSatisfiedWithCounter(
      conditionF: () => Future[Boolean],
      duration: FiniteDuration,
      counter: Int,
      maxTries: Int,
      stackTrace: Array[StackTraceElement])(implicit
      system: ActorSystem): Future[Unit] = {
    val retryF = super
      .retryUntilSatisfiedWithCounter(conditionF,
                                      duration,
                                      counter,
                                      maxTries,
                                      stackTrace)

    TestAsyncUtil.transformRetryToTestFailure(retryF)(system.dispatcher)
  }
}

object TestAsyncUtil extends TestAsyncUtil {

  
  // Converts an RpcRetryException raised by the retry helpers into a
  // scalatest TestFailedException pointing at the relevant caller frame.
  def transformRetryToTestFailure[T](fut: Future[T])(implicit
      ec: ExecutionContext): Future[T] = {
    def transformRetry(err: Throwable): Throwable = err match {
      case retryErr: RpcRetryException =>
        val relevantStackTrace = retryErr.caller.tail
          .dropWhile(elem => retryErr.internalFiles.contains(elem.getFileName))
          .takeWhile(!_.getFileName.contains("TestSuite"))
        val stackElement = relevantStackTrace.head
        val file = stackElement.getFileName
        val path = stackElement.getClassName
        val line = stackElement.getLineNumber
        val pos = org.scalactic.source.Position(file, path, line)
        val newErr = new TestFailedException(
          { _: StackDepthException => Some(retryErr.message) },
          None,
          pos)
        newErr.setStackTrace(relevantStackTrace)
        newErr
      case other => other
    }

    fut.transform(identity, transformRetry)
  }
} 
Example 162
Source File: RestService.scala    From introduction-to-akkahttp   with Apache License 2.0 5 votes vote down vote up
package com.shashank.akkahttp.project

import java.util.concurrent.ConcurrentHashMap

import akka.actor.ActorSystem
import akka.http.scaladsl.server.Directives._
import akka.stream.ActorMaterializer
import akka.http.scaladsl.marshallers.sprayjson.SprayJsonSupport._
import com.shashank.akkahttp.project.Models.{LoadRequest, ServiceJsonProtoocol}

import scala.collection.JavaConverters._
import spray.json.{DefaultJsonProtocol, JsArray, pimpAny}
import spray.json.DefaultJsonProtocol._
import org.apache.spark.sql.SparkSession
import org.apache.spark.sql._


trait RestService {
  implicit val system: ActorSystem
  implicit val materializer: ActorMaterializer
  implicit val sparkSession: SparkSession
  val datasetMap = new ConcurrentHashMap[String, Dataset[Row]]()

  import ServiceJsonProtoocol._

  val route =
    pathSingleSlash {
      get {
        complete {
          "welcome to rest service"
        }
      }
    } ~
      path("load") {
        post {
          entity(as[LoadRequest]) {
            loadRequest => complete {
              val id = "" + System.nanoTime()
              val dataset = sparkSession.read.format("csv")
                .option("header", "true")
                .load(loadRequest.path)
              datasetMap.put(id, dataset)
              id
            }
          }
        }
      } ~
      path("view" / """[\w[0-9]-_]+""".r) { id =>
        get {
          complete {
            val dataset = datasetMap.get(id)
            dataset.take(10).map(row => row.toString())
          }
        }
      }
} 
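
RestService depends on a Models object that is not included in this listing. A hypothetical sketch of the shape it would need for the route above to compile (the path field is inferred from loadRequest.path; the misspelled ServiceJsonProtoocol is kept because the code imports it by that name):

package com.shashank.akkahttp.project

import spray.json.{DefaultJsonProtocol, RootJsonFormat}

object Models {

  // Request body for the "load" endpoint: the CSV file to read.
  case class LoadRequest(path: String)

  object ServiceJsonProtoocol extends DefaultJsonProtocol {
    implicit val loadRequestFormat: RootJsonFormat[LoadRequest] =
      jsonFormat1(LoadRequest)
  }
}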
Example 163
Source File: RestServer.scala    From introduction-to-akkahttp   with Apache License 2.0 5 votes vote down vote up
package com.shashank.akkahttp.project

import akka.actor.ActorSystem
import akka.http.scaladsl.Http
import akka.stream.ActorMaterializer
import org.apache.spark.sql.SparkSession




class RestServer(implicit val system: ActorSystem,
                 implicit val materializer: ActorMaterializer,
                 implicit val sparkSession: SparkSession)
    extends RestService {

  def startServer(address: String, port: Int) =
    Http().bindAndHandle(route, address, port)
}

object RestServer {

  def main(args: Array[String]) {
    implicit val actorSystem = ActorSystem("rest-server")
    implicit val materializer = ActorMaterializer()
    implicit val sparkSession: SparkSession =
      SparkSession.builder().master("local").appName("Rest Server context").getOrCreate()
    val server = new RestServer()
    server.startServer("localhost", 8080)
    println("running server at localhost 8080")
  }
} 
Example 164
Source File: TestKit.scala    From introduction-to-akkahttp   with Apache License 2.0 5 votes vote down vote up
package com.shashank.akkahttp.basic.routing

import akka.actor.ActorSystem
import akka.http.scaladsl.model.HttpMethods._
import akka.http.scaladsl.model.HttpRequest
import akka.http.scaladsl.server.Directives._
import akka.stream.{ActorMaterializer, Materializer}
import org.scalatest.{ Matchers, WordSpec }
import akka.http.scaladsl.testkit.ScalatestRouteTest



object TestKit extends WordSpec with Matchers with ScalatestRouteTest {

  def main(args: Array[String]) {

    val route =
      path("welcome"){
        get{
          complete {
            "welcome to rest service"
          }
        }
      } ~
        path("demo"){
          get{
            complete {
              "welcome to demonstration"
            }
          }
        }


    val getRequest = HttpRequest(GET, "/welcome")

    getRequest ~> route ~> check {
      status.intValue shouldEqual 200
      entityAs[String] shouldEqual "welcome to rest service"
    }

    system.terminate()
  }

} 
Example 165
Source File: RoutingDSL.scala    From introduction-to-akkahttp   with Apache License 2.0 5 votes vote down vote up
package com.shashank.akkahttp.basic.routing

import akka.actor.ActorSystem
import akka.http.scaladsl.Http
import akka.http.scaladsl.model.{HttpResponse, StatusCodes}
import akka.http.scaladsl.server.Directives._
import akka.http.scaladsl.server._
import akka.stream.{ActorMaterializer, Materializer}


object RoutingDSL {

  def main(args: Array[String]) {

    implicit val sys = ActorSystem("IntroductionToAkkaHttp")
    implicit val mat:Materializer = ActorMaterializer()

    val route =
      path("welcome"){
        get{
          complete {
            "welcome to rest service"
          }
        }
      } ~
      path("demo"){
        get{
          complete {
            "welcome to demonstration"
          }
        }
      }

    Http().bindAndHandle(route, "localhost", 8090)

  }

} 
Example 166
Source File: Failure.scala    From introduction-to-akkahttp   with Apache License 2.0 5 votes vote down vote up
package com.shashank.akkahttp.basic.routing

import akka.actor.ActorSystem
import akka.http.scaladsl.model.StatusCodes
import akka.http.scaladsl.Http
import akka.http.scaladsl.model.HttpResponse
import akka.http.scaladsl.server.Directives._
import akka.http.scaladsl.server.ExceptionHandler
import akka.stream.{ActorMaterializer, Materializer}


object Failure {

  def main(args: Array[String]) {

    implicit val sys = ActorSystem("IntroductionToAkkaHttp")
    implicit val mat:Materializer = ActorMaterializer()

    implicit def myExceptionHandler = ExceptionHandler {
      case _: ArithmeticException =>
        complete(HttpResponse(StatusCodes.BadRequest, entity = "Bad numbers, bad result!!!"))
      case e: Throwable => {
        println(e.getMessage)
        println(e.getStackTrace.mkString("\n"))
        complete(HttpResponse(StatusCodes.BadRequest, entity = e.getMessage))
      }
    }

    val route =
      path("welcome"){
        get{
          complete {
            "welcome to rest service"
          }
        }
      } ~
      path("demo"){
        get {
          complete {
            100/0
            "welcome to demonstration"
          }
        }
      }

    Http().bindAndHandle(route, "localhost", 8090)
  }

} 
Example 167
Source File: UnMarshalling.scala    From introduction-to-akkahttp   with Apache License 2.0 5 votes vote down vote up
package com.shashank.akkahttp.basic.routing

import akka.actor.ActorSystem
import akka.http.scaladsl.marshalling.Marshal
import akka.http.scaladsl.model.{HttpMethods, HttpRequest, HttpResponse, MessageEntity}
import akka.http.scaladsl.unmarshalling.Unmarshal
import akka.stream.{ActorMaterializer, Materializer}
import akka.util.ByteString

import scala.concurrent.Await
import scala.concurrent.duration._
import scala.concurrent.ExecutionContext.Implicits.global
import spray.json._


object UnMarshalling {

  def main(args: Array[String]) {

    implicit val sys = ActorSystem("IntroductionToAkkaHttp")
    implicit val mat:Materializer = ActorMaterializer()

    //type FromStringUnmarshaller[T] = Unmarshaller[String, T]
    val intFuture = Unmarshal("42").to[Int]
    val int = Await.result(intFuture, 1.second)
    println("int unmarshalling "+int)

    //type FromStringUnmarshaller[T] = Unmarshaller[String, T]
    val boolFuture = Unmarshal("off").to[Boolean]
    val bool = Await.result(boolFuture, 1.second)
    println("off unmarshalling "+bool)

    //type ToEntityMarshaller[T] = Marshaller[T, MessageEntity]
    val string = "Yeah"
    val entityFuture = Marshal(string).to[MessageEntity]
    val entity = Await.result(entityFuture, 1.second) // don't block in non-test code!
    println(entity)

    //type ToResponseMarshaller[T] = Marshaller[T, HttpResponse]
    val errorMsg = "Not found, pal!"
    val responseFuture = Marshal(404 -> errorMsg).to[HttpResponse]
    val response = Await.result(responseFuture, 1.second)
    println(response)


    //type FromEntityUnmarshaller[T] = Unmarshaller[HttpEntity, T]
    val jsonByteString = ByteString("""{"name":"Hello"}""")
    val httpRequest = HttpRequest(HttpMethods.POST, entity = jsonByteString)
    val jsonDataUnmarshalledFuture = Unmarshal(httpRequest).to[String]
    val jsonDataUnmarshalled = Await.result(jsonDataUnmarshalledFuture, 1.second)
    println(jsonDataUnmarshalled)

    sys.terminate()

  }

} 
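
Beyond the predefined string unmarshallers used above, akka-http also lets you supply your own. A small sketch using Unmarshaller.strict, with an invented Celsius wrapper type for illustration:

import akka.http.scaladsl.unmarshalling.Unmarshaller

object CustomUnmarshalling {

  case class Celsius(degrees: Double)

  // A strict (synchronous) unmarshaller from String to Celsius;
  // Unmarshal("21.5").to[Celsius] will resolve it implicitly.
  implicit val celsiusUnmarshaller: Unmarshaller[String, Celsius] =
    Unmarshaller.strict(s => Celsius(s.toDouble))
}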
Example 168
Source File: Rejection.scala    From introduction-to-akkahttp   with Apache License 2.0 5 votes vote down vote up
package com.shashank.akkahttp.basic.routing

import akka.actor.ActorSystem
import akka.http.scaladsl.Http
import akka.http.scaladsl.model.{HttpResponse, StatusCodes}
import akka.http.scaladsl.server.Directives._
import akka.http.scaladsl.server._
import akka.stream.{ActorMaterializer, Materializer}


object Rejection {

  def main(args: Array[String]) {

    implicit val sys = ActorSystem("IntroductionToAkkaHttp")
    implicit val mat:Materializer = ActorMaterializer()

    implicit def myRejectionHandler = RejectionHandler.newBuilder().handle{
      case MissingCookieRejection(cookieName) =>
        complete(HttpResponse(StatusCodes.BadRequest, entity = "No cookies, no service!!!"))
    }.handleNotFound {
      complete((StatusCodes.NotFound, "Not here!"))
    }.result()

    val route =
      path("welcome"){
        get{
          complete {
            "welcome to rest service"
          }
        }
      } ~
      path("demo"){
        get{
          complete {
            "welcome to demonstration"
          }
        }
      } ~
      path("wrong"){
        reject{
          ValidationRejection("Invalid path", None)
        }
      }

    Http().bindAndHandle(route, "localhost", 8090)

  }

} 
Example 169
Source File: ConnectionLevel.scala    From introduction-to-akkahttp   with Apache License 2.0 5 votes vote down vote up
package com.shashank.akkahttp.basic.client

import akka.actor.ActorSystem
import akka.http.javadsl.settings.ClientConnectionSettings
import akka.http.scaladsl.Http
import akka.http.scaladsl.model._
import akka.stream.ActorMaterializer
import akka.stream.scaladsl._

import scala.concurrent.{Await, Future}
import scala.concurrent.duration._


object ConnectionLevel {

  def main(args: Array[String]) {
    implicit val sys = ActorSystem("IntroductionToAkkaHttp")
    implicit val mat = ActorMaterializer()

    val connectionFlow = Http().outgoingConnection("localhost", 8090)

    val responseFuture =
      Source.single(HttpRequest(uri = "/welcome"))
        .via(connectionFlow)
        .runWith(Sink.head)

    val response = Await.result(responseFuture, 10 seconds)
    response.entity.dataBytes.map(_.utf8String).runForeach(println)
    sys.terminate()
  }

} 
Example 170
Source File: ReverseProxy.scala    From introduction-to-akkahttp   with Apache License 2.0 5 votes vote down vote up
package com.shashank.akkahttp.basic.serving

import akka.actor.ActorSystem
import akka.http.scaladsl.Http
import akka.http.scaladsl.model._
import akka.http.scaladsl.model.headers.{Host, `Access-Control-Allow-Origin`}
import akka.stream.scaladsl.Flow
import akka.stream.{ActorMaterializer, Materializer}


object ReverseProxy {

  def main(args: Array[String]) {
    implicit val sys = ActorSystem("IntroductionToAkkaHttp")
    implicit val mat:Materializer = ActorMaterializer()

    val redirectHost = "localhost"
    val redirectPort = 8090

    val requestFlow = Flow.fromFunction[HttpRequest, HttpRequest]( request => {
      request
        .withUri(request.uri.withAuthority(redirectHost, redirectPort))
        .mapHeaders(headers => headers.filterNot(_.lowercaseName() == Host.lowercaseName))
        .addHeader(Host(redirectHost, redirectPort))
    })

    val outgoingConnection = Http().outgoingConnection(redirectHost, redirectPort)

    val responseFlow = Flow.fromFunction[HttpResponse, HttpResponse]( response => {
      response.withHeaders(`Access-Control-Allow-Origin`.*)
    })

    Http().bindAndHandle(requestFlow via outgoingConnection via responseFlow, "localhost", 8080)
  }

} 
Example 171
Source File: FriendJournalReader.scala    From Akka-Cookbook   with MIT License 5 votes vote down vote up
package com.packt.chapter6

import akka.actor.ActorSystem
import akka.persistence.Recovery
import akka.persistence.query.PersistenceQuery
import akka.persistence.query.journal.leveldb.scaladsl.LeveldbReadJournal
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.Sink
import scala.concurrent.duration._

object FriendJournalReader extends App {
  implicit val system = ActorSystem()
  import system.dispatcher
  implicit val mat = ActorMaterializer()(system)
  val queries = PersistenceQuery(system).readJournalFor[LeveldbReadJournal](LeveldbReadJournal.Identifier)

  val laura = system.actorOf(FriendActor.props("Laura", Recovery()))
  val maria = system.actorOf(FriendActor.props("Maria", Recovery()))
  laura ! AddFriend(Friend("Hector"))
  laura ! AddFriend(Friend("Nancy"))
  maria ! AddFriend(Friend("Oliver"))
  maria ! AddFriend(Friend("Steve"))
  system.scheduler.scheduleOnce(5 second, maria, AddFriend(Friend("Steve")))
  system.scheduler.scheduleOnce(10 second, maria, RemoveFriend(Friend("Oliver")))
  Thread.sleep(2000)

  queries.allPersistenceIds().map(id => system.log.info(s"Id received [$id]")).to(Sink.ignore).run()
  queries.eventsByPersistenceId("Laura").map(e => log(e.persistenceId, e.event)).to(Sink.ignore).run()
  queries.eventsByPersistenceId("Maria").map(e => log(e.persistenceId, e.event)).to(Sink.ignore).run()

  def log(id: String, evt: Any) = system.log.info(s"Id [$id] Event [$evt]")
} 
Example 172
Source File: SafePersistenceActorShutdownApp.scala    From Akka-Cookbook   with MIT License 5 votes vote down vote up
package com.packt.chapter6

import akka.actor.{ActorSystem, PoisonPill, Props}

object SafePersistenceActorShutdownApp extends App {
  val system = ActorSystem("safe-shutdown")
  val persistentActor1 = system.actorOf(Props[SamplePersistenceActor])
  val persistentActor2 = system.actorOf(Props[SamplePersistenceActor])

  persistentActor1 ! UserUpdate("foo", Add)
  persistentActor1 ! UserUpdate("foo", Add)
  persistentActor1 ! PoisonPill
  persistentActor2 ! UserUpdate("foo", Add)
  persistentActor2 ! UserUpdate("foo", Add)
  persistentActor2 ! ShutdownPersistentActor
} 
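
The app above stops one actor with PoisonPill and the other with an application-level ShutdownPersistentActor message. The distinction matters because a persistent actor stashes incoming commands while a persist call is in flight, and PoisonPill is processed from the normal mailbox, so it can stop the actor before the stashed commands are ever persisted. A hypothetical sketch of an actor handling such a message (not the book's SamplePersistenceActor):

import akka.persistence.PersistentActor

case object SafeShutdown

class SafeShutdownActor extends PersistentActor {
  override val persistenceId: String = "safe-shutdown-actor"

  private var events: List[String] = Nil

  override def receiveRecover: Receive = {
    case evt: String => events = evt :: events
  }

  override def receiveCommand: Receive = {
    case SafeShutdown =>
      // Handled as an ordinary command, i.e. only after previously
      // stashed commands have been fully persisted.
      context.stop(self)
    case cmd: String =>
      persist(cmd) { evt => events = evt :: events }
  }
}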
Example 173
Source File: FriendRecoveryApp.scala    From Akka-Cookbook   with MIT License 5 votes vote down vote up
package com.packt.chapter6

import akka.actor.ActorSystem
import akka.persistence.{Recovery, SnapshotSelectionCriteria}

object FriendApp extends App {
  val system = ActorSystem("test")
  val hector = system.actorOf(FriendActor.props("Hector", Recovery()))
  hector ! AddFriend(Friend("Laura"))
  hector ! AddFriend(Friend("Nancy"))
  hector ! AddFriend(Friend("Oliver"))
  hector ! AddFriend(Friend("Steve"))
  hector ! "snap"
  hector ! RemoveFriend(Friend("Oliver"))
  hector ! "print"
  Thread.sleep(2000)
  system.terminate()
}

object FriendRecoveryDefault extends App {
  val system = ActorSystem("test")
  val hector = system.actorOf(FriendActor.props("Hector", Recovery()))
  hector ! "print"
  Thread.sleep(2000)
  system.terminate()
}

object FriendRecoveryOnlyEvents extends App {
  val system = ActorSystem("test")
  val recovery = Recovery(fromSnapshot = SnapshotSelectionCriteria.None)
  val hector = system.actorOf(FriendActor.props("Hector", recovery))
  hector ! "print"
  Thread.sleep(2000)
  system.terminate()
}

object FriendRecoveryEventsSequence extends App {
  val system = ActorSystem("test")
  val recovery = Recovery(fromSnapshot = SnapshotSelectionCriteria.None, toSequenceNr = 2)
  val hector = system.actorOf(FriendActor.props("Hector", recovery))
  Thread.sleep(2000)
  system.terminate()
}

object FriendRecoveryEventsReplay extends App {
  val system = ActorSystem("test")
  val recovery = Recovery(fromSnapshot = SnapshotSelectionCriteria.None, replayMax = 3)
  val hector = system.actorOf(FriendActor.props("Hector", recovery))
  Thread.sleep(2000)
  system.terminate()
} 
Example 174
Source File: PersistentFSMApp.scala    From Akka-Cookbook   with MIT License 5 votes vote down vote up
package com.packt.chapter6

import akka.actor.ActorSystem

object PersistentFSMApp extends App {
  val system = ActorSystem("test")
  val actor1 = createActor("uid1")
  actor1 ! Initialize(4)
  actor1 ! Mark
  actor1 ! Mark
  Thread.sleep(2000)
  system.stop(actor1)
  val actor2 = createActor("uid1")
  actor2 ! Mark
  actor2 ! Mark

  def createActor(id: String) = system.actorOf(PersistentFSMActor.props(id))
} 
Example 175
Source File: SamplePersistenceApp.scala    From Akka-Cookbook   with MIT License 5 votes vote down vote up
package com.packt.chapter6

import akka.actor.{ActorSystem, Props}

object SamplePersistenceApp extends App {
  val system = ActorSystem("example")
  val persistentActor1 = system.actorOf(Props[SamplePersistenceActor])

  persistentActor1 ! UserUpdate("foo", Add)
  persistentActor1 ! UserUpdate("baz", Add)
  persistentActor1 ! "snap"
  persistentActor1 ! "print"
  persistentActor1 ! UserUpdate("baz", Remove)
  persistentActor1 ! "print"

  Thread.sleep(2000)
  system.stop(persistentActor1)

  val persistentActor2 = system.actorOf(Props[SamplePersistenceActor])
  persistentActor2 ! "print"

  Thread.sleep(2000)
  system.terminate()
} 
Example 176
Source File: HandlingExceptionsServer.scala    From Akka-Cookbook   with MIT License 5 votes vote down vote up
package com.packt.chapter9

import akka.actor.{ActorRef, ActorSystem, Props}
import akka.http.scaladsl.server.HttpApp
import akka.http.scaladsl.server.Directives._
import akka.http.scaladsl.settings.ServerSettings
import com.typesafe.config.ConfigFactory
import akka.pattern.ask
import akka.util.Timeout

import scala.concurrent.duration._

class HandlingExceptionsServer(someActor: ActorRef) extends HttpApp with RouteExceptionHandler {
  implicit val timeout = Timeout(500 millis)

  val route =
    handleExceptions(routeExceptionHandler) {
      path("divide") {
        parameters('a.as[Int], 'b.as[Int]) { (a, b) =>
          complete {
            val result = a / b
            s"Result is: $result"
          }
        }
      } ~
        path("futureTimingOut") {
          onSuccess(someActor ? "Something") {
            case _ => complete("Actor finished processing.")
          }
        }
    }

}

object HandlingExceptionsApplication extends App {
  val actorSystem = ActorSystem()
  val unresponsiveActor = actorSystem.actorOf(Props[UnresponsiveActor])
  new HandlingExceptionsServer(unresponsiveActor).startServer("0.0.0.0", 8088, ServerSettings(ConfigFactory.load))
} 
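
The RouteExceptionHandler trait mixed into the server above is not part of this listing. A hypothetical sketch of what it could look like, given that the two routes can raise an ArithmeticException (division by zero) and an AskTimeoutException (the unresponsive actor):

package com.packt.chapter9

import akka.http.scaladsl.model.StatusCodes
import akka.http.scaladsl.server.Directives._
import akka.http.scaladsl.server.ExceptionHandler
import akka.pattern.AskTimeoutException

trait RouteExceptionHandler {

  val routeExceptionHandler = ExceptionHandler {
    case _: ArithmeticException =>
      complete(StatusCodes.BadRequest -> "Bad numbers, bad result!")
    case _: AskTimeoutException =>
      complete(StatusCodes.ServiceUnavailable -> "Actor timed out")
  }
}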
Example 177
Source File: EncodingDecodingClientApplication.scala    From Akka-Cookbook   with MIT License 5 votes vote down vote up
package com.packt.chapter9

import akka.actor.ActorSystem
import akka.http.scaladsl.Http
import akka.http.scaladsl.coding.{Encoder, Gzip, NoCoding}
import akka.http.scaladsl.model._
import akka.http.scaladsl.model.headers._
import akka.http.scaladsl.model.headers.HttpEncodings._
import akka.http.scaladsl.model.HttpMethods._
import headers.HttpEncodings
import akka.stream.ActorMaterializer
import akka.util.ByteString

import scala.concurrent.duration._
import scala.concurrent.Future
import scala.util.{Failure, Success}

object EncodingDecodingClientApplication extends App {
  implicit val system = ActorSystem()
  implicit val materializer = ActorMaterializer()

  import system.dispatcher

  val http = Http()
  val uriServer = "http://localhost:8088/"

  val requests = Seq(
    HttpRequest(POST, uriServer, List(`Accept-Encoding`(gzip)), HttpEntity("Hello!")),
    HttpRequest(POST, uriServer, List(`Content-Encoding`(gzip), `Accept-Encoding`(gzip)),
      HttpEntity(compress("Hello compressed!", Gzip)))
  )

  Future.traverse(requests)(http.singleRequest(_).map(decodeResponse)) andThen {
    case Success(responses) => responses.foreach(response =>
      response.entity.toStrict(5 seconds).map(_.data.decodeString("UTF-8")).andThen {
        case Success(content) => println(s"Response: $content")
        case _ =>
      })
    case Failure(e) => println(s"request failed $e")
  }

  private def decodeResponse(response: HttpResponse) = {
    val decoder = response.encoding match {
      case HttpEncodings.gzip => Gzip
      case HttpEncodings.identity => NoCoding
    }

    decoder.decode(response)
  }

  private def compress(input: String, encoder: Encoder): ByteString =
    encoder.encode(ByteString(input))
} 
Example 178
Source File: UploadingFileClient.scala    From Akka-Cookbook   with MIT License 5 votes vote down vote up
package com.packt.chapter9

import java.nio.file.Paths

import akka.actor.ActorSystem
import akka.http.scaladsl.Http
import akka.http.scaladsl.model.HttpMethods._
import akka.http.scaladsl.model._
import akka.stream.ActorMaterializer

import scala.concurrent.Future
import scala.concurrent.duration._
import scala.util.{Failure, Success}

object UploadingFileClient extends App {

  implicit val system = ActorSystem()
  implicit val materializer = ActorMaterializer()
  import system.dispatcher

  val http = Http()
  val entity = Multipart.FormData.fromPath(
    "file",
    ContentTypes.`text/plain(UTF-8)`,
    Paths.get("./src/main/resources/testfile.txt")
  ).toEntity()
  val uris = Seq(
    "http://localhost:8088/regularupload",
    "http://localhost:8088/streamupload"
  )
  val requests = uris.map(uri => HttpRequest(POST, uri, Nil, entity))

  Future.traverse(requests)(http.singleRequest(_)) andThen {
    case Success(responses) => responses.foreach(response =>
      response.entity.toStrict(5 seconds).map(_.data.utf8String).andThen {
        case Success(content) => println(s"Response: $content")
        case _ =>
      })
    case Failure(e) => println(s"request failed $e")
  }
} 
Example 179
Source File: RequestLevelClientAPIApplication.scala    From Akka-Cookbook   with MIT License 5 votes vote down vote up
package com.packt.chapter9

import akka.actor.ActorSystem
import akka.http.scaladsl.Http
import akka.http.scaladsl.model.HttpRequest
import akka.stream.ActorMaterializer
import scala.concurrent.duration._
import scala.util.Success

object RequestLevelClientAPIApplication extends App {

  implicit val system = ActorSystem()
  implicit val materializer = ActorMaterializer()
  implicit val executionContext = system.dispatcher

  val akkaToolkitRequest = HttpRequest(uri = "https://api.github.com/repos/akka/akka-http")
  val responseFuture = Http().singleRequest(akkaToolkitRequest)

  responseFuture.andThen {
    case Success(response) =>
      response.entity.toStrict(5 seconds).map(_.data.decodeString("UTF-8")).andThen {
        case Success(json) =>
          val pattern = """.*"open_issues":(.*?),.*""".r
          pattern.findAllIn(json).matchData foreach { m =>
            println(s"There are ${m.group(1)} open issues in Akka Http.")
            materializer.shutdown()
            system.terminate()
          }
        case _ =>
      }
    case _ => println(s"request failed")
  }
} 
Example 180
Source File: HostLevelClientAPIApplication.scala    From Akka-Cookbook   with MIT License 5 votes vote down vote up
package com.packt.chapter9

import akka.actor.ActorSystem
import akka.http.scaladsl.Http
import akka.http.scaladsl.model.HttpRequest
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.{Sink, Source}

import scala.concurrent.duration._
import scala.util.{Failure, Success}

object HostLevelClientAPIApplication extends App {
  implicit val system = ActorSystem()
  implicit val materializer = ActorMaterializer()
  implicit val executionContext = system.dispatcher

  val poolClientFlow = Http().cachedHostConnectionPoolHttps[String]("api.github.com")
  val akkaToolkitRequest = HttpRequest(uri = "/repos/akka/akka-http") -> """.*"open_issues":(.*?),.*"""
  val responseFuture = Source.single(akkaToolkitRequest).via(poolClientFlow).runWith(Sink.head)

  responseFuture.andThen {
    case Success(result) =>
      val (tryResponse, regex) = result
      tryResponse match {
        case Success(response) =>
          response.entity.toStrict(5 seconds).map(_.data.decodeString("UTF-8")).andThen {
            case Success(json) =>
              val pattern = regex.r
              pattern.findAllIn(json).matchData foreach { m =>
                println(s"There are ${m.group(1)} open issues in Akka Http.")
                materializer.shutdown()
                system.terminate()
              }
            case _ =>
          }
        case _ => println("request failed")
      }
    case _ => println("request failed")
  }
} 
Example 181
Source File: ConnectionLevelClientAPIApplication.scala    From Akka-Cookbook   with MIT License 5 votes vote down vote up
package com.packt.chapter9

import akka.actor.ActorSystem
import akka.http.scaladsl.Http
import akka.http.scaladsl.model.HttpRequest
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.{Sink, Source}

import scala.util.{Failure, Success}
import scala.concurrent.duration._

object ConnectionLevelClientAPIApplication extends App {

  implicit val system = ActorSystem()
  implicit val materializer = ActorMaterializer()
  implicit val executionContext = system.dispatcher

  val connectionFlow = Http().outgoingConnectionHttps("api.github.com")
  val akkaToolkitRequest = HttpRequest(uri = "/repos/akka/akka-http")

  val responseFuture = Source.single(akkaToolkitRequest).via(connectionFlow).runWith(Sink.head)

  responseFuture.andThen {
    case Success(response) =>
      response.entity.toStrict(5 seconds).map(_.data.decodeString("UTF-8")).andThen {
        case Success(json) =>
          val pattern = """.*"open_issues":(.*?),.*""".r
          pattern.findAllIn(json).matchData foreach { m =>
            println(s"There are ${m.group(1)} open issues in Akka Http.")
            materializer.shutdown()
            system.terminate()
          }
        case _ =>
      }
    case _ => println("request failed")
  }
} 
Example 182
Source File: SimpleStreamsApplication.scala    From Akka-Cookbook   with MIT License 5 votes vote down vote up
package com.packt.chapter8

import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.{Sink, Source}

object SimpleStreamsApplication extends App {

  implicit val actorSystem = ActorSystem("SimpleStream")
  implicit val actorMaterializer = ActorMaterializer()

  val fileList = List(
    "src/main/resources/testfile1.text",
    "src/main/resources/testfile2.txt",
    "src/main/resources/testfile3.txt")

  val stream = Source(fileList)
    .map(new java.io.File(_))
    .filter(_.exists())
    .filter(_.length() != 0)
    .to(Sink.foreach(f => println(s"Absolute path: ${f.getAbsolutePath}")))

  stream.run()
} 
Example 183
Source File: ComposingStreamsApplication.scala    From Akka-Cookbook   with MIT License 5 votes vote down vote up
package com.packt.chapter8

import java.io.File

import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.{Sink, Source}

object ComposingStreamsApplication extends App {

  implicit val actorSystem = ActorSystem("SimpleStream")
  implicit val actorMaterializer = ActorMaterializer()

  val fileList = List(
    "src/main/resources/testfile1.text",
    "src/main/resources/testfile2.txt",
    "src/main/resources/testfile3.txt")

  val stream = Source(fileList)
    .map(new File(_))
    .filter(_.exists())
    .filter(_.length() != 0)
    .to(Sink.foreach(f => println(s"Absolute path: ${f.getAbsolutePath}")))

  stream.run()
} 
Example 184
Source File: WorkingWithGraphsApplication.scala    From Akka-Cookbook   with MIT License 5 votes vote down vote up
package com.packt.chapter8

import akka.actor.ActorSystem
import akka.stream._
import akka.stream.scaladsl.{Balance, Broadcast, Flow, GraphDSL, Merge, RunnableGraph, Sink, Source}
import scala.concurrent.duration._
import scala.util.Random


object WorkingWithGraphsApplication extends App {

  implicit val actorSystem = ActorSystem("WorkingWithGraphs")
  implicit val actorMaterializer = ActorMaterializer()

  trait MobileMsg {
    def id = Random.nextInt(1000)
    def toGenMsg(origin: String) = GenericMsg(id, origin)
  }
  class AndroidMsg extends MobileMsg
  class IosMsg extends MobileMsg
  case class GenericMsg(id: Int, origin: String)

  val graph = RunnableGraph.fromGraph(GraphDSL.create() { implicit builder =>
    import GraphDSL.Implicits._

    //Sources
    val androidNotification = Source.tick(2 seconds, 500 millis, new AndroidMsg)
    val iOSNotification = Source.tick(700 millis, 600 millis, new IosMsg)

    //Flow
    val groupAndroid = Flow[AndroidMsg].map(_.toGenMsg("ANDROID")).groupedWithin(5, 5 seconds).async
    val groupIos = Flow[IosMsg].map(_.toGenMsg("IOS")).groupedWithin(5, 5 seconds).async
    def counter = Flow[Seq[GenericMsg]].via(new StatefulCounterFlow())
    def mapper = Flow[Seq[GenericMsg]].mapConcat(_.toList)

    //Junctions
    val aBroadcast = builder.add(Broadcast[Seq[GenericMsg]](2))
    val iBroadcast = builder.add(Broadcast[Seq[GenericMsg]](2))
    val balancer = builder.add(Balance[Seq[GenericMsg]](2))
    val notificationMerge = builder.add(Merge[Seq[GenericMsg]](2))
    val genericNotificationMerge = builder.add(Merge[GenericMsg](2))

    def counterSink(s: String) = Sink.foreach[Int](x => println(s"$s: [$x]"))

    //Graph
    androidNotification ~> groupAndroid ~> aBroadcast ~> counter ~> counterSink("Android")
                                           aBroadcast ~> notificationMerge
                                           iBroadcast ~> notificationMerge
    iOSNotification     ~> groupIos     ~> iBroadcast ~> counter ~> counterSink("Ios")

    notificationMerge ~> balancer ~> mapper.async ~> genericNotificationMerge
                         balancer ~> mapper.async ~> genericNotificationMerge

    genericNotificationMerge ~> Sink.foreach(println)

    ClosedShape
  })

  graph.run()
} 
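The example above references a StatefulCounterFlow whose source is not shown. A plausible sketch, assuming GenericMsg is reachable via the enclosing object, is a custom GraphStage that keeps a running count of all messages seen and emits the updated total for each incoming batch:

import akka.stream.{Attributes, FlowShape, Inlet, Outlet}
import akka.stream.stage.{GraphStage, GraphStageLogic, InHandler, OutHandler}
import com.packt.chapter8.WorkingWithGraphsApplication.GenericMsg

class StatefulCounterFlow extends GraphStage[FlowShape[Seq[GenericMsg], Int]] {
  val in: Inlet[Seq[GenericMsg]] = Inlet("StatefulCounter.in")
  val out: Outlet[Int] = Outlet("StatefulCounter.out")
  override val shape: FlowShape[Seq[GenericMsg], Int] = FlowShape(in, out)

  override def createLogic(attrs: Attributes): GraphStageLogic =
    new GraphStageLogic(shape) {
      private var count = 0

      setHandler(in, new InHandler {
        override def onPush(): Unit = {
          count += grab(in).size // accumulate across batches
          push(out, count)
        }
      })
      setHandler(out, new OutHandler {
        override def onPull(): Unit = pull(in)
      })
    }
}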
Example 185
Source File: HandlingErrorsApplication.scala    From Akka-Cookbook   with MIT License 5 votes vote down vote up
package com.packt.chapter8

import akka.actor.ActorSystem
import akka.stream.{ActorAttributes, ActorMaterializer, ActorMaterializerSettings, Supervision}
import akka.stream.scaladsl._

object HandlingErrorsApplication extends App {

  implicit val actorSystem = ActorSystem("HandlingErrors")

  val streamDecider: Supervision.Decider = {
    case e: IndexOutOfBoundsException =>
      println("Dropping element because of IndexOufOfBoundException. Resuming.")
      Supervision.Resume
    case _ => Supervision.Stop
  }

  val flowDecider: Supervision.Decider = {
    case e: IllegalArgumentException =>
      println("Dropping element because of IllegalArgumentException. Restarting.")
      Supervision.Restart
    case _ => Supervision.Stop
  }

  val actorMaterializerSettings = ActorMaterializerSettings(actorSystem).withSupervisionStrategy(streamDecider)
  implicit val actorMaterializer = ActorMaterializer(actorMaterializerSettings)

  val words = List("Handling", "Errors", "In", "Akka", "Streams", "")

  val flow = Flow[String].map(word => {
    if(word.length == 0) throw new IllegalArgumentException("Empty words are not allowed")
    word
  }).withAttributes(ActorAttributes.supervisionStrategy(flowDecider))
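  // With the sample data: the empty string fails the flow's map with an
  // IllegalArgumentException (flowDecider -> Supervision.Restart), and short
  // words such as "In" make array(2) throw IndexOutOfBoundsException
  // (streamDecider -> Supervision.Resume), so both elements are dropped
  // instead of failing the whole stream.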

  Source(words).via(flow).map(array => array(2)).to(Sink.foreach(println)).run()
} 
Example 186
Source File: TransformingStreamsApplication.scala    From Akka-Cookbook   with MIT License 5 votes vote down vote up
package com.packt.chapter8

import java.nio.file.Paths

import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import akka.stream.scaladsl._

object TransformingStreamsApplication extends App {

  implicit val actorSystem = ActorSystem("TransformingStream")
  implicit val actorMaterializer = ActorMaterializer()

  val MaxGroups = 1000

  val path = Paths.get("src/main/resources/gzipped-file.gz")

  val stream = FileIO.fromPath(path)
    .via(Compression.gunzip())
    .map(_.utf8String.toUpperCase)
    .mapConcat(_.split(" ").toList)
    .collect { case w if w.nonEmpty =>
      w.replaceAll("""[\p{Punct}&&[^.]]""", "").replaceAll(System.lineSeparator(), "") }
    .groupBy(MaxGroups, identity)
    .map(_ -> 1)
    .reduce((l, r) => (l._1, l._2 + r._2))
    .mergeSubstreams
    .to(Sink.foreach(println))

  stream.run()
} 
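The recipe reads src/main/resources/gzipped-file.gz. A small helper sketch (assumed; not part of the original) produces such a file with the same Compression API:

import java.nio.file.Paths

import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.{Compression, FileIO, Source}
import akka.util.ByteString

object CreateGzippedFile extends App {
  implicit val actorSystem = ActorSystem("CreateGzip")
  implicit val actorMaterializer = ActorMaterializer()

  // Write a small gzipped text file for the word-count stream to consume.
  Source.single(ByteString("this is a test file. it feeds the word count stream."))
    .via(Compression.gzip)
    .runWith(FileIO.toPath(Paths.get("src/main/resources/gzipped-file.gz")))
}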
Example 187
Source File: IntegratingWithActorsApplication.scala    From Akka-Cookbook   with MIT License 5 votes vote down vote up
package com.packt.chapter8

import akka.actor.{ActorSystem, Props}
import akka.stream.{ActorMaterializer, OverflowStrategy}
import akka.stream.scaladsl._
import akka.pattern.ask
import akka.util.Timeout
import com.packt.chapter8.SinkActor.{AckSinkActor, CompletedSinkActor, InitSinkActor}

import scala.concurrent.duration._

object IntegratingWithActorsApplication extends App {

  implicit val actorSystem = ActorSystem("IntegratingWithActors")
  implicit val actorMaterializer = ActorMaterializer()

  implicit val askTimeout = Timeout(5 seconds)
  val stringCleaner = actorSystem.actorOf(Props[StringCleanerActor])
  val sinkActor = actorSystem.actorOf(Props[SinkActor])

  val source = Source.queue[String](100, OverflowStrategy.backpressure)
  val sink = Sink.actorRefWithAck[String](sinkActor, InitSinkActor, AckSinkActor, CompletedSinkActor)
  val queue = source
    .mapAsync(parallelism = 5)(elem => (stringCleaner ? elem).mapTo[String])
    .to(sink)
    .run()

  actorSystem.actorOf(SourceActor.props(queue))
} 
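Hedged sketches of the three actors the example references but does not show (SinkActor with its protocol, StringCleanerActor, SourceActor); the message names come from the imports above, while the bodies are illustrative assumptions.

import akka.actor.{Actor, Props}
import akka.stream.scaladsl.SourceQueueWithComplete

import scala.concurrent.duration._

object SinkActor {
  case object InitSinkActor
  case object AckSinkActor
  case object CompletedSinkActor
}

class SinkActor extends Actor {
  import SinkActor._
  def receive = {
    case InitSinkActor =>
      sender() ! AckSinkActor // tell the stream we are ready
    case CompletedSinkActor =>
      context.stop(self) // stream finished
    case msg: String =>
      println(s"SinkActor received: $msg")
      sender() ! AckSinkActor // ack each element so backpressure keeps flowing
  }
}

class StringCleanerActor extends Actor {
  def receive = {
    case s: String => sender() ! s.replaceAll("""[^\w\s]""", "").trim
  }
}

object SourceActor {
  def props(queue: SourceQueueWithComplete[String]) = Props(new SourceActor(queue))
}

class SourceActor(queue: SourceQueueWithComplete[String]) extends Actor {
  import context.dispatcher
  override def preStart(): Unit =
    context.system.scheduler.schedule(0 seconds, 1 second, self, "tick")
  def receive = {
    case "tick" => queue.offer("Hello, Akka Streams!") // push an element into the stream
  }
}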
Example 188
Source File: ProcessingKafkaApplication.scala    From Akka-Cookbook   with MIT License 5 votes vote down vote up
package com.packt.chapter8

import akka.actor.ActorSystem
import akka.kafka.scaladsl.{Consumer, Producer}
import akka.kafka.{ConsumerSettings, ProducerSettings, Subscriptions}
import akka.stream.{ActorMaterializer, ClosedShape}
import akka.stream.scaladsl.{Flow, GraphDSL, RunnableGraph, Sink, Source}
import org.apache.kafka.clients.consumer.{ConsumerConfig, ConsumerRecord}
import org.apache.kafka.clients.producer.ProducerRecord
import org.apache.kafka.common.TopicPartition
import org.apache.kafka.common.serialization.{ByteArrayDeserializer, ByteArraySerializer, StringDeserializer, StringSerializer}

import scala.concurrent.duration._

object ProcessingKafkaApplication extends App {
  implicit val actorSystem = ActorSystem("SimpleStream")
  implicit val actorMaterializer = ActorMaterializer()

  val bootstrapServers = "localhost:9092"
  val kafkaTopic = "akka_streams_topic"
  val partition = 0
  val subscription = Subscriptions.assignment(new TopicPartition(kafkaTopic, partition))

  val consumerSettings = ConsumerSettings(actorSystem, new ByteArrayDeserializer, new StringDeserializer)
    .withBootstrapServers(bootstrapServers)
    .withGroupId("akka_streams_group")
    .withProperty(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest")

  val producerSettings = ProducerSettings(actorSystem, new ByteArraySerializer, new StringSerializer)
    .withBootstrapServers(bootstrapServers)

  val runnableGraph = RunnableGraph.fromGraph(GraphDSL.create() { implicit builder =>
    import GraphDSL.Implicits._

    val tickSource = Source.tick(0 seconds, 5 seconds, "Hello from Akka Streams using Kafka!")
    val kafkaSource = Consumer.plainSource(consumerSettings, subscription)
    val kafkaSink = Producer.plainSink(producerSettings)
    val printlnSink = Sink.foreach(println)

    val mapToProducerRecord = Flow[String].map(elem => new ProducerRecord[Array[Byte], String](kafkaTopic, elem))
    val mapFromConsumerRecord = Flow[ConsumerRecord[Array[Byte], String]].map(record => record.value())

    tickSource  ~> mapToProducerRecord   ~> kafkaSink
    kafkaSource ~> mapFromConsumerRecord ~> printlnSink

    ClosedShape
  })

  runnableGraph.run()
} 
Example 189
Source File: ProcessingRabbitMQApplication.scala    From Akka-Cookbook   with MIT License 5 votes vote down vote up
package com.packt.chapter8

import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import akka.stream.alpakka.amqp._
import akka.stream.alpakka.amqp.scaladsl.{AmqpSink, AmqpSource}
import akka.util.ByteString

object ProcessingRabbitMQApplication extends App {

  implicit val actorSystem = ActorSystem("SimpleStream")
  implicit val actorMaterializer = ActorMaterializer()

  val consumerQueueName = "akka_streams_consumer_queue"
  val consumerQueueDeclaration = QueueDeclaration(consumerQueueName)
  val sourceDeclarations = Seq(consumerQueueDeclaration)

  val exchangeName = "akka_streams_exchange"
  val exchangeDeclaration = ExchangeDeclaration(exchangeName, "direct")
  val destinationQueueName = "akka_streams_destination_queue"
  val destinationQueueDeclaration = QueueDeclaration(destinationQueueName)
  val bindingDeclaration = BindingDeclaration(destinationQueueName, exchangeName)
  val sinkDeclarations = Seq(exchangeDeclaration, destinationQueueDeclaration, bindingDeclaration)

  val credentials = AmqpCredentials("guest", "guest")
  val connectionSetting = AmqpConnectionDetails("127.0.0.1", 5672, Some(credentials))
  val amqpSourceConfig = NamedQueueSourceSettings(connectionSetting, consumerQueueName, sourceDeclarations)
  val rabbitMQSource = AmqpSource(amqpSourceConfig, 1000)
  val amqpSinkConfig = AmqpSinkSettings(connectionSetting, Some(exchangeName), None, sinkDeclarations)
  val rabbitMQSink = AmqpSink(amqpSinkConfig)

  val stream = rabbitMQSource
      .map(incomingMessage => {
        val upperCased = incomingMessage.bytes.utf8String.toUpperCase
        OutgoingMessage(bytes = ByteString(upperCased),
          immediate = false,
          mandatory = false,
          props = None)
      })
    .to(rabbitMQSink)

  stream.run()
} 
Example 190
Source File: ModularizingStreamsApplication.scala    From Akka-Cookbook   with MIT License 5 votes vote down vote up
package com.packt.chapter8

import java.nio.file.Paths

import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import akka.stream.scaladsl._
import akka.util.ByteString

object ModularizingStreamsApplication extends App {

  implicit val actorSystem = ActorSystem("TransformingStream")
  implicit val actorMaterializer = ActorMaterializer()

  val MaxGroups = 1000

  val path = Paths.get("src/main/resources/gzipped-file.gz")

  
  // The reusable pieces referenced below; recovered from the near-identical
  // pipeline in TransformingStreamsApplication (Example 186).
  val source = FileIO.fromPath(path)
  val gunzip = Compression.gunzip()
  val utf8UppercaseMapper = Flow[ByteString].map(_.utf8String.toUpperCase)
  val utf8LowercaseMapper = Flow[ByteString].map(_.utf8String.toLowerCase)
  val splitter = Flow[String].mapConcat(_.split(" ").toList)
  val punctuationMapper = Flow[String].map(
    _.replaceAll("""[\p{Punct}&&[^.]]""", "").replaceAll(System.lineSeparator(), ""))
  val filterEmptyElements = Flow[String].filter(_.nonEmpty)
  val wordCountFlow = Flow[String]
    .groupBy(MaxGroups, identity)
    .map(_ -> 1)
    .reduce((l, r) => (l._1, l._2 + r._2))
    .mergeSubstreams
  val printlnSink = Sink.foreach[Any](println)

  val streamUppercase = source
      .via(gunzip)
      .via(utf8UppercaseMapper)
      .via(splitter)
      .via(punctuationMapper)
      .via(filterEmptyElements)
      .via(wordCountFlow)
      .to(printlnSink)

  val streamLowercase = source
    .via(gunzip)
    .via(utf8LowercaseMapper)
    .via(splitter)
    .via(punctuationMapper)
    .via(filterEmptyElements)
    .via(wordCountFlow)
    .to(printlnSink)

  streamUppercase.run()
  streamLowercase.run()

//  val sourceGunzip = source.via(gunzip)
//  val reusableProcessingFlow = Flow[String].via(splitter)
//    .via(punctuationMapper)
//    .via(filterEmptyElements)
//    .via(wordCountFlow)
//
//  val streamLowercase = sourceGunzip
//    .via(utf8LowercaseMapper)
//    .via(reusableProcessingFlow)
//    .to(printlnSink)
} 
Example 191
Source File: PipeliningParallelizing.scala    From Akka-Cookbook   with MIT License 5 votes vote down vote up
package com.packt.chapter8

import akka.NotUsed
import akka.actor.ActorSystem
import akka.stream.{ActorMaterializer, FlowShape}
import akka.stream.scaladsl.{Balance, Flow, GraphDSL, Merge, Sink, Source}

import scala.util.Random

trait PipeliningParallelizing extends App {

  implicit val actorSystem = ActorSystem("PipeliningParallelizing")
  implicit val actorMaterializer = ActorMaterializer()

  case class Wash(id: Int)
  case class Dry(id: Int)
  case class Done(id: Int)

  val tasks = (1 to 5).map(Wash)

  def washStage = Flow[Wash].map(wash => {
    val sleepTime = Random.nextInt(3) * 1000
    println(s"Washing ${wash.id}. It will take $sleepTime milliseconds.")
    Thread.sleep(sleepTime)
    Dry(wash.id)
  })

  def dryStage = Flow[Dry].map(dry => {
    val sleepTime = Random.nextInt(3) * 1000
    println(s"Drying ${dry.id}. It will take $sleepTime milliseconds.")
    Thread.sleep(sleepTime)
    Done(dry.id)
  })

  val parallelStage = Flow.fromGraph(GraphDSL.create() { implicit builder =>
    import GraphDSL.Implicits._

    val dispatchLaundry = builder.add(Balance[Wash](3))
    val mergeLaundry = builder.add(Merge[Done](3))

    dispatchLaundry.out(0) ~> washStage.async ~> dryStage.async ~> mergeLaundry.in(0)
    dispatchLaundry.out(1) ~> washStage.async ~> dryStage.async ~> mergeLaundry.in(1)
    dispatchLaundry.out(2) ~> washStage.async ~> dryStage.async ~> mergeLaundry.in(2)

    FlowShape(dispatchLaundry.in, mergeLaundry.out)
  })

  def runGraph(testingFlow: Flow[Wash, Done, NotUsed]) = Source(tasks).via(testingFlow).to(Sink.foreach(println)).run()
} 
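Two driver sketches for the trait above (object names are assumptions; the original applications are not shown). The first runs the stages as a plain pipeline, the second routes tasks through the balanced parallelStage:

import akka.stream.scaladsl.Flow

object PipeliningApplication extends PipeliningParallelizing {
  runGraph(Flow[Wash].via(washStage).via(dryStage))
}

object ParallelizingApplication extends PipeliningParallelizing {
  runGraph(parallelStage)
}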
Example 192
Source File: WorkingIOStreamsApplication.scala    From Akka-Cookbook   with MIT License 5 votes vote down vote up
package com.packt.chapter8

import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.Tcp.{IncomingConnection, ServerBinding}
import akka.stream.scaladsl._
import akka.util.ByteString

import scala.concurrent.Future

object WorkingIOStreamsApplication extends App {

  implicit val actorSystem = ActorSystem("WorkingIOStreams")
  implicit val actorMaterializer = ActorMaterializer()

  val MaxGroups = 1000

  // Define the flow before binding so the connection handler never captures an
  // uninitialized val.
  val wordCount = Flow[ByteString].map(_.utf8String.toUpperCase)
    .mapConcat(_.split(" ").toList)
    .collect { case w if w.nonEmpty =>
      w.replaceAll("""[\p{Punct}&&[^.]]""", "").replaceAll(System.lineSeparator(), "") }
    .groupBy(MaxGroups, identity)
    .map(_ -> 1)
    .reduce((l, r) => (l._1, l._2 + r._2))
    .mergeSubstreams
    .map(x => ByteString(s"[${x._1} => ${x._2}]\n"))

  val connections = Tcp().bind("127.0.0.1", 1234)
  connections.runForeach(connection => connection.handleWith(wordCount))
} 
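A hedged client sketch (not part of the recipe) to exercise the word-count server above: it sends one line over TCP to 127.0.0.1:1234 and prints whatever comes back.

import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.{Source, Tcp}
import akka.util.ByteString

object WordCountClient extends App {
  implicit val actorSystem = ActorSystem("WordCountClient")
  implicit val actorMaterializer = ActorMaterializer()

  Source.single(ByteString("Hello Akka Streams Hello"))
    .via(Tcp().outgoingConnection("127.0.0.1", 1234))
    .runForeach(bytes => print(bytes.utf8String))
}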
Example 193
Source File: CountDownLatch.scala    From Akka-Cookbook   with MIT License 5 votes vote down vote up
package com.packt.chapter10

import akka.Done
import akka.actor.{Actor, ActorLogging, ActorRef, ActorSystem, Props}

import scala.concurrent.{Await, Future, Promise}
import scala.concurrent.duration._

object CountDownLatch {
  case object CountDown

  def apply(count:Int)(implicit actorSystem: ActorSystem) = {
    val promise = Promise[Done]()
    val props = Props(classOf[CountDownLatchActor], count, promise)
    val countDownLatchActor = actorSystem.actorOf(props, "countDownLatchActor")
    new CountDownLatch(countDownLatchActor, promise)
  }
}

class CountDownLatch(private val actor: ActorRef, private val promise: Promise[Done]) {
  import CountDownLatch._

  def countDown() = actor ! CountDown
  def await() : Unit = Await.result(promise.future, 10 minutes)
  val result : Future[Done] = promise.future
}


class CountDownLatchActor(count: Int, promise: Promise[Done]) extends Actor with ActorLogging {
  import CountDownLatch._

  var remaining = count

  def receive = {
    case CountDown if remaining - 1 == 0 =>
      log.info("Counting down")
      promise.success(Done)
      log.info("Gate opened")
      context.stop(self)
    case CountDown =>
      log.info("Counting down")
      remaining -= 1
  }
} 
Example 194
Source File: CountDownLatchApp.scala    From Akka-Cookbook   with MIT License 5 votes vote down vote up
package com.packt.chapter10

import akka.actor.{ActorSystem, Props}
import akka.routing.RoundRobinPool

object CountDownLatchApp extends App {
  implicit val actorSystem = ActorSystem()
  import actorSystem._
  val routeesToSetUp = 2
  val countDownLatch = CountDownLatch(routeesToSetUp)

  actorSystem.actorOf(Props(classOf[CountDownLatchWorker], countDownLatch)
    .withRouter(RoundRobinPool(routeesToSetUp)), "workers")

  //Future based solution
  countDownLatch.result.onSuccess { case _ => log.info("Future completed successfully") }

  //Await based solution
  countDownLatch.await()
  actorSystem.terminate()
} 
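A hedged sketch of CountDownLatchWorker (referenced above, not shown): each routee performs its setup work and then counts the latch down exactly once.

import akka.actor.Actor

class CountDownLatchWorker(countDownLatch: CountDownLatch) extends Actor {
  // Signal readiness as soon as the routee has started.
  override def preStart(): Unit = countDownLatch.countDown()
  def receive = Actor.emptyBehavior
}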
Example 195
Source File: BalancingWorkApp.scala    From Akka-Cookbook   with MIT License 5 votes vote down vote up
package com.packt.chapter10

import akka.actor.{ActorSystem, Props}
import akka.routing.{BalancingPool, SmallestMailboxPool}
import com.packt.chapter10.BalancedWorker.WorkTask

import scala.concurrent.duration._
import scala.util.Random

object BalancingDispatcherApp extends App {
  val actorSystem = ActorSystem()
  val workerPool = actorSystem.actorOf(Props[BalancedWorker].withRouter(BalancingPool(4)),"workers")

  import actorSystem.dispatcher
  actorSystem.scheduler.schedule(1 second, 200 millis)(sendTask)

  def sendTask : Unit = workerPool ! WorkTask(Random.nextInt(10000))
}

object SmallestMailboxRouterApp extends App {
  val actorSystem = ActorSystem()
  val workerPool = actorSystem.actorOf(Props[BalancedWorker].withRouter(SmallestMailboxPool(4)),"workers")

  import actorSystem.dispatcher
  actorSystem.scheduler.schedule(1 second, 200 millis)(sendTask)

  def sendTask() : Unit = workerPool ! WorkTask(Random.nextInt(10000))
} 
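A hedged sketch of BalancedWorker and its WorkTask message (referenced above via com.packt.chapter10.BalancedWorker.WorkTask but not shown here).

import akka.actor.Actor

object BalancedWorker {
  case class WorkTask(duration: Int)
}

class BalancedWorker extends Actor {
  import BalancedWorker._
  def receive = {
    case WorkTask(duration) =>
      println(s"[${self.path.name}] Working for $duration ms")
      Thread.sleep(duration) // simulate blocking work so the two pools balance differently
  }
}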
Example 196
Source File: EnvelopingActorApp.scala    From Akka-Cookbook   with MIT License 5 votes vote down vote up
package com.packt.chapter10

import java.util.UUID

import akka.actor.{ActorSystem, Props}

object EnvelopingActorApp extends App {

  val actorSystem = ActorSystem()
  val envelopReceived = actorSystem.actorOf(Props[EnvelopeReceiver], "receiver")
  val envelopingActor = actorSystem.actorOf(Props(classOf[EnvelopingActor], envelopReceived, headers _))
  envelopingActor ! "Hello!"

  def headers(msg: Any) = {
    Map(
      "timestamp" -> System.currentTimeMillis(),
      "correlationId" -> UUID.randomUUID().toString
    )
  }
} 
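Hedged sketches of EnvelopingActor and EnvelopeReceiver (referenced above but not shown): the enveloping actor wraps each message with the computed headers and forwards it on; the Envelope case class is an assumption.

import akka.actor.{Actor, ActorRef}

case class Envelope(headers: Map[String, Any], payload: Any)

class EnvelopingActor(nextActor: ActorRef, headers: Any => Map[String, Any]) extends Actor {
  def receive = {
    case msg => nextActor ! Envelope(headers(msg), msg)
  }
}

class EnvelopeReceiver extends Actor {
  def receive = {
    case Envelope(headers, payload) =>
      println(s"Received [$payload] with headers [$headers]")
  }
}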
Example 197
Source File: Shutdown.scala    From Akka-Cookbook   with MIT License 5 votes vote down vote up
package com.packt.chapter1

import akka.actor.{PoisonPill, Props, ActorSystem, Actor}


object ShutdownApp extends App {
  val actorSystem = ActorSystem("HelloAkka")
  val shutdownActor1 = actorSystem.actorOf(Props[ShutdownActor], "shutdownActor1")
  shutdownActor1 ! "hello"
  shutdownActor1 ! PoisonPill
  shutdownActor1 ! "Are you there?"

  val shutdownActor2 = actorSystem.actorOf(Props[ShutdownActor], "shutdownActor2")
  shutdownActor2 ! "hello"
  shutdownActor2 ! Stop
  shutdownActor2 ! "Are you there?"

}

class ShutdownActor extends Actor {
  override def receive: Receive = {
    case msg: String => println(s"$msg")
    case Stop => context.stop(self)
  }
}

case object Stop
Example 198
Source File: CustomMailbox.scala    From Akka-Cookbook   with MIT License 5 votes vote down vote up
package com.packt.chapter1

import java.util.concurrent.ConcurrentLinkedQueue

import akka.actor.{Props, Actor, ActorSystem, ActorRef}

import akka.dispatch.{MailboxType, ProducesMessageQueue, Envelope, MessageQueue}

import com.typesafe.config.Config


object CustomMailbox extends App  {

  val actorSystem = ActorSystem("HelloAkka")
  val actor = actorSystem.actorOf(Props[MySpecialActor].withDispatcher("custom-dispatcher"))
  val actor1 = actorSystem.actorOf(Props[MyActor],"xyz")

  val actor2 = actorSystem.actorOf(Props[MyActor],"MyActor")

  actor1 ! ("hello", actor)
  actor2 ! ("hello", actor)
}

class MySpecialActor extends Actor {
  override def receive: Receive = {
    case msg: String => println(s"msg is $msg" )
  }
}
class MyActor extends Actor {
  override def receive: Receive = {
    case (msg: String, actorRef: ActorRef) => actorRef ! msg
    case msg => println(msg)
  }
}



trait MyUnboundedMessageQueueSemantics

// This is the MessageQueue implementation
class MyMessageQueue extends MessageQueue
  with MyUnboundedMessageQueueSemantics {

  private final val queue = new ConcurrentLinkedQueue[Envelope]()

  // these should be implemented; queue used as example
  def enqueue(receiver: ActorRef, handle: Envelope): Unit = {
    if (handle.sender.path.name == "MyActor") {
      handle.sender ! "Hey dude, How are you?, I Know your name, processing your request"
      queue.offer(handle)
    } else handle.sender ! "I don't talk to strangers, I can't process your request"
  }

  def dequeue(): Envelope = queue.poll
  def numberOfMessages: Int = queue.size
  def hasMessages: Boolean = !queue.isEmpty

  def cleanUp(owner: ActorRef, deadLetters: MessageQueue): Unit = {
    while (hasMessages) {
      deadLetters.enqueue(owner, dequeue())
    }
  }
}

class MyUnboundedMailbox extends MailboxType
  with ProducesMessageQueue[MyMessageQueue] {

  // This constructor signature must exist, it will be called by Akka
  def this(settings: ActorSystem.Settings, config: Config) = {
    // put your initialization code here
    this()
  }

  // The create method is called to create the MessageQueue
  final override def create(owner: Option[ActorRef],
                            system: Option[ActorSystem]): MessageQueue =
    new MyMessageQueue()
} 
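A sketch (an assumption; the recipe's application.conf is not shown) of how the "custom-dispatcher" used above could be bound to MyUnboundedMailbox through Akka's standard mailbox-requirement mechanism. This relies on MyMessageQueue mixing in MyUnboundedMessageQueueSemantics, as in the class above.

import akka.actor.ActorSystem
import com.typesafe.config.ConfigFactory

object CustomMailboxConfig {
  val config = ConfigFactory.parseString(
    """custom-dispatcher {
      |  mailbox-requirement = "com.packt.chapter1.MyUnboundedMessageQueueSemantics"
      |}
      |akka.actor.mailbox.requirements {
      |  "com.packt.chapter1.MyUnboundedMessageQueueSemantics" = custom-dispatcher-mailbox
      |}
      |custom-dispatcher-mailbox {
      |  mailbox-type = "com.packt.chapter1.MyUnboundedMailbox"
      |}""".stripMargin)

  // Actors created with .withDispatcher("custom-dispatcher") on this system
  // get a MyMessageQueue-backed mailbox.
  def system: ActorSystem = ActorSystem("HelloAkka", config.withFallback(ConfigFactory.load()))
}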
Example 199
Source File: BecomeUnBecome.scala    From Akka-Cookbook   with MIT License 5 votes vote down vote up
package com.packt.chapter1

import akka.actor.{Props, ActorSystem, Actor}


object BecomeUnBecomeApp extends App {

  val actorSystem = ActorSystem("HelloAkka")
  val becomeUnBecome = actorSystem.actorOf(Props[BecomeUnBecomeActor])
  becomeUnBecome ! true
  becomeUnBecome ! "Hello how are you?"
  becomeUnBecome ! false
  becomeUnBecome ! 1100
  becomeUnBecome ! true
  becomeUnBecome ! "What do u do?"
}

class BecomeUnBecomeActor extends Actor {
  def receive: Receive = {
    case true => context.become(isStateTrue)
    case false => context.become(isStateFalse)
    case _ => println("don't know what you want to say !!")
  }

  def isStateTrue: Receive = {
    case msg: String => println(s"$msg")
    case false => context.become(isStateFalse)
  }

  def isStateFalse: Receive = {
    case msg: Int => println(s"$msg")
    case true => context.become(isStateTrue)
  }
}
Example 200
Source File: Communication.scala    From Akka-Cookbook   with MIT License 5 votes vote down vote up
package com.packt.chapter1
import akka.actor.{ActorRef, Actor, ActorSystem, Props}
import scala.util.Random._



object Messages {
  case class Done(randomNumber: Int)
  case object GiveMeRandomNumber
  case class Start(actorRef: ActorRef)
}

class RandomNumberGeneratorActor extends Actor {
  import Messages._
  override def receive: Receive = {
    case GiveMeRandomNumber =>
      println("received a message to generate a random integer")
      val randomNumber = nextInt
      sender ! Done(randomNumber)
  }
}

class QueryActor extends Actor {
  import Messages._
  override def receive: Receive = {
    case Start(actorRef) =>
      println("send me the next random number")
      actorRef ! GiveMeRandomNumber
    case Done(randomNumber) =>
      println(s"received a random number $randomNumber")
  }
}

object Communication extends App {
  import Messages._
  val actorSystem = ActorSystem("HelloAkka")
  val randomNumberGenerator = actorSystem.actorOf(Props[RandomNumberGeneratorActor], "randomNumberGeneratorActor")
  val queryActor = actorSystem.actorOf(Props[QueryActor], "queryActor")
  queryActor ! Start(randomNumberGenerator)

}