akka.stream.QueueOfferResult Scala Examples
The following examples show how to use akka.stream.QueueOfferResult.
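Each example ultimately pattern-matches on the four possible results of SourceQueueWithComplete.offer: Enqueued, Dropped, Failure(cause), and QueueClosed. As a baseline, here is a minimal, self-contained sketch of that pattern (the queue size, element type, and printed messages are illustrative, not taken from the examples below):

import akka.actor.ActorSystem
import akka.stream.scaladsl.{Sink, Source}
import akka.stream.{ActorMaterializer, OverflowStrategy, QueueOfferResult}

object QueueOfferResultDemo extends App {
  implicit val system: ActorSystem = ActorSystem("demo")
  implicit val materializer: ActorMaterializer = ActorMaterializer()
  import system.dispatcher

  // Materialize a queue-backed source; offered elements flow into the sink.
  val queue = Source
    .queue[String](bufferSize = 16, OverflowStrategy.dropNew)
    .to(Sink.foreach(println))
    .run()

  // offer returns Future[QueueOfferResult]; match on all four cases.
  queue.offer("hello").foreach {
    case QueueOfferResult.Enqueued    => println("accepted")
    case QueueOfferResult.Dropped     => println("buffer full, element dropped")
    case QueueOfferResult.Failure(e)  => println(s"stream failed: ${e.getMessage}")
    case QueueOfferResult.QueueClosed => println("queue was completed")
  }
}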
Example 1
Source File: HandleOfferResult.scala From daml with Apache License 2.0
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.platform.apiserver.services.tracking

import akka.stream.QueueOfferResult
import com.daml.platform.server.api.ApiException
import com.google.rpc.status.Status
import io.grpc.{Status => GrpcStatus}

import scala.concurrent.Promise
import scala.util.{Failure, Success, Try}

private[tracking] object HandleOfferResult {
  val toGrpcStatus: PartialFunction[Try[QueueOfferResult], Option[GrpcStatus]] = {
    case Failure(t) =>
      t match {
        case i: IllegalStateException
            if i.getMessage == "You have to wait for previous offer to be resolved to send another request" =>
          Some(
            GrpcStatus.RESOURCE_EXHAUSTED
              .withDescription("Ingress buffer is full"))
        case _ =>
          Some(
            GrpcStatus.ABORTED
              .withDescription(s"Failure: ${t.getClass.getSimpleName}: ${t.getMessage}")
              .withCause(t))
      }
    case Success(QueueOfferResult.Failure(t)) =>
      Some(
        GrpcStatus.ABORTED
          .withDescription(s"Failed to enqueue: ${t.getClass.getSimpleName}: ${t.getMessage}")
          .withCause(t))
    case Success(QueueOfferResult.Dropped) =>
      Some(
        GrpcStatus.RESOURCE_EXHAUSTED
          .withDescription("Ingress buffer is full"))
    case Success(QueueOfferResult.QueueClosed) =>
      Some(GrpcStatus.ABORTED.withDescription("Queue closed"))
    case Success(QueueOfferResult.Enqueued) =>
      None // Promise will be completed downstream.
  }

  def toStatusMessage: PartialFunction[Try[QueueOfferResult], Status] =
    toGrpcStatus.andThen(_.fold(Status())(e => Status(e.getCode.value(), e.getDescription)))

  def completePromise(promise: Promise[_]): PartialFunction[Try[QueueOfferResult], Unit] =
    toGrpcStatus.andThen(_.foreach(s => promise.tryFailure(new ApiException(s))))
}
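Both toStatusMessage and completePromise are shaped to be chained onto a queue offer's completion with Future.andThen. A sketch of a typical call site, assuming a queue, submission, promise, and executionContext already in scope (these names are illustrative, not from the daml sources shown here):

// Sketch: fail `promise` with an ApiException unless the submission was enqueued.
// `queue`, `submission`, `promise`, and `executionContext` are assumed to exist in the caller's scope.
queue
  .offer(submission)
  .andThen(HandleOfferResult.completePromise(promise))(executionContext)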
Example 2
Source File: BatchingQueue.scala From daml with Apache License 2.0
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.ledger.participant.state.kvutils.api

import java.io.Closeable
import java.util.concurrent.atomic.AtomicBoolean

import akka.stream.scaladsl.{Sink, Source, SourceQueueWithComplete}
import akka.stream.{Materializer, OverflowStrategy, QueueOfferResult}
import com.daml.ledger.participant.state.kvutils.DamlKvutils.DamlSubmissionBatch
import com.daml.ledger.participant.state.v1.SubmissionResult

import scala.concurrent.Future
import scala.concurrent.duration._

object BatchingQueue {
  type CommitBatchFunction =
    Seq[DamlSubmissionBatch.CorrelatedSubmission] => Future[Unit]
}

case class DefaultBatchingQueue(
    maxQueueSize: Int,
    maxBatchSizeBytes: Long,
    maxWaitDuration: FiniteDuration,
    maxConcurrentCommits: Int
) extends BatchingQueue {
  private val queue: Source[
    Seq[DamlSubmissionBatch.CorrelatedSubmission],
    SourceQueueWithComplete[DamlSubmissionBatch.CorrelatedSubmission]] =
    Source
      .queue(maxQueueSize, OverflowStrategy.dropNew)
      .groupedWeightedWithin(maxBatchSizeBytes, maxWaitDuration)(
        (cs: DamlSubmissionBatch.CorrelatedSubmission) => cs.getSubmission.size.toLong)

  def run(commitBatch: Seq[DamlSubmissionBatch.CorrelatedSubmission] => Future[Unit])(
      implicit materializer: Materializer): RunningBatchingQueueHandle = {
    val materializedQueue = queue
      .mapAsync(maxConcurrentCommits)(commitBatch)
      .to(Sink.ignore)
      .run()

    val queueAlive = new AtomicBoolean(true)
    materializedQueue.watchCompletion.foreach { _ =>
      queueAlive.set(false)
    }(materializer.executionContext)

    new RunningBatchingQueueHandle {
      override def alive: Boolean = queueAlive.get()

      override def offer(
          submission: DamlSubmissionBatch.CorrelatedSubmission): Future[SubmissionResult] = {
        materializedQueue
          .offer(submission)
          .map {
            case QueueOfferResult.Enqueued   => SubmissionResult.Acknowledged
            case QueueOfferResult.Dropped    => SubmissionResult.Overloaded
            case f: QueueOfferResult.Failure => SubmissionResult.InternalError(f.toString)
            case QueueOfferResult.QueueClosed =>
              SubmissionResult.InternalError("DefaultBatchingQueue.queue is closed")
          }(materializer.executionContext)
      }

      override def close(): Unit = {
        materializedQueue.complete()
      }
    }
  }
}
Example 3
Source File: CarbonClient.scala From akka-http-metrics with Apache License 2.0
package fr.davit.akka.http.metrics.graphite

import java.time.{Clock, Instant}

import akka.NotUsed
import akka.actor.ActorSystem
import akka.event.Logging
import akka.stream.scaladsl.{Flow, Keep, RestartFlow, Sink, Source, Tcp}
import akka.stream.{OverflowStrategy, QueueOfferResult}
import akka.util.ByteString
import fr.davit.akka.http.metrics.core.Dimension

import scala.concurrent.Await
import scala.concurrent.duration.{Duration, _}

object CarbonClient {
  def apply(host: String, port: Int)(implicit system: ActorSystem): CarbonClient =
    new CarbonClient(host, port)
}

class CarbonClient(host: String, port: Int)(implicit system: ActorSystem) extends AutoCloseable {

  private val logger = Logging(system.eventStream, classOf[CarbonClient])
  protected val clock: Clock = Clock.systemUTC()

  private def serialize[T](name: String, value: T, dimensions: Seq[Dimension], ts: Instant): ByteString = {
    val tags = dimensions.map(d => d.key + "=" + d.value).toList
    val taggedMetric = (name :: tags).mkString(";")
    ByteString(s"$taggedMetric $value ${ts.getEpochSecond}\n")
  }

  // TODO read backoff from config
  private def connection: Flow[ByteString, ByteString, NotUsed] =
    RestartFlow.withBackoff(
      minBackoff = 3.seconds,
      maxBackoff = 30.seconds,
      randomFactor = 0.2, // adds 20% "noise" to vary the intervals slightly
      maxRestarts = -1 // keep retrying forever
    )(() => Tcp().outgoingConnection(host, port))

  private val queue = Source
    .queue[ByteString](19, OverflowStrategy.dropHead)
    .via(connection)
    .toMat(Sink.ignore)(Keep.left)
    .run()

  def publish[T](
      name: String,
      value: T,
      dimensions: Seq[Dimension] = Seq.empty,
      ts: Instant = Instant.now(clock)): Unit = {
    // it's reasonable to block until the message is enqueued
    Await.result(queue.offer(serialize(name, value, dimensions, ts)), Duration.Inf) match {
      case QueueOfferResult.Enqueued    => logger.debug("Metric {} enqueued", name)
      case QueueOfferResult.Dropped     => logger.debug("Metric {} dropped", name)
      case QueueOfferResult.Failure(e)  => logger.error(e, s"Failed publishing metric $name")
      case QueueOfferResult.QueueClosed => throw new Exception("Failed publishing metric to closed carbon client")
    }
  }

  override def close(): Unit = {
    queue.complete()
    Await.result(queue.watchCompletion(), Duration.Inf)
  }
}
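As the comment notes, publish blocks deliberately until the offer resolves. Where blocking is unacceptable, the same result can be handled asynchronously; a sketch of such a variant, reusing the queue, serialize, clock, and logger members above (this method is not part of the original client):

// Sketch: fire-and-forget publish; reuses `queue`, `serialize`, `clock`, and `logger` from above.
import scala.concurrent.ExecutionContext.Implicits.global

def publishAsync[T](
    name: String,
    value: T,
    dimensions: Seq[Dimension] = Seq.empty,
    ts: Instant = Instant.now(clock)): Unit =
  queue.offer(serialize(name, value, dimensions, ts)).foreach {
    case QueueOfferResult.Enqueued => logger.debug("Metric {} enqueued", name)
    case other                     => logger.warning("Metric {} not enqueued: {}", name, other)
  }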
Example 4
Source File: HTTPInterfaceSpec.scala From reactive-kafka-microservice-template with Apache License 2.0
package akka

import akka.event.Logging
import akka.http.scaladsl.testkit.ScalatestRouteTest
import akka.stream.QueueOfferResult
import akka.stream.QueueOfferResult.Enqueued
import akka.stream.scaladsl.SourceQueueWithComplete
import akka.testkit.{TestActorRef, TestProbe}
import com.omearac.consumers.{DataConsumer, EventConsumer}
import com.omearac.http.routes.{ConsumerCommands, ProducerCommands}
import com.omearac.producers.DataProducer
import org.scalatest.{Matchers, WordSpec}

import scala.concurrent.Future

class HTTPInterfaceSpec extends WordSpec
  with Matchers with ScalatestRouteTest
  with ConsumerCommands with ProducerCommands {

  val log = Logging(system, this.getClass.getName)

  //Mocks for DataConsumer Tests
  val dataConsumer = TestActorRef(new DataConsumer)
  val manager = TestProbe()
  dataConsumer.underlyingActor.consumerStreamManager = manager.ref

  //Mocks for EventConsumer Tests
  val eventConsumer = TestActorRef(new EventConsumer)
  eventConsumer.underlyingActor.consumerStreamManager = manager.ref

  //Mocks for DataProducer Tests
  val dataProducer = TestActorRef(new DataProducer)
  val mockProducerStream: SourceQueueWithComplete[Any] = new SourceQueueWithComplete[Any] {
    override def complete(): Unit = println("complete")
    override def fail(ex: Throwable): Unit = println("fail")
    override def offer(elem: Any): Future[QueueOfferResult] = Future { Enqueued }
    override def watchCompletion(): Future[Done] = Future { Done }
  }

  "The HTTP interface to control the DataConsumerStream" should {
    "return a Already Stopped message for GET requests to /data_consumer/stop" in {
      Get("/data_consumer/stop") ~> dataConsumerHttpCommands ~> check {
        responseAs[String] shouldEqual "Data Consumer Stream Already Stopped"
      }
    }

    "return a Stream Started response for GET requests to /data_consumer/start" in {
      Get("/data_consumer/start") ~> dataConsumerHttpCommands ~> check {
        responseAs[String] shouldEqual "Data Consumer Stream Started"
      }
    }
  }

  "The HTTP interface to control the EventConsumerStream" should {
    "return a Already Stopped message for GET requests to /event_consumer/stop" in {
      Get("/event_consumer/stop") ~> eventConsumerHttpCommands ~> check {
        responseAs[String] shouldEqual "Event Consumer Stream Already Stopped"
      }
    }

    "return a Stream Started response for GET requests to /event_consumer/start" in {
      Get("/event_consumer/start") ~> eventConsumerHttpCommands ~> check {
        responseAs[String] shouldEqual "Event Consumer Stream Started"
      }
    }
  }

  "The HTTP interface to tell the DataProducer Actor to publish messages to Kafka" should {
    "return a Messages Produced message for GET requests to /data_producer/produce/10" in {
      dataProducer.underlyingActor.producerStream = mockProducerStream
      val producing = dataProducer.underlyingActor.publishData
      dataProducer.underlyingActor.context.become(producing)

      Get("/data_producer/produce/10") ~> producerHttpCommands ~> check {
        responseAs[String] shouldEqual "10 messages Produced as Ordered, Boss!"
      }
    }
  }
}
Example 5
Source File: EventProducerSpec.scala From reactive-kafka-microservice-template with Apache License 2.0
package akka.kafka

import java.util.Date

import akka.Done
import akka.actor.ActorSystem
import akka.serialization.Serialization
import akka.stream.QueueOfferResult
import akka.stream.QueueOfferResult.Enqueued
import akka.stream.scaladsl.SourceQueueWithComplete
import akka.testkit.{DefaultTimeout, EventFilter, ImplicitSender, TestActorRef, TestKit, TestProbe}
import com.omearac.producers.EventProducer
import com.omearac.shared.AkkaStreams
import com.omearac.shared.EventMessages.{ActivatedProducerStream, MessagesPublished}
import com.omearac.shared.KafkaMessages.ExampleAppEvent
import com.typesafe.config.ConfigFactory
import org.scalatest.{BeforeAndAfterAll, Matchers, WordSpecLike}

import scala.concurrent.Future

class EventProducerSpec extends TestKit(ActorSystem("EventProducerSpec", ConfigFactory.parseString("""
  akka.loggers = ["akka.testkit.TestEventListener"] """)))
  with DefaultTimeout with ImplicitSender
  with WordSpecLike with Matchers with BeforeAndAfterAll
  with AkkaStreams {

  val testProducer = TestActorRef(new EventProducer)
  val producerActor = testProducer.underlyingActor

  val mockProducerStream: SourceQueueWithComplete[Any] = new SourceQueueWithComplete[Any] {
    override def complete(): Unit = println("complete")
    override def fail(ex: Throwable): Unit = println("fail")
    override def offer(elem: Any): Future[QueueOfferResult] = Future { Enqueued }
    override def watchCompletion(): Future[Done] = Future { Done }
  }

  override def afterAll: Unit = {
    shutdown()
  }

  //Create a test event listener for the local message bus
  val testEventListener = TestProbe()
  system.eventStream.subscribe(testEventListener.ref, classOf[ExampleAppEvent])

  "Sending ActivatedProducerStream to EventProducer in receive state" should {
    "save the stream ref and change state to producing " in {
      testProducer ! ActivatedProducerStream(mockProducerStream, "TestTopic")
      Thread.sleep(500)
      producerActor.producerStream should be(mockProducerStream)
      EventFilter.error(message = "EventProducer got the unknown message while producing: testMessage", occurrences = 1) intercept {
        testProducer ! "testMessage"
      }
    }
  }

  "Sending ExampleAppEvent to system bus while EventProducer is in publishEvent state" should {
    "offer the ExampleAppEvent to the stream " in {
      val producingState = producerActor.publishEvent
      producerActor.context.become(producingState)
      producerActor.producerStream = mockProducerStream
      val dateFormat = new java.text.SimpleDateFormat("dd:MM:yy:HH:mm:ss.SSS")
      lazy val timetag = dateFormat.format(new Date(System.currentTimeMillis()))
      val eventMsg = MessagesPublished(5)
      val testMessage = ExampleAppEvent(timetag, Serialization.serializedActorPath(self), eventMsg.toString)
      system.eventStream.publish(testMessage)
      testEventListener.expectMsgPF() {
        case ExampleAppEvent(_, _, m) => if (m == eventMsg.toString) () else fail()
      }
    }
  }
}
Example 6
Source File: DataProducerSpec.scala From reactive-kafka-microservice-template with Apache License 2.0
package akka.kafka

import akka.Done
import akka.actor.ActorSystem
import akka.stream.QueueOfferResult
import akka.stream.QueueOfferResult.Enqueued
import akka.stream.scaladsl.SourceQueueWithComplete
import akka.testkit.{DefaultTimeout, EventFilter, ImplicitSender, TestActorRef, TestKit, TestProbe}
import com.omearac.producers.DataProducer
import com.omearac.producers.DataProducer.PublishMessages
import com.omearac.shared.AkkaStreams
import com.omearac.shared.EventMessages.{ActivatedProducerStream, MessagesPublished}
import com.omearac.shared.KafkaMessages.ExampleAppEvent
import com.typesafe.config.ConfigFactory
import org.scalatest.{BeforeAndAfterAll, Matchers, WordSpecLike}

import scala.concurrent.Future

class DataProducerSpec extends TestKit(ActorSystem("DataProducerSpec", ConfigFactory.parseString("""
  akka.loggers = ["akka.testkit.TestEventListener"] """)))
  with DefaultTimeout with ImplicitSender
  with WordSpecLike with Matchers with BeforeAndAfterAll
  with AkkaStreams {

  val testProducer = TestActorRef(new DataProducer)
  val producerActor = testProducer.underlyingActor

  val mockProducerStream: SourceQueueWithComplete[Any] = new SourceQueueWithComplete[Any] {
    override def complete(): Unit = println("complete")
    override def fail(ex: Throwable): Unit = println("fail")
    override def offer(elem: Any): Future[QueueOfferResult] = Future { Enqueued }
    override def watchCompletion(): Future[Done] = Future { Done }
  }

  override def afterAll: Unit = {
    shutdown()
  }

  //Create a test event listener for the local message bus
  val testEventListener = TestProbe()
  system.eventStream.subscribe(testEventListener.ref, classOf[ExampleAppEvent])

  "Sending ActivatedProducerStream to DataProducer in receive state" should {
    "save the stream ref and change state to producing " in {
      testProducer ! ActivatedProducerStream(mockProducerStream, "TestTopic")
      Thread.sleep(500)
      producerActor.producerStream should be(mockProducerStream)
      EventFilter.error(message = "DataProducer got the unknown message while producing: testMessage", occurrences = 1) intercept {
        testProducer ! "testMessage"
      }
    }
  }

  "Sending PublishMessages(number: Int) to DataProducer in publishData state" should {
    "return MessagesPublished(number: Int) and publish the local event " in {
      val producing = producerActor.publishData
      producerActor.context.become(producing)
      producerActor.producerStream = mockProducerStream
      val resultMessage = MessagesPublished(5)
      testProducer ! PublishMessages(5)

      expectMsg(resultMessage)
      testEventListener.expectMsgPF() {
        case ExampleAppEvent(_, _, m) => if (m == resultMessage.toString) () else fail()
      }
    }
  }
}
Example 7
Source File: PoolingRestClient.scala From openwhisk with Apache License 2.0
package org.apache.openwhisk.http

import akka.actor.ActorSystem
import akka.http.scaladsl.Http
import akka.http.scaladsl.marshallers.sprayjson.SprayJsonSupport._
import akka.http.scaladsl.marshalling._
import akka.http.scaladsl.model._
import akka.http.scaladsl.settings.ConnectionPoolSettings
import akka.http.scaladsl.unmarshalling._
import akka.stream.{ActorMaterializer, OverflowStrategy, QueueOfferResult}
import akka.stream.scaladsl.{Flow, _}
import spray.json._

import scala.concurrent.{ExecutionContext, Future, Promise}
import scala.concurrent.duration._
import scala.util.{Failure, Success, Try}

// NOTE: the original listing elides the enclosing class (its constructor, the pooled
// connection flow and request queue, and the `request` method that `requestJson`
// delegates to), so this excerpt does not compile on its own.

def requestJson[T: RootJsonReader](futureRequest: Future[HttpRequest]): Future[Either[StatusCode, T]] =
  request(futureRequest).flatMap { response =>
    if (response.status.isSuccess) {
      Unmarshal(response.entity.withoutSizeLimit).to[T].map(Right.apply)
    } else {
      Unmarshal(response.entity).to[String].flatMap { body =>
        val statusCode = response.status
        val reason =
          if (body.nonEmpty) s"${statusCode.reason} (details: $body)" else statusCode.reason
        val customStatusCode = StatusCodes
          .custom(intValue = statusCode.intValue, reason = reason, defaultMessage = statusCode.defaultMessage)
        // This is important, as it drains the entity stream.
        // Otherwise the connection stays open and the pool dries up.
        response.discardEntityBytes().future.map(_ => Left(customStatusCode))
      }
    }
  }

def shutdown(): Future[Unit] = Future.successful(materializer.shutdown())
}

object PoolingRestClient {

  def mkRequest(method: HttpMethod,
                uri: Uri,
                body: Future[MessageEntity] = Future.successful(HttpEntity.Empty),
                headers: List[HttpHeader] = List.empty)(
    implicit ec: ExecutionContext): Future[HttpRequest] = {
    body.map { b =>
      HttpRequest(method, uri, headers, b)
    }
  }

  def mkJsonRequest(method: HttpMethod, uri: Uri, body: JsValue, headers: List[HttpHeader] = List.empty)(
    implicit ec: ExecutionContext): Future[HttpRequest] = {
    val b = Marshal(body).to[MessageEntity]
    mkRequest(method, uri, b, headers)
  }
}
Example 8
Source File: KafkaEventProducer.scala From openwhisk with Apache License 2.0
package org.apache.openwhisk.core.database.cosmosdb.cache

import akka.Done
import akka.actor.ActorSystem
import akka.kafka.scaladsl.Producer
import akka.kafka.{ProducerMessage, ProducerSettings}
import akka.stream.scaladsl.{Keep, Sink, Source}
import akka.stream.{ActorMaterializer, OverflowStrategy, QueueOfferResult}
import org.apache.kafka.clients.consumer.ConsumerConfig
import org.apache.kafka.clients.producer.ProducerRecord
import org.apache.openwhisk.connector.kafka.KamonMetricsReporter

import scala.collection.immutable.Seq
import scala.concurrent.{ExecutionContext, Future, Promise}

case class KafkaEventProducer(
  settings: ProducerSettings[String, String],
  topic: String,
  eventProducerConfig: EventProducerConfig)(implicit system: ActorSystem, materializer: ActorMaterializer)
    extends EventProducer {
  private implicit val executionContext: ExecutionContext = system.dispatcher

  private val queue = Source
    .queue[(Seq[String], Promise[Done])](eventProducerConfig.bufferSize, OverflowStrategy.dropNew) //TODO Use backpressure
    .map {
      case (msgs, p) =>
        ProducerMessage.multi(msgs.map(newRecord), p)
    }
    .via(Producer.flexiFlow(producerSettings))
    .map {
      case ProducerMessage.MultiResult(_, passThrough) =>
        passThrough.success(Done)
      case _ => // As we use multi mode only, other modes need not be handled
    }
    .toMat(Sink.ignore)(Keep.left)
    .run

  override def send(msg: Seq[String]): Future[Done] = {
    val promise = Promise[Done]
    queue.offer(msg -> promise).flatMap {
      case QueueOfferResult.Enqueued    => promise.future
      case QueueOfferResult.Dropped     => Future.failed(new Exception("Kafka request queue is full."))
      case QueueOfferResult.QueueClosed => Future.failed(new Exception("Kafka request queue was closed."))
      case QueueOfferResult.Failure(f)  => Future.failed(f)
    }
  }

  def close(): Future[Done] = {
    queue.complete()
    queue.watchCompletion()
  }

  private def newRecord(msg: String) =
    new ProducerRecord[String, String](topic, "messages", msg)

  private def producerSettings =
    settings.withProperty(ConsumerConfig.METRIC_REPORTER_CLASSES_CONFIG, KamonMetricsReporter.name)
}
Example 9
Source File: UseCaseSupport.scala From akka-ddd-cqrs-es-example with MIT License
package com.github.j5ik2o.bank.useCase

import akka.{ Done, NotUsed }
import akka.stream.{ FlowShape, QueueOfferResult }
import akka.stream.scaladsl.{ Flow, GraphDSL, Sink, SourceQueueWithComplete, Unzip, Zip }

import scala.concurrent.{ ExecutionContext, Future, Promise }

object UseCaseSupport {

  implicit class FlowOps[A, B](val self: Flow[A, B, NotUsed]) extends AnyVal {
    def zipPromise: Flow[(A, Promise[B]), (B, Promise[B]), NotUsed] =
      Flow
        .fromGraph(GraphDSL.create() { implicit b =>
          import GraphDSL.Implicits._
          val unzip = b.add(Unzip[A, Promise[B]])
          val zip   = b.add(Zip[B, Promise[B]])
          unzip.out0 ~> self ~> zip.in0
          unzip.out1 ~> zip.in1
          FlowShape(unzip.in, zip.out)
        })
  }

}

trait UseCaseSupport {

  protected def offerToQueue[A, B](
      sourceQueue: SourceQueueWithComplete[(A, Promise[B])]
  )(request: A, promise: Promise[B])(implicit ec: ExecutionContext): Future[B] = {
    sourceQueue.offer((request, promise)).flatMap {
      case QueueOfferResult.Enqueued =>
        promise.future
      case QueueOfferResult.Failure(t) =>
        Future.failed(new Exception("Failed to offer request", t))
      case QueueOfferResult.Dropped =>
        Future.failed(
          new Exception(
            s"Failed to enqueue resolve request, the queue buffer was full, please check the bank.interface.buffer-size setting"
          )
        )
      case QueueOfferResult.QueueClosed =>
        Future.failed(new Exception("Failed to enqueue request batch write, the queue was closed"))
    }
  }

  protected def completePromiseSink[T]: Sink[(T, Promise[T]), Future[Done]] = Sink.foreach {
    case (response, promise) =>
      promise.success(response)
  }
}
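For context, a caller of these helpers typically materializes a queue whose business flow is lifted with zipPromise and drained by completePromiseSink, then funnels requests through offerToQueue. A sketch under those assumptions (EchoUseCase and its uppercasing flow are hypothetical, not part of the bank example):

import akka.actor.ActorSystem
import akka.stream.{ ActorMaterializer, OverflowStrategy }
import akka.stream.scaladsl.{ Flow, Keep, Source, SourceQueueWithComplete }
import com.github.j5ik2o.bank.useCase.UseCaseSupport
import com.github.j5ik2o.bank.useCase.UseCaseSupport._

import scala.concurrent.{ ExecutionContext, Future, Promise }

// Sketch: a hypothetical use case built on the helpers above.
class EchoUseCase(implicit system: ActorSystem) extends UseCaseSupport {
  private implicit val mat: ActorMaterializer = ActorMaterializer()
  private implicit val ec: ExecutionContext   = system.dispatcher

  // Each queued element pairs a request with the promise that will carry its response.
  private val queue: SourceQueueWithComplete[(String, Promise[String])] =
    Source
      .queue[(String, Promise[String])](16, OverflowStrategy.dropNew)
      .via(Flow[String].map(_.toUpperCase).zipPromise) // run the business flow, keep the promise
      .toMat(completePromiseSink[String])(Keep.left)   // complete each promise with its response
      .run()

  def echo(request: String): Future[String] =
    offerToQueue(queue)(request, Promise[String]())
}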