akka.pattern.after Scala Examples

The following examples show how to use akka.pattern.after. They are taken from open-source projects; the source file, project, and license are noted above each example.
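All of the examples build on the same primitive: akka.pattern.after returns a Future that is completed with a lazily evaluated future once the given delay has elapsed on the supplied Scheduler. Here is a minimal, self-contained sketch (the object name and duration are illustrative, not taken from any project below):

import akka.actor.ActorSystem
import akka.pattern.after

import scala.concurrent.Future
import scala.concurrent.duration._

object AfterSketch extends App {
  implicit val system: ActorSystem = ActorSystem("after-sketch")
  import system.dispatcher // implicit ExecutionContext

  // The by-name future is only evaluated once the 200 ms delay has elapsed.
  val delayed: Future[String] =
    after(200.millis, system.scheduler)(Future.successful("done"))

  delayed.foreach { value =>
    println(value)
    system.terminate()
  }
}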
Example 1
Source File: RetryHelper.scala    From daml   with Apache License 2.0
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.ledger.client.binding.retrying

import java.lang.Math.floor

import akka.actor.Scheduler
import akka.pattern.after
import com.daml.ledger.client.binding.config.IRetryConfig
import com.typesafe.scalalogging.LazyLogging

import scala.concurrent.duration.FiniteDuration
import scala.concurrent.{ExecutionContext, Future}
import scala.util.control.NonFatal

object RetryHelper extends LazyLogging {

  
  // A retry strategy decides, per Throwable, whether another attempt should be made.
  type RetryStrategy = PartialFunction[Throwable, Boolean]

  val always: RetryStrategy = {
    case NonFatal(_) => true
  }

  def retry[T](retryConfig: Option[(Scheduler, IRetryConfig)])(retryStrategy: RetryStrategy)(
      f: => Future[T])(implicit ec: ExecutionContext): Future[T] = {
    retryConfig match {
      case None =>
        f
      case Some(rc) =>
        implicit val scheduler: Scheduler = rc._1
        retry(Option(rc._2))(retryStrategy)(f)
    }
  }

  def retry[T](retryConfig: Option[IRetryConfig])(retryStrategy: RetryStrategy)(
      f: => Future[T])(implicit ec: ExecutionContext, s: Scheduler): Future[T] = {
    retryConfig match {
      case None =>
        f
      case Some(rc) =>
        val maxAttempts = floor(rc.timeout / rc.interval).toInt
        retry(maxAttempts, rc.interval)(retryStrategy)(f)
    }
  }

  def retry[T](maxAttempts: Int, delay: FiniteDuration)(retryStrategy: RetryStrategy)(
      f: => Future[T])(implicit ec: ExecutionContext, s: Scheduler): Future[T] = {

    def shouldRetry(n: Int, e: Throwable): Boolean =
      n > 0 && retryStrategy.applyOrElse(e, (_: Throwable) => false)

    val remainingAttempts = maxAttempts - 1 // the next line will trigger a future evaluation

    f.recoverWith {
      case NonFatal(e) if shouldRetry(remainingAttempts, e) =>
        logWarning(remainingAttempts, e)
        after(delay, s)(retry(remainingAttempts, delay)(retryStrategy)(f))
    }
  }

  private def logWarning(remainingAttempts: Int, e: Throwable): Unit = {
    logger.warn(
      s"Retrying after failure. Attempts remaining: $remainingAttempts. Error: ${e.getMessage}")
  }
} 
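A usage sketch of the maxAttempts/delay overload from Example 1, assuming the daml client libraries are on the classpath; RetryHelperUsage, flakyCall, and the attempt/delay values are illustrative, not part of the project:

import akka.actor.{ActorSystem, Scheduler}
import com.daml.ledger.client.binding.retrying.RetryHelper

import scala.concurrent.Future
import scala.concurrent.duration._

object RetryHelperUsage extends App {
  implicit val system: ActorSystem = ActorSystem("retry-helper-usage")
  import system.dispatcher // implicit ExecutionContext
  implicit val scheduler: Scheduler = system.scheduler

  // Hypothetical call that may fail transiently.
  def flakyCall(): Future[Int] = Future(42)

  // Up to 3 attempts, 1 second apart, retrying on any non-fatal exception.
  val result: Future[Int] =
    RetryHelper.retry(maxAttempts = 3, delay = 1.second)(RetryHelper.always)(flakyCall())

  result.onComplete { r =>
    println(r)
    system.terminate()
  }
}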
Example 2
Source File: RetryHelper.scala    From daml   with Apache License 2.0
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.navigator.util

import java.lang.Math.floor

import akka.actor.Scheduler
import akka.pattern.after
import com.daml.grpc.GrpcException
import com.typesafe.scalalogging.LazyLogging

import scala.concurrent.duration._
import scala.concurrent.{ExecutionContext, Future}
import scala.util.control.NonFatal


object RetryHelper extends LazyLogging {

  // A retry strategy decides, per Throwable, whether another attempt should be made.
  // IRetryConfig, used below, is defined elsewhere in the project and supplies the
  // overall retry timeout and the interval between attempts.
  type RetryStrategy = PartialFunction[Throwable, Boolean]

  val always: RetryStrategy = {
    case NonFatal(_) => true
  }

  val failFastOnPermissionDenied: RetryStrategy = {
    case GrpcException.PERMISSION_DENIED() => false
    case NonFatal(_) => true
  }

  def retry[T](retryConfig: Option[(Scheduler, IRetryConfig)])(retryStrategy: RetryStrategy)(
      f: => Future[T])(implicit ec: ExecutionContext): Future[T] = {
    retryConfig match {
      case None =>
        f
      case Some(rc) =>
        implicit val scheduler: Scheduler = rc._1
        retry(Option(rc._2))(retryStrategy)(f)
    }
  }

  def retry[T](retryConfig: Option[IRetryConfig])(retryStrategy: RetryStrategy)(
      f: => Future[T])(implicit ec: ExecutionContext, s: Scheduler): Future[T] = {
    retryConfig match {
      case None =>
        f
      case Some(rc) =>
        val maxAttempts = floor(rc.timeout / rc.interval).toInt
        retry(maxAttempts, rc.interval)(retryStrategy)(f)
    }
  }

  def retry[T](maxAttempts: Int, delay: FiniteDuration)(retryStrategy: RetryStrategy)(
      f: => Future[T])(implicit ec: ExecutionContext, s: Scheduler): Future[T] = {

    def shouldRetry(n: Int, e: Throwable): Boolean =
      n > 0 && retryStrategy.applyOrElse(e, (_: Throwable) => false)

    val remainingAttempts = maxAttempts - 1 // the next line will trigger a future evaluation

    f.recoverWith {
      case NonFatal(e) if shouldRetry(remainingAttempts, e) =>
        logWarning(remainingAttempts, e)
        after(delay, s)(retry(remainingAttempts, delay)(retryStrategy)(f))
    }
  }

  private def logWarning(remainingAttempts: Int, e: Throwable): Unit = {
    logger.warn(
      s"Retrying after failure. Attempts remaining: $remainingAttempts. Error: ${e.getMessage}")
  }
} 
Example 3
Source File: Model.scala    From scala-concurrency-playground   with MIT License
package org.zalando.benchmarks

import akka.actor.ActorSystem
import akka.pattern.after
import org.openjdk.jmh.infra.Blackhole

import scala.concurrent.Future
import scala.util.Random

case class Job(id: Int) {
  val payload = Array.fill(16000)(Random.nextInt())
}
case class JobResult(job: Job, result: Int)
case class PublishResult(result: JobResult)

object Computer {
  import ComputationFollowedByAsyncPublishing._

  def compute(job: Job): JobResult = {
    // jmh ensures that this really consumes CPU
    Blackhole consumeCPU numTokensToConsume
    JobResult(job, job.id)
  }
}

object Publisher {
  import ComputationFollowedByAsyncPublishing._

  // we use the scheduler and the dispatcher of the actor system here because it's so very convenient
  def publish(result: JobResult, system: ActorSystem): Future[PublishResult] =
    after(publishDuration, system.scheduler) {
      Future(PublishResult(result))(system.dispatcher)
    } (system.dispatcher)
} 
Example 4
Source File: RetryPolicy.scala    From reactive-consul   with MIT License
package stormlantern.consul.client
package util

import scala.concurrent._
import scala.concurrent.duration._

import akka.actor.Scheduler
import akka.pattern.after

trait RetryPolicy {
  def maxRetries = 4
  def retry[T](
    delay: FiniteDuration = 500.milli,
    retries: Int = 4,
    backoff: Int = 2,
    predicate: T ⇒ Boolean = (r: T) ⇒ true
  )(f: ⇒ Future[T])(implicit ec: ExecutionContext, s: Scheduler): Future[T] = {
    f.map {
      case r if !predicate(r) ⇒ throw new IllegalStateException("Result does not satisfy the predicate specified")
      case r                  ⇒ r
    } recoverWith { case _ if retries > 0 ⇒ after(delay, s)(retry(delay * backoff, retries - 1, backoff, predicate)(f)) }
  }
} 
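A sketch of how the RetryPolicy trait might be mixed in and called; RetryPolicyUsage, lookup, and the retry parameters are illustrative:

import akka.actor.{ActorSystem, Scheduler}
import stormlantern.consul.client.util.RetryPolicy

import scala.concurrent.{ExecutionContext, Future}
import scala.concurrent.duration._

object RetryPolicyUsage extends App with RetryPolicy {
  implicit val system: ActorSystem = ActorSystem("retry-policy-usage")
  implicit val ec: ExecutionContext = system.dispatcher
  implicit val scheduler: Scheduler = system.scheduler

  // Hypothetical lookup, retried until it returns a non-empty list,
  // doubling the delay between attempts (backoff defaults to 2).
  def lookup(): Future[List[String]] = Future(List("service-1"))

  val services: Future[List[String]] =
    retry(delay = 250.millis, retries = 3, predicate = (r: List[String]) => r.nonEmpty)(lookup())

  services.onComplete { r =>
    println(r)
    system.terminate()
  }
}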
Example 5
Source File: KafkaProducerConnector.scala    From openwhisk   with Apache License 2.0
package org.apache.openwhisk.connector.kafka

import akka.actor.ActorSystem
import akka.pattern.after
import org.apache.kafka.clients.producer._
import org.apache.kafka.common.errors._
import org.apache.kafka.common.serialization.StringSerializer
import pureconfig._
import pureconfig.generic.auto._
import org.apache.openwhisk.common.{Counter, Logging, TransactionId}
import org.apache.openwhisk.connector.kafka.KafkaConfiguration._
import org.apache.openwhisk.core.ConfigKeys
import org.apache.openwhisk.core.connector.{Message, MessageProducer}
import org.apache.openwhisk.core.entity.{ByteSize, UUIDs}
import org.apache.openwhisk.utils.Exceptions

import scala.collection.JavaConverters._
import scala.concurrent.duration._
import scala.concurrent.{blocking, ExecutionContext, Future, Promise}
import scala.util.{Failure, Success}

class KafkaProducerConnector(
  kafkahosts: String,
  id: String = UUIDs.randomUUID().toString,
  maxRequestSize: Option[ByteSize] = None)(implicit logging: Logging, actorSystem: ActorSystem)
    extends MessageProducer
    with Exceptions {

  implicit val ec: ExecutionContext = actorSystem.dispatcher
  private val gracefulWaitTime = 100.milliseconds

  override def sentCount(): Long = sentCounter.cur

  
  // Elided from this excerpt: the send() override, which is where this file uses
  // akka.pattern.after (together with gracefulWaitTime) to retry transient failures.
  override def close(): Unit = {
    logging.info(this, "closing producer")
    producer.close()
  }

  private val sentCounter = new Counter()

  private def createProducer(): KafkaProducer[String, String] = {
    val config = Map(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG -> kafkahosts) ++
      configMapToKafkaConfig(loadConfigOrThrow[Map[String, String]](ConfigKeys.kafkaCommon)) ++
      configMapToKafkaConfig(loadConfigOrThrow[Map[String, String]](ConfigKeys.kafkaProducer)) ++
      (maxRequestSize map { max =>
        Map("max.request.size" -> max.size.toString)
      } getOrElse Map.empty)

    verifyConfig(config, ProducerConfig.configNames().asScala.toSet)

    tryAndThrow("creating producer")(new KafkaProducer(config, new StringSerializer, new StringSerializer))
  }

  private def recreateProducer(): Unit = {
    logging.info(this, s"recreating producer")
    tryAndSwallow("closing old producer")(producer.close())
    logging.info(this, s"old producer closed")
    producer = createProducer()
  }

  @volatile private var producer = createProducer()
} 
Example 6
Source File: StatusCheckerModule.scala    From CloudGenesis   with Apache License 2.0
package com.lifeway.cloudops.cloudformation

import akka.actor.{ActorSystem, Scheduler}
import com.amazonaws.services.cloudformation.AmazonCloudFormation

import scala.concurrent.{Await, ExecutionContext, Future}
import scala.concurrent.duration._
import akka.pattern.after
import com.amazonaws.AmazonServiceException
import com.lifeway.cloudops.cloudformation.Types.StackName
import org.scalactic._
import org.slf4j.Logger

trait StatusCheckerModule {
  val logger: Logger

  def waitForStatus(
      actorSystem: ActorSystem,
      maxRetries: Int = 100,
      maxWaitTime: Duration = 5.minutes,
      retrySpeed: FiniteDuration = 3.seconds)(statusFetcher: (AmazonCloudFormation, String) => (String, String))(
      cfClient: AmazonCloudFormation,
      id: String,
      stackName: StackName,
      waitForStatus: Types.Status,
      failIfInStatus: Seq[Types.Status]): Unit Or AutomationError = {

    implicit val ec: ExecutionContext = actorSystem.dispatcher
    implicit val sch: Scheduler       = actorSystem.scheduler

    sealed trait StatusException            extends Exception
    case object PendingException            extends StatusException
    case class FailedException(msg: String) extends StatusException

    def checkStatus: Unit = {
      val (status, reason) = statusFetcher(cfClient, id)

      if (status == waitForStatus) ()
      else if (failIfInStatus.contains(status))
        throw new FailedException(s"Unexpected stack status: $status. Reason: $reason")
      else throw PendingException
    }

    def retry(op: => Unit, delay: FiniteDuration, retries: Int): Future[Unit Or AutomationError] =
      Future(op).map(x => Good(x)) recoverWith {
        case PendingException if retries > 0 => after(delay, sch)(retry(op, delay, retries - 1))
        case FailedException(err) =>
          Future.successful(
            Bad(StackError(s"Failed to reach expected status of $waitForStatus for $stackName due to: $err")))
        case t: AmazonServiceException if t.getStatusCode >= 500 =>
          logger.error(s"AWS 500 Service Exception: Failed to reach expected status of $waitForStatus for $stackName",
                       t)
          Future.successful(
            Bad(ServiceError(
              s"AWS 500 Service Exception: Failed to reach expected status of $waitForStatus for $stackName")))
        case _ =>
          Future.successful(Bad(StackError(s"Failed to reach expected status of $waitForStatus for $stackName")))
      }

    //Retry to find final status for up to max time...
    try {
      Await.result(retry(checkStatus, retrySpeed, maxRetries), maxWaitTime)
    } catch {
      case _: Throwable =>
        Bad(
          StackError(
            s"Failed to wait to reach expected status of $waitForStatus for $stackName due to process timeout"))
    }
  }
} 
Example 7
Source File: Retry.scala    From lagom   with Apache License 2.0
package com.lightbend.lagom.internal.javadsl.persistence.jpa

import java.util.concurrent.CompletionStage
import java.util.function.Supplier

import akka.actor.Scheduler
import akka.pattern.after

import scala.concurrent.duration.Duration.fromNanos
import scala.concurrent.duration.FiniteDuration
import scala.concurrent.ExecutionContext
import scala.concurrent.Future
import scala.util.control.NonFatal

// With thanks to https://gist.github.com/viktorklang/9414163
private[lagom] class Retry(delay: FiniteDuration, delayFactor: Double, maxRetries: Int) {
  def apply[T](op: => T)(implicit ec: ExecutionContext, s: Scheduler): Future[T] = {
    def iterate(nextDelay: FiniteDuration, remainingRetries: Int): Future[T] =
      Future(op).recoverWith {
        case NonFatal(throwable) if remainingRetries > 0 => {
          onRetry(throwable, nextDelay, remainingRetries)
          after(nextDelay, s)(iterate(finiteMultiply(nextDelay, delayFactor), remainingRetries - 1))
        }
      }

    iterate(delay, maxRetries)
  }

  // For convenient use from Java 8
  def retry[T](op: Supplier[T])(implicit ec: ExecutionContext, s: Scheduler): CompletionStage[T] = {
    import scala.compat.java8.FutureConverters._

    apply(op.get()).toJava
  }

  protected def onRetry(throwable: Throwable, delay: FiniteDuration, remainingRetries: Int): Unit = ()

  private def finiteMultiply(duration: FiniteDuration, factor: Double): FiniteDuration =
    fromNanos((duration.toNanos * factor).toLong)
}

private[lagom] object Retry {
  def apply[T](delay: FiniteDuration, delayFactor: Double, maxRetries: Int)(
      op: => T
  )(implicit ec: ExecutionContext, s: Scheduler): Future[T] =
    (new Retry(delay, delayFactor, maxRetries))(op)
} 
Example 8
Source File: MultiNodeExpect.scala    From lagom   with Apache License 2.0
package com.lightbend.lagom.internal.cluster

import akka.Done
import akka.actor.ActorRef
import akka.actor.ActorSystem
import akka.actor.Scheduler
import akka.annotation.ApiMayChange
import akka.cluster.ddata.DistributedData
import akka.cluster.ddata.Flag
import akka.cluster.ddata.FlagKey
import akka.cluster.ddata.Replicator.Get
import akka.cluster.ddata.Replicator.GetSuccess
import akka.cluster.ddata.Replicator.ReadLocal
import akka.cluster.ddata.Replicator.Update
import akka.cluster.ddata.Replicator.UpdateSuccess
import akka.cluster.ddata.Replicator.WriteAll
import akka.cluster.ddata.Replicator.WriteConsistency
import akka.cluster.ddata.SelfUniqueAddress
import akka.event.LoggingAdapter
import akka.testkit.TestProbe

import scala.concurrent.ExecutionContext
import scala.concurrent.Future
import scala.concurrent.duration._
import scala.reflect.ClassTag
import akka.pattern.after
import akka.pattern.ask
import akka.util.Timeout

import scala.util.control.NonFatal

@ApiMayChange
class MultiNodeExpect(probe: TestProbe)(implicit system: ActorSystem) {
  private implicit val scheduler: Scheduler               = system.scheduler
  private implicit val executionContext: ExecutionContext = system.dispatcher

  val replicator: ActorRef             = DistributedData(system).replicator
  implicit val node: SelfUniqueAddress = DistributedData(system).selfUniqueAddress

  
  def expectMsgType[T](expectationKey: String, max: FiniteDuration)(implicit t: ClassTag[T]): Future[Done] = {
    val eventualT = () => Future(errorAsRuntime(probe.expectMsgType[T](max)))
    doExpect(eventualT)(expectationKey, max)
  }

  // prevents Errors from turning into BoxedError when using `Future(f)` (where f throws Error)
  private def errorAsRuntime[T](f: => T): T = {
    try {
      f
    } catch {
      case NonFatal(t)  => throw t
      case x: Throwable => throw new RuntimeException(x)
    }
  }

  private def doExpect[T](eventualT: () => Future[T])(expectationKey: String, max: FiniteDuration): Future[Done] = {
    val DataKey: FlagKey           = FlagKey(expectationKey)
    val writeAll: WriteConsistency = WriteAll(max)
    implicit val timeout: Timeout  = Timeout(max)

    val retryDelay = 3.second

    val fTimeout = after(max, scheduler)(Future.failed(new RuntimeException(s"timeout $max expired")))

    // If the local expectation wins, it must notify others.
    val fLocalExpect: Future[Done] = eventualT()
      .map { _ =>
        (replicator ? Update(DataKey, Flag.empty, writeAll)(
          _.switchOn
        )).mapTo[UpdateSuccess[Flag]]
      }
      .map(_ => Done)

    // if a remote expectation wins, we can move on.
    val poll: () => Future[Done] = () =>
      (replicator ? Get(DataKey, ReadLocal)).map {
        case g @ GetSuccess(DataKey, _) if g.get(DataKey).enabled => Done
        case _                                                    => throw new RuntimeException("Flag unset")
      }
    val fRemoteExpect: Future[Done] = retry(
      poll,
      retryDelay,
      Int.MaxValue // keep retrying, there's a timeout later
    )

    Future
      .firstCompletedOf(
        Seq(
          fLocalExpect,
          fRemoteExpect,
          fTimeout
        )
      )
  }

  // From vklang's https://gist.github.com/viktorklang/9414163
  def retry[T](op: () => Future[T], delay: FiniteDuration, retries: Int): Future[T] =
    op().recoverWith { case _ if retries > 0 => after(delay, scheduler)(retry(op, delay, retries - 1)) }
} 
Example 9
Source File: Retries.scala    From http-verbs   with Apache License 2.0
package uk.gov.hmrc.http

import akka.actor.ActorSystem
import akka.pattern.after
import com.typesafe.config.Config
import java.util.concurrent.TimeUnit
import javax.net.ssl.SSLException
import org.slf4j.LoggerFactory
import scala.collection.JavaConverters._
import scala.concurrent.duration._
import scala.concurrent.{ExecutionContext, Future}
import uk.gov.hmrc.play.http.logging.Mdc

trait Retries {

  protected def actorSystem: ActorSystem

  protected def configuration: Option[Config]

  private val logger = LoggerFactory.getLogger("application")

  def retry[A](verb: String, url: String)(block: => Future[A])(implicit ec: ExecutionContext): Future[A] = {
    def loop(remainingIntervals: Seq[FiniteDuration])(mdcData: Map[String, String])(block: => Future[A]): Future[A] =
      // scheduling will lose MDC data. Here we explicitly ensure it is available on block.
      Mdc.withMdc(block, mdcData)
        .recoverWith {
          case ex @ `sslEngineClosedMatcher`() if remainingIntervals.nonEmpty =>
            val delay = remainingIntervals.head
            logger.warn(s"Retrying $verb $url in $delay due to '${ex.getMessage}' error")
            after(delay, actorSystem.scheduler)(loop(remainingIntervals.tail)(mdcData)(block))
        }
    loop(intervals)(Mdc.mdcData)(block)
  }

  private[http] lazy val intervals: Seq[FiniteDuration] = {
    val defaultIntervals = Seq(500.millis, 1.second, 2.seconds, 4.seconds, 8.seconds)
    configuration
      .map { c =>
        val path = "http-verbs.retries.intervals"
        if (c.hasPath(path)) {
          c.getDurationList(path).asScala.map { d =>
            FiniteDuration(d.toMillis, TimeUnit.MILLISECONDS)
          }
        } else {
          defaultIntervals
        }
      }
      .getOrElse(defaultIntervals)
  }

  private lazy val sslEngineClosedMatcher =
    new SSlEngineClosedMatcher(isEnabled("ssl-engine-closed-already"))

  private class SSlEngineClosedMatcher(enabled: Boolean) {
    def unapply(ex: Throwable): Boolean =
      ex match {
        case _: SSLException if ex.getMessage == "SSLEngine closed already" => enabled
        case _                                                              => false
      }
  }

  private def isEnabled(name: String): Boolean =
    configuration.exists { c =>
      val path = s"http-verbs.retries.$name.enabled"
      c.hasPath(path) && c.getBoolean(path)
    }

} 
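A sketch of wiring the Retries trait into a client, assuming http-verbs is on the classpath; FlakyClient, its get method, and RetriesUsage are hypothetical:

import akka.actor.ActorSystem
import com.typesafe.config.{Config, ConfigFactory}
import uk.gov.hmrc.http.Retries

import scala.concurrent.{ExecutionContext, Future}

// Hypothetical client: mixes in Retries and supplies the actor system and config.
class FlakyClient(system: ActorSystem, config: Config) extends Retries {
  override protected def actorSystem: ActorSystem = system
  override protected def configuration: Option[Config] = Some(config)

  def get(url: String)(implicit ec: ExecutionContext): Future[String] =
    retry("GET", url) {
      // Hypothetical HTTP call; only "SSLEngine closed already" failures are
      // retried, and only when enabled in configuration.
      Future.successful("OK")
    }
}

object RetriesUsage extends App {
  implicit val system: ActorSystem = ActorSystem("retries-usage")
  import system.dispatcher

  new FlakyClient(system, ConfigFactory.load()).get("https://example.com").foreach { r =>
    println(r)
    system.terminate()
  }
}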
Example 10
Source File: FutureRetries.scala    From HAT2.0   with GNU Affero General Public License v3.0
package org.hatdex.hat.utils

import akka.actor.Scheduler
import akka.pattern.after

import scala.concurrent.duration._
import scala.concurrent.{ ExecutionContext, Future }
import scala.util.Random

object FutureRetries {
  def retry[T](f: => Future[T], delays: List[FiniteDuration])(implicit ec: ExecutionContext, s: Scheduler): Future[T] = {
    f recoverWith { case _ if delays.nonEmpty => after(delays.head, s)(retry(f, delays.tail)) }
  }

  def withDefault(delays: List[FiniteDuration], retries: Int, default: FiniteDuration): List[FiniteDuration] = {
    if (delays.length > retries) {
      delays take retries
    }
    else {
      delays ++ List.fill(retries - delays.length)(default)
    }
  }

  def withJitter(delays: List[FiniteDuration], maxJitter: Double, minJitter: Double): List[FiniteDuration] = {
    delays.map { delay =>
      val jitter = delay * (minJitter + (maxJitter - minJitter) * Random.nextDouble)
      jitter match {
        case d: FiniteDuration => d
        case _                 => delay
      }
    }
  }

  val fibonacci: Stream[FiniteDuration] = 0.seconds #:: 1.seconds #:: (fibonacci zip fibonacci.tail).map { t => t._1 + t._2 }
} 
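A sketch combining the helpers above: derive a delay schedule, then retry a hypothetical call with it. FutureRetriesUsage, flaky, and the specific delay values are illustrative:

import akka.actor.{ActorSystem, Scheduler}
import org.hatdex.hat.utils.FutureRetries

import scala.concurrent.Future
import scala.concurrent.duration._

object FutureRetriesUsage extends App {
  implicit val system: ActorSystem = ActorSystem("future-retries-usage")
  import system.dispatcher // implicit ExecutionContext
  implicit val scheduler: Scheduler = system.scheduler

  // First two Fibonacci delays, padded to four attempts with a 5-second default,
  // then randomised by roughly +/-20% jitter.
  val delays = FutureRetries.withJitter(
    FutureRetries.withDefault(FutureRetries.fibonacci.take(2).toList, retries = 4, default = 5.seconds),
    maxJitter = 1.2,
    minJitter = 0.8
  )

  def flaky(): Future[Int] = Future(42) // hypothetical call

  FutureRetries.retry(flaky(), delays).foreach { r =>
    println(r)
    system.terminate()
  }
}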
Example 11
Source File: BackoffStrategy.scala    From graphcool-framework   with Apache License 2.0
package cool.graph.messagebus.queue

import akka.pattern.after
import cool.graph.akkautil.SingleThreadedActorSystem

import scala.concurrent.Future
import scala.concurrent.duration.FiniteDuration

object BackoffStrategy {
  import scala.concurrent.ExecutionContext.Implicits.global
  val system = SingleThreadedActorSystem("backoff")

  def backoffDurationFor(currentTry: Int, strategy: BackoffStrategy): FiniteDuration = {
    strategy match {
      case ConstantBackoff(d) => d
      case LinearBackoff(d)   => d * currentTry
    }
  }

  def backoff(duration: FiniteDuration): Future[Unit] = after(duration, system.scheduler)(Future.successful(()))
}

sealed trait BackoffStrategy {
  val duration: FiniteDuration
}


case class ConstantBackoff(duration: FiniteDuration) extends BackoffStrategy
case class LinearBackoff(duration: FiniteDuration) extends BackoffStrategy
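A sketch of computing and applying a backoff; BackoffUsage and the strategy and try number are illustrative:

import cool.graph.messagebus.queue.{BackoffStrategy, LinearBackoff}

import scala.concurrent.Await
import scala.concurrent.duration._

object BackoffUsage extends App {
  // Linear backoff: 100 ms, 200 ms, 300 ms, ... per attempt.
  val strategy = LinearBackoff(100.millis)

  val delay = BackoffStrategy.backoffDurationFor(3, strategy) // 3rd attempt -> 300 ms
  Await.result(BackoffStrategy.backoff(delay), 1.second)
  println(s"waited $delay")
}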
Example 12
Source File: FutureRetryUtility.scala    From NSDb   with Apache License 2.0 5 votes vote down vote up
package io.radicalbit.nsdb.util

import akka.actor.{Actor, ActorRef, Scheduler, Status}
import akka.event.LoggingAdapter
import akka.pattern.after

import scala.concurrent.duration.FiniteDuration
import scala.concurrent.{ExecutionContext, Future}
import scala.util.{Failure, Success}


trait FutureRetryUtility {

  implicit class FutureRetry[T](f: => Future[T]) {
    def retry(delay: FiniteDuration, retries: Int)(
        wasSuccessful: T => Boolean)(implicit ec: ExecutionContext, s: Scheduler, log: LoggingAdapter): Future[T] =
      (for {
        a <- f
        result <- if (wasSuccessful(a) || retries < 1) Future(a)
        else { log.warning("{}. Retrying...", a); after(delay, s)(retry(delay, retries - 1)(wasSuccessful)) }
      } yield result) recoverWith {
        case t if retries > 0 =>
          log.warning("{}. Retrying...", t); after(delay, s)(retry(delay, retries - 1)(wasSuccessful))
      }
  }

  implicit class PipeToFutureRetry[T](f: => Future[T]) {
    def pipeTo(delay: FiniteDuration, retries: Int, recipient: ActorRef)(wasSuccessful: T => Boolean = _ => true)(
        implicit ec: ExecutionContext,
        s: Scheduler,
        log: LoggingAdapter,
        sender: ActorRef = Actor.noSender) =
      f.retry(delay, retries)(wasSuccessful) andThen {
        case Success(r) ⇒ recipient ! r
        case Failure(f) ⇒ recipient ! Status.Failure(f)
      }
  }
} 
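A sketch of using the FutureRetry implicit class from Example 12; FutureRetryUsage, query, and the retry parameters are illustrative:

import akka.actor.{ActorSystem, Scheduler}
import akka.event.{Logging, LoggingAdapter}
import io.radicalbit.nsdb.util.FutureRetryUtility

import scala.concurrent.Future
import scala.concurrent.duration._

object FutureRetryUsage extends App with FutureRetryUtility {
  implicit val system: ActorSystem = ActorSystem("future-retry-usage")
  import system.dispatcher // implicit ExecutionContext
  implicit val scheduler: Scheduler = system.scheduler
  implicit val log: LoggingAdapter = Logging(system, "future-retry-usage")

  // Hypothetical query, retried until the result is non-negative.
  def query(): Future[Int] = Future(1)

  query().retry(delay = 500.millis, retries = 3)(_ >= 0).foreach { r =>
    println(r)
    system.terminate()
  }
}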
Example 13
Source File: Retriable.scala    From vamp   with Apache License 2.0 5 votes vote down vote up
package io.vamp.container_driver.kubernetes

import akka.actor.Scheduler
import akka.pattern.after
import scala.concurrent.{ ExecutionContext, Future }
import scala.concurrent.duration.FiniteDuration

trait Retriable {

  def retryIndefinitely[T](op: ⇒ T, delay: FiniteDuration)(implicit ec: ExecutionContext, s: Scheduler): Future[T] =
    Future(op) recoverWith { case _ ⇒ after(delay, s)(retryIndefinitely(op, delay)) }

  def retryWithLimit[T](op: ⇒ T, delay: FiniteDuration, iteration: Int, limit: Int)(implicit ec: ExecutionContext, s: Scheduler): Future[T] =
    Future(op) recoverWith {
      case _ ⇒
        if (iteration < limit)
          after(delay * iteration, s)(retryWithLimit(op, delay, iteration + 1, limit))
        else
          Future.failed(new Exception("Give up on retrial"))
    }

} 
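A sketch of mixing in Retriable and retrying a plain (non-Future) operation; RetriableUsage, readState, and the limit/delay values are illustrative:

import akka.actor.{ActorSystem, Scheduler}
import io.vamp.container_driver.kubernetes.Retriable

import scala.concurrent.ExecutionContext
import scala.concurrent.duration._

object RetriableUsage extends App with Retriable {
  implicit val system: ActorSystem = ActorSystem("retriable-usage")
  implicit val ec: ExecutionContext = system.dispatcher
  implicit val scheduler: Scheduler = system.scheduler

  // Hypothetical blocking read that may throw.
  def readState(): String = "Running"

  // Up to 5 attempts; the delay grows linearly with the iteration number.
  retryWithLimit(readState(), delay = 1.second, iteration = 1, limit = 5).foreach { s =>
    println(s)
    system.terminate()
  }
}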
Example 14
Source File: DataRetrieval.scala    From vamp   with Apache License 2.0 5 votes vote down vote up
package io.vamp.common.akka

import java.util.concurrent.TimeoutException

import akka.actor.Actor
import akka.pattern.after
import akka.util.Timeout

import scala.concurrent.Future
import scala.util.{ Failure, Success }

case class DataRetrieved(data: Map[Class[Actor], Any], succeeded: Boolean)

trait DataRetrieval {
  this: ExecutionContextProvider with ActorSystemProvider ⇒

  def retrieve(actors: List[Class[Actor]], futureOf: (Class[Actor]) ⇒ Future[Any], timeout: Timeout): Future[DataRetrieved] = {
    def noDataError(actor: Class[Actor]) = noData(actor) → false

    val futures: Map[Class[Actor], Future[Any]] = actors.map(actor ⇒ actor → futureOf(actor)).toMap

    Future.firstCompletedOf(List(Future.sequence(futures.values.toList.map(_.recover { case x ⇒ Failure(x) })), after(timeout.duration, using = actorSystem.scheduler) {
      Future.successful(new TimeoutException("Component timeout."))
    })) map { _ ⇒
      futures.map {
        case (actor, future) if future.isCompleted ⇒
          actor → future.value.map {
            case Success(data) ⇒ data → true
            case _             ⇒ noDataError(actor)
          }.getOrElse(noDataError(actor))
        case (actor, future) ⇒ actor → noDataError(actor)
      }.foldLeft[DataRetrieved](DataRetrieved(Map(), succeeded = true)) { (r, e) ⇒ r.copy(r.data + (e._1 → e._2._1), succeeded = r.succeeded && e._2._2) }
    }
  }

  def noData(actor: Class[Actor]) = Map("error" → "No response.")
}