com.google.common.base.Throwables Scala Examples

The following examples show how to use com.google.common.base.Throwables in Scala. Each example is taken from an open-source project; the project name, source file, and license are noted above the code.
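
Before the project code, here is a minimal, self-contained sketch (not taken from any of the projects below) of Throwables.getStackTraceAsString, which the examples use for logging, plus getRootCause as a related helper for unwrapping nested exceptions.

import com.google.common.base.Throwables

import scala.util.control.NonFatal

object ThrowablesQuickTour {
  def main(args: Array[String]): Unit = {
    try {
      throw new RuntimeException("outer", new IllegalStateException("root"))
    } catch {
      case NonFatal(t) =>
        // Render the full stack trace as a String, handy for log messages.
        println(Throwables.getStackTraceAsString(t))
        // Walk the cause chain down to the innermost exception.
        println(Throwables.getRootCause(t).getMessage) // prints "root"
    }
  }
}
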
Example 1
Source File: WhiskChangeEventObserver.scala    From openwhisk    with Apache License 2.0
package org.apache.openwhisk.core.database.cosmosdb.cache

import akka.Done
import com.azure.data.cosmos.CosmosItemProperties
import com.azure.data.cosmos.internal.changefeed.ChangeFeedObserverContext
import com.google.common.base.Throwables
import kamon.metric.MeasurementUnit
import org.apache.openwhisk.common.{LogMarkerToken, Logging, MetricEmitter}
import org.apache.openwhisk.core.database.CacheInvalidationMessage
import org.apache.openwhisk.core.database.cosmosdb.CosmosDBConstants
import org.apache.openwhisk.core.database.cosmosdb.CosmosDBUtil.unescapeId
import org.apache.openwhisk.core.entity.CacheKey

import scala.collection.concurrent.TrieMap
import scala.collection.immutable.Seq
import scala.concurrent.{ExecutionContext, Future}
import scala.util.{Failure, Success}

class WhiskChangeEventObserver(config: InvalidatorConfig, eventProducer: EventProducer)(implicit ec: ExecutionContext,
                                                                                        log: Logging)
    extends ChangeFeedObserver {
  import WhiskChangeEventObserver._

  override def process(context: ChangeFeedObserverContext, docs: Seq[CosmosItemProperties]): Future[Done] = {
    // Each observer is called from a pool managed by the CosmosDB ChangeFeedProcessor,
    // so it's fine to have a blocking wait. If this fails, the batch will be re-read and
    // retried, ensuring at-least-once semantics.
    val f = eventProducer.send(processDocs(docs, config))
    f.andThen {
      case Success(_) =>
        MetricEmitter.emitCounterMetric(feedCounter, docs.size)
        recordLag(context, docs.last)
      case Failure(t) =>
        log.warn(this, "Error occurred while sending cache invalidation message " + Throwables.getStackTraceAsString(t))
    }
  }
}

trait EventProducer {
  def send(msg: Seq[String]): Future[Done]
}

object WhiskChangeEventObserver {
  val instanceId = "cache-invalidator"
  private val feedCounter =
    LogMarkerToken("cosmosdb", "change_feed", "count", tags = Map("collection" -> "whisks"))(MeasurementUnit.none)
  private val lags = new TrieMap[String, LogMarkerToken]

  
  def recordLag(context: ChangeFeedObserverContext, lastDoc: CosmosItemProperties): Unit = {
    val sessionToken = context.getFeedResponse.sessionToken()
    val lsnRef = lastDoc.get("_lsn")
    require(lsnRef != null, s"No lsn defined in document $lastDoc")

    val lsn = lsnRef.toString.toLong
    val sessionLsn = getSessionLsn(sessionToken)
    val lag = sessionLsn - lsn
    val partitionKey = context.getPartitionKeyRangeId
    val gaugeToken = lags.getOrElseUpdate(partitionKey, createLagToken(partitionKey))
    MetricEmitter.emitGaugeMetric(gaugeToken, lag)
  }

  private def createLagToken(partitionKey: String) = {
    LogMarkerToken("cosmosdb", "change_feed", "lag", tags = Map("collection" -> "whisks", "pk" -> partitionKey))(
      MeasurementUnit.none)
  }

  def getSessionLsn(token: String): Long = {
    // Session Token can be in two formats. Either {PartitionKeyRangeId}:{LSN}
    // or {PartitionKeyRangeId}:{Version}#{GlobalLSN}
    // See https://github.com/Azure/azure-documentdb-changefeedprocessor-dotnet/pull/113/files#diff-54cbd8ddcc33cab4120c8af04869f881
    val parsedSessionToken = token.substring(token.indexOf(":") + 1)
    val segments = parsedSessionToken.split("#")
    val lsn = if (segments.size < 2) segments(0) else segments(1)
    lsn.toLong
  }

  def processDocs(docs: Seq[CosmosItemProperties], config: InvalidatorConfig)(implicit log: Logging): Seq[String] = {
    docs
      .filter { doc =>
        val cid = Option(doc.getString(CosmosDBConstants.clusterId))
        val currentCid = config.clusterId

        // Only check when the current clusterId is configured.
        currentCid match {
          case Some(_) => cid != currentCid
          case None    => true
        }
      }
      .map { doc =>
        val id = unescapeId(doc.id())
        log.info(this, s"Changed doc [$id]")
        val event = CacheInvalidationMessage(CacheKey(id), instanceId)
        event.serialize
      }
  }

} 
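
The session-token parsing in getSessionLsn above is a pure function, so the two formats documented in its comment can be checked in isolation. A small sketch with made-up token values (not part of the openwhisk sources):

import org.apache.openwhisk.core.database.cosmosdb.cache.WhiskChangeEventObserver.getSessionLsn

// {PartitionKeyRangeId}:{LSN}
assert(getSessionLsn("0:12345") == 12345L)
// {PartitionKeyRangeId}:{Version}#{GlobalLSN}
assert(getSessionLsn("0:1#67890") == 67890L)
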
Example 2
Source File: CacheInvalidator.scala    From openwhisk    with Apache License 2.0
package org.apache.openwhisk.core.database.cosmosdb.cache

import akka.Done
import akka.actor.{ActorSystem, CoordinatedShutdown}
import akka.kafka.ProducerSettings
import akka.stream.ActorMaterializer
import com.google.common.base.Throwables
import com.typesafe.config.Config
import org.apache.kafka.common.serialization.StringSerializer
import org.apache.openwhisk.common.Logging
import org.apache.openwhisk.core.database.RemoteCacheInvalidation.cacheInvalidationTopic

import scala.concurrent.{ExecutionContext, Future}
import scala.util.{Failure, Success}

object CacheInvalidator {

  val instanceId = "cache-invalidator"
  val whisksCollection = "whisks"

  def start(
    globalConfig: Config)(implicit system: ActorSystem, materializer: ActorMaterializer, log: Logging): Future[Done] = {
    implicit val ec: ExecutionContext = system.dispatcher
    val config = CacheInvalidatorConfig(globalConfig)
    val producer =
      KafkaEventProducer(
        kafkaProducerSettings(defaultProducerConfig(globalConfig)),
        cacheInvalidationTopic,
        config.eventProducerConfig)
    val observer = new WhiskChangeEventObserver(config.invalidatorConfig, producer)
    val feedConsumer = new ChangeFeedConsumer(whisksCollection, config, observer)
    feedConsumer.isStarted.andThen {
      case Success(_) =>
        registerShutdownTasks(system, feedConsumer, producer)
        log.info(this, s"Started the Cache invalidator service. ClusterId [${config.invalidatorConfig.clusterId}]")
      case Failure(t) =>
        log.error(this, "Error occurred while starting the Consumer " + Throwables.getStackTraceAsString(t))
    }
  }

  private def registerShutdownTasks(system: ActorSystem,
                                    feedConsumer: ChangeFeedConsumer,
                                    producer: KafkaEventProducer)(implicit ec: ExecutionContext, log: Logging): Unit = {
    CoordinatedShutdown(system).addTask(CoordinatedShutdown.PhaseBeforeServiceUnbind, "closeFeedListeners") { () =>
      feedConsumer
        .close()
        .flatMap { _ =>
          producer.close().andThen {
            case Success(_) =>
              log.info(this, "Kafka producer successfully shutdown")
          }
        }
    }
  }

  def kafkaProducerSettings(config: Config): ProducerSettings[String, String] =
    ProducerSettings(config, new StringSerializer, new StringSerializer)

  def defaultProducerConfig(globalConfig: Config): Config = globalConfig.getConfig("akka.kafka.producer")

} 
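
The shutdown wiring in registerShutdownTasks follows standard Akka CoordinatedShutdown usage. Below is a stripped-down sketch of the same pattern outside the openwhisk codebase; the system name, task body, and the explicit run call are placeholders for illustration.

import akka.Done
import akka.actor.{ActorSystem, CoordinatedShutdown}

import scala.concurrent.Future

object ShutdownSketch extends App {
  implicit val system: ActorSystem = ActorSystem("cache-invalidator-demo")
  import system.dispatcher

  // Tasks registered in PhaseBeforeServiceUnbind run at the start of the
  // shutdown sequence, which is where the example above closes its feed
  // consumer and Kafka producer.
  CoordinatedShutdown(system).addTask(CoordinatedShutdown.PhaseBeforeServiceUnbind, "closeFeedListeners") { () =>
    Future {
      println("closing feed listeners")
      Done
    }
  }

  // Trigger a coordinated shutdown so the task above runs and the system terminates.
  CoordinatedShutdown(system).run(CoordinatedShutdown.UnknownReason)
}
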
Example 3
Source File: RetryUtils.scala    From spark-druid-olap   with Apache License 2.0
package org.sparklinedata.druid

import java.util.concurrent.{Callable, TimeUnit}

import com.google.common.base.Throwables
import org.apache.spark.sql.SPLLogging


import scala.reflect._

object RetryUtils extends SPLLogging {

  var DEFAULT_RETRY_COUNT: Int = 10
  var DEFAULT_RETRY_SLEEP: Long = TimeUnit.SECONDS.toMillis(30)

  def retryUntil(callable: Callable[Boolean],
                 expectedValue: Boolean,
                 delayInMillis: Long,
                 retryCount: Int,
                 taskMessage: String) : Unit = {
    try {
      var currentTry: Int = 0
      while (callable.call != expectedValue) {
        if (currentTry > retryCount) {
          throw new IllegalStateException(
            s"Max number of retries[$retryCount] exceeded for Task[$taskMessage]. Failing."
          )
        }
        logInfo(s"Attempt[$currentTry]: " +
          s"Task $taskMessage still not complete. Next retry in $delayInMillis ms")
        Thread.sleep(delayInMillis)
        currentTry += 1
      }
    } catch {
      case e: Exception =>
        // Throwables.propagate wraps checked throwables in a RuntimeException and rethrows;
        // it is deprecated in recent Guava releases in favor of throwIfUnchecked.
        throw Throwables.propagate(e)
    }
  }

  def ifException[E <: Exception : ClassTag] = (e: Exception) =>
    classTag[E].runtimeClass.isAssignableFrom(e.getClass)


  // Capped exponential backoff: a lazy stream that doubles from `start` up to `cap`,
  // e.g. backoff(200, 5000) yields 200, 400, 800, 1600, 3200, 5000, 5000, ...
  def backoff(start : Int, cap : Int) : Stream[Int] = {
    def next(current : Int) : Stream[Int] = {
      Stream.cons(current, next(Math.min(current * 2, cap)))
    }
    next(start)
  }

  def execWithBackOff[X](taskMessage : String, f : Int => Option[X])(
    numTries : Int = Int.MaxValue, start : Int = 200, cap : Int = 5000) : X = {
    val b = backoff(start, cap).iterator
    var tries = 0
    while(tries < numTries) {
      val nextBackOff = b.next()
      f(nextBackOff) match {
        case Some(x) => return x
        case _ =>
          // Back off for the delay that was passed to f before the next attempt.
          Thread.sleep(nextBackOff)
          tries += 1
      }
    }
    throw new IllegalStateException(
      s"Max number of retries[$numTries] exceeded for Task[$taskMessage]. Failing."
    )
  }

  def retryOnErrors[X](isTransients: (Exception => Boolean)*)(
    taskMessage: String,
    x: => X,
    numTries: Int = Int.MaxValue, start: Int = 200, cap: Int = 5000
  ): X = {
    execWithBackOff(taskMessage, { nextBackOff =>
      try Some(x) catch {
        case e: Exception if isTransients.exists(_(e)) =>
          logWarning(s"Transient error in $taskMessage, retrying after $nextBackOff ms")
          None
      }
    })(numTries, start, cap)

  }

  def retryOnError(isTransient: Exception => Boolean) = new {
    def apply[X](taskMessage: String,
                 x: => X)(
                 numTries: Int = Int.MaxValue, start: Int = 200, cap: Int = 5000) =
      retryOnErrors(isTransient)(taskMessage, x, numTries, start, cap)
  }
}
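
To close, a small usage sketch for the RetryUtils helpers above: retryOnErrors re-evaluates its by-name argument with capped exponential backoff, treating only the listed exception types as transient. The flakyCall function and its failure counter are stand-ins invented for the illustration, not part of the spark-druid-olap sources.

import org.sparklinedata.druid.RetryUtils._

object RetryUtilsSketch extends App {
  // A stand-in for a call that fails transiently before succeeding.
  var failuresLeft = 2
  def flakyCall(): String = {
    if (failuresLeft > 0) { failuresLeft -= 1; throw new IllegalStateException("not ready yet") }
    "ok"
  }

  // Retries with capped exponential backoff (200 ms doubling up to 1 s), at most 5 attempts.
  val result = retryOnErrors(ifException[IllegalStateException])(
    "flaky-call", flakyCall(), numTries = 5, start = 200, cap = 1000)
  println(result) // prints "ok" after two transient failures
}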