akka.actor.Cancellable Scala Examples

The following examples show how to use akka.actor.Cancellable. Each example is drawn from an open-source project; the source file, project, and license are noted above each listing.
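Before the project code, here is a minimal, self-contained sketch of the pattern every example below shares: a Scheduler returns a Cancellable for each scheduled task, and the owner cancels it on shutdown. The system name and tick interval are illustrative, not taken from any project below; newer Akka versions deprecate schedule in favour of scheduleWithFixedDelay, but the classic API is shown here because the examples below use it.

import akka.actor.{ActorSystem, Cancellable}
import scala.concurrent.duration._

object CancellableBasics extends App {
  implicit val system: ActorSystem = ActorSystem("cancellable-demo")
  import system.dispatcher // ExecutionContext required by the scheduler

  // schedule(...) hands back a Cancellable for the recurring task
  val tick: Cancellable =
    system.scheduler.schedule(0.seconds, 500.millis)(println("tick"))

  Thread.sleep(2000)

  // cancel() returns true if this call actually cancelled the task;
  // isCancelled stays true from then on
  println(s"cancelled: ${tick.cancel()}, isCancelled: ${tick.isCancelled}")
  system.terminate()
}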
Example 1
Source File: GabRawSpout.scala    From Raphtory   with Apache License 2.0
package com.raphtory.examples.gab.actors

import akka.actor.Cancellable
import ch.qos.logback.classic.Level
import com.mongodb.casbah.Imports.MongoConnection
import com.mongodb.casbah.Imports._
import com.raphtory.core.components.Spout.SpoutTrait
import org.slf4j.LoggerFactory

import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.duration._
import scala.language.postfixOps

final class GabRawSpout extends SpoutTrait {

  //private val redis    = new RedisClient("moe", 6379)
  //private val redisKey = "gab-posts"
  private var sched: Cancellable = null

  //val options: MongoClientOptions = MongoClientOptions.builder.addCommandListener(new LoggingClusterListener).build()
  //ddClusterListener(new LoggingClusterListener).build
  private val mongoConn = MongoConnection("138.37.32.67", 27017)
  private val mongoColl = mongoConn("gab")("posts")
  private var window    = 1000
  private var postMin   = 0
  private var postMax   = 1001

  val root = LoggerFactory.getLogger(org.slf4j.Logger.ROOT_LOGGER_NAME).asInstanceOf[ch.qos.logback.classic.Logger]
  root.setLevel(Level.ERROR)
  // private val mongoLogger = Logger.getLogger("org.mongodb.driver.cluster")
  // mongoLogger.setLevel(Level.OFF)

  override protected def ProcessSpoutTask(message: Any): Unit = message match {
    case StartSpout  => AllocateSpoutTask(Duration(1, MILLISECONDS), "parsePost")
    case "parsePost" => running()
  }

  def running(): Unit = {
    val count = getNextPosts()
    postMin += window
    postMax += window
    println(s"Current min post is $postMin, max post is $postMax, last call retrieved $count posts")
    AllocateSpoutTask(Duration(10, MILLISECONDS), "parsePost")

  }

  private def getNextPosts(): Int = {
    var count = 0
    for (x <- mongoColl.find("_id" $lt postMax $gt postMin))
      try {
        val data = x.get("data").toString.drop(2).dropRight(1).replaceAll("""\\"""", "").replaceAll("""\\""", "")
        count += 1
        //println(data)
        sendTuple(data)
      } catch {
        case e: Throwable =>
          println("Cannot parse record")
      }
    count
  }

}
//redis-server --dir /home/moe/ben/gab --dbfilename gab.rdb --daemonize yes 
Example 2
Source File: SOLStreamProcessor.scala    From incubator-retired-gearpump   with Apache License 2.0
package org.apache.gearpump.streaming.examples.sol

import java.time.Instant
import java.util.concurrent.TimeUnit

import scala.concurrent.duration.FiniteDuration
import akka.actor.Cancellable
import org.apache.gearpump.Message
import org.apache.gearpump.cluster.UserConfig
import org.apache.gearpump.streaming.task.{Task, TaskContext}

class SOLStreamProcessor(taskContext: TaskContext, conf: UserConfig)
  extends Task(taskContext, conf) {
  import taskContext.output

  val taskConf = taskContext

  private var msgCount: Long = 0
  private var scheduler: Cancellable = null
  private var snapShotWordCount: Long = 0
  private var snapShotTime: Long = 0

  override def onStart(startTime: Instant): Unit = {
    scheduler = taskContext.schedule(new FiniteDuration(5, TimeUnit.SECONDS),
      new FiniteDuration(5, TimeUnit.SECONDS))(reportWordCount())
    snapShotTime = System.currentTimeMillis()
  }

  override def onNext(msg: Message): Unit = {
    output(msg)
    msgCount = msgCount + 1
  }

  override def onStop(): Unit = {
    if (scheduler != null) {
      scheduler.cancel()
    }
  }

  def reportWordCount(): Unit = {
    val current: Long = System.currentTimeMillis()
    LOG.info(s"Task ${taskConf.taskId} " +
      s"Throughput: ${(msgCount - snapShotWordCount, (current - snapShotTime) / 1000)} " +
      s"(words, second)")
    snapShotWordCount = msgCount
    snapShotTime = current
  }
} 
Example 3
Source File: Sum.scala    From incubator-retired-gearpump   with Apache License 2.0
package org.apache.gearpump.streaming.examples.wordcount

import java.time.Instant
import java.util.concurrent.TimeUnit

import scala.collection.mutable
import scala.concurrent.duration.FiniteDuration
import akka.actor.Cancellable
import org.apache.gearpump.Message
import org.apache.gearpump.cluster.UserConfig
import org.apache.gearpump.streaming.task.{Task, TaskContext}

class Sum(taskContext: TaskContext, conf: UserConfig) extends Task(taskContext, conf) {
  private[wordcount] val map: mutable.HashMap[String, Long] = new mutable.HashMap[String, Long]()

  private[wordcount] var wordCount: Long = 0
  private var snapShotTime: Long = System.currentTimeMillis()
  private var snapShotWordCount: Long = 0

  private var scheduler: Cancellable = null

  override def onStart(startTime: Instant): Unit = {
    scheduler = taskContext.schedule(new FiniteDuration(5, TimeUnit.SECONDS),
      new FiniteDuration(30, TimeUnit.SECONDS))(reportWordCount)
  }

  override def onNext(msg: Message): Unit = {
    if (null != msg) {
      val current = map.getOrElse(msg.value.asInstanceOf[String], 0L)
      wordCount += 1
      map.put(msg.value.asInstanceOf[String], current + 1)
    }
  }

  override def onStop(): Unit = {
    if (scheduler != null) {
      scheduler.cancel()
    }
  }

  def reportWordCount(): Unit = {
    val current: Long = System.currentTimeMillis()
    LOG.info(s"Task ${taskContext.taskId} Throughput:" +
      s" ${(wordCount - snapShotWordCount, (current - snapShotTime) / 1000)} (words, second)")
    snapShotWordCount = wordCount
    snapShotTime = current
  }
} 
Example 4
Source File: MessageAkkaScheduler.scala    From Akka-Cookbook   with MIT License
package com.packt.chapter10

import akka.actor.{Actor, ActorLogging, Cancellable}

import scala.concurrent.duration._

object MessageAkkaScheduler {
  case object JustOne
  case object Every2Seconds
}

class MessageAkkaScheduler extends Actor with ActorLogging {
  import MessageAkkaScheduler._
  import context.dispatcher
  var scheduleCancellable : Cancellable = _

  override def preStart() = {
    context.system.scheduler.scheduleOnce(2 seconds, self, JustOne)
    scheduleCancellable = context.system.scheduler.schedule(2 seconds, 2 seconds, self, Every2Seconds) // interval matches the Every2Seconds message name
  }

  override def postStop() = scheduleCancellable.cancel()
  def receive = { case x => log.info(s"Received $x") }
} 
Example 5
Source File: BruteForceDefenderActor.scala    From silhouette-vuejs-app   with Apache License 2.0
package models.services

import java.time.Instant

import akka.actor.{Actor, Cancellable}
import javax.inject.Inject

import scala.concurrent.ExecutionContext
import scala.concurrent.duration.FiniteDuration
import scala.language.postfixOps

class BruteForceDefenderActor @Inject()(conf: BruteForceDefenderConf)(implicit ex: ExecutionContext) extends Actor {
  import BruteForceDefenderActor._

  private var signInAttempts = Map.empty[String, List[Instant]]

  private var scheduler: Cancellable = _

  private case object Reset

  override def preStart(): Unit = {
    import scala.concurrent.duration._
    scheduler = context.system.scheduler.schedule(
      initialDelay = 1 second,
      interval = conf.period,
      receiver = self,
      message = Reset
    )
  }

  override def postStop(): Unit = {
    scheduler.cancel()
  }

  override def receive: Receive = {
    case IsSignInAllowed(email) =>
      val signInAttemptsForEmail = signInAttempts.getOrElse(email, List.empty)
      val attemptsAllowed = conf.attempts - signInAttemptsForEmail.size
      if (attemptsAllowed > 0) {
        sender() ! SignInAllowed(attemptsAllowed)
      } else {
        sender() ! SignInForbidden(signInAttemptsForEmail.last.plusSeconds(conf.period.toSeconds))
      }

    case RegisterWrongPasswordSignIn(email) =>
      signInAttempts += email -> (Instant.now :: signInAttempts.getOrElse(email, List.empty))

    case Reset =>
      val expiredBefore = Instant.now.minusSeconds(conf.period.toSeconds)
      signInAttempts = signInAttempts.map {
        case (email, attempts) => email -> attempts.filter(_.isAfter(expiredBefore))
      }
  }
}


case class BruteForceDefenderConf(attempts: Int, period: FiniteDuration)

object BruteForceDefenderActor {
  case class RegisterWrongPasswordSignIn(email: String)

  case class IsSignInAllowed(email: String)
  case class SignInAllowed(attemptsAllowed: Int)
  case class SignInForbidden(nextSignInAllowedAt: Instant)
} 
Example 6
Source File: Master.scala    From Scala-Design-Patterns-Second-Edition   with MIT License
package com.ivan.nikolov.scheduler.actors

import java.time.LocalDateTime
import java.util.concurrent.TimeUnit

import akka.actor.{Props, Cancellable, Actor}
import akka.routing.RoundRobinPool
import com.ivan.nikolov.scheduler.actors.messages.{Work, Schedule, Done}
import com.ivan.nikolov.scheduler.config.job.{Daily, Hourly}
import com.typesafe.scalalogging.LazyLogging

import scala.concurrent.duration.Duration
import scala.collection.mutable.ListBuffer
import scala.concurrent.ExecutionContext.Implicits.global

class Master(numWorkers: Int, actorFactory: ActorFactory) extends Actor with LazyLogging {
  val cancelables = ListBuffer[Cancellable]()
  
  val router = context.actorOf(
    Props(actorFactory.createWorkerActor()).withRouter(RoundRobinPool(numWorkers)),
    "scheduler-master-worker-router"
  )
  
  override def receive: Receive = {
    case Done(name, command, jobType, success) =>
      if (success) {
        logger.info("Successfully completed {} ({}).", name, command)
      } else {
        logger.error("Failure! Command {} ({}) returned a non-zero result code.", name, command)
      }
    case Schedule(configs) => 
      configs.foreach {
        case config =>
          val cancellable = this.context.system.scheduler.schedule(
            config.timeOptions.getInitialDelay(LocalDateTime.now(), config.frequency),
            config.frequency match {
              case Hourly => Duration.create(1, TimeUnit.HOURS)
              case Daily => Duration.create(1, TimeUnit.DAYS)
            },
            router,
            Work(config.name, config.command, config.jobType)
          )
          cancelables += cancellable // +: builds a new collection and discards it; append to the buffer instead
          logger.info("Scheduled: {}", config)
      }
  }
  
  override def postStop(): Unit = {
    cancelables.foreach(_.cancel())
  }
} 
Example 7
Source File: ReconnectingActor.scala    From scastie   with Apache License 2.0
package com.olegych.scastie.util

import akka.actor.{Actor, ActorContext, ActorLogging, Cancellable}
import akka.remote.DisassociatedEvent
import com.olegych.scastie.api.ActorConnected

import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.duration._

case class ReconnectInfo(serverHostname: String, serverAkkaPort: Int, actorHostname: String, actorAkkaPort: Int)

trait ActorReconnecting extends Actor with ActorLogging {

  private var tryReconnectCallback: Option[Cancellable] = None

  def reconnectInfo: Option[ReconnectInfo]

  def tryConnect(context: ActorContext): Unit

  def onConnected(): Unit = {}

  def onDisconnected(): Unit = {}

  private def setupReconnectCallback(context: ActorContext): Unit = {
    if (reconnectInfo.isDefined) {
      tryReconnectCallback.foreach(_.cancel())
      tryReconnectCallback = Some(
        context.system.scheduler.schedule(0.seconds, 10.seconds) {
          log.info("Reconnecting to server")
          tryConnect(context)
        }
      )
    }
  }

  override def preStart(): Unit =
    try {
      context.system.eventStream.subscribe(self, classOf[DisassociatedEvent])
      setupReconnectCallback(context)
    } finally super.preStart()

  val reconnectBehavior: Receive = {
    case ActorConnected =>
      log.info("Connected to server")
      tryReconnectCallback.foreach(_.cancel())
      tryReconnectCallback = None
      onConnected()

    case ev: DisassociatedEvent => {
      println("DisassociatedEvent " + ev)

      val isServerHostname =
        reconnectInfo
          .map(info => ev.remoteAddress.host.contains(info.serverHostname))
          .getOrElse(false)

      val isServerAkkaPort =
        reconnectInfo
          .map(info => ev.remoteAddress.port.contains(info.serverAkkaPort))
          .getOrElse(false)

      if (isServerHostname && isServerAkkaPort && ev.inbound) {
        log.warning("Disconnected from server")
        onDisconnected()
        setupReconnectCallback(context)
      }
    }
  }
} 
Example 8
Source File: JvmProfiler.scala    From graphcool-framework   with Apache License 2.0
package cool.graph.profiling

import akka.actor.Cancellable
import cool.graph.metrics.MetricsManager

import scala.concurrent.duration.FiniteDuration
import scala.concurrent.duration._

object JvmProfiler {
  def schedule(
      metricsManager: MetricsManager,
      initialDelay: FiniteDuration = 0.seconds,
      interval: FiniteDuration = 5.seconds
  ): Cancellable = {
    import metricsManager.gaugeFlushSystem.dispatcher
    val memoryProfiler = MemoryProfiler(metricsManager)
    val cpuProfiler    = CpuProfiler(metricsManager)
    metricsManager.gaugeFlushSystem.scheduler.schedule(initialDelay, interval) {
      memoryProfiler.profile()
      cpuProfiler.profile()
    }
  }
} 
Example 9
Source File: SeqFileStreamProcessor.scala    From incubator-retired-gearpump   with Apache License 2.0
package org.apache.gearpump.streaming.examples.fsio

import java.io.File
import java.time.Instant
import java.util.concurrent.TimeUnit

import scala.concurrent.duration.FiniteDuration
import akka.actor.Cancellable
import org.apache.hadoop.fs.{FileSystem, Path}
import org.apache.hadoop.io.SequenceFile._
import org.apache.hadoop.io.{SequenceFile, Text}
import org.apache.gearpump.Message
import org.apache.gearpump.cluster.UserConfig
import org.apache.gearpump.streaming.examples.fsio.HadoopConfig._
import org.apache.gearpump.streaming.examples.fsio.SeqFileStreamProcessor._
import org.apache.gearpump.streaming.task.{Task, TaskContext}

class SeqFileStreamProcessor(taskContext: TaskContext, config: UserConfig)
  extends Task(taskContext, config) {

  import taskContext.taskId

  val outputPath = new Path(config.getString(OUTPUT_PATH).get + File.separator + taskId)
  var writer: SequenceFile.Writer = null
  val textClass = new Text().getClass
  val key = new Text()
  val value = new Text()
  val hadoopConf = config.hadoopConf

  private var msgCount: Long = 0
  private var snapShotKVCount: Long = 0
  private var snapShotTime: Long = 0
  private var scheduler: Cancellable = null

  override def onStart(startTime: Instant): Unit = {

    val fs = FileSystem.get(hadoopConf)
    fs.deleteOnExit(outputPath)
    writer = SequenceFile.createWriter(hadoopConf, Writer.file(outputPath),
      Writer.keyClass(textClass), Writer.valueClass(textClass))

    scheduler = taskContext.schedule(new FiniteDuration(5, TimeUnit.SECONDS),
      new FiniteDuration(5, TimeUnit.SECONDS))(reportStatus())
    snapShotTime = System.currentTimeMillis()
    LOG.info("sequence file bolt initiated")
  }

  override def onNext(msg: Message): Unit = {
    val kv = msg.value.asInstanceOf[String].split("\\+\\+")
    if (kv.length >= 2) {
      key.set(kv(0))
      value.set(kv(1))
      writer.append(key, value)
    }
    msgCount += 1
  }

  override def onStop(): Unit = {
    if (scheduler != null) {
      scheduler.cancel()
    }
    writer.close()
    LOG.info("sequence file bolt stopped")
  }

  private def reportStatus() = {
    val current: Long = System.currentTimeMillis()
    LOG.info(s"Task $taskId Throughput: ${
      (msgCount - snapShotKVCount,
        (current - snapShotTime) / 1000)
    } (KVPairs, second)")
    snapShotKVCount = msgCount
    snapShotTime = current
  }
}

object SeqFileStreamProcessor {
  val OUTPUT_PATH = "outputpath"
} 
Example 10
Source File: SchedulerUtil.scala    From Raphtory   with Apache License 2.0
package com.raphtory.core.utils

import akka.actor.Actor
import akka.actor.ActorContext
import akka.actor.ActorRef
import akka.actor.Cancellable

import scala.concurrent.ExecutionContext
import scala.concurrent.duration.FiniteDuration

object SchedulerUtil {

  def scheduleTask(initialDelay: FiniteDuration, interval: FiniteDuration, receiver: ActorRef, message: Any)(
      implicit context: ActorContext,
      executor: ExecutionContext,
      sender: ActorRef = Actor.noSender
  ): Cancellable = {
    val scheduler = context.system.scheduler

    val cancellable = scheduler.schedule(initialDelay, interval, receiver, message)
    context.system.log.debug("The message [{}] has been scheduled for send to [{}].", message, receiver.path)

    cancellable
  }

  def scheduleTaskOnce(
      delay: FiniteDuration,
      receiver: ActorRef,
      message: Any
  )(implicit context: ActorContext, executor: ExecutionContext, sender: ActorRef = Actor.noSender): Cancellable = {
    val scheduler = context.system.scheduler

    val cancellable = scheduler.scheduleOnce(delay, receiver, message)
    context.system.log.debug("The message [{}] has been scheduled for send to [{}].", message, receiver.path)

    cancellable
  }

  def cancelTask(key: String, task: Cancellable)(implicit context: ActorContext): Boolean = {
    task.cancel()

    val isCancelled = task.isCancelled

    if (isCancelled)
      context.system.log.debug("The task [{}] has been cancelled.", key)
    else
      context.system.log.debug("Failed to cancel the task [{}].", key)

    isCancelled
  }

} 
Example 11
Source File: MonitorActor.scala    From CM-Well   with Apache License 2.0
package k.grid.monitoring

import akka.actor.{Actor, Cancellable}
import ch.qos.logback.classic.{Level, Logger}
import com.typesafe.scalalogging.LazyLogging
import org.slf4j.LoggerFactory
import akka.pattern.pipe
import scala.concurrent.duration._
import scala.concurrent.ExecutionContext.Implicits.global

object MonitorActor {
  val name = "MonitorActor"
}

object SetNodeLogLevel {
  val lvlMappings = Map(
    "OFF" -> Level.OFF,
    "ERROR" -> Level.ERROR,
    "WARN" -> Level.WARN,
    "INFO" -> Level.INFO,
    "DEBUG" -> Level.DEBUG,
    "TRACE" -> Level.TRACE,
    "ALL" -> Level.ALL
  )

  def levelTranslator(lvl: String): Option[Level] = {
    lvlMappings.get(lvl.toUpperCase)
  }
}

case object GetNodeLogLevel
case class NodeLogLevel(lvl: String)
case class SetNodeLogLevel(level: Level, levelDuration: Option[Int] = Some(10))

class MonitorActor extends Actor with LazyLogging {
  private[this] var originalLogLevel: Level = _
  private val editableLogger = "ROOT"
  private[this] var scheduledLogLevelReset: Cancellable = _

  @throws[Exception](classOf[Exception])
  override def preStart(): Unit = {
    //FIXME: what if logger is not logback? use jmx or write more defensive code
    originalLogLevel = {
      val l = LoggerFactory.getLogger(editableLogger)
      val f = l.getClass.getProtectionDomain.getCodeSource.getLocation.getFile
      l.info("logger is loaded from: " + f)
      l.asInstanceOf[ch.qos.logback.classic.Logger].getLevel
    }
  }

  override def receive: Receive = {
    case PingChildren =>
      MonitorUtil.pingChildren.pipeTo(sender)

    case SetNodeLogLevel(lvl, duration) =>
      if (scheduledLogLevelReset != null) {
        scheduledLogLevelReset.cancel()
        scheduledLogLevelReset = null
      }

      logger.info(s"Setting $editableLogger to log level $lvl")
      duration.foreach { d =>
        logger.info(s"Scheduling $editableLogger to be in level $originalLogLevel in $d minutes")
        scheduledLogLevelReset =
          context.system.scheduler.scheduleOnce(d.minutes, self, SetNodeLogLevel(originalLogLevel, None))
      }

      LoggerFactory.getLogger(editableLogger).asInstanceOf[ch.qos.logback.classic.Logger].setLevel(lvl)
      //change also the log level of the akka logger
      val akkaLoggerName = "akka"
      LoggerFactory.getLogger(akkaLoggerName) match {
        case akkaLogger: Logger =>
          // a typed pattern never matches null, so no extra null check is needed
          akkaLogger.setLevel(lvl)
        case _ =>
      }
    case GetNodeLogLevel =>
      val lvl = LoggerFactory.getLogger(editableLogger).asInstanceOf[ch.qos.logback.classic.Logger].getLevel
      sender ! NodeLogLevel(lvl.toString)
  }
} 
Example 12
Source File: TaskActor.scala    From CM-Well   with Apache License 2.0
package k.grid

import akka.actor.Actor.Receive
import akka.actor.{Actor, ActorLogging, Cancellable}
import scala.concurrent.duration._
import scala.concurrent.ExecutionContext.Implicits.global
import scala.language.postfixOps


case object PauseTask
case object ResumeTask
class TaskActor(start: Long, interval: Long, block: () => Unit) extends Actor with ActorLogging {

  var s: Cancellable = _

  def startTask: Cancellable = {
    Grid.system.scheduler.schedule(start milli, interval milli) {
      block()
    }
  }

  s = startTask

  override def receive: Receive = {
    case PauseTask  => if (!s.isCancelled) s.cancel()
    case ResumeTask => if (s.isCancelled) s = startTask
  }
} 
Example 13
Source File: LagSim.scala    From kafka-lag-exporter   with Apache License 2.0
package com.lightbend.kafkalagexporter.integration

import akka.actor.Cancellable
import akka.actor.typed.scaladsl.Behaviors
import akka.actor.typed.{Behavior, PostStop}
import akka.kafka.{CommitterSettings, Subscriptions}
import akka.kafka.scaladsl.{Committer, Consumer}
import akka.kafka.testkit.scaladsl.KafkaSpec
import akka.stream.OverflowStrategy
import akka.stream.scaladsl.Keep
import akka.stream.testkit.scaladsl.TestSink
import org.scalatest.concurrent.ScalaFutures

import scala.concurrent.Await
import scala.concurrent.duration._

trait LagSim extends KafkaSpec with ScalaFutures {
  private implicit val patience: PatienceConfig = PatienceConfig(30.seconds, 1.second)

  class LagSimulator(topic: String, group: String) {
    private var offset: Int = 0
    private val committerSettings = CommitterSettings(system).withMaxBatch(1).withParallelism(1)

    private lazy val (consumerControl, consumerProbe) = Consumer
      .committableSource(consumerDefaults.withGroupId(group), Subscriptions.topics(topic))
      .buffer(size = 1, OverflowStrategy.backpressure)
      .map { elem =>
        log.debug("Committing elem with offset: {}", elem.committableOffset.partitionOffset)
        elem.committableOffset.commitScaladsl()
      }
      .toMat(TestSink.probe)(Keep.both)
      .run()

    def produceElements(num: Int): Unit = {
      Await.result(produce(topic, offset to (offset + num)), remainingOrDefault)
      offset += num + 1
    }

    // TODO: Replace this with regular Kafka Consumer for more fine-grained control over committing
    def consumeElements(num: Int): Unit = {
      consumerProbe
        .request(num)
        .expectNextN(num)
    }

    def shutdown(): Unit = {
      consumerControl.shutdown().futureValue
      consumerProbe.cancel()
    }
  }

  sealed trait Simulator
  case class Tick(produce: Int, consume: Int) extends Simulator

  def lagSimActor(simulator: LagSimulator,
                  scheduledTick: Cancellable = Cancellable.alreadyCancelled): Behavior[Simulator] =
    Behaviors.receive[Simulator] {
      case (context, tick @ Tick(produce, consume)) =>
        simulator.produceElements(produce)
        simulator.consumeElements(consume)
        lagSimActor(simulator, context.scheduleOnce(1 second, context.self, tick))
    } receiveSignal {
      case (_, PostStop) =>
        simulator.shutdown()
        scheduledTick.cancel()
        Behaviors.same
    }

} 
Example 14
Source File: WeightWatcher.scala    From shield   with MIT License
package shield.actors.config

import akka.actor.{Actor, ActorLogging, Cancellable, Props}
import shield.config.ServiceLocation

import scala.concurrent.duration._

object WeightWatcherMsgs {
  case class SetTargetWeights(services: Map[ServiceLocation, ServiceDetails])
  case object Tick
  case class SetWeights(weights: Map[ServiceLocation, Int]) {
    require(weights.values.forall(_ >= 0), "Negative weight not allowed")
  }
}

object TransitionDetails {
  def default(details: ServiceDetails): TransitionDetails = {
    TransitionDetails(details.weight, 0, details.weight)
  }
}
case class TransitionDetails(targetWeight: Int, delta: Double, currentWeight: Double) {
  require(targetWeight >= 0, "target weight can't be negative")
  require(currentWeight >= 0, "current weight can't be negative")

  def setTarget(newTarget: Int, stepCount: Int) : TransitionDetails =
    if (newTarget != targetWeight) {
      copy(
        targetWeight = newTarget,
        delta = (newTarget - currentWeight) / stepCount
      )
    } else {
      this
    }

  def advanceStep() : TransitionDetails = {
    val next = currentWeight + delta
    if (delta == 0) {
      this
    } else if ((delta < 0 && next <= targetWeight) || (delta > 0 && next >= targetWeight)) {
      copy(delta=0, currentWeight=targetWeight)
    } else {
      copy(currentWeight=next)
    }
  }
}

// Why do we have one weight watcher for all hosts instead of one weight watcher for each host?
// Having a watcher for each host would cause multiple hosts to update their state per step.
// Having one watcher for all hosts will cause one state update per step.
// The one-for-all approach significantly lowers the number of times that ConfigWatcher will have to rebuild the router
object WeightWatcher {
  def props(stepTime: FiniteDuration, stepCount: Int) : Props = Props(new WeightWatcher(stepTime, stepCount))
}
class WeightWatcher(stepTime: FiniteDuration, stepCount: Int) extends Actor with ActorLogging {
  require(stepCount > 0, "Must have at least one step")
  import WeightWatcherMsgs._
  import context.dispatcher

  var state : Map[ServiceLocation, TransitionDetails] = Map.empty

  var ticker : Cancellable = context.system.scheduler.schedule(stepTime, stepTime, self, Tick)

  override def preRestart(reason: Throwable, message: Option[Any]): Unit = {
    self ! SetWeights(state.map { case (loc, transition) => loc -> transition.targetWeight })
    super.preRestart(reason, message)
  }

  override def postStop() = {
    ticker.cancel()
  }

  def receive = {
    case SetTargetWeights(proxyDetails) =>
      state = proxyDetails.map { case (location, proxyDetail) =>
          location -> state.get(location).map(_.setTarget(proxyDetail.weight, stepCount)).getOrElse(TransitionDetails.default(proxyDetail))
      }

    case Tick =>
      val oldWeights = state.map { case (loc, transition) => loc -> transition.currentWeight.toInt }
      state = state.map { case (loc, transition) => loc -> transition.advanceStep() }
      val newWeights = state.map { case (loc, transition) => loc -> transition.currentWeight.toInt }
      if (oldWeights != newWeights) {
        context.parent ! SetWeights(newWeights)
      }

    case SetWeights(weights) =>
      state = weights.map { case (loc, weight) => loc -> TransitionDetails(weight, 0, weight) }
      context.parent ! SetWeights(weights)
  }
} 
Example 15
Source File: FileMonitorActor.scala    From graphql-gateway   with Apache License 2.0
package sangria.gateway.file

import java.nio.file.{NoSuchFileException, StandardWatchEventKinds}

import akka.actor.{Actor, ActorRef, Cancellable, PoisonPill, Props}
import akka.event.Logging
import better.files._
import sangria.gateway.file.FileWatcher._

import scala.collection.mutable
import scala.concurrent.duration.FiniteDuration

class FileMonitorActor(paths: Seq[File], threshold: FiniteDuration, globs: Seq[String], cb: Vector[File] ⇒ Unit) extends Actor {
  import FileMonitorActor._

  import context.dispatcher

  val log = Logging(context.system, this)
  var watchers: Seq[ActorRef] = _
  val pendingFiles: mutable.HashSet[File] = mutable.HashSet[File]()
  var scheduled: Option[Cancellable] = None

  override def preStart(): Unit = {
    watchers = paths.map(_.newWatcher(recursive = true))

    watchers.foreach { watcher ⇒
      watcher ! when(events = StandardWatchEventKinds.ENTRY_CREATE, StandardWatchEventKinds.ENTRY_MODIFY, StandardWatchEventKinds.ENTRY_DELETE) {
        case (_, file) ⇒ self ! FileChange(file)
      }
    }
  }

  def receive = {
    case FileChange(file) ⇒
      try {
        if (file.exists && !file.isDirectory && globs.exists(file.glob(_, includePath = false).nonEmpty)) {
          pendingFiles += file

          if (scheduled.isEmpty)
            scheduled = Some(context.system.scheduler.scheduleOnce(threshold, self, Threshold))
        }
      } catch {
        case _: NoSuchFileException ⇒ // ignore, it's ok
      }

    case Threshold ⇒
      val files = pendingFiles.toVector.sortBy(_.name)

      if (files.nonEmpty)
        cb(files)

      pendingFiles.clear()
      scheduled = None
  }
}

object FileMonitorActor {
  case class FileChange(file: File)
  case object Threshold

  def props(paths: Seq[File], threshold: FiniteDuration, globs: Seq[String], cb: Vector[File] ⇒ Unit) =
    Props(new FileMonitorActor(paths, threshold, globs, cb))
} 
Example 16
Source File: SimulatedScheduler.scala    From metronome   with Apache License 2.0
package dcos.metronome

import akka.actor.{Cancellable, Scheduler}
import java.util.concurrent.atomic.AtomicLong

import scala.concurrent.ExecutionContext
import scala.concurrent.duration.FiniteDuration

class SimulatedScheduler(clock: SettableClock) extends Scheduler {
  override def maxFrequency = 0.0
  private[this] val nextId = new AtomicLong
  private[this] val scheduledTasks = scala.collection.mutable.Map.empty[Long, ScheduledTask]
  private case class ScheduledTask(action: () => Unit, var time: Long)
  private class ScheduledTaskCancellable(id: Long) extends Cancellable {
    override def cancel() = {
      doCancel(id)
      true
    }
    override def isCancelled = !scheduledTasks.contains(id) // cancelled tasks have been removed from the map
  }

  clock.onChange { () => poll() }

  private[this] def doCancel(id: Long) = synchronized { scheduledTasks -= id }
  private[this] def poll(): Unit =
    synchronized {
      val now = clock.instant().toEpochMilli
      scheduledTasks.values.foreach { task =>
        if (task.time <= now) task.action()
      }
    }

  override def scheduleOnce(delay: FiniteDuration, runnable: Runnable)(implicit
      executor: ExecutionContext
  ): Cancellable =
    synchronized {
      val id = nextId.getAndIncrement
      val cancellable = new ScheduledTaskCancellable(id)
      scheduledTasks(id) = ScheduledTask(
        time = clock.instant().plusMillis(delay.toMillis).toEpochMilli,
        action = () => {
          cancellable.cancel()
          executor.execute(runnable)
        }
      )
      poll()
      cancellable
    }

  def schedule(initialDelay: FiniteDuration, interval: FiniteDuration, runnable: Runnable)(implicit
      executor: ExecutionContext
  ): Cancellable =
    synchronized {
      val id = nextId.getAndIncrement
      val cancellable = new ScheduledTaskCancellable(id)
      scheduledTasks(id) = ScheduledTask(
        time = clock.instant().toEpochMilli + initialDelay.toMillis,
        action = () => {
          scheduledTasks(id).time = clock.instant().toEpochMilli + interval.toMillis
          executor.execute(runnable)
        }
      )
      poll()
      cancellable
    }

  def taskCount = synchronized { scheduledTasks.size }
} 
Example 17
Source File: HttpMetrics.scala    From service-container   with Apache License 2.0
package com.github.vonnagy.service.container.http

import java.util.concurrent.TimeUnit

import akka.actor.{ActorSystem, Cancellable}
import com.github.vonnagy.service.container.log.LoggingAdapter
import com.github.vonnagy.service.container.metrics._

import scala.concurrent.duration._

case class Stats(
                  uptime: FiniteDuration,
                  totalRequests: Long,
                  openRequests: Long,
                  maxOpenRequests: Long,
                  totalConnections: Long,
                  openConnections: Long,
                  maxOpenConnections: Long,
                  requestTimeouts: Long)

private[http] trait HttpMetrics extends LoggingAdapter {

  implicit def system: ActorSystem

  var metricsJob: Option[Cancellable] = None
  var lastStats = Stats(FiniteDuration(0, TimeUnit.MILLISECONDS), 0, 0, 0, 0, 0, 0, 0)

  /// TODO def httpListener: Option[ActorSelection]

  val totConn = Gauge("container.http.connections.total") {
    lastStats.totalConnections
  }
  val openConn = Gauge("container.http.connections.open") {
    lastStats.openConnections
  }
  val maxOpenConn = Gauge("container.http.connections.max-open") {
    lastStats.maxOpenConnections
  }
  val totReq = Gauge("container.http.requests.total") {
    lastStats.totalRequests
  }
  val openReq = Gauge("container.http.requests.open") {
    lastStats.openRequests
  }
  val maxOpenReq = Gauge("container.http.requests.max-open") {
    lastStats.maxOpenRequests
  }
  val uptime = Gauge("container.http.uptime") {
    lastStats.uptime.toMillis
  }
  val idle = Gauge("container.http.idle-timeouts") {
    lastStats.requestTimeouts
  }

  protected[http] def scheduleHttpMetrics(interval: FiniteDuration): Unit = {
    // Schedule an event to gather the http statistics so that we can add information to our metrics system
    log.info("Scheduling http server metrics handler")
    implicit val dis = system.dispatcher
    metricsJob = Some(system.scheduler.schedule(interval, interval)(getMetrics))
  }

  protected[http] def cancelHttpMetrics(): Unit = {
    metricsJob.exists(_.cancel())
    metricsJob = None
  }

  private def getMetrics(): Unit = {

    try {
      // TODO - No stats
//      if (httpListener.isDefined) httpListener.get ? Http.GetStats onSuccess {
//        case x: Stats => lastStats = x
//      }
      lastStats = Stats(0 seconds, 0, 0, 0, 0, 0, 0, 0)
    }
    catch {
      case e: Exception =>
        log.error("An error occurred when trying to fetch and record the http server metrics", e)
    }
  }
} 
Example 18
Source File: ClickhouseHostHealth.scala    From clickhouse-scala-client   with GNU Lesser General Public License v3.0
package com.crobox.clickhouse.balancing.discovery.health

import akka.NotUsed
import akka.actor.{ActorSystem, Cancellable}
import akka.http.scaladsl.Http
import akka.http.scaladsl.model._
import akka.http.scaladsl.settings.ConnectionPoolSettings
import akka.http.scaladsl.unmarshalling.Unmarshaller
import akka.stream.Materializer
import akka.stream.scaladsl.{Flow, Source}
import com.crobox.clickhouse.internal.ClickhouseResponseParser

import scala.concurrent.duration._
import scala.concurrent.{ExecutionContext, Future}
import scala.util.{Failure, Success, Try}

object ClickhouseHostHealth extends ClickhouseResponseParser {

  sealed trait ClickhouseHostStatus {
    val host: Uri
    val code: String
  }

  case class Alive(host: Uri) extends ClickhouseHostStatus { override val code: String = "ok" }

  case class Dead(host: Uri, reason: Throwable) extends ClickhouseHostStatus { override val code: String = "nok" }

  def healthFlow(host: Uri)(
      implicit system: ActorSystem,
      materializer: Materializer,
      executionContext: ExecutionContext
  ): Source[ClickhouseHostStatus, Cancellable] = {
    val healthCheckInterval: FiniteDuration =
      system.settings.config
        .getDuration("connection.health-check.interval")
        .getSeconds.seconds
    val healthCheckTimeout: FiniteDuration =
      system.settings.config
        .getDuration("connection.health-check.timeout")
        .getSeconds.seconds

    val healthCachedPool = Http(system).cachedHostConnectionPool[Int](
      host.authority.host.address(),
      host.effectivePort,
      settings = ConnectionPoolSettings(system)
        .withMaxConnections(1)
        .withMinConnections(1)
        .withMaxOpenRequests(2)
        .withMaxRetries(3)
        .withUpdatedConnectionSettings(
          _.withIdleTimeout(healthCheckTimeout + healthCheckInterval).withConnectingTimeout(healthCheckTimeout)
        )
    )
    Source
      .tick(0.milliseconds, healthCheckInterval, 0)
      .map(tick => {
        (HttpRequest(method = HttpMethods.GET, uri = host), tick)
      })
      .via(healthCachedPool)
      .via(parsingFlow(host))
  }

  private[health] def parsingFlow[T](
      host: Uri
  )(implicit ec: ExecutionContext, mat: Materializer): Flow[(Try[HttpResponse], T), ClickhouseHostStatus, NotUsed] =
    Flow[(Try[HttpResponse], T)].mapAsync(1) {
      case (Success(response @ akka.http.scaladsl.model.HttpResponse(StatusCodes.OK, _, _, _)), _) =>
        Unmarshaller.stringUnmarshaller(decodeResponse(response).entity)
          .map(splitResponse)
          .map(
            stringResponse =>
              if (stringResponse.equals(Seq("Ok."))) {
                Alive(host)
              } else {
                Dead(host, new IllegalArgumentException(s"Got wrong result $stringResponse"))
            }
          )
      case (Success(response), _) =>
        Future.successful(Dead(host, new IllegalArgumentException(s"Got response with status code ${response.status}")))
      case (Failure(ex), _) =>
        Future.successful(Dead(host, ex))
    }

} 
Example 19
Source File: WatchDistributedCompletionActor.scala    From matcher   with MIT License
package com.wavesplatform.dex.actors

import akka.actor.{Actor, ActorRef, Cancellable, Props, Terminated}
import com.wavesplatform.dex.domain.utils.ScorexLogging

import scala.concurrent.duration.FiniteDuration

class WatchDistributedCompletionActor(workers: Set[ActorRef],
                                      completionReceiver: ActorRef,
                                      startWorkCommand: Any,
                                      workCompleted: Any,
                                      timeout: FiniteDuration)
    extends Actor
    with ScorexLogging {

  import context.dispatcher

  if (workers.isEmpty) stop(Cancellable.alreadyCancelled)
  else
    workers.foreach { x =>
      context.watch(x)
      x ! startWorkCommand
    }

  override def receive: Receive = state(workers, context.system.scheduler.scheduleOnce(timeout, self, TimedOut))

  private def state(rest: Set[ActorRef], timer: Cancellable): Receive = {
    case `workCompleted` =>
      switchTo(rest - sender(), timer)
      context.unwatch(sender())

    case Terminated(ref) =>
      switchTo(rest - ref, timer)

    case TimedOut =>
      val workerPairs = workers.iterator.map(_.path.name).mkString(", ")
      log.error(s"$startWorkCommand is timed out! Workers those didn't respond: $workerPairs")
      stop(timer)
  }

  private def switchTo(updatedRest: Set[ActorRef], timer: Cancellable): Unit =
    if (updatedRest.isEmpty) stop(timer) else context.become(state(updatedRest, timer))

  private def stop(timer: Cancellable): Unit = {
    timer.cancel()
    completionReceiver ! workCompleted
    context.stop(self)
  }
}

object WatchDistributedCompletionActor {
  def props(workers: Set[ActorRef], completionReceiver: ActorRef, startWorkCommand: Any, workCompleted: Any, timeout: FiniteDuration): Props =
    Props(new WatchDistributedCompletionActor(workers, completionReceiver, startWorkCommand, workCompleted, timeout))
} 
Example 20
Source File: BatchOrderCancelActor.scala    From matcher   with MIT License
package com.wavesplatform.dex.actors.address

import akka.actor.{Actor, ActorRef, Cancellable, Props}
import com.wavesplatform.dex.actors.TimedOut
import com.wavesplatform.dex.actors.address.AddressActor.Command.CancelOrder
import com.wavesplatform.dex.actors.address.AddressActor.Event
import com.wavesplatform.dex.actors.address.BatchOrderCancelActor.CancelResponse.OrderCancelResult
import com.wavesplatform.dex.domain.order.Order
import com.wavesplatform.dex.domain.utils.ScorexLogging
import com.wavesplatform.dex.error

import scala.concurrent.duration.FiniteDuration

class BatchOrderCancelActor private (
    orderIds: Set[Order.Id],
    processorActor: ActorRef,
    clientActor: ActorRef,
    timeout: FiniteDuration,
    initResponse: Map[Order.Id, OrderCancelResult]
) extends Actor
    with ScorexLogging {

  import BatchOrderCancelActor._
  import context.dispatcher

  orderIds.foreach(processorActor ! CancelOrder(_))

  override def receive: Receive = state(orderIds, initResponse, context.system.scheduler.scheduleOnce(timeout, self, TimedOut))

  private def state(restOrderIds: Set[Order.Id], response: Map[Order.Id, OrderCancelResult], timer: Cancellable): Receive = {
    case CancelResponse(id, x) =>
      val updatedRestOrderIds = restOrderIds - id
      val updatedResponse     = response.updated(id, x)

      if (updatedRestOrderIds.isEmpty) stop(Event.BatchCancelCompleted(updatedResponse), timer)
      else context.become(state(updatedRestOrderIds, updatedResponse, timer))

    // case Terminated(ref) => // Can't terminate before processorActor, because processorActor is a parent

    case TimedOut =>
      log.error(s"CancelOrder is timed out for orders: ${restOrderIds.mkString(", ")}")
      stop(Event.BatchCancelCompleted(response), timer)
  }

  private def stop(response: Event.BatchCancelCompleted, timer: Cancellable): Unit = {
    timer.cancel()
    clientActor ! response
    context.stop(self)
  }
}

object BatchOrderCancelActor {
  def props(orderIds: Set[Order.Id],
            processorActor: ActorRef,
            clientActor: ActorRef,
            timeout: FiniteDuration,
            initResponse: Map[Order.Id, OrderCancelResult] = Map.empty): Props = {
    require(orderIds.nonEmpty, "orderIds is empty")
    Props(new BatchOrderCancelActor(orderIds, processorActor, clientActor, timeout, initResponse))
  }

  object CancelResponse {

    type OrderCancelResult = Either[error.MatcherError, Event.OrderCanceled]

    def unapply(arg: Any): Option[(Order.Id, OrderCancelResult)] = helper.lift(arg)

    private val helper: PartialFunction[Any, (Order.Id, OrderCancelResult)] = {
      case x @ Event.OrderCanceled(id)     => (id, Right(x))
      case x @ error.OrderNotFound(id)     => (id, Left(x))
      case x @ error.OrderCanceled(id)     => (id, Left(x))
      case x @ error.OrderFull(id)         => (id, Left(x))
      case x @ error.MarketOrderCancel(id) => (id, Left(x))
    }
  }
} 
Example 21
Source File: HistoryMessagesBatchSender.scala    From matcher   with MIT License
package com.wavesplatform.dex.history

import akka.actor.{Actor, Cancellable}
import com.wavesplatform.dex.history.HistoryRouter.{HistoryMsg, StopAccumulate}

import scala.collection.mutable
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.duration._
import scala.reflect.ClassTag

abstract class HistoryMessagesBatchSender[M <: HistoryMsg: ClassTag] extends Actor {

  val batchLinger: Long
  val batchEntries: Long

  def createAndSendBatch(batchBuffer: Iterable[M]): Unit

  private val batchBuffer: mutable.Set[M] = mutable.Set.empty[M]

  private def scheduleStopAccumulating: Cancellable = context.system.scheduler.scheduleOnce(batchLinger.millis, self, StopAccumulate)

  private def sendBatch(): Unit = {
    if (batchBuffer.nonEmpty) {
      createAndSendBatch(batchBuffer)
      batchBuffer.clear()
    }
  }

  def receive: Receive = awaitingHistoryMessages

  private def awaitingHistoryMessages: Receive = {
    case msg: M =>
      // schedule a single StopAccumulate; calling scheduleStopAccumulating twice would enqueue a stray early flush
      context become accumulateBuffer(scheduleStopAccumulating)
      batchBuffer += msg
  }

  private def accumulateBuffer(scheduledStop: Cancellable): Receive = {
    case msg: M =>
      if (batchBuffer.size == batchEntries) {
        scheduledStop.cancel()
        sendBatch()
        context become accumulateBuffer(scheduleStopAccumulating)
      }

      batchBuffer += msg

    case StopAccumulate => sendBatch(); context become awaitingHistoryMessages
  }
} 
Example 22
Source File: RandomDataProducer.scala    From parquet4s   with MIT License
package com.github.mjakubowski84.parquet4s.indefinite

import akka.actor.{Actor, ActorRef, Cancellable, Props, Scheduler}
import akka.pattern.ask
import akka.util.Timeout

import scala.concurrent.duration._
import scala.concurrent.{Await, ExecutionContext}
import scala.util.Random

object RandomDataProducer {

  private val words = Seq("Example", "how", "to", "setup", "indefinite", "stream", "with", "Parquet", "writer")

}

trait RandomDataProducer {

  this: Akka with Logger with Kafka =>

  import RandomDataProducer._

  private def nextWord: String = words(Random.nextInt(words.size)) // nextInt's bound is exclusive; size - 1 would never pick the last word
  private def action(): Unit = sendKafkaMessage(nextWord)

  private lazy val scheduler: ActorRef = system.actorOf(FluctuatingSchedulerActor.props(action))
  implicit private val stopTimeout: Timeout = new Timeout(FluctuatingSchedulerActor.MaxDelay)

  def startDataProducer(): Unit = {
    logger.info("Starting scheduler that sends messages to Kafka...")
    scheduler ! FluctuatingSchedulerActor.Start
  }

  def stopDataProducer(): Unit = {
    logger.info("Stopping scheduler...")
    Await.ready(scheduler.ask(FluctuatingSchedulerActor.Stop), Duration.Inf)
  }

}

private object FluctuatingSchedulerActor {

  case object Start
  case object ScheduleNext
  case object Stop

  val MinDelay: FiniteDuration = 1.milli
  val MaxDelay: FiniteDuration = 500.millis
  val StartDelay: FiniteDuration = 100.millis

  trait Direction
  case object Up extends Direction
  case object Down extends Direction

  def props(action: () => Unit): Props = Props(new FluctuatingSchedulerActor(action))

}

private class FluctuatingSchedulerActor(action: () => Unit) extends Actor {

  import FluctuatingSchedulerActor._

  implicit def executionContext: ExecutionContext = context.system.dispatcher
  def scheduler: Scheduler = context.system.scheduler
  var scheduled: Option[Cancellable] = None

  override def receive: Receive = {
    case Start =>
      self ! ScheduleNext
      context.become(scheduling(StartDelay, direction = Down), discardOld = true)
  }

  def scheduling(delay: FiniteDuration, direction: Direction): Receive = {
    case ScheduleNext =>
      action()

      val rate = Random.nextFloat / 10.0f
      val step = (delay.toMillis * rate).millis
      val (newDirection, newDelay) = direction match {
        case Up if delay + step < MaxDelay =>
          (Up, delay + step)
        case Up =>
          (Down, delay - step)
        case Down if delay - step > MinDelay =>
          (Down, delay - step)
        case Down =>
          (Up, delay + step)
      }

      scheduled = Some(scheduler.scheduleOnce(delay, self, ScheduleNext))
      context.become(scheduling(newDelay, newDirection), discardOld = true)

    case Stop =>
      scheduled.foreach(_.cancel())
      context.stop(self)
  }

} 
Example 23
Source File: GroupedWithin.scala    From lila-ws   with GNU Affero General Public License v3.0
package lila.ws
package util

import akka.actor.typed.Scheduler
import akka.actor.Cancellable
import scala.collection.immutable.VectorBuilder
import scala.concurrent.duration.FiniteDuration
import scala.concurrent.ExecutionContext

final class GroupedWithin()(implicit scheduler: Scheduler, ec: ExecutionContext) {

  def apply[A](nb: Int, interval: FiniteDuration)(emit: Emit[Vector[A]]) =
    new GroupedWithinStage[A](nb, interval, emit)
}

final class GroupedWithinStage[A](
    nb: Int,
    interval: FiniteDuration,
    emit: Emit[Vector[A]]
)(implicit
    scheduler: Scheduler,
    ec: ExecutionContext
) {

  private val buffer: VectorBuilder[A] = new VectorBuilder

  private var scheduledFlush: Cancellable = scheduler.scheduleOnce(interval, () => flush())

  def apply(elem: A): Unit =
    synchronized {
      buffer += elem
      if (buffer.size >= nb) unsafeFlush()
    }

  private def flush(): Unit = synchronized { unsafeFlush() }

  private def unsafeFlush(): Unit = {
    if (buffer.nonEmpty) {
      emit(buffer.result())
      buffer.clear()
    }
    scheduledFlush.cancel()
    scheduledFlush = scheduler.scheduleOnce(interval, () => flush())
  }
} 
Example 24
Source File: AmqpJdbcScheduler.scala    From reliable-http-client   with Apache License 2.0
package rhttpc.transport.amqpjdbc

import akka.actor.{Cancellable, Scheduler}
import org.slf4j.LoggerFactory
import rhttpc.transport.SerializingPublisher.SerializedMessage
import rhttpc.transport._

import scala.concurrent.duration.FiniteDuration
import scala.concurrent.{ExecutionContext, Future}
import scala.util.control.NonFatal
import scala.util.{Failure, Success, Try}

private[amqpjdbc] trait AmqpJdbcScheduler[PubMsg] {

  def schedule(msg: Message[PubMsg], delay: FiniteDuration): Future[Unit]

  def start(): Unit

  def stop(): Future[Unit]

}

private[amqpjdbc] class AmqpJdbcSchedulerImpl[PubMsg](scheduler: Scheduler,
                                                      checkInterval: FiniteDuration,
                                                      repo: ScheduledMessagesRepository,
                                                      queueName: String,
                                                      batchSize: Int,
                                                      publisher: SerializingPublisher[PubMsg])
                                                     (implicit ec: ExecutionContext,
                                                      serializer: Serializer[PubMsg]) extends AmqpJdbcScheduler[PubMsg] {
  private val logger = LoggerFactory.getLogger(getClass)

  private var ran: Boolean = false
  private var scheduledCheck: Option[Cancellable] = None
  private var currentPublishedFetchedFuture: Future[Int] = Future.successful(0)

  override def schedule(msg: Message[PubMsg], delay: FiniteDuration): Future[Unit] = {
    val serialized = serializer.serialize(msg.content)
    repo.save(MessageToSchedule(queueName, serialized, msg.properties, delay))
  }

  override def start(): Unit = {
    synchronized {
      if (!ran) {
        ran = true
        publishFetchedMessagesThanReschedule()
      }
    }
  }

  private def publishFetchedMessagesThanReschedule(): Unit = {
    synchronized {
      if (ran) {
        val publishedFetchedFuture = repo.fetchMessagesShouldByRun(queueName, batchSize)(publish)
        currentPublishedFetchedFuture = publishedFetchedFuture
        publishedFetchedFuture onComplete handlePublicationResult
      }
    }
  }

  private def publish(messages: Seq[ScheduledMessage]): Future[Seq[Unit]] = {
    if (messages.nonEmpty) {
      logger.debug(s"Fetched ${messages.size}, publishing")
    }
    val handlingFutures = messages.map { message =>
      publisher.publishSerialized(SerializedMessage(message.content.getBytes(), message.properties))
    }
    Future.sequence(handlingFutures)
  }

  private def handlePublicationResult(tryResult: Try[Int]): Unit = {
    tryResult match {
      case Failure(ex) =>
        logger.error("Exception while publishing fetched messages", ex)
      case _ =>
    }
    synchronized {
      if (ran) {
        scheduledCheck = Some(scheduler.scheduleOnce(checkInterval)(publishFetchedMessagesThanReschedule()))
      } else {
        logger.debug(s"Scheduler is stopping, next check will be skipped")
      }
    }
  }

  override def stop(): Future[Unit] = {
    synchronized {
      scheduledCheck.foreach(_.cancel())
      ran = false
      currentPublishedFetchedFuture.map(_ => ()) // return the unit value, not the Unit companion object
    }
  }

} 
Example 25
Source File: TweetExample.scala    From akka_streams_tutorial   with MIT License
package sample.stream

import java.time.{Instant, ZoneId}

import akka.NotUsed
import akka.actor.{ActorSystem, Cancellable}
import akka.stream.DelayOverflowStrategy
import akka.stream.scaladsl.{Flow, MergePrioritized, Sink, Source}
import org.apache.commons.lang3.exception.ExceptionUtils
import org.slf4j.{Logger, LoggerFactory}

import scala.concurrent.duration._
import scala.util.{Failure, Success}

object TweetExample extends App {
  implicit val system = ActorSystem("TweetExample")
  implicit val ec = system.dispatcher
  val logger: Logger = LoggerFactory.getLogger(this.getClass)

  final case class Author(handle: String)

  final case class Hashtag(name: String)

  final case class Tweet(author: Author, timestamp: Long, body: String) {
    def hashtags: Set[Hashtag] =
      body.split(" ").collect { case t if t.startsWith("#") => Hashtag(t) }.toSet

    override def toString = {
      val localDateTime = Instant.ofEpochMilli(timestamp).atZone(ZoneId.systemDefault()).toLocalDateTime
      s"$localDateTime - ${author.handle} tweeted: ${body.take(5)}..."
    }
  }

  val akkaTag = Hashtag("#akka")

  val tweetsLowPrio: Source[Tweet, Cancellable] = Source.tick(1.second, 200.millis, NotUsed).map(_ => Tweet(Author("LowPrio"), System.currentTimeMillis, "#other #akka aBody"))
  val tweetsHighPrio: Source[Tweet, Cancellable] = Source.tick(2.second, 1.second, NotUsed).map(_ => Tweet(Author("HighPrio"), System.currentTimeMillis, "#akka #other aBody"))
  val tweetsVeryHighPrio: Source[Tweet, Cancellable] = Source.tick(2.second, 1.second, NotUsed).map(_ => Tweet(Author("VeryHighPrio"), System.currentTimeMillis, "#akka #other aBody"))

  val limitedTweets: Source[Tweet, NotUsed] = Source.combine(tweetsLowPrio, tweetsHighPrio, tweetsVeryHighPrio)(_ => MergePrioritized(List(1, 10, 100))).take(20)

  val processingFlow = Flow[Tweet]
    .filter(_.hashtags.contains(akkaTag))
    .wireTap(each => logger.info(s"$each"))

  val slowDownstream  =
    Flow[Tweet]
      .delay(5.seconds, DelayOverflowStrategy.backpressure)

  val processedTweets =
    limitedTweets
      .via(processingFlow)
      .via(slowDownstream)
      .runWith(Sink.seq)

  processedTweets.onComplete {
    case Success(results) =>
      logger.info(s"Successfully processed: ${results.size} tweets")
      system.terminate()
    case Failure(exception) =>
      logger.info(s"The stream failed with: ${ExceptionUtils.getRootCause(exception)}")
      system.terminate()
  }
} 
Example 26
Source File: ParametrizedFlow.scala    From akka_streams_tutorial   with MIT License
package sample.stream_shared_state

import akka.Done
import akka.actor.{ActorSystem, Cancellable}
import akka.stream.scaladsl.{Flow, GraphDSL, Keep, Sink, Source, SourceQueueWithComplete, Zip}
import akka.stream.{FlowShape, OverflowStrategy}

import scala.collection.immutable
import scala.concurrent.Future
import scala.concurrent.duration._
import scala.util.{Failure, Success}



object ParametrizedFlow extends App {
  val service = ParameterizedFlowService

  Thread.sleep(5000)
  service.update(1.0)

  Thread.sleep(2000)
  service.update(1.5)
  Thread.sleep(2000)
  service.cancel()
  Thread.sleep(2000)

  println(service.result())
}

object ParameterizedFlowService {
  implicit val system = ActorSystem("ParameterizedFlowService")
  implicit val executionContext = system.dispatcher

  def update(element: Double): Unit = flow._1._2.offer(element)

  def cancel(): Boolean = flow._1._1.cancel()

  def result(): Future[Seq[Double]] = flow._2

  val fun = (flowValue: Int, paramValue: Double) => flowValue * paramValue
  val flow: ((Cancellable, SourceQueueWithComplete[Double]), Future[immutable.Seq[Double]]) =
    Source.tick(0.seconds, 500.millis, 10)
      .viaMat(createParamFlow(1, OverflowStrategy.dropBuffer, 0.5)(fun))(Keep.both)
      .wireTap(x => println(x))
      .toMat(Sink.seq)(Keep.both)
      .run()

  val done: Future[Done] = flow._1._2.watchCompletion()
  terminateWhen(done)

  private def createParamFlow[A, P, O](bufferSize: Int, overflowStrategy: OverflowStrategy, initialParam: P)(fun: (A, P) => O) =
    Flow.fromGraph(GraphDSL.create(Source.queue[P](bufferSize, overflowStrategy)) { implicit builder =>
      queue =>
        import GraphDSL.Implicits._
        val zip = builder.add(Zip[A, P]())
        //Interesting use of the extrapolate operator
        //based on https://doc.akka.io/docs/akka/current/stream/stream-rate.html#understanding-extrapolate-and-expand
        val extra = builder.add(Flow[P].extrapolate(Iterator.continually(_), Some(initialParam)))
        val map = builder.add(Flow[(A, P)].map(r => fun(r._1, r._2)))

        queue ~> extra ~> zip.in1
        zip.out ~> map
        FlowShape(zip.in0, map.out)
    })

  private def terminateWhen(done: Future[_]) = {
    done.onComplete {
      case Success(_) =>
        println("Flow Success. About to terminate...")
        system.terminate()
      case Failure(e) =>
        println(s"Flow Failure: $e. About to terminate...")
        system.terminate()
    }
  }
} 
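The nested materialized type of flow falls out of the two Keep.both calls, which is why update, cancel and result reach into flow._1._2, flow._1._1 and flow._2. Destructuring it once makes those accessors easier to read (a cosmetic sketch, same semantics):

val ((tickCancellable, paramQueue), resultFuture) = flow
// tickCancellable.cancel()  stops the ticking source
// paramQueue.offer(1.5)     feeds a new parameter value into the flow
// resultFuture              completes with every element the stream emitted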
Example 28
Source File: BlacklistSupport.scala    From mantis   with Apache License 2.0
package io.iohk.ethereum.blockchain.sync

import scala.concurrent.duration.FiniteDuration
import akka.actor.{Actor, ActorLogging, Cancellable, Scheduler}
import io.iohk.ethereum.network.PeerId

import scala.concurrent.ExecutionContext.Implicits.global

trait BlacklistSupport {
  selfActor: Actor with ActorLogging =>

  import BlacklistSupport._

  def scheduler: Scheduler

  var blacklistedPeers: Seq[(PeerId, Cancellable)] = Nil

  def blacklist(peerId: PeerId, duration: FiniteDuration, reason: String): Unit = {
    undoBlacklist(peerId)
    log.debug(s"Blacklisting peer ($peerId), $reason")
    val unblacklistCancellable = scheduler.scheduleOnce(duration, self, UnblacklistPeer(peerId))
    blacklistedPeers :+= (peerId, unblacklistCancellable)
  }

  def undoBlacklist(peerId: PeerId): Unit = {
    blacklistedPeers.find(_._1 == peerId).foreach(_._2.cancel())
    blacklistedPeers = blacklistedPeers.filterNot(_._1 == peerId)
  }

  def isBlacklisted(peerId: PeerId): Boolean =
    blacklistedPeers.exists(_._1 == peerId)

  def handleBlacklistMessages: Receive = {
    case UnblacklistPeer(ref) => undoBlacklist(ref)
  }
}

object BlacklistSupport {
  private case class UnblacklistPeer(peerId: PeerId)
} 
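A sketch of how an actor might mix the trait in (the SyncActor name and its trigger message are illustrative only; the real users in mantis are the sync actors):

import scala.concurrent.duration._

class SyncActor extends Actor with ActorLogging with BlacklistSupport {
  override def scheduler: Scheduler = context.system.scheduler

  override def receive: Receive = handleBlacklistMessages orElse {
    case ("misbehaving-peer", peerId: PeerId) =>
      if (!isBlacklisted(peerId))
        blacklist(peerId, 5.minutes, reason = "invalid block headers")
  }
}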
Example 29
Source File: ClusterConnectionFlow.scala    From clickhouse-scala-client   with GNU Lesser General Public License v3.0
package com.crobox.clickhouse.balancing.discovery.cluster

import akka.actor.{ActorSystem, Cancellable}
import akka.http.scaladsl.Http
import akka.http.scaladsl.model.Uri
import akka.http.scaladsl.settings.ConnectionPoolSettings
import akka.stream.Materializer
import akka.stream.scaladsl.Source
import com.crobox.clickhouse.balancing.discovery.ConnectionManagerActor.Connections
import com.crobox.clickhouse.internal.QuerySettings.ReadQueries
import com.crobox.clickhouse.internal.{ClickhouseHostBuilder, ClickhouseQueryBuilder, ClickhouseResponseParser, QuerySettings}
import com.typesafe.scalalogging.LazyLogging

import scala.concurrent.duration._
import scala.concurrent.{ExecutionContext, Future}

private[clickhouse] object ClusterConnectionFlow
    extends ClickhouseQueryBuilder
    with ClickhouseResponseParser
    with LazyLogging {

  def clusterConnectionsFlow(
      targetHost: => Future[Uri],
      scanningInterval: FiniteDuration,
      cluster: String
  )(implicit system: ActorSystem,
    materializer: Materializer,
    ec: ExecutionContext): Source[Connections, Cancellable] = {
    val http = Http(system)
    val settings = ConnectionPoolSettings(system)
      .withMaxConnections(1)
      .withMinConnections(1)
      .withMaxOpenRequests(2)
      .withMaxRetries(3)
      .withUpdatedConnectionSettings(
        _.withIdleTimeout(scanningInterval.plus(1.second))
      )
    Source
      .tick(0.millis, scanningInterval, {})
      .mapAsync(1)(_ => targetHost)
      .mapAsync(1)(host => {
        val query = s"SELECT host_address FROM system.clusters WHERE cluster='$cluster'"
        val request =
          toRequest(host, query, None, QuerySettings(readOnly = ReadQueries, idempotent = Some(true)), None)(
            system.settings.config
          )
        processClickhouseResponse(http.singleRequest(request, settings = settings), query, host, None)
          .map(splitResponse)
          .map(_.toSet.filter(_.nonEmpty))
          .map(result => {
            if (result.isEmpty) {
              throw new IllegalArgumentException(
                s"Could not determine clickhouse cluster hosts for cluster $cluster and host $host. " +
                s"This could indicate that you are trying to use the cluster balancer to connect to a non cluster based clickhouse server. " +
                s"Please use the `SingleHostQueryBalancer` in that case."
              )
            }
            Connections(result.map(ClickhouseHostBuilder.toHost(_, Some(8123))))
          })
      })
  }
} 
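A wiring sketch, callable from within the com.crobox.clickhouse package since the object is private[clickhouse] (the host, cluster name and Akka 2.6-style Materializer are assumptions): the materialized Cancellable is what stops the periodic scan.

import akka.actor.{ActorSystem, Cancellable}
import akka.http.scaladsl.model.Uri
import akka.stream.Materializer
import akka.stream.scaladsl.Sink
import scala.concurrent.Future
import scala.concurrent.duration._

implicit val system: ActorSystem = ActorSystem()
implicit val materializer: Materializer = Materializer(system)
import system.dispatcher

val scanning: Cancellable = ClusterConnectionFlow
  .clusterConnectionsFlow(Future.successful(Uri("http://localhost:8123")), 10.seconds, "my_cluster")
  .to(Sink.foreach(println)) // .to keeps the source's materialized value, i.e. the tick Cancellable
  .run()

// on shutdown: stop scanning for cluster topology changes
scanning.cancel()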
Example 30
Source File: CustomAutoDownBase.scala    From akka-cluster-custom-downing   with Apache License 2.0
package tanukki.akka.cluster.autodown

import akka.actor.{Cancellable, Scheduler, Address, Actor}
import akka.cluster.ClusterEvent._
import akka.cluster.MemberStatus.{Exiting, Down}
import akka.cluster._
import scala.concurrent.duration.{Duration, FiniteDuration}

object CustomDowning {
  case class UnreachableTimeout(member: Member)
}

abstract class CustomAutoDownBase(autoDownUnreachableAfter: FiniteDuration) extends Actor {

  import CustomDowning._

  def selfAddress: Address

  def down(node: Address): Unit

  def downOrAddPending(member: Member): Unit

  def downOrAddPendingAll(members: Set[Member]): Unit

  def scheduler: Scheduler

  import context.dispatcher

  val skipMemberStatus = Set[MemberStatus](Down, Exiting)

  private var scheduledUnreachable: Map[Member, Cancellable] = Map.empty
  private var pendingUnreachable: Set[Member] = Set.empty
  private var unstableUnreachable: Set[Member] = Set.empty

  override def postStop(): Unit = {
    scheduledUnreachable.values foreach { _.cancel }
    super.postStop()
  }

  def receiveEvent: Receive

  def receive: Receive = receiveEvent orElse predefinedReceiveEvent

  def predefinedReceiveEvent: Receive = {
    case state: CurrentClusterState =>
      initialize(state)
      state.unreachable foreach unreachableMember

    case UnreachableTimeout(member) =>
      if (scheduledUnreachable contains member) {
        scheduledUnreachable -= member
        if (scheduledUnreachable.isEmpty) {
          unstableUnreachable += member
          downOrAddPendingAll(unstableUnreachable)
          unstableUnreachable = Set.empty
        } else {
          unstableUnreachable += member
        }
      }

    case _: ClusterDomainEvent =>
  }

  def initialize(state: CurrentClusterState) = {}

  def unreachableMember(m: Member): Unit =
    if (!skipMemberStatus(m.status) && !scheduledUnreachable.contains(m))
      scheduleUnreachable(m)

  def scheduleUnreachable(m: Member): Unit = {
    if (autoDownUnreachableAfter == Duration.Zero) {
      downOrAddPending(m)
    } else {
      val task = scheduler.scheduleOnce(autoDownUnreachableAfter, self, UnreachableTimeout(m))
      scheduledUnreachable += (m -> task)
    }
  }

  def remove(member: Member): Unit = {
    scheduledUnreachable.get(member) foreach { _.cancel }
    scheduledUnreachable -= member
    pendingUnreachable -= member
    unstableUnreachable -= member
  }

  def scheduledUnreachableMembers: Map[Member, Cancellable] = scheduledUnreachable

  def pendingUnreachableMembers: Set[Member] = pendingUnreachable

  def pendingAsUnreachable(member: Member): Unit = pendingUnreachable += member

  def downPendingUnreachableMembers(): Unit = {
    pendingUnreachable.foreach(member => down(member.address))
    pendingUnreachable = Set.empty
  }

  def unstableUnreachableMembers: Set[Member] = unstableUnreachable
} 
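A deliberately naive concrete subclass, to show which pieces a downing strategy must supply; the real strategies in this project add quorum and role checks, so treat this as a sketch only:

import akka.actor.{Address, Scheduler}
import akka.cluster.{Cluster, Member}
import akka.cluster.ClusterEvent.{ReachableMember, UnreachableMember}
import scala.concurrent.duration.FiniteDuration

class NaiveAutoDown(timeout: FiniteDuration) extends CustomAutoDownBase(timeout) {
  private val cluster = Cluster(context.system)

  override def selfAddress: Address = cluster.selfAddress
  override def scheduler: Scheduler = context.system.scheduler
  override def down(node: Address): Unit = cluster.down(node)
  override def downOrAddPending(member: Member): Unit = down(member.address)
  override def downOrAddPendingAll(members: Set[Member]): Unit = members.foreach(downOrAddPending)

  override def receiveEvent: Receive = {
    case UnreachableMember(m) => unreachableMember(m) // schedules an UnreachableTimeout Cancellable
    case ReachableMember(m)   => remove(m)            // cancels it again
  }
}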
Example 31
Source File: CodebaseAnalyzeAggregatorActor.scala    From CodeAnalyzerTutorial   with Apache License 2.0
package tutor

import java.util.Date

import akka.actor.{Actor, ActorLogging, ActorRef, Cancellable, Props, Terminated}
import akka.routing.{ActorRefRoutee, RoundRobinRoutingLogic, Router}
import tutor.CodebaseAnalyzeAggregatorActor.{AnalyzeDirectory, Complete, Report, Timeout}
import tutor.SourceCodeAnalyzerActor.NewFile
import tutor.utils.BenchmarkUtil

import scala.concurrent.duration._
import scala.util.{Failure, Success, Try}

object CodebaseAnalyzeAggregatorActor {
  def props(): Props = Props(new CodebaseAnalyzeAggregatorActor)

  final case class AnalyzeDirectory(path: String)

  final case class Complete(result: Try[SourceCodeInfo])

  final case object Timeout

  final case class Report(codebaseInfo: CodebaseInfo)

}

class CodebaseAnalyzeAggregatorActor extends Actor with ActorLogging with DirectoryScanner with ReportFormatter {
  var controller: ActorRef = _
  var currentPath: String = _
  var beginTime: Date = _
  var fileCount = 0
  var completeCount = 0
  var failCount = 0
  var result: CodebaseInfo = CodebaseInfo.empty
  var timeoutTimer: Cancellable = _

  var router: Router = {
    val routees = Vector.fill(8) {
      val r = context.actorOf(SourceCodeAnalyzerActor.props())
      context watch r
      ActorRefRoutee(r)
    }
    Router(RoundRobinRoutingLogic(), routees)
  }

  override def receive: Receive = {
    case AnalyzeDirectory(path) => {
      controller = sender()
      currentPath = path
      beginTime = BenchmarkUtil.recordStart(s"analyze folder $currentPath")
      foreachFile(path, PresetFilters.knownFileTypes, PresetFilters.ignoreFolders) { file =>
        fileCount += 1
        router.route(NewFile(file.getAbsolutePath), context.self)
      }
      import context.dispatcher
      // rough heuristic: allow ~1 second per 1000 files (0 seconds when fileCount < 1000)
      timeoutTimer = context.system.scheduler.scheduleOnce((fileCount / 1000).seconds, context.self, Timeout)
    }
    case Complete(Success(sourceCodeInfo: SourceCodeInfo)) => {
      completeCount += 1
      result = result + sourceCodeInfo
      finishIfAllComplete()
    }
    case Complete(Failure(exception)) => {
      completeCount += 1
      failCount += 1
      log.warning("processing file failed {}", exception)
      finishIfAllComplete()
    }
    case Timeout => {
      println(s"${result.totalFileNums} of $fileCount files processed before timeout")
      controller ! Report(result)
      BenchmarkUtil.recordElapse(s"analyze folder $currentPath", beginTime)
    }
    case Terminated(a) =>
      router = router.removeRoutee(a)
      val r = context.actorOf(Props[SourceCodeAnalyzerActor])
      context watch r
      router = router.addRoutee(r)
    case x@_ => log.error(s"receive unknown message $x")
  }

  def finishIfAllComplete(): Unit = {
    if (completeCount == fileCount) {
      timeoutTimer.cancel()
      controller ! Report(result)
      BenchmarkUtil.recordElapse(s"analyze folder $currentPath", beginTime)
      context.stop(self)
    }
  }
} 
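A usage sketch with the ask pattern, so the Report reply sent to controller has somewhere to go (the path and the timeout value are placeholders):

import akka.actor.ActorSystem
import akka.pattern.ask
import akka.util.Timeout
import scala.concurrent.duration._
import tutor.CodebaseAnalyzeAggregatorActor.{AnalyzeDirectory, Report}

object AnalyzeApp extends App {
  implicit val system: ActorSystem = ActorSystem("CodeAnalyzer")
  implicit val timeout: Timeout = Timeout(10.minutes)
  import system.dispatcher

  val aggregator = system.actorOf(CodebaseAnalyzeAggregatorActor.props())
  // the ask's temporary actor becomes sender(), i.e. the controller that receives the Report
  (aggregator ? AnalyzeDirectory("/path/to/codebase")).mapTo[Report].foreach { report =>
    println(report.codebaseInfo)
    system.terminate()
  }
}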
Example 32
Source File: Scheduler.scala    From openwhisk   with Apache License 2.0
package org.apache.openwhisk.common

import scala.concurrent.Future
import scala.concurrent.duration._
import scala.util.Failure
import scala.util.Success
import scala.util.Try

import akka.actor.Actor
import akka.actor.ActorSystem
import akka.actor.Cancellable
import akka.actor.Props

object Scheduler {

  // Worker (elided from this excerpt) is a private actor that repeatedly runs f,
  // waiting at least `interval` between the completion of one run and the next.


  def scheduleWaitAtLeast(interval: FiniteDuration,
                          initialDelay: FiniteDuration = Duration.Zero,
                          name: String = "Scheduler")(f: () => Future[Any])(implicit system: ActorSystem,
                                                                            logging: Logging,
                                                                            transid: TransactionId =
                                                                              TransactionId.unknown) = {
    require(interval > Duration.Zero)
    system.actorOf(Props(new Worker(initialDelay, interval, true, name, f)))
  }
} 
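A usage sketch based on the signature above (AkkaLogging is assumed to be openwhisk's LoggingAdapter-backed Logging implementation; the polling body is a placeholder). Since scheduleWaitAtLeast returns the Worker's ActorRef, stopping that actor ends the schedule:

import akka.actor.ActorSystem
import scala.concurrent.Future
import scala.concurrent.duration._
import org.apache.openwhisk.common.{AkkaLogging, Logging, Scheduler}

object PollerApp extends App {
  implicit val system: ActorSystem = ActorSystem("poller")
  implicit val logging: Logging = new AkkaLogging(akka.event.Logging.getLogger(system, this))

  val poller = Scheduler.scheduleWaitAtLeast(10.seconds) { () =>
    // placeholder work; the next run starts no sooner than 10s after this Future completes
    Future.successful(())
  }

  // to cancel: system.stop(poller)
}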
Example 33
Source File: ReceiveTimeout.scala    From perf_tester   with Apache License 2.0
package akka.actor.dungeon

import ReceiveTimeout.emptyReceiveTimeoutData
import akka.actor.ActorCell
import akka.actor.ActorCell.emptyCancellable
import akka.actor.Cancellable
import scala.concurrent.duration.Duration
import scala.concurrent.duration.FiniteDuration

private[akka] object ReceiveTimeout {
  final val emptyReceiveTimeoutData: (Duration, Cancellable) = (Duration.Undefined, ActorCell.emptyCancellable)
}

private[akka] trait ReceiveTimeout { this: ActorCell ⇒

  import ReceiveTimeout._
  import ActorCell._

  private var receiveTimeoutData: (Duration, Cancellable) = emptyReceiveTimeoutData

  final def receiveTimeout: Duration = receiveTimeoutData._1

  final def setReceiveTimeout(timeout: Duration): Unit = receiveTimeoutData = receiveTimeoutData.copy(_1 = timeout)

  final def checkReceiveTimeout(): Unit = {
    val recvtimeout = receiveTimeoutData
    //Only reschedule if desired and there are currently no more messages to be processed
    if (!mailbox.hasMessages) recvtimeout._1 match {
      case f: FiniteDuration ⇒
        recvtimeout._2.cancel() //Cancel any ongoing future
        val task = system.scheduler.scheduleOnce(f, self, akka.actor.ReceiveTimeout)(this.dispatcher)
        receiveTimeoutData = (f, task)
      case _ ⇒ cancelReceiveTimeout()
    }
    else cancelReceiveTimeout()

  }

  final def cancelReceiveTimeout(): Unit =
    if (receiveTimeoutData._2 ne emptyCancellable) {
      receiveTimeoutData._2.cancel()
      receiveTimeoutData = (receiveTimeoutData._1, emptyCancellable)
    }

} 
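This trait is Akka-internal bookkeeping; user code reaches the Cancellable it manages through the actor context. A minimal usage sketch of the public API it implements:

import akka.actor.{Actor, ReceiveTimeout}
import scala.concurrent.duration._

class IdleAwareActor extends Actor {
  // arms the timer managed by the trait above
  context.setReceiveTimeout(30.seconds)

  def receive: Receive = {
    case ReceiveTimeout =>
      // no message arrived for 30 seconds
      context.stop(self)
    case _ =>
      // any other message re-arms the timeout
  }
}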
Example 34
Source File: LogkafkaViewCacheActor.scala    From CMAK   with Apache License 2.0
package kafka.manager.logkafka

import akka.actor.{ActorPath, Cancellable}
import kafka.manager.model.{ClusterContext, ActorModel}
import ActorModel._
import kafka.manager.base.{LongRunningPoolActor, LongRunningPoolConfig}
import kafka.manager.features.KMLogKafkaFeature

import scala.concurrent.duration._
import scala.language.postfixOps
import scala.util.Try


case class LogkafkaViewCacheActorConfig(logkafkaStateActorPath: ActorPath, 
                                      clusterContext: ClusterContext,
                                      longRunningPoolConfig: LongRunningPoolConfig, 
                                      updatePeriod: FiniteDuration = 10 seconds)
class LogkafkaViewCacheActor(config: LogkafkaViewCacheActorConfig) extends LongRunningPoolActor {
  
  private[this] val ZERO = BigDecimal(0)

  private[this] var cancellable : Option[Cancellable] = None

  private[this] var logkafkaIdentities : Map[String, LogkafkaIdentity] = Map.empty

  private[this] var logkafkaConfigsOption : Option[LogkafkaConfigs] = None

  private[this] var logkafkaClientsOption : Option[LogkafkaClients] = None

  override def preStart() = {
    if (config.clusterContext.clusterFeatures.features(KMLogKafkaFeature)) {
      log.info("Started actor %s".format(self.path))
      log.info("Scheduling updater for %s".format(config.updatePeriod))
      cancellable = Some(
        context.system.scheduler.schedule(0 seconds,
          config.updatePeriod,
          self,
          LKVForceUpdate)(context.system.dispatcher,self)
      )
    }
  }

  @scala.throws[Exception](classOf[Exception])
  override def postStop(): Unit = {
    log.info("Stopped actor %s".format(self.path))
    log.info("Cancelling updater...")
    Try(cancellable.map(_.cancel()))
    super.postStop()
  }

  override protected def longRunningPoolConfig: LongRunningPoolConfig = config.longRunningPoolConfig

  override protected def longRunningQueueFull(): Unit = {
    log.error("Long running pool queue full, skipping!")
  }

  override def processActorRequest(request: ActorRequest): Unit = {
    request match {
      case LKVForceUpdate =>
        log.info("Updating logkafka view...")
        //ask for logkafka configs 
        val lastLogkafkaConfigsUpdateMillisOption: Option[Long] = logkafkaConfigsOption.map(_.lastUpdateMillis)
        context.actorSelection(config.logkafkaStateActorPath).tell(LKSGetAllLogkafkaConfigs(lastLogkafkaConfigsUpdateMillisOption), self)
        //ask for logkafka clients
        val lastLogkafkaClientsUpdateMillisOption: Option[Long] = logkafkaClientsOption.map(_.lastUpdateMillis)
        context.actorSelection(config.logkafkaStateActorPath).tell(LKSGetAllLogkafkaClients(lastLogkafkaClientsUpdateMillisOption), self)

      case LKVGetLogkafkaIdentities =>
        sender ! logkafkaIdentities
        
      case any: Any => log.warning("bvca : processActorRequest : Received unknown message: {}", any)
    }
  }

  override def processActorResponse(response: ActorResponse): Unit = {
    response match {
      case lcg: LogkafkaConfigs =>
        logkafkaConfigsOption = Some(lcg)
        updateView()

      case lct: LogkafkaClients =>
        logkafkaClientsOption = Some(lct)
        updateView()

      case any: Any => log.warning("bvca : processActorResponse : Received unknown message: {}", any)
    }
  }

  private[this] def updateView(): Unit = {
    for {
      logkafkaConfigs <- logkafkaConfigsOption
      logkafkaClients <- logkafkaClientsOption
    } {
      val lcgMap = Map(logkafkaConfigs.configs map { a => a.logkafka_id -> a }: _*)
      val lctMap = Map(logkafkaClients.clients map { a => a.logkafka_id -> a }: _*)
      logkafkaIdentities = lcgMap.map (kv =>
        kv._1 -> LogkafkaIdentity.from(kv._1, Some(kv._2), lctMap.get(kv._1)))
    }
  }
} 
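One caveat: the scheduler.schedule(initialDelay, interval, receiver, message) call in preStart is deprecated since Akka 2.6 in favour of the fixed-delay and fixed-rate variants. On newer Akka the equivalent wiring would be (a sketch, same arguments):

cancellable = Some(
  context.system.scheduler.scheduleWithFixedDelay(
    0.seconds, config.updatePeriod, self, LKVForceUpdate
  )(context.system.dispatcher, self)
)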
Example 35
Source File: SchedulerUtils.scala    From sparta   with Apache License 2.0
package com.stratio.sparta.serving.core.utils

import akka.actor.Cancellable
import akka.event.slf4j.SLF4JLogging
import com.stratio.sparta.sdk.utils.AggregationTime
import com.stratio.sparta.serving.core.config.SpartaConfig
import com.stratio.sparta.serving.core.constants.AppConstant._

import scala.concurrent.duration._
import scala.language.postfixOps
import scala.util.Try

trait SchedulerUtils extends SLF4JLogging {

  def scheduleOneTask(timeProperty: String, defaultTime: String)(f: ⇒ Unit): Cancellable = {
    import scala.concurrent.ExecutionContext.Implicits.global
    val delay = Try(SpartaConfig.getDetailConfig.get.getString(timeProperty)).toOption
      .flatMap(x => if (x == "") None else Some(x)).getOrElse(defaultTime)

    log.info(s"Starting scheduler task in $timeProperty with time: $delay")
    SchedulerSystem.scheduler.scheduleOnce(AggregationTime.parseValueToMilliSeconds(delay) milli)(f)
  }

  def scheduleTask(initTimeProperty: String,
                   defaultInitTime: String,
                   intervalTimeProperty: String,
                   defaultIntervalTime: String
                  )(f: ⇒ Unit): Cancellable = {
    import scala.concurrent.ExecutionContext.Implicits.global
    val initialDelay = Try(SpartaConfig.getDetailConfig.get.getString(initTimeProperty)).toOption
      .flatMap(x => if (x == "") None else Some(x)).getOrElse(defaultInitTime)

    val interval = Try(SpartaConfig.getDetailConfig.get.getString(intervalTimeProperty)).toOption
      .flatMap(x => if (x == "") None else Some(x)).getOrElse(defaultIntervalTime)

    log.info(s"Starting scheduler tasks with delay $initTimeProperty with time: $initialDelay and interval " +
      s"$intervalTimeProperty with time: $interval")

    SchedulerSystem.scheduler.schedule(
      AggregationTime.parseValueToMilliSeconds(initialDelay) milli,
      AggregationTime.parseValueToMilliSeconds(interval) milli)(f)
  }
} 
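A hedged usage sketch of both helpers (the property keys and default times are illustrative; real keys come from Sparta's detail config, and log is provided by SLF4JLogging):

object MaintenanceJobs extends SchedulerUtils {

  val oneShot: Cancellable =
    scheduleOneTask("maintenance.cleanupDelay", "60s") {
      log.info("One-off cleanup triggered")
    }

  val recurring: Cancellable =
    scheduleTask("maintenance.checkInitDelay", "10s", "maintenance.checkInterval", "30s") {
      log.info("Periodic check triggered")
    }
}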
Example 36
Source File: MarathonLauncherActor.scala    From sparta   with Apache License 2.0
package com.stratio.sparta.serving.api.actor

import akka.actor.{Actor, Cancellable, PoisonPill}
import com.stratio.sparta.serving.core.marathon.MarathonService
import com.stratio.sparta.serving.core.actor.LauncherActor.Start
import com.stratio.sparta.serving.core.actor.StatusActor.ResponseStatus
import com.stratio.sparta.serving.core.config.SpartaConfig
import com.stratio.sparta.serving.core.constants.AppConstant._
import com.stratio.sparta.serving.core.models.enumerators.PolicyStatusEnum._
import com.stratio.sparta.serving.core.models.policy.{PhaseEnum, PolicyErrorModel, PolicyModel, PolicyStatusModel}
import com.stratio.sparta.serving.core.models.submit.SubmitRequest
import com.stratio.sparta.serving.core.services.ClusterCheckerService
import com.stratio.sparta.serving.core.utils._
import org.apache.curator.framework.CuratorFramework

import scala.util.{Failure, Success, Try}

class MarathonLauncherActor(val curatorFramework: CuratorFramework) extends Actor
  with LauncherUtils with SchedulerUtils with SparkSubmitUtils with ClusterListenerUtils with ArgumentsUtils
  with PolicyStatusUtils with RequestUtils {

  private val clusterCheckerService = new ClusterCheckerService(curatorFramework)
  private val checkersPolicyStatus = scala.collection.mutable.ArrayBuffer.empty[Cancellable]

  override def receive: PartialFunction[Any, Unit] = {
    case Start(policy: PolicyModel) => initializeSubmitRequest(policy)
    case ResponseStatus(status) => loggingResponsePolicyStatus(status)
    case _ => log.info("Unrecognized message in Marathon Launcher Actor")
  }

  override def postStop(): Unit = checkersPolicyStatus.foreach(_.cancel())

  def initializeSubmitRequest(policy: PolicyModel): Unit = {
    Try {
      log.info(s"Initializing options for submit Marathon application associated to policy: ${policy.name}")
      val zookeeperConfig = getZookeeperConfig
      val clusterConfig = SpartaConfig.getClusterConfig(Option(ConfigMesos)).get
      val master = clusterConfig.getString(Master).trim
      val driverFile = extractMarathonDriverSubmit(policy, DetailConfig, SpartaConfig.getHdfsConfig)
      val pluginsFiles = pluginsJars(policy)
      val driverArguments =
        extractDriverArguments(policy, driverFile, clusterConfig, zookeeperConfig, ConfigMesos, pluginsFiles)
      val (sparkSubmitArguments, sparkConfigurations) =
        extractSubmitArgumentsAndSparkConf(policy, clusterConfig, pluginsFiles)
      val submitRequest = SubmitRequest(policy.id.get, SpartaDriverClass, driverFile, master, sparkSubmitArguments,
        sparkConfigurations, driverArguments, ConfigMesos, killUrl(clusterConfig))
      val detailExecMode = getDetailExecutionMode(policy, clusterConfig)

      createRequest(submitRequest).getOrElse(throw new Exception("Impossible to create submit request in persistence"))

      (new MarathonService(context, curatorFramework, policy, submitRequest), detailExecMode)
    } match {
      case Failure(exception) =>
        val information = s"Error when initializing Sparta Marathon App options"
        log.error(information, exception)
        updateStatus(PolicyStatusModel(id = policy.id.get, status = Failed, statusInfo = Option(information),
          lastError = Option(PolicyErrorModel(information, PhaseEnum.Execution, exception.toString))
        ))
        self ! PoisonPill
      case Success((marathonApp, detailExecMode)) =>
        val information = "Sparta Marathon App configurations initialized correctly"
        log.info(information)
        updateStatus(PolicyStatusModel(id = policy.id.get, status = NotStarted,
          statusInfo = Option(information), lastExecutionMode = Option(detailExecMode)))
        marathonApp.launch(detailExecMode)
        addMarathonContextListener(policy.id.get, policy.name, context, Option(self))
        checkersPolicyStatus += scheduleOneTask(AwaitPolicyChangeStatus, DefaultAwaitPolicyChangeStatus)(
          clusterCheckerService.checkPolicyStatus(policy, self, context))
    }
  }
}