java.util.concurrent.ScheduledExecutorService Scala Examples
The following examples show how to use java.util.concurrent.ScheduledExecutorService.
You can vote up the ones you like or vote down the ones you don't like,
and go to the original project or source file by following the links above each example.
Example 1
Source File: package.scala From ionroller with MIT License | 5 votes |
import java.util.concurrent.{ExecutorService, Executors, ScheduledExecutorService}

import com.amazonaws.services.elasticbeanstalk.model.ConfigurationOptionSetting
import com.typesafe.scalalogging.StrictLogging
import ionroller.aws.Dynamo
import ionroller.tracking.Event
import play.api.libs.functional.syntax._
import play.api.libs.json._

import scala.concurrent.duration.FiniteDuration
import scalaz.concurrent.Task
import scalaz.{-\/, \/-}

package object ionroller extends StrictLogging {
  val ionrollerExecutorService: ExecutorService = Executors.newFixedThreadPool(4)

  implicit val `| Implicit executor service |`: ExecutorService = ionrollerExecutorService
  implicit val ` | is disabled - define explicitly |`: ExecutorService = ionrollerExecutorService

  implicit val timer: ScheduledExecutorService = scalaz.concurrent.Strategy.DefaultTimeoutScheduler

  def ionrollerRole(awsAccountId: String) = s"arn:aws:iam::$awsAccountId:role/ionroller"

  implicit lazy val finiteDurationFormat = {

    def applyFiniteDuration(l: Long, u: String): FiniteDuration = {
      FiniteDuration(l, u.toLowerCase)
    }

    def unapplyFiniteDuration(d: FiniteDuration): (Long, String) = {
      (d.length, d.unit.toString)
    }

    ((JsPath \ "length").format[Long] and
      (JsPath \ "unit").format[String])(applyFiniteDuration, unapplyFiniteDuration)
  }

  implicit lazy val configurationOptionSettingFormat: Format[ConfigurationOptionSetting] = {

    def applyConfigOptionSetting(ns: String, optionName: String, value: String) =
      new ConfigurationOptionSetting(ns, optionName, value)

    def unapplyConfigOptionSetting(o: ConfigurationOptionSetting): Option[(String, String, String)] = {
      for {
        ns <- Option(o.getNamespace)
        n <- Option(o.getOptionName)
        v <- Option(o.getValue)
      } yield (ns, n, v)
    }

    ((JsPath \ "Namespace").format[String] and
      (JsPath \ "OptionName").format[String] and
      (JsPath \ "Value").format[String])(applyConfigOptionSetting _, unlift(unapplyConfigOptionSetting))
  }

  def enabled(name: TimelineName) = {
    ConfigurationManager.modifyEnvironments &&
      (ConfigurationManager.modifyEnvironmentsWhitelist.isEmpty ||
        ConfigurationManager.modifyEnvironmentsWhitelist.contains(name)) &&
      !ConfigurationManager.modifyEnvironmentsBlacklist.contains(name)
  }

  def logEvent(evt: Event) = {
    logger.info(s"$evt (enabled = ${enabled(evt.service)})")
    if (enabled(evt.service))
      Dynamo.EventLogger.log(evt)
        .flatMap({
          case \/-(s) => Task.now(())
          case -\/(f) => Task.delay(logger.error(f.getMessage, f))
        })
    else
      Task.now(())
  }
}
Example 2
Source File: DoobieHikariModule.scala From scala-server-toolkit with MIT License | 5 votes |
package com.avast.sst.doobie

import java.util.Properties
import java.util.concurrent.{ScheduledExecutorService, ThreadFactory}

import cats.Show
import cats.effect.{Async, Blocker, ContextShift, Resource, Sync}
import cats.syntax.show._
import com.zaxxer.hikari.HikariConfig
import com.zaxxer.hikari.metrics.MetricsTrackerFactory
import doobie.enum.TransactionIsolation
import doobie.hikari.HikariTransactor

import scala.concurrent.ExecutionContext

object DoobieHikariModule {

  def make[F[_]: Async](
      config: DoobieHikariConfig,
      boundedConnectExecutionContext: ExecutionContext,
      blocker: Blocker,
      metricsTrackerFactory: Option[MetricsTrackerFactory] = None
  )(implicit cs: ContextShift[F]): Resource[F, HikariTransactor[F]] = {
    for {
      hikariConfig <- Resource.liftF(makeHikariConfig(config, metricsTrackerFactory))
      transactor <- HikariTransactor.fromHikariConfig(hikariConfig, boundedConnectExecutionContext, blocker)
    } yield transactor
  }

  implicit private val transactionIsolationShow: Show[TransactionIsolation] = {
    case TransactionIsolation.TransactionNone            => "TRANSACTION_NONE"
    case TransactionIsolation.TransactionReadUncommitted => "TRANSACTION_READ_UNCOMMITTED"
    case TransactionIsolation.TransactionReadCommitted   => "TRANSACTION_READ_COMMITTED"
    case TransactionIsolation.TransactionRepeatableRead  => "TRANSACTION_REPEATABLE_READ"
    case TransactionIsolation.TransactionSerializable    => "TRANSACTION_SERIALIZABLE"
  }

  private def makeHikariConfig[F[_]: Sync](
      config: DoobieHikariConfig,
      metricsTrackerFactory: Option[MetricsTrackerFactory],
      scheduledExecutorService: Option[ScheduledExecutorService] = None,
      threadFactory: Option[ThreadFactory] = None
  ): F[HikariConfig] = {
    Sync[F].delay {
      val c = new HikariConfig()

      c.setDriverClassName(config.driver)
      c.setJdbcUrl(config.url)
      c.setUsername(config.username)
      c.setPassword(config.password)
      c.setAutoCommit(config.autoCommit)
      c.setConnectionTimeout(config.connectionTimeout.toMillis)
      c.setIdleTimeout(config.idleTimeout.toMillis)
      c.setMaxLifetime(config.maxLifeTime.toMillis)
      c.setMinimumIdle(config.minimumIdle)
      c.setMaximumPoolSize(config.maximumPoolSize)
      c.setReadOnly(config.readOnly)
      c.setAllowPoolSuspension(config.allowPoolSuspension)
      c.setIsolateInternalQueries(config.isolateInternalQueries)
      c.setRegisterMbeans(config.registerMBeans)

      val dataSourceProperties = new Properties()
      config.dataSourceProperties.foreach { case (k, v) => dataSourceProperties.put(k, v) }
      c.setDataSourceProperties(dataSourceProperties)

      config.leakDetectionThreshold.map(_.toMillis).foreach(c.setLeakDetectionThreshold)
      config.initializationFailTimeout.map(_.toMillis).foreach(c.setInitializationFailTimeout)
      config.poolName.foreach(c.setPoolName)
      config.validationTimeout.map(_.toMillis).foreach(c.setValidationTimeout)
      config.transactionIsolation.map(_.show).foreach(c.setTransactionIsolation)

      scheduledExecutorService.foreach(c.setScheduledExecutor)
      threadFactory.foreach(c.setThreadFactory)
      metricsTrackerFactory.foreach(c.setMetricsTrackerFactory)

      c
    }
  }

}
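The module is wired from application code that already holds a loaded DoobieHikariConfig plus the two execution contexts HikariCP needs. A minimal call-site sketch, assuming cats-effect 2.x; the pool sizes and the wrapper object are illustrative, not part of the library:

import java.util.concurrent.Executors

import cats.effect.{Blocker, ContextShift, IO, Resource}
import com.avast.sst.doobie.{DoobieHikariConfig, DoobieHikariModule}
import doobie.hikari.HikariTransactor

import scala.concurrent.ExecutionContext

object TransactorWiring {
  // `config` is assumed to be loaded elsewhere (e.g. from configuration); only the wiring is shown
  def transactor(config: DoobieHikariConfig)(implicit cs: ContextShift[IO]): Resource[IO, HikariTransactor[IO]] = {
    // bounded pool for awaiting connections, cached blocker pool for JDBC calls
    val connectEc = ExecutionContext.fromExecutor(Executors.newFixedThreadPool(8))
    val blocker   = Blocker.liftExecutionContext(ExecutionContext.fromExecutor(Executors.newCachedThreadPool()))
    DoobieHikariModule.make[IO](config, connectEc, blocker)
  }
}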
Example 3
Source File: IOTimer.scala From cats-effect with Apache License 2.0 | 5 votes |
package cats.effect
package internals

import java.util.concurrent.{ScheduledExecutorService, ScheduledThreadPoolExecutor, ThreadFactory, TimeUnit}

import cats.effect.internals.Callback.T
import cats.effect.internals.IOShift.Tick

import scala.concurrent.ExecutionContext
import scala.concurrent.duration._
import scala.util.Try

// The `IOTimer` class definition itself is elided in this excerpt; the members
// below belong to its companion object, whose header is reconstructed here.
private[internals] object IOTimer {

  def apply(ec: ExecutionContext, sc: ScheduledExecutorService): Timer[IO] =
    new IOTimer(ec, sc)

  private[internals] lazy val scheduler: ScheduledExecutorService =
    mkGlobalScheduler(sys.props)

  private[internals] def mkGlobalScheduler(props: collection.Map[String, String]): ScheduledThreadPoolExecutor = {
    val corePoolSize = props
      .get("cats.effect.global_scheduler.threads.core_pool_size")
      .flatMap(s => Try(s.toInt).toOption)
      .filter(_ > 0)
      .getOrElse(2)
    val keepAliveTime = props
      .get("cats.effect.global_scheduler.keep_alive_time_ms")
      .flatMap(s => Try(s.toLong).toOption)
      .filter(_ > 0L)

    val tp = new ScheduledThreadPoolExecutor(corePoolSize, new ThreadFactory {
      def newThread(r: Runnable): Thread = {
        val th = new Thread(r)
        th.setName(s"cats-effect-scheduler-${th.getId}")
        th.setDaemon(true)
        th
      }
    })
    keepAliveTime.foreach { timeout =>
      // Call in this order or it throws!
      tp.setKeepAliveTime(timeout, TimeUnit.MILLISECONDS)
      tp.allowCoreThreadTimeOut(true)
    }
    tp.setRemoveOnCancelPolicy(true)
    tp
  }

  final private class ShiftTick(
    conn: IOConnection,
    cb: Either[Throwable, Unit] => Unit,
    ec: ExecutionContext
  ) extends Runnable {
    def run(): Unit = {
      // Shifts actual execution on our `ExecutionContext`, because the scheduler
      // is in charge only of ticks and the execution needs to shift because the
      // tick might continue with whatever bind continuation is linked to it,
      // keeping the current thread occupied
      conn.pop()
      ec.execute(new Tick(cb))
    }
  }
}
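The object above is internal to cats-effect; user code normally obtains a Timer[IO] backed by such a scheduler through the public IO.timer constructors. A minimal sketch, assuming cats-effect 2.x on the JVM:

import java.util.concurrent.Executors

import cats.effect.{IO, Timer}

import scala.concurrent.ExecutionContext
import scala.concurrent.duration._

object TimerExample {
  val ec = ExecutionContext.global
  val sc = Executors.newScheduledThreadPool(2)

  // public entry point that ends up delegating to the internals shown above
  implicit val timer: Timer[IO] = IO.timer(ec, sc)

  // a non-blocking one-second delay driven by the scheduler
  val tick: IO[Unit] = IO.sleep(1.second).flatMap(_ => IO(println("one second later")))
}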
Example 4
Source File: KubernetesShell.scala From nelson with Apache License 2.0 | 5 votes |
package nelson
package scheduler

import nelson.Datacenter.{Deployment, StackName}
import nelson.Kubectl.{DeploymentStatus, JobStatus, KubectlError}
import nelson.scheduler.SchedulerOp._
import nelson.CatsHelpers._

import cats.~>
import cats.effect.IO
import cats.implicits._

import scala.concurrent.ExecutionContext
import scala.concurrent.duration.FiniteDuration

import java.util.concurrent.ScheduledExecutorService

final class KubernetesShell(
  kubectl: Kubectl,
  timeout: FiniteDuration,
  scheduler: ScheduledExecutorService,
  executionContext: ExecutionContext
) extends (SchedulerOp ~> IO) {
  import KubernetesShell._

  private implicit val kubernetesShellExecutionContext = executionContext

  def apply[A](fa: SchedulerOp[A]): IO[A] =
    fa match {
      case Delete(_, deployment) =>
        delete(deployment)
          .retryExponentially(limit = 3)(scheduler, kubernetesShellExecutionContext)
          .timed(timeout)
      case Launch(_, _, _, _, _, _, bp) =>
        kubectl.apply(bp)
          .retryExponentially(limit = 3)(scheduler, kubernetesShellExecutionContext)
          .timed(timeout)
      case Summary(_, ns, stackName) =>
        summary(ns, stackName)
          .retryExponentially(limit = 3)(scheduler, kubernetesShellExecutionContext)
          .timed(timeout)
    }

  def delete(deployment: Deployment): IO[Unit] = {
    val ns = deployment.namespace.name
    val stack = deployment.stackName

    // We don't have enough information here to determine what exactly
    // we're trying to delete so try each one in turn..
    val fallback =
      kubectl.deleteService(ns, stack).void.recoverWith {
        case err @ KubectlError(_) if notFound(err) =>
          kubectl.deleteCronJob(ns, stack).void.recoverWith {
            case err @ KubectlError(_) if notFound(err) =>
              kubectl.deleteJob(ns, stack).void.recover {
                case err @ KubectlError(_) if notFound(err) => ()
              }
          }
      }

    deployment.renderedBlueprint.fold(fallback)(spec => kubectl.delete(spec).void)
  }

  // Janky heuristic to see if an attempted (legacy) deletion failed because
  // it was not found as opposed to some other reason like RBAC permissions
  private def notFound(error: KubectlError): Boolean =
    error.stderr.exists(_.startsWith("Error from server (NotFound)"))

  def summary(ns: NamespaceName, stackName: StackName): IO[Option[DeploymentSummary]] =
    deploymentSummary(ns, stackName).recoverWith {
      case _ =>
        cronJobSummary(ns, stackName).recoverWith {
          case _ =>
            jobSummary(ns, stackName).recover {
              case _ => None
            }
        }
    }

  def deploymentSummary(ns: NamespaceName, stackName: StackName): IO[Option[DeploymentSummary]] =
    kubectl.getDeployment(ns, stackName).map {
      case DeploymentStatus(available, unavailable) =>
        Some(DeploymentSummary(
          running = available,
          pending = unavailable,
          completed = None,
          failed = None
        ))
    }

  def cronJobSummary(ns: NamespaceName, stackName: StackName): IO[Option[DeploymentSummary]] =
    kubectl.getCronJob(ns, stackName).map(js => Some(jobStatusToSummary(js)))

  def jobSummary(ns: NamespaceName, stackName: StackName): IO[Option[DeploymentSummary]] =
    kubectl.getJob(ns, stackName).map(js => Some(jobStatusToSummary(js)))
}

object KubernetesShell {
  private def jobStatusToSummary(js: JobStatus): DeploymentSummary =
    DeploymentSummary(
      running = js.active,
      pending = None,   // Doesn't seem like K8s API gives this info
      completed = js.succeeded,
      failed = js.failed
    )
}
Example 5
Source File: TcpServiceImpl.scala From c4proto with Apache License 2.0 | 5 votes |
package ee.cone.c4gate_server

import java.net.InetSocketAddress
import java.nio.ByteBuffer
import java.nio.channels.{AsynchronousServerSocketChannel, AsynchronousSocketChannel, CompletionHandler}
import java.util.UUID
import java.util.concurrent.{Executors, ScheduledExecutorService, ScheduledFuture, TimeUnit}

import com.typesafe.scalalogging.LazyLogging
import ee.cone.c4actor._

import scala.collection.concurrent.TrieMap
import scala.collection.immutable.Queue

@SuppressWarnings(Array("org.wartremover.warts.Var"))
class ChannelHandler(
  channel: AsynchronousSocketChannel,
  unregister: () => Unit,
  fail: Throwable => Unit,
  executor: ScheduledExecutorService,
  timeout: Long,
  val compressor: Option[Compressor]
) extends CompletionHandler[Integer, Unit] with SenderToAgent {

  private var queue: Queue[Array[Byte]] = Queue.empty
  private var activeElement: Option[ByteBuffer] = None
  private var purge: Option[ScheduledFuture[_]] = None

  private def startWrite(): Unit =
    queue.dequeueOption.foreach { case (element, nextQueue) =>
      queue = nextQueue
      activeElement = Option(ByteBuffer.wrap(element))
      channel.write[Unit](activeElement.get, (), this)
    }

  def add(data: Array[Byte]): Unit = synchronized {
    queue = queue.enqueue(data)
    if (activeElement.isEmpty) startWrite()
  }

  def completed(result: Integer, att: Unit): Unit = Trace {
    synchronized {
      if (activeElement.get.hasRemaining) channel.write[Unit](activeElement.get, (), this)
      else {
        purge.foreach(_.cancel(false))
        purge = Option(executor.schedule(new Runnable {
          def run(): Unit = close()
        }, timeout, TimeUnit.SECONDS))
        activeElement = None
        startWrite()
      }
    }
  }

  def failed(exc: Throwable, att: Unit): Unit = {
    fail(exc)
    close()
  }

  def close(): Unit = {
    unregister()
    channel.close()  //does close block?
  }
}

class TcpServerImpl(
  port: Int,
  tcpHandler: TcpHandler,
  timeout: Long,
  compressorFactory: StreamCompressorFactory,
  channels: TrieMap[String, ChannelHandler] = TrieMap()
) extends TcpServer with Executable with LazyLogging {

  def getSender(connectionKey: String): Option[SenderToAgent] =
    channels.get(connectionKey)

  def run(): Unit = concurrent.blocking {
    tcpHandler.beforeServerStart()
    val address = new InetSocketAddress(port)
    val listener = AsynchronousServerSocketChannel.open().bind(address)
    val executor = Executors.newScheduledThreadPool(1)
    listener.accept[Unit]((), new CompletionHandler[AsynchronousSocketChannel, Unit] {
      def completed(ch: AsynchronousSocketChannel, att: Unit): Unit = Trace {
        listener.accept[Unit]((), this)
        val key = UUID.randomUUID.toString
        val sender = new ChannelHandler(ch, { () =>
          assert(channels.remove(key).nonEmpty)
          tcpHandler.afterDisconnect(key)
        }, { error =>
          logger.error("channel", error)
        }, executor, timeout, compressorFactory.create())
        assert(channels.put(key, sender).isEmpty)
        tcpHandler.afterConnect(key, sender)
      }

      def failed(exc: Throwable, att: Unit): Unit = logger.error("tcp", exc)  //! may be set status-finished
    })
  }
}
Example 6
Source File: SimpleScheduler.scala From CM-Well with Apache License 2.0 | 5 votes |
package cmwell.util.concurrent

import java.util.concurrent.{ScheduledExecutorService, ScheduledFuture, ScheduledThreadPoolExecutor}

import com.typesafe.scalalogging.LazyLogging

import scala.concurrent.{ExecutionContext, Future, Promise}
import scala.concurrent.duration.{Duration, FiniteDuration}
import scala.util.Try

object SimpleScheduler extends LazyLogging {

  private[this] lazy val timer = {
    val executor = new ScheduledThreadPoolExecutor(1)
    executor.setRemoveOnCancelPolicy(true)
    executor.asInstanceOf[ScheduledExecutorService]
  }

  // This method is private, since execution at the expense of our single timer thread must be kept as limited as possible.
  // It can be used if and only if we know `body` is a safe and small job.
  private[util] def scheduleInstant[T](duration: FiniteDuration)(body: => T) = {
    val p = Promise[T]()
    val cancellable = timer.schedule(
      new Runnable {
        override def run(): Unit = {
          // body must not be expensive to compute, since it runs on our only timer thread
          p.complete(Try(body))
        }
      },
      duration.toMillis,
      java.util.concurrent.TimeUnit.MILLISECONDS
    )
    p.future -> Cancellable(cancellable)
  }

  def scheduleAtFixedRate(initialDelay: FiniteDuration, period: FiniteDuration, mayInterruptIfRunning: Boolean = false)(
    task: => Any
  )(implicit executionContext: ExecutionContext): Cancellable = {
    // memoize runnable task
    val runnable: Runnable = new Runnable {
      override def run(): Unit = Try(task).failed.foreach { err =>
        logger.error("scheduled task failed", err)
      }
    }
    val cancellable = timer.scheduleAtFixedRate(new Runnable {
      override def run(): Unit = executionContext.execute(runnable)
    }, initialDelay.toMillis, period.toMillis, java.util.concurrent.TimeUnit.MILLISECONDS)
    Cancellable(cancellable, mayInterruptIfRunning)
  }

  def schedule[T](duration: FiniteDuration)(body: => T)(implicit executionContext: ExecutionContext): Future[T] = {
    val p = Promise[T]()
    timer.schedule(
      new Runnable {
        override def run(): Unit = {
          // body may be expensive to compute and must not run on our only timer thread,
          // so the task is computed inside a `Future` on the given executionContext
          p.completeWith(Future(body)(executionContext))
        }
      },
      duration.toMillis,
      java.util.concurrent.TimeUnit.MILLISECONDS
    )
    p.future
  }

  def scheduleFuture[T](duration: Duration)(body: => Future[T]): Future[T] = {
    val p = Promise[T]()
    timer.schedule(new Runnable {
      override def run(): Unit = p.completeWith(body)
    }, duration.toMillis, java.util.concurrent.TimeUnit.MILLISECONDS)
    p.future
  }
}

object Cancellable {
  def apply(scheduledFuture: ScheduledFuture[_], mayInterruptIfRunning: Boolean = false) =
    new Cancellable {
      override def cancel(): Boolean = scheduledFuture.cancel(mayInterruptIfRunning)
    }
}

trait Cancellable {
  def cancel(): Boolean
}
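A short usage sketch of the public entry points above; the task bodies and the wrapper object are placeholders:

import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.duration._

import cmwell.util.concurrent.SimpleScheduler

object SimpleSchedulerExample extends App {
  // one-off task, evaluated on the given ExecutionContext after 5 seconds
  val answer = SimpleScheduler.schedule(5.seconds) { 40 + 2 }

  // recurring task every 30 seconds, starting after 1 second; failures are logged
  val heartbeat = SimpleScheduler.scheduleAtFixedRate(1.second, 30.seconds) {
    println("heartbeat")
  }

  // stop the recurring task once it is no longer needed
  heartbeat.cancel()
}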
Example 7
Source File: Task.scala From coursier with Apache License 2.0 | 5 votes |
package coursier.util

import java.util.concurrent.ScheduledExecutorService

import scala.concurrent.duration.Duration
import scala.concurrent.{ExecutionContext, Future, Promise}
import scala.util.{Failure, Success}

final case class Task[+T](value: ExecutionContext => Future[T]) extends AnyVal {

  def map[U](f: T => U): Task[U] =
    Task(implicit ec => value(ec).map(f))

  def flatMap[U](f: T => Task[U]): Task[U] =
    Task(implicit ec => value(ec).flatMap(t => f(t).value(ec)))

  def handle[U >: T](f: PartialFunction[Throwable, U]): Task[U] =
    Task(ec => value(ec).recover(f)(ec))

  def future()(implicit ec: ExecutionContext): Future[T] =
    value(ec)

  def attempt: Task[Either[Throwable, T]] =
    map(Right(_))
      .handle { case t: Throwable => Left(t) }

  def schedule(duration: Duration, es: ScheduledExecutorService): Task[T] = {
    Task { implicit ec =>
      val p = Promise[T]()
      val r: Runnable = new Runnable {
        def run() = value(ec).onComplete(p.complete)
      }
      es.schedule(r, duration.length, duration.unit)
      p.future
    }
  }
}

object Task extends PlatformTaskCompanion {

  def point[A](a: A): Task[A] = {
    val future = Future.successful(a)
    Task(_ => future)
  }

  def delay[A](a: => A): Task[A] =
    Task(ec => Future(a)(ec))

  def never[A]: Task[A] =
    Task(_ => Promise[A].future)

  def fromEither[T](e: Either[Throwable, T]): Task[T] =
    Task(_ => Future.fromTry(e.fold(Failure(_), Success(_))))

  def fail(e: Throwable): Task[Nothing] =
    Task(_ => Future.fromTry(Failure(e)))

  def tailRecM[A, B](a: A)(fn: A => Task[Either[A, B]]): Task[B] =
    Task[B] { implicit ec =>
      def loop(a: A): Future[B] =
        fn(a).future().flatMap {
          case Right(b) => Future.successful(b)
          case Left(a) =>
            // this is safe because recursive
            // flatMap is safe on Future
            loop(a)
        }
      loop(a)
    }

  def gather: Gather[Task] = sync
}
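schedule only delays the point at which the wrapped Future is started; the continuation still runs on the implicit ExecutionContext. A small usage sketch; the wrapper object is hypothetical:

import java.util.concurrent.Executors

import scala.concurrent.Await
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.duration._

import coursier.util.Task

object DelayedTaskExample extends App {
  val ses = Executors.newSingleThreadScheduledExecutor()

  // evaluation of the body is kicked off by the scheduler after one second
  val delayed: Task[Int] = Task.delay(1 + 1).schedule(1.second, ses)

  println(Await.result(delayed.future(), 5.seconds))
  ses.shutdown()
}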
Example 8
Source File: PlatformTaskCompanion.scala From coursier with Apache License 2.0 | 5 votes |
package coursier.util

import java.util.concurrent.{ExecutorService, ScheduledExecutorService}

import scala.concurrent.{Await, ExecutionContext, ExecutionContextExecutorService, Future}
import scala.concurrent.duration.{Duration, FiniteDuration}
import scala.concurrent.Promise
import scala.util.Success

abstract class PlatformTaskCompanion { self =>

  def schedule[A](pool: ExecutorService)(f: => A): Task[A] = {

    val ec0 = pool match {
      case eces: ExecutionContextExecutorService => eces
      case _ => ExecutionContext.fromExecutorService(pool) // FIXME Is this instantiation costly? Cache it?
    }

    Task(_ => Future(f)(ec0))
  }

  def completeAfter(pool: ScheduledExecutorService, duration: FiniteDuration): Task[Unit] =
    Task.delay {
      val p = Promise[Unit]()
      val runnable =
        new Runnable {
          def run(): Unit =
            p.complete(Success(()))
        }
      pool.schedule(runnable, duration.length, duration.unit)
      Task(_ => p.future)
    }.flatMap(identity)

  implicit val sync: Sync[Task] =
    new TaskSync {
      def schedule[A](pool: ExecutorService)(f: => A) = self.schedule(pool)(f)
    }

  implicit class PlatformTaskOps[T](private val task: Task[T]) {
    def unsafeRun()(implicit ec: ExecutionContext): T =
      Await.result(task.future(), Duration.Inf)
  }
}
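completeAfter gives a non-blocking sleep: the returned Task finishes when the scheduler fires, without occupying a pool thread while waiting. A usage sketch with a hypothetical wrapper object:

import java.util.concurrent.Executors

import scala.concurrent.Await
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.duration._

import coursier.util.Task

object CompleteAfterExample extends App {
  val ses = Executors.newSingleThreadScheduledExecutor()

  // completes roughly two seconds from now; no thread blocks in the meantime
  val pause: Task[Unit] = Task.completeAfter(ses, 2.seconds)

  Await.result(pause.future(), 5.seconds)
  ses.shutdown()
}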
Example 9
Source File: ThreadUtil.scala From coursier with Apache License 2.0 | 5 votes |
package coursier.cache.internal

import java.util.concurrent.{ExecutorService, LinkedBlockingQueue, ScheduledExecutorService, ScheduledThreadPoolExecutor, ThreadFactory, ThreadPoolExecutor, TimeUnit}
import java.util.concurrent.atomic.AtomicInteger

object ThreadUtil {

  private val poolNumber = new AtomicInteger(1)

  def daemonThreadFactory(): ThreadFactory = {

    val poolNumber0 = poolNumber.getAndIncrement()

    val threadNumber = new AtomicInteger(1)

    new ThreadFactory {
      def newThread(r: Runnable) = {
        val threadNumber0 = threadNumber.getAndIncrement()
        val t = new Thread(r, s"coursier-pool-$poolNumber0-thread-$threadNumber0")
        t.setDaemon(true)
        t.setPriority(Thread.NORM_PRIORITY)
        t
      }
    }
  }

  def fixedThreadPool(size: Int): ExecutorService = {

    val factory = daemonThreadFactory()

    // 1 min keep alive, so that threads get stopped a bit after resolution / downloading is done
    val executor = new ThreadPoolExecutor(
      size, size,
      1L, TimeUnit.MINUTES,
      new LinkedBlockingQueue[Runnable],
      factory
    )
    executor.allowCoreThreadTimeOut(true)
    executor
  }

  def fixedScheduledThreadPool(size: Int): ScheduledExecutorService = {

    val factory = daemonThreadFactory()

    val executor = new ScheduledThreadPoolExecutor(size, factory)
    executor.setKeepAliveTime(1L, TimeUnit.MINUTES)
    executor.allowCoreThreadTimeOut(true)
    executor
  }

  def withFixedThreadPool[T](size: Int)(f: ExecutorService => T): T = {

    var pool: ExecutorService = null

    try {
      pool = fixedThreadPool(size)
      f(pool)
    } finally {
      if (pool != null)
        pool.shutdown()
    }
  }
}
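A usage sketch for the helpers above; the object sits in coursier's internal cache package, so this is only illustrative:

import coursier.cache.internal.ThreadUtil

object ThreadUtilExample extends App {
  // borrow a 4-thread pool for the duration of the block; it is shut down in the finally clause
  val sum = ThreadUtil.withFixedThreadPool(4) { pool =>
    pool.submit(new Runnable { def run(): Unit = println("working") })
    1 + 2
  }
  println(sum)

  // daemon-threaded scheduler whose idle core threads time out after a minute
  val scheduler = ThreadUtil.fixedScheduledThreadPool(2)
  scheduler.shutdown()
}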
Example 10
Source File: MultiFixtureBase.scala From daml with Apache License 2.0 | 5 votes |
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.ledger.api.testing.utils

import java.util.concurrent.{Executors, ScheduledExecutorService, TimeUnit}

import com.daml.dec.DirectExecutionContext
import org.scalatest._
import org.scalatest.concurrent.{AsyncTimeLimitedTests, ScaledTimeSpans}
import org.scalatest.exceptions.TestCanceledException
import org.scalatest.time.Span

import scala.collection.immutable.Iterable
import scala.concurrent.duration.DurationInt
import scala.concurrent.{Future, Promise, TimeoutException}
import scala.util.control.{NoStackTrace, NonFatal}

trait MultiFixtureBase[FixtureId, TestContext]
    extends Assertions
    with BeforeAndAfterAll
    with ScaledTimeSpans
    with AsyncTimeLimitedTests {
  self: AsyncTestSuite =>

  private var es: ScheduledExecutorService = _

  override protected def beforeAll(): Unit = {
    super.beforeAll()
    es = Executors.newScheduledThreadPool(1)
  }

  override protected def afterAll(): Unit = {
    es.shutdownNow()
    super.afterAll()
  }

  protected class TestFixture(val id: FixtureId, createContext: () => TestContext) {
    def context(): TestContext = createContext()
  }

  def timeLimit: Span = scaled(30.seconds)

  object TestFixture {
    def apply(id: FixtureId, createContext: () => TestContext): TestFixture =
      new TestFixture(id, createContext)

    def unapply(testFixture: TestFixture): Option[(FixtureId, TestContext)] =
      Some((testFixture.id, testFixture.context()))
  }

  protected def fixtures: Iterable[TestFixture]

  protected def allFixtures(runTest: TestContext => Future[Assertion]): Future[Assertion] =
    forAllFixtures(fixture => runTest(fixture.context))

  protected def forAllFixtures(runTest: TestFixture => Future[Assertion]): Future[Assertion] = {
    forAllMatchingFixtures { case f => runTest(f) }
  }

  protected def forAllMatchingFixtures(
      runTest: PartialFunction[TestFixture, Future[Assertion]]): Future[Assertion] = {
    if (parallelExecution) {
      val results = fixtures.map(
        fixture =>
          if (runTest.isDefinedAt(fixture))
            runTestAgainstFixture(fixture, runTest)
          else
            Future.successful(succeed))
      Future.sequence(results).map(foldAssertions)
    } else {
      fixtures.foldLeft(Future.successful(succeed)) {
        case (resultSoFar, thisFixture) =>
          resultSoFar.flatMap {
            case Succeeded => runTestAgainstFixture(thisFixture, runTest)
            case other => Future.successful(other)
          }
      }
    }
  }
}
Example 11
Source File: Executors.scala From kafka-journal with MIT License | 5 votes |
package com.evolutiongaming.kafka.journal.util

import java.util.concurrent.ScheduledExecutorService

import cats.effect.{Resource, Sync}
import com.evolutiongaming.catshelper.Runtime
import com.evolutiongaming.kafka.journal.execution.{ForkJoinPoolOf, ScheduledExecutorServiceOf, ThreadFactoryOf, ThreadPoolOf}

import scala.concurrent.{ExecutionContext, ExecutionContextExecutorService}

object Executors {

  def blocking[F[_] : Sync](
    name: String,
  ): Resource[F, ExecutionContextExecutorService] = {
    for {
      threadFactory <- Resource.liftF(ThreadFactoryOf[F](name))
      threadPool    <- ThreadPoolOf[F](2, Int.MaxValue, threadFactory)
    } yield {
      ExecutionContext.fromExecutorService(threadPool)
    }
  }

  def nonBlocking[F[_] : Sync](
    name: String,
  ): Resource[F, ExecutionContextExecutorService] = {
    for {
      cores        <- Resource.liftF(Runtime[F].availableCores)
      parallelism   = cores + 1
      forkJoinPool <- ForkJoinPoolOf[F](name, parallelism)
    } yield {
      ExecutionContext.fromExecutorService(forkJoinPool)
    }
  }

  def scheduled[F[_] : Sync](
    name: String,
    parallelism: Int
  ): Resource[F, ScheduledExecutorService] = {
    for {
      threadFactory <- Resource.liftF(ThreadFactoryOf[F](name))
      result        <- ScheduledExecutorServiceOf[F](parallelism, threadFactory)
    } yield result
  }
}
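The helpers return Resource values, so acquiring the scheduler ties its lifetime to the resource scope. A hedged sketch of the scheduled variant, assuming cats-effect IO (whose Sync instance is in scope by default); the wrapper object is hypothetical and the release behaviour (shutting the pool down) is assumed from ScheduledExecutorServiceOf:

import cats.effect.IO
import com.evolutiongaming.kafka.journal.util.Executors

object ScheduledResourceExample {
  // the ScheduledExecutorService exists only for the duration of `use`;
  // the Resource releases (presumably shuts down) it afterwards
  val program: IO[Boolean] =
    Executors.scheduled[IO](name = "scheduler", parallelism = 2).use { ses =>
      IO(ses.isShutdown)
    }
}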
Example 12
Source File: Scheduler.scala From temperature-machine with Apache License 2.0 | 5 votes |
package bad.robot.temperature.task

import java.util.concurrent.{ScheduledExecutorService, ScheduledFuture}

import bad.robot.logging._

import scala.concurrent.duration.Duration

object Scheduler {

  implicit class ScheduledExecutorServiceOps(executor: ScheduledExecutorService) {
    def schedule(frequency: Duration, tasks: Runnable*): List[ScheduledFuture[_]] = {
      this.schedule(frequency, printError(_), tasks: _*)
    }

    def schedule(frequency: Duration, errorHandler: Throwable => Runnable => Unit, tasks: Runnable*): List[ScheduledFuture[_]] = {
      tasks.map(task => {
        executor.scheduleAtFixedRate(wrapWithErrorHandler(task, errorHandler), 0, frequency.length, frequency.unit)
      }).toList
    }
  }

  def wrapWithErrorHandler(task: Runnable, errorHandler: Throwable => Runnable => Unit): Runnable = {
    () =>
      try {
        task.run()
      } catch {
        case e: Throwable => errorHandler(e)(task)
      }
  }

  private def printError(e: Throwable): Runnable => Unit = {
    task => Log.error(s"An error occurred executing a scheduled task ($task): ${e.getMessage}")
  }
}
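With the implicit ops class in scope, any ScheduledExecutorService gains the error-handling schedule method. A usage sketch; the task body and wrapper object are placeholders:

import java.util.concurrent.Executors

import scala.concurrent.duration._

import bad.robot.temperature.task.Scheduler._

object PollingExample extends App {
  val executor = Executors.newScheduledThreadPool(1)

  val poll: Runnable = () => println("poll sensors")

  // run every 30 seconds; a failing run is logged instead of silently killing the schedule
  val futures = executor.schedule(30.seconds, poll)
}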
Example 13
Source File: Scheduler.scala From zio with Apache License 2.0 | 5 votes |
package zio.internal

import java.util.concurrent.ScheduledExecutorService
import java.util.concurrent.TimeUnit

import zio.duration.Duration
import zio.internal.Scheduler.CancelToken

private[zio] trait Scheduler {
  def schedule(task: Runnable, duration: Duration): CancelToken
}

private[zio] object Scheduler {
  type CancelToken = () => Boolean

  def fromScheduledExecutorService(service: ScheduledExecutorService): Scheduler =
    new Scheduler {
      val ConstFalse = () => false

      override def schedule(task: Runnable, duration: Duration): CancelToken =
        duration match {
          case Duration.Infinity => ConstFalse
          case Duration.Zero =>
            task.run()
            ConstFalse
          case duration: Duration.Finite =>
            val future = service.schedule(new Runnable {
              def run: Unit = task.run()
            }, duration.toNanos, TimeUnit.NANOSECONDS)

            () => future.cancel(true)
        }
    }
}
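The trait is package-private to zio, but the cancel-token pattern it wraps can be reproduced directly against the JDK API. A minimal sketch of the same idea; the wrapper object is hypothetical:

import java.util.concurrent.{Executors, TimeUnit}

object CancelTokenExample extends App {
  type CancelToken = () => Boolean

  val service = Executors.newScheduledThreadPool(1)

  // schedule the task and capture its cancellation as a () => Boolean, as in the trait above
  val future = service.schedule(new Runnable { def run(): Unit = println("tick") }, 500, TimeUnit.MILLISECONDS)
  val cancel: CancelToken = () => future.cancel(true)

  // cancelling before the delay elapses prevents the task from running
  println(cancel())
  service.shutdown()
}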
Example 14
Source File: RichScheduledExecutorService.scala From mango with Apache License 2.0 | 5 votes |
package com.kakao.mango.concurrent

import java.util.concurrent.{Callable, ScheduledFuture, TimeUnit, ScheduledExecutorService}

import scala.concurrent.duration.Duration
import scala.concurrent.duration._
import scala.language.postfixOps

class RichScheduledExecutorService(underlying: ScheduledExecutorService) extends RichExecutorService(underlying) with ScheduledExecutorService {

  def scheduleIn[T](delay: Duration)(command: => T): ScheduledFuture[T] = schedule(new Callable[T] {
    override def call(): T = command
  }, delay.toMillis, TimeUnit.MILLISECONDS)

  def withFixedRate[T](rate: Duration, initialDelay: Duration = 0.second)(command: => Unit) = scheduleAtFixedRate(new Runnable {
    override def run(): Unit = command
  }, initialDelay.toMillis, rate.toMillis, TimeUnit.MILLISECONDS)

  def withFixedDelay[T](delay: Duration, initialDelay: Duration = 0.second)(command: => Unit) = scheduleWithFixedDelay(new Runnable {
    override def run(): Unit = command
  }, initialDelay.toMillis, delay.toMillis, TimeUnit.MILLISECONDS) // both durations are converted with toMillis to match TimeUnit.MILLISECONDS

  // delegating to underlying

  override def schedule(command: Runnable, delay: Long, unit: TimeUnit): ScheduledFuture[_] =
    underlying.schedule(wrap(command), delay, unit)

  override def scheduleAtFixedRate(command: Runnable, initialDelay: Long, period: Long, unit: TimeUnit): ScheduledFuture[_] =
    underlying.scheduleAtFixedRate(wrap(command), initialDelay, period, unit)

  override def schedule[V](callable: Callable[V], delay: Long, unit: TimeUnit): ScheduledFuture[V] =
    underlying.schedule(wrap(callable), delay, unit)

  override def scheduleWithFixedDelay(command: Runnable, initialDelay: Long, delay: Long, unit: TimeUnit): ScheduledFuture[_] =
    underlying.scheduleWithFixedDelay(wrap(command), initialDelay, delay, unit)
}
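A usage sketch of the enriched API; the pool, task bodies, and wrapper object are placeholders:

import java.util.concurrent.Executors

import scala.concurrent.duration._

import com.kakao.mango.concurrent.RichScheduledExecutorService

object RichSchedulerExample extends App {
  val rich = new RichScheduledExecutorService(Executors.newScheduledThreadPool(1))

  // one-shot evaluation after 5 seconds
  val once = rich.scheduleIn(5.seconds) { "done" }

  // recurring task every second, first run immediately
  rich.withFixedRate(1.second) { println("tick") }
}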
Example 15
Source File: NamedExecutors.scala From mango with Apache License 2.0 | 5 votes |
package com.kakao.mango.concurrent

import java.util.concurrent.Executors._
import java.util.concurrent.ForkJoinPool.ForkJoinWorkerThreadFactory
import java.util.concurrent.atomic.AtomicInteger
import java.util.concurrent.{ForkJoinWorkerThread, ExecutorService, ScheduledExecutorService, ForkJoinPool}

import scala.language.implicitConversions

object NamedExecutors {

  implicit def toRich(e: ExecutorService): RichExecutorService = new RichExecutorService(e)

  implicit def toRich(e: ScheduledExecutorService): RichScheduledExecutorService = new RichScheduledExecutorService(e)

  def scheduled(name: String, daemon: Boolean = true): RichScheduledExecutorService = {
    newSingleThreadScheduledExecutor(NamedThreadFactory(name, daemon))
  }

  def scheduledPool(name: String, size: Int, daemon: Boolean = true): RichScheduledExecutorService = {
    newScheduledThreadPool(size, NamedThreadFactory(name, daemon))
  }

  def cached(name: String, daemon: Boolean = true): RichExecutorService = {
    newCachedThreadPool(NamedThreadFactory(name, daemon))
  }

  def fixed(name: String, size: Int, daemon: Boolean = true): RichExecutorService = {
    newFixedThreadPool(size, NamedThreadFactory(name, daemon))
  }

  def single(name: String, daemon: Boolean = true): RichExecutorService = {
    newSingleThreadExecutor(NamedThreadFactory(name, daemon))
  }

  def forkJoin(name: String, size: Int, daemon: Boolean = true, asyncMode: Boolean = false): RichExecutorService = {
    val counter = new AtomicInteger()
    new ForkJoinPool(size, new ForkJoinWorkerThreadFactory {
      override def newThread(pool: ForkJoinPool): ForkJoinWorkerThread = {
        val thread = new ForkJoinWorkerThread(pool) {}
        thread.setName(s"$name-${counter.incrementAndGet()}")
        thread.setDaemon(daemon)
        thread
      }
    }, null, asyncMode)
  }
}
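Since the factory methods return the enriched wrappers (via the implicit conversions above), call sites get named daemon threads together with the richer scheduling API. A short sketch with hypothetical names:

import scala.concurrent.duration._

import com.kakao.mango.concurrent.NamedExecutors

object NamedExecutorsExample extends App {
  // single daemon thread created by NamedThreadFactory("reporter"), already wrapped as RichScheduledExecutorService
  val reporter = NamedExecutors.scheduled("reporter")
  reporter.scheduleIn(10.seconds) { println("report") }

  // fixed pool of 8 daemon threads created by NamedThreadFactory("worker")
  val workers = NamedExecutors.fixed("worker", 8)
}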
Example 16
Source File: ScalaUtilIT.scala From daml with Apache License 2.0 | 5 votes |
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.ledger.client.binding.util

import java.util.concurrent.{Executors, ScheduledExecutorService}

import com.daml.ledger.client.binding.util.ScalaUtil.FutureOps
import org.scalatest.concurrent.AsyncTimeLimitedTests
import org.scalatest.time.Span
import org.scalatest.time.SpanSugar._
import org.scalatest.{AsyncWordSpec, BeforeAndAfterAll, Matchers}

import scala.concurrent.{Future, Promise, TimeoutException}

class ScalaUtilIT
    extends AsyncWordSpec
    with AsyncTimeLimitedTests
    with Matchers
    with BeforeAndAfterAll {

  implicit val scheduler: ScheduledExecutorService = Executors.newSingleThreadScheduledExecutor()

  override def afterAll(): Unit = {
    scheduler.shutdownNow()
    super.afterAll()
  }

  "FutureOps" can {

    "future with timeout" should {

      "fail Future with TimeoutException after specified duration" in {
        val promise = Promise[Unit]() // never completes
        val future = promise.future.timeout("name", 1000.millis, 100.millis)
        recoverToSucceededIf[TimeoutException](future)
      }

      "be able to complete within specified duration" in {
        val future = Future {
          "result"
        }.timeoutWithDefaultWarn("name", 1.second)

        future.map(_ shouldBe "result")
      }
    }
  }

  override lazy val timeLimit: Span = 10.seconds
}
Example 17
Source File: ScalaUtil.scala From daml with Apache License 2.0 | 5 votes |
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.ledger.client.binding.util

import java.util.concurrent.{ScheduledExecutorService, ScheduledFuture, TimeUnit}

import com.typesafe.scalalogging.LazyLogging

import scala.concurrent.duration._
import scala.concurrent.{ExecutionContext, Future, Promise, TimeoutException}

object ScalaUtil {

  implicit class FutureOps[T](val future: Future[T]) extends LazyLogging {

    def timeout(
        name: String,
        failTimeout: FiniteDuration = 1.minute,
        warnTimeout: FiniteDuration = 30.seconds)(
        implicit ec: ExecutionContext,
        scheduler: ScheduledExecutorService): Future[T] = {

      val promise = Promise[T]

      @SuppressWarnings(Array("org.wartremover.warts.JavaSerializable"))
      val warningTask = schedule(warnTimeout) {
        logger.warn("Function {} takes more than {}", name, warnTimeout)
      }

      val errorTask = schedule(failTimeout) {
        val error = new TimeoutException(s"Function call $name took more than $failTimeout")
        promise.tryFailure(error)
        ()
      }

      future.onComplete { outcome =>
        warningTask.cancel(false)
        errorTask.cancel(false)
        promise.tryComplete(outcome)
        ()
      }

      promise.future
    }

    private def schedule(timeout: FiniteDuration)(f: => Unit)(
        implicit scheduler: ScheduledExecutorService): ScheduledFuture[_] = {

      val runnable = new Runnable {
        override def run(): Unit = f
      }
      scheduler.schedule(runnable, timeout.toMillis, TimeUnit.MILLISECONDS)
    }

    def timeoutWithDefaultWarn(name: String, failTimeout: FiniteDuration)(
        implicit ec: ExecutionContext,
        scheduler: ScheduledExecutorService): Future[T] =
      timeout(name, failTimeout, 10.seconds)

  }

}
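Call-site sketch for the timeout combinator; the slow computation, its name, and the wrapper object are placeholders:

import java.util.concurrent.{Executors, ScheduledExecutorService}

import scala.concurrent.duration._
import scala.concurrent.{ExecutionContext, Future}

import com.daml.ledger.client.binding.util.ScalaUtil.FutureOps

object TimeoutExample {
  implicit val ec: ExecutionContext = ExecutionContext.global
  implicit val scheduler: ScheduledExecutorService = Executors.newSingleThreadScheduledExecutor()

  def slowLookup(): String = { Thread.sleep(2000); "value" }

  // warn after 1 second, fail with TimeoutException after 5 seconds
  val guarded: Future[String] = Future(slowLookup()).timeout("slowLookup", 5.seconds, 1.second)
}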
Example 18
Source File: AkkaTest.scala From daml with Apache License 2.0 | 5 votes |
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.ledger.client.testing

import java.util
import java.util.concurrent.{Executors, ScheduledExecutorService}

import akka.NotUsed
import akka.actor.{ActorSystem, Scheduler}
import akka.stream.scaladsl.{Sink, Source}
import akka.stream.Materializer
import akka.util.ByteString
import com.daml.grpc.adapter.{ExecutionSequencerFactory, SingleThreadExecutionSequencerPool}
import com.typesafe.config.{Config, ConfigFactory, ConfigValueFactory}
import com.typesafe.scalalogging.LazyLogging
import org.scalatest.{BeforeAndAfterAll, Suite}

import scala.concurrent.duration._
import scala.concurrent.{Await, ExecutionContextExecutor, Future}
import scala.util.control.NonFatal

trait AkkaTest extends BeforeAndAfterAll with LazyLogging { self: Suite =>

  // TestEventListener is needed for log testing
  private val loggers =
    util.Arrays.asList("akka.event.slf4j.Slf4jLogger", "akka.testkit.TestEventListener")

  protected implicit val sysConfig: Config = ConfigFactory
    .load()
    .withValue("akka.loggers", ConfigValueFactory.fromIterable(loggers))
    .withValue("akka.logger-startup-timeout", ConfigValueFactory.fromAnyRef("30s"))
    .withValue("akka.stdout-loglevel", ConfigValueFactory.fromAnyRef("INFO"))

  protected implicit val system: ActorSystem = ActorSystem("test", sysConfig)

  protected implicit val ec: ExecutionContextExecutor =
    system.dispatchers.lookup("test-dispatcher")

  protected implicit val scheduler: Scheduler = system.scheduler

  protected implicit val schedulerService: ScheduledExecutorService =
    Executors.newSingleThreadScheduledExecutor()

  protected implicit val materializer: Materializer = Materializer(system)

  protected implicit val esf: ExecutionSequencerFactory =
    new SingleThreadExecutionSequencerPool("testSequencerPool")

  protected val timeout: FiniteDuration = 2.minutes
  protected val shortTimeout: FiniteDuration = 5.seconds

  protected def await[T](fun: => Future[T]): T = Await.result(fun, timeout)

  protected def awaitShort[T](fun: => Future[T]): T = Await.result(fun, shortTimeout)

  protected def drain(source: Source[ByteString, NotUsed]): ByteString = {
    val futureResult: Future[ByteString] = source.runFold(ByteString.empty) { (a, b) =>
      a.concat(b)
    }
    awaitShort(futureResult)
  }

  protected def drain[A, B](source: Source[A, B]): Seq[A] = {
    val futureResult: Future[Seq[A]] = source.runWith(Sink.seq)
    awaitShort(futureResult)
  }

  override protected def afterAll(): Unit = {
    try {
      val _ = await(system.terminate())
    } catch {
      case NonFatal(_) => ()
    }
    schedulerService.shutdownNow()
    super.afterAll()
  }
}