java.util.concurrent.ThreadFactory Scala Examples
The following examples show how to use java.util.concurrent.ThreadFactory.
You can vote up the examples you find useful or vote down the ones you don't,
and go to the original project or source file by following the link above each example.
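Before the project-specific examples, here is a minimal sketch of the pattern most of them follow: a ThreadFactory that gives its threads a readable, numbered name and marks them as daemons before handing them to an executor. The object name NamedDaemonThreadFactory and the "worker" prefix are illustrative only and do not come from the projects listed below.

import java.util.concurrent.atomic.AtomicInteger
import java.util.concurrent.{Executors, ThreadFactory}

object NamedDaemonThreadFactory {
  // Produces daemon threads with a readable, numbered name.
  def apply(prefix: String): ThreadFactory = new ThreadFactory {
    private val counter = new AtomicInteger(0)
    override def newThread(r: Runnable): Thread = {
      val t = new Thread(r, s"$prefix-${counter.getAndIncrement()}")
      t.setDaemon(true) // do not keep the JVM alive because of this pool
      t
    }
  }
}

object Demo extends App {
  // Pass the factory to any Executors.* constructor that accepts one.
  val pool = Executors.newFixedThreadPool(2, NamedDaemonThreadFactory("worker"))
  pool.execute(() => println(s"running on ${Thread.currentThread().getName}"))
  pool.shutdown()
}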
Example 1
Source File: PlainRabbit.scala From graphcool-framework with Apache License 2.0 | 5 votes |
package cool.graph.rabbit

import java.util.concurrent.{Executors, ThreadFactory}
import scala.util.Try
import com.rabbitmq.client.{ConnectionFactory, Channel => RabbitChannel}
import cool.graph.bugsnag.BugSnagger

object PlainRabbit {

  def connect(name: String, amqpUri: String, numberOfThreads: Int, qos: Option[Int])(implicit bugSnag: BugSnagger): Try[RabbitChannel] = Try {
    val threadFactory: ThreadFactory = Utils.newNamedThreadFactory(name)
    val factory = {
      val f       = new ConnectionFactory()
      val timeout = sys.env.getOrElse("RABBIT_TIMEOUT_MS", "500").toInt
      f.setUri(amqpUri)
      f.setConnectionTimeout(timeout)
      f.setExceptionHandler(RabbitExceptionHandler(bugSnag))
      f.setThreadFactory(threadFactory)
      f.setAutomaticRecoveryEnabled(true)
      f
    }
    val executor   = Executors.newFixedThreadPool(numberOfThreads, threadFactory)
    val connection = factory.newConnection(executor)
    val theQos     = qos.orElse(sys.env.get("RABBIT_CHANNEL_QOS").map(_.toInt)).getOrElse(500)

    val chan = connection.createChannel()
    chan.basicQos(theQos)
    chan
  }
}
Example 2
Source File: standard_thread.scala From libisabelle with Apache License 2.0 | 5 votes |
package isabelle

import java.lang.Thread
import java.util.concurrent.{ExecutorService, ThreadPoolExecutor, TimeUnit, LinkedBlockingQueue, ThreadFactory}

object Standard_Thread {

  final class Delay private[Standard_Thread](
    first: Boolean, delay: => Time, log: Logger, event: => Unit)
  {
    private var running: Option[Event_Timer.Request] = None

    private def run: Unit = {
      val do_run = synchronized {
        if (running.isDefined) { running = None; true } else false
      }
      if (do_run) {
        try { event }
        catch { case exn: Throwable if !Exn.is_interrupt(exn) => log(Exn.message(exn)); throw exn }
      }
    }

    def invoke(): Unit = synchronized {
      val new_run =
        running match {
          case Some(request) => if (first) false else { request.cancel; true }
          case None => true
        }
      if (new_run)
        running = Some(Event_Timer.request(Time.now() + delay)(run))
    }

    def revoke(): Unit = synchronized {
      running match {
        case Some(request) => request.cancel; running = None
        case None =>
      }
    }

    def postpone(alt_delay: Time): Unit = synchronized {
      running match {
        case Some(request) =>
          val alt_time = Time.now() + alt_delay
          if (request.time < alt_time && request.cancel) {
            running = Some(Event_Timer.request(alt_time)(run))
          }
        case None =>
      }
    }
  }

  // delayed event after first invocation
  def delay_first(delay: => Time, log: Logger = No_Logger)(event: => Unit): Delay =
    new Delay(true, delay, log, event)

  // delayed event after last invocation
  def delay_last(delay: => Time, log: Logger = No_Logger)(event: => Unit): Delay =
    new Delay(false, delay, log, event)
}
Example 3
Source File: DoobieHikariModule.scala From scala-server-toolkit with MIT License | 5 votes |
package com.avast.sst.doobie

import java.util.Properties
import java.util.concurrent.{ScheduledExecutorService, ThreadFactory}

import cats.Show
import cats.effect.{Async, Blocker, ContextShift, Resource, Sync}
import cats.syntax.show._
import com.zaxxer.hikari.HikariConfig
import com.zaxxer.hikari.metrics.MetricsTrackerFactory
import doobie.enum.TransactionIsolation
import doobie.hikari.HikariTransactor

import scala.concurrent.ExecutionContext

object DoobieHikariModule {

  def make[F[_]: Async](
      config: DoobieHikariConfig,
      boundedConnectExecutionContext: ExecutionContext,
      blocker: Blocker,
      metricsTrackerFactory: Option[MetricsTrackerFactory] = None
  )(implicit cs: ContextShift[F]): Resource[F, HikariTransactor[F]] = {
    for {
      hikariConfig <- Resource.liftF(makeHikariConfig(config, metricsTrackerFactory))
      transactor   <- HikariTransactor.fromHikariConfig(hikariConfig, boundedConnectExecutionContext, blocker)
    } yield transactor
  }

  implicit private val transactionIsolationShow: Show[TransactionIsolation] = {
    case TransactionIsolation.TransactionNone            => "TRANSACTION_NONE"
    case TransactionIsolation.TransactionReadUncommitted => "TRANSACTION_READ_UNCOMMITTED"
    case TransactionIsolation.TransactionReadCommitted   => "TRANSACTION_READ_COMMITTED"
    case TransactionIsolation.TransactionRepeatableRead  => "TRANSACTION_REPEATABLE_READ"
    case TransactionIsolation.TransactionSerializable    => "TRANSACTION_SERIALIZABLE"
  }

  private def makeHikariConfig[F[_]: Sync](
      config: DoobieHikariConfig,
      metricsTrackerFactory: Option[MetricsTrackerFactory],
      scheduledExecutorService: Option[ScheduledExecutorService] = None,
      threadFactory: Option[ThreadFactory] = None
  ): F[HikariConfig] = {
    Sync[F].delay {
      val c = new HikariConfig()

      c.setDriverClassName(config.driver)
      c.setJdbcUrl(config.url)
      c.setUsername(config.username)
      c.setPassword(config.password)
      c.setAutoCommit(config.autoCommit)
      c.setConnectionTimeout(config.connectionTimeout.toMillis)
      c.setIdleTimeout(config.idleTimeout.toMillis)
      c.setMaxLifetime(config.maxLifeTime.toMillis)
      c.setMinimumIdle(config.minimumIdle)
      c.setMaximumPoolSize(config.maximumPoolSize)
      c.setReadOnly(config.readOnly)
      c.setAllowPoolSuspension(config.allowPoolSuspension)
      c.setIsolateInternalQueries(config.isolateInternalQueries)
      c.setRegisterMbeans(config.registerMBeans)

      val dataSourceProperties = new Properties()
      config.dataSourceProperties.foreach { case (k, v) => dataSourceProperties.put(k, v) }
      c.setDataSourceProperties(dataSourceProperties)

      config.leakDetectionThreshold.map(_.toMillis).foreach(c.setLeakDetectionThreshold)
      config.initializationFailTimeout.map(_.toMillis).foreach(c.setInitializationFailTimeout)
      config.poolName.foreach(c.setPoolName)
      config.validationTimeout.map(_.toMillis).foreach(c.setValidationTimeout)
      config.transactionIsolation.map(_.show).foreach(c.setTransactionIsolation)

      scheduledExecutorService.foreach(c.setScheduledExecutor)
      threadFactory.foreach(c.setThreadFactory)

      metricsTrackerFactory.foreach(c.setMetricsTrackerFactory)
      c
    }
  }

}
Example 4
Source File: LinebackerSpec.scala From linebacker with MIT License | 5 votes |
package io.chrisdavenport.linebacker

import org.specs2._
import cats.effect._
import cats.implicits._
import java.lang.Thread
import scala.concurrent.ExecutionContext
import java.util.concurrent.atomic.AtomicLong
import java.util.concurrent.{Executors, ThreadFactory}
import _root_.io.chrisdavenport.linebacker.contexts.{Executors => E}
import scala.concurrent.ExecutionContext.global

class LinebackerSpec extends Spec {
  override def is = s2"""
  Threads Run On Linebacker $runsOnLinebacker
  Threads Afterwards Run on Provided EC $runsOffLinebackerAfterwards
  """

  def runsOnLinebacker = {
    val testRun = E
      .unbound[IO]
      .map(Linebacker.fromExecutorService[IO])
      .use { implicit linebacker =>
        implicit val cs = IO.contextShift(global)
        Linebacker[IO].blockContextShift(IO(Thread.currentThread().getName))
      }

    testRun.unsafeRunSync must_=== "linebacker-thread-0"
  }

  def runsOffLinebackerAfterwards = {
    val executor = Executors.newCachedThreadPool(new ThreadFactory {
      private val counter = new AtomicLong(0L)
      def newThread(r: Runnable) = {
        val th = new Thread(r)
        th.setName("test-ec-" + counter.getAndIncrement.toString)
        th.setDaemon(true)
        th
      }
    })
    implicit val ec = ExecutionContext
      .fromExecutorService(executor)
    implicit val linebacker = Linebacker.fromExecutionContext[IO](global) // Block Onto Global
    implicit val cs = IO.contextShift(ec) // Should return to custom
    val testRun = Linebacker[IO].blockContextShift(IO.unit) *>
      IO(Thread.currentThread().getName) <* IO(executor.shutdownNow)
    testRun.unsafeRunSync must_=== "test-ec-0"
  }
}
Example 5
Source File: CancellableFuturePool.scala From almond with BSD 3-Clause "New" or "Revised" License | 5 votes |
package almond.interpreter.util

import java.lang.Thread.UncaughtExceptionHandler
import java.util.concurrent.{Executors, ThreadFactory}

import almond.logger.LoggerContext

import scala.concurrent.{Future, Promise}
import scala.util.control.NonFatal
import scala.util.{Failure, Success}

final class CancellableFuturePool(
  logCtx: LoggerContext
) {

  private val log = logCtx(getClass)

  private val pool = Executors.newCachedThreadPool(
    // from scalaz.concurrent.Strategy.DefaultDaemonThreadFactory
    new ThreadFactory {
      val defaultThreadFactory = Executors.defaultThreadFactory()
      def newThread(r: Runnable) = {
        val t = defaultThreadFactory.newThread(r)
        t.setDaemon(true)
        t.setUncaughtExceptionHandler(
          new UncaughtExceptionHandler {
            def uncaughtException(t: Thread, e: Throwable) =
              log.warn(s"Uncaught exception in thread $t", e)
          }
        )
        t
      }
    }
  )

  def future[T](result: => T): Future[T] = {
    val p = Promise[T]()
    pool.submit(
      new Runnable {
        def run() =
          p.complete {
            try Success(result)
            catch {
              case NonFatal(e) => Failure(e)
            }
          }
      }
    )
    p.future
  }

  def cancellableFuture[T](result: T): CancellableFuture[T] = {

    @volatile var completionThreadOpt = Option.empty[Thread]

    def result0(): T = {
      completionThreadOpt = Some(Thread.currentThread())
      try result
      finally {
        completionThreadOpt = None
      }
    }

    def cancel(): Unit =
      for (t <- completionThreadOpt)
        t.stop()

    CancellableFuture(future(result0()), () => cancel())
  }

  def shutdown(): Unit =
    pool.shutdown()
}
Example 6
Source File: ThreadUtil.scala From almond with BSD 3-Clause "New" or "Revised" License | 5 votes |
package almond.util

import java.lang.Thread.UncaughtExceptionHandler
import java.util.concurrent.{Executors, ThreadFactory}
import java.util.concurrent.atomic.AtomicInteger

import scala.concurrent.{ExecutionContext, ExecutionContextExecutorService}
import scala.util.control.NonFatal

object ThreadUtil {

  // From https://github.com/functional-streams-for-scala/fs2/blob/d47f903bc6bbcdd5d8bc6d573bc7cfd956f0cbb6/core/jvm/src/main/scala/fs2/Strategy.scala#L19-L41
  def daemonThreadFactory(threadName: String, exitJvmOnFatalError: Boolean = true): ThreadFactory =
    new ThreadFactory {
      val defaultThreadFactory = Executors.defaultThreadFactory()
      val idx = new AtomicInteger(0)
      def newThread(r: Runnable) = {
        val t = defaultThreadFactory.newThread(r)
        t.setDaemon(true)
        t.setName(s"$threadName-${idx.incrementAndGet()}")
        t.setUncaughtExceptionHandler(new UncaughtExceptionHandler {
          def uncaughtException(t: Thread, e: Throwable): Unit = {
            System.err.println(s"------------ UNHANDLED EXCEPTION ---------- (${t.getName})")
            e.printStackTrace(System.err)
            if (exitJvmOnFatalError) {
              e match {
                case NonFatal(_) => ()
                case fatal => System.exit(-1)
              }
            }
          }
        })
        t
      }
    }

  def sequentialExecutionContext(): ExecutionContext =
    new SequentialExecutionContext

  def singleThreadedExecutionContext(threadName: String): ExecutionContext =
    ExecutionContext.fromExecutorService(
      Executors.newSingleThreadExecutor(daemonThreadFactory(threadName))
    )

  def attemptShutdownExecutionContext(ec: ExecutionContext): Boolean =
    ec match {
      case _: SequentialExecutionContext =>
        true
      case es: ExecutionContextExecutorService =>
        es.shutdown()
        true
      case _ =>
        false
    }
}
Example 7
Source File: PoolUtils.scala From cats-effect with Apache License 2.0 | 5 votes |
package cats.effect
package internals

import scala.concurrent.ExecutionContext
import scala.util.control.NonFatal

import java.util.concurrent.{Executors, ThreadFactory}
import java.util.concurrent.atomic.AtomicInteger

private[internals] object PoolUtils {
  // we can initialize this eagerly because the enclosing object is lazy
  val ioAppGlobal: ExecutionContext = {
    // lower-bound of 2 to prevent pathological deadlocks on virtual machines
    val bound = math.max(2, Runtime.getRuntime().availableProcessors())

    val executor = Executors.newFixedThreadPool(
      bound,
      new ThreadFactory {
        val ctr = new AtomicInteger(0)
        def newThread(r: Runnable): Thread = {
          val back = new Thread(r, s"ioapp-compute-${ctr.getAndIncrement()}")
          back.setDaemon(true)
          back
        }
      }
    )

    exitOnFatal(ExecutionContext.fromExecutor(executor))
  }

  def exitOnFatal(ec: ExecutionContext): ExecutionContext = new ExecutionContext {
    def execute(r: Runnable): Unit =
      ec.execute(new Runnable {
        def run(): Unit =
          try {
            r.run()
          } catch {
            case NonFatal(t) =>
              reportFailure(t)

            case t: Throwable =>
              // under most circumstances, this will work even with fatal errors
              t.printStackTrace()
              System.exit(1)
          }
      })

    def reportFailure(t: Throwable): Unit =
      ec.reportFailure(t)
  }
}
Example 8
Source File: IOTimer.scala From cats-effect with Apache License 2.0 | 5 votes |
package cats.effect
package internals

import java.util.concurrent.{ScheduledExecutorService, ScheduledThreadPoolExecutor, ThreadFactory, TimeUnit}

import cats.effect.internals.Callback.T
import cats.effect.internals.IOShift.Tick

import scala.concurrent.ExecutionContext
import scala.concurrent.duration._
import scala.util.Try

// NOTE: this listing starts mid-file; the enclosing `object IOTimer` declaration
// (and the IOTimer class it accompanies) is omitted in the original snippet.

  def apply(ec: ExecutionContext, sc: ScheduledExecutorService): Timer[IO] =
    new IOTimer(ec, sc)

  private[internals] lazy val scheduler: ScheduledExecutorService =
    mkGlobalScheduler(sys.props)

  private[internals] def mkGlobalScheduler(props: collection.Map[String, String]): ScheduledThreadPoolExecutor = {
    val corePoolSize = props
      .get("cats.effect.global_scheduler.threads.core_pool_size")
      .flatMap(s => Try(s.toInt).toOption)
      .filter(_ > 0)
      .getOrElse(2)
    val keepAliveTime = props
      .get("cats.effect.global_scheduler.keep_alive_time_ms")
      .flatMap(s => Try(s.toLong).toOption)
      .filter(_ > 0L)

    val tp = new ScheduledThreadPoolExecutor(corePoolSize, new ThreadFactory {
      def newThread(r: Runnable): Thread = {
        val th = new Thread(r)
        th.setName(s"cats-effect-scheduler-${th.getId}")
        th.setDaemon(true)
        th
      }
    })

    keepAliveTime.foreach { timeout =>
      // Call in this order or it throws!
      tp.setKeepAliveTime(timeout, TimeUnit.MILLISECONDS)
      tp.allowCoreThreadTimeOut(true)
    }

    tp.setRemoveOnCancelPolicy(true)
    tp
  }

  final private class ShiftTick(
    conn: IOConnection,
    cb: Either[Throwable, Unit] => Unit,
    ec: ExecutionContext
  ) extends Runnable {
    def run(): Unit = {
      // Shifts actual execution on our `ExecutionContext`, because
      // the scheduler is in charge only of ticks and the execution
      // needs to shift because the tick might continue with whatever
      // bind continuation is linked to it, keeping the current thread
      // occupied
      conn.pop()
      ec.execute(new Tick(cb))
    }
  }
}
Example 9
Source File: Contexts.scala From scala-loci with Apache License 2.0 | 5 votes |
package loci
package contexts

import java.util.concurrent.{Executors, ThreadFactory}

import scala.concurrent.{ExecutionContext, ExecutionContextExecutor}
import scala.util.control.NonFatal

object Pooled {
  lazy val global: ExecutionContextExecutor =
    new logging.ReportingExecutionContext(ExecutionContext.global)

  object Implicits {
    implicit lazy val global: ExecutionContext = Pooled.global
  }
}

object Immediate {
  lazy val global: ExecutionContextExecutor = new ExecutionContextExecutor {
    def execute(runnable: Runnable) =
      try runnable.run()
      catch { case NonFatal(exception) => reportFailure(exception) }

    def reportFailure(throwable: Throwable) =
      logging.reportException(throwable)
  }

  object Implicits {
    implicit lazy val global: ExecutionContext = Immediate.global
  }
}

object Queued {
  lazy val global = create()

  def create(): ExecutionContextExecutor =
    ExecutionContext.fromExecutorService(
      Executors.newSingleThreadExecutor(new ThreadFactory {
        def newThread(runnable: Runnable) = {
          val thread = new Thread(new Runnable {
            def run() =
              try runnable.run()
              catch {
                case NonFatal(exception) =>
                  if (exception.getCause != null)
                    logging.reportException(exception.getCause)
                  else
                    logging.reportException(exception)
              }
          })
          thread.setDaemon(true)
          thread
        }
      }),
      logging.reportException)

  object Implicits {
    implicit lazy val global: ExecutionContext = Queued.global
  }
}
Example 10
Source File: ExecutionContextSchedulerTest.scala From reactor-scala-extensions with Apache License 2.0 | 5 votes |
package reactor.core.scala.scheduler

import java.util.concurrent.{Executors, ThreadFactory}

import org.scalatest.freespec.AnyFreeSpec
import org.scalatest.matchers.should.Matchers
import reactor.core.scala.publisher.SMono
import reactor.test.StepVerifier

import scala.concurrent.ExecutionContext

class ExecutionContextSchedulerTest extends AnyFreeSpec with Matchers {
  "ExecutionContextScheduler" - {
    "should create a Scheduler using provided ExecutionContext" - {
      "on SMono" in {
        val executionContext = ExecutionContext.fromExecutorService(Executors.newFixedThreadPool(1, new ThreadFactory {
          override def newThread(r: Runnable): Thread = new Thread(r, "THREAD-NAME-SMONO")
        }))
        val mono = SMono.just(1)
          .subscribeOn(ExecutionContextScheduler(executionContext))
          .doOnNext(i => Thread.currentThread().getName shouldBe "THREAD-NAME-SMONO")
        StepVerifier.create(mono)
          .expectNext(1)
          .verifyComplete()
      }
    }
  }
}
Example 11
Source File: BitVectorSocket.scala From skunk with MIT License | 5 votes |
// Copyright (c) 2018-2020 by Rob Norris
// This software is licensed under the MIT License (MIT).
// For more information see LICENSE or https://opensource.org/licenses/MIT

package skunk.net

import cats._
import cats.effect._
import cats.implicits._
import fs2.Chunk
import fs2.io.tcp.Socket
import scala.concurrent.duration.FiniteDuration
import scodec.bits.BitVector
import java.net.InetSocketAddress
import java.nio.channels._
import java.util.concurrent.Executors
import java.util.concurrent.ThreadFactory
import fs2.io.tcp.SocketGroup

// NOTE: this listing starts mid-file; the enclosing `object BitVectorSocket`
// declaration (which also defines `fromSocket`) is omitted in the original snippet.

  def apply[F[_]: Concurrent: ContextShift](
    host: String,
    port: Int,
    readTimeout: FiniteDuration,
    writeTimeout: FiniteDuration,
    sg: SocketGroup,
    sslOptions: Option[SSLNegotiation.Options[F]],
  ): Resource[F, BitVectorSocket[F]] =
    for {
      sock  <- sg.client[F](new InetSocketAddress(host, port))
      sockʹ <- sslOptions.fold(sock.pure[Resource[F, ?]])(SSLNegotiation.negotiateSSL(sock, readTimeout, writeTimeout, _))
    } yield fromSocket(sockʹ, readTimeout, writeTimeout)

}
Example 12
Source File: Module.scala From elastiknn with Apache License 2.0 | 5 votes |
import java.util.concurrent.{ExecutorService, Executors, ThreadFactory}

import com.google.common.util.concurrent.ThreadFactoryBuilder
import com.google.inject.{AbstractModule, TypeLiteral}
import com.klibisz.elastiknn.client.{ElastiknnClient, ElastiknnFutureClient}
import javax.inject.Provider
import play.api.{Configuration, Environment}

import scala.concurrent.ExecutionContext

class Module(environment: Environment, configuration: Configuration) extends AbstractModule {

  val eknnProvider = new Provider[ElastiknnFutureClient] {
    override def get(): ElastiknnFutureClient = {
      val tfac: ThreadFactory = new ThreadFactoryBuilder().setDaemon(true).setNameFormat("elastiknn-%d").build()
      val exec: ExecutorService = Executors.newFixedThreadPool(Runtime.getRuntime.availableProcessors(), tfac)
      implicit val ec: ExecutionContext = ExecutionContext.fromExecutor(exec)
      val host = configuration.underlying.getString("elastiknn.elasticsearch.host")
      val port = configuration.underlying.getInt("elastiknn.elasticsearch.port")
      ElastiknnClient.futureClient(host, port)
    }
  }

  override def configure(): Unit = {
    // Weird that you have to use this constructor, but it works.
    bind(new TypeLiteral[ElastiknnFutureClient]() {}).toProvider(eknnProvider)
  }
}
Example 13
Source File: FixedParallelSuite.scala From scala-debugger with Apache License 2.0 | 5 votes |
package org.scaladebugger.test.helpers

import java.util.concurrent.{ExecutorService, Executors, ThreadFactory}

import FixedParallelSuite._

object FixedParallelSuite {
  lazy val DefaultExecutorService = Executors.newFixedThreadPool(
    ControlledParallelSuite.calculatePoolSize(),
    ControlledParallelSuite.threadFactory
  )
}

trait FixedParallelSuite extends ControlledParallelSuite {
  protected lazy val executorService = DefaultExecutorService

  override protected def newExecutorService(
    poolSize: Int,
    threadFactory: ThreadFactory
  ): ExecutorService = {
    executorService
  }
}
Example 14
Source File: Utils.scala From graphcool-framework with Apache License 2.0 | 5 votes |
package cool.graph.rabbit

import java.text.SimpleDateFormat
import java.util.{Date, UUID}
import java.util.concurrent.ThreadFactory
import java.util.concurrent.atomic.AtomicLong

object Utils {
  def timestamp: String = {
    val formatter = new SimpleDateFormat("HH:mm:ss.SSS-dd.MM.yyyy")
    val now       = new Date()
    formatter.format(now)
  }

  def timestampWithRandom: String = timestamp + "-" + UUID.randomUUID()

  def newNamedThreadFactory(name: String): ThreadFactory =
    new ThreadFactory {
      val count = new AtomicLong(0)

      override def newThread(runnable: Runnable): Thread = {
        val thread = new Thread(runnable)
        thread.setName(s"$name-" + count.getAndIncrement)
        thread.setDaemon(true)
        thread
      }
    }
}
Example 15
Source File: ThreadFactoryBuilder.scala From gfc-concurrent with Apache License 2.0 | 5 votes |
package com.gilt.gfc.concurrent

import java.lang.Thread.UncaughtExceptionHandler
import java.util.concurrent.atomic.AtomicLong
import java.util.concurrent.ThreadFactory

import com.gilt.gfc.logging.Loggable

object ThreadFactoryBuilder {
  def apply(): ThreadFactoryBuilder = ThreadFactoryBuilder(None, None, None, None, true)

  def apply(groupName: String, threadName: String): ThreadFactoryBuilder = {
    val group = ThreadGroupBuilder().withName(groupName).build()
    ThreadFactoryBuilder().withNameFormat(threadName + "-%s").withThreadGroup(group)
  }

  val LogUncaughtExceptionHandler = new Thread.UncaughtExceptionHandler with Loggable {
    override def uncaughtException(t: Thread, e: Throwable): Unit = {
      error("Failed to catch exception in thread " + t.getName(), e)
    }
  }
}

case class ThreadFactoryBuilder private (private val nameFormat: Option[String],
                                         private val priority: Option[Int],
                                         private val exceptionHandler: Option[UncaughtExceptionHandler],
                                         private val threadGroup: Option[ThreadGroup],
                                         private val daemon: Boolean) {

  def withNameFormat(nameFormat: String): ThreadFactoryBuilder = copy(nameFormat = Some(nameFormat))

  def withPriority(priority: Int): ThreadFactoryBuilder = copy(priority = Some(priority))

  def withUncaughtExceptionHandler(exceptionHandler: UncaughtExceptionHandler): ThreadFactoryBuilder = copy(exceptionHandler = Some(exceptionHandler))

  def withThreadGroup(threadGroup: ThreadGroup): ThreadFactoryBuilder = copy(threadGroup = Some(threadGroup))

  def withDaemonFlag(isDaemon: Boolean): ThreadFactoryBuilder = copy(daemon = isDaemon)

  def build(): ThreadFactory = {
    val nameF: Option[() => String] = nameFormat.map { nf =>
      val count = new AtomicLong(0)
      () => nf.format(count.getAndIncrement)
    }

    new ThreadFactory {
      override def newThread(runnable: Runnable): Thread = {
        val group = threadGroup.getOrElse(ThreadGroupBuilder.currentThreadGroup())
        val thread = new Thread(group, runnable)
        nameF.foreach(f => thread.setName(f()))
        priority.foreach(thread.setPriority)
        exceptionHandler.foreach(thread.setUncaughtExceptionHandler)
        thread.setDaemon(daemon)
        thread
      }
    }
  }
}
Example 16
Source File: SingleThreadedActorSystem.scala From graphcool-framework with Apache License 2.0 | 5 votes |
package cool.graph.akkautil

import java.util.concurrent.atomic.AtomicLong
import java.util.concurrent.{Executors, ThreadFactory}

import akka.actor.ActorSystem

object SingleThreadedActorSystem {
  def apply(name: String): ActorSystem = {
    val ec = scala.concurrent.ExecutionContext.fromExecutor(Executors.newSingleThreadExecutor(newNamedThreadFactory(name)))
    ActorSystem(name, defaultExecutionContext = Some(ec))
  }

  def newNamedThreadFactory(name: String): ThreadFactory =
    new ThreadFactory {
      val count = new AtomicLong(0)

      override def newThread(runnable: Runnable): Thread = {
        val thread = new Thread(runnable)
        thread.setDaemon(true)
        thread.setName(s"$name-" + count.getAndIncrement)
        thread
      }
    }
}
Example 17
Source File: ThreadUtil.scala From coursier with Apache License 2.0 | 5 votes |
package coursier.cache.internal

import java.util.concurrent.{ExecutorService, LinkedBlockingQueue, ScheduledExecutorService, ScheduledThreadPoolExecutor, ThreadFactory, ThreadPoolExecutor, TimeUnit}
import java.util.concurrent.atomic.AtomicInteger

object ThreadUtil {

  private val poolNumber = new AtomicInteger(1)

  def daemonThreadFactory(): ThreadFactory = {

    val poolNumber0 = poolNumber.getAndIncrement()

    val threadNumber = new AtomicInteger(1)

    new ThreadFactory {
      def newThread(r: Runnable) = {
        val threadNumber0 = threadNumber.getAndIncrement()
        val t = new Thread(r, s"coursier-pool-$poolNumber0-thread-$threadNumber0")
        t.setDaemon(true)
        t.setPriority(Thread.NORM_PRIORITY)
        t
      }
    }
  }

  def fixedThreadPool(size: Int): ExecutorService = {

    val factory = daemonThreadFactory()

    // 1 min keep alive, so that threads get stopped a bit after resolution / downloading is done
    val executor = new ThreadPoolExecutor(
      size, size,
      1L, TimeUnit.MINUTES,
      new LinkedBlockingQueue[Runnable],
      factory
    )
    executor.allowCoreThreadTimeOut(true)
    executor
  }

  def fixedScheduledThreadPool(size: Int): ScheduledExecutorService = {

    val factory = daemonThreadFactory()

    val executor = new ScheduledThreadPoolExecutor(size, factory)
    executor.setKeepAliveTime(1L, TimeUnit.MINUTES)
    executor.allowCoreThreadTimeOut(true)
    executor
  }

  def withFixedThreadPool[T](size: Int)(f: ExecutorService => T): T = {

    var pool: ExecutorService = null

    try {
      pool = fixedThreadPool(size)
      f(pool)
    } finally {
      if (pool != null)
        pool.shutdown()
    }
  }
}
Example 18
Source File: ThreadFactories.scala From docspell with GNU General Public License v3.0 | 5 votes |
package docspell.common

import java.util.concurrent.ForkJoinPool
import java.util.concurrent.ForkJoinPool.ForkJoinWorkerThreadFactory
import java.util.concurrent.ForkJoinWorkerThread
import java.util.concurrent.atomic.AtomicLong
import java.util.concurrent.{Executors, ThreadFactory}

import scala.concurrent._

import cats.effect._

object ThreadFactories {

  def ofName(prefix: String): ThreadFactory =
    new ThreadFactory {
      val counter = new AtomicLong(0)

      override def newThread(r: Runnable): Thread = {
        val t = Executors.defaultThreadFactory().newThread(r)
        t.setName(s"$prefix-${counter.getAndIncrement()}")
        t
      }
    }

  def ofNameFJ(prefix: String): ForkJoinWorkerThreadFactory =
    new ForkJoinWorkerThreadFactory {
      val tf      = ForkJoinPool.defaultForkJoinWorkerThreadFactory
      val counter = new AtomicLong(0)

      def newThread(pool: ForkJoinPool): ForkJoinWorkerThread = {
        val t = tf.newThread(pool)
        t.setName(s"$prefix-${counter.getAndIncrement()}")
        t
      }
    }

  def executorResource[F[_]: Sync](
      c: => ExecutionContextExecutorService
  ): Resource[F, ExecutionContextExecutorService] =
    Resource.make(Sync[F].delay(c))(ec => Sync[F].delay(ec.shutdown))

  def cached[F[_]: Sync](
      tf: ThreadFactory
  ): Resource[F, ExecutionContextExecutorService] =
    executorResource(
      ExecutionContext.fromExecutorService(Executors.newCachedThreadPool(tf))
    )

  def fixed[F[_]: Sync](
      n: Int,
      tf: ThreadFactory
  ): Resource[F, ExecutionContextExecutorService] =
    executorResource(
      ExecutionContext.fromExecutorService(Executors.newFixedThreadPool(n, tf))
    )

  def workSteal[F[_]: Sync](
      n: Int,
      tf: ForkJoinWorkerThreadFactory
  ): Resource[F, ExecutionContextExecutorService] =
    executorResource(
      ExecutionContext.fromExecutorService(
        new ForkJoinPool(n, tf, null, true)
      )
    )

  def workSteal[F[_]: Sync](
      tf: ForkJoinWorkerThreadFactory
  ): Resource[F, ExecutionContextExecutorService] =
    workSteal[F](Runtime.getRuntime().availableProcessors() + 1, tf)

}
Example 19
Source File: ThreadPoolOf.scala From kafka-journal with MIT License | 5 votes |
package com.evolutiongaming.kafka.journal.execution

import java.util.concurrent.{SynchronousQueue, ThreadFactory, ThreadPoolExecutor}

import cats.effect.{Resource, Sync}
import cats.implicits._

import scala.concurrent.duration._

object ThreadPoolOf {

  def apply[F[_] : Sync](
    minSize: Int,
    maxSize: Int,
    threadFactory: ThreadFactory,
    keepAlive: FiniteDuration = 1.minute,
  ): Resource[F, ThreadPoolExecutor] = {

    val result = for {
      result <- Sync[F].delay {
        new ThreadPoolExecutor(
          minSize,
          maxSize,
          keepAlive.length,
          keepAlive.unit,
          new SynchronousQueue[Runnable],
          threadFactory)
      }
    } yield {
      val release = Sync[F].delay { result.shutdown() }
      (result, release)
    }
    Resource(result)
  }
}
Example 20
Source File: NettyUtil.scala From incubator-retired-gearpump with Apache License 2.0 | 5 votes |
package org.apache.gearpump.transport.netty

import java.net.InetSocketAddress
import java.util.concurrent.{Executors, ThreadFactory}

import org.jboss.netty.bootstrap.{ClientBootstrap, ServerBootstrap}
import org.jboss.netty.channel.socket.nio.NioServerSocketChannelFactory
import org.jboss.netty.channel.{Channel, ChannelFactory, ChannelPipelineFactory}

object NettyUtil {

  def newNettyServer(
      name: String,
      pipelineFactory: ChannelPipelineFactory,
      buffer_size: Int,
      inputPort: Int = 0): (Int, Channel) = {
    val bossFactory: ThreadFactory = new NettyRenameThreadFactory(name + "-boss")
    val workerFactory: ThreadFactory = new NettyRenameThreadFactory(name + "-worker")
    val factory = new NioServerSocketChannelFactory(Executors.newCachedThreadPool(bossFactory),
      Executors.newCachedThreadPool(workerFactory), 1)

    val bootstrap = createServerBootStrap(factory, pipelineFactory, buffer_size)
    val channel: Channel = bootstrap.bind(new InetSocketAddress(inputPort))
    val port = channel.getLocalAddress().asInstanceOf[InetSocketAddress].getPort()
    (port, channel)
  }

  def createServerBootStrap(
      factory: ChannelFactory, pipelineFactory: ChannelPipelineFactory, buffer_size: Int)
    : ServerBootstrap = {
    val bootstrap = new ServerBootstrap(factory)
    bootstrap.setOption("child.tcpNoDelay", true)
    bootstrap.setOption("child.receiveBufferSize", buffer_size)
    bootstrap.setOption("child.keepAlive", true)
    bootstrap.setPipelineFactory(pipelineFactory)
    bootstrap
  }

  def createClientBootStrap(
      factory: ChannelFactory, pipelineFactory: ChannelPipelineFactory, buffer_size: Int)
    : ClientBootstrap = {
    val bootstrap = new ClientBootstrap(factory)
    bootstrap.setOption("tcpNoDelay", true)
    bootstrap.setOption("sendBufferSize", buffer_size)
    bootstrap.setOption("keepAlive", true)
    bootstrap.setPipelineFactory(pipelineFactory)
    bootstrap
  }
}
Example 21
Source File: ThreadPoolNamingSupport.scala From catbird with Apache License 2.0 | 5 votes |
package io.catbird.util.effect

import java.lang.{ Runnable, Thread }
import java.util.concurrent.{ Executors, ThreadFactory }

import scala.concurrent.{ ExecutionContext, ExecutionContextExecutorService }

trait ThreadPoolNamingSupport {

  def newNamedThreadPool(name: String): ExecutionContextExecutorService =
    ExecutionContext.fromExecutorService(
      Executors.newSingleThreadExecutor(new ThreadFactory {
        override def newThread(r: Runnable): Thread = {
          val thread = Executors.defaultThreadFactory().newThread(r)
          thread.setName(name)
          thread.setDaemon(true) // Don't block shutdown of JVM
          thread
        }
      })
    )

  def currentThreadName(): String = Thread.currentThread().getName
}
Example 22
Source File: TemperatureMachineThreadFactory.scala From temperature-machine with Apache License 2.0 | 5 votes |
package bad.robot.temperature.task

import java.util.concurrent.ThreadFactory
import java.util.concurrent.atomic.AtomicInteger

object TemperatureMachineThreadFactory {

  def apply(name: String, daemon: Boolean = true): ThreadFactory = {
    new ThreadFactory() {
      val count = new AtomicInteger

      def newThread(runnable: Runnable): Thread = {
        val thread = new Thread(runnable, s"temperature-$name-" + count.incrementAndGet())
        thread.setDaemon(daemon)
        thread
      }
    }
  }
}
Example 23
Source File: AsyncHandler.scala From airframe with Apache License 2.0 | 5 votes |
package wvlet.log

import java.io.Flushable
import java.util
import java.util.concurrent.atomic.AtomicBoolean
import java.util.concurrent.{Executors, ThreadFactory}
import java.util.{logging => jl}

class AsyncHandler(parent: jl.Handler) extends jl.Handler with Guard with AutoCloseable with Flushable {

  private val executor = {
    Executors.newCachedThreadPool(
      new ThreadFactory {
        override def newThread(r: Runnable): Thread = {
          val t = new Thread(r, "WvletLogAsyncHandler")
          t.setDaemon(true)
          t
        }
      }
    )
  }

  private val queue      = new util.ArrayDeque[jl.LogRecord]
  private val isNotEmpty = newCondition
  private val closed     = new AtomicBoolean(false)

  // Start a poller thread
  executor.submit(new Runnable {
    override def run(): Unit = {
      while (!closed.get()) {
        val record: jl.LogRecord = guard {
          if (queue.isEmpty) {
            isNotEmpty.await()
          }
          queue.pollFirst()
        }
        if (record != null) {
          parent.publish(record)
        }
      }
    }
  })

  override def flush(): Unit = {
    val records = Seq.newBuilder[jl.LogRecord]
    guard {
      while (!queue.isEmpty) {
        records += queue.pollFirst()
      }
    }
    records.result.map(parent.publish _)
    parent.flush()
  }

  override def publish(record: jl.LogRecord): Unit = {
    guard {
      queue.addLast(record)
      isNotEmpty.signal()
    }
  }

  override def close(): Unit = {
    if (closed.compareAndSet(false, true)) {
      flush()

      // Wake up the poller thread
      guard {
        isNotEmpty.signalAll()
      }
      executor.shutdown()
    }
  }
}
Example 24
Source File: instances.scala From cats-retry with Apache License 2.0 | 5 votes |
package retry
package alleycats

import cats.{Eval, Id}

import scala.concurrent.duration.FiniteDuration
import scala.concurrent.{Future, Promise}
import java.util.concurrent.{ThreadFactory, Executors}

object instances {
  implicit val threadSleepId: Sleep[Id] = new Sleep[Id] {
    def sleep(delay: FiniteDuration): Id[Unit] = Thread.sleep(delay.toMillis)
  }

  implicit val threadSleepEval: Sleep[Eval] = new Sleep[Eval] {
    def sleep(delay: FiniteDuration): Eval[Unit] =
      Eval.later(Thread.sleep(delay.toMillis))
  }

  private lazy val scheduler =
    Executors.newSingleThreadScheduledExecutor(new ThreadFactory {
      override def newThread(runnable: Runnable) = {
        val t = new Thread(runnable)
        t.setDaemon(true)
        t.setName("cats-retry scheduler")
        t
      }
    })

  implicit val threadSleepFuture: Sleep[Future] =
    new Sleep[Future] {
      def sleep(delay: FiniteDuration): Future[Unit] = {
        val promise = Promise[Unit]()
        scheduler.schedule(new Runnable {
          def run: Unit = {
            promise.success(())
            ()
          }
        }, delay.length, delay.unit)
        promise.future
      }
    }
}
Example 25
Source File: NamedThreadFactory.scala From mango with Apache License 2.0 | 5 votes |
package com.kakao.mango.concurrent

import java.lang.Thread.UncaughtExceptionHandler
import java.util.concurrent.ThreadFactory

import com.kakao.shaded.guava.util.concurrent.ThreadFactoryBuilder

object NamedThreadFactory {

  def apply(prefix: String, daemon: Boolean = true): ThreadFactory = {
    new ThreadFactoryBuilder()
      .setDaemon(daemon)
      .setNameFormat(s"$prefix-%d")
      .setUncaughtExceptionHandler(new UncaughtExceptionHandler {
        override def uncaughtException(t: Thread, e: Throwable): Unit = {
          System.err.print(s"Uncaught ${e.getClass.getSimpleName} in thread ${t.getName}:")
          e.printStackTrace(System.err)
        }
      })
      .build()
  }
}
Example 26
Source File: Blocking.scala From keycloak-benchmark with Apache License 2.0 | 5 votes |
package io.gatling.keycloak

import java.util.concurrent.atomic.AtomicInteger
import java.util.concurrent.{ThreadFactory, Executors}

import io.gatling.core.validation.Success
import io.gatling.core.akka.GatlingActorSystem

object Blocking {
  GatlingActorSystem.instance.registerOnTermination(() => shutdown())

  private val threadPool = Executors.newCachedThreadPool(new ThreadFactory {
    val counter = new AtomicInteger();

    override def newThread(r: Runnable): Thread =
      new Thread(r, "blocking-thread-" + counter.incrementAndGet())
  })

  def apply(f: () => Unit) = {
    threadPool.execute(new Runnable() {
      override def run = {
        f()
      }
    })
    Success(())
  }

  def shutdown() = {
    threadPool.shutdownNow()
  }
}
Example 27
Source File: WaitForTaskDsl.scala From algoliasearch-client-scala with MIT License | 5 votes |
package algolia.dsl

import java.time.ZonedDateTime
import java.util.concurrent.{Executors, ThreadFactory, TimeUnit}

import algolia.definitions.{WaitForTaskDefinition, WaitForTimeoutException}
import algolia.responses.{AlgoliaTask, TaskStatus}
import algolia.{AlgoliaClient, Executable}
import io.netty.util.{HashedWheelTimer, Timeout, TimerTask}

import scala.concurrent.{ExecutionContext, Future, Promise}

trait WaitForTaskDsl {

  case object waitFor {
    def task(task: AlgoliaTask): WaitForTaskDefinition =
      WaitForTaskDefinition(task.idToWaitFor)

    def task(taskID: Long): WaitForTaskDefinition =
      WaitForTaskDefinition(taskID)
  }

  implicit object WaitForTaskDefinitionExecutable
      extends Executable[WaitForTaskDefinition, TaskStatus] {

    // Run every 100 ms, use a wheel with 512 buckets
    private lazy val timer = {
      val threadFactory = new ThreadFactory {
        override def newThread(r: Runnable): Thread = {
          val t = Executors.defaultThreadFactory().newThread(r)
          t.setDaemon(true)
          t.setName("algolia-waitfor-thread-" + ZonedDateTime.now())
          t
        }
      }
      new HashedWheelTimer(threadFactory, 100, TimeUnit.MILLISECONDS, 512)
    }

    override def apply(client: AlgoliaClient, query: WaitForTaskDefinition)(
        implicit executor: ExecutionContext
    ): Future[TaskStatus] = {

      def request(d: Long, totalDelay: Long): Future[TaskStatus] =
        delay[TaskStatus](d) {
          client.request[TaskStatus](query.build())
        }.flatMap { res =>
          if (res.status == "published") {
            Future.successful(res)
          } else if (totalDelay > query.maxDelay) {
            Future.failed(
              WaitForTimeoutException(
                s"Waiting for task `${query.taskId}` on index `${query.index.get}` timeout after ${d}ms"
              )
            )
          } else {
            request(d * 2, totalDelay + d)
          }
        }

      request(query.baseDelay, 0L)
    }

    private def delay[T](delay: Long)(block: => Future[T]): Future[T] = {
      val promise = Promise[T]()
      val task = new TimerTask {
        override def run(timeout: Timeout): Unit = promise.completeWith(block)
      }
      timer.newTimeout(task, delay, TimeUnit.MILLISECONDS)
      promise.future
    }
  }
}
Example 28
Source File: NamedThreadFactory.scala From zorechka-bot with MIT License | 5 votes |
package com.wix.zorechka.utils.concurrent

import java.util.concurrent.ThreadFactory
import java.util.concurrent.atomic.AtomicInteger

case class NamedThreadFactory(name: String, daemon: Boolean) extends ThreadFactory {

  private val parentGroup =
    Option(System.getSecurityManager).fold(Thread.currentThread().getThreadGroup)(_.getThreadGroup)

  private val threadGroup = new ThreadGroup(parentGroup, name)
  private val threadCount = new AtomicInteger(1)
  private val threadHash  = Integer.toUnsignedString(this.hashCode())

  override def newThread(r: Runnable): Thread = {
    val newThreadNumber = threadCount.getAndIncrement()

    val thread = new Thread(threadGroup, r)
    thread.setName(s"$name-$newThreadNumber-$threadHash")
    thread.setDaemon(daemon)

    thread
  }

}
Example 29
Source File: BlockingIO.scala From gbf-raidfinder with MIT License | 5 votes |
package walfie.gbf.raidfinder.util

import java.util.concurrent.atomic.AtomicLong
import java.util.concurrent.{Executors, ThreadFactory}
import scala.concurrent.{ExecutionContext, ExecutionContextExecutor, Future, Promise, blocking}
import scala.util.control.NonFatal
import monix.execution.Scheduler

// https://github.com/alexandru/scala-best-practices/blob/master/sections/4-concurrency-parallelism.md
object BlockingIO {
  private val ioThreadPool = Scheduler.io(name = "io-thread")

  def future[T](t: => T): Future[T] = {
    val p = Promise[T]()

    val runnable = new Runnable {
      def run() = try {
        p.success(blocking(t))
      } catch {
        case NonFatal(ex) => p.failure(ex)
      }
    }

    ioThreadPool.execute(runnable)

    p.future
  }
}
Example 30
Source File: ThreadUtil.scala From iotchain with MIT License | 5 votes |
package jbok.common.thread

import java.lang.Thread.UncaughtExceptionHandler
import java.nio.channels.AsynchronousChannelGroup
import java.nio.channels.spi.AsynchronousChannelProvider
import java.util.concurrent.atomic.AtomicInteger
import java.util.concurrent.{Executors, ThreadFactory}

import cats.effect.{Resource, Sync}

import scala.concurrent.ExecutionContext
import scala.util.control.NonFatal

object ThreadUtil {

  def named(threadPrefix: String, daemon: Boolean, exitJvmOnFatalError: Boolean = true): ThreadFactory =
    new ThreadFactory {
      val defaultThreadFactory = Executors.defaultThreadFactory()
      val idx                  = new AtomicInteger(0)
      def newThread(r: Runnable) = {
        val t = defaultThreadFactory.newThread(r)
        t.setDaemon(daemon)
        t.setName(s"$threadPrefix-${idx.incrementAndGet()}")
        t.setUncaughtExceptionHandler(new UncaughtExceptionHandler {
          def uncaughtException(t: Thread, e: Throwable): Unit = {
            ExecutionContext.defaultReporter(e)
            if (exitJvmOnFatalError) {
              e match {
                case NonFatal(_) => ()
                case _           => System.exit(-1)
              }
            }
          }
        })
        t
      }
    }

  def blockingThreadPool[F[_]](name: String)(implicit F: Sync[F]): Resource[F, ExecutionContext] =
    Resource(F.delay {
      val factory  = named(name, daemon = true)
      val executor = Executors.newCachedThreadPool(factory)
      val ec       = ExecutionContext.fromExecutor(executor)
      (ec, F.delay(executor.shutdown()))
    })

  def acg[F[_]](implicit F: Sync[F]): Resource[F, AsynchronousChannelGroup] =
    Resource(F.delay {
      val acg = acgUnsafe
      (acg, F.delay(acg.shutdownNow()))
    })

  def acgUnsafe: AsynchronousChannelGroup =
    AsynchronousChannelProvider
      .provider()
      .openAsynchronousChannelGroup(8, named("jbok-ag-tcp", daemon = true))

  lazy val acgGlobal: AsynchronousChannelGroup = acgUnsafe
}