java.util.concurrent.ThreadPoolExecutor Scala Examples
The following examples show how to use java.util.concurrent.ThreadPoolExecutor.
Each example is reproduced from the open-source project named in the line above it.
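All of the listings below revolve around the same constructor, so here first is a minimal, self-contained sketch of it; the pool sizes and names are illustrative, not taken from any of the projects:

import java.util.concurrent.{LinkedBlockingQueue, ThreadPoolExecutor, TimeUnit}

val pool = new ThreadPoolExecutor(
  4, 4,                                // corePoolSize, maximumPoolSize: a fixed-size pool
  0L, TimeUnit.MILLISECONDS,           // keep-alive for threads above the core size
  new LinkedBlockingQueue[Runnable]()) // unbounded work queue absorbs any backlog
pool.execute(() => println("hello from " + Thread.currentThread().getName)) // Scala 2.12+ SAM conversion
pool.shutdown()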
Example 1
Source File: MultiThreadPipeline.scala From scrapy4s with GNU Lesser General Public License v3.0
package com.scrapy4s.pipeline

import java.util.concurrent.ThreadPoolExecutor.CallerRunsPolicy
import java.util.concurrent.{LinkedBlockingQueue, ThreadPoolExecutor, TimeUnit}

import com.scrapy4s.http.Response
import org.slf4j.LoggerFactory

class MultiThreadPipeline(threadCount: Int,
                          pipe: Pipeline) extends Pipeline {
  val logger = LoggerFactory.getLogger(classOf[MultiThreadPipeline])

  lazy private val threadPool = new ThreadPoolExecutor(threadCount, threadCount,
    0L, TimeUnit.MILLISECONDS,
    new LinkedBlockingQueue[Runnable](),
    new CallerRunsPolicy())

  override def pipe(response: Response): Unit = {
    threadPool.execute(() => {
      logger.debug(s"pipe -> exec ${response.url}")
      pipe.pipe(response)
    })
  }

  override def close(): Unit = {
    threadPool.shutdown()
    while (!threadPool.awaitTermination(1, TimeUnit.SECONDS)) {
      logger.debug("wait for spider done ...")
    }
    pipe.close()
    logger.debug("spider done !")
  }
}

object MultiThreadPipeline {
  def apply[T](pipe: Pipeline)
              (implicit threadCount: Int = Runtime.getRuntime.availableProcessors() * 2): MultiThreadPipeline =
    new MultiThreadPipeline(threadCount, pipe)
}
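For context, a hypothetical way to wire this in front of a downstream stage; the anonymous Pipeline below assumes the trait exposes only the two methods overridden above:

val logOnly = new Pipeline {
  override def pipe(response: Response): Unit = println(response.url) // stand-in downstream stage
  override def close(): Unit = ()
}
val parallel = MultiThreadPipeline(logOnly) // default: 2 x availableProcessors threads
// parallel.pipe(response) hands each response to the pool;
// close() blocks until the pool drains, then closes logOnly

Note the CallerRunsPolicy: if the pool is ever saturated, the submitting thread runs the task itself, which throttles producers instead of dropping work.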
Example 2
Source File: standard_thread.scala From libisabelle with Apache License 2.0
package isabelle

import java.lang.Thread
import java.util.concurrent.{ExecutorService, ThreadPoolExecutor, TimeUnit, LinkedBlockingQueue, ThreadFactory}

object Standard_Thread {
  final class Delay private[Standard_Thread](
    first: Boolean, delay: => Time, log: Logger, event: => Unit)
  {
    private var running: Option[Event_Timer.Request] = None

    private def run: Unit = {
      val do_run = synchronized {
        if (running.isDefined) { running = None; true } else false
      }
      if (do_run) {
        try { event }
        catch { case exn: Throwable if !Exn.is_interrupt(exn) => log(Exn.message(exn)); throw exn }
      }
    }

    def invoke(): Unit = synchronized {
      val new_run =
        running match {
          case Some(request) => if (first) false else { request.cancel; true }
          case None => true
        }
      if (new_run)
        running = Some(Event_Timer.request(Time.now() + delay)(run))
    }

    def revoke(): Unit = synchronized {
      running match {
        case Some(request) => request.cancel; running = None
        case None =>
      }
    }

    def postpone(alt_delay: Time): Unit = synchronized {
      running match {
        case Some(request) =>
          val alt_time = Time.now() + alt_delay
          if (request.time < alt_time && request.cancel) {
            running = Some(Event_Timer.request(alt_time)(run))
          }
        case None =>
      }
    }
  }

  // delayed event after first invocation
  def delay_first(delay: => Time, log: Logger = No_Logger)(event: => Unit): Delay =
    new Delay(true, delay, log, event)

  // delayed event after last invocation
  def delay_last(delay: => Time, log: Logger = No_Logger)(event: => Unit): Delay =
    new Delay(false, delay, log, event)
}
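As a usage sketch: delay_last gives debounce semantics, so only the last invocation in a burst fires. This assumes Isabelle's Time companion offers a seconds constructor:

val idle = Standard_Thread.delay_last(Time.seconds(1.0)) { println("input settled") }
idle.invoke(); idle.invoke(); idle.invoke()
// one "input settled" fires roughly one second after the last invoke();
// delay_first would instead fire relative to the first call of the burst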
Example 3
Source File: Concurrent.scala From zen with Apache License 2.0
package com.github.cloudml.zen.ml.util

import java.util.concurrent.{Executors, LinkedBlockingQueue, ThreadPoolExecutor}

import scala.concurrent._
import scala.concurrent.duration._

object Concurrent extends Serializable {
  @inline def withFuture[T](body: => T)(implicit es: ExecutionContextExecutorService): Future[T] = {
    Future(body)(es)
  }

  @inline def withAwaitReady[T](future: Future[T]): Unit = {
    Await.ready(future, 1.hour)
  }

  def withAwaitReadyAndClose[T](future: Future[T])(implicit es: ExecutionContextExecutorService): Unit = {
    Await.ready(future, 1.hour)
    closeExecutionContext(es)
  }

  @inline def withAwaitResult[T](future: Future[T]): T = {
    Await.result(future, 1.hour)
  }

  def withAwaitResultAndClose[T](future: Future[T])(implicit es: ExecutionContextExecutorService): T = {
    val res = Await.result(future, 1.hour)
    closeExecutionContext(es)
    res
  }

  @inline def initExecutionContext(numThreads: Int): ExecutionContextExecutorService = {
    ExecutionContext.fromExecutorService(Executors.newFixedThreadPool(numThreads))
  }

  @inline def closeExecutionContext(es: ExecutionContextExecutorService): Unit = {
    es.shutdown()
  }
}

object DebugConcurrent extends Serializable {
  def withFuture[T](body: => T)(implicit es: ExecutionContextExecutorService): Future[T] = {
    val future = Future(body)(es)
    future.onFailure { case e =>
      e.printStackTrace()
    }(scala.concurrent.ExecutionContext.Implicits.global)
    future
  }

  def withAwaitReady[T](future: Future[T]): Unit = {
    Await.ready(future, 1.hour)
  }

  def withAwaitReadyAndClose[T](future: Future[T])(implicit es: ExecutionContextExecutorService): Unit = {
    future.onComplete { _ =>
      closeExecutionContext(es)
    }(scala.concurrent.ExecutionContext.Implicits.global)
    Await.ready(future, 1.hour)
  }

  def withAwaitResult[T](future: Future[T]): T = {
    Await.result(future, 1.hour)
  }

  def withAwaitResultAndClose[T](future: Future[T])(implicit es: ExecutionContextExecutorService): T = {
    future.onComplete { _ =>
      closeExecutionContext(es)
    }(scala.concurrent.ExecutionContext.Implicits.global)
    Await.result(future, 1.hour)
  }

  def initExecutionContext(numThreads: Int): ExecutionContextExecutorService = {
    val es = new ThreadPoolExecutor(numThreads, numThreads, 0L, MILLISECONDS,
      new LinkedBlockingQueue[Runnable], Executors.defaultThreadFactory,
      new ThreadPoolExecutor.AbortPolicy)
    ExecutionContext.fromExecutorService(es)
  }

  def closeExecutionContext(es: ExecutionContextExecutorService): Unit = {
    es.shutdown()
    if (!es.awaitTermination(1L, SECONDS)) {
      System.err.println("Error: ExecutorService does not exit itself, force to terminate.")
    }
  }
}
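A short usage sketch, built only from the API shown above:

implicit val es: ExecutionContextExecutorService = Concurrent.initExecutionContext(4)
val f = Concurrent.withFuture { 21 * 2 }           // runs on the 4-thread pool
val answer = Concurrent.withAwaitResultAndClose(f) // 42; the pool is shut down afterwards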
Example 4
Source File: MapLifter.scala From diffy with GNU Affero General Public License v3.0
package ai.diffy.lifter

import com.twitter.concurrent.NamedPoolThreadFactory
import com.twitter.util.{ExecutorServiceFuturePool, Future, FuturePool}

import java.util.concurrent.{ArrayBlockingQueue, ThreadPoolExecutor, TimeUnit}

case class Message(endpoint: Option[String], result: FieldMap[Any])

trait MapLifter {
  def apply(input: Array[Byte]): Future[Message]
}

object MapLifterPool {
  val QueueSizeDefault = 5

  def apply(mapLifterFactory: => MapLifter) = {
    val executorService =
      new ThreadPoolExecutor(
        3,   // core pool size
        10,  // max pool size
        500, // keep alive time
        TimeUnit.MILLISECONDS,
        new ArrayBlockingQueue[Runnable](10), // work queue
        new NamedPoolThreadFactory("maplifter", makeDaemons = true),
        new ThreadPoolExecutor.AbortPolicy()
      )
    executorService.prestartCoreThread()
    new MapLifterPool(mapLifterFactory, new ExecutorServiceFuturePool(executorService))
  }
}

class MapLifterPool(underlying: MapLifter, futurePool: FuturePool) extends MapLifter {
  override def apply(input: Array[Byte]): Future[Message] = (futurePool { underlying(input) }).flatten
}
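Worth noting in this configuration: the bounded ArrayBlockingQueue(10) combined with AbortPolicy means that once all ten threads are busy and ten tasks are queued, further submissions fail fast with a RejectedExecutionException rather than queueing without bound, so callers get immediate backpressure. prestartCoreThread() warms up one core thread before the first submission arrives.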
Example 5
Source File: ExecutorSource.scala From BigDatalog with Apache License 2.0
package org.apache.spark.executor

import java.util.concurrent.ThreadPoolExecutor

import scala.collection.JavaConverters._

import com.codahale.metrics.{Gauge, MetricRegistry}
import org.apache.hadoop.fs.FileSystem

import org.apache.spark.metrics.source.Source

private[spark] class ExecutorSource(threadPool: ThreadPoolExecutor, executorId: String) extends Source {

  private def fileStats(scheme: String): Option[FileSystem.Statistics] =
    FileSystem.getAllStatistics.asScala.find(s => s.getScheme.equals(scheme))

  private def registerFileSystemStat[T](
      scheme: String, name: String, f: FileSystem.Statistics => T, defaultValue: T) = {
    metricRegistry.register(MetricRegistry.name("filesystem", scheme, name), new Gauge[T] {
      override def getValue: T = fileStats(scheme).map(f).getOrElse(defaultValue)
    })
  }

  override val metricRegistry = new MetricRegistry()

  override val sourceName = "executor"

  // Gauge for executor thread pool's actively executing task counts
  metricRegistry.register(MetricRegistry.name("threadpool", "activeTasks"), new Gauge[Int] {
    override def getValue: Int = threadPool.getActiveCount()
  })

  // Gauge for executor thread pool's approximate total number of tasks that have been completed
  metricRegistry.register(MetricRegistry.name("threadpool", "completeTasks"), new Gauge[Long] {
    override def getValue: Long = threadPool.getCompletedTaskCount()
  })

  // Gauge for executor thread pool's current number of threads
  metricRegistry.register(MetricRegistry.name("threadpool", "currentPool_size"), new Gauge[Int] {
    override def getValue: Int = threadPool.getPoolSize()
  })

  // Gauge for executor thread pool's largest number of threads that have ever simultaneously
  // been in the pool
  metricRegistry.register(MetricRegistry.name("threadpool", "maxPool_size"), new Gauge[Int] {
    override def getValue: Int = threadPool.getMaximumPoolSize()
  })

  // Gauge for file system stats of this executor
  for (scheme <- Array("hdfs", "file")) {
    registerFileSystemStat(scheme, "read_bytes", _.getBytesRead(), 0L)
    registerFileSystemStat(scheme, "write_bytes", _.getBytesWritten(), 0L)
    registerFileSystemStat(scheme, "read_ops", _.getReadOps(), 0)
    registerFileSystemStat(scheme, "largeRead_ops", _.getLargeReadOps(), 0)
    registerFileSystemStat(scheme, "write_ops", _.getWriteOps(), 0)
  }
}
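A hypothetical probe of the registered gauges, assuming a ThreadPoolExecutor named threadPool is in scope and the calling code lives under the org.apache.spark package (the class is private[spark]):

val source = new ExecutorSource(threadPool, "executor-1")
// MetricRegistry.name("threadpool", "activeTasks") produces the dotted key used below
val activeTasks = source.metricRegistry.getGauges.get("threadpool.activeTasks").getValue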
Example 6
Source File: ExecutorPoolCaptureOom.scala From kyuubi with Apache License 2.0
package org.apache.kyuubi.util

import java.util.concurrent.{Future, SynchronousQueue, ThreadPoolExecutor, TimeUnit}

case class ExecutorPoolCaptureOom(
    poolName: String,
    corePoolSize: Int,
    maximumPoolSize: Int,
    keepAliveSeconds: Long,
    hook: Runnable)
  extends ThreadPoolExecutor(
    corePoolSize,
    maximumPoolSize,
    keepAliveSeconds,
    TimeUnit.SECONDS,
    new SynchronousQueue[Runnable](),
    NamedThreadFactory(poolName)) {

  override def afterExecute(r: Runnable, t: Throwable): Unit = {
    super.afterExecute(r, t)
    t match {
      case _: OutOfMemoryError => hook.run()
      case null => r match {
        case f: Future[_] =>
          try {
            if (f.isDone) f.get()
          } catch {
            case _: InterruptedException => Thread.currentThread().interrupt()
            case _: OutOfMemoryError => hook.run()
          }
        case _ =>
      }
      case _ =>
    }
  }
}
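A hypothetical wiring of this pool, assuming Scala 2.12+ SAM conversion for the Runnable arguments:

val pool = ExecutorPoolCaptureOom(
  "query-pool",
  2,   // corePoolSize
  8,   // maximumPoolSize
  60L, // keepAliveSeconds
  () => System.err.println("OOM observed in pool, triggering recovery"))
pool.execute(() => println("task on " + Thread.currentThread().getName))

The hook fires both when execute-style tasks die with an OutOfMemoryError (t is non-null) and when a submitted Future completed abnormally, which afterExecute surfaces by calling f.get() on done tasks.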
Example 7
Source File: ExecutorSource.scala From iolap with Apache License 2.0
The code is identical to Example 5 above, except that this older fork predates the JavaConverters migration: it imports scala.collection.JavaConversions._ and calls FileSystem.getAllStatistics().find(...) through the implicit collection conversion.
Example 8
Source File: ExecutorSource.scala From multi-tenancy-spark with Apache License 2.0
The code is identical to Example 5 above.
Example 9
Source File: ThreadPoolMetrics.scala From prometheus-akka with Apache License 2.0
package com.workday.prometheus.akka

import java.util.Collections
import java.util.concurrent.ThreadPoolExecutor

import scala.collection.JavaConverters.seqAsJavaListConverter
import scala.collection.concurrent.TrieMap

import io.prometheus.client.Collector
import io.prometheus.client.Collector.MetricFamilySamples
import io.prometheus.client.GaugeMetricFamily

object ThreadPoolMetrics extends Collector {
  val map = TrieMap[String, Option[ThreadPoolExecutor]]()
  this.register()

  override def collect(): java.util.List[MetricFamilySamples] = {
    val dispatcherNameList = List("dispatcherName").asJava
    val activeThreadCountGauge = new GaugeMetricFamily("akka_dispatcher_threadpoolexecutor_active_thread_count",
      "Akka ThreadPool Dispatcher Active Thread Count", dispatcherNameList)
    val corePoolSizeGauge = new GaugeMetricFamily("akka_dispatcher_threadpoolexecutor_core_pool_size",
      "Akka ThreadPool Dispatcher Core Pool Size", dispatcherNameList)
    val currentPoolSizeGauge = new GaugeMetricFamily("akka_dispatcher_threadpoolexecutor_current_pool_size",
      "Akka ThreadPool Dispatcher Current Pool Size", dispatcherNameList)
    val largestPoolSizeGauge = new GaugeMetricFamily("akka_dispatcher_threadpoolexecutor_largest_pool_size",
      "Akka ThreadPool Dispatcher Largest Pool Size", dispatcherNameList)
    val maxPoolSizeGauge = new GaugeMetricFamily("akka_dispatcher_threadpoolexecutor_max_pool_size",
      "Akka ThreadPool Dispatcher Max Pool Size", dispatcherNameList)
    val completedTaskCountGauge = new GaugeMetricFamily("akka_dispatcher_threadpoolexecutor_completed_task_count",
      "Akka ThreadPoolExecutor Dispatcher Completed Task Count", dispatcherNameList)
    val totalTaskCountGauge = new GaugeMetricFamily("akka_dispatcher_threadpoolexecutor_total_task_count",
      "Akka ThreadPoolExecutor Dispatcher Total Task Count", dispatcherNameList)

    map.foreach { case (dispatcherName, tpeOption) =>
      // shadows the outer list of label *names* with this dispatcher's label *value*
      val dispatcherNameList = List(dispatcherName).asJava
      tpeOption match {
        case Some(tpe) =>
          activeThreadCountGauge.addMetric(dispatcherNameList, tpe.getActiveCount)
          corePoolSizeGauge.addMetric(dispatcherNameList, tpe.getCorePoolSize)
          currentPoolSizeGauge.addMetric(dispatcherNameList, tpe.getPoolSize)
          largestPoolSizeGauge.addMetric(dispatcherNameList, tpe.getLargestPoolSize)
          maxPoolSizeGauge.addMetric(dispatcherNameList, tpe.getMaximumPoolSize)
          completedTaskCountGauge.addMetric(dispatcherNameList, tpe.getCompletedTaskCount)
          totalTaskCountGauge.addMetric(dispatcherNameList, tpe.getTaskCount)
        case None =>
          activeThreadCountGauge.addMetric(dispatcherNameList, 0)
          corePoolSizeGauge.addMetric(dispatcherNameList, 0)
          currentPoolSizeGauge.addMetric(dispatcherNameList, 0)
          largestPoolSizeGauge.addMetric(dispatcherNameList, 0)
          maxPoolSizeGauge.addMetric(dispatcherNameList, 0)
          completedTaskCountGauge.addMetric(dispatcherNameList, 0)
          totalTaskCountGauge.addMetric(dispatcherNameList, 0)
      }
    }

    val jul = new java.util.ArrayList[MetricFamilySamples]
    jul.add(activeThreadCountGauge)
    jul.add(corePoolSizeGauge)
    jul.add(currentPoolSizeGauge)
    jul.add(largestPoolSizeGauge)
    jul.add(maxPoolSizeGauge)
    jul.add(completedTaskCountGauge)
    jul.add(totalTaskCountGauge)
    Collections.unmodifiableList(jul)
  }

  def add(dispatcherName: String, tpe: ThreadPoolExecutor): Unit = {
    map.put(dispatcherName, Some(tpe))
  }

  def remove(dispatcherName: String): Unit = {
    map.put(dispatcherName, None)
  }
}
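A minimal registration sketch (the dispatcher name and pool parameters are illustrative):

import java.util.concurrent.{LinkedBlockingQueue, ThreadPoolExecutor, TimeUnit}

val tpe = new ThreadPoolExecutor(2, 2, 0L, TimeUnit.MILLISECONDS, new LinkedBlockingQueue[Runnable]())
ThreadPoolMetrics.add("default-dispatcher", tpe)
// every Prometheus scrape now calls collect() and reports this pool's seven gauges
ThreadPoolMetrics.remove("default-dispatcher") // keeps the time series but zeroes the values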
Example 10
Source File: ThreadUtil.scala From coursier with Apache License 2.0
package coursier.cache.internal

import java.util.concurrent.{ExecutorService, LinkedBlockingQueue, ScheduledExecutorService, ScheduledThreadPoolExecutor, ThreadFactory, ThreadPoolExecutor, TimeUnit}
import java.util.concurrent.atomic.AtomicInteger

object ThreadUtil {

  private val poolNumber = new AtomicInteger(1)

  def daemonThreadFactory(): ThreadFactory = {

    val poolNumber0 = poolNumber.getAndIncrement()
    val threadNumber = new AtomicInteger(1)

    new ThreadFactory {
      def newThread(r: Runnable) = {
        val threadNumber0 = threadNumber.getAndIncrement()
        val t = new Thread(r, s"coursier-pool-$poolNumber0-thread-$threadNumber0")
        t.setDaemon(true)
        t.setPriority(Thread.NORM_PRIORITY)
        t
      }
    }
  }

  def fixedThreadPool(size: Int): ExecutorService = {

    val factory = daemonThreadFactory()

    // 1 min keep alive, so that threads get stopped a bit after resolution / downloading is done
    val executor = new ThreadPoolExecutor(
      size, size,
      1L, TimeUnit.MINUTES,
      new LinkedBlockingQueue[Runnable],
      factory
    )
    executor.allowCoreThreadTimeOut(true)

    executor
  }

  def fixedScheduledThreadPool(size: Int): ScheduledExecutorService = {

    val factory = daemonThreadFactory()

    val executor = new ScheduledThreadPoolExecutor(size, factory)
    executor.setKeepAliveTime(1L, TimeUnit.MINUTES)
    executor.allowCoreThreadTimeOut(true)

    executor
  }

  def withFixedThreadPool[T](size: Int)(f: ExecutorService => T): T = {

    var pool: ExecutorService = null

    try {
      pool = fixedThreadPool(size)
      f(pool)
    } finally {
      if (pool != null)
        pool.shutdown()
    }
  }
}
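withFixedThreadPool is the loan pattern: the pool lives exactly as long as the computation. A small sketch using only JDK types:

val sum = ThreadUtil.withFixedThreadPool(4) { pool =>
  val tasks = (1 to 4).map { i =>
    pool.submit(new java.util.concurrent.Callable[Int] { def call(): Int = i * i })
  }
  tasks.map(_.get()).sum // 1 + 4 + 9 + 16 = 30
} // pool.shutdown() has run by the time sum is returned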
Example 11
Source File: ExecutorSource.scala From sparkoscope with Apache License 2.0
The code is identical to Example 5 above.
Example 12
Source File: ThreadPoolOf.scala From kafka-journal with MIT License
package com.evolutiongaming.kafka.journal.execution

import java.util.concurrent.{SynchronousQueue, ThreadFactory, ThreadPoolExecutor}

import cats.effect.{Resource, Sync}
import cats.implicits._

import scala.concurrent.duration._

object ThreadPoolOf {

  def apply[F[_] : Sync](
    minSize: Int,
    maxSize: Int,
    threadFactory: ThreadFactory,
    keepAlive: FiniteDuration = 1.minute,
  ): Resource[F, ThreadPoolExecutor] = {

    val result = for {
      result <- Sync[F].delay {
        new ThreadPoolExecutor(
          minSize,
          maxSize,
          keepAlive.length,
          keepAlive.unit,
          new SynchronousQueue[Runnable],
          threadFactory)
      }
    } yield {
      val release = Sync[F].delay { result.shutdown() }
      (result, release)
    }
    Resource(result)
  }
}
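A minimal sketch of consuming the Resource, assuming cats-effect 2 (which this project targets) and the JDK's default thread factory:

import cats.effect.IO
import java.util.concurrent.Executors

val program: IO[Unit] =
  ThreadPoolOf[IO](minSize = 1, maxSize = 4, threadFactory = Executors.defaultThreadFactory)
    .use(pool => IO(pool.execute(() => println("ran on the managed pool"))))
// the release action (pool.shutdown()) runs when use completes, succeeds or fails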
Example 13
Source File: LongRunningPool.scala From CMAK with Apache License 2.0
package kafka.manager.base

import java.util.concurrent.{LinkedBlockingQueue, ThreadPoolExecutor, TimeUnit}

import akka.pattern._

import scala.concurrent.{ExecutionContext, Future}
import scala.reflect.ClassTag
import scala.util.Try

case class LongRunningPoolConfig(threadPoolSize: Int, maxQueueSize: Int)

trait LongRunningPoolActor extends BaseActor {

  protected val longRunningExecutor = new ThreadPoolExecutor(
    longRunningPoolConfig.threadPoolSize, longRunningPoolConfig.threadPoolSize,
    0L, TimeUnit.MILLISECONDS,
    new LinkedBlockingQueue[Runnable](longRunningPoolConfig.maxQueueSize))
  protected val longRunningExecutionContext = ExecutionContext.fromExecutor(longRunningExecutor)

  protected def longRunningPoolConfig: LongRunningPoolConfig

  protected def longRunningQueueFull(): Unit

  protected def hasCapacityFor(taskCount: Int): Boolean = {
    longRunningExecutor.getQueue.remainingCapacity() >= taskCount
  }

  @scala.throws[Exception](classOf[Exception])
  override def postStop(): Unit = {
    log.info("Shutting down long running executor...")
    Try(longRunningExecutor.shutdown())
    super.postStop()
  }

  protected def longRunning[T](fn: => Future[T])(implicit ec: ExecutionContext, ct: ClassTag[T]): Unit = {
    if (longRunningExecutor.getQueue.remainingCapacity() == 0) {
      longRunningQueueFull()
    } else {
      fn match {
        case _ if ct.runtimeClass == classOf[Unit] =>
          // do nothing with unit
        case f =>
          f pipeTo sender
      }
    }
  }
}
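Design note: the bounded LinkedBlockingQueue plus the explicit remainingCapacity() check lets the actor signal overload itself through longRunningQueueFull() rather than surfacing a RejectedExecutionException from the pool. Strictly speaking the check-then-submit sequence is not atomic, so under concurrent submitters a task can still slip past the capacity test, but within a single actor the check runs on the actor's own message-processing thread, which keeps it effectively serial.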
Example 14
Source File: DefaultExecutors.scala From zio with Apache License 2.0
package zio.internal

import java.util.concurrent.{ LinkedBlockingQueue, RejectedExecutionException, ThreadPoolExecutor, TimeUnit }

private[internal] abstract class DefaultExecutors {
  final def makeDefault(yieldOpCount: Int): Executor =
    fromThreadPoolExecutor(_ => yieldOpCount) {
      val corePoolSize  = Runtime.getRuntime.availableProcessors() * 2
      val maxPoolSize   = corePoolSize
      val keepAliveTime = 60000L
      val timeUnit      = TimeUnit.MILLISECONDS
      val workQueue     = new LinkedBlockingQueue[Runnable]()
      val threadFactory = new NamedThreadFactory("zio-default-async", true)

      val threadPool = new ThreadPoolExecutor(
        corePoolSize,
        maxPoolSize,
        keepAliveTime,
        timeUnit,
        workQueue,
        threadFactory
      )
      threadPool.allowCoreThreadTimeOut(true)

      threadPool
    }

  final def fromThreadPoolExecutor(yieldOpCount0: ExecutionMetrics => Int)(
    es: ThreadPoolExecutor
  ): Executor =
    new Executor {
      private[this] def metrics0 = new ExecutionMetrics {
        def concurrency: Int = es.getMaximumPoolSize()

        def capacity: Int = {
          val queue     = es.getQueue()
          val remaining = queue.remainingCapacity()

          if (remaining == Int.MaxValue) remaining
          else remaining + queue.size
        }

        def size: Int = es.getQueue().size

        def workersCount: Int = es.getPoolSize()

        def enqueuedCount: Long = es.getTaskCount()

        def dequeuedCount: Long = enqueuedCount - size.toLong
      }

      def metrics = Some(metrics0)

      def yieldOpCount = yieldOpCount0(metrics0)

      def submit(runnable: Runnable): Boolean =
        try {
          es.execute(runnable)
          true
        } catch {
          case _: RejectedExecutionException => false
        }

      def here = false
    }
}
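Design note: because corePoolSize equals maxPoolSize and the queue is unbounded, the JDK will never spawn threads beyond the core count (extra threads are only created when the queue is full), so this is a fixed-size pool in practice; allowCoreThreadTimeOut(true) then lets even those core threads retire after 60 seconds idle, so a quiet runtime holds no threads at all.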
Example 15
Source File: FIFOConsumerManager.scala From Linkis with Apache License 2.0
package com.webank.wedatasphere.linkis.scheduler.queue.fifoqueue

import java.util.concurrent.{ExecutorService, ThreadPoolExecutor}

import com.webank.wedatasphere.linkis.common.utils.Utils
import com.webank.wedatasphere.linkis.scheduler.SchedulerContext
import com.webank.wedatasphere.linkis.scheduler.exception.SchedulerErrorException
import com.webank.wedatasphere.linkis.scheduler.listener.ConsumerListener
import com.webank.wedatasphere.linkis.scheduler.queue.{Consumer, ConsumerManager, Group, LoopArrayQueue}

class FIFOConsumerManager(groupName: String) extends ConsumerManager {

  def this() = this("FIFO_GROUP")

  private var group: Group = _
  private var executorService: ThreadPoolExecutor = _
  private var consumerListener: ConsumerListener = _
  private var consumerQueue: LoopArrayQueue = _
  private var consumer: Consumer = _

  override def setSchedulerContext(schedulerContext: SchedulerContext): Unit = {
    super.setSchedulerContext(schedulerContext)
    group = getSchedulerContext.getOrCreateGroupFactory.getOrCreateGroup(groupName)
    executorService = group match {
      case g: FIFOGroup => Utils.newCachedThreadPool(g.getMaxRunningJobs + 2, groupName + "-Thread-")
      case _ => throw new SchedulerErrorException(13000,
        s"FIFOConsumerManager needs a FIFOGroup, but ${group.getClass} is not supported.")
    }
    consumerQueue = new LoopArrayQueue(getSchedulerContext.getOrCreateGroupFactory.getOrCreateGroup(null))
    consumer = createConsumer(null)
  }

  override def setConsumerListener(consumerListener: ConsumerListener): Unit =
    this.consumerListener = consumerListener

  override def getOrCreateExecutorService: ExecutorService = executorService

  override def getOrCreateConsumer(groupName: String): Consumer = consumer

  override protected def createConsumer(groupName: String): Consumer = {
    val group = getSchedulerContext.getOrCreateGroupFactory.getOrCreateGroup(null)
    val consumer = new FIFOUserConsumer(getSchedulerContext, getOrCreateExecutorService, group)
    consumer.setGroup(group)
    consumer.setConsumeQueue(consumerQueue)
    if (consumerListener != null) consumerListener.onConsumerCreated(consumer)
    consumer.start()
    consumer
  }

  override def destroyConsumer(groupName: String): Unit = {
    // ignore
  }

  override def shutdown(): Unit = {
    if (consumerListener != null) consumerListener.onConsumerDestroyed(consumer)
    consumer.shutdown()
    executorService.shutdownNow()
  }

  override def listConsumers(): Array[Consumer] = Array(consumer)
}
Example 16
Source File: AppConfig.scala From zorechka-bot with MIT License
package com.wix.zorechka

import java.util.concurrent.{Executors, ThreadPoolExecutor}

import com.wix.zorechka.HasAppConfig.Cfg
import com.wix.zorechka.utils.concurrent.NamedThreadFactory
import zio.{RIO, Task, ZIO}
import zio.internal.Executor

import scala.concurrent.ExecutionContext

case class AppConfig(reposFile: String, db: DbConfig)

case class DbConfig(url: String, username: String, password: String)

trait HasAppConfig {
  val cfg: Cfg
}

object HasAppConfig {
  trait Cfg {
    val loadConfig: Task[AppConfig]
    val blockingCtx: ExecutionContext
  }

  trait Live extends HasAppConfig {
    import pureconfig.generic.auto._

    val cfg: Cfg = new Cfg {
      override val loadConfig: Task[AppConfig] =
        Task.effect(pureconfig.loadConfigOrThrow[AppConfig])

      override val blockingCtx: ExecutionContext = {
        val factory = NamedThreadFactory(name = "blocking-pool", daemon = true)
        Executor
          .fromThreadPoolExecutor(_ => Int.MaxValue)(
            Executors.newCachedThreadPool(factory).asInstanceOf[ThreadPoolExecutor])
          .asEC
      }
    }
  }

  def loadConfig(): RIO[HasAppConfig, AppConfig] = ZIO.accessM[HasAppConfig](_.cfg.loadConfig)
}
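Two details worth flagging here: the asInstanceOf cast works because the reference JDK's Executors.newCachedThreadPool happens to return a ThreadPoolExecutor, but that is an implementation detail rather than part of the ExecutorService contract; and the `_ => Int.MaxValue` yield-op count effectively disables ZIO's cooperative fiber yielding on this executor, which is a reasonable choice for a dedicated blocking context.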
Example 17
Source File: ExecutorSource.scala From drizzle-spark with Apache License 2.0
The code is identical to Example 5 above.