java.util.concurrent.ForkJoinPool Scala Examples
The following examples show how to use java.util.concurrent.ForkJoinPool.
Each example is taken from an open-source project; the source file, project name, and license are noted in the heading above the code.
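Before the project-specific examples, here is a minimal, self-contained sketch (names are illustrative, not from any of the projects below) of the pattern most of them share: construct a ForkJoinPool with explicit parallelism, a worker-thread factory, and an async-mode flag, then wrap it with ExecutionContext.fromExecutorService so it can back Scala Futures.

import java.util.concurrent.ForkJoinPool

import scala.concurrent.duration._
import scala.concurrent.{Await, ExecutionContext, Future}

object ForkJoinPoolSketch {
  def main(args: Array[String]): Unit = {
    // Bounded parallelism, default thread factory, no custom exception handler,
    // asyncMode = true (FIFO scheduling, which suits non-joining, event-style tasks).
    val pool = new ForkJoinPool(
      Runtime.getRuntime.availableProcessors(),
      ForkJoinPool.defaultForkJoinWorkerThreadFactory,
      null,
      true
    )
    // Wrap the pool so it can run Scala Futures.
    val ec = ExecutionContext.fromExecutorService(pool)

    val sum = Future((1 to 1000).sum)(ec) // hypothetical workload
    println(Await.result(sum, 5.seconds))

    ec.shutdown() // delegates to pool.shutdown()
  }
}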
Example 1
Source File: Main.scala From Converter with GNU General Public License v3.0
package org.scalablytyped.converter

import java.nio.file.Path
import java.util.concurrent.ForkJoinPool

import org.scalablytyped.converter.internal.constants.defaultCacheFolder
import org.scalablytyped.converter.internal.importer.build.BinTrayPublisher
import org.scalablytyped.converter.internal.importer.{withZipFs, Ci, Publisher}
import org.scalablytyped.converter.internal.{constants, files}

import scala.concurrent.ExecutionContext

object Main {
  def main(args: Array[String]): Unit = {
    val Ci.Config(config) = args

    System.setProperty("scala.concurrent.context.numThreads", config.parallelScalas.toString)
    val publishFolder = constants.defaultLocalPublishFolder

    val pool = new ForkJoinPool(config.parallelLibraries)
    val ec   = ExecutionContext.fromExecutorService(pool)

    withZipFs.maybe(files.existing(defaultCacheFolder) / "bintray.zip", config.enablePublish) { bintrayPathOpt =>
      val publisher: Publisher =
        if (config.enablePublish)
          BinTrayPublisher(bintrayPathOpt, config.projectName, Some(config.repo), ec) match {
            case Left(err)    => sys.error(err)
            case Right(value) => value
          }
        else BinTrayPublisher.Dummy

      withZipFs(defaultCacheFolder / "npmjs.zip") { npmjsPath =>
        withZipFs.maybe(defaultCacheFolder / "parseCache.zip", config.enableParseCache && config.conserveSpace) {
          parseCachePathOpt =>
            val parseCacheOpt: Option[Path] = parseCachePathOpt orElse {
              if (config.enableParseCache) Some((defaultCacheFolder / "parse").toNIO) else None
            }
            val paths = Ci.Paths(npmjsPath, parseCacheOpt, defaultCacheFolder, publishFolder, defaultCacheFolder / "git")
            val ci    = new Ci(config, paths, publisher, pool, ec)
            if (config.benchmark) {
              println(ci.run())
              println(ci.run())
              println(ci.run())
            } else {
              ci.run()
            }
        }
      }
    }
    pool.shutdown()
    System.exit(0)
  }
}
Example 2
Source File: NamedExecutors.scala From mango with Apache License 2.0
package com.kakao.mango.concurrent

import java.util.concurrent.Executors._
import java.util.concurrent.ForkJoinPool.ForkJoinWorkerThreadFactory
import java.util.concurrent.atomic.AtomicInteger
import java.util.concurrent.{ForkJoinWorkerThread, ExecutorService, ScheduledExecutorService, ForkJoinPool}

import scala.language.implicitConversions

object NamedExecutors {

  implicit def toRich(e: ExecutorService): RichExecutorService = new RichExecutorService(e)

  implicit def toRich(e: ScheduledExecutorService): RichScheduledExecutorService = new RichScheduledExecutorService(e)

  def scheduled(name: String, daemon: Boolean = true): RichScheduledExecutorService = {
    newSingleThreadScheduledExecutor(NamedThreadFactory(name, daemon))
  }

  def scheduledPool(name: String, size: Int, daemon: Boolean = true): RichScheduledExecutorService = {
    newScheduledThreadPool(size, NamedThreadFactory(name, daemon))
  }

  def cached(name: String, daemon: Boolean = true): RichExecutorService = {
    newCachedThreadPool(NamedThreadFactory(name, daemon))
  }

  def fixed(name: String, size: Int, daemon: Boolean = true): RichExecutorService = {
    newFixedThreadPool(size, NamedThreadFactory(name, daemon))
  }

  def single(name: String, daemon: Boolean = true): RichExecutorService = {
    newSingleThreadExecutor(NamedThreadFactory(name, daemon))
  }

  def forkJoin(name: String, size: Int, daemon: Boolean = true, asyncMode: Boolean = false): RichExecutorService = {
    val counter = new AtomicInteger()
    new ForkJoinPool(size, new ForkJoinWorkerThreadFactory {
      override def newThread(pool: ForkJoinPool): ForkJoinWorkerThread = {
        val thread = new ForkJoinWorkerThread(pool) {}
        thread.setName(s"$name-${counter.incrementAndGet()}")
        thread.setDaemon(daemon)
        thread
      }
    }, null, asyncMode)
  }

}
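A hypothetical call site for the forkJoin factory above; RichExecutorService and NamedThreadFactory are defined elsewhere in the mango project and are not shown here.

import com.kakao.mango.concurrent.NamedExecutors

object NamedExecutorsUsage {
  // Hypothetical usage: a ForkJoinPool of 8 daemon threads
  // named "indexer-1", "indexer-2", ...
  val indexerPool = NamedExecutors.forkJoin("indexer", size = 8)
}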
Example 3
Source File: ForkJoinPoolOf.scala From kafka-journal with MIT License
package com.evolutiongaming.kafka.journal.execution

import java.util.concurrent.ForkJoinPool
import java.util.concurrent.ForkJoinPool.ForkJoinWorkerThreadFactory

import cats.effect.{Resource, Sync}
import cats.implicits._

object ForkJoinPoolOf {

  def apply[F[_] : Sync](
    name: String,
    parallelism: Int
  ): Resource[F, ForkJoinPool] = {

    val threadFactory = ForkJoinPool.defaultForkJoinWorkerThreadFactory.withPrefix(name)

    val threadPool = Sync[F].delay {
      new ForkJoinPool(
        parallelism,
        threadFactory,
        UncaughtExceptionHandler.default,
        true)
    }

    val result = for {
      threadPool <- threadPool
    } yield {
      val release = Sync[F].delay { threadPool.shutdown() }
      (threadPool, release)
    }

    Resource(result)
  }


  implicit class ForkJoinWorkerThreadFactoryOps(val self: ForkJoinWorkerThreadFactory) extends AnyVal {

    def withPrefix(prefix: String): ForkJoinWorkerThreadFactory = new ForkJoinWorkerThreadFactory {

      def newThread(pool: ForkJoinPool) = {
        val thread = self.newThread(pool)
        val threadId = thread.getId
        thread.setName(s"$prefix-$threadId")
        thread
      }
    }
  }
}
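A sketch of how this factory might be used at a call site, assuming cats-effect 2 (matching the Resource and Sync imports above); UncaughtExceptionHandler.default is project-internal and not shown.

import cats.effect.IO
import com.evolutiongaming.kafka.journal.execution.ForkJoinPoolOf

import scala.concurrent.ExecutionContext

object ForkJoinPoolOfUsage {
  // The pool is acquired as a Resource, so shutdown() runs on release.
  val ecResource = ForkJoinPoolOf[IO]("journal", parallelism = 4)
    .map(ExecutionContext.fromExecutorService(_))

  // ecResource.use { ec => ... } would run a computation on the pool
  // and release it afterwards.
}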
Example 4
Source File: ThreadFactories.scala From docspell with GNU General Public License v3.0
package docspell.common

import java.util.concurrent.ForkJoinPool
import java.util.concurrent.ForkJoinPool.ForkJoinWorkerThreadFactory
import java.util.concurrent.ForkJoinWorkerThread
import java.util.concurrent.atomic.AtomicLong
import java.util.concurrent.{Executors, ThreadFactory}

import scala.concurrent._

import cats.effect._

object ThreadFactories {

  def ofName(prefix: String): ThreadFactory =
    new ThreadFactory {
      val counter = new AtomicLong(0)

      override def newThread(r: Runnable): Thread = {
        val t = Executors.defaultThreadFactory().newThread(r)
        t.setName(s"$prefix-${counter.getAndIncrement()}")
        t
      }
    }

  def ofNameFJ(prefix: String): ForkJoinWorkerThreadFactory =
    new ForkJoinWorkerThreadFactory {
      val tf      = ForkJoinPool.defaultForkJoinWorkerThreadFactory
      val counter = new AtomicLong(0)

      def newThread(pool: ForkJoinPool): ForkJoinWorkerThread = {
        val t = tf.newThread(pool)
        t.setName(s"$prefix-${counter.getAndIncrement()}")
        t
      }
    }

  def executorResource[F[_]: Sync](
      c: => ExecutionContextExecutorService
  ): Resource[F, ExecutionContextExecutorService] =
    Resource.make(Sync[F].delay(c))(ec => Sync[F].delay(ec.shutdown))

  def cached[F[_]: Sync](
      tf: ThreadFactory
  ): Resource[F, ExecutionContextExecutorService] =
    executorResource(
      ExecutionContext.fromExecutorService(Executors.newCachedThreadPool(tf))
    )

  def fixed[F[_]: Sync](
      n: Int,
      tf: ThreadFactory
  ): Resource[F, ExecutionContextExecutorService] =
    executorResource(
      ExecutionContext.fromExecutorService(Executors.newFixedThreadPool(n, tf))
    )

  def workSteal[F[_]: Sync](
      n: Int,
      tf: ForkJoinWorkerThreadFactory
  ): Resource[F, ExecutionContextExecutorService] =
    executorResource(
      ExecutionContext.fromExecutorService(
        new ForkJoinPool(n, tf, null, true)
      )
    )

  def workSteal[F[_]: Sync](
      tf: ForkJoinWorkerThreadFactory
  ): Resource[F, ExecutionContextExecutorService] =
    workSteal[F](Runtime.getRuntime().availableProcessors() + 1, tf)

}
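A sketch of combining the pieces above, assuming cats-effect 2 as imported in the example: a work-stealing ExecutionContext whose threads carry a readable prefix, released when the Resource closes.

import cats.effect.IO
import docspell.common.ThreadFactories

object ThreadFactoriesUsage {
  // Work-stealing pool sized to availableProcessors + 1, threads named
  // "batch-0", "batch-1", ...; the executor is shut down on Resource release.
  val batchEc = ThreadFactories.workSteal[IO](ThreadFactories.ofNameFJ("batch"))

  // batchEc.use { ec => ... } would run a computation on the pool.
}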
Example 5
Source File: TestExecutorImpl.scala From scalaprops with MIT License
package scalaprops

import scala.concurrent.{Await, ExecutionContext, Future}
import scala.concurrent.duration.Duration
import java.lang.Thread.UncaughtExceptionHandler
import sbt.testing.Logger
import java.util.concurrent.ForkJoinPool

object TestExecutorImpl {
  private[this] def newInstance(log: Logger): TestExecutor =
    new TestExecutor {
      private[this] val executionContext = {
        lazy val executorService: ForkJoinPool = new ForkJoinPool(
          sys.runtime.availableProcessors(),
          ForkJoinPool.defaultForkJoinWorkerThreadFactory,
          new UncaughtExceptionHandler {
            def uncaughtException(t: Thread, e: Throwable): Unit = {
              log.error("uncaughtException Thread = " + t)
              log.trace(e)
              e.printStackTrace()
              executorService.shutdown()
            }
          },
          false
        )
        ExecutionContext.fromExecutorService(executorService)
      }

      override def execute[A](timeout: Duration)(f: => A): A =
        Await.result(Future(f)(executionContext), timeout)

      override def shutdown(): Unit =
        executionContext.shutdown()
    }

  def withExecutor[A](logger: Logger)(f: TestExecutor => A): A = {
    val executor = newInstance(logger)
    try f(executor)
    finally executor.shutdown()
  }
}
Example 6
Source File: ReadmeExampleSpec.scala From laserdisc with MIT License
import java.util.concurrent.ForkJoinPool

import cats.effect.{ContextShift, IO, Timer}
import munit.FunSuite

import scala.concurrent.ExecutionContext
import scala.concurrent.ExecutionContext.fromExecutor

final class ReadmeExampleSpec extends FunSuite with TestLogCapture {

  private[this] val ec: ExecutionContext = fromExecutor(new ForkJoinPool())

  private[this] implicit val timer: Timer[IO]               = IO.timer(ec)
  private[this] implicit val contextShift: ContextShift[IO] = IO.contextShift(ec)

  test("The readme example gives the expected output and logs when a LogWriter is in scope") {
    import cats.syntax.flatMap._
    import laserdisc._
    import laserdisc.all._
    import laserdisc.auto._
    import laserdisc.fs2._
    import log.effect.LogWriter
    import log.effect.fs2.SyncLogWriter

    def redisTest(implicit log: LogWriter[IO]): IO[Unit] =
      RedisClient.to("localhost", 6379).use { client =>
        client.send(
          set("a", 23),
          set("b", 55),
          get[PosInt]("b"),
          get[PosInt]("a")
        ) >>= {
          case (Right(OK), Right(OK), Right(Some(getOfb)), Right(Some(getOfa))) if getOfb.value == 55 && getOfa.value == 23 =>
            log info "yay!"
          case other =>
            log.error(s"something went terribly wrong $other") >>
              IO.raiseError(new RuntimeException("boom"))
        }
      }

    val logged = capturedConsoleOutOf {
      redisTest(SyncLogWriter.consoleLog[IO])
    }

    assert(logged contains "Starting connection")
    assert(logged contains "Connected to server localhost:6379")
    assert(logged contains "sending Arr(Bulk(SET),Bulk(a),Bulk(23))")
    assert(logged contains "receiving Str(OK)")
    assert(logged contains "sending Arr(Bulk(SET),Bulk(b),Bulk(55))")
    assert(logged contains "receiving Str(OK)")
    assert(logged contains "sending Arr(Bulk(GET),Bulk(b))")
    assert(logged contains "receiving Bulk(55)")
    assert(logged contains "sending Arr(Bulk(GET),Bulk(a))")
    assert(logged contains "receiving Bulk(23)")
    assert(logged contains "yay!")
    assert(logged contains "Shutting down connection")
    assert(logged contains "Shutdown complete")
    assert(logged contains "Connection terminated: No issues")
  }
}
Example 7
Source File: DefaultLoggerSpec.scala From laserdisc with MIT License
import java.util.concurrent.ForkJoinPool

import cats.effect.{ContextShift, IO, Timer}
import munit.FunSuite

import scala.concurrent.ExecutionContext
import scala.concurrent.ExecutionContext.fromExecutor

final class DefaultLoggerSpec extends FunSuite with TestLogCapture {

  private def assertNot(c: => Boolean): Unit = assert(!c)

  private[this] val ec: ExecutionContext = fromExecutor(new ForkJoinPool())

  private[this] implicit val timer: Timer[IO]               = IO.timer(ec)
  private[this] implicit val contextShift: ContextShift[IO] = IO.contextShift(ec)

  test("The readme example doesn't log when no LogWriter is given") {
    import cats.syntax.flatMap._
    import laserdisc._
    import laserdisc.all._
    import laserdisc.auto._
    import laserdisc.fs2._
    import log.effect.fs2.SyncLogWriter.consoleLog

    val redisTest: IO[Unit] =
      RedisClient.to("localhost", 6379).use { client =>
        client.send(
          set("a", 23),
          set("b", 55),
          get[PosInt]("b"),
          get[PosInt]("a")
        ) >>= {
          case (Right(OK), Right(OK), Right(Some(getOfb)), Right(Some(getOfa))) if getOfb.value == 55 && getOfa.value == 23 =>
            consoleLog[IO].info("yay!")
          case other =>
            consoleLog[IO].error(s"something went terribly wrong $other") >>
              IO.raiseError(new RuntimeException("boom"))
        }
      }

    val logged = capturedConsoleOutOf(redisTest)

    assertNot(logged contains "Starting connection")
    assertNot(logged contains "Server available for publishing: localhost:6379")
    assertNot(logged contains "sending Arr(Bulk(SET),Bulk(a),Bulk(23))")
    assertNot(logged contains "receiving Str(OK)")
    assertNot(logged contains "sending Arr(Bulk(SET),Bulk(b),Bulk(55))")
    assertNot(logged contains "receiving Str(OK)")
    assertNot(logged contains "sending Arr(Bulk(GET),Bulk(b))")
    assertNot(logged contains "receiving Bulk(55)")
    assertNot(logged contains "sending Arr(Bulk(GET),Bulk(a))")
    assertNot(logged contains "receiving Bulk(23)")
    assertNot(logged contains "Shutting down connection")
    assertNot(logged contains "Shutdown complete")
    assertNot(logged contains "Connection terminated: No issues")
  }
}