com.google.common.util.concurrent.ThreadFactoryBuilder Scala Examples
The following examples show how to use com.google.common.util.concurrent.ThreadFactoryBuilder.
Each example is shown with its source file, the project it comes from, and its license.
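Before diving into the project examples, here is a minimal, self-contained sketch of the basic pattern, assuming nothing beyond Guava itself (the object name, pool size, and the "demo-%d" name format are illustrative):

import java.util.concurrent.Executors

import com.google.common.util.concurrent.ThreadFactoryBuilder

object ThreadFactoryBuilderDemo extends App {
  // Name threads "demo-0", "demo-1", ... and mark them as daemons so
  // they do not keep the JVM alive after the main thread exits.
  val factory = new ThreadFactoryBuilder()
    .setNameFormat("demo-%d")
    .setDaemon(true)
    .build()

  val pool = Executors.newFixedThreadPool(2, factory)
  pool.execute(() => println(s"running on ${Thread.currentThread().getName}"))
  pool.shutdown()
}

The %d in the name format is replaced by an incrementing counter, so the threads above come out as demo-0 and demo-1; naming pool threads this way makes thread dumps and logs far easier to attribute.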
Example 1
Source File: SinkDataGroups.scala (from ohara, Apache License 2.0) | 6 votes
package oharastream.ohara.shabondi.sink

import java.time.{Duration => JDuration}
import java.util.concurrent._

import com.google.common.util.concurrent.ThreadFactoryBuilder
import com.typesafe.scalalogging.Logger
import oharastream.ohara.common.setting.{ObjectKey, TopicKey}
import oharastream.ohara.common.util.Releasable

import scala.jdk.CollectionConverters._

private[sink] object SinkDataGroups {
  def apply(config: SinkConfig) = new SinkDataGroups(config)
}

private class SinkDataGroups(
  objectKey: ObjectKey,
  brokerProps: String,
  topicKeys: Set[TopicKey],
  pollTimeout: JDuration
) extends Releasable {
  def this(config: SinkConfig) = {
    this(config.objectKey, config.brokers, config.sinkFromTopics, config.sinkPollTimeout)
  }

  private val threadPool: ExecutorService =
    Executors.newCachedThreadPool(new ThreadFactoryBuilder().setNameFormat("SinkDataGroups-%d").build())

  private val log        = Logger(classOf[SinkDataGroups])
  private val dataGroups = new ConcurrentHashMap[String, DataGroup]()

  def removeGroup(name: String): Boolean = {
    val group = dataGroups.remove(name)
    if (group != null) {
      group.close()
      true
    } else
      false
  }

  def groupExist(name: String): Boolean =
    dataGroups.containsKey(name)

  def createIfAbsent(name: String): DataGroup =
    dataGroups.computeIfAbsent(
      name, { n =>
        log.info("create data group: {}", n)
        val dataGroup = new DataGroup(n, objectKey, brokerProps, topicKeys, pollTimeout)
        threadPool.submit(dataGroup.queueProducer)
        dataGroup
      }
    )

  def size: Int = dataGroups.size()

  def freeIdleGroup(idleTime: JDuration): Unit = {
    val groups = dataGroups.elements().asScala.toSeq
    groups.foreach { group =>
      if (group.isIdle(idleTime)) {
        removeGroup(group.name)
      }
    }
  }

  override def close(): Unit = {
    dataGroups.asScala.foreach {
      case (_, dataGroup) => dataGroup.close()
    }
    threadPool.shutdown()
  }
}
Example 2
Source File: BasicShabondiTest.scala (from ohara, Apache License 2.0) | 5 votes
package oharastream.ohara.shabondi

import java.util
import java.util.concurrent.{ExecutorService, Executors}

import com.google.common.util.concurrent.ThreadFactoryBuilder
import com.typesafe.scalalogging.Logger
import oharastream.ohara.common.data.Row
import oharastream.ohara.common.setting.TopicKey
import oharastream.ohara.common.util.{CommonUtils, Releasable}
import oharastream.ohara.kafka.TopicAdmin
import oharastream.ohara.shabondi.common.ShabondiUtils
import oharastream.ohara.shabondi.sink.SinkConfig
import oharastream.ohara.shabondi.source.SourceConfig
import oharastream.ohara.testing.WithBroker
import org.junit.After

import scala.collection.{immutable, mutable}
import scala.concurrent.{ExecutionContext, Future}
import scala.jdk.CollectionConverters._

private[shabondi] abstract class BasicShabondiTest extends WithBroker {
  protected val log = Logger(this.getClass())

  protected val brokerProps            = testUtil.brokersConnProps
  protected val topicAdmin: TopicAdmin = TopicAdmin.of(brokerProps)

  protected val newThreadPool: () => ExecutorService = () =>
    Executors.newCachedThreadPool(
      new ThreadFactoryBuilder().setNameFormat(this.getClass.getSimpleName + "-").build()
    )

  protected val countRows: (util.Queue[Row], Long, ExecutionContext) => Future[Long] =
    (queue, executionTime, ec) =>
      Future {
        log.debug("countRows begin...")
        val baseTime = System.currentTimeMillis()
        var count    = 0L
        var running  = true
        while (running) {
          val row = queue.poll()
          if (row != null) count += 1 else Thread.sleep(100)
          running = (System.currentTimeMillis() - baseTime) < executionTime
        }
        log.debug("countRows done")
        count
      }(ec)

  protected def createTopicKey = TopicKey.of("default", CommonUtils.randomString(5))

  protected def createTestTopic(topicKey: TopicKey): Unit =
    topicAdmin.topicCreator
      .numberOfPartitions(1)
      .numberOfReplications(1.toShort)
      .topicKey(topicKey)
      .create

  protected def defaultSourceConfig(
    sourceToTopics: Seq[TopicKey] = Seq.empty[TopicKey]
  ): SourceConfig = {
    import ShabondiDefinitions._
    val args = mutable.ArrayBuffer(
      GROUP_DEFINITION.key + "=" + CommonUtils.randomString(5),
      NAME_DEFINITION.key + "=" + CommonUtils.randomString(3),
      SHABONDI_CLASS_DEFINITION.key + "=" + classOf[ShabondiSource].getName,
      CLIENT_PORT_DEFINITION.key + "=8080",
      BROKERS_DEFINITION.key + "=" + testUtil.brokersConnProps
    )
    if (sourceToTopics.nonEmpty)
      args += s"${SOURCE_TO_TOPICS_DEFINITION.key}=${TopicKey.toJsonString(sourceToTopics.asJava)}"

    val rawConfig = ShabondiUtils.parseArgs(args.toArray)
    new SourceConfig(rawConfig)
  }

  protected def defaultSinkConfig(
    sinkFromTopics: Seq[TopicKey] = Seq.empty[TopicKey]
  ): SinkConfig = {
    import ShabondiDefinitions._
    val args = mutable.ArrayBuffer(
      GROUP_DEFINITION.key + "=" + CommonUtils.randomString(5),
      NAME_DEFINITION.key + "=" + CommonUtils.randomString(3),
      SHABONDI_CLASS_DEFINITION.key + "=" + classOf[ShabondiSink].getName,
      CLIENT_PORT_DEFINITION.key + "=8080",
      BROKERS_DEFINITION.key + "=" + testUtil.brokersConnProps
    )
    if (sinkFromTopics.nonEmpty)
      args += s"${SINK_FROM_TOPICS_DEFINITION.key}=${TopicKey.toJsonString(sinkFromTopics.asJava)}"

    val rawConfig = ShabondiUtils.parseArgs(args.toArray)
    new SinkConfig(rawConfig)
  }

  protected def singleRow(columnSize: Int, rowId: Int = 0): Row =
    KafkaSupport.singleRow(columnSize, rowId)

  protected def multipleRows(rowSize: Int): immutable.Iterable[Row] =
    KafkaSupport.multipleRows(rowSize)

  @After
  def tearDown(): Unit = {
    Releasable.close(topicAdmin)
  }
}
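A hypothetical subclass, to show how this base class is meant to be consumed (the test name and body are illustrative, not from the ohara project):

import org.junit.Test

// Hypothetical test: BasicShabondiTest supplies the embedded broker, the
// TopicAdmin, and the thread-pool factory; tearDown() releases the admin.
private[shabondi] class TopicSmokeTest extends BasicShabondiTest {
  @Test
  def testCreateTopic(): Unit = {
    val topicKey = createTopicKey
    createTestTopic(topicKey)
    log.info("created topic {}", topicKey)
  }
}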
Example 3
Source File: AkkaBeforeAndAfterAll.scala (from daml, Apache License 2.0) | 5 votes
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.ledger.api.testing.utils

import java.util.concurrent.Executors

import akka.actor.ActorSystem
import akka.stream.Materializer
import com.daml.grpc.adapter.{AkkaExecutionSequencerPool, ExecutionSequencerFactory}
import com.google.common.util.concurrent.ThreadFactoryBuilder
import org.scalatest.{BeforeAndAfterAll, Suite}
import org.slf4j.LoggerFactory

import scala.concurrent.duration.DurationInt
import scala.concurrent.{Await, ExecutionContext}

trait AkkaBeforeAndAfterAll extends BeforeAndAfterAll { self: Suite =>
  private val logger = LoggerFactory.getLogger(getClass)

  protected def actorSystemName: String = this.getClass.getSimpleName

  private implicit lazy val executionContext: ExecutionContext =
    ExecutionContext.fromExecutorService(
      Executors.newSingleThreadExecutor(
        new ThreadFactoryBuilder()
          .setDaemon(true)
          .setNameFormat(s"$actorSystemName-thread-pool-worker-%d")
          .setUncaughtExceptionHandler((thread, _) =>
            logger.error(s"got an uncaught exception on thread: ${thread.getName}"))
          .build()))

  protected implicit lazy val system: ActorSystem =
    ActorSystem(actorSystemName, defaultExecutionContext = Some(executionContext))

  protected implicit lazy val materializer: Materializer = Materializer(system)

  protected implicit lazy val executionSequencerFactory: ExecutionSequencerFactory =
    new AkkaExecutionSequencerPool(poolName = actorSystemName, actorCount = 1)

  override protected def afterAll(): Unit = {
    executionSequencerFactory.close()
    materializer.shutdown()
    Await.result(system.terminate(), 30.seconds)
    super.afterAll()
  }
}
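A sketch of how a suite might mix this trait in (the spec itself is hypothetical and assumes a ScalaTest 3.1+ AnyWordSpec setup):

import akka.stream.scaladsl.{Sink, Source}
import org.scalatest.matchers.should.Matchers
import org.scalatest.wordspec.AnyWordSpec

import scala.concurrent.Await
import scala.concurrent.duration.DurationInt

// Mixing in AkkaBeforeAndAfterAll brings the implicit ActorSystem and
// Materializer into scope and tears them down after the suite finishes.
class StreamSmokeSpec extends AnyWordSpec with Matchers with AkkaBeforeAndAfterAll {
  "a small stream" should {
    "sum its elements" in {
      val sum = Await.result(Source(1 to 3).runWith(Sink.fold(0)(_ + _)), 5.seconds)
      sum shouldBe 6
    }
  }
}

Note the setDaemon(true) and the uncaught-exception handler in the trait: daemon worker threads cannot prevent JVM shutdown if a test hangs, and otherwise-silent failures on the pool get logged.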
Example 4
Source File: ScheduledTaskManager.scala (from incubator-toree, Apache License 2.0) | 5 votes
package org.apache.toree.utils

import scala.language.existentials
import java.util.concurrent._
import java.util.UUID
import com.google.common.util.concurrent.ThreadFactoryBuilder
import ScheduledTaskManager._
import scala.util.Try

// NOTE: the class header, task map, scheduler field, and the task-scheduling
// methods were elided in the original snippet; the declarations below are a
// minimal reconstruction so that stop() compiles.
class ScheduledTaskManager {
  private val _taskMap = new ConcurrentHashMap[UUID, ScheduledFuture[_]]()
  private val _scheduler = Executors.newScheduledThreadPool(
    DefaultMaxThreads,
    new ThreadFactoryBuilder().setDaemon(true).build()
  )

  // ... task-scheduling methods elided in the original snippet ...

  def stop() = {
    _taskMap.clear()
    _scheduler.shutdown()
  }
}

object ScheduledTaskManager {
  val DefaultMaxThreads     = 4
  val DefaultExecutionDelay = 10  // 10 milliseconds
  val DefaultTimeInterval   = 100 // 100 milliseconds
}
Example 5
Source File: SchedulerExecutionContext.scala (from kinesis-stream, MIT License) | 5 votes
package px.kinesis.stream.consumer

import java.util.concurrent.Executors

import com.google.common.util.concurrent.ThreadFactoryBuilder

import scala.concurrent.ExecutionContext

object SchedulerExecutionContext {
  lazy val Global = SchedulerExecutionContext("KinesisScheduler")

  def apply(name: String): ExecutionContext =
    ExecutionContext.fromExecutor(
      Executors.newCachedThreadPool(
        new ThreadFactoryBuilder()
          .setNameFormat(s"$name-%04d")
          .setDaemon(true)
          .build
      )
    )
}
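Usage is a one-liner; a minimal sketch (the demo object and the sleep are illustrative, the latter only needed because the pool threads are daemons):

import px.kinesis.stream.consumer.SchedulerExecutionContext

import scala.concurrent.{ExecutionContext, Future}

object SchedulerDemo extends App {
  // Tasks run on the shared daemon pool with threads named
  // "KinesisScheduler-0000", "KinesisScheduler-0001", ...
  implicit val ec: ExecutionContext = SchedulerExecutionContext.Global

  Future(println(s"running on ${Thread.currentThread().getName}"))

  // Daemon threads will not keep the JVM alive, so give the task a
  // moment to run before the main thread exits.
  Thread.sleep(500)
}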
Example 6
Source File: Module.scala (from elastiknn, Apache License 2.0) | 5 votes
import java.util.concurrent.{ExecutorService, Executors, ThreadFactory}

import com.google.common.util.concurrent.ThreadFactoryBuilder
import com.google.inject.{AbstractModule, TypeLiteral}
import com.klibisz.elastiknn.client.{ElastiknnClient, ElastiknnFutureClient}
import javax.inject.Provider
import play.api.{Configuration, Environment}

import scala.concurrent.ExecutionContext

class Module(environment: Environment, configuration: Configuration) extends AbstractModule {

  val eknnProvider = new Provider[ElastiknnFutureClient] {
    override def get(): ElastiknnFutureClient = {
      val tfac: ThreadFactory =
        new ThreadFactoryBuilder().setDaemon(true).setNameFormat("elastiknn-%d").build()
      val exec: ExecutorService =
        Executors.newFixedThreadPool(Runtime.getRuntime.availableProcessors(), tfac)
      implicit val ec: ExecutionContext = ExecutionContext.fromExecutor(exec)
      val host = configuration.underlying.getString("elastiknn.elasticsearch.host")
      val port = configuration.underlying.getInt("elastiknn.elasticsearch.port")
      ElastiknnClient.futureClient(host, port)
    }
  }

  override def configure(): Unit = {
    // Weird that you have to use this constructor, but it works.
    bind(new TypeLiteral[ElastiknnFutureClient]() {}).toProvider(eknnProvider)
  }
}
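With the binding in place, the client can be constructor-injected anywhere Guice manages; a hypothetical consumer (the controller name is illustrative, not from the elastiknn project):

import com.klibisz.elastiknn.client.ElastiknnFutureClient
import javax.inject.Inject
import play.api.mvc.{AbstractController, ControllerComponents}

// Guice resolves ElastiknnFutureClient through the provider bound in Module.
class SearchController @Inject() (eknn: ElastiknnFutureClient, cc: ControllerComponents)
    extends AbstractController(cc) {
  // ... use eknn to issue nearest-neighbour queries against Elasticsearch
}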