java.util.concurrent.LinkedBlockingQueue Scala Examples
The following examples show how to use java.util.concurrent.LinkedBlockingQueue.
Each example is taken from an open-source Scala project; the source file name, project, and license are noted above the code.
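Before the project examples, here is a minimal, self-contained sketch (not taken from any of the projects below; the object name and values are illustrative) of the two ways LinkedBlockingQueue typically appears in them: as a bounded producer/consumer buffer driven by put and a timed poll, and as the work queue backing a ThreadPoolExecutor. It assumes Scala 2.12+ so that lambdas convert to Runnable.

import java.util.concurrent.{LinkedBlockingQueue, ThreadPoolExecutor, TimeUnit}

object LinkedBlockingQueueSketch extends App {
  // Bounded buffer: put() blocks when the queue is full,
  // poll(timeout) returns null when nothing arrives in time.
  val queue = new LinkedBlockingQueue[String](100)

  val producer = new Thread(() => (1 to 10).foreach(i => queue.put(s"message-$i")))
  producer.start()

  Iterator
    .continually(queue.poll(500, TimeUnit.MILLISECONDS))
    .takeWhile(_ != null)
    .foreach(msg => println(s"consumed $msg"))

  // The same class is also commonly used as the work queue of a ThreadPoolExecutor,
  // as several of the examples below do.
  val pool = new ThreadPoolExecutor(2, 2, 0L, TimeUnit.MILLISECONDS, new LinkedBlockingQueue[Runnable]())
  pool.execute(() => println("task ran on " + Thread.currentThread().getName))
  pool.shutdown()
  pool.awaitTermination(5, TimeUnit.SECONDS)
}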
Example 1
Source File: MultiThreadPipeline.scala From scrapy4s with GNU Lesser General Public License v3.0 | 5 votes |
package com.scrapy4s.pipeline

import java.util.concurrent.ThreadPoolExecutor.CallerRunsPolicy
import java.util.concurrent.{LinkedBlockingQueue, ThreadPoolExecutor, TimeUnit}

import com.scrapy4s.http.Response
import org.slf4j.LoggerFactory

class MultiThreadPipeline(threadCount: Int, pipe: Pipeline) extends Pipeline {
  val logger = LoggerFactory.getLogger(classOf[MultiThreadPipeline])

  lazy private val threadPool = new ThreadPoolExecutor(threadCount, threadCount,
    0L, TimeUnit.MILLISECONDS,
    new LinkedBlockingQueue[Runnable](),
    new CallerRunsPolicy())

  override def pipe(response: Response): Unit = {
    threadPool.execute(() => {
      logger.debug(s"pipe -> exec ${response.url}")
      pipe.pipe(response)
    })
  }

  override def close(): Unit = {
    threadPool.shutdown()
    while (!threadPool.awaitTermination(1, TimeUnit.SECONDS)) {
      logger.debug("wait for spider done ...")
    }
    pipe.close()
    logger.debug("spider done !")
  }
}

object MultiThreadPipeline {
  def apply[T](pipe: Pipeline)
              (implicit threadCount: Int = Runtime.getRuntime.availableProcessors() * 2): MultiThreadPipeline =
    new MultiThreadPipeline(threadCount, pipe)
}
Example 2
Source File: standard_thread.scala From libisabelle with Apache License 2.0 | 5 votes |
package isabelle

import java.lang.Thread
import java.util.concurrent.{ExecutorService, ThreadPoolExecutor, TimeUnit, LinkedBlockingQueue, ThreadFactory}

object Standard_Thread {
  final class Delay private[Standard_Thread](
    first: Boolean, delay: => Time, log: Logger, event: => Unit)
  {
    private var running: Option[Event_Timer.Request] = None

    private def run: Unit = {
      val do_run = synchronized {
        if (running.isDefined) { running = None; true } else false
      }
      if (do_run) {
        try { event }
        catch { case exn: Throwable if !Exn.is_interrupt(exn) => log(Exn.message(exn)); throw exn }
      }
    }

    def invoke(): Unit = synchronized {
      val new_run =
        running match {
          case Some(request) => if (first) false else { request.cancel; true }
          case None => true
        }
      if (new_run)
        running = Some(Event_Timer.request(Time.now() + delay)(run))
    }

    def revoke(): Unit = synchronized {
      running match {
        case Some(request) => request.cancel; running = None
        case None =>
      }
    }

    def postpone(alt_delay: Time): Unit = synchronized {
      running match {
        case Some(request) =>
          val alt_time = Time.now() + alt_delay
          if (request.time < alt_time && request.cancel) {
            running = Some(Event_Timer.request(alt_time)(run))
          }
        case None =>
      }
    }
  }

  // delayed event after first invocation
  def delay_first(delay: => Time, log: Logger = No_Logger)(event: => Unit): Delay =
    new Delay(true, delay, log, event)

  // delayed event after last invocation
  def delay_last(delay: => Time, log: Logger = No_Logger)(event: => Unit): Delay =
    new Delay(false, delay, log, event)
}
Example 3
Source File: TwitterStatusReader.scala From kafka-connect-twitter with Apache License 2.0 | 5 votes |
package com.eneco.trading.kafka.connect.twitter

import java.util
import java.util.concurrent.{TimeUnit, LinkedBlockingQueue, Executors}

import com.eneco.trading.kafka.connect.twitter.domain.TwitterStatus
import com.twitter.hbc.httpclient.BasicClient
import com.twitter.hbc.twitter4j.Twitter4jStatusClient
import org.apache.kafka.connect.data.Schema
import org.apache.kafka.connect.source.SourceRecord
import twitter4j._

import scala.collection.JavaConverters._
import Extensions._

class StatusEnqueuer(queue: LinkedBlockingQueue[Status]) extends StatusListener with Logging {
  override def onStallWarning(stallWarning: StallWarning) = log.warn("onStallWarning")

  override def onDeletionNotice(statusDeletionNotice: StatusDeletionNotice) = log.info("onDeletionNotice")

  override def onScrubGeo(l: Long, l1: Long) = {
    log.debug(s"onScrubGeo $l $l1")
  }

  override def onStatus(status: Status) = {
    log.debug("onStatus")
    queue.put(status)
  }

  override def onTrackLimitationNotice(i: Int) = log.info(s"onTrackLimitationNotice $i")

  override def onException(e: Exception) = log.warn("onException " + e.toString)
}

trait StatusToSourceRecord {
  def convert(status: Status, topic: String): SourceRecord
}

object StatusToStringKeyValue extends StatusToSourceRecord {
  def convert(status: Status, topic: String): SourceRecord = {
    new SourceRecord(
      Map("tweetSource" -> status.getSource).asJava, //source partitions?
      Map("tweetId" -> status.getId).asJava, //source offsets?
      topic,
      null,
      Schema.STRING_SCHEMA,
      status.getUser.getScreenName,
      Schema.STRING_SCHEMA,
      status.getText,
      status.getCreatedAt.getTime)
  }
}

object StatusToTwitterStatusStructure extends StatusToSourceRecord {
  def convert(status: Status, topic: String): SourceRecord = {
    //val ts = TwitterStatus.struct(TwitterStatus(status))
    new SourceRecord(
      Map("tweetSource" -> status.getSource).asJava, //source partitions?
      Map("tweetId" -> status.getId).asJava, //source offsets?
      topic,
      null,
      Schema.STRING_SCHEMA,
      status.getUser.getScreenName,
      TwitterStatus.schema,
      TwitterStatus.struct(status),
      status.getCreatedAt.getTime)
  }
}

  def stop() = {
    log.info("Stop Twitter client")
    client.stop()
  }
}
Example 4
Source File: TwitterReader.scala From kafka-connect-twitter with Apache License 2.0 | 5 votes |
package com.eneco.trading.kafka.connect.twitter

import java.util.concurrent.LinkedBlockingQueue

import com.twitter.hbc.ClientBuilder
import com.twitter.hbc.core.Constants
import com.twitter.hbc.core.endpoint.StatusesFilterEndpoint
import com.twitter.hbc.core.endpoint.StatusesSampleEndpoint
import com.twitter.hbc.core.endpoint.DefaultStreamingEndpoint
import com.twitter.hbc.core.processor.StringDelimitedProcessor
import com.twitter.hbc.core.endpoint.Location
import com.twitter.hbc.httpclient.auth.OAuth1
import org.apache.kafka.connect.source.{SourceRecord, SourceTaskContext}
import twitter4j.Status

import scala.collection.JavaConversions._
import scala.collection.JavaConverters._

object TwitterReader {
  def apply(config: TwitterSourceConfig, context: SourceTaskContext) = {
    //endpoints
    val endpoint: DefaultStreamingEndpoint =
      if (config.getString(TwitterSourceConfig.STREAM_TYPE).equals(TwitterSourceConfig.STREAM_TYPE_SAMPLE)) {
        new StatusesSampleEndpoint()
      } else {
        val trackEndpoint = new StatusesFilterEndpoint()
        val terms = config.getList(TwitterSourceConfig.TRACK_TERMS)
        if (!terms.isEmpty) {
          trackEndpoint.trackTerms(terms)
        }
        val locs = config.getList(TwitterSourceConfig.TRACK_LOCATIONS)
        if (!locs.isEmpty) {
          val locations = locs.toList.map({ x => Double.box(x.toDouble) }).grouped(4).toList
            .map({ l => new Location(new Location.Coordinate(l(0), l(1)), new Location.Coordinate(l(2), l(3))) })
            .asJava
          trackEndpoint.locations(locations)
        }
        val follow = config.getList(TwitterSourceConfig.TRACK_FOLLOW)
        if (!follow.isEmpty) {
          val users = follow.toList.map({ x => Long.box(x.trim.toLong) }).asJava
          trackEndpoint.followings(users)
        }
        trackEndpoint
      }
    endpoint.stallWarnings(false)
    val language = config.getList(TwitterSourceConfig.LANGUAGE)
    if (!language.isEmpty) {
      // endpoint.languages(language) doesn't work as intended!
      endpoint.addQueryParameter(TwitterSourceConfig.LANGUAGE, language.toList.mkString(","))
    }

    //twitter auth stuff
    val auth = new OAuth1(config.getString(TwitterSourceConfig.CONSUMER_KEY_CONFIG),
      config.getPassword(TwitterSourceConfig.CONSUMER_SECRET_CONFIG).value,
      config.getString(TwitterSourceConfig.TOKEN_CONFIG),
      config.getPassword(TwitterSourceConfig.SECRET_CONFIG).value)

    //batch size to take from the queue
    val batchSize = config.getInt(TwitterSourceConfig.BATCH_SIZE)
    val batchTimeout = config.getDouble(TwitterSourceConfig.BATCH_TIMEOUT)

    //The Kafka topic to append to
    val topic = config.getString(TwitterSourceConfig.TOPIC)

    //queue for client to buffer to
    val queue = new LinkedBlockingQueue[String](10000)

    //how the output is formatted
    val statusConverter = config.getString(TwitterSourceConfig.OUTPUT_FORMAT) match {
      case TwitterSourceConfig.OUTPUT_FORMAT_ENUM_STRING => StatusToStringKeyValue
      case TwitterSourceConfig.OUTPUT_FORMAT_ENUM_STRUCTURED => StatusToTwitterStatusStructure
    }

    //build basic client
    val client = new ClientBuilder()
      .name(config.getString(TwitterSourceConfig.TWITTER_APP_NAME))
      .hosts(Constants.STREAM_HOST)
      .endpoint(endpoint)
      .authentication(auth)
      .processor(new StringDelimitedProcessor(queue))
      .build()

    new TwitterStatusReader(client = client, rawQueue = queue, batchSize = batchSize,
      batchTimeout = batchTimeout, topic = topic, statusConverter = statusConverter)
  }
}
Example 5
Source File: Concurrent.scala From zen with Apache License 2.0 | 5 votes |
package com.github.cloudml.zen.ml.util

import java.util.concurrent.{Executors, LinkedBlockingQueue, ThreadPoolExecutor}

import scala.concurrent._
import scala.concurrent.duration._

object Concurrent extends Serializable {
  @inline def withFuture[T](body: => T)(implicit es: ExecutionContextExecutorService): Future[T] = {
    Future(body)(es)
  }

  @inline def withAwaitReady[T](future: Future[T]): Unit = {
    Await.ready(future, 1.hour)
  }

  def withAwaitReadyAndClose[T](future: Future[T])(implicit es: ExecutionContextExecutorService): Unit = {
    Await.ready(future, 1.hour)
    closeExecutionContext(es)
  }

  @inline def withAwaitResult[T](future: Future[T]): T = {
    Await.result(future, 1.hour)
  }

  def withAwaitResultAndClose[T](future: Future[T])(implicit es: ExecutionContextExecutorService): T = {
    val res = Await.result(future, 1.hour)
    closeExecutionContext(es)
    res
  }

  @inline def initExecutionContext(numThreads: Int): ExecutionContextExecutorService = {
    ExecutionContext.fromExecutorService(Executors.newFixedThreadPool(numThreads))
  }

  @inline def closeExecutionContext(es: ExecutionContextExecutorService): Unit = {
    es.shutdown()
  }
}

object DebugConcurrent extends Serializable {
  def withFuture[T](body: => T)(implicit es: ExecutionContextExecutorService): Future[T] = {
    val future = Future(body)(es)
    future.onFailure { case e =>
      e.printStackTrace()
    }(scala.concurrent.ExecutionContext.Implicits.global)
    future
  }

  def withAwaitReady[T](future: Future[T]): Unit = {
    Await.ready(future, 1.hour)
  }

  def withAwaitReadyAndClose[T](future: Future[T])(implicit es: ExecutionContextExecutorService): Unit = {
    future.onComplete { _ =>
      closeExecutionContext(es)
    }(scala.concurrent.ExecutionContext.Implicits.global)
    Await.ready(future, 1.hour)
  }

  def withAwaitResult[T](future: Future[T]): T = {
    Await.result(future, 1.hour)
  }

  def withAwaitResultAndClose[T](future: Future[T])(implicit es: ExecutionContextExecutorService): T = {
    future.onComplete { _ =>
      closeExecutionContext(es)
    }(scala.concurrent.ExecutionContext.Implicits.global)
    Await.result(future, 1.hour)
  }

  def initExecutionContext(numThreads: Int): ExecutionContextExecutorService = {
    val es = new ThreadPoolExecutor(numThreads, numThreads, 0L, MILLISECONDS,
      new LinkedBlockingQueue[Runnable], Executors.defaultThreadFactory, new ThreadPoolExecutor.AbortPolicy)
    ExecutionContext.fromExecutorService(es)
  }

  def closeExecutionContext(es: ExecutionContextExecutorService): Unit = {
    es.shutdown()
    if (!es.awaitTermination(1L, SECONDS)) {
      System.err.println("Error: ExecutorService does not exit itself, force to terminate.")
    }
  }
}
Example 6
Source File: TwitterStatusReader.scala From kafka-tweet-producer with Apache License 2.0 | 5 votes |
package com.eneco.trading.kafka.connect.twitter

import java.util
import java.util.concurrent.{TimeUnit, LinkedBlockingQueue, Executors}

import com.eneco.trading.kafka.connect.twitter.domain.TwitterStatus
import com.twitter.hbc.httpclient.BasicClient
import com.twitter.hbc.twitter4j.Twitter4jStatusClient
import org.apache.kafka.connect.data.Schema
import org.apache.kafka.connect.source.SourceRecord
import twitter4j._

import scala.collection.JavaConverters._
import Extensions._

class StatusEnqueuer(queue: LinkedBlockingQueue[Status]) extends StatusListener with Logging {
  override def onStallWarning(stallWarning: StallWarning) = log.warn("onStallWarning")

  override def onDeletionNotice(statusDeletionNotice: StatusDeletionNotice) = log.info("onDeletionNotice")

  override def onScrubGeo(l: Long, l1: Long) = {
    log.debug(s"onScrubGeo $l $l1")
  }

  override def onStatus(status: Status) = {
    log.debug("onStatus")
    queue.put(status)
  }

  override def onTrackLimitationNotice(i: Int) = log.info(s"onTrackLimitationNotice $i")

  override def onException(e: Exception) = log.warn("onException " + e.toString)
}

trait StatusToSourceRecord {
  def convert(status: Status, topic: String): SourceRecord
}

object StatusToStringKeyValue extends StatusToSourceRecord {
  def convert(status: Status, topic: String): SourceRecord = {
    new SourceRecord(
      Map("tweetSource" -> status.getSource).asJava, //source partitions?
      Map("tweetId" -> status.getId).asJava, //source offsets?
      topic,
      null,
      Schema.STRING_SCHEMA,
      status.getUser.getScreenName,
      Schema.STRING_SCHEMA,
      status.getText)
  }
}

object StatusToTwitterStatusStructure extends StatusToSourceRecord {
  def convert(status: Status, topic: String): SourceRecord = {
    //val ts = TwitterStatus.struct(TwitterStatus(status))
    new SourceRecord(
      Map("tweetSource" -> status.getSource).asJava, //source partitions?
      Map("tweetId" -> status.getId).asJava, //source offsets?
      topic,
      TwitterStatus.schema,
      TwitterStatus.struct(status))
  }
}

  def stop() = {
    log.info("Stop Twitter client")
    client.stop()
  }
}
Example 7
Source File: AlohaEventBus.scala From aloha with Apache License 2.0 | 5 votes |
package me.jrwang.aloha.scheduler.bus

import java.util.concurrent.LinkedBlockingQueue
import java.util.concurrent.atomic.{AtomicBoolean, AtomicLong}

import me.jrwang.aloha.scheduler.bus.AsyncEventQueue.POISON_PILL

class AlohaEventBus extends EventBus[AlohaEventListener, AlohaEvent] {
  override protected def doPostEvent(listener: AlohaEventListener, event: AlohaEvent): Unit = {
    event match {
      case e: AppStateChangedEvent =>
        listener.onApplicationStateChange(e)
      case e: AppRelaunchedEvent =>
        listener.onApplicationRelaunched(e)
      //TODO other specific event
      case _ => listener.onOtherEvent(event)
    }
  }
}

  private[aloha] def stop(): Unit = {
    if (!started.get()) {
      throw new IllegalStateException(s"Attempted to stop $name that has not yet started!")
    }
    if (stopped.compareAndSet(false, true)) {
      eventCount.incrementAndGet()
      eventQueue.put(POISON_PILL)
    }
    // this thread might be trying to stop itself as part of error handling -- we can't join
    // in that case.
    if (Thread.currentThread() != dispatchThread) {
      dispatchThread.join()
    }
  }

  def post(event: AlohaEvent): Unit = {
    if (stopped.get()) {
      return
    }
    eventCount.incrementAndGet()
    if (eventQueue.offer(event)) {
      return
    }
    eventCount.decrementAndGet()
    if (logDroppedEvent.compareAndSet(false, true)) {
      // Only log the following message once to avoid duplicated annoying logs.
      logError(s"Dropping event from queue $name. " +
        "This likely means one of the listeners is too slow and cannot keep up with " +
        "the rate at which tasks are being started by the scheduler.")
    }
    logTrace(s"Dropping event $event")
  }

  override def removeListenerOnError(listener: AlohaEventListener): Unit = {
    // the listener failed in an unrecoverably way, we want to remove it from the entire
    // LiveListenerBus (potentially stopping a queue if it is empty)
    bus.removeListener(listener)
  }
}

object AsyncEventQueue {
  case object POISON_PILL extends AlohaEvent
}
Example 8
Source File: CoapSourceTask.scala From stream-reactor with Apache License 2.0 | 5 votes |
package com.datamountaineer.streamreactor.connect.coap.source

import java.util
import java.util.concurrent.LinkedBlockingQueue

import com.datamountaineer.streamreactor.connect.coap.configs.{CoapConstants, CoapSettings, CoapSourceConfig}
import com.datamountaineer.streamreactor.connect.queues.QueueHelpers
import com.datamountaineer.streamreactor.connect.utils.{JarManifest, ProgressCounter}
import com.typesafe.scalalogging.StrictLogging
import org.apache.kafka.connect.source.{SourceRecord, SourceTask}

import scala.collection.JavaConverters._

class CoapSourceTask extends SourceTask with StrictLogging {
  private var readers: Set[CoapReader] = _
  private val progressCounter = new ProgressCounter
  private var enableProgress: Boolean = false
  private val queue = new LinkedBlockingQueue[SourceRecord]()
  private var batchSize: Int = CoapConstants.BATCH_SIZE_DEFAULT
  private var lingerTimeout = CoapConstants.SOURCE_LINGER_MS_DEFAULT
  private val manifest = JarManifest(getClass.getProtectionDomain.getCodeSource.getLocation)

  override def start(props: util.Map[String, String]): Unit = {
    logger.info(scala.io.Source.fromInputStream(getClass.getResourceAsStream("/coap-source-ascii.txt")).mkString + s" $version")
    logger.info(manifest.printManifest())

    val conf = if (context.configs().isEmpty) props else context.configs()

    val config = CoapSourceConfig(conf)
    enableProgress = config.getBoolean(CoapConstants.PROGRESS_COUNTER_ENABLED)
    val settings = CoapSettings(config)
    batchSize = config.getInt(CoapConstants.BATCH_SIZE)
    lingerTimeout = config.getInt(CoapConstants.SOURCE_LINGER_MS)
    enableProgress = config.getBoolean(CoapConstants.PROGRESS_COUNTER_ENABLED)
    readers = CoapReaderFactory(settings, queue)
  }

  override def poll(): util.List[SourceRecord] = {
    val records = new util.ArrayList[SourceRecord]()
    QueueHelpers.drainWithTimeoutNoGauva(records, batchSize, lingerTimeout * 1000000, queue)

    if (enableProgress) {
      progressCounter.update(records.asScala.toVector)
    }
    records
  }

  override def stop(): Unit = {
    logger.info("Stopping Coap source and closing connections.")
    readers.foreach(_.stop())
    progressCounter.empty
  }

  override def version: String = manifest.version()
}
Example 9
Source File: CoapReaderFactory.scala From stream-reactor with Apache License 2.0 | 5 votes |
package com.datamountaineer.streamreactor.connect.coap.source

import java.util
import java.util.concurrent.LinkedBlockingQueue

import com.datamountaineer.streamreactor.connect.coap.configs.CoapSetting
import com.datamountaineer.streamreactor.connect.coap.connection.CoapManager
import com.datamountaineer.streamreactor.connect.coap.domain.CoapMessageConverter
import com.typesafe.scalalogging.StrictLogging
import org.apache.kafka.connect.source.SourceRecord
import org.eclipse.californium.core.{CoapHandler, CoapObserveRelation, CoapResponse, WebLink}

class MessageHandler(resource: String, topic: String, queue: LinkedBlockingQueue[SourceRecord])
  extends CoapHandler with StrictLogging {

  val converter = CoapMessageConverter()

  override def onError(): Unit = {
    logger.warn(s"Message dropped for $topic!")
  }

  override def onLoad(response: CoapResponse): Unit = {
    val records = converter.convert(resource, topic, response.advanced())
    logger.debug(s"Received ${response.advanced().toString} for $topic")
    logger.debug(s"Records in queue ${queue.size()} for $topic")
    queue.put(records)
  }
}
Example 10
Source File: ReThinkSourceReadersFactory.scala From stream-reactor with Apache License 2.0 | 5 votes |
package com.datamountaineer.streamreactor.connect.rethink.source

import java.util
import java.util.concurrent.LinkedBlockingQueue
import java.util.concurrent.atomic.AtomicBoolean

import com.datamountaineer.streamreactor.connect.rethink.ReThinkConnection
import com.datamountaineer.streamreactor.connect.rethink.config.{ReThinkSourceConfig, ReThinkSourceSetting, ReThinkSourceSettings}
import com.rethinkdb.RethinkDB
import com.rethinkdb.net.{Connection, Cursor}
import com.typesafe.scalalogging.StrictLogging
import org.apache.kafka.connect.data.SchemaBuilder
import org.apache.kafka.connect.source.SourceRecord

import scala.collection.JavaConverters._
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.Future

object ReThinkSourceReadersFactory {

  def apply(config: ReThinkSourceConfig, r: RethinkDB): Set[ReThinkSourceReader] = {
    val conn = Some(ReThinkConnection(r, config))
    val settings = ReThinkSourceSettings(config)
    settings.map(s => new ReThinkSourceReader(r, conn.get, s))
  }
}

class ReThinkSourceReader(rethink: RethinkDB, conn: Connection, setting: ReThinkSourceSetting)
  extends StrictLogging {

  logger.info(s"Initialising ReThink Reader for ${setting.source}")

  private val keySchema = SchemaBuilder.string().optional().build()
  private val valueSchema = ChangeFeedStructBuilder.schema
  private val sourcePartition = Map.empty[String, String]
  private val offset = Map.empty[String, String]
  private val stopFeed = new AtomicBoolean(false)
  private val handlingFeed = new AtomicBoolean(false)
  private var feed: Cursor[util.HashMap[String, String]] = _
  val queue = new LinkedBlockingQueue[SourceRecord]()
  val batchSize = setting.batchSize

  def start() = {
    feed = getChangeFeed()
    startFeed(feed)
  }

  def stop() = {
    logger.info(s"Closing change feed for ${setting.source}")
    stopFeed.set(true)
    while (handlingFeed.get()) {
      logger.debug("Waiting for feed to shutdown...")
      Thread.sleep(1000)
    }
    feed.close()
    logger.info(s"Change feed closed for ${setting.source}")
  }

  private def handleFeed(feed: Cursor[util.HashMap[String, String]]) = {
    handlingFeed.set(true)

    //feed.next is blocking
    while (!stopFeed.get()) {
      logger.debug(s"Waiting for next change feed event for ${setting.source}")
      val cdc = convert(feed.next().asScala.toMap)
      queue.put(cdc)
    }
    handlingFeed.set(false)
  }

  private def getChangeFeed(): Cursor[util.HashMap[String, String]] = {
    logger.info(s"Initialising change feed for ${setting.source}")
    rethink
      .db(setting.db)
      .table(setting.source)
      .changes()
      .optArg("include_states", true)
      .optArg("include_initial", setting.initialise)
      .optArg("include_types", true)
      .run(conn)
  }

  private def convert(feed: Map[String, String]) = {
    new SourceRecord(sourcePartition.asJava, offset.asJava, setting.target, keySchema, setting.source, valueSchema,
      ChangeFeedStructBuilder(feed))
  }
}
Example 11
Source File: KafkaSpout.scala From Raphtory with Apache License 2.0 | 5 votes |
package com.raphtory.spouts

import java.util
import java.util.Properties

import com.raphtory.core.components.Spout.SpoutTrait
import org.apache.kafka.clients.consumer.KafkaConsumer

import scala.collection.JavaConverters._
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.duration.Duration
import scala.concurrent.duration.MILLISECONDS
import scala.concurrent.duration.SECONDS
import java.util.concurrent.LinkedBlockingQueue

import akka.actor.Props
import com.raphtory.core.components.Router.RouterManager

import scala.concurrent.duration._
import com.raphtory.core.utils.SchedulerUtil

import scala.util.Random

class KafkaSpout extends SpoutTrait {
  println("Starting kafka")
  var kafkaServer = System.getenv().getOrDefault("KAFKA_ADDRESS", "127.0.0.1").trim
  var kafkaIP = System.getenv().getOrDefault("KAFKA_PORT", "9092").trim
  var offset = System.getenv().getOrDefault("KAFKA_OFFSET", "earliest").trim
  val x = new Random().nextLong()
  var groupID = System.getenv().getOrDefault("KAFKA_GROUP", "group" + x).trim
  var topic = System.getenv().getOrDefault("KAFKA_TOPIC", "sample_topic").trim
  var restart = System.getenv().getOrDefault("RESTART_RATE", "10").trim

  val queue = new LinkedBlockingQueue[String]

  val props = new Properties()
  props.put("bootstrap.servers", s"$kafkaServer:$kafkaIP")
  props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer")
  props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer")
  props.put("auto.offset.reset", offset)
  props.put("group.id", groupID)
  val consumer: KafkaConsumer[String, String] = new KafkaConsumer[String, String](props)
  consumer.subscribe(util.Arrays.asList(topic))
  //val helper = context.system.actorOf(Props(new KafkaSpoutBackPressure(queue)), "Spout_Helper")

  protected def ProcessSpoutTask(message: Any): Unit = message match {
    case StartSpout => AllocateSpoutTask(Duration(1, MILLISECONDS), "newLine")
    case "newLine"  => consumeFromKafka()
    case _          => println("message not recognized!")
  }

  def consumeFromKafka() = {
    //println("Consuming")
    val record = consumer.poll(java.time.Duration.ofMillis(5000)).asScala
    for (data <- record.iterator) {
      sendTuple(data.value())
      //helper ! KafkaData(data.value())
    }
    AllocateSpoutTask(Duration(restart.toInt, MILLISECONDS), "newLine")
  }
}

case class KafkaData(data: String)

class KafkaSpoutBackPressure(queue: LinkedBlockingQueue[String]) extends SpoutTrait {
  var startingSpeed = System.getenv().getOrDefault("STARTING_SPEED", "1000").trim.toInt
  var increaseBy = System.getenv().getOrDefault("INCREASE_BY", "1000").trim.toInt

  override def preStart(): Unit = {
    super.preStart()
    SchedulerUtil.scheduleTask(initialDelay = 60 seconds, interval = 60 second, receiver = self, message = "increase")
  }

  override protected def ProcessSpoutTask(receivedMessage: Any): Unit = receivedMessage match {
    case StartSpout      => AllocateSpoutTask(Duration(1, MILLISECONDS), "newLine")
    case KafkaData(data) => queue.put(data)
    case "newLine"       => consumeFromQueue
    case "increase"      => startingSpeed += increaseBy
    case _               => println("message not recognized!")
  }

  def consumeFromQueue() = {
    for (i <- 0 to startingSpeed / 100) {
      if (!queue.isEmpty) {
        sendTuple(queue.take())
      }
    }
    AllocateSpoutTask(Duration(10, MILLISECONDS), "newLine")
  }
}
Example 12
Source File: CmdManage.scala From scrapy4s with GNU Lesser General Public License v3.0 | 5 votes |
package com.scrapy4s.manage

import java.util.concurrent.LinkedBlockingQueue

import com.scrapy4s.http.Response
import com.scrapy4s.http.proxy.ProxyResource
import com.scrapy4s.spider.Spider
import com.scrapy4s.thread.{DefaultThreadPool, ThreadPool}

case class CmdManage(
                      var spiders: Seq[Spider] = Seq.empty[Spider],
                      var history: Option[Boolean] = None,
                      var threadCount: Int = Runtime.getRuntime.availableProcessors() * 2,
                      var currentThreadPool: Option[ThreadPool] = None,
                      var test_func: Option[Response => Boolean] = None,
                      var proxyResource: Option[ProxyResource] = None
                    ) extends Manage {

  def setThreadPool(tp: DefaultThreadPool) = {
    this.currentThreadPool = Option(tp)
    this
  }

  def setThreadCount(count: Int) = {
    this.threadCount = count
    this
  }

  def setHistory(h: Boolean) = {
    this.history = Option(h)
    this
  }

  def setProxyResource(proxy: ProxyResource) = {
    this.proxyResource = Some(proxy)
    this
  }

  def setTestFunc(newTestFunc: Response => Boolean) = {
    this.test_func = Some(newTestFunc)
    this
  }

  def register(spider: Spider) = {
    this.spiders = spiders :+ spider
    this
  }

  lazy private val threadPool = {
    currentThreadPool match {
      case Some(tp) => tp
      case _ =>
        new DefaultThreadPool(
          "cmdManage",
          threadCount,
          new LinkedBlockingQueue[Runnable]()
        )
    }
  }

  override def start(): Unit = {
    spiders
      .foreach(s => {
        s.setThreadPool(threadPool)
        if (test_func.isDefined) {
          s.setTestFunc(test_func.get)
        }
        if (history.isDefined) {
          s.setHistory(history.get)
        }
        if (proxyResource.isDefined) {
          s.setProxyResource(proxyResource.get)
        }
      })
    spiders.foreach(_.start())
  }
}
Example 13
Source File: SimpleRowSourceTask.scala From ohara with Apache License 2.0 | 5 votes |
package oharastream.ohara.client.kafka

import java.util
import java.util.concurrent.LinkedBlockingQueue
import java.util.concurrent.atomic.AtomicBoolean

import oharastream.ohara.common.data.{Row, Serializer}
import oharastream.ohara.common.setting.TopicKey
import oharastream.ohara.common.util.Releasable
import oharastream.ohara.kafka.Consumer
import oharastream.ohara.kafka.connector.{RowSourceRecord, RowSourceTask, TaskSetting}

import scala.jdk.CollectionConverters._
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.Future

class SimpleRowSourceTask extends RowSourceTask {

  private[this] var settings: TaskSetting = _
  private[this] val queue = new LinkedBlockingQueue[RowSourceRecord]
  private[this] val closed = new AtomicBoolean(false)
  private[this] var consumer: Consumer[Row, Array[Byte]] = _

  override protected def run(settings: TaskSetting): Unit = {
    this.settings = settings
    this.consumer = Consumer
      .builder()
      .connectionProps(settings.stringValue(SimpleRowSourceConnector.BROKER))
      .groupId(settings.name)
      .topicKeys(java.util.Set.copyOf(TopicKey.toTopicKeys(settings.stringValue(SimpleRowSourceConnector.INPUT))))
      .offsetFromBegin()
      .keySerializer(Serializer.ROW)
      .valueSerializer(Serializer.BYTES)
      .build()
    Future {
      try while (!closed.get) {
        consumer
          .poll(java.time.Duration.ofSeconds(2))
          .asScala
          .filter(_.key.isPresent)
          .map(_.key.get)
          .flatMap(
            row => settings.topicKeys().asScala.map(topic => RowSourceRecord.builder().row(row).topicKey(topic).build())
          )
          .foreach(r => queue.put(r))
      } finally Releasable.close(consumer)
    }
  }

  override protected def pollRecords(): util.List[RowSourceRecord] =
    Iterator.continually(queue.poll()).takeWhile(_ != null).toSeq.asJava

  override protected def terminate(): Unit = {
    closed.set(true)
    consumer.wakeup()
  }
}
Example 14
Source File: ThreadUtil.scala From coursier with Apache License 2.0 | 5 votes |
package coursier.cache.internal

import java.util.concurrent.{ExecutorService, LinkedBlockingQueue, ScheduledExecutorService, ScheduledThreadPoolExecutor, ThreadFactory, ThreadPoolExecutor, TimeUnit}
import java.util.concurrent.atomic.AtomicInteger

object ThreadUtil {

  private val poolNumber = new AtomicInteger(1)

  def daemonThreadFactory(): ThreadFactory = {

    val poolNumber0 = poolNumber.getAndIncrement()
    val threadNumber = new AtomicInteger(1)

    new ThreadFactory {
      def newThread(r: Runnable) = {
        val threadNumber0 = threadNumber.getAndIncrement()
        val t = new Thread(r, s"coursier-pool-$poolNumber0-thread-$threadNumber0")
        t.setDaemon(true)
        t.setPriority(Thread.NORM_PRIORITY)
        t
      }
    }
  }

  def fixedThreadPool(size: Int): ExecutorService = {
    val factory = daemonThreadFactory()
    // 1 min keep alive, so that threads get stopped a bit after resolution / downloading is done
    val executor = new ThreadPoolExecutor(
      size, size,
      1L, TimeUnit.MINUTES,
      new LinkedBlockingQueue[Runnable],
      factory
    )
    executor.allowCoreThreadTimeOut(true)
    executor
  }

  def fixedScheduledThreadPool(size: Int): ScheduledExecutorService = {
    val factory = daemonThreadFactory()
    val executor = new ScheduledThreadPoolExecutor(size, factory)
    executor.setKeepAliveTime(1L, TimeUnit.MINUTES)
    executor.allowCoreThreadTimeOut(true)
    executor
  }

  def withFixedThreadPool[T](size: Int)(f: ExecutorService => T): T = {
    var pool: ExecutorService = null
    try {
      pool = fixedThreadPool(size)
      f(pool)
    } finally {
      if (pool != null)
        pool.shutdown()
    }
  }
}
Example 15
Source File: ConsoleFixture.scala From codepropertygraph with Apache License 2.0 | 5 votes |
package io.shiftleft.console.testing

import java.nio.file.Path
import java.util.concurrent.LinkedBlockingQueue

import better.files.Dsl.mkdir
import better.files.File
import io.shiftleft.console.cpgcreation.{CpgGenerator, LanguageFrontend}
import io.shiftleft.console.{Console, ConsoleConfig, DefaultAmmoniteExecutor, InstallConfig}
import io.shiftleft.console.workspacehandling.{Project, ProjectFile, WorkspaceLoader}
import io.shiftleft.fuzzyc2cpg.FuzzyC2Cpg
import io.shiftleft.proto.cpg.Cpg.CpgStruct

object ConsoleFixture {
  def apply[T <: Console[Project]](constructor: String => T = { x =>
    new TestConsole(x)
  })(fun: (T, File) => Unit): Unit = {
    File.usingTemporaryDirectory("console") { workspaceDir =>
      File.usingTemporaryDirectory("console") { codeDir =>
        mkdir(codeDir / "dir1")
        mkdir(codeDir / "dir2")
        (codeDir / "dir1" / "foo.c")
          .write("int main(int argc, char **argv) { char *ptr = 0x1 + argv; return argc; }")
        (codeDir / "dir2" / "bar.c").write("int bar(int x) { return x; }")
        val console = constructor(workspaceDir.toString)
        fun(console, codeDir)
      }
    }
  }
}

object TestWorkspaceLoader extends WorkspaceLoader[Project] {
  override def createProject(projectFile: ProjectFile, path: Path): Project = Project(projectFile, path)
}

class TestConsole(workspaceDir: String) extends Console[Project](DefaultAmmoniteExecutor, TestWorkspaceLoader) {
  override def config = new ConsoleConfig(
    install = new InstallConfig(Map("SHIFTLEFT_OCULAR_INSTALL_DIR" -> workspaceDir))
  )

  override val cpgGenerator = new TestCpgGenerator(config)
}

class TestCpgGenerator(config: ConsoleConfig) extends CpgGenerator(config) {
  override def createFrontendByPath(
      inputPath: String,
  ): Option[LanguageFrontend] = {
    Some(new FuzzyCTestingFrontend)
  }

  override def createFrontendByLanguage(language: String): Option[LanguageFrontend] = {
    Some(new FuzzyCTestingFrontend)
  }

  private class FuzzyCTestingFrontend extends LanguageFrontend {

    override def generate(inputPath: String, outputPath: String, namespaces: List[String]): Option[String] = {
      val queue = new LinkedBlockingQueue[CpgStruct.Builder]()
      val factory = new io.shiftleft.fuzzyc2cpg.output.overflowdb.OutputModuleFactory(outputPath, queue)
      val fuzzyc = new FuzzyC2Cpg(factory)
      File(inputPath).list.foreach(println(_))
      fuzzyc.runAndOutput(Set(inputPath), Set(".c"))
      Some(outputPath)
    }

    def isAvailable: Boolean = true
  }
}
Example 16
Source File: ParallelCpgPass.scala From codepropertygraph with Apache License 2.0 | 5 votes |
package io.shiftleft.passes

import java.util.concurrent.LinkedBlockingQueue

import io.shiftleft.SerializedCpg
import io.shiftleft.codepropertygraph.Cpg
import org.apache.logging.log4j.{LogManager, Logger}
import org.slf4j.LoggerFactory

abstract class ParallelCpgPass[T](cpg: Cpg, outName: String = "") extends CpgPassBase {
  private val logger: Logger = LogManager.getLogger(classOf[ParallelCpgPass[T]])

  def init(): Unit = {}

  def partIterator: Iterator[T]

  def runOnPart(part: T): Option[DiffGraph]

  override def createAndApply(): Unit = {
    withWriter() { writer =>
      enqueueInParallel(writer)
    }
  }

  override def createApplySerializeAndStore(serializedCpg: SerializedCpg, inverse: Boolean, prefix: String): Unit = {
    withWriter(serializedCpg, prefix, inverse) { writer =>
      enqueueInParallel(writer)
    }
  }

  private def withWriter[X](serializedCpg: SerializedCpg = new SerializedCpg(),
                            prefix: String = "",
                            inverse: Boolean = false)(f: Writer => Unit): Unit = {
    val writer = new Writer(serializedCpg, prefix, inverse)
    val writerThread = new Thread(writer)
    writerThread.setName("Writer")
    writerThread.start()
    try {
      f(writer)
    } catch {
      case exception: Exception =>
        logger.warn(exception)
    } finally {
      writer.enqueue(None)
      writerThread.join()
    }
  }

  private def enqueueInParallel(writer: Writer): Unit = {
    init()
    val it = new ParallelIteratorExecutor(partIterator).map { part =>
      // Note: write.enqueue(runOnPart(part)) would be wrong because
      // it would terminate the writer as soon as a pass returns None
      // as None is used as a termination symbol for the queue
      runOnPart(part).foreach(diffGraph => writer.enqueue(Some(diffGraph)))
    }
    consume(it)
  }

  private def consume(it: Iterator[_]): Unit = {
    while (it.hasNext) {
      it.next()
    }
  }

  private class Writer(serializedCpg: SerializedCpg, prefix: String, inverse: Boolean) extends Runnable {

    private val logger = LoggerFactory.getLogger(getClass)

    private val queue = new LinkedBlockingQueue[Option[DiffGraph]]

    def enqueue(diffGraph: Option[DiffGraph]): Unit = {
      queue.put(diffGraph)
    }

    override def run(): Unit = {
      try {
        var terminate = false
        var index: Int = 0
        while (!terminate) {
          queue.take() match {
            case Some(diffGraph) =>
              val appliedDiffGraph = DiffGraph.Applier.applyDiff(diffGraph, cpg, inverse)
              if (!serializedCpg.isEmpty) {
                val overlay = serialize(appliedDiffGraph, inverse)
                val name = generateOutFileName(prefix, outName, index)
                index += 1
                store(overlay, name, serializedCpg)
              }
            case None =>
              logger.info("Shutting down WriterThread")
              terminate = true
          }
        }
      } catch {
        case _: InterruptedException => logger.info("Interrupted WriterThread")
      }
    }
  }
}
Example 17
Source File: FetchThread.scala From incubator-retired-gearpump with Apache License 2.0 | 5 votes |
package org.apache.gearpump.streaming.kafka.lib.source.consumer

import java.nio.channels.ClosedByInterruptException
import java.util.concurrent.LinkedBlockingQueue

import kafka.common.TopicAndPartition
import org.apache.gearpump.streaming.kafka.lib.util.KafkaClient
import org.apache.gearpump.streaming.kafka.util.KafkaConfig
import org.slf4j.Logger

import org.apache.gearpump.util.LogUtil

object FetchThread {
  private val LOG: Logger = LogUtil.getLogger(classOf[FetchThread])

  val factory = new FetchThreadFactory

  class FetchThreadFactory extends java.io.Serializable {
    def getFetchThread(config: KafkaConfig, client: KafkaClient): FetchThread = {
      val fetchThreshold = config.getInt(KafkaConfig.FETCH_THRESHOLD_CONFIG)
      val fetchSleepMS = config.getLong(KafkaConfig.FETCH_SLEEP_MS_CONFIG)
      val startOffsetTime = config.getLong(KafkaConfig.CONSUMER_START_OFFSET_CONFIG)
      FetchThread(fetchThreshold, fetchSleepMS, startOffsetTime, client)
    }
  }

  def apply(fetchThreshold: Int,
      fetchSleepMS: Long,
      startOffsetTime: Long,
      client: KafkaClient): FetchThread = {
    val createConsumer = (tp: TopicAndPartition) =>
      client.createConsumer(tp.topic, tp.partition, startOffsetTime)

    val incomingQueue = new LinkedBlockingQueue[KafkaMessage]()
    val sleeper = new ExponentialBackoffSleeper(
      backOffMultiplier = 2.0,
      initialDurationMs = 100L,
      maximumDurationMs = 10000L)
    new FetchThread(createConsumer, incomingQueue, sleeper, fetchThreshold, fetchSleepMS)
  }
}

  private def fetchMessage: Boolean = {
    if (incomingQueue.size >= fetchThreshold) {
      false
    } else {
      consumers.foldLeft(false) { (hasNext, tpAndConsumer) =>
        val (_, consumer) = tpAndConsumer
        if (consumer.hasNext) {
          incomingQueue.put(consumer.next())
          true
        } else {
          hasNext
        }
      }
    }
  }

  private def createAllConsumers: Map[TopicAndPartition, KafkaConsumer] = {
    topicAndPartitions.map(tp => tp -> createConsumer(tp)).toMap
  }

  private def resetConsumers(nextOffsets: Map[TopicAndPartition, Long]): Unit = {
    consumers.values.foreach(_.close())
    consumers = createAllConsumers
    consumers.foreach { case (tp, consumer) =>
      consumer.setStartOffset(nextOffsets(tp))
    }
  }
}
Example 18
Source File: TwitterSource.scala From incubator-retired-gearpump with Apache License 2.0 | 5 votes |
package org.apache.gearpump.streaming.twitter

import java.time.Instant
import java.util.concurrent.LinkedBlockingQueue

import org.apache.gearpump.Message
import org.apache.gearpump.streaming.source.DataSource
import org.apache.gearpump.streaming.task.TaskContext
import org.apache.gearpump.streaming.twitter.TwitterSource.{Factory, MessageListener}
import twitter4j._
import twitter4j.conf.Configuration

class TwitterSource private[twitter](
    twitterFactory: Factory,
    filterQuery: Option[FilterQuery],
    statusListener: MessageListener
) extends DataSource {

  private var twitterStream: TwitterStream = _

  class Factory(factory: TwitterStreamFactory) extends Serializable {
    def getTwitterStream: TwitterStream = {
      factory.getInstance()
    }
  }

  def apply(conf: Configuration): TwitterSource = {
    new TwitterSource(new Factory(new TwitterStreamFactory(conf)), None, new MessageListener)
  }

  def apply(conf: Configuration, query: FilterQuery): TwitterSource = {
    new TwitterSource(new Factory(new TwitterStreamFactory(conf)), Option(query), new MessageListener)
  }
}
Example 19
Source File: AbstractEventProcessor.scala From spark-atlas-connector with Apache License 2.0 | 5 votes |
package com.hortonworks.spark.atlas

import java.util.concurrent.{LinkedBlockingQueue, TimeUnit}

import scala.reflect.ClassTag
import scala.util.control.NonFatal

import com.google.common.annotations.VisibleForTesting
import com.hortonworks.spark.atlas.utils.Logging

abstract class AbstractEventProcessor[T: ClassTag] extends Logging {
  def conf: AtlasClientConf

  private val capacity = conf.get(AtlasClientConf.BLOCKING_QUEUE_CAPACITY).toInt

  private[atlas] val eventQueue = new LinkedBlockingQueue[T](capacity)

  private val timeout = conf.get(AtlasClientConf.BLOCKING_QUEUE_PUT_TIMEOUT).toInt

  private val eventProcessThread = new Thread {
    override def run(): Unit = {
      eventProcess()
    }
  }

  def pushEvent(event: T): Unit = {
    event match {
      case e: T =>
        if (!eventQueue.offer(e, timeout, TimeUnit.MILLISECONDS)) {
          logError(s"Fail to put event $e into queue within time limit $timeout, will throw it")
        }
      case _ => // Ignore other events
    }
  }

  def startThread(): Unit = {
    eventProcessThread.setName(this.getClass.getSimpleName + "-thread")
    eventProcessThread.setDaemon(true)

    val ctxClassLoader = Thread.currentThread().getContextClassLoader
    if (ctxClassLoader != null && getClass.getClassLoader != ctxClassLoader) {
      eventProcessThread.setContextClassLoader(ctxClassLoader)
    }

    eventProcessThread.start()
  }

  protected def process(e: T): Unit

  @VisibleForTesting
  private[atlas] def eventProcess(): Unit = {
    var stopped = false
    while (!stopped) {
      try {
        Option(eventQueue.poll(3000, TimeUnit.MILLISECONDS)).foreach { e =>
          process(e)
        }
      } catch {
        case _: InterruptedException =>
          logDebug("Thread is interrupted")
          stopped = true
        case NonFatal(f) =>
          logWarn(s"Caught exception during parsing event", f)
      }
    }
  }
}
Example 20
Source File: LongRunningPool.scala From CMAK with Apache License 2.0 | 5 votes |
package kafka.manager.base

import java.util.concurrent.{LinkedBlockingQueue, ThreadPoolExecutor, TimeUnit}

import akka.pattern._

import scala.concurrent.{ExecutionContext, Future}
import scala.reflect.ClassTag
import scala.util.Try

case class LongRunningPoolConfig(threadPoolSize: Int, maxQueueSize: Int)

trait LongRunningPoolActor extends BaseActor {

  protected val longRunningExecutor = new ThreadPoolExecutor(
    longRunningPoolConfig.threadPoolSize,
    longRunningPoolConfig.threadPoolSize,
    0L,
    TimeUnit.MILLISECONDS,
    new LinkedBlockingQueue[Runnable](longRunningPoolConfig.maxQueueSize))

  protected val longRunningExecutionContext = ExecutionContext.fromExecutor(longRunningExecutor)

  protected def longRunningPoolConfig: LongRunningPoolConfig

  protected def longRunningQueueFull(): Unit

  protected def hasCapacityFor(taskCount: Int): Boolean = {
    longRunningExecutor.getQueue.remainingCapacity() >= taskCount
  }

  @scala.throws[Exception](classOf[Exception])
  override def postStop(): Unit = {
    log.info("Shutting down long running executor...")
    Try(longRunningExecutor.shutdown())
    super.postStop()
  }

  protected def longRunning[T](fn: => Future[T])(implicit ec: ExecutionContext, ct: ClassTag[T]): Unit = {
    if (longRunningExecutor.getQueue.remainingCapacity() == 0) {
      longRunningQueueFull()
    } else {
      fn match {
        case _ if ct.runtimeClass == classOf[Unit] =>
          //do nothing with unit
        case f =>
          f pipeTo sender
      }
    }
  }
}
Example 21
Source File: DefaultExecutors.scala From zio with Apache License 2.0 | 5 votes |
package zio.internal

import java.util.concurrent.{ LinkedBlockingQueue, RejectedExecutionException, ThreadPoolExecutor, TimeUnit }

private[internal] abstract class DefaultExecutors {
  final def makeDefault(yieldOpCount: Int): Executor =
    fromThreadPoolExecutor(_ => yieldOpCount) {
      val corePoolSize  = Runtime.getRuntime.availableProcessors() * 2
      val maxPoolSize   = corePoolSize
      val keepAliveTime = 60000L
      val timeUnit      = TimeUnit.MILLISECONDS
      val workQueue     = new LinkedBlockingQueue[Runnable]()
      val threadFactory = new NamedThreadFactory("zio-default-async", true)

      val threadPool = new ThreadPoolExecutor(
        corePoolSize,
        maxPoolSize,
        keepAliveTime,
        timeUnit,
        workQueue,
        threadFactory
      )
      threadPool.allowCoreThreadTimeOut(true)

      threadPool
    }

  final def fromThreadPoolExecutor(yieldOpCount0: ExecutionMetrics => Int)(
    es: ThreadPoolExecutor
  ): Executor =
    new Executor {
      private[this] def metrics0 = new ExecutionMetrics {
        def concurrency: Int = es.getMaximumPoolSize()

        def capacity: Int = {
          val queue = es.getQueue()
          val remaining = queue.remainingCapacity()
          if (remaining == Int.MaxValue) remaining
          else remaining + queue.size
        }

        def size: Int = es.getQueue().size

        def workersCount: Int = es.getPoolSize()

        def enqueuedCount: Long = es.getTaskCount()

        def dequeuedCount: Long = enqueuedCount - size.toLong
      }

      def metrics = Some(metrics0)

      def yieldOpCount = yieldOpCount0(metrics0)

      def submit(runnable: Runnable): Boolean =
        try {
          es.execute(runnable)
          true
        } catch {
          case _: RejectedExecutionException => false
        }

      def here = false
    }
}
Example 22
Source File: LeanMessagingProvider.scala From openwhisk with Apache License 2.0 | 5 votes |
package org.apache.openwhisk.connector.lean

import java.util.concurrent.BlockingQueue
import java.util.concurrent.LinkedBlockingQueue

import scala.collection.mutable.Map
import scala.collection.concurrent.TrieMap
import scala.concurrent.duration.FiniteDuration
import scala.util.Success
import scala.util.Try

import akka.actor.ActorSystem
import org.apache.openwhisk.common.Logging
import org.apache.openwhisk.core.WhiskConfig
import org.apache.openwhisk.core.connector.MessageConsumer
import org.apache.openwhisk.core.connector.MessageProducer
import org.apache.openwhisk.core.connector.MessagingProvider
import org.apache.openwhisk.core.entity.ByteSize

  val queues: Map[String, BlockingQueue[Array[Byte]]] =
    new TrieMap[String, BlockingQueue[Array[Byte]]]

  def getConsumer(config: WhiskConfig, groupId: String, topic: String, maxPeek: Int, maxPollInterval: FiniteDuration)(
    implicit logging: Logging,
    actorSystem: ActorSystem): MessageConsumer = {
    val queue = queues.getOrElseUpdate(topic, new LinkedBlockingQueue[Array[Byte]]())
    new LeanConsumer(queue, maxPeek)
  }

  def getProducer(config: WhiskConfig, maxRequestSize: Option[ByteSize] = None)(
    implicit logging: Logging,
    actorSystem: ActorSystem): MessageProducer =
    new LeanProducer(queues)

  def ensureTopic(config: WhiskConfig, topic: String, topicConfigKey: String, maxMessageBytes: Option[ByteSize] = None)(
    implicit logging: Logging): Try[Unit] = {
    if (queues.contains(topic)) {
      Success(logging.info(this, s"topic $topic already existed"))
    } else {
      queues.put(topic, new LinkedBlockingQueue[Array[Byte]]())
      Success(logging.info(this, s"topic $topic created"))
    }
  }
}
Example 23
Source File: TestConnector.scala From openwhisk with Apache License 2.0 | 5 votes |
package org.apache.openwhisk.core.connector.test

import java.util.ArrayList
import java.util.concurrent.LinkedBlockingQueue

import scala.concurrent.Future
import scala.concurrent.duration._
import scala.collection.JavaConverters._

import org.apache.kafka.clients.producer.RecordMetadata
import org.apache.kafka.common.TopicPartition
import common.StreamLogging

import org.apache.openwhisk.common.Counter
import org.apache.openwhisk.core.connector.Message
import org.apache.openwhisk.core.connector.MessageConsumer
import org.apache.openwhisk.core.connector.MessageProducer

class TestConnector(topic: String, override val maxPeek: Int, allowMoreThanMax: Boolean)
    extends MessageConsumer
    with StreamLogging {

  override def peek(duration: FiniteDuration, retry: Int = 0) = {
    val msgs = new ArrayList[Message]
    queue.synchronized {
      queue.drainTo(msgs, if (allowMoreThanMax) Int.MaxValue else maxPeek)
      msgs.asScala map { m =>
        offset += 1
        (topic, -1, offset, m.serialize.getBytes)
      }
    }
  }

  override def commit(retry: Int = 0) = {
    if (throwCommitException) {
      throw new Exception("commit failed")
    } else {
      // nothing to do
    }
  }

  def occupancy = queue.size

  def send(msg: Message): Future[RecordMetadata] = {
    producer.send(topic, msg)
  }

  def send(msgs: Seq[Message]): Future[RecordMetadata] = {
    import scala.language.reflectiveCalls
    producer.sendBulk(topic, msgs)
  }

  def close() = {
    closed = true
    producer.close()
  }

  private val producer = new MessageProducer {
    def send(topic: String, msg: Message, retry: Int = 0): Future[RecordMetadata] = {
      queue.synchronized {
        if (queue.offer(msg)) {
          logging.info(this, s"put: $msg")
          Future.successful(new RecordMetadata(new TopicPartition(topic, 0), 0, queue.size, -1, Long.box(-1L), -1, -1))
        } else {
          logging.error(this, s"put failed: $msg")
          Future.failed(new IllegalStateException("failed to write msg"))
        }
      }
    }

    def sendBulk(topic: String, msgs: Seq[Message]): Future[RecordMetadata] = {
      queue.synchronized {
        if (queue.addAll(msgs.asJava)) {
          logging.info(this, s"put: ${msgs.length} messages")
          Future.successful(new RecordMetadata(new TopicPartition(topic, 0), 0, queue.size, -1, Long.box(-1L), -1, -1))
        } else {
          logging.error(this, s"put failed: ${msgs.length} messages")
          Future.failed(new IllegalStateException("failed to write msg"))
        }
      }
    }

    def close() = {}

    def sentCount() = counter.next()

    val counter = new Counter()
  }

  var throwCommitException = false
  private val queue = new LinkedBlockingQueue[Message]()
  @volatile private var closed = false
  private var offset = -1L
}
Example 24
Source File: JdbcSinkWriter.scala From eel-sdk with Apache License 2.0 | 5 votes |
package io.eels.component.jdbc

import java.sql.Connection
import java.util.concurrent.{Executors, LinkedBlockingQueue, TimeUnit}

import com.sksamuel.exts.Logging
import io.eels.component.jdbc.dialect.JdbcDialect
import io.eels.schema.{Field, StructType}
import io.eels.{Row, SinkWriter}

class JdbcSinkWriter(schema: StructType,
                     connFn: () => Connection,
                     table: String,
                     createTable: Boolean,
                     dropTable: Boolean,
                     dialect: JdbcDialect,
                     threads: Int,
                     batchSize: Int,
                     batchesPerCommit: Int,
                     autoCommit: Boolean,
                     bufferSize: Int) extends SinkWriter with Logging {
  logger.info(s"Creating Jdbc writer with $threads threads, batch size $batchSize, autoCommit=$autoCommit")
  require(bufferSize >= batchSize)

  private val Sentinel = Row(StructType(Field("____jdbcsentinel")), Seq(null))

  import com.sksamuel.exts.concurrent.ExecutorImplicits._

  // the buffer is a concurrent receiver for the write method. It needs to hold enough elements so that
  // the invokers of this class can keep pumping in rows while we wait for a buffer to fill up.
  // the buffer size must be >= batch size or we'll never fill up enough to trigger a batch
  private val buffer = new LinkedBlockingQueue[Row](bufferSize)

  // the coordinator pool is just a single thread that runs the coordinator
  private val coordinatorPool = Executors.newSingleThreadExecutor()

  private lazy val inserter = {
    val inserter = new JdbcInserter(connFn, table, schema, autoCommit, batchesPerCommit, dialect)
    if (dropTable) {
      inserter.dropTable()
    }
    if (createTable) {
      inserter.ensureTableCreated()
    }
    inserter
  }

  // todo this needs to allow multiple batches at once
  coordinatorPool.submit {
    try {
      logger.debug("Starting JdbcWriter Coordinator")
      // once we receive the pill its all over for the writer
      Iterator.continually(buffer.take)
        .takeWhile(_ != Sentinel)
        .grouped(batchSize).withPartial(true)
        .foreach { batch =>
          inserter.insertBatch(batch)
        }
      logger.debug("Write completed; shutting down coordinator")
    } catch {
      case t: Throwable => logger.error("Some error in coordinator", t)
    }
  }
  // the coordinate only runs the one task, that is to read from the buffer
  // and do the inserts
  coordinatorPool.shutdown()

  override def close(): Unit = {
    buffer.put(Sentinel)
    logger.info("Closing JDBC Writer... waiting on writes to finish")
    coordinatorPool.awaitTermination(1, TimeUnit.DAYS)
  }

  // when we get a row to write, we won't commit it immediately to the database,
  // but we'll buffer it so we can do batched inserts
  override def write(row: Row): Unit = {
    buffer.put(row)
  }
}
Example 25
Source File: DataStreamPublisher.scala From eel-sdk with Apache License 2.0 | 5 votes |
package io.eels.datastream

import java.util.concurrent.LinkedBlockingQueue
import java.util.concurrent.atomic.{AtomicBoolean, AtomicMarkableReference, AtomicReference}

import com.sksamuel.exts.collection.BlockingQueueConcurrentIterator
import io.eels.Row
import io.eels.schema.StructType

class DataStreamPublisher(override val schema: StructType) extends DataStream {

  private val queue = new LinkedBlockingQueue[Seq[Row]]
  private val running = new AtomicBoolean(true)
  private val failure = new AtomicReference[Throwable](null)

  def isCancelled: Boolean = !running.get

  override def subscribe(subscriber: Subscriber[Seq[Row]]): Unit = {
    try {
      subscriber.subscribed(new Subscription {
        override def cancel(): Unit = {
          queue.clear()
          queue.put(Row.Sentinel)
          running.set(false)
        }
      })
      BlockingQueueConcurrentIterator(queue, Row.Sentinel).takeWhile(_ => running.get).foreach(subscriber.next)
      failure.get match {
        case t: Throwable => subscriber.error(t)
        case _ => subscriber.completed()
      }
    } catch {
      case t: Throwable => subscriber.error(t)
    }
  }

  def publish(row: Seq[Row]): Unit = queue.put(row)

  def error(t: Throwable): Unit = {
    failure.set(t)
    queue.clear()
    queue.add(Row.Sentinel)
  }

  def close(): Unit = queue.add(Row.Sentinel)
}
Example 26
Source File: Publisher.scala From eel-sdk with Apache License 2.0 | 5 votes |
package io.eels.datastream

import java.util.concurrent.atomic.{AtomicInteger, AtomicReference}
import java.util.concurrent.{ExecutorService, LinkedBlockingQueue, TimeUnit}

import com.sksamuel.exts.Logging
import com.sksamuel.exts.collection.BlockingQueueConcurrentIterator
import com.sksamuel.exts.concurrent.ExecutorImplicits._

import scala.collection.concurrent.TrieMap

trait Publisher[T] {
  def subscribe(subscriber: Subscriber[T])
}

object Publisher extends Logging {

  def merge[T](publishers: Seq[Publisher[T]], sentinel: T)(implicit executor: ExecutorService): Publisher[T] = {
    new Publisher[T] {
      override def subscribe(s: Subscriber[T]): Unit = {

        // subscribers to the returned publisher will be fed from an intermediate queue
        val queue = new LinkedBlockingQueue[Either[Throwable, T]](DataStream.DefaultBufferSize)

        // to keep track of how many subscribers are yet to finish; only once all upstream
        // publishers have finished will this subscriber be completed.
        val outstanding = new AtomicInteger(publishers.size)

        // we make a collection of all the subscriptions, so if there's an error at any point in the
        // merge, we can cancel all upstream producers
        val subscriptions = TrieMap.empty[Subscription, Int]

        // this cancellable can be used to cancel all the subscriptions
        val subscription = new Subscription {
          override def cancel(): Unit = subscriptions.keys.foreach(_.cancel)
        }

        // status flag that an error occured and the subscriptions should watch for it
        val errorRef = new AtomicReference[Throwable](null)

        def terminate(t: Throwable): Unit = {
          logger.error(s"Error in merge", t)
          errorRef.set(t)
          subscription.cancel()
          queue.clear()
          queue.put(Right(sentinel))
        }

        // each subscriber will occupy its own thread, on the provided executor
        publishers.foreach { publisher =>
          executor.submit {
            try {
              publisher.subscribe(new Subscriber[T] {
                override def subscribed(sub: Subscription): Unit = if (sub != null) subscriptions.put(sub, 1)
                override def next(t: T): Unit = {
                  var success = true
                  do {
                    success = queue.offer(Right(t), 100, TimeUnit.MILLISECONDS)
                  } while (!success && errorRef.get == null)
                }
                override def error(t: Throwable): Unit = terminate(t)
                override def completed(): Unit = {
                  if (outstanding.decrementAndGet() == 0) {
                    logger.debug("All subscribers have finished; marking queue with sentinel")
                    queue.put(Right(sentinel))
                  }
                }
              })
            } catch {
              case t: Throwable => terminate(t)
            }
          }
        }

        try {
          s.subscribed(subscription)
          BlockingQueueConcurrentIterator(queue, Right(sentinel)).takeWhile(_ => errorRef.get == null).foreach {
            case Left(t) => s.error(t)
            case Right(t) => s.next(t)
          }
          // once we've had an error that's it, we don't complete the subscriber
          if (errorRef.get == null) s.completed() else s.error(errorRef.get)
        } catch {
          case t: Throwable =>
            logger.error("Error in merge subscriber", t)
            subscription.cancel()
            s.error(t)
        }

        logger.debug("Merge subscriber has completed")
      }
    }
  }
}
Example 27
Source File: PruneWorker.scala From spatial with MIT License | 5 votes |
package spatial.dse

import java.util.concurrent.LinkedBlockingQueue

import argon.State
import spatial.metadata.params._
import spatial.metadata.bounds._

import scala.collection.mutable.ArrayBuffer

case class PruneWorker(
  start: Int,
  size: Int,
  prods: Seq[BigInt],
  dims: Seq[BigInt],
  indexedSpace: Seq[(Domain[_], Int)],
  restricts: Set[Restrict],
  queue: LinkedBlockingQueue[Seq[Int]]
)(implicit state: State) extends Runnable {

  private def isLegalSpace(): Boolean = restricts.forall(_.evaluate())

  def run(): Unit = {
    println(s"Searching from $start until ${start + size}")
    val pts = (start until (start + size)).filter { i =>
      indexedSpace.foreach { case (domain, d) => domain.set(((i / prods(d)) % dims(d)).toInt) }
      isLegalSpace()
    }
    queue.put(pts)
  }
}