java.util.concurrent.Callable Scala Examples
The following examples show how to use java.util.concurrent.Callable.
Each example is drawn from an open-source project; the source file, project name, and license are noted above each listing.
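As a quick orientation before the project examples: a Callable[T] is like a Runnable that returns a value (and may throw a checked exception). In Scala it is usually written as an anonymous class and handed to an ExecutorService, which returns a java.util.concurrent.Future. A minimal, self-contained sketch:

import java.util.concurrent.{Callable, Executors}

object CallableHello {
  def main(args: Array[String]): Unit = {
    val executor = Executors.newSingleThreadExecutor()
    try {
      // call() returns a value, unlike Runnable.run()
      val task = new Callable[Int] {
        override def call(): Int = 21 + 21
      }
      val future = executor.submit(task) // java.util.concurrent.Future[Int]
      println(future.get())              // blocks until the result is ready: 42
    } finally {
      executor.shutdown()
    }
  }
}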
Example 1
Source File: TsStreamingTest.scala From spark-riak-connector with Apache License 2.0
package com.basho.riak.spark.streaming

import java.nio.ByteBuffer
import java.util.concurrent.{Callable, Executors, TimeUnit}

import com.basho.riak.spark._
import com.basho.riak.spark.rdd.RiakTSTests
import com.basho.riak.spark.rdd.timeseries.{AbstractTimeSeriesTest, TimeSeriesData}
import com.fasterxml.jackson.core.JsonParser
import com.fasterxml.jackson.databind.{DeserializationFeature, ObjectMapper, SerializationFeature}
import com.fasterxml.jackson.module.scala.DefaultScalaModule
import org.apache.spark.sql.Row
import org.junit.Assert._
import org.junit.experimental.categories.Category
import org.junit.{After, Before, Test}

@Category(Array(classOf[RiakTSTests]))
class TsStreamingTest extends AbstractTimeSeriesTest(false) with SparkStreamingFixture {

  protected final val executorService = Executors.newCachedThreadPool()
  private val dataSource = new SocketStreamingDataSource
  private var port = -1

  @Before
  def setUp(): Unit = {
    port = dataSource.start(client => {
      testData
        .map(tolerantMapper.writeValueAsString)
        .foreach(x => client.write(ByteBuffer.wrap(s"$x\n".getBytes)))
      logInfo(s"${testData.length} values were sent to the client")
    })
  }

  @After
  def tearDown(): Unit = {
    dataSource.stop()
  }

  @Test(timeout = 10 * 1000) // 10 seconds timeout
  def saveToRiak(): Unit = {
    executorService.submit(new Runnable {
      override def run(): Unit = {
        ssc.socketTextStream("localhost", port)
          .map(string => {
            val tsdata = new ObjectMapper()
              .configure(DeserializationFeature.FAIL_ON_NULL_FOR_PRIMITIVES, true)
              .configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, true)
              .configure(JsonParser.Feature.ALLOW_SINGLE_QUOTES, true)
              .configure(JsonParser.Feature.ALLOW_UNQUOTED_FIELD_NAMES, true)
              .configure(SerializationFeature.WRITE_DATES_AS_TIMESTAMPS, false)
              .registerModule(DefaultScalaModule)
              .readValue(string, classOf[TimeSeriesData])

            Row(1, "f", tsdata.time, tsdata.user_id, tsdata.temperature_k)
          })
          .saveToRiakTS(bucketName)

        ssc.start()
        ssc.awaitTerminationOrTimeout(5 * 1000)
      }
    })

    val result = executorService.submit(new Callable[Array[Seq[Any]]] {
      override def call(): Array[Seq[Any]] = {
        var rdd = sc.riakTSTable[Row](bucketName)
          .sql(s"SELECT user_id, temperature_k FROM $bucketName $sqlWhereClause")
        var count = rdd.count()
        while (count < testData.length) {
          // the streaming job above is still loading data; poll until it all arrives
          TimeUnit.SECONDS.sleep(2)
          rdd = sc.riakTSTable[Row](bucketName)
            .sql(s"SELECT user_id, temperature_k FROM $bucketName $sqlWhereClause")
          count = rdd.count()
        }
        rdd.collect().map(_.toSeq)
      }
    }).get()

    assertEquals(testData.length, result.length)
    assertEqualsUsingJSONIgnoreOrder(
      """
        |[
        |  ['bryce',305.37],
        |  ['bryce',300.12],
        |  ['bryce',295.95],
        |  ['ratman',362.121],
        |  ['ratman',3502.212]
        |]
      """.stripMargin, result)
  }
}
Example 2
Source File: FetchJob.scala From Swallow with Apache License 2.0
package com.intel.hibench.common.streaming.metrics

import java.util.concurrent.Callable

import com.codahale.metrics.Histogram

class FetchJob(zkConnect: String, topic: String, partition: Int,
               histogram: Histogram) extends Callable[FetchJobResult] {

  override def call(): FetchJobResult = {
    val result = new FetchJobResult()
    val consumer = new KafkaConsumer(zkConnect, topic, partition)
    while (consumer.hasNext) {
      val times = new String(consumer.next(), "UTF-8").split(":")
      val startTime = times(0).toLong
      val endTime = times(1).toLong
      // correct negative value which might be caused by difference of system time
      histogram.update(Math.max(0, endTime - startTime))
      result.update(startTime, endTime)
    }
    println(s"Collected ${result.count} results for partition: $partition")
    result
  }
}

class FetchJobResult(var minTime: Long, var maxTime: Long, var count: Long) {

  def this() = this(Long.MaxValue, Long.MinValue, 0)

  def update(startTime: Long, endTime: Long): Unit = {
    count += 1
    if (startTime < minTime) {
      minTime = startTime
    }
    if (endTime > maxTime) {
      maxTime = endTime
    }
  }
}
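A hypothetical driver for the FetchJob above, fanning one Callable out per partition with ExecutorService.invokeAll and merging the per-partition results; the connection string, topic, and partition count are illustrative assumptions, not taken from the project:

import java.util.concurrent.Executors
import com.codahale.metrics.{Histogram, UniformReservoir}
import scala.collection.JavaConverters._

object FetchDriver {
  def main(args: Array[String]): Unit = {
    val histogram = new Histogram(new UniformReservoir())
    val pool = Executors.newFixedThreadPool(4)
    // one FetchJob per partition; invokeAll blocks until every call() returns
    val jobs = (0 until 4).map(p => new FetchJob("localhost:2181", "benchmark", p, histogram))
    val results = pool.invokeAll(jobs.asJava).asScala.map(_.get())
    println(s"window: ${results.map(_.minTime).min} .. ${results.map(_.maxTime).max}")
    pool.shutdown()
  }
}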
Example 3
Source File: AsynchbasePatcher.scala From incubator-s2graph with Apache License 2.0
package org.apache.s2graph.core.storage.hbase

import java.lang.Integer.valueOf
import java.nio.charset.StandardCharsets
import java.util.concurrent.Callable

import net.bytebuddy.ByteBuddy
import net.bytebuddy.description.modifier.Visibility.PUBLIC
import net.bytebuddy.dynamic.loading.ClassLoadingStrategy
import net.bytebuddy.implementation.FieldAccessor
import net.bytebuddy.implementation.MethodDelegation.to
import net.bytebuddy.implementation.bind.annotation.{SuperCall, This}
import net.bytebuddy.matcher.ElementMatchers._
import org.apache.commons.io.IOUtils
import org.hbase.async._
import org.objectweb.asm.Opcodes.{ACC_FINAL, ACC_PRIVATE, ACC_PROTECTED, ACC_PUBLIC}
import org.objectweb.asm._

import scala.collection.JavaConversions._

object AsynchbasePatcher {

  // NOTE: this is an excerpt; the object's other members referenced below
  // (classLoader, defineClass) are defined in the full source file.

  private def loadClass(name: String): Class[_] = {
    classLoader.getResources(s"org/hbase/async/$name.class").toSeq.headOption match {
      case Some(url) =>
        val stream = url.openStream()
        val bytes = try {
          IOUtils.toByteArray(stream)
        } finally {
          stream.close()
        }

        // patch the bytecode so that the class is no longer final
        // and the methods are all accessible
        val cw = new ClassWriter(ClassWriter.COMPUTE_FRAMES)
        new ClassReader(bytes).accept(new ClassAdapter(cw) {
          override def visit(version: Int, access: Int, name: String, signature: String,
                             superName: String, interfaces: Array[String]): Unit = {
            super.visit(version, access & ~ACC_FINAL, name, signature, superName, interfaces)
          }
          override def visitMethod(access: Int, name: String, desc: String, signature: String,
                                   exceptions: Array[String]): MethodVisitor = {
            super.visitMethod(access & ~ACC_PRIVATE & ~ACC_PROTECTED & ~ACC_FINAL | ACC_PUBLIC,
              name, desc, signature, exceptions)
          }
        }, 0)
        val patched = cw.toByteArray

        defineClass.setAccessible(true)
        defineClass.invoke(classLoader, s"org.hbase.async.$name", patched,
          valueOf(0), valueOf(patched.length)).asInstanceOf[Class[_]]
      case None =>
        throw new ClassNotFoundException(s"Could not find Asynchbase class: $name")
    }
  }
}
Example 4
Source File: ConcurrentUtil.scala From sona with Apache License 2.0
package com.tencent.angel.sona.tree.util

import java.util.concurrent.{Callable, ExecutorService, Executors, Future}

object ConcurrentUtil {
  private[tree] var numThread: Int = 1
  private[tree] var threadPool: ExecutorService = _
  private[tree] val DEFAULT_BATCH_SIZE = 1000000

  private[tree] def reset(parallelism: Int): Unit = {
    ConcurrentUtil.getClass.synchronized {
      this.numThread = parallelism
      this.threadPool = Executors.newFixedThreadPool(parallelism)
    }
  }

  private[tree] def rangeParallel[A](f: (Int, Int) => A, start: Int, end: Int,
                                     batchSize: Int = DEFAULT_BATCH_SIZE): Array[Future[A]] = {
    val futures = Array.ofDim[Future[A]](MathUtil.idivCeil(end - start, batchSize))
    var cur = start
    var threadId = 0
    while (cur < end) {
      val i = cur
      val j = (cur + batchSize) min end
      futures(threadId) = threadPool.submit(new Callable[A] {
        override def call(): A = f(i, j)
      })
      cur = j
      threadId += 1
    }
    futures
  }

  private[tree] def shutdown(): Unit = ConcurrentUtil.getClass.synchronized {
    if (threadPool != null)
      threadPool.shutdown()
  }
}
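A sketch of how rangeParallel might be used, e.g. summing slices of an array. Since the members are private[tree], the caller would have to live in the same package, and MathUtil.idivCeil is assumed to be the project's ceiling integer division helper:

// hypothetical caller, assumed to sit inside the com.tencent.angel.sona.tree package
val data = Array.tabulate(10000)(_.toLong)
ConcurrentUtil.reset(parallelism = 4)
val futures = ConcurrentUtil.rangeParallel[Long](
  (from, until) => data.slice(from, until).sum, // each Callable sums one slice
  0, data.length, batchSize = 2500)
val total = futures.map(_.get()).sum // blocks until all batches finish
ConcurrentUtil.shutdown()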
Example 5
Source File: CancelFutureTask.scala From lemon-schedule with GNU General Public License v2.0
package com.gabry.job.experiment

import java.util.concurrent.{Callable, FutureTask}

import com.gabry.job.utils.TaskClassLoader

object CancelFutureTask {

  def main(args: Array[String]): Unit = {
    // D:/MyCode/lemon-schedule/lemon-schedule-examples/target/lemon-schedule-examples-1.0-SNAPSHOT.jar
    val classLoader = new TaskClassLoader("D:/MyCode/lemon-schedule/lemon-schedule-examples/target/lemon-schedule-examples-1.0-SNAPSHOT.jar")
    classLoader.init()
    classLoader.destroy()

    import scala.concurrent._
    import ExecutionContext.Implicits.global

    class Task extends Callable[Int] {
      override def call(): Int = {
        println(s"thread [${Thread.currentThread().getName}]")
        val count = 5
        0 until count foreach { i =>
          println(s"task alive $i")
          Thread.sleep(1000)
        }
        count
      }
    }

    val futureTask = new FutureTask[Int](new Task)
    Future {
      futureTask.run()
      println(s"futureTask completed successfully: ${futureTask.get()}")
    }
    Thread.sleep(3 * 1000)
    println("cancelling futureTask early")
    futureTask.cancel(true)
    Thread.sleep(3 * 1000)
    println(s"futureTask result: ${futureTask.get()}")
  }
}
Example 6
Source File: GraphiteMockServer.scala From kafka-offset-monitor-graphite with Apache License 2.0
package pl.allegro.tech.kafka.offset.monitor.graphite

import java.io.InputStream
import java.lang
import java.net.ServerSocket
import java.util.concurrent.{Callable, ExecutorService, Executors}

import com.jayway.awaitility.Awaitility._
import com.jayway.awaitility.Duration

class GraphiteMockServer(port: Int) {

  var serverSocket: ServerSocket = null
  val executor: ExecutorService = Executors.newFixedThreadPool(10)
  @volatile var listen: Boolean = false

  var expectedMetrics: scala.collection.mutable.Map[String, Double] = scala.collection.mutable.Map()
  var receivedMetrics: scala.collection.mutable.Map[String, Double] = scala.collection.mutable.Map()

  def start() {
    serverSocket = new ServerSocket(port)
    listen = true
    handleConnections()
  }

  private def handleConnections() {
    executor.execute(new Runnable {
      override def run() {
        while (listen) {
          readData(serverSocket.accept().getInputStream())
        }
      }
    })
  }

  private def readData(stream: InputStream) {
    executor.execute(new Runnable {
      override def run() {
        scala.io.Source.fromInputStream(stream).getLines().foreach((line) => handleMetric(line))
      }
    })
  }

  private def handleMetric(metricLine: String) {
    val metric = metricLine.split(" ")(0)
    val value = metricLine.split(" ")(1)

    if (expectedMetrics.contains(metric)) {
      receivedMetrics += (metric -> value.toDouble)
    }
  }

  def stop() {
    listen = false
    serverSocket.close()
  }

  def reset() {
    expectedMetrics.clear()
    receivedMetrics.clear()
  }

  def expectMetric(metricNamePattern: String, value: Double) {
    expectedMetrics += (metricNamePattern -> value)
  }

  def waitUntilReceived() {
    await.atMost(Duration.FIVE_SECONDS).until(new Callable[lang.Boolean] {
      override def call(): lang.Boolean = {
        expectedMetrics.forall { case (k, v) =>
          receivedMetrics.get(k).exists((rv) => v == rv)
        }
      }
    })
  }
}
Example 7
Source File: EventManager.scala From Mycat-spider with Apache License 2.0
package turbo.crawler.power

import java.util.ArrayList
import java.util.Hashtable
import java.util.concurrent.Callable
import java.util.concurrent.FutureTask
import java.util.concurrent.ScheduledThreadPoolExecutor

import turbo.crawler.Lifecycle
import turbo.crawler.Logable
import turbo.crawler.StringAdapter
import java.util.Collections

/**
 * Event manager
 * @author mclaren
 */
object EventManager extends Lifecycle with Logable with StringAdapter with MessageDriven {
  /**
   * Thread pool
   */
  private val exec = new ScheduledThreadPoolExecutor(sysprop("fetch.threads", "100").toInt)

  /**
   * Event handlers
   */
  private val handlers = new Hashtable[String, java.util.List[Evt => Unit]]()

  /**
   * Read a JVM system property, falling back to a default
   */
  private def sysprop(key: String, default: String) = {
    var matched = System.getProperty(key)
    if (isNotEmpty(matched)) matched else default
  }

  /**
   * Shut the system down
   */
  override def shutdown = {
    try {
      while (true) {
        if (exec.getActiveCount == 0) {
          exec.shutdown()
          throw new RuntimeException()
        }
      }
    } catch {
      case e: Exception => logger.info("Fetch completed and shut down concurrent fetchers.")
    }
  }

  /**
   * Register an event listener with the system
   */
  def attachEvent(eventId: String, handler: Evt => Unit): Unit = {
    handlers.synchronized {
      var hds = handlers.get(eventId)
      if (hds == null) hds = new ArrayList[Evt => Unit]()
      hds.add(handler)
      handlers.put(eventId, hds)
    }
  }

  /**
   * Dispatch an event to its handlers
   */
  override def fireEvent(evt: Evt): Unit = {
    if (handlers.containsKey(evt.eventId))
      new WrapList[Evt => Unit](handlers.get(evt.eventId)).foreach(fd => dispatchEventConcurrently(evt, fd))
    else
      logger.error("No handlers for event " + evt)
  }

  /**
   * Dispatch an event concurrently
   */
  private def dispatchEventConcurrently(evt: Evt, f: Evt => Unit) = {
    var task = new FutureTask[Unit](new Callable[Unit]() {
      def call: Unit = f(evt)
    })
    this.exec.submit(task)
  }

  /**
   * Wrap a Java list in Scala style
   */
  private class WrapList[T](list: java.util.List[T]) {
    def foreach(f: T => Unit) = for (i <- 0 to list.size() - 1) f(list.get(i))
  }
}
Example 8
Source File: PythonFunction.scala From polynote with Apache License 2.0
package polynote.runtime.python

import java.util.concurrent.{Callable, ExecutorService}

import jep.python.PyCallable
import shapeless.Witness

import scala.collection.JavaConverters._
import scala.language.dynamics

class PythonFunction(callable: PyCallable, runner: PythonObject.Runner) extends TypedPythonObject[PythonFunction.function](callable, runner) with Dynamic {

  private def unwrapArg(arg: Any): Any = arg match {
    case pyObj: PythonObject => pyObj.unwrap
    case obj => obj
  }

  override def applyDynamic(method: String)(args: Any*): PythonObject = {
    if (method == "apply" || method == "call" || method == "__call__")
      callPosArgs(callable, args.asInstanceOf[Seq[AnyRef]])
    else
      super.applyDynamic(method)(args: _*)
  }

  override def applyDynamicNamed(method: String)(args: (String, Any)*): PythonObject = {
    if (method == "apply" || method == "call" || method == "__call__")
      callKwArgs(callable, args)
    else
      super.applyDynamicNamed(method)(args: _*)
  }
}

object PythonFunction {
  type function = Witness.`"function"`.T
}
Example 9
Source File: Neo4jUtils.scala From neo4j-spark-connector with Apache License 2.0
package org.neo4j.spark.utils

import java.sql.Timestamp
import java.time._
import java.util.concurrent.Callable
import java.util.function

import io.github.resilience4j.retry.{Retry, RetryConfig}
import org.apache.spark.sql.catalyst.util.DateTimeUtils
import org.neo4j.driver.exceptions.{ServiceUnavailableException, SessionExpiredException, TransientException}
import org.neo4j.driver.{Driver, Result, Session, Transaction}
import org.neo4j.spark.Neo4jConfig
import org.slf4j.LoggerFactory

class Neo4jUtils

object Neo4jUtils {

  private val logger = LoggerFactory.getLogger(classOf[Neo4jUtils])

  def close(driver: Driver, session: Session): Unit = {
    try {
      if (session != null && session.isOpen) {
        closeSafety(session)
      }
    } finally {
      if (driver != null) {
        closeSafety(driver)
      }
    }
  }

  private def closeSafety(closable: AutoCloseable): Unit = {
    try {
      closable.close()
    } catch {
      case e: Throwable =>
        logger.error("Exception while trying to close an AutoCloseable, because of the following exception", e)
    }
  }

  private val retryConfig = RetryConfig.custom.retryExceptions(
      classOf[SessionExpiredException], classOf[ServiceUnavailableException] // retry on the same exceptions the driver does
    )
    .retryOnException(new function.Predicate[Throwable] {
      override def test(exception: Throwable): Boolean = exception match {
        case t: TransientException =>
          val code = t.code()
          !("Neo.TransientError.Transaction.Terminated" == code) &&
            !("Neo.TransientError.Transaction.LockClientStopped" == code)
        case _ => false
      }
    })
    .maxAttempts(3)
    .build

  def executeTxWithRetries[T](neo4jConfig: Neo4jConfig,
                              query: String,
                              params: java.util.Map[String, AnyRef],
                              write: Boolean): (Driver, Session, Transaction, Result) = {
    val driver: Driver = neo4jConfig.driver()
    val session: Session = driver.session(neo4jConfig.sessionConfig(write))
    Retry.decorateCallable(
        Retry.of("neo4jTransactionRetryPool", retryConfig),
        new Callable[(Driver, Session, Transaction, Result)] {
          override def call(): (Driver, Session, Transaction, Result) = {
            val transaction = session.beginTransaction()
            val result = transaction.run(query, params)
            (driver, session, transaction, result)
          }
        }
      )
      .call()
  }

  def convert(value: AnyRef): AnyRef = value match {
    case m: ZonedDateTime => new Timestamp(DateTimeUtils.fromUTCTime(m.toInstant.toEpochMilli, m.getZone.getId))
    case m: LocalDateTime => new Timestamp(DateTimeUtils.fromUTCTime(m.toInstant(ZoneOffset.UTC).toEpochMilli, "UTC"))
    case m: LocalDate => java.sql.Date.valueOf(m)
    case m: OffsetTime => new Timestamp(m.atDate(LocalDate.ofEpochDay(0)).toInstant.toEpochMilli)
    case _ => value
  }
}
Example 10
Source File: ProtoBuffTest.scala From c4proto with Apache License 2.0
package ee.cone.c4actor

import java.lang.management.ManagementFactory
import java.util
import java.util.concurrent.{Callable, Executors}

import ee.cone.c4actor.AnyAdapter._
import ee.cone.c4actor.AnyOrigProtocol.N_AnyOrig
import ee.cone.c4actor.ProtoBuffTestProtocol.{D_TestOrig, D_TestOrigForDecode}
import ee.cone.c4di.{c4, c4app}
import ee.cone.c4proto._

import scala.collection.immutable
import scala.util.Random

trait ProtoBuffTestProtocolAppBase

@protocol("ProtoBuffTestProtocolApp") object ProtoBuffTestProtocol {

  @Id(0x1) case class D_TestOrig(
    @Id(0x2) srcId: String,
    @Id(0x3) list: List[String],
    @Id(0x4) byteStr: List[N_AnyOrig]
  )

  @Id(0x5) case class D_TestOrigForDecode(
    @Id(0x6) srcId: String,
    @Id(0x7) number: Long
  )
}

@c4app class SeqProtoBuffTestAppBase extends ProtoBuffTestApp
@c4app class ParProtoBuffTestAppBase extends ProtoBuffTestApp

trait ProtoBuffTestApp
  extends VMExecutionApp with ExecutableApp
    with BaseApp with ProtoApp
    with ProtoBuffTestProtocolApp
    with AnyOrigProtocolApp

class SerializationRunnable(pid: Int, testOrigs: Seq[D_TestOrigForDecode], qAdapterRegistry: QAdapterRegistry) extends Callable[Long] {

  def call(): Long = {
    TestCode.test(testOrigs, qAdapterRegistry)
  }
}

object TestCode {
  def test(testOrigs: Seq[D_TestOrigForDecode], qAdapterRegistry: QAdapterRegistry): Long = {
    val time = System.currentTimeMillis()
    val encoded: immutable.Seq[N_AnyOrig] = testOrigs.map(encode(qAdapterRegistry)(_))
    val testOrigsss: immutable.Seq[D_TestOrig] = encoded.zipWithIndex.map {
      case (a, b) => D_TestOrig(b.toString, a.toString.split(",").toList, List(a))
    }
    val encoded2: immutable.Seq[N_AnyOrig] = testOrigsss.map(encode(qAdapterRegistry)(_))
    val decoded: immutable.Seq[D_TestOrig] = encoded2.map(decode[D_TestOrig](qAdapterRegistry))
    // assert(testOrigsss == decoded)
    val time2 = System.currentTimeMillis()
    time2 - time
  }
}
Example 11
Source File: DynamicExecution.scala From hazelcast-scala with Apache License 2.0
package com.hazelcast.Scala.serialization

import java.util.{Arrays, Comparator}
import java.util.concurrent.Callable

import scala.reflect.ClassTag

import com.hazelcast.Scala.{Aggregator, Pipe}
import com.hazelcast.core.IFunction
import com.hazelcast.map.{EntryBackupProcessor, EntryProcessor}
import com.hazelcast.nio.{ObjectDataInput, ObjectDataOutput}
import com.hazelcast.query.Predicate

object DynamicExecution extends DynamicExecution {
  protected def serializeBytecodeFor(cls: Class[_]) = true
}

abstract class DynamicExecution extends SerializerEnum(Defaults) {
  protected def serializeBytecodeFor(cls: Class[_]): Boolean

  private[this] val loaderByClass = new ClassValue[Option[ByteArrayClassLoader]] {
    private[this] val excludePackages = Set("com.hazelcast.", "scala.", "java.", "javax.")

    private def include(cls: Class[_]): Boolean =
      !excludePackages.exists(cls.getName.startsWith) && serializeBytecodeFor(cls)

    def computeValue(cls: Class[_]): Option[ByteArrayClassLoader] =
      if (include(cls)) {
        try {
          Some(ByteArrayClassLoader(cls))
        } catch {
          case ncdf: NoClassDefFoundError =>
            classByName.get(cls.getName) match {
              case Some((bytes, classForBytes)) if cls == classForBytes =>
                Some(new ByteArrayClassLoader(cls.getName, bytes))
              case _ => throw ncdf
            }
        }
      } else None
  }

  private[this] val classByName = new collection.concurrent.TrieMap[String, (Array[Byte], Class[_])]

  private class ClassBytesSerializer[T: ClassTag] extends StreamSerializer[T] {

    def write(out: ObjectDataOutput, any: T): Unit = {
      out.writeUTF(any.getClass.getName)
      loaderByClass.get(any.getClass) match {
        case Some(cl) => out.writeByteArray(cl.bytes)
        case _ => out.writeByteArray(Array.emptyByteArray)
      }
      UnsafeSerializer.write(out, any)
    }

    def read(inp: ObjectDataInput): T = {
      val className = inp.readUTF()
      val classBytes = inp.readByteArray()
      val cls =
        if (classBytes.length == 0) {
          Class.forName(className)
        } else {
          classByName.get(className) match {
            case Some((bytes, cls)) if Arrays.equals(classBytes, bytes) => cls
            case _ =>
              val cl = new ByteArrayClassLoader(className, classBytes)
              val cls = Class.forName(className, true, cl)
              classByName.put(className, classBytes -> cls)
              cls
          }
        }
      UnsafeSerializer.read(inp, cls).asInstanceOf[T]
    }
  }

  type S[T] = StreamSerializer[T]

  val Function0Ser: S[Function0[_]] = new ClassBytesSerializer
  val Function1Ser: S[Function1[_, _]] = new ClassBytesSerializer
  val Function2Ser: S[Function2[_, _, _]] = new ClassBytesSerializer
  val Function3Ser: S[Function3[_, _, _, _]] = new ClassBytesSerializer
  val PartialFunctionSer: S[PartialFunction[_, _]] = new ClassBytesSerializer
  val EntryProcessorSer: S[EntryProcessor[_, _]] = new ClassBytesSerializer
  val EntryBackupProcessorSer: S[EntryBackupProcessor[_, _]] = new ClassBytesSerializer
  val CallableSer: S[Callable[_]] = new ClassBytesSerializer
  val RunnableSer: S[Runnable] = new ClassBytesSerializer
  val PredicateSer: S[Predicate[_, _]] = new ClassBytesSerializer
  val PipeSer: S[Pipe[_]] = new ClassBytesSerializer
  val AggregatorSer: S[Aggregator[_, _]] = new ClassBytesSerializer
  val ComparatorSer: S[Comparator[_]] = new ClassBytesSerializer
  val IFunctionSer: S[IFunction[_, _]] = new ClassBytesSerializer
}
Example 12
Source File: PrepareStatementCache.scala From quill with Apache License 2.0
package io.getquill.context.cassandra

import java.util.concurrent.Callable

import com.google.common.base.Charsets
import com.google.common.cache.CacheBuilder
import com.google.common.hash.Hashing

class PrepareStatementCache[V <: AnyRef](size: Long) {

  private val cache =
    CacheBuilder
      .newBuilder
      .maximumSize(size)
      .build[java.lang.Long, V]()

  private val hasher = Hashing.goodFastHash(128)

  def apply(stmt: String)(prepare: String => V): V = {
    cache.get(
      hash(stmt),
      new Callable[V] {
        override def call: V = prepare(stmt)
      }
    )
  }

  def invalidate(stmt: String): Unit = cache.invalidate(hash(stmt))

  private def hash(string: String): java.lang.Long = {
    hasher
      .hashString(string, Charsets.UTF_8)
      .asLong()
  }
}
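Usage follows the Guava cache-through pattern: the Callable runs only on a miss, and later calls with the same statement hit the cached value. A minimal sketch with String standing in for a driver's prepared statement type:

val cache = new PrepareStatementCache[String](size = 100)

def prepare(cql: String): String = {
  println(s"preparing: $cql") // printed on the first call only
  cql.toUpperCase
}

cache("select * from users")(prepare) // miss: prepare runs
cache("select * from users")(prepare) // hit: cached value, prepare is not called
cache.invalidate("select * from users")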
Example 13
Source File: WindowRenderer.scala From Vegas with MIT License
package vegas.render

import java.util.concurrent.{Callable, FutureTask}

import scalafx.application.Platform
import scalafx.scene.Scene
import scalafx.stage.Stage
import scalafx.scene.web.WebView
import javafx.embed.swing.JFXPanel
import com.sun.javafx.webkit.WebConsoleListener

import scala.collection.mutable

class Window {
  Platform.implicitExit = false

  val jsErrors = mutable.Buffer[String]()
  val webView = new WebView {}
  private val webEngine = webView.engine

  private def html(specJson: String) = StaticHTMLRenderer(specJson).pageHTML()

  def close = stage.close

  def load(specJson: String) = {
    webEngine.loadContent(html(specJson))
  }

  // Log JS errors
  WebConsoleListener.setDefaultListener(new WebConsoleListener {
    def messageAdded(webView: javafx.scene.web.WebView, message: String, lineNumber: Int, sourceId: String) = {
      if (message.contains("Error")) jsErrors.append(message)
      println(jsErrors)
    }
  })

  val stage = new Stage {
    title.value = "Vegas"
    width = 300
    height = 300
    scene = new Scene {
      content = webView
    }
  }

  Platform.runLater {
    stage.showAndWait()
  }
}

case class WindowRenderer(specJson: String) {
  lazy val window = new Window()

  def onUIThread[T](op: => T): T =
    if (Platform.isFxApplicationThread) {
      op
    } else {
      val futureTask = new FutureTask(new Callable[T] {
        override def call: T = onUIThread(op)
      })
      Platform.runLater(futureTask)
      futureTask.get()
    }

  def errors: List[String] = onUIThread { window.jsErrors.toList }

  def close = onUIThread { window.close }

  def show = {
    val _ = WindowRenderer.init
    Platform.runLater {
      window.load(specJson)
    }
  }
}

object WindowRenderer {
  lazy val init = new JFXPanel()
}
Example 14
Source File: RetryUtils.scala From spark-druid-olap with Apache License 2.0
package org.sparklinedata.druid

import java.util.concurrent.{Callable, TimeUnit}

import com.google.common.base.Throwables
import org.apache.spark.sql.SPLLogging

import scala.reflect._

object RetryUtils extends SPLLogging {

  var DEFAULT_RETRY_COUNT: Int = 10
  var DEFAULT_RETRY_SLEEP: Long = TimeUnit.SECONDS.toMillis(30)

  def retryUntil(callable: Callable[Boolean],
                 expectedValue: Boolean,
                 delayInMillis: Long,
                 retryCount: Int,
                 taskMessage: String): Unit = {
    try {
      var currentTry: Int = 0
      while (callable.call != expectedValue) {
        if (currentTry > retryCount) {
          throw new IllegalStateException(
            s"Max number of retries[$retryCount] exceeded for Task[$taskMessage]. Failing."
          )
        }
        logInfo(s"Attempt[$currentTry]: " +
          s"Task $taskMessage still not complete. Next retry in $delayInMillis ms")
        Thread.sleep(delayInMillis)
        currentTry += 1
      }
    } catch {
      case e: Exception => throw Throwables.propagate(e)
    }
  }

  def ifException[E <: Exception : ClassTag] =
    (e: Exception) => classTag[E].runtimeClass.isAssignableFrom(e.getClass)

  def backoff(start: Int, cap: Int): Stream[Int] = {
    def next(current: Int): Stream[Int] =
      Stream.cons(current, next(Math.min(current * 2, cap)))
    next(start)
  }

  def execWithBackOff[X](taskMessage: String, f: Int => Option[X])(
    numTries: Int = Int.MaxValue, start: Int = 200, cap: Int = 5000): X = {
    val b = backoff(start, cap).iterator
    var tries = 0
    while (tries < numTries) {
      val nextBackOff = b.next()
      f(nextBackOff) match {
        case Some(x) => return x
        case _ =>
          Thread.sleep(nextBackOff)
          tries += 1
      }
    }
    throw new IllegalStateException(
      s"Max number of retries[$numTries] exceeded for Task[$taskMessage]. Failing."
    )
  }

  def retryOnErrors[X](isTransients: (Exception => Boolean)*)(
    taskMessage: String,
    x: => X,
    numTries: Int = Int.MaxValue,
    start: Int = 200,
    cap: Int = 5000): X = {
    execWithBackOff(taskMessage, { nextBackOff =>
      try Some(x)
      catch {
        case e: Exception if isTransients.find(_(e)).isDefined =>
          logWarning(s"Transient error in $taskMessage, retrying after $nextBackOff ms")
          None
      }
    })(numTries, start, cap)
  }

  def retryOnError(isTransient: Exception => Boolean) = new {
    def apply[X](taskMessage: String, x: => X)(
      numTries: Int = Int.MaxValue, start: Int = 200, cap: Int = 5000) =
      retryOnErrors(isTransient)(taskMessage, x, numTries, start, cap)
  }
}
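A sketch of retryOnErrors in use with the ifException predicate; flakyFetch is a made-up operation that sometimes throws java.io.IOException:

import java.io.IOException
import org.sparklinedata.druid.RetryUtils._

def flakyFetch(): String =
  if (scala.util.Random.nextBoolean()) "payload"
  else throw new IOException("transient failure")

// retries only on IOException, up to 5 attempts, with exponential backoff
val payload: String = retryOnErrors(ifException[IOException])("fetch payload", flakyFetch(), numTries = 5)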
Example 15
Source File: IvyLockReporter.scala From sbt-optimizer with Apache License 2.0
package net.virtualvoid.optimizer

import java.io.File
import java.util.concurrent.Callable

import sbt.Keys
import xsbti._

object IvyLockReporter {
  case class SpentTimeInLock(lockFile: File, startNanos: Long, endNanos: Long) extends Span

  trait Listener {
    def spentTimeInLock(spent: SpentTimeInLock): Unit
  }

  val listener = new ThreadLocal[Listener]

  def withListener[T](l: Listener)(body: ⇒ T): T = {
    val oldListener = listener.get
    listener.set(l)
    try body
    finally listener.set(oldListener)
  }

  def locked(spent: SpentTimeInLock): Unit = {
    val l = listener.get
    if (l ne null) l.spentTimeInLock(spent)
  }

  def install() = Keys.appConfiguration in sbt.Global ~= { oldApp ⇒
    new AppConfiguration {
      def arguments(): Array[String] = oldApp.arguments()
      def baseDirectory(): File = oldApp.baseDirectory()
      def provider(): AppProvider = new AppProvider {
        def newMain(): AppMain = oldApp.provider.newMain
        def components(): ComponentProvider = oldApp.provider.components
        def entryPoint(): Class[_] = oldApp.provider.entryPoint
        def scalaProvider(): ScalaProvider = new ScalaProvider {
          val oldProvider = oldApp.provider.scalaProvider

          def launcher(): Launcher = new Launcher {
            val oldLauncher = oldProvider.launcher

            def globalLock(): GlobalLock = new GlobalLock {
              def apply[T](lockFile: File, run: Callable[T]): T = {
                val start = System.nanoTime()
                oldLauncher.globalLock.apply(lockFile, new Callable[T] {
                  def call(): T = {
                    val end = System.nanoTime()
                    IvyLockReporter.locked(IvyLockReporter.SpentTimeInLock(lockFile, start, end))
                    run.call()
                  }
                })
              }
            }
            def isOverrideRepositories: Boolean = oldLauncher.isOverrideRepositories
            def ivyHome(): File = oldLauncher.ivyHome
            def getScala(version: String): ScalaProvider = oldLauncher.getScala(version)
            def getScala(version: String, reason: String): ScalaProvider = oldLauncher.getScala(version, reason)
            def getScala(version: String, reason: String, scalaOrg: String): ScalaProvider = oldLauncher.getScala(version, reason, scalaOrg)
            def topLoader(): ClassLoader = oldLauncher.topLoader
            def ivyRepositories(): Array[Repository] = oldLauncher.ivyRepositories
            def app(id: ApplicationID, version: String): AppProvider = oldLauncher.app(id, version)
            def checksums(): Array[String] = oldLauncher.checksums
            def appRepositories(): Array[Repository] = oldLauncher.appRepositories
            def bootDirectory(): File = oldLauncher.bootDirectory
          }
          def compilerJar(): File = oldProvider.compilerJar
          def app(id: ApplicationID): AppProvider = oldProvider.app(id)
          def loader(): ClassLoader = oldProvider.loader
          def libraryJar(): File = oldProvider.libraryJar
          def version(): String = oldProvider.version
          def jars(): Array[File] = oldProvider.jars
        }
        def loader(): ClassLoader = oldApp.provider.loader
        def mainClasspath(): Array[File] = oldApp.provider.mainClasspath
        def mainClass(): Class[_ <: AppMain] = oldApp.provider.mainClass
        def id(): ApplicationID = oldApp.provider.id
      }
    }
  }
}
Example 16
Source File: JBasicUtils.scala From scala-commons with MIT License
package com.avsystem.commons
package jiop

import java.util.Comparator
import java.util.concurrent.Callable
import java.{lang => jl, math => jm, util => ju}

import com.avsystem.commons.misc.{Sam, TimestampConversions}

trait JBasicUtils {
  def jRunnable(code: => Any) = Sam[Runnable](code)
  def jCallable[T](expr: => T) = Sam[Callable[T]](expr)
  def jComparator[T](cmp: (T, T) => Int) = Sam[Comparator[T]](cmp)

  implicit def jDateTimestampConversions(date: JDate): TimestampConversions =
    new TimestampConversions(date.getTime)

  type JByte = jl.Byte
  type JShort = jl.Short
  type JInteger = jl.Integer
  type JLong = jl.Long
  type JFloat = jl.Float
  type JDouble = jl.Double
  type JBoolean = jl.Boolean
  type JCharacter = jl.Character
  type JBigInteger = jm.BigInteger
  type JBigDecimal = jm.BigDecimal
  type JDate = ju.Date
  type JNumber = jl.Number
  type JVoid = jl.Void
  type JEnum[E <: jl.Enum[E]] = jl.Enum[E]
  type JStringBuilder = jl.StringBuilder
}
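Assuming these helpers are mixed into the library's root package object (as the chained package clause above suggests), a by-name expression becomes a Callable via the Sam conversion:

import java.util.concurrent.Executors
import com.avsystem.commons._ // assumed to bring jCallable and friends into scope

val executor = Executors.newSingleThreadExecutor()
val future = executor.submit(jCallable(40 + 2)) // the expression becomes Callable.call()
println(future.get()) // 42
executor.shutdown()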
Example 17
Source File: ScalaObjectHandlerTest.scala From fintrospect with Apache License 2.0
package io.fintrospect.templating

import java.io.{StringReader, StringWriter}
import java.util.concurrent.Callable

import com.github.mustachejava.DefaultMustacheFactory
import org.scalatest.{FunSpec, Matchers}

class ScalaObjectHandlerTest extends FunSpec with Matchers {

  describe("ScalaObjectHandler") {
    it("maps") {
      render("{{#map}}{{test}}{{test2}}{{/map}}", Map("map" -> Map("test" -> "fred"))) shouldBe "fred"
    }

    it("handler") {
      val model = new {
        val list = Seq(new {
          lazy val optionalHello = Some("Hello")
          val futureWorld = new Callable[String] {
            def call(): String = "world"
          }
          val test = true
          val num = 0
        }, new {
          val optionalHello = Some("Goodbye")
          val futureWorld = new Callable[String] {
            def call(): String = "thanks for all the fish"
          }
          lazy val test = false
          val map = Map("value" -> "test")
          val num = 1
        })
      }

      render("{{#list}}{{optionalHello}}, {{futureWorld}}!" +
        "{{#test}}?{{/test}}{{^test}}!{{/test}}{{#num}}?{{/num}}{{^num}}!{{/num}}" +
        "{{#map}}{{value}}{{/map}}\n{{/list}}", model) shouldBe "Hello, world!?!\nGoodbye, thanks for all the fish!!?test\n"
    }

    it("streams") {
      val model = new {
        val stream = Stream(
          new { val value = "hello" },
          new { val value = "world" })
      }
      render("{{#stream}}{{value}}{{/stream}}", model) shouldBe "helloworld"
    }

    it("unit") {
      val model = new { val test = if (false) "test" }
      render("{{test}}", model) shouldBe ""
    }

    it("options") {
      val model = new {
        val foo = Some("Hello")
        val bar = None
      }
      render("{{foo}}{{bar}}", model) shouldBe "Hello"
    }
  }

  private def render(template: String, model: Any): String = {
    val mf = new DefaultMustacheFactory()
    mf.setObjectHandler(new ScalaObjectHandler)
    val m = mf.compile(new StringReader(template), "name")
    val sw = new StringWriter
    m.execute(sw, model).close()
    sw.toString
  }
}
Example 18
Source File: Unit.scala From learn-scala-java-devs with Apache License 2.0
package s4j.scala.chapter04

import java.util.concurrent.Callable

object Unit {

  // () is the single instance of the type Unit
  val example: Unit = ()

  // similar to Java's Void:
  //
  // public class DoNothing implements Callable<Void> {
  //   @Override
  //   public Void call() throws Exception {
  //     return null;
  //   }
  // }
  class DoNothing extends Callable[Unit] {
    def call: Unit = ()
  }
}
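A quick sketch of DoNothing in use; unlike the Java version, the result comes back as () rather than null:

import java.util.concurrent.Executors
import s4j.scala.chapter04.{Unit => UnitChapter} // renamed to avoid clashing with scala.Unit

val executor = Executors.newSingleThreadExecutor()
println(executor.submit(new UnitChapter.DoNothing).get()) // prints ()
executor.shutdown()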
Example 19
Source File: TestCreateTableIfNotExists.scala From carbondata with Apache License 2.0
package org.apache.carbondata.spark.testsuite.createTable

import java.util.concurrent.{Callable, Executors, ExecutorService, Future, TimeUnit}

import org.apache.spark.sql.test.util.QueryTest
import org.apache.spark.sql.AnalysisException
import org.scalatest.BeforeAndAfterAll

class TestCreateTableIfNotExists extends QueryTest with BeforeAndAfterAll {

  override def beforeAll {
    sql("use default")
    sql("drop table if exists test")
    sql("drop table if exists sourceTable")
    sql("drop table if exists targetTable")
  }

  test("test create table if not exists") {
    sql("create table test(a int, b string) STORED AS carbondata")
    try {
      // table creation should be successful
      sql("create table if not exists test(a int, b string) STORED AS carbondata")
      assert(true)
    } catch {
      case ex: Exception =>
        assert(false)
    }
  }

  test("test create table if not exist concurrently") {
    val executorService: ExecutorService = Executors.newFixedThreadPool(10)
    var futures: List[Future[_]] = List()
    for (i <- 0 until 3) {
      futures = futures :+ runAsync()
    }

    executorService.shutdown()
    executorService.awaitTermination(30L, TimeUnit.SECONDS)

    futures.foreach { future =>
      assertResult("PASS")(future.get.toString)
    }

    def runAsync(): Future[String] = {
      executorService.submit(new Callable[String] {
        override def call() = {
          // Create table
          var result = "PASS"
          try {
            sql("create table IF NOT EXISTS TestIfExists(name string) STORED AS carbondata")
          } catch {
            case exception: Exception =>
              result = exception.getMessage
              exception.printStackTrace()
          }
          result
        }
      })
    }
  }

  test("test create table without column specified") {
    val exception = intercept[AnalysisException] {
      sql("create table TableWithoutColumn STORED AS carbondata tblproperties('sort_columns'='')")
    }
    assert(exception.getMessage.contains("Unable to infer the schema"))
  }

  override def afterAll {
    sql("use default")
    sql("drop table if exists test")
    sql("drop table if exists sourceTable")
    sql("drop table if exists targetTable")
    sql("drop table if exists TestIfExists")
  }
}
Example 20
Source File: threads.scala From Scientific-Computing-with-Scala with MIT License
import java.util.concurrent.Callable
import scala.util.Random

object Accumulator {
  var c = 0.0

  def inc() {
    this.synchronized {
      c = c + 1.0
    }
  }
}

class SamplingThread extends Runnable {
  def run() {
    for (i <- 0 until 500000000) {
      val x = Random.nextDouble
      val y = Random.nextDouble
      if (x * x + y * y < 1.0) {
        Accumulator.inc()
      }
    }
  }
}

object PiParallel {
  def main(args: Array[String]) {
    var c = 0.0
    val threads = for (i <- 0 until 2) yield {
      new Thread(new SamplingThread)
    }
    threads.foreach { (thread: Thread) => thread.start }
    threads.foreach { (thread: Thread) => thread.join }
    println(Accumulator.c / (500000000 * 2))
  }
}
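Note that the Callable import above goes unused: the sampler threads communicate through a synchronized global counter. A hedged alternative sketch has each task return its own hit count through a Callable, removing the shared state and the lock (the 4.0 factor converts the hit ratio into a π estimate):

import java.util.concurrent.{Callable, Executors}
import scala.util.Random

object PiParallelCallable {
  val samplesPerTask = 500000000

  class SamplingTask extends Callable[Long] {
    def call(): Long = {
      var hits = 0L
      for (_ <- 0 until samplesPerTask) {
        val x = Random.nextDouble
        val y = Random.nextDouble
        if (x * x + y * y < 1.0) hits += 1 // count locally; no synchronization needed
      }
      hits
    }
  }

  def main(args: Array[String]) {
    val pool = Executors.newFixedThreadPool(2)
    val futures = for (_ <- 0 until 2) yield pool.submit(new SamplingTask)
    val totalHits = futures.map(_.get()).sum // combine the per-task counts
    pool.shutdown()
    println(4.0 * totalHits / (samplesPerTask.toLong * 2))
  }
}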
Example 21
Source File: AsyncGuavaTests.scala From freestyle with Apache License 2.0
package freestyle.async
package guava

import java.util.concurrent.{Callable, Executors}

import com.google.common.util.concurrent.{ListenableFuture, ListeningExecutorService, MoreExecutors}
import org.scalatest._

import scala.concurrent.duration.Duration
import scala.concurrent.{Await, ExecutionContext}

class AsyncGuavaTests extends WordSpec with Matchers with Implicits {

  import ExecutionContext.Implicits.global
  import implicits._

  val exception: Throwable = new RuntimeException("Test exception")

  val service: ListeningExecutorService =
    MoreExecutors.listeningDecorator(Executors.newFixedThreadPool(10))

  def failedFuture[T]: ListenableFuture[T] =
    service.submit(new Callable[T] {
      override def call(): T = throw exception
    })

  def successfulFuture[T](value: T): ListenableFuture[T] =
    service.submit(new Callable[T] {
      override def call(): T = value
    })

  val foo = "Bar"

  "Guava ListenableFuture Freestyle integration" should {

    "transform guava ListenableFutures into scala.concurrent.Future successfully" in {
      Await.result(listenableFuture2Async(successfulFuture(foo)), Duration.Inf) shouldBe foo
    }

    "recover from failed guava ListenableFutures wrapping them into scala.concurrent.Future" in {
      Await.result(listenableFuture2Async(failedFuture[String]).failed, Duration.Inf) shouldBe exception
    }

    "transform guava ListenableFuture[Void] into scala.concurrent.Future successfully through an implicit conversion" in {
      Await.result(
        listenableFuture2Async(listenableVoidToListenableUnit(successfulFuture[Void](None.orNull))),
        Duration.Inf) shouldBe ((): Unit)
    }

    "recover from failed guava ListenableFuture[Void] wrapping them into scala.concurrent.Future through an implicit conversion" in {
      Await.result(
        listenableFuture2Async(listenableVoidToListenableUnit(failedFuture[Void])).failed,
        Duration.Inf) shouldBe exception
    }
  }
}
Example 22
Source File: ConcurrentSupport.scala From money with Apache License 2.0
package com.comcast.money.core.concurrent

import java.util.concurrent.Callable

import com.comcast.money.api.SpanId
import com.comcast.money.core.internal.SpanLocal

trait ConcurrentSupport {

  val testCallable: Callable[Option[SpanId]] = new Callable[Option[SpanId]] with SpanAware {
    override def call(): Option[SpanId] = captureCurrentSpan()
  }

  val testRunnable: Runnable = new Runnable with SpanAware {
    override def run(): Unit = captureCurrentSpan()
  }
}

trait SpanAware {

  private var savedSpanId: Option[SpanId] = _

  def spanId: Option[SpanId] = savedSpanId

  def captureCurrentSpan(): Option[SpanId] = {
    savedSpanId = SpanLocal.current.map(_.info.id)
    savedSpanId
  }
}
Example 23
Source File: TraceFriendlyThreadPoolExecutorSpec.scala From money with Apache License 2.0
package com.comcast.money.core.concurrent

import java.util.concurrent.{Callable, ExecutorService}

import com.comcast.money.api.SpanId
import com.comcast.money.core.SpecHelpers
import com.comcast.money.core.internal.SpanLocal
import org.scalatest.mockito.MockitoSugar
import org.scalatest.{Matchers, OneInstancePerTest, WordSpecLike}
import org.slf4j.MDC

class TraceFriendlyThreadPoolExecutorSpec
  extends WordSpecLike
  with MockitoSugar
  with Matchers
  with ConcurrentSupport
  with OneInstancePerTest
  with SpecHelpers {

  val executor: ExecutorService = TraceFriendlyThreadPoolExecutor.newCachedThreadPool

  "TraceFriendlyThreadPoolExecutor cachedThreadPool" should {
    "propagate the current span local value" in {
      val traceId = new SpanId("1", 2L, 3L)
      SpanLocal.push(testSpan(traceId))

      val future = executor.submit(testCallable)

      future.get shouldEqual Some(traceId)
      SpanLocal.clear()
    }
    "propagate no span value if none is present" in {
      SpanLocal.clear()

      val future = executor.submit(testCallable)

      future.get shouldEqual None
      SpanLocal.current shouldEqual None
    }
    "propagate only the current span id value" in {
      val traceId1 = new SpanId()
      val traceId2 = new SpanId()
      SpanLocal.push(testSpan(traceId1))
      SpanLocal.push(testSpan(traceId2))

      val future = executor.submit(testCallable)
      future.get shouldEqual Some(traceId2)
    }
    "propagate MDC" in {
      val traceId = new SpanId("1", 2L, 3L)
      SpanLocal.push(testSpan(traceId))
      MDC.put("foo", "bar")

      val mdcCallable = new Callable[String] {
        override def call(): String = MDC.get("foo")
      }

      val future = executor.submit(mdcCallable)
      future.get shouldEqual "bar"
      SpanLocal.clear()
    }
  }

  "TraceFriendlyThreadPoolExecutor fixedThreadPool" should {
    val threadPool: TraceFriendlyThreadPoolExecutor =
      TraceFriendlyThreadPoolExecutor.newFixedThreadPool(1).asInstanceOf[TraceFriendlyThreadPoolExecutor]

    "created the pool with the specified number of threads" in {
      threadPool.getCorePoolSize shouldEqual 1
    }
  }
}
Example 24
Source File: MockedDefaultSourceSuite.scala From HANAVora-Extensions with Apache License 2.0
package org.apache.spark

import java.util.concurrent.{Callable, Executors}

import com.sap.spark.dsmock.DefaultSource
import org.apache.spark.sql.sources.HashPartitioningFunction
import org.apache.spark.sql.{GlobalSapSQLContext, Row, SQLContext}
import org.mockito.Matchers._
import org.mockito.Mockito._
import org.scalatest.FunSuite

import scala.concurrent.duration._

class MockedDefaultSourceSuite extends FunSuite with GlobalSapSQLContext {

  val testTimeout = 10 // seconds

  private def numberOfThreads: Int = {
    val noOfCores = Runtime.getRuntime.availableProcessors()
    assert(noOfCores > 0)

    if (noOfCores == 1) 2 // It should always be multithreaded although only
                          // one processor is available (pseudo-multithreading)
    else noOfCores
  }

  def runMultiThreaded[A](op: Int => A): Seq[A] = {
    info(s"Running with $numberOfThreads threads")
    val pool = Executors.newFixedThreadPool(numberOfThreads)

    val futures = 1 to numberOfThreads map { i =>
      val task = new Callable[A] {
        override def call(): A = op(i)
      }
      pool.submit(task)
    }

    futures.map(_.get(testTimeout, SECONDS))
  }

  test("Underlying mocks of multiple threads are distinct") {
    val dataSources = runMultiThreaded { _ =>
      DefaultSource.withMock(identity)
    }

    dataSources foreach { current =>
      val sourcesWithoutCurrent = dataSources.filter(_.ne(current))
      assert(sourcesWithoutCurrent.forall(_.underlying ne current))
    }
  }

  test("Mocking works as expected") {
    runMultiThreaded { i =>
      DefaultSource.withMock { defaultSource =>
        when(defaultSource.getAllPartitioningFunctions(
          anyObject[SQLContext],
          anyObject[Map[String, String]]))
          .thenReturn(Seq(HashPartitioningFunction(s"foo$i", Seq.empty, None)))

        val Array(Row(name)) = sqlc
          .sql("SHOW PARTITION FUNCTIONS USING com.sap.spark.dsmock")
          .select("name")
          .collect()

        assertResult(s"foo$i")(name)
      }
    }
  }
}
Example 25
Source File: RichScheduledExecutorService.scala From mango with Apache License 2.0
package com.kakao.mango.concurrent

import java.util.concurrent.{Callable, ScheduledFuture, TimeUnit, ScheduledExecutorService}

import scala.concurrent.duration.Duration
import scala.concurrent.duration._
import scala.language.postfixOps

class RichScheduledExecutorService(underlying: ScheduledExecutorService) extends RichExecutorService(underlying) with ScheduledExecutorService {

  def scheduleIn[T](delay: Duration)(command: => T): ScheduledFuture[T] =
    schedule(new Callable[T] {
      override def call(): T = command
    }, delay.toMillis, TimeUnit.MILLISECONDS)

  def withFixedRate[T](rate: Duration, initialDelay: Duration = 0.second)(command: => Unit) =
    scheduleAtFixedRate(new Runnable {
      override def run(): Unit = command
    }, initialDelay.toMillis, rate.toMillis, TimeUnit.MILLISECONDS)

  def withFixedDelay[T](delay: Duration, initialDelay: Duration = 0.second)(command: => Unit) =
    scheduleWithFixedDelay(new Runnable {
      override def run(): Unit = command
    }, initialDelay.toMillis, delay.toMillis, TimeUnit.MILLISECONDS)

  // delegating to underlying
  override def schedule(command: Runnable, delay: Long, unit: TimeUnit): ScheduledFuture[_] =
    underlying.schedule(wrap(command), delay, unit)

  override def scheduleAtFixedRate(command: Runnable, initialDelay: Long, period: Long, unit: TimeUnit): ScheduledFuture[_] =
    underlying.scheduleAtFixedRate(wrap(command), initialDelay, period, unit)

  override def schedule[V](callable: Callable[V], delay: Long, unit: TimeUnit): ScheduledFuture[V] =
    underlying.schedule(wrap(callable), delay, unit)

  override def scheduleWithFixedDelay(command: Runnable, initialDelay: Long, delay: Long, unit: TimeUnit): ScheduledFuture[_] =
    underlying.scheduleWithFixedDelay(wrap(command), initialDelay, delay, unit)
}
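A sketch of the enriched API in use, assuming wrap simply decorates tasks and that the class is constructed around a plain scheduler:

import java.util.concurrent.Executors
import scala.concurrent.duration._

val scheduler = new RichScheduledExecutorService(Executors.newScheduledThreadPool(2))

val answer = scheduler.scheduleIn(500.millis) { 21 * 2 } // ScheduledFuture[Int]
println(answer.get()) // 42, after roughly half a second

val ticker = scheduler.withFixedRate(1.second) { println("tick") }
Thread.sleep(3500)
ticker.cancel(false)
scheduler.shutdown()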
Example 26
Source File: CacheAsyncConnection.scala From play-ws with Apache License 2.0
package play.api.libs.ws.ahc.cache

import java.util.concurrent.Callable
import java.util.concurrent.CompletableFuture
import java.util.concurrent.Executor
import java.util.concurrent.TimeUnit
import java.util.function.BiConsumer

import play.shaded.ahc.org.asynchttpclient.AsyncHandler
import play.shaded.ahc.org.asynchttpclient.ListenableFuture
import play.shaded.ahc.org.asynchttpclient.Request
import org.slf4j.LoggerFactory
import play.shaded.ahc.org.asynchttpclient.handler.ProgressAsyncHandler

class CacheFuture[T](handler: AsyncHandler[T]) extends ListenableFuture[T] {

  private var innerFuture: java.util.concurrent.CompletableFuture[T] = _

  def setInnerFuture(future: java.util.concurrent.CompletableFuture[T]) = {
    innerFuture = future
  }

  override def isDone: Boolean = innerFuture.isDone

  override def done(): Unit = {}

  override def touch(): Unit = {}

  override def abort(t: Throwable): Unit = {
    innerFuture.completeExceptionally(t)
  }

  override def isCancelled: Boolean = {
    innerFuture.isCancelled
  }

  override def get(): T = {
    get(1000L, java.util.concurrent.TimeUnit.MILLISECONDS)
  }

  override def get(timeout: Long, unit: TimeUnit): T = {
    innerFuture.get(timeout, unit)
  }

  override def cancel(mayInterruptIfRunning: Boolean): Boolean = {
    innerFuture.cancel(mayInterruptIfRunning)
  }

  override def toString: String = {
    "CacheFuture"
  }

  override def toCompletableFuture: CompletableFuture[T] = innerFuture

  override def addListener(listener: Runnable, executor: Executor): ListenableFuture[T] = {
    innerFuture.whenCompleteAsync(
      new BiConsumer[T, Throwable]() {
        override def accept(t: T, u: Throwable): Unit = listener.run()
      },
      executor
    )
    this
  }
}
Example 27
Source File: NonblockingPar.scala From learning-fpinscala with MIT License
package com.satansk.fpinscala.parallelism

import java.util.concurrent.{Callable, CountDownLatch, ExecutorService}
import java.util.concurrent.atomic.AtomicReference

object NonblockingPar {

  // The excerpt omits the enclosing object and these two definitions; they are
  // reconstructed here from the usage below, following the well-known
  // "FP in Scala" non-blocking Par design.
  sealed trait Future[+A] {
    def apply(callback: A ⇒ Unit): Unit
  }

  type Par[+A] = ExecutorService ⇒ Future[A]

  def unit[A](a: A): Par[A] =
    _ ⇒ new Future[A] {
      def apply(callback: A ⇒ Unit): Unit = callback(a)
    }

  def fork[A](a: ⇒ Par[A]): Par[A] =
    es ⇒ new Future[A] {
      def apply(callback: (A) ⇒ Unit): Unit = eval(es)(a(es)(callback))
    }

  def eval(es: ExecutorService)(r: ⇒ Unit): Unit =
    es.submit(new Callable[Unit] { def call = r })
}
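The unused CountDownLatch and AtomicReference imports hint at the canonical blocking run function from this design; a hedged reconstruction (it would live inside the same object, next to unit and fork):

// reconstructed sketch, not part of the excerpt above
def run[A](es: ExecutorService)(p: Par[A]): A = {
  val ref = new AtomicReference[A]  // will hold the eventual result
  val latch = new CountDownLatch(1) // released when the callback fires
  p(es) { a => ref.set(a); latch.countDown() }
  latch.await()                     // block the calling thread until then
  ref.get
}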
Example 28
Source File: CacheableRPCInterceptor.scala From Linkis with Apache License 2.0
package com.webank.wedatasphere.linkis.rpc.interceptor.common

import java.util.concurrent.{Callable, TimeUnit}

import com.google.common.cache.{Cache, CacheBuilder, RemovalListener, RemovalNotification}
import com.webank.wedatasphere.linkis.common.exception.WarnException
import com.webank.wedatasphere.linkis.common.utils.Logging
import com.webank.wedatasphere.linkis.protocol.CacheableProtocol
import com.webank.wedatasphere.linkis.rpc.interceptor.{RPCInterceptor, RPCInterceptorChain, RPCInterceptorExchange}
import org.springframework.stereotype.Component

@Component
class CacheableRPCInterceptor extends RPCInterceptor with Logging {

  private val guavaCache: Cache[Any, Any] = CacheBuilder.newBuilder().concurrencyLevel(5)
    .expireAfterAccess(120000, TimeUnit.MILLISECONDS).initialCapacity(20) // TODO: make these values configurable parameters
    .maximumSize(1000).recordStats().removalListener(new RemovalListener[Any, Any] {
      override def onRemoval(removalNotification: RemovalNotification[Any, Any]): Unit = {
        debug(s"CacheSender removed key => ${removalNotification.getKey}, value => ${removalNotification.getValue}.")
      }
    }).asInstanceOf[CacheBuilder[Any, Any]].build()

  override val order: Int = 10

  override def intercept(interceptorExchange: RPCInterceptorExchange, chain: RPCInterceptorChain): Any =
    interceptorExchange.getProtocol match {
      case cacheable: CacheableProtocol =>
        guavaCache.get(cacheable.toString, new Callable[Any] {
          override def call(): Any = {
            val returnMsg = chain.handle(interceptorExchange)
            returnMsg match {
              case warn: WarnException => throw warn
              case _ => returnMsg
            }
          }
        })
      case _ => chain.handle(interceptorExchange)
    }
}
Example 29
Source File: Blocking.scala From scala-concurrency-playground with MIT License
package org.zalando.benchmarks

import java.util.concurrent.{Callable, Executors}

import akka.actor.ActorSystem

import scala.concurrent.Await
import scala.concurrent.duration._

class Blocking(system: ActorSystem) {
  import ComputationFollowedByAsyncPublishing._

  def benchmark(coreFactor: Int): Unit = {
    // let's do this Ye Olde Schoole Way
    val exec = Executors newFixedThreadPool numWorkers(coreFactor)
    try {
      val futures = 1 to numTasks map Job map { job =>
        exec.submit(new Callable[PublishResult] {
          // explicitly turn the async publishing operation into a blocking operation
          override def call(): PublishResult =
            Await.result(Publisher publish (Computer compute job, system), 1 hour)
        })
      }
      printResult(futures map (_.get))
    } finally {
      // never forget
      exec.shutdown()
    }
  }
}
Example 30
Source File: ParallelExecutor.scala From nyaya with GNU Lesser General Public License v2.1
package nyaya.test

import java.util.concurrent.{Callable, ExecutorService, Executors, Future, TimeUnit}
import java.util.concurrent.atomic.AtomicInteger

import nyaya.gen.ThreadNumber
import nyaya.prop.Prop
import ParallelExecutor._
import PTest._
import Executor.{DataCtx, Data}

// TODO data SampleSize = TotalSamples(n) | Fn(qty|%, gensize|%) | PerWorker(sampleSize)

object ParallelExecutor {
  val defaultThreadCount = 1.max(Runtime.getRuntime.availableProcessors - 1)

  def merge[A](a: RunState[A], b: RunState[A]): RunState[A] = {
    val runs = a.runs max b.runs
    (a.success, b.success) match {
      case (false, true) => RunState(runs, a.result)
      case _ => RunState(runs, b.result)
    }
  }
}

case class ParallelExecutor(workers: Int = defaultThreadCount) extends Executor {

  val debugPrefixes = (0 until workers).toVector.map(i => s"Worker #$i: ")

  override def run[A](p: Prop[A], g: Data[A], S: Settings): RunState[A] = {
    val sss = {
      var rem = S.sampleSize.value
      var i = workers
      var v = Vector.empty[SampleSize]
      while (i > 0) {
        val p = rem / i
        v :+= SampleSize(p)
        rem -= p
        i -= 1
      }
      v
    }

    if (S.debug) {
      val szs = sss.map(_.value)
      println(s"Samples/Worker: ${szs.mkString("{", ",", "}")} = Σ${szs.sum}")
    }

    val ai = new AtomicInteger(0)
    def task(worker: Int) = mkTask {
      val dp = debugPrefixes(worker)
      val data = g(DataCtx(sss(worker), ThreadNumber(worker), S.seed, dp))
      testN(p, data, () => ai.incrementAndGet(), S)
    }
    runAsync2(workers, task)
  }

  override def prove[A](p: Prop[A], d: Domain[A], S: Settings): RunState[A] = {
    val threads = workers min d.size

    val ai = new AtomicInteger(0)
    def task(worker: Int) = mkTask {
      proveN(p, d, worker, threads, _ => ai.incrementAndGet, S)
    }
    runAsync2(threads, task)
  }

  private[this] def mkTask[A](f: => RunState[A]) = new Callable[RunState[A]] {
    override def call(): RunState[A] = f
  }

  private[this] def runAsync2[A](threads: Int, f: Int => Callable[RunState[A]]): RunState[A] =
    runAsync(es => (0 until threads).toList.map(es submit f(_)))

  private[this] def runAsync[A](start: ExecutorService => List[Future[RunState[A]]]): RunState[A] = {
    val es: ExecutorService = Executors.newFixedThreadPool(workers)
    val fs = start(es)
    es.shutdown()
    val rss = fs.map(_.get())
    es.awaitTermination(1, TimeUnit.MINUTES)
    rss.foldLeft(RunState.empty[A])(merge)
  }
}