java.util.concurrent.locks.ReentrantLock Scala Examples
The following examples show how to use java.util.concurrent.locks.ReentrantLock from Scala. Each example is drawn from an open-source project, named in the header above the code.
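All of the examples below follow the same core discipline, sketched here in minimal form (LockBasics is illustrative, not from any of the projects): acquire with lock(), do the work inside try, and release in finally so the lock cannot leak when the body throws.

import java.util.concurrent.locks.ReentrantLock

object LockBasics {
  private val lock = new ReentrantLock()
  private var counter = 0 // guarded by "lock"

  def increment(): Int = {
    lock.lock() // blocks until the lock is available
    try {
      counter += 1
      counter
    } finally {
      lock.unlock() // always release, even if the body throws
    }
  }
}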
Example 1
Source File: ContextWaiter.scala From drizzle-spark with Apache License 2.0
package org.apache.spark.streaming

import java.util.concurrent.TimeUnit
import java.util.concurrent.locks.ReentrantLock

private[streaming] class ContextWaiter {

  private val lock = new ReentrantLock()
  private val condition = lock.newCondition()

  // Guarded by "lock"
  private var error: Throwable = null

  // Guarded by "lock"
  private var stopped: Boolean = false

  def notifyError(e: Throwable): Unit = {
    lock.lock()
    try {
      error = e
      condition.signalAll()
    } finally {
      lock.unlock()
    }
  }

  def notifyStop(): Unit = {
    lock.lock()
    try {
      stopped = true
      condition.signalAll()
    } finally {
      lock.unlock()
    }
  }

  def waitForStopOrError(timeout: Long = -1): Boolean = {
    lock.lock()
    try {
      if (timeout < 0) {
        while (!stopped && error == null) {
          condition.await()
        }
      } else {
        var nanos = TimeUnit.MILLISECONDS.toNanos(timeout)
        while (!stopped && error == null && nanos > 0) {
          nanos = condition.awaitNanos(nanos)
        }
      }
      // If already had error, then throw it
      if (error != null) throw error
      // already stopped or timeout
      stopped
    } finally {
      lock.unlock()
    }
  }
}
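A hypothetical usage sketch (the thread and timeout values are illustrative; since ContextWaiter is private[streaming], the code has to live in the org.apache.spark.streaming package): one thread blocks in waitForStopOrError while another signals a stop.

package org.apache.spark.streaming

object ContextWaiterDemo {
  def main(args: Array[String]): Unit = {
    val waiter = new ContextWaiter

    // Signal a stop from another thread after roughly one second.
    val stopper = new Thread(new Runnable {
      override def run(): Unit = {
        Thread.sleep(1000)
        waiter.notifyStop()
      }
    })
    stopper.start()

    // Blocks until notifyStop/notifyError fires or 5 seconds elapse;
    // returns true here because the waiter was stopped, not timed out.
    val stopped = waiter.waitForStopOrError(timeout = 5000)
    println(s"stopped = $stopped")
  }
}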
Example 2
Source File: Guard.scala From airframe with Apache License 2.0
package wvlet.log

import java.util.concurrent.locks.ReentrantLock

trait Guard {
  private[this] val lock = new ReentrantLock()

  protected def newCondition = lock.newCondition()

  def guard[U](body: => U): U = {
    lock.lockInterruptibly()
    try {
      body
    } finally {
      lock.unlock()
    }
  }
}
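A brief usage sketch (Counter is illustrative, not part of airframe): mixing Guard into a class serializes its critical sections on a single ReentrantLock. Note the design choice of lockInterruptibly over lock: a thread blocked on the lock can still be interrupted instead of waiting indefinitely.

class Counter extends Guard {
  private var n = 0

  def increment(): Unit = guard { n += 1 } // runs while holding the lock
  def current: Int = guard { n }
}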
Example 3
Source File: LineBufferedStream.scala From incubator-livy with Apache License 2.0
package org.apache.livy.utils

import java.io.InputStream
import java.util
import java.util.concurrent.locks.ReentrantLock

import scala.io.Source

import org.apache.livy.Logging

class CircularQueue[T](var capacity: Int) extends util.LinkedList[T] {
  override def add(t: T): Boolean = {
    if (size >= capacity) removeFirst()
    super.add(t)
  }
}

class LineBufferedStream(inputStream: InputStream, logSize: Int) extends Logging {

  private[this] val _lines: CircularQueue[String] = new CircularQueue[String](logSize)

  private[this] val _lock = new ReentrantLock()
  private[this] val _condition = _lock.newCondition()
  private[this] var _finished = false

  private val thread = new Thread {
    override def run(): Unit = {
      val lines = Source.fromInputStream(inputStream).getLines()
      for (line <- lines) {
        info(line)
        _lock.lock()
        try {
          _lines.add(line)
          _condition.signalAll()
        } finally {
          _lock.unlock()
        }
      }

      _lock.lock()
      try {
        _finished = true
        _condition.signalAll()
      } finally {
        _lock.unlock()
      }
    }
  }
  thread.setDaemon(true)
  thread.start()

  def lines: IndexedSeq[String] = {
    _lock.lock()
    try {
      IndexedSeq.empty[String] ++ _lines.toArray(Array.empty[String])
    } finally {
      _lock.unlock()
    }
  }

  def iterator: Iterator[String] = new LinesIterator

  def waitUntilClose(): Unit = thread.join()

  private class LinesIterator extends Iterator[String] {

    override def hasNext: Boolean = {
      if (_lines.size > 0) {
        true
      } else {
        // Otherwise we might still have more data.
        _lock.lock()
        try {
          if (_finished) {
            false
          } else {
            _condition.await()
            _lines.size > 0
          }
        } finally {
          _lock.unlock()
        }
      }
    }

    override def next(): String = {
      _lock.lock()
      try {
        _lines.poll()
      } finally {
        _lock.unlock()
      }
    }
  }
}
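A hypothetical usage sketch (the child process and log size are illustrative): wrap a process's stdout so that only the most recent lines are retained.

val process = new ProcessBuilder("ls", "-l").start()
val stream = new LineBufferedStream(process.getInputStream, logSize = 100)
stream.waitUntilClose()       // block until the reader thread drains the stream
stream.lines.foreach(println) // at most the last 100 lines, oldest first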
Note: the same ContextWaiter.scala also ships, unchanged, in the sparkoscope, multi-tenancy-spark, iolap, spark1.52 (which merely adds Chinese translations of the comments), Spark-2.3.1, and BigDatalog forks of Spark, all under Apache License 2.0; see Example 1 for the code.
Example 4
Source File: AkkaBlockingConnection.scala From scredis with Apache License 2.0
package scredis.io

import java.util.concurrent.locks.ReentrantLock

import akka.actor._
import scredis.exceptions._
import scredis.protocol._
import scredis.util.UniqueNameGenerator

import scala.concurrent.Await
import scala.concurrent.duration._
import scala.util.Try

abstract class AkkaBlockingConnection(
  system: ActorSystem,
  host: String,
  port: Int,
  passwordOpt: Option[String],
  database: Int,
  nameOpt: Option[String],
  decodersCount: Int,
  connectTimeout: FiniteDuration,
  maxWriteBatchSize: Int,
  tcpSendBufferSizeHint: Int,
  tcpReceiveBufferSizeHint: Int,
  akkaListenerDispatcherPath: String,
  akkaIODispatcherPath: String,
  akkaDecoderDispatcherPath: String,
  failCommandOnConnecting: Boolean
) extends AbstractAkkaConnection(
  system = system,
  host = host,
  port = port,
  passwordOpt = passwordOpt,
  database = database,
  nameOpt = nameOpt,
  decodersCount = decodersCount,
  receiveTimeoutOpt = None,
  connectTimeout = connectTimeout,
  maxWriteBatchSize = maxWriteBatchSize,
  tcpSendBufferSizeHint = tcpSendBufferSizeHint,
  tcpReceiveBufferSizeHint = tcpReceiveBufferSizeHint,
  akkaListenerDispatcherPath = akkaListenerDispatcherPath,
  akkaIODispatcherPath = akkaIODispatcherPath,
  akkaDecoderDispatcherPath = akkaDecoderDispatcherPath
) with BlockingConnection {

  private val lock = new ReentrantLock()

  protected val listenerActor = system.actorOf(
    Props(
      classOf[ListenerActor],
      host,
      port,
      passwordOpt,
      database,
      nameOpt,
      decodersCount,
      receiveTimeoutOpt,
      connectTimeout,
      maxWriteBatchSize,
      tcpSendBufferSizeHint,
      tcpReceiveBufferSizeHint,
      akkaIODispatcherPath,
      akkaDecoderDispatcherPath,
      failCommandOnConnecting
    ).withDispatcher(akkaListenerDispatcherPath),
    UniqueNameGenerator.getUniqueName(s"${nameOpt.getOrElse(s"$host-$port")}-listener-actor")
  )

  private def withLock[A](f: => A): A = {
    if (lock.tryLock) {
      try {
        f
      } finally {
        lock.unlock()
      }
    } else {
      throw RedisIOException("Trying to send request on a blocked connection")
    }
  }

  override protected[scredis] def sendBlocking[A](request: Request[A])(
    implicit timeout: Duration
  ): Try[A] = withLock {
    logger.debug(s"Sending blocking request: $request")
    updateState(request)
    val future = Protocol.send(request, listenerActor)
    Try(Await.result(future, timeout))
  }
}
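The withLock helper above shows the fail-fast variant of the pattern: tryLock acquires the lock only if it is currently free and returns false otherwise, so a second blocking command is rejected immediately instead of queueing behind the first. A minimal standalone sketch of the same idea (FailFastLock and its exception are illustrative):

import java.util.concurrent.locks.ReentrantLock

object FailFastLock {
  private val lock = new ReentrantLock()

  def withLock[A](f: => A): A = {
    if (lock.tryLock()) { // acquire only if currently free; never blocks
      try f
      finally lock.unlock()
    } else {
      throw new IllegalStateException("resource is busy")
    }
  }
}

Called from two threads at once, one runs the body and the other throws, which is exactly the behavior a blocking Redis connection wants.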
Example 5
Source File: PerTestSparkSession.scala From Spark-RSVD with Apache License 2.0
package com.criteo.rsvd

import java.io.File
import java.nio.file.{Files, Path}
import java.util.concurrent.locks.ReentrantLock

import org.apache.commons.io.FileUtils
import org.apache.spark.SparkContext
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{SQLContext, SparkSession}
import org.scalatest.{BeforeAndAfterEach, Suite}

import scala.reflect.ClassTag
import scala.util.control.NonFatal

// A global lock so that only one suite at a time holds a local SparkSession,
// since multiple Spark contexts cannot safely coexist in one JVM.
object LocalSparkSession {
  private[this] val lock = new ReentrantLock()

  def acquire(): Unit = lock.lock()

  def release(): Unit = lock.unlock()

  def builder: SparkSession.Builder = {
    SparkSession
      .builder()
      .master("local[*]")
      .appName("test")
      .config("spark.ui.enabled", false)
  }
}

trait PerTestSparkSession extends BeforeAndAfterEach { self: Suite =>

  private var currentSession: Option[SparkSession] = None
  private var checkpointDir: Option[Path] = None

  def sc: SparkContext = getOrCreateSession.sparkContext

  def sqlContext: SQLContext = getOrCreateSession.sqlContext

  def sparkConf: Map[String, Any] = Map()

  def toRDD[T: ClassTag](input: Seq[T]): RDD[T] = sc.parallelize(input)

  def toArray[T](input: RDD[T]): Array[T] = input.collect()

  protected def closeSession() = {
    currentSession.foreach(_.stop())
    currentSession = None
    try {
      checkpointDir.foreach(path =>
        FileUtils.deleteDirectory(new File(path.toString)))
    } catch {
      case NonFatal(_) =>
    }
    checkpointDir = None
    LocalSparkSession.release()
  }

  private def getOrCreateSession = synchronized {
    if (currentSession.isEmpty) {
      val builder = LocalSparkSession.builder
      for ((key, value) <- sparkConf) {
        builder.config(key, value.toString)
      }
      currentSession = Some(builder.getOrCreate())
      checkpointDir = Some(Files.createTempDirectory("spark-unit-test-checkpoint-"))
      currentSession.get.sparkContext
        .setCheckpointDir(checkpointDir.get.toString)
      currentSession.get.sparkContext.setLogLevel("WARN")
    }
    currentSession.get
  }

  override def beforeEach(): Unit = {
    LocalSparkSession.acquire()
    super.beforeEach()
  }

  override def afterEach(): Unit = {
    try {
      super.afterEach()
    } finally {
      closeSession()
    }
  }
}
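A hypothetical suite using the trait (the suite name and assertion are illustrative, assuming ScalaTest's FunSuite is on the classpath):

package com.criteo.rsvd

import org.scalatest.FunSuite

class RddRoundTripTest extends FunSuite with PerTestSparkSession {
  test("parallelize and collect round-trips a sequence") {
    val data = Seq(1, 2, 3)
    // The SparkSession is created lazily under the global lock and torn
    // down in afterEach, so each test runs against a fresh context.
    assert(toArray(toRDD(data)).toSeq == data)
  }
}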