java.util.concurrent.ConcurrentLinkedQueue Scala Examples

The following examples show how to use java.util.concurrent.ConcurrentLinkedQueue from Scala. Each example is taken from an open-source project; the source file, project, and license are noted above the code.
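Before the project examples, here is a minimal, self-contained sketch of the core ConcurrentLinkedQueue API (add/offer, poll, and weakly consistent iteration) as it is typically used from Scala. It is illustrative only and is not taken from any of the projects below.

import java.util.concurrent.ConcurrentLinkedQueue

import scala.jdk.CollectionConverters._

object ConcurrentLinkedQueueBasics extends App {
  // An unbounded, thread-safe, non-blocking FIFO queue based on the
  // Michael-Scott algorithm.
  val queue = new ConcurrentLinkedQueue[String]()

  // add and offer are equivalent here: the queue is unbounded, so both always
  // succeed without blocking.
  queue.add("first")
  queue.offer("second")

  // poll returns null on an empty queue; wrapping it in Option is the usual
  // Scala idiom (see the DataGenerator example below).
  println(Option(queue.poll())) // Some(first)

  // size is O(n) and only an estimate under concurrent modification, so prefer
  // isEmpty for emptiness checks.
  println(queue.isEmpty) // false

  // Iterators (and asScala views over them) are weakly consistent: they never
  // throw ConcurrentModificationException but may miss concurrent updates.
  println(queue.iterator().asScala.toList) // List(second)
}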
Example 1
Source File: Schedulable.scala    From spark1.52   with Apache License 2.0
package org.apache.spark.scheduler

import java.util.concurrent.ConcurrentLinkedQueue

import scala.collection.mutable.ArrayBuffer

import org.apache.spark.scheduler.SchedulingMode.SchedulingMode


private[spark] trait Schedulable {
  var parent: Pool
  // child queues
  def schedulableQueue: ConcurrentLinkedQueue[Schedulable]
  def schedulingMode: SchedulingMode
  def weight: Int
  def minShare: Int
  def runningTasks: Int
  def priority: Int
  def stageId: Int
  def name: String

  def addSchedulable(schedulable: Schedulable): Unit
  def removeSchedulable(schedulable: Schedulable): Unit
  def getSchedulableByName(name: String): Schedulable
  def executorLost(executorId: String, host: String): Unit
  def checkSpeculatableTasks(): Boolean
  def getSortedTaskSetQueue: ArrayBuffer[TaskSetManager]
} 
Example 2
Source File: DataGenerator.scala    From Scala-Design-Patterns-Second-Edition   with MIT License
package com.ivan.nikolov.behavioral.null_object

import java.util.concurrent.ConcurrentLinkedQueue

import scala.util.Random

class DataGenerator extends Runnable {

  val MAX_VAL = 10
  val MAX_TIME = 10000
  
  private var isStop = false
  
  private val queue: ConcurrentLinkedQueue[Int] = new ConcurrentLinkedQueue[Int]()
  
  override def run(): Unit = {
    val random = new Random()
    while (!isStop) {
      Thread.sleep(random.nextInt(MAX_TIME))
      queue.add(random.nextInt(MAX_VAL))
    }
  }
  
  def getMessage(): Option[Message] =
    Option(queue.poll()).map {
      case number => Message(number)
    }

  def requestStop(): Unit = this.synchronized {
    isStop = true
  }
} 
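The Message type consumed by getMessage above is defined elsewhere in the book's null-object chapter. A hypothetical driver for the generator could look like the sketch below; the Message definition here is an assumption standing in for the book's own.

import java.util.concurrent.TimeUnit

// Assumed stand-in for the book's Message type (not shown in the excerpt above).
case class Message(number: Int)

object DataGeneratorDemo extends App {
  val generator = new DataGenerator
  val worker = new Thread(generator)
  worker.start()

  // getMessage returns None while the queue is empty, which is exactly the
  // case the book's null-object pattern is meant to smooth over.
  (1 to 5).foreach { _ =>
    generator.getMessage() match {
      case Some(Message(n)) => println(s"got $n")
      case None             => println("nothing yet")
    }
    TimeUnit.SECONDS.sleep(1)
  }

  generator.requestStop()
  worker.join()
}

One caveat: run() reads isStop without synchronization while requestStop() writes it under a lock, so the stop request is not guaranteed to become visible to the generator thread; marking the flag @volatile would fix that.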
Example 3
Source File: CommandExecutor.scala    From renku   with Apache License 2.0
package ch.renku.acceptancetests.tooling.console

import java.io.{File, InputStream}
import java.nio.file.Path
import java.util
import java.util.concurrent.ConcurrentLinkedQueue

import cats.effect.IO
import cats.implicits._
import ch.renku.acceptancetests.model.users.UserCredentials
import ch.renku.acceptancetests.tooling.TestLogger.logger
import ch.renku.acceptancetests.tooling.console.Command.UserInput

import scala.jdk.CollectionConverters._
import scala.language.postfixOps
import scala.sys.process._

private class CommandExecutor(command: Command) {

  def execute(implicit workPath: Path, userCredentials: UserCredentials): String = {

    implicit val output: util.Collection[String] = new ConcurrentLinkedQueue[String]()

    IO {
      executeCommand
      output.asString
    } recoverWith consoleException
  }.unsafeRunSync()

  def safeExecute(implicit workPath: Path, userCredentials: UserCredentials): String = {
    implicit val output: util.Collection[String] = new ConcurrentLinkedQueue[String]()

    IO {
      executeCommand
      output.asString
    } recover outputAsString
  }.unsafeRunSync()

  private def executeCommand(implicit workPath: Path,
                             output:            util.Collection[String],
                             userCredentials:   UserCredentials): Unit =
    command.userInputs.foldLeft(buildProcess) { (process, userInput) =>
      process #< userInput.asStream
    } lazyLines ProcessLogger(logLine _) foreach logLine

  private def buildProcess(implicit workPath: Path) =
    command.maybeFileName.foldLeft(Process(command.toString.stripMargin, workPath.toFile)) { (process, fileName) =>
      process #>> new File(workPath.toUri resolve fileName.value)
    }

  private def logLine(
      line:          String
  )(implicit output: util.Collection[String], userCredentials: UserCredentials): Unit = line.trim match {
    case "" => ()
    case line =>
      val obfuscatedLine = line.replace(userCredentials.password.value, "###")
      output add obfuscatedLine
      logger debug obfuscatedLine
  }

  private def consoleException(implicit output: util.Collection[String]): PartialFunction[Throwable, IO[String]] = {
    case _ =>
      ConsoleException {
        s"$command failed with:\n${output.asString}"
      }.raiseError[IO, String]
  }

  private def outputAsString(implicit output: util.Collection[String]): PartialFunction[Throwable, String] = {
    case _ => output.asString
  }

  private implicit class OutputOps(output: util.Collection[String]) {
    lazy val asString: String = output.asScala.mkString("\n")
  }

  private implicit class UserInputOps(userInput: UserInput) {
    import java.nio.charset.StandardCharsets.UTF_8

    lazy val asStream: InputStream = new java.io.ByteArrayInputStream(
      userInput.value.getBytes(UTF_8.name)
    )
  }
} 
Example 4
Source File: AkkaStreamSuite.scala    From bahir   with Apache License 2.0
package org.apache.spark.streaming.akka

import java.util.concurrent.ConcurrentLinkedQueue

import scala.collection.JavaConverters._
import scala.concurrent.Await
import scala.concurrent.duration._

import akka.actor._
import com.typesafe.config.ConfigFactory
import org.scalatest.BeforeAndAfter
import org.scalatest.concurrent.Eventually

import org.apache.spark.{SparkConf, SparkFunSuite}
import org.apache.spark.streaming.{Milliseconds, StreamingContext}

class AkkaStreamSuite extends SparkFunSuite with Eventually with BeforeAndAfter {

  private var ssc: StreamingContext = _

  private var actorSystem: ActorSystem = _

  after {
    if (ssc != null) {
      ssc.stop()
      ssc = null
    }
    if (actorSystem != null) {
      Await.ready(actorSystem.terminate(), 30.seconds)
      actorSystem = null
    }
  }

  test("actor input stream") {
    val sparkConf = new SparkConf().setMaster("local[4]").setAppName(this.getClass.getSimpleName)
    ssc = new StreamingContext(sparkConf, Milliseconds(500))

    // We set the TCP port to "0" so that a free port is chosen automatically for the
    // Feeder actor; the Receiver actor then "picks it up" from the Feeder URI when it
    // subscribes to the Feeder actor (http://doc.akka.io/docs/akka/2.3.11/scala/remoting.html)
    val akkaConf = ConfigFactory.parseMap(
      Map(
        "akka.actor.provider" -> "akka.remote.RemoteActorRefProvider",
        "akka.remote.netty.tcp.transport-class" -> "akka.remote.transport.netty.NettyTransport",
        "akka.remote.netty.tcp.port" -> "0").
        asJava)
    actorSystem = ActorSystem("test", akkaConf)
    actorSystem.actorOf(Props(classOf[FeederActor]), "FeederActor")
    val feederUri =
      actorSystem.asInstanceOf[ExtendedActorSystem].provider.getDefaultAddress + "/user/FeederActor"

    val actorStream =
      AkkaUtils.createStream[String](ssc, Props(classOf[TestActorReceiver], feederUri),
        "TestActorReceiver")
    val result = new ConcurrentLinkedQueue[String]
    actorStream.foreachRDD { rdd =>
      rdd.collect().foreach(result.add)
    }
    ssc.start()

    eventually(timeout(10.seconds), interval(10.milliseconds)) {
      assert((1 to 10).map(_.toString) === result.asScala.toList)
    }
  }
}

case class SubscribeReceiver(receiverActor: ActorRef)

class FeederActor extends Actor {

  def receive: Receive = {
    case SubscribeReceiver(receiverActor: ActorRef) =>
      (1 to 10).foreach(i => receiverActor ! i.toString())
  }
}

class TestActorReceiver(uriOfPublisher: String) extends ActorReceiver {

  lazy private val remotePublisher = context.actorSelection(uriOfPublisher)

  override def preStart(): Unit = {
    remotePublisher ! SubscribeReceiver(self)
  }

  def receive: PartialFunction[Any, Unit] = {
    case msg: String => store(msg)
  }

} 
Example 5
Source File: TestOutputStream.scala    From multi-tenancy-spark   with Apache License 2.0
package org.apache.spark.streaming

import java.io.{IOException, ObjectInputStream}
import java.util.concurrent.ConcurrentLinkedQueue

import scala.reflect.ClassTag

import org.apache.spark.rdd.RDD
import org.apache.spark.streaming.dstream.{DStream, ForEachDStream}
import org.apache.spark.util.Utils


class TestOutputStream[T: ClassTag](parent: DStream[T],
    val output: ConcurrentLinkedQueue[Seq[T]] = new ConcurrentLinkedQueue[Seq[T]]())
  extends ForEachDStream[T](parent, (rdd: RDD[T], t: Time) => {
    val collected = rdd.collect()
    output.add(collected)
  }, false) {

  // This is to clear the output buffer every time it is read from a checkpoint
  @throws(classOf[IOException])
  private def readObject(ois: ObjectInputStream): Unit = Utils.tryOrIOException {
    ois.defaultReadObject()
    output.clear()
  }
} 
Example 6
Source File: FlumeStreamSuite.scala    From multi-tenancy-spark   with Apache License 2.0
package org.apache.spark.streaming.flume

import java.util.concurrent.ConcurrentLinkedQueue

import scala.collection.JavaConverters._
import scala.concurrent.duration._
import scala.language.postfixOps

import org.jboss.netty.channel.ChannelPipeline
import org.jboss.netty.channel.socket.SocketChannel
import org.jboss.netty.channel.socket.nio.NioClientSocketChannelFactory
import org.jboss.netty.handler.codec.compression._
import org.scalatest.{BeforeAndAfter, Matchers}
import org.scalatest.concurrent.Eventually._

import org.apache.spark.{SparkConf, SparkFunSuite}
import org.apache.spark.internal.Logging
import org.apache.spark.network.util.JavaUtils
import org.apache.spark.storage.StorageLevel
import org.apache.spark.streaming.{Milliseconds, StreamingContext, TestOutputStream}

class FlumeStreamSuite extends SparkFunSuite with BeforeAndAfter with Matchers with Logging {
  val conf = new SparkConf().setMaster("local[4]").setAppName("FlumeStreamSuite")
  var ssc: StreamingContext = null

  test("flume input stream") {
    testFlumeStream(testCompression = false)
  }

  test("flume input compressed stream") {
    testFlumeStream(testCompression = true)
  }

  
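  // NOTE: the private testFlumeStream helper invoked by the two tests above is
  // elided from this excerpt.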
  private class CompressionChannelFactory(compressionLevel: Int)
    extends NioClientSocketChannelFactory {

    override def newChannel(pipeline: ChannelPipeline): SocketChannel = {
      val encoder = new ZlibEncoder(compressionLevel)
      pipeline.addFirst("deflater", encoder)
      pipeline.addFirst("inflater", new ZlibDecoder())
      super.newChannel(pipeline)
    }
  }
} 
Example 7
Source File: RecurringTimerSuite.scala    From multi-tenancy-spark   with Apache License 2.0
package org.apache.spark.streaming.util

import java.util.concurrent.ConcurrentLinkedQueue

import scala.collection.JavaConverters._
import scala.concurrent.duration._

import org.scalatest.PrivateMethodTester
import org.scalatest.concurrent.Eventually._

import org.apache.spark.SparkFunSuite
import org.apache.spark.util.ManualClock

class RecurringTimerSuite extends SparkFunSuite with PrivateMethodTester {

  test("basic") {
    val clock = new ManualClock()
    val results = new ConcurrentLinkedQueue[Long]()
    val timer = new RecurringTimer(clock, 100, time => {
      results.add(time)
    }, "RecurringTimerSuite-basic")
    timer.start(0)
    eventually(timeout(10.seconds), interval(10.millis)) {
      assert(results.asScala.toSeq === Seq(0L))
    }
    clock.advance(100)
    eventually(timeout(10.seconds), interval(10.millis)) {
      assert(results.asScala.toSeq === Seq(0L, 100L))
    }
    clock.advance(200)
    eventually(timeout(10.seconds), interval(10.millis)) {
      assert(results.asScala.toSeq === Seq(0L, 100L, 200L, 300L))
    }
    assert(timer.stop(interruptTimer = true) === 300L)
  }

  test("SPARK-10224: call 'callback' after stopping") {
    val clock = new ManualClock()
    val results = new ConcurrentLinkedQueue[Long]
    val timer = new RecurringTimer(clock, 100, time => {
      results.add(time)
    }, "RecurringTimerSuite-SPARK-10224")
    timer.start(0)
    eventually(timeout(10.seconds), interval(10.millis)) {
      assert(results.asScala.toSeq === Seq(0L))
    }
    @volatile var lastTime = -1L
    // Now RecurringTimer is waiting for the next interval
    val thread = new Thread {
      override def run(): Unit = {
        lastTime = timer.stop(interruptTimer = false)
      }
    }
    thread.start()
    val stopped = PrivateMethod[RecurringTimer]('stopped)
    // Make sure the `stopped` field has been changed
    eventually(timeout(10.seconds), interval(10.millis)) {
      assert(timer.invokePrivate(stopped()) === true)
    }
    clock.advance(200)
    // When RecurringTimer wakes up from clock.waitTillTime, it will call `callback` once.
    // Then it will find `stopped` is true and exit the loop, but it should call `callback` again
    // before exiting its internal thread.
    thread.join()
    assert(results.asScala.toSeq === Seq(0L, 100L, 200L))
    assert(lastTime === 200L)
  }
} 
Example 8
Source File: Schedulable.scala    From multi-tenancy-spark   with Apache License 2.0
package org.apache.spark.scheduler

import java.util.concurrent.ConcurrentLinkedQueue

import scala.collection.mutable.ArrayBuffer

import org.apache.spark.scheduler.SchedulingMode.SchedulingMode


private[spark] trait Schedulable {
  var parent: Pool
  // child queues
  def schedulableQueue: ConcurrentLinkedQueue[Schedulable]
  def schedulingMode: SchedulingMode
  def weight: Int
  def minShare: Int
  def runningTasks: Int
  def priority: Int
  def stageId: Int
  def name: String

  def addSchedulable(schedulable: Schedulable): Unit
  def removeSchedulable(schedulable: Schedulable): Unit
  def getSchedulableByName(name: String): Schedulable
  def executorLost(executorId: String, host: String, reason: ExecutorLossReason): Unit
  def checkSpeculatableTasks(minTimeToSpeculation: Int): Boolean
  def getSortedTaskSetQueue: ArrayBuffer[TaskSetManager]
} 
Example 9
Source File: Schedulable.scala    From iolap   with Apache License 2.0
package org.apache.spark.scheduler

import java.util.concurrent.ConcurrentLinkedQueue

import scala.collection.mutable.ArrayBuffer

import org.apache.spark.scheduler.SchedulingMode.SchedulingMode


private[spark] trait Schedulable {
  var parent: Pool
  // child queues
  def schedulableQueue: ConcurrentLinkedQueue[Schedulable]
  def schedulingMode: SchedulingMode
  def weight: Int
  def minShare: Int
  def runningTasks: Int
  def priority: Int
  def stageId: Int
  def name: String

  def addSchedulable(schedulable: Schedulable): Unit
  def removeSchedulable(schedulable: Schedulable): Unit
  def getSchedulableByName(name: String): Schedulable
  def executorLost(executorId: String, host: String): Unit
  def checkSpeculatableTasks(): Boolean
  def getSortedTaskSetQueue: ArrayBuffer[TaskSetManager]
} 
Example 10
Source File: QueryResultTypeJdbcSpec.scala    From quill   with Apache License 2.0
package io.getquill.context.jdbc.oracle

import java.util.concurrent.ConcurrentLinkedQueue

import io.getquill.context.sql.{ testContext => _, _ }

import scala.jdk.CollectionConverters._

class QueryResultTypeJdbcSpec extends QueryResultTypeSpec {

  override val context = testContext
  import context._

  def await[T](r: T) = r

  val insertedProducts = new ConcurrentLinkedQueue[Product]

  override def beforeAll = {
    context.run(deleteAll)
    val ids = context.run(liftQuery(productEntries).foreach(p => productInsert(p)))
    val inserted = (ids zip productEntries).map {
      case (id, prod) => prod.copy(id = id)
    }
    insertedProducts.addAll(inserted.asJava)
    ()
  }

  def products = insertedProducts.asScala.toList

  "return list" - {
    "select" in {
      await(context.run(selectAll)) must contain theSameElementsAs (products)
    }
    "map" in {
      await(context.run(map)) must contain theSameElementsAs (products.map(_.id))
    }
    "sortBy" in {
      await(context.run(sortBy)) must contain theSameElementsInOrderAs (products)
    }
    "take" in {
      await(context.run(take)) must contain theSameElementsAs (products)
    }
    "drop" in {
      await(context.run(drop)) must contain theSameElementsAs (products.drop(1))
    }
    "++" in {
      await(context.run(`++`)) must contain theSameElementsAs (products ++ products)
    }
    "unionAll" in {
      await(context.run(unionAll)) must contain theSameElementsAs (products ++ products)
    }
    "union" in {
      await(context.run(union)) must contain theSameElementsAs (products)
    }
    "join" in {
      await(context.run(join)) must contain theSameElementsAs (products zip products)
    }
    "distinct" in {
      await(context.run(distinct)) must contain theSameElementsAs (products.map(_.id).distinct)
    }
  }

  "return single result" - {
    "min" - {
      "some" in {
        await(context.run(minExists)) mustEqual Some(products.map(_.sku).min)
      }
      "none" in {
        await(context.run(minNonExists)) mustBe None
      }
    }
    "max" - {
      "some" in {
        await(context.run(maxExists)) mustBe Some(products.map(_.sku).max)
      }
      "none" in {
        await(context.run(maxNonExists)) mustBe None
      }
    }
    "avg" - {
      "some" in {
        await(context.run(avgExists)) mustBe Some(BigDecimal(products.map(_.sku).sum) / products.size)
      }
      "none" in {
        await(context.run(avgNonExists)) mustBe None
      }
    }
    "size" in {
      await(context.run(productSize)) mustEqual products.size
    }
    "parametrized size" in {
      await(context.run(parametrizedSize(lift(10000)))) mustEqual 0
    }
  }
} 
Example 11
Source File: TestOutputStream.scala    From Spark-2.3.1   with Apache License 2.0
package org.apache.spark.streaming

import java.io.{IOException, ObjectInputStream}
import java.util.concurrent.ConcurrentLinkedQueue

import scala.reflect.ClassTag

import org.apache.spark.rdd.RDD
import org.apache.spark.streaming.dstream.{DStream, ForEachDStream}
import org.apache.spark.util.Utils


class TestOutputStream[T: ClassTag](parent: DStream[T],
    val output: ConcurrentLinkedQueue[Seq[T]] = new ConcurrentLinkedQueue[Seq[T]]())
  extends ForEachDStream[T](parent, (rdd: RDD[T], t: Time) => {
    val collected = rdd.collect()
    output.add(collected)
  }, false) {

  // This is to clear the output buffer every time it is read from a checkpoint
  @throws(classOf[IOException])
  private def readObject(ois: ObjectInputStream): Unit = Utils.tryOrIOException {
    ois.defaultReadObject()
    output.clear()
  }
} 
Example 12
Source File: FlumeStreamSuite.scala    From Spark-2.3.1   with Apache License 2.0
package org.apache.spark.streaming.flume

import java.util.concurrent.ConcurrentLinkedQueue

import scala.collection.JavaConverters._
import scala.concurrent.duration._
import scala.language.postfixOps

import org.jboss.netty.channel.ChannelPipeline
import org.jboss.netty.channel.socket.SocketChannel
import org.jboss.netty.channel.socket.nio.NioClientSocketChannelFactory
import org.jboss.netty.handler.codec.compression._
import org.scalatest.{BeforeAndAfter, Matchers}
import org.scalatest.concurrent.Eventually._

import org.apache.spark.{SparkConf, SparkFunSuite}
import org.apache.spark.internal.Logging
import org.apache.spark.network.util.JavaUtils
import org.apache.spark.storage.StorageLevel
import org.apache.spark.streaming.{Milliseconds, StreamingContext, TestOutputStream}

class FlumeStreamSuite extends SparkFunSuite with BeforeAndAfter with Matchers with Logging {
  val conf = new SparkConf().setMaster("local[4]").setAppName("FlumeStreamSuite")
  var ssc: StreamingContext = null

  test("flume input stream") {
    testFlumeStream(testCompression = false)
  }

  test("flume input compressed stream") {
    testFlumeStream(testCompression = true)
  }

  
  private class CompressionChannelFactory(compressionLevel: Int)
    extends NioClientSocketChannelFactory {

    override def newChannel(pipeline: ChannelPipeline): SocketChannel = {
      val encoder = new ZlibEncoder(compressionLevel)
      pipeline.addFirst("deflater", encoder)
      pipeline.addFirst("inflater", new ZlibDecoder())
      super.newChannel(pipeline)
    }
  }
} 
Example 13
Source File: RecurringTimerSuite.scala    From Spark-2.3.1   with Apache License 2.0
package org.apache.spark.streaming.util

import java.util.concurrent.ConcurrentLinkedQueue

import scala.collection.JavaConverters._
import scala.concurrent.duration._

import org.scalatest.PrivateMethodTester
import org.scalatest.concurrent.Eventually._

import org.apache.spark.SparkFunSuite
import org.apache.spark.util.ManualClock

class RecurringTimerSuite extends SparkFunSuite with PrivateMethodTester {

  test("basic") {
    val clock = new ManualClock()
    val results = new ConcurrentLinkedQueue[Long]()
    val timer = new RecurringTimer(clock, 100, time => {
      results.add(time)
    }, "RecurringTimerSuite-basic")
    timer.start(0)
    eventually(timeout(10.seconds), interval(10.millis)) {
      assert(results.asScala.toSeq === Seq(0L))
    }
    clock.advance(100)
    eventually(timeout(10.seconds), interval(10.millis)) {
      assert(results.asScala.toSeq === Seq(0L, 100L))
    }
    clock.advance(200)
    eventually(timeout(10.seconds), interval(10.millis)) {
      assert(results.asScala.toSeq === Seq(0L, 100L, 200L, 300L))
    }
    assert(timer.stop(interruptTimer = true) === 300L)
  }

  test("SPARK-10224: call 'callback' after stopping") {
    val clock = new ManualClock()
    val results = new ConcurrentLinkedQueue[Long]
    val timer = new RecurringTimer(clock, 100, time => {
      results.add(time)
    }, "RecurringTimerSuite-SPARK-10224")
    timer.start(0)
    eventually(timeout(10.seconds), interval(10.millis)) {
      assert(results.asScala.toSeq === Seq(0L))
    }
    @volatile var lastTime = -1L
    // Now RecurringTimer is waiting for the next interval
    val thread = new Thread {
      override def run(): Unit = {
        lastTime = timer.stop(interruptTimer = false)
      }
    }
    thread.start()
    val stopped = PrivateMethod[RecurringTimer]('stopped)
    // Make sure the `stopped` field has been changed
    eventually(timeout(10.seconds), interval(10.millis)) {
      assert(timer.invokePrivate(stopped()) === true)
    }
    clock.advance(200)
    // When RecurringTimer wakes up from clock.waitTillTime, it will call `callback` once.
    // Then it will find `stopped` is true and exit the loop, but it should call `callback` again
    // before exiting its internal thread.
    thread.join()
    assert(results.asScala.toSeq === Seq(0L, 100L, 200L))
    assert(lastTime === 200L)
  }
} 
Example 14
Source File: Schedulable.scala    From Spark-2.3.1   with Apache License 2.0
package org.apache.spark.scheduler

import java.util.concurrent.ConcurrentLinkedQueue

import scala.collection.mutable.ArrayBuffer

import org.apache.spark.scheduler.SchedulingMode.SchedulingMode


private[spark] trait Schedulable {
  var parent: Pool
  // child queues
  def schedulableQueue: ConcurrentLinkedQueue[Schedulable]
  def schedulingMode: SchedulingMode
  def weight: Int
  def minShare: Int
  def runningTasks: Int
  def priority: Int
  def stageId: Int
  def name: String

  def addSchedulable(schedulable: Schedulable): Unit
  def removeSchedulable(schedulable: Schedulable): Unit
  def getSchedulableByName(name: String): Schedulable
  def executorLost(executorId: String, host: String, reason: ExecutorLossReason): Unit
  def checkSpeculatableTasks(minTimeToSpeculation: Int): Boolean
  def getSortedTaskSetQueue: ArrayBuffer[TaskSetManager]
} 
Example 15
Source File: Schedulable.scala    From BigDatalog   with Apache License 2.0
package org.apache.spark.scheduler

import java.util.concurrent.ConcurrentLinkedQueue

import scala.collection.mutable.ArrayBuffer

import org.apache.spark.scheduler.SchedulingMode.SchedulingMode


private[spark] trait Schedulable {
  var parent: Pool
  // child queues
  def schedulableQueue: ConcurrentLinkedQueue[Schedulable]
  def schedulingMode: SchedulingMode
  def weight: Int
  def minShare: Int
  def runningTasks: Int
  def priority: Int
  def stageId: Int
  def name: String

  def addSchedulable(schedulable: Schedulable): Unit
  def removeSchedulable(schedulable: Schedulable): Unit
  def getSchedulableByName(name: String): Schedulable
  def executorLost(executorId: String, host: String, reason: ExecutorLossReason): Unit
  def checkSpeculatableTasks(): Boolean
  def getSortedTaskSetQueue: ArrayBuffer[TaskSetManager]
} 
Example 16
Source File: AkkaHttpWebsocketTest.scala    From sttp   with Apache License 2.0
package sttp.client.akkahttp

import java.util.concurrent.ConcurrentLinkedQueue

import akka.Done
import akka.http.scaladsl.model.ws.{Message, TextMessage}
import akka.stream.Materializer
import akka.stream.scaladsl._
import org.scalatest.BeforeAndAfterAll
import org.scalatest.concurrent.{Eventually, IntegrationPatience}
import sttp.client._

import scala.collection.JavaConverters._
import scala.concurrent.duration._
import scala.concurrent.{ExecutionContext, Future, Promise}
import scala.util.Success
import org.scalatest.flatspec.AsyncFlatSpec
import org.scalatest.matchers.should.Matchers
import sttp.client.testing.HttpTest.wsEndpoint

class AkkaHttpWebsocketTest
    extends AsyncFlatSpec
    with Matchers
    with BeforeAndAfterAll
    with Eventually
    with IntegrationPatience {
  implicit val ec: ExecutionContext = scala.concurrent.ExecutionContext.global
  implicit val backend: SttpBackend[Future, Nothing, Flow[Message, Message, *]] = AkkaHttpBackend()

  it should "send and receive ten messages" in {
    val received = new ConcurrentLinkedQueue[String]()

    val sink: Sink[Message, Future[Done]] = collectionSink(received)

    val n = 10
    val source: Source[Message, Promise[Option[Message]]] =
      Source((1 to n).map(i => TextMessage(s"test$i"))).concatMat(Source.maybe[Message])(Keep.right)

    val flow: Flow[Message, Message, (Future[Done], Promise[Option[Message]])] =
      Flow.fromSinkAndSourceMat(sink, source)(Keep.both)

    basicRequest.get(uri"$wsEndpoint/ws/echo").openWebsocket(flow).flatMap { r =>
      eventually {
        received.asScala.toList shouldBe (1 to n).map(i => s"echo: test$i").toList
      }

      r.result._2.complete(Success(None)) // the source should now complete
      r.result._1.map(_ => succeed) // the future should be completed once the stream completes (and the ws closes)
    }
  }

  it should "receive two messages" in {
    val received = new ConcurrentLinkedQueue[String]()
    val sink: Sink[Message, Future[Done]] = collectionSink(received)
    val source: Source[Message, Promise[Option[Message]]] = Source.maybe[Message]

    val flow: Flow[Message, Message, Promise[Option[Message]]] =
      Flow.fromSinkAndSourceMat(sink, source)(Keep.right)

    basicRequest.get(uri"$wsEndpoint/ws/send_and_wait").openWebsocket(flow).flatMap { r =>
      eventually {
        received.asScala.toList shouldBe List("test10", "test20")
      }
      r.result.success(None) // closing
      succeed
    }
  }

  it should "error if the endpoint is not a websocket" in {
    basicRequest.get(uri"$wsEndpoint/echo").openWebsocket(Flow.apply[Message]).failed.map { t =>
      t shouldBe a[NotAWebsocketException]
    }
  }

  def collectionSink(queue: ConcurrentLinkedQueue[String]): Sink[Message, Future[Done]] =
    Sink
      .setup[Message, Future[Done]] { (_materializer, _) =>
        Flow[Message]
          // mapping with parallelism 1 so that messages don't get reordered
          .mapAsync(1) {
            case m: TextMessage =>
              implicit val materializer: Materializer = _materializer
              m.toStrict(1.second).map(Some(_))
            case _ => Future.successful(None)
          }
          .collect {
            case Some(TextMessage.Strict(text)) => text
          }
          .toMat(Sink.foreach(queue.add))(Keep.right)
      }
      .mapMaterializedValue(_.flatMap(identity))

  override protected def afterAll(): Unit = {
    backend.close()
    super.afterAll()
  }
} 
Example 17
Source File: LowLevelListenerWebSocketTest.scala    From sttp   with Apache License 2.0
package sttp.client.testing.websocket

import java.util.concurrent.ConcurrentLinkedQueue

import org.scalatest.concurrent.{Eventually, IntegrationPatience}
import org.scalatest.{Assertion, BeforeAndAfterAll}
import sttp.client._
import sttp.client.monad.MonadError
import sttp.client.testing.{ConvertToFuture, ToFutureWrapper}
import sttp.client.monad.syntax._

import scala.collection.JavaConverters._
import org.scalatest.SuiteMixin
import org.scalatest.flatspec.AsyncFlatSpecLike
import org.scalatest.matchers.should.Matchers
import sttp.client.testing.HttpTest.wsEndpoint

// TODO: change to `extends AsyncFlatSpec` when https://github.com/scalatest/scalatest/issues/1802 is fixed
trait LowLevelListenerWebSocketTest[F[_], WS, WS_HANDLER[_]]
    extends SuiteMixin
    with AsyncFlatSpecLike
    with Matchers
    with BeforeAndAfterAll
    with ToFutureWrapper
    with Eventually
    with IntegrationPatience {

  implicit def backend: SttpBackend[F, Nothing, WS_HANDLER]
  implicit def convertToFuture: ConvertToFuture[F]
  private implicit lazy val monad: MonadError[F] = backend.responseMonad
  def testErrorWhenEndpointIsNotWebsocket: Boolean = true
  def createHandler(onTextFrame: String => Unit): WS_HANDLER[WS]
  def sendText(ws: WS, t: String): Unit
  def sendCloseFrame(ws: WS): Unit

  it should "send and receive ten messages" in {
    val n = 10
    val received = new ConcurrentLinkedQueue[String]()
    basicRequest
      .get(uri"$wsEndpoint/ws/echo")
      .openWebsocket(createHandler(received.add))
      .map { response =>
        (1 to n).foreach { i =>
          val msg = s"test$i"
          info(s"Sending text message: $msg")
          sendText(response.result, msg)
        }
        eventually {
          received.asScala.toList shouldBe (1 to n).map(i => s"echo: test$i").toList
        }
        sendCloseFrame(response.result)
        succeed
      }
      .toFuture()
  }

  it should "receive two messages" in {
    val received = new ConcurrentLinkedQueue[String]()
    basicRequest
      .get(uri"$wsEndpoint/ws/send_and_wait")
      .openWebsocket(createHandler(received.add))
      .map { response =>
        eventually {
          received.asScala.toList shouldBe List("test10", "test20")
        }
        sendCloseFrame(response.result)
        succeed
      }
      .toFuture()
  }
  if (testErrorWhenEndpointIsNotWebsocket) {
    it should "error if the endpoint is not a websocket" in {
      monad
        .handleError(
          basicRequest
            .get(uri"$wsEndpoint/echo")
            .openWebsocket(createHandler(_ => ()))
            .map(_ => fail("An exception should be thrown"): Assertion)
        ) {
          case e => (e shouldBe a[SttpClientException.ReadException]).unit
        }
        .toFuture()
    }
  }

  override protected def afterAll(): Unit = {
    backend.close().toFuture()
    super.afterAll()
  }
} 
Example 18
Source File: MockPublish.scala    From polynote   with Apache License 2.0
package polynote.testing

import java.util.concurrent.ConcurrentLinkedQueue
import scala.collection.JavaConverters._

import fs2.Pipe
import polynote.kernel.KernelStatusUpdate
import polynote.kernel.util.Publish
import zio.{Task, ZIO}

class MockPublish[A]() extends Publish[Task, A] {
  private val published = new ConcurrentLinkedQueue[A]()
  @volatile private var publishCount = 0

  def publish1(a: A): Task[Unit] = {
    publishCount += 1
    ZIO {
      published.add(a)
    }.unit
  }
  override def publish: Pipe[Task, A, Unit] = _.evalMap(publish1)
  def reset(): Unit = {
    published.clear()
    publishCount = 0
  }
  def toList: Task[List[A]] = ZIO.flatten {
    ZIO {
      if (published.size() < publishCount) {
        // waiting on something to publish asynchronously
        toList
      } else ZIO.succeed(published.iterator().asScala.toList)
    }
  }
} 
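A hedged usage sketch for MockPublish: build an effect that publishes two values and reads them back. It assumes a ZIO 1.x runtime (zio.Runtime.default); polynote's own test harness may run effects differently.

import zio.{Runtime, Task}

object MockPublishDemo extends App {
  val publish = new MockPublish[Int]()

  // publish1 counts each publish eagerly and enqueues the value when the
  // effect runs; toList keeps re-evaluating itself until the queue has caught
  // up with the counter.
  val program: Task[List[Int]] = for {
    _  <- publish.publish1(1)
    _  <- publish.publish1(2)
    xs <- publish.toList
  } yield xs

  println(Runtime.default.unsafeRun(program)) // List(1, 2)
}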
Example 19
Source File: WSPlayListener.scala    From scala-loci   with Apache License 2.0
package loci
package communicator
package ws.akka

import java.net.URI
import java.util.concurrent.ConcurrentLinkedQueue

import play.api.mvc.Security.AuthenticatedRequest
import play.api.mvc.{RequestHeader, Results, WebSocket}

import scala.concurrent.Future
import scala.util.{Failure, Success, Try}

private object WSPlayListener {
  locally(WSPlayListener)

  def apply[P <: WS: WSProtocolFactory](properties: WS.Properties) =
    new Listener[P] with WebSocketHandler {
      private def webSocket(authenticated: Either[Option[String], Any]) =
        WebSocket { request =>
          val uri = new URI(s"dummy://${request.host}")
          val host = uri.getHost
          val port = uri.getPort

          val certificates = request.clientCertificateChain.toSeq.flatten
          val isAuthenticated =
            authenticated.isRight ||
            compatibility.either.left(authenticated).nonEmpty ||
            (request.secure && certificates.nonEmpty)
          val isProtected = request.secure
          val isEncrypted = request.secure

          val ws = implicitly[WSProtocolFactory[P]] make (
            request.uri,
            Option(host),
            if (port < 0) None else Some(port),
            this, isAuthenticated, isEncrypted, isProtected,
            Some(Left(request)),
            authenticated.left.toOption.flatten toRight certificates)

          Future successful (ws match {
            case Failure(exception) =>
              connectionEstablished(Failure(exception))
              Left(Results.NotFound)

            case Success(ws) =>
              Right(WSPlayHandler handleWebSocket (
                Future successful ws, properties, connectionEstablished))
          })
        }

      def apply(authenticatedName: String) = webSocket(Left(Some(authenticatedName)))
      def apply(authenticatedName: Option[String]) = webSocket(Left(authenticatedName))
      def apply(request: RequestHeader) = request match {
        case request: AuthenticatedRequest[_, _] =>
          request.user match {
            case user: String =>
              webSocket(Left(Some(user)))(request)
            case user =>
              webSocket(Right(user))(request)
          }
        case _ =>
          webSocket(Left(None))(request)
      }

      private val connected = new ConcurrentLinkedQueue[Connected[P]]

      private def connectionEstablished(connection: Try[Connection[P]]) = {
        val iterator = connected.iterator
        while (iterator.hasNext)
          iterator.next().fire(connection)
      }

      protected def startListening(connectionEstablished: Connected[P]): Try[Listening] = {
        connected.add(connectionEstablished)

        Success(new Listening {
          def stopListening(): Unit = connected.remove(connectionEstablished)
        })
      }
    }
} 
Example 20
Source File: BigtableDoFnTest.scala    From scio   with Apache License 2.0
package com.spotify.scio.bigtable

import java.util.concurrent.ConcurrentLinkedQueue

import com.google.cloud.bigtable.grpc.BigtableSession
import com.google.common.cache.{Cache, CacheBuilder}
import com.google.common.util.concurrent.{Futures, ListenableFuture}
import com.spotify.scio.testing._
import com.spotify.scio.transforms.BaseAsyncLookupDoFn.CacheSupplier

import scala.jdk.CollectionConverters._
import scala.util.{Failure, Success}

class BigtableDoFnTest extends PipelineSpec {
  "BigtableDoFn" should "work" in {
    val fn = new TestBigtableDoFn
    val output = runWithData(1 to 10)(_.parDo(fn))
      .map(kv => (kv.getKey, kv.getValue.get()))
    output should contain theSameElementsAs (1 to 10).map(x => (x, x.toString))
  }

  it should "work with cache" in {
    val fn = new TestCachingBigtableDoFn
    val output = runWithData((1 to 10) ++ (6 to 15))(_.parDo(fn))
      .map(kv => (kv.getKey, kv.getValue.get()))
    output should contain theSameElementsAs ((1 to 10) ++ (6 to 15)).map(x => (x, x.toString))
    BigtableDoFnTest.queue.asScala.toSet should contain theSameElementsAs (1 to 15)
    BigtableDoFnTest.queue.size() should be <= 20
  }

  it should "work with failures" in {
    val fn = new TestFailingBigtableDoFn
    val output = runWithData(1 to 10)(_.parDo(fn)).map { kv =>
      val r = kv.getValue.asScala match {
        case Success(v) => v
        case Failure(e) => e.getMessage
      }
      (kv.getKey, r)
    }
    output should contain theSameElementsAs (1 to 10).map { x =>
      val prefix = if (x % 2 == 0) "success" else "failure"
      (x, prefix + x.toString)
    }
  }
}

object BigtableDoFnTest {
  val queue: ConcurrentLinkedQueue[Int] = new ConcurrentLinkedQueue[Int]()
}

class TestBigtableDoFn extends BigtableDoFn[Int, String](null) {
  override def newClient(): BigtableSession = null
  override def asyncLookup(session: BigtableSession, input: Int): ListenableFuture[String] =
    Futures.immediateFuture(input.toString)
}

class TestCachingBigtableDoFn extends BigtableDoFn[Int, String](null, 100, new TestCacheSupplier) {
  override def newClient(): BigtableSession = null
  override def asyncLookup(session: BigtableSession, input: Int): ListenableFuture[String] = {
    BigtableDoFnTest.queue.add(input)
    Futures.immediateFuture(input.toString)
  }
}

class TestFailingBigtableDoFn extends BigtableDoFn[Int, String](null) {
  override def newClient(): BigtableSession = null
  override def asyncLookup(session: BigtableSession, input: Int): ListenableFuture[String] =
    if (input % 2 == 0) {
      Futures.immediateFuture("success" + input)
    } else {
      Futures.immediateFailedFuture(new RuntimeException("failure" + input))
    }
}

class TestCacheSupplier extends CacheSupplier[Int, String, java.lang.Long] {
  override def createCache(): Cache[java.lang.Long, String] =
    CacheBuilder.newBuilder().build[java.lang.Long, String]()
  override def getKey(input: Int): java.lang.Long = input.toLong
} 
Example 21
Source File: MonitorMailbox.scala    From 006877   with MIT License
package aia.performance.monitor

import akka.actor.{ ActorRef, ActorSystem }
import akka.dispatch._
import scala.Some
import java.util.Queue
import com.typesafe.config.Config
import java.util.concurrent.ConcurrentLinkedQueue
import akka.dispatch.{ MailboxType, MessageQueue, UnboundedMessageQueueSemantics }
import akka.event.LoggerMessageQueueSemantics

case class MonitorEnvelope(queueSize: Int,
                           receiver: String,
                           entryTime: Long,
                           handle: Envelope)

case class MailboxStatistics(queueSize: Int,
                             receiver: String,
                             sender: String,
                             entryTime: Long,
                             exitTime: Long)

class MonitorQueue(val system: ActorSystem)
    extends MessageQueue
    with UnboundedMessageQueueSemantics
    with LoggerMessageQueueSemantics {
  private final val queue = new ConcurrentLinkedQueue[MonitorEnvelope]()

  def numberOfMessages = queue.size
  def hasMessages = !queue.isEmpty

  def cleanUp(owner: ActorRef, deadLetters: MessageQueue): Unit = {
    if (hasMessages) {
      var envelope = dequeue
      while (envelope ne null) {
        deadLetters.enqueue(owner, envelope)
        envelope = dequeue
      }
    }
  }

  def enqueue(receiver: ActorRef, handle: Envelope): Unit = {
    val env = MonitorEnvelope(queueSize = queue.size() + 1,
      receiver = receiver.toString(),
      entryTime = System.currentTimeMillis(),
      handle = handle)
    queue add env
  }

  def dequeue(): Envelope = {
    val monitor = queue.poll()
    if (monitor != null) {
      monitor.handle.message match {
        case _: MailboxStatistics => // skip message
        case _ => {
          val stat = MailboxStatistics(
            queueSize = monitor.queueSize,
            receiver = monitor.receiver,
            sender = monitor.handle.sender.toString(),
            entryTime = monitor.entryTime,
            exitTime = System.currentTimeMillis())
          system.eventStream.publish(stat)
        }
      }
      monitor.handle
    } else {
      null
    }
  }

}

class MonitorMailboxType(settings: ActorSystem.Settings, config: Config)
    extends MailboxType
    with ProducesMessageQueue[MonitorQueue] {

  final override def create(owner: Option[ActorRef],
                            system: Option[ActorSystem]): MessageQueue = {
    system match {
      case Some(sys) =>
        new MonitorQueue(sys)
      case _ =>
        throw new IllegalArgumentException("requires a system")
    }
  }
} 
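A mailbox like this is installed through configuration rather than code. The sketch below is a hedged example that wires MonitorQueue in as the default mailbox (the mailbox-type FQCN matches the package declared above) and subscribes an illustrative listener to the MailboxStatistics events published on the event stream.

import akka.actor.{Actor, ActorSystem, Props}
import com.typesafe.config.ConfigFactory

// Illustrative consumer of the statistics the mailbox publishes.
class StatsListener extends Actor {
  def receive: Receive = {
    case stat: MailboxStatistics =>
      println(s"${stat.receiver}: ${stat.exitTime - stat.entryTime} ms in queue " +
        s"(size ${stat.queueSize})")
  }
}

object MonitorMailboxDemo extends App {
  val config = ConfigFactory.parseString(
    """akka.actor.default-mailbox {
      |  mailbox-type = "aia.performance.monitor.MonitorMailboxType"
      |}""".stripMargin)

  val system = ActorSystem("monitored", config)
  system.eventStream.subscribe(
    system.actorOf(Props(classOf[StatsListener])), classOf[MailboxStatistics])
}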
Example 22
Source File: BufferingObserver.scala    From daml   with Apache License 2.0
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.grpc.adapter.utils

import java.util.concurrent.ConcurrentLinkedQueue
import java.util.concurrent.atomic.AtomicInteger

import io.grpc.stub.StreamObserver

import scala.concurrent.Promise

class BufferingObserver[T](limit: Option[Int] = None) extends StreamObserver[T] {
  private val promise = Promise[Vector[T]]()
  val buffer = new ConcurrentLinkedQueue[T]()
  val size = new AtomicInteger(0)
  def resultsF = promise.future

  override def onError(t: Throwable): Unit = promise.failure(t)

  override def onCompleted(): Unit = {
    val vec = Vector.newBuilder[T]
    buffer.forEach((e) => vec += e)
    promise.trySuccess(vec.result())
    ()
  }

  override def onNext(value: T): Unit = {
    size.updateAndGet(curr => {
      if (limit.fold(false)(_ <= curr)) {
        onCompleted()
        curr
      } else {
        buffer.add(value)
        curr + 1
      }
    })
    ()
  }
} 
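A minimal sketch of driving the observer by hand, without a real gRPC stream. With limit = Some(3), the first three values are buffered and the fourth onNext call completes the promise.

import scala.concurrent.Await
import scala.concurrent.duration._

object BufferingObserverDemo extends App {
  val observer = new BufferingObserver[Int](limit = Some(3))

  // Values 1..3 are buffered; the call with 4 trips the limit and completes
  // the promise, and the remaining calls are ignored.
  (1 to 10).foreach(observer.onNext)

  println(Await.result(observer.resultsF, 1.second)) // Vector(1, 2, 3)
}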
Example 23
Source File: TestOutputStream.scala    From drizzle-spark   with Apache License 2.0
package org.apache.spark.streaming

import java.io.{IOException, ObjectInputStream}
import java.util.concurrent.ConcurrentLinkedQueue

import scala.reflect.ClassTag

import org.apache.spark.rdd.RDD
import org.apache.spark.streaming.dstream.{DStream, ForEachDStream}
import org.apache.spark.util.Utils


class TestOutputStream[T: ClassTag](parent: DStream[T],
    val output: ConcurrentLinkedQueue[Seq[T]] = new ConcurrentLinkedQueue[Seq[T]]())
  extends ForEachDStream[T](parent, (rdd: RDD[T], t: Time) => {
    val collected = rdd.collect()
    output.add(collected)
  }, false) {

  // This is to clear the output buffer every time it is read from a checkpoint
  @throws(classOf[IOException])
  private def readObject(ois: ObjectInputStream): Unit = Utils.tryOrIOException {
    ois.defaultReadObject()
    output.clear()
  }
} 
Example 24
Source File: FlumeStreamSuite.scala    From drizzle-spark   with Apache License 2.0
package org.apache.spark.streaming.flume

import java.util.concurrent.ConcurrentLinkedQueue

import scala.collection.JavaConverters._
import scala.concurrent.duration._
import scala.language.postfixOps

import org.jboss.netty.channel.ChannelPipeline
import org.jboss.netty.channel.socket.SocketChannel
import org.jboss.netty.channel.socket.nio.NioClientSocketChannelFactory
import org.jboss.netty.handler.codec.compression._
import org.scalatest.{BeforeAndAfter, Matchers}
import org.scalatest.concurrent.Eventually._

import org.apache.spark.{SparkConf, SparkFunSuite}
import org.apache.spark.internal.Logging
import org.apache.spark.network.util.JavaUtils
import org.apache.spark.storage.StorageLevel
import org.apache.spark.streaming.{Milliseconds, StreamingContext, TestOutputStream}

class FlumeStreamSuite extends SparkFunSuite with BeforeAndAfter with Matchers with Logging {
  val conf = new SparkConf().setMaster("local[4]").setAppName("FlumeStreamSuite")
  var ssc: StreamingContext = null

  test("flume input stream") {
    testFlumeStream(testCompression = false)
  }

  test("flume input compressed stream") {
    testFlumeStream(testCompression = true)
  }

  
  private class CompressionChannelFactory(compressionLevel: Int)
    extends NioClientSocketChannelFactory {

    override def newChannel(pipeline: ChannelPipeline): SocketChannel = {
      val encoder = new ZlibEncoder(compressionLevel)
      pipeline.addFirst("deflater", encoder)
      pipeline.addFirst("inflater", new ZlibDecoder())
      super.newChannel(pipeline)
    }
  }
} 
Example 25
Source File: RecurringTimerSuite.scala    From drizzle-spark   with Apache License 2.0
package org.apache.spark.streaming.util

import java.util.concurrent.ConcurrentLinkedQueue

import scala.collection.JavaConverters._
import scala.concurrent.duration._

import org.scalatest.PrivateMethodTester
import org.scalatest.concurrent.Eventually._

import org.apache.spark.SparkFunSuite
import org.apache.spark.util.ManualClock

class RecurringTimerSuite extends SparkFunSuite with PrivateMethodTester {

  test("basic") {
    val clock = new ManualClock()
    val results = new ConcurrentLinkedQueue[Long]()
    val timer = new RecurringTimer(clock, 100, time => {
      results.add(time)
    }, "RecurringTimerSuite-basic")
    timer.start(0)
    eventually(timeout(10.seconds), interval(10.millis)) {
      assert(results.asScala.toSeq === Seq(0L))
    }
    clock.advance(100)
    eventually(timeout(10.seconds), interval(10.millis)) {
      assert(results.asScala.toSeq === Seq(0L, 100L))
    }
    clock.advance(200)
    eventually(timeout(10.seconds), interval(10.millis)) {
      assert(results.asScala.toSeq === Seq(0L, 100L, 200L, 300L))
    }
    assert(timer.stop(interruptTimer = true) === 300L)
  }

  test("SPARK-10224: call 'callback' after stopping") {
    val clock = new ManualClock()
    val results = new ConcurrentLinkedQueue[Long]
    val timer = new RecurringTimer(clock, 100, time => {
      results.add(time)
    }, "RecurringTimerSuite-SPARK-10224")
    timer.start(0)
    eventually(timeout(10.seconds), interval(10.millis)) {
      assert(results.asScala.toSeq === Seq(0L))
    }
    @volatile var lastTime = -1L
    // Now RecurringTimer is waiting for the next interval
    val thread = new Thread {
      override def run(): Unit = {
        lastTime = timer.stop(interruptTimer = false)
      }
    }
    thread.start()
    val stopped = PrivateMethod[RecurringTimer]('stopped)
    // Make sure the `stopped` field has been changed
    eventually(timeout(10.seconds), interval(10.millis)) {
      assert(timer.invokePrivate(stopped()) === true)
    }
    clock.advance(200)
    // When RecurringTimer wakes up from clock.waitTillTime, it will call `callback` once.
    // Then it will find `stopped` is true and exit the loop, but it should call `callback` again
    // before exiting its internal thread.
    thread.join()
    assert(results.asScala.toSeq === Seq(0L, 100L, 200L))
    assert(lastTime === 200L)
  }
} 
Example 26
Source File: Schedulable.scala    From drizzle-spark   with Apache License 2.0
package org.apache.spark.scheduler

import java.util.concurrent.ConcurrentLinkedQueue

import scala.collection.mutable.ArrayBuffer

import org.apache.spark.scheduler.SchedulingMode.SchedulingMode


private[spark] trait Schedulable {
  var parent: Pool
  // child queues
  def schedulableQueue: ConcurrentLinkedQueue[Schedulable]
  def schedulingMode: SchedulingMode
  def weight: Int
  def minShare: Int
  def runningTasks: Int
  def priority: Int
  def stageId: Int
  def name: String

  def addSchedulable(schedulable: Schedulable): Unit
  def removeSchedulable(schedulable: Schedulable): Unit
  def getSchedulableByName(name: String): Schedulable
  def executorLost(executorId: String, host: String, reason: ExecutorLossReason): Unit
  def checkSpeculatableTasks(minTimeToSpeculation: Int): Boolean
  def getSortedTaskSetQueue: ArrayBuffer[TaskSetManager]
} 
Example 27
Source File: WsConnection.scala    From matcher   with MIT License
package com.wavesplatform.dex.api.ws.connection

import java.util.concurrent.ConcurrentLinkedQueue

import akka.Done
import akka.actor.{ActorRef, ActorSystem, Status}
import akka.http.scaladsl.Http
import akka.http.scaladsl.model.ws.{BinaryMessage, Message, TextMessage, WebSocketRequest}
import akka.stream.scaladsl.{Flow, Sink, Source}
import akka.stream.{CompletionStrategy, Materializer, OverflowStrategy}
import com.wavesplatform.dex.api.ws.protocol.{WsClientMessage, WsMessage, WsPingOrPong, WsServerMessage}
import com.wavesplatform.dex.domain.utils.ScorexLogging
import play.api.libs.json.Json

import scala.collection.JavaConverters._
import scala.concurrent.Future
import scala.concurrent.duration._
import scala.util.{Failure, Success, Try}

class WsConnection(uri: String, keepAlive: Boolean = true)(implicit system: ActorSystem, materializer: Materializer) extends ScorexLogging {

  log.info(s"""Connecting to Matcher WS API:
            |         URI = $uri
            |  Keep alive = $keepAlive""".stripMargin)

  import materializer.executionContext

  private val wsHandlerRef = system.actorOf(TestWsHandlerActor props keepAlive)

  protected def stringifyClientMessage(cm: WsClientMessage): TextMessage.Strict =
    WsMessage.toStrictTextMessage(cm)(WsClientMessage.wsClientMessageWrites)

  // From test to server
  private val source: Source[TextMessage.Strict, ActorRef] = {
    val completionMatcher: PartialFunction[Any, CompletionStrategy] = { case akka.actor.Status.Success(_) => CompletionStrategy.draining }
    val failureMatcher: PartialFunction[Any, Throwable]             = { case Status.Failure(cause)        => cause }

    Source
      .actorRef[WsClientMessage](completionMatcher, failureMatcher, 10, OverflowStrategy.fail)
      .map(stringifyClientMessage)
      .mapMaterializedValue { source =>
        wsHandlerRef.tell(TestWsHandlerActor.AssignSourceRef, source)
        source
      }
  }

  private val messagesBuffer: ConcurrentLinkedQueue[WsServerMessage] = new ConcurrentLinkedQueue[WsServerMessage]()

  // From server to test
  private val sink: Sink[Message, Future[Done]] = Sink.foreach {
    case tm: TextMessage =>
      for {
        strictText <- tm.toStrict(1.second).map(_.getStrictText)
        clientMessage <- {
          log.trace(s"Got $strictText")
          Try { Json.parse(strictText).as[WsServerMessage] } match {
            case Failure(exception) => Future.failed(exception)
            case Success(x) => {
              messagesBuffer.add(x)
              if (keepAlive) x match {
                case value: WsPingOrPong => wsHandlerRef ! value
                case _                   =>
              }
              Future.successful(x)
            }
          }
        }
      } yield clientMessage

    case bm: BinaryMessage =>
      bm.dataStream.runWith(Sink.ignore)
      Future.failed { new IllegalArgumentException("Binary messages are not supported") }
  }

  private val flow: Flow[Message, TextMessage.Strict, Future[Done]] = Flow.fromSinkAndSourceCoupled(sink, source).watchTermination() {
    case (_, f) =>
      f.onComplete {
        case Success(_) => log.info(s"WebSocket connection to $uri successfully closed")
        case Failure(e) => log.error(s"WebSocket connection to $uri closed with an error", e)
      }(materializer.executionContext)
      f
  }

  val (connectionResponse, closed) = Http().singleWebSocketRequest(WebSocketRequest(uri), flow)

  val connectionOpenedTs: Long                   = System.currentTimeMillis
  val connectionClosedTs: Future[Long]           = closed.map(_ => System.currentTimeMillis)
  val connectionLifetime: Future[FiniteDuration] = connectionClosedTs.map(cc => FiniteDuration(cc - connectionOpenedTs, MILLISECONDS))

  def messages: List[WsServerMessage] = messagesBuffer.iterator().asScala.toList
  def clearMessages(): Unit           = messagesBuffer.clear()

  def send(message: WsClientMessage): Unit = wsHandlerRef ! TestWsHandlerActor.SendToServer(message)

  def close(): Unit     = if (!isClosed) wsHandlerRef ! TestWsHandlerActor.CloseConnection
  def isClosed: Boolean = closed.isCompleted
} 
Example 28
Source File: LocalQueueStore.scala    From matcher   with MIT License
package com.wavesplatform.dex.db

import java.util.concurrent.ConcurrentLinkedQueue
import java.util.concurrent.atomic.AtomicLong

import com.google.common.primitives.{Longs, Shorts}
import com.wavesplatform.dex.db.DbKeys._
import com.wavesplatform.dex.db.leveldb.{DBExt, ReadOnlyDB}
import com.wavesplatform.dex.queue.{QueueEvent, QueueEventWithMeta}
import org.iq80.leveldb.{DB, ReadOptions}

class LocalQueueStore(db: DB) {

  private val newestIdx        = new AtomicLong(db.get(lqNewestIdx))
  private val inMemQueue       = new ConcurrentLinkedQueue[QueueEventWithMeta]
  private var startInMemOffset = Option.empty[QueueEventWithMeta.Offset]

  def enqueue(event: QueueEvent, timestamp: Long): QueueEventWithMeta.Offset = {
    val offset   = newestIdx.incrementAndGet()
    val eventKey = lpqElement(offset)

    val x = QueueEventWithMeta(offset, timestamp, event)
    db.readWrite { rw =>
      rw.put(eventKey, Some(x))
      rw.put(lqNewestIdx, offset)
    }

    inMemQueue.add(x)
    if (startInMemOffset.isEmpty) startInMemOffset = Some(offset)
    offset
  }

  def getFrom(offset: QueueEventWithMeta.Offset, maxElements: Int): Vector[QueueEventWithMeta] = {
    if (startInMemOffset.exists(_ <= offset)) {
      if (inMemQueue.isEmpty) Vector.empty
      else {
        val xs    = Vector.newBuilder[QueueEventWithMeta]
        var added = 0

        while (!inMemQueue.isEmpty && added < maxElements) Option(inMemQueue.poll()).foreach { x =>
          xs += x
          added += 1
        }

        xs.result()
      }
    } else
      new ReadOnlyDB(db, new ReadOptions())
        .read(LqElementKeyName, LqElementPrefixBytes, lpqElement(math.max(offset, 0)).keyBytes, Int.MaxValue) { e =>
          val offset = Longs.fromByteArray(e.getKey.slice(Shorts.BYTES, Shorts.BYTES + Longs.BYTES))
          lpqElement(offset).parse(e.getValue).getOrElse(throw new RuntimeException(s"Can't find a queue event at $offset"))
        }
  }

  def newestOffset: Option[QueueEventWithMeta.Offset] = {
    val idx      = newestIdx.get()
    val eventKey = lpqElement(idx)
    eventKey.parse(db.get(eventKey.keyBytes)).map(_.offset)
  }

  def dropUntil(offset: QueueEventWithMeta.Offset): Unit = db.readWrite { rw =>
    val oldestIdx = math.max(db.get(lqOldestIdx), 0)
    (oldestIdx until offset).foreach { offset =>
      rw.delete(lpqElement(offset))
    }
    rw.put(lqOldestIdx, offset)
  }

} 
Example 29
Source File: SingletonMemorySink.scala    From milan   with Apache License 2.0
package com.amazon.milan.application.sinks

import java.time.{Duration, Instant}
import java.util.concurrent.{ConcurrentHashMap, ConcurrentLinkedQueue}
import java.util.function

import com.amazon.milan.Id
import com.amazon.milan.application.DataSink
import com.amazon.milan.typeutil.TypeDescriptor
import com.fasterxml.jackson.annotation.JsonIgnore
import com.fasterxml.jackson.databind.annotation.{JsonDeserialize, JsonSerialize}

import scala.collection.mutable
import scala.collection.mutable.ArrayBuffer
import scala.concurrent.TimeoutException


object SingletonMemorySink {
  private val values = new ConcurrentHashMap[String, ArrayBuffer[MemorySinkRecord[_]]]()
  private val nextSeqNum = new mutable.HashMap[String, Int]()
  private val locks = new ConcurrentHashMap[String, Object]()

  private def makeCreateBufferFunction[T]: java.util.function.Function[String, ArrayBuffer[MemorySinkRecord[_]]] =
    new function.Function[String, ArrayBuffer[MemorySinkRecord[_]]] {
      override def apply(t: String): ArrayBuffer[MemorySinkRecord[_]] =
        (new ArrayBuffer[MemorySinkRecord[T]]()).asInstanceOf[ArrayBuffer[MemorySinkRecord[_]]]
    }

  private val createLocker = new java.util.function.Function[String, Object] {
    override def apply(t: String): AnyRef = new Object()
  }

  private def getBuffer[T](sinkId: String): ArrayBuffer[MemorySinkRecord[T]] =
    values
      .computeIfAbsent(sinkId, makeCreateBufferFunction[T])
      .asInstanceOf[ArrayBuffer[MemorySinkRecord[T]]]
}


// The class declaration below is reconstructed: the original header was lost in
// extraction, so the exact signature in the Milan sources may differ, and any
// DataSink[T] members beyond those shown here are elided.
class SingletonMemorySink[T](val sinkId: String) extends DataSink[T] {
  @JsonIgnore
  def getRecordCount: Int = SingletonMemorySink.getBuffer(this.sinkId).size

  @JsonIgnore
  def getValues: List[T] = {
    SingletonMemorySink.getBuffer[T](this.sinkId).map(_.value).toList
  }

  @JsonIgnore
  def getRecords: List[MemorySinkRecord[T]] = {
    SingletonMemorySink.getBuffer[T](this.sinkId).toList
  }

  def waitForItems(itemCount: Int, timeout: Duration = null): Unit = {
    val endTime = if (timeout == null) Instant.MAX else Instant.now().plus(timeout)

    while (SingletonMemorySink.getBuffer(this.sinkId).size < itemCount) {
      if (Instant.now().isAfter(endTime)) {
        throw new TimeoutException()
      }

      Thread.sleep(1)
    }
  }

  override def equals(obj: Any): Boolean = {
    obj match {
      case o: SingletonMemorySink[_] =>
        this.sinkId.equals(o.sinkId)

      case _ =>
        false
    }
  }
}


class MemorySinkRecord[T](val seqNum: String, val createdTime: Instant, val value: T) extends Serializable 
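Since the class header above had to be reconstructed, the following read-side sketch is indicative rather than authoritative; it exercises only the methods visible in this excerpt and assumes records are appended elsewhere (the sink's add path is not shown).

val sink = new SingletonMemorySink[String]("my-sink-id") // constructor shape assumed
sink.waitForItems(3, Duration.ofSeconds(10))             // throws TimeoutException on timeout
val values: List[String] = sink.getValues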
Example 32
Source File: ApolloTracingExtension.scala    From sangria-slowlog   with Apache License 2.0 5 votes vote down vote up
package sangria.slowlog

import java.time.Instant
import java.time.format.DateTimeFormatter
import java.util.concurrent.ConcurrentLinkedQueue

import sangria.ast._
import sangria.execution._
import sangria.schema.Context
import sangria.marshalling.queryAst._
import sangria.renderer.SchemaRenderer

import scala.collection.JavaConverters._

object ApolloTracingExtension extends Middleware[Any] with MiddlewareExtension[Any] with MiddlewareAfterField[Any] with MiddlewareErrorField[Any] {
  type QueryVal = QueryTrace
  type FieldVal = Long

  def beforeQuery(context: MiddlewareQueryContext[Any, _, _]) =
    QueryTrace(Instant.now(), System.nanoTime(), new ConcurrentLinkedQueue)

  def afterQuery(queryVal: QueryVal, context: MiddlewareQueryContext[Any, _, _]) = ()

  def beforeField(queryVal: QueryVal, mctx: MiddlewareQueryContext[Any, _, _], ctx: Context[Any, _]) =
    continue(System.nanoTime())

  def afterField(queryVal: QueryVal, fieldVal: FieldVal, value: Any, mctx: MiddlewareQueryContext[Any, _, _], ctx: Context[Any, _]) = {
    updateMetric(queryVal, fieldVal, ctx)
    None
  }

  def fieldError(queryVal: QueryVal, fieldVal: FieldVal, error: Throwable, mctx: MiddlewareQueryContext[Any, _, _], ctx: Context[Any, _]) =
    updateMetric(queryVal, fieldVal, ctx)

  def updateMetric(queryVal: QueryVal, fieldVal: FieldVal, ctx: Context[Any, _]): Unit =
    queryVal.fieldData.add(ObjectValue(
      "path" -> ListValue(ctx.path.path.map(queryAstResultMarshaller.scalarNode(_, "Any", Set.empty))),
      "parentType" -> StringValue(ctx.parentType.name),
      "fieldName" -> StringValue(ctx.field.name),
      "returnType" -> StringValue(SchemaRenderer.renderTypeName(ctx.field.fieldType)),
      "startOffset" -> BigIntValue(fieldVal - queryVal.startNanos),
      "duration" -> BigIntValue(System.nanoTime() - fieldVal)))

  def afterQueryExtensions(queryVal: QueryVal, context: MiddlewareQueryContext[Any, _, _]): Vector[Extension[_]] =
    Vector(Extension(ObjectValue(
      "tracing" -> ObjectValue(
        "version" -> IntValue(1),
        "startTime" -> StringValue(DateTimeFormatter.ISO_INSTANT.format(queryVal.startTime)),
        "endTime" -> StringValue(DateTimeFormatter.ISO_INSTANT.format(Instant.now())),
        "duration" -> BigIntValue(System.nanoTime() - queryVal.startNanos),
        "execution" -> ObjectValue(
          "resolvers" -> ListValue(queryVal.fieldData.asScala.toVector)))): Value))

  case class QueryTrace(startTime: Instant, startNanos: Long, fieldData: ConcurrentLinkedQueue[Value])
} 
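To see where this middleware plugs in: sangria's Executor accepts a list of middleware, so a minimal sketch looks like the following. The `schema`, `query` (a parsed Document), and any user context are assumed to be in scope.

import scala.concurrent.ExecutionContext.Implicits.global
import sangria.execution.Executor

val result = Executor.execute(
  schema,
  query,
  middleware = ApolloTracingExtension :: Nil // response gains an "extensions.tracing" block
)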
Example 33
Source File: PingPongSuite.scala    From lsp4s   with Apache License 2.0 5 votes vote down vote up
package tests

import java.util.concurrent.ConcurrentLinkedQueue
import minitest.SimpleTestSuite
import monix.execution.Scheduler.Implicits.global
import scala.collection.JavaConverters._
import scala.concurrent.Promise
import scala.meta.jsonrpc._
import scala.meta.jsonrpc.testkit._
import scribe.Logger


object PingPongSuite extends SimpleTestSuite {

  private val Ping = Endpoint.notification[String]("ping")
  private val Pong = Endpoint.notification[String]("pong")
  private val Hello = Endpoint.request[String, String]("hello")

  testAsync("ping pong") {
    val promise = Promise[Unit]()
    val pongs = new ConcurrentLinkedQueue[String]()
    val services = Services
      .empty(Logger.root)
      .request(Hello) { msg =>
        s"$msg, World!"
      }
      .notification(Pong) { message =>
        assert(pongs.add(message))
        if (pongs.size() == 2) {
          promise.complete(util.Success(()))
        }
      }
    val pongBack: LanguageClient => Services = { client =>
      services.notification(Ping) { message =>
        Pong.notify(message.replace("Ping", "Pong"))(client)
      }
    }
    val conn = TestConnection(pongBack, pongBack)
    for {
      _ <- Ping.notify("Ping from client")(conn.alice.client)
      _ <- Ping.notify("Ping from server")(conn.bob.client)
      Right(helloWorld) <- Hello.request("Hello")(conn.alice.client).runAsync
      _ <- promise.future
    } yield {
      assertEquals(helloWorld, "Hello, World!")
      val obtainedPongs = pongs.asScala.toList.sorted
      val expectedPongs = List("Pong from client", "Pong from server")
      assertEquals(obtainedPongs, expectedPongs)
      conn.cancel()
    }
  }

} 
Example 34
Source File: RowQueue.scala    From ohara   with Apache License 2.0 5 votes vote down vote up
package oharastream.ohara.shabondi.sink

import java.time.{Duration => JDuration}
import java.util.concurrent.ConcurrentLinkedQueue
import java.util.concurrent.atomic.AtomicLong

import oharastream.ohara.common.data.Row

private[sink] class RowQueue extends ConcurrentLinkedQueue[Row] {
  private[sink] val lastTime = new AtomicLong(System.currentTimeMillis())

  override def poll(): Row =
    try {
      super.poll()
    } finally {
      lastTime.set(System.currentTimeMillis())
    }

  def isIdle(idleTime: JDuration): Boolean =
    System.currentTimeMillis() > (idleTime.toMillis + lastTime.get())
} 
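Note that poll() refreshes lastTime on every call, even when it returns null, so isIdle really means "no consumer has polled within the window", not "the queue is empty". A small sketch with an illustrative 30-second threshold:

val queue = new RowQueue
Option(queue.poll()).foreach(row => println(row)) // null-safe drain of one element
if (queue.isIdle(JDuration.ofSeconds(30))) {
  // no poll() in the last 30 seconds: e.g. release per-client resources
}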
Example 35
Source File: AsynchronousLogHandler.scala    From scribe   with MIT License 5 votes vote down vote up
package scribe.handler

import java.util.concurrent.ConcurrentLinkedQueue
import java.util.concurrent.atomic.AtomicLong

import scribe.LogRecord
import scribe.format.Formatter
import scribe.modify.LogModifier
import scribe.writer.{ConsoleWriter, Writer}
import perfolation._

import scala.language.implicitConversions

case class AsynchronousLogHandler(formatter: Formatter = Formatter.default,
                                  writer: Writer = ConsoleWriter,
                                  modifiers: List[LogModifier] = Nil,
                                  maxBuffer: Int = AsynchronousLogHandler.DefaultMaxBuffer,
                                  overflow: Overflow = Overflow.DropOld) extends LogHandler {
  private lazy val cached = new AtomicLong(0L)

  private lazy val queue = {
    val q = new ConcurrentLinkedQueue[LogRecord[_]]
    val t = new Thread {
      setDaemon(true)

      override def run(): Unit = while (true) {
        Option(q.poll()) match {
          case Some(record) => {
            cached.decrementAndGet()
            SynchronousLogHandler.log(AsynchronousLogHandler.this, record)
            Thread.sleep(1L)
          }
          case None => Thread.sleep(10L)
        }
      }
    }
    t.start()
    q
  }

  def withMaxBuffer(maxBuffer: Int): AsynchronousLogHandler = copy(maxBuffer = maxBuffer)

  def withOverflow(overflow: Overflow): AsynchronousLogHandler = copy(overflow = overflow)

  override def withFormatter(formatter: Formatter): AsynchronousLogHandler = copy(formatter = formatter)

  override def withWriter(writer: Writer): AsynchronousLogHandler = copy(writer = writer)

  override def setModifiers(modifiers: List[LogModifier]): AsynchronousLogHandler = copy(modifiers = modifiers)

  override def log[M](record: LogRecord[M]): Unit = {
    val add = if (!cached.incrementIfLessThan(maxBuffer)) {
      overflow match {
        case Overflow.DropOld => {
          queue.poll()
          true
        }
        case Overflow.DropNew => false
        case Overflow.Block => {
          while(!cached.incrementIfLessThan(maxBuffer)) {
            Thread.sleep(1L)
          }
          true
        }
        case Overflow.Error => throw new LogOverflowException(p"Queue filled (max: $maxBuffer) while attempting to asynchronously log")
      }
    } else {
      true
    }
    if (add) {
      queue.add(record)
    }
  }
}

object AsynchronousLogHandler {
  val DefaultMaxBuffer: Int = 1000
} 
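Installing the handler is a one-liner. The sketch below assumes scribe's Logger.withHandler overload that takes a LogHandler instance, and the parameter values are illustrative:

import scribe.Logger

val logger = Logger.root.withHandler(
  AsynchronousLogHandler(maxBuffer = 10000, overflow = Overflow.DropOld)
)
logger.info("written off the caller's thread")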
Example 36
Source File: AsynchronousLoggingSpec.scala    From scribe   with MIT License 5 votes vote down vote up
package spec

import java.io.File
import java.util.concurrent.ConcurrentLinkedQueue

import org.scalatest.matchers.should.Matchers
import org.scalatest.wordspec.AsyncWordSpec
import perfolation._
import scribe.{LogRecord, Logger}
import scribe.format._
import scribe.output.LogOutput
import scribe.writer.{FileWriter, Writer}

import scala.collection.JavaConverters._
import scala.concurrent.Future
import scala.io.Source

class AsynchronousLoggingSpec extends AsyncWordSpec with Matchers {
  private val Regex = """(\d+) - (.+)""".r
  private val threads = "abcdefghijklmnopqrstuvwxyz"
  private val iterations = 10
  private val total = threads.length * iterations

  "Asynchronous Logging" should {
    s"log $total records in the proper order with simple logging" in {
      val queue = new ConcurrentLinkedQueue[String]
      val logger = Logger.empty.orphan().withHandler(
        formatter = AsynchronousLoggingSpec.format,
        writer = new Writer {
          override def write[M](record: LogRecord[M], output: LogOutput): Unit = queue.add(output.plainText.trim)
        }
      )

      Future.sequence(threads.map { char =>
        Future {
          (0 until iterations).foreach { index =>
            logger.info(p"$char:$index")
          }
        }
      }).map { _ =>
        var previous = 0L
        queue.iterator().asScala.foreach {
          case Regex(ts, _) => {
            val timeStamp = ts.toLong
            timeStamp should be >= previous
            previous = timeStamp
          }
        }
        queue.size() should be(total)
      }
    }
    s"log $total records in the proper order with file logging" in {
      val file = new File("logs/app.log")
      file.delete()

      val fileWriter = FileWriter().nio
      val logger = Logger.empty.orphan().withHandler(
        formatter = AsynchronousLoggingSpec.format,
        writer = fileWriter
      )

      Future.sequence(threads.map { char =>
        Future {
          (0 until iterations).foreach { index =>
            logger.info(p"$char:$index")
          }
        }
      }).map { _ =>
        var previous = 0L
        fileWriter.flush()
        fileWriter.dispose()
        val lines = Source.fromFile(file).getLines().toList
        lines.foreach {
          case Regex(ts, message) => {
            val timeStamp = ts.toLong
            timeStamp should be >= previous
            previous = timeStamp
          }
        }
        lines.length should be(threads.length * iterations)
      }
    }
  }
}

object AsynchronousLoggingSpec {
  val format = formatter"$timeStamp - $message"
} 
Example 37
Source File: SessionSpec.scala    From incubator-livy   with Apache License 2.0 5 votes vote down vote up
package org.apache.livy.repl

import java.util.Properties
import java.util.concurrent.{ConcurrentLinkedQueue, CountDownLatch, TimeUnit}

import org.apache.spark.SparkConf
import org.scalatest.{BeforeAndAfter, FunSpec}
import org.scalatest.Matchers._
import org.scalatest.concurrent.Eventually
import org.scalatest.time._

import org.apache.livy.LivyBaseUnitTestSuite
import org.apache.livy.repl.Interpreter.ExecuteResponse
import org.apache.livy.rsc.RSCConf
import org.apache.livy.sessions._

class SessionSpec extends FunSpec with Eventually with LivyBaseUnitTestSuite with BeforeAndAfter {
  override implicit val patienceConfig =
    PatienceConfig(timeout = scaled(Span(30, Seconds)), interval = scaled(Span(100, Millis)))

  private val rscConf = new RSCConf(new Properties()).set(RSCConf.Entry.SESSION_KIND, "spark")

  describe("Session") {
    var session: Session = null

    after {
      if (session != null) {
        session.close()
        session = null
      }
    }

    it("should call state changed callbacks in happy path") {
      val expectedStateTransitions =
        Array("not_started", "starting", "idle", "busy", "idle", "busy", "idle")
      val actualStateTransitions = new ConcurrentLinkedQueue[String]()

      session = new Session(rscConf, new SparkConf(), None,
        { s => actualStateTransitions.add(s.toString) })
      session.start()
      session.execute("")

      eventually {
        actualStateTransitions.toArray shouldBe expectedStateTransitions
      }
    }

    it("should not transit to idle if there're any pending statements.") {
      val expectedStateTransitions =
        Array("not_started", "starting", "idle", "busy", "busy", "busy", "idle", "busy", "idle")
      val actualStateTransitions = new ConcurrentLinkedQueue[String]()

      val blockFirstExecuteCall = new CountDownLatch(1)
      val interpreter = new SparkInterpreter(new SparkConf()) {
        override def execute(code: String): ExecuteResponse = {
          blockFirstExecuteCall.await(10, TimeUnit.SECONDS)
          super.execute(code)
        }
      }
      session = new Session(rscConf, new SparkConf(), Some(interpreter),
        { s => actualStateTransitions.add(s.toString) })
      session.start()

      for (_ <- 1 to 2) {
        session.execute("")
      }

      blockFirstExecuteCall.countDown()
      eventually {
        actualStateTransitions.toArray shouldBe expectedStateTransitions
      }
    }

    it("should remove old statements when reaching threshold") {
      rscConf.set(RSCConf.Entry.RETAINED_STATEMENTS, 2)
      session = new Session(rscConf, new SparkConf())
      session.start()

      session.statements.size should be (0)
      session.execute("")
      session.statements.size should be (1)
      session.statements.map(_._1).toSet should be (Set(0))
      session.execute("")
      session.statements.size should be (2)
      session.statements.map(_._1).toSet should be (Set(0, 1))
      session.execute("")
      eventually {
        session.statements.size should be (2)
        session.statements.map(_._1).toSet should be (Set(1, 2))
      }

      // Continue submitting statements, total statements in memory should be 2.
      session.execute("")
      eventually {
        session.statements.size should be (2)
        session.statements.map(_._1).toSet should be (Set(2, 3))
      }
    }
  }
} 
Example 38
Source File: CustomMailbox.scala    From Akka-Cookbook   with MIT License 5 votes vote down vote up
package com.packt.chapter1

import java.util.concurrent.ConcurrentLinkedQueue

import akka.actor.{Props, Actor, ActorSystem, ActorRef}

import akka.dispatch.{MailboxType, ProducesMessageQueue, Envelope, MessageQueue}

import com.typesafe.config.Config


object CustomMailbox extends App  {

  val actorSystem = ActorSystem("HelloAkka")
  val actor = actorSystem.actorOf(Props[MySpecialActor].withDispatcher("custom-dispatcher"))
  val actor1 = actorSystem.actorOf(Props[MyActor],"xyz")

  val actor2 = actorSystem.actorOf(Props[MyActor],"MyActor")

  actor1 ! ("hello", actor)
  actor2 ! ("hello", actor)
  
}

class MySpecialActor extends Actor {
  override def receive: Receive = {
    case msg: String => println(s"msg is $msg" )
  }
}
class MyActor extends Actor {
  override def receive: Receive = {
    case (msg: String, actorRef: ActorRef) => actorRef ! msg
    case msg => println(msg)
  }
}



trait MyUnboundedMessageQueueSemantics

// This is the MessageQueue implementation
class MyMessageQueue extends MessageQueue {

  private final val queue = new ConcurrentLinkedQueue[Envelope]()

  // these should be implemented; queue used as example
  def enqueue(receiver: ActorRef, handle: Envelope): Unit = {
    if (handle.sender.path.name == "MyActor") {
      handle.sender ! "Hey dude, how are you? I know your name, processing your request"
      queue.offer(handle)
    } else {
      handle.sender ! "I don't talk to strangers, I can't process your request"
    }
  }

  def dequeue(): Envelope = queue.poll()
  def numberOfMessages: Int = queue.size
  def hasMessages: Boolean = !queue.isEmpty

  def cleanUp(owner: ActorRef, deadLetters: MessageQueue): Unit = {
    while (hasMessages) {
      deadLetters.enqueue(owner, dequeue())
    }
  }
}

class MyUnboundedMailbox extends MailboxType
  with ProducesMessageQueue[MyMessageQueue] {

  // This constructor signature must exist, it will be called by Akka
  def this(settings: ActorSystem.Settings, config: Config) = {
    // put your initialization code here
    this()
  }

  // The create method is called to create the MessageQueue
  final override def create(owner: Option[ActorRef],
                            system: Option[ActorSystem]): MessageQueue =
    new MyMessageQueue()
} 
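For the withDispatcher("custom-dispatcher") lookup above to succeed, the dispatcher and mailbox must be declared in configuration. Below is a sketch of the expected application.conf wiring, parsed inline; the key names follow Akka's mailbox documentation and may need adjusting to your setup.

import com.typesafe.config.ConfigFactory

val customConf = ConfigFactory.parseString(
  """
    |custom-dispatcher {
    |  mailbox-requirement = "com.packt.chapter1.MyUnboundedMessageQueueSemantics"
    |}
    |akka.actor.mailbox.requirements {
    |  "com.packt.chapter1.MyUnboundedMessageQueueSemantics" = custom-dispatcher-mailbox
    |}
    |custom-dispatcher-mailbox {
    |  mailbox-type = "com.packt.chapter1.MyUnboundedMailbox"
    |}
  """.stripMargin)

val system = ActorSystem("HelloAkka", customConf.withFallback(ConfigFactory.load()))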
Example 39
Source File: TestOutputStream.scala    From sparkoscope   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.streaming

import java.io.{IOException, ObjectInputStream}
import java.util.concurrent.ConcurrentLinkedQueue

import scala.reflect.ClassTag

import org.apache.spark.rdd.RDD
import org.apache.spark.streaming.dstream.{DStream, ForEachDStream}
import org.apache.spark.util.Utils


class TestOutputStream[T: ClassTag](parent: DStream[T],
    val output: ConcurrentLinkedQueue[Seq[T]] = new ConcurrentLinkedQueue[Seq[T]]())
  extends ForEachDStream[T](parent, (rdd: RDD[T], t: Time) => {
    val collected = rdd.collect()
    output.add(collected)
  }, false) {

  // This is to clear the output buffer every time it is read from a checkpoint
  @throws(classOf[IOException])
  private def readObject(ois: ObjectInputStream): Unit = Utils.tryOrIOException {
    ois.defaultReadObject()
    output.clear()
  }
} 
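In a streaming test this stream is typically attached with the standard DStream.register() call. The sketch assumes a StreamingContext `ssc` and a DStream[String] `input` already exist:

val outputQueue  = new ConcurrentLinkedQueue[Seq[String]]()
val outputStream = new TestOutputStream(input, outputQueue)
outputStream.register() // add it to the DStream graph as an output
ssc.start()
// ...feed data, then assert on outputQueue contents (e.g. via .asScala)...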
Example 40
Source File: FlumeStreamSuite.scala    From sparkoscope   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.streaming.flume

import java.util.concurrent.ConcurrentLinkedQueue

import scala.collection.JavaConverters._
import scala.concurrent.duration._
import scala.language.postfixOps

import org.jboss.netty.channel.ChannelPipeline
import org.jboss.netty.channel.socket.SocketChannel
import org.jboss.netty.channel.socket.nio.NioClientSocketChannelFactory
import org.jboss.netty.handler.codec.compression._
import org.scalatest.{BeforeAndAfter, Matchers}
import org.scalatest.concurrent.Eventually._

import org.apache.spark.{SparkConf, SparkFunSuite}
import org.apache.spark.internal.Logging
import org.apache.spark.network.util.JavaUtils
import org.apache.spark.storage.StorageLevel
import org.apache.spark.streaming.{Milliseconds, StreamingContext, TestOutputStream}

class FlumeStreamSuite extends SparkFunSuite with BeforeAndAfter with Matchers with Logging {
  val conf = new SparkConf().setMaster("local[4]").setAppName("FlumeStreamSuite")
  var ssc: StreamingContext = null

  test("flume input stream") {
    testFlumeStream(testCompression = false)
  }

  test("flume input compressed stream") {
    testFlumeStream(testCompression = true)
  }

  // (the testFlumeStream helper invoked above is omitted from this excerpt)
  private class CompressionChannelFactory(compressionLevel: Int)
    extends NioClientSocketChannelFactory {

    override def newChannel(pipeline: ChannelPipeline): SocketChannel = {
      val encoder = new ZlibEncoder(compressionLevel)
      pipeline.addFirst("deflater", encoder)
      pipeline.addFirst("inflater", new ZlibDecoder())
      super.newChannel(pipeline)
    }
  }
} 
Example 41
Source File: RecurringTimerSuite.scala    From sparkoscope   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.streaming.util

import java.util.concurrent.ConcurrentLinkedQueue

import scala.collection.JavaConverters._
import scala.concurrent.duration._

import org.scalatest.PrivateMethodTester
import org.scalatest.concurrent.Eventually._

import org.apache.spark.SparkFunSuite
import org.apache.spark.util.ManualClock

class RecurringTimerSuite extends SparkFunSuite with PrivateMethodTester {

  test("basic") {
    val clock = new ManualClock()
    val results = new ConcurrentLinkedQueue[Long]()
    val timer = new RecurringTimer(clock, 100, time => {
      results.add(time)
    }, "RecurringTimerSuite-basic")
    timer.start(0)
    eventually(timeout(10.seconds), interval(10.millis)) {
      assert(results.asScala.toSeq === Seq(0L))
    }
    clock.advance(100)
    eventually(timeout(10.seconds), interval(10.millis)) {
      assert(results.asScala.toSeq === Seq(0L, 100L))
    }
    clock.advance(200)
    eventually(timeout(10.seconds), interval(10.millis)) {
      assert(results.asScala.toSeq === Seq(0L, 100L, 200L, 300L))
    }
    assert(timer.stop(interruptTimer = true) === 300L)
  }

  test("SPARK-10224: call 'callback' after stopping") {
    val clock = new ManualClock()
    val results = new ConcurrentLinkedQueue[Long]
    val timer = new RecurringTimer(clock, 100, time => {
      results.add(time)
    }, "RecurringTimerSuite-SPARK-10224")
    timer.start(0)
    eventually(timeout(10.seconds), interval(10.millis)) {
      assert(results.asScala.toSeq === Seq(0L))
    }
    @volatile var lastTime = -1L
    // Now RecurringTimer is waiting for the next interval
    val thread = new Thread {
      override def run(): Unit = {
        lastTime = timer.stop(interruptTimer = false)
      }
    }
    thread.start()
    val stopped = PrivateMethod[RecurringTimer]('stopped)
    // Make sure the `stopped` field has been changed
    eventually(timeout(10.seconds), interval(10.millis)) {
      assert(timer.invokePrivate(stopped()) === true)
    }
    clock.advance(200)
    // When RecurringTimer is awake from clock.waitTillTime, it will call `callback` once.
    // Then it will find `stopped` is true and exit the loop, but it should call `callback` again
    // before exiting its internal thread.
    thread.join()
    assert(results.asScala.toSeq === Seq(0L, 100L, 200L))
    assert(lastTime === 200L)
  }
} 
Example 42
Source File: Schedulable.scala    From sparkoscope   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.scheduler

import java.util.concurrent.ConcurrentLinkedQueue

import scala.collection.mutable.ArrayBuffer

import org.apache.spark.scheduler.SchedulingMode.SchedulingMode


private[spark] trait Schedulable {
  var parent: Pool
  // child queues
  def schedulableQueue: ConcurrentLinkedQueue[Schedulable]
  def schedulingMode: SchedulingMode
  def weight: Int
  def minShare: Int
  def runningTasks: Int
  def priority: Int
  def stageId: Int
  def name: String

  def addSchedulable(schedulable: Schedulable): Unit
  def removeSchedulable(schedulable: Schedulable): Unit
  def getSchedulableByName(name: String): Schedulable
  def executorLost(executorId: String, host: String, reason: ExecutorLossReason): Unit
  def checkSpeculatableTasks(minTimeToSpeculation: Int): Boolean
  def getSortedTaskSetQueue: ArrayBuffer[TaskSetManager]
} 
Example 43
Source File: Schedulable.scala    From SparkCore   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.scheduler

import java.util.concurrent.ConcurrentLinkedQueue

import scala.collection.mutable.ArrayBuffer

import org.apache.spark.scheduler.SchedulingMode.SchedulingMode


private[spark] trait Schedulable {
  var parent: Pool
  // child queues
  def schedulableQueue: ConcurrentLinkedQueue[Schedulable]
  def schedulingMode: SchedulingMode
  def weight: Int
  def minShare: Int
  def runningTasks: Int
  def priority: Int
  def stageId: Int
  def name: String

  def addSchedulable(schedulable: Schedulable): Unit
  def removeSchedulable(schedulable: Schedulable): Unit
  def getSchedulableByName(name: String): Schedulable
  def executorLost(executorId: String, host: String): Unit
  def checkSpeculatableTasks(): Boolean
  def getSortedTaskSetQueue: ArrayBuffer[TaskSetManager]
} 