java.util.concurrent.CountDownLatch Scala Examples

The following examples show how to use java.util.concurrent.CountDownLatch. You can go to the original project or source file by following the links above each example.
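Before the project examples, here is a minimal, self-contained sketch of the pattern most of them build on: a coordinating thread blocks in await() (optionally with a timeout) while worker threads each call countDown() once. The LatchSketch object and its worker names are illustrative only and do not come from any of the projects below.

import java.util.concurrent.{CountDownLatch, TimeUnit}

object LatchSketch extends App {
  val workers = 3
  val done    = new CountDownLatch(workers) // the latch starts at the number of expected signals

  (1 to workers).foreach { i =>
    new Thread(new Runnable {
      override def run(): Unit = {
        println(s"worker $i finished")
        done.countDown() // each worker decrements the count exactly once
      }
    }).start()
  }

  // Block until the count reaches zero, giving up after 5 seconds;
  // await(timeout, unit) returns false if the latch did not reach zero in time.
  val completed = done.await(5, TimeUnit.SECONDS)
  println(s"all workers finished: $completed (remaining count = ${done.getCount})")
}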
Example 1
Source File: UtxPoolSynchronizerSpec.scala    From Waves   with MIT License
package com.wavesplatform.network
import java.util.concurrent.CountDownLatch

import com.wavesplatform.account.PublicKey
import com.wavesplatform.common.utils.EitherExt2
import com.wavesplatform.lang.ValidationError
import com.wavesplatform.settings.SynchronizationSettings.UtxSynchronizerSettings
import com.wavesplatform.transaction.smart.script.trace.TracedResult
import com.wavesplatform.transaction.{GenesisTransaction, Transaction}
import com.wavesplatform.utils.Schedulers
import io.netty.util.HashedWheelTimer
import monix.execution.atomic.AtomicInt
import monix.reactive.Observable
import org.scalatest.{BeforeAndAfterAll, FreeSpec, Matchers}

import scala.concurrent.duration._

class UtxPoolSynchronizerSpec extends FreeSpec with Matchers with BeforeAndAfterAll {
  private[this] val timer     = new HashedWheelTimer
  private[this] val scheduler = Schedulers.timeBoundedFixedPool(timer, 1.second, 2, "test-utx-sync")

  "UtxPoolSynchronizer" - {
    val latch   = new CountDownLatch(5)
    val counter = AtomicInt(10)

    def countTransactions(tx: Transaction): TracedResult[ValidationError, Boolean] = {
      if (counter.getAndDecrement() > 5)
        while (!Thread.currentThread().isInterrupted) {} // the first five transactions busy-wait until the time-bounded scheduler interrupts them
      else
        latch.countDown()

      TracedResult(Right(true))
    }

    "accepts only those transactions from network which can be validated quickly" in withUPS(countTransactions) { ups =>
      1 to 10 foreach { i =>
        ups.publish(GenesisTransaction.create(PublicKey(new Array[Byte](32)).toAddress, i * 10L, 0L).explicitGet())
      }
      latch.await()
      counter.get() shouldEqual 0
    }
  }

  private def withUPS(putIfNew: Transaction => TracedResult[ValidationError, Boolean])(f: UtxPoolSynchronizer => Unit): Unit = {
    val ups = new UtxPoolSynchronizerImpl(UtxSynchronizerSettings(1000, 2, 1000, true), putIfNew, (_, _) => (), Observable.empty, scheduler)
    f(ups)
    ups.close()
  }

  override protected def afterAll(): Unit = {
    super.afterAll()
    scheduler.shutdown()
    timer.stop()
  }
} 
Example 2
Source File: ThreadUtilsSuite.scala    From iolap   with Apache License 2.0
package org.apache.spark.util

import java.util.concurrent.{CountDownLatch, TimeUnit}

import scala.concurrent.{Await, Future}
import scala.concurrent.duration._

import org.scalatest.concurrent.Eventually._

import org.apache.spark.SparkFunSuite

class ThreadUtilsSuite extends SparkFunSuite {

  test("newDaemonSingleThreadExecutor") {
    val executor = ThreadUtils.newDaemonSingleThreadExecutor("this-is-a-thread-name")
    @volatile var threadName = ""
    executor.submit(new Runnable {
      override def run(): Unit = {
        threadName = Thread.currentThread().getName()
      }
    })
    executor.shutdown()
    executor.awaitTermination(10, TimeUnit.SECONDS)
    assert(threadName === "this-is-a-thread-name")
  }

  test("newDaemonSingleThreadScheduledExecutor") {
    val executor = ThreadUtils.newDaemonSingleThreadScheduledExecutor("this-is-a-thread-name")
    try {
      val latch = new CountDownLatch(1)
      @volatile var threadName = ""
      executor.schedule(new Runnable {
        override def run(): Unit = {
          threadName = Thread.currentThread().getName()
          latch.countDown()
        }
      }, 1, TimeUnit.MILLISECONDS)
      latch.await(10, TimeUnit.SECONDS)
      assert(threadName === "this-is-a-thread-name")
    } finally {
      executor.shutdownNow()
    }
  }

  test("newDaemonCachedThreadPool") {
    val maxThreadNumber = 10
    val startThreadsLatch = new CountDownLatch(maxThreadNumber)
    val latch = new CountDownLatch(1)
    val cachedThreadPool = ThreadUtils.newDaemonCachedThreadPool(
      "ThreadUtilsSuite-newDaemonCachedThreadPool",
      maxThreadNumber,
      keepAliveSeconds = 2)
    try {
      for (_ <- 1 to maxThreadNumber) {
        cachedThreadPool.execute(new Runnable {
          override def run(): Unit = {
            startThreadsLatch.countDown()
            latch.await(10, TimeUnit.SECONDS)
          }
        })
      }
      startThreadsLatch.await(10, TimeUnit.SECONDS)
      assert(cachedThreadPool.getActiveCount === maxThreadNumber)
      assert(cachedThreadPool.getQueue.size === 0)

      // Submit a new task and it should be put into the queue since the thread number reaches the
      // limitation
      cachedThreadPool.execute(new Runnable {
        override def run(): Unit = {
          latch.await(10, TimeUnit.SECONDS)
        }
      })

      assert(cachedThreadPool.getActiveCount === maxThreadNumber)
      assert(cachedThreadPool.getQueue.size === 1)

      latch.countDown()
      eventually(timeout(10.seconds)) {
        // All threads should be stopped after keepAliveSeconds
        assert(cachedThreadPool.getActiveCount === 0)
        assert(cachedThreadPool.getPoolSize === 0)
      }
    } finally {
      cachedThreadPool.shutdownNow()
    }
  }

  test("sameThread") {
    val callerThreadName = Thread.currentThread().getName()
    val f = Future {
      Thread.currentThread().getName()
    }(ThreadUtils.sameThread)
    val futureThreadName = Await.result(f, 10.seconds)
    assert(futureThreadName === callerThreadName)
  }
} 
Example 3
Source File: Node.scala    From affinity   with Apache License 2.0
package io.amient.affinity.core.cluster


import java.util.concurrent.{CountDownLatch, TimeUnit, TimeoutException}

import akka.actor.{Actor, Props}
import akka.event.Logging
import akka.util.Timeout
import com.typesafe.config.{Config, ConfigFactory}
import io.amient.affinity.core.ack
import io.amient.affinity.core.actor.Controller._
import io.amient.affinity.core.actor.Gateway.{GatewayClusterStatus, GatewayConf}
import io.amient.affinity.core.actor._
import io.amient.affinity.core.config._
import io.amient.affinity.{AffinityActorSystem, Conf}

import scala.concurrent.duration._
import scala.concurrent.{Await, Future, Promise}
import scala.language.{implicitConversions, postfixOps}
import scala.reflect.ClassTag

object Node {

  class NodeConf extends CfgStruct[NodeConf] {
    val Containers: CfgGroup[CfgIntList] = group("container", classOf[CfgIntList], false)
      .doc("Array of partitions assigned to this node, <ID> represents the Keyspace, e.g. assigning first four partitions of MyKeySpace: affinity.node.container.MyKeySpace = [0,1,2,3] ")
    val Gateway: GatewayConf = struct("gateway", new GatewayConf, false)
    val SuspendQueueMaxSize = integer("suspend.queue.max.size", 1000).doc("Size of the queue when the cluster enters suspended mode")
    val StartupTimeoutMs = longint("startup.timeout.ms", Integer.MAX_VALUE).doc("Maximum time a node can take to start up - this number must account for any potential state bootstrap")
    val ShutdownTimeoutMs = longint("shutdown.timeout.ms", 30000).doc("Maximum time a node can take to shut down gracefully")
    val DataDir = filepath("data.dir", false).doc("Location under which any local state or registers will be kept - this is required if running in a distributed mode or when using persisted kv stores")
    val DataAutoAssign = bool("data.auto.assign", true, false).doc("Determines whether this node auto-balances data across its containers; if set to false the fixed list of container partitions will be used")
    val DataAutoDelete = bool("data.auto.delete", true, false).doc("If set to true, any unassigned partitions will be deleted from the local storage")
  }

}

class Node(config: Config) {

  def this(configResource: String) = this(ConfigFactory.parseResources(configResource).resolve)

  val conf = Conf(config)

  val startupTimeout = conf.Affi.Node.StartupTimeoutMs().toLong milliseconds
  val shutdownTimeout = conf.Affi.Node.ShutdownTimeoutMs().toLong milliseconds

  implicit val system = AffinityActorSystem.create(config)

  private val log = Logging.getLogger(system, this)

  private val controller = system.actorOf(Props(new Controller), name = "controller")

  private val httpGatewayPort = Promise[List[Int]]()

  private val clusterReady = new CountDownLatch(1)

  @volatile private var shuttingDown = false

  @volatile private var fatalError: Option[Throwable] = None

  import scala.concurrent.ExecutionContext.Implicits.global

  val systemEventsWatcher = system.actorOf(Props(new Actor {
    override def receive: Receive = {
      case GatewayClusterStatus(false) => clusterReady.countDown()
      case FatalErrorShutdown(e) =>
        fatalError = Some(e)
        shutdown()
    }
  }))

  system.eventStream.subscribe(systemEventsWatcher, classOf[GatewayClusterStatus])

  system.eventStream.subscribe(systemEventsWatcher, classOf[FatalErrorShutdown])

  sys.addShutdownHook {
    if (!shuttingDown) {
      log.info("process killed - attempting graceful shutdown")
      fatalError = None
      shutdown()
    }
    Await.ready(system.terminate, shutdownTimeout)
  }

  
  def start[T <: Gateway](creator: => T)(implicit tag: ClassTag[T]): Future[List[Int]] = {
    controller ! StartRebalance()
    implicit val timeout = Timeout(startupTimeout)
    val result = controller ?? CreateGateway(Props(creator))
    httpGatewayPort.completeWith(result)
    result
  }

} 
Example 4
Source File: WebSocketTest.scala    From OUTDATED_ledger-wallet-android   with MIT License
package co.ledger.wallet.core.net

import java.util.concurrent.{TimeUnit, CountDownLatch}

import android.net.Uri
import android.test.InstrumentationTestCase
import co.ledger.wallet.core.utils.logs.Logger
import junit.framework.Assert
import scala.util.{Failure, Success}
import scala.concurrent.ExecutionContext.Implicits.global
import co.ledger.wallet.common._

class WebSocketTest extends InstrumentationTestCase {

  var signal: CountDownLatch = _
  val uri = Uri.parse("wss://echo.websocket.org")

  override def setUp(): Unit = {
    super.setUp()
    signal = new CountDownLatch(2)
  }

  def testConnectAndEcho(): Unit = {
    val testString = "Ledger Wallet is on Android too!"
    Logger.d(s"Connecting to ${uri.toString}")
    WebSocket.connect(uri).onComplete {
      case Success(ws) =>
        Logger.d(s"Connected to ${uri.toString}")
        ws.onStringMessage((message) => {
          Logger.d(s"Just received a message $message")
          Assert.assertEquals(testString, message)
          signal.countDown()
          ws.onClose((ex) => {signal.countDown()})
          ws.close()
        })
        ws.send(testString)
      case Failure(ex) =>
        Logger.d(s"Failed connection to ${uri.toString}")
        ex.printStackTrace()
        throw ex
    }
    signal.await(30, TimeUnit.SECONDS)
    Assert.assertEquals(0, signal.getCount)
  }

  def testConnectAndEchoJson(): Unit = {
    val testJson = json"{foo: ${"bar"}, ledger: ${"wallet"}}"
    Logger.d(s"Connecting to ${uri.toString}")
    WebSocket.connect(uri).onComplete {
      case Success(ws) =>
        Logger.d(s"Connected to ${uri.toString}")
        ws.onJsonMessage((json) => {
          Logger.d(s"Just received a message ${json.toString}")
          Assert.assertEquals(json.get("foo"), testJson.get("foo"))
          Assert.assertEquals(json.get("ledger"), testJson.get("ledger"))
          signal.countDown()
          ws.onClose((ex) => {signal.countDown()})
          ws.close()
        })
        ws.send(testJson)
      case Failure(ex) =>
        Logger.d(s"Failed connection to ${uri.toString}")
        ex.printStackTrace()
        throw ex
    }
    signal.await(30, TimeUnit.SECONDS)
    Assert.assertEquals(0, signal.getCount)
  }

  def testShouldFailedConnection(): Unit = {
    WebSocket.connect(Uri.parse("wss://an_uri_that_will_never_handle_websockets.never/ever")).onComplete {
      case Success(ws) => // WTF???
        ws.onClose((ex) => Logger.d(s"Received ${ex.getMessage}"))
        Logger.d(s"WS is ${ws.isOpen} ${ws.isClosed}")
        ws.onClose((error) => Logger.d(s"Received ${error.getMessage}"))
        Assert.fail("It should failed connection")
      case Failure(ex) => {
        Logger.d("Failed to connect")
        ex.printStackTrace()
        signal.countDown()
        signal.countDown()
      }
    }
    signal.await(30, TimeUnit.SECONDS)
    Assert.assertEquals(0, signal.getCount)
  }

} 
Example 5
Source File: HttpClientTest.scala    From OUTDATED_ledger-wallet-android   with MIT License
package co.ledger.wallet.core.net

import java.util.concurrent.CountDownLatch

import android.net.Uri
import android.test.InstrumentationTestCase
import co.ledger.wallet.core.utils.logs.Logger
import junit.framework.Assert
import org.json.JSONObject
import scala.collection.JavaConversions._
import scala.util.{Failure, Success}
import scala.concurrent.ExecutionContext.Implicits.global

class HttpClientTest extends InstrumentationTestCase {

  var client: HttpClient = _
  var signal: CountDownLatch = _

  override def setUp(): Unit = {
    super.setUp()
    client = new HttpClient(Uri.parse("http://httpbin.org"))
    signal = new CountDownLatch(1)
  }

  def testGet(): Unit = {
    client
      .get("get")
      .param("Toto" -> 12)
      .json.onComplete {
        case Success((json, response)) =>
          assert("http://httpbin.org/get?Toto=12" == json.get("url"))
          assert(12 == json.getJSONObject("args").getInt("Toto"))
          signal.countDown()
        case Failure(ex) =>
          ex.printStackTrace()
    }
    signal.await()
  }

  def testPost(): Unit = {
    val json = new JSONObject()
    json.put("a_param", "a_value")
    json.put("another_param", 42)
    client
      .post("post")
      .body(json)
      .json.onComplete {
        case Success((result, response)) =>
          val data = new JSONObject(result.getString("data"))
          for (key <- json.keys()) {
            assert(json.get(key) == data.get(key))
          }
          signal.countDown()
        case Failure(ex) =>
          ex.printStackTrace()
    }
    signal.await()
  }

} 
Example 6
Source File: MesosClusterDispatcher.scala    From spark1.52   with Apache License 2.0
package org.apache.spark.deploy.mesos

import java.util.concurrent.CountDownLatch

import org.apache.spark.deploy.mesos.ui.MesosClusterUI
import org.apache.spark.deploy.rest.mesos.MesosRestServer
import org.apache.spark.scheduler.cluster.mesos._
import org.apache.spark.util.SignalLogger
import org.apache.spark.{Logging, SecurityManager, SparkConf}


private[mesos] class MesosClusterDispatcher(
    args: MesosClusterDispatcherArguments,
    conf: SparkConf)
  extends Logging {
  // Public DNS used by the Spark master and workers (empty by default)
  private val publicAddress = Option(conf.getenv("SPARK_PUBLIC_DNS")).getOrElse(args.host)
  private val recoveryMode = conf.get("spark.mesos.deploy.recoveryMode", "NONE").toUpperCase()
  logInfo("Recovery mode in Mesos dispatcher set to: " + recoveryMode)

  private val engineFactory = recoveryMode match {
    case "NONE" => new BlackHoleMesosClusterPersistenceEngineFactory
    case "ZOOKEEPER" => new ZookeeperMesosClusterPersistenceEngineFactory(conf)
    case _ => throw new IllegalArgumentException("Unsupported recovery mode: " + recoveryMode)
  }

  private val scheduler = new MesosClusterScheduler(engineFactory, conf)

  private val server = new MesosRestServer(args.host, args.port, conf, scheduler)
  private val webUi = new MesosClusterUI(
    new SecurityManager(conf),
    args.webUiPort,
    conf,
    publicAddress,
    scheduler)

  private val shutdownLatch = new CountDownLatch(1)

  def start(): Unit = {
    webUi.bind()
    scheduler.frameworkUrl = webUi.activeWebUiUrl
    scheduler.start()
    server.start()
  }

  def awaitShutdown(): Unit = {
    shutdownLatch.await()
  }

  def stop(): Unit = {
    webUi.stop()
    server.stop()
    scheduler.stop()
    shutdownLatch.countDown()
  }
}

private[mesos] object MesosClusterDispatcher extends Logging {
  def main(args: Array[String]) {
    SignalLogger.register(log)
    val conf = new SparkConf
    val dispatcherArgs = new MesosClusterDispatcherArguments(args, conf)
    conf.setMaster(dispatcherArgs.masterUrl)
    conf.setAppName(dispatcherArgs.name)
    dispatcherArgs.zookeeperUrl.foreach { z =>
      conf.set("spark.mesos.deploy.recoveryMode", "ZOOKEEPER")
      conf.set("spark.mesos.deploy.zookeeper.url", z)
    }
    val dispatcher = new MesosClusterDispatcher(dispatcherArgs, conf)
    dispatcher.start()
    val shutdownHook = new Thread() {
      override def run() {
        logInfo("Shutdown hook is shutting down dispatcher")
        dispatcher.stop()
        dispatcher.awaitShutdown()
      }
    }
    Runtime.getRuntime.addShutdownHook(shutdownHook)
    dispatcher.awaitShutdown()
  }
} 
Example 7
Source File: TestTopic.scala    From hazelcast-scala   with Apache License 2.0
package joe.schmoe

import org.scalatest._
import com.hazelcast.Scala._
import scala.concurrent.duration._
import java.util.concurrent.CountDownLatch
import scala.util.Try

object TestTopic extends ClusterSetup {
  val smallRB = "smallRB"
  val smallRBCapacity = 3
  override def clusterSize = 1
  def init = {
    memberConfig.getRingbufferConfig(smallRB)
      .setCapacity(smallRBCapacity)
    clientConfig.getReliableTopicConfig(smallRB)
      .setReadBatchSize(smallRBCapacity)
  }
  def destroy = ()

}

class TestTopic extends FunSuite with BeforeAndAfterAll {
  import TestTopic._

  override def beforeAll = beforeClass()
  override def afterAll = afterClass()

  test("simple") {
    val messages = Seq(1, 2, 3)

    val cdl = new CountDownLatch(messages.sum)

    val memberFoo = member.getTopic[Int]("foo")
    assert(Try(memberFoo.onSeqMessage()(println(_))).isFailure)

    val registration = memberFoo.onMessage() { msg =>
      val n = msg.get
      for (_ <- 1 to n) cdl.countDown()
    }
    val clientFoo = client.getTopic[Int](memberFoo.getName)
    messages.foreach(clientFoo.publish)
    assert(cdl.await(5, SECONDS))
    registration.cancel()
  }

  test("reliable") {
    val messages = Seq("a", "b", "c")
    val cdl = new CountDownLatch(messages.length)
    val rTopic = client.getReliableTopic[String]("rTopic")
    val reg = rTopic.onSeqMessage() {
      case SeqMessage(seq, value) =>
        assert(messages.length - cdl.getCount === seq)
        assert(messages(seq.toInt) === value)
        cdl.countDown()
    }
    messages.foreach(rTopic.publish)
    assert(cdl.await(5, SECONDS))
    reg.cancel()
  }

  test("stale") {
    val messages = Seq("a", "b", "c", "d", "e")
    val cdl = new CountDownLatch(smallRBCapacity)
    val rTopic = client.getReliableTopic[String](smallRB)
    messages.foreach(rTopic.publish)
    val reg = rTopic.onSeqMessage(startFrom = 0, gapTolerant = true) {
      case SeqMessage(seq, value) =>
        assert(messages.length - cdl.getCount === seq)
        assert(messages(seq.toInt) === value)
        cdl.countDown()
    }
    assert(cdl.await(5, SECONDS))
    reg.cancel()
  }
} 
Example 8
Source File: MesosClusterDispatcher.scala    From Spark-2.3.1   with Apache License 2.0
package org.apache.spark.deploy.mesos

import java.util.concurrent.CountDownLatch

import org.apache.spark.{SecurityManager, SparkConf}
import org.apache.spark.deploy.mesos.config._
import org.apache.spark.deploy.mesos.ui.MesosClusterUI
import org.apache.spark.deploy.rest.mesos.MesosRestServer
import org.apache.spark.internal.Logging
import org.apache.spark.scheduler.cluster.mesos._
import org.apache.spark.util.{CommandLineUtils, ShutdownHookManager, SparkUncaughtExceptionHandler, Utils}


private[mesos] class MesosClusterDispatcher(
    args: MesosClusterDispatcherArguments,
    conf: SparkConf)
  extends Logging {

  private val publicAddress = Option(conf.getenv("SPARK_PUBLIC_DNS")).getOrElse(args.host)
  private val recoveryMode = conf.get(RECOVERY_MODE).toUpperCase()
  logInfo("Recovery mode in Mesos dispatcher set to: " + recoveryMode)

  private val engineFactory = recoveryMode match {
    case "NONE" => new BlackHoleMesosClusterPersistenceEngineFactory
    case "ZOOKEEPER" => new ZookeeperMesosClusterPersistenceEngineFactory(conf)
    case _ => throw new IllegalArgumentException("Unsupported recovery mode: " + recoveryMode)
  }

  private val scheduler = new MesosClusterScheduler(engineFactory, conf)

  private val server = new MesosRestServer(args.host, args.port, conf, scheduler)
  private val webUi = new MesosClusterUI(
    new SecurityManager(conf),
    args.webUiPort,
    conf,
    publicAddress,
    scheduler)

  private val shutdownLatch = new CountDownLatch(1)

  def start(): Unit = {
    webUi.bind()
    scheduler.frameworkUrl = conf.get(DISPATCHER_WEBUI_URL).getOrElse(webUi.activeWebUiUrl)
    scheduler.start()
    server.start()
  }

  def awaitShutdown(): Unit = {
    shutdownLatch.await()
  }

  def stop(): Unit = {
    webUi.stop()
    server.stop()
    scheduler.stop()
    shutdownLatch.countDown()
  }
}

private[mesos] object MesosClusterDispatcher
  extends Logging
  with CommandLineUtils {

  override def main(args: Array[String]) {
    Thread.setDefaultUncaughtExceptionHandler(new SparkUncaughtExceptionHandler)
    Utils.initDaemon(log)
    val conf = new SparkConf
    val dispatcherArgs = new MesosClusterDispatcherArguments(args, conf)
    conf.setMaster(dispatcherArgs.masterUrl)
    conf.setAppName(dispatcherArgs.name)
    dispatcherArgs.zookeeperUrl.foreach { z =>
      conf.set(RECOVERY_MODE, "ZOOKEEPER")
      conf.set(ZOOKEEPER_URL, z)
    }
    val dispatcher = new MesosClusterDispatcher(dispatcherArgs, conf)
    dispatcher.start()
    logDebug("Adding shutdown hook") // force eager creation of logger
    ShutdownHookManager.addShutdownHook { () =>
      logInfo("Shutdown hook is shutting down dispatcher")
      dispatcher.stop()
      dispatcher.awaitShutdown()
    }
    dispatcher.awaitShutdown()
  }
} 
Example 9
Source File: BlockingSource.scala    From Spark-2.3.1   with Apache License 2.0
package org.apache.spark.sql.streaming.util

import java.util.concurrent.CountDownLatch

import org.apache.spark.sql.{SQLContext, _}
import org.apache.spark.sql.execution.streaming.{LongOffset, Offset, Sink, Source}
import org.apache.spark.sql.sources.{StreamSinkProvider, StreamSourceProvider}
import org.apache.spark.sql.streaming.OutputMode
import org.apache.spark.sql.types.{IntegerType, StructField, StructType}


class BlockingSource extends StreamSourceProvider with StreamSinkProvider {

  private val fakeSchema = StructType(StructField("a", IntegerType) :: Nil)

  override def sourceSchema(
      spark: SQLContext,
      schema: Option[StructType],
      providerName: String,
      parameters: Map[String, String]): (String, StructType) = {
    ("dummySource", fakeSchema)
  }

  override def createSource(
      spark: SQLContext,
      metadataPath: String,
      schema: Option[StructType],
      providerName: String,
      parameters: Map[String, String]): Source = {
    BlockingSource.latch.await()
    new Source {
      override def schema: StructType = fakeSchema
      override def getOffset: Option[Offset] = Some(new LongOffset(0))
      override def getBatch(start: Option[Offset], end: Offset): DataFrame = {
        import spark.implicits._
        Seq[Int]().toDS().toDF()
      }
      override def stop() {}
    }
  }

  override def createSink(
      spark: SQLContext,
      parameters: Map[String, String],
      partitionColumns: Seq[String],
      outputMode: OutputMode): Sink = {
    new Sink {
      override def addBatch(batchId: Long, data: DataFrame): Unit = {}
    }
  }
}

object BlockingSource {
  var latch: CountDownLatch = null
} 
Example 10
Source File: TraversalSpec.scala    From futiles   with Apache License 2.0
package markatta.futiles

import java.util.concurrent.CountDownLatch

import scala.concurrent.Future

class TraversalSpec extends Spec {

  import Traversal._

  describe("the sequential traversal") {

    it("executes the futures sequentially") {
      val latch = new CountDownLatch(3)
      val result = traverseSequentially(List(1, 2, 3)) { n =>
        Future {
          latch.countDown()
          val l = latch.getCount.toInt
          (n, l)
        }
      }

      result.futureValue should be(List((1, 2), (2, 1), (3, 0)))
    }

    it("fails if one of the futures fails") {
      val latch = new CountDownLatch(2)
      val result = traverseSequentially(List(1, 2, 3)) { n =>
        Future {
          latch.countDown()
          val l = latch.getCount.toInt
          if (l == 0) throw new RuntimeException("fail")
          (n, l)
        }
      }

      Lifting.liftTry(result).futureValue.isFailure should be(true)
    }

  }

  describe("the sequential foldLeft") {

    it("executes the futures sequentially") {
      val latch = new CountDownLatch(3)
      val result = foldLeftSequentially(List(1, 2, 3))(Seq.empty[Int]) {
        (acc, n) =>
          Future {
            latch.countDown()
            val l = latch.getCount.toInt
            acc :+ l
          }
      }

      result.futureValue should be(List(2, 1, 0))

    }

    it("executes fails if one of the futures fails") {
      val latch = new CountDownLatch(3)
      val result = foldLeftSequentially(List(1, 2, 3))(Seq.empty[Int]) {
        (acc, n) =>
          Future {
            latch.countDown()
            val l = latch.getCount.toInt
            if (l == 1) throw new RuntimeException("fail")
            acc :+ l
          }
      }

      Lifting.liftTry(result).futureValue.isFailure should be(true)

    }
  }
} 
Example 11
Source File: StoragePerfTester.scala    From BigDatalog   with Apache License 2.0
package org.apache.spark.tools

import java.util.concurrent.{CountDownLatch, Executors}
import java.util.concurrent.atomic.AtomicLong

import org.apache.spark.executor.ShuffleWriteMetrics
import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.serializer.KryoSerializer
import org.apache.spark.shuffle.hash.HashShuffleManager
import org.apache.spark.util.Utils


object StoragePerfTester {
  def main(args: Array[String]): Unit = {
    // Note: the opening declarations of this file are elided in this listing. The two values below
    // are reconstructed so the snippet is self-contained; the environment variable names and
    // defaults are assumptions, not the original code.
    val dataSizeMb = sys.env.get("OUTPUT_DATA_MB").map(_.toInt).getOrElse(1000) // total MB of shuffle data to write
    val numMaps = sys.env.get("NUM_MAPS").map(_.toInt).getOrElse(8)             // number of simulated map tasks

    val numOutputSplits = sys.env.get("NUM_REDUCERS").map(_.toInt).getOrElse(500)

    val recordLength = 1000 // ~1KB records
    val totalRecords = dataSizeMb * 1000
    val recordsPerMap = totalRecords / numMaps

    val writeKey = "1" * (recordLength / 2)
    val writeValue = "1" * (recordLength / 2)
    val executor = Executors.newFixedThreadPool(numMaps)

    val conf = new SparkConf()
      .set("spark.shuffle.compress", "false")
      .set("spark.shuffle.sync", "true")
      .set("spark.shuffle.manager", "org.apache.spark.shuffle.hash.HashShuffleManager")

    // This is only used to instantiate a BlockManager. All thread scheduling is done manually.
    val sc = new SparkContext("local[4]", "Write Tester", conf)
    val hashShuffleManager = sc.env.shuffleManager.asInstanceOf[HashShuffleManager]

    def writeOutputBytes(mapId: Int, total: AtomicLong): Unit = {
      val shuffle = hashShuffleManager.shuffleBlockResolver.forMapTask(1, mapId, numOutputSplits,
        new KryoSerializer(sc.conf), new ShuffleWriteMetrics())
      val writers = shuffle.writers
      for (i <- 1 to recordsPerMap) {
        writers(i % numOutputSplits).write(writeKey, writeValue)
      }
      writers.map { w =>
        w.commitAndClose()
        total.addAndGet(w.fileSegment().length)
      }

      shuffle.releaseWriters(true)
    }

    val start = System.currentTimeMillis()
    val latch = new CountDownLatch(numMaps)
    val totalBytes = new AtomicLong()
    for (task <- 1 to numMaps) {
      executor.submit(new Runnable() {
        override def run(): Unit = {
          try {
            writeOutputBytes(task, totalBytes)
            latch.countDown()
          } catch {
            case e: Exception =>
              // scalastyle:off println
              println("Exception in child thread: " + e + " " + e.getMessage)
              // scalastyle:on println
              System.exit(1)
          }
        }
      })
    }
    latch.await()
    val end = System.currentTimeMillis()
    val time = (end - start) / 1000.0
    val bytesPerSecond = totalBytes.get() / time
    val bytesPerFile = (totalBytes.get() / (numOutputSplits * numMaps.toDouble)).toLong

    // scalastyle:off println
    System.err.println("files_total\t\t%s".format(numMaps * numOutputSplits))
    System.err.println("bytes_per_file\t\t%s".format(Utils.bytesToString(bytesPerFile)))
    System.err.println("agg_throughput\t\t%s/s".format(Utils.bytesToString(bytesPerSecond.toLong)))
    // scalastyle:on println

    executor.shutdown()
    sc.stop()
  }
} 
Example 12
Source File: MesosClusterDispatcher.scala    From BigDatalog   with Apache License 2.0
package org.apache.spark.deploy.mesos

import java.util.concurrent.CountDownLatch

import org.apache.spark.deploy.mesos.ui.MesosClusterUI
import org.apache.spark.deploy.rest.mesos.MesosRestServer
import org.apache.spark.scheduler.cluster.mesos._
import org.apache.spark.util.SignalLogger
import org.apache.spark.{Logging, SecurityManager, SparkConf}


private[mesos] class MesosClusterDispatcher(
    args: MesosClusterDispatcherArguments,
    conf: SparkConf)
  extends Logging {

  private val publicAddress = Option(conf.getenv("SPARK_PUBLIC_DNS")).getOrElse(args.host)
  private val recoveryMode = conf.get("spark.mesos.deploy.recoveryMode", "NONE").toUpperCase()
  logInfo("Recovery mode in Mesos dispatcher set to: " + recoveryMode)

  private val engineFactory = recoveryMode match {
    case "NONE" => new BlackHoleMesosClusterPersistenceEngineFactory
    case "ZOOKEEPER" => new ZookeeperMesosClusterPersistenceEngineFactory(conf)
    case _ => throw new IllegalArgumentException("Unsupported recovery mode: " + recoveryMode)
  }

  private val scheduler = new MesosClusterScheduler(engineFactory, conf)

  private val server = new MesosRestServer(args.host, args.port, conf, scheduler)
  private val webUi = new MesosClusterUI(
    new SecurityManager(conf),
    args.webUiPort,
    conf,
    publicAddress,
    scheduler)

  private val shutdownLatch = new CountDownLatch(1)

  def start(): Unit = {
    webUi.bind()
    scheduler.frameworkUrl = webUi.activeWebUiUrl
    scheduler.start()
    server.start()
  }

  def awaitShutdown(): Unit = {
    shutdownLatch.await()
  }

  def stop(): Unit = {
    webUi.stop()
    server.stop()
    scheduler.stop()
    shutdownLatch.countDown()
  }
}

private[mesos] object MesosClusterDispatcher extends Logging {
  def main(args: Array[String]) {
    SignalLogger.register(log)
    val conf = new SparkConf
    val dispatcherArgs = new MesosClusterDispatcherArguments(args, conf)
    conf.setMaster(dispatcherArgs.masterUrl)
    conf.setAppName(dispatcherArgs.name)
    dispatcherArgs.zookeeperUrl.foreach { z =>
      conf.set("spark.mesos.deploy.recoveryMode", "ZOOKEEPER")
      conf.set("spark.mesos.deploy.zookeeper.url", z)
    }
    val dispatcher = new MesosClusterDispatcher(dispatcherArgs, conf)
    dispatcher.start()
    val shutdownHook = new Thread() {
      override def run() {
        logInfo("Shutdown hook is shutting down dispatcher")
        dispatcher.stop()
        dispatcher.awaitShutdown()
      }
    }
    Runtime.getRuntime.addShutdownHook(shutdownHook)
    dispatcher.awaitShutdown()
  }
} 
Example 13
Source File: AsyncFunctionLoopTest.scala    From stream-reactor   with Apache License 2.0
package com.landoop.streamreactor.connect.hive

import java.util.concurrent.{CountDownLatch, TimeUnit}

import org.scalatest.funsuite.AnyFunSuite
import org.scalatest.matchers.should.Matchers

import scala.concurrent.duration._

class AsyncFunctionLoopTest extends AnyFunSuite with Matchers {
  test("it loops 5 times in 10 seconds with 2s delay") {
    val countDownLatch = new CountDownLatch(5)
    val looper = new AsyncFunctionLoop(2.seconds, "test")({
      countDownLatch.countDown()
    })
    looper.start()
    countDownLatch.await(11000, TimeUnit.MILLISECONDS) shouldBe true
    looper.close()
  }
} 
Example 14
Source File: ExternalShuffleService.scala    From iolap   with Apache License 2.0
package org.apache.spark.deploy

import java.util.concurrent.CountDownLatch

import scala.collection.JavaConversions._

import org.apache.spark.{Logging, SparkConf, SecurityManager}
import org.apache.spark.network.TransportContext
import org.apache.spark.network.netty.SparkTransportConf
import org.apache.spark.network.sasl.SaslServerBootstrap
import org.apache.spark.network.server.TransportServer
import org.apache.spark.network.shuffle.ExternalShuffleBlockHandler
import org.apache.spark.util.Utils


object ExternalShuffleService extends Logging {
  @volatile
  private var server: ExternalShuffleService = _

  private val barrier = new CountDownLatch(1)

  def main(args: Array[String]): Unit = {
    val sparkConf = new SparkConf
    Utils.loadDefaultSparkProperties(sparkConf)
    val securityManager = new SecurityManager(sparkConf)

    // we override this value since this service is started from the command line
    // and we assume the user really wants it to be running
    sparkConf.set("spark.shuffle.service.enabled", "true")
    server = new ExternalShuffleService(sparkConf, securityManager)
    server.start()

    installShutdownHook()

    // keep running until the process is terminated
    barrier.await()
  }

  private def installShutdownHook(): Unit = {
    Runtime.getRuntime.addShutdownHook(new Thread("External Shuffle Service shutdown thread") {
      override def run() {
        logInfo("Shutting down shuffle service.")
        server.stop()
        barrier.countDown()
      }
    })
  }
} 
Example 15
Source File: TPSITestService.scala    From ez-framework   with Apache License 2.0
package com.ecfront.ez.framework.service.tpsi

import java.util.concurrent.CountDownLatch

import com.ecfront.common.Resp
import com.ecfront.ez.framework.core.rpc.{REPLY, RESP, RPC, SUB}
import com.fasterxml.jackson.databind.JsonNode

import scala.beans.BeanProperty

@RPC("/tpsi/","","")
object TPSITestService extends TPSIService {

  val counter = new CountDownLatch(3)

  @REPLY("reply/","","","","")
  def reply(parameter: Map[String, String], body: TPSITestObj): Resp[TPSITestObj] = {
    assert(parameter("id") == "1")
    assert(body.t == "测试")
    assert(body.d == 2.2)
    exec(parameter("id"), "reply", body)
  }

  @SUB("sub/","","","","")
  def sub(parameter: Map[String, String], body: TPSITestObj): Resp[TPSITestObj] = {
    assert(parameter("id") == "1")
    assert(body.t == "测试")
    assert(body.d == 2.2)
    exec(parameter("id"), "sub", body)
  }

  @RESP("resp/","","","","")
  def resp(parameter: Map[String, String], body: TPSITestObj): Resp[TPSITestObj] = {
    assert(parameter("id") == "1")
    assert(body.t == "测试")
    assert(body.d == 2.2)
    exec(parameter("id"), "resp", body)
  }

  override protected def init(args: JsonNode): Unit = {
    assert(args.get("tt").asText() == "字段")
  }

  def exec(id: String, funName: String, body: TPSITestObj): Resp[TPSITestObj] = {
    execute(id, funName, {
      Thread.sleep(100)
      body
    }, {
      body =>
        counter.countDown()
        Resp.success(body.asInstanceOf[TPSITestObj])
    })
  }


}

class TPSITestObj {
  @BeanProperty
  var t: String = _
  @BeanProperty
  var d: BigDecimal = _
}

object TPSITestObj {
  def apply(t: String, d: BigDecimal): TPSITestObj = {
    val obj = new TPSITestObj()
    obj.t = t
    obj.d = d
    obj
  }
} 
Example 16
Source File: MapSpec.scala    From ez-framework   with Apache License 2.0
package com.ecfront.ez.framework.cluster.redis

import java.util.concurrent.CountDownLatch
import java.util.{Timer, TimerTask}

import com.ecfront.ez.framework.core.EZ
import com.ecfront.ez.framework.test.MockStartupSpec

import scala.beans.BeanProperty


class MapSpec extends MockStartupSpec {

  test("Map Test") {

    val mapObj = EZ.dist.map[TestMapObj]("test_obj_map")
    mapObj.clear()
    val obj = new TestMapObj
    obj.a = "测试"
    assert(mapObj.put("a", obj).get("a").a == "测试")


    val map = EZ.dist.map[Long]("test_map")
    map.clear()

    val timer = new Timer()
    timer.schedule(new TimerTask {
      override def run(): Unit = {
        map.put("a", System.currentTimeMillis())
      }
    }, 0, 1000)
    timer.schedule(new TimerTask {
      override def run(): Unit = {
        map.foreach({
          (k, v) =>
            println(">>a:" + v)
        })
      }
    }, 0, 10000)
    new CountDownLatch(1).await()
  }

}

class TestMapObj extends Serializable {
  @BeanProperty
  var a: String = _
} 
Example 17
Source File: CacheSpec.scala    From ez-framework   with Apache License 2.0
package com.ecfront.ez.framework.cluster.redis

import java.util.concurrent.CountDownLatch

import com.ecfront.common.JsonHelper
import com.ecfront.ez.framework.core.EZ
import com.ecfront.ez.framework.test.MockStartupSpec

class CacheSpec extends MockStartupSpec {

  test("Redis Cache Test") {

    EZ.cache.flushdb()

    EZ.cache.del("n_test")
    assert(!EZ.cache.exists("n_test"))
    EZ.cache.set("n_test", s"""{"name":"jzy"}""", 1)
    assert(EZ.cache.exists("n_test"))
    assert(JsonHelper.toJson(EZ.cache.get("n_test")).get("name").asText() == "jzy")
    Thread.sleep(1000)
    assert(!EZ.cache.exists("n_test"))
    assert(EZ.cache.get("n_test") == null)

    EZ.cache.del("hash_test")
    EZ.cache.hmset("hash_test", Map("f1" -> "v1", "f2" -> "v2"))
    EZ.cache.hset("hash_test", "f3", "v3")
    assert(EZ.cache.hget("hash_test", "f3") == "v3")
    assert(EZ.cache.hget("hash_test", "notexist") == null)
    assert(EZ.cache.hexists("hash_test", "f3"))
    val hashVals = EZ.cache.hgetAll("hash_test")
    assert(hashVals.size == 3 && hashVals("f1") == "v1" && hashVals("f2") == "v2" && hashVals("f3") == "v3")
    EZ.cache.hdel("hash_test", "f3")
    assert(!EZ.cache.hexists("hash_test", "f3"))
    EZ.cache.del("hash_test")
    assert(!EZ.cache.exists("hash_test"))

    EZ.cache.del("list_test")
    EZ.cache.lmset("list_test", List("v1", "v2"))
    EZ.cache.lpush("list_test", "v0")
    EZ.cache.lset("list_test", "v2_new", 2)
    assert(EZ.cache.llen("list_test") == 3)
    assert(EZ.cache.lpop("list_test") == "v0")
    assert(EZ.cache.llen("list_test") == 2)
    assert(EZ.cache.lindex("list_test", 1) == "v2_new")
    val listVals = EZ.cache.lget("list_test")
    assert(listVals.size == 2 && listVals == List("v2", "v2_new"))

    EZ.cache.del("int_test")
    assert(EZ.cache.incr("int_test", 0) == 0)
    EZ.cache.incr("int_test", 10)
    assert(EZ.cache.get("int_test") == "10")
    EZ.cache.incr("int_test", 0)
    assert(EZ.cache.get("int_test") == "10")
    EZ.cache.incr("int_test", 10)
    assert(EZ.cache.get("int_test") == "20")
    EZ.cache.decr("int_test", 4)
    EZ.cache.decr("int_test", 2)
    assert(EZ.cache.get("int_test") == "14")
    EZ.cache.expire("int_test", 1)
    assert(EZ.cache.get("int_test") == "14")
    Thread.sleep(1100)
    assert(EZ.cache.get("int_test") == null)
  }

  test("Compression test") {
    EZ.cache.flushdb()
    val max = 1000
    val cdl = new CountDownLatch(max)
    val setThreads = for (i <- 0 until max) yield
      new Thread(new Runnable {
        override def run(): Unit = {
          EZ.cache.set(s"test$i", i + "")
        }
      })
    setThreads.foreach(_.start())
    val getThreads = for (i <- 0 until max) yield
      new Thread(new Runnable {
        override def run(): Unit = {
          while (!EZ.cache.exists(s"test$i")) {
          }
          EZ.cache.get(s"test$i")
          EZ.cache.del(s"test$i")
          cdl.countDown()
        }
      })
    getThreads.foreach(_.start())
    cdl.await()
  }

} 
Example 18
Source File: RabbitmqSpec.scala    From ez-framework   with Apache License 2.0
package com.ecfront.ez.framework.cluster.rabbitmq

import java.util.concurrent.CountDownLatch
import java.util.concurrent.atomic.AtomicLong

import com.ecfront.ez.framework.core.logger.Logging
import com.rabbitmq.client.AMQP.BasicProperties
import com.rabbitmq.client.{ConnectionFactory, QueueingConsumer}
import org.scalatest.{BeforeAndAfter, FunSuite}


class RabbitmqSpec extends FunSuite with BeforeAndAfter with Logging {

  test("rabbitmq test") {
    val p = new AtomicLong(0)
    val c = new AtomicLong(0)

    val factory = new ConnectionFactory()
    factory.setUsername("user")
    factory.setPassword("password")
    factory.setHost("127.0.0.1")
    val connection = factory.newConnection()
    // produce
    val produceThreads = for (i <- 0 until 50)
      yield new Thread(new Runnable {
        override def run(): Unit = {
          val channel = connection.createChannel()
          val replyQueueName = channel.queueDeclare().getQueue
          val replyConsumer = new QueueingConsumer(channel)
          channel.basicConsume(replyQueueName, true, replyConsumer)
          val corrId = java.util.UUID.randomUUID().toString
          val opt = new BasicProperties.Builder().correlationId(corrId).replyTo(replyQueueName).build()
          channel.basicPublish("", "a", opt, s"test${p.incrementAndGet()}".getBytes())
          var delivery = replyConsumer.nextDelivery()
          while (true) {
            if (delivery.getProperties.getCorrelationId.equals(corrId)) {
              logger.info(s"reply " + new String(delivery.getBody))
            }
            delivery = replyConsumer.nextDelivery()
          }
          channel.close()
        }
      })
    produceThreads.foreach(_.start())

    // consumer
    new Thread(new Runnable {
      override def run(): Unit = {
        val channel = connection.createChannel()
        channel.queueDeclare("a", false, false, false, null)
        val consumer = new QueueingConsumer(channel)
        channel.basicConsume("a", true, consumer)
        while (true) {
          val delivery = consumer.nextDelivery()
          val props = delivery.getProperties()
          val message = new String(delivery.getBody())
          new Thread(new Runnable {
            override def run(): Unit = {
              Thread.sleep(10000)
              logger.info(s"receive 1 [${c.incrementAndGet()}] " + message)
              channel.basicPublish("", props.getReplyTo(), new BasicProperties.Builder().correlationId(props.getCorrelationId()).build(), message.getBytes)
            }
          }).start()
        }
      }
    }).start()
   

    new CountDownLatch(1).await()
  }
} 
Example 19
Source File: NatsSpec.scala    From ez-framework   with Apache License 2.0
package com.ecfront.ez.framework.cluster.nats

import java.util.concurrent.CountDownLatch
import java.util.concurrent.atomic.AtomicLong

import com.ecfront.ez.framework.test.BasicSpec
import io.nats.client.{ConnectionFactory, Message, MessageHandler}

class NatsSpec extends BasicSpec {

  test("nats test") {
    new Thread(new Runnable() {
      override def run() = {
        val cf = new ConnectionFactory()
        cf.setServers(Array("nats://127.0.0.1:4222"))
        val connection = cf.createConnection()
        connection.subscribe("/test/", "111",new MessageHandler {
          override def onMessage(msg: Message) = {
            logger.info(">>" + new String(msg.getData, "UTF-8"))
            connection.publish(msg.getReplyTo, (new String(msg.getData, "UTF-8") + "_reply").getBytes("UTF-8"))
          }
        })
        new CountDownLatch(1).await()
      }
    }).start()
    new Thread(new Runnable() {
      override def run() = {
        val cf = new ConnectionFactory()
        cf.setServers(Array("nats://127.0.0.1:4222"))
        val connection = cf.createConnection()
        connection.subscribe("/test/","111", new MessageHandler {
          override def onMessage(msg: Message) = {
            logger.info(">>" + new String(msg.getData, "UTF-8"))
            connection.publish(msg.getReplyTo, (new String(msg.getData, "UTF-8") + "_reply").getBytes("UTF-8"))
          }
        })
        new CountDownLatch(1).await()
      }
    }).start()
    new Thread(new Runnable() {
      override def run() = {
        val cf = new ConnectionFactory()
        cf.setServers(Array("nats://127.0.0.1:4222"))
        val connection = cf.createConnection()
        connection.subscribe("/test/11/","111", new MessageHandler {
          override def onMessage(msg: Message) = {
            logger.info(">>" + new String(msg.getData, "UTF-8"))
            connection.publish(msg.getReplyTo, (new String(msg.getData, "UTF-8") + "_reply").getBytes("UTF-8"))
          }
        })
        new CountDownLatch(1).await()
      }
    }).start()

    val counter = new AtomicLong(0)
    new Thread(new Runnable() {
      override def run() = {
        val cf = new ConnectionFactory()
        cf.setServers(Array("nats://127.0.0.1:4222"))
        val connection = cf.createConnection()
        while (true) {
          val reply = connection.request("/test/", s"A test_mesage ${counter.getAndIncrement()}".getBytes("UTF-8"))
          logger.info("A<<" + new String(reply.getData, "UTF-8"))
        }
        new CountDownLatch(1).await()
      }
    }).start()

    new Thread(new Runnable() {
      override def run() = {
        val cf = new ConnectionFactory()
        cf.setServers(Array("nats://127.0.0.1:4222"))
        val connection = cf.createConnection()
        while (true) {
          var reply = connection.request("/test/", s"B test_mesage ${counter.getAndIncrement()}".getBytes("UTF-8"))
          logger.info("B<<" + new String(reply.getData, "UTF-8"))
          reply = connection.request("/test/11/", s"B msg ${counter.getAndIncrement()}".getBytes("UTF-8"))
          logger.info("B<<" + new String(reply.getData, "UTF-8"))
        }
        new CountDownLatch(1).await()
      }
    }).start()

    new CountDownLatch(1).await()
  }

} 
Example 20
Source File: Startup.scala    From ez-framework   with Apache License 2.0
package com.ecfront.ez.framework.test.perf

import java.util.concurrent.CountDownLatch
import java.util.concurrent.atomic.AtomicLong

import com.ecfront.common.JsonHelper
import com.mdataset.excavator.http.HttpHelper
import com.typesafe.scalalogging.slf4j.LazyLogging


object Startup extends App with LazyLogging {

  var token: String = ""

  def u(path: String): String = s"http://host.wangzifinance.cn:8070/$path?__ez_token__=$token"

  val client = HttpHelper.getClient
  token = JsonHelper.toJson(client.post(u("public/ez/auth/login/"),
    s"""
       |{
       |  "id":"sysadmin",
       |  "password":"admin"
       |}
     """.stripMargin)).get("body").get("token").asText()

  val counter = new AtomicLong(0)

  val threads = for (i <- 0 until 1000)
    yield new Thread(new Runnable {
      override def run(): Unit = {
        while (true) {
          assert(JsonHelper.toJson(client.post(u("test1/normal/"), "abc")).get("body").asText() == "abc")
          assert(JsonHelper.toJson(client.post(u("test2/normal/"), "def")).get("body").asText() == "def")
          logger.info(">> " + counter.incrementAndGet())
          
        }
      }
    })
  threads.foreach(_.start())
  new CountDownLatch(1).await()

} 
Example 21
Source File: IssueActor.scala    From BacklogMigration-Redmine   with MIT License
package com.nulabinc.backlog.r2b.mapping.collector.actor

import java.util.concurrent.CountDownLatch

import akka.actor.Actor
import com.nulabinc.backlog.migration.common.utils.Logging
import com.nulabinc.backlog.r2b.mapping.collector.core.MappingData
import com.nulabinc.backlog.r2b.redmine.conf.RedmineConstantValue
import com.nulabinc.backlog.r2b.redmine.service.IssueService
import com.taskadapter.redmineapi.Include
import com.taskadapter.redmineapi.bean.{Issue, Journal, JournalDetail, User}

import scala.jdk.CollectionConverters._
import scala.collection.mutable
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.duration._


private[collector] class IssueActor(issueService: IssueService, mappingData: MappingData, allUsers: Seq[User]) extends Actor with Logging {

  override def preRestart(reason: Throwable, message: Option[Any]) = {
    logger.debug(s"preRestart: reason: ${reason}, message: ${message}")
    for { value <- message } yield {
      context.system.scheduler.scheduleOnce(10.seconds, self, value)
    }
  }

  private[this] val users    = mutable.Set.empty[Option[User]]
  private[this] val statuses = mutable.Set.empty[Option[String]]

  def receive: Receive = {
    case IssueActor.Do(issueId: Int, completion: CountDownLatch, allCount: Int, console: ((Int, Int) => Unit)) =>
      logger.debug(s"[START ISSUE]${issueId} thread numbers:${java.lang.Thread.activeCount()}")
      val issue = issueService.issueOfId(issueId, Include.journals)
      parse(issue)
      mappingData.users ++= users.flatten
      mappingData.statuses ++= statuses.flatten

      completion.countDown()
      console((allCount - completion.getCount).toInt, allCount)
  }

  private[this] def parse(issue: Issue): Unit = {
    users += Option(issue.getAssignee)
    users += Option(issue.getAuthor)

    issue.getJournals.asScala.foreach(parse)
  }

  private[this] def parse(journal: Journal): Unit = {
    users += Option(journal.getUser)
    journal.getDetails.asScala.foreach(parse)
  }

  private[this] def parse(detail: JournalDetail): Unit = {
    if (detail.getName == RedmineConstantValue.Attr.ASSIGNED) {
      addUser(detail.getOldValue)
      addUser(detail.getNewValue)
    }
    if (detail.getName == RedmineConstantValue.Attr.STATUS) {
      addStatus(detail.getOldValue)
      addStatus(detail.getNewValue)
    }
  }

  private[this] def addUser(value: String) =
    for { userId <- Option(value) } yield users += allUsers.find(user => user.getId.intValue() == userId.toInt)

  private[this] def addStatus(value: String) = statuses += Option(value)

}

private[collector] object IssueActor {

  case class Do(issueId: Int, completion: CountDownLatch, allCount: Int, console: ((Int, Int) => Unit))

} 
Example 22
Source File: IssuesActor.scala    From BacklogMigration-Redmine   with MIT License
package com.nulabinc.backlog.r2b.mapping.collector.actor

import java.util.concurrent.CountDownLatch

import akka.actor.SupervisorStrategy.Restart
import akka.actor.{Actor, ActorRef, OneForOneStrategy, Props}
import akka.routing.SmallestMailboxPool
import com.nulabinc.backlog.migration.common.conf.BacklogConfiguration
import com.nulabinc.backlog.migration.common.utils.{ConsoleOut, Logging, ProgressBar}
import com.nulabinc.backlog.r2b.mapping.collector.core.{MappingContext, MappingData}
import com.nulabinc.backlog4j.BacklogAPIException
import com.osinka.i18n.Messages
import com.taskadapter.redmineapi.bean.User

import scala.concurrent.duration._


private[collector] class IssuesActor(mappingContext: MappingContext) extends Actor with BacklogConfiguration with Logging {

  private[this] val strategy =
    OneForOneStrategy(maxNrOfRetries = 5, withinTimeRange = 10 seconds) {
      case e: BacklogAPIException if e.getMessage.contains("429") =>
        Restart
      case e: BacklogAPIException if e.getMessage.contains("Stream closed") =>
        Restart
      case e =>
        ConsoleOut.error("Fatal error: " + e.getMessage)
        logger.error(e.getStackTrace.mkString("\n"))
        sys.exit(2)
    }

  private[this] val limit: Int = exportLimitAtOnce
  private[this] val allCount   = mappingContext.issueService.countIssues()
  private[this] val completion = new CountDownLatch(allCount)
  private[this] val console =
    (ProgressBar.progress _)(Messages("common.issues"), Messages("message.analyzing"), Messages("message.analyzed"))
  private[this] val issuesInfoProgress =
    (ProgressBar.progress _)(Messages("common.issues_info"), Messages("message.collecting"), Messages("message.collected"))

  def receive: Receive = {
    case IssuesActor.Do(mappingData: MappingData, allUsers: Seq[User]) =>
      val router     = SmallestMailboxPool(akkaMailBoxPool, supervisorStrategy = strategy)
      val issueActor = context.actorOf(router.props(Props(new IssueActor(mappingContext.issueService, mappingData, allUsers))))

      (0 until (allCount, limit))
        .foldLeft(Seq.empty[Int]) { (acc, offset) =>
          acc concat issueIds(offset)
        }
        .map(issues)
        .foreach(_(issueActor))

      completion.await
      sender() ! IssuesActor.Done
  }

  private[this] def issueIds(offset: Int): Seq[Int] = {
    val params =
      Map("offset"        -> offset.toString,
          "limit"         -> limit.toString,
          "project_id"    -> mappingContext.projectId.value.toString,
          "status_id"     -> "*",
          "subproject_id" -> "!*")
    val ids = mappingContext.issueService.allIssues(params).map(_.getId.intValue())
    issuesInfoProgress(((offset / limit) + 1), ((allCount / limit) + 1))
    ids
  }

  private[this] def issues(issueId: Int)(issueActor: ActorRef) = {
    issueActor ! IssueActor.Do(issueId, completion, allCount, console)
  }

}

private[collector] object IssuesActor {

  case class Do(mappingData: MappingData, allUsers: Seq[User])

  case object Done

} 
Example 23
Source File: WikiActor.scala    From BacklogMigration-Redmine   with MIT License
package com.nulabinc.backlog.r2b.mapping.collector.actor

import java.util.concurrent.CountDownLatch

import akka.actor.Actor
import com.nulabinc.backlog.migration.common.utils.Logging
import com.nulabinc.backlog.r2b.mapping.collector.core.MappingData
import com.nulabinc.backlog.r2b.redmine.service.WikiService
import com.taskadapter.redmineapi.bean.{User, WikiPage, WikiPageDetail}

import scala.collection.mutable
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.duration._


private[collector] class WikiActor(wikiService: WikiService, mappingData: MappingData) extends Actor with Logging {

  override def preRestart(reason: Throwable, message: Option[Any]) = {
    logger.debug(s"preRestart: reason: ${reason}, message: ${message}")
    for { value <- message } yield {
      context.system.scheduler.scheduleOnce(10.seconds, self, value)
    }
  }

  private[this] val users = mutable.Set.empty[Option[User]]

  def receive: Receive = {
    case WikiActor.Do(wiki: WikiPage, completion: CountDownLatch, allCount: Int, console: ((Int, Int) => Unit)) =>
      wikiService.optWikiDetail(wiki.getTitle).foreach { wikiDetail =>
        parse(wikiDetail)
        mappingData.users ++= users.flatten
      }
      completion.countDown()
      console((allCount - completion.getCount).toInt, allCount)
  }

  private[this] def parse(wikiDetail: WikiPageDetail) =
    users += Option(wikiDetail.getUser)

}

private[collector] object WikiActor {

  case class Do(wiki: WikiPage, completion: CountDownLatch, allCount: Int, console: ((Int, Int) => Unit))

} 
Example 24
Source File: IssueActor.scala    From BacklogMigration-Redmine   with MIT License
package com.nulabinc.backlog.r2b.exporter.actor

import java.util.concurrent.CountDownLatch

import akka.actor.Actor
import better.files.File
import com.nulabinc.backlog.migration.common.convert.Convert
import com.nulabinc.backlog.migration.common.domain.{BacklogComment, BacklogIssue, BacklogTextFormattingRule}
import com.nulabinc.backlog.migration.common.utils.{DateUtil, IOUtil, Logging}
import com.nulabinc.backlog.r2b.exporter.convert.{IssueWrites, JournalWrites}
import com.nulabinc.backlog.r2b.exporter.core.ExportContext
import com.nulabinc.backlog.r2b.exporter.service.{ChangeLogReducer, CommentReducer, IssueInitializer}
import com.taskadapter.redmineapi.Include
import com.taskadapter.redmineapi.bean.{Attachment, _}
import spray.json._

import scala.jdk.CollectionConverters._
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.duration._


private[exporter] class IssueActor(exportContext: ExportContext, backlogTextFormattingRule: BacklogTextFormattingRule) extends Actor with Logging {

  import com.nulabinc.backlog.migration.common.formatters.BacklogJsonProtocol._
  import IssueActor.ConsoleF

  private implicit val issueWrites: IssueWrites = exportContext.issueWrites
  private implicit val journalWrites: JournalWrites = exportContext.journalWrites


  override def preRestart(reason: Throwable, message: Option[Any]): Unit = {
    logger.debug(s"preRestart: reason: $reason, message: $message")
    for { value <- message } yield {
      context.system.scheduler.scheduleOnce(10.seconds, self, value)
    }
  }

  def receive: Receive = {
    case IssueActor.Do(issueId: Int, completion: CountDownLatch, allCount: Int, console: ConsoleF) =>
      logger.debug(s"[START ISSUE]$issueId thread numbers:${java.lang.Thread.activeCount()}")

      val issue                        = exportContext.issueService.issueOfId(issueId, Include.attachments, Include.journals)
      val journals                     = issue.getJournals.asScala.toSeq.sortWith((c1, c2) => c1.getCreatedOn.before(c2.getCreatedOn))
      val attachments: Seq[Attachment] = issue.getAttachments.asScala.toSeq

      exportIssue(issue, journals, attachments)
      exportComments(issue, journals, attachments)

      completion.countDown()
      console((allCount - completion.getCount).toInt, allCount)
  }

  private[this] def exportIssue(issue: Issue, journals: Seq[Journal], attachments: Seq[Attachment]): File = {
    val issueCreated     = DateUtil.tryIsoParse(Option(issue.getCreatedOn).map(DateUtil.isoFormat))
    val issueDirPath     = exportContext.backlogPaths.issueDirectoryPath("issue", issue.getId.intValue(), issueCreated, 0)
    val issueInitializer = new IssueInitializer(exportContext, issueDirPath, journals, attachments, backlogTextFormattingRule)
    val backlogIssue     = issueInitializer.initialize(issue)

    IOUtil.output(exportContext.backlogPaths.issueJson(issueDirPath), backlogIssue.toJson.prettyPrint)
  }

  private[this] def exportComments(issue: Issue, journals: Seq[Journal], attachments: Seq[Attachment]): Unit = {
    val backlogIssue    = Convert.toBacklog(issue)
    val backlogComments = journals.map(Convert.toBacklog(_))
    backlogComments.zipWithIndex.foreach {
      case (comment, index) =>
        exportComment(comment, backlogIssue, backlogComments, attachments, index)
    }
  }

  private[this] def exportComment(comment: BacklogComment,
                                  issue: BacklogIssue,
                                  comments: Seq[BacklogComment],
                                  attachments: Seq[Attachment],
                                  index: Int): File = {
    val commentCreated   = DateUtil.tryIsoParse(comment.optCreated)
    val issueDirPath     = exportContext.backlogPaths.issueDirectoryPath("comment", issue.id, commentCreated, index)
    val changeLogReducer = new ChangeLogReducer(exportContext, issueDirPath, issue, comments, attachments)
    val commentReducer   = new CommentReducer(issue.id, changeLogReducer)
    val reduced          = commentReducer.reduce(comment)

    IOUtil.output(exportContext.backlogPaths.issueJson(issueDirPath), reduced.toJson.prettyPrint)
  }

}

private[exporter] object IssueActor {

  type ConsoleF = (Int, Int) => Unit

  case class Do(issueId: Int, completion: CountDownLatch, allCount: Int, console: ConsoleF)

} 
Example 26
Source File: IssuesActor.scala    From BacklogMigration-Redmine   with MIT License 5 votes vote down vote up
package com.nulabinc.backlog.r2b.exporter.actor

import java.util.concurrent.CountDownLatch

import akka.actor.SupervisorStrategy.Restart
import akka.actor.{Actor, ActorRef, OneForOneStrategy, Props}
import akka.routing.SmallestMailboxPool
import com.nulabinc.backlog.migration.common.conf.BacklogConfiguration
import com.nulabinc.backlog.migration.common.domain.BacklogTextFormattingRule
import com.nulabinc.backlog.migration.common.utils.{ConsoleOut, Logging, ProgressBar}
import com.nulabinc.backlog.r2b.exporter.core.ExportContext
import com.nulabinc.backlog4j.BacklogAPIException
import com.osinka.i18n.Messages

import scala.concurrent.duration._


private[exporter] class IssuesActor(exportContext: ExportContext, backlogTextFormattingRule: BacklogTextFormattingRule) extends Actor with BacklogConfiguration with Logging {

  private[this] val strategy =
    OneForOneStrategy(maxNrOfRetries = 5, withinTimeRange = 10 seconds) {
      case e: BacklogAPIException if e.getMessage.contains("429") =>
        Restart
      case e: BacklogAPIException if e.getMessage.contains("Stream closed") =>
        Restart
      case e =>
        ConsoleOut.error("Fatal error: " + e.getMessage)
        logger.error(e.getStackTrace.mkString("\n"))
        sys.exit(2)
    }

  private[this] val limit      = exportLimitAtOnce
  private[this] val allCount   = exportContext.issueService.countIssues()
  private[this] val completion = new CountDownLatch(allCount)

  private[this] val console =
    (ProgressBar.progress _)(Messages("common.issues"), Messages("message.exporting"), Messages("message.exported"))
  private[this] val issuesInfoProgress =
    (ProgressBar.progress _)(Messages("common.issues_info"), Messages("message.collecting"), Messages("message.collected"))

  def receive: Receive = {
    case IssuesActor.Do =>
      val router     = SmallestMailboxPool(akkaMailBoxPool, supervisorStrategy = strategy)
      val issueActor = context.actorOf(router.props(Props(new IssueActor(exportContext, backlogTextFormattingRule))))

      (0 until (allCount, limit))
        .foldLeft(Seq.empty[Int]) { (acc, offset) =>
          acc concat issueIds(offset)
        }
        .map(issues)
        .foreach(_(issueActor))

      completion.await
      sender() ! IssuesActor.Done
  }

  private[this] def issueIds(offset: Int): Seq[Int] = {
    val params = Map(
      "offset"        -> offset.toString,
      "limit"         -> limit.toString,
      "project_id"    -> exportContext.projectId.value.toString,
      "status_id"     -> "*",
      "subproject_id" -> "!*"
    )
    val ids = exportContext.issueService.allIssues(params).map(_.getId.intValue())
    issuesInfoProgress(((offset / limit) + 1), ((allCount / limit) + 1))
    ids
  }

  private[this] def issues(issueId: Int)(issueActor: ActorRef): Unit = {
    issueActor ! IssueActor.Do(issueId, completion, allCount, console)
  }

}

private[exporter] object IssuesActor {

  val name = "IssuesActor"

  case object Do

  case object Done

} 
Example 27
Source File: StreamProcessorApp.scala    From event-sourcing-kafka-streams   with MIT License 5 votes vote down vote up
package org.amitayh.invoices.streamprocessor

import java.util.Properties
import java.util.concurrent.CountDownLatch

import org.amitayh.invoices.common.Config
import org.apache.kafka.streams.KafkaStreams.State
import org.apache.kafka.streams.{KafkaStreams, StreamsConfig, Topology}
import org.log4s.getLogger

trait StreamProcessorApp extends App {

  def appId: String

  def topology: Topology

  private val logger = getLogger

  private val latch = new CountDownLatch(1)

  private val streams: KafkaStreams = {
    val props = new Properties
    props.put(StreamsConfig.APPLICATION_ID_CONFIG, appId)
    props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, Config.BootstrapServers)
    props.put(StreamsConfig.PROCESSING_GUARANTEE_CONFIG, StreamsConfig.EXACTLY_ONCE)
    new KafkaStreams(topology, props)
  }

  streams.setStateListener((newState: State, oldState: State) => {
    logger.info(s"$oldState -> $newState")
  })

  streams.setUncaughtExceptionHandler((_: Thread, e: Throwable) => {
    logger.error(e)(s"Exception was thrown in stream processor $appId")
    latch.countDown()
  })

  def start(): Unit = {
    logger.info("Starting...")
    streams.start()
    sys.ShutdownHookThread(close())
    latch.await()
  }

  def close(): Unit = {
    logger.info("Shutting down...")
    streams.close()
  }

  start()

} 
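The essence of this launcher is a one-count latch that keeps the main thread alive until either the uncaught-exception handler or a shutdown hook releases it. A minimal standalone sketch of that pattern, using plain threads instead of Kafka Streams (the object and thread names are illustrative, and the lambda-to-Runnable conversion assumes Scala 2.12+):

import java.util.concurrent.CountDownLatch

object BlockUntilShutdownSketch extends App {
  private val latch = new CountDownLatch(1)

  // Any fatal condition releases the latch; extra countDown calls are harmless.
  private val worker = new Thread(() => {
    try {
      Thread.sleep(500) // stand-in for real stream-processing work
      throw new RuntimeException("simulated failure")
    } catch {
      case e: Throwable =>
        System.err.println(s"worker died: ${e.getMessage}")
        latch.countDown()
    }
  }, "worker")

  sys.addShutdownHook(latch.countDown()) // external termination also releases the main thread

  worker.start()
  latch.await() // the main thread parks here until failure or shutdown
  println("shutting down")
}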
Example 28
Source File: ProcessingTimeExecutorSuite.scala    From sparkoscope   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.sql.execution.streaming

import java.util.concurrent.{CountDownLatch, TimeUnit}

import org.apache.spark.SparkFunSuite
import org.apache.spark.sql.streaming.ProcessingTime
import org.apache.spark.util.{Clock, ManualClock, SystemClock}

class ProcessingTimeExecutorSuite extends SparkFunSuite {

  test("nextBatchTime") {
    val processingTimeExecutor = ProcessingTimeExecutor(ProcessingTime(100))
    assert(processingTimeExecutor.nextBatchTime(0) === 100)
    assert(processingTimeExecutor.nextBatchTime(1) === 100)
    assert(processingTimeExecutor.nextBatchTime(99) === 100)
    assert(processingTimeExecutor.nextBatchTime(100) === 200)
    assert(processingTimeExecutor.nextBatchTime(101) === 200)
    assert(processingTimeExecutor.nextBatchTime(150) === 200)
  }

  test("calling nextBatchTime with the result of a previous call should return the next interval") {
    val intervalMS = 100
    val processingTimeExecutor = ProcessingTimeExecutor(ProcessingTime(intervalMS))

    val ITERATION = 10
    var nextBatchTime: Long = 0
    for (it <- 1 to ITERATION) {
      nextBatchTime = processingTimeExecutor.nextBatchTime(nextBatchTime)
    }

    // nextBatchTime should be 1000
    assert(nextBatchTime === intervalMS * ITERATION)
  }

  private def testBatchTermination(intervalMs: Long): Unit = {
    var batchCounts = 0
    val processingTimeExecutor = ProcessingTimeExecutor(ProcessingTime(intervalMs))
    processingTimeExecutor.execute(() => {
      batchCounts += 1
      // If the batch termination works well, batchCounts should be 3 after `execute`
      batchCounts < 3
    })
    assert(batchCounts === 3)
  }

  test("batch termination") {
    testBatchTermination(0)
    testBatchTermination(10)
  }

  test("notifyBatchFallingBehind") {
    val clock = new ManualClock()
    @volatile var batchFallingBehindCalled = false
    val latch = new CountDownLatch(1)
    val t = new Thread() {
      override def run(): Unit = {
        val processingTimeExecutor = new ProcessingTimeExecutor(ProcessingTime(100), clock) {
          override def notifyBatchFallingBehind(realElapsedTimeMs: Long): Unit = {
            batchFallingBehindCalled = true
          }
        }
        processingTimeExecutor.execute(() => {
          latch.countDown()
          clock.waitTillTime(200)
          false
        })
      }
    }
    t.start()
    // Wait until the batch is running so that we don't call `advance` too early
    assert(latch.await(10, TimeUnit.SECONDS), "the batch has not yet started in 10 seconds")
    clock.advance(200)
    t.join()
    assert(batchFallingBehindCalled === true)
  }
} 
Example 29
Source File: ProcessingTimeExecutorSuite.scala    From drizzle-spark   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.sql.execution.streaming

import java.util.concurrent.{CountDownLatch, TimeUnit}

import org.apache.spark.SparkFunSuite
import org.apache.spark.sql.streaming.ProcessingTime
import org.apache.spark.util.{Clock, ManualClock, SystemClock}

class ProcessingTimeExecutorSuite extends SparkFunSuite {

  test("nextBatchTime") {
    val processingTimeExecutor = ProcessingTimeExecutor(ProcessingTime(100))
    assert(processingTimeExecutor.nextBatchTime(0) === 100)
    assert(processingTimeExecutor.nextBatchTime(1) === 100)
    assert(processingTimeExecutor.nextBatchTime(99) === 100)
    assert(processingTimeExecutor.nextBatchTime(100) === 200)
    assert(processingTimeExecutor.nextBatchTime(101) === 200)
    assert(processingTimeExecutor.nextBatchTime(150) === 200)
  }

  test("calling nextBatchTime with the result of a previous call should return the next interval") {
    val intervalMS = 100
    val processingTimeExecutor = ProcessingTimeExecutor(ProcessingTime(intervalMS))

    val ITERATION = 10
    var nextBatchTime: Long = 0
    for (it <- 1 to ITERATION) {
      nextBatchTime = processingTimeExecutor.nextBatchTime(nextBatchTime)
    }

    // nextBatchTime should be 1000
    assert(nextBatchTime === intervalMS * ITERATION)
  }

  private def testBatchTermination(intervalMs: Long): Unit = {
    var batchCounts = 0
    val processingTimeExecutor = ProcessingTimeExecutor(ProcessingTime(intervalMs))
    processingTimeExecutor.execute(() => {
      batchCounts += 1
      // If the batch termination works well, batchCounts should be 3 after `execute`
      batchCounts < 3
    })
    assert(batchCounts === 3)
  }

  test("batch termination") {
    testBatchTermination(0)
    testBatchTermination(10)
  }

  test("notifyBatchFallingBehind") {
    val clock = new ManualClock()
    @volatile var batchFallingBehindCalled = false
    val latch = new CountDownLatch(1)
    val t = new Thread() {
      override def run(): Unit = {
        val processingTimeExecutor = new ProcessingTimeExecutor(ProcessingTime(100), clock) {
          override def notifyBatchFallingBehind(realElapsedTimeMs: Long): Unit = {
            batchFallingBehindCalled = true
          }
        }
        processingTimeExecutor.execute(() => {
          latch.countDown()
          clock.waitTillTime(200)
          false
        })
      }
    }
    t.start()
    // Wait until the batch is running so that we don't call `advance` too early
    assert(latch.await(10, TimeUnit.SECONDS), "the batch has not yet started in 10 seconds")
    clock.advance(200)
    t.join()
    assert(batchFallingBehindCalled === true)
  }
} 
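This suite (listed twice above, from two Spark forks) relies on one trick in its notifyBatchFallingBehind test: the background thread counts a latch down once it reaches the interesting point, and the test thread does a bounded await before advancing the manual clock. Stripped of the Spark types, the handshake reduces to the following sketch (names are illustrative):

import java.util.concurrent.{CountDownLatch, TimeUnit}

object AwaitCheckpointSketch extends App {
  val started = new CountDownLatch(1)
  @volatile var reachedEnd = false

  val t = new Thread(() => {
    started.countDown() // signal: the section under test has begun
    Thread.sleep(200)   // stand-in for clock.waitTillTime(...)
    reachedEnd = true
  })
  t.start()

  // Bounded await: a hung worker fails the assertion instead of hanging the test forever.
  assert(started.await(10, TimeUnit.SECONDS), "the worker has not started in 10 seconds")
  t.join()
  assert(reachedEnd)
}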
Example 30
Source File: ExternalShuffleService.scala    From drizzle-spark   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.deploy

import java.util.concurrent.CountDownLatch

import scala.collection.JavaConverters._

import org.apache.spark.{SecurityManager, SparkConf}
import org.apache.spark.internal.Logging
import org.apache.spark.metrics.MetricsSystem
import org.apache.spark.network.TransportContext
import org.apache.spark.network.netty.SparkTransportConf
import org.apache.spark.network.sasl.SaslServerBootstrap
import org.apache.spark.network.server.{TransportServer, TransportServerBootstrap}
import org.apache.spark.network.shuffle.ExternalShuffleBlockHandler
import org.apache.spark.network.util.TransportConf
import org.apache.spark.util.{ShutdownHookManager, Utils}


// Note: this excerpt was extracted without its enclosing object. The object header and the
// `server` / `barrier` members used by `main` are restored below so the method reads in
// context (the companion ExternalShuffleService class itself is still elided here).
object ExternalShuffleService extends Logging {

  @volatile
  private var server: ExternalShuffleService = _

  private val barrier = new CountDownLatch(1)

  private[spark] def main(
      args: Array[String],
      newShuffleService: (SparkConf, SecurityManager) => ExternalShuffleService): Unit = {
    Utils.initDaemon(log)
    val sparkConf = new SparkConf
    Utils.loadDefaultSparkProperties(sparkConf)
    val securityManager = new SecurityManager(sparkConf)

    // we override this value since this service is started from the command line
    // and we assume the user really wants it to be running
    sparkConf.set("spark.shuffle.service.enabled", "true")
    server = newShuffleService(sparkConf, securityManager)
    server.start()

    logDebug("Adding shutdown hook") // force eager creation of logger
    ShutdownHookManager.addShutdownHook { () =>
      logInfo("Shutting down shuffle service.")
      server.stop()
      barrier.countDown()
    }

    // keep running until the process is terminated
    barrier.await()
  }
} 
Example 31
Source File: FlowLauncher.scala    From piflow   with BSD 2-Clause "Simplified" License 5 votes vote down vote up
package cn.piflow.util

import java.io.File
import java.util.Date
import java.util.concurrent.CountDownLatch

import cn.piflow.Flow
import org.apache.hadoop.security.SecurityUtil
import org.apache.http.client.methods.{CloseableHttpResponse, HttpPut}
import org.apache.http.entity.StringEntity
import org.apache.http.impl.client.HttpClients
import org.apache.http.util.EntityUtils
import org.apache.spark.launcher.SparkLauncher


object FlowLauncher {

  def launch(flow: Flow) : SparkLauncher = {

    var flowJson = flow.getFlowJson()
    println("FlowLauncher json:" + flowJson)

    val flowJsonencryptAES = SecurityUtil.encryptAES(flowJson)

    var appId : String = ""
    val countDownLatch = new CountDownLatch(1)
    val launcher = new SparkLauncher
    val sparkLauncher = launcher
      .setAppName(flow.getFlowName())
      .setMaster(PropertyUtil.getPropertyValue("spark.master"))
      //.setDeployMode(PropertyUtil.getPropertyValue("spark.deploy.mode"))
      .setAppResource(ConfigureUtil.getPiFlowBundlePath())
      .setVerbose(true)
      .setConf("spark.driver.memory", flow.getDriverMemory())
      .setConf("spark.executor.instances", flow.getExecutorNum())
      .setConf("spark.executor.memory", flow.getExecutorMem())
      .setConf("spark.executor.cores",flow.getExecutorCores())
      .addFile(PropertyUtil.getConfigureFile())
      .addFile(ServerIpUtil.getServerIpFile())
      .setMainClass("cn.piflow.api.StartFlowMain")
      .addAppArgs(flowJsonencryptAES)

    val sparkMaster = PropertyUtil.getPropertyValue("spark.master")
    if(sparkMaster.equals("yarn")){
      sparkLauncher.setDeployMode(PropertyUtil.getPropertyValue("spark.deploy.mode"))
      sparkLauncher.setConf("spark.hadoop.yarn.resourcemanager.hostname", PropertyUtil.getPropertyValue("yarn.resourcemanager.hostname"))
    }

    //add other jars for application
    val classPath = PropertyUtil.getClassPath()
    val classPathFile = new File(classPath)
    if(classPathFile.exists()){
      FileUtil.getJarFile(new File(classPath)).foreach(f => {
        println(f.getPath)
        sparkLauncher.addJar(f.getPath)
      })
    }

    val scalaPath = PropertyUtil.getScalaPath()
    val scalaPathFile = new File(scalaPath)
    if(scalaPathFile.exists()){
      FileUtil.getJarFile(new File(scalaPath)).foreach(f => {
        println(f.getPath)
        sparkLauncher.addJar(f.getPath)
      })
    }

    sparkLauncher
  }

  def stop(appID: String) = {

    println("Stop Flow !!!!!!!!!!!!!!!!!!!!!!!!!!")
    //yarn application kill appId
    val url = ConfigureUtil.getYarnResourceManagerWebAppAddress() + appID + "/state"
    val client = HttpClients.createDefault()
    val put:HttpPut = new HttpPut(url)
    val body ="{\"state\":\"KILLED\"}"
    put.addHeader("Content-Type", "application/json")
    put.setEntity(new StringEntity(body))
    val response:CloseableHttpResponse = client.execute(put)
    val entity = response.getEntity
    val str = EntityUtils.toString(entity,"UTF-8")

    //update db
    println("Update flow state after Stop Flow !!!!!!!!!!!!!!!!!!!!!!!!!!")
    H2Util.updateFlowState(appID, FlowState.KILLED)
    H2Util.updateFlowFinishedTime(appID, new Date().toString)


    "ok"
  }

} 
Example 32
Source File: RequestTimeoutTest.scala    From spark-riak-connector   with Apache License 2.0 5 votes vote down vote up
package com.basho.riak.spark.rdd.failover

import java.util.concurrent.CountDownLatch

import com.basho.riak.spark._
import com.basho.riak.stub.{RequestBasedMessageAdapter, RiakMessageHandler}
import org.junit.rules.ExpectedException
import org.junit.{After, Rule, Test}
import shaded.com.basho.riak.protobuf.RiakKvPB._
import shaded.com.google.protobuf.ByteString

import scala.collection.JavaConverters._

class RequestTimeoutTest extends AbstractFailoverOfflineTest {

  val _expectedException: ExpectedException = ExpectedException.none()

  @Rule
  def expectedException: ExpectedException = _expectedException

  override val riakMessageHandler: Option[RiakMessageHandler] = Some(new RequestBasedMessageAdapter {
    override def handleCoverageRequest(req: RpbCoverageReq): RpbCoverageResp = RpbCoverageResp.newBuilder()
      .addAllEntries(riakNodes
        .zip(distributeEvenly(COVERAGE_ENTRIES_COUNT, riakHosts))
        .flatMap {
          case ((a, _), partitionsPerNode) => (0 until partitionsPerNode).map {
            case partitionIndex: Int => RpbCoverageEntry.newBuilder()
              .setIp(ByteString.copyFromUtf8(a.getHost))
              .setPort(a.getPort)
              .setCoverContext(ByteString.copyFromUtf8(s"StubCoverageEntry-${a.toString}-$partitionIndex"))
              .setKeyspaceDesc(ByteString.copyFromUtf8(s"StubCoverageEntry-${a.toString}-$partitionIndex"))
              .build()
          }
        }.asJava)
      .build()

    override def handleIndexRequest(req: RpbIndexReq): RpbIndexResp = {
      logInfo("Index Request is going to stuck...")
      latch.await()
      logInfo("Timeout verified. Thread execution continued.")

      RpbIndexResp.newBuilder().build()
    }
  })

  val latch = new CountDownLatch(1)

  @Test(timeout = 5000) // scalastyle:ignore
  def fullBucketReadShouldFailWithTimeout(): Unit = {
    expectedException.expectMessage("test timed out after 5000 milliseconds")
    sc.riakBucket[String](NAMESPACE).queryAll().collect()
  }

  @After
  def after(): Unit = {
    latch.countDown()
  }
} 
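The test above simulates a stuck server by parking the stubbed request handler on a latch, and the @After hook releases it so the handler thread does not outlive the test. The same idea without Riak or JUnit (names are illustrative):

import java.util.concurrent.CountDownLatch

object StuckHandlerSketch extends App {
  val unstick = new CountDownLatch(1)

  val handler = new Thread(() => {
    println("request received, pretending to hang...")
    unstick.await() // the handler stays "stuck" until explicitly released
    println("released, handler exits cleanly")
  }, "stub-handler")
  handler.start()

  // ... the real test would observe a client-side timeout here ...

  unstick.countDown() // teardown: let the stub thread finish
  handler.join()
}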
Example 33
Source File: SlackApiClientTest.scala    From slack-scala-client   with MIT License 5 votes vote down vote up
package slack

import slack.api.SlackApiClient
import slack.models.{ActionField, Attachment, PublicChannel}

import scala.concurrent.Await
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.duration._
import org.scalatest.funsuite.AnyFunSuite

import java.util.concurrent.CountDownLatch


class SlackApiClientTest extends AnyFunSuite with Credentials {

  (user, token) match {
    case (Some(slackUser), Some(slackToken)) =>
      val apiClient = SlackApiClient(slackToken)
      val channel = system.settings.config.getString("test.channel")

      test("send attachment with action") {
        val actionField = Seq(ActionField("accept", "Accept", "button", Some("primary")))
        val attachment = Attachment(
          text = Some("Do you want to accept?"),
          fallback = Some("backup message: code-123456"),
          callback_id = Some("code-123456"),
          actions = Some(actionField)
        )

        apiClient.listChannels().map { channels =>
          channels.foreach(channel => println(s"${channel.id}|${channel.name}"))
        }
        val future = apiClient.postChatMessage(channel, "Request", attachments = Some(Seq(attachment)))
        val result = Await.result(future, 5.seconds)

        println(result)
      }

      test("list users with pagination") {
        val latch = new CountDownLatch(1)
        apiClient.listUsers().map { users =>
          println(s"Total: ${users.size} users")
          users.foreach(user => println(s"${user.id}|${user.name}|${user.is_bot}|${user.is_admin}"))
          latch.countDown()
        }
        latch.await()
      }

      test("list channels using conversations.list") {
        val latch = new CountDownLatch(1)
        apiClient.listConversations(Seq(PublicChannel)).map { channels =>
          println(s"Total: ${channels.size} channels")
          channels.foreach(channel => println(s"${channel.id}|${channel.name}|${channel.is_private}|${channel.is_member}"))
          latch.countDown()
        }
        latch.await()
      }


    case _ =>
      println("Skipping the test as the API credentials are not available")
  }
} 
Example 34
Source File: SlackRtmClientTest.scala    From slack-scala-client   with MIT License 5 votes vote down vote up
package slack

import java.util.concurrent.{CountDownLatch, TimeUnit}

import slack.api.SlackApiClient
import slack.models.Reply
import slack.rtm.SlackRtmClient

import scala.concurrent.duration._
import scala.concurrent.{Await, Promise}
import org.scalatest.funsuite.AnyFunSuite
import org.scalatest.matchers.should.Matchers

class SlackRtmClientTest extends AnyFunSuite with Matchers with Credentials {

  rtmToken match {
    case Some(slackToken) =>

      val channel = system.settings.config.getString("test.channel")

      lazy val rtmClient = {
        val rtm = SlackRtmClient(slackToken)
        assert(rtm.state.self.id != null)
        rtm
      }
      test("rtm typing") {
        rtmClient.indicateTyping(channel)
      }

      test("team domain") {
        val domain = rtmClient.state.team.domain
        val name = rtmClient.state.team.name
        domain should be(system.settings.config.getString("test.team.domain"))
        name should be(system.settings.config.getString("test.team.name"))
      }

      test("send message and parse reply") {
        val latch = new CountDownLatch(1)
        val promise = Promise[Long]()
        rtmClient.onEvent {
          case r: Reply =>
            assert(r.reply_to.equals(Await.result(promise.future, 2.seconds)))
            latch.countDown()
          case e => println("EVENT >>>>> " + e)
        }
        val messageIdFuture = rtmClient.sendMessage(channel, "Hi there")
        promise.completeWith(messageIdFuture)
        latch.await(5, TimeUnit.SECONDS)
      }

      ignore("edit message as bot") {
        val rtmApi = SlackApiClient(slackToken)
        val future = rtmApi.updateChatMessage(channel, "1465891701.000006", "edit-x", asUser = Some(true))
        val result = Await.result(future, 5.seconds)
        assert(result.ok.equals(true))
      }

    case _ =>
      println("Skipping the test as the API credentials are not available")

  }
} 
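One detail worth noting in the "send message and parse reply" test above: latch.await(5, TimeUnit.SECONDS) returns false on timeout and that return value is discarded, so a missed reply would pass silently. A hedged helper that turns the timeout into a failure (the awaitOrFail name is illustrative, not part of the library):

import java.util.concurrent.{CountDownLatch, TimeUnit}

object LatchAssertions {
  // Fails loudly instead of silently continuing when the awaited event never arrives.
  def awaitOrFail(latch: CountDownLatch, seconds: Long, hint: String): Unit =
    if (!latch.await(seconds, TimeUnit.SECONDS))
      throw new AssertionError(s"timed out after ${seconds}s waiting for: $hint")
}

Replacing the bare await with awaitOrFail(latch, 5, "reply to the sent message") keeps the test's structure but makes the timeout visible.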
Example 35
Source File: ListenerTest.scala    From eel-sdk   with Apache License 2.0 5 votes vote down vote up
package io.eels

import java.util.concurrent.{CountDownLatch, TimeUnit}

import io.eels.component.csv.{CsvSink, CsvSource}
import io.eels.datastream.DataStream
import io.eels.schema.StructType
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.{FileSystem, Path}
import org.scalatest.{Matchers, WordSpec}

import scala.util.Random

class ListenerTest extends WordSpec with Matchers {

  implicit val conf = new Configuration()
  implicit val fs = FileSystem.get(conf)

  val schema = StructType("a", "b", "c", "d", "e")
  val rows = List.fill(1000)(Row(schema, Random.nextBoolean(), Random.nextFloat(), Random.nextGaussian(), Random.nextLong(), Random.nextString(10)))
  val ds = DataStream.fromRows(schema, rows)

  val path = new Path("listener_test.csv")

  "DataStream" should {
    "support user's listeners" in {

      val latch = new CountDownLatch(1000)
      fs.delete(path, false)

      ds.listener(new Listener {
        override def onNext(value: Row): Unit = latch.countDown()
        override def onError(e: Throwable): Unit = ()
        override def onComplete(): Unit = ()
      }).to(CsvSink(path))

      latch.await(20, TimeUnit.SECONDS) shouldBe true

      fs.delete(path, false)
    }
    "propagate errors in listeners" in {

      class TestSink extends Sink {
        override def open(schema: StructType): SinkWriter = new SinkWriter {
          override def close(): Unit = ()
          override def write(row: Row): Unit = ()
        }
      }

      try {
        ds.listener(new Listener {
          override def onNext(value: Row): Unit = sys.error("boom")
          override def onError(e: Throwable): Unit = ()
          override def onComplete(): Unit = ()
        }).to(new TestSink)
        assert(false)
      } catch {
        case _: Throwable =>
      }
    }
  }

  "Source.toDataStream" should {
    "call on next for each row" in {

      val latch = new CountDownLatch(1000)

      fs.delete(path, false)
      ds.to(CsvSink(path))

      CsvSource(path).toDataStream(new Listener {
        override def onNext(value: Row): Unit = latch.countDown()
        override def onError(e: Throwable): Unit = ()
        override def onComplete(): Unit = ()
      }).collect

      latch.await(5, TimeUnit.SECONDS) shouldBe true
      fs.delete(path, false)
    }
    "call on complete once finished" in {

      val latch = new CountDownLatch(1001)

      fs.delete(path, false)
      ds.to(CsvSink(path))

      CsvSource(path).toDataStream(new Listener {
        override def onNext(value: Row): Unit = latch.countDown()
        override def onError(e: Throwable): Unit = ()
        override def onComplete(): Unit = latch.countDown()
      }).collect

      latch.await(5, TimeUnit.SECONDS) shouldBe true
      fs.delete(path, false)
    }
  }
} 
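The listener tests above size the latch to the number of expected callbacks; await then returns true only if every callback arrived within the timeout. A minimal standalone version of that counting pattern, with a plain thread in place of the DataStream (names are illustrative):

import java.util.concurrent.{CountDownLatch, TimeUnit}

object CountCallbacksSketch extends App {
  val expected = 1000
  val latch    = new CountDownLatch(expected)

  // Simulate a source emitting `expected` elements and invoking a callback per element.
  new Thread(() => (1 to expected).foreach(_ => latch.countDown())).start()

  val allArrived = latch.await(20, TimeUnit.SECONDS)
  assert(allArrived, s"only ${expected - latch.getCount} of $expected callbacks arrived")
}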
Example 36
Source File: NonblockingPar.scala    From learning-fpinscala   with MIT License 5 votes vote down vote up
package com.satansk.fpinscala.parallelism

import java.util.concurrent.{Callable, CountDownLatch, ExecutorService}
import java.util.concurrent.atomic.AtomicReference

// Note: the enclosing object and the Par / Future declarations were stripped from this
// excerpt; the minimal definitions below are restored so that the methods that follow
// compile (they are assumed to match the fpinscala-style non-blocking API).
object NonblockingPar {

  trait Future[A] {
    def apply(callback: A => Unit): Unit
  }

  type Par[A] = ExecutorService => Future[A]

  def unit[A](a: A): Par[A] =
    _ ⇒ new Future[A] {
      def apply(callback: A ⇒ Unit): Unit = callback(a)
    }

  def fork[A](a: ⇒ Par[A]): Par[A] =
    es ⇒ new Future[A] {
      def apply(callback: (A) ⇒ Unit): Unit =
        eval(es)(a(es)(callback))
    }

  def eval(es: ExecutorService)(r: ⇒ Unit): Unit =
    es.submit(new Callable[Unit] {
      def call = r
    })
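
  // The original file also defines `run`, which is where the CountDownLatch import is
  // actually used: it blocks the caller until the continuation delivers a value. The
  // version below is a sketch restored against the fpinscala-style API assumed above.
  def run[A](es: ExecutorService)(p: Par[A]): A = {
    val ref   = new AtomicReference[A]
    val latch = new CountDownLatch(1)
    p(es) { a => ref.set(a); latch.countDown() }
    latch.await()
    ref.get
  }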

} 
Example 37
Source File: BlockingSource.scala    From XSQL   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.sql.streaming.util

import java.util.concurrent.CountDownLatch

import org.apache.spark.sql.{SQLContext, _}
import org.apache.spark.sql.execution.streaming.{LongOffset, Offset, Sink, Source}
import org.apache.spark.sql.sources.{StreamSinkProvider, StreamSourceProvider}
import org.apache.spark.sql.streaming.OutputMode
import org.apache.spark.sql.types.{IntegerType, StructField, StructType}


class BlockingSource extends StreamSourceProvider with StreamSinkProvider {

  private val fakeSchema = StructType(StructField("a", IntegerType) :: Nil)

  override def sourceSchema(
      spark: SQLContext,
      schema: Option[StructType],
      providerName: String,
      parameters: Map[String, String]): (String, StructType) = {
    ("dummySource", fakeSchema)
  }

  override def createSource(
      spark: SQLContext,
      metadataPath: String,
      schema: Option[StructType],
      providerName: String,
      parameters: Map[String, String]): Source = {
    BlockingSource.latch.await()
    new Source {
      override def schema: StructType = fakeSchema
      override def getOffset: Option[Offset] = Some(new LongOffset(0))
      override def getBatch(start: Option[Offset], end: Offset): DataFrame = {
        import spark.implicits._
        Seq[Int]().toDS().toDF()
      }
      override def stop() {}
    }
  }

  override def createSink(
      spark: SQLContext,
      parameters: Map[String, String],
      partitionColumns: Seq[String],
      outputMode: OutputMode): Sink = {
    new Sink {
      override def addBatch(batchId: Long, data: DataFrame): Unit = {}
    }
  }
}

object BlockingSource {
  var latch: CountDownLatch = null
} 
Example 38
package com.tomekl007.chapter_2

import java.util.concurrent.{CountDownLatch, Executors}

import org.scalatest.FunSuite

import scala.collection.mutable.ListBuffer

class MultithreadedImmutabilityTest extends FunSuite {

  test("warning: race condition with mutability") {
    //given
    var listMutable = new ListBuffer[String]()
    val executors = Executors.newFixedThreadPool(2)
    val latch = new CountDownLatch(2)

    //when
    executors.submit(new Runnable {
      override def run(): Unit = {
        latch.countDown()
        listMutable += "A"
      }
    })

    executors.submit(new Runnable {
      override def run(): Unit = {
        latch.countDown()
        if(!listMutable.contains("A")) {
          listMutable += "A"
        }
      }
    })

    latch.await()

    //then
    //listMutable can have ("A") or ("A","A")

  }

} 
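The test above only uses the latch to confirm that both tasks have started; it never waits for them to finish, so the closing comment describes a list that may still be mutating. A common companion pattern is a start gate paired with a done latch, sketched here with plain executors (names are illustrative):

import java.util.concurrent.{CountDownLatch, Executors, TimeUnit}

object StartGateSketch extends App {
  val pool      = Executors.newFixedThreadPool(2)
  val startGate = new CountDownLatch(1) // workers block here until released
  val done      = new CountDownLatch(2) // main blocks here until all workers finish

  (1 to 2).foreach { _ =>
    pool.submit(new Runnable {
      override def run(): Unit = {
        startGate.await()  // maximise overlap between the two workers
        // ... racy or contended work goes here ...
        done.countDown()
      }
    })
  }

  startGate.countDown()                    // release both workers at once
  assert(done.await(5, TimeUnit.SECONDS))  // and wait until both have finished
  pool.shutdown()
}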
Example 39
Source File: ScalaClientTestUtils.scala    From incubator-livy   with Apache License 2.0 5 votes vote down vote up
package org.apache.livy.scalaapi

import java.util.Random
import java.util.concurrent.{CountDownLatch, TimeUnit}

import scala.collection.mutable.ArrayBuffer
import scala.concurrent.{Await, Future}
import scala.concurrent.duration._

import org.scalatest.FunSuite

import org.apache.livy.LivyBaseUnitTestSuite

object ScalaClientTestUtils extends FunSuite with LivyBaseUnitTestSuite {

  val Timeout = 40

  def helloJob(context: ScalaJobContext): String = "hello"

  def throwExceptionJob(context: ScalaJobContext): Unit = throw new CustomTestFailureException

  def simpleSparkJob(context: ScalaJobContext): Long = {
    val r = new Random
    val count = 5
    val partitions = Math.min(r.nextInt(10) + 1, count)
    val buffer = new ArrayBuffer[Int]()
    for (a <- 1 to count) {
      buffer += r.nextInt()
    }
    context.sc.parallelize(buffer, partitions).count()
  }

  def assertAwait(lock: CountDownLatch): Unit = {
    assert(lock.await(Timeout, TimeUnit.SECONDS) == true)
  }

  def assertTestPassed[T](future: Future[T], expectedValue: T): Unit = {
    val result = Await.result(future, Timeout second)
    assert(result === expectedValue)
  }
} 
Example 40
Source File: SessionSpec.scala    From incubator-livy   with Apache License 2.0 5 votes vote down vote up
package org.apache.livy.repl

import java.util.Properties
import java.util.concurrent.{ConcurrentLinkedQueue, CountDownLatch, TimeUnit}

import org.apache.spark.SparkConf
import org.scalatest.{BeforeAndAfter, FunSpec}
import org.scalatest.Matchers._
import org.scalatest.concurrent.Eventually
import org.scalatest.time._

import org.apache.livy.LivyBaseUnitTestSuite
import org.apache.livy.repl.Interpreter.ExecuteResponse
import org.apache.livy.rsc.RSCConf
import org.apache.livy.sessions._

class SessionSpec extends FunSpec with Eventually with LivyBaseUnitTestSuite with BeforeAndAfter {
  override implicit val patienceConfig =
    PatienceConfig(timeout = scaled(Span(30, Seconds)), interval = scaled(Span(100, Millis)))

  private val rscConf = new RSCConf(new Properties()).set(RSCConf.Entry.SESSION_KIND, "spark")

  describe("Session") {
    var session: Session = null

    after {
      if (session != null) {
        session.close()
        session = null
      }
    }

    it("should call state changed callbacks in happy path") {
      val expectedStateTransitions =
        Array("not_started", "starting", "idle", "busy", "idle", "busy", "idle")
      val actualStateTransitions = new ConcurrentLinkedQueue[String]()

      session = new Session(rscConf, new SparkConf(), None,
        { s => actualStateTransitions.add(s.toString) })
      session.start()
      session.execute("")

      eventually {
        actualStateTransitions.toArray shouldBe expectedStateTransitions
      }
    }

    it("should not transit to idle if there're any pending statements.") {
      val expectedStateTransitions =
        Array("not_started", "starting", "idle", "busy", "busy", "busy", "idle", "busy", "idle")
      val actualStateTransitions = new ConcurrentLinkedQueue[String]()

      val blockFirstExecuteCall = new CountDownLatch(1)
      val interpreter = new SparkInterpreter(new SparkConf()) {
        override def execute(code: String): ExecuteResponse = {
          blockFirstExecuteCall.await(10, TimeUnit.SECONDS)
          super.execute(code)
        }
      }
      session = new Session(rscConf, new SparkConf(), Some(interpreter),
        { s => actualStateTransitions.add(s.toString) })
      session.start()

      for (_ <- 1 to 2) {
        session.execute("")
      }

      blockFirstExecuteCall.countDown()
      eventually {
        actualStateTransitions.toArray shouldBe expectedStateTransitions
      }
    }

    it("should remove old statements when reaching threshold") {
      rscConf.set(RSCConf.Entry.RETAINED_STATEMENTS, 2)
      session = new Session(rscConf, new SparkConf())
      session.start()

      session.statements.size should be (0)
      session.execute("")
      session.statements.size should be (1)
      session.statements.map(_._1).toSet should be (Set(0))
      session.execute("")
      session.statements.size should be (2)
      session.statements.map(_._1).toSet should be (Set(0, 1))
      session.execute("")
      eventually {
        session.statements.size should be (2)
        session.statements.map(_._1).toSet should be (Set(1, 2))
      }

      // Continue submitting statements, total statements in memory should be 2.
      session.execute("")
      eventually {
        session.statements.size should be (2)
        session.statements.map(_._1).toSet should be (Set(2, 3))
      }
    }
  }
} 
Example 41
Source File: MesosClusterDispatcher.scala    From sparkoscope   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.deploy.mesos

import java.util.concurrent.CountDownLatch

import org.apache.spark.{SecurityManager, SparkConf}
import org.apache.spark.deploy.mesos.config._
import org.apache.spark.deploy.mesos.ui.MesosClusterUI
import org.apache.spark.deploy.rest.mesos.MesosRestServer
import org.apache.spark.internal.Logging
import org.apache.spark.scheduler.cluster.mesos._
import org.apache.spark.util.{ShutdownHookManager, Utils}


private[mesos] class MesosClusterDispatcher(
    args: MesosClusterDispatcherArguments,
    conf: SparkConf)
  extends Logging {

  private val publicAddress = Option(conf.getenv("SPARK_PUBLIC_DNS")).getOrElse(args.host)
  private val recoveryMode = conf.get(RECOVERY_MODE).toUpperCase()
  logInfo("Recovery mode in Mesos dispatcher set to: " + recoveryMode)

  private val engineFactory = recoveryMode match {
    case "NONE" => new BlackHoleMesosClusterPersistenceEngineFactory
    case "ZOOKEEPER" => new ZookeeperMesosClusterPersistenceEngineFactory(conf)
    case _ => throw new IllegalArgumentException("Unsupported recovery mode: " + recoveryMode)
  }

  private val scheduler = new MesosClusterScheduler(engineFactory, conf)

  private val server = new MesosRestServer(args.host, args.port, conf, scheduler)
  private val webUi = new MesosClusterUI(
    new SecurityManager(conf),
    args.webUiPort,
    conf,
    publicAddress,
    scheduler)

  private val shutdownLatch = new CountDownLatch(1)

  def start(): Unit = {
    webUi.bind()
    scheduler.frameworkUrl = conf.get(DISPATCHER_WEBUI_URL).getOrElse(webUi.activeWebUiUrl)
    scheduler.start()
    server.start()
  }

  def awaitShutdown(): Unit = {
    shutdownLatch.await()
  }

  def stop(): Unit = {
    webUi.stop()
    server.stop()
    scheduler.stop()
    shutdownLatch.countDown()
  }
}

private[mesos] object MesosClusterDispatcher extends Logging {
  def main(args: Array[String]) {
    Utils.initDaemon(log)
    val conf = new SparkConf
    val dispatcherArgs = new MesosClusterDispatcherArguments(args, conf)
    conf.setMaster(dispatcherArgs.masterUrl)
    conf.setAppName(dispatcherArgs.name)
    dispatcherArgs.zookeeperUrl.foreach { z =>
      conf.set(RECOVERY_MODE, "ZOOKEEPER")
      conf.set(ZOOKEEPER_URL, z)
    }
    val dispatcher = new MesosClusterDispatcher(dispatcherArgs, conf)
    dispatcher.start()
    logDebug("Adding shutdown hook") // force eager creation of logger
    ShutdownHookManager.addShutdownHook { () =>
      logInfo("Shutdown hook is shutting down dispatcher")
      dispatcher.stop()
      dispatcher.awaitShutdown()
    }
    dispatcher.awaitShutdown()
  }
} 
Example 42
Source File: MesosClusterDispatcher.scala    From drizzle-spark   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.deploy.mesos

import java.util.concurrent.CountDownLatch

import org.apache.spark.{SecurityManager, SparkConf}
import org.apache.spark.deploy.mesos.ui.MesosClusterUI
import org.apache.spark.deploy.rest.mesos.MesosRestServer
import org.apache.spark.internal.Logging
import org.apache.spark.scheduler.cluster.mesos._
import org.apache.spark.util.{ShutdownHookManager, Utils}


private[mesos] class MesosClusterDispatcher(
    args: MesosClusterDispatcherArguments,
    conf: SparkConf)
  extends Logging {

  private val publicAddress = Option(conf.getenv("SPARK_PUBLIC_DNS")).getOrElse(args.host)
  private val recoveryMode = conf.get("spark.deploy.recoveryMode", "NONE").toUpperCase()
  logInfo("Recovery mode in Mesos dispatcher set to: " + recoveryMode)

  private val engineFactory = recoveryMode match {
    case "NONE" => new BlackHoleMesosClusterPersistenceEngineFactory
    case "ZOOKEEPER" => new ZookeeperMesosClusterPersistenceEngineFactory(conf)
    case _ => throw new IllegalArgumentException("Unsupported recovery mode: " + recoveryMode)
  }

  private val scheduler = new MesosClusterScheduler(engineFactory, conf)

  private val server = new MesosRestServer(args.host, args.port, conf, scheduler)
  private val webUi = new MesosClusterUI(
    new SecurityManager(conf),
    args.webUiPort,
    conf,
    publicAddress,
    scheduler)

  private val shutdownLatch = new CountDownLatch(1)

  def start(): Unit = {
    webUi.bind()
    scheduler.frameworkUrl = conf.get("spark.mesos.dispatcher.webui.url", webUi.activeWebUiUrl)
    scheduler.start()
    server.start()
  }

  def awaitShutdown(): Unit = {
    shutdownLatch.await()
  }

  def stop(): Unit = {
    webUi.stop()
    server.stop()
    scheduler.stop()
    shutdownLatch.countDown()
  }
}

private[mesos] object MesosClusterDispatcher extends Logging {
  def main(args: Array[String]) {
    Utils.initDaemon(log)
    val conf = new SparkConf
    val dispatcherArgs = new MesosClusterDispatcherArguments(args, conf)
    conf.setMaster(dispatcherArgs.masterUrl)
    conf.setAppName(dispatcherArgs.name)
    dispatcherArgs.zookeeperUrl.foreach { z =>
      conf.set("spark.deploy.recoveryMode", "ZOOKEEPER")
      conf.set("spark.deploy.zookeeper.url", z)
    }
    val dispatcher = new MesosClusterDispatcher(dispatcherArgs, conf)
    dispatcher.start()
    logDebug("Adding shutdown hook") // force eager creation of logger
    ShutdownHookManager.addShutdownHook { () =>
      logInfo("Shutdown hook is shutting down dispatcher")
      dispatcher.stop()
      dispatcher.awaitShutdown()
    }
    dispatcher.awaitShutdown()
  }
} 
Example 43
Source File: BlockingSource.scala    From sparkoscope   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.sql.streaming.util

import java.util.concurrent.CountDownLatch

import org.apache.spark.sql.{SQLContext, _}
import org.apache.spark.sql.execution.streaming.{LongOffset, Offset, Sink, Source}
import org.apache.spark.sql.sources.{StreamSinkProvider, StreamSourceProvider}
import org.apache.spark.sql.streaming.OutputMode
import org.apache.spark.sql.types.{IntegerType, StructField, StructType}


class BlockingSource extends StreamSourceProvider with StreamSinkProvider {

  private val fakeSchema = StructType(StructField("a", IntegerType) :: Nil)

  override def sourceSchema(
      spark: SQLContext,
      schema: Option[StructType],
      providerName: String,
      parameters: Map[String, String]): (String, StructType) = {
    ("dummySource", fakeSchema)
  }

  override def createSource(
      spark: SQLContext,
      metadataPath: String,
      schema: Option[StructType],
      providerName: String,
      parameters: Map[String, String]): Source = {
    BlockingSource.latch.await()
    new Source {
      override def schema: StructType = fakeSchema
      override def getOffset: Option[Offset] = Some(new LongOffset(0))
      override def getBatch(start: Option[Offset], end: Offset): DataFrame = {
        import spark.implicits._
        Seq[Int]().toDS().toDF()
      }
      override def stop() {}
    }
  }

  override def createSink(
      spark: SQLContext,
      parameters: Map[String, String],
      partitionColumns: Seq[String],
      outputMode: OutputMode): Sink = {
    new Sink {
      override def addBatch(batchId: Long, data: DataFrame): Unit = {}
    }
  }
}

object BlockingSource {
  var latch: CountDownLatch = null
} 
Example 44
Source File: InitialSpec.scala    From embedded-kafka   with Apache License 2.0 5 votes vote down vote up
package com.tuplejump.embedded.kafka

import java.util.concurrent.{TimeUnit, CountDownLatch}

import org.apache.kafka.common.serialization.StringDeserializer
import org.scalatest.concurrent.Eventually
import org.scalatest.concurrent.PatienceConfiguration.Timeout
import org.scalatest.time.{Millis, Span}

class InitialSpec extends AbstractSpec with Eventually with Logging {

  private val timeout = Timeout(Span(10000, Millis))

  "Initially, EmbeddedKafka" must {
    val kafka = new EmbeddedKafka()
    val topic = "test"
    val total = 1000
    val latch = new CountDownLatch(total)

    "start embedded zookeeper and embedded kafka" in {
      kafka.isRunning should be (false)
      kafka.start()
      eventually(timeout)(kafka.isRunning)
    }
    "create a topic" in {
      kafka.createTopic(topic, 1, 1)
    }
    "publish messages to the embedded kafka instance" in {
      val config = kafka.consumerConfig(
        group = "some.group",
        kafkaConnect = kafka.kafkaConfig.hostName + ":" + kafka.kafkaConfig.port,
        zkConnect = kafka.kafkaConfig.zkConnect,
        offsetPolicy = "largest", // latest with new consumer
        autoCommitEnabled = true,
        kDeserializer = classOf[StringDeserializer],
        vDeserializer = classOf[StringDeserializer])
      val consumer = new SimpleConsumer(latch, config, topic, "consumer.group", 1, 1)

      val batch1 = for (n <- 0 until total) yield s"message-test-$n"

      logger.info(s"Publishing ${batch1.size} messages...")

      kafka.sendMessages(topic, batch1)
      latch.await(10000, TimeUnit.MILLISECONDS)
      latch.getCount should be (0)

      consumer.shutdown()
    }
    "shut down relatively cleanly for now" in {
      kafka.shutdown()
      eventually(timeout)(!kafka.isRunning)
    }
  }
} 
Example 45
Source File: SimpleConsumer.scala    From embedded-kafka   with Apache License 2.0 5 votes vote down vote up
package com.tuplejump.embedded.kafka

import java.util.Properties
import java.util.concurrent.{CountDownLatch, Executors}

import scala.util.Try
import kafka.serializer.StringDecoder
import kafka.consumer.{ Consumer, ConsumerConfig }


class SimpleConsumer(
    val latch: CountDownLatch,
    consumerConfig: Map[String, String],
    topic: String,
    groupId: String,
    partitions: Int,
    numThreads: Int) {

  val connector = Consumer.create(createConsumerConfig)

  val streams = connector
    .createMessageStreams(Map(topic -> partitions), new StringDecoder(), new StringDecoder())
    .get(topic)

  val executor = Executors.newFixedThreadPool(numThreads)

  for (stream <- streams) {
    executor.submit(new Runnable() {
      def run(): Unit = {
        for (s <- stream) {
          while (s.iterator.hasNext) {
            latch.countDown()
          }
        }
      }
    })
  }

  private def createConsumerConfig: ConsumerConfig = {
    import scala.collection.JavaConverters._
    val props = new Properties()
    props.putAll(consumerConfig.asJava)
    new ConsumerConfig(props)
  }

  def shutdown(): Unit = Try {
    connector.shutdown()
    executor.shutdown()
  }
} 
Example 46
Source File: PoolSuite.scala    From reactive-async   with BSD 2-Clause "Simplified" License 5 votes vote down vote up
package com.phaller.rasync
package test

import java.util.concurrent.{ ConcurrentHashMap, CountDownLatch }

import com.phaller.rasync.cell.{ Cell, CellCompleter }
import org.scalatest.FunSuite

import scala.concurrent.{ Await, Promise }
import scala.concurrent.duration._
import com.phaller.rasync.lattice.Updater
import com.phaller.rasync.pool.HandlerPool
import com.phaller.rasync.test.lattice.{ IntUpdater, StringIntKey }

class PoolSuite extends FunSuite {
  test("onQuiescent") {
    val pool = HandlerPool[Int]

    var i = 0
    while (i < 10000) {
      val p1 = Promise[Boolean]()
      val p2 = Promise[Boolean]()
      pool.execute { () => { p1.success(true) }: Unit }
      pool.onQuiescent { () => p2.success(true) }
      try {
        Await.result(p2.future, 1.seconds)
      } catch {
        case t: Throwable =>
          assert(false, s"failure after $i iterations")
      }
      i += 1
    }

    pool.shutdown()
  }

  test("register cells concurrently") {
    implicit val stringIntUpdater: Updater[Int] = new IntUpdater

    implicit val pool = new HandlerPool[Int, Null](new StringIntKey("s"))
    var regCells = new ConcurrentHashMap[Cell[Int, Null], Cell[Int, Null]]()
    for (_ <- 1 to 1000) {
      pool.execute(() => {
        val completer = CellCompleter[Int, Null]()
        completer.cell.trigger()
        regCells.put(completer.cell, completer.cell)
        ()
      })
    }
    val fut = pool.quiescentResolveCell // set all (registered) cells to 1 via key.fallback
    Await.ready(fut, 5.seconds)

    regCells.values().removeIf(_.getResult() != 0)
    assert(regCells.size === 0)
  }

  test("register cells concurrently 2") {
    implicit val stringIntUpdater: Updater[Int] = new IntUpdater

    implicit val pool = new HandlerPool[Int, Null](new StringIntKey("s"))
    var regCells = new ConcurrentHashMap[Cell[Int, Null], Cell[Int, Null]]()
    for (_ <- 1 to 1000) {
      pool.execute(() => {
        val completer = CellCompleter[Int, Null]()
        regCells.put(completer.cell, completer.cell)
        ()
      })
    }
    val fut = pool.quiescentResolveCell // set all (registered) cells to 1 via key.fallback
    Await.ready(fut, 5.seconds)

    assert(regCells.size === 1000)
  }

  test("handler pool quiescence") {
    implicit val pool = new HandlerPool[Int, Null]
    val latch = new CountDownLatch(1)
    val latch2 = new CountDownLatch(1)
    pool.execute { () => latch.await() }
    pool.onQuiescent { () => latch2.countDown() }
    latch.countDown()

    latch2.await()
    assert(true)

    pool.onQuiescenceShutdown()
  }

} 
Example 47
Source File: AbstractAkkaConnection.scala    From scredis   with Apache License 2.0 5 votes vote down vote up
package scredis.io

import java.util.concurrent.{CountDownLatch, TimeUnit}

import akka.actor._
import com.typesafe.scalalogging.LazyLogging
import scredis.protocol.Request
import scredis.protocol.requests.ConnectionRequests.{Auth, Quit, Select}
import scredis.protocol.requests.ServerRequests.{ClientSetName, Shutdown}

import scala.concurrent.duration._

abstract class AbstractAkkaConnection(
  protected val system: ActorSystem,
  val host: String,
  val port: Int,
  @volatile protected var passwordOpt: Option[String],
  @volatile protected var database: Int,
  @volatile protected var nameOpt: Option[String],
  protected val decodersCount: Int,
  protected val receiveTimeoutOpt: Option[FiniteDuration],
  protected val connectTimeout: FiniteDuration,
  protected val maxWriteBatchSize: Int,
  protected val tcpSendBufferSizeHint: Int,
  protected val tcpReceiveBufferSizeHint: Int,
  protected val akkaListenerDispatcherPath: String,
  protected val akkaIODispatcherPath: String,
  protected val akkaDecoderDispatcherPath: String
) extends Connection with LazyLogging {
  
  private val shutdownLatch = new CountDownLatch(1)
  
  @volatile protected var isShuttingDown = false
  
  override implicit val dispatcher = system.dispatcher
  
  protected val listenerActor: ActorRef
  
  protected def updateState(request: Request[_]): Unit = request match {
    case Auth(password) => if (password.isEmpty) {
      passwordOpt = None
    } else {
      passwordOpt = Some(password)
    }
    case Select(db) => database = db
    case ClientSetName(name) => if (name.isEmpty) {
      nameOpt = None
    } else {
      nameOpt = Some(name)
    }
    case Quit() | Shutdown(_) => isShuttingDown = true
    case _            =>
  }
  
  protected def getPasswordOpt: Option[String] = passwordOpt
  protected def getDatabase: Int = database
  protected def getNameOpt: Option[String] = nameOpt
  
  protected def watchTermination(): Unit = system.actorOf(
    Props(
      classOf[WatchActor],
      listenerActor,
      shutdownLatch
    )
  )
  
  
  def awaitTermination(timeout: Duration = Duration.Inf): Unit = {
    if (timeout.isFinite) {
      shutdownLatch.await(timeout.toMillis, TimeUnit.MILLISECONDS)
    } else {
      shutdownLatch.await()
    }
  }
  
}

class WatchActor(actor: ActorRef, shutdownLatch: CountDownLatch) extends Actor {
  def receive: Receive = {
    case Terminated(_) => {
      shutdownLatch.countDown()
      context.stop(self)
    }
  }
  context.watch(actor)
} 
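awaitTermination above maps a Scala Duration onto the two CountDownLatch.await overloads, a translation that comes up often. The same mapping as a free-standing utility, returning whether the latch was actually released (the LatchAwait name is illustrative):

import java.util.concurrent.{CountDownLatch, TimeUnit}
import scala.concurrent.duration.Duration

object LatchAwait {
  // Returns true if the latch reached zero, false if a finite timeout elapsed first.
  def await(latch: CountDownLatch, timeout: Duration = Duration.Inf): Boolean =
    if (timeout.isFinite) latch.await(timeout.toMillis, TimeUnit.MILLISECONDS)
    else { latch.await(); true }
}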
Example 48
Source File: ThroughputSpec.scala    From eclair   with Apache License 2.0 5 votes vote down vote up
package fr.acinq.eclair.channel

import java.util.UUID
import java.util.concurrent.CountDownLatch
import java.util.concurrent.atomic.AtomicLong

import akka.actor.{Actor, ActorRef, ActorSystem, Props}
import akka.testkit.TestProbe
import fr.acinq.bitcoin.{ByteVector32, Crypto}
import fr.acinq.eclair.TestConstants.{Alice, Bob}
import fr.acinq.eclair._
import fr.acinq.eclair.blockchain._
import fr.acinq.eclair.blockchain.bitcoind.ZmqWatcher
import fr.acinq.eclair.payment.relay.{CommandBuffer, Relayer}
import fr.acinq.eclair.wire.{Init, UpdateAddHtlc}
import org.scalatest.funsuite.AnyFunSuite

import scala.concurrent.duration._
import scala.util.Random

class ThroughputSpec extends AnyFunSuite {
  ignore("throughput") {
    implicit val system = ActorSystem("test")
    val pipe = system.actorOf(Props[Pipe], "pipe")
    val blockCount = new AtomicLong()
    val blockchain = system.actorOf(ZmqWatcher.props(blockCount, new TestBitcoinClient()), "blockchain")
    val paymentHandler = system.actorOf(Props(new Actor() {
      val random = new Random()

      context.become(run(Map()))

      override def receive: Receive = ???

      def run(h2r: Map[ByteVector32, ByteVector32]): Receive = {
        case ('add, tgt: ActorRef) =>
          val r = randomBytes32
          val h = Crypto.sha256(r)
          tgt ! CMD_ADD_HTLC(1 msat, h, CltvExpiry(1), TestConstants.emptyOnionPacket, Upstream.Local(UUID.randomUUID()))
          context.become(run(h2r + (h -> r)))

        case ('sig, tgt: ActorRef) => tgt ! CMD_SIGN

        case htlc: UpdateAddHtlc if h2r.contains(htlc.paymentHash) =>
          val r = h2r(htlc.paymentHash)
          sender ! CMD_FULFILL_HTLC(htlc.id, r)
          context.become(run(h2r - htlc.paymentHash))
      }
    }), "payment-handler")
    val registerA = TestProbe()
    val registerB = TestProbe()
    val commandBufferA = system.actorOf(Props(new CommandBuffer(Alice.nodeParams, registerA.ref)))
    val commandBufferB = system.actorOf(Props(new CommandBuffer(Bob.nodeParams, registerB.ref)))
    val relayerA = system.actorOf(Relayer.props(Alice.nodeParams, TestProbe().ref, registerA.ref, commandBufferA, paymentHandler))
    val relayerB = system.actorOf(Relayer.props(Bob.nodeParams, TestProbe().ref, registerB.ref, commandBufferB, paymentHandler))
    val wallet = new TestWallet
    val alice = system.actorOf(Channel.props(Alice.nodeParams, wallet, Bob.nodeParams.nodeId, blockchain, relayerA, None), "a")
    val bob = system.actorOf(Channel.props(Bob.nodeParams, wallet, Alice.nodeParams.nodeId, blockchain, relayerB, None), "b")
    val aliceInit = Init(Alice.channelParams.features)
    val bobInit = Init(Bob.channelParams.features)
    alice ! INPUT_INIT_FUNDER(ByteVector32.Zeroes, TestConstants.fundingSatoshis, TestConstants.pushMsat, TestConstants.feeratePerKw, TestConstants.feeratePerKw, Alice.channelParams, pipe, bobInit, ChannelFlags.Empty, ChannelVersion.STANDARD)
    bob ! INPUT_INIT_FUNDEE(ByteVector32.Zeroes, Bob.channelParams, pipe, aliceInit)

    val latch = new CountDownLatch(2)
    val listener = system.actorOf(Props(new Actor {
      override def receive: Receive = {
        case ChannelStateChanged(_, _, _, _, NORMAL, _) => latch.countDown()
      }
    }), "listener")
    system.eventStream.subscribe(listener, classOf[ChannelEvent])

    pipe ! (alice, bob)
    latch.await()

    var i = new AtomicLong(0)
    val random = new Random()

    def msg = random.nextInt(100) % 5 match {
      case 0 | 1 | 2 | 3 => 'add
      case 4 => 'sig
    }

    import scala.concurrent.ExecutionContext.Implicits.global
    system.scheduler.schedule(0 seconds, 50 milliseconds, new Runnable() {
      override def run(): Unit = paymentHandler ! (msg, alice)
    })
    system.scheduler.schedule(5 seconds, 70 milliseconds, new Runnable() {
      override def run(): Unit = paymentHandler ! (msg, bob)
    })

    Thread.sleep(Long.MaxValue)
  }
} 
Example 49
Source File: DefaultThreadPool.scala    From scrapy4s   with GNU Lesser General Public License v3.0 5 votes vote down vote up
package com.scrapy4s.thread

import java.util.concurrent.{BlockingQueue, CountDownLatch, Executor, TimeUnit}
import java.util.concurrent.atomic.AtomicBoolean

import com.scrapy4s.exception.QueueTimeOutException
import org.slf4j.LoggerFactory


class DefaultThreadPool(
                name: String,
                threadCount: Int,
                queue: BlockingQueue[Runnable]
                ) extends ThreadPool {

  val logger = LoggerFactory.getLogger(classOf[DefaultThreadPool])
  val startFlag = new AtomicBoolean(false)
  val countDownLatch = new CountDownLatch(threadCount)
  init()

  private def init(): Unit = {
    if (startFlag.compareAndSet(false, true)) {
      (1 to threadCount).foreach(i => {
        val thread = new Thread(() => {task()})
        thread.setName(s"pool-$name-$i")
        thread.start()
      })
    } else {
      throw new Exception("线程池已经启动")
    }
  }



  // Worker loop: poll the queue until shutdown is requested, then signal exit via the latch.
  def task(): Unit = {
    try {
      while (startFlag.get()) {
        try {
          val runnable = queue.poll(1, TimeUnit.SECONDS)
          if (runnable == null) {
            throw new QueueTimeOutException()
          }
          runnable.run()
        } catch {
          case _: QueueTimeOutException =>
          case e: Exception =>
            logger.error("thread pool exception", e)
        }
      }
    } finally {
      countDownLatch.countDown()
    }
  }

  override def shutdown() = {
    startFlag.compareAndSet(true, false)
  }

  override def waitForStop() = {
    countDownLatch.await()
  }

  override def waitForStop(timeout: Long, unit: TimeUnit): Boolean = {
    countDownLatch.await(timeout, unit)
  }

  override def execute(command: Runnable) = {
    if (command == null) throw new NullPointerException()
    queue.put(command)
  }
} 
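A hypothetical usage sketch for the pool above, assuming it is compiled inside the scrapy4s project (the ThreadPool trait and QueueTimeOutException come from there): submit some work, request shutdown, then block in waitForStop(), which delegates to the internal CountDownLatch until every worker thread has exited.

import java.util.concurrent.LinkedBlockingQueue

object DefaultThreadPoolSketch extends App {
  val pool = new DefaultThreadPool("demo", 4, new LinkedBlockingQueue[Runnable]())

  (1 to 10).foreach { i =>
    pool.execute(() => println(s"task $i on ${Thread.currentThread().getName}"))
  }

  Thread.sleep(500)    // give the workers time to drain; this pool does not drain on shutdown
  pool.shutdown()      // flips startFlag, so each worker exits after its current poll
  pool.waitForStop()   // blocks on the internal CountDownLatch until all workers have finished
}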
Example 50
Source File: MesosClusterDispatcher.scala    From multi-tenancy-spark   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.deploy.mesos

import java.util.concurrent.CountDownLatch

import org.apache.spark.{SecurityManager, SparkConf}
import org.apache.spark.deploy.mesos.config._
import org.apache.spark.deploy.mesos.ui.MesosClusterUI
import org.apache.spark.deploy.rest.mesos.MesosRestServer
import org.apache.spark.internal.Logging
import org.apache.spark.scheduler.cluster.mesos._
import org.apache.spark.util.{ShutdownHookManager, Utils}


private[mesos] class MesosClusterDispatcher(
    args: MesosClusterDispatcherArguments,
    conf: SparkConf)
  extends Logging {

  private val publicAddress = Option(conf.getenv("SPARK_PUBLIC_DNS")).getOrElse(args.host)
  private val recoveryMode = conf.get(RECOVERY_MODE).toUpperCase()
  logInfo("Recovery mode in Mesos dispatcher set to: " + recoveryMode)

  private val engineFactory = recoveryMode match {
    case "NONE" => new BlackHoleMesosClusterPersistenceEngineFactory
    case "ZOOKEEPER" => new ZookeeperMesosClusterPersistenceEngineFactory(conf)
    case _ => throw new IllegalArgumentException("Unsupported recovery mode: " + recoveryMode)
  }

  private val scheduler = new MesosClusterScheduler(engineFactory, conf)

  private val server = new MesosRestServer(args.host, args.port, conf, scheduler)
  private val webUi = new MesosClusterUI(
    new SecurityManager(conf),
    args.webUiPort,
    conf,
    publicAddress,
    scheduler)

  private val shutdownLatch = new CountDownLatch(1)

  def start(): Unit = {
    webUi.bind()
    scheduler.frameworkUrl = conf.get(DISPATCHER_WEBUI_URL).getOrElse(webUi.activeWebUiUrl)
    scheduler.start()
    server.start()
  }

  def awaitShutdown(): Unit = {
    shutdownLatch.await()
  }

  def stop(): Unit = {
    webUi.stop()
    server.stop()
    scheduler.stop()
    shutdownLatch.countDown()
  }
}

private[mesos] object MesosClusterDispatcher extends Logging {
  def main(args: Array[String]) {
    Utils.initDaemon(log)
    val conf = new SparkConf
    val dispatcherArgs = new MesosClusterDispatcherArguments(args, conf)
    conf.setMaster(dispatcherArgs.masterUrl)
    conf.setAppName(dispatcherArgs.name)
    dispatcherArgs.zookeeperUrl.foreach { z =>
      conf.set(RECOVERY_MODE, "ZOOKEEPER")
      conf.set(ZOOKEEPER_URL, z)
    }
    val dispatcher = new MesosClusterDispatcher(dispatcherArgs, conf)
    dispatcher.start()
    logDebug("Adding shutdown hook") // force eager creation of logger
    ShutdownHookManager.addShutdownHook { () =>
      logInfo("Shutdown hook is shutting down dispatcher")
      dispatcher.stop()
      dispatcher.awaitShutdown()
    }
    dispatcher.awaitShutdown()
  }
} 
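The dispatcher's lifecycle hinges on a one-shot latch: main parks in awaitShutdown() and the shutdown hook releases it through stop(). A stripped-down sketch of that pattern, with illustrative names rather than Spark APIs:

import java.util.concurrent.CountDownLatch

object ShutdownLatchSketch extends App {
  val shutdownLatch = new CountDownLatch(1)

  sys.addShutdownHook {
    println("shutdown hook: stopping services")
    shutdownLatch.countDown()   // release the main thread so it can finish cleanly
  }

  println("service started; send SIGINT/SIGTERM to stop")
  shutdownLatch.await()         // park here until the hook counts down
}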
Example 51
Source File: ProcessingTimeExecutorSuite.scala    From multi-tenancy-spark   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.sql.execution.streaming

import java.util.concurrent.{CountDownLatch, TimeUnit}

import org.apache.spark.SparkFunSuite
import org.apache.spark.sql.streaming.ProcessingTime
import org.apache.spark.util.{Clock, ManualClock, SystemClock}

class ProcessingTimeExecutorSuite extends SparkFunSuite {

  test("nextBatchTime") {
    val processingTimeExecutor = ProcessingTimeExecutor(ProcessingTime(100))
    assert(processingTimeExecutor.nextBatchTime(0) === 100)
    assert(processingTimeExecutor.nextBatchTime(1) === 100)
    assert(processingTimeExecutor.nextBatchTime(99) === 100)
    assert(processingTimeExecutor.nextBatchTime(100) === 200)
    assert(processingTimeExecutor.nextBatchTime(101) === 200)
    assert(processingTimeExecutor.nextBatchTime(150) === 200)
  }

  test("calling nextBatchTime with the result of a previous call should return the next interval") {
    val intervalMS = 100
    val processingTimeExecutor = ProcessingTimeExecutor(ProcessingTime(intervalMS))

    val ITERATION = 10
    var nextBatchTime: Long = 0
    for (it <- 1 to ITERATION) {
      nextBatchTime = processingTimeExecutor.nextBatchTime(nextBatchTime)
    }

    // nextBatchTime should be 1000
    assert(nextBatchTime === intervalMS * ITERATION)
  }

  private def testBatchTermination(intervalMs: Long): Unit = {
    var batchCounts = 0
    val processingTimeExecutor = ProcessingTimeExecutor(ProcessingTime(intervalMs))
    processingTimeExecutor.execute(() => {
      batchCounts += 1
      // If the batch termination works well, batchCounts should be 3 after `execute`
      batchCounts < 3
    })
    assert(batchCounts === 3)
  }

  test("batch termination") {
    testBatchTermination(0)
    testBatchTermination(10)
  }

  test("notifyBatchFallingBehind") {
    val clock = new ManualClock()
    @volatile var batchFallingBehindCalled = false
    val latch = new CountDownLatch(1)
    val t = new Thread() {
      override def run(): Unit = {
        val processingTimeExecutor = new ProcessingTimeExecutor(ProcessingTime(100), clock) {
          override def notifyBatchFallingBehind(realElapsedTimeMs: Long): Unit = {
            batchFallingBehindCalled = true
          }
        }
        processingTimeExecutor.execute(() => {
          latch.countDown()
          clock.waitTillTime(200)
          false
        })
      }
    }
    t.start()
    // Wait until the batch is running so that we don't call `advance` too early
    assert(latch.await(10, TimeUnit.SECONDS), "the batch has not yet started in 10 seconds")
    clock.advance(200)
    t.join()
    assert(batchFallingBehindCalled === true)
  }
} 
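The notifyBatchFallingBehind test relies on a timed await so that a batch which never starts fails the test instead of hanging it. A minimal stand-alone sketch of that idiom (no Spark test utilities involved):

import java.util.concurrent.{CountDownLatch, TimeUnit}

object TimedAwaitSketch extends App {
  val started = new CountDownLatch(1)

  new Thread(() => {
    Thread.sleep(50)     // simulate the batch taking a moment to start
    started.countDown()
  }).start()

  val startedInTime = started.await(10, TimeUnit.SECONDS)
  assert(startedInTime, "the batch has not yet started in 10 seconds")
  println("batch observed as started; safe to advance the clock")
}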
Example 52
Source File: BlockingSource.scala    From multi-tenancy-spark   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.sql.streaming.util

import java.util.concurrent.CountDownLatch

import org.apache.spark.sql.{SQLContext, _}
import org.apache.spark.sql.execution.streaming.{LongOffset, Offset, Sink, Source}
import org.apache.spark.sql.sources.{StreamSinkProvider, StreamSourceProvider}
import org.apache.spark.sql.streaming.OutputMode
import org.apache.spark.sql.types.{IntegerType, StructField, StructType}


class BlockingSource extends StreamSourceProvider with StreamSinkProvider {

  private val fakeSchema = StructType(StructField("a", IntegerType) :: Nil)

  override def sourceSchema(
      spark: SQLContext,
      schema: Option[StructType],
      providerName: String,
      parameters: Map[String, String]): (String, StructType) = {
    ("dummySource", fakeSchema)
  }

  override def createSource(
      spark: SQLContext,
      metadataPath: String,
      schema: Option[StructType],
      providerName: String,
      parameters: Map[String, String]): Source = {
    BlockingSource.latch.await()
    new Source {
      override def schema: StructType = fakeSchema
      override def getOffset: Option[Offset] = Some(new LongOffset(0))
      override def getBatch(start: Option[Offset], end: Offset): DataFrame = {
        import spark.implicits._
        Seq[Int]().toDS().toDF()
      }
      override def stop() {}
    }
  }

  override def createSink(
      spark: SQLContext,
      parameters: Map[String, String],
      partitionColumns: Seq[String],
      outputMode: OutputMode): Sink = {
    new Sink {
      override def addBatch(batchId: Long, data: DataFrame): Unit = {}
    }
  }
}

object BlockingSource {
  var latch: CountDownLatch = null
} 
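Tests drive this provider by installing a fresh latch in the companion object, starting a query that blocks inside createSource, and counting the latch down once they are ready. The framework-free sketch below mirrors that gate; LatchedFactory is a stand-in for BlockingSource, not a Spark API.

import java.util.concurrent.CountDownLatch

object LatchedFactory {
  var latch: CountDownLatch = null

  def create(): String = {
    latch.await()        // block until the test releases the gate
    "source-ready"
  }
}

object BlockingGateSketch extends App {
  LatchedFactory.latch = new CountDownLatch(1)

  val t = new Thread(() => println(LatchedFactory.create()))
  t.start()              // blocks inside create() for now

  Thread.sleep(100)      // the test does its setup while creation is held back
  LatchedFactory.latch.countDown()   // open the gate
  t.join()
}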
Example 53
Source File: StoragePerfTester.scala    From iolap   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.tools

import java.util.concurrent.{CountDownLatch, Executors}
import java.util.concurrent.atomic.AtomicLong

import org.apache.spark.executor.ShuffleWriteMetrics
import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.serializer.KryoSerializer
import org.apache.spark.shuffle.hash.HashShuffleManager
import org.apache.spark.util.Utils


// The object/main wrapper and the sizing values below were truncated in this listing;
// restored from the upstream StoragePerfTester, which reads them from environment variables.
object StoragePerfTester {
  def main(args: Array[String]): Unit = {
    // Total amount of data to generate, distributed evenly amongst maps and reduce splits.
    val dataSizeMb = Utils.memoryStringToMb(sys.env.getOrElse("OUTPUT_DATA", "1g"))

    // Number of map tasks. All tasks execute concurrently.
    val numMaps = sys.env.get("NUM_MAPS").map(_.toInt).getOrElse(8)

    // Number of reduce splits for each map task.
    val numOutputSplits = sys.env.get("NUM_REDUCERS").map(_.toInt).getOrElse(500)

    val recordLength = 1000 // ~1KB records
    val totalRecords = dataSizeMb * 1000
    val recordsPerMap = totalRecords / numMaps

    val writeKey = "1" * (recordLength / 2)
    val writeValue = "1" * (recordLength / 2)
    val executor = Executors.newFixedThreadPool(numMaps)

    val conf = new SparkConf()
      .set("spark.shuffle.compress", "false")
      .set("spark.shuffle.sync", "true")
      .set("spark.shuffle.manager", "org.apache.spark.shuffle.hash.HashShuffleManager")

    // This is only used to instantiate a BlockManager. All thread scheduling is done manually.
    val sc = new SparkContext("local[4]", "Write Tester", conf)
    val hashShuffleManager = sc.env.shuffleManager.asInstanceOf[HashShuffleManager]

    def writeOutputBytes(mapId: Int, total: AtomicLong): Unit = {
      val shuffle = hashShuffleManager.shuffleBlockResolver.forMapTask(1, mapId, numOutputSplits,
        new KryoSerializer(sc.conf), new ShuffleWriteMetrics())
      val writers = shuffle.writers
      for (i <- 1 to recordsPerMap) {
        writers(i % numOutputSplits).write(writeKey, writeValue)
      }
      writers.map { w =>
        w.commitAndClose()
        total.addAndGet(w.fileSegment().length)
      }

      shuffle.releaseWriters(true)
    }

    val start = System.currentTimeMillis()
    val latch = new CountDownLatch(numMaps)
    val totalBytes = new AtomicLong()
    for (task <- 1 to numMaps) {
      executor.submit(new Runnable() {
        override def run(): Unit = {
          try {
            writeOutputBytes(task, totalBytes)
            latch.countDown()
          } catch {
            case e: Exception =>
              println("Exception in child thread: " + e + " " + e.getMessage)
              System.exit(1)
          }
        }
      })
    }
    latch.await()
    val end = System.currentTimeMillis()
    val time = (end - start) / 1000.0
    val bytesPerSecond = totalBytes.get() / time
    val bytesPerFile = (totalBytes.get() / (numOutputSplits * numMaps.toDouble)).toLong

    System.err.println("files_total\t\t%s".format(numMaps * numOutputSplits))
    System.err.println("bytes_per_file\t\t%s".format(Utils.bytesToString(bytesPerFile)))
    System.err.println("agg_throughput\t\t%s/s".format(Utils.bytesToString(bytesPerSecond.toLong)))

    executor.shutdown()
    sc.stop()
  }
} 
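The benchmark above is a classic fan-out: one latch count per map task, each task counts down when its writes finish, and the driver reads the aggregated byte total only after latch.await() returns. A small illustrative sketch of the same pattern:

import java.util.concurrent.{CountDownLatch, Executors}
import java.util.concurrent.atomic.AtomicLong

object FanOutLatchSketch extends App {
  val numTasks = 8
  val executor = Executors.newFixedThreadPool(numTasks)
  val latch    = new CountDownLatch(numTasks)
  val total    = new AtomicLong()

  for (task <- 1 to numTasks) {
    executor.submit(new Runnable {
      override def run(): Unit = {
        total.addAndGet(task)   // stand-in for bytes written by this map task
        latch.countDown()
      }
    })
  }

  latch.await()               // only now is `total` guaranteed to include every task
  println(s"total = ${total.get()}")
  executor.shutdown()
}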
Example 54
Source File: MesosClusterDispatcher.scala    From iolap   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.deploy.mesos

import java.util.concurrent.CountDownLatch

import org.apache.spark.deploy.mesos.ui.MesosClusterUI
import org.apache.spark.deploy.rest.mesos.MesosRestServer
import org.apache.spark.scheduler.cluster.mesos._
import org.apache.spark.util.SignalLogger
import org.apache.spark.{Logging, SecurityManager, SparkConf}


private[mesos] class MesosClusterDispatcher(
    args: MesosClusterDispatcherArguments,
    conf: SparkConf)
  extends Logging {

  private val publicAddress = Option(conf.getenv("SPARK_PUBLIC_DNS")).getOrElse(args.host)
  private val recoveryMode = conf.get("spark.mesos.deploy.recoveryMode", "NONE").toUpperCase()
  logInfo("Recovery mode in Mesos dispatcher set to: " + recoveryMode)

  private val engineFactory = recoveryMode match {
    case "NONE" => new BlackHoleMesosClusterPersistenceEngineFactory
    case "ZOOKEEPER" => new ZookeeperMesosClusterPersistenceEngineFactory(conf)
    case _ => throw new IllegalArgumentException("Unsupported recovery mode: " + recoveryMode)
  }

  private val scheduler = new MesosClusterScheduler(engineFactory, conf)

  private val server = new MesosRestServer(args.host, args.port, conf, scheduler)
  private val webUi = new MesosClusterUI(
    new SecurityManager(conf),
    args.webUiPort,
    conf,
    publicAddress,
    scheduler)

  private val shutdownLatch = new CountDownLatch(1)

  def start(): Unit = {
    webUi.bind()
    scheduler.frameworkUrl = webUi.activeWebUiUrl
    scheduler.start()
    server.start()
  }

  def awaitShutdown(): Unit = {
    shutdownLatch.await()
  }

  def stop(): Unit = {
    webUi.stop()
    server.stop()
    scheduler.stop()
    shutdownLatch.countDown()
  }
}

private[mesos] object MesosClusterDispatcher extends Logging {
  def main(args: Array[String]) {
    SignalLogger.register(log)
    val conf = new SparkConf
    val dispatcherArgs = new MesosClusterDispatcherArguments(args, conf)
    conf.setMaster(dispatcherArgs.masterUrl)
    conf.setAppName(dispatcherArgs.name)
    dispatcherArgs.zookeeperUrl.foreach { z =>
      conf.set("spark.mesos.deploy.recoveryMode", "ZOOKEEPER")
      conf.set("spark.mesos.deploy.zookeeper.url", z)
    }
    val dispatcher = new MesosClusterDispatcher(dispatcherArgs, conf)
    dispatcher.start()
    val shutdownHook = new Thread() {
      override def run() {
        logInfo("Shutdown hook is shutting down dispatcher")
        dispatcher.stop()
        dispatcher.awaitShutdown()
      }
    }
    Runtime.getRuntime.addShutdownHook(shutdownHook)
    dispatcher.awaitShutdown()
  }
}