java.util.concurrent.Executors Scala Examples

The following examples show how to use java.util.concurrent.Executors in Scala. Each example is taken from an open-source project; the source file, project name, and license are listed above it.
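Before the project examples, here is a minimal sketch (not taken from any of the projects below; the object name ExecutorsQuickStart is purely illustrative) of the pattern most of them share: create a pool with Executors, optionally wrap it in a Scala ExecutionContext, submit work, and shut the pool down when done.

import java.util.concurrent.{Executors, TimeUnit}

import scala.concurrent.{Await, ExecutionContext, Future}
import scala.concurrent.duration._

object ExecutorsQuickStart extends App {
  // A fixed-size pool; Executors also provides newCachedThreadPool,
  // newSingleThreadExecutor, newScheduledThreadPool, and others.
  val pool = Executors.newFixedThreadPool(4)

  // Wrap the pool so it can back Scala Futures.
  implicit val ec: ExecutionContext = ExecutionContext.fromExecutorService(pool)

  val result = Future { (1 to 10).sum }
  println(Await.result(result, 5.seconds))

  // Always shut the pool down, otherwise non-daemon worker threads keep the JVM alive.
  pool.shutdown()
  pool.awaitTermination(10, TimeUnit.SECONDS)
}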
Example 1
Source File: TsStreamingTest.scala    From spark-riak-connector   with Apache License 2.0
package com.basho.riak.spark.streaming

import java.nio.ByteBuffer
import java.util.concurrent.{Callable, Executors, TimeUnit}

import com.basho.riak.spark._
import com.basho.riak.spark.rdd.RiakTSTests
import com.basho.riak.spark.rdd.timeseries.{AbstractTimeSeriesTest, TimeSeriesData}
import com.fasterxml.jackson.core.JsonParser
import com.fasterxml.jackson.databind.{DeserializationFeature, ObjectMapper, SerializationFeature}
import com.fasterxml.jackson.module.scala.DefaultScalaModule
import org.apache.spark.sql.Row
import org.junit.Assert._
import org.junit.experimental.categories.Category
import org.junit.{After, Before, Test}

@Category(Array(classOf[RiakTSTests]))
class TsStreamingTest extends AbstractTimeSeriesTest(false) with SparkStreamingFixture {

  protected final val executorService = Executors.newCachedThreadPool()
  private val dataSource = new SocketStreamingDataSource
  private var port = -1

  @Before
  def setUp(): Unit = {
    port = dataSource.start(client => {
      testData
        .map(tolerantMapper.writeValueAsString)
        .foreach(x => client.write(ByteBuffer.wrap(s"$x\n".getBytes)))
      logInfo(s"${testData.length} values were send to client")
    })
  }

  @After
  def tearDown(): Unit = {
    dataSource.stop()
  }

  @Test(timeout = 10 * 1000) // 10 seconds timeout
  def saveToRiak(): Unit = {
    executorService.submit(new Runnable {
      override def run(): Unit = {
        ssc.socketTextStream("localhost", port)
          .map(string => {
            val tsdata = new ObjectMapper()
              .configure(DeserializationFeature.FAIL_ON_NULL_FOR_PRIMITIVES, true)
              .configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, true)
              .configure(JsonParser.Feature.ALLOW_SINGLE_QUOTES, true)
              .configure(JsonParser.Feature.ALLOW_UNQUOTED_FIELD_NAMES, true)
              .configure(SerializationFeature.WRITE_DATES_AS_TIMESTAMPS, false)
              .registerModule(DefaultScalaModule)
              .readValue(string, classOf[TimeSeriesData])
            Row(1, "f", tsdata.time, tsdata.user_id, tsdata.temperature_k)
          })
          .saveToRiakTS(bucketName)

        ssc.start()
        ssc.awaitTerminationOrTimeout(5 * 1000)
      }
    })

    val result = executorService.submit(new Callable[Array[Seq[Any]]] {
      override def call(): Array[Seq[Any]] = {
        var rdd = sc.riakTSTable[Row](bucketName)
          .sql(s"SELECT user_id, temperature_k FROM $bucketName $sqlWhereClause")
        var count = rdd.count()
        while (count < testData.length) {
          TimeUnit.SECONDS.sleep(2)

          rdd = sc.riakTSTable[Row](bucketName)
            .sql(s"SELECT user_id, temperature_k FROM $bucketName $sqlWhereClause")
          count = rdd.count()
        }
        rdd.collect().map(_.toSeq)
      }
    }).get()

    assertEquals(testData.length, result.length)
    assertEqualsUsingJSONIgnoreOrder(
      """
        |[
        |   ['bryce',305.37],
        |   ['bryce',300.12],
        |   ['bryce',295.95],
        |   ['ratman',362.121],
        |   ['ratman',3502.212]
        |]
      """.stripMargin, result)
  }
} 
Example 2
Source File: TrafficMonitorThread.scala    From shadowsocksr-android   with GNU General Public License v3.0
package com.github.shadowsocks.utils

import java.io.{File, IOException}
import java.nio.{ByteBuffer, ByteOrder}
import java.util.concurrent.Executors

import android.content.Context
import android.net.{LocalServerSocket, LocalSocket, LocalSocketAddress}
import android.util.Log

class TrafficMonitorThread(context: Context) extends Thread {

  val TAG = "TrafficMonitorThread"
  lazy val PATH = context.getApplicationInfo.dataDir + "/stat_path"

  @volatile var serverSocket: LocalServerSocket = null
  @volatile var isRunning: Boolean = true

  def closeServerSocket() {
    if (serverSocket != null) {
      try {
        serverSocket.close()
      } catch {
        case _: Exception => // ignore
      }
      serverSocket = null
    }
  }

  def stopThread() {
    isRunning = false
    closeServerSocket()
  }

  override def run() {

    try {
      new File(PATH).delete()
    } catch {
      case _: Exception => // ignore
    }

    try {
      val localSocket = new LocalSocket
      localSocket.bind(new LocalSocketAddress(PATH, LocalSocketAddress.Namespace.FILESYSTEM))
      serverSocket = new LocalServerSocket(localSocket.getFileDescriptor)
    } catch {
      case e: IOException =>
        Log.e(TAG, "unable to bind", e)
        return
    }

    val pool = Executors.newFixedThreadPool(1)

    while (isRunning) {
      try {
        val socket = serverSocket.accept()

        pool.execute(() => {
          try {
            val input = socket.getInputStream
            val output = socket.getOutputStream

            val buffer = new Array[Byte](16)
            if (input.read(buffer) != 16) throw new IOException("Unexpected traffic stat length")
            val stat = ByteBuffer.wrap(buffer).order(ByteOrder.LITTLE_ENDIAN)
            TrafficMonitor.update(stat.getLong(0), stat.getLong(8))

            output.write(0)

            input.close()
            output.close()

          } catch {
            case e: Exception =>
              Log.e(TAG, "Error when recv traffic stat", e)
          }

          // close socket
          try {
            socket.close()
          } catch {
            case _: Exception => // ignore
          }

        })
      } catch {
        case e: IOException =>
          Log.e(TAG, "Error when accept socket", e)
          return
      }
    }
  }
} 
Example 3
Source File: BasicShabondiTest.scala    From ohara   with Apache License 2.0
package oharastream.ohara.shabondi

import java.util
import java.util.concurrent.{ExecutorService, Executors}

import com.google.common.util.concurrent.ThreadFactoryBuilder
import com.typesafe.scalalogging.Logger
import oharastream.ohara.common.data.Row
import oharastream.ohara.common.setting.TopicKey
import oharastream.ohara.common.util.{CommonUtils, Releasable}
import oharastream.ohara.kafka.TopicAdmin
import oharastream.ohara.shabondi.common.ShabondiUtils
import oharastream.ohara.shabondi.sink.SinkConfig
import oharastream.ohara.shabondi.source.SourceConfig
import oharastream.ohara.testing.WithBroker
import org.junit.After

import scala.collection.{immutable, mutable}
import scala.concurrent.{ExecutionContext, Future}
import scala.jdk.CollectionConverters._

private[shabondi] abstract class BasicShabondiTest extends WithBroker {
  protected val log = Logger(this.getClass())

  protected val brokerProps            = testUtil.brokersConnProps
  protected val topicAdmin: TopicAdmin = TopicAdmin.of(brokerProps)

  protected val newThreadPool: () => ExecutorService = () =>
    Executors.newCachedThreadPool(new ThreadFactoryBuilder().setNameFormat(this.getClass.getSimpleName + "-").build())

  protected val countRows: (util.Queue[Row], Long, ExecutionContext) => Future[Long] =
    (queue, executionTime, ec) =>
      Future {
        log.debug("countRows begin...")
        val baseTime = System.currentTimeMillis()
        var count    = 0L
        var running  = true
        while (running) {
          val row = queue.poll()
          if (row != null) count += 1 else Thread.sleep(100)
          running = (System.currentTimeMillis() - baseTime) < executionTime
        }
        log.debug("countRows done")
        count
      }(ec)

  protected def createTopicKey = TopicKey.of("default", CommonUtils.randomString(5))

  protected def createTestTopic(topicKey: TopicKey): Unit =
    topicAdmin.topicCreator
      .numberOfPartitions(1)
      .numberOfReplications(1.toShort)
      .topicKey(topicKey)
      .create

  protected def defaultSourceConfig(
    sourceToTopics: Seq[TopicKey] = Seq.empty[TopicKey]
  ): SourceConfig = {
    import ShabondiDefinitions._
    val args = mutable.ArrayBuffer(
      GROUP_DEFINITION.key + "=" + CommonUtils.randomString(5),
      NAME_DEFINITION.key + "=" + CommonUtils.randomString(3),
      SHABONDI_CLASS_DEFINITION.key + "=" + classOf[ShabondiSource].getName,
      CLIENT_PORT_DEFINITION.key + "=8080",
      BROKERS_DEFINITION.key + "=" + testUtil.brokersConnProps
    )
    if (sourceToTopics.nonEmpty)
      args += s"${SOURCE_TO_TOPICS_DEFINITION.key}=${TopicKey.toJsonString(sourceToTopics.asJava)}"

    val rawConfig = ShabondiUtils.parseArgs(args.toArray)
    new SourceConfig(rawConfig)
  }

  protected def defaultSinkConfig(
    sinkFromTopics: Seq[TopicKey] = Seq.empty[TopicKey]
  ): SinkConfig = {
    import ShabondiDefinitions._
    val args = mutable.ArrayBuffer(
      GROUP_DEFINITION.key + "=" + CommonUtils.randomString(5),
      NAME_DEFINITION.key + "=" + CommonUtils.randomString(3),
      SHABONDI_CLASS_DEFINITION.key + "=" + classOf[ShabondiSink].getName,
      CLIENT_PORT_DEFINITION.key + "=8080",
      BROKERS_DEFINITION.key + "=" + testUtil.brokersConnProps
    )
    if (sinkFromTopics.nonEmpty)
      args += s"${SINK_FROM_TOPICS_DEFINITION.key}=${TopicKey.toJsonString(sinkFromTopics.asJava)}"
    val rawConfig = ShabondiUtils.parseArgs(args.toArray)
    new SinkConfig(rawConfig)
  }

  protected def singleRow(columnSize: Int, rowId: Int = 0): Row =
    KafkaSupport.singleRow(columnSize, rowId)

  protected def multipleRows(rowSize: Int): immutable.Iterable[Row] =
    KafkaSupport.multipleRows(rowSize)

  @After
  def tearDown(): Unit = {
    Releasable.close(topicAdmin)
  }
} 
Example 4
Source File: TestConfiguratorMain.scala    From ohara   with Apache License 2.0
package oharastream.ohara.configurator
import java.util.concurrent.{Executors, TimeUnit}
import oharastream.ohara.common.rule.OharaTest
import oharastream.ohara.common.util.{CommonUtils, Releasable}
import oharastream.ohara.configurator.Configurator.Mode
import org.junit.{After, Test}
import org.scalatest.matchers.should.Matchers._
import scala.concurrent.{ExecutionContext, Future}

class TestConfiguratorMain extends OharaTest {
  @Test
  def illegalK8sUrl(): Unit =
    intercept[IllegalArgumentException] {
      Configurator.main(Array[String](Configurator.K8S_KEY, s"http://localhost:${CommonUtils.availablePort()}"))
    }.getMessage should include("unable to access")

  @Test
  def emptyK8sArgument(): Unit =
    an[IllegalArgumentException] should be thrownBy Configurator.main(Array[String](Configurator.K8S_KEY, ""))

  @Test
  def nullK8sArgument(): Unit =
    an[IllegalArgumentException] should be thrownBy Configurator.main(Array[String](Configurator.K8S_KEY))

  @Test
  def fakeWithK8s(): Unit =
    an[IllegalArgumentException] should be thrownBy Configurator.main(
      Array[String](Configurator.K8S_KEY, "http://localhost", Configurator.FAKE_KEY, "true")
    )

  @Test
  def k8sWithFake(): Unit =
    an[IllegalArgumentException] should be thrownBy Configurator.main(
      Array[String](Configurator.FAKE_KEY, "true", Configurator.K8S_KEY, "http://localhost")
    )

  @Test
  def testFakeMode(): Unit =
    runMain(
      Array[String](Configurator.HOSTNAME_KEY, "localhost", Configurator.PORT_KEY, "0", Configurator.FAKE_KEY, "true"),
      configurator => configurator.mode shouldBe Mode.FAKE
    )

  @Test
  def testDockerMode(): Unit =
    runMain(
      Array[String](Configurator.HOSTNAME_KEY, "localhost", Configurator.PORT_KEY, "0"),
      configurator => configurator.mode shouldBe Mode.DOCKER
    )

  private[this] def runMain(args: Array[String], action: Configurator => Unit): Unit = {
    Configurator.GLOBAL_CONFIGURATOR_SHOULD_CLOSE = false
    val service = ExecutionContext.fromExecutorService(Executors.newSingleThreadExecutor())
    Future[Unit](Configurator.main(args))(service)
    import java.time.Duration
    try {
      CommonUtils.await(() => Configurator.GLOBAL_CONFIGURATOR_RUNNING, Duration.ofSeconds(30))
      action(Configurator.GLOBAL_CONFIGURATOR)
    } finally {
      Configurator.GLOBAL_CONFIGURATOR_SHOULD_CLOSE = true
      service.shutdownNow()
      service.awaitTermination(60, TimeUnit.SECONDS)
    }
  }

  @After
  def tearDown(): Unit = {
    Configurator.GLOBAL_CONFIGURATOR_SHOULD_CLOSE = false
    Releasable.close(Configurator.GLOBAL_CONFIGURATOR)
    Configurator.GLOBAL_CONFIGURATOR = null // clear the global reference after closing it
  }
} 
Example 5
Source File: TestConcurrentAccess.scala    From ohara   with Apache License 2.0
package oharastream.ohara.configurator

import java.util.concurrent.atomic.AtomicInteger
import java.util.concurrent.{Executors, TimeUnit}

import oharastream.ohara.client.configurator.NodeApi
import oharastream.ohara.common.rule.OharaTest
import oharastream.ohara.common.util.{CommonUtils, Releasable}
import org.junit.{After, Test}
import org.scalatest.matchers.should.Matchers._

import scala.concurrent.duration.Duration
import scala.concurrent.{Await, ExecutionContext, ExecutionContextExecutor, Future}

class TestConcurrentAccess extends OharaTest {
  private[this] val configurator = Configurator.builder.fake().build()

  private[this] val nodeApi = NodeApi.access.hostname(configurator.hostname).port(configurator.port)

  private[this] def result[T](f: Future[T]): T = Await.result(f, Duration(10, TimeUnit.SECONDS))

  
  @Test
  def deletedObjectShouldDisappearFromGet(): Unit = {
    val threadCount                                         = 10
    val threadsPool                                         = Executors.newFixedThreadPool(threadCount)
    val unmatchedCount                                      = new AtomicInteger()
    implicit val executionContext: ExecutionContextExecutor = ExecutionContext.fromExecutor(threadsPool)
    (0 until threadCount).foreach { _ =>
      threadsPool.execute { () =>
        val nodeName = CommonUtils.randomString(10)
        val nodes = result(
          nodeApi.request
            .nodeName(nodeName)
            .user(CommonUtils.randomString(10))
            .password(CommonUtils.randomString(10))
            .create()
            .flatMap(node => nodeApi.delete(node.key))
            .flatMap(_ => nodeApi.list())
        )
        if (nodes.exists(_.hostname == nodeName)) unmatchedCount.incrementAndGet()
      }
    }
    threadsPool.shutdown()
    threadsPool.awaitTermination(60, TimeUnit.SECONDS) shouldBe true
    unmatchedCount.get() shouldBe 0
  }

  @After
  def tearDown(): Unit = Releasable.close(configurator)
} 
Example 6
Source File: MultiFixtureBase.scala    From daml   with Apache License 2.0
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.ledger.api.testing.utils

import java.util.concurrent.{Executors, ScheduledExecutorService, TimeUnit}

import com.daml.dec.DirectExecutionContext
import org.scalatest._
import org.scalatest.concurrent.{AsyncTimeLimitedTests, ScaledTimeSpans}
import org.scalatest.exceptions.TestCanceledException
import org.scalatest.time.Span

import scala.collection.immutable.Iterable
import scala.concurrent.duration.DurationInt
import scala.concurrent.{Future, Promise, TimeoutException}
import scala.util.control.{NoStackTrace, NonFatal}

trait MultiFixtureBase[FixtureId, TestContext]
    extends Assertions
    with BeforeAndAfterAll
    with ScaledTimeSpans
    with AsyncTimeLimitedTests {
  self: AsyncTestSuite =>

  private var es: ScheduledExecutorService = _

  override protected def beforeAll(): Unit = {
    super.beforeAll()
    es = Executors.newScheduledThreadPool(1)
  }

  override protected def afterAll(): Unit = {
    es.shutdownNow()
    super.afterAll()
  }

  protected class TestFixture(val id: FixtureId, createContext: () => TestContext) {
    def context(): TestContext = createContext()
  }

  def timeLimit: Span = scaled(30.seconds)

  object TestFixture {
    def apply(id: FixtureId, createContext: () => TestContext): TestFixture =
      new TestFixture(id, createContext)

    def unapply(testFixture: TestFixture): Option[(FixtureId, TestContext)] =
      Some((testFixture.id, testFixture.context()))
  }

  protected def fixtures: Iterable[TestFixture]

  
  protected def allFixtures(runTest: TestContext => Future[Assertion]): Future[Assertion] =
    forAllFixtures(fixture => runTest(fixture.context))

  protected def forAllFixtures(runTest: TestFixture => Future[Assertion]): Future[Assertion] = {
    forAllMatchingFixtures { case f => runTest(f) }
  }

  protected def forAllMatchingFixtures(
      runTest: PartialFunction[TestFixture, Future[Assertion]]): Future[Assertion] = {
    if (parallelExecution) {
      val results = fixtures.map(
        fixture =>
          if (runTest.isDefinedAt(fixture))
            runTestAgainstFixture(fixture, runTest)
          else
            Future.successful(succeed))
      Future.sequence(results).map(foldAssertions)
    } else {
      fixtures.foldLeft(Future.successful(succeed)) {
        case (resultSoFar, thisFixture) =>
          resultSoFar.flatMap {
            case Succeeded => runTestAgainstFixture(thisFixture, runTest)
            case other => Future.successful(other)
          }
      }
    }
  }

} 
Example 7
Source File: AkkaBeforeAndAfterAll.scala    From daml   with Apache License 2.0
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.ledger.api.testing.utils

import java.util.concurrent.Executors

import akka.actor.ActorSystem
import akka.stream.Materializer
import com.daml.grpc.adapter.{AkkaExecutionSequencerPool, ExecutionSequencerFactory}
import com.google.common.util.concurrent.ThreadFactoryBuilder
import org.scalatest.{BeforeAndAfterAll, Suite}
import org.slf4j.LoggerFactory

import scala.concurrent.duration.DurationInt
import scala.concurrent.{Await, ExecutionContext}

trait AkkaBeforeAndAfterAll extends BeforeAndAfterAll {
  self: Suite =>
  private val logger = LoggerFactory.getLogger(getClass)

  protected def actorSystemName: String = this.getClass.getSimpleName

  private implicit lazy val executionContext: ExecutionContext =
    ExecutionContext.fromExecutorService(
      Executors.newSingleThreadExecutor(
        new ThreadFactoryBuilder()
          .setDaemon(true)
          .setNameFormat(s"$actorSystemName-thread-pool-worker-%d")
          .setUncaughtExceptionHandler((thread, _) =>
            logger.error(s"got an uncaught exception on thread: ${thread.getName}"))
          .build()))

  protected implicit lazy val system: ActorSystem =
    ActorSystem(actorSystemName, defaultExecutionContext = Some(executionContext))

  protected implicit lazy val materializer: Materializer = Materializer(system)

  protected implicit lazy val executionSequencerFactory: ExecutionSequencerFactory =
    new AkkaExecutionSequencerPool(poolName = actorSystemName, actorCount = 1)

  override protected def afterAll(): Unit = {
    executionSequencerFactory.close()
    materializer.shutdown()
    Await.result(system.terminate(), 30.seconds)
    super.afterAll()
  }
} 
Example 8
Source File: AkkaTest.scala    From daml   with Apache License 2.0
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.ledger.client.testing

import java.util
import java.util.concurrent.{Executors, ScheduledExecutorService}

import akka.NotUsed
import akka.actor.{ActorSystem, Scheduler}
import akka.stream.scaladsl.{Sink, Source}
import akka.stream.Materializer
import akka.util.ByteString
import com.daml.grpc.adapter.{ExecutionSequencerFactory, SingleThreadExecutionSequencerPool}
import com.typesafe.config.{Config, ConfigFactory, ConfigValueFactory}
import com.typesafe.scalalogging.LazyLogging
import org.scalatest.{BeforeAndAfterAll, Suite}

import scala.concurrent.duration._
import scala.concurrent.{Await, ExecutionContextExecutor, Future}
import scala.util.control.NonFatal

trait AkkaTest extends BeforeAndAfterAll with LazyLogging { self: Suite =>
  // TestEventListener is needed for log testing
  private val loggers =
    util.Arrays.asList("akka.event.slf4j.Slf4jLogger", "akka.testkit.TestEventListener")
  protected implicit val sysConfig: Config = ConfigFactory
    .load()
    .withValue("akka.loggers", ConfigValueFactory.fromIterable(loggers))
    .withValue("akka.logger-startup-timeout", ConfigValueFactory.fromAnyRef("30s"))
    .withValue("akka.stdout-loglevel", ConfigValueFactory.fromAnyRef("INFO"))
  protected implicit val system: ActorSystem = ActorSystem("test", sysConfig)
  protected implicit val ec: ExecutionContextExecutor =
    system.dispatchers.lookup("test-dispatcher")
  protected implicit val scheduler: Scheduler = system.scheduler
  protected implicit val schedulerService: ScheduledExecutorService =
    Executors.newSingleThreadScheduledExecutor()
  protected implicit val materializer: Materializer = Materializer(system)
  protected implicit val esf: ExecutionSequencerFactory =
    new SingleThreadExecutionSequencerPool("testSequencerPool")
  protected val timeout: FiniteDuration = 2.minutes
  protected val shortTimeout: FiniteDuration = 5.seconds

  protected def await[T](fun: => Future[T]): T = Await.result(fun, timeout)

  protected def awaitShort[T](fun: => Future[T]): T = Await.result(fun, shortTimeout)

  protected def drain(source: Source[ByteString, NotUsed]): ByteString = {
    val futureResult: Future[ByteString] = source.runFold(ByteString.empty) { (a, b) =>
      a.concat(b)
    }
    awaitShort(futureResult)
  }

  protected def drain[A, B](source: Source[A, B]): Seq[A] = {
    val futureResult: Future[Seq[A]] = source.runWith(Sink.seq)
    awaitShort(futureResult)
  }

  override protected def afterAll(): Unit = {
    try {
      val _ = await(system.terminate())
    } catch {
      case NonFatal(_) => ()
    }
    schedulerService.shutdownNow()
    super.afterAll()
  }
} 
Example 9
Source File: ScalaUtilIT.scala    From daml   with Apache License 2.0
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.ledger.client.binding.util

import java.util.concurrent.{Executors, ScheduledExecutorService}

import com.daml.ledger.client.binding.util.ScalaUtil.FutureOps
import org.scalatest.concurrent.AsyncTimeLimitedTests
import org.scalatest.time.Span
import org.scalatest.time.SpanSugar._
import org.scalatest.{AsyncWordSpec, BeforeAndAfterAll, Matchers}

import scala.concurrent.{Future, Promise, TimeoutException}

class ScalaUtilIT
    extends AsyncWordSpec
    with AsyncTimeLimitedTests
    with Matchers
    with BeforeAndAfterAll {

  implicit val scheduler: ScheduledExecutorService = Executors.newSingleThreadScheduledExecutor()

  override def afterAll(): Unit = {
    scheduler.shutdownNow()
    super.afterAll()
  }

  "FutureOps" can {

    "future with timeout" should {

      "fail Future with TimoutException after specified duration" in {
        val promise = Promise[Unit]() // never completes
        val future = promise.future.timeout("name", 1000.millis, 100.millis)
        recoverToSucceededIf[TimeoutException](future)
      }

      "be able to complete within specified duration" in {
        val future = Future {
          "result"
        }.timeoutWithDefaultWarn("name", 1.second)

        future.map(_ shouldBe "result")
      }

    }

  }
  override lazy val timeLimit: Span = 10.seconds
} 
Example 10
Source File: ProgramResource.scala    From daml   with Apache License 2.0
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.resources

import java.util.concurrent.{Executors, TimeUnit}

import com.daml.logging.ContextualizedLogger
import com.daml.logging.LoggingContext.newLoggingContext
import com.daml.resources.ProgramResource._

import scala.concurrent.duration.{DurationInt, FiniteDuration}
import scala.concurrent.{Await, ExecutionContext}
import scala.util.Try
import scala.util.control.{NoStackTrace, NonFatal}

class ProgramResource[T](
    owner: => ResourceOwner[T],
    tearDownTimeout: FiniteDuration = 10.seconds,
) {
  private val logger = ContextualizedLogger.get(getClass)

  private val executorService = Executors.newCachedThreadPool()

  def run(): Unit = {
    newLoggingContext { implicit logCtx =>
      val resource = {
        implicit val executionContext: ExecutionContext =
          ExecutionContext.fromExecutor(executorService)
        Try(owner.acquire()).fold(Resource.failed, identity)
      }

      def stop(): Unit = {
        Await.result(resource.release(), tearDownTimeout)
        executorService.shutdown()
        executorService.awaitTermination(tearDownTimeout.toMillis, TimeUnit.MILLISECONDS)
        ()
      }

      sys.runtime.addShutdownHook(new Thread(() => {
        try {
          stop()
        } catch {
          case NonFatal(exception) =>
            logger.error("Failed to stop successfully.", exception)
        }
      }))

      // On failure, shut down immediately.
      resource.asFuture.failed.foreach { exception =>
        exception match {
          // The error is suppressed; we don't need to print anything more.
          case _: SuppressedStartupException =>
          case _: StartupException =>
            logger.error(
              s"Shutting down because of an initialization error.\n${exception.getMessage}")
          case NonFatal(_) =>
            logger.error("Shutting down because of an initialization error.", exception)
        }
        sys.exit(1) // `stop` will be triggered by the shutdown hook.
      }(ExecutionContext.global) // Run on the global execution context to avoid deadlock.
    }
  }
}

object ProgramResource {

  trait StartupException extends NoStackTrace {
    self: Exception =>
  }

  trait SuppressedStartupException {
    self: Exception =>
  }
} 
Example 11
Source File: ThreadUtil.scala    From iotchain   with MIT License
package jbok.common.thread

import java.lang.Thread.UncaughtExceptionHandler
import java.nio.channels.AsynchronousChannelGroup
import java.nio.channels.spi.AsynchronousChannelProvider
import java.util.concurrent.atomic.AtomicInteger
import java.util.concurrent.{Executors, ThreadFactory}

import cats.effect.{Resource, Sync}

import scala.concurrent.ExecutionContext
import scala.util.control.NonFatal

object ThreadUtil {
  def named(threadPrefix: String, daemon: Boolean, exitJvmOnFatalError: Boolean = true): ThreadFactory =
    new ThreadFactory {
      val defaultThreadFactory = Executors.defaultThreadFactory()
      val idx                  = new AtomicInteger(0)
      def newThread(r: Runnable) = {
        val t = defaultThreadFactory.newThread(r)
        t.setDaemon(daemon)
        t.setName(s"$threadPrefix-${idx.incrementAndGet()}")
        t.setUncaughtExceptionHandler(new UncaughtExceptionHandler {
          def uncaughtException(t: Thread, e: Throwable): Unit = {
            ExecutionContext.defaultReporter(e)
            if (exitJvmOnFatalError) {
              e match {
                case NonFatal(_) => ()
                case _           => System.exit(-1)
              }
            }
          }
        })
        t
      }
    }

  def blockingThreadPool[F[_]](name: String)(implicit F: Sync[F]): Resource[F, ExecutionContext] =
    Resource(F.delay {
      val factory  = named(name, daemon = true)
      val executor = Executors.newCachedThreadPool(factory)
      val ec       = ExecutionContext.fromExecutor(executor)
      (ec, F.delay(executor.shutdown()))
    })

  def acg[F[_]](implicit F: Sync[F]): Resource[F, AsynchronousChannelGroup] =
    Resource(F.delay {
      val acg = acgUnsafe
      (acg, F.delay(acg.shutdownNow()))
    })

  def acgUnsafe: AsynchronousChannelGroup =
    AsynchronousChannelProvider
      .provider()
      .openAsynchronousChannelGroup(8, named("jbok-ag-tcp", daemon = true))

  lazy val acgGlobal: AsynchronousChannelGroup = acgUnsafe
} 
Example 12
Source File: BlockingIO.scala    From gbf-raidfinder   with MIT License
package walfie.gbf.raidfinder.util

import java.util.concurrent.atomic.AtomicLong
import java.util.concurrent.{Executors, ThreadFactory}
import scala.concurrent.{ExecutionContext, ExecutionContextExecutor, Future, Promise, blocking}
import scala.util.control.NonFatal
import monix.execution.Scheduler

// https://github.com/alexandru/scala-best-practices/blob/master/sections/4-concurrency-parallelism.md
object BlockingIO {
  private val ioThreadPool = Scheduler.io(name = "io-thread")

  def future[T](t: => T): Future[T] = {
    val p = Promise[T]()

    val runnable = new Runnable {
      def run() = try {
        p.success(blocking(t))
      } catch {
        case NonFatal(ex) => p.failure(ex)
      }
    }

    ioThreadPool.execute(runnable)

    p.future
  }
} 
Example 13
Source File: FlumeInputDStream.scala    From drizzle-spark   with Apache License 2.0
package org.apache.spark.streaming.flume

import java.io.{Externalizable, ObjectInput, ObjectOutput}
import java.net.InetSocketAddress
import java.nio.ByteBuffer
import java.util.concurrent.Executors

import scala.collection.JavaConverters._
import scala.reflect.ClassTag

import org.apache.avro.ipc.NettyServer
import org.apache.avro.ipc.specific.SpecificResponder
import org.apache.flume.source.avro.{AvroFlumeEvent, AvroSourceProtocol, Status}
import org.jboss.netty.channel.{ChannelPipeline, ChannelPipelineFactory, Channels}
import org.jboss.netty.channel.socket.nio.NioServerSocketChannelFactory
import org.jboss.netty.handler.codec.compression._

import org.apache.spark.internal.Logging
import org.apache.spark.storage.StorageLevel
import org.apache.spark.streaming.StreamingContext
import org.apache.spark.streaming.dstream._
import org.apache.spark.streaming.receiver.Receiver
import org.apache.spark.util.Utils

private[streaming]
class FlumeInputDStream[T: ClassTag](
  _ssc: StreamingContext,
  host: String,
  port: Int,
  storageLevel: StorageLevel,
  enableDecompression: Boolean
) extends ReceiverInputDStream[SparkFlumeEvent](_ssc) {

  override def getReceiver(): Receiver[SparkFlumeEvent] = {
    new FlumeReceiver(host, port, storageLevel, enableDecompression)
  }
}


// The FlumeReceiver and SparkFlumeEvent classes referenced above are omitted from this excerpt.
private[streaming]
class CompressionChannelPipelineFactory extends ChannelPipelineFactory {
  def getPipeline(): ChannelPipeline = {
    val pipeline = Channels.pipeline()
    val encoder = new ZlibEncoder(6)
    pipeline.addFirst("deflater", encoder)
    pipeline.addFirst("inflater", new ZlibDecoder())
    pipeline
  }
}
Example 14
Source File: SparkSQLSessionManager.scala    From drizzle-spark   with Apache License 2.0
package org.apache.spark.sql.hive.thriftserver

import java.util.concurrent.Executors

import org.apache.commons.logging.Log
import org.apache.hadoop.hive.conf.HiveConf
import org.apache.hadoop.hive.conf.HiveConf.ConfVars
import org.apache.hive.service.cli.SessionHandle
import org.apache.hive.service.cli.session.SessionManager
import org.apache.hive.service.cli.thrift.TProtocolVersion
import org.apache.hive.service.server.HiveServer2

import org.apache.spark.sql.SQLContext
import org.apache.spark.sql.hive.{HiveSessionState, HiveUtils}
import org.apache.spark.sql.hive.thriftserver.ReflectionUtils._
import org.apache.spark.sql.hive.thriftserver.server.SparkSQLOperationManager


private[hive] class SparkSQLSessionManager(hiveServer: HiveServer2, sqlContext: SQLContext)
  extends SessionManager(hiveServer)
  with ReflectedCompositeService {

  private lazy val sparkSqlOperationManager = new SparkSQLOperationManager()

  override def init(hiveConf: HiveConf) {
    setSuperField(this, "hiveConf", hiveConf)

    // Create operation log root directory, if operation logging is enabled
    if (hiveConf.getBoolVar(ConfVars.HIVE_SERVER2_LOGGING_OPERATION_ENABLED)) {
      invoke(classOf[SessionManager], this, "initOperationLogRootDir")
    }

    val backgroundPoolSize = hiveConf.getIntVar(ConfVars.HIVE_SERVER2_ASYNC_EXEC_THREADS)
    setSuperField(this, "backgroundOperationPool", Executors.newFixedThreadPool(backgroundPoolSize))
    getAncestorField[Log](this, 3, "LOG").info(
      s"HiveServer2: Async execution pool size $backgroundPoolSize")

    setSuperField(this, "operationManager", sparkSqlOperationManager)
    addService(sparkSqlOperationManager)

    initCompositeService(hiveConf)
  }

  override def openSession(
      protocol: TProtocolVersion,
      username: String,
      passwd: String,
      ipAddress: String,
      sessionConf: java.util.Map[String, String],
      withImpersonation: Boolean,
      delegationToken: String): SessionHandle = {
    val sessionHandle =
      super.openSession(protocol, username, passwd, ipAddress, sessionConf, withImpersonation,
          delegationToken)
    val session = super.getSession(sessionHandle)
    HiveThriftServer2.listener.onSessionCreated(
      session.getIpAddress, sessionHandle.getSessionId.toString, session.getUsername)
    val sessionState = sqlContext.sessionState.asInstanceOf[HiveSessionState]
    val ctx = if (sessionState.hiveThriftServerSingleSession) {
      sqlContext
    } else {
      sqlContext.newSession()
    }
    ctx.setConf("spark.sql.hive.version", HiveUtils.hiveExecutionVersion)
    if (sessionConf != null && sessionConf.containsKey("use:database")) {
      ctx.sql(s"use ${sessionConf.get("use:database")}")
    }
    sparkSqlOperationManager.sessionToContexts.put(sessionHandle, ctx)
    sessionHandle
  }

  override def closeSession(sessionHandle: SessionHandle) {
    HiveThriftServer2.listener.onSessionClosed(sessionHandle.getSessionId.toString)
    super.closeSession(sessionHandle)
    sparkSqlOperationManager.sessionToActivePool.remove(sessionHandle)
    sparkSqlOperationManager.sessionToContexts.remove(sessionHandle)
  }
} 
Example 15
Source File: AppConfig.scala    From zorechka-bot   with MIT License
package com.wix.zorechka

import java.util.concurrent.{Executors, ThreadPoolExecutor}

import com.wix.zorechka.HasAppConfig.Cfg
import com.wix.zorechka.utils.concurrent.NamedThreadFactory
import zio.{RIO, Task, ZIO}
import zio.internal.Executor

import scala.concurrent.ExecutionContext

case class AppConfig(reposFile: String, db: DbConfig)

case class DbConfig(url: String, username: String, password: String)

trait HasAppConfig {
  val cfg: Cfg
}

object HasAppConfig {
  trait Cfg {
    val loadConfig: Task[AppConfig]
    val blockingCtx: ExecutionContext
  }

  trait Live extends HasAppConfig {
    import pureconfig.generic.auto._

    val cfg: Cfg = new Cfg {
      override val loadConfig: Task[AppConfig] = Task.effect(pureconfig.loadConfigOrThrow[AppConfig])

      override val blockingCtx: ExecutionContext = {
        val factory = NamedThreadFactory(name = "blocking-pool", daemon = true)
        Executor
          .fromThreadPoolExecutor(_ => Int.MaxValue)(Executors.newCachedThreadPool(factory).asInstanceOf[ThreadPoolExecutor]).asEC
      }
    }
  }

  def loadConfig(): RIO[HasAppConfig, AppConfig] = ZIO.accessM[HasAppConfig](_.cfg.loadConfig)
} 
Example 16
Source File: Bootstrap.scala    From incubator-s2graph   with Apache License 2.0
package org.apache.s2graph.rest.play

import java.util.concurrent.Executors

import org.apache.s2graph.core.rest.{RequestParser, RestHandler}
import org.apache.s2graph.core.utils.logger
import org.apache.s2graph.core.{ExceptionHandler, S2Graph, Management}
import org.apache.s2graph.rest.play.actors.QueueActor
import org.apache.s2graph.rest.play.config.Config
import org.apache.s2graph.rest.play.controllers.ApplicationController
import play.api.Application
import play.api.mvc.{WithFilters, _}
import play.filters.gzip.GzipFilter

import scala.concurrent.{ExecutionContext, Future}
import scala.io.Source
import scala.util.Try

object Global extends WithFilters(new GzipFilter()) {
  var s2graph: S2Graph = _
  var storageManagement: Management = _
  var s2parser: RequestParser = _
  var s2rest: RestHandler = _
  var wallLogHandler: ExceptionHandler = _

  def startup() = {
    val numOfThread = Runtime.getRuntime.availableProcessors()
    val threadPool = Executors.newFixedThreadPool(numOfThread)
    val ec = ExecutionContext.fromExecutor(threadPool)

    val config = Config.conf.underlying

    // init s2graph with config
    s2graph = new S2Graph(config)(ec)
    storageManagement = new Management(s2graph)
    s2parser = new RequestParser(s2graph) 
    s2rest = new RestHandler(s2graph)(ec)

    logger.info(s"starts with num of thread: $numOfThread, ${threadPool.getClass.getSimpleName}")

    config
  }

  def shutdown() = {
    s2graph.shutdown()
  }

  // Application entry point
  override def onStart(app: Application) {
    ApplicationController.isHealthy = false

    val config = startup()
    wallLogHandler = new ExceptionHandler(config)

    QueueActor.init(s2graph, wallLogHandler)

    val defaultHealthOn = Config.conf.getBoolean("app.health.on").getOrElse(true)
    ApplicationController.deployInfo = Try(Source.fromFile("./release_info").mkString("")).recover { case _ => "release info not found\n" }.get

    ApplicationController.isHealthy = defaultHealthOn
  }

  override def onStop(app: Application) {
    wallLogHandler.shutdown()
    QueueActor.shutdown()

    
    shutdown()
  }

  override def onError(request: RequestHeader, ex: Throwable): Future[Result] = {
    logger.error(s"onError => ip:${request.remoteAddress}, request:${request}", ex)
    Future.successful(Results.InternalServerError)
  }

  override def onHandlerNotFound(request: RequestHeader): Future[Result] = {
    logger.error(s"onHandlerNotFound => ip:${request.remoteAddress}, request:${request}")
    Future.successful(Results.NotFound)
  }

  override def onBadRequest(request: RequestHeader, error: String): Future[Result] = {
    logger.error(s"onBadRequest => ip:${request.remoteAddress}, request:$request, error:$error")
    Future.successful(Results.BadRequest(error))
  }
} 
Example 17
Source File: BlockchainCacheSpecification.scala    From matcher   with MIT License
package com.wavesplatform.dex.grpc.integration.caches

import java.time.Duration
import java.util.concurrent.{ConcurrentHashMap, ExecutorService, Executors}

import mouse.any.anySyntaxMouse
import org.scalatest.BeforeAndAfterAll
import org.scalatest.matchers.should.Matchers
import org.scalatest.wordspec.AnyWordSpecLike

import scala.collection.JavaConverters._
import scala.concurrent._

class BlockchainCacheSpecification extends AnyWordSpecLike with Matchers with BeforeAndAfterAll {

  private val executor: ExecutorService                          = Executors.newCachedThreadPool
  implicit private val blockingContext: ExecutionContextExecutor = ExecutionContext.fromExecutor(executor)

  private class BlockchainCacheTest(loader: String => Future[String], expiration: Option[Duration], invalidationPredicate: String => Boolean)
      extends BlockchainCache[String, String](loader, expiration, invalidationPredicate)

  private def createCache(loader: String => Future[String],
                          expiration: Option[Duration] = None,
                          invalidationPredicate: String => Boolean = _ => false): BlockchainCacheTest = {
    new BlockchainCacheTest(loader, expiration, invalidationPredicate)
  }

  override def afterAll(): Unit = {
    super.afterAll()
    executor.shutdownNow()
  }

  private val andThenAwaitTimeout = 300

  "BlockchainCache" should {

    "not keep failed futures" in {

      val goodKey = "good key"
      val badKey  = "gRPC Error"

      val keyAccessMap = new ConcurrentHashMap[String, Int] unsafeTap (m => { m.put(goodKey, 0); m.put(badKey, 0) })
      val gRPCError    = new RuntimeException("gRPC Error occurred")

      val cache =
        createCache(
          key => {
            (if (key == badKey) Future.failed(gRPCError) else Future.successful(s"value = $key")) unsafeTap { _ =>
              keyAccessMap.computeIfPresent(key, (_, prev) => prev + 1)
            }
          }
        )

      val badKeyAccessCount = 10

      Await.result(
        (1 to badKeyAccessCount).foldLeft { Future.successful("") } { (prev, _) =>
          for {
            _ <- prev
            _ <- cache get goodKey
            r <- cache get badKey recover { case _ => "sad" }
          } yield { Thread.sleep(andThenAwaitTimeout); r }
        },
        scala.concurrent.duration.Duration.Inf
      )

      keyAccessMap.get(goodKey) shouldBe 1
      keyAccessMap.get(badKey) should be > 1
    }

    "not keep values according to the predicate" in {

      val goodKey = "111"
      val badKey  = "222"

      val keyAccessMap = new ConcurrentHashMap[String, Int](Map(goodKey -> 0, badKey -> 0).asJava)

      val cache = createCache(
        key => { keyAccessMap.computeIfPresent(key, (_, prev) => prev + 1); Future.successful(key) },
        invalidationPredicate = _.startsWith("2")
      )

      Await.result(
        (1 to 10).foldLeft { Future.successful("") } { (prev, _) =>
          for {
            _ <- prev
            _ <- cache get goodKey
            r <- cache get badKey
          } yield blocking { Thread.sleep(andThenAwaitTimeout); r }
        },
        scala.concurrent.duration.Duration.Inf
      )

      keyAccessMap.get(goodKey) shouldBe 1
      keyAccessMap.get(badKey) should be > 1
    }
  }
} 
Example 18
Source File: ParallelExecutor.scala    From nyaya   with GNU Lesser General Public License v2.1
package nyaya.test

import java.util.concurrent.{Callable, ExecutorService, Executors, Future, TimeUnit}
import java.util.concurrent.atomic.AtomicInteger
import nyaya.gen.ThreadNumber
import nyaya.prop.Prop
import ParallelExecutor._
import PTest._
import Executor.{DataCtx, Data}

// TODO data SampleSize = TotalSamples(n) | Fn(qty|%, gensize|%) | PerWorker(sampleSize)

object ParallelExecutor {
  val defaultThreadCount = 1.max(Runtime.getRuntime.availableProcessors - 1)

  def merge[A](a: RunState[A], b: RunState[A]): RunState[A] = {
    val runs = a.runs max b.runs
    (a.success, b.success) match {
      case (false, true) => RunState(runs, a.result)
      case _             => RunState(runs, b.result)
    }
  }
}

case class ParallelExecutor(workers: Int = defaultThreadCount) extends Executor {

  val debugPrefixes = (0 until workers).toVector.map(i => s"Worker #$i: ")

  override def run[A](p: Prop[A], g: Data[A], S: Settings): RunState[A] = {
    val sss = {
      var rem = S.sampleSize.value
      var i = workers
      var v = Vector.empty[SampleSize]
      while(i > 0) {
        val p = rem / i
        v :+= SampleSize(p)
        rem -= p
        i -= 1
      }
      v
    }

    if (S.debug) {
      val szs = sss.map(_.value)
      println(s"Samples/Worker: ${szs.mkString("{", ",", "}")} = Σ${szs.sum}")
    }

    val ai = new AtomicInteger(0)
    def task(worker: Int) = mkTask {
      val dp = debugPrefixes(worker)
      val data = g(DataCtx(sss(worker), ThreadNumber(worker), S.seed, dp))
      testN(p, data, () => ai.incrementAndGet(), S)
    }
    runAsync2(workers, task)
  }

  override def prove[A](p: Prop[A], d: Domain[A], S: Settings): RunState[A] = {
    val threads = workers min d.size

    val ai = new AtomicInteger(0)
    def task(worker: Int) = mkTask {
      proveN(p, d, worker, threads, _ => ai.incrementAndGet, S)
    }
    runAsync2(threads, task)
  }

  private[this] def mkTask[A](f: => RunState[A]) = new Callable[RunState[A]] {
    override def call(): RunState[A] = f
  }

  private[this] def runAsync2[A](threads: Int, f: Int => Callable[RunState[A]]): RunState[A] =
    runAsync(es => (0 until threads).toList.map(es submit f(_)))

  private[this] def runAsync[A](start: ExecutorService => List[Future[RunState[A]]]): RunState[A] = {
    val es: ExecutorService = Executors.newFixedThreadPool(workers)
    val fs = start(es)
    es.shutdown()
    val rss = fs.map(_.get())
    es.awaitTermination(1, TimeUnit.MINUTES)
    rss.foldLeft(RunState.empty[A])(merge)
  }
} 
Example 19
Source File: Futures.scala    From scala-concurrency-playground   with MIT License
package org.zalando.benchmarks

import java.util.concurrent.Executors

import akka.actor.ActorSystem

import scala.concurrent.{Await, ExecutionContext, Future}
import scala.concurrent.duration._

class Futures(system: ActorSystem) {
  import ComputationFollowedByAsyncPublishing._

  def benchmark(coreFactor: Int): Unit = {
    import system.dispatcher

    // execution context only for the (cpu-bound) computation
    val ec = ExecutionContext fromExecutorService Executors.newFixedThreadPool(numWorkers(coreFactor))

    try {
      // `traverse` will distribute the tasks to the thread pool, the rest happens fully async
      printResult(Await.result(Future.traverse(1 to numTasks map Job) { job =>
        Future(Computer compute job)(ec) flatMap (Publisher.publish(_, system))
      }, 1 hour))

    } finally {
      // the execution context has to be shut down explicitly
      ec.shutdown()
    }
  }
} 
Example 20
Source File: RxScala.scala    From scala-concurrency-playground   with MIT License
package org.zalando.benchmarks

import java.util.concurrent.{ExecutorService, Executors}

import akka.actor.ActorSystem
import rx.lang.scala.Observable

import scala.concurrent.{ExecutionContext, Future}

class RxScala(system: ActorSystem) {
  import ComputationFollowedByAsyncPublishing._

  def benchmark(coreFactor: Int): Unit = {
    val executor: ExecutorService = Executors.newCachedThreadPool()
    implicit val ec = ExecutionContext.fromExecutor(executor)
    try {
      Observable
        .from(1 to numTasks)
        .map(Job)
        .flatMap(numWorkers(coreFactor), job => Observable.from(Future(Computer compute job)))
        .flatMap(r => Observable.from(Publisher publish (r, system))(system dispatcher))
        .foldLeft(0) { case (s, r) => s + computeResult(r) }
        .toBlocking
        .foreach(println)
    } finally {
      executor.shutdown()
    }
  }
} 
Example 21
Source File: Blocking.scala    From scala-concurrency-playground   with MIT License
package org.zalando.benchmarks

import java.util.concurrent.{Callable, Executors}

import akka.actor.ActorSystem

import scala.concurrent.Await
import scala.concurrent.duration._

class Blocking(system: ActorSystem) {
  import ComputationFollowedByAsyncPublishing._

  def benchmark(coreFactor: Int): Unit = {
    // let's do this Ye Olde Schoole Way
    val exec = Executors newFixedThreadPool numWorkers(coreFactor)

    try {
      val futures = 1 to numTasks map Job map { job =>
        exec.submit(new Callable[PublishResult] {
          // explicitly turn async publishing operation into a blocking operation
          override def call(): PublishResult = Await.result(Publisher publish (Computer compute job, system), 1 hour)
        })
      }
      printResult(futures map (_.get))

    } finally {
      // never forget
      exec.shutdown()
    }
  }
} 
Example 22
Source File: WaitForTaskDsl.scala    From algoliasearch-client-scala   with MIT License
package algolia.dsl

import java.time.ZonedDateTime
import java.util.concurrent.{Executors, ThreadFactory, TimeUnit}

import algolia.definitions.{WaitForTaskDefinition, WaitForTimeoutException}
import algolia.responses.{AlgoliaTask, TaskStatus}
import algolia.{AlgoliaClient, Executable}
import io.netty.util.{HashedWheelTimer, Timeout, TimerTask}

import scala.concurrent.{ExecutionContext, Future, Promise}

trait WaitForTaskDsl {

  case object waitFor {
    def task(task: AlgoliaTask): WaitForTaskDefinition =
      WaitForTaskDefinition(task.idToWaitFor)

    def task(taskID: Long): WaitForTaskDefinition =
      WaitForTaskDefinition(taskID)
  }

  implicit object WaitForTaskDefinitionExecutable
      extends Executable[WaitForTaskDefinition, TaskStatus] {

    // Run every 100 ms, use a wheel with 512 buckets
    private lazy val timer = {
      val threadFactory = new ThreadFactory {
        override def newThread(r: Runnable): Thread = {
          val t = Executors.defaultThreadFactory().newThread(r)
          t.setDaemon(true)
          t.setName("algolia-waitfor-thread-" + ZonedDateTime.now())
          t
        }
      }
      new HashedWheelTimer(threadFactory, 100, TimeUnit.MILLISECONDS, 512)
    }

    
    override def apply(client: AlgoliaClient, query: WaitForTaskDefinition)(
        implicit executor: ExecutionContext
    ): Future[TaskStatus] = {

      def request(d: Long, totalDelay: Long): Future[TaskStatus] =
        delay[TaskStatus](d) {
          client.request[TaskStatus](query.build())
        }.flatMap { res =>
          if (res.status == "published") {
            Future.successful(res)
          } else if (totalDelay > query.maxDelay) {
            Future.failed(
              WaitForTimeoutException(
                s"Waiting for task `${query.taskId}` on index `${query.index.get}` timeout after ${d}ms"
              )
            )
          } else {
            request(d * 2, totalDelay + d)
          }
        }

      request(query.baseDelay, 0L)
    }

    private def delay[T](delay: Long)(block: => Future[T]): Future[T] = {
      val promise = Promise[T]()
      val task = new TimerTask {
        override def run(timeout: Timeout): Unit = promise.completeWith(block)
      }
      timer.newTimeout(task, delay, TimeUnit.MILLISECONDS)
      promise.future
    }
  }

} 
Example 23
Source File: Blocking.scala    From keycloak-benchmark   with Apache License 2.0
package io.gatling.keycloak

import java.util.concurrent.atomic.AtomicInteger
import java.util.concurrent.{ThreadFactory, Executors}

import io.gatling.core.validation.Success
import io.gatling.core.akka.GatlingActorSystem


object Blocking {
  GatlingActorSystem.instance.registerOnTermination(() => shutdown())

  private val threadPool = Executors.newCachedThreadPool(new ThreadFactory {
    val counter = new AtomicInteger();

    override def newThread(r: Runnable): Thread =
      new Thread(r, "blocking-thread-" + counter.incrementAndGet())
  })

  def apply(f: () => Unit) = {
    threadPool.execute(new Runnable() {
      override def run = {
        f()
      }
    })
    Success(())
  }

  def shutdown() = {
    threadPool.shutdownNow()
  }
} 
Example 24
Source File: Server.scala    From seals   with Apache License 2.0
package com.example.server

import java.net.{ InetSocketAddress, InetAddress }
import java.nio.channels.AsynchronousChannelGroup
import java.util.concurrent.Executors

import scala.concurrent.duration._

import cats.implicits._
import cats.effect.{ IO, IOApp, ExitCode, Resource, Blocker }

import fs2.{ Stream, Chunk }
import fs2.io.tcp

import scodec.bits.BitVector
import scodec.Codec

import dev.tauri.seals.scodec.Codecs._

import com.example.proto._

object Server extends IOApp {

  final val bufferSize = 32 * 1024
  final val timeout = Some(2.seconds)
  final val maxClients = 200
  final val port = 8080

  val rnd = new scala.util.Random

  def addr(port: Int): InetSocketAddress =
    new InetSocketAddress(InetAddress.getLoopbackAddress, port)

  override def run(args: List[String]): IO[ExitCode] = {
    Blocker[IO].use { bl =>
      tcp.SocketGroup[IO](bl).use { sg =>
        serve(port, sg).compile.drain.as(ExitCode.Success)
      }
    }
  }

  def serve(port: Int, sg: tcp.SocketGroup): Stream[IO, Unit] = {
    Stream.resource(sg.serverResource[IO](addr(port))).flatMap {
      case (localAddr, sockets) =>
        val s = sockets.map { socket =>
          Stream.resource(socket).flatMap { socket =>
            val bvs: Stream[IO, BitVector] = socket.reads(bufferSize, timeout).chunks.map(ch => BitVector.view(ch.toArray))
            val tsk: IO[BitVector] = bvs.compile.toVector.map(_.foldLeft(BitVector.empty)(_ ++ _))
            val request: IO[Request] = tsk.flatMap { bv =>
              Codec[Request].decode(bv).fold(
                err => IO.raiseError(new Exception(err.toString)),
                result => IO.pure(result.value)
              )
            }
            val response: IO[Response] = request.flatMap(logic)
            val encoded: Stream[IO, Byte] = Stream.eval(response)
              .map(r => Codec[Response].encode(r).require)
              .flatMap { bv => Stream.chunk(Chunk.bytes(bv.bytes.toArray)) }
            encoded.through(socket.writes(timeout)).onFinalize(socket.endOfOutput)
          }
        }
        s.parJoin[IO, Unit](maxClients)
    }
  }

  def logic(req: Request): IO[Response] = req match {
    case RandomNumber(min, max) =>
      if (min < max) {
        IO {
          val v = rnd.nextInt(max - min + 1) + min
          Number(v)
        }
      } else if (min === max) {
        IO.pure(Number(min))
      } else {
        IO.raiseError(new IllegalArgumentException("min must not be greater than max"))
      }
    case ReSeed(s) =>
      IO {
        rnd.setSeed(s)
        Ok
      }
  }
} 
Example 25
Source File: ServerSpec.scala    From seals   with Apache License 2.0
package com.example.server

import java.util.concurrent.Executors

import scala.concurrent.ExecutionContext

import cats.effect.{ IO, Blocker, ContextShift }

import org.scalatest.{ FlatSpec, Matchers, BeforeAndAfterAll }

import fs2.{ Stream, Chunk }

import scodec.bits._
import scodec.Codec

import dev.tauri.seals.scodec.Codecs._

import com.example.proto._

class ServerSpec extends FlatSpec with Matchers with BeforeAndAfterAll {

  implicit val cs: ContextShift[IO] = IO.contextShift(ExecutionContext.global)

  val ex = Executors.newCachedThreadPool()
  val ec = ExecutionContext.fromExecutor(ex)
  val bl = Blocker.liftExecutionContext(ec)
  val (sg, closeSg) = fs2.io.tcp.SocketGroup[IO](bl).allocated.unsafeRunSync()

  override def afterAll(): Unit = {
    super.afterAll()
    closeSg.unsafeRunSync()
    ex.shutdown()
  }

  "Server" should "respond to a request" in {
    val responses: Vector[Response] = Stream(
      Server.serve(Server.port, sg).drain,
      client(Server.port)
    ).parJoin(Int.MaxValue).take(1L).compile.toVector.unsafeRunSync()
    responses should === (Vector(Ok))
  }

  def client(port: Int): Stream[IO, Response] = {
    Stream.resource(sg.client[IO](Server.addr(port))).flatMap { socket =>
      val bvs: Stream[IO, BitVector] = Stream(Codec[Request].encode(ReSeed(56)).require)
      val bs: Stream[IO, Byte] = bvs.flatMap { bv =>
        Stream.chunk(Chunk.bytes(bv.bytes.toArray))
      }
      val read = bs.through(socket.writes(Server.timeout)).drain.onFinalize(socket.endOfOutput) ++
        socket.reads(Server.bufferSize, Server.timeout).chunks.map(ch => BitVector.view(ch.toArray))
      read.fold(BitVector.empty)(_ ++ _).map(bv => Codec[Response].decode(bv).require.value)
    }
  }
} 
Example 26
Source File: ServerSpec.scala    From seals   with Apache License 2.0
package com.example.lib

import java.util.concurrent.Executors
import java.nio.channels.{ AsynchronousChannelGroup => ACG }

import scala.concurrent.ExecutionContext

import cats.effect.IO

import org.scalatest.flatspec.AnyFlatSpec
import org.scalatest.matchers.should.Matchers

import fs2.{ Stream, Chunk }

import scodec.bits._

import Protocol.v1.{ Response, Request, Random, RandInt, Seed, Seeded }

import dev.tauri.seals.scodec.StreamCodecs

class ServerSpec extends AnyFlatSpec with Matchers with TcpTest {

  val ex = Executors.newCachedThreadPool()
  implicit val cg = ACG.withThreadPool(ex)
  implicit override lazy val ec = ExecutionContext.global

  override def afterAll(): Unit = {
    super.afterAll()
    cg.shutdown()
    ex.shutdown()
  }

  "Server" should "respond to requests" in {
    val nClients = 20
    val nRandom = testData.count {
      case Random(_, _) => true
      case _ => false
    }
    val nSeed = testData.count {
      case Seed(_) => true
      case _ => false
    }
    val str: Stream[IO, Stream[IO, Response]] = Server.serveAddr(0, this.sockGroup).map { localAddr =>
      clients(localAddr.getPort, nClients)
    }
    val responses: Vector[Response] = str
      .parJoin(Int.MaxValue)
      .take((nClients * testData.size).toLong).compile.toVector.unsafeRunSync()

    val randInts = responses.collect { case RandInt(i) => i }
    val seededs = responses.collect { case Seeded => () }

    randInts.size should === (nClients * nRandom)
    randInts.foreach { i =>
      i should be >= 1
      i should be <= 100
    }

    seededs.size should === (nClients * nSeed)
  }

  val testData = Vector[Request](
    Random(10, 19),
    Seed(0xdeadbeefL),
    Random(1, 100)
  )

  def clients(port: Int, count: Int, maxConcurrent: Int = 10): Stream[IO, Response] = {
    val cls: Stream[IO, Stream[IO, Response]] = {
      Stream.range(0, count).map { i =>
        Stream.resource(this.sockGroup.client[IO](Server.addr(port))).flatMap { socket =>
          val bvs: Stream[IO, BitVector] = StreamCodecs.streamEncoderFromReified[Request].encode(Stream.emits(testData).covary[IO])
          val bs: Stream[IO, Byte] = bvs.flatMap { bv =>
            Stream.chunk(Chunk.bytes(bv.bytes.toArray))
          }
          val read = bs.through(socket.writes(Server.timeout)).drain.onFinalize(socket.endOfOutput) ++
            socket.reads(Server.bufferSize, Server.timeout).chunks.map(ch => BitVector.view(ch.toArray))
          read.through(StreamCodecs.pipe[IO, Response])
        }
      }
    }

    cls.parJoin(maxConcurrent)
  }
} 
Example 27
Source File: DataStreamSource.scala    From eel-sdk   with Apache License 2.0 5 votes vote down vote up
package io.eels.datastream

import java.util.concurrent.{Executors, TimeUnit}

import com.sksamuel.exts.Logging
import com.sksamuel.exts.io.Using
import io.eels.schema.StructType
import io.eels.{Row, Source}

// an implementation of DataStream that provides a subscribe powered by its constituent parts
class DataStreamSource(source: Source) extends DataStream with Using with Logging {

  override def schema: StructType = source.schema

  override def subscribe(s: Subscriber[Seq[Row]]): Unit = {

    val publishers = source.parts()
    if (publishers.isEmpty) {
      logger.info("No parts for this source")
      s.subscribed(Subscription.empty)
      s.completed()
    } else {
      logger.info(s"Datastream has ${publishers.size} parts")
      val executor = Executors.newCachedThreadPool()
      Publisher.merge(publishers, Row.Sentinel)(executor).subscribe(s)
      executor.shutdown()
      executor.awaitTermination(999, TimeUnit.DAYS)
      logger.info("Datastream source has completed")
    }
  }
} 
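The subscribe method above follows the standard shutdown-then-await pattern: shutdown() stops the executor from accepting new work but lets already-submitted tasks run to completion, and awaitTermination blocks until they have. A minimal, self-contained sketch of that pattern (names are illustrative, not part of eel-sdk):

import java.util.concurrent.{ Executors, TimeUnit }

object ShutdownAwaitSketch extends App {
  val executor = Executors.newCachedThreadPool()

  // Submit some work; shutdown() will not cancel already-submitted tasks.
  (1 to 4).foreach { i =>
    executor.submit(new Runnable {
      override def run(): Unit = println(s"task $i on ${Thread.currentThread().getName}")
    })
  }

  executor.shutdown()                             // stop accepting new tasks
  executor.awaitTermination(10, TimeUnit.SECONDS) // wait for submitted tasks to drain
  println("all tasks completed")
}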
Example 28
Source File: JdbcSinkWriter.scala    From eel-sdk   with Apache License 2.0 5 votes vote down vote up
package io.eels.component.jdbc

import java.sql.Connection
import java.util.concurrent.{Executors, LinkedBlockingQueue, TimeUnit}

import com.sksamuel.exts.Logging
import io.eels.component.jdbc.dialect.JdbcDialect
import io.eels.schema.{Field, StructType}
import io.eels.{Row, SinkWriter}

class JdbcSinkWriter(schema: StructType,
                     connFn: () => Connection,
                     table: String,
                     createTable: Boolean,
                     dropTable: Boolean,
                     dialect: JdbcDialect,
                     threads: Int,
                     batchSize: Int,
                     batchesPerCommit: Int,
                     autoCommit: Boolean,
                     bufferSize: Int) extends SinkWriter with Logging {
  logger.info(s"Creating Jdbc writer with $threads threads, batch size $batchSize, autoCommit=$autoCommit")
  require(bufferSize >= batchSize)

  private val Sentinel = Row(StructType(Field("____jdbcsentinel")), Seq(null))

  import com.sksamuel.exts.concurrent.ExecutorImplicits._

  // The buffer is a concurrent receiver for the write method. It needs to hold enough elements so that
  // the callers of this class can keep pumping in rows while we wait for a batch to fill up.
  // The buffer size must be >= the batch size or we'll never accumulate enough rows to trigger a batch.
  private val buffer = new LinkedBlockingQueue[Row](bufferSize)

  // the coordinator pool is just a single thread that runs the coordinator
  private val coordinatorPool = Executors.newSingleThreadExecutor()

  private lazy val inserter = {
    val inserter = new JdbcInserter(connFn, table, schema, autoCommit, batchesPerCommit, dialect)
    if (dropTable) {
      inserter.dropTable()
    }
    if (createTable) {
      inserter.ensureTableCreated()
    }
    inserter
  }

  // todo this needs to allow multiple batches at once
  coordinatorPool.submit {
    try {
      logger.debug("Starting JdbcWriter Coordinator")
      // once we receive the pill it's all over for the writer
      Iterator.continually(buffer.take)
        .takeWhile(_ != Sentinel)
        .grouped(batchSize).withPartial(true)
        .foreach { batch =>
          inserter.insertBatch(batch)
        }
      logger.debug("Write completed; shutting down coordinator")
    } catch {
      case t: Throwable =>
        logger.error("Some error in coordinator", t)
    }
  }
  // the coordinator only runs this one task: read from the buffer
  // and do the inserts
  coordinatorPool.shutdown()

  override def close(): Unit = {
    buffer.put(Sentinel)
    logger.info("Closing JDBC Writer... waiting on writes to finish")
    coordinatorPool.awaitTermination(1, TimeUnit.DAYS)
  }

  // when we get a row to write, we won't commit it immediately to the database,
  // but we'll buffer it so we can do batched inserts
  override def write(row: Row): Unit = {
    buffer.put(row)
  }
} 
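JdbcSinkWriter coordinates its writes with a poison-pill: rows go into a LinkedBlockingQueue, a single-thread executor drains it, and a sentinel value tells the drain loop to stop. A stripped-down sketch of that pattern using nothing beyond the JDK and the standard library (all names are made up for illustration):

import java.util.concurrent.{ Executors, LinkedBlockingQueue, TimeUnit }

object PoisonPillSketch extends App {
  val Sentinel = "__poison__"
  val queue    = new LinkedBlockingQueue[String](100)
  val worker   = Executors.newSingleThreadExecutor()

  // The single worker drains the queue until it sees the sentinel value.
  worker.submit(new Runnable {
    override def run(): Unit =
      Iterator.continually(queue.take())
        .takeWhile(_ != Sentinel)
        .foreach(msg => println(s"processing $msg"))
  })

  (1 to 5).foreach(i => queue.put(s"row-$i"))
  queue.put(Sentinel)                          // signal "no more work"
  worker.shutdown()
  worker.awaitTermination(1, TimeUnit.MINUTES) // wait for the drain to finish
}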
Example 29
Source File: HdfsWatcher.scala    From eel-sdk   with Apache License 2.0 5 votes vote down vote up
package io.eels.component.hdfs

import java.util.concurrent.Executors
import java.util.concurrent.atomic.AtomicBoolean

import com.sksamuel.exts.Logging
import io.eels.util.HdfsIterator
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.{FileSystem, Path}
import org.apache.hadoop.hdfs.client.HdfsAdmin
import org.apache.hadoop.hdfs.inotify.Event

import scala.concurrent.duration._
import scala.util.control.NonFatal

class HdfsWatcher(path: Path, callback: FileCallback)
                 (implicit fs: FileSystem, conf: Configuration) extends Logging {

  private val files = HdfsIterator.remote(fs.listFiles(path, false)).map(_.getPath).toBuffer
  files.foreach(callback.onStart)

  private val executor = Executors.newSingleThreadExecutor()
  private val running = new AtomicBoolean(true)
  private val interval = 5.seconds

  private val admin = new HdfsAdmin(path.toUri, conf)
  private val eventStream = admin.getInotifyEventStream

  executor.submit(new Runnable {
    override def run(): Unit = {
      while (running.get) {
        try {
          Thread.sleep(interval.toMillis)
          val events = eventStream.take
          for (event <- events.getEvents) {
            event match {
              case create: Event.CreateEvent => callback.onCreate(create)
              case append: Event.AppendEvent => callback.onAppend(append)
              case rename: Event.RenameEvent => callback.onRename(rename)
              case close: Event.CloseEvent => callback.onClose(close)
              case _ =>
            }
          }
        } catch {
          case NonFatal(e) => logger.error("Error while polling fs", e)
        }
      }
    }
  })

  def stop(): Unit = {
    running.set(false)
    executor.shutdownNow()
  }
}

trait FileCallback {
  def onStart(path: Path): Unit
  def onClose(close: Event.CloseEvent): Unit
  def onRename(rename: Event.RenameEvent): Unit
  def onAppend(append: Event.AppendEvent): Unit
  def onCreate(path: Event.CreateEvent): Unit
} 
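HdfsWatcher stops its polling loop with an AtomicBoolean flag plus shutdownNow(), which also interrupts the worker if it is blocked. A generic sketch of that stop pattern, with a hypothetical pollOnce callback standing in for the inotify call (nothing below comes from eel-sdk):

import java.util.concurrent.Executors
import java.util.concurrent.atomic.AtomicBoolean

class PollingWorker(pollOnce: () => Unit, intervalMillis: Long) {
  private val running  = new AtomicBoolean(true)
  private val executor = Executors.newSingleThreadExecutor()

  executor.submit(new Runnable {
    override def run(): Unit =
      while (running.get) {
        try {
          pollOnce()
          Thread.sleep(intervalMillis)
        } catch {
          case _: InterruptedException => running.set(false) // interrupted by shutdownNow
        }
      }
  })

  def stop(): Unit = {
    running.set(false)     // ask the loop to exit at the next check
    executor.shutdownNow() // and interrupt it if it is sleeping
  }
}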
Example 30
Source File: BoxStoreTest.scala    From fs2-blobstore   with Apache License 2.0 5 votes vote down vote up
package blobstore.box


import java.util.concurrent.Executors

import blobstore.Path
import cats.effect.{Blocker, ContextShift, IO}
import com.box.sdk.BoxAPIConnection
import org.scalatest.matchers.must.Matchers
import org.scalatest.flatspec.AnyFlatSpec

import scala.concurrent.ExecutionContext

class BoxStoreTest extends AnyFlatSpec with Matchers {

  implicit val cs: ContextShift[IO] = IO.contextShift(ExecutionContext.global)
  val blocker = Blocker.liftExecutionContext(ExecutionContext.fromExecutor(Executors.newCachedThreadPool))
  "splitPath" should "correctly split a long path" in {
    val boxStore = new BoxStore[IO](new BoxAPIConnection(""), "", blocker)
    val testPath = Path("long/path/to/filename")
    val (pathToParentFolder, key) = boxStore.splitPath(testPath)
    pathToParentFolder must be("long" :: "path" :: "to" :: Nil)
    key must be("filename")
  }

  it should "split a single element path into a single element list and empty string key" in {
    val boxStore = new BoxStore[IO](new BoxAPIConnection(""), "", blocker)
    val testPath = Path("filename")
    val (pathToParentFolder, key) = boxStore.splitPath(testPath)
    pathToParentFolder must be("filename"::Nil)
    key must be("")
  }

  it should "split an empty path into empty list, empty string key" in {
    val boxStore = new BoxStore[IO](new BoxAPIConnection(""), "", blocker)
    val testPath = Path("")
    val (pathToParentFolder, key) = boxStore.splitPath(testPath)
    pathToParentFolder must be(""::Nil)
    key must be("")
  }

} 
Example 31
Source File: StoreOpsTest.scala    From fs2-blobstore   with Apache License 2.0 5 votes vote down vote up
package blobstore

import java.nio.charset.Charset
import java.nio.file.Files
import java.util.concurrent.Executors

import cats.effect.{Blocker, IO}
import cats.effect.laws.util.TestInstances
import cats.implicits._
import fs2.Pipe
import org.scalatest.Assertion
import org.scalatest.flatspec.AnyFlatSpec
import implicits._
import org.scalatest.matchers.must.Matchers

import scala.collection.mutable.ArrayBuffer
import scala.concurrent.ExecutionContext


class StoreOpsTest extends AnyFlatSpec with Matchers with TestInstances {

  implicit val cs = IO.contextShift(ExecutionContext.global)
  val blocker = Blocker.liftExecutionContext(ExecutionContext.fromExecutor(Executors.newCachedThreadPool))

  behavior of "PutOps"
  it should "buffer contents and compute size before calling Store.put" in {
    val bytes: Array[Byte] = "AAAAAAAAAA".getBytes(Charset.forName("utf-8"))
    val store = DummyStore(_.size must be(Some(bytes.length)))

    fs2.Stream.emits(bytes).covary[IO].through(store.bufferedPut(Path("path/to/file.txt"), blocker)).compile.drain.unsafeRunSync()
    store.buf.toArray must be(bytes)

  }

  it should "upload a file from a nio Path" in {
    val bytes = "hello".getBytes(Charset.forName("utf-8"))
    val store = DummyStore(_.size must be(Some(bytes.length)))

    fs2.Stream.bracket(IO(Files.createTempFile("test-file", ".bin"))) { p =>
      IO(p.toFile.delete).void
    }.flatMap { p =>
      fs2.Stream.emits(bytes).covary[IO].through(fs2.io.file.writeAll(p, blocker)).drain ++
        fs2.Stream.eval(store.put(p, Path("path/to/file.txt"), blocker))
    }.compile.drain.unsafeRunSync()
    store.buf.toArray must be(bytes)
  }

}

final case class DummyStore(check: Path => Assertion) extends Store[IO] {
  val buf = new ArrayBuffer[Byte]()
  override def put(path: Path): Pipe[IO, Byte, Unit] = {
    check(path)
    in => {
      buf.appendAll(in.compile.toVector.unsafeRunSync())
      fs2.Stream.emit(())
    }
  }
  override def list(path: Path): fs2.Stream[IO, Path] = ???
  override def get(path: Path, chunkSize: Int): fs2.Stream[IO, Byte] = ???
  override def move(src: Path, dst: Path): IO[Unit] = ???
  override def copy(src: Path, dst: Path): IO[Unit] = ???
  override def remove(path: Path): IO[Unit] = ???
} 
Example 32
Source File: Shifting.scala    From http4s-jdk-http-client   with Apache License 2.0 5 votes vote down vote up
package org.http4s.client.jdkhttpclient

import java.util.concurrent.Executors

import cats.effect._
import cats.effect.testing.specs2.CatsEffect
import org.http4s.implicits._
import org.specs2.mutable.Specification

import scala.concurrent.ExecutionContext

class Shifting extends Specification with CatsEffect {
  val testThreadName = "test-thread-name"
  val ec: ExecutionContext =
    ExecutionContext.fromExecutor(Executors.newCachedThreadPool { r =>
      val t = new Thread(r)
      t.setName(testThreadName)
      t
    })
  implicit val timer: cats.effect.Timer[IO] = IO.timer(ec)
  implicit val cs: cats.effect.ContextShift[IO] = IO.contextShift(ec)

  "The clients" should {
    "shift back from the HTTP thread pool" in {
      for {
        http <- JdkHttpClient.simple[IO]
        ws <- JdkWSClient.simple[IO]
        threadName = IO(Thread.currentThread().getName)
        name1 <- http.expect[String](uri"https://example.org") *> threadName
        name2 <- ws.connectHighLevel(WSRequest(uri"wss://echo.websocket.org")).use(_ => threadName)
      } yield List(name1, name2).forall(_ == testThreadName)
    }
  }
} 
Example 33
Source File: Threading.scala    From watr-works   with Apache License 2.0 5 votes vote down vote up
package edu.umass.cs.iesl.watr
package utils

import java.util.concurrent._



object Threading {

  import java.util.concurrent.Executors
  import scala.concurrent._

  val MainCPUBound = ExecutionContext.global
  val BlockingFileIO = ExecutionContext.fromExecutor(Executors.newCachedThreadPool())
  val NonBlockingIOPolling = ExecutionContext.fromExecutor(Executors.newFixedThreadPool(1))


  implicit val DefaultThreadPool = MainCPUBound

}


//DEBUG USE ONLY!
class ThreadPoolExecutorMonitor(executor: ThreadPoolExecutor, delaySeconds: Int = 1) extends Runnable {
  // Taken from the 'Don't Cheat the Executor' nescala talk
  private var running = true
  def finish(): Unit = {
    running = false
  }

  def run(): Unit = {
    while (running) {
      println(s"""
                  |Thread Pool Stats
                  |Pool Size:${executor.getPoolSize}
                  |Core Pool Size: ${executor.getCorePoolSize}
                  |Active Count: ${executor.getActiveCount}
                  |Completed Count: ${executor.getCompletedTaskCount}
                  |Current Queue Size: ${executor.getQueue.size}""".stripMargin)
      Thread.sleep(1000L * delaySeconds)
    }
  }

}

//DEBUG USE ONLY!
class ForkJoinMonitor(forkJoinPool: ForkJoinPool, delaySeconds: Int = 1) extends Runnable {

  private var running = true
  def finish(): Unit = {
    running = false
  }

  def run(): Unit = {
    while (running) {
      println(s"""
                  |Pool Size:${forkJoinPool.getPoolSize}
                  |Active Thread Count: ${forkJoinPool.getActiveThreadCount}
                  |Running Thread Count: ${forkJoinPool.getRunningThreadCount}
                  |Queued Submissions: ${forkJoinPool.getQueuedSubmissionCount}
                  |Queued Tasks: ${forkJoinPool.getQueuedTaskCount}
                  |Has Queued Submissions: ${forkJoinPool.hasQueuedSubmissions}
                  |""".stripMargin)
      Thread.sleep(1000L * delaySeconds)
    }
  }

} 
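ThreadPoolExecutorMonitor is a plain Runnable, so it needs its own thread (or executor) to do its periodic printing. A hypothetical usage sketch, assuming the monitor class above is on the classpath; the pool size and sleep times are arbitrary:

import java.util.concurrent.{ Executors, ThreadPoolExecutor, TimeUnit }

object MonitorUsageSketch extends App {
  // newFixedThreadPool is backed by a ThreadPoolExecutor, so the cast is safe.
  val pool    = Executors.newFixedThreadPool(4).asInstanceOf[ThreadPoolExecutor]
  val monitor = new ThreadPoolExecutorMonitor(pool, delaySeconds = 2)

  // Run the monitor on its own single-thread executor so it does not
  // compete with the work it is observing.
  val monitorExec = Executors.newSingleThreadExecutor()
  monitorExec.submit(monitor)

  (1 to 10).foreach { i =>
    pool.submit(new Runnable {
      override def run(): Unit = Thread.sleep(200L * i)
    })
  }

  pool.shutdown()
  pool.awaitTermination(1, TimeUnit.MINUTES)
  monitor.finish() // stop the monitoring loop
  monitorExec.shutdown()
}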
Example 34
Source File: SparkSQLSessionManager.scala    From XSQL   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.sql.hive.thriftserver

import java.util.concurrent.Executors

import org.apache.commons.logging.Log
import org.apache.hadoop.hive.conf.HiveConf
import org.apache.hadoop.hive.conf.HiveConf.ConfVars
import org.apache.hive.service.cli.SessionHandle
import org.apache.hive.service.cli.session.SessionManager
import org.apache.hive.service.cli.thrift.TProtocolVersion
import org.apache.hive.service.server.HiveServer2

import org.apache.spark.sql.SQLContext
import org.apache.spark.sql.hive.HiveUtils
import org.apache.spark.sql.hive.thriftserver.ReflectionUtils._
import org.apache.spark.sql.hive.thriftserver.server.SparkSQLOperationManager


private[hive] class SparkSQLSessionManager(hiveServer: HiveServer2, sqlContext: SQLContext)
  extends SessionManager(hiveServer)
  with ReflectedCompositeService {

  private lazy val sparkSqlOperationManager = new SparkSQLOperationManager()

  override def init(hiveConf: HiveConf) {
    setSuperField(this, "operationManager", sparkSqlOperationManager)
    super.init(hiveConf)
  }

  override def openSession(
      protocol: TProtocolVersion,
      username: String,
      passwd: String,
      ipAddress: String,
      sessionConf: java.util.Map[String, String],
      withImpersonation: Boolean,
      delegationToken: String): SessionHandle = {
    val sessionHandle =
      super.openSession(protocol, username, passwd, ipAddress, sessionConf, withImpersonation,
          delegationToken)
    val session = super.getSession(sessionHandle)
    HiveThriftServer2.listener.onSessionCreated(
      session.getIpAddress, sessionHandle.getSessionId.toString, session.getUsername)
    val ctx = if (sqlContext.conf.hiveThriftServerSingleSession) {
      sqlContext
    } else {
      sqlContext.newSession()
    }
    ctx.setConf(HiveUtils.FAKE_HIVE_VERSION.key, HiveUtils.builtinHiveVersion)
    if (sessionConf != null && sessionConf.containsKey("use:database")) {
      ctx.sql(s"use ${sessionConf.get("use:database")}")
    }
    sparkSqlOperationManager.sessionToContexts.put(sessionHandle, ctx)
    sessionHandle
  }

  override def closeSession(sessionHandle: SessionHandle) {
    HiveThriftServer2.listener.onSessionClosed(sessionHandle.getSessionId.toString)
    super.closeSession(sessionHandle)
    sparkSqlOperationManager.sessionToActivePool.remove(sessionHandle)
    sparkSqlOperationManager.sessionToContexts.remove(sessionHandle)
  }
} 
Example 35
Source File: ApplicationIdleMonitor.scala    From XSQL   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.monitor.job

import java.util.concurrent.{Executors, ScheduledFuture, TimeUnit}
import java.util.concurrent.atomic.AtomicReference

import scala.collection.JavaConverters._

import org.apache.spark.JobExecutionStatus
import org.apache.spark.alarm.{AlertMessage, HtmlMessage}
import org.apache.spark.monitor.{Monitor, MonitorItem}
import org.apache.spark.monitor.MonitorItem.MonitorItem
import org.apache.spark.scheduler.{SparkListenerEvent, SparkListenerJobEnd, SparkListenerJobStart}
import org.apache.spark.status.JobDataWrapper

class ApplicationIdleMonitor extends JobMonitor {

  override val item: MonitorItem = MonitorItem.APP_IDLE_WARNER
  val delayThread = Executors.newScheduledThreadPool(1)
  lazy val endureLimit =
    conf.getTimeAsMs(s"${Monitor.PREFIX}.${item.toString.toLowerCase}.timeout", "1h")
  private var idleTimeout: AtomicReference[ScheduledFuture[_]] = new AtomicReference()

  private def getActiveJobNum(): Int = {
//    appStore.count(classOf[JobDataWrapper], "completionTime", -1L)
    kvStore
      .view(classOf[JobDataWrapper])
      .reverse()
      .asScala
      .map(_.info)
      .filter(_.status == JobExecutionStatus.RUNNING)
      .size
  }

  private def stopIdleTimeout(): Unit = {
    val idleTimeout = this.idleTimeout.getAndSet(null)
    if (idleTimeout != null) {
      idleTimeout.cancel(false)
    }
  }

  private def setupIdleTimeout(): Unit = {
    if (getActiveJobNum > 0) return
    val timeoutTask = new Runnable() {
      override def run(): Unit = {
        // scalastyle:off
        val driverUrl = conf
          .get(
            "spark.org.apache.hadoop.yarn.server.webproxy.amfilter.AmIpFilter.param.PROXY_URI_BASES")
          .split(",")
          .head
        val a = <h2>Your Spark application</h2>
            <a href={driverUrl}>{driverUrl}</a>
            <h2>has been idle for more than {conf.get(
              s"${Monitor.PREFIX}.${item}.timeout", "1h")}</h2>
            <h2>Please shut it down promptly</h2>
        val message = new HtmlMessage(title = item, content = a.mkString)
        alarms.foreach(_.alarm(message))
        // scalastyle:on
      }
    }

    val timeout = delayThread
      .scheduleWithFixedDelay(timeoutTask, endureLimit, endureLimit, TimeUnit.MILLISECONDS)
    // If there's already an idle task registered, then cancel the new one.
    if (!this.idleTimeout.compareAndSet(null, timeout)) {
      timeout.cancel(false)
    }
    // If a new client connected while the idle task was being set up, then stop the task.
    if (getActiveJobNum > 0) stopIdleTimeout()
  }

  override def watchOut(event: SparkListenerEvent): Option[AlertMessage] = {
    event match {
      case env: SparkListenerJobStart =>
        stopIdleTimeout()
        Option.empty
      case env: SparkListenerJobEnd =>
        setupIdleTimeout()
        Option.empty
      case _ =>
        Option.empty
    }
  }
} 
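ApplicationIdleMonitor arms an idle alarm with scheduleWithFixedDelay and disarms it by cancelling the returned ScheduledFuture; the AtomicReference compare-and-set avoids leaking a task when two listener events race. A stripped-down sketch of just the arm/disarm mechanics (class and method names are illustrative, not from XSQL):

import java.util.concurrent.{ Executors, ScheduledFuture, TimeUnit }
import java.util.concurrent.atomic.AtomicReference

class IdleAlarm(timeoutMillis: Long)(onIdle: () => Unit) {
  private val scheduler = Executors.newScheduledThreadPool(1)
  private val pending   = new AtomicReference[ScheduledFuture[_]]()

  // Arm the alarm; if another thread armed it first, cancel the duplicate task.
  def arm(): Unit = {
    val task = scheduler.scheduleWithFixedDelay(new Runnable {
      override def run(): Unit = onIdle()
    }, timeoutMillis, timeoutMillis, TimeUnit.MILLISECONDS)
    if (!pending.compareAndSet(null, task)) task.cancel(false)
  }

  // Disarm: cancel whatever is currently scheduled, if anything.
  def disarm(): Unit = {
    val task = pending.getAndSet(null)
    if (task != null) task.cancel(false)
  }
}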
Example 36
Source File: MockedDefaultSourceSuite.scala    From HANAVora-Extensions   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark

import java.util.concurrent.{Callable, Executors}

import com.sap.spark.dsmock.DefaultSource
import org.apache.spark.sql.sources.HashPartitioningFunction
import org.apache.spark.sql.{GlobalSapSQLContext, Row, SQLContext}
import org.mockito.Matchers._
import org.mockito.Mockito._
import org.scalatest.FunSuite

import scala.concurrent.duration._


class MockedDefaultSourceSuite
  extends FunSuite
  with GlobalSapSQLContext {

  val testTimeout = 10 // seconds

  private def numberOfThreads: Int = {
    val noOfCores = Runtime.getRuntime.availableProcessors()
    assert(noOfCores > 0)

    if (noOfCores == 1) 2 // It should always be multithreaded although only
                          // one processor is available (pseudo-multithreading)
    else noOfCores
  }

  def runMultiThreaded[A](op: Int => A): Seq[A] = {
    info(s"Running with $numberOfThreads threads")
    val pool = Executors.newFixedThreadPool(numberOfThreads)

    val futures = 1 to numberOfThreads map { i =>
      val task = new Callable[A] {
        override def call(): A = op(i)
      }
      pool.submit(task)
    }

    futures.map(_.get(testTimeout, SECONDS))
  }

  test("Underlying mocks of multiple threads are distinct") {
    val dataSources = runMultiThreaded { _ =>
      DefaultSource.withMock(identity)
    }

    dataSources foreach { current =>
      val sourcesWithoutCurrent = dataSources.filter(_.ne(current))
      assert(sourcesWithoutCurrent.forall(_.underlying ne current))
    }
  }

  test("Mocking works as expected") {
    runMultiThreaded { i =>
      DefaultSource.withMock { defaultSource =>
        when(defaultSource.getAllPartitioningFunctions(
          anyObject[SQLContext],
          anyObject[Map[String, String]]))
          .thenReturn(Seq(HashPartitioningFunction(s"foo$i", Seq.empty, None)))

        val Array(Row(name)) = sqlc
          .sql("SHOW PARTITION FUNCTIONS USING com.sap.spark.dsmock")
          .select("name")
          .collect()

        assertResult(s"foo$i")(name)
      }
    }
  }
} 
Example 37
Source File: ThrottlingConfig.scala    From maha   with Apache License 2.0 5 votes vote down vote up
package com.yahoo.maha.worker.throttling

import java.util.concurrent.Executors

import com.yahoo.maha.core.Engine
import com.yahoo.maha.job.service.{JobMetadata, JobStatus, JobType}
import com.yahoo.maha.worker.request.MahaWorkerRequest
import grizzled.slf4j.Logging
import org.joda.time.{DateTime, DateTimeZone}

import scala.concurrent.{Await, ExecutionContext}
import scala.concurrent.duration._
import scala.util.{Failure, Success}


case class EngineBasedThrottler(throttlingConfig: EngineThrottlingConfig, jobMetadata : JobMetadata, jobMetaExecConfig: JobMetaExecConfig)  extends Throttler with Logging {

  implicit val executor = ExecutionContext.fromExecutor(Executors.newFixedThreadPool(jobMetaExecConfig.poolSize))


  override def throttle(mahaWorkerRequest: MahaWorkerRequest): Boolean = {
    val engine: Engine = mahaWorkerRequest.engine

    val jobType:JobType = {
      val jobTypeOption = JobType.getJobType(engine)
      require(jobTypeOption.isDefined, s"Unable to get the job type for engine $engine")
      jobTypeOption.get
    }

    var timesChecked = 0
    var countOfRunningJobs = getRunningJobs(engine, jobType)
    while (countOfRunningJobs > throttlingConfig.countThreshold && timesChecked < throttlingConfig.maxChecks) {
      warn(s"Throttling: Number of running jobs ($countOfRunningJobs) exceeds threshold (${throttlingConfig.countThreshold}). Checked $timesChecked times.")
      Thread.sleep(throttlingConfig.checkDelayMs)
      countOfRunningJobs = getRunningJobs(engine, jobType)
      timesChecked += 1
    }
    if (timesChecked == throttlingConfig.maxChecks && countOfRunningJobs > throttlingConfig.countThreshold) {
      warn(s"Timeout: Count of running jobs exceeds threshold even after ${throttlingConfig.checkDelayMs * throttlingConfig.maxChecks} ms. Continuing to process to avoid increasing PULSAR/KAFKA backlog.")
      //monManager.incrementMetric(Metrics.ThrottleCheckTimeouts)
    }
    info(s"Number of running jobs ($countOfRunningJobs) below threshold (${throttlingConfig.countThreshold}), proceeding to process message.")

    timesChecked > 0
  }

  def getRunningJobs(engine: Engine, jobType:JobType): Int = {
    try {
      val jobCreatedTs = DateTime.now(DateTimeZone.UTC).minusMinutes(throttlingConfig.lookbackMins)
      val countOfRunningJobsFuture = jobMetadata.countJobsByTypeAndStatus(jobType, JobStatus.RUNNING, jobCreatedTs)

      Await.result(countOfRunningJobsFuture, jobMetaExecConfig.maxWaitMills millis)

      val runningJobCount: Int = if(countOfRunningJobsFuture.value.isEmpty) {
        warn(s"Failed to get the runningJobCount within ${jobMetaExecConfig.maxWaitMills} ms")
        0
      } else {
        countOfRunningJobsFuture.value.get match {
          case Success(count) =>  count
          case Failure(t) =>  {
            error(s"Failed to get the result from jobMeta ${t.getMessage}", t)
            0
          }
        }
      }
      runningJobCount
    } catch {
      case e:Exception =>
        e.printStackTrace()
        0
    }
  }

} 
Example 38
Source File: TaskSpec.scala    From arrows   with Apache License 2.0 5 votes vote down vote up
package arrows.stdlib

import language.postfixOps
import scala.concurrent._
import scala.concurrent.duration._
import scala.util._
import scala.concurrent.ExecutionContext.Implicits.global
import java.util.concurrent.Executors

class TaskSpec extends Spec {

  object ex extends Exception

  def eval[T](t: Task[T]) = Await.result(t.run, Duration.Inf)
  def evalTry[T](t: Task[T]) = Try(eval(t))

  "async" - {
    "success" in {
      eval(Task.async(Future.successful(1))) mustEqual 1
    }
    "failure" in {
      evalTry(Task.async(Future.failed(ex))) mustEqual Failure(ex)
    }
  }

  "fromTry" - {
    "success" in {
      eval(Task.fromTry(Success(1))) mustEqual 1
    }
    "failure" in {
      evalTry(Task.fromTry(Failure(ex))) mustEqual Failure(ex)
    }
  }

  "value" in {
    eval(Task.successful(1)) mustEqual 1
  }

  "exception" in {
    evalTry(Task.failed(ex)) mustEqual Failure(ex)
  }

  "fork" in {
    val ex = Executors.newSingleThreadExecutor()
    try {
      implicit val ec = ExecutionContext.fromExecutor(ex)
      var thread: Thread = null
      eval(Task.fork(Task {
        thread = Thread.currentThread
      })(ec))
      thread != null && thread != Thread.currentThread() mustEqual true
    } finally {
      ex.shutdown()
    }
  }

  "never" in {
    Try(Await.result(Task.never.run, 5 millis)) match {
      case Failure(_: TimeoutException) =>
      case _                            => fail
    }
  }

  "apply" in {
    var c = false
    val t = Task(c = true)
    c mustEqual false
    eval(t)
    c mustEqual true
  }

  "sequence" - {
    "empty" in {
      eval(Task.sequence(List.empty[Task[Int]])).toList mustEqual Nil
    }
    "non-empty" in {
      val a1 = Task.successful(1)
      val a2 = Task.successful(2)
      val a = Task.sequence[Int, List](List(a1, a2))
      eval(a).toList mustEqual List(1, 2)
    }
  }
} 
Example 39
Source File: instances.scala    From cats-retry   with Apache License 2.0 5 votes vote down vote up
package retry
package alleycats

import cats.{Eval, Id}
import scala.concurrent.duration.FiniteDuration
import scala.concurrent.{Future, Promise}
import java.util.concurrent.{ThreadFactory, Executors}

object instances {
  implicit val threadSleepId: Sleep[Id] = new Sleep[Id] {
    def sleep(delay: FiniteDuration): Id[Unit] = Thread.sleep(delay.toMillis)
  }

  implicit val threadSleepEval: Sleep[Eval] = new Sleep[Eval] {
    def sleep(delay: FiniteDuration): Eval[Unit] =
      Eval.later(Thread.sleep(delay.toMillis))
  }

  private lazy val scheduler =
    Executors.newSingleThreadScheduledExecutor(new ThreadFactory {
      override def newThread(runnable: Runnable) = {
        val t = new Thread(runnable)
        t.setDaemon(true)
        t.setName("cats-retry scheduler")
        t
      }
    })

  implicit val threadSleepFuture: Sleep[Future] =
    new Sleep[Future] {
      def sleep(delay: FiniteDuration): Future[Unit] = {
        val promise = Promise[Unit]()
        scheduler.schedule(new Runnable {
          def run: Unit = {
            promise.success(())
            ()
          }
        }, delay.length, delay.unit)
        promise.future
      }
    }
} 
Example 40
Source File: OAuthToken.scala    From spark-power-bi   with Apache License 2.0 5 votes vote down vote up
package com.granturing.spark.powerbi

import java.util.concurrent.{ExecutionException, TimeUnit, Executors}
import com.microsoft.aad.adal4j.{AuthenticationResult, AuthenticationCallback, AuthenticationContext}
import dispatch._
import org.apache.spark.Logging
import scala.concurrent.{Await, promise}
import scala.util.{Try, Failure, Success}

private class OAuthReq(token: OAuthTokenHandler) extends (Req => Req) {

  override def apply(req: Req): Req = {
    req <:< Map("Authorization" -> s"Bearer ${token()}")
  }

}

private class OAuthTokenHandler(authConf: ClientConf, initialToken: Option[String] = None) extends Logging {

  private var _token: Option[String] = initialToken

  def apply(refresh: Boolean = false): String = {
    _token match {
      case Some(s) if !refresh => s
      case _ => {
        refreshToken match {
          case Success(s) => {
            _token = Some(s)
            s
          }
          case Failure(e) => throw e
        }
      }
    }
  }

  private def refreshToken: Try[String] = {
    log.info("refreshing OAuth token")

    val service = Executors.newFixedThreadPool(1)
    val context = new AuthenticationContext(authConf.token_uri, true, service)

    val p = promise[AuthenticationResult]
    val future = p.future

    context.acquireToken(authConf.resource, authConf.clientid, authConf.username, authConf.password, new AuthenticationCallback {
      def onSuccess(result: AuthenticationResult): Unit = {
        p.success(result)
      }

      def onFailure(ex: Throwable): Unit = {
        p.failure(ex)
      }
    })

    try {
      val result = Await.result(future, authConf.timeout)

      log.info("OAuth token refresh successful")

      Success(result.getAccessToken)
    } catch {
      case e: ExecutionException => Failure(e.getCause)
      case t: Throwable => Failure(t)
    } finally {
      service.shutdown()
    }

  }

} 
Example 41
Source File: BufferedAmazonKinesis.scala    From aws-kinesis-scala   with Apache License 2.0 5 votes vote down vote up
package jp.co.bizreach.kinesis

import java.util.concurrent.{TimeUnit, Executors}

import com.amazonaws.ClientConfiguration
import com.amazonaws.auth.AWSCredentialsProvider
import com.amazonaws.regions.Regions

object BufferedAmazonKinesis {
  def apply(amount: Int, interval: Long)(implicit region: Regions): BufferedAmazonKinesis = {
    new BufferedAmazonKinesis(AmazonKinesis(), amount, interval)
  }
  def apply(credentials: AWSCredentialsProvider, amount: Int, interval: Long)(implicit region: Regions): BufferedAmazonKinesis = {
    new BufferedAmazonKinesis(AmazonKinesis(credentials), amount, interval)
  }
  def apply(config: ClientConfiguration, amount: Int, interval: Long)(implicit region: Regions): BufferedAmazonKinesis = {
    new BufferedAmazonKinesis(AmazonKinesis(config), amount, interval)
  }
  def apply(credentials: AWSCredentialsProvider, config: ClientConfiguration, amount: Int, interval: Long)(implicit region: Regions): BufferedAmazonKinesis = {
    new BufferedAmazonKinesis(AmazonKinesis(credentials, config), amount, interval)
  }
  def apply(client: AmazonKinesis, amount: Int, interval: Long): BufferedAmazonKinesis = {
    new BufferedAmazonKinesis(client, amount, interval)
  }
}

// TODO Would like to provide DiskBufferClient also
class BufferedAmazonKinesis(client: AmazonKinesis, amount: Int, interval: Long) {

  private val queue = new java.util.concurrent.ConcurrentLinkedQueue[Any]

  private val scheduler = Executors.newSingleThreadScheduledExecutor()
  scheduler.scheduleAtFixedRate(new BufferedKinesisSendTask(), 0, interval, TimeUnit.MILLISECONDS)

  def putRecord(request: PutRecordRequest): Unit = queue.add(request)

  def putRecords(request: PutRecordsRequest): Unit = queue.add(request)

  def shutdown(): Unit = {
    scheduler.shutdownNow()
    client.shutdown()
  }

  
  def error(e: Exception): Unit = {
    e.printStackTrace()
  }

  private class BufferedKinesisSendTask extends Runnable {

    override def run(): Unit = {
      try {
        val requests = for(i <- 1 to amount if queue.size() != 0) yield queue.poll()
        requests.foreach {
          case r: PutRecordRequest  => client.putRecord(r)
          case r: PutRecordsRequest => client.putRecords(r)
        }
      } catch {
        case e: Exception => error(e)
      }
    }
  }

} 
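BufferedAmazonKinesis pairs a ConcurrentLinkedQueue with a single-thread scheduled executor that drains the buffer at a fixed rate. A generic sketch of that buffer-and-flush pattern, with a hypothetical flush function standing in for the Kinesis client (none of these names come from aws-kinesis-scala):

import java.util.concurrent.{ ConcurrentLinkedQueue, Executors, TimeUnit }

class BufferedSender[A](flush: Seq[A] => Unit, amount: Int, intervalMillis: Long) {
  private val queue     = new ConcurrentLinkedQueue[A]()
  private val scheduler = Executors.newSingleThreadScheduledExecutor()

  // Periodically drain up to `amount` buffered elements and hand them to `flush`.
  scheduler.scheduleAtFixedRate(new Runnable {
    override def run(): Unit = {
      val batch = Iterator.continually(queue.poll()).takeWhile(_ != null).take(amount).toVector
      if (batch.nonEmpty) flush(batch)
    }
  }, 0, intervalMillis, TimeUnit.MILLISECONDS)

  def put(a: A): Unit = queue.add(a)

  def shutdown(): Unit = scheduler.shutdownNow()
}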
Example 42
Source File: DockerKit.scala    From docker-it-scala   with MIT License 5 votes vote down vote up
package com.whisk.docker

import java.util.concurrent.Executors

import org.slf4j.LoggerFactory

import scala.concurrent.duration._
import scala.concurrent.{Await, ExecutionContext, Future}
import scala.language.implicitConversions

trait DockerKit {
  implicit def dockerFactory: DockerFactory

  private lazy val log = LoggerFactory.getLogger(this.getClass)

  val PullImagesTimeout = 20.minutes
  val StartContainersTimeout = 20.seconds
  val StopContainersTimeout = 10.seconds

  def dockerContainers: List[DockerContainer] = Nil

  // we need ExecutionContext in order to run docker.init() / docker.stop() there
  implicit lazy val dockerExecutionContext: ExecutionContext = {
    // use Math.max to guard against a zero-sized pool when there are no docker containers
    ExecutionContext.fromExecutor(
      Executors.newFixedThreadPool(Math.max(1, dockerContainers.length * 2)))
  }
  implicit lazy val dockerExecutor = dockerFactory.createExecutor()

  lazy val containerManager = new DockerContainerManager(dockerContainers, dockerExecutor)

  def isContainerReady(container: DockerContainer): Future[Boolean] =
    containerManager.isReady(container)

  def getContainerState(container: DockerContainer): DockerContainerState = {
    containerManager.getContainerState(container)
  }

  implicit def containerToState(c: DockerContainer): DockerContainerState = {
    getContainerState(c)
  }

  def startAllOrFail(): Unit = {
    Await.result(containerManager.pullImages(), PullImagesTimeout)
    val allRunning: Boolean = try {
      val future: Future[Boolean] =
        containerManager.initReadyAll(StartContainersTimeout).map(_.map(_._2).forall(identity))
      sys.addShutdownHook(
        Await.ready(containerManager.stopRmAll(), StopContainersTimeout)
      )
      Await.result(future, StartContainersTimeout)
    } catch {
      case e: Exception =>
        log.error("Exception during container initialization", e)
        false
    }
    if (!allRunning) {
      Await.ready(containerManager.stopRmAll(), StopContainersTimeout)
      throw new RuntimeException("Cannot run all required containers")
    }
  }

  def stopAllQuietly(): Unit = {
    try {
      Await.ready(containerManager.stopRmAll(), StopContainersTimeout)
    } catch {
      case e: Throwable =>
        log.error(e.getMessage, e)
    }
  }

} 
Example 43
Source File: RunServer.scala    From mleap   with Apache License 2.0 5 votes vote down vote up
package ml.combust.mleap.grpc.server

import java.util.concurrent.{Executors, TimeUnit}

import akka.Done
import akka.actor.{ActorSystem, CoordinatedShutdown}
import akka.stream.{ActorMaterializer, Materializer}
import com.typesafe.config.Config
import com.typesafe.scalalogging.Logger
import io.grpc.ServerBuilder
import ml.combust.mleap.executor.MleapExecutor
import ml.combust.mleap.pb.MleapGrpc

import scala.concurrent.{ExecutionContext, Future}
import scala.language.existentials
import scala.util.{Failure, Success, Try}

class RunServer(config: Config)
               (implicit system: ActorSystem) {
  private val logger = Logger(classOf[RunServer])

  private var coordinator: Option[CoordinatedShutdown] = None

  def run(): Unit = {
    Try {
      logger.info("Starting MLeap gRPC Server")

      val coordinator = CoordinatedShutdown(system)
      this.coordinator = Some(coordinator)

      implicit val materializer: Materializer = ActorMaterializer()

      val grpcServerConfig = new GrpcServerConfig(config.getConfig("default"))
      val mleapExecutor = MleapExecutor(system)
      val port: Int = config.getInt("port")
      val threads: Option[Int] = if (config.hasPath("threads")) Some(config.getInt("threads")) else None
      val threadCount = threads.getOrElse {
        Math.min(Math.max(Runtime.getRuntime.availableProcessors() * 4, 32), 64)
      }

      logger.info(s"Creating thread pool for server with size $threadCount")
      val grpcThreadPool = Executors.newFixedThreadPool(threadCount)
      implicit val ec: ExecutionContext = ExecutionContext.fromExecutor(grpcThreadPool)

      coordinator.addTask(CoordinatedShutdown.PhaseServiceRequestsDone, "threadPoolShutdownNow") {
        () =>
          Future {
            logger.info("Shutting down gRPC thread pool")
            grpcThreadPool.shutdown()
            grpcThreadPool.awaitTermination(5, TimeUnit.SECONDS)

            Done
          }
      }

      logger.info(s"Creating executor service")
      val grpcService: GrpcServer = new GrpcServer(mleapExecutor, grpcServerConfig)
      val builder = ServerBuilder.forPort(port)
      builder.intercept(new ErrorInterceptor)
      builder.addService(MleapGrpc.bindService(grpcService, ec))
      val grpcServer = builder.build()

      logger.info(s"Starting server on port $port")
      grpcServer.start()

      coordinator.addTask(CoordinatedShutdown.PhaseServiceUnbind, "grpcServiceShutdown") {
        () =>
          Future {
            logger.info("Shutting down gRPC")
            grpcServer.shutdown()
            grpcServer.awaitTermination(10, TimeUnit.SECONDS)
            Done
          }(ExecutionContext.global)
      }

      coordinator.addTask(CoordinatedShutdown.PhaseServiceStop, "grpcServiceShutdownNow") {
        () =>
          Future {
            if (!grpcServer.isShutdown) {
              logger.info("Shutting down gRPC NOW!")

              grpcServer.shutdownNow()
              grpcServer.awaitTermination(5, TimeUnit.SECONDS)
            }

            Done
          }(ExecutionContext.global)
      }
    } match {
      case Success(_) =>
      case Failure(err) =>
        logger.error("Error encountered starting server", err)
        for (c <- this.coordinator) {
          c.run(CoordinatedShutdown.UnknownReason)
        }
        throw err
    }
  }
} 
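The shutdown tasks above use a two-phase stop: a graceful shutdown() with a bounded awaitTermination, followed by shutdownNow() if work is still outstanding. A minimal sketch of that two-phase pattern on a bare ExecutorService (nothing here is specific to gRPC or MLeap):

import java.util.concurrent.{ ExecutorService, TimeUnit }

object TwoPhaseShutdown {
  // Ask the pool to finish in-flight work; force-stop whatever remains after the grace period.
  def stop(pool: ExecutorService, graceSeconds: Long): Unit = {
    pool.shutdown()
    if (!pool.awaitTermination(graceSeconds, TimeUnit.SECONDS)) {
      val dropped = pool.shutdownNow() // returns the tasks that never started
      println(s"forced shutdown, ${dropped.size} queued tasks dropped")
    }
  }
}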
Example 44
Source File: HttpRepository.scala    From mleap   with Apache License 2.0 5 votes vote down vote up
package ml.combust.mleap.executor.repository

import java.net.URI
import java.nio.file.{Files, Path}
import java.util.concurrent.Executors

import akka.actor.ActorSystem
import com.typesafe.config.{Config, ConfigFactory}

import scala.concurrent.{ExecutionContext, Future}
import scala.concurrent.duration.TimeUnit

object HttpRepositoryConfig {
  val defaults: Config = ConfigFactory.load().getConfig("ml.combust.mleap.executor.repository-defaults.http")
}

class HttpRepositoryConfig(_config: Config) {
  val config: Config = _config.withFallback(FileRepositoryConfig.defaults)

  val threads: Int = config.getInt("threads")
}

class HttpRepository(config: HttpRepositoryConfig) extends Repository {
  private val threadPool = Executors.newFixedThreadPool(config.threads)
  implicit val diskEc: ExecutionContext = ExecutionContext.fromExecutor(threadPool)

  override def downloadBundle(uri: URI): Future[Path] = Future {
    val tmpFile = Files.createTempFile("mleap", ".bundle.zip")
    Files.copy(uri.toURL.openStream(), tmpFile)
    tmpFile
  }

  override def canHandle(uri: URI): Boolean = uri.getScheme == "http" || uri.getScheme == "https"

  override def shutdown(): Unit = threadPool.shutdown()

  override def awaitTermination(timeout: Long, unit: TimeUnit): Unit = threadPool.awaitTermination(timeout, unit)
}

object HttpRepositoryProvider extends RepositoryProvider {
  override def create(config: Config)
                     (implicit system: ActorSystem): HttpRepository = {
    new HttpRepository(new HttpRepositoryConfig(config))
  }
} 
Example 45
Source File: FileRepository.scala    From mleap   with Apache License 2.0 5 votes vote down vote up
package ml.combust.mleap.executor.repository

import java.io.File
import java.net.URI
import java.nio.file.{Files, Path, StandardCopyOption}
import java.util.concurrent.Executors

import akka.actor.ActorSystem
import com.typesafe.config.{Config, ConfigFactory}
import ml.combust.mleap.executor.error.BundleException

import scala.concurrent.duration.TimeUnit
import scala.concurrent.{ExecutionContext, Future}

object FileRepositoryConfig {
  val defaults: Config = ConfigFactory.load().getConfig("ml.combust.mleap.executor.repository-defaults.file")
}

class FileRepositoryConfig(_config: Config) {
  val config: Config = _config.withFallback(FileRepositoryConfig.defaults)

  val move: Boolean = config.getBoolean("move")
  val threads: Int = config.getInt("threads")
}

class FileRepository(config: FileRepositoryConfig) extends Repository {
  private val threadPool = Executors.newFixedThreadPool(config.threads)
  implicit val diskEc: ExecutionContext = ExecutionContext.fromExecutor(threadPool)

  def this() = this(new FileRepositoryConfig(FileRepositoryConfig.defaults))

  override def downloadBundle(uri: URI): Future[Path] = Future {

    if (uri.getPath.isEmpty) {
      throw new BundleException("file path cannot be empty")
    }

    val local = new File(uri.getPath).toPath
    if (!Files.exists(local)) {
      throw new BundleException(s"file does not exist $local")
    }

    if (config.move) {
      val tmpFile = Files.createTempFile("mleap", ".bundle.zip")
      Files.copy(local, tmpFile, StandardCopyOption.REPLACE_EXISTING)
      tmpFile.toFile.deleteOnExit()
      tmpFile
    } else {
      local
    }
  }

  override def canHandle(uri: URI): Boolean = uri.getScheme == "file" || uri.getScheme == "jar:file"

  override def shutdown(): Unit = threadPool.shutdown()

  override def awaitTermination(timeout: Long, unit: TimeUnit): Unit = threadPool.awaitTermination(timeout, unit)
}

object FileRepositoryProvider extends RepositoryProvider {
  override def create(tConfig: Config)
                     (implicit system: ActorSystem): Repository = {
    val config = new FileRepositoryConfig(tConfig)

    new FileRepository(config)
  }
} 
Example 46
Source File: S3Repository.scala    From mleap   with Apache License 2.0 5 votes vote down vote up
package ml.combust.mleap.repository.s3

import java.net.URI
import java.nio.file.{Files, Path}
import java.util.concurrent.Executors

import akka.actor.ActorSystem
import com.amazonaws.services.s3.{AmazonS3ClientBuilder, AmazonS3URI}
import com.typesafe.config.Config
import ml.combust.mleap.executor.repository.{Repository, RepositoryProvider}

import scala.concurrent.{ExecutionContext, Future}
import scala.concurrent.duration.TimeUnit
import scala.util.Try

class S3RepositoryConfig(config: Config) {
  val threads: Int = config.getInt("threads")
}

class S3Repository(config: S3RepositoryConfig) extends Repository {
  private val client = AmazonS3ClientBuilder.defaultClient()
  private val threadPool = Executors.newFixedThreadPool(config.threads)
  implicit val diskEc: ExecutionContext = ExecutionContext.fromExecutor(threadPool)

  override def downloadBundle(uri: URI): Future[Path] = Future {
    val s3Uri = new AmazonS3URI(uri)
    val bucket = s3Uri.getBucket
    val key = s3Uri.getKey

    val tmpFile = Files.createTempFile("mleap", ".bundle.zip")
    Files.copy(client.getObject(bucket, key).getObjectContent, tmpFile)
    tmpFile
  }

  override def canHandle(uri: URI): Boolean = Try(new AmazonS3URI(uri)).isSuccess

  override def shutdown(): Unit = threadPool.shutdown()
  override def awaitTermination(timeout: Long, unit: TimeUnit): Unit = threadPool.awaitTermination(timeout, unit)
}

class S3RepositoryProvider extends RepositoryProvider {
  override def create(config: Config)
                     (implicit system: ActorSystem): S3Repository = {
    new S3Repository(new S3RepositoryConfig(config))
  }
} 
Example 47
Source File: CancelableFutureSpecJVM.scala    From zio   with Apache License 2.0 5 votes vote down vote up
package zio

import java.util.concurrent.Executors

import scala.concurrent.ExecutionContext

import zio.duration._
import zio.internal.Executor
import zio.test.Assertion._
import zio.test.TestAspect._
import zio.test._

object CancelableFutureSpecJVM extends ZIOBaseSpec {

  import ZIOTag._

  def spec =
    suite("CancelableFutureSpecJVM")(
      testM("fromFuture/unsafeRunToFuture doesn't deadlock") {

        val tst =
          for {
            runtime <- ZIO.runtime[Any]
            r       <- ZIO.fromFuture(_ => runtime.unsafeRunToFuture(UIO.succeedNow(0)))
          } yield assert(r)(equalTo(0))
        ZIO
          .runtime[Any]
          .map(
            _.mapPlatform(
              _.withExecutor(
                Executor.fromExecutionContext(1)(
                  ExecutionContext.fromExecutor(Executors.newSingleThreadScheduledExecutor())
                )
              )
            ).unsafeRun(tst)
          )
      } @@ timeout(1.second)
    ) @@ zioTag(future)
} 
Example 48
package com.tomekl007.chapter_2

import java.util.concurrent.{CountDownLatch, Executors}

import org.scalatest.FunSuite

import scala.collection.mutable.ListBuffer

class MultithreadedImmutabilityTest extends FunSuite {

  test("warning: race condition with mutability") {
    //given
    var listMutable = new ListBuffer[String]()
    val executors = Executors.newFixedThreadPool(2)
    val latch = new CountDownLatch(2)

    //when
    executors.submit(new Runnable {
      override def run(): Unit = {
        latch.countDown()
        listMutable += "A"
      }
    })

    executors.submit(new Runnable {
      override def run(): Unit = {
        latch.countDown()
        if(!listMutable.contains("A")) {
          listMutable += "A"
        }
      }
    })

    latch.await()

    //then
    //listMutable can have ("A") or ("A","A")

  }

} 
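The test above leaves the race in place on purpose. One way to make the outcome deterministic is to serialize the check-then-add on the shared buffer, for example by synchronizing on it; a minimal sketch of that fix (illustrative only, not taken from the original project):

import java.util.concurrent.{ CountDownLatch, Executors, TimeUnit }

import scala.collection.mutable.ListBuffer

object SynchronizedAddSketch extends App {
  val listMutable = new ListBuffer[String]()
  val executors   = Executors.newFixedThreadPool(2)
  val latch       = new CountDownLatch(2)

  // Both tasks guard the check-then-add with the same lock, so "A" is added exactly once.
  def addOnce(): Unit = {
    latch.countDown()
    listMutable.synchronized {
      if (!listMutable.contains("A")) listMutable += "A"
    }
  }

  executors.submit(new Runnable { override def run(): Unit = addOnce() })
  executors.submit(new Runnable { override def run(): Unit = addOnce() })

  latch.await()
  executors.shutdown()
  executors.awaitTermination(10, TimeUnit.SECONDS)
  println(listMutable) // ListBuffer(A)
}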
Example 49
Source File: DI_06_ProviderBinding.scala    From airframe   with Apache License 2.0 5 votes vote down vote up
package wvlet.airframe.examples.di

import java.util.concurrent.Executors

import wvlet.log.LogSupport


object DI_06_ProviderBinding extends App {
  import wvlet.airframe._

  case class MyAppConfig(numThreads: Int = 5)

  trait MyApp extends LogSupport {
    // MyAppConfig will be injected from the session
    private val threadManager = bind { config: MyAppConfig =>
      info(s"config: numThreads = ${config.numThreads}")
      // Create a thread manager using the given config
      Executors.newFixedThreadPool(config.numThreads)
    }.onShutdown(_.shutdown()) // Add a clean-up step

    def run: Unit = {
      threadManager.submit(new Runnable {
        override def run(): Unit = {
          logger.info("Hello Provider!")
        }
      })
    }
  }

  val d = newSilentDesign
    .bind[MyAppConfig].toInstance(MyAppConfig(numThreads = 2))

  d.build[MyApp] { app => app.run }
} 
Example 50
Source File: DI_05_LifecycleHooks.scala    From airframe   with Apache License 2.0 5 votes vote down vote up
package wvlet.airframe.examples.di

import java.util.concurrent.{Executor, ExecutorService, Executors}

import wvlet.log.LogSupport


object DI_05_LifecycleHooks extends App {
  import wvlet.airframe._

  trait MyApp extends LogSupport {
    private val threadManager = bind[ExecutorService] { Executors.newCachedThreadPool() }
      .onStart { x => info(f"Started a thread manager: ${x.hashCode()}%x") }
      .onShutdown { x =>
        info(f"Shutting down the thread manager: ${x.hashCode()}%x")
        x.shutdown()
      }
  }

  val d = newDesign

  d.build[MyApp] { app =>
    // Thread manager will start here
  }
  // Thread manager will be shutdown here.
} 
Example 51
Source File: BuildInFutureTest.scala    From airframe   with Apache License 2.0 5 votes vote down vote up
package wvlet.airframe

import java.util.concurrent.Executors

import wvlet.airspec.AirSpec

import scala.concurrent.duration.Duration
import scala.concurrent.{Await, ExecutionContext, Future}

case class Config1(port: Int = 8080)
case class Config2()

class BuildInFutureTest extends AirSpec {

  // We need to use an executor which can load application classes #918.
  //
  // https://github.com/sbt/sbt/issues/5410
  private val threadPool              = Executors.newCachedThreadPool()
  private implicit val futureExecutor = ExecutionContext.fromExecutor(threadPool)

  override protected def afterAll: Unit = {
    threadPool.shutdownNow()
  }

  def `Building in Future causes MISSING_DEPENDENCY` = {
    val f = Future {
      newSilentDesign.build[Config1] { config => debug(config) }
    }
    Await.result(f, Duration.Inf)
  }

  def `Building in Future causes java.lang.ClassCastException` = {
    val f = Future {
      newSilentDesign
        .bind[Config2].toInstance(Config2())
        .build[Config1] { config => debug(config) }
    }
    Await.result(f, Duration.Inf)
  }
} 
Example 52
Source File: AsyncHandler.scala    From airframe   with Apache License 2.0 5 votes vote down vote up
package wvlet.log

import java.io.Flushable
import java.util
import java.util.concurrent.atomic.AtomicBoolean
import java.util.concurrent.{Executors, ThreadFactory}
import java.util.{logging => jl}


class AsyncHandler(parent: jl.Handler) extends jl.Handler with Guard with AutoCloseable with Flushable {
  private val executor = {
    Executors.newCachedThreadPool(
      new ThreadFactory {
        override def newThread(r: Runnable): Thread = {
          val t = new Thread(r, "WvletLogAsyncHandler")
          t.setDaemon(true)
          t
        }
      }
    )
  }

  private val queue      = new util.ArrayDeque[jl.LogRecord]
  private val isNotEmpty = newCondition
  private val closed     = new AtomicBoolean(false)

  // Start a poller thread
  executor.submit(new Runnable {
    override def run(): Unit = {
      while (!closed.get()) {
        val record: jl.LogRecord = guard {
          if (queue.isEmpty) {
            isNotEmpty.await()
          }
          queue.pollFirst()
        }
        if (record != null) {
          parent.publish(record)
        }
      }
    }
  })

  override def flush(): Unit = {
    val records = Seq.newBuilder[jl.LogRecord]
    guard {
      while (!queue.isEmpty) {
        records += queue.pollFirst()
      }
    }

    records.result.map(parent.publish _)
    parent.flush()
  }

  override def publish(record: jl.LogRecord): Unit = {
    guard {
      queue.addLast(record)
      isNotEmpty.signal()
    }
  }

  override def close(): Unit = {
    if (closed.compareAndSet(false, true)) {
      flush()
      // Wake up the poller thread
      guard {
        isNotEmpty.signalAll()
      }
      executor.shutdown()
    }
  }
} 
Example 53
Source File: RerunnableBenchmark.scala    From catbird   with Apache License 2.0 5 votes vote down vote up
package io.catbird.benchmark

import com.twitter.util.{ Await, Future, FuturePool }
import io.catbird.util.Rerunnable
import java.util.concurrent.{ ExecutorService, Executors, TimeUnit }
import org.openjdk.jmh.annotations._


@State(Scope.Thread)
@BenchmarkMode(Array(Mode.Throughput))
@OutputTimeUnit(TimeUnit.SECONDS)
class RerunnableBenchmark {
  val count: Int = 100000
  val numbers: IndexedSeq[Int] = 0 to count
  var es: ExecutorService = _
  var pool: FuturePool = _

  @Setup
  def initPool(): Unit = {
    es = Executors.newFixedThreadPool(4)
    pool = FuturePool(es)
  }

  @TearDown
  def shutdownPool(): Unit = es.shutdown()

  @Benchmark
  def sumIntsF: Int = Await.result(
    numbers.foldLeft(Future(0)) {
      case (acc, i) => acc.flatMap(prev => Future(prev + i))
    }
  )

  @Benchmark
  def sumIntsR: Int = Await.result(
    numbers
      .foldLeft(Rerunnable(0)) {
        case (acc, i) => acc.flatMap(prev => Rerunnable(prev + i))
      }
      .run
  )

  @Benchmark
  def sumIntsPF: Int = Await.result(
    numbers.foldLeft(pool(0)) {
      case (acc, i) => acc.flatMap(prev => pool(prev + i))
    }
  )

  @Benchmark
  def sumIntsPR: Int = Await.result(
    numbers
      .foldLeft(Rerunnable.withFuturePool(pool)(0)) {
        case (acc, i) => acc.flatMap(prev => Rerunnable.withFuturePool(pool)(prev + i))
      }
      .run
  )
} 
Example 54
Source File: ThreadPoolNamingSupport.scala    From catbird   with Apache License 2.0 5 votes vote down vote up
package io.catbird.util.effect

import java.lang.{ Runnable, Thread }
import java.util.concurrent.{ Executors, ThreadFactory }

import scala.concurrent.{ ExecutionContext, ExecutionContextExecutorService }

trait ThreadPoolNamingSupport {

  def newNamedThreadPool(name: String): ExecutionContextExecutorService =
    ExecutionContext.fromExecutorService(
      Executors.newSingleThreadExecutor(new ThreadFactory {
        override def newThread(r: Runnable): Thread = {
          val thread = Executors.defaultThreadFactory().newThread(r)
          thread.setName(name)
          thread.setDaemon(true) // Don't block shutdown of JVM
          thread
        }
      })
    )

  def currentThreadName(): String = Thread.currentThread().getName
} 
Example 55
Source File: ExecutorService.scala    From grafter   with MIT License 5 votes vote down vote up
package org.zalando.grafter.examples.simple

import java.util.concurrent.Executors

import cats.Eval
import org.zalando.grafter._
import org.zalando.grafter.macros.reader

@reader
case class ExecutorService(config: ThreadPoolConfig) extends Start with Stop {
  implicit lazy val executor = Executors.newFixedThreadPool(config.threadsNb)

  def start: Eval[StartResult] =
    StartResult.eval("executor") {
      executor
    }

  def stop: Eval[StopResult] =
    StopResult.eval("executor") {
      executor.shutdown
    }
}

case class ThreadPoolConfig(threadsNb: Int) 
Example 56
Source File: WixHttpTestkitResources.scala    From wix-http-testkit   with MIT License 5 votes vote down vote up
package com.wix.e2e.http

import java.util.concurrent.Executors

import akka.actor.ActorSystem
import akka.stream.SystemMaterializer
import com.wix.e2e.http.utils._

import scala.concurrent.ExecutionContext
import scala.xml.PrettyPrinter

object WixHttpTestkitResources {
  implicit val system = ActorSystem("wix-http-testkit")
  implicit val materializer = SystemMaterializer.get(system).materializer
  private val threadPool = Executors.newCachedThreadPool
  implicit val executionContext = ExecutionContext.fromExecutor(threadPool)

  system.registerOnTermination {
    threadPool.shutdownNow()
  }

  def xmlPrinter = new PrettyPrinter(80, 2)

  sys.addShutdownHook {
    system.terminate()
    waitFor(system.whenTerminated)
  }
} 
Example 57
Source File: KeyVaultADALAuthenticator.scala    From azure-kusto-spark   with Apache License 2.0 5 votes vote down vote up
package com.microsoft.kusto.spark.utils

import java.net.MalformedURLException
import java.util.concurrent.{ExecutionException, ExecutorService, Executors, Future}

import com.microsoft.aad.adal4j.{AuthenticationContext, AuthenticationResult, ClientCredential}
import com.microsoft.azure.keyvault.KeyVaultClient
import com.microsoft.azure.keyvault.authentication.KeyVaultCredentials


class KeyVaultADALAuthenticator(clientId: String, clientKey: String) {

  def getAuthenticatedClient: KeyVaultClient = {
    // Creates the KeyVaultClient using the created credentials.
    new KeyVaultClient(createCredentials)
  }

  private def createCredentials: KeyVaultCredentials = {
    new KeyVaultCredentials() { //Callback that supplies the token type and access token on request.
      override def doAuthenticate(authorization: String, resource: String, scope: String): String = {
        try {
          val authResult = getAccessToken(authorization, resource)
          authResult.getAccessToken
        } catch {
          case e: Exception =>
            KustoDataSourceUtils.logError("KeyVaultADALAuthenticator", "Exception trying to access Key Vault:" + e.getMessage)
            ""
        }
      }
    }
  }

  @throws[InterruptedException]
  @throws[ExecutionException]
  @throws[MalformedURLException]
  private def getAccessToken(authorization: String, resource: String): AuthenticationResult  = {
    var result: AuthenticationResult = null
    var service: ExecutorService = null

    //Starts a service to fetch access token.
    try {
      service = Executors.newFixedThreadPool(1)
      val context = new AuthenticationContext(authorization, false, service)

      //Acquires token based on client ID and client secret.
      var future: Future[AuthenticationResult] = null
      if (clientId != null && clientKey != null) {
        val credentials = new ClientCredential(clientId, clientKey)
        future = context.acquireToken(resource, credentials, null)
      }

      result = future.get
    } finally service.shutdown()
    if (result == null) throw new RuntimeException("Authentication results were null.")
    result
  }
} 
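The method above follows a common pattern: run one blocking call on a dedicated single-thread pool and always shut the pool down afterwards. A generic sketch of that pattern with plain JDK types (an illustration, not code from the Kusto connector) looks like this:

import java.util.concurrent.{Callable, Executors}

def blockingCallOnDedicatedPool[A](work: () => A): A = {
  // One-off pool for a single blocking call
  val service = Executors.newFixedThreadPool(1)
  try service.submit(new Callable[A] { override def call(): A = work() }).get()
  finally service.shutdown()
}

// e.g. blockingCallOnDedicatedPool(() => fetchToken())  // fetchToken is a hypothetical blocking call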
Example 58
Source File: StoreOpsTest.scala    From fs2-blobstore   with Apache License 2.0 5 votes vote down vote up
package blobstore

import java.nio.charset.Charset
import java.nio.file.Files
import java.util.concurrent.Executors

import cats.effect.{Blocker, IO}
import cats.effect.laws.util.TestInstances
import fs2.{Pipe, Stream}
import org.scalatest.Assertion
import org.scalatest.flatspec.AnyFlatSpec
import implicits._
import org.scalatest.matchers.must.Matchers

import scala.collection.mutable.ArrayBuffer
import scala.concurrent.ExecutionContext

class StoreOpsTest extends AnyFlatSpec with Matchers with TestInstances {

  implicit val cs = IO.contextShift(ExecutionContext.global)
  val blocker     = Blocker.liftExecutionContext(ExecutionContext.fromExecutor(Executors.newCachedThreadPool))

  behavior of "PutOps"
  it should "buffer contents and compute size before calling Store.put" in {
    val bytes: Array[Byte] = "AAAAAAAAAA".getBytes(Charset.forName("utf-8"))
    val store              = DummyStore(_.size must be(Some(bytes.length)))

    Stream
      .emits(bytes)
      .covary[IO]
      .through(store.bufferedPut(Path("path/to/file.txt"), blocker))
      .compile
      .drain
      .unsafeRunSync()
    store.buf.toArray must be(bytes)

  }

  it should "upload a file from a nio Path" in {
    val bytes = "hello".getBytes(Charset.forName("utf-8"))
    val store = DummyStore(_.size must be(Some(bytes.length)))

    Stream
      .bracket(IO(Files.createTempFile("test-file", ".bin"))) { p => IO(p.toFile.delete).void }
      .flatMap { p =>
        Stream.emits(bytes).covary[IO].through(fs2.io.file.writeAll(p, blocker)).drain ++
          Stream.eval(store.put(p, Path("path/to/file.txt"), blocker))
      }
      .compile
      .drain
      .unsafeRunSync()
    store.buf.toArray must be(bytes)
  }

  it should "download a file to a nio path" in {
    val bytes = "hello".getBytes(Charset.forName("utf-8"))
    val store = DummyStore(_ => succeed)
    val path  = Path("path/to/file.txt")
    Stream.emits(bytes).through(store.put(path)).compile.drain.unsafeRunSync()

    Stream
      .bracket(IO(Files.createTempFile("test-file", ".bin")))(p => IO(p.toFile.delete).void)
      .flatMap { nioPath =>
        Stream.eval(store.get(path, nioPath, blocker)) >> Stream.eval {
          IO {
            Files.readAllBytes(nioPath) mustBe bytes
          }
        }
      }
      .compile
      .drain
      .unsafeRunSync()
  }
}

final case class DummyStore(check: Path => Assertion) extends Store[IO] {
  val buf = new ArrayBuffer[Byte]()
  override def put(path: Path, overwrite: Boolean): Pipe[IO, Byte, Unit] = {
    check(path)
    in => {
      buf.appendAll(in.compile.toVector.unsafeRunSync())
      Stream.emit(())
    }
  }
  override def get(path: Path, chunkSize: Int): Stream[IO, Byte]                   = Stream.emits(buf)
  override def list(path: Path, recursive: Boolean = false): Stream[IO, Path]      = ???
  override def move(src: Path, dst: Path): IO[Unit]                                = ???
  override def copy(src: Path, dst: Path): IO[Unit]                                = ???
  override def remove(path: Path): IO[Unit]                                        = ???
  override def putRotate(computePath: IO[Path], limit: Long): Pipe[IO, Byte, Unit] = ???
} 
Example 59
Source File: BosonInjectorValue.scala    From boson   with Apache License 2.0 5 votes vote down vote up
package io.zink.boson.impl

import io.zink.boson.Boson
import io.zink.boson.bson.bsonPath.Interpreter
import io.zink.boson.bson.value.Value
import shapeless.TypeCase

import scala.concurrent.{ExecutionContext, Future}

class BosonInjectorValue[T](expression: String, injectValue: Value)(implicit tp: Option[TypeCase[T]]) extends Boson {

  private val interpreter: Interpreter[T] = new Interpreter[T](expression, vInj = Some(Left(injectValue)))(tp, None)

  
  override def go(bsonByteEncoding: String): Future[String] = {
    import java.util.concurrent.Executors
    import scala.concurrent.JavaConversions.asExecutionContext
    implicit val context = asExecutionContext(Executors.newSingleThreadExecutor())
    Future {
      runInterpreter(Right(bsonByteEncoding)) match {
        case Right(jsonString) => jsonString
      }
    }
  }
} 
Example 60
Source File: OapRuntimeSuite.scala    From OAP   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.sql.oap

import java.util.concurrent.{Executors, ExecutorService, TimeUnit}

import org.apache.spark.sql.QueryTest
import org.apache.spark.sql.test.oap.SharedOapLocalClusterContext

class OapRuntimeSuite extends QueryTest with SharedOapLocalClusterContext {

  test("OapRuntime is created once") {
    val oapruntime = new Array[OapRuntime](2)
    val threadPool: ExecutorService = Executors.newFixedThreadPool(2)
    try {
      for (i <- 0 to 1) {
        threadPool.execute(new Runnable {
          override def run(): Unit = {
            oapruntime(i) = OapRuntime.getOrCreate
          }
        })
      }
      threadPool.awaitTermination(1000, TimeUnit.MILLISECONDS)
    } finally {
      threadPool.shutdown()
    }
    assert(oapruntime(0) == oapruntime(1))
  }

  test("get sparkSession from OapRuntime") {
    assert(OapRuntime.getOrCreate.sparkSession == spark)
  }
} 
Example 61
Source File: AsyncGuavaTests.scala    From freestyle   with Apache License 2.0 5 votes vote down vote up
package freestyle.async
package guava

import java.util.concurrent.{Callable, Executors}

import com.google.common.util.concurrent.{ListenableFuture, ListeningExecutorService, MoreExecutors}
import org.scalatest._

import scala.concurrent.duration.Duration
import scala.concurrent.{Await, ExecutionContext}

class AsyncGuavaTests extends WordSpec with Matchers with Implicits {

  import ExecutionContext.Implicits.global
  import implicits._

  val exception: Throwable = new RuntimeException("Test exception")

  val service: ListeningExecutorService =
    MoreExecutors.listeningDecorator(Executors.newFixedThreadPool(10))

  def failedFuture[T]: ListenableFuture[T] =
    service.submit(new Callable[T] {
      override def call(): T = throw exception
    })

  def successfulFuture[T](value: T): ListenableFuture[T] =
    service.submit(new Callable[T] {
      override def call(): T = value
    })

  val foo = "Bar"

  "Guava ListenableFuture Freestyle integration" should {

    "transform guava ListenableFutures into scala.concurrent.Future successfully" in {
      Await.result(listenableFuture2Async(successfulFuture(foo)), Duration.Inf) shouldBe foo
    }

    "recover from failed guava ListenableFutures wrapping them into scala.concurrent.Future" in {
      Await.result(listenableFuture2Async(failedFuture[String]).failed, Duration.Inf) shouldBe exception
    }

    "transform guava ListenableFuture[Void] into scala.concurrent.Future successfully through an implicit conversion" in {
      Await.result(
        listenableFuture2Async(listenableVoidToListenableUnit(successfulFuture[Void](None.orNull))),
        Duration.Inf) shouldBe ((): Unit)
    }

    "recover from failed guava ListenableFuture[Void] wrapping them into scala.concurrent.Future through an implicit conversion" in {
      Await.result(
        listenableFuture2Async(listenableVoidToListenableUnit(failedFuture[Void])).failed,
        Duration.Inf) shouldBe exception
    }

  }

} 
Example 62
Source File: RpcServerImpl.scala    From finagle-protobuf   with Apache License 2.0 5 votes vote down vote up
package com.twitter.finagle.protobuf.rpc.impl

import com.twitter.finagle.protobuf.rpc.channel.ProtoBufCodec
import com.twitter.finagle.protobuf.rpc.{RpcServer, Util}
import com.twitter.util._
import com.twitter.util.Duration
import com.twitter.util.FuturePool
import com.twitter.finagle.builder.{Server, ServerBuilder, ServerConfig}
import java.net.InetSocketAddress
import java.util.logging.Logger
import scala.None
import java.util.concurrent.Executors
import java.util.concurrent.ExecutorService
import com.google.common.base.Preconditions
import com.twitter.finagle.protobuf.rpc.ServiceExceptionHandler
import com.google.protobuf.DynamicMessage
import com.google.protobuf.DynamicMessage.Builder
import com.google.protobuf._
import com.google.protobuf.Descriptors._
import com.twitter.util.Promise

class RpcServerImpl(sb: ServerBuilder[(String, Message), (String, Message), Any, Any, Any], port: Int, service: Service, handler: ServiceExceptionHandler[Message], executorService: ExecutorService) extends RpcServer {

  private val log = Logger.getLogger(getClass.toString)

  Preconditions.checkNotNull(executorService)
  Preconditions.checkNotNull(handler)

  private val execFuturePool = new ExecutorServiceFuturePool(executorService)

  private val server: Server = ServerBuilder.safeBuild(ServiceDispatcher(service, handler, execFuturePool),
    sb
      .codec(new ProtoBufCodec(service))
      .name(getClass().getName())
      .bindTo(new InetSocketAddress(port)))

  def close(d: Duration) = {
    server.close(d)
  }
}

class ServiceDispatcher(service: com.google.protobuf.Service, handler: ServiceExceptionHandler[Message], futurePool: FuturePool) extends com.twitter.finagle.Service[(String, Message), (String, Message)] {

  private val log = Logger.getLogger(getClass.toString)

  def apply(request: (String, Message)) = {

    val methodName = request._1
    val reqMessage = request._2

    Util.log("Request", methodName, reqMessage)
    val m = service.getDescriptorForType().findMethodByName(methodName);
    if (m == null) {
      throw new java.lang.AssertionError("Should never happen, we already decoded " + methodName)
    }

    val promise = new Promise[(String, Message)]()

    // dispatch to the service method
    val task = () => {
      try {
        service.callMethod(m, null, reqMessage, new RpcCallback[Message]() {

          def run(msg: Message) = {
            Util.log("Response", methodName, msg)
            promise.setValue((methodName, msg))
          }

        })
      } catch {
        case e: RuntimeException => {
          log.warning("#apply# Exception: "+e.getMessage)
          if (handler.canHandle(e)) {
            promise.setValue((methodName, handler.handle(e, constructEmptyResponseMessage(m))))
          }
        }
      }
    }
    futurePool(task())
    promise
  }

  def constructEmptyResponseMessage(m: MethodDescriptor): Message = {
    val outputType = m.getOutputType();
    DynamicMessage.newBuilder(outputType).build()
  }
}

object ServiceDispatcher {
  def apply(service: com.google.protobuf.Service, handler: ServiceExceptionHandler[Message], futurePool: FuturePool): ServiceDispatcher = {
    new ServiceDispatcher(service, handler, futurePool)
  }
} 
Example 63
Source File: DirectDataMultiThreadedInjector.scala    From SparkOnKudu   with Apache License 2.0 5 votes vote down vote up
package org.kududb.spark.demo.gamer.cdc

import java.text.SimpleDateFormat
import java.util.Random
import java.util.concurrent.atomic.AtomicInteger
import java.util.concurrent.{TimeUnit, Executors}

import org.kududb.client.{Operation, PartialRow, KuduClient}
import org.kududb.spark.demo.gamer.aggregates.GamerDataGenerator

object DirectDataMultiThreadedInjector {
  val simpleDateFormat = new SimpleDateFormat("MM,dd,yyyy")
  val random = new Random
  def main(args:Array[String]): Unit = {

    if (args.length == 0) {
      println("<kuduMaster> <tableName> <numberOfRecords> <numberOfThreads>")
      return
    }

    val kuduMaster = args(0)
    val tableName = args(1)
    val numberOfRecords = args(2).toInt
    val executor = Executors.newFixedThreadPool(args(3).toInt)
    val numberOfGamers = args(4).toInt
    val sleepTime = args(5).toInt

    val kuduClient = new KuduClient.KuduClientBuilder(kuduMaster).build()
    val leftToRun = new AtomicInteger()

    for (i <- 0 to numberOfRecords) {
      leftToRun.incrementAndGet()
      executor.execute(new ApplyNewRecordRunnable(GamerDataGenerator.makeNewGamerRecord(numberOfGamers),
      kuduClient, tableName, leftToRun))
      println("Submitted: " + i)

      Thread.sleep(sleepTime)
    }


    val startTime = System.currentTimeMillis()
    while (!executor.awaitTermination(10000, TimeUnit.SECONDS)) {
      val newTime = System.currentTimeMillis()
      println("> Still Waiting: {Time:" + (newTime - startTime) + ", LeftToRun:" + leftToRun + "}" )
    }


    kuduClient.close()


  }
} 
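Note that awaitTermination only reports termination after shutdown has been requested; since the injector never calls shutdown(), each awaitTermination call above waits out its full timeout. The conventional JDK ordering (a general pattern, not code from SparkOnKudu) is:

import java.util.concurrent.{Executors, TimeUnit}

val pool = Executors.newFixedThreadPool(4)
// ... submit work with pool.execute / pool.submit ...
pool.shutdown()                                    // stop accepting new tasks
if (!pool.awaitTermination(60, TimeUnit.SECONDS))  // wait for queued tasks to finish
  pool.shutdownNow()                               // force-cancel anything still running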
Example 64
Source File: NettyUtil.scala    From incubator-retired-gearpump   with Apache License 2.0 5 votes vote down vote up
package org.apache.gearpump.transport.netty

import java.net.InetSocketAddress
import java.util.concurrent.{Executors, ThreadFactory}

import org.jboss.netty.bootstrap.{ClientBootstrap, ServerBootstrap}
import org.jboss.netty.channel.socket.nio.NioServerSocketChannelFactory
import org.jboss.netty.channel.{Channel, ChannelFactory, ChannelPipelineFactory}

object NettyUtil {

  def newNettyServer(
      name: String,
      pipelineFactory: ChannelPipelineFactory,
      buffer_size: Int,
      inputPort: Int = 0): (Int, Channel) = {
    val bossFactory: ThreadFactory = new NettyRenameThreadFactory(name + "-boss")
    val workerFactory: ThreadFactory = new NettyRenameThreadFactory(name + "-worker")
    val factory = new NioServerSocketChannelFactory(Executors.newCachedThreadPool(bossFactory),
      Executors.newCachedThreadPool(workerFactory), 1)

    val bootstrap = createServerBootStrap(factory, pipelineFactory, buffer_size)
    val channel: Channel = bootstrap.bind(new InetSocketAddress(inputPort))
    val port = channel.getLocalAddress().asInstanceOf[InetSocketAddress].getPort()
    (port, channel)
  }

  def createServerBootStrap(
      factory: ChannelFactory, pipelineFactory: ChannelPipelineFactory, buffer_size: Int)
    : ServerBootstrap = {
    val bootstrap = new ServerBootstrap(factory)
    bootstrap.setOption("child.tcpNoDelay", true)
    bootstrap.setOption("child.receiveBufferSize", buffer_size)
    bootstrap.setOption("child.keepAlive", true)
    bootstrap.setPipelineFactory(pipelineFactory)
    bootstrap
  }

  def createClientBootStrap(
      factory: ChannelFactory, pipelineFactory: ChannelPipelineFactory, buffer_size: Int)
    : ClientBootstrap = {
    val bootstrap = new ClientBootstrap(factory)
    bootstrap.setOption("tcpNoDelay", true)
    bootstrap.setOption("sendBufferSize", buffer_size)
    bootstrap.setOption("keepAlive", true)
    bootstrap.setPipelineFactory(pipelineFactory)
    bootstrap
  }
} 
Example 65
Source File: ProcessAlgTest.scala    From scala-steward   with Apache License 2.0 5 votes vote down vote up
package org.scalasteward.core.io

import better.files.File
import cats.effect.{Blocker, IO}
import java.util.concurrent.Executors
import org.scalasteward.core.TestInstances._
import org.scalasteward.core.io.ProcessAlgTest.ioProcessAlg
import org.scalasteward.core.mock.MockContext._
import org.scalasteward.core.mock.MockState
import org.scalasteward.core.util.Nel
import org.scalatest.funsuite.AnyFunSuite
import org.scalatest.matchers.should.Matchers

class ProcessAlgTest extends AnyFunSuite with Matchers {
  test("exec echo") {
    ioProcessAlg
      .exec(Nel.of("echo", "-n", "hello"), File.currentWorkingDirectory)
      .unsafeRunSync() shouldBe List("hello")
  }

  test("exec false") {
    ioProcessAlg
      .exec(Nel.of("ls", "--foo"), File.currentWorkingDirectory)
      .attempt
      .map(_.isLeft)
      .unsafeRunSync()
  }

  test("respect the disableSandbox setting") {
    val cfg = config.copy(disableSandbox = true)
    val processAlg = new MockProcessAlg()(cfg)

    val state = processAlg
      .execSandboxed(Nel.of("echo", "hello"), File.temp)
      .runS(MockState.empty)
      .unsafeRunSync()

    state shouldBe MockState.empty.copy(
      commands = Vector(
        List("TEST_VAR=GREAT", "ANOTHER_TEST_VAR=ALSO_GREAT", File.temp.toString, "echo", "hello")
      )
    )
  }

  test("execSandboxed echo") {
    val state = processAlg
      .execSandboxed(Nel.of("echo", "hello"), File.temp)
      .runS(MockState.empty)
      .unsafeRunSync()

    state shouldBe MockState.empty.copy(
      commands = Vector(
        List(
          "TEST_VAR=GREAT",
          "ANOTHER_TEST_VAR=ALSO_GREAT",
          File.temp.toString,
          "firejail",
          s"--whitelist=${File.temp}",
          "echo",
          "hello"
        )
      )
    )
  }
}

object ProcessAlgTest {
  val blocker: Blocker = Blocker.liftExecutorService(Executors.newCachedThreadPool())
  implicit val ioProcessAlg: ProcessAlg[IO] = ProcessAlg.create[IO](blocker)
} 
Example 66
Source File: LagomDevModeConsoleHelper.scala    From lagom   with Apache License 2.0 5 votes vote down vote up
package com.lightbend.lagom.devmode

import java.io.Closeable
import java.util.concurrent.Executors
import java.util.concurrent.TimeUnit

import com.lightbend.lagom.devmode.Reloader.DevServerBinding
import play.dev.filewatch.LoggerProxy

import scala.concurrent.Await
import scala.concurrent.ExecutionContext
import scala.concurrent.Future
import scala.concurrent.duration._


class ConsoleHelper(colors: Colors) {
  def printStartScreen(log: LoggerProxy, services: Seq[ServiceBindingInfo]): Unit = {
    services.foreach {
      case ServiceBindingInfo(name, bindings) =>
        bindings.foreach(b => log.info(s"Service $name listening for ${b.protocol} on ${b.address}:${b.port}"))
    }
    log.info(
      colors.green(
        s"(Service${if (services.size > 1) "s" else ""} started, press enter to stop and go back to the console...)"
      )
    )
  }

  def blockUntilExit() = {
    // blocks until user presses enter
    System.in.read()
  }

  def shutdownAsynchronously(log: LoggerProxy, services: Seq[Closeable], infrastructureServices: Seq[Closeable]) = {
    // shut down all running services
    log.info("Stopping services")

    val n = java.lang.Runtime.getRuntime.availableProcessors
    log.debug("nb proc : " + n)
    // create a dedicated execution context
    // with a fixed number of threads (based on the number of CPUs)
    implicit val ecn = ExecutionContext.fromExecutorService(
      Executors.newFixedThreadPool(n)
    )

    try {
      // Stop services asynchronously
      val closing = Future.traverse(services)(serv =>
        Future {
          serv.close()
        }
      )
      closing.onComplete(_ => log.info("All services are stopped"))
      Await.result(closing, 60.seconds)

      println()
      // and finally shut down any other possibly running embedded server
      Await.result(
        Future.traverse(infrastructureServices)(serv =>
          Future {
            serv.close()
          }
        ),
        60.seconds
      )
    } finally {
      // finally, shut down the execution context created above
      ecn.shutdown()
      ecn.awaitTermination(60, TimeUnit.SECONDS)
    }
  }
}

class Colors(logNoFormat: String) {
  import scala.Console._

  val isANSISupported: Boolean = {
    Option(System.getProperty(logNoFormat))
      .map(_ != "true")
      .orElse {
        Option(System.getProperty("os.name"))
          .map(_.toLowerCase(java.util.Locale.ENGLISH))
          .filter(_.contains("windows"))
          .map(_ => false)
      }
      .getOrElse(true)
  }

  private def color(code: String, str: String) = if (isANSISupported) code + str + RESET else str

  def red(str: String): String     = color(RED, str)
  def blue(str: String): String    = color(BLUE, str)
  def cyan(str: String): String    = color(CYAN, str)
  def green(str: String): String   = color(GREEN, str)
  def magenta(str: String): String = color(MAGENTA, str)
  def white(str: String): String   = color(WHITE, str)
  def black(str: String): String   = color(BLACK, str)
  def yellow(str: String): String  = color(YELLOW, str)
} 
Example 67
Source File: SparkSQLSessionManager.scala    From sparkoscope   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.sql.hive.thriftserver

import java.util.concurrent.Executors

import org.apache.commons.logging.Log
import org.apache.hadoop.hive.conf.HiveConf
import org.apache.hadoop.hive.conf.HiveConf.ConfVars
import org.apache.hive.service.cli.SessionHandle
import org.apache.hive.service.cli.session.SessionManager
import org.apache.hive.service.cli.thrift.TProtocolVersion
import org.apache.hive.service.server.HiveServer2

import org.apache.spark.sql.SQLContext
import org.apache.spark.sql.hive.{HiveSessionState, HiveUtils}
import org.apache.spark.sql.hive.thriftserver.ReflectionUtils._
import org.apache.spark.sql.hive.thriftserver.server.SparkSQLOperationManager


private[hive] class SparkSQLSessionManager(hiveServer: HiveServer2, sqlContext: SQLContext)
  extends SessionManager(hiveServer)
  with ReflectedCompositeService {

  private lazy val sparkSqlOperationManager = new SparkSQLOperationManager()

  override def init(hiveConf: HiveConf) {
    setSuperField(this, "hiveConf", hiveConf)

    // Create operation log root directory, if operation logging is enabled
    if (hiveConf.getBoolVar(ConfVars.HIVE_SERVER2_LOGGING_OPERATION_ENABLED)) {
      invoke(classOf[SessionManager], this, "initOperationLogRootDir")
    }

    val backgroundPoolSize = hiveConf.getIntVar(ConfVars.HIVE_SERVER2_ASYNC_EXEC_THREADS)
    setSuperField(this, "backgroundOperationPool", Executors.newFixedThreadPool(backgroundPoolSize))
    getAncestorField[Log](this, 3, "LOG").info(
      s"HiveServer2: Async execution pool size $backgroundPoolSize")

    setSuperField(this, "operationManager", sparkSqlOperationManager)
    addService(sparkSqlOperationManager)

    initCompositeService(hiveConf)
  }

  override def openSession(
      protocol: TProtocolVersion,
      username: String,
      passwd: String,
      ipAddress: String,
      sessionConf: java.util.Map[String, String],
      withImpersonation: Boolean,
      delegationToken: String): SessionHandle = {
    val sessionHandle =
      super.openSession(protocol, username, passwd, ipAddress, sessionConf, withImpersonation,
          delegationToken)
    val session = super.getSession(sessionHandle)
    HiveThriftServer2.listener.onSessionCreated(
      session.getIpAddress, sessionHandle.getSessionId.toString, session.getUsername)
    val sessionState = sqlContext.sessionState.asInstanceOf[HiveSessionState]
    val ctx = if (sessionState.hiveThriftServerSingleSession) {
      sqlContext
    } else {
      sqlContext.newSession()
    }
    ctx.setConf("spark.sql.hive.version", HiveUtils.hiveExecutionVersion)
    if (sessionConf != null && sessionConf.containsKey("use:database")) {
      ctx.sql(s"use ${sessionConf.get("use:database")}")
    }
    sparkSqlOperationManager.sessionToContexts.put(sessionHandle, ctx)
    sessionHandle
  }

  override def closeSession(sessionHandle: SessionHandle) {
    HiveThriftServer2.listener.onSessionClosed(sessionHandle.getSessionId.toString)
    super.closeSession(sessionHandle)
    sparkSqlOperationManager.sessionToActivePool.remove(sessionHandle)
    sparkSqlOperationManager.sessionToContexts.remove(sessionHandle)
  }
} 
Example 68
package commons_test.test_helpers

import java.util.concurrent.Executors

import commons_test.test_helpers.RealWorldWithServerAndTestConfigBaseTest.RealWorldWithTestConfig
import commons_test.test_helpers.WsScalaTestClientWithHost.TestWsClient
import config.RealWorldComponents
import org.scalatest._
import play.api.ApplicationLoader.Context
import play.api.Configuration
import play.api.http.Status
import play.api.test.DefaultAwaitTimeout

import scala.concurrent.ExecutionContext

trait RealWorldWithServerBaseTest extends FlatSpec
  with MustMatchers
  with OptionValues
  with WsScalaTestClientWithHost
  with OneServerPerTestWithComponents_FixedForCompileTimeTestSetUp
  with Status
  with DefaultAwaitTimeout
  with WithAwaitUtilities
  with WithTestExecutionContext {

  override implicit val executionContext: ExecutionContext = ExecutionContext.fromExecutorService(Executors.newFixedThreadPool(1))

  implicit val host: Host = Host("http://localhost:")

  override type TestComponents <: RealWorldWithTestConfig

  implicit def wsClientWithConnectionData: TestWsClient = {
    TestWsClient(host, portNumber, components.wsClient)
  }

}

object RealWorldWithServerAndTestConfigBaseTest {

  class RealWorldWithTestConfig(context: Context) extends RealWorldComponents(context) {

    override def configuration: Configuration = {
      val testConfig = Configuration.from(TestUtils.config)
      val config = super.configuration
      testConfig.withFallback(config)
    }

  }

}

class RealWorldWithServerAndTestConfigBaseTest extends RealWorldWithServerBaseTest {
  override type TestComponents = RealWorldWithTestConfig

  override def createComponents: TestComponents = {
      new RealWorldWithTestConfig(context)
    }
} 
Example 69
Source File: WoWChat.scala    From wowchat   with GNU General Public License v3.0 5 votes vote down vote up
package wowchat

import java.util.concurrent.{Executors, TimeUnit}

import wowchat.common.{CommonConnectionCallback, Global, ReconnectDelay, WowChatConfig}
import wowchat.discord.Discord
import wowchat.game.GameConnector
import wowchat.realm.{RealmConnectionCallback, RealmConnector}
import com.typesafe.scalalogging.StrictLogging
import io.netty.channel.nio.NioEventLoopGroup

import scala.io.Source

object WoWChat extends StrictLogging {

  private val RELEASE = "v1.3.3"

  def main(args: Array[String]): Unit = {
    logger.info(s"Running WoWChat - $RELEASE")
    val confFile = if (args.nonEmpty) {
      args(0)
    } else {
      logger.info("No configuration file supplied. Trying with default wowchat.conf.")
      "wowchat.conf"
    }
    Global.config = WowChatConfig(confFile)

    checkForNewVersion

    val gameConnectionController: CommonConnectionCallback = new CommonConnectionCallback {

      private val reconnectExecutor = Executors.newSingleThreadScheduledExecutor
      private val reconnectDelay = new ReconnectDelay

      override def connect: Unit = {
        Global.group = new NioEventLoopGroup

        val realmConnector = new RealmConnector(new RealmConnectionCallback {
          override def success(host: String, port: Int, realmName: String, realmId: Int, sessionKey: Array[Byte]): Unit = {
            gameConnect(host, port, realmName, realmId, sessionKey)
          }

          override def disconnected: Unit = doReconnect

          override def error: Unit = sys.exit(1)
        })

        realmConnector.connect
      }

      private def gameConnect(host: String, port: Int, realmName: String, realmId: Int, sessionKey: Array[Byte]): Unit = {
        new GameConnector(host, port, realmName, realmId, sessionKey, this).connect
      }

      override def connected: Unit = reconnectDelay.reset

      override def disconnected: Unit = doReconnect

      def doReconnect: Unit = {
        Global.group.shutdownGracefully()
        Global.discord.changeRealmStatus("Connecting...")
        val delay = reconnectDelay.getNext
        logger.info(s"Disconnected from server! Reconnecting in $delay seconds...")

        reconnectExecutor.schedule(new Runnable {
          override def run(): Unit = connect
        }, delay, TimeUnit.SECONDS)
      }
    }

    logger.info("Connecting to Discord...")
    Global.discord = new Discord(new CommonConnectionCallback {
      override def connected: Unit = gameConnectionController.connect

      override def error: Unit = sys.exit(1)
    })
  }

  private def checkForNewVersion = {
    // This is JSON, but I really just didn't want to import a full blown JSON library for one string.
    val data = Source.fromURL("https://api.github.com/repos/fjaros/wowchat/releases/latest").mkString
    val regex = "\"tag_name\":\"(.+?)\",".r
    val repoTagName = regex
      .findFirstMatchIn(data)
      .map(_.group(1))
      .getOrElse("NOT FOUND")

    if (repoTagName != RELEASE) {
      logger.error( "~~~ !!!                YOUR WoWChat VERSION IS OUT OF DATE                !!! ~~~")
      logger.error(s"~~~ !!!                     Current Version:  $RELEASE                      !!! ~~~")
      logger.error(s"~~~ !!!                     Repo    Version:  $repoTagName                      !!! ~~~")
      logger.error( "~~~ !!! RUN git pull OR GO TO https://github.com/fjaros/wowchat TO UPDATE !!! ~~~")
      logger.error( "~~~ !!!                YOUR WoWChat VERSION IS OUT OF DATE                !!! ~~~")
    }
  }
} 
Example 70
Source File: SimpleConsumer.scala    From embedded-kafka   with Apache License 2.0 5 votes vote down vote up
package com.tuplejump.embedded.kafka

import java.util.Properties
import java.util.concurrent.{CountDownLatch, Executors}

import scala.util.Try
import kafka.serializer.StringDecoder
import kafka.consumer.{ Consumer, ConsumerConfig }


class SimpleConsumer(
    val latch: CountDownLatch,
    consumerConfig: Map[String, String],
    topic: String,
    groupId: String,
    partitions: Int,
    numThreads: Int) {

  val connector = Consumer.create(createConsumerConfig)

  val streams = connector
    .createMessageStreams(Map(topic -> partitions), new StringDecoder(), new StringDecoder())
    .get(topic)

  val executor = Executors.newFixedThreadPool(numThreads)

  for (stream <- streams) {
    executor.submit(new Runnable() {
      def run(): Unit = {
        for (s <- stream) {
          while (s.iterator.hasNext) {
            latch.countDown()
          }
        }
      }
    })
  }

  private def createConsumerConfig: ConsumerConfig = {
    import scala.collection.JavaConverters._
    val props = new Properties()
    props.putAll(consumerConfig.asJava)
    new ConsumerConfig(props)
  }

  def shutdown(): Unit = Try {
    connector.shutdown()
    executor.shutdown()
  }
} 
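A hypothetical driver for this consumer (the names below are illustrative): the test that owns the latch blocks on it until the expected number of messages has been counted down, then shuts the consumer down.

import java.util.concurrent.{CountDownLatch, TimeUnit}

val expectedMessages = 100
val latch = new CountDownLatch(expectedMessages)
// consumerConfig, topic, groupId, partitions and numThreads are assumed to be defined elsewhere
// val consumer = new SimpleConsumer(latch, consumerConfig, topic, groupId, partitions, numThreads)
val allArrived = latch.await(30, TimeUnit.SECONDS) // true once every expected message was seen
// consumer.shutdown()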
Example 71
Source File: LeaseContentionSpec.scala    From akka-management   with Apache License 2.0 5 votes vote down vote up
package akka.coordination.lease.kubernetes

import java.util.concurrent.Executors

import akka.actor.ActorSystem
import akka.coordination.lease.TimeoutSettings
import akka.coordination.lease.kubernetes.internal.KubernetesApiImpl
import akka.coordination.lease.scaladsl.LeaseProvider
import akka.testkit.TestKit
import com.typesafe.config.ConfigFactory
import org.scalatest.concurrent.ScalaFutures
import org.scalatest.{BeforeAndAfterAll, Matchers, WordSpecLike}

import scala.collection.immutable
import scala.concurrent.{ExecutionContext, Future}


class LeaseContentionSpec extends TestKit(ActorSystem("LeaseContentionSpec", ConfigFactory.parseString(
  """
    akka.loglevel = INFO
    akka.coordination.lease.kubernetes {
      api-service-host = localhost
      api-service-port = 8080
      namespace = "lease"
      namespace-path = ""
      secure-api-server = false
    }

  """
))) with WordSpecLike with Matchers with ScalaFutures with BeforeAndAfterAll {

  implicit val patience: PatienceConfig = PatienceConfig(testKitSettings.DefaultTimeout.duration)

  // for cleanup
  val k8sApi = new KubernetesApiImpl(system, KubernetesSettings(system, TimeoutSettings(system.settings.config.getConfig("akka.coordination.lease.kubernetes"))))

  val lease1 = "contended-lease"
  val lease2 = "contended-lease-2"


  override protected def beforeAll(): Unit = {
    k8sApi.removeLease(lease1).futureValue
    k8sApi.removeLease(lease2).futureValue
  }

  override protected def afterAll(): Unit ={
    TestKit.shutdownActorSystem(system)
  }

  "A lease under contention" should {

    "only allow one client to get acquire lease" in {
      val underTest = LeaseProvider(system)
      val nrClients = 30
      implicit val ec = ExecutionContext.fromExecutor(Executors.newFixedThreadPool(nrClients)) // too many = HTTP request queue of pool fills up
      // could make this more contended with a countdown latch so they all start at the same time
      val leases: immutable.Seq[(String, Boolean)] = Future.sequence((0 until nrClients).map(i => {
        val clientName = s"client$i"
        val lease = underTest.getLease(lease1, KubernetesLease.configPath, clientName)
        Future {
          lease.acquire()
        }.flatMap(identity).map(granted => (clientName, granted))
      })).futureValue

      val numberGranted = leases.count { case (_, granted) => granted }
      withClue(s"More than one lease granted $leases") {
        numberGranted shouldEqual 1
      }
    }
  }

} 
Example 72
Source File: DummyCpgProvider.scala    From codepropertygraph   with Apache License 2.0 5 votes vote down vote up
package io.shiftleft.cpgserver.cpg

import java.util.UUID
import java.util.concurrent.{ConcurrentHashMap, Executors}

import scala.jdk.CollectionConverters._
import scala.collection.concurrent.Map
import scala.concurrent.ExecutionContext
import cats.data.OptionT
import cats.effect.{Blocker, ContextShift, IO}
import io.shiftleft.codepropertygraph.Cpg
import io.shiftleft.codepropertygraph.generated.nodes.NewMethod
import io.shiftleft.cpgserver.query.{CpgOperationFailure, CpgOperationResult, CpgOperationSuccess}
import io.shiftleft.passes.{CpgPass, DiffGraph}
import io.shiftleft.semanticcpg.language._


class DummyCpgProvider(implicit cs: ContextShift[IO]) extends CpgProvider {

  private val blocker: Blocker =
    Blocker.liftExecutionContext(ExecutionContext.fromExecutor(Executors.newFixedThreadPool(2)))

  private val cpgMap: Map[UUID, CpgOperationResult[Cpg]] =
    new ConcurrentHashMap[UUID, CpgOperationResult[Cpg]].asScala

  private val uuidProvider = IO(UUID.randomUUID)

  private class MyPass(cpg: Cpg) extends CpgPass(cpg) {
    override def run(): Iterator[DiffGraph] = {
      implicit val diffGraph: DiffGraph.Builder = DiffGraph.newBuilder
      NewMethod(name = "main", isExternal = false).start.store
      Iterator(diffGraph.build())
    }
  }

  override def createCpg(filenames: Set[String]): IO[UUID] = {
    val cpg = new Cpg

    for {
      cpgId <- uuidProvider
      _ <- blocker
        .blockOn(IO(new MyPass(cpg).createAndApply()))
        .runAsync {
          case Right(_) => IO(cpgMap.put(cpgId, CpgOperationSuccess(cpg))).map(_ => ())
          case Left(ex) => IO(cpgMap.put(cpgId, CpgOperationFailure(ex))).map(_ => ())
        }
        .toIO
    } yield cpgId
  }

  override def retrieveCpg(uuid: UUID): OptionT[IO, CpgOperationResult[Cpg]] = {
    OptionT.fromOption(cpgMap.get(uuid))
  }
} 
Example 73
Source File: ServerAmmoniteExecutor.scala    From codepropertygraph   with Apache License 2.0 5 votes vote down vote up
package io.shiftleft.cpgserver.query

import cats.data.OptionT
import cats.effect.{Blocker, ContextShift, IO}

import io.shiftleft.codepropertygraph.Cpg
import io.shiftleft.console.scripting.AmmoniteExecutor

import java.util.UUID
import java.util.concurrent.{ConcurrentHashMap, Executors}

import scala.collection.concurrent.Map
import scala.concurrent.ExecutionContext
import scala.jdk.CollectionConverters._

abstract class ServerAmmoniteExecutor(implicit cs: ContextShift[IO]) extends AmmoniteExecutor {

  private val blocker: Blocker =
    Blocker.liftExecutionContext(ExecutionContext.fromExecutor(Executors.newFixedThreadPool(2)))

  private val queryResultMap: Map[UUID, CpgOperationResult[String]] =
    new ConcurrentHashMap[UUID, CpgOperationResult[String]].asScala

  private val uuidProvider = IO { UUID.randomUUID }

  def executeQuery(cpg: Cpg, query: String): IO[UUID] = {
    for {
      resultUuid <- uuidProvider
      _ <- blocker
        .blockOn(runQuery(query, cpg))
        .runAsync {
          case Right(result) => IO(queryResultMap.put(resultUuid, CpgOperationSuccess(result.toString))).map(_ => ())
          case Left(ex)      => IO(queryResultMap.put(resultUuid, CpgOperationFailure(ex))).map(_ => ())
        }
        .toIO
    } yield resultUuid
  }

  def retrieveQueryResult(queryId: UUID): OptionT[IO, CpgOperationResult[String]] = {
    OptionT.fromOption(queryResultMap.get(queryId))
  }

  def executeQuerySync(cpg: Cpg, query: String): IO[CpgOperationResult[String]] = {
    for {
      result <- runQuery(query, cpg)
        .map(v => CpgOperationSuccess(v.toString))
        .handleErrorWith(err => IO(CpgOperationFailure(err)))
    } yield result
  }
} 
Example 74
Source File: SwaggerRoute.scala    From codepropertygraph   with Apache License 2.0 5 votes vote down vote up
package io.shiftleft.cpgserver.route

import java.util.concurrent.Executors

import scala.concurrent.ExecutionContext
import cats.data.OptionT
import cats.effect.{Blocker, ContextShift, IO}
import io.circe.generic.auto._
import io.circe.syntax._
import org.http4s._
import org.http4s.circe._
import org.http4s.dsl.io._
import org.http4s.headers.Location
import org.webjars.WebJarAssetLocator
import io.shiftleft.cpgserver.route.CpgRoute.ApiError

final class SwaggerRoute {

  private val blockingEc = ExecutionContext.fromExecutor(Executors.newSingleThreadExecutor)
  private val blocker = Blocker.liftExecutionContext(blockingEc)
  private implicit val blockingCs: ContextShift[IO] = IO.contextShift(blockingEc)

  private val swaggerUiVersion = IO { new WebJarAssetLocator().getWebJars.get("swagger-ui") }
  private val swaggerUiResources = swaggerUiVersion.map { ver =>
    s"/META-INF/resources/webjars/swagger-ui/$ver"
  }
  private val swaggerUiPath = Path("swagger-ui")

  val routes: HttpRoutes[IO] = HttpRoutes.of {
    case GET -> Root / ("swagger-ui" | "docs") =>
      PermanentRedirect(Location(Uri.unsafeFromString("swagger-ui/index.html")))

    // TODO discuss with jacob: according to scalac this is unreachable... commenting for now since it probably never worked anyway
    case req @ GET -> (Root | `swaggerUiPath`) / "swagger.yaml" =>
      StaticFile
        .fromResource("/swagger.yaml", blocker, Some(req))
        .getOrElseF(InternalServerError(ApiError("Swagger documentation is missing.").asJson))

    case req @ GET -> path if path.startsWith(swaggerUiPath) => {
      val file = path.toList.tail.mkString("/", "/", "") match {
        case f if f == "/index.html" =>
          StaticFile.fromResource[IO]("/swagger-ui/index.html", blocker, Some(req))
        case f =>
          OptionT.liftF(swaggerUiResources).flatMap { resources =>
            StaticFile.fromResource[IO](resources + f, blocker, Some(req))
          }
      }
      file.getOrElseF(InternalServerError(ApiError(s"Requested file [$file] is missing.").asJson))
    }
  }
}

object SwaggerRoute {
  def apply(): SwaggerRoute =
    new SwaggerRoute
} 
Example 75
Source File: DistributedCountRDD.scala    From carbondata   with Apache License 2.0 5 votes vote down vote up
package org.apache.carbondata.indexserver

import java.util.concurrent.Executors

import scala.collection.JavaConverters._
import scala.concurrent.{Await, ExecutionContext, ExecutionContextExecutor, Future}
import scala.concurrent.duration.Duration

import org.apache.hadoop.mapred.TaskAttemptID
import org.apache.hadoop.mapreduce.{InputSplit, TaskType}
import org.apache.hadoop.mapreduce.task.TaskAttemptContextImpl
import org.apache.spark.{Partition, SparkEnv, TaskContext}
import org.apache.spark.sql.SparkSession

import org.apache.carbondata.common.logging.LogServiceFactory
import org.apache.carbondata.core.cache.CacheProvider
import org.apache.carbondata.core.datastore.impl.FileFactory
import org.apache.carbondata.core.index.{IndexInputFormat, IndexStoreManager}
import org.apache.carbondata.core.index.dev.expr.IndexInputSplitWrapper
import org.apache.carbondata.core.util.{CarbonProperties, CarbonThreadFactory}
import org.apache.carbondata.spark.rdd.CarbonRDD


class DistributedCountRDD(@transient ss: SparkSession, indexInputFormat: IndexInputFormat)
  extends CarbonRDD[(String, String)](ss, Nil) {

  @transient private val LOGGER = LogServiceFactory.getLogService(classOf[DistributedPruneRDD]
    .getName)

  override protected def getPreferredLocations(split: Partition): Seq[String] = {
    if (split.asInstanceOf[IndexRDDPartition].getLocations != null) {
      split.asInstanceOf[IndexRDDPartition].getLocations.toSeq
    } else {
      Seq()
    }
  }

  override def internalCompute(split: Partition,
      context: TaskContext): Iterator[(String, String)] = {
    val attemptId = new TaskAttemptID(DistributedRDDUtils.generateTrackerId,
      id, TaskType.MAP, split.index, 0)
    val attemptContext = new TaskAttemptContextImpl(FileFactory.getConfiguration, attemptId)
    val inputSplits = split.asInstanceOf[IndexRDDPartition].inputSplit
    val numOfThreads = CarbonProperties.getInstance().getNumOfThreadsForExecutorPruning
    val service = Executors
      .newFixedThreadPool(numOfThreads, new CarbonThreadFactory("IndexPruningPool", true))
    implicit val ec: ExecutionContextExecutor = ExecutionContext
      .fromExecutor(service)
    if (indexInputFormat.ifAsyncCall()) {
      // to clear cache of invalid segments during pre-priming in index server
      IndexStoreManager.getInstance().clearInvalidSegments(indexInputFormat.getCarbonTable,
        indexInputFormat.getInvalidSegments)
    }
    val futures = if (inputSplits.length <= numOfThreads) {
      inputSplits.map {
        split => generateFuture(Seq(split))
      }
    } else {
      DistributedRDDUtils.groupSplits(inputSplits, numOfThreads).map {
        splits => generateFuture(splits)
      }
    }
    // scalastyle:off awaitresult
    val results = Await.result(Future.sequence(futures), Duration.Inf).flatten
    // scalastyle:on awaitresult
    val executorIP = s"${ SparkEnv.get.blockManager.blockManagerId.host }_${
      SparkEnv.get.blockManager.blockManagerId.executorId
    }"
    val cacheSize = if (CacheProvider.getInstance().getCarbonCache != null) {
      CacheProvider.getInstance().getCarbonCache.getCurrentSize
    } else {
      0L
    }
    Iterator((executorIP + "_" + cacheSize.toString, results.map(_._2.toLong).sum.toString))
  }

  override protected def internalGetPartitions: Array[Partition] = {
    new DistributedPruneRDD(ss, indexInputFormat).partitions
  }

  private def generateFuture(split: Seq[InputSplit])
    (implicit executionContext: ExecutionContext) = {
    Future {
      val segments = split.map { inputSplit =>
        val distributable = inputSplit.asInstanceOf[IndexInputSplitWrapper]
        distributable.getDistributable.getSegment
          .setReadCommittedScope(indexInputFormat.getReadCommittedScope)
        distributable.getDistributable.getSegment
      }
      val defaultIndex = IndexStoreManager.getInstance
        .getIndex(indexInputFormat.getCarbonTable, split.head
          .asInstanceOf[IndexInputSplitWrapper].getDistributable.getIndexSchema)
      defaultIndex.getBlockRowCount(defaultIndex, segments.toList.asJava, indexInputFormat
        .getPartitions).asScala
    }
  }

} 
Example 76
Source File: TestCreateTableIfNotExists.scala    From carbondata   with Apache License 2.0 5 votes vote down vote up
package org.apache.carbondata.spark.testsuite.createTable

import java.util.concurrent.{Callable, Executors, ExecutorService, Future, TimeUnit}

import org.apache.spark.sql.test.util.QueryTest
import org.apache.spark.sql.AnalysisException
import org.scalatest.BeforeAndAfterAll

class TestCreateTableIfNotExists extends QueryTest with BeforeAndAfterAll {

  override def beforeAll {
    sql("use default")
    sql("drop table if exists test")
    sql("drop table if exists sourceTable")
    sql("drop table if exists targetTable")
  }

  test("test create table if not exists") {
    sql("create table test(a int, b string) STORED AS carbondata")
    try {
      // table creation should be successful
      sql("create table if not exists test(a int, b string) STORED AS carbondata")
      assert(true)
    } catch {
      case ex: Exception =>
        assert(false)
    }
  }

  test("test create table if not exist concurrently") {

    val executorService: ExecutorService = Executors.newFixedThreadPool(10)
    var futures: List[Future[_]] = List()
    for (i <- 0 until (3)) {
      futures = futures :+ runAsync()
    }

    executorService.shutdown()
    executorService.awaitTermination(30L, TimeUnit.SECONDS)

    futures.foreach { future =>
      assertResult("PASS")(future.get.toString)
    }

    def runAsync(): Future[String] = {
      executorService.submit(new Callable[String] {
        override def call() = {
          // Create table
          var result = "PASS"
          try {
            sql("create table IF NOT EXISTS TestIfExists(name string) STORED AS carbondata")
          } catch {
            case exception: Exception =>
              result = exception.getMessage
              exception.printStackTrace()
          }
          result
        }
      })
    }
  }

  test("test create table without column specified") {
    val exception = intercept[AnalysisException] {
      sql("create table TableWithoutColumn STORED AS carbondata tblproperties('sort_columns'='')")
    }
    assert(exception.getMessage.contains("Unable to infer the schema"))
  }

  override def afterAll {
    sql("use default")
    sql("drop table if exists test")
    sql("drop table if exists sourceTable")
    sql("drop table if exists targetTable")
    sql("drop table if exists TestIfExists")
  }

} 
Example 77
Source File: ScheduleExamples.scala    From netty-in-action-scala   with Apache License 2.0 5 votes vote down vote up
package nia.chapter7

import io.netty.channel.socket.nio.NioSocketChannel
import java.util.concurrent.Executors
import java.util.concurrent.TimeUnit


object ScheduleExamples {

  // CHANNEL_FROM_SOMEWHERE stands in for a channel obtained from an existing connection
  // in the book; a fresh NioSocketChannel is used here only so that the snippet compiles.
  def CHANNEL_FROM_SOMEWHERE = new NioSocketChannel()

  def cancelingTaskUsingScheduledFuture(): Unit = {
    val ch = CHANNEL_FROM_SOMEWHERE
    // Schedule the task and keep the ScheduledFuture it returns
    val future = ch.eventLoop.scheduleAtFixedRate(new Runnable() {
      override def run(): Unit = {
        System.out.println("Run every 60 seconds")
      }
    }, 60, 60, TimeUnit.SECONDS)
    // Some other code that runs...
    val mayInterruptIfRunning = false
    // Cancel the task so that it does not run again
    future.cancel(mayInterruptIfRunning)
  }
} 
Example 78
Source File: ChannelOperationExamples.scala    From netty-in-action-scala   with Apache License 2.0 5 votes vote down vote up
package nia.chapter4

import io.netty.buffer.Unpooled
import io.netty.channel.ChannelFuture
import io.netty.channel.ChannelFutureListener
import io.netty.channel.socket.nio.NioSocketChannel
import io.netty.util.CharsetUtil
import java.util.concurrent.Executors


object ChannelOperationExamples {

  // CHANNEL_FROM_SOMEWHERE stands in for a channel obtained from an existing connection
  // in the book; a fresh NioSocketChannel is used here only so that the snippet compiles.
  def CHANNEL_FROM_SOMEWHERE = new NioSocketChannel()

  def writingToChannelFromManyThreads(): Unit = {
    val channel = CHANNEL_FROM_SOMEWHERE
    // Create the ByteBuf that holds the data to be written
    val buf = Unpooled.copiedBuffer("your data", CharsetUtil.UTF_8)
    // Create a Runnable that writes the data to the Channel
    val writer: Runnable = () ⇒ channel.write(buf.duplicate())
    // Obtain a reference to a thread-pool Executor
    val executor = Executors.newCachedThreadPool
    // Submit the write task to the pool so it runs in one thread
    executor.execute(writer)
    // Submit another write task so it runs in another thread
    executor.execute(writer)
    //...
  }
} 
Example 79
Source File: MonixAutoAckConsumer.scala    From fs2-rabbit   with Apache License 2.0 5 votes vote down vote up
package dev.profunktor.fs2rabbit.examples

import cats.data.NonEmptyList
import cats.effect._
import cats.syntax.functor._
import dev.profunktor.fs2rabbit.config.{Fs2RabbitConfig, Fs2RabbitNodeConfig}
import dev.profunktor.fs2rabbit.interpreter.RabbitClient
import dev.profunktor.fs2rabbit.resiliency.ResilientStream
import monix.eval.{Task, TaskApp}
import java.util.concurrent.Executors

object MonixAutoAckConsumer extends TaskApp {

  private val config: Fs2RabbitConfig = Fs2RabbitConfig(
    virtualHost = "/",
    nodes = NonEmptyList.one(
      Fs2RabbitNodeConfig(
        host = "127.0.0.1",
        port = 5672
      )
    ),
    username = Some("guest"),
    password = Some("guest"),
    ssl = false,
    connectionTimeout = 3,
    requeueOnNack = false,
    requeueOnReject = false,
    internalQueueSize = Some(500),
    automaticRecovery = true
  )

  val blockerResource =
    Resource
      .make(Task(Executors.newCachedThreadPool()))(es => Task(es.shutdown()))
      .map(Blocker.liftExecutorService)

  override def run(args: List[String]): Task[ExitCode] =
    blockerResource.use { blocker =>
      RabbitClient[Task](config, blocker).flatMap { client =>
        ResilientStream
          .runF(new AutoAckConsumerDemo[Task](client).program)
          .as(ExitCode.Success)
      }
    }

} 
Example 80
Source File: IOAckerConsumer.scala    From fs2-rabbit   with Apache License 2.0 5 votes vote down vote up
package dev.profunktor.fs2rabbit.examples

import java.util.concurrent.Executors

import cats.data.NonEmptyList
import cats.effect._
import cats.syntax.functor._
import dev.profunktor.fs2rabbit.config.{Fs2RabbitConfig, Fs2RabbitNodeConfig}
import dev.profunktor.fs2rabbit.interpreter.RabbitClient
import dev.profunktor.fs2rabbit.resiliency.ResilientStream

object IOAckerConsumer extends IOApp {

  private val config: Fs2RabbitConfig = Fs2RabbitConfig(
    virtualHost = "/",
    nodes = NonEmptyList.one(Fs2RabbitNodeConfig(host = "127.0.0.1", port = 5672)),
    username = Some("guest"),
    password = Some("guest"),
    ssl = false,
    connectionTimeout = 3,
    requeueOnNack = false,
    requeueOnReject = false,
    internalQueueSize = Some(500),
    automaticRecovery = true
  )

  val blockerResource =
    Resource
      .make(IO(Executors.newCachedThreadPool()))(es => IO(es.shutdown()))
      .map(Blocker.liftExecutorService)

  override def run(args: List[String]): IO[ExitCode] =
    blockerResource.use { blocker =>
      RabbitClient[IO](config, blocker).flatMap { client =>
        ResilientStream
          .runF(new AckerConsumerDemo[IO](client).program)
          .as(ExitCode.Success)
      }
    }
} 
Example 81
Source File: ZIOAutoAckConsumer.scala    From fs2-rabbit   with Apache License 2.0 5 votes vote down vote up
package dev.profunktor.fs2rabbit.examples

import dev.profunktor.fs2rabbit.config.Fs2RabbitConfig
import dev.profunktor.fs2rabbit.interpreter.RabbitClient

import cats.effect.{Blocker, Resource}
import zio._
import zio.interop.catz._
import zio.interop.catz.implicits._
import dev.profunktor.fs2rabbit.resiliency.ResilientStream
import java.util.concurrent.Executors

object ZIOAutoAckConsumer extends CatsApp {

  val config = Fs2RabbitConfig(
    virtualHost = "/",
    host = "127.0.0.1",
    username = Some("guest"),
    password = Some("guest"),
    port = 5672,
    ssl = false,
    connectionTimeout = 3,
    requeueOnNack = false,
    requeueOnReject = false,
    internalQueueSize = Some(500)
  )

  val blockerResource =
    Resource
      .make(Task(Executors.newCachedThreadPool()))(es => Task(es.shutdown()))
      .map(Blocker.liftExecutorService)

  override def run(args: List[String]): UIO[Int] =
    blockerResource
      .use { blocker =>
        RabbitClient[Task](config, blocker).flatMap { client =>
          ResilientStream
            .runF(new AutoAckConsumerDemo[Task](client).program)
        }
      }
      .run
      .map(_ => 0)

} 
Example 82
Source File: RetryPolicyDefaults.scala    From akka-cloudpubsub   with Apache License 2.0 5 votes vote down vote up
package com.qubit.pubsub.client.retry

import java.util.concurrent.Executors

import com.gilt.gfc.concurrent.ThreadFactoryBuilder
import com.typesafe.scalalogging.LazyLogging
import io.grpc.{Status, StatusRuntimeException}

import scala.concurrent.ExecutionContext
import scala.util.Failure

object RetryPolicyDefaults extends LazyLogging {
  import atmos.dsl._
  import Slf4jSupport._

  import scala.concurrent.duration._

  private val unrecoverableErrorCodes = Set(Status.Code.PERMISSION_DENIED,
                                            Status.Code.UNAUTHENTICATED,
                                            Status.Code.INVALID_ARGUMENT)
  private val rateLimitingErrorCodes =
    Set(Status.Code.RESOURCE_EXHAUSTED, Status.Code.UNAVAILABLE)

  val retryPolicy = retryFor {
    10.attempts
  } using selectedBackoff {
    case Failure(sre: StatusRuntimeException)
        if rateLimitingErrorCodes.contains(sre.getStatus.getCode) =>
      linearBackoff { 50.seconds }
    case _ =>
      exponentialBackoff { 30.seconds } randomized 10.second -> 100.seconds
  } monitorWith {
    logger.underlying
  } onError {
    case sre: StatusRuntimeException
        if unrecoverableErrorCodes.contains(sre.getStatus.getCode) =>
      stopRetrying
  }

  val retryExecCtx = ExecutionContext.fromExecutor(
    Executors.newFixedThreadPool(
      10,
      ThreadFactoryBuilder("retry-pool", "retry-worker").build()
    ))
} 
Example 83
Source File: ThreadFactories.scala    From docspell   with GNU General Public License v3.0 5 votes vote down vote up
package docspell.common

import java.util.concurrent.ForkJoinPool
import java.util.concurrent.ForkJoinPool.ForkJoinWorkerThreadFactory
import java.util.concurrent.ForkJoinWorkerThread
import java.util.concurrent.atomic.AtomicLong
import java.util.concurrent.{Executors, ThreadFactory}

import scala.concurrent._

import cats.effect._

object ThreadFactories {

  def ofName(prefix: String): ThreadFactory =
    new ThreadFactory {

      val counter = new AtomicLong(0)

      override def newThread(r: Runnable): Thread = {
        val t = Executors.defaultThreadFactory().newThread(r)
        t.setName(s"$prefix-${counter.getAndIncrement()}")
        t
      }
    }

  def ofNameFJ(prefix: String): ForkJoinWorkerThreadFactory =
    new ForkJoinWorkerThreadFactory {
      val tf      = ForkJoinPool.defaultForkJoinWorkerThreadFactory
      val counter = new AtomicLong(0)

      def newThread(pool: ForkJoinPool): ForkJoinWorkerThread = {
        val t = tf.newThread(pool)
        t.setName(s"$prefix-${counter.getAndIncrement()}")
        t
      }
    }

  def executorResource[F[_]: Sync](
      c: => ExecutionContextExecutorService
  ): Resource[F, ExecutionContextExecutorService] =
    Resource.make(Sync[F].delay(c))(ec => Sync[F].delay(ec.shutdown))

  def cached[F[_]: Sync](
      tf: ThreadFactory
  ): Resource[F, ExecutionContextExecutorService] =
    executorResource(
      ExecutionContext.fromExecutorService(Executors.newCachedThreadPool(tf))
    )

  def fixed[F[_]: Sync](
      n: Int,
      tf: ThreadFactory
  ): Resource[F, ExecutionContextExecutorService] =
    executorResource(
      ExecutionContext.fromExecutorService(Executors.newFixedThreadPool(n, tf))
    )

  def workSteal[F[_]: Sync](
      n: Int,
      tf: ForkJoinWorkerThreadFactory
  ): Resource[F, ExecutionContextExecutorService] =
    executorResource(
      ExecutionContext.fromExecutorService(
        new ForkJoinPool(n, tf, null, true)
      )
    )

  def workSteal[F[_]: Sync](
      tf: ForkJoinWorkerThreadFactory
  ): Resource[F, ExecutionContextExecutorService] =
    workSteal[F](Runtime.getRuntime().availableProcessors() + 1, tf)
} 
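A minimal usage sketch (assumed, not part of docspell): acquiring a named cached pool as a cats-effect Resource, so the executor is shut down when the surrounding effect finishes.

import cats.effect.{IO, Resource}
import scala.concurrent.ExecutionContextExecutorService

val pool: Resource[IO, ExecutionContextExecutorService] =
  ThreadFactories.cached[IO](ThreadFactories.ofName("example-pool")) // "example-pool" is an arbitrary prefix

val program: IO[Unit] =
  pool.use(ec => IO(println(s"running on $ec"))) // the pool is shut down once use completes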
Example 84
Source File: GzipDecompressor.scala    From m3d-engine   with Apache License 2.0 5 votes vote down vote up
package com.adidas.analytics.algo

import java.util.concurrent.{Executors, TimeUnit}

import com.adidas.analytics.algo.GzipDecompressor.{changeFileExtension, compressedExtension, _}
import com.adidas.analytics.algo.core.JobRunner
import com.adidas.analytics.config.GzipDecompressorConfiguration
import com.adidas.analytics.util.DFSWrapper
import com.adidas.analytics.util.DFSWrapper._
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.{FileSystem, Path}
import org.apache.hadoop.io.IOUtils
import org.apache.hadoop.io.compress.CompressionCodecFactory
import org.apache.spark.sql.SparkSession
import org.slf4j.{Logger, LoggerFactory}

import scala.concurrent._
import scala.concurrent.duration._


final class GzipDecompressor protected(val spark: SparkSession, val dfs: DFSWrapper, val configLocation: String)
  extends JobRunner with GzipDecompressorConfiguration {

  private val hadoopConfiguration: Configuration = spark.sparkContext.hadoopConfiguration
  private val fileSystem: FileSystem = dfs.getFileSystem(inputDirectoryPath)


  override def run(): Unit = {
    //check if directory exists
    if (!fileSystem.exists(inputDirectoryPath)){
      logger.error(s"Input directory: $inputDirectoryPath does not exist.")
      throw new RuntimeException(s"Directory $inputDirectoryPath does not exist.")
    }

    val compressedFilePaths = fileSystem.ls(inputDirectoryPath, recursive)
      .filterNot(path => fileSystem.isDirectory(path))
      .filter(_.getName.toLowerCase.endsWith(compressedExtension))

    if (compressedFilePaths.isEmpty) {
      logger.warn(s"Input directory $inputDirectoryPath does not contain compressed files. Skipping...")
    } else {
      implicit val ec: ExecutionContext = ExecutionContext.fromExecutor(Executors.newFixedThreadPool(threadPoolSize))
      Await.result(Future.sequence(
        compressedFilePaths.map { compressedFilePath =>
          Future {
            logger.info(s"Decompressing file: $compressedFilePath")

            val decompressedFileName = changeFileExtension(compressedFilePath.getName, compressedExtension, outputExtension)
            val decompressedFilePath = new Path(compressedFilePath.getParent, decompressedFileName)

            val compressionCodecFactory = new CompressionCodecFactory(hadoopConfiguration)
            val inputCodec = compressionCodecFactory.getCodec(compressedFilePath)

            val inputStream = inputCodec.createInputStream(fileSystem.open(compressedFilePath))
            val output = fileSystem.create(decompressedFilePath)

            IOUtils.copyBytes(inputStream, output, hadoopConfiguration)
            logger.info(s"Finished decompressing file: $compressedFilePath")

            //Delete the compressed file
            fileSystem.delete(compressedFilePath, false)
            logger.info(s"Removed file: $compressedFilePath")
          }
        }
      ), Duration(4, TimeUnit.HOURS))
    }
  }
}


object GzipDecompressor {

  private val logger: Logger = LoggerFactory.getLogger(this.getClass)

  private val compressedExtension: String = ".gz"

  def apply(spark: SparkSession, dfs: DFSWrapper, configLocation: String): GzipDecompressor = {
    new GzipDecompressor(spark, dfs, configLocation)
  }

  private def changeFileExtension(fileName: String, currentExt: String, newExt: String): String = {
    val newFileName =  fileName.substring(0, fileName.lastIndexOf(currentExt))
    if (newFileName.endsWith(newExt)) newFileName else newFileName + newExt
  }
} 
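
Stripped of the Hadoop specifics, the pattern above is a bounded fixed pool driving a Future.sequence fan-out with a hard deadline. A self-contained sketch with illustrative file names and pool size:

import java.util.concurrent.Executors

import scala.concurrent.{Await, ExecutionContext, Future}
import scala.concurrent.duration._

object DecompressSketch extends App {
  val files = Seq("a.gz", "b.gz", "c.gz")          // illustrative inputs
  val pool  = Executors.newFixedThreadPool(2)      // at most two files decompressed at a time
  implicit val ec: ExecutionContext = ExecutionContext.fromExecutor(pool)

  val all = Future.sequence(files.map(f => Future {
    println(s"decompressing $f")                   // real work would stream bytes here
  }))
  Await.result(all, 1.minute)                      // fail loudly if the batch hangs
  pool.shutdown()                                  // let the JVM exit cleanly
}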
Example 85
Source File: H2TestableTransactor.scala    From eff   with MIT License 5 votes vote down vote up
package org.atnos.eff.addon.doobie

import java.util.concurrent.Executors

import cats.effect._
import cats.implicits._
import doobie.free.connection.{ConnectionIO, close, commit, delay, rollback, setAutoCommit}
import doobie.util.transactor.{Strategy, Transactor}
import org.h2.jdbcx.JdbcConnectionPool

import scala.concurrent.ExecutionContext

object H2TestableTransactor {

  final class OpHistory {
    var calls: List[String] = List.empty[String]

    def registerConnection(): Unit = calls :+= "connection"
    def registerBefore(): Unit     = calls :+= "before"
    def registerAfter(): Unit      = calls :+= "after"
    def incrementOops(): Unit      = calls :+= "oops"
    def registerAlways(): Unit     = calls :+= "always"
  }

  def create[M[_]: ContextShift](url: String = "jdbc:h2:mem:test;DB_CLOSE_DELAY=-1",
                   user: String = "sa",
                   pass: String = "",
                   before: ConnectionIO[Unit] = setAutoCommit(false),
                   after:  ConnectionIO[Unit] = commit,
                   oops:   ConnectionIO[Unit] = rollback,
                   always: ConnectionIO[Unit] = close)(
      implicit async: Async[M]): (Transactor[M], OpHistory) = {
    
    val pool = JdbcConnectionPool.create(url, user, pass)

    val c = new OpHistory()

    val ec = ExecutionContext.fromExecutorService(Executors.newCachedThreadPool)

    val blocker = Blocker.liftExecutionContext(ec)

    val pre = Transactor.fromDataSource.apply(pool, ec, blocker)

    val t = pre.copy(
      connect0 = con => pre.connect(con).evalTap(async.pure(_) <* async.pure(c.registerConnection())),
      strategy0 = Strategy(
        before = before.flatMap(a => delay(c.registerBefore()).map(_ => a)),
        after  = after .flatMap(a => delay(c.registerAfter()) .map(_ => a)),
        oops   = oops  .flatMap(a => delay(c.incrementOops()) .map(_ => a)),
        always = always.flatMap(a => delay(c.registerAlways()).map(_ => a))
      )
    )

    (t, c)
  }

} 
Example 86
Source File: package.scala    From ionroller   with MIT License 5 votes vote down vote up
import java.util.concurrent.{ExecutorService, Executors, ScheduledExecutorService}

import com.amazonaws.services.elasticbeanstalk.model.ConfigurationOptionSetting
import com.typesafe.scalalogging.StrictLogging
import ionroller.aws.Dynamo
import ionroller.tracking.Event
import play.api.libs.functional.syntax._
import play.api.libs.json._

import scala.concurrent.duration.FiniteDuration
import scalaz.concurrent.Task
import scalaz.{-\/, \/-}

package object ionroller extends StrictLogging {
  val ionrollerExecutorService: ExecutorService = Executors.newFixedThreadPool(4)

  implicit val `| Implicit executor service        |`: ExecutorService = ionrollerExecutorService
  implicit val ` | is disabled - define explicitly  |`: ExecutorService = ionrollerExecutorService

  implicit val timer: ScheduledExecutorService = scalaz.concurrent.Strategy.DefaultTimeoutScheduler

  def ionrollerRole(awsAccountId: String) = s"arn:aws:iam::$awsAccountId:role/ionroller"

  implicit lazy val finiteDurationFormat = {

    def applyFiniteDuration(l: Long, u: String): FiniteDuration = {
      FiniteDuration(l, u.toLowerCase)
    }

    def unapplyFiniteDuration(d: FiniteDuration): (Long, String) = {
      (d.length, d.unit.toString)
    }

    ((JsPath \ "length").format[Long] and
      (JsPath \ "unit").format[String])(applyFiniteDuration, unapplyFiniteDuration)
  }

  implicit lazy val configurationOptionSettingFormat: Format[ConfigurationOptionSetting] = {
    def applyConfigOptionSetting(ns: String, optionName: String, value: String) =
      new ConfigurationOptionSetting(ns, optionName, value)

    def unapplyConfigOptionSetting(o: ConfigurationOptionSetting): Option[(String, String, String)] = {
      for {
        ns <- Option(o.getNamespace)
        n <- Option(o.getOptionName)
        v <- Option(o.getValue)
      } yield (ns, n, v)
    }

    ((JsPath \ "Namespace").format[String] and
      (JsPath \ "OptionName").format[String] and
      (JsPath \ "Value").format[String])(applyConfigOptionSetting _, unlift(unapplyConfigOptionSetting))
  }

  def enabled(name: TimelineName) = {
    ConfigurationManager.modifyEnvironments &&
      (ConfigurationManager.modifyEnvironmentsWhitelist.isEmpty || ConfigurationManager.modifyEnvironmentsWhitelist.contains(name)) &&
      !ConfigurationManager.modifyEnvironmentsBlacklist.contains(name)
  }

  def logEvent(evt: Event) = {
    logger.info(s"$evt (enabled = ${enabled(evt.service)})")
    if (enabled(evt.service))
      Dynamo.EventLogger.log(evt)
        .flatMap({
          case \/-(s) => Task.now(())
          case -\/(f) => Task.delay(logger.error(f.getMessage, f))
        })
    else Task.now(())
  }

} 
Example 87
Source File: Counter.scala    From toketi-iothubreact   with MIT License 5 votes vote down vote up
// Copyright (c) Microsoft. All rights reserved.

package it.helpers

import java.util.concurrent.Executors

import akka.actor.{Actor, Stash}

import scala.concurrent.ExecutionContext


class Counter extends Actor with Stash {

  implicit val executionContext = ExecutionContext
    .fromExecutorService(Executors.newFixedThreadPool(sys.runtime.availableProcessors))

  private[this] var count: Long = 0

  override def receive: Receive = ready

  def ready: Receive = {
    case "reset" ⇒ {
      context.become(busy)
      count = 0
      context.become(ready)
      unstashAll()
    }
    case "inc"   ⇒ {
      context.become(busy)
      count += 1
      context.become(ready)
      unstashAll()
    }
    case "get"   ⇒ sender() ! count
  }

  def busy: Receive = {
    case _ ⇒ stash()
  }
} 
Example 88
Source File: PowerBIAuthenticationWithAuthorizationCode.scala    From spark-powerbi-connector   with Apache License 2.0 5 votes vote down vote up
package com.microsoft.azure.powerbi.authentication

import java.net.URI
import java.util.concurrent.{Executors, ExecutorService, Future}
import javax.naming.ServiceUnavailableException

import com.microsoft.aad.adal4j.{AuthenticationContext, AuthenticationResult}

case class PowerBIAuthenticationWithAuthorizationCode(powerBIAuthorityURL: String,
                                                      powerBIResourceURL: String,
                                                      powerBIClientID: String,
                                                      activeDirectoryAuthorizationCode: String,
                                                      activeDirectoryRedirectUri: URI)
  extends PowerBIAuthentication{

  def getAccessToken: String =
    if (this.accessToken != null && this.accessToken.nonEmpty) this.accessToken
    else refreshAccessToken

  def refreshAccessToken: String = retrieveToken.getAccessToken

  private def retrieveToken: AuthenticationResult = {

    var authenticationResult: AuthenticationResult = null
    var executorService: ExecutorService = null

    try {

      executorService = Executors.newFixedThreadPool(1)

      val authenticationContext: AuthenticationContext =
        new AuthenticationContext(powerBIAuthorityURL, true, executorService)

      val authenticationResultFuture: Future[AuthenticationResult] =
        authenticationContext.acquireTokenByAuthorizationCode(activeDirectoryAuthorizationCode,
          powerBIResourceURL, powerBIClientID, activeDirectoryRedirectUri, null)

      authenticationResult = authenticationResultFuture.get()
    }
    finally
    {
      executorService.shutdown()
    }

    if (authenticationResult == null) {
      throw new ServiceUnavailableException("Authentication result empty")
    }

    this.accessToken = authenticationResult.getAccessToken

    authenticationResult
  }

  private var accessToken: String = _
} 
Example 89
Source File: PowerBIAuthenticationWithUsernamePassword.scala    From spark-powerbi-connector   with Apache License 2.0 5 votes vote down vote up
package com.microsoft.azure.powerbi.authentication

import java.net.URI
import java.util.concurrent.{Executors, ExecutorService, Future}
import javax.naming.ServiceUnavailableException

import com.microsoft.aad.adal4j.{AuthenticationContext, AuthenticationResult}

case class PowerBIAuthenticationWithUsernamePassword(powerBIAuthorityURL: String,
                                                     powerBIResourceURL: String,
                                                     powerBIClientID: String,
                                                     activeDirectoryUsername: String,
                                                     activeDirectoryPassword: String)
  extends PowerBIAuthentication{

  def getAccessToken: String =
    if (this.accessToken != null && this.accessToken.nonEmpty) this.accessToken
    else refreshAccessToken

  def refreshAccessToken: String = retrieveToken.getAccessToken

  private def retrieveToken: AuthenticationResult = {

    var authenticationResult: AuthenticationResult = null
    var executorService: ExecutorService = null

    try {

      executorService = Executors.newFixedThreadPool(1)

      val authenticationContext: AuthenticationContext =
        new AuthenticationContext(powerBIAuthorityURL, true, executorService)

      val authenticationResultFuture: Future[AuthenticationResult] =
        authenticationContext.acquireToken(powerBIResourceURL, powerBIClientID,
          activeDirectoryUsername, activeDirectoryPassword, null)

      authenticationResult = authenticationResultFuture.get()
    }
    finally
    {
      executorService.shutdown()
    }

    if (authenticationResult == null) {
      throw new ServiceUnavailableException("Authentication result empty")
    }

    this.accessToken = authenticationResult.getAccessToken

    authenticationResult
  }

  private var accessToken: String = _
} 
Example 90
Source File: SchedulerExecutionContext.scala    From kinesis-stream   with MIT License 5 votes vote down vote up
package px.kinesis.stream.consumer

import java.util.concurrent.Executors

import com.google.common.util.concurrent.ThreadFactoryBuilder

import scala.concurrent.ExecutionContext

object SchedulerExecutionContext {

  lazy val Global = SchedulerExecutionContext("KinesisScheduler")

  def apply(name: String): ExecutionContext =
    ExecutionContext.fromExecutor(
      Executors.newCachedThreadPool(
        new ThreadFactoryBuilder()
          .setNameFormat(s"$name-%04d")
          .setDaemon(true)
          .build
      )
    )
} 
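
A short usage sketch: because the pool threads are daemonized and named, work submitted here shows up as KinesisScheduler-0001 and so on in thread dumps and does not keep the JVM alive (SchedulerUsage is an illustrative name).

import scala.concurrent.{ExecutionContext, Future}

import px.kinesis.stream.consumer.SchedulerExecutionContext

object SchedulerUsage {
  implicit val ec: ExecutionContext = SchedulerExecutionContext.Global  // or SchedulerExecutionContext("my-pool")

  def checkpoint(): Future[Boolean] = Future {
    true                                                                // lease/checkpoint work would go here
  }
}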
Example 91
Source File: SingleThreadedActorSystem.scala    From graphcool-framework   with Apache License 2.0 5 votes vote down vote up
package cool.graph.akkautil

import java.util.concurrent.atomic.AtomicLong
import java.util.concurrent.{Executors, ThreadFactory}

import akka.actor.ActorSystem

object SingleThreadedActorSystem {
  def apply(name: String): ActorSystem = {
    val ec = scala.concurrent.ExecutionContext.fromExecutor(Executors.newSingleThreadExecutor(newNamedThreadFactory(name)))
    ActorSystem(name, defaultExecutionContext = Some(ec))
  }

  def newNamedThreadFactory(name: String): ThreadFactory = new ThreadFactory {
    val count = new AtomicLong(0)

    override def newThread(runnable: Runnable): Thread = {
      val thread = new Thread(runnable)
      thread.setDaemon(true)
      thread.setName(s"$name-" + count.getAndIncrement)
      thread
    }
  }
} 
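
A usage sketch: every actor created under this system shares one named daemon thread, so message processing is fully serialized (names below are illustrative).

import akka.actor.{Actor, Props}

import cool.graph.akkautil.SingleThreadedActorSystem

object SingleThreadUsage extends App {
  val system = SingleThreadedActorSystem("scheduler")       // default dispatcher runs on "scheduler-0"
  val echo = system.actorOf(Props(new Actor {
    def receive = { case msg => println(s"[$self] got $msg") }
  }))
  echo ! "ping"
}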
Example 92
Source File: PlainRabbit.scala    From graphcool-framework   with Apache License 2.0 5 votes vote down vote up
package cool.graph.rabbit

import java.util.concurrent.{Executors, ThreadFactory}

import scala.util.Try
import com.rabbitmq.client.{ConnectionFactory, Channel => RabbitChannel}
import cool.graph.bugsnag.BugSnagger

object PlainRabbit {
  def connect(name: String, amqpUri: String, numberOfThreads: Int, qos: Option[Int])(implicit bugSnag: BugSnagger): Try[RabbitChannel] = Try {

    val threadFactory: ThreadFactory = Utils.newNamedThreadFactory(name)
    val factory = {
      val f       = new ConnectionFactory()
      val timeout = sys.env.getOrElse("RABBIT_TIMEOUT_MS", "500").toInt
      f.setUri(amqpUri)
      f.setConnectionTimeout(timeout)
      f.setExceptionHandler(RabbitExceptionHandler(bugSnag))
      f.setThreadFactory(threadFactory)
      f.setAutomaticRecoveryEnabled(true)
      f
    }
    val executor   = Executors.newFixedThreadPool(numberOfThreads, threadFactory)
    val connection = factory.newConnection(executor)
    val theQos     = qos.orElse(sys.env.get("RABBIT_CHANNEL_QOS").map(_.toInt)).getOrElse(500)
    val chan       = connection.createChannel()
    chan.basicQos(theQos)
    chan
  }
} 
Example 93
Source File: DelayedFuture.scala    From ncdbg   with BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
package com.programmaticallyspeaking.ncd.infra

import java.util.concurrent.{Executors, TimeUnit}

import scala.concurrent.duration.{Duration, FiniteDuration}
import scala.concurrent.{CanAwait, ExecutionContext, Future, Promise}
import scala.util.Try

class CancellableFuture[+A](fut: Future[A], canceller: () => Unit) extends Future[A] {
  override def onComplete[U](f: (Try[A]) => U)(implicit executor: ExecutionContext): Unit = fut.onComplete(f)
  override def isCompleted: Boolean = fut.isCompleted
  override def value: Option[Try[A]] = fut.value
  override def transform[S](f: (Try[A]) => Try[S])(implicit executor: ExecutionContext): Future[S] = fut.transform(f)
  override def transformWith[S](f: (Try[A]) => Future[S])(implicit executor: ExecutionContext): Future[S] = fut.transformWith(f)
  override def ready(atMost: Duration)(implicit permit: CanAwait): this.type = this
  override def result(atMost: Duration)(implicit permit: CanAwait): A = fut.result(atMost)

  def cancel(): Unit = canceller()
}

object DelayedFuture {
  private def executor = Executors.newSingleThreadScheduledExecutor()

  def apply[R](delay: FiniteDuration)(fun: => R)(implicit executionContext: ExecutionContext): CancellableFuture[R] = {
    val resultPromise = Promise[R]()
    var isCancelled = false
    executor.schedule(new Runnable {
      override def run(): Unit = {
        if (!isCancelled)
          resultPromise.completeWith(Future(fun))
      }
    }, delay.toMillis, TimeUnit.MILLISECONDS)
    def cancel(): Unit = isCancelled = true
    new CancellableFuture[R](resultPromise.future, () => cancel())
  }
} 
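
A usage sketch: the returned CancellableFuture completes with the body's result once the delay elapses, and cancel() skips the body if it has not started yet (DelayedFutureUsage is an illustrative name).

import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.duration._

import com.programmaticallyspeaking.ncd.infra.{CancellableFuture, DelayedFuture}

object DelayedFutureUsage {
  def delayedAnswer(): CancellableFuture[Int] = {
    val f = DelayedFuture(2.seconds) { 41 + 1 }      // body runs roughly two seconds from now
    f.foreach(n => println(s"computed $n"))
    // calling f.cancel() before the delay elapses would skip the computation entirely
    f
  }
}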
Example 94
Source File: WebSocketServer.scala    From ncdbg   with BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
package com.programmaticallyspeaking.ncd.chrome.net

import java.net.InetAddress
import java.util.concurrent.Executors

import akka.actor.ActorSystem
import com.programmaticallyspeaking.ncd.chrome.domains.DomainFactory
import com.programmaticallyspeaking.ncd.chrome.net.Protocol.Message
import com.programmaticallyspeaking.ncd.infra.ObjectMapping.{fromJson, toJson}
import com.programmaticallyspeaking.ncd.messaging.Observer
import com.programmaticallyspeaking.tinyws.Server
import com.programmaticallyspeaking.tinyws.Server.{LogLevel, WebSocketClient}
import org.slf4s.Logging

class WebSocketServer(domainFactory: DomainFactory, fileServer: Option[FileServer])(implicit system: ActorSystem) extends Logging {

  private val chromeServerFactory = new ChromeServerFactory(domainFactory)
  private val executor = Executors.newCachedThreadPool()
  private var server: Server = _

  def start(host: String, port: Int): Unit = {
    val addr = InetAddress.getByName(host)
    server = new Server(executor, Server.Options.withPort(port).andAddress(addr).andLogger(new TinyWSLogger))
    server.addHandlerFactory("/dbg", () => new Handler)
    fileServer.foreach(server.setFallbackHandler)
    server.start()
  }

  def stop(): Unit = {
    Option(server).foreach(_.stop())
    server = null
  }

  class Handler extends Server.WebSocketHandler with Logging {
    private var theClient: WebSocketClient = _
    private var chromeServer: Option[ChromeServer] = None

    override def onOpened(client: WebSocketClient): Unit = {
      theClient = client
      val cs = chromeServerFactory.create()
      cs.connect().subscribe(new Observer[Message] {
        override def onError(error: Throwable) = {
          log.error("Closing due to error", error)
          theClient.close()
        }

        override def onComplete() = {
          log.debug("Disconnecting client")
          theClient.close()
        }

        override def onNext(item: Message) = {
          val json = toJson(item)
          log.trace(s"Sending over websocket: $json")
          theClient.sendTextMessage(json)
        }
      })
      chromeServer = Some(cs)
    }

    override def onClosedByClient(code: Int, reason: String): Unit = {
      log.trace(s"DevTools client closed the connection: $code ($reason)")
      chromeServer.foreach(_.disconnect())
    }

    override def onFailure(t: Throwable): Unit = {
      log.error("WebSocket error", t)
      chromeServer.foreach(_.disconnect())
    }

    override def onTextMessage(text: CharSequence): Unit = {
      val msg = fromJson[Protocol.IncomingMessage](text.toString)
      chromeServer.foreach(_.sendMessage(msg))
    }

    override def onBinaryData(data: Array[Byte]): Unit = {
      log.warn("Binary data ignored!")
    }

    override def onClosedByServer(code: Int, reason: String): Unit = {}
  }

  class TinyWSLogger extends Server.Logger {
    override def log(level: LogLevel, message: String, error: Throwable): Unit = level match {
      case LogLevel.INFO => WebSocketServer.this.log.info(message, error)
      case LogLevel.WARN => WebSocketServer.this.log.warn(message, error)
      case LogLevel.ERROR => WebSocketServer.this.log.error(message, error)
      case _ => // don't bother with trace or debug logging
    }

    override def isEnabledAt(level: LogLevel): Boolean = level match {
      case LogLevel.INFO => WebSocketServer.this.log.underlying.isInfoEnabled
      case LogLevel.WARN => WebSocketServer.this.log.underlying.isWarnEnabled
      case LogLevel.ERROR => WebSocketServer.this.log.underlying.isErrorEnabled
      case _ => false // don't bother with trace or debug logging
    }
  }
} 
Example 95
Source File: MultipleContainersParallelExecution.scala    From CM-Well   with Apache License 2.0 5 votes vote down vote up
package cmwell.util.testSuitHelpers.test

import java.util.concurrent.Executors

import com.dimafeng.testcontainers.lifecycle.TestLifecycleAware
import com.dimafeng.testcontainers.{Container, LazyContainer}
import org.junit.runner.Description
import org.testcontainers.lifecycle.TestDescription

import scala.concurrent.{Await, ExecutionContext, Future}

class MultipleContainersParallelExecution private(containers: Seq[LazyContainer[_]]) extends Container with TestLifecycleAware {
  implicit val ec = ExecutionContext.fromExecutor(Executors.newWorkStealingPool(10))
  import scala.concurrent.duration._


  override def beforeTest(description: TestDescription): Unit = {
    containers.foreach(_.beforeTest(description))
  }

  override def afterTest(description: TestDescription, throwable: Option[Throwable]): Unit = {
    containers.foreach(_.afterTest(description, throwable))
  }

  override def start(): Unit = {
    val f = Future.traverse(containers)(lazyContainer => Future(lazyContainer.start()))
    Await.ready(f, 5.minutes)
  }

  override def stop(): Unit = {
    val f = Future.traverse(containers)(lazyContainer => Future(lazyContainer.stop()))
    Await.ready(f, 5.minutes)
  }
}

object MultipleContainersParallelExecution {

  def apply(containers: LazyContainer[_]*): MultipleContainersParallelExecution = new MultipleContainersParallelExecution(containers)
} 
Example 96
Source File: S3ObjectUploader.scala    From CM-Well   with Apache License 2.0 5 votes vote down vote up
package cmwell.tools.neptune.export

import java.io._
import java.util
import java.util.concurrent.{Executors, TimeoutException}
import java.util.stream.Collectors
import java.util.{Collections, Vector}

import com.amazonaws.auth.profile.ProfileCredentialsProvider
import com.amazonaws.services.s3.AmazonS3ClientBuilder
import com.amazonaws.services.s3.model.{ObjectMetadata, PutObjectRequest}
import com.amazonaws.{AmazonServiceException, ClientConfiguration, Protocol, SdkClientException}
import org.apache.commons.io.{FileUtils, IOUtils}
import org.slf4j.LoggerFactory

import scala.concurrent.{Await, ExecutionContext, Future}
import scala.concurrent.duration.{FiniteDuration, _}

object S3ObjectUploader{

  val executor = Executors.newFixedThreadPool(1)
  implicit val ec: ExecutionContext = scala.concurrent.ExecutionContext.fromExecutor(executor)
  protected lazy val logger = LoggerFactory.getLogger("s3_uploader")


  def init(proxyHost:Option[String], proxyPort:Option[Int]) = {
    val clientRegion = "us-east-1"
    val config = new ClientConfiguration
    config.setProtocol(Protocol.HTTPS)
    proxyHost.foreach(host => config.setProxyHost(host))
    proxyPort.foreach(port =>  config.setProxyPort(port))
    val s3Client = AmazonS3ClientBuilder.standard()
      .withRegion(clientRegion)
      .withClientConfiguration(config)
      .withCredentials(new ProfileCredentialsProvider())
      .build()
    s3Client
  }


  def persistChunkToS3Bucket(chunkData:String, fileName:String, proxyHost:Option[String], proxyPort:Option[Int], s3Directory:String) = {
        try{
          init(proxyHost, proxyPort).putObject(s3Directory, fileName, chunkData)
      }
      catch {
        case e: AmazonServiceException =>
          e.printStackTrace()
          throw e
        case e: SdkClientException =>
          e.printStackTrace()
          throw e
      }
  }

  def persistChunkToS3Bucket(tmpFile:File, proxyHost:Option[String], proxyPort:Option[Int], s3Directory:String, retryCount:Int = 3):Unit = {
    try{
      val s3UploadTask = Future{init(proxyHost, proxyPort).putObject(s3Directory, tmpFile.getName, tmpFile)}(ec)
      Await.result(s3UploadTask,  5.minutes)
      tmpFile.delete()
    }
    catch {
      case e:TimeoutException =>
        if(retryCount > 0) {
          logger.error("S3 upload task run more than 5 minutes..Going to retry")
          persistChunkToS3Bucket(tmpFile, proxyHost, proxyPort, s3Directory, retryCount-1)
        }
        else{
          throw new Exception( "S3 upload task duration was more than 5 minutes")
        }
      case e: AmazonServiceException =>
        e.printStackTrace()
        throw e
      case e: SdkClientException =>
        e.printStackTrace()
        throw e
    }
  }

} 
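
The timeout-and-retry shape above, minus the AWS specifics, reduces to the following sketch (the upload body, pool size and limits are placeholders):

import java.util.concurrent.{Executors, TimeoutException}

import scala.concurrent.{Await, ExecutionContext, Future}
import scala.concurrent.duration._

object UploadWithRetry {
  private val executor = Executors.newFixedThreadPool(1)
  implicit val ec: ExecutionContext = ExecutionContext.fromExecutor(executor)

  def upload(attemptsLeft: Int = 3): Unit =
    try Await.result(Future { /* putObject call would go here */ }, 5.minutes)
    catch {
      case _: TimeoutException if attemptsLeft > 0 =>
        println("upload took longer than 5 minutes, retrying")
        upload(attemptsLeft - 1)
    }
}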
Example 97
Source File: EventHubReceiver.scala    From toketi-kafka-connect-iothub   with MIT License 5 votes vote down vote up
// Copyright (c) Microsoft. All rights reserved.

package com.microsoft.azure.iot.kafka.connect.source

import java.time.{Duration, Instant}
import java.util.concurrent.Executors

import com.microsoft.azure.eventhubs.{EventHubClient, EventPosition, PartitionReceiver}

import scala.collection.JavaConverters._
import scala.collection.mutable.ListBuffer

class EventHubReceiver(val connectionString: String, val receiverConsumerGroup: String, val partition: String,
    var offset: Option[String], val startTime: Option[Instant], val receiveTimeout: Duration) extends DataReceiver {

  private[this] var isClosing = false

  private val executorService = Executors.newSingleThreadExecutor()
  private val eventHubClient = EventHubClient.createSync(connectionString, executorService)
  if (eventHubClient == null) {
    throw new IllegalArgumentException("Unable to create EventHubClient from the input parameters.")
  }

  private val eventPosition = if (startTime.isDefined) {
    EventPosition.fromEnqueuedTime(startTime.get)
  }  else {
    EventPosition.fromOffset(offset.get)
  }
  private val eventHubReceiver: PartitionReceiver = eventHubClient.createReceiverSync(
    receiverConsumerGroup, partition.toString, eventPosition)
  if (this.eventHubReceiver == null) {
    throw new IllegalArgumentException("Unable to create PartitionReceiver from the input parameters.")
  }
  this.eventHubReceiver.setReceiveTimeout(receiveTimeout)

  override def close(): Unit = {
    if (this.eventHubReceiver != null) {
      this.eventHubReceiver.synchronized {
        this.isClosing = true
        eventHubReceiver.close().join()
      }
    }
  }

  override def receiveData(batchSize: Int): Iterable[IotMessage] = {
    var iotMessages = ListBuffer.empty[IotMessage]
    var curBatchSize = batchSize
    var endReached = false
    // Synchronize on the eventHubReceiver object, and make sure the task is not closing,
    // in which case, the eventHubReceiver might be closed.
    while (curBatchSize > 0 && !endReached && !this.isClosing) {
      this.eventHubReceiver.synchronized {
        if (!this.isClosing) {
          val batch = this.eventHubReceiver.receiveSync(curBatchSize)
          if (batch != null) {
            val batchIterable = batch.asScala
            iotMessages ++= batchIterable.map(e => {
              val content = new String(e.getBytes)
              val iotDeviceData = IotMessage(content, e.getSystemProperties.asScala, e.getProperties.asScala)
              iotDeviceData
            })
            curBatchSize -= batchIterable.size
          } else {
            endReached = true
          }
        }
      }
    }
    iotMessages
  }
} 
Example 98
Source File: AllModules.scala    From akka-http-microservice-templates   with MIT License 5 votes vote down vote up
package modules

import java.util.concurrent.Executors

import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import com.softwaremill.macwire._
import com.typesafe.config.{Config, ConfigFactory}
import endpoints.{Endpoints, HealthCheckEndpoint, UserEndpoint}
import models.repository.UserRepository
import scalikejdbc.{AutoSession, ConnectionPool, ConnectionPoolSettings, DBSession}

import scala.concurrent.ExecutionContext

class AllModules extends EndpointModule

trait EndpointModule extends AkkaModule with RepositoryModule {
  lazy val healthCheckEndpoint = wire[HealthCheckEndpoint]
  lazy val userEndpoint = wire[UserEndpoint]

  lazy val endpoints = wire[Endpoints]
}

trait ScalikeJDBCModule extends ConfigModule {
  Class.forName(config.getString("jdbc.driver"))

  val connectionPoolSettings = ConnectionPoolSettings(
    initialSize = 1,
    maxSize = config.getInt("jdbc.maxConnections")
  )

  ConnectionPool.singleton(config.getString("jdbc.url"), config.getString("jdbc.username"), config.getString("jdbc.password"), connectionPoolSettings)

  lazy val session: DBSession = AutoSession
  lazy val databaseExecutorContext: ExecutionContext =
    ExecutionContext.fromExecutorService(Executors.newFixedThreadPool(config.getInt("jdbc.maxConnections")))
}

trait RepositoryModule extends ScalikeJDBCModule {
  lazy val userRepository = wire[UserRepository]
}

trait AkkaModule {
  implicit lazy val system = ActorSystem("simpleHttpServerJson")
  implicit lazy val materializer = ActorMaterializer()
  implicit lazy val executor: ExecutionContext = system.dispatcher
}

trait ConfigModule {
  lazy val config: Config = ConfigFactory.load()
} 
Example 99
Source File: MainApp.scala    From kafka-lag-exporter   with Apache License 2.0 5 votes vote down vote up
package com.lightbend.kafkalagexporter

import java.util.concurrent.Executors

import akka.actor.typed.ActorSystem
import com.typesafe.config.{Config, ConfigFactory}
import io.prometheus.client.CollectorRegistry
import io.prometheus.client.exporter.HTTPServer

import scala.concurrent.duration._
import scala.concurrent.{Await, ExecutionContext}

object MainApp extends App {
  val system = start()

  // Add shutdown hook to respond to SIGTERM and gracefully shutdown the actor system
  sys.ShutdownHookThread {
    system ! KafkaClusterManager.Stop
    Await.result(system.whenTerminated, 10 seconds)
  }

  def start(config: Config = ConfigFactory.load()): ActorSystem[KafkaClusterManager.Message] = {
    // Cached thread pool for various Kafka calls for non-blocking I/O
    val kafkaClientEc = ExecutionContext.fromExecutor(Executors.newCachedThreadPool())

    val appConfig = AppConfig(config)

    val clientCreator = (cluster: KafkaCluster) =>
      KafkaClient(cluster, appConfig.clientGroupId, appConfig.clientTimeout)(kafkaClientEc)
    var endpointCreators : List[KafkaClusterManager.NamedCreator] = List()
    appConfig.prometheusConfig.foreach { prometheus =>
      val prometheusCreator = KafkaClusterManager.NamedCreator(
        "prometheus-lag-reporter", 
        (() => PrometheusEndpointSink(
          Metrics.definitions, appConfig.metricWhitelist, appConfig.clustersGlobalLabels(), new HTTPServer(prometheus.port), CollectorRegistry.defaultRegistry
        ))
      )
      endpointCreators = prometheusCreator :: endpointCreators
    }
    appConfig.graphiteConfig.foreach { _ =>
      val graphiteCreator = KafkaClusterManager.NamedCreator(
        "graphite-lag-reporter",
        (() => GraphiteEndpointSink(appConfig.metricWhitelist, appConfig.clustersGlobalLabels(), appConfig.graphiteConfig)))
      endpointCreators = graphiteCreator :: endpointCreators
    }
    ActorSystem(
      KafkaClusterManager.init(appConfig, endpointCreators, clientCreator), "kafka-lag-exporter")
  }
} 
Example 100
Source File: StoragePerfTester.scala    From iolap   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.tools

import java.util.concurrent.{CountDownLatch, Executors}
import java.util.concurrent.atomic.AtomicLong

import org.apache.spark.executor.ShuffleWriteMetrics
import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.serializer.KryoSerializer
import org.apache.spark.shuffle.hash.HashShuffleManager
import org.apache.spark.util.Utils


object StoragePerfTester {
  def main(args: Array[String]): Unit = {
    // Total amount of data to generate, spread evenly across the map tasks
    val dataSizeMb = Utils.memoryStringToMb(sys.env.getOrElse("OUTPUT_DATA", "1g"))

    // Number of map tasks that execute concurrently
    val numMaps = sys.env.get("NUM_MAPS").map(_.toInt).getOrElse(8)

    // Number of reduce splits for each map task
    val numOutputSplits = sys.env.get("NUM_REDUCERS").map(_.toInt).getOrElse(500)

    val recordLength = 1000 // ~1KB records
    val totalRecords = dataSizeMb * 1000
    val recordsPerMap = totalRecords / numMaps

    val writeKey = "1" * (recordLength / 2)
    val writeValue = "1" * (recordLength / 2)
    val executor = Executors.newFixedThreadPool(numMaps)

    val conf = new SparkConf()
      .set("spark.shuffle.compress", "false")
      .set("spark.shuffle.sync", "true")
      .set("spark.shuffle.manager", "org.apache.spark.shuffle.hash.HashShuffleManager")

    // This is only used to instantiate a BlockManager. All thread scheduling is done manually.
    val sc = new SparkContext("local[4]", "Write Tester", conf)
    val hashShuffleManager = sc.env.shuffleManager.asInstanceOf[HashShuffleManager]

    def writeOutputBytes(mapId: Int, total: AtomicLong): Unit = {
      val shuffle = hashShuffleManager.shuffleBlockResolver.forMapTask(1, mapId, numOutputSplits,
        new KryoSerializer(sc.conf), new ShuffleWriteMetrics())
      val writers = shuffle.writers
      for (i <- 1 to recordsPerMap) {
        writers(i % numOutputSplits).write(writeKey, writeValue)
      }
      writers.map { w =>
        w.commitAndClose()
        total.addAndGet(w.fileSegment().length)
      }

      shuffle.releaseWriters(true)
    }

    val start = System.currentTimeMillis()
    val latch = new CountDownLatch(numMaps)
    val totalBytes = new AtomicLong()
    for (task <- 1 to numMaps) {
      executor.submit(new Runnable() {
        override def run(): Unit = {
          try {
            writeOutputBytes(task, totalBytes)
            latch.countDown()
          } catch {
            case e: Exception =>
              println("Exception in child thread: " + e + " " + e.getMessage)
              System.exit(1)
          }
        }
      })
    }
    latch.await()
    val end = System.currentTimeMillis()
    val time = (end - start) / 1000.0
    val bytesPerSecond = totalBytes.get() / time
    val bytesPerFile = (totalBytes.get() / (numOutputSplits * numMaps.toDouble)).toLong

    System.err.println("files_total\t\t%s".format(numMaps * numOutputSplits))
    System.err.println("bytes_per_file\t\t%s".format(Utils.bytesToString(bytesPerFile)))
    System.err.println("agg_throughput\t\t%s/s".format(Utils.bytesToString(bytesPerSecond.toLong)))

    executor.shutdown()
    sc.stop()
  }
} 
Example 101
Source File: ExecutorDelegationTokenUpdater.scala    From iolap   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.deploy.yarn

import java.util.concurrent.{Executors, TimeUnit}

import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.{FileSystem, Path}
import org.apache.hadoop.security.{Credentials, UserGroupInformation}

import org.apache.spark.deploy.SparkHadoopUtil
import org.apache.spark.{Logging, SparkConf}
import org.apache.spark.util.{ThreadUtils, Utils}

import scala.util.control.NonFatal

private[spark] class ExecutorDelegationTokenUpdater(
    sparkConf: SparkConf,
    hadoopConf: Configuration) extends Logging {

  @volatile private var lastCredentialsFileSuffix = 0

  private val credentialsFile = sparkConf.get("spark.yarn.credentials.file")

  private val delegationTokenRenewer =
    Executors.newSingleThreadScheduledExecutor(
      ThreadUtils.namedThreadFactory("Delegation Token Refresh Thread"))

  // On the executor, this thread wakes up and picks up new tokens from HDFS, if any.
  private val executorUpdaterRunnable =
    new Runnable {
      override def run(): Unit = Utils.logUncaughtExceptions(updateCredentialsIfRequired())
    }

  def updateCredentialsIfRequired(): Unit = {
    try {
      val credentialsFilePath = new Path(credentialsFile)
      val remoteFs = FileSystem.get(hadoopConf)
      SparkHadoopUtil.get.listFilesSorted(
        remoteFs, credentialsFilePath.getParent,
        credentialsFilePath.getName, SparkHadoopUtil.SPARK_YARN_CREDS_TEMP_EXTENSION)
        .lastOption.foreach { credentialsStatus =>
        val suffix = SparkHadoopUtil.get.getSuffixForCredentialsPath(credentialsStatus.getPath)
        if (suffix > lastCredentialsFileSuffix) {
          logInfo("Reading new delegation tokens from " + credentialsStatus.getPath)
          val newCredentials = getCredentialsFromHDFSFile(remoteFs, credentialsStatus.getPath)
          lastCredentialsFileSuffix = suffix
          UserGroupInformation.getCurrentUser.addCredentials(newCredentials)
          logInfo("Tokens updated from credentials file.")
        } else {
          // Check every hour to see if new credentials arrived.
          logInfo("Updated delegation tokens were expected, but the driver has not updated the " +
            "tokens yet, will check again in an hour.")
          delegationTokenRenewer.schedule(executorUpdaterRunnable, 1, TimeUnit.HOURS)
          return
        }
      }
      val timeFromNowToRenewal =
        SparkHadoopUtil.get.getTimeFromNowToRenewal(
          sparkConf, 0.8, UserGroupInformation.getCurrentUser.getCredentials)
      if (timeFromNowToRenewal <= 0) {
        executorUpdaterRunnable.run()
      } else {
        logInfo(s"Scheduling token refresh from HDFS in $timeFromNowToRenewal millis.")
        delegationTokenRenewer.schedule(
          executorUpdaterRunnable, timeFromNowToRenewal, TimeUnit.MILLISECONDS)
      }
    } catch {
      // Since the file may get deleted while we are reading it, catch the Exception and come
      // back in an hour to try again
      case NonFatal(e) =>
        logWarning("Error while trying to update credentials, will try again in 1 hour", e)
        delegationTokenRenewer.schedule(executorUpdaterRunnable, 1, TimeUnit.HOURS)
    }
  }

  private def getCredentialsFromHDFSFile(remoteFs: FileSystem, tokenPath: Path): Credentials = {
    val stream = remoteFs.open(tokenPath)
    try {
      val newCredentials = new Credentials()
      newCredentials.readTokenStorageStream(stream)
      newCredentials
    } finally {
      stream.close()
    }
  }

  def stop(): Unit = {
    delegationTokenRenewer.shutdown()
  }

} 
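
The re-arming single-thread scheduler used above can be reduced to this standalone sketch: each run decides when the next one happens (SelfReschedulingTask is an illustrative name).

import java.util.concurrent.{Executors, TimeUnit}

object SelfReschedulingTask {
  private val scheduler = Executors.newSingleThreadScheduledExecutor()

  private lazy val refresh: Runnable = new Runnable {
    override def run(): Unit = {
      println("refreshing credentials")                 // stand-in for the HDFS token check
      scheduler.schedule(refresh, 1, TimeUnit.HOURS)     // re-arm after each run
    }
  }

  def start(): Unit = scheduler.schedule(refresh, 0, TimeUnit.SECONDS)
  def stop(): Unit = scheduler.shutdown()
}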
Example 102
Source File: FixedParallelSuite.scala    From scala-debugger   with Apache License 2.0 5 votes vote down vote up
package org.scaladebugger.test.helpers

import java.util.concurrent.{ExecutorService, Executors, ThreadFactory}

import FixedParallelSuite._

object FixedParallelSuite {
  lazy val DefaultExecutorService = Executors.newFixedThreadPool(
    ControlledParallelSuite.calculatePoolSize(),
    ControlledParallelSuite.threadFactory
  )
}


trait FixedParallelSuite extends ControlledParallelSuite {
  protected lazy val executorService = DefaultExecutorService

  override protected def newExecutorService(
    poolSize: Int,
    threadFactory: ThreadFactory
  ): ExecutorService = {
    executorService
  }
} 
Example 103
Source File: SparkSQLSessionManager.scala    From spark1.52   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.sql.hive.thriftserver

import java.util.concurrent.Executors

import org.apache.commons.logging.Log
import org.apache.hadoop.hive.conf.HiveConf
import org.apache.hadoop.hive.conf.HiveConf.ConfVars
import org.apache.hive.service.cli.SessionHandle
import org.apache.hive.service.cli.session.SessionManager
import org.apache.hive.service.cli.thrift.TProtocolVersion
import org.apache.hive.service.server.HiveServer2

import org.apache.spark.sql.hive.HiveContext
import org.apache.spark.sql.hive.thriftserver.ReflectionUtils._
import org.apache.spark.sql.hive.thriftserver.server.SparkSQLOperationManager


private[hive] class SparkSQLSessionManager(hiveServer: HiveServer2, hiveContext: HiveContext)
  extends SessionManager(hiveServer)
  with ReflectedCompositeService {

  private lazy val sparkSqlOperationManager = new SparkSQLOperationManager(hiveContext)

  override def init(hiveConf: HiveConf) {
    setSuperField(this, "hiveConf", hiveConf)

    val backgroundPoolSize = hiveConf.getIntVar(ConfVars.HIVE_SERVER2_ASYNC_EXEC_THREADS)
    // Blocking queue used to hold tasks that are waiting to be executed.
    // LinkedBlockingQueue: a linked-list-based blocking queue that orders elements FIFO (first in, first out);
    // its throughput is usually higher than ArrayBlockingQueue's. Executors.newFixedThreadPool() uses this queue.
    setSuperField(this, "backgroundOperationPool", Executors.newFixedThreadPool(backgroundPoolSize))
    getAncestorField[Log](this, 3, "LOG").info(
      s"HiveServer2: Async execution pool size $backgroundPoolSize")

    setSuperField(this, "operationManager", sparkSqlOperationManager)
    addService(sparkSqlOperationManager)

    initCompositeService(hiveConf)
  }

  override def openSession(
      protocol: TProtocolVersion,
      username: String,
      passwd: String,
      ipAddress: String,
      sessionConf: java.util.Map[String, String],
      withImpersonation: Boolean,
      delegationToken: String): SessionHandle = {
    hiveContext.openSession()
    val sessionHandle =
      super.openSession(protocol, username, passwd, ipAddress, sessionConf, withImpersonation,
          delegationToken)
    val session = super.getSession(sessionHandle)
    HiveThriftServer2.listener.onSessionCreated(
      session.getIpAddress, sessionHandle.getSessionId.toString, session.getUsername)
    sessionHandle
  }

  override def closeSession(sessionHandle: SessionHandle) {
    HiveThriftServer2.listener.onSessionClosed(sessionHandle.getSessionId.toString)
    super.closeSession(sessionHandle)
    sparkSqlOperationManager.sessionToActivePool -= sessionHandle

    hiveContext.detachSession()
  }
} 
Example 104
Source File: ExecutorDelegationTokenUpdater.scala    From spark1.52   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.deploy.yarn

import java.util.concurrent.{Executors, TimeUnit}

import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.{FileSystem, Path}
import org.apache.hadoop.security.{Credentials, UserGroupInformation}

import org.apache.spark.deploy.SparkHadoopUtil
import org.apache.spark.{Logging, SparkConf}
import org.apache.spark.util.{ThreadUtils, Utils}

import scala.util.control.NonFatal

private[spark] class ExecutorDelegationTokenUpdater(
    sparkConf: SparkConf,
    hadoopConf: Configuration) extends Logging {

  @volatile private var lastCredentialsFileSuffix = 0

  private val credentialsFile = sparkConf.get("spark.yarn.credentials.file")
  private val freshHadoopConf =
    SparkHadoopUtil.get.getConfBypassingFSCache(
      hadoopConf, new Path(credentialsFile).toUri.getScheme)

  private val delegationTokenRenewer =
    Executors.newSingleThreadScheduledExecutor(
      ThreadUtils.namedThreadFactory("Delegation Token Refresh Thread"))

  // On the executor, this thread wakes up and picks up new tokens from HDFS, if any.
  private val executorUpdaterRunnable =
    new Runnable {
      override def run(): Unit = Utils.logUncaughtExceptions(updateCredentialsIfRequired())
    }

  def updateCredentialsIfRequired(): Unit = {
    try {
      val credentialsFilePath = new Path(credentialsFile)
      val remoteFs = FileSystem.get(freshHadoopConf)
      SparkHadoopUtil.get.listFilesSorted(
        remoteFs, credentialsFilePath.getParent,
        credentialsFilePath.getName, SparkHadoopUtil.SPARK_YARN_CREDS_TEMP_EXTENSION)
        .lastOption.foreach { credentialsStatus =>
        val suffix = SparkHadoopUtil.get.getSuffixForCredentialsPath(credentialsStatus.getPath)
        if (suffix > lastCredentialsFileSuffix) {
          logInfo("Reading new delegation tokens from " + credentialsStatus.getPath)
          val newCredentials = getCredentialsFromHDFSFile(remoteFs, credentialsStatus.getPath)
          lastCredentialsFileSuffix = suffix
          UserGroupInformation.getCurrentUser.addCredentials(newCredentials)
          logInfo("Tokens updated from credentials file.")
        } else {
          // Check every hour to see if new credentials arrived.
          logInfo("Updated delegation tokens were expected, but the driver has not updated the " +
            "tokens yet, will check again in an hour.")
          delegationTokenRenewer.schedule(executorUpdaterRunnable, 1, TimeUnit.HOURS)
          return
        }
      }
      val timeFromNowToRenewal =
        SparkHadoopUtil.get.getTimeFromNowToRenewal(
          sparkConf, 0.8, UserGroupInformation.getCurrentUser.getCredentials)
      if (timeFromNowToRenewal <= 0) {
        executorUpdaterRunnable.run()
      } else {
        logInfo(s"Scheduling token refresh from HDFS in $timeFromNowToRenewal millis.")
        delegationTokenRenewer.schedule(
          executorUpdaterRunnable, timeFromNowToRenewal, TimeUnit.MILLISECONDS)
      }
    } catch {
      // Since the file may get deleted while we are reading it, catch the Exception and come
      // back in an hour to try again
      case NonFatal(e) =>
        logWarning("Error while trying to update credentials, will try again in 1 hour", e)
        delegationTokenRenewer.schedule(executorUpdaterRunnable, 1, TimeUnit.HOURS)
    }
  }

  private def getCredentialsFromHDFSFile(remoteFs: FileSystem, tokenPath: Path): Credentials = {
    val stream = remoteFs.open(tokenPath)
    try {
      val newCredentials = new Credentials()
      newCredentials.readTokenStorageStream(stream)
      newCredentials
    } finally {
      stream.close()
    }
  }

  def stop(): Unit = {
    delegationTokenRenewer.shutdown()
  }

} 
Example 105
Source File: TestProcessHelper.scala    From seed   with Apache License 2.0 5 votes vote down vote up
package seed.generation.util

import java.nio.file.Path
import java.util.concurrent.{Executors, Semaphore}

import scala.concurrent.{ExecutionContext, Future}
import seed.Log
import seed.cli.util.RTS
import seed.process.ProcessHelper

object TestProcessHelper {
  // Single-threaded execution context to avoid CI problems
  private val executor = Executors.newFixedThreadPool(1)
  implicit val ec      = ExecutionContext.fromExecutor(executor)

  // Use binary semaphore to synchronise test suite execution. Prevent Bloop
  // processes from running concurrently.
  val semaphore = new Semaphore(1)

  def runBloop(cwd: Path)(args: String*): Future[String] = {
    val sb = new StringBuilder
    val process =
      ProcessHelper.runBloop(cwd, Log.urgent, out => sb.append(out + "\n"))(
        args: _*
      )
    RTS.unsafeRunToFuture(process).map(_ => sb.toString)
  }

  def runCommand(cwd: Path, cmd: List[String]): Future[String] = {
    val sb = new StringBuilder
    val process = ProcessHelper.runCommand(
      cwd,
      cmd,
      None,
      List(),
      None,
      Log.urgent,
      out => sb.append(out + "\n")
    )
    RTS.unsafeRunToFuture(process).map(_ => sb.toString)
  }
} 
Example 106
Source File: BaseIntegrationTestSpec.scala    From haystack-traces   with Apache License 2.0 5 votes vote down vote up
package com.expedia.www.haystack.trace.storage.backends.memory.integration

import java.util.UUID
import java.util.concurrent.Executors

import com.expedia.open.tracing.Span
import com.expedia.open.tracing.backend.StorageBackendGrpc
import com.expedia.open.tracing.buffer.SpanBuffer
import com.expedia.www.haystack.trace.storage.backends.memory.Service
import io.grpc.ManagedChannelBuilder
import org.scalatest._

import scala.collection.JavaConverters._

trait BaseIntegrationTestSpec extends FunSpec with GivenWhenThen with Matchers with BeforeAndAfterAll with BeforeAndAfterEach  {
  protected var client: StorageBackendGrpc.StorageBackendBlockingStub = _

  private val executors = Executors.newSingleThreadExecutor()

  override def beforeAll() {
    executors.submit(new Runnable {
      override def run(): Unit = Service.main(null)
    })

    Thread.sleep(5000)

    client = StorageBackendGrpc.newBlockingStub(ManagedChannelBuilder.forAddress("localhost", 8090)
      .usePlaintext(true)
      .build())
  }

  protected def createSerializedSpanBuffer(traceId: String = UUID.randomUUID().toString,
                                           spanId: String = UUID.randomUUID().toString,
                                           serviceName: String = "test-service",
                                           operationName: String = "test-operation",
                                           tags: Map[String, String] = Map.empty,
                                           startTime: Long = System.currentTimeMillis() * 1000,
                                           sleep: Boolean = true): Array[Byte] = {
    val spanBuffer = createSpanBufferWithSingleSpan(traceId, spanId, serviceName, operationName, tags, startTime)
    spanBuffer.toByteArray
  }

  private def createSpanBufferWithSingleSpan(traceId: String,
                                             spanId: String,
                                             serviceName: String,
                                             operationName: String,
                                             tags: Map[String, String],
                                             startTime: Long) = {
    val spanTags = tags.map(tag => com.expedia.open.tracing.Tag.newBuilder().setKey(tag._1).setVStr(tag._2).build())

    SpanBuffer
      .newBuilder()
      .setTraceId(traceId)
      .addChildSpans(Span
        .newBuilder()
        .setTraceId(traceId)
        .setSpanId(spanId)
        .setOperationName(operationName)
        .setServiceName(serviceName)
        .setStartTime(startTime)
        .addAllTags(spanTags.asJava)
        .build())
      .build()
  }
} 
Example 107
Source File: KafkaConsumer.scala    From aecor   with MIT License 5 votes vote down vote up
package aecor.kafkadistributedprocessing.internal

import java.time.Duration
import java.util.Properties
import java.util.concurrent.Executors

import cats.effect.{ Async, ContextShift, Resource }
import cats.~>
import org.apache.kafka.clients.consumer.{ Consumer, ConsumerRebalanceListener, ConsumerRecords }
import org.apache.kafka.common.PartitionInfo
import org.apache.kafka.common.serialization.Deserializer

import scala.collection.JavaConverters._
import scala.concurrent.ExecutionContext
import scala.concurrent.duration.FiniteDuration

private[kafkadistributedprocessing] final class KafkaConsumer[F[_], K, V](
  withConsumer: (Consumer[K, V] => *) ~> F
) {

  def subscribe(topics: Set[String], listener: ConsumerRebalanceListener): F[Unit] =
    withConsumer(_.subscribe(topics.asJava, listener))

  def subscribe(topics: Set[String]): F[Unit] =
    withConsumer(_.subscribe(topics.asJava))

  val unsubscribe: F[Unit] =
    withConsumer(_.unsubscribe())

  def partitionsFor(topic: String): F[Set[PartitionInfo]] =
    withConsumer(_.partitionsFor(topic).asScala.toSet)

  def close: F[Unit] =
    withConsumer(_.close())

  def poll(timeout: FiniteDuration): F[ConsumerRecords[K, V]] =
    withConsumer(_.poll(Duration.ofNanos(timeout.toNanos)))
}

private[kafkadistributedprocessing] object KafkaConsumer {
  final class Create[F[_]] {
    def apply[K, V](
      config: Properties,
      keyDeserializer: Deserializer[K],
      valueDeserializer: Deserializer[V]
    )(implicit F: Async[F], contextShift: ContextShift[F]): Resource[F, KafkaConsumer[F, K, V]] = {
      val create = F.suspend {

        val executor = Executors.newSingleThreadExecutor()

        def eval[A](a: => A): F[A] =
          contextShift.evalOn(ExecutionContext.fromExecutor(executor)) {
            F.async[A] { cb =>
              executor.execute(new Runnable {
                override def run(): Unit =
                  cb {
                    try Right(a)
                    catch {
                      case e: Throwable => Left(e)
                    }
                  }
              })
            }
          }

        eval {
          val original = Thread.currentThread.getContextClassLoader
          Thread.currentThread.setContextClassLoader(null)
          val consumer = new org.apache.kafka.clients.consumer.KafkaConsumer[K, V](
            config,
            keyDeserializer,
            valueDeserializer
          )
          Thread.currentThread.setContextClassLoader(original)
          val withConsumer = new ((Consumer[K, V] => *) ~> F) {
            def apply[A](f: Consumer[K, V] => A): F[A] =
              eval(f(consumer))
          }
          new KafkaConsumer[F, K, V](withConsumer)
        }
      }
      Resource.make(create)(_.close)
    }
  }
  def create[F[_]]: Create[F] = new Create[F]
} 
Example 108
Source File: FunctionConfigStorage.scala    From mist   with Apache License 2.0 5 votes vote down vote up
package io.hydrosphere.mist.master.data

import java.io.File
import java.nio.file.Paths
import java.util.concurrent.Executors

import com.typesafe.config.{ConfigValueFactory, ConfigFactory, ConfigValueType, Config}
import io.hydrosphere.mist.master.models.FunctionConfig
import io.hydrosphere.mist.utils.Logger

import scala.collection.JavaConverters._
import scala.concurrent.{Future, ExecutionContext}
import scala.util._

class FunctionConfigStorage(
  fsStorage: FsStorage[FunctionConfig],
  val defaults: Seq[FunctionConfig]
)(implicit ex: ExecutionContext) {

  private val defaultMap = defaults.map(e => e.name -> e).toMap

  def all: Future[Seq[FunctionConfig]] =
    Future { fsStorage.entries } map (seq => {
      val merged = defaultMap ++ seq.map(a => a.name -> a).toMap
      merged.values.toSeq
    })

  def get(name: String): Future[Option[FunctionConfig]] = {
    Future { fsStorage.entry(name) } flatMap {
      case s @ Some(_) => Future.successful(s)
      case None => Future.successful(defaultMap.get(name))
    }
  }

  def delete(name: String): Future[Option[FunctionConfig]] = {
    Future { fsStorage.delete(name) }
  }

  def update(ec: FunctionConfig): Future[FunctionConfig] =
    Future { fsStorage.write(ec.name, ec) }

}

object FunctionConfigStorage extends Logger {

  def create(
    storagePath: String,
    defaultConfigPath: String): FunctionConfigStorage = {

    val defaults = fromDefaultsConfig(defaultConfigPath)
    val fsStorage = new FsStorage(checkDirectory(storagePath), ConfigRepr.EndpointsRepr)
    val ec = ExecutionContext.fromExecutorService(Executors.newFixedThreadPool(3))
    new FunctionConfigStorage(fsStorage, defaults)(ec)
  }

  def fromDefaultsConfig(path: String): Seq[FunctionConfig] = {
    val file = new File(path)
    if (!file.exists()) {
      Seq.empty
    } else {
      logger.warn("Starting using router conf (that feature will be removed - please use http api for uploading functions)")
      val directory = Paths.get(path).getParent
      val config = ConfigFactory.parseFile(file)
        .withValue("location", ConfigValueFactory.fromAnyRef(directory.toString))
        .resolve()
      parseConfig(config)
    }
  }

  def parseConfig(config: Config): Seq[FunctionConfig] = {
    def parse(name: String): Try[FunctionConfig] = Try {
      val part = config.getConfig(name)
      ConfigRepr.EndpointsRepr.fromConfig(name, part)
    }

    config.root().keySet().asScala
      .filter(k => config.getValue(k).valueType() == ConfigValueType.OBJECT)
      .map(name => parse(name))
      .foldLeft(List.empty[FunctionConfig])({
        case (lst, Failure(e)) =>
          logger.warn("Invalid configuration for function", e)
          lst
        case (lst, Success(c)) => lst :+ c
      })
  }
} 
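
A usage sketch, assuming the storage and router config paths below exist (both paths are illustrative): create() wires the three-thread pool shown above internally.

import scala.concurrent.Await
import scala.concurrent.duration._

import io.hydrosphere.mist.master.data.FunctionConfigStorage

object FunctionStorageUsage {
  val storage = FunctionConfigStorage.create("data/functions", "configs/router.conf")

  // Merge of defaults and stored entries, blocking briefly for the sake of the example
  val names: Seq[String] = Await.result(storage.all, 5.seconds).map(_.name)
}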
Example 109
Source File: ContextsStorage.scala    From mist   with Apache License 2.0 5 votes vote down vote up
package io.hydrosphere.mist.master.data

import java.util.concurrent.Executors

import io.hydrosphere.mist.master.models.ContextConfig

import scala.concurrent.{ExecutionContext, Future}

trait Contexts {

  def get(name: String): Future[Option[ContextConfig]]
  def getOrDefault(name: String): Future[ContextConfig]
  def all: Future[Seq[ContextConfig]]
  def defaultConfig: ContextConfig

}

class ContextsStorage(
  fsStorage: FsStorage[ContextConfig],
  defaults: ContextDefaults
)(implicit ex: ExecutionContext) extends Contexts {

  import defaults._

  def get(name: String): Future[Option[ContextConfig]] = {
    Future { fsStorage.entry(name) }.flatMap({
      case v @ Some(_) => Future.successful(v)
      case None => Future.successful(defaultsMap.get(name))
    })
  }

  def delete(name: String): Future[Option[ContextConfig]] = {
    Future { fsStorage.delete(name) }
  }

  def getOrDefault(name: String): Future[ContextConfig] =
    get(name).map(_.getOrElse(defaultSettings.default.copy(name = name)))

  def all: Future[Seq[ContextConfig]] = {
    Future { fsStorage.entries } map (stored => {
      val merged = defaultsMap ++ stored.map(a => a.name -> a).toMap
      merged.values.toSeq
    })
  }

  def precreated: Future[Seq[ContextConfig]] = all.map(_.filter(_.precreated))

  def update(config: ContextConfig): Future[ContextConfig] = {
    if (config.name == ContextsStorage.DefaultKey)
      Future.failed(new IllegalArgumentException("Can not create context with name:\"default\""))
    else
      Future { fsStorage.write(config.name, config) }
  }

  def defaultConfig: ContextConfig = defaults.defaultConfig
}

object ContextsStorage {

  val DefaultKey = "default"

  def create(path: String, mistConfigPath: String): ContextsStorage = {
    val ec = ExecutionContext.fromExecutorService(Executors.newFixedThreadPool(3))
    val fsStorage = new FsStorage(checkDirectory(path), ConfigRepr.ContextConfigRepr)
    new ContextsStorage(fsStorage, new ContextDefaults(mistConfigPath))(ec)
  }

} 
Example 110
Source File: FsStorage.scala    From mist   with Apache License 2.0 5 votes vote down vote up
package io.hydrosphere.mist.master.data

import java.io.File
import java.nio.file.{Files, Path}
import java.util.concurrent.Executors

import com.typesafe.config.{ConfigFactory, ConfigRenderOptions}
import io.hydrosphere.mist.master.ContextsSettings
import io.hydrosphere.mist.master.data.FsStorage._
import io.hydrosphere.mist.master.models.{ContextConfig, NamedConfig}
import io.hydrosphere.mist.utils.{Logger, fs}

import scala.concurrent.{ExecutionContext, Future}
import scala.util._

class FsStorage[A <: NamedConfig](
  dir: Path,
  repr: ConfigRepr[A],
  renderOptions: ConfigRenderOptions = DefaultRenderOptions
) extends Logger with RwLock { self =>

  def entries: Seq[A] = {
    def files = dir.toFile.listFiles(fs.mkFilter(_.endsWith(".conf")))

    withReadLock {
      files.map(parseFile).foldLeft(List.empty[A])({
        case (list, Failure(e)) =>
          logger.warn("Invalid configuration", e)
          list
        case (list, Success(context)) => list :+ context
      })
    }
  }

  def entry(name: String): Option[A] = {
    val filePath = dir.resolve(s"$name.conf")

    withReadLock {
      val file = filePath.toFile
      if (file.exists()) parseFile(file).toOption else None
    }
  }

  def delete(name: String): Option[A] = {
    entry(name).map(e => {
      withWriteLock { Files.delete(filePath(name)) }
      e
    })
  }

  private def filePath(name: String): Path = dir.resolve(s"$name.conf")

  private def parseFile(f: File): Try[A] = {
    val name = f.getName.replace(".conf", "")
    Try(ConfigFactory.parseFile(f)).map(c => repr.fromConfig(name, c))
  }

  def write(name: String, entry: A): A = {
    val config = repr.toConfig(entry)

    val data = config.root().render(renderOptions)

    withWriteLock {
      Files.write(filePath(name), data.getBytes)
      entry
    }
  }

}

object FsStorage {

  val DefaultRenderOptions =
    ConfigRenderOptions.defaults()
      .setComments(false)
      .setOriginComments(false)
      .setJson(false)
      .setFormatted(true)

  def create[A <: NamedConfig](path: String, repr: ConfigRepr[A]): FsStorage[A] =
    new FsStorage[A](checkDirectory(path), repr)

} 
Example 111
Source File: HikariDataSourceTransactor.scala    From mist   with Apache License 2.0 5 votes vote down vote up
package io.hydrosphere.mist.master.store

import java.util.concurrent.{ExecutorService, Executors, Future, TimeUnit}

import cats.arrow.FunctionK
import cats.effect._
import com.zaxxer.hikari.{HikariConfig, HikariDataSource}
import doobie.util.transactor.Transactor
import doobie.util.transactor.Transactor.Aux
import io.hydrosphere.mist.utils.Logger

import scala.concurrent.ExecutionContext



  // NOTE: the enclosing class declaration is not included in this excerpt; `ds`, `ce`, `te`,
  // `awaitShutdown` and `shutdownExecutorService` referenced below come from that omitted code.
  def shutdown(): Unit = {
    if (!ds.isClosed) {
      logger.info("Closing Hikari data source")
      ds.close()
    } else {
      logger.warn("Hikari datasource had not been properly initialized before closing")
    }

    shutdownExecutorService(awaitShutdown, ce, "connections EC")
    shutdownExecutorService(awaitShutdown, te, "tx EC")
  }
} 
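
The shutdownExecutorService helper called in shutdown() is not part of this excerpt. A plausible sketch of such a helper, assuming it only needs a grace period, the executor itself and a label for logging (an assumption, not the project's actual implementation):

import java.util.concurrent.{ExecutorService, Executors, TimeUnit}

import scala.concurrent.duration._

object ShutdownHelperSketch {

  // Ask the pool to stop, wait up to `grace` for running tasks, then force termination.
  def shutdownExecutorService(grace: FiniteDuration, es: ExecutorService, label: String): Unit = {
    es.shutdown()
    if (!es.awaitTermination(grace.toMillis, TimeUnit.MILLISECONDS)) {
      println(s"$label did not terminate within $grace, calling shutdownNow()")
      es.shutdownNow()
    }
  }

  def main(args: Array[String]): Unit = {
    val connectEc = Executors.newFixedThreadPool(2)
    connectEc.submit(new Runnable { def run(): Unit = Thread.sleep(100) })
    shutdownExecutorService(5.seconds, connectEc, "connections EC")
  }
}
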
Example 112
Source File: CheckStepBench.scala    From cornichon   with Apache License 2.0 5 votes vote down vote up
package step

import java.util.concurrent.{ ExecutorService, Executors }

import com.github.agourlay.cornichon.core._
import monix.execution.Scheduler
import org.openjdk.jmh.annotations._
import com.github.agourlay.cornichon.steps.check.checkModel._
import com.github.agourlay.cornichon.steps.cats.EffectStep

import scala.concurrent.Await
import scala.concurrent.duration.Duration
import step.JsonStepBench._

@State(Scope.Benchmark)
@BenchmarkMode(Array(Mode.Throughput))
@Warmup(iterations = 10)
@Measurement(iterations = 10)
@Fork(value = 1, jvmArgsAppend = Array(
  "-XX:+FlightRecorder",
  "-XX:StartFlightRecording=filename=./CheckStepBench-profiling-data.jfr,name=profile,settings=profile",
  "-Xmx1G"))
class CheckStepBench {

  //sbt:benchmarks> jmh:run .*CheckStep.* -prof gc -foe true -gc true -rf csv

  @Param(Array("10", "20", "50", "100", "200"))
  var transitionNumber: String = ""

  var es: ExecutorService = _
  var scheduler: Scheduler = _

  @Setup(Level.Trial)
  final def beforeAll(): Unit = {
    es = Executors.newFixedThreadPool(1)
    scheduler = Scheduler(es)
  }

  @TearDown(Level.Trial)
  final def afterAll(): Unit = {
    es.shutdown()
  }
  

  @Benchmark
  def runModel() = {
    val checkStep = CheckModelStep(maxNumberOfRuns = 1, maxNumberOfTransitions = transitionNumber.toInt, CheckStepBench.modelRunner)
    val s = Scenario("scenario with checkStep", checkStep :: Nil)
    val f = ScenarioRunner.runScenario(session)(s)
    val res = Await.result(f.runToFuture(scheduler), Duration.Inf)
    assert(res.isSuccess)
  }

}

object CheckStepBench {
  def integerGen(rc: RandomContext): ValueGenerator[Int] = ValueGenerator(
    name = "integer",
    gen = () => rc.nextInt(10000))

  def dummyProperty1(name: String): PropertyN[Int, NoValue, NoValue, NoValue, NoValue, NoValue] =
    Property1(
      description = name,
      invariant = g => EffectStep.fromSyncE("add generated", _.session.addValue("generated", g().toString)))

  val starting = dummyProperty1("starting action")
  val otherAction = dummyProperty1("other action")
  val otherActionTwo = dummyProperty1("other action two")
  val transitions = Map(
    starting -> ((100, otherAction) :: Nil),
    otherAction -> ((100, otherActionTwo) :: Nil),
    otherActionTwo -> ((100, otherAction) :: Nil))
  val model = Model("model with empty transition for starting", starting, transitions)
  val modelRunner = ModelRunner.make(integerGen)(model)

} 
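
This benchmark and the two that follow share one executor lifecycle: build a single-thread pool in @Setup, wrap it in a Monix Scheduler, and shut the pool down in @TearDown so repeated JMH trials do not leak threads. The same lifecycle with a plain scala.concurrent ExecutionContext looks like this (a sketch for illustration; the benchmarks themselves need the Monix Scheduler for runToFuture):

import java.util.concurrent.{ExecutorService, Executors}

import scala.concurrent.ExecutionContext

final class PoolLifecycleSketch {
  private var es: ExecutorService = _
  private var ec: ExecutionContext = _

  // would carry @Setup(Level.Trial) in a JMH benchmark
  def beforeAll(): Unit = {
    es = Executors.newFixedThreadPool(1)
    ec = ExecutionContext.fromExecutorService(es)
  }

  // would carry @TearDown(Level.Trial) in a JMH benchmark
  def afterAll(): Unit =
    es.shutdown()
}
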
Example 113
Source File: RunScenarioBench.scala    From cornichon   with Apache License 2.0 5 votes vote down vote up
package scenario

import java.util.concurrent.{ ExecutorService, Executors }

import cats.instances.int._
import com.github.agourlay.cornichon.core.{ ScenarioRunner, Scenario, Session }
import com.github.agourlay.cornichon.steps.cats.EffectStep
import com.github.agourlay.cornichon.steps.regular.assertStep.{ AssertStep, Assertion, GenericEqualityAssertion }
import org.openjdk.jmh.annotations._
import scenario.RunScenarioBench._
import monix.execution.Scheduler

import scala.concurrent.Await
import scala.concurrent.duration._

@State(Scope.Benchmark)
@BenchmarkMode(Array(Mode.Throughput))
@Warmup(iterations = 10)
@Measurement(iterations = 10)
@Fork(value = 1, jvmArgsAppend = Array(
  "-XX:+FlightRecorder",
  "-XX:StartFlightRecording=filename=./RunScenarioBench-profiling-data.jfr,name=profile,settings=profile",
  "-Xmx1G"))
class RunScenarioBench {

  //sbt:benchmarks> jmh:run .*RunScenario.* -prof gc -foe true -gc true -rf csv

  @Param(Array("10", "20", "50", "100", "200"))
  var stepsNumber: String = ""
  var es: ExecutorService = _
  var scheduler: Scheduler = _

  @Setup(Level.Trial)
  final def beforeAll(): Unit = {
    es = Executors.newFixedThreadPool(1)
    scheduler = Scheduler(es)
  }

  @TearDown(Level.Trial)
  final def afterAll(): Unit = {
    es.shutdown()
  }

  

  @Benchmark
  def lotsOfSteps() = {
    val half = stepsNumber.toInt / 2
    val assertSteps = List.fill(half)(assertStep)
    val effectSteps = List.fill(half)(effectStep)
    val scenario = Scenario("test scenario", setupSession +: (assertSteps ++ effectSteps))
    val f = ScenarioRunner.runScenario(Session.newEmpty)(scenario)
    val res = Await.result(f.runToFuture(scheduler), Duration.Inf)
    assert(res.isSuccess)
  }
}

object RunScenarioBench {
  val setupSession = EffectStep.fromSyncE("setup session", _.session.addValues("v1" -> "2", "v2" -> "1"))
  val assertStep = AssertStep(
    "addition step",
    sc => Assertion.either {
      for {
        two <- sc.session.get("v1").map(_.toInt)
        one <- sc.session.get("v2").map(_.toInt)
      } yield GenericEqualityAssertion(two + one, 3)
    })
  val effectStep = EffectStep.fromSync("identity", _.session)
} 
Example 114
Source File: RequestEffectBench.scala    From cornichon   with Apache License 2.0 5 votes vote down vote up
package httpService

import java.util.concurrent.{ ExecutorService, Executors }

import cats.instances.string._
import com.github.agourlay.cornichon.core.{ Config, ScenarioContext }
import com.github.agourlay.cornichon.http.{ HttpMethods, HttpRequest, HttpService }
import org.openjdk.jmh.annotations._
import RequestEffectBench._
import com.github.agourlay.cornichon.http.client.NoOpHttpClient
import monix.execution.Scheduler

import scala.concurrent.Await
import scala.concurrent.duration._

@State(Scope.Benchmark)
@BenchmarkMode(Array(Mode.Throughput))
@Warmup(iterations = 10)
@Measurement(iterations = 10)
@Fork(value = 1, jvmArgsAppend = Array(
  "-XX:+FlightRecorder",
  "-XX:StartFlightRecording=filename=./RequestEffectBench-profiling-data.jfr,name=profile,settings=profile",
  "-Xmx1G"))
class RequestEffectBench {

  //sbt:benchmarks> jmh:run .*RequestEffect.*

  var es: ExecutorService = _
  val client = new NoOpHttpClient
  var httpService: HttpService = _

  @Setup(Level.Trial)
  final def beforeAll(): Unit = {
    es = Executors.newFixedThreadPool(1)
    val scheduler = Scheduler(es)
    httpService = new HttpService("", 2000.millis, client, Config())(scheduler)
  }

  @TearDown(Level.Trial)
  final def afterAll(): Unit = {
    es.shutdown()
  }
  

  @Benchmark
  def singleRequest() = {
    val f = httpService.requestEffect(request)
    val res = Await.result(f(scenarioContext), Duration.Inf)
    assert(res.isRight)
  }
}

object RequestEffectBench {
  val scenarioContext = ScenarioContext.empty
  val request = HttpRequest[String](
    method = HttpMethods.GET,
    url = "https://myUrl/my/segment",
    body = Some(""" { "k1":"v1", "k2":"v2","k3":"v3","k4":"v4" } """),
    params = ("q1", "v1") :: ("q2", "v2") :: ("q3", "v3") :: Nil,
    headers = ("h1", "v1") :: ("h2", "v2") :: ("h3", "v3") :: Nil)
} 
Example 115
Source File: SparkSQLSessionManager.scala    From Spark-2.3.1   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.sql.hive.thriftserver

import java.util.concurrent.Executors

import org.apache.commons.logging.Log
import org.apache.hadoop.hive.conf.HiveConf
import org.apache.hadoop.hive.conf.HiveConf.ConfVars
import org.apache.hive.service.cli.SessionHandle
import org.apache.hive.service.cli.session.SessionManager
import org.apache.hive.service.cli.thrift.TProtocolVersion
import org.apache.hive.service.server.HiveServer2

import org.apache.spark.sql.SQLContext
import org.apache.spark.sql.hive.HiveUtils
import org.apache.spark.sql.hive.thriftserver.ReflectionUtils._
import org.apache.spark.sql.hive.thriftserver.server.SparkSQLOperationManager


private[hive] class SparkSQLSessionManager(hiveServer: HiveServer2, sqlContext: SQLContext)
  extends SessionManager(hiveServer)
  with ReflectedCompositeService {

  private lazy val sparkSqlOperationManager = new SparkSQLOperationManager()

  override def init(hiveConf: HiveConf) {
    setSuperField(this, "operationManager", sparkSqlOperationManager)
    super.init(hiveConf)
  }

  override def openSession(
      protocol: TProtocolVersion,
      username: String,
      passwd: String,
      ipAddress: String,
      sessionConf: java.util.Map[String, String],
      withImpersonation: Boolean,
      delegationToken: String): SessionHandle = {
    val sessionHandle =
      super.openSession(protocol, username, passwd, ipAddress, sessionConf, withImpersonation,
          delegationToken)
    val session = super.getSession(sessionHandle)
    HiveThriftServer2.listener.onSessionCreated(
      session.getIpAddress, sessionHandle.getSessionId.toString, session.getUsername)
    val ctx = if (sqlContext.conf.hiveThriftServerSingleSession) {
      sqlContext
    } else {
      sqlContext.newSession()
    }
    ctx.setConf(HiveUtils.FAKE_HIVE_VERSION.key, HiveUtils.builtinHiveVersion)
    if (sessionConf != null && sessionConf.containsKey("use:database")) {
      ctx.sql(s"use ${sessionConf.get("use:database")}")
    }
    sparkSqlOperationManager.sessionToContexts.put(sessionHandle, ctx)
    sessionHandle
  }

  override def closeSession(sessionHandle: SessionHandle) {
    HiveThriftServer2.listener.onSessionClosed(sessionHandle.getSessionId.toString)
    super.closeSession(sessionHandle)
    sparkSqlOperationManager.sessionToActivePool.remove(sessionHandle)
    sparkSqlOperationManager.sessionToContexts.remove(sessionHandle)
  }
} 
Example 116
Source File: Module.scala    From elastiknn   with Apache License 2.0 5 votes vote down vote up
import java.util.concurrent.{ExecutorService, Executors, ThreadFactory}

import com.google.common.util.concurrent.ThreadFactoryBuilder
import com.google.inject.{AbstractModule, TypeLiteral}
import com.klibisz.elastiknn.client.{ElastiknnClient, ElastiknnFutureClient}
import javax.inject.Provider
import play.api.{Configuration, Environment}

import scala.concurrent.ExecutionContext

class Module(environment: Environment, configuration: Configuration) extends AbstractModule {

  val eknnProvider = new Provider[ElastiknnFutureClient] {
    override def get(): ElastiknnFutureClient = {
      val tfac: ThreadFactory = new ThreadFactoryBuilder().setDaemon(true).setNameFormat("elastiknn-%d").build()
      val exec: ExecutorService = Executors.newFixedThreadPool(Runtime.getRuntime.availableProcessors(), tfac)
      implicit val ec: ExecutionContext = ExecutionContext.fromExecutor(exec)
      val host = configuration.underlying.getString("elastiknn.elasticsearch.host")
      val port = configuration.underlying.getInt("elastiknn.elasticsearch.port")
      ElastiknnClient.futureClient(host, port)
    }
  }

  override def configure(): Unit = {
    // Weird that you have to use this constructor, but it works.
    bind(new TypeLiteral[ElastiknnFutureClient]() {}).toProvider(eknnProvider)
  }
} 
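
The module above relies on Guava's ThreadFactoryBuilder for daemon threads with a readable naming pattern. The same effect is available with the JDK alone; a sketch (illustrative, not part of the elastiknn play module):

import java.util.concurrent.atomic.AtomicInteger
import java.util.concurrent.{Executors, ThreadFactory}

object DaemonThreadFactorySketch {

  def namedDaemonFactory(prefix: String): ThreadFactory = new ThreadFactory {
    private val counter = new AtomicInteger(0)
    def newThread(r: Runnable): Thread = {
      val t = new Thread(r, s"$prefix-${counter.getAndIncrement()}")
      t.setDaemon(true)   // daemon threads do not keep the JVM alive on shutdown
      t
    }
  }

  def main(args: Array[String]): Unit = {
    val pool = Executors.newFixedThreadPool(2, namedDaemonFactory("elastiknn"))
    pool.submit(new Runnable { def run(): Unit = println(Thread.currentThread().getName) })
    pool.shutdown()
  }
}
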
Example 117
Source File: TcpServiceImpl.scala    From c4proto   with Apache License 2.0 5 votes vote down vote up
package ee.cone.c4gate_server

import java.net.InetSocketAddress
import java.nio.ByteBuffer
import java.nio.channels.{AsynchronousServerSocketChannel, AsynchronousSocketChannel, CompletionHandler}
import java.util.UUID
import java.util.concurrent.{Executors, ScheduledExecutorService, ScheduledFuture, TimeUnit}

import com.typesafe.scalalogging.LazyLogging
import ee.cone.c4actor._

import scala.collection.concurrent.TrieMap
import scala.collection.immutable.Queue

@SuppressWarnings(Array("org.wartremover.warts.Var")) class ChannelHandler(
  channel: AsynchronousSocketChannel, unregister: ()=>Unit, fail: Throwable=>Unit,
  executor: ScheduledExecutorService, timeout: Long, val compressor: Option[Compressor]
) extends CompletionHandler[Integer,Unit] with SenderToAgent {
  private var queue: Queue[Array[Byte]] = Queue.empty
  private var activeElement: Option[ByteBuffer] = None
  private var purge: Option[ScheduledFuture[_]] = None
  private def startWrite(): Unit =
    queue.dequeueOption.foreach{ case (element,nextQueue) =>
      queue = nextQueue
      activeElement = Option(ByteBuffer.wrap(element))
      channel.write[Unit](activeElement.get, (), this)
    }
  def add(data: Array[Byte]): Unit = synchronized {
    queue = queue.enqueue(data)
    if(activeElement.isEmpty) startWrite()
  }
  def completed(result: Integer, att: Unit): Unit = Trace {
    synchronized {
      if(activeElement.get.hasRemaining) channel.write[Unit](activeElement.get, (), this)
      else {
        purge.foreach(_.cancel(false))
        purge = Option(executor.schedule(new Runnable {
          def run(): Unit = close()
        },timeout,TimeUnit.SECONDS))
        activeElement = None
        startWrite()
      }
    }
  }
  def failed(exc: Throwable, att: Unit): Unit = {
    fail(exc)
    close()
  }
  def close(): Unit = {
    unregister()
    channel.close()  //does close block?
  }
}

class TcpServerImpl(
  port: Int, tcpHandler: TcpHandler, timeout: Long, compressorFactory: StreamCompressorFactory,
  channels: TrieMap[String,ChannelHandler] = TrieMap()
) extends TcpServer with Executable with LazyLogging {
  def getSender(connectionKey: String): Option[SenderToAgent] =
    channels.get(connectionKey)
  def run(): Unit = concurrent.blocking{
    tcpHandler.beforeServerStart()
    val address = new InetSocketAddress(port)
    val listener = AsynchronousServerSocketChannel.open().bind(address)
    val executor = Executors.newScheduledThreadPool(1)
    listener.accept[Unit]((), new CompletionHandler[AsynchronousSocketChannel,Unit] {
      def completed(ch: AsynchronousSocketChannel, att: Unit): Unit = Trace {
        listener.accept[Unit]((), this)
        val key = UUID.randomUUID.toString
        val sender = new ChannelHandler(ch, {() =>
          assert(channels.remove(key).nonEmpty)
          tcpHandler.afterDisconnect(key)
        }, { error =>
          logger.error("channel",error)
        }, executor, timeout, compressorFactory.create())
        assert(channels.put(key,sender).isEmpty)
        tcpHandler.afterConnect(key, sender)
      }
      def failed(exc: Throwable, att: Unit): Unit = logger.error("tcp",exc) //! may be set status-finished
    })
  }
} 
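
ChannelHandler above re-arms an idle-timeout task on the ScheduledExecutorService after every completed write and cancels the previously armed one. That schedule/cancel pattern in isolation (names are illustrative):

import java.util.concurrent.{Executors, ScheduledFuture, TimeUnit}

object IdleTimeoutSketch {
  def main(args: Array[String]): Unit = {
    val executor = Executors.newScheduledThreadPool(1)
    var purge: Option[ScheduledFuture[_]] = None

    def armTimeout(timeoutSeconds: Long)(onTimeout: => Unit): Unit = {
      purge.foreach(_.cancel(false))           // drop the previously armed timeout, if any
      purge = Option(executor.schedule(new Runnable {
        def run(): Unit = onTimeout
      }, timeoutSeconds, TimeUnit.SECONDS))
    }

    armTimeout(1L) { println("connection considered idle, closing") }
    armTimeout(1L) { println("this one fires instead, the first was cancelled") }

    Thread.sleep(1500)
    executor.shutdown()
  }
}
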
Example 118
Source File: DynamoService.scala    From iep-apps   with Apache License 2.0 5 votes vote down vote up
package com.netflix.iep.archaius

import java.util.concurrent.Executors
import java.util.concurrent.atomic.AtomicLong
import javax.inject.Inject
import javax.inject.Singleton

import com.amazonaws.services.dynamodbv2.AmazonDynamoDB
import com.amazonaws.services.dynamodbv2.AmazonDynamoDBClient
import com.netflix.iep.service.AbstractService
import com.typesafe.config.Config

import scala.concurrent.ExecutionContext
import scala.concurrent.Future


@Singleton
class DynamoService @Inject()(client: AmazonDynamoDB, config: Config) extends AbstractService {

  private val nextId = new AtomicLong()
  private val pool = Executors.newFixedThreadPool(
    Runtime.getRuntime.availableProcessors(),
    (r: Runnable) => {
      new Thread(r, s"dynamo-db-${nextId.getAndIncrement()}")
    }
  )
  private val ec = ExecutionContext.fromExecutorService(pool)

  override def startImpl(): Unit = ()

  override def stopImpl(): Unit = {
    client match {
      case c: AmazonDynamoDBClient => c.shutdown()
      case _                       =>
    }
  }

  def execute[T](task: AmazonDynamoDB => T): Future[T] = Future(task(client))(ec)
} 
Example 119
Source File: ThreadUtil.scala    From almond   with BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
package almond.util

import java.lang.Thread.UncaughtExceptionHandler
import java.util.concurrent.{Executors, ThreadFactory}
import java.util.concurrent.atomic.AtomicInteger

import scala.concurrent.{ExecutionContext, ExecutionContextExecutorService}
import scala.util.control.NonFatal

object ThreadUtil {

  // From https://github.com/functional-streams-for-scala/fs2/blob/d47f903bc6bbcdd5d8bc6d573bc7cfd956f0cbb6/core/jvm/src/main/scala/fs2/Strategy.scala#L19-L41
  
  def daemonThreadFactory(threadName: String, exitJvmOnFatalError: Boolean = true): ThreadFactory = new ThreadFactory {
    val defaultThreadFactory = Executors.defaultThreadFactory()
    val idx = new AtomicInteger(0)
    def newThread(r: Runnable) = {
      val t = defaultThreadFactory.newThread(r)
      t.setDaemon(true)
      t.setName(s"$threadName-${idx.incrementAndGet()}")
      t.setUncaughtExceptionHandler(new UncaughtExceptionHandler {
        def uncaughtException(t: Thread, e: Throwable): Unit = {
          System.err.println(s"------------ UNHANDLED EXCEPTION ---------- (${t.getName})")
          e.printStackTrace(System.err)
          if (exitJvmOnFatalError) {
            e match {
              case NonFatal(_) => ()
              case fatal => System.exit(-1)
            }
          }
        }
      })
      t
    }
  }

  def sequentialExecutionContext(): ExecutionContext =
    new SequentialExecutionContext

  def singleThreadedExecutionContext(threadName: String): ExecutionContext =
    ExecutionContext.fromExecutorService(
      Executors.newSingleThreadExecutor(daemonThreadFactory(threadName))
    )

  def attemptShutdownExecutionContext(ec: ExecutionContext): Boolean =
    ec match {
      case _: SequentialExecutionContext =>
        true
      case es: ExecutionContextExecutorService =>
        es.shutdown()
        true
      case _ =>
        false
    }

} 
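
A short usage sketch for the helpers above, assuming almond.util.ThreadUtil is on the classpath exactly as defined here:

import almond.util.ThreadUtil

import scala.concurrent.duration._
import scala.concurrent.{Await, Future}

object ThreadUtilUsageSketch {
  def main(args: Array[String]): Unit = {
    implicit val ec = ThreadUtil.singleThreadedExecutionContext("worker")
    val name = Await.result(Future(Thread.currentThread().getName), 5.seconds)
    println(name)                                   // typically "worker-1", per daemonThreadFactory above
    ThreadUtil.attemptShutdownExecutionContext(ec)  // true: the context wraps an ExecutorService
  }
}
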
Example 120
Source File: ZeromqThreads.scala    From almond   with BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
package almond.channels.zeromq

import java.util.concurrent.Executors

import almond.channels.Channel
import almond.util.ThreadUtil.daemonThreadFactory
import org.zeromq.ZMQ

import scala.concurrent.ExecutionContext

final case class ZeromqThreads(
  ecs: Channel => ExecutionContext,
  selectorOpenCloseEc: ExecutionContext,
  pollingEc: ExecutionContext,
  context: ZMQ.Context
)

object ZeromqThreads {

  def create(name: String, zmqIOThreads: Int = 4): ZeromqThreads = {

    val ctx = ZMQ.context(zmqIOThreads)
    val zeromqOpenCloseEc = ExecutionContext.fromExecutorService(Executors.newFixedThreadPool(2, daemonThreadFactory(s"$name-zeromq-open-close")))
    val zeromqPollingEc = ExecutionContext.fromExecutorService(Executors.newSingleThreadExecutor(daemonThreadFactory(s"$name-zeromq-polling")))
    val zeromqChannelEcs = Channel.channels.map(c => c -> ExecutionContext.fromExecutorService(Executors.newSingleThreadExecutor(daemonThreadFactory(s"$name-zeromq-$c")))).toMap

    ZeromqThreads(
      zeromqChannelEcs,
      zeromqOpenCloseEc,
      zeromqPollingEc,
      ctx
    )
  }

} 
Example 121
Source File: CancellableFuturePool.scala    From almond   with BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
package almond.interpreter.util

import java.lang.Thread.UncaughtExceptionHandler
import java.util.concurrent.{Executors, ThreadFactory}

import almond.logger.LoggerContext

import scala.concurrent.{Future, Promise}
import scala.util.control.NonFatal
import scala.util.{Failure, Success}

final class CancellableFuturePool(
  logCtx: LoggerContext
) {

  private val log = logCtx(getClass)

  private val pool = Executors.newCachedThreadPool(
    // from scalaz.concurrent.Strategy.DefaultDaemonThreadFactory
    new ThreadFactory {
      val defaultThreadFactory = Executors.defaultThreadFactory()
      def newThread(r: Runnable) = {
        val t = defaultThreadFactory.newThread(r)
        t.setDaemon(true)
        t.setUncaughtExceptionHandler(
          new UncaughtExceptionHandler {
            def uncaughtException(t: Thread, e: Throwable) =
              log.warn(s"Uncaught exception in thread $t", e)
          }
        )
        t
      }
    }
  )

  def future[T](result: => T): Future[T] = {

    val p = Promise[T]()

    pool.submit(
      new Runnable {
        def run() =
          p.complete {
            try Success(result)
            catch {
              case NonFatal(e) =>
                Failure(e)
            }
          }
      }
    )

    p.future
  }

  def cancellableFuture[T](result: T): CancellableFuture[T] = {

    @volatile var completionThreadOpt = Option.empty[Thread]

    def result0(): T = {
      completionThreadOpt = Some(Thread.currentThread())
      try result
      finally {
        completionThreadOpt = None
      }
    }

    def cancel(): Unit =
      for (t <- completionThreadOpt)
        t.stop()

    CancellableFuture(future(result0()), () => cancel())
  }

  def shutdown(): Unit =
    pool.shutdown()

} 
Example 122
Source File: ScalazMain.scala    From advanced-scala-code   with Apache License 2.0 5 votes vote down vote up
import java.nio.charset.Charset
import java.util.concurrent.Executors
import org.asynchttpclient.DefaultAsyncHttpClient
import scala.concurrent.Future
import scalaz.{-\/, \/, \/-}
import scalaz.concurrent.Task


object ScalazMain {

  def main(args: Array[String]): Unit = {

    def performAction(num: Int): Unit = println(s"Task #$num is executing in ${Thread.currentThread().getName}")

    import scala.concurrent.ExecutionContext.Implicits.global
    val result1F = Future {
      performAction(0)
    }

    val result2F = Future.successful {
      performAction(1)
    }

    // Executes immediately in the main thread
    val result2T = Task.now {
      performAction(2)
    }

    // Schedules an execution in a default worker thread
    // = Executors.newFixedThreadPool(Math.max(4, Runtime.getRuntime.availableProcessors), DefaultDaemonThreadFactory)
    val result3T = Task {
      performAction(3)
    }

    // Lifts a code block to a Task without scheduling an execution
    val result4T = Task.delay {
      performAction(4)
    }

    result3T.unsafePerformAsync(_ => ())

    implicit val executorService = Executors.newSingleThreadExecutor()
    val result5T = Task {
      performAction(5)
    }
    result5T.unsafePerformSync

    val asyncHttpClient = new DefaultAsyncHttpClient()
    arm.ArmUtils.using(asyncHttpClient) {
      val result6T = Task.async[String](handler => {
        asyncHttpClient.prepareGet("https://httpbin.org/get").execute().
          toCompletableFuture.whenComplete { (response, exc) => {
          if (exc == null) {
            handler(\/.right(response.getResponseBody(Charset.forName("UTF-8"))))
          } else handler(-\/(exc))
        }}
      })
      val responseString = result6T.unsafePerformSync
      println(responseString)
    }
  }
} 
Example 123
Source File: CatsEffect.scala    From advanced-scala-code   with Apache License 2.0 5 votes vote down vote up
import java.util.concurrent.Executors

import cats.effect.IO

import scala.concurrent.ExecutionContext

object PrintThread {
  def execute[A](block: => A): A = {
    print(s"[${Thread.currentThread().getName}] ")
    block
  }
}

object CatsEffect {
  def main(args: Array[String]): Unit = {
    val blockingService = Executors.newCachedThreadPool()
    val blockingCtx   = ExecutionContext.fromExecutor(blockingService)
    val global = ExecutionContext.global
    implicit val contextShift = IO.contextShift(global)

    val ioa: IO[Unit] = for {
      _ <- contextShift.shift
      _ <- IO { PrintThread.execute(println("Enter your name: ")) }
      name <- contextShift.evalOn(blockingCtx)(
        IO{ PrintThread.execute(scala.io.StdIn.readLine()) }
      )
      _ <- IO { PrintThread.execute(println(s"Hello $name!")) }
      _ <- IO { PrintThread.execute(blockingService.shutdown()) }
    } yield ()

    ioa.unsafeRunSync()
  }
} 
Example 124
Source File: ThreadPoolSchedulerProvider.scala    From scala-game-library   with MIT License 5 votes vote down vote up
package sgl.util

import java.util.concurrent.Executors
import java.io.{StringWriter, PrintWriter}
import scala.collection.mutable.Queue

trait ThreadPoolSchedulerProvider extends SchedulerProvider {
  this: LoggingProvider =>

  private implicit val Tag = Logger.Tag("threadpool-scheduler")

  class ThreadPoolScheduler extends Scheduler {
    private val pool = Executors.newFixedThreadPool(4)

    private val tasks: Queue[ChunkedTask] = new Queue
    private val taskQueueLock = new Object

    private var r1: ChunksRunner = null
    private var r2: ChunksRunner = null
    private var r3: ChunksRunner = null
    private var r4: ChunksRunner = null

    override def schedule(task: ChunkedTask): Unit = {
      taskQueueLock.synchronized {
        tasks.enqueue(task)
      }
    }

    
    // NOTE: the resume() method, which presumably creates the four ChunksRunner instances
    // (r1..r4) and submits them to `pool`, is not included in this excerpt.
    def shutdown(): Unit = {
      pool.shutdown()

      // Need to check for null because we could have skipped resume.
      if(r1 != null)
        r1.shouldStop = true
      if(r2 != null)
        r2.shouldStop = true
      if(r3 != null)
        r3.shouldStop = true
      if(r4 != null)
        r4.shouldStop = true
    }

    // Simple Runnable class that picks up the first available ChunkedTask and
    // run one chunk of it.
    // Note that if there is only one ChunkedTask in the queue, there will only
    // be one busy Thread at a time as ChunkedTask are assumed to be sequentials.
    // In order to optimize the use of the thread pool, one should try to split
    // parallel work into several independent ChunkedTask.
    class ChunksRunner extends Runnable {
      var shouldStop = false
      override def run(): Unit = {
        while(!shouldStop) {
          val task = taskQueueLock.synchronized {
            if(tasks.isEmpty) {
              None
            } else {
              Some(tasks.dequeue())
            }
          }
          task match {
            case None => Thread.sleep(50)
            case Some(task) => {
              logger.debug("Executing some ChunkedTask from the task queue.")
              try {
                task.doRun(5l)
                if(task.status != ChunkedTask.Completed)
                  taskQueueLock.synchronized { tasks.enqueue(task) }
              } catch {
                case (e: Throwable) => {
                  logger.error(s"Unexpected error while executing task ${task.name}: ${e.getMessage}")
                  val sw = new StringWriter()
                  val pw = new PrintWriter(sw, true)
                  e.printStackTrace(pw)
                  logger.error(sw.toString)
                }
              }
            }
          }
        }
      }
    }
  }
  override val Scheduler = new ThreadPoolScheduler

} 
Example 125
Source File: CatsIoTestRunner.scala    From laserdisc   with MIT License 5 votes vote down vote up
package laserdisc
package fs2

import java.util.concurrent.{Executors, TimeUnit}

import cats.effect.{ContextShift, IO, Timer}
import cats.syntax.flatMap._
import laserdisc.auto._
import log.effect.fs2.SyncLogWriter.consoleLogUpToLevel
import log.effect.{LogLevels, LogWriter}

import scala.concurrent.ExecutionContext
import scala.concurrent.ExecutionContext.fromExecutor

object CatsIoTestRunner extends TestCases {

  private[this] val ec: ExecutionContext = fromExecutor(Executors.newFixedThreadPool(8))

  private[this] implicit val timer: Timer[IO]               = IO.timer(ec)
  private[this] implicit val contextShift: ContextShift[IO] = IO.contextShift(ec)
  private[this] implicit val logWriter: LogWriter[IO]       = consoleLogUpToLevel(LogLevels.Error)

  def main(args: Array[String]): Unit = {

    val task = timer.clock.monotonic(TimeUnit.MINUTES) >>= { start: Long =>
      RedisClient.to("localhost", 6379).use { cl =>
        def loop(count: Long): IO[Long] =
          case1(cl) >> timer.clock.monotonic(TimeUnit.MINUTES) >>= { current =>
            if (current - start >= 2) IO.pure(count)
            else loop(count + 1)
          }

        loop(0)
      }
    }

    println(s"Avg send/s: ${task.unsafeRunSync() * 24.0 / 2 / 60}")
    sys.exit()
  }
} 
Example 126
Source File: Threads.scala    From shapenet-viewer   with MIT License 5 votes vote down vote up
package edu.stanford.graphics.shapenet.util

import java.util.concurrent.{Future, Executors}

object Threads extends Loggable {
  lazy val threadPool = Executors.newCachedThreadPool()
  def execute(runnable: Runnable, logger: org.slf4j.Logger = this.logger, desc: String = ""): Future[_] = {
    val wrappedRunnable = new RunnableWithLogging(runnable, logger, desc)
    threadPool.submit(wrappedRunnable)
  }
}


class RunnableWithLogging(val runnable: Runnable, val logger: org.slf4j.Logger, val desc: String) extends Runnable {
  override def run(): Unit = {
    try {
      runnable.run()
    } catch {
      case ex: Throwable => {
        logger.error("Error running " + desc, ex)
      }
    }
  }
} 
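
A usage sketch for the helper above (the call site is hypothetical; the returned Future can be ignored or polled):

import edu.stanford.graphics.shapenet.util.Threads

object ThreadsUsageSketch {
  def main(args: Array[String]): Unit = {
    val f = Threads.execute(new Runnable {
      def run(): Unit = println("doing background work")
    }, desc = "demo task")
    f.get()   // block until the wrapped runnable has finished
  }
}
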
Example 127
Source File: ConcurrentUtil.scala    From sona   with Apache License 2.0 5 votes vote down vote up
package com.tencent.angel.sona.tree.util

import java.util.concurrent.{Callable, ExecutorService, Executors, Future}

object ConcurrentUtil {
  private[tree] var numThread: Int = 1
  private[tree] var threadPool: ExecutorService = _
  private[tree] val DEFAULT_BATCH_SIZE = 1000000

  private[tree] def reset(parallelism: Int): Unit = {
    ConcurrentUtil.getClass.synchronized {
      this.numThread = parallelism
      this.threadPool = Executors.newFixedThreadPool(parallelism)
    }
  }

  private[tree] def rangeParallel[A](f: (Int, Int) => A, start: Int, end: Int,
                                    batchSize: Int = DEFAULT_BATCH_SIZE): Array[Future[A]] = {
    val futures = Array.ofDim[Future[A]](MathUtil.idivCeil(end - start, batchSize))
    var cur = start
    var threadId = 0
    while (cur < end) {
      val i = cur
      val j = (cur + batchSize) min end
      futures(threadId) = threadPool.submit(new Callable[A] {
        override def call(): A = f(i, j)
      })
      cur = j
      threadId += 1
    }
    futures
  }

  private[tree] def shutdown(): Unit = ConcurrentUtil.getClass.synchronized {
    if (threadPool != null)
      threadPool.shutdown()
  }

} 
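
As a worked example of the batching in rangeParallel above (assuming MathUtil.idivCeil is ceiling division): with start = 0, end = 100 and batchSize = 30, the futures array gets idivCeil(100, 30) = 4 slots, and the loop submits Callables for the ranges [0, 30), [30, 60), [60, 90) and [90, 100); each returned Future then yields f applied to one of those ranges.
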
Example 128
Source File: TwitterStatusReader.scala    From kafka-tweet-producer   with Apache License 2.0 5 votes vote down vote up
package com.eneco.trading.kafka.connect.twitter

import java.util
import java.util.concurrent.{TimeUnit, LinkedBlockingQueue, Executors}
import com.eneco.trading.kafka.connect.twitter.domain.TwitterStatus
import com.twitter.hbc.httpclient.BasicClient
import com.twitter.hbc.twitter4j.Twitter4jStatusClient
import org.apache.kafka.connect.data.Schema
import org.apache.kafka.connect.source.SourceRecord
import twitter4j._
import scala.collection.JavaConverters._
import Extensions._

class StatusEnqueuer(queue: LinkedBlockingQueue[Status]) extends StatusListener with Logging {
  override def onStallWarning(stallWarning: StallWarning) = log.warn("onStallWarning")
  override def onDeletionNotice(statusDeletionNotice: StatusDeletionNotice) = log.info("onDeletionNotice")

  override def onScrubGeo(l: Long, l1: Long) = {
    log.debug(s"onScrubGeo $l $l1")
  }

  override def onStatus(status: Status) = {
    log.debug("onStatus")
    queue.put(status)
  }

  override def onTrackLimitationNotice(i: Int) = log.info(s"onTrackLimitationNotice $i")
  override def onException(e: Exception)= log.warn("onException " + e.toString)
}

trait StatusToSourceRecord {
  def convert(status: Status, topic: String): SourceRecord
}

object StatusToStringKeyValue extends StatusToSourceRecord {
  def convert (status: Status, topic: String): SourceRecord = {
    new SourceRecord(
      Map("tweetSource" -> status.getSource).asJava, //source partitions?
      Map("tweetId" -> status.getId).asJava, //source offsets?
      topic,
      null,
      Schema.STRING_SCHEMA,
      status.getUser.getScreenName,
      Schema.STRING_SCHEMA,
      status.getText)
  }
}

object StatusToTwitterStatusStructure extends StatusToSourceRecord {
  def convert(status: Status, topic: String): SourceRecord = {
    //val ts = TwitterStatus.struct(TwitterStatus(status))
    new SourceRecord(
      Map("tweetSource" -> status.getSource).asJava, //source partitions?
      Map("tweetId" -> status.getId).asJava, //source offsets?
      topic,
      TwitterStatus.schema,
      TwitterStatus.struct(status))
  }
}


  // NOTE: the enclosing reader class is truncated in this excerpt; `log` and `client` used
  // below come from the omitted part of the file.
  def stop() = {
    log.info("Stop Twitter client")
    client.stop()
  }


} 
Example 129
Source File: LinebackerSpec.scala    From linebacker   with MIT License 5 votes vote down vote up
package io.chrisdavenport.linebacker

import org.specs2._
import cats.effect._
import cats.implicits._
import java.lang.Thread
import scala.concurrent.ExecutionContext
import java.util.concurrent.atomic.AtomicLong
import java.util.concurrent.{Executors, ThreadFactory}
import _root_.io.chrisdavenport.linebacker.contexts.{Executors => E}
import scala.concurrent.ExecutionContext.global

class LinebackerSpec extends Spec {
  override def is = s2"""
  Threads Run On Linebacker $runsOnLinebacker
  Threads Afterwards Run on Provided EC $runsOffLinebackerAfterwards
  """

  def runsOnLinebacker = {
    val testRun = E
      .unbound[IO]
      .map(Linebacker.fromExecutorService[IO])
      .use { implicit linebacker =>
        implicit val cs = IO.contextShift(global)
        Linebacker[IO].blockContextShift(IO(Thread.currentThread().getName))
      }

    testRun.unsafeRunSync must_=== "linebacker-thread-0"
  }

  def runsOffLinebackerAfterwards = {
    val executor = Executors.newCachedThreadPool(new ThreadFactory {
      private val counter = new AtomicLong(0L)

      def newThread(r: Runnable) = {
        val th = new Thread(r)
        th.setName("test-ec-" + counter.getAndIncrement.toString)
        th.setDaemon(true)
        th
      }
    })
    implicit val ec = ExecutionContext
      .fromExecutorService(executor)

    implicit val linebacker = Linebacker.fromExecutionContext[IO](global) // Block Onto Global
    implicit val cs = IO.contextShift(ec) // Should return to custom

    val testRun = Linebacker[IO].blockContextShift(IO.unit) *>
      IO(Thread.currentThread().getName) <*
      IO(executor.shutdownNow)

    testRun.unsafeRunSync must_=== "test-ec-0"
  }
} 
Example 130
Source File: InstrumentationService.scala    From iep-apps   with Apache License 2.0 5 votes vote down vote up
package com.netflix.iep.clienttest

import java.util.UUID
import java.util.concurrent.Executors
import java.util.concurrent.TimeUnit
import javax.inject.Inject
import javax.inject.Singleton

import com.netflix.iep.service.AbstractService
import com.typesafe.config.Config
import org.slf4j.LoggerFactory

@Singleton
class InstrumentationService @Inject()(config: Config, metrics: MetricLibrary)
    extends AbstractService {

  private val logger = LoggerFactory.getLogger("")

  private val tagsPerMetric = config.getInt("netflix.iep.clienttest.tags-per-metric")

  private val numCounters = config.getInt("netflix.iep.clienttest.num-counters")
  private val numTimers = config.getInt("netflix.iep.clienttest.num-timers")
  private val numDistSummaries = config.getInt("netflix.iep.clienttest.num-dist-summaries")
  private val numGauges = config.getInt("netflix.iep.clienttest.num-gauges")
  private val numPolledGauges = config.getInt("netflix.iep.clienttest.num-polled-gauges")
  private val numSlowPolledGauges = config.getInt("netflix.iep.clienttest.num-slow-polled-gauges")

  // To minimize other noise in terms of memory use and computation we use the same base tag
  // set for all metrics.
  private val tagsData = (0 until tagsPerMetric).map { i =>
    val key = f"$i%05d"
    key -> UUID.randomUUID().toString
  }.toMap

  private val executor = Executors.newScheduledThreadPool(2)
  executor.scheduleWithFixedDelay(() => update(), 0L, 10, TimeUnit.SECONDS)

  // Polled sources only need to be registered once
  (0 until numPolledGauges).foreach { i =>
    metrics.poll("polledGauge", createTags(i), i.toDouble)
  }
  (0 until numSlowPolledGauges).foreach { i =>
    metrics.poll("slowPolledGauge", createTags(i), {
      Thread.sleep(120000)
      i.toDouble
    })
  }

  private def update(): Unit = {
    logger.info("update starting")
    logger.info(s"updating $numCounters counters")
    (0 until numCounters).foreach { i =>
      metrics.increment("counter", createTags(i))
    }
    logger.info(s"updating $numTimers timers")
    (0 until numTimers).foreach { i =>
      metrics.recordTime("timer", createTags(i), i)
    }
    logger.info(s"updating $numDistSummaries distribution summaries")
    (0 until numDistSummaries).foreach { i =>
      metrics.recordTime("distSummary", createTags(i), i)
    }
    logger.info(s"updating $numGauges gauges")
    (0 until numGauges).foreach { i =>
      metrics.set("gauge", createTags(i), i)
    }
    logger.info("update complete")
  }

  private def createTags(i: Int): Map[String, String] = {
    tagsData + ("id" -> i.toString)
  }

  override def startImpl(): Unit = ()

  override def stopImpl(): Unit = {
    executor.shutdownNow()
  }
} 
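
The service above drives its metric updates from a two-thread scheduled pool via scheduleWithFixedDelay and tears it down with shutdownNow(). The periodic wiring in isolation (a sketch; the metric updates are stubbed out and the delay shortened so the demo finishes quickly, whereas the service uses 10 seconds):

import java.util.concurrent.{Executors, TimeUnit}

object PeriodicUpdateSketch {
  def main(args: Array[String]): Unit = {
    val executor = Executors.newScheduledThreadPool(2)
    // Run immediately, then wait 1 second after each completion before the next run.
    executor.scheduleWithFixedDelay(new Runnable {
      def run(): Unit = println("update starting")
    }, 0L, 1L, TimeUnit.SECONDS)

    Thread.sleep(3500)     // let a few updates go through for the sake of the demo
    executor.shutdownNow() // interrupts the periodic task and stops the pool
  }
}
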
Example 131
Source File: KernelThreads.scala    From almond   with BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
package almond.kernel

import java.util.concurrent.Executors

import almond.util.ThreadUtil.{attemptShutdownExecutionContext, daemonThreadFactory, sequentialExecutionContext}

import scala.concurrent.ExecutionContext

final case class KernelThreads(
  queueEc: ExecutionContext,
  futureEc: ExecutionContext,
  scheduleEc: ExecutionContext,
  commEc: ExecutionContext
) {
  def attemptShutdown(): Unit =
    Seq(queueEc, futureEc, scheduleEc, commEc)
      .distinct
      .foreach { ec =>
        if (!attemptShutdownExecutionContext(ec))
          println(s"Don't know how to shutdown $ec")
      }
}

object KernelThreads {
  def create(name: String): KernelThreads = {

    val dummyStuffEc = ExecutionContext.fromExecutorService(
      Executors.newSingleThreadExecutor(daemonThreadFactory(s"$name-dummy-stuff"))
    )

    KernelThreads(
      sequentialExecutionContext(),
      dummyStuffEc,
      dummyStuffEc,
      ExecutionContext.fromExecutorService(
        Executors.newFixedThreadPool(2, daemonThreadFactory(s"$name-comm"))
      )
    )
  }
} 
Example 132
Source File: InfluxClientFactory.scala    From cave   with MIT License 5 votes vote down vote up
package com.cave.metrics.data.influxdb

import java.util.concurrent.Executors

import com.cave.metrics.data.Metric
import com.typesafe.config.Config
import org.joda.time.{DateTimeZone, DateTime}
import collection.JavaConversions._
import scala.concurrent.ExecutionContext

case class InfluxConfiguration(default: InfluxClusterConfig, alternates: Map[String, InfluxClusterConfig]) {

  val alts = alternates.map { case (name, config) => s"Name: $name, Config: $config"}
  println(s"Default: $default, Alters: $alts")
}

object InfluxConfiguration {

  def apply(config: Config) = {

    val default = InfluxClusterConfig(config.getString("url"), config.getString("user"), config.getString("pass"))

    val alternates = config.getConfigList("alternates") map { conf =>
      conf.getString("name") -> InfluxClusterConfig(conf.getString("url"), default.user, default.pass)
    }

    new InfluxConfiguration(default, alternates.toMap)
  }
}

class InfluxClientFactory(config: InfluxConfiguration) {

  def createClient(clusterConfig: InfluxClusterConfig): (InfluxClient, ExecutionContext) =
    new InfluxClient(clusterConfig) -> ExecutionContext.fromExecutorService(Executors.newSingleThreadExecutor())

  val defaultClient = createClient(config.default)
  val alternates = config.alternates map { case (name, clusterConfig) => name -> createClient(clusterConfig)}

  def getClient(name: Option[String]): (InfluxClient, ExecutionContext) = name match {
    case None => defaultClient
    case Some(clusterName) => alternates.getOrElse(clusterName, default = defaultClient)
  }

  def sendMetrics(metrics: Seq[Metric]): Unit = {

    val now = new DateTime().withZone(DateTimeZone.UTC).getMillis / 1000
    val maxDelay = metrics.foldLeft(0L) { case (delay, metric) =>
        Math.max(delay, Math.abs(metric.timestamp - now))
    }
    val (defaultClient, defaultContext) = getClient(None)
    defaultClient.putMetricData(Seq(
      Metric("writer-delay", now, maxDelay, Map(Metric.Organization -> Metric.Internal))
    ))(defaultContext)

    metrics.groupBy(_.tags.get(Metric.Cluster)) map { case (cluster, metricSeq) =>
      val (client, context) = getClient(cluster)
      client.putMetricData(metricSeq)(context)
    }
  }

  def close(): Unit = {
    defaultClient._1.close()
    alternates foreach { case (_, (client, _)) => client.close() }
  }
} 
Example 133
Source File: Main.scala    From http4s-poc-api   with MIT License 5 votes vote down vote up
package server

import java.util.concurrent.Executors

import com.github.ghik.silencer.silent
import external.{TeamOneHttpApi, TeamThreeCacheApi, TeamTwoHttpApi}
import io.circe.generic.auto._
import log.effect.zio.ZioLogWriter._
import model.DomainModel._
import org.http4s.circe._
import org.http4s.server.Router
import org.http4s.server.blaze.BlazeServerBuilder
import org.http4s.syntax.kleisli._
import org.http4s.{EntityDecoder, EntityEncoder, HttpApp}
import service.PriceService
import zio.interop.catz._
import zio.interop.catz.implicits._
import zio.{ExitCode, RIO, Task, ZEnv, ZIO}

import scala.concurrent.ExecutionContext
import model.DomainModelCodecs._

@silent
object Main extends zio.interop.catz.CatsApp with Pools with Codecs {
  private[this] val priceService: RIO[String, PriceService[Task]] =
    log4sFromName map { log =>
      PriceService[Task](
        TeamThreeCacheApi.productCache,
        TeamOneHttpApi(),
        TeamTwoHttpApi(),
        log
      )
    }

  private[this] val httpApp: RIO[PriceService[Task], HttpApp[Task]] =
    ZIO.access { ps =>
      Router(
        "/pricing-api/prices"       -> PriceRoutes[Task].make(ps),
        "/pricing-api/health-check" -> HealthCheckRoutes[Task].make(ps.logger)
      ).orNotFound
    }

  private[this] val runningServer: RIO[HttpApp[Task], Unit] =
    ZIO.accessM { app =>
      BlazeServerBuilder[Task](serverPool)
        .bindHttp(17171, "0.0.0.0")
        .withConnectorPoolSize(connectorPoolSize)
        .enableHttp2(true)
        .withHttpApp(app)
        .serve
        .compile
        .drain
    }

  private[this] val serviceRuntime: RIO[String, Unit] =
    priceService >>> httpApp >>> runningServer

  def run(args: List[String]): ZIO[ZEnv, Nothing, ExitCode] =
    serviceRuntime.fold(_ => ExitCode.failure, _ => ExitCode.success) provide "App log"
}

sealed trait Pools {

  protected val connectorPoolSize = Runtime.getRuntime.availableProcessors() * 2
  protected val mainThreadNumber  = Runtime.getRuntime.availableProcessors() + 1

  protected val serverPool = ExecutionContext.fromExecutor(
    Executors.newWorkStealingPool(mainThreadNumber)
  )
}

sealed trait Codecs {
  implicit val priceRequestPayloadDecoder: EntityDecoder[Task, PricesRequestPayload] =
    jsonOf[Task, PricesRequestPayload]

  implicit val priceResponsePayloadEncoder: EntityEncoder[Task, List[Price]] =
    jsonEncoderOf[Task, List[Price]]

  implicit val healthCheckResponsePayloadEncoder: EntityEncoder[Task, ServiceSignature] =
    jsonEncoderOf[Task, ServiceSignature]
} 
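
As a concrete reading of the Pools trait above: on a machine where Runtime.getRuntime.availableProcessors() returns 8, connectorPoolSize is 16 and the work-stealing serverPool is created with mainThreadNumber = 9 threads.
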
Example 134
Source File: Concurrent.scala    From zen   with Apache License 2.0 5 votes vote down vote up
package com.github.cloudml.zen.ml.util

import java.util.concurrent.{Executors, LinkedBlockingQueue, ThreadPoolExecutor}

import scala.concurrent._
import scala.concurrent.duration._


object Concurrent extends Serializable {
  @inline def withFuture[T](body: => T)(implicit es: ExecutionContextExecutorService): Future[T] = {
    Future(body)(es)
  }

  @inline def withAwaitReady[T](future: Future[T]): Unit = {
    Await.ready(future, 1.hour)
  }

  def withAwaitReadyAndClose[T](future: Future[T])(implicit es: ExecutionContextExecutorService): Unit = {
    Await.ready(future, 1.hour)
    closeExecutionContext(es)
  }

  @inline def withAwaitResult[T](future: Future[T]): T = {
    Await.result(future, 1.hour)
  }

  def withAwaitResultAndClose[T](future: Future[T])(implicit es: ExecutionContextExecutorService): T = {
    val res = Await.result(future, 1.hour)
    closeExecutionContext(es)
    res
  }

  @inline def initExecutionContext(numThreads: Int): ExecutionContextExecutorService = {
    ExecutionContext.fromExecutorService(Executors.newFixedThreadPool(numThreads))
  }

  @inline def closeExecutionContext(es: ExecutionContextExecutorService): Unit = {
    es.shutdown()
  }
}

object DebugConcurrent extends Serializable {
  def withFuture[T](body: => T)(implicit es: ExecutionContextExecutorService): Future[T] = {
    val future = Future(body)(es)
    future.onFailure { case e =>
      e.printStackTrace()
    }(scala.concurrent.ExecutionContext.Implicits.global)
    future
  }

  def withAwaitReady[T](future: Future[T]): Unit = {
    Await.ready(future, 1.hour)
  }

  def withAwaitReadyAndClose[T](future: Future[T])(implicit es: ExecutionContextExecutorService): Unit = {
    future.onComplete { _ =>
      closeExecutionContext(es)
    }(scala.concurrent.ExecutionContext.Implicits.global)
    Await.ready(future, 1.hour)
  }

  def withAwaitResult[T](future: Future[T]): T = {
    Await.result(future, 1.hour)
  }

  def withAwaitResultAndClose[T](future: Future[T])(implicit es: ExecutionContextExecutorService): T = {
    future.onComplete { _ =>
      closeExecutionContext(es)
    }(scala.concurrent.ExecutionContext.Implicits.global)
    Await.result(future, 1.hour)
  }

  def initExecutionContext(numThreads: Int): ExecutionContextExecutorService = {
    val es = new ThreadPoolExecutor(numThreads, numThreads, 0L, MILLISECONDS, new LinkedBlockingQueue[Runnable],
      Executors.defaultThreadFactory, new ThreadPoolExecutor.AbortPolicy)
    ExecutionContext.fromExecutorService(es)
  }

  def closeExecutionContext(es: ExecutionContextExecutorService): Unit = {
    es.shutdown()
    if (!es.awaitTermination(1L, SECONDS)) {
      System.err.println("Error: ExecutorService does not exit itself, force to terminate.")
    }
  }
} 
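
A short usage sketch for the Concurrent helpers above (assuming the object is available exactly as defined here):

import com.github.cloudml.zen.ml.util.Concurrent._

object ConcurrentUsageSketch {
  def main(args: Array[String]): Unit = {
    // Fixed pool of 4 threads, picked up implicitly by withFuture.
    implicit val es = initExecutionContext(4)
    val sum = withFuture((1 to 1000).sum)
    // Blocks for the result (up to the 1-hour cap above), then shuts the pool down.
    println(withAwaitResultAndClose(sum))
  }
}
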
Example 135
Source File: KafkaCollector.scala    From Swallow   with Apache License 2.0 5 votes vote down vote up
package com.intel.hibench.common.streaming.metrics

import java.io.{FileWriter, File}
import java.util.Date
import java.util.concurrent.{TimeUnit, Future, Executors}

import com.codahale.metrics.{UniformReservoir, Histogram}
import kafka.utils.{ZKStringSerializer, ZkUtils}
import org.I0Itec.zkclient.ZkClient

import scala.collection.mutable.ArrayBuffer


class KafkaCollector(zkConnect: String, metricsTopic: String,
    outputDir: String, sampleNumber: Int, desiredThreadNum: Int) extends LatencyCollector {

  private val histogram = new Histogram(new UniformReservoir(sampleNumber))
  private val threadPool = Executors.newFixedThreadPool(desiredThreadNum)
  private val fetchResults = ArrayBuffer.empty[Future[FetchJobResult]]

  def start(): Unit = {
    val partitions = getPartitions(metricsTopic, zkConnect)

    println("Starting MetricsReader for kafka topic: " + metricsTopic)

    partitions.foreach(partition => {
      val job = new FetchJob(zkConnect, metricsTopic, partition, histogram)
      val fetchFeature = threadPool.submit(job)
      fetchResults += fetchFeature
    })

    threadPool.shutdown()
    threadPool.awaitTermination(30, TimeUnit.MINUTES)

    val finalResults = fetchResults.map(_.get()).reduce((a, b) => {
      val minTime = Math.min(a.minTime, b.minTime)
      val maxTime = Math.max(a.maxTime, b.maxTime)
      val count = a.count + b.count
      new FetchJobResult(minTime, maxTime, count)
    })

    report(finalResults.minTime, finalResults.maxTime, finalResults.count)
  }

  private def getPartitions(topic: String, zkConnect: String): Seq[Int] = {
    val zkClient = new ZkClient(zkConnect, 6000, 6000, ZKStringSerializer)
    try {
      ZkUtils.getPartitionsForTopics(zkClient, Seq(topic)).flatMap(_._2).toSeq
    } finally {
      zkClient.close()
    }
  }


  private def report(minTime: Long, maxTime: Long, count: Long): Unit = {
    val outputFile = new File(outputDir, metricsTopic + ".csv")
    println(s"written out metrics to ${outputFile.getCanonicalPath}")
    val header = "time,count,throughput(msgs/s),max_latency(ms),mean_latency(ms),min_latency(ms)," +
        "stddev_latency(ms),p50_latency(ms),p75_latency(ms),p95_latency(ms),p98_latency(ms)," +
        "p99_latency(ms),p999_latency(ms)\n"
    val fileExists = outputFile.exists()
    if (!fileExists) {
      val parent = outputFile.getParentFile
      if (!parent.exists()) {
        parent.mkdirs()
      }
      outputFile.createNewFile()
    }
    val outputFileWriter = new FileWriter(outputFile, true)
    if (!fileExists) {
      outputFileWriter.append(header)
    }
    val time = new Date(System.currentTimeMillis()).toString
    val count = histogram.getCount
    val snapshot = histogram.getSnapshot
    val throughput = count * 1000 / (maxTime - minTime)
    outputFileWriter.append(s"$time,$count,$throughput," +
        s"${formatDouble(snapshot.getMax)}," +
        s"${formatDouble(snapshot.getMean)}," +
        s"${formatDouble(snapshot.getMin)}," +
        s"${formatDouble(snapshot.getStdDev)}," +
        s"${formatDouble(snapshot.getMedian)}," +
        s"${formatDouble(snapshot.get75thPercentile())}," +
        s"${formatDouble(snapshot.get95thPercentile())}," +
        s"${formatDouble(snapshot.get98thPercentile())}," +
        s"${formatDouble(snapshot.get99thPercentile())}," +
        s"${formatDouble(snapshot.get999thPercentile())}\n")
    outputFileWriter.close()
  }

  private def formatDouble(d: Double): String = {
    "%.3f".format(d)
  }

} 
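
KafkaCollector above fans one FetchJob out per partition, blocks on shutdown/awaitTermination, and folds the per-partition results together. The same fan-out/fan-in shape with plain JDK futures (illustrative only; the Kafka fetching is replaced by a stub):

import java.util.concurrent.{Callable, Executors, TimeUnit}

import scala.collection.mutable.ArrayBuffer

object FanOutSketch {
  def main(args: Array[String]): Unit = {
    val partitions = 0 until 4
    val threadPool = Executors.newFixedThreadPool(partitions.size)
    val results = ArrayBuffer.empty[java.util.concurrent.Future[Int]]

    partitions.foreach { p =>
      results += threadPool.submit(new Callable[Int] {
        override def call(): Int = p * 10   // stand-in for fetching one partition
      })
    }

    threadPool.shutdown()
    threadPool.awaitTermination(1L, TimeUnit.MINUTES)

    val total = results.map(_.get()).sum   // fold the per-partition results together
    println(s"combined result: $total")
  }
}
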
Example 136
Source File: MustacheViewRenderer.scala    From peregrine   with Apache License 2.0 5 votes vote down vote up
package io.peregrine.view

import io.peregrine._
import com.github.mustachejava._
import com.google.common.base.Charsets
import com.twitter.mustache._
import com.twitter.util._
import java.io._
import java.util.concurrent.Executors


class PeregrineMustacheFactory(templatePath: String)
  extends DefaultMustacheFactory(templatePath) {

  def invalidateCaches() : Unit = {
    mustacheCache.clear()
    templateCache.clear()
  }
}

private[peregrine] object MustacheViewFactoryHolder {
  val templatePath  = config.templatePath()
  lazy val factory  = new PeregrineMustacheFactory(templatePath)

  factory.setObjectHandler(new ScalaObjectHandler())
  factory.setExecutorService(Executors.newCachedThreadPool)
}

trait MustacheViewRenderer extends ViewRenderer {

  val format = "mustache"

  lazy val location = MustacheViewFactoryHolder.templatePath
  lazy val factory  = MustacheViewFactoryHolder.factory

  def render(templateName: String, view: View) = {
    if (config.env() == "development") {
      factory.invalidateCaches()
    }

    getPath(templateName) match {
      case None            =>
        throw new FileNotFoundException(s"""Template file [$templateName] not found in [
          ${System.getProperty("user.dir")}/app$location,
          ${getClass.getResource("")}
        ]""")

      case Some(reader)  =>

        val mustache = factory.compile(reader, templateName)
        val output   = new StringWriter
        mustache.execute(output, view).flush()
        output.toString
    }
  }

  def getPath(templateName: String): Option[Reader] = {
    val templatePathName = if (location == "/") s"/$templateName.mustache" else s"$location/$templateName.mustache"
    val path = s"${System.getProperty("user.dir")}$templatePathName"
    val file = new File(path)
    if(file.exists && file.isFile) {
      Some(new BufferedReader(new InputStreamReader(new FileInputStream(file))))
    } else {
      Option(getClass.getResourceAsStream(templatePathName)).map(r => new BufferedReader(new InputStreamReader(r)))
    }
  }
}

object MustacheViewRenderer extends MustacheViewRenderer 
Example 137
Source File: TwitterStatusReader.scala    From kafka-connect-twitter   with Apache License 2.0 5 votes vote down vote up
package com.eneco.trading.kafka.connect.twitter

import java.util
import java.util.concurrent.{TimeUnit, LinkedBlockingQueue, Executors}
import com.eneco.trading.kafka.connect.twitter.domain.TwitterStatus
import com.twitter.hbc.httpclient.BasicClient
import com.twitter.hbc.twitter4j.Twitter4jStatusClient
import org.apache.kafka.connect.data.Schema
import org.apache.kafka.connect.source.SourceRecord
import twitter4j._
import scala.collection.JavaConverters._
import Extensions._

class StatusEnqueuer(queue: LinkedBlockingQueue[Status]) extends StatusListener with Logging {
  override def onStallWarning(stallWarning: StallWarning) = log.warn("onStallWarning")
  override def onDeletionNotice(statusDeletionNotice: StatusDeletionNotice) = log.info("onDeletionNotice")

  override def onScrubGeo(l: Long, l1: Long) = {
    log.debug(s"onScrubGeo $l $l1")
  }

  override def onStatus(status: Status) = {
    log.debug("onStatus")
    queue.put(status)
  }

  override def onTrackLimitationNotice(i: Int) = log.info(s"onTrackLimitationNotice $i")
  override def onException(e: Exception)= log.warn("onException " + e.toString)
}

trait StatusToSourceRecord {
  def convert(status: Status, topic: String): SourceRecord
}

object StatusToStringKeyValue extends StatusToSourceRecord {
  def convert (status: Status, topic: String): SourceRecord = {
    new SourceRecord(
      Map("tweetSource" -> status.getSource).asJava, //source partitions?
      Map("tweetId" -> status.getId).asJava, //source offsets?
      topic,
      null,
      Schema.STRING_SCHEMA,
      status.getUser.getScreenName,
      Schema.STRING_SCHEMA,
      status.getText,
      status.getCreatedAt.getTime)
  }
}

object StatusToTwitterStatusStructure extends StatusToSourceRecord {
  def convert(status: Status, topic: String): SourceRecord = {
    //val ts = TwitterStatus.struct(TwitterStatus(status))
    new SourceRecord(
      Map("tweetSource" -> status.getSource).asJava, //source partitions?
      Map("tweetId" -> status.getId).asJava, //source offsets?
      topic,
      null,
      Schema.STRING_SCHEMA,
      status.getUser.getScreenName,
      TwitterStatus.schema,
      TwitterStatus.struct(status),
      status.getCreatedAt.getTime)
  }
}


  // Note: the enclosing TwitterStatusReader class (which owns the `log` and `client` used below) is truncated in this listing.
  def stop() = {
    log.info("Stop Twitter client")
    client.stop()
  }


} 
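Because the reader class itself is cut off above, the following is only a rough sketch of how the surviving pieces typically fit together: a single-threaded executor drains the status queue filled by StatusEnqueuer and converts each status with one of the StatusToSourceRecord converters. The class name, constructor parameters, and logging calls below are illustrative assumptions, not the project's actual API.

class SimpleStatusReader(client: BasicClient,
                         queue: LinkedBlockingQueue[Status],
                         topic: String,
                         converter: StatusToSourceRecord) extends Logging {

  private val executor = Executors.newSingleThreadExecutor()

  def start(): Unit = executor.submit(new Runnable {
    override def run(): Unit =
      while (!client.isDone) {
        // Wait up to a second for the next status pushed by StatusEnqueuer, then convert it.
        Option(queue.poll(1, TimeUnit.SECONDS))
          .map(status => converter.convert(status, topic))
          .foreach(record => log.debug(s"Converted tweet into record for topic ${record.topic()}"))
      }
  })

  def stop(): Unit = {
    log.info("Stop Twitter client")
    client.stop()
    executor.shutdown()
  }
}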
Example 138
package packt.ch05

import java.util
import java.util.Properties
import java.util.concurrent.ExecutorService
import java.util.concurrent.Executors

import kafka.consumer.ConsumerConfig
import MultiThreadConsumer._

import scala.collection.JavaConversions._

object MultiThreadConsumer {

  private def createConsumerConfig(zookeeper: String, groupId: String): ConsumerConfig = {
    val props = new Properties()
    props.put("zookeeper.connect", zookeeper)
    props.put("group.id", groupId)
    props.put("zookeeper.session.timeout.ms", "500")
    props.put("zookeeper.sync.time.ms", "250")
    props.put("auto.commit.interval.ms", "1000")
    new ConsumerConfig(props)
  }

  def main(args: Array[String]) {
    val zooKeeper = args(0)
    val groupId = args(1)
    val topic = args(2)
    val threadCount = java.lang.Integer.parseInt(args(3))
    val multiThreadHLConsumer = new MultiThreadConsumer(zooKeeper, groupId, topic)
    multiThreadHLConsumer.testMultiThreadConsumer(threadCount)
    try {
      Thread.sleep(10000)
    } catch {
      case ie: InterruptedException =>
    }
    multiThreadHLConsumer.shutdown()
  }
}

class MultiThreadConsumer(zookeeper: String, groupId: String, topic: String) {

  private var executor: ExecutorService = _

  private val consumer = kafka.consumer.Consumer.createJavaConsumerConnector(createConsumerConfig(zookeeper,
    groupId))

  def shutdown() {
    if (consumer != null) consumer.shutdown()
    if (executor != null) executor.shutdown()
  }

  def testMultiThreadConsumer(threadCount: Int) {
    val topicMap = new util.HashMap[String, Integer]()

    // Define thread count for each topic
    topicMap.put(topic, threadCount)

    // Here we have used a single topic, but multiple topics
    // can also be added to the topicMap
    val consumerStreamsMap = consumer.createMessageStreams(topicMap)
    val streamList = consumerStreamsMap.get(topic)

    // Launching the thread pool
    executor = Executors.newFixedThreadPool(threadCount)

    // Create a runnable per stream to consume its messages
    var count = 0
    for (stream <- streamList) {
      val threadNumber = count
      executor.submit(new Runnable() {

        def run() {
          val consumerIte = stream.iterator()
          while (consumerIte.hasNext)
            println("Thread Number " + threadNumber + ": " + new String(consumerIte.next().message()))
          println("Shutting down Thread Number: " + threadNumber)
        }
      })
      count += 1
    }
    // Shutdown is handled by shutdown(), which main() invokes after its 10-second sleep;
    // shutting the consumer down here would stop the streams before any messages are read.
  }
} 
Example 139
Source File: MonixTest.scala    From phobos   with Apache License 2.0 5 votes vote down vote up
package ru.tinkoff.phobos.test

import java.util.concurrent.Executors

import monix.execution.Scheduler
import monix.reactive.Observable
import org.scalatest.AsyncWordSpec
import ru.tinkoff.phobos.annotations.{ElementCodec, XmlCodec}
import ru.tinkoff.phobos.decoding.XmlDecoder
import ru.tinkoff.phobos.syntax.text
import ru.tinkoff.phobos.monix._

class MonixTest extends AsyncWordSpec {
  implicit val scheduler: Scheduler = Scheduler(Executors.newScheduledThreadPool(4))

  "Monix decoder" should {
    "decode case classes correctly" in {
      @ElementCodec
      case class Bar(@text txt: Int)
      @XmlCodec("foo")
      case class Foo(qux: Int, maybeBar: Option[Bar], bars: List[Bar])

      val xml = """
        |<foo>
        | <qux>1234</qux>
        | <bars>2</bars>
        | <maybeBar>1</maybeBar>
        | <bars>3</bars>
        |</foo>
        |""".stripMargin

      val foo = Foo(1234, Some(Bar(1)), List(Bar(2), Bar(3)))
      val observable = Observable.fromIterable(xml.toIterable.map(x => Array(x.toByte)))
      XmlDecoder[Foo]
        .decodeFromObservable(observable)
        .map(result => assert(result == foo))
        .runToFuture
    }
  }
} 
Example 140
Source File: TaskFutureBenchmarks.scala    From Scala-High-Performance-Programming   with MIT License 5 votes vote down vote up
package highperfscala.concurrency.task

import java.util.concurrent.{ExecutorService, Executors, TimeUnit}

import org.openjdk.jmh.annotations.Mode.Throughput
import org.openjdk.jmh.annotations._

import scala.concurrent.{ExecutionContext, Future, Await}
import scala.concurrent.duration.Duration
import scalaz.concurrent.Task

@BenchmarkMode(Array(Throughput))
@OutputTimeUnit(TimeUnit.SECONDS)
@Warmup(iterations = 3, time = 5, timeUnit = TimeUnit.SECONDS)
@Measurement(iterations = 30, time = 10, timeUnit = TimeUnit.SECONDS)
@Fork(value = 1, warmups = 1, jvmArgs = Array("-Xms1G", "-Xmx1G"))
class TaskFutureBenchmarks {

  import TaskFutureBenchmarks._

  @Benchmark
  def mapWithFuture(state: TaskFutureState): Int = {
    implicit val ec = state.context
    val init = Future(0)
    val res = (1 until state.operations).foldLeft(init)((f, _) => f.map(_ + 1))
    Await.result(res, Duration("5 minutes"))
  }

  @Benchmark
  def mapWithTask(state: TaskFutureState): Int = {
    val init = Task(0)(state.es)
    val res = (1 until state.operations).foldLeft(init)((t, _) => t.map(_ + 1))
    res.unsafePerformSync
  }

  @Benchmark
  def flatMapWithFuture(state: TaskFutureState): Int = {
    implicit val ec = state.context
    val init = Future(0)
    val res = (1 until state.operations).foldLeft(init)((f, _) =>
      f.flatMap(i => Future(i + 1)))
    Await.result(res, Duration("5 minutes"))
  }

  @Benchmark
  def flatMapWithTask(state: TaskFutureState): Int = {
    val init = Task(0)(state.es)
    val res = (1 until state.operations).foldLeft(init)((t, _) =>
      t.flatMap(i => Task(i + 1)(state.es)))
    res.unsafePerformSync
  }

}

object TaskFutureBenchmarks {

  @State(Scope.Benchmark)
  class TaskFutureState {

    @Param(Array("5", "10", "100"))
    var operations: Int = 0

    var es: ExecutorService = null
    var context: ExecutionContext = null

    @Setup(Level.Trial)
    def setup(): Unit = {
      es = Executors.newFixedThreadPool(20)
      context = ExecutionContext.fromExecutor(es)
    }

    @TearDown(Level.Trial)
    def tearDown(): Unit = {
      es.shutdownNow()
    }
  }

} 
Example 141
package highperfscala.concurrency.blocking

import java.util.concurrent.{ExecutorService, Executors, TimeUnit}

import highperfscala.concurrency.blocking.BlockingExample.{ClientId, Order, Ticker}
import org.openjdk.jmh.annotations.Mode.Throughput
import org.openjdk.jmh.annotations._

import scala.concurrent.duration.Duration
import scala.concurrent.{Await, ExecutionContext, Future}

@BenchmarkMode(Array(Throughput))
@OutputTimeUnit(TimeUnit.SECONDS)
@Warmup(iterations = 3, time = 5, timeUnit = TimeUnit.SECONDS)
@Measurement(iterations = 30, time = 10, timeUnit = TimeUnit.SECONDS)
@Fork(value = 1, warmups = 1, jvmArgs = Array("-Xms1G", "-Xmx1G"))
class BlockingFutureBenchmarks {

  import BlockingFutureBenchmarks._

  @Benchmark
  def withDefaultContext(state: BlockingFutureState): List[List[Order]] = {
    val futures = (1 until state.operations).map{_ =>
      BlockingExample.JdbcOrderRepository.findBuyOrders(
        state.clientId, state.ticker
      )(state.defaultC)
    }

    implicit val ex = state.defaultC
    Await.result(
      Future.sequence(futures).map(_.toList),
      Duration("5 minutes")
    )
  }

  @Benchmark
  def withDedicatedContext(state: BlockingFutureState): List[List[Order]] = {
    val futures = (1 until state.operations).map{_ =>
      BlockingExample.JdbcOrderRepository.findBuyOrders(
        state.clientId, state.ticker
      )(state.dedicatedC)
    }

    implicit val ex = state.defaultC  // we use CPU-bound context for computations below
    Await.result(
      Future.sequence(futures).map(_.toList),
      Duration("5 minutes")
    )
  }

}

object BlockingFutureBenchmarks {

  @State(Scope.Benchmark)
  class BlockingFutureState {

    @Param(Array("10", "1000"))
    var operations: Int = 0

    val clientId = ClientId(12345)
    val ticker = Ticker("FOO")

    var defaultC: ExecutionContext = null
    var dedicatedC: ExecutionContext = null
    var es: ExecutorService = null

    @Setup(Level.Trial)
    def setup(): Unit = {
      defaultC = scala.concurrent.ExecutionContext.global
      es = {
        val i = Runtime.getRuntime.availableProcessors * 20
        Executors.newFixedThreadPool(i)
      }
      dedicatedC = ExecutionContext.fromExecutorService(es)
    }

    @TearDown(Level.Trial)
    def tearDown(): Unit = {
      es.shutdownNow()
    }

  }

} 
Example 142
Source File: FutureAwaitWithFailFastFnTest.scala    From kafka-connect-common   with Apache License 2.0 5 votes vote down vote up
package com.datamountaineer.streamreactor.connect.concurrent

import java.util.concurrent.Executors

import com.datamountaineer.streamreactor.connect.concurrent.ExecutorExtension._
import org.scalactic.source.Position
import org.scalatest.concurrent.{Eventually, TimeLimits}
import org.scalatest.matchers.should.Matchers
import org.scalatest.time.{Millis, Span}
import org.scalatest.wordspec.AnyWordSpec

import scala.util.{Failure, Try}


class FutureAwaitWithFailFastFnTest extends AnyWordSpec with Matchers with Eventually with TimeLimits {


  "FutureAwaitWithFailFastFn" should {
    "return when all the futures have completed" in {
      val exec = Executors.newFixedThreadPool(10)
      val futures = (1 to 5).map(i => exec.submit {
        Thread.sleep(300)
        i
      })
      eventually {
        val result = FutureAwaitWithFailFastFn(exec, futures)
        exec.isTerminated shouldBe true
        result shouldBe Seq(1, 2, 3, 4, 5)
      }
    }

    "stop when the first futures times out" in {
      val exec = Executors.newFixedThreadPool(6)
      val futures = for (i <- 1 to 10) yield {
        exec.submit {
          if (i == 4) {
            Thread.sleep(1000)
            sys.error("this task failed.")
          } else {
            Thread.sleep(50000)
          }
        }
      }

      eventually {
        val t = Try(FutureAwaitWithFailFastFn(exec, futures))
        t.isFailure shouldBe true
        t.asInstanceOf[Failure[_]].exception.getMessage shouldBe "this task failed."
        exec.isTerminated shouldBe true
      }
    }
  }

} 
Example 143
Source File: fileUtils.scala    From RISCV-FiveStage   with Apache License 2.0 5 votes vote down vote up
package FiveStage
import chisel3.iotesters._
import java.io.File
import java.nio.file.Path
import scala.collection.mutable.LinkedHashMap
// import cats.effect.ContextShift

import cats.implicits._
import cats._
import cats.syntax._
import cats.Applicative._
import atto._, Atto._

object fileUtils {

  def say(word: Any)(implicit filename: sourcecode.File, line: sourcecode.Line): Unit = {
    val fname = filename.value.split("/").last
    println(Console.YELLOW + s"[${fname}: ${line.value}]" + Console.RESET + s" - $word")
  }

  def sayRed(word: Any)(implicit filename: sourcecode.File, line: sourcecode.Line): Unit = {
    val fname = filename.value.split("/").last
    println(Console.YELLOW + s"[${fname}: ${line.value}]" + Console.RED + s" - $word")
  }
  def sayGreen(word: Any)(implicit filename: sourcecode.File, line: sourcecode.Line): Unit = {
    val fname = filename.value.split("/").last
    println(Console.YELLOW + s"[${fname}: ${line.value}]" + Console.GREEN + s" - $word")
  }

  def getListOfFiles(dir: String): List[File] =
    (new File(dir)).listFiles.filter(_.isFile).toList

  def getListOfFiles(dir: Path): List[File] =
    dir.toFile().listFiles.filter(_.isFile).toList


  def getListOfFolders(dir: String): List[File] =
    (new File(dir)).listFiles.filter(_.isDirectory).toList

  def getListOfFolders(dir: Path): List[File] =
    dir.toFile().listFiles.filter(_.isDirectory).toList

  def getListOfFilesRecursive(dir: String): List[File] = {
    getListOfFiles(dir) ::: getListOfFolders(dir).flatMap(f =>
      getListOfFilesRecursive(f.getPath)
    )
  }

  import cats.implicits._
  import java.nio.file.Paths
  import java.util.concurrent.Executors
  import scala.concurrent.ExecutionContext

  def relativeFile(name: String) = {
    new File(getClass.getClassLoader.getResource(name).getPath)
  }

  def getTestDir: File =
    new File(getClass.getClassLoader.getResource("tests").getPath)

  def getAllTests: List[File] = getListOfFilesRecursive(getTestDir.getPath)
      .filter( f => f.getPath.endsWith(".s") )

  def getAllTestNames: List[String]        = getAllTests.map(_.toString.split("/").takeRight(1).mkString)

  // Not tested.
  def getAllWindowsTestNames: List[String] = getAllTests.map(_.toString.split("\\\\").takeRight(1).mkString)

  def clearTestResults = {
    try {
      val testResults = relativeFile("/testResults")
      testResults.delete()
    }
    catch {
      case _:Throwable => ()
    }
  }

  
  def readTest(testOptions: TestOptions): Either[String, List[String]] = {

    // Ahh, the GNU toolchain and its tabs
    val annoyingTabCharacter = '	' 

    getAllTests.filter(_.getName.contains(testOptions.testName)).headOption.toRight(s"File not found: ${testOptions.testName}").flatMap{ filename =>
      import scala.io.Source
      scala.util.Try(
        Source.fromFile(filename)
          .getLines.toList
          .map(_.replace(annoyingTabCharacter, ' ')))
        .toOption
        .toRight(s"Error reading $filename")
    }
  }
} 
Example 144
Source File: SparkSQLSessionManager.scala    From BigDatalog   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.sql.hive.thriftserver

import java.util.concurrent.Executors

import org.apache.commons.logging.Log
import org.apache.hadoop.hive.conf.HiveConf
import org.apache.hadoop.hive.conf.HiveConf.ConfVars
import org.apache.hive.service.cli.SessionHandle
import org.apache.hive.service.cli.session.SessionManager
import org.apache.hive.service.cli.thrift.TProtocolVersion
import org.apache.hive.service.server.HiveServer2

import org.apache.spark.sql.hive.HiveContext
import org.apache.spark.sql.hive.thriftserver.ReflectionUtils._
import org.apache.spark.sql.hive.thriftserver.server.SparkSQLOperationManager


private[hive] class SparkSQLSessionManager(hiveServer: HiveServer2, hiveContext: HiveContext)
  extends SessionManager(hiveServer)
  with ReflectedCompositeService {

  private lazy val sparkSqlOperationManager = new SparkSQLOperationManager()

  override def init(hiveConf: HiveConf) {
    setSuperField(this, "hiveConf", hiveConf)

    // Create operation log root directory, if operation logging is enabled
    if (hiveConf.getBoolVar(ConfVars.HIVE_SERVER2_LOGGING_OPERATION_ENABLED)) {
      invoke(classOf[SessionManager], this, "initOperationLogRootDir")
    }

    val backgroundPoolSize = hiveConf.getIntVar(ConfVars.HIVE_SERVER2_ASYNC_EXEC_THREADS)
    setSuperField(this, "backgroundOperationPool", Executors.newFixedThreadPool(backgroundPoolSize))
    getAncestorField[Log](this, 3, "LOG").info(
      s"HiveServer2: Async execution pool size $backgroundPoolSize")

    setSuperField(this, "operationManager", sparkSqlOperationManager)
    addService(sparkSqlOperationManager)

    initCompositeService(hiveConf)
  }

  override def openSession(
      protocol: TProtocolVersion,
      username: String,
      passwd: String,
      ipAddress: String,
      sessionConf: java.util.Map[String, String],
      withImpersonation: Boolean,
      delegationToken: String): SessionHandle = {
    val sessionHandle =
      super.openSession(protocol, username, passwd, ipAddress, sessionConf, withImpersonation,
          delegationToken)
    val session = super.getSession(sessionHandle)
    HiveThriftServer2.listener.onSessionCreated(
      session.getIpAddress, sessionHandle.getSessionId.toString, session.getUsername)
    val ctx = if (hiveContext.hiveThriftServerSingleSession) {
      hiveContext
    } else {
      hiveContext.newSession()
    }
    ctx.setConf("spark.sql.hive.version", HiveContext.hiveExecutionVersion)
    sparkSqlOperationManager.sessionToContexts += sessionHandle -> ctx
    sessionHandle
  }

  override def closeSession(sessionHandle: SessionHandle) {
    HiveThriftServer2.listener.onSessionClosed(sessionHandle.getSessionId.toString)
    super.closeSession(sessionHandle)
    sparkSqlOperationManager.sessionToActivePool -= sessionHandle
    sparkSqlOperationManager.sessionToContexts.remove(sessionHandle)
  }
} 
Example 145
Source File: StoragePerfTester.scala    From BigDatalog   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.tools

import java.util.concurrent.{CountDownLatch, Executors}
import java.util.concurrent.atomic.AtomicLong

import org.apache.spark.executor.ShuffleWriteMetrics
import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.serializer.KryoSerializer
import org.apache.spark.shuffle.hash.HashShuffleManager
import org.apache.spark.util.Utils


    // Note: this listing is truncated; the enclosing object and the definitions of
    // dataSizeMb and numMaps used below are not shown.
    val numOutputSplits = sys.env.get("NUM_REDUCERS").map(_.toInt).getOrElse(500)

    val recordLength = 1000 // ~1KB records
    val totalRecords = dataSizeMb * 1000
    val recordsPerMap = totalRecords / numMaps

    val writeKey = "1" * (recordLength / 2)
    val writeValue = "1" * (recordLength / 2)
    val executor = Executors.newFixedThreadPool(numMaps)

    val conf = new SparkConf()
      .set("spark.shuffle.compress", "false")
      .set("spark.shuffle.sync", "true")
      .set("spark.shuffle.manager", "org.apache.spark.shuffle.hash.HashShuffleManager")

    // This is only used to instantiate a BlockManager. All thread scheduling is done manually.
    val sc = new SparkContext("local[4]", "Write Tester", conf)
    val hashShuffleManager = sc.env.shuffleManager.asInstanceOf[HashShuffleManager]

    def writeOutputBytes(mapId: Int, total: AtomicLong): Unit = {
      val shuffle = hashShuffleManager.shuffleBlockResolver.forMapTask(1, mapId, numOutputSplits,
        new KryoSerializer(sc.conf), new ShuffleWriteMetrics())
      val writers = shuffle.writers
      for (i <- 1 to recordsPerMap) {
        writers(i % numOutputSplits).write(writeKey, writeValue)
      }
      writers.map { w =>
        w.commitAndClose()
        total.addAndGet(w.fileSegment().length)
      }

      shuffle.releaseWriters(true)
    }

    val start = System.currentTimeMillis()
    val latch = new CountDownLatch(numMaps)
    val totalBytes = new AtomicLong()
    for (task <- 1 to numMaps) {
      executor.submit(new Runnable() {
        override def run(): Unit = {
          try {
            writeOutputBytes(task, totalBytes)
            latch.countDown()
          } catch {
            case e: Exception =>
              // scalastyle:off println
              println("Exception in child thread: " + e + " " + e.getMessage)
              // scalastyle:on println
              System.exit(1)
          }
        }
      })
    }
    latch.await()
    val end = System.currentTimeMillis()
    val time = (end - start) / 1000.0
    val bytesPerSecond = totalBytes.get() / time
    val bytesPerFile = (totalBytes.get() / (numOutputSplits * numMaps.toDouble)).toLong

    // scalastyle:off println
    System.err.println("files_total\t\t%s".format(numMaps * numOutputSplits))
    System.err.println("bytes_per_file\t\t%s".format(Utils.bytesToString(bytesPerFile)))
    System.err.println("agg_throughput\t\t%s/s".format(Utils.bytesToString(bytesPerSecond.toLong)))
    // scalastyle:on println

    executor.shutdown()
    sc.stop()
  }
} 
Example 146
Source File: ExecutorDelegationTokenUpdater.scala    From BigDatalog   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.deploy.yarn

import java.util.concurrent.{Executors, TimeUnit}

import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.{FileSystem, Path}
import org.apache.hadoop.security.{Credentials, UserGroupInformation}

import org.apache.spark.deploy.SparkHadoopUtil
import org.apache.spark.{Logging, SparkConf}
import org.apache.spark.util.{ThreadUtils, Utils}

import scala.util.control.NonFatal

private[spark] class ExecutorDelegationTokenUpdater(
    sparkConf: SparkConf,
    hadoopConf: Configuration) extends Logging {

  @volatile private var lastCredentialsFileSuffix = 0

  private val credentialsFile = sparkConf.get("spark.yarn.credentials.file")
  private val freshHadoopConf =
    SparkHadoopUtil.get.getConfBypassingFSCache(
      hadoopConf, new Path(credentialsFile).toUri.getScheme)

  private val delegationTokenRenewer =
    Executors.newSingleThreadScheduledExecutor(
      ThreadUtils.namedThreadFactory("Delegation Token Refresh Thread"))

  // On the executor, this thread wakes up and picks up new tokens from HDFS, if any.
  private val executorUpdaterRunnable =
    new Runnable {
      override def run(): Unit = Utils.logUncaughtExceptions(updateCredentialsIfRequired())
    }

  def updateCredentialsIfRequired(): Unit = {
    try {
      val credentialsFilePath = new Path(credentialsFile)
      val remoteFs = FileSystem.get(freshHadoopConf)
      SparkHadoopUtil.get.listFilesSorted(
        remoteFs, credentialsFilePath.getParent,
        credentialsFilePath.getName, SparkHadoopUtil.SPARK_YARN_CREDS_TEMP_EXTENSION)
        .lastOption.foreach { credentialsStatus =>
        val suffix = SparkHadoopUtil.get.getSuffixForCredentialsPath(credentialsStatus.getPath)
        if (suffix > lastCredentialsFileSuffix) {
          logInfo("Reading new delegation tokens from " + credentialsStatus.getPath)
          val newCredentials = getCredentialsFromHDFSFile(remoteFs, credentialsStatus.getPath)
          lastCredentialsFileSuffix = suffix
          UserGroupInformation.getCurrentUser.addCredentials(newCredentials)
          logInfo("Tokens updated from credentials file.")
        } else {
          // Check every hour to see if new credentials arrived.
          logInfo("Updated delegation tokens were expected, but the driver has not updated the " +
            "tokens yet, will check again in an hour.")
          delegationTokenRenewer.schedule(executorUpdaterRunnable, 1, TimeUnit.HOURS)
          return
        }
      }
      val timeFromNowToRenewal =
        SparkHadoopUtil.get.getTimeFromNowToRenewal(
          sparkConf, 0.8, UserGroupInformation.getCurrentUser.getCredentials)
      if (timeFromNowToRenewal <= 0) {
        // We just checked for new credentials but none were there, wait a minute and retry.
        // This handles the shutdown case where the staging directory may have been removed(see
        // SPARK-12316 for more details).
        delegationTokenRenewer.schedule(executorUpdaterRunnable, 1, TimeUnit.MINUTES)
      } else {
        logInfo(s"Scheduling token refresh from HDFS in $timeFromNowToRenewal millis.")
        delegationTokenRenewer.schedule(
          executorUpdaterRunnable, timeFromNowToRenewal, TimeUnit.MILLISECONDS)
      }
    } catch {
      // Since the file may get deleted while we are reading it, catch the Exception and come
      // back in an hour to try again
      case NonFatal(e) =>
        logWarning("Error while trying to update credentials, will try again in 1 hour", e)
        delegationTokenRenewer.schedule(executorUpdaterRunnable, 1, TimeUnit.HOURS)
    }
  }

  private def getCredentialsFromHDFSFile(remoteFs: FileSystem, tokenPath: Path): Credentials = {
    val stream = remoteFs.open(tokenPath)
    try {
      val newCredentials = new Credentials()
      newCredentials.readTokenStorageStream(stream)
      newCredentials
    } finally {
      stream.close()
    }
  }

  def stop(): Unit = {
    delegationTokenRenewer.shutdown()
  }

} 
Example 147
Source File: S3Spec.scala    From fs2-aws   with MIT License 5 votes vote down vote up
package fs2
package aws

import java.util.concurrent.Executors

import cats.effect.{ ContextShift, IO }
import com.amazonaws.services.s3.AmazonS3
import fs2.aws.internal.S3Client
import fs2.aws.s3._
import org.mockito.MockitoSugar._
import org.scalatest.flatspec.AnyFlatSpec
import org.scalatest.matchers.should.Matchers

import scala.concurrent.ExecutionContext

class S3Spec extends AnyFlatSpec with Matchers {

  private val blockingEC                        = ExecutionContext.fromExecutorService(Executors.newFixedThreadPool(6))
  implicit val ec: ExecutionContext             = ExecutionContext.global
  implicit val ioContextShift: ContextShift[IO] = IO.contextShift(ec)

  implicit val s3Client: S3Client[IO] = fs2.aws.utils.s3TestClient
  val mockS3                          = mock[AmazonS3]

  ignore should "stdout the jsonfile" in {
    readS3FileMultipart[IO]("resources", "jsontest.json", 25, mockS3).compile.toVector.unsafeRunSync should be(
      Vector()
    )
  }

  "Downloading the JSON test file by chunks" should "return the same content" in {
    readS3FileMultipart[IO]("resources", "jsontest.json", 25, mockS3)
      .through(fs2.text.utf8Decode)
      .through(fs2.text.lines)
      .compile
      .toVector
      .unsafeRunSync
      .reduce(_ + _)
      .concat("") should be(
      """{"test": 1}{"test": 2}{"test": 3}{"test": 4}{"test": 5}{"test": 6}{"test": 7}{"test": 8}"""
    )
  }

  "Downloading the JSON test file" should "return the same content" in {
    readS3File[IO]("resources", "jsontest.json", blockingEC, mockS3)
      .through(fs2.text.utf8Decode)
      .through(fs2.text.lines)
      .compile
      .toVector
      .unsafeRunSync
      .reduce(_ + _)
      .concat("") should be(
      """{"test": 1}{"test": 2}{"test": 3}{"test": 4}{"test": 5}{"test": 6}{"test": 7}{"test": 8}"""
    )
  }

  "Downloading the versioned JSON test file" should "return the same content" in {
    readS3VersionedFile[IO]("resources", "jsontest.json", version = "ABC", blockingEC, mockS3)
      .through(fs2.text.utf8Decode)
      .through(fs2.text.lines)
      .compile
      .toVector
      .unsafeRunSync
      .reduce(_ + _)
      .concat("") should be(
      """{"this": 1}{"is": 2}{"versioned": 3}{"content": 4}"""
    )
  }

  "big chunk size but small entire text" should "be trimmed to content" in {
    readS3FileMultipart[IO]("resources", "jsontest1.json", 25, mockS3)
      .through(fs2.text.utf8Decode)
      .through(fs2.text.lines)
      .compile
      .toVector
      .unsafeRunSync
      .reduce(_ + _)
      .concat("") should be("""{"test": 1}""")
  }
} 
Example 148
Source File: BitVectorSocket.scala    From skunk   with MIT License 5 votes vote down vote up
// Copyright (c) 2018-2020 by Rob Norris
// This software is licensed under the MIT License (MIT).
// For more information see LICENSE or https://opensource.org/licenses/MIT

package skunk.net

import cats._
import cats.effect._
import cats.implicits._
import fs2.Chunk
import fs2.io.tcp.Socket
import scala.concurrent.duration.FiniteDuration
import scodec.bits.BitVector
import java.net.InetSocketAddress
import java.nio.channels._
import java.util.concurrent.Executors
import java.util.concurrent.ThreadFactory
import fs2.io.tcp.SocketGroup


  // Note: the enclosing BitVectorSocket companion object and its fromSocket helper are truncated in this listing.
  def apply[F[_]: Concurrent: ContextShift](
    host:         String,
    port:         Int,
    readTimeout:  FiniteDuration,
    writeTimeout: FiniteDuration,
    sg:           SocketGroup,
    sslOptions:   Option[SSLNegotiation.Options[F]],
  ): Resource[F, BitVectorSocket[F]] =
    for {
      sock  <- sg.client[F](new InetSocketAddress(host, port))
      sockʹ <- sslOptions.fold(sock.pure[Resource[F, ?]])(SSLNegotiation.negotiateSSL(sock, readTimeout, writeTimeout, _))
    } yield fromSocket(sockʹ, readTimeout, writeTimeout)

} 
Example 149
Source File: DubboConfig.scala    From asura   with MIT License 5 votes vote down vote up
package asura.dubbo

import java.util.concurrent.Executors

import com.alibaba.dubbo.config.ApplicationConfig

import scala.concurrent.ExecutionContext
import scala.concurrent.duration._

case class DubboConfig(
                        appName: String = "asura-dubbo"
                      ) {

}

object DubboConfig {

  val DEFAULT_ACTOR_ASK_TIMEOUT = 30.seconds
  val DEFAULT_PROTOCOL = "dubbo://"
  val DEFAULT_PORT = 20880
  val DEFAULT_ROOT_DUBBO_PATH = "/dubbo"
  val DEFAULT_PROMPT = "dubbo>"
  val DEFAULT_ZK_CLIENT_CACHE_SIZE = 10
  val DEFAULT_DUBBO_REF_CACHE_SIZE = 20
  val DEFAULT_TIMEOUT = 10000
  var appName = "asura-dubbo"
  var appConfig = new ApplicationConfig(appName)

  val DUBBO_EC = ExecutionContext.fromExecutorService(Executors.newFixedThreadPool(4))
} 
Example 150
Source File: StreamConsumerScala.scala    From infinispan-spark   with Apache License 2.0 5 votes vote down vote up
package org.infinispan.spark.examples.twitter

import java.util.concurrent.{Executors, TimeUnit}

import org.apache.log4j.{Level, Logger}
import org.apache.spark.SparkContext
import org.apache.spark.streaming.{Seconds, StreamingContext}
import org.infinispan.client.hotrod.RemoteCacheManager
import org.infinispan.client.hotrod.configuration.ConfigurationBuilder
import org.infinispan.spark.examples.twitter.Sample.{getSparkConf, runAndExit, usageStream}
import org.infinispan.spark.examples.util.TwitterDStream
import org.infinispan.spark.stream._

import scala.collection.JavaConverters._
import scala.concurrent.duration._
import scala.language.postfixOps


object StreamConsumerScala {

   def main(args: Array[String]) {
      Logger.getLogger("org").setLevel(Level.WARN)

      if (args.length < 2) {
         usageStream("StreamConsumerScala")
      }

      val infinispanHost = args(0)
      val duration = args(1).toLong * 1000

      val conf = getSparkConf("spark-infinispan-stream-consumer-scala")
      val sparkContext = new SparkContext(conf)

      val streamingContext = new StreamingContext(sparkContext, Seconds(1))

      val config = Sample.getConnectorConf(infinispanHost)

      val remoteCacheManager = new RemoteCacheManager(new ConfigurationBuilder().withProperties(config.getHotRodClientProperties).build())
      val cache = remoteCacheManager.getCache[Long, Tweet]("default")

      val twitterDStream = TwitterDStream.create(streamingContext)

      val keyValueTweetStream = twitterDStream.map(s => (s.getId, s))

      keyValueTweetStream.writeToInfinispan(config)

      Repeat.every(5 seconds, {
         val keySet = cache.keySet()
         val maxKey = keySet.asScala.max
         println(s"${keySet.size} tweets inserted in the cache")
         println(s"Last tweet:${Option(cache.get(maxKey)).map(_.getText).getOrElse("<no tweets received so far>")}")
         println()
      })

      runAndExit(streamingContext, duration)
   }

   object Repeat {
      def every(d: Duration, code: => Unit) =
         Executors.newSingleThreadScheduledExecutor.scheduleWithFixedDelay(new Runnable {
            override def run(): Unit = code
         }, 10, d.toSeconds, TimeUnit.SECONDS)
   }

} 
Example 151
Source File: SparkSQLSessionManager.scala    From bdg-sequila   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.sql.hive.thriftserver

import java.util.concurrent.Executors

import org.apache.commons.logging.Log
import org.apache.hadoop.hive.conf.HiveConf
import org.apache.hadoop.hive.conf.HiveConf.ConfVars
import org.apache.hive.service.cli.SessionHandle
import org.apache.hive.service.cli.session.SessionManager
import org.apache.hive.service.cli.thrift.TProtocolVersion
import org.apache.hive.service.server.HiveServer2
import org.apache.spark.sql.{SQLContext, SequilaSession}
import org.apache.spark.sql.hive.HiveUtils
import org.apache.spark.sql.hive.thriftserver.ReflectionUtils._
import org.apache.spark.sql.hive.thriftserver.server.SparkSQLOperationManagerSeq


private[hive] class SparkSQLSessionManagerSeq(hiveServer: HiveServer2, ss: SequilaSession)
  extends SessionManager(hiveServer)
    with ReflectedCompositeService {

  private lazy val sparkSqlOperationManager = new SparkSQLOperationManagerSeq(ss)

  override def init(hiveConf: HiveConf) {
    setSuperField(this, "operationManager", sparkSqlOperationManager)
    super.init(hiveConf)
  }

  override def openSession(
                            protocol: TProtocolVersion,
                            username: String,
                            passwd: String,
                            ipAddress: String,
                            sessionConf: java.util.Map[String, String],
                            withImpersonation: Boolean,
                            delegationToken: String): SessionHandle = {
    val sessionHandle =
      super.openSession(protocol, username, passwd, ipAddress, sessionConf, withImpersonation,
        delegationToken)
    val session = super.getSession(sessionHandle)
    HiveThriftServer2Seq.listener.onSessionCreated(
      session.getIpAddress, sessionHandle.getSessionId.toString, session.getUsername)
    val ctx = if (ss.sqlContext.conf.hiveThriftServerSingleSession) {
      ss.sqlContext
    } else {
      ss.sqlContext.newSession()
    }
    //ctx.setConf(HiveUtils.FAKE_HIVE_VERSION.key, HiveUtils.builtinHiveVersion)
    if (sessionConf != null && sessionConf.containsKey("use:database")) {
      ctx.sql(s"use ${sessionConf.get("use:database")}")
    }
    sparkSqlOperationManager.sessionToContexts.put(sessionHandle, ctx)
    sessionHandle
  }

  override def closeSession(sessionHandle: SessionHandle) {
    HiveThriftServer2Seq.listener.onSessionClosed(sessionHandle.getSessionId.toString)
    super.closeSession(sessionHandle)
    sparkSqlOperationManager.sessionToActivePool.remove(sessionHandle)
    sparkSqlOperationManager.sessionToContexts.remove(sessionHandle)
  }
} 
Example 152
Source File: Weather.scala    From Mastering-Functional-Programming   with MIT License 5 votes vote down vote up
package jvm

import scala.concurrent.{ Future, ExecutionContext }
import java.util.concurrent.Executors


object Weather {
  case class Event(time: Long, location: String)

  def getEvent(id: Int): Event = {
    Thread.sleep(1000)  // Simulate delay
    Event(System.currentTimeMillis, "New York")
  }

  def getWeather(time: Long, location: String): String = {
    Thread.sleep(1000) // Simulate delay
    "bad"
  }

  def notifyUser(): Unit = {
    Thread.sleep(1000)
    println("The user is notified")
  }

  def weatherImperative(eventId: Int): Unit = {
    val evt = getEvent(eventId)  // Will block
    val weather = getWeather(evt.time, evt.location)  // Will block
    if (weather == "bad") notifyUser() // Will block
  }

  def weatherImperativeThreaded(eventId: Int): Unit = {
    // Utility methods
    def thread(op: => Unit): Thread =
      new Thread(new Runnable { def run(): Unit = { op }})

    def runThread(t: Thread): Unit = t.start()
    

    // Business logic methods
    def notifyThread(weather: String): Thread = thread {
      if (weather == "bad") notifyUser()
    }

    def weatherThread(evt: Event): Thread = thread {
      val weather = getWeather(evt.time, evt.location)
      runThread(notifyThread(weather))
    }

    val eventThread: Thread = thread {
      val evt = getEvent(eventId)
      runThread(weatherThread(evt))
    }


    // Run the app
    runThread(eventThread)  // Prints "The user is notified"
  }

  def weatherFuture(eventId: Int): Unit = {
    implicit val context = ExecutionContext.fromExecutorService(Executors.newFixedThreadPool(5))

    Future { getEvent(eventId) }
      .onSuccess { case evt =>
        Future { getWeather(evt.time, evt.location) }
          .onSuccess { case weather => Future { if (weather == "bad") notifyUser } }
      }
  }

  def weatherFutureFlatmap(eventId: Int): Future[Unit] = {
    implicit val context = ExecutionContext.fromExecutorService(Executors.newFixedThreadPool(5))

    for {
      evt     <- Future { getEvent(eventId) }
      weather <- Future { getWeather(evt.time, evt.location) }
      _       <- Future { if (weather == "bad") notifyUser() }
    } yield ()
  }

  def weatherFutureFlatmapDesugared(eventId: Int): Future[Unit] = {
    implicit val context = ExecutionContext.fromExecutorService(Executors.newFixedThreadPool(5))

    Future { getEvent(eventId) }
      .flatMap { evt => Future { getWeather(evt.time, evt.location) } }
      .flatMap { weather => Future { if (weather == "bad") notifyUser() } }
  }

}

import Weather._
object WeatherImperativeSync extends App { weatherImperative(0) }
object WeatherImperativeThreaded extends App { weatherImperativeThreaded(0) }
object WeatherFuture extends App { weatherFuture(0) }
object WeatherFutureFlatmap extends App { weatherFutureFlatmap(0) }
object WeatherFutureFlatmapDesugared extends App { weatherFutureFlatmapDesugared(0) } 
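The Future-based runner objects above kick off the pipeline and return immediately; because each call builds its own non-daemon thread pool, the process also keeps running after the work is done. A small driver that blocks on the result is sketched below; it is an addition layered on the example, not part of the original source, and shutting the internal pool down would require exposing it from weatherFutureFlatmap.

object WeatherFutureFlatmapAwaited extends App {
  import scala.concurrent.Await
  import scala.concurrent.duration._

  // Block the main thread until the event -> weather -> notify pipeline completes.
  Await.result(weatherFutureFlatmap(0), 10.seconds)
}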
Example 153
Source File: InMemoryStore.scala    From slab   with Apache License 2.0 5 votes vote down vote up
package com.criteo.slab.lib

import java.time.format.{DateTimeFormatter, FormatStyle}
import java.time.temporal.ChronoUnit
import java.time.{Instant, ZoneId}
import java.util.concurrent.{Executors, TimeUnit}

import com.criteo.slab.core.{Codec, Context, Store}
import com.criteo.slab.lib.Values.Slo
import org.slf4j.{Logger, LoggerFactory}

import scala.collection.concurrent.TrieMap
import scala.concurrent.Future
import scala.util.Try


class InMemoryStore(
                     val expiryDays: Int = 30
                   ) extends Store[Any] {
  private val logger = LoggerFactory.getLogger(this.getClass)
  private val cache = TrieMap.empty[(String, Long), Any]
  private val scheduler = Executors.newSingleThreadScheduledExecutor()

  scheduler.scheduleAtFixedRate(InMemoryStore.createCleaner(cache, expiryDays, logger), 1, 1, TimeUnit.HOURS)
  logger.info(s"InMemoryStore started, entries expire in $expiryDays days")

  sys.addShutdownHook {
    logger.info(s"Shutting down...")
    scheduler.shutdown()
  }

  override def upload[T](id: String, context: Context, v: T)(implicit codec: Codec[T, Any]): Future[Unit] = {
    logger.debug(s"Uploading $id")
    Future.successful {
      cache.putIfAbsent((id, context.when.toEpochMilli), codec.encode(v))
      logger.info(s"Store updated, size: ${cache.size}")
    }
  }

  override def uploadSlo(id: String, context: Context, slo: Slo)(implicit codec: Codec[Slo, Any]): Future[Unit] = {
    upload[Slo](id, context, slo)
  }

  def fetchSloHistory(id: String, from: Instant, until: Instant)(implicit codec: Codec[Slo, Any]): Future[Seq[(Long, Slo)]] = {
    fetchHistory[Slo](id, from, until)(codec)
  }

  override def fetch[T](id: String, context: Context)(implicit codec: Codec[T, Any]): Future[Option[T]] = {
    logger.debug(s"Fetching $id")
    Future.successful {
      cache.get((id, context.when.toEpochMilli)) map { v =>
        codec.decode(v).get
      }
    }
  }

  override def fetchHistory[T](
                                id: String,
                                from: Instant,
                                until: Instant
                              )(implicit ev: Codec[T, Any]): Future[Seq[(Long, T)]] = {
    logger.debug(s"Fetching the history of $id from ${format(from)} until ${format(until)}, cache size: ${cache.size}")
    Future.successful {
      cache.withFilter { case ((_id, ts), _) =>
        _id == id && ts >= from.toEpochMilli && ts <= until.toEpochMilli
      }.map { case ((_, ts), repr) =>
        (ts, ev.decode(repr).get)
      }.toList
    }
  }

  private def format(i: Instant) = DateTimeFormatter.ofLocalizedDateTime(FormatStyle.FULL)
    .withZone(ZoneId.systemDefault)
    .format(i)
}

object InMemoryStore {
  implicit def codec[T] = new Codec[T, Any] {
    override def encode(v: T): Any = v

    override def decode(v: Any): Try[T] = Try(v.asInstanceOf[T])
  }

  def createCleaner(cache: TrieMap[(String, Long), Any], expiryDays: Int, logger: Logger): Runnable = {
    object C extends Runnable {
      override def run(): Unit = {
        val expired = cache.filterKeys(_._2 <= Instant.now.minus(expiryDays, ChronoUnit.DAYS).toEpochMilli).keys
        logger.debug(s"${expired.size} out of ${cache.size} entries have expired, cleaning up...")
        cache --= expired
      }
    }
    C
  }
} 
Example 154
Source File: SwaveIdentityProcessorVerification.scala    From swave   with Mozilla Public License 2.0 5 votes vote down vote up
package swave.core.tck

import java.util.concurrent.{ExecutorService, Executors, TimeUnit}
import org.reactivestreams.Publisher
import org.reactivestreams.tck.{IdentityProcessorVerification, TestEnvironment}
import org.scalatest.testng.TestNGSuiteLike
import org.testng.SkipException
import org.testng.annotations.AfterClass
import swave.core._

abstract class SwaveIdentityProcessorVerification[T](val testEnv: TestEnvironment, publisherShutdownTimeout: Long)
    extends IdentityProcessorVerification[T](testEnv, publisherShutdownTimeout) with TestNGSuiteLike
    with StreamEnvShutdown {

  def this(printlnDebug: Boolean) =
    this(
      new TestEnvironment(Timeouts.defaultTimeout.toMillis, printlnDebug),
      Timeouts.publisherShutdownTimeout.toMillis)

  def this() = this(false)

  override def createFailedPublisher(): Publisher[T] =
    Spout.failing[T](new Exception("Nope")).drainTo(Drain.toPublisher()).get

  // Publishers created by swave don't support fanout by default
  override def maxSupportedSubscribers: Long = 1L

  override def required_spec313_cancelMustMakeThePublisherEventuallyDropAllReferencesToTheSubscriber(): Unit =
    throw new SkipException("Not relevant for publisher w/o fanout support")

  override lazy val publisherExecutorService: ExecutorService =
    Executors.newFixedThreadPool(3)

  @AfterClass
  def shutdownPublisherExecutorService(): Unit = {
    publisherExecutorService.shutdown()
    publisherExecutorService.awaitTermination(3, TimeUnit.SECONDS)
  }
} 
Example 155
Source File: ExecutionContextSchedulerTest.scala    From reactor-scala-extensions   with Apache License 2.0 5 votes vote down vote up
package reactor.core.scala.scheduler

import java.util.concurrent.{Executors, ThreadFactory}

import org.scalatest.freespec.AnyFreeSpec
import org.scalatest.matchers.should.Matchers
import reactor.core.scala.publisher.SMono
import reactor.test.StepVerifier

import scala.concurrent.ExecutionContext


class ExecutionContextSchedulerTest extends AnyFreeSpec with Matchers {
  "ExecutionContextScheduler" - {
    "should create a Scheduler using provided ExecutionContext" - {
      "on SMono" in {
        val executionContext = ExecutionContext.fromExecutorService(Executors.newFixedThreadPool(1, new ThreadFactory {
          override def newThread(r: Runnable): Thread = new Thread(r, "THREAD-NAME-SMONO")
        }))
        val mono = SMono.just(1)
          .subscribeOn(ExecutionContextScheduler(executionContext))
          .doOnNext(i => Thread.currentThread().getName shouldBe "THREAD-NAME-SMONO")
        StepVerifier.create(mono)
          .expectNext(1)
          .verifyComplete()
      }
    }
  }
} 
Example 156
Source File: ProtoBuffTest.scala    From c4proto   with Apache License 2.0 5 votes vote down vote up
package ee.cone.c4actor

import java.lang.management.ManagementFactory
import java.util
import java.util.concurrent.{Callable, Executors}

import ee.cone.c4actor.AnyAdapter._
import ee.cone.c4actor.AnyOrigProtocol.N_AnyOrig
import ee.cone.c4actor.ProtoBuffTestProtocol.{D_TestOrig, D_TestOrigForDecode}
import ee.cone.c4di.{c4, c4app}
import ee.cone.c4proto._

import scala.collection.immutable
import scala.util.Random

trait ProtoBuffTestProtocolAppBase

@protocol("ProtoBuffTestProtocolApp") object ProtoBuffTestProtocol {

  @Id(0x1) case class D_TestOrig(
    @Id(0x2) srcId: String,
    @Id(0x3) list: List[String],
    @Id(0x4) byteStr: List[N_AnyOrig]
  )

  @Id(0x5) case class D_TestOrigForDecode(
    @Id(0x6) srcId: String,
    @Id(0x7) number: Long
  )

}

@c4app class SeqProtoBuffTestAppBase extends ProtoBuffTestApp
@c4app class ParProtoBuffTestAppBase extends ProtoBuffTestApp

trait ProtoBuffTestApp
  extends VMExecutionApp with ExecutableApp
    with BaseApp with ProtoApp
    with ProtoBuffTestProtocolApp
    with AnyOrigProtocolApp



class SerializationRunnable(pid: Int, testOrigs: Seq[D_TestOrigForDecode], qAdapterRegistry: QAdapterRegistry) extends Callable[Long] {

  def call(): Long = {
    TestCode.test(testOrigs, qAdapterRegistry)
  }
}

object TestCode {
  def test(testOrigs: Seq[D_TestOrigForDecode], qAdapterRegistry: QAdapterRegistry): Long = {
    val time = System.currentTimeMillis()
    val encoded: immutable.Seq[N_AnyOrig] = testOrigs.map(encode(qAdapterRegistry)(_))
    val testOrigsss: immutable.Seq[D_TestOrig] = encoded.zipWithIndex.map { case (a, b) => D_TestOrig(b.toString, a.toString.split(",").toList, List(a)) }
    val encoded2: immutable.Seq[N_AnyOrig] = testOrigsss.map(encode(qAdapterRegistry)(_))
    val decoded: immutable.Seq[D_TestOrig] = encoded2.map(decode[D_TestOrig](qAdapterRegistry))
    // assert (testOrigsss == decoded)
    val time2 = System.currentTimeMillis()
    time2 - time
  }
} 
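The app wiring that exercises SerializationRunnable is truncated above (note the gap after the app traits). As a rough sketch of how such Callables can be fanned out over a fixed pool, the helper below submits one round-trip per worker and sums the measured millis; the registry and test data are taken as given here, since their construction is not shown in this listing.

object ProtoBuffTestRunner {
  import java.util.concurrent.Executors

  def runParallel(testOrigs: Seq[D_TestOrigForDecode],
                  qAdapterRegistry: QAdapterRegistry,
                  threads: Int): Long = {
    val pool = Executors.newFixedThreadPool(threads)
    try {
      // One serialization round-trip per worker; collect and sum the measured times.
      val futures = (1 to threads).map(pid => pool.submit(new SerializationRunnable(pid, testOrigs, qAdapterRegistry)))
      futures.map(_.get()).sum
    } finally pool.shutdown()
  }
}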
Example 157
Source File: CatalogDatabase.scala    From modelmatrix   with Apache License 2.0 5 votes vote down vote up
package com.collective.modelmatrix.catalog

import java.util.concurrent.Executors

import com.collective.modelmatrix.db.SchemaInstaller
import org.scalatest.BeforeAndAfterAll
import slick.driver.JdbcProfile

import scala.concurrent.duration.{FiniteDuration, _}
import scala.concurrent.{Await, ExecutionContext, Future}
import scalaz.Tag

trait CatalogDatabase {
  def driver: JdbcProfile

  import com.collective.modelmatrix.db.GenericSlickDriver.api.Database
  def db: Database

  lazy val catalog = new ModelMatrixCatalog(driver)

  protected implicit val catalogExecutionContext =
    Tag[ExecutionContext, ModelMatrixCatalog](ExecutionContext.fromExecutor(Executors.newFixedThreadPool(10)))

  protected def await[T](f: Future[T], duration: FiniteDuration = 10.seconds): T = {
    Await.result(f, duration)
  }

}

trait InstallSchemaBefore extends SchemaInstaller {
  self: BeforeAndAfterAll with CatalogDatabase =>
  private[this] var schemaInstalled: Boolean = false

  override protected def beforeAll(): Unit = {
    this.synchronized {
      if (!schemaInstalled) {
        installOrMigrate
        schemaInstalled = true
      }
    }
  }
} 
Example 158
Source File: Contexts.scala    From scala-loci   with Apache License 2.0 5 votes vote down vote up
package loci
package contexts

import java.util.concurrent.{Executors, ThreadFactory}

import scala.concurrent.{ExecutionContext, ExecutionContextExecutor}
import scala.util.control.NonFatal

object Pooled {
  lazy val global: ExecutionContextExecutor =
    new logging.ReportingExecutionContext(ExecutionContext.global)

  object Implicits {
    implicit lazy val global: ExecutionContext = Pooled.global
  }
}

object Immediate {
  lazy val global: ExecutionContextExecutor = new ExecutionContextExecutor {
    def execute(runnable: Runnable) =
      try runnable.run()
      catch { case NonFatal(exception) => reportFailure(exception) }

    def reportFailure(throwable: Throwable) = logging.reportException(throwable)
  }

  object Implicits {
    implicit lazy val global: ExecutionContext = Immediate.global
  }
}

object Queued {
  lazy val global = create()

  def create(): ExecutionContextExecutor =
    ExecutionContext.fromExecutorService(
      Executors.newSingleThreadExecutor(new ThreadFactory {
        def newThread(runnable: Runnable) = {
          val thread = new Thread(new Runnable {
            def run() =
              try runnable.run()
              catch {
                case NonFatal(exception) =>
                  if (exception.getCause != null)
                    logging.reportException(exception.getCause)
                  else
                    logging.reportException(exception)
              }
          })
          thread.setDaemon(true)
          thread
        }
      }),
      logging.reportException)

  object Implicits {
    implicit lazy val global: ExecutionContext = Queued.global
  }
} 
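A brief usage sketch for the contexts defined above: importing the Queued implicit makes every Future run sequentially on the single daemon thread created by Queued.create(). The Future bodies are arbitrary placeholders added for illustration.

object QueuedExample extends App {
  import scala.concurrent.{Await, Future}
  import scala.concurrent.duration._
  import loci.contexts.Queued.Implicits.global

  // Futures created with this implicit run one after another on a single daemon thread.
  val done = Future { println("first") }.flatMap(_ => Future { println("second") })
  Await.ready(done, 5.seconds)
}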
Example 159
Source File: TCPListener.scala    From scala-loci   with Apache License 2.0 5 votes vote down vote up
package loci
package communicator
package tcp

import java.io.IOException
import java.net.{InetAddress, ServerSocket, SocketException}
import java.util.concurrent.Executors
import java.util.concurrent.atomic.AtomicBoolean

import scala.util.{Failure, Success, Try}
import scala.util.control.NonFatal

private class TCPListener(
  port: Int, interface: String, properties: TCP.Properties)
    extends Listener[TCP] {

  protected def startListening(connectionEstablished: Connected[TCP]): Try[Listening] =
    try {
      val running = new AtomicBoolean(true)
      val socket = new ServerSocket(port, 0, InetAddress.getByName(interface))
      val executor = Executors.newCachedThreadPool()

      def terminate() = {
        try socket.close()
        catch { case _: IOException => }
        executor.shutdown()
      }

      new Thread() {
        override def run() =
          try
            while (true) {
              val connection = socket.accept()
              if (connection != null)
                executor.execute(new Runnable {
                  def run() = TCPHandler.handleConnection(
                    connection, properties, TCPListener.this, { connection =>
                      connectionEstablished.fire(Success(connection))
                    })
                })
            }
          catch {
            case exception: SocketException =>
              if (running.getAndSet(false)) {
                terminate()
                connectionEstablished.fire(Failure(exception))
              }
          }
      }.start()

      Success(new Listening {
        def stopListening(): Unit =
          if (running.getAndSet(false))
            terminate()
      })
    }
    catch {
      case NonFatal(exception) =>
        Failure(exception)
    }
} 
Example 160
Source File: GraphiteMockServer.scala    From kafka-offset-monitor-graphite   with Apache License 2.0 5 votes vote down vote up
package pl.allegro.tech.kafka.offset.monitor.graphite

import java.io.InputStream
import java.lang
import java.net.ServerSocket
import java.util.concurrent.{Callable, ExecutorService, Executors}

import com.jayway.awaitility.Awaitility._
import com.jayway.awaitility.Duration

class GraphiteMockServer(port: Int) {

  var serverSocket: ServerSocket = null
  val executor: ExecutorService = Executors.newFixedThreadPool(10)
  @volatile var listen: Boolean = false

  var expectedMetrics: scala.collection.mutable.Map[String, Double] = scala.collection.mutable.Map()
  var receivedMetrics: scala.collection.mutable.Map[String, Double] = scala.collection.mutable.Map()
  
  def start() {
    serverSocket = new ServerSocket(port)
    listen = true
    handleConnections()
  }

  private def handleConnections() {
    executor.execute(new Runnable {
      override def run() {
        while(listen) {
            readData(serverSocket.accept().getInputStream())
        }
      }
    })
  }

  private def readData(stream: InputStream) {
    executor.execute(new Runnable {
      override def run() {
        scala.io.Source.fromInputStream(stream).getLines().foreach((line) => handleMetric(line))
      }
    })
  }
  
  private def handleMetric(metricLine: String) {
    val metric = metricLine.split(" ")(0)
    val value = metricLine.split(" ")(1)

    if(expectedMetrics.contains(metric)) {
      receivedMetrics += (metric -> value.toDouble)
    }
  }
  
  def stop() {
    listen = false
    serverSocket.close()
  }
  
  def reset() {
    expectedMetrics.clear()
    receivedMetrics.clear()
  }

  def expectMetric(metricNamePattern: String, value: Double) {
    expectedMetrics += (metricNamePattern -> value)
  }

  def waitUntilReceived() {
    await.atMost(Duration.FIVE_SECONDS).until(new Callable[lang.Boolean] {
      override def call(): lang.Boolean = {
        expectedMetrics.forall { case (k, v) =>
          receivedMetrics.get(k).exists( (rv) => v == rv )
        }
      }
    })
  }
} 
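A short usage sketch for the mock server above: the port and metric name are arbitrary choices, and the client socket stands in for whatever reporter would normally push Graphite's plaintext "name value [timestamp]" lines.

object GraphiteMockServerExample extends App {
  import java.io.PrintWriter
  import java.net.Socket

  val server = new GraphiteMockServer(2003)
  server.start()
  server.expectMetric("kafka.consumer.lag", 42.0)

  // Push one plaintext metric line, as a Graphite reporter would.
  val socket = new Socket("localhost", 2003)
  val out = new PrintWriter(socket.getOutputStream, true)
  out.println("kafka.consumer.lag 42.0 1500000000")
  out.close()
  socket.close()

  server.waitUntilReceived() // blocks for up to five seconds
  server.stop()
}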
Example 161
Source File: TrafficMonitorThread.scala    From shadowsocksr-android   with GNU General Public License v3.0 5 votes vote down vote up
package com.github.shadowsocks.utils

import java.io.{File, IOException}
import java.nio.{ByteBuffer, ByteOrder}
import java.util.concurrent.Executors

import android.content.Context
import android.net.{LocalServerSocket, LocalSocket, LocalSocketAddress}
import android.util.Log

class TrafficMonitorThread(context: Context) extends Thread {

  val TAG = "TrafficMonitorThread"
  lazy val PATH = context.getApplicationInfo.dataDir + "/stat_path"

  @volatile var serverSocket: LocalServerSocket = null
  @volatile var isRunning: Boolean = true

  def closeServerSocket() {
    if (serverSocket != null) {
      try {
        serverSocket.close()
      } catch {
        case _: Exception => // ignore
      }
      serverSocket = null
    }
  }

  def stopThread() {
    isRunning = false
    closeServerSocket()
  }

  override def run() {

    try {
      new File(PATH).delete()
    } catch {
      case _: Exception => // ignore
    }

    try {
      val localSocket = new LocalSocket
      localSocket.bind(new LocalSocketAddress(PATH, LocalSocketAddress.Namespace.FILESYSTEM))
      serverSocket = new LocalServerSocket(localSocket.getFileDescriptor)
    } catch {
      case e: IOException =>
        Log.e(TAG, "unable to bind", e)
        return
    }

    val pool = Executors.newFixedThreadPool(1)

    while (isRunning) {
      try {
        val socket = serverSocket.accept()

        pool.execute(() => {
          try {
            val input = socket.getInputStream
            val output = socket.getOutputStream

            val buffer = new Array[Byte](16)
            if (input.read(buffer) != 16) throw new IOException("Unexpected traffic stat length")
            val stat = ByteBuffer.wrap(buffer).order(ByteOrder.LITTLE_ENDIAN)
            TrafficMonitor.update(stat.getLong(0), stat.getLong(8))

            output.write(0)

            input.close()
            output.close()

          } catch {
            case e: Exception =>
              Log.e(TAG, "Error when recv traffic stat", e)
          }

          // close socket
          try {
            socket.close()
          } catch {
            case _: Exception => // ignore
          }

        })
      } catch {
        case e: IOException =>
          Log.e(TAG, "Error when accept socket", e)
          return
      }
    }
  }
} 
Example 162
Source File: PoolUtils.scala    From cats-effect   with Apache License 2.0 5 votes vote down vote up
package cats.effect
package internals

import scala.concurrent.ExecutionContext
import scala.util.control.NonFatal

import java.util.concurrent.{Executors, ThreadFactory}
import java.util.concurrent.atomic.AtomicInteger

private[internals] object PoolUtils {
  // we can initialize this eagerly because the enclosing object is lazy
  val ioAppGlobal: ExecutionContext = {
    // lower-bound of 2 to prevent pathological deadlocks on virtual machines
    val bound = math.max(2, Runtime.getRuntime().availableProcessors())

    val executor = Executors.newFixedThreadPool(
      bound,
      new ThreadFactory {
        val ctr = new AtomicInteger(0)
        def newThread(r: Runnable): Thread = {
          val back = new Thread(r, s"ioapp-compute-${ctr.getAndIncrement()}")
          back.setDaemon(true)
          back
        }
      }
    )

    exitOnFatal(ExecutionContext.fromExecutor(executor))
  }

  def exitOnFatal(ec: ExecutionContext): ExecutionContext = new ExecutionContext {
    def execute(r: Runnable): Unit =
      ec.execute(new Runnable {
        def run(): Unit =
          try {
            r.run()
          } catch {
            case NonFatal(t) =>
              reportFailure(t)

            case t: Throwable =>
              // under most circumstances, this will work even with fatal errors
              t.printStackTrace()
              System.exit(1)
          }
      })

    def reportFailure(t: Throwable): Unit =
      ec.reportFailure(t)
  }
} 
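The pool above is bounded below by 2 so a single-core machine still gets two workers, its threads are daemons so they never keep the JVM alive, and exitOnFatal makes a fatal error on a worker terminate the process instead of vanishing. A minimal sketch of building and using an ExecutionContext of the same shape (the names are illustrative, not part of cats-effect):

import java.util.concurrent.{Executors, ThreadFactory}
import java.util.concurrent.atomic.AtomicInteger
import scala.concurrent.duration._
import scala.concurrent.{Await, ExecutionContext, Future}

object ComputePoolSketch {
  private val ctr = new AtomicInteger(0)
  private val pool = Executors.newFixedThreadPool(
    math.max(2, Runtime.getRuntime.availableProcessors()),   // same lower bound of 2
    new ThreadFactory {
      def newThread(r: Runnable): Thread = {
        val t = new Thread(r, s"compute-sketch-${ctr.getAndIncrement()}")
        t.setDaemon(true)                                     // daemon threads never block JVM exit
        t
      }
    }
  )
  implicit val ec: ExecutionContext = ExecutionContext.fromExecutor(pool)

  def main(args: Array[String]): Unit =
    println(Await.result(Future(21 * 2), 1.second))           // runs on a "compute-sketch-*" thread
}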
Example 163
Source File: AsyncFunctionLoop.scala    From stream-reactor   with Apache License 2.0 5 votes vote down vote up
package com.datamountaineer.streamreactor.connect.hbase.kerberos.utils

import java.util.concurrent.atomic.AtomicBoolean
import java.util.concurrent.{Executors, TimeUnit}

import com.typesafe.scalalogging.StrictLogging

import scala.concurrent.duration.Duration

class AsyncFunctionLoop(interval: Duration, description: String)(thunk: => Unit)
  extends AutoCloseable
    with StrictLogging {

  private val running = new AtomicBoolean(false)
  private val executorService = Executors.newSingleThreadExecutor

  def start(): Unit = {
    if (!running.compareAndSet(false, true)) {
      throw new IllegalStateException(s"$description already running.")
    }
    logger.info(s"Starting $description loop with an interval of ${interval.toMillis}ms.")
    executorService.submit(new Runnable {
      override def run(): Unit = {
        while (running.get()) {
          try {
            Thread.sleep(interval.toMillis)
            thunk
          }
          catch {
            case _: InterruptedException =>
            case t: Throwable =>
              logger.warn("Failed to renew the Kerberos ticket", t)
          }
        }
      }
    })
  }

  override def close(): Unit = {
    if (running.compareAndSet(true, false)) {
      executorService.shutdownNow()
      executorService.awaitTermination(10000, TimeUnit.MILLISECONDS)
    }
  }
} 
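The loop wraps Executors.newSingleThreadExecutor in an AutoCloseable so a periodic task (Kerberos ticket renewal here) can be started once and torn down deterministically: close() flips the flag, shutdownNow() interrupts the sleep, and awaitTermination gives the worker up to ten seconds to exit. A hypothetical usage sketch, with println standing in for the real renewal call:

import scala.concurrent.duration._

object AsyncFunctionLoopUsage {
  def main(args: Array[String]): Unit = {
    // assumption: println stands in for the real ticket-renewal call
    val loop = new AsyncFunctionLoop(1.second, "ticket renewal")(println("renewing"))
    loop.start()
    Thread.sleep(3500)   // let the loop fire a few times
    loop.close()         // flips the flag, interrupts the sleep, waits for the worker to stop
  }
}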
Example 164
Source File: AsyncFunctionLoop.scala    From stream-reactor   with Apache License 2.0 5 votes vote down vote up
package com.landoop.streamreactor.connect.hive

import java.util.concurrent.{Executors, TimeUnit}
import java.util.concurrent.atomic.AtomicBoolean

import com.typesafe.scalalogging.StrictLogging

import scala.concurrent.duration.Duration

class AsyncFunctionLoop(interval: Duration, description: String)(thunk: => Unit)
  extends AutoCloseable
    with StrictLogging {

  private val running = new AtomicBoolean(false)
  private val executorService = Executors.newFixedThreadPool(1)

  def start(): Unit = {
    if (!running.compareAndSet(false, true)) {
      throw new IllegalStateException(s"$description already running.")
    }
    logger.info(s"Starting $description loop with an interval of ${interval.toMillis}ms.")
    executorService.submit(new Runnable {
      override def run(): Unit = {
        while (running.get()) {
          try {
            Thread.sleep(interval.toMillis)
            thunk
          }
          catch {
            case _: InterruptedException =>
            case t: Throwable =>
              logger.warn("Failed to renew the Kerberos ticket", t)
          }
        }
      }
    })
  }

  override def close(): Unit = {
    if (running.compareAndSet(true, false)) {
      executorService.shutdownNow()
      executorService.awaitTermination(10000, TimeUnit.MILLISECONDS)
    }
  }
} 
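Example 164 is the hive module's copy of the same loop; the only difference from Example 163 is Executors.newFixedThreadPool(1) in place of newSingleThreadExecutor. Both provide exactly one worker thread, but the single-thread variant returns a wrapper that cannot be reconfigured, whereas a fixed pool of one is a plain ThreadPoolExecutor that can be resized after a cast, as this small sketch illustrates:

import java.util.concurrent.{Executors, ThreadPoolExecutor}

object SingleWorkerSketch {
  def main(args: Array[String]): Unit = {
    // a fixed pool of one is a plain ThreadPoolExecutor and can be widened after a cast ...
    val fixed = Executors.newFixedThreadPool(1).asInstanceOf[ThreadPoolExecutor]
    fixed.setMaximumPoolSize(4)
    fixed.setCorePoolSize(4)
    println(fixed.getCorePoolSize)   // 4

    // ... whereas newSingleThreadExecutor hides the underlying ThreadPoolExecutor behind a
    // wrapper, so its single-thread guarantee cannot be broken this way.
    val single = Executors.newSingleThreadExecutor()

    fixed.shutdown()
    single.shutdown()
  }
}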
Example 166
Source File: VoltConnectionConnectFn.scala    From stream-reactor   with Apache License 2.0 5 votes vote down vote up
package com.datamountaineer.streamreactor.connect.voltdb.writers

import java.util.concurrent.Executors

import com.datamountaineer.streamreactor.connect.concurrent.ExecutorExtension._
import com.datamountaineer.streamreactor.connect.concurrent.FutureAwaitWithFailFastFn
import com.datamountaineer.streamreactor.connect.voltdb.config.VoltSettings
import org.voltdb.client.Client

object VoltConnectionConnectFn extends Retries {
  def apply(client: Client, settings: VoltSettings): Seq[Unit] = {
    logger.info("Connecting to VoltDB...")
    val servers = settings.servers.split(",").map(_.trim)

    val executor = Executors.newFixedThreadPool(servers.length)

    val futures = servers.map { server =>
      executor.submit {
        connectWithRetries(client, server, 10)
      }
    }
    FutureAwaitWithFailFastFn(executor, futures)
  }

  private def connectWithRetries(client: Client, server: String, maxRetries: Int) = {
    val retryInterval = 1000
    withRetries(maxRetries, retryInterval, Some(s"Connection failure. Retrying in $retryInterval")) {
      client.createConnection(server)
    }
    logger.info(s"Connected to VoltDB node at: $server")
  }

} 
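Here the pool is sized to the number of VoltDB servers so every connection attempt runs in parallel, and FutureAwaitWithFailFastFn (a stream-reactor helper) waits for all of them, failing the whole call if any connection fails. A minimal sketch of the same fan-out using only the JDK executor API; connect() and the host names are placeholders, and unlike the fail-fast helper this version simply waits for every task before surfacing the first error:

import java.util.concurrent.{Callable, Executors, TimeUnit}
import scala.jdk.CollectionConverters._

object ParallelConnectSketch {
  // assumption: stands in for client.createConnection(server)
  def connect(server: String): Unit = println(s"connected to $server")

  def main(args: Array[String]): Unit = {
    val servers  = Seq("volt-1:21212", "volt-2:21212")   // hypothetical hosts
    val executor = Executors.newFixedThreadPool(servers.length)
    try {
      val tasks = servers.map(s => new Callable[Unit] { def call(): Unit = connect(s) })
      // invokeAll blocks until every task has finished; get() rethrows the first failure
      executor.invokeAll(tasks.asJava).asScala.foreach(_.get())
    } finally {
      executor.shutdown()
      executor.awaitTermination(30, TimeUnit.SECONDS)
    }
  }
}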
Example 167
Source File: HazelCastWriter.scala    From stream-reactor   with Apache License 2.0 5 votes vote down vote up
package com.datamountaineer.streamreactor.connect.hazelcast.writers

import java.util.concurrent.Executors

import com.datamountaineer.streamreactor.connect.concurrent.ExecutorExtension._
import com.datamountaineer.streamreactor.connect.concurrent.FutureAwaitWithFailFastFn
import com.datamountaineer.streamreactor.connect.errors.ErrorHandler
import com.datamountaineer.streamreactor.connect.hazelcast.config.{HazelCastSinkSettings, HazelCastStoreAsType, TargetType}
import com.datamountaineer.streamreactor.connect.schemas.ConverterUtil
import com.typesafe.scalalogging.StrictLogging
import org.apache.kafka.connect.sink.SinkRecord

import scala.concurrent.duration._
import scala.util.{Failure, Success}


  def write(records: Seq[SinkRecord]): Unit = {
    if (records.isEmpty) {
      logger.debug("No records received.")
    } else {
      logger.debug(s"Received ${records.size} records.")
      if (settings.allowParallel) parallelWrite(records) else sequentialWrite(records)
      logger.debug(s"Written ${records.size}")
    }
  }

  def sequentialWrite(records: Seq[SinkRecord]): Any = {
    try {
      records.foreach(r => insert(r))
    } catch {
      case t: Throwable =>
        logger.error(s"There was an error inserting the records ${t.getMessage}", t)
        handleTry(Failure(t))
    }
  }

  def parallelWrite(records: Seq[SinkRecord]): Any = {
    logger.warn("Running parallel writes! Order of writes not guaranteed.")
    val executor = Executors.newFixedThreadPool(settings.threadPoolSize)

    try {
      val futures = records.map { record =>
        executor.submit {
          insert(record)
          ()
        }
      }

      // when this call returns, the pool has been shut down
      FutureAwaitWithFailFastFn(executor, futures, 1.hours)
      handleTry(Success(()))
      logger.debug(s"Processed ${futures.size} records.")
    }
    catch {
      case t: Throwable =>
        logger.error(s"There was an error inserting the records ${t.getMessage}", t)
        handleTry(Failure(t))
    }
  }

  def insert(record: SinkRecord): Unit = {
    val writer = writers.get(record.topic())
    writer.foreach(w => w.write(record))
  }

  def close(): Unit = {
    logger.info("Shutting down Hazelcast client.")
    writers.values.foreach(_.close)
    settings.client.shutdown()
  }

  def flush(): Unit = {}
} 
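parallelWrite sizes its pool from configuration (settings.threadPoolSize) and leans on FutureAwaitWithFailFastFn to await the per-record futures and shut the pool down, which is why the example warns that write order is not guaranteed. A minimal sketch of the same fan-out over records using scala.concurrent.Future on top of a fixed pool; insert() and the pool size are placeholders:

import java.util.concurrent.{Executors, TimeUnit}
import scala.concurrent.duration._
import scala.concurrent.{Await, ExecutionContext, Future}

object ParallelWriteSketch {
  // assumption: stands in for the per-record Hazelcast insert above
  def insert(record: String): Unit = println(s"writing $record")

  def main(args: Array[String]): Unit = {
    val pool = Executors.newFixedThreadPool(4)   // settings.threadPoolSize in the example
    implicit val ec: ExecutionContext = ExecutionContext.fromExecutorService(pool)
    try {
      val records = Seq("r1", "r2", "r3")
      // the combined future fails as soon as any insert fails; write order is not guaranteed
      Await.result(Future.traverse(records)(r => Future(insert(r))), 1.hour)
    } finally {
      pool.shutdown()
      pool.awaitTermination(10, TimeUnit.SECONDS)
    }
  }
}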
Example 168
Source File: TestDeleteTopicsConcurrently.scala    From ohara   with Apache License 2.0 4 votes vote down vote up
package oharastream.ohara.configurator

import java.util.concurrent.atomic.{AtomicBoolean, AtomicInteger}
import java.util.concurrent.{ArrayBlockingQueue, Executors, LinkedBlockingDeque, TimeUnit}

import oharastream.ohara.client.configurator.{BrokerApi, TopicApi}
import oharastream.ohara.common.setting.TopicKey
import oharastream.ohara.common.util.Releasable
import oharastream.ohara.testing.WithBrokerWorker
import org.junit.{After, Test}
import org.scalatest.matchers.should.Matchers._

import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.duration.Duration
import scala.concurrent.{Await, Future}
import scala.jdk.CollectionConverters._

class TestDeleteTopicsConcurrently extends WithBrokerWorker {
  private[this] val configurator =
    Configurator.builder.fake(testUtil.brokersConnProps, testUtil().workersConnProps()).build()

  private[this] val topicApi = TopicApi.access.hostname(configurator.hostname).port(configurator.port)

  private[this] def result[T](f: Future[T]): T = Await.result(f, Duration(20, TimeUnit.SECONDS))

  private[this] val brokerClusterInfo = result(
    BrokerApi.access.hostname(configurator.hostname).port(configurator.port).list()
  ).head

  @Test
  def test(): Unit = {
    val loopMax       = 10
    val count         = 3
    val topicKeyQueue = new ArrayBlockingQueue[TopicKey](count)
    (0 until count).foreach(i => topicKeyQueue.put(TopicKey.of("test", i.toString)))
    val executors      = Executors.newFixedThreadPool(count)
    val exceptionQueue = new LinkedBlockingDeque[Throwable]()
    val closed         = new AtomicBoolean(false)
    val loopCount      = new AtomicInteger(0)
    (0 until count).foreach(
      _ =>
        executors.execute { () =>
          while (!closed.get() && loopCount.getAndIncrement() <= loopMax) try {
            val topicKey = topicKeyQueue.take()
            try result(
              topicApi.request
                .group(topicKey.group())
                .name(topicKey.name())
                .brokerClusterKey(brokerClusterInfo.key)
                .numberOfPartitions(1)
                .numberOfReplications(1)
                .create()
                .flatMap(_ => topicApi.start(topicKey))
                .flatMap { _ =>
                  TimeUnit.SECONDS.sleep(1)
                  topicApi.stop(topicKey)
                }
                .flatMap { _ =>
                  TimeUnit.SECONDS.sleep(1)
                  topicApi.delete(topicKey)
                }
            )
            finally topicKeyQueue.put(topicKey)
          } catch {
            case t: Throwable =>
              exceptionQueue.put(t)
              closed.set(true)
          }
        }
    )
    executors.shutdown()
    withClue(s"${exceptionQueue.asScala.map(_.getMessage).mkString(",")}") {
      executors.awaitTermination(60, TimeUnit.SECONDS) shouldBe true
    }
    exceptionQueue.size() shouldBe 0
  }
  @After
  def tearDown(): Unit = Releasable.close(configurator)
}
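The test gates its assertion on executors.shutdown() followed by awaitTermination, so the shouldBe check only runs once every worker has drained its loop; a worker that throws records the error in the exception queue and flips the shared closed flag so the others stop too. The shutdown/await idiom in isolation looks like this (a minimal sketch, not part of ohara):

import java.util.concurrent.{Executors, TimeUnit}

object ShutdownAwaitSketch {
  def main(args: Array[String]): Unit = {
    val executors = Executors.newFixedThreadPool(3)
    (0 until 3).foreach(i => executors.execute(() => println(s"task $i done")))

    executors.shutdown()                                         // stop accepting new tasks
    val drained = executors.awaitTermination(60, TimeUnit.SECONDS)
    assert(drained, "workers did not finish within the timeout") // mirrors the shouldBe true check
  }
}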