java.util.concurrent.ExecutorService Scala Examples

The following examples show how to use java.util.concurrent.ExecutorService in Scala. Each example is taken from an open-source project; the header above each example names the source file, the project it comes from, and its license.
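Before diving in, here is a minimal, self-contained sketch of the ExecutorService lifecycle that the examples below build on (the pool size and the task are illustrative):

import java.util.concurrent.{Callable, ExecutorService, Executors, TimeUnit}

object ExecutorServiceSketch {
  def main(args: Array[String]): Unit = {
    val pool: ExecutorService = Executors.newFixedThreadPool(2)
    try {
      // submit returns a java.util.concurrent.Future
      val future = pool.submit(new Callable[Int] { def call(): Int = 21 + 21 })
      println(future.get()) // blocks until the task completes; prints 42
    } finally {
      pool.shutdown()                            // stop accepting new tasks
      pool.awaitTermination(5, TimeUnit.SECONDS) // wait for running tasks to finish
    }
  }
}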
Example 1
Source File: ExecutionApi.scala    From c4proto   with Apache License 2.0
package ee.cone.c4actor

import java.nio.file.Path
import java.util.concurrent.ExecutorService

import scala.concurrent.{ExecutionContext, Future, Promise}
import scala.util.Try

trait Execution extends Runnable {
  def onShutdown(hint: String, f:()=>Unit): ()=>Unit
  def complete(): Unit
  def skippingFuture[T](value: T): SkippingFuture[T]
  def newExecutorService(prefix: String, threadCount: Option[Int]): ExecutorService
  def fatal[T](future: ExecutionContext=>Future[T]): Unit
  def mainExecutionContext: ExecutionContext
  def success[T](promise: Promise[T], value: T): Unit
}

trait SkippingFuture[T] {
  def map(body: T => T): SkippingFuture[T]
  def value: Option[Try[T]]
}

trait ExecutableApp {
  def execution: Runnable // needed while componentRegistry.resolve exists, to avoid having two componentRegistry instances
}


trait Executable extends Runnable

// mark an Executable with Early
//   if it is worthwhile to start it before the world is ready;
// an Early Executable SHOULD NOT write to anything (kafka, db, jms),
//   because another instance of the same app may still be alive
trait Early
abstract class ExecutionFilter(val check: Executable=>Boolean)

trait Config {
  def get(key: String): String
}
trait ListConfig {
  def get(key: String): List[String]
}


case class ActorName(value: String)

trait SimpleSigner extends Signer[List[String]]
trait Signer[T] {
  def sign(data: T, until: Long): String
  def retrieve(check: Boolean): Option[String]=>Option[T]
}


////

object Trace { // maybe move to util
  def apply[T](f: =>T): T = try { f } catch {
    case e: Throwable =>
      System.err.println(s"TRACED: ${e.getMessage}")
      e.printStackTrace()
      throw e
  }
}

object FinallyClose {
  def apply[A<:AutoCloseable,R](o: A)(f: A=>R): R = try f(o) finally o.close()
  def apply[A,R](close: A=>Unit)(o: A)(f: A=>R): R = try f(o) finally close(o)
}

trait CatchNonFatal {
  def apply[T](aTry: =>T)(hint: =>String)(aCatch: Throwable=>T): T
}

case class NanoTimer(startedAt: Long = System.nanoTime){
  def ms: Long = (System.nanoTime - startedAt) / 1000000
} 
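The FinallyClose helpers above implement the loan pattern; a small usage sketch (the file name is illustrative):

import java.io.{BufferedReader, FileReader}

val firstLine: String = FinallyClose(new BufferedReader(new FileReader("data.txt"))) { reader =>
  reader.readLine() // the reader is closed afterwards, even if this throws
}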
Example 2
Source File: PowerBIAuthenticationWithUsernamePassword.scala    From spark-powerbi-connector   with Apache License 2.0
package com.microsoft.azure.powerbi.authentication

import java.net.URI
import java.util.concurrent.{Executors, ExecutorService, Future}
import javax.naming.ServiceUnavailableException

import com.microsoft.aad.adal4j.{AuthenticationContext, AuthenticationResult}

case class PowerBIAuthenticationWithUsernamePassword(powerBIAuthorityURL: String,
                                                     powerBIResourceURL: String,
                                                     powerBIClientID: String,
                                                     activeDirectoryUsername: String,
                                                     activeDirectoryPassword: String)
  extends PowerBIAuthentication {

  def getAccessToken: String =
    if (this.accessToken != null && this.accessToken.nonEmpty) this.accessToken
    else refreshAccessToken

  def refreshAccessToken: String = retrieveToken.getAccessToken

  private def retrieveToken: AuthenticationResult = {

    var authenticationResult: AuthenticationResult = null
    var executorService: ExecutorService = null

    try {

      executorService = Executors.newFixedThreadPool(1)

      val authenticationContext: AuthenticationContext =
        new AuthenticationContext(powerBIAuthorityURL, true, executorService)

      val authenticationResultFuture: Future[AuthenticationResult] =
        authenticationContext.acquireToken(powerBIResourceURL, powerBIClientID,
          activeDirectoryUsername, activeDirectoryPassword, null)

      authenticationResult = authenticationResultFuture.get()
    } finally {
      if (executorService != null) executorService.shutdown()
    }

    if (authenticationResult == null) {
      throw new ServiceUnavailableException("Authentication result empty")
    }

    this.accessToken = authenticationResult.getAccessToken

    authenticationResult
  }

  private var accessToken: String = _
} 
Example 3
Source File: OkhttpDownload.scala    From coursier   with Apache License 2.0
package coursier.publish.download

import java.time.Instant
import java.util.concurrent.ExecutorService

import coursier.cache.CacheUrl
import coursier.core.Authentication
import coursier.publish.download.logger.DownloadLogger
import coursier.util.Task
import okhttp3.internal.http.HttpDate
import okhttp3.{OkHttpClient, Request, Response}

import scala.collection.JavaConverters._
import scala.util.{Failure, Success, Try}

final case class OkhttpDownload(client: OkHttpClient, pool: ExecutorService) extends Download {

  import OkhttpDownload.TryOps

  def downloadIfExists(url: String, authentication: Option[Authentication], logger: DownloadLogger): Task[Option[(Option[Instant], Array[Byte])]] = {

    // FIXME Some duplication with upload below…

    val request = {
      val b = new Request.Builder()
        .url(url)
        .get()

      // Handling this ourselves rather than via client.setAuthenticator / com.squareup.okhttp.Authenticator
      for (auth <- authentication; (k, v) <- auth.allHttpHeaders)
        b.addHeader(k, v)

      b.build()
    }

    Task.schedule(pool) {
      logger.downloadingIfExists(url)

      val res = Try {
        var response: Response = null

        try {
          response = client.newCall(request).execute()

          if (response.isSuccessful) {
            val lastModifiedOpt = Option(response.header("Last-Modified")).map { s =>
              HttpDate.parse(s).toInstant
            }
            Right(Some((lastModifiedOpt, response.body().bytes())))
          } else {
            val code = response.code()
            if (code / 100 == 4)
              Right(None)
            else {
              val content = Try(response.body().string()).getOrElse("")
              Left(new Download.Error.HttpError(url, code, response.headers().toMultimap.asScala.mapValues(_.asScala.toList).iterator.toMap, content))
            }
          }
        } finally {
          if (response != null)
            response.body().close()
        }
      }.toEither.flatMap(identity)

      logger.downloadedIfExists(
        url,
        res.toOption.flatMap(_.map(_._2.length)),
        res.left.toOption.map(e => new Download.Error.DownloadError(url, e))
      )

      Task.fromEither(res)
    }.flatMap(identity)
  }

}

object OkhttpDownload {

  // for 2.11
  private[publish] implicit class TryOps[T](private val t: Try[T]) {
    def toEither: Either[Throwable, T] =
      t match {
        case Success(t) => Right(t)
        case Failure(e) => Left(e)
      }
  }

  def create(pool: ExecutorService): Download = {
    // Seems we can't even create / shutdown the client thread pool (via its Dispatcher)…
    OkhttpDownload(new OkHttpClient, pool)
  }
} 
Example 4
Source File: PlatformScalazImplicits.scala    From coursier   with Apache License 2.0
package coursier.interop

import java.util.concurrent.ExecutorService

import coursier.util.Sync
import _root_.scalaz.concurrent.{Task => ScalazTask}

abstract class PlatformScalazImplicits {

  implicit val scalazTaskSync: Sync[ScalazTask] =
    new Sync[ScalazTask] {
      def point[A](a: A) =
        ScalazTask.point(a)
      def delay[A](a: => A): ScalazTask[A] =
        ScalazTask.delay(a)
      override def fromAttempt[A](a: Either[Throwable, A]): ScalazTask[A] =
        a match {
          case Left(t) => ScalazTask.fail(t)
          case Right(x) => ScalazTask.now(x)
        }
      def handle[A](a: ScalazTask[A])(f: PartialFunction[Throwable, A]) =
        a.handle(f)
      def schedule[A](pool: ExecutorService)(f: => A) =
        ScalazTask(f)(pool)

      def gather[A](elems: Seq[ScalazTask[A]]) =
        ScalazTask.taskInstance.gather(elems)

      def bind[A, B](elem: ScalazTask[A])(f: A => ScalazTask[B]) =
        ScalazTask.taskInstance.bind(elem)(f)
    }

} 
Example 5
Source File: PlatformCatsImplicits.scala    From coursier   with Apache License 2.0
package coursier.interop

import java.util.concurrent.ExecutorService

import _root_.cats.instances.vector._
import _root_.cats.syntax.all._
import coursier.util.Sync

import scala.concurrent.{ExecutionContext, ExecutionContextExecutorService}

abstract class PlatformCatsImplicits {

  implicit def coursierSyncFromCats[F[_], F0[_]](implicit N: _root_.cats.effect.Sync[F], par: _root_.cats.Parallel.Aux[F, F0], cs: _root_.cats.effect.ContextShift[F]): Sync[F] =
    new Sync[F] {
      def point[A](a: A): F[A] =
        a.pure[F]
      def delay[A](a: => A): F[A] =
        N.delay(a)
      override def fromAttempt[A](a: Either[Throwable, A]): F[A] =
        N.fromEither(a)
      def handle[A](a: F[A])(f: PartialFunction[Throwable, A]): F[A] =
        a.recover(f)
      def schedule[A](pool: ExecutorService)(f: => A): F[A] = {
        val ec0 = pool match {
          case eces: ExecutionContextExecutorService => eces
          case _ => ExecutionContext.fromExecutorService(pool) // FIXME Is this instantiation costly? Cache it?
        }
        cs.evalOn(ec0)(N.delay(f))
      }

      def gather[A](elems: Seq[F[A]]): F[Seq[A]] =
        N.map(_root_.cats.Parallel.parSequence(elems.toVector))(_.toSeq)
      def bind[A, B](elem: F[A])(f: A => F[B]): F[B] =
        elem.flatMap(f)
    }

} 
Example 6
Source File: ThreadUtil.scala    From coursier   with Apache License 2.0
package coursier.cache.internal

import java.util.concurrent.{ExecutorService, LinkedBlockingQueue, ScheduledExecutorService, ScheduledThreadPoolExecutor, ThreadFactory, ThreadPoolExecutor, TimeUnit}
import java.util.concurrent.atomic.AtomicInteger

object ThreadUtil {

  private val poolNumber = new AtomicInteger(1)

  def daemonThreadFactory(): ThreadFactory = {

    val poolNumber0 = poolNumber.getAndIncrement()

    val threadNumber = new AtomicInteger(1)

    new ThreadFactory {
      def newThread(r: Runnable) = {
        val threadNumber0 = threadNumber.getAndIncrement()
        val t = new Thread(r, s"coursier-pool-$poolNumber0-thread-$threadNumber0")
        t.setDaemon(true)
        t.setPriority(Thread.NORM_PRIORITY)
        t
      }
    }
  }

  def fixedThreadPool(size: Int): ExecutorService = {

    val factory = daemonThreadFactory()

    // 1 min keep alive, so that threads get stopped a bit after resolution / downloading is done
    val executor = new ThreadPoolExecutor(
      size, size,
      1L, TimeUnit.MINUTES,
      new LinkedBlockingQueue[Runnable],
      factory
    )
    executor.allowCoreThreadTimeOut(true)
    executor
  }

  def fixedScheduledThreadPool(size: Int): ScheduledExecutorService = {

    val factory = daemonThreadFactory()

    val executor = new ScheduledThreadPoolExecutor(size, factory)
    executor.setKeepAliveTime(1L, TimeUnit.MINUTES)
    executor.allowCoreThreadTimeOut(true)
    executor
  }

  def withFixedThreadPool[T](size: Int)(f: ExecutorService => T): T = {

    var pool: ExecutorService = null
    try {
      pool = fixedThreadPool(size)
      f(pool)
    } finally {
      if (pool != null)
        pool.shutdown()
    }
  }

} 
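A short usage sketch for the withFixedThreadPool loan helper above (the squaring tasks are illustrative):

import java.util.concurrent.Callable

val sumOfSquares: Int = ThreadUtil.withFixedThreadPool(4) { pool =>
  val futures = (1 to 4).map { i =>
    pool.submit(new Callable[Int] { def call(): Int = i * i })
  }
  futures.map(_.get()).sum // 1 + 4 + 9 + 16 = 30
} // the pool is shut down by withFixedThreadPool even if a task throws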
Example 7
Source File: PlatformTaskCompanion.scala    From coursier   with Apache License 2.0
package coursier.util

import java.util.concurrent.{ExecutorService, ScheduledExecutorService}

import scala.concurrent.{Await, ExecutionContext, ExecutionContextExecutorService, Future}
import scala.concurrent.duration.{Duration, FiniteDuration}
import scala.concurrent.Promise
import scala.util.Success

abstract class PlatformTaskCompanion { self =>

  def schedule[A](pool: ExecutorService)(f: => A): Task[A] = {

    val ec0 = pool match {
      case eces: ExecutionContextExecutorService => eces
      case _ => ExecutionContext.fromExecutorService(pool) // FIXME Is this instantiation costly? Cache it?
    }

    Task(_ => Future(f)(ec0))
  }

  def completeAfter(pool: ScheduledExecutorService, duration: FiniteDuration): Task[Unit] =
    Task.delay {
      val p = Promise[Unit]()
      val runnable =
        new Runnable {
          def run(): Unit =
            p.complete(Success(()))
        }
      pool.schedule(runnable, duration.length, duration.unit)
      Task(_ => p.future)
    }.flatMap(identity)

  implicit val sync: Sync[Task] =
    new TaskSync {
      def schedule[A](pool: ExecutorService)(f: => A) = self.schedule(pool)(f)
    }

  implicit class PlatformTaskOps[T](private val task: Task[T]) {
    def unsafeRun()(implicit ec: ExecutionContext): T =
      Await.result(task.future(), Duration.Inf)
  }

} 
Example 8
Source File: CacheParams.scala    From coursier   with Apache License 2.0
package coursier.cli.params

import java.io.File
import java.util.concurrent.ExecutorService

import coursier.cache._
import coursier.credentials.Credentials
import coursier.internal.InMemoryCache
import coursier.util.{Sync, Task}

import scala.concurrent.duration.Duration

final case class CacheParams(
  cacheLocation: java.io.File,
  cachePolicies: Seq[coursier.cache.CachePolicy],
  ttl: Option[scala.concurrent.duration.Duration],
  parallel: Int,
  checksum: Seq[Option[String]],
  retryCount: Int,
  cacheLocalArtifacts: Boolean,
  followHttpToHttpsRedirections: Boolean,
  credentials: Seq[coursier.credentials.Credentials] = Nil,
  useEnvCredentials: Boolean = true
) {

  def withCacheLocation(cacheLocation: java.io.File): CacheParams =
    copy(cacheLocation = cacheLocation)
  def withCachePolicies(cachePolicies: Seq[coursier.cache.CachePolicy]): CacheParams =
    copy(cachePolicies = cachePolicies)
  def withTtl(ttl: Option[scala.concurrent.duration.Duration]): CacheParams =
    copy(ttl = ttl)
  def withTtl(ttl: scala.concurrent.duration.Duration): CacheParams =
    copy(ttl = Option(ttl))
  def withParallel(parallel: Int): CacheParams =
    copy(parallel = parallel)
  def withChecksum(checksum: Seq[Option[String]]): CacheParams =
    copy(checksum = checksum)
  def withRetryCount(retryCount: Int): CacheParams =
    copy(retryCount = retryCount)
  def withCacheLocalArtifacts(cacheLocalArtifacts: Boolean): CacheParams =
    copy(cacheLocalArtifacts = cacheLocalArtifacts)
  def withFollowHttpToHttpsRedirections(followHttpToHttpsRedirections: Boolean): CacheParams =
    copy(followHttpToHttpsRedirections = followHttpToHttpsRedirections)
  def withCredentials(credentials: Seq[coursier.credentials.Credentials]): CacheParams =
    copy(credentials = credentials)
  def withUseEnvCredentials(useEnvCredentials: Boolean): CacheParams =
    copy(useEnvCredentials = useEnvCredentials)

  def cache(
    pool: ExecutorService,
    logger: CacheLogger
  ): FileCache[Task] = {

    var c = FileCache[Task]()
      .withLocation(cacheLocation)
      .withCachePolicies(cachePolicies)
      .withChecksums(checksum)
      .withLogger(logger)
      .withPool(pool)
      .withTtl(ttl)
      .withRetry(retryCount)
      .withFollowHttpToHttpsRedirections(followHttpToHttpsRedirections)
      .withLocalArtifactsShouldBeCached(cacheLocalArtifacts)

    if (!useEnvCredentials)
      c = c.withCredentials(Nil)

    c = c.addCredentials(credentials: _*)

    c
  }

  def cache(
    pool: ExecutorService,
    logger: CacheLogger,
    inMemoryCache: Boolean
  ): Cache[Task] = {

    val c = cache(pool, logger)

    if (inMemoryCache)
      InMemoryCache(c, Task.sync)
    else
      c
  }
} 
Example 9
Source File: FixedParallelSuite.scala    From scala-debugger   with Apache License 2.0
package org.scaladebugger.test.helpers

import java.util.concurrent.{ExecutorService, Executors, ThreadFactory}

import FixedParallelSuite._

object FixedParallelSuite {
  lazy val DefaultExecutorService = Executors.newFixedThreadPool(
    ControlledParallelSuite.calculatePoolSize(),
    ControlledParallelSuite.threadFactory
  )
}


trait FixedParallelSuite extends ControlledParallelSuite {
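  // Reuse the single shared pool defined above rather than creating a new pool
  // per suite, capping total parallelism across all suites mixing in this trait.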
  protected lazy val executorService = DefaultExecutorService

  override protected def newExecutorService(
    poolSize: Int,
    threadFactory: ThreadFactory
  ): ExecutorService = {
    executorService
  }
} 
Example 10
Source File: HikariDataSourceTransactor.scala    From mist   with Apache License 2.0
package io.hydrosphere.mist.master.store

import java.util.concurrent.{ExecutorService, Executors, Future, TimeUnit}

import cats.arrow.FunctionK
import cats.effect._
import com.zaxxer.hikari.{HikariConfig, HikariDataSource}
import doobie.util.transactor.Transactor
import doobie.util.transactor.Transactor.Aux
import io.hydrosphere.mist.utils.Logger

import scala.concurrent.ExecutionContext

// NOTE: the class declaration and transactor wiring are elided in this
// excerpt; `ds` (the HikariDataSource), `ce`/`te` (the executor services
// behind the connect/transact ExecutionContexts), `awaitShutdown` and
// `shutdownExecutorService` all come from the elided body.

  def shutdown(): Unit = {
    if (!ds.isClosed) {
      logger.info("Closing Hikari data source")
      ds.close()
    } else {
      logger.warn("Hikari datasource had not been properly initialized before closing")
    }

    shutdownExecutorService(awaitShutdown, ce, "connections EC")
    shutdownExecutorService(awaitShutdown, te, "tx EC")
  }
} 
Example 11
Source File: CheckStepBench.scala    From cornichon   with Apache License 2.0
package step

import java.util.concurrent.{ ExecutorService, Executors }

import com.github.agourlay.cornichon.core._
import monix.execution.Scheduler
import org.openjdk.jmh.annotations._
import com.github.agourlay.cornichon.steps.check.checkModel._
import com.github.agourlay.cornichon.steps.cats.EffectStep

import scala.concurrent.Await
import scala.concurrent.duration.Duration
import step.JsonStepBench._

@State(Scope.Benchmark)
@BenchmarkMode(Array(Mode.Throughput))
@Warmup(iterations = 10)
@Measurement(iterations = 10)
@Fork(value = 1, jvmArgsAppend = Array(
  "-XX:+FlightRecorder",
  "-XX:StartFlightRecording=filename=./CheckStepBench-profiling-data.jfr,name=profile,settings=profile",
  "-Xmx1G"))
class CheckStepBench {

  //sbt:benchmarks> jmh:run .*CheckStep.* -prof gc -foe true -gc true -rf csv

  @Param(Array("10", "20", "50", "100", "200"))
  var transitionNumber: String = ""

  var es: ExecutorService = _
  var scheduler: Scheduler = _

  @Setup(Level.Trial)
  final def beforeAll(): Unit = {
    es = Executors.newFixedThreadPool(1)
    scheduler = Scheduler(es)
  }

  @TearDown(Level.Trial)
  final def afterAll(): Unit = {
    es.shutdown()
  }
  

  @Benchmark
  def runModel() = {
    val checkStep = CheckModelStep(maxNumberOfRuns = 1, maxNumberOfTransitions = transitionNumber.toInt, CheckStepBench.modelRunner)
    val s = Scenario("scenario with checkStep", checkStep :: Nil)
    val f = ScenarioRunner.runScenario(session)(s)
    val res = Await.result(f.runToFuture(scheduler), Duration.Inf)
    assert(res.isSuccess)
  }

}

object CheckStepBench {
  def integerGen(rc: RandomContext): ValueGenerator[Int] = ValueGenerator(
    name = "integer",
    gen = () => rc.nextInt(10000))

  def dummyProperty1(name: String): PropertyN[Int, NoValue, NoValue, NoValue, NoValue, NoValue] =
    Property1(
      description = name,
      invariant = g => EffectStep.fromSyncE("add generated", _.session.addValue("generated", g().toString)))

  val starting = dummyProperty1("starting action")
  val otherAction = dummyProperty1("other action")
  val otherActionTwo = dummyProperty1("other action two")
  val transitions = Map(
    starting -> ((100, otherAction) :: Nil),
    otherAction -> ((100, otherActionTwo) :: Nil),
    otherActionTwo -> ((100, otherAction) :: Nil))
  val model = Model("model with empty transition for starting", starting, transitions)
  val modelRunner = ModelRunner.make(integerGen)(model)

} 
Example 12
Source File: RunScenarioBench.scala    From cornichon   with Apache License 2.0
package scenario

import java.util.concurrent.{ ExecutorService, Executors }

import cats.instances.int._
import com.github.agourlay.cornichon.core.{ ScenarioRunner, Scenario, Session }
import com.github.agourlay.cornichon.steps.cats.EffectStep
import com.github.agourlay.cornichon.steps.regular.assertStep.{ AssertStep, Assertion, GenericEqualityAssertion }
import org.openjdk.jmh.annotations._
import scenario.RunScenarioBench._
import monix.execution.Scheduler

import scala.concurrent.Await
import scala.concurrent.duration._

@State(Scope.Benchmark)
@BenchmarkMode(Array(Mode.Throughput))
@Warmup(iterations = 10)
@Measurement(iterations = 10)
@Fork(value = 1, jvmArgsAppend = Array(
  "-XX:+FlightRecorder",
  "-XX:StartFlightRecording=filename=./RunScenarioBench-profiling-data.jfr,name=profile,settings=profile",
  "-Xmx1G"))
class RunScenarioBench {

  //sbt:benchmarks> jmh:run .*RunScenario.* -prof gc -foe true -gc true -rf csv

  @Param(Array("10", "20", "50", "100", "200"))
  var stepsNumber: String = ""
  var es: ExecutorService = _
  var scheduler: Scheduler = _

  @Setup(Level.Trial)
  final def beforeAll(): Unit = {
    es = Executors.newFixedThreadPool(1)
    scheduler = Scheduler(es)
  }

  @TearDown(Level.Trial)
  final def afterAll(): Unit = {
    es.shutdown()
  }

  

  @Benchmark
  def lotsOfSteps() = {
    val half = stepsNumber.toInt / 2
    val assertSteps = List.fill(half)(assertStep)
    val effectSteps = List.fill(half)(effectStep)
    val scenario = Scenario("test scenario", setupSession +: (assertSteps ++ effectSteps))
    val f = ScenarioRunner.runScenario(Session.newEmpty)(scenario)
    val res = Await.result(f.runToFuture(scheduler), Duration.Inf)
    assert(res.isSuccess)
  }
}

object RunScenarioBench {
  val setupSession = EffectStep.fromSyncE("setup session", _.session.addValues("v1" -> "2", "v2" -> "1"))
  val assertStep = AssertStep(
    "addition step",
    sc => Assertion.either {
      for {
        two <- sc.session.get("v1").map(_.toInt)
        one <- sc.session.get("v2").map(_.toInt)
      } yield GenericEqualityAssertion(two + one, 3)
    })
  val effectStep = EffectStep.fromSync("identity", _.session)
} 
Example 13
Source File: RequestEffectBench.scala    From cornichon   with Apache License 2.0
package httpService

import java.util.concurrent.{ ExecutorService, Executors }

import cats.instances.string._
import com.github.agourlay.cornichon.core.{ Config, ScenarioContext }
import com.github.agourlay.cornichon.http.{ HttpMethods, HttpRequest, HttpService }
import org.openjdk.jmh.annotations._
import RequestEffectBench._
import com.github.agourlay.cornichon.http.client.NoOpHttpClient
import monix.execution.Scheduler

import scala.concurrent.Await
import scala.concurrent.duration._

@State(Scope.Benchmark)
@BenchmarkMode(Array(Mode.Throughput))
@Warmup(iterations = 10)
@Measurement(iterations = 10)
@Fork(value = 1, jvmArgsAppend = Array(
  "-XX:+FlightRecorder",
  "-XX:StartFlightRecording=filename=./RequestEffectBench-profiling-data.jfr,name=profile,settings=profile",
  "-Xmx1G"))
class RequestEffectBench {

  //sbt:benchmarks> jmh:run .*RequestEffect.*

  var es: ExecutorService = _
  val client = new NoOpHttpClient
  var httpService: HttpService = _

  @Setup(Level.Trial)
  final def beforeAll(): Unit = {
    es = Executors.newFixedThreadPool(1)
    val scheduler = Scheduler(es)
    httpService = new HttpService("", 2000.millis, client, Config())(scheduler)
  }

  @TearDown(Level.Trial)
  final def afterAll(): Unit = {
    es.shutdown()
  }
  

  @Benchmark
  def singleRequest() = {
    val f = httpService.requestEffect(request)
    val res = Await.result(f(scenarioContext), Duration.Inf)
    assert(res.isRight)
  }
}

object RequestEffectBench {
  val scenarioContext = ScenarioContext.empty
  val request = HttpRequest[String](
    method = HttpMethods.GET,
    url = "https://myUrl/my/segment",
    body = Some(""" { "k1":"v1", "k2":"v2","k3":"v3","k4":"v4" } """),
    params = ("q1", "v1") :: ("q2", "v2") :: ("q3", "v3") :: Nil,
    headers = ("h1", "v1") :: ("h2", "v2") :: ("h3", "v3") :: Nil)
} 
Example 14
Source File: Module.scala    From elastiknn   with Apache License 2.0
import java.util.concurrent.{ExecutorService, Executors, ThreadFactory}

import com.google.common.util.concurrent.ThreadFactoryBuilder
import com.google.inject.{AbstractModule, TypeLiteral}
import com.klibisz.elastiknn.client.{ElastiknnClient, ElastiknnFutureClient}
import javax.inject.Provider
import play.api.{Configuration, Environment}

import scala.concurrent.ExecutionContext

class Module(environment: Environment, configuration: Configuration) extends AbstractModule {

  val eknnProvider = new Provider[ElastiknnFutureClient] {
    override def get(): ElastiknnFutureClient = {
      val tfac: ThreadFactory = new ThreadFactoryBuilder().setDaemon(true).setNameFormat("elastiknn-%d").build()
      val exec: ExecutorService = Executors.newFixedThreadPool(Runtime.getRuntime.availableProcessors(), tfac)
      implicit val ec: ExecutionContext = ExecutionContext.fromExecutor(exec)
      val host = configuration.underlying.getString("elastiknn.elasticsearch.host")
      val port = configuration.underlying.getInt("elastiknn.elasticsearch.port")
      ElastiknnClient.futureClient(host, port)
    }
  }

  override def configure(): Unit = {
    // Weird that you have to use this constructor, but it works.
    bind(new TypeLiteral[ElastiknnFutureClient]() {}).toProvider(eknnProvider)
  }
} 
Example 15
Source File: OdinsonIndexSearcher.scala    From odinson   with Apache License 2.0
package ai.lum.odinson.lucene.search

import java.util.Collection
import java.util.concurrent.ExecutorService
import scala.collection.JavaConverters._
import scala.concurrent.ExecutionContext
import org.apache.lucene.index._
import org.apache.lucene.search._
import ai.lum.odinson.lucene._
import ai.lum.odinson.utils.ExecutionContextExecutorServiceBridge

class OdinsonIndexSearcher(
    context: IndexReaderContext,
    executor: ExecutorService,
    computeTotalHits: Boolean,
) extends IndexSearcher(context, executor) {

  def this(r: IndexReader, e: ExecutorService, computeTotalHits: Boolean) = {
    this(r.getContext(), e, computeTotalHits)
  }

  def this(r: IndexReader, e: ExecutionContext,  computeTotalHits: Boolean) = {
    this(r.getContext(), ExecutionContextExecutorServiceBridge(e), computeTotalHits)
  }

  def this(r: IndexReader, computeTotalHits: Boolean) = {
    this(r.getContext(), null, computeTotalHits)
  }

  def odinSearch(query: OdinsonQuery): OdinResults = {
    val n = readerContext.reader().maxDoc()
    odinSearch(query, n)
  }

  def odinSearch(query: OdinsonQuery, n: Int): OdinResults = {
    odinSearch(null, query, n)
  }

  def odinSearch(after: OdinsonScoreDoc, query: OdinsonQuery, numHits: Int): OdinResults = {
    odinSearch(after, query, numHits, false)
  }

  def odinSearch(after: OdinsonScoreDoc, query: OdinsonQuery, numHits: Int, disableMatchSelector: Boolean): OdinResults = {
    val limit = math.max(1, readerContext.reader().maxDoc())
    require(
      after == null || after.doc < limit,
      s"after.doc exceeds the number of documents in the reader: after.doc=${after.doc} limit=${limit}"
    )
    val cappedNumHits = math.min(numHits, limit)
    val manager = new CollectorManager[OdinsonCollector, OdinResults] {
      def newCollector() = new OdinsonCollector(cappedNumHits, after, computeTotalHits, disableMatchSelector)
      def reduce(collectors: Collection[OdinsonCollector]): OdinResults = {
        val results = collectors.iterator.asScala.map(_.odinResults).toArray
        OdinResults.merge(0, cappedNumHits, results, true)
      }
    }
    search(query, manager)
  }

} 
Example 16
Source File: RichRawImpl.scala    From c4proto   with Apache License 2.0
package ee.cone.c4actor

import java.util.concurrent.ExecutorService

import com.typesafe.scalalogging.LazyLogging
import ee.cone.c4assemble._
import ee.cone.c4assemble.Types._
import ee.cone.c4proto.ToByteString

import scala.collection.immutable.Map
import scala.concurrent.ExecutionContext
import java.lang.Math.toIntExact

import ee.cone.c4actor.QProtocol._
import ee.cone.c4actor.Types._
import ee.cone.c4di.c4

@c4("RichDataCompApp") final class GetOffsetImpl(
  actorName: ActorName,
  getS_Offset: GetByPK[S_Offset],
) extends GetOffset {
  def of: SharedContext with AssembledContext => NextOffset =
    ctx => getS_Offset.ofA(ctx).get(actorName.value).fold(empty)(_.txId)
  def empty: NextOffset = "0" * OffsetHexSize()
}

object EmptyInjected extends Injected

@c4("RichDataCompApp") final class RichRawWorldReducerImpl(
  injected: List[Injected],
  toUpdate: ToUpdate,
  actorName: ActorName,
  execution: Execution,
  getOffset: GetOffsetImpl,
  readModelAdd: ReadModelAdd,
  getAssembleOptions: GetAssembleOptions,
) extends RichRawWorldReducer with LazyLogging {
  def reduce(contextOpt: Option[SharedContext with AssembledContext], addEvents: List[RawEvent]): RichContext = {
    val events = if(contextOpt.nonEmpty) addEvents else {
      val offset = addEvents.lastOption.fold(getOffset.empty)(_.srcId)
      val firstborn = LEvent.update(S_Firstborn(actorName.value,offset)).toList.map(toUpdate.toUpdate)
      val (bytes, headers) = toUpdate.toBytes(firstborn)
      SimpleRawEvent(offset, ToByteString(bytes), headers) :: addEvents
    }
    if(events.isEmpty) contextOpt.get match {
      case context: RichRawWorldImpl => context
      case context => create(context.injected, context.assembled, context.executionContext)
    } else {
      val context = contextOpt.getOrElse(
        create(Single.option(injected).getOrElse(EmptyInjected), emptyReadModel, EmptyOuterExecutionContext)
      )
      val nAssembled = readModelAdd.add(events, context)
      create(context.injected, nAssembled, context.executionContext)
    }
  }

  def create(injected: Injected, assembled: ReadModel, executionContext: OuterExecutionContext): RichRawWorldImpl = {
    val preWorld = new RichRawWorldImpl(injected, assembled, executionContext, "")
    val threadCount = getAssembleOptions.get(assembled).threadCount
    val offset = getOffset.of(preWorld)
    new RichRawWorldImpl(injected, assembled, needExecutionContext(threadCount)(executionContext), offset)
  }
  def newExecutionContext(confThreadCount: Long): OuterExecutionContext = {
    val fixedThreadCount = if(confThreadCount>0) toIntExact(confThreadCount) else Runtime.getRuntime.availableProcessors
    val pool = execution.newExecutorService("ass-",Option(fixedThreadCount))
    logger.info(s"ForkJoinPool create $fixedThreadCount")
    new OuterExecutionContextImpl(confThreadCount,fixedThreadCount,ExecutionContext.fromExecutor(pool),pool)
  }
  def needExecutionContext(confThreadCount: Long): OuterExecutionContext=>OuterExecutionContext = {
    case ec: OuterExecutionContextImpl if ec.confThreadCount == confThreadCount =>
      ec
    case ec: OuterExecutionContextImpl =>
      ec.service.shutdown()
      logger.info("ForkJoinPool shutdown")
      newExecutionContext(confThreadCount)
    case _ =>
      newExecutionContext(confThreadCount)
  }
}

class OuterExecutionContextImpl(
  val confThreadCount: Long,
  val threadCount: Long,
  val value: ExecutionContext,
  val service: ExecutorService
) extends OuterExecutionContext
object EmptyOuterExecutionContext extends OuterExecutionContext {
  def value: ExecutionContext = throw new Exception("no ExecutionContext")
  def threadCount: Long =  throw new Exception("no ExecutionContext")
}

class RichRawWorldImpl(
  val injected: Injected,
  val assembled: ReadModel,
  val executionContext: OuterExecutionContext,
  val offset: NextOffset
) extends RichContext
 
Example 17
Source File: PowerBIAuthenticationWithAuthorizationCode.scala    From spark-powerbi-connector   with Apache License 2.0
package com.microsoft.azure.powerbi.authentication

import java.net.URI
import java.util.concurrent.{Executors, ExecutorService, Future}
import javax.naming.ServiceUnavailableException

import com.microsoft.aad.adal4j.{AuthenticationContext, AuthenticationResult}

case class PowerBIAuthenticationWithAuthorizationCode(powerBIAuthorityURL: String,
                                                      powerBIResourceURL: String,
                                                      powerBIClientID: String,
                                                      activeDirectoryAuthorizationCode: String,
                                                      activeDirectoryRedirectUri: URI)
  extends PowerBIAuthentication {

  def getAccessToken: String =
    if (this.accessToken != null && this.accessToken.nonEmpty) this.accessToken
    else refreshAccessToken

  def refreshAccessToken: String = retrieveToken.getAccessToken

  private def retrieveToken: AuthenticationResult = {

    var authenticationResult: AuthenticationResult = null
    var executorService: ExecutorService = null

    try {

      executorService = Executors.newFixedThreadPool(1)

      val authenticationContext: AuthenticationContext =
        new AuthenticationContext(powerBIAuthorityURL, true, executorService)

      val authenticationResultFuture: Future[AuthenticationResult] =
        authenticationContext.acquireTokenByAuthorizationCode(activeDirectoryAuthorizationCode,
          powerBIResourceURL, powerBIClientID, activeDirectoryRedirectUri, null)

      authenticationResult = authenticationResultFuture.get()
    } finally {
      if (executorService != null) executorService.shutdown()
    }

    if (authenticationResult == null) {
      throw new ServiceUnavailableException("Authentication result empty")
    }

    this.accessToken = authenticationResult.getAccessToken

    authenticationResult
  }

  private var accessToken: String = _
} 
Example 18
Source File: SwaveIdentityProcessorVerification.scala    From swave   with Mozilla Public License 2.0
package swave.core.tck

import java.util.concurrent.{ExecutorService, Executors, TimeUnit}
import org.reactivestreams.Publisher
import org.reactivestreams.tck.{IdentityProcessorVerification, TestEnvironment}
import org.scalatest.testng.TestNGSuiteLike
import org.testng.SkipException
import org.testng.annotations.AfterClass
import swave.core._

abstract class SwaveIdentityProcessorVerification[T](val testEnv: TestEnvironment, publisherShutdownTimeout: Long)
    extends IdentityProcessorVerification[T](testEnv, publisherShutdownTimeout) with TestNGSuiteLike
    with StreamEnvShutdown {

  def this(printlnDebug: Boolean) =
    this(
      new TestEnvironment(Timeouts.defaultTimeout.toMillis, printlnDebug),
      Timeouts.publisherShutdownTimeout.toMillis)

  def this() = this(false)

  override def createFailedPublisher(): Publisher[T] =
    Spout.failing[T](new Exception("Nope")).drainTo(Drain.toPublisher()).get

  // Publishers created by swave don't support fanout by default
  override def maxSupportedSubscribers: Long = 1L

  override def required_spec313_cancelMustMakeThePublisherEventuallyDropAllReferencesToTheSubscriber(): Unit =
    throw new SkipException("Not relevant for publisher w/o fanout support")

  override lazy val publisherExecutorService: ExecutorService =
    Executors.newFixedThreadPool(3)

  @AfterClass
  def shutdownPublisherExecutorService(): Unit = {
    publisherExecutorService.shutdown()
    publisherExecutorService.awaitTermination(3, TimeUnit.SECONDS)
  }
} 
Example 19
Source File: PythonFunction.scala    From polynote   with Apache License 2.0
package polynote.runtime.python

import java.util.concurrent.{Callable, ExecutorService}

import jep.python.PyCallable
import shapeless.Witness

import scala.collection.JavaConverters._
import scala.language.dynamics


class PythonFunction(callable: PyCallable, runner: PythonObject.Runner) extends TypedPythonObject[PythonFunction.function](callable, runner) with Dynamic {

  private def unwrapArg(arg: Any): Any = arg match {
    case pyObj: PythonObject => pyObj.unwrap
    case obj => obj
  }

  override def applyDynamic(method: String)(args: Any*): PythonObject = {
    if (method == "apply" || method == "call" || method == "__call__")
      callPosArgs(callable, args.asInstanceOf[Seq[AnyRef]])
    else
      super.applyDynamic(method)(args: _*)

  }

  override def applyDynamicNamed(method: String)(args: (String, Any)*): PythonObject = {
    if (method == "apply" || method == "call" || method == "__call__")
      callKwArgs(callable, args)
    else
      super.applyDynamicNamed(method)(args: _*)
  }

}

object PythonFunction {
  type function = Witness.`"function"`.T
} 
Example 20
Source File: Netty.scala    From scalaz-netty   with Apache License 2.0
package scalaz
package netty

import java.net.InetSocketAddress
import java.util.concurrent.ExecutorService

import _root_.io.netty.channel._
import scodec.bits.ByteVector

import scalaz.concurrent._
import scalaz.stream._

object Netty {

  def server(bind: InetSocketAddress, config: ServerConfig = ServerConfig.Default)(implicit pool: ExecutorService = Strategy.DefaultExecutorService, S: Strategy): Process[Task, Process[Task, Exchange[ByteVector, ByteVector]]] = {
    Process.bracket(Server(bind, config))(s => Process.eval(s.shutdown).drain) { server: Server =>
      server.listen
    }
  }

  def connect(to: InetSocketAddress, config: ClientConfig = ClientConfig.Default)(implicit pool: ExecutorService = Strategy.DefaultExecutorService, S: Strategy): Process[Task, Exchange[ByteVector, ByteVector]] = {
    Process.bracket(Client(to, config))(_.shutdown) { client: Client =>
      Process(Exchange(client.read, client.write))
    }
  }

  private[netty] def toTask(f: ChannelFuture)(implicit pool: ExecutorService): Task[Unit] = fork {
    Task async { (cb: (Throwable \/ Unit) => Unit) =>
      f.addListener(new ChannelFutureListener {
        def operationComplete(f: ChannelFuture): Unit = {
          if (f.isSuccess)
            cb(\/-(()))
          else
            cb(-\/(f.cause))
        }
      })
    }
  }

  private def fork[A](t: Task[A])(implicit pool: ExecutorService = Strategy.DefaultExecutorService): Task[A] = {
    Task async { cb =>
      t unsafePerformAsync { either =>
        pool.submit(new Runnable {
          def run(): Unit = cb(either)
        })

        ()
      }
    }
  }
} 
Example 21
Source File: GraphiteMockServer.scala    From kafka-offset-monitor-graphite   with Apache License 2.0
package pl.allegro.tech.kafka.offset.monitor.graphite

import java.io.InputStream
import java.lang
import java.net.ServerSocket
import java.util.concurrent.{Callable, ExecutorService, Executors}

import com.jayway.awaitility.Awaitility._
import com.jayway.awaitility.Duration

class GraphiteMockServer(port: Int) {

  var serverSocket: ServerSocket = null
  val executor: ExecutorService = Executors.newFixedThreadPool(10)
  @volatile var listen: Boolean = false

  var expectedMetrics: scala.collection.mutable.Map[String, Double] = scala.collection.mutable.Map()
  var receivedMetrics: scala.collection.mutable.Map[String, Double] = scala.collection.mutable.Map()
  
  def start() {
    serverSocket = new ServerSocket(port)
    listen = true
    handleConnections()
  }

  private def handleConnections() {
    executor.execute(new Runnable {
      override def run() {
        while (listen) {
          readData(serverSocket.accept().getInputStream())
        }
      }
    })
  }

  private def readData(stream: InputStream) {
    executor.execute(new Runnable {
      override def run() {
        scala.io.Source.fromInputStream(stream).getLines().foreach((line) => handleMetric(line))
      }
    })
  }
  
  private def handleMetric(metricLine: String) {
    val metric = metricLine.split(" ")(0)
    val value = metricLine.split(" ")(1)

    if(expectedMetrics.contains(metric)) {
      receivedMetrics += (metric -> value.toDouble)
    }
  }
  
  def stop() {
    listen = false
    serverSocket.close()
  }
  
  def reset() {
    expectedMetrics.clear()
    receivedMetrics.clear()
  }

  def expectMetric(metricNamePattern: String, value: Double) {
    expectedMetrics += (metricNamePattern -> value)
  }

  def waitUntilReceived() {
    await.atMost(Duration.FIVE_SECONDS).until(new Callable[lang.Boolean] {
      override def call(): lang.Boolean = {
        expectedMetrics.forall { case (k, v) =>
          receivedMetrics.get(k).exists( (rv) => v == rv )
        }
      }
    })
  }
} 
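A usage sketch for the mock server above, as it might appear in a test (the port and metric name are illustrative):

val graphite = new GraphiteMockServer(2003)
graphite.start()
graphite.expectMetric("kafka.consumer.group.lag", 42.0)
// ... exercise the component under test, pointing its Graphite reporter at localhost:2003 ...
graphite.waitUntilReceived() // awaits the expected metrics for up to five seconds
graphite.stop()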
Example 22
Source File: ConcurrentUtil.scala    From sona   with Apache License 2.0
package com.tencent.angel.sona.tree.util

import java.util.concurrent.{Callable, ExecutorService, Executors, Future}

object ConcurrentUtil {
  private[tree] var numThread: Int = 1
  private[tree] var threadPool: ExecutorService = _
  private[tree] val DEFAULT_BATCH_SIZE = 1000000

  private[tree] def reset(parallelism: Int): Unit = {
    ConcurrentUtil.getClass.synchronized {
      this.numThread = parallelism
      this.threadPool = Executors.newFixedThreadPool(parallelism)
    }
  }

  private[tree] def rangeParallel[A](f: (Int, Int) => A, start: Int, end: Int,
                                    batchSize: Int = DEFAULT_BATCH_SIZE): Array[Future[A]] = {
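    // Split [start, end) into batches of batchSize and submit f(batchStart, batchEnd)
    // for each batch, returning one Future per batch in submission order.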
    val futures = Array.ofDim[Future[A]](MathUtil.idivCeil(end - start, batchSize))
    var cur = start
    var threadId = 0
    while (cur < end) {
      val i = cur
      val j = (cur + batchSize) min end
      futures(threadId) = threadPool.submit(new Callable[A] {
        override def call(): A = f(i, j)
      })
      cur = j
      threadId += 1
    }
    futures
  }

  private[tree] def shutdown(): Unit = ConcurrentUtil.getClass.synchronized {
    if (threadPool != null)
      threadPool.shutdown()
  }

} 
Example 23
Source File: Linebacker.scala    From linebacker   with MIT License
package io.chrisdavenport.linebacker

import cats.effect._
import cats.effect.concurrent.Semaphore
import cats.implicits._
import java.util.concurrent.ExecutorService
import scala.concurrent.ExecutionContext

trait Linebacker[F[_]] {

  def blockingContext: ExecutionContext

  // Run `fa` on the blocking context via ContextShift#evalOn. (This member was
  // elided in the original excerpt; its signature is reconstructed, as an
  // assumption, from the overrides below.)
  def blockContextShift[A](fa: F[A])(implicit cs: ContextShift[F]): F[A] =
    cs.evalOn(blockingContext)(fa)

  final def blockCS[A](fa: F[A])(implicit cs: ContextShift[F]): F[A] =
    blockContextShift(fa)
}

object Linebacker {
  def apply[F[_]](implicit ev: Linebacker[F]): Linebacker[F] = ev

  def fromExecutorService[F[_]](es: ExecutorService): Linebacker[F] = new Linebacker[F] {
    def blockingContext = ExecutionContext.fromExecutorService(es)
  }
  def fromExecutionContext[F[_]](ec: ExecutionContext): Linebacker[F] = new Linebacker[F] {
    def blockingContext = ec
  }

  def bounded[F[_]: Concurrent](lb: Linebacker[F], bound: Long): F[Linebacker[F]] = 
    Semaphore[F](bound).map(new BoundedLinebacker(lb, _))

  private class BoundedLinebacker[F[_]: Concurrent](lb: Linebacker[F], s: Semaphore[F]) extends Linebacker[F]{
    def blockingContext: ExecutionContext = lb.blockingContext
    override def blockContextShift[A](fa: F[A])(implicit cs: ContextShift[F]): F[A] =
      s.withPermit(lb.blockContextShift(fa))
  }
} 
Example 24
Source File: DualContext.scala    From linebacker   with MIT License
package io.chrisdavenport.linebacker

import cats.effect._
import cats.effect.concurrent.Semaphore
import cats.implicits._
import java.util.concurrent.ExecutorService
import scala.concurrent.ExecutionContext

trait DualContext[F[_]] extends Linebacker[F] {
  def blockingContext: ExecutionContext
  def contextShift: ContextShift[F]

  def block[A](fa: F[A]): F[A] =
    contextShift.evalOn(blockingContext)(fa)
}

object DualContext {
  def apply[F[_]](implicit ev: DualContext[F]) = ev

  def fromContexts[F[_]](
      cs: ContextShift[F],
      blocking: ExecutionContext): DualContext[F] =
    new DualContext[F] {
      override def blockingContext = blocking
      override def contextShift = cs
    }

  def fromExecutorService[F[_]](
      default: ContextShift[F],
      blocking: ExecutorService): DualContext[F] =
    fromContexts(default, ExecutionContext.fromExecutor(blocking))

  def bounded[F[_]: Concurrent](lb: DualContext[F], bound: Long): F[DualContext[F]] = 
    Semaphore[F](bound).map(new BoundedDualContext(lb, _))

  private class BoundedDualContext[F[_]: Concurrent](dc: DualContext[F], s: Semaphore[F]) extends DualContext[F]{
    def blockingContext: ExecutionContext = dc.blockingContext
    def contextShift: ContextShift[F] = dc.contextShift
    override def blockContextShift[A](fa: F[A])(implicit cs: ContextShift[F]): F[A] =
      s.withPermit(dc.blockContextShift(fa))
    override def block[A](fa: F[A]): F[A] =
      s.withPermit(dc.block(fa))
  }
} 
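A hedged usage sketch of the helpers above with cats-effect 2 (the pool and the IO value are illustrative):

import java.util.concurrent.Executors
import cats.effect.{ContextShift, IO}
import scala.concurrent.ExecutionContext

implicit val cs: ContextShift[IO] = IO.contextShift(ExecutionContext.global)
val blockingPool = Executors.newCachedThreadPool()
val dc: DualContext[IO] = DualContext.fromExecutorService(cs, blockingPool)

// evaluates on the blocking pool, then shifts back to the default context
val io: IO[String] = dc.block(IO(Thread.currentThread.getName))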
Example 25
Source File: standard_thread.scala    From libisabelle   with Apache License 2.0
package isabelle


import java.lang.Thread
import java.util.concurrent.{ExecutorService, ThreadPoolExecutor, TimeUnit, LinkedBlockingQueue, ThreadFactory}


object Standard_Thread
{
  

  final class Delay private[Standard_Thread](
    first: Boolean, delay: => Time, log: Logger, event: => Unit)
  {
    private var running: Option[Event_Timer.Request] = None

    private def run: Unit =
    {
      val do_run = synchronized {
        if (running.isDefined) { running = None; true } else false
      }
      if (do_run) {
        try { event }
        catch { case exn: Throwable if !Exn.is_interrupt(exn) => log(Exn.message(exn)); throw exn }
      }
    }

    def invoke(): Unit = synchronized
    {
      val new_run =
        running match {
          case Some(request) => if (first) false else { request.cancel; true }
          case None => true
        }
      if (new_run)
        running = Some(Event_Timer.request(Time.now() + delay)(run))
    }

    def revoke(): Unit = synchronized
    {
      running match {
        case Some(request) => request.cancel; running = None
        case None =>
      }
    }

    def postpone(alt_delay: Time): Unit = synchronized
    {
      running match {
        case Some(request) =>
          val alt_time = Time.now() + alt_delay
          if (request.time < alt_time && request.cancel) {
            running = Some(Event_Timer.request(alt_time)(run))
          }
        case None =>
      }
    }
  }

  // delayed event after first invocation
  def delay_first(delay: => Time, log: Logger = No_Logger)(event: => Unit): Delay =
    new Delay(true, delay, log, event)

  // delayed event after last invocation
  def delay_last(delay: => Time, log: Logger = No_Logger)(event: => Unit): Delay =
    new Delay(false, delay, log, event)
} 
Example 26
package packt.ch05

import java.util
import java.util.Properties
import java.util.concurrent.ExecutorService
import java.util.concurrent.Executors

import kafka.consumer.ConsumerConfig
import MultiThreadConsumer._

import scala.collection.JavaConversions._

object MultiThreadConsumer {

  private def createConsumerConfig(zookeeper: String, groupId: String): ConsumerConfig = {
    val props = new Properties()
    props.put("zookeeper.connect", zookeeper)
    props.put("group.id", groupId)
    props.put("zookeeper.session.timeout.ms", "500")
    props.put("zookeeper.sync.time.ms", "250")
    props.put("auto.commit.interval.ms", "1000")
    new ConsumerConfig(props)
  }

  def main(args: Array[String]) {
    val zooKeeper = args(0)
    val groupId = args(1)
    val topic = args(2)
    val threadCount = java.lang.Integer.parseInt(args(3))
    val multiThreadHLConsumer = new MultiThreadConsumer(zooKeeper, groupId, topic)
    multiThreadHLConsumer.testMultiThreadConsumer(threadCount)
    try {
      Thread.sleep(10000)
    } catch {
      case ie: InterruptedException =>
    }
    multiThreadHLConsumer.shutdown()
  }
}

class MultiThreadConsumer(zookeeper: String, groupId: String, topic: String) {

  private var executor: ExecutorService = _

  private val consumer = kafka.consumer.Consumer.createJavaConsumerConnector(createConsumerConfig(zookeeper,
    groupId))

  def shutdown() {
    if (consumer != null) consumer.shutdown()
    if (executor != null) executor.shutdown()
  }

  def testMultiThreadConsumer(threadCount: Int) {
    val topicMap = new util.HashMap[String, Integer]()

    // Define thread count for each topic
    topicMap.put(topic, threadCount)

    // Here we use a single topic, but multiple topics could be added
    // to the topic-count map as well
    val consumerStreamsMap = consumer.createMessageStreams(topicMap)
    val streamList = consumerStreamsMap.get(topic)

    // Launching the thread pool
    executor = Executors.newFixedThreadPool(threadCount)

    // Create one Runnable per stream to consume messages
    var count = 0
    for (stream <- streamList) {
      val threadNumber = count
      executor.submit(new Runnable() {

        def run() {
          val consumerIte = stream.iterator()
          while (consumerIte.hasNext)
            println("Thread Number " + threadNumber + ": " + new String(consumerIte.next().message()))
          println("Shutting down Thread Number: " + threadNumber)
        }
      })
      count += 1
    }
  }
} 
Example 27
Source File: TaskFutureBenchmarks.scala    From Scala-High-Performance-Programming   with MIT License
package highperfscala.concurrency.task

import java.util.concurrent.{ExecutorService, Executors, TimeUnit}

import org.openjdk.jmh.annotations.Mode.Throughput
import org.openjdk.jmh.annotations._

import scala.concurrent.{ExecutionContext, Future, Await}
import scala.concurrent.duration.Duration
import scalaz.concurrent.Task

@BenchmarkMode(Array(Throughput))
@OutputTimeUnit(TimeUnit.SECONDS)
@Warmup(iterations = 3, time = 5, timeUnit = TimeUnit.SECONDS)
@Measurement(iterations = 30, time = 10, timeUnit = TimeUnit.SECONDS)
@Fork(value = 1, warmups = 1, jvmArgs = Array("-Xms1G", "-Xmx1G"))
class TaskFutureBenchmarks {

  import TaskFutureBenchmarks._

  @Benchmark
  def mapWithFuture(state: TaskFutureState): Int = {
    implicit val ec = state.context
    val init = Future(0)
    val res = (1 until state.operations).foldLeft(init)((f, _) => f.map(_ + 1))
    Await.result(res, Duration("5 minutes"))
  }

  @Benchmark
  def mapWithTask(state: TaskFutureState): Int = {
    val init = Task(0)(state.es)
    val res = (1 until state.operations).foldLeft(init)((t, _) => t.map(_ + 1))
    res.unsafePerformSync
  }

  @Benchmark
  def flatMapWithFuture(state: TaskFutureState): Int = {
    implicit val ec = state.context
    val init = Future(0)
    val res = (1 until state.operations).foldLeft(init)((f, _) =>
      f.flatMap(i => Future(i + 1)))
    Await.result(res, Duration("5 minutes"))
  }

  @Benchmark
  def flatMapWithTask(state: TaskFutureState): Int = {
    val init = Task(0)(state.es)
    val res = (1 until state.operations).foldLeft(init)((t, _) =>
      t.flatMap(i => Task(i + 1)(state.es)))
    res.unsafePerformSync
  }

}

object TaskFutureBenchmarks {

  @State(Scope.Benchmark)
  class TaskFutureState {

    @Param(Array("5", "10", "100"))
    var operations: Int = 0

    var es: ExecutorService = null
    var context: ExecutionContext = null

    @Setup(Level.Trial)
    def setup(): Unit = {
      es = Executors.newFixedThreadPool(20)
      context = ExecutionContext.fromExecutor(es)
    }

    @TearDown(Level.Trial)
    def tearDown(): Unit = {
      es.shutdownNow()
    }
  }

} 
Example 28
package highperfscala.concurrency.blocking

import java.util.concurrent.{ExecutorService, Executors, TimeUnit}

import highperfscala.concurrency.blocking.BlockingExample.{ClientId, Order, Ticker}
import org.openjdk.jmh.annotations.Mode.Throughput
import org.openjdk.jmh.annotations._

import scala.concurrent.duration.Duration
import scala.concurrent.{Await, ExecutionContext, Future}

@BenchmarkMode(Array(Throughput))
@OutputTimeUnit(TimeUnit.SECONDS)
@Warmup(iterations = 3, time = 5, timeUnit = TimeUnit.SECONDS)
@Measurement(iterations = 30, time = 10, timeUnit = TimeUnit.SECONDS)
@Fork(value = 1, warmups = 1, jvmArgs = Array("-Xms1G", "-Xmx1G"))
class BlockingFutureBenchmarks {

  import BlockingFutureBenchmarks._

  @Benchmark
  def withDefaultContext(state: BlockingFutureState): List[List[Order]] = {
    val futures = (1 until state.operations).map{_ =>
      BlockingExample.JdbcOrderRepository.findBuyOrders(
        state.clientId, state.ticker
      )(state.defaultC)
    }

    implicit val ex = state.defaultC
    Await.result(
      Future.sequence(futures).map(_.toList),
      Duration("5 minutes")
    )
  }

  @Benchmark
  def withDedicatedContext(state: BlockingFutureState): List[List[Order]] = {
    val futures = (1 until state.operations).map{_ =>
      BlockingExample.JdbcOrderRepository.findBuyOrders(
        state.clientId, state.ticker
      )(state.dedicatedC)
    }

    implicit val ex = state.defaultC  // we use CPU-bound context for computations below
    Await.result(
      Future.sequence(futures).map(_.toList),
      Duration("5 minutes")
    )
  }

}

object BlockingFutureBenchmarks {

  @State(Scope.Benchmark)
  class BlockingFutureState {

    @Param(Array("10", "1000"))
    var operations: Int = 0

    val clientId = ClientId(12345)
    val ticker = Ticker("FOO")

    var defaultC: ExecutionContext = null
    var dedicatedC: ExecutionContext = null
    var es: ExecutorService = null

    @Setup(Level.Trial)
    def setup(): Unit = {
      defaultC = scala.concurrent.ExecutionContext.global
      es = {
        val i = Runtime.getRuntime.availableProcessors * 20
        Executors.newFixedThreadPool(i)
      }
      dedicatedC = ExecutionContext.fromExecutorService(es)
    }

    @TearDown(Level.Trial)
    def tearDown(): Unit = {
      es.shutdownNow()
    }

  }

} 
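The benchmark above contrasts the global execution context with a dedicated pool for blocking calls. A third option, not measured here, is to stay on the global fork-join pool but mark blocking sections with scala.concurrent.blocking, which lets the pool spawn compensating threads. A minimal sketch; findBuyOrdersBlocking is a hypothetical stand-in for the repository call:

import scala.concurrent.{blocking, Future}
import scala.concurrent.ExecutionContext.Implicits.global

// Hypothetical stand-in for a slow JDBC query.
def findBuyOrdersBlocking(): List[String] = blocking {
  Thread.sleep(100)
  List("order-1", "order-2")
}

val orders: Future[List[String]] = Future(findBuyOrdersBlocking())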
Example 31
Source File: FutureAwaitWithFailFastFn.scala    From kafka-connect-common   with Apache License 2.0
package com.datamountaineer.streamreactor.connect.concurrent

import java.util.concurrent.{ExecutorService, TimeUnit}

import com.typesafe.scalalogging.StrictLogging

import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.duration._
import scala.concurrent.{Await, Future, Promise}
import scala.util.Failure

object FutureAwaitWithFailFastFn extends StrictLogging {

  def apply(executorService: ExecutorService, futures: Seq[Future[Unit]], duration: Duration): Unit = {
    //make sure we ask the executor to shutdown to ensure the process exits
    executorService.shutdown()

    val promise = Promise[Boolean]()

    //stop on the first failure
    futures.foreach { f =>
      f.failed.foreach { case t =>
        if (promise.tryFailure(t)) {
          executorService.shutdownNow()
        }
      }
    }

    val fut = Future.sequence(futures)
    fut.foreach { case t =>
      if (promise.trySuccess(true)) {
        val failed = executorService.shutdownNow()
        if (failed.size() > 0) {
          logger.error(s"${failed.size()} task have failed.")
        }
      }
    }

    Await.ready(promise.future, duration).value match {
      case Some(Failure(t)) =>
        executorService.awaitTermination(1, TimeUnit.MINUTES)
        //throw the underlying error
        throw t

      case _ =>
        executorService.awaitTermination(1, TimeUnit.MINUTES)
    }
  }

  def apply[T](executorService: ExecutorService, futures: Seq[Future[T]], duration: Duration = 1.hours): Seq[T] = {
    //make sure we ask the executor to shutdown to ensure the process exits
    executorService.shutdown()

    val promise = Promise[Boolean]()

    //stop on the first failure
    futures.foreach { f =>
      f.failed.foreach { case t =>
        if (promise.tryFailure(t)) {
          executorService.shutdownNow()
        }
      }
    }

    val fut = Future.sequence(futures)
    fut.foreach { case t =>
      if (promise.trySuccess(true)) {
        val failed = executorService.shutdownNow()
        if (failed.size() > 0) {
          logger.error(s"${failed.size()} task have failed.")
        }
      }
    }

    Await.ready(promise.future, duration).value match {
      case Some(Failure(t)) =>
        executorService.awaitTermination(1, TimeUnit.MINUTES)
        //throw the underlying error
        throw t

      case _ =>
        executorService.awaitTermination(1, TimeUnit.MINUTES)
        //return the result from each of the futures
        Await.result(Future.sequence(futures), 1.minute)
    }
  }
} 
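A hypothetical call site for the second overload: run a handful of tasks on a dedicated pool and fail fast if any of them throws. Note the helper always shuts the pool down, so the ExecutorService cannot be reused afterwards.

import java.util.concurrent.Executors
import scala.concurrent.Future
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.duration._

val es = Executors.newFixedThreadPool(3)
val futures = Seq(Future(1), Future(2), Future(3))
val results: Seq[Int] = FutureAwaitWithFailFastFn(es, futures, 30.seconds)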
Example 32
Source File: DI_05_LifecycleHooks.scala    From airframe   with Apache License 2.0
package wvlet.airframe.examples.di

import java.util.concurrent.{Executor, ExecutorService, Executors}

import wvlet.log.LogSupport


object DI_05_LifecycleHooks extends App {
  import wvlet.airframe._

  trait MyApp extends LogSupport {
    private val threadManager = bind[ExecutorService] { Executors.newCachedThreadPool() }
      .onStart { x => info(f"Started a thread manager: ${x.hashCode()}%x") }
      .onShutdown { x =>
        info(f"Shutting down the thread manager: ${x.hashCode()}%x")
        x.shutdown()
      }
  }

  val d = newDesign

  d.build[MyApp] { app =>
    // Thread manager will start here
  }
  // Thread manager will be shutdown here.
} 
Example 33
Source File: ServiceCollieImpl.scala    From ohara   with Apache License 2.0
package oharastream.ohara.agent.docker

import java.util.concurrent.ExecutorService

import oharastream.ohara.agent.{ClusterKind, ClusterStatus, _}
import oharastream.ohara.client.configurator.ContainerApi.ContainerInfo
import oharastream.ohara.client.configurator.NodeApi.Node
import oharastream.ohara.common.setting.ObjectKey
import oharastream.ohara.common.util.Releasable

import scala.concurrent.duration.Duration
import scala.concurrent.{Await, ExecutionContext, Future}

// accessible to configurator
private[ohara] class ServiceCollieImpl(cacheTimeout: Duration, dataCollie: DataCollie, cacheThreadPool: ExecutorService)
    extends ServiceCollie {
  override val containerClient: DockerClient = DockerClient(dataCollie)

  private[this] val clusterCache: ServiceCache = ServiceCache.builder
    .frequency(cacheTimeout)
    // TODO: 5 * timeout is enough ??? by chia
    .supplier(() => Await.result(doClusters(ExecutionContext.fromExecutor(cacheThreadPool)), cacheTimeout * 5))
    // Giving some time to process to complete the build and then we can remove it from cache safety.
    .lazyRemove(cacheTimeout)
    .build()

  override val zookeeperCollie: ZookeeperCollie = new BasicCollieImpl(dataCollie, containerClient, clusterCache)
    with ZookeeperCollie
  override val brokerCollie: BrokerCollie = new BasicCollieImpl(dataCollie, containerClient, clusterCache)
    with BrokerCollie
  override val workerCollie: WorkerCollie = new BasicCollieImpl(dataCollie, containerClient, clusterCache)
    with WorkerCollie
  override val streamCollie: StreamCollie = new BasicCollieImpl(dataCollie, containerClient, clusterCache)
    with StreamCollie
  override val shabondiCollie: ShabondiCollie = new BasicCollieImpl(dataCollie, containerClient, clusterCache)
    with ShabondiCollie

  private[this] def doClusters(
    implicit executionContext: ExecutionContext
  ): Future[Seq[ClusterStatus]] =
    containerClient
      .containers()
      .map { allContainers =>
        def parse(
          kind: ClusterKind,
          toClusterStatus: (ObjectKey, Seq[ContainerInfo]) => ClusterStatus
        ): Seq[ClusterStatus] =
          allContainers
            .filter(container => Collie.matched(container.name, kind))
            .map(container => Collie.objectKeyOfContainerName(container.name) -> container)
            .groupBy(_._1)
            .map {
              case (clusterKey, value) => clusterKey -> value.map(_._2)
            }
            .map {
              case (clusterKey, containers) => toClusterStatus(clusterKey, containers)
            }
            .toSeq

        parse(ClusterKind.ZOOKEEPER, zookeeperCollie.toStatus) ++
          parse(ClusterKind.BROKER, brokerCollie.toStatus) ++
          parse(ClusterKind.WORKER, workerCollie.toStatus) ++
          parse(ClusterKind.STREAM, streamCollie.toStatus) ++
          parse(ClusterKind.SHABONDI, shabondiCollie.toStatus)
      }

  override def close(): Unit = {
    Releasable.close(containerClient)
    Releasable.close(clusterCache)
    Releasable.close(() => cacheThreadPool.shutdownNow())
  }

  /**
    * Verifies that the docker daemon on the node is reachable and reports resources.
    */
  override def verifyNode(node: Node)(implicit executionContext: ExecutionContext): Future[String] =
    containerClient
      .resources()
      .map { resources =>
        if (resources.getOrElse(node.hostname, Seq.empty).nonEmpty)
          s"succeed to check the docker resources on ${node.name}"
        else throw new IllegalStateException(s"the docker on ${node.hostname} is unavailable")
      }
} 
Example 34
Source File: BlockchainCacheSpecification.scala    From matcher   with MIT License
package com.wavesplatform.dex.grpc.integration.caches

import java.time.Duration
import java.util.concurrent.{ConcurrentHashMap, ExecutorService, Executors}

import mouse.any.anySyntaxMouse
import org.scalatest.BeforeAndAfterAll
import org.scalatest.matchers.should.Matchers
import org.scalatest.wordspec.AnyWordSpecLike

import scala.collection.JavaConverters._
import scala.concurrent._

class BlockchainCacheSpecification extends AnyWordSpecLike with Matchers with BeforeAndAfterAll {

  private val executor: ExecutorService                          = Executors.newCachedThreadPool
  implicit private val blockingContext: ExecutionContextExecutor = ExecutionContext.fromExecutor(executor)

  private class BlockchainCacheTest(loader: String => Future[String], expiration: Option[Duration], invalidationPredicate: String => Boolean)
      extends BlockchainCache[String, String](loader, expiration, invalidationPredicate)

  private def createCache(loader: String => Future[String],
                          expiration: Option[Duration] = None,
                          invalidationPredicate: String => Boolean = _ => false): BlockchainCacheTest = {
    new BlockchainCacheTest(loader, expiration, invalidationPredicate)
  }

  override def afterAll(): Unit = {
    super.afterAll()
    executor.shutdownNow()
  }

  private val andThenAwaitTimeout = 300

  "BlockchainCache" should {

    "not keep failed futures" in {

      val goodKey = "good key"
      val badKey  = "gRPC Error"

      val keyAccessMap = new ConcurrentHashMap[String, Int] unsafeTap (m => { m.put(goodKey, 0); m.put(badKey, 0) })
      val gRPCError    = new RuntimeException("gRPC Error occurred")

      val cache =
        createCache(
          key => {
            (if (key == badKey) Future.failed(gRPCError) else Future.successful(s"value = $key")) unsafeTap { _ =>
              keyAccessMap.computeIfPresent(key, (_, prev) => prev + 1)
            }
          }
        )

      val badKeyAccessCount = 10

      Await.result(
        (1 to badKeyAccessCount).foldLeft { Future.successful("") } { (prev, _) =>
          for {
            _ <- prev
            _ <- cache get goodKey
            r <- cache get badKey recover { case _ => "sad" }
          } yield { Thread.sleep(andThenAwaitTimeout); r }
        },
        scala.concurrent.duration.Duration.Inf
      )

      keyAccessMap.get(goodKey) shouldBe 1
      keyAccessMap.get(badKey) should be > 1
    }

    "not keep values according to the predicate" in {

      val goodKey = "111"
      val badKey  = "222"

      val keyAccessMap = new ConcurrentHashMap[String, Int](Map(goodKey -> 0, badKey -> 0).asJava)

      val cache = createCache(
        key => { keyAccessMap.computeIfPresent(key, (_, prev) => prev + 1); Future.successful(key) },
        invalidationPredicate = _.startsWith("2")
      )

      Await.result(
        (1 to 10).foldLeft { Future.successful("") } { (prev, _) =>
          for {
            _ <- prev
            _ <- cache get goodKey
            r <- cache get badKey
          } yield blocking { Thread.sleep(andThenAwaitTimeout); r }
        },
        scala.concurrent.duration.Duration.Inf
      )

      keyAccessMap.get(goodKey) shouldBe 1
      keyAccessMap.get(badKey) should be > 1
    }
  }
} 
Example 35
Source File: ParallelExecutor.scala    From nyaya   with GNU Lesser General Public License v2.1
package nyaya.test

import java.util.concurrent.{Callable, ExecutorService, Executors, Future, TimeUnit}
import java.util.concurrent.atomic.AtomicInteger
import nyaya.gen.ThreadNumber
import nyaya.prop.Prop
import ParallelExecutor._
import PTest._
import Executor.{DataCtx, Data}

// TODO data SampleSize = TotalSamples(n) | Fn(qty|%, gensize|%) | PerWorker(sampleSize)

object ParallelExecutor {
  val defaultThreadCount = 1.max(Runtime.getRuntime.availableProcessors - 1)

  def merge[A](a: RunState[A], b: RunState[A]): RunState[A] = {
    val runs = a.runs max b.runs
    (a.success, b.success) match {
      case (false, true) => RunState(runs, a.result)
      case _             => RunState(runs, b.result)
    }
  }
}

case class ParallelExecutor(workers: Int = defaultThreadCount) extends Executor {

  val debugPrefixes = (0 until workers).toVector.map(i => s"Worker #$i: ")

  override def run[A](p: Prop[A], g: Data[A], S: Settings): RunState[A] = {
    val sss = {
      var rem = S.sampleSize.value
      var i = workers
      var v = Vector.empty[SampleSize]
      while(i > 0) {
        val p = rem / i
        v :+= SampleSize(p)
        rem -= p
        i -= 1
      }
      v
    }

    if (S.debug) {
      val szs = sss.map(_.value)
      println(s"Samples/Worker: ${szs.mkString("{", ",", "}")} = Σ${szs.sum}")
    }

    val ai = new AtomicInteger(0)
    def task(worker: Int) = mkTask {
      val dp = debugPrefixes(worker)
      val data = g(DataCtx(sss(worker), ThreadNumber(worker), S.seed, dp))
      testN(p, data, () => ai.incrementAndGet(), S)
    }
    runAsync2(workers, task)
  }

  override def prove[A](p: Prop[A], d: Domain[A], S: Settings): RunState[A] = {
    val threads = workers min d.size

    val ai = new AtomicInteger(0)
    def task(worker: Int) = mkTask {
      proveN(p, d, worker, threads, _ => ai.incrementAndGet, S)
    }
    runAsync2(threads, task)
  }

  private[this] def mkTask[A](f: => RunState[A]) = new Callable[RunState[A]] {
    override def call(): RunState[A] = f
  }

  private[this] def runAsync2[A](threads: Int, f: Int => Callable[RunState[A]]): RunState[A] =
    runAsync(es => (0 until threads).toList.map(es submit f(_)))

  private[this] def runAsync[A](start: ExecutorService => List[Future[RunState[A]]]): RunState[A] = {
    val es: ExecutorService = Executors.newFixedThreadPool(workers)
    val fs = start(es)
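    // shutdown() only stops new submissions; the tasks submitted above keep
    // running and are joined by the blocking get() calls that follow.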
    es.shutdown()
    val rss = fs.map(_.get())
    es.awaitTermination(1, TimeUnit.MINUTES)
    rss.foldLeft(RunState.empty[A])(merge)
  }
} 
Example 36
Source File: RxScala.scala    From scala-concurrency-playground   with MIT License
package org.zalando.benchmarks

import java.util.concurrent.{ExecutorService, Executors}

import akka.actor.ActorSystem
import rx.lang.scala.Observable

import scala.concurrent.{ExecutionContext, Future}

class RxScala(system: ActorSystem) {
  import ComputationFollowedByAsyncPublishing._

  def benchmark(coreFactor: Int): Unit = {
    val executor: ExecutorService = Executors.newCachedThreadPool()
    implicit val ec = ExecutionContext.fromExecutor(executor)
    try {
      Observable
        .from(1 to numTasks)
        .map(Job)
        .flatMap(numWorkers(coreFactor), job => Observable.from(Future(Computer compute job)))
        .flatMap(r => Observable.from(Publisher publish (r, system))(system dispatcher))
        .foldLeft(0) { case (s, r) => s + computeResult(r) }
        .toBlocking
        .foreach(println)
    } finally {
      executor.shutdown()
    }
  }
} 
Example 37
Source File: EventConsumerManager.scala    From Linkis   with Apache License 2.0
package com.webank.wedatasphere.linkis.resourcemanager.schedule

import java.util.concurrent.ExecutorService

import com.webank.wedatasphere.linkis.common.utils.{Logging, Utils}
import com.webank.wedatasphere.linkis.resourcemanager.event.RMEvent
import com.webank.wedatasphere.linkis.scheduler.SchedulerContext
import com.webank.wedatasphere.linkis.scheduler.listener.ConsumerListener
import com.webank.wedatasphere.linkis.scheduler.queue.{ConsumerManager, LoopArrayQueue}

import scala.collection.mutable

// NOTE: the class header and field declarations were dropped from the original
// listing; the lines below are reconstructed from how the members are used.
class RMEventConsumerManager(schedulerContext: SchedulerContext,
                             maxParallelismUsers: Int) extends ConsumerManager with Logging {

  private val RM_CONTEXT_CONSTRUCTOR_LOCK = new Object()
  private val consumerGroupMap = new mutable.HashMap[String, RMEventConsumer]()
  private val consumerListenerMap = new mutable.HashMap[String, RMConsumerListener]()
  private var consumerListener: ConsumerListener = _
  private var executorService: ExecutorService = _

  override def setConsumerListener(consumerListener: ConsumerListener): Unit =
    this.consumerListener = consumerListener
  override def getOrCreateConsumer(groupName: String) = {
    RM_CONTEXT_CONSTRUCTOR_LOCK.synchronized {
      var tmpConsumer = consumerGroupMap.get(groupName).getOrElse(null)
      if (tmpConsumer == null) {
        tmpConsumer = createConsumer(groupName)
      }
      tmpConsumer
    }
  }

  override protected def createConsumer(groupName: String) = {
    val group = schedulerContext.getOrCreateGroupFactory.getOrCreateGroup(groupName)
    val consumer = new RMEventConsumer(schedulerContext, getOrCreateExecutorService, group)
    consumer.start()
    val listener = new RMConsumerListenerImpl
    listener.setConsumer(consumer)
    consumer.setConsumeQueue(new LoopArrayQueue(group))
    consumer.setRmConsumerListener(listener)
    consumerGroupMap.put(groupName, consumer)
    consumerListenerMap.put(groupName, listener)
    if (consumerListener != null) consumerListener.onConsumerCreated(consumer)

    consumer
  }

  protected def createConsumerFromConsumer(oldConsumer: RMEventConsumer) = {
    var newConsumer: RMEventConsumer = null
    if (oldConsumer != null) {
      info("Create new consumer from old consumer " + oldConsumer.getGroup.getGroupName)
      val groupName = oldConsumer.getGroup.getGroupName
      val group = schedulerContext.getOrCreateGroupFactory.getOrCreateGroup(groupName)
      newConsumer = new RMEventConsumer(schedulerContext, getOrCreateExecutorService, group)
      newConsumer.start()
      val listener = new RMConsumerListenerImpl
      listener.setConsumer(newConsumer)
      newConsumer.setConsumeQueue(oldConsumer.getConsumeQueue)
      newConsumer.setRmConsumerListener(listener)
      consumerListenerMap.update(groupName, listener)
      if (consumerListener != null) consumerListener.onConsumerCreated(newConsumer)

    }

    newConsumer
  }

  override def destroyConsumer(groupName: String) = {
    val tmpConsumer = consumerGroupMap.get(groupName).getOrElse(null)
    if (tmpConsumer != null) {
      tmpConsumer.shutdown()
      consumerGroupMap.remove(groupName)
      if (consumerListener != null) consumerListener.onConsumerDestroyed(tmpConsumer)
    }
  }

  override def shutdown() = {
    Utils.tryThrow({
      consumerGroupMap.values.toArray.foreach(x => x.shutdown())
      executorService.shutdown()
    })(t => new Exception("ConsumerManager shutdown exception", t))
  }

  override def listConsumers() = consumerGroupMap.values.toArray

  override def getOrCreateExecutorService: ExecutorService = {
    RM_CONTEXT_CONSTRUCTOR_LOCK.synchronized {
      if (executorService == null) {
        executorService = Utils.newCachedThreadPool(3 * maxParallelismUsers + 1, "Engine-Scheduler-ThreadPool-", true)
        executorService
      } else {
        executorService
      }
    }
  }
} 
Example 38
Source File: RMEventConsumer.scala    From Linkis   with Apache License 2.0
package com.webank.wedatasphere.linkis.resourcemanager.schedule

import java.util.concurrent.{ExecutorService, Future}

import com.webank.wedatasphere.linkis.common.utils.Utils
import com.webank.wedatasphere.linkis.resourcemanager.event.RMEvent
import com.webank.wedatasphere.linkis.resourcemanager.event.metric.{MetricRMEvent, MetricRMEventExecutor}
import com.webank.wedatasphere.linkis.resourcemanager.event.notify.{NotifyRMEvent, NotifyRMEventExecutor}
import com.webank.wedatasphere.linkis.scheduler.SchedulerContext
import com.webank.wedatasphere.linkis.scheduler.queue._

import scala.collection.mutable.ArrayBuffer


class RMEventConsumer(schedulerContext: SchedulerContext,
                      executeService: ExecutorService) extends Consumer(schedulerContext, executeService) {
  private var queue: ConsumeQueue = _
  private var group: Group = _
  private var maxRunningJobsNum = 1000
  // Not yet used
  private val runningJobs = new Array[SchedulerEvent](maxRunningJobsNum)
  private val executorManager = schedulerContext.getOrCreateExecutorManager
  private var rmConsumerListener : RMConsumerListener = _
  var future: Future[_] = _

  def this(schedulerContext: SchedulerContext, executeService: ExecutorService, group: Group) = {
    this(schedulerContext, executeService)
    this.group = group
    maxRunningJobsNum = group.getMaximumCapacity
  }

  def start():Unit = future = executeService.submit(this)

  def setRmConsumerListener(rmConsumerListener: RMConsumerListener): Unit ={
    this.rmConsumerListener = rmConsumerListener
  }

  override def setConsumeQueue(consumeQueue: ConsumeQueue) = {
    queue = consumeQueue
  }

  override def getConsumeQueue = queue

  override def getGroup = group

  override def setGroup(group: Group) = {
    this.group = group
  }

  override def getRunningEvents = getEvents(_.isRunning)

  private def getEvents(op: SchedulerEvent => Boolean): Array[SchedulerEvent] = {
    val result = ArrayBuffer[SchedulerEvent]()
    runningJobs.filter(_ != null).filter(x => op(x)).foreach(result += _)
    result.toArray
  }

  override def run() = {
    Thread.currentThread().setName(s"${toString}Thread")
    info(s"$toString thread started!")
    while (!terminate) {
      Utils.tryAndError(loop())
      Utils.tryQuietly(Thread.sleep(10))
    }
    info(s"$toString thread stopped!")
  }

  def loop(): Unit = {
    var event = queue.take()
    while (!event.turnToScheduled()) {
      event = queue.take()
    }
    if(rmConsumerListener != null){rmConsumerListener.beforeEventExecute(this,event.asInstanceOf[RMEvent])}
    Utils.tryAndError({
      val executor = executorManager.askExecutor(event)
      if (executor.isDefined) {
        event match {
          case x: MetricRMEvent =>{
            Utils.tryQuietly(executor.get.asInstanceOf[MetricRMEventExecutor].execute(new EventJob(x)))
          }
          case y: NotifyRMEvent =>{
            Utils.tryQuietly(executor.get.asInstanceOf[NotifyRMEventExecutor].execute(new EventJob(y)))
          }
        }
      }
    })
    if(rmConsumerListener != null){rmConsumerListener.afterEventExecute(this,event.asInstanceOf[RMEvent])}
  }

  override def shutdown() = {
    future.cancel(true)
    super.shutdown()
  }
} 
Example 39
Source File: ParallelConsumerManager.scala    From Linkis   with Apache License 2.0
package com.webank.wedatasphere.linkis.scheduler.queue.parallelqueue

import java.util.concurrent.ExecutorService

import com.webank.wedatasphere.linkis.common.utils.Utils
import com.webank.wedatasphere.linkis.scheduler.listener.ConsumerListener
import com.webank.wedatasphere.linkis.scheduler.queue._
import com.webank.wedatasphere.linkis.scheduler.queue.fifoqueue.FIFOUserConsumer

import scala.collection.mutable


class ParallelConsumerManager(maxParallelismUsers: Int) extends ConsumerManager {

  private val UJES_CONTEXT_CONSTRUCTOR_LOCK = new Object()
  private var consumerListener: Option[ConsumerListener] = None

  private var executorService: ExecutorService = _

  private val consumerGroupMap = new mutable.HashMap[String, FIFOUserConsumer]()

  override def setConsumerListener(consumerListener: ConsumerListener) = {
    this.consumerListener = Some(consumerListener)
  }

  override def getOrCreateExecutorService = if(executorService != null) executorService
    else UJES_CONTEXT_CONSTRUCTOR_LOCK.synchronized {
      if (executorService == null) {
        executorService = Utils.newCachedThreadPool(5 * maxParallelismUsers + 1, "Engine-Scheduler-ThreadPool-", true)
      }
      executorService
  }

  override def getOrCreateConsumer(groupName: String) = if(consumerGroupMap.contains(groupName)) consumerGroupMap(groupName)
    else UJES_CONTEXT_CONSTRUCTOR_LOCK.synchronized {
      consumerGroupMap.getOrElse(groupName, {
        val newConsumer = createConsumer(groupName)
        val group = getSchedulerContext.getOrCreateGroupFactory.getOrCreateGroup(groupName)
        newConsumer.setGroup(group)
        newConsumer.setConsumeQueue(new LoopArrayQueue(group))
        consumerGroupMap.put(groupName, newConsumer)
        consumerListener.foreach(_.onConsumerCreated(newConsumer))
        newConsumer.start()
        newConsumer
      })
  }

  override protected def createConsumer(groupName: String) = {
    val group = getSchedulerContext.getOrCreateGroupFactory.getOrCreateGroup(groupName)
    new FIFOUserConsumer(getSchedulerContext, getOrCreateExecutorService, group)
  }

  override def destroyConsumer(groupName: String) =
    consumerGroupMap.get(groupName).foreach { tmpConsumer =>
      tmpConsumer.shutdown()
      consumerGroupMap.remove(groupName)
      consumerListener.foreach(_.onConsumerDestroyed(tmpConsumer))
    }

  override def shutdown() = {
    consumerGroupMap.iterator.foreach(x => x._2.shutdown())
  }

  override def listConsumers() = consumerGroupMap.values.toArray
} 
Example 40
Source File: FIFOConsumerManager.scala    From Linkis   with Apache License 2.0
package com.webank.wedatasphere.linkis.scheduler.queue.fifoqueue


import java.util.concurrent.{ExecutorService, ThreadPoolExecutor}

import com.webank.wedatasphere.linkis.common.utils.Utils
import com.webank.wedatasphere.linkis.scheduler.SchedulerContext
import com.webank.wedatasphere.linkis.scheduler.exception.SchedulerErrorException
import com.webank.wedatasphere.linkis.scheduler.listener.ConsumerListener
import com.webank.wedatasphere.linkis.scheduler.queue.{Consumer, ConsumerManager, Group, LoopArrayQueue}


class FIFOConsumerManager(groupName: String) extends ConsumerManager {

  def this() = this("FIFO_GROUP")

  private var group: Group = _
  private var executorService: ThreadPoolExecutor = _
  private var consumerListener: ConsumerListener = _
  private var consumerQueue: LoopArrayQueue = _
  private var consumer: Consumer = _


  override def setSchedulerContext(schedulerContext: SchedulerContext): Unit = {
    super.setSchedulerContext(schedulerContext)
    group = getSchedulerContext.getOrCreateGroupFactory.getOrCreateGroup(groupName)
    executorService = group match {
      case g: FIFOGroup => Utils.newCachedThreadPool(g.getMaxRunningJobs + 2, groupName + "-Thread-")
      case _ => throw new SchedulerErrorException(13000, s"FIFOConsumerManager needs a FIFOGroup, but ${group.getClass} is not supported.")
    }
    consumerQueue = new LoopArrayQueue(getSchedulerContext.getOrCreateGroupFactory.getOrCreateGroup(null))
    consumer = createConsumer(null)
  }

  override def setConsumerListener(consumerListener: ConsumerListener): Unit = this.consumerListener = consumerListener

  override def getOrCreateExecutorService: ExecutorService = executorService

  override def getOrCreateConsumer(groupName: String): Consumer = consumer


  override protected def createConsumer(groupName: String): Consumer = {
    val group = getSchedulerContext.getOrCreateGroupFactory.getOrCreateGroup(null)
    val consumer = new FIFOUserConsumer(getSchedulerContext, getOrCreateExecutorService, group)
    consumer.setGroup(group)
    consumer.setConsumeQueue(consumerQueue)
    if(consumerListener != null) consumerListener.onConsumerCreated(consumer)
    consumer.start()
    consumer
  }

  override def destroyConsumer(groupName: String): Unit = {
    //ignore
  }

  override def shutdown(): Unit = {
    if(consumerListener != null) consumerListener.onConsumerDestroyed(consumer)
    consumer.shutdown()
    executorService.shutdownNow()
  }

  override def listConsumers(): Array[Consumer] = Array(consumer)
} 
Example 41
Source File: TestUtils.scala    From iodb   with Creative Commons Zero v1.0 Universal
package io.iohk.iodb

import java.io.File
import java.nio.ByteBuffer
import java.util.concurrent.{ExecutorService, TimeUnit}
import java.util.logging.Level

import scala.util.Random

// NOTE: the `object TestUtils` header was dropped from the original listing;
// members such as tempDir and deleteRecur, referenced below, are elided here.
object TestUtils {
  def runningTime[A](computation: => A): (Long, A) = {
    val s = System.currentTimeMillis()
    val res = computation
    (System.currentTimeMillis() - s, res)
  }

  def fromLong(id: Long): ByteArrayWrapper = {
    val b = ByteBuffer.allocate(8)
    b.putLong(0, id)
    ByteArrayWrapper(b.array())
  }


  def runnable(f: => Unit): Runnable =
    () => {
      try {
        f
      } catch {
        case e: Throwable => {
          Utils.LOG.log(Level.SEVERE, "Background task failed", e)
        }
      }
    }

  def waitForFinish(exec: ExecutorService): Unit = {
    exec.shutdown()
    exec.awaitTermination(400, TimeUnit.DAYS)
  }


  def withTempDir(ff: (File) => Unit) {
    val iFile = TestUtils.tempDir()
    try {
      ff(iFile)
    } finally {
      TestUtils.deleteRecur(iFile)
    }
  }
} 
Example 42
Source File: Publisher.scala    From eel-sdk   with Apache License 2.0
package io.eels.datastream

import java.util.concurrent.atomic.{AtomicInteger, AtomicReference}
import java.util.concurrent.{ExecutorService, LinkedBlockingQueue, TimeUnit}

import com.sksamuel.exts.Logging
import com.sksamuel.exts.collection.BlockingQueueConcurrentIterator
import com.sksamuel.exts.concurrent.ExecutorImplicits._

import scala.collection.concurrent.TrieMap

trait Publisher[T] {
  def subscribe(subscriber: Subscriber[T])
}

object Publisher extends Logging {

  /**
    * Merges the given publishers into a single publisher. Subscribers receive
    * elements from all upstreams; `sentinel` marks the end of the merged stream.
    */
  def merge[T](publishers: Seq[Publisher[T]], sentinel: T)(implicit executor: ExecutorService): Publisher[T] = {

    new Publisher[T] {
      override def subscribe(s: Subscriber[T]): Unit = {

        // subscribers to the returned publisher will be fed from an intermediate queue
        val queue = new LinkedBlockingQueue[Either[Throwable, T]](DataStream.DefaultBufferSize)

        // to keep track of how many subscribers are yet to finish; only once all upstream
        // publishers have finished will this subscriber be completed.
        val outstanding = new AtomicInteger(publishers.size)

        // we make a collection of all the subscriptions, so if there's an error at any point in the
        // merge, we can cancel all upstream producers
        val subscriptions = TrieMap.empty[Subscription, Int]

        // this cancellable can be used to cancel all the subscriptions
        val subscription = new Subscription {
          override def cancel(): Unit = subscriptions.keys.foreach(_.cancel)
        }

        // status flag set when an error occurs; the subscription loops below watch for it
        val errorRef = new AtomicReference[Throwable](null)
        def terminate(t: Throwable): Unit = {
          logger.error(s"Error in merge", t)
          errorRef.set(t)
          subscription.cancel()
          queue.clear()
          queue.put(Right(sentinel))
        }

        // each subscriber will occupy its own thread, on the provided executor
        publishers.foreach { publisher =>
          executor.submit {
            try {
              publisher.subscribe(new Subscriber[T] {
                override def subscribed(sub: Subscription): Unit = if (sub != null) subscriptions.put(sub, 1)                
                override def next(t: T): Unit = {
                  var success = true
                  do {
                    success = queue.offer(Right(t), 100, TimeUnit.MILLISECONDS)
                  } while(!success && errorRef.get == null)
                }
                override def error(t: Throwable): Unit = terminate(t)
                override def completed(): Unit = {
                  if (outstanding.decrementAndGet() == 0) {
                    logger.debug("All subscribers have finished; marking queue with sentinel")
                    queue.put(Right(sentinel))
                  }
                }
              })
            } catch {
              case t: Throwable => terminate(t)
            }
          }
        }

        try {
          s.subscribed(subscription)
          BlockingQueueConcurrentIterator(queue, Right(sentinel)).takeWhile(_ => errorRef.get == null).foreach {
            case Left(t) => s.error(t)
            case Right(t) => s.next(t)
          }
          // once we've had an error that's it, we don't complete the subscriber
          if (errorRef.get == null)
            s.completed()
          else 
            s.error(errorRef.get)
        } catch {
          case t: Throwable =>
            logger.error("Error in merge subscriber", t)
            subscription.cancel()
            s.error(t)
        }

        logger.debug("Merge subscriber has completed")
      }
    }
  }
} 
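A hypothetical usage sketch for merge, assuming only the Subscriber and Subscription signatures visible above: two single-shot publishers merged with -1 as the sentinel.

import java.util.concurrent.{ExecutorService, Executors}

// Hypothetical single-shot publisher, defined only for this sketch.
def fromSeq(xs: Seq[Int]): Publisher[Int] = new Publisher[Int] {
  override def subscribe(s: Subscriber[Int]): Unit = {
    s.subscribed(new Subscription { override def cancel(): Unit = () })
    xs.foreach(s.next)
    s.completed()
  }
}

implicit val es: ExecutorService = Executors.newCachedThreadPool()

val merged = Publisher.merge(Seq(fromSeq(1 to 3), fromSeq(4 to 6)), sentinel = -1)
merged.subscribe(new Subscriber[Int] {
  override def subscribed(sub: Subscription): Unit = ()
  override def next(t: Int): Unit = println(t)
  override def error(t: Throwable): Unit = t.printStackTrace()
  override def completed(): Unit = es.shutdown()
})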
Example 43
Source File: NonblockingPar.scala    From learning-fpinscala   with MIT License
package com.satansk.fpinscala.parallelism

import java.util.concurrent.{Callable, CountDownLatch, ExecutorService}
import java.util.concurrent.atomic.AtomicReference

// NOTE: the original listing carried a spurious IDE import
// (com.sun.glass.ui.MenuItem.Callback) and dropped the enclosing object with the
// Par and Future definitions; both are repaired below based on the usage.
object NonblockingPar {

  trait Future[A] {
    private[parallelism] def apply(k: A => Unit): Unit
  }

  type Par[A] = ExecutorService => Future[A]
  def unit[A](a: A): Par[A] =
    _ ⇒ new Future[A] {
      def apply(callback: A ⇒ Unit): Unit = callback(a)
    }

  def fork[A](a: ⇒ Par[A]): Par[A] =
    es ⇒ new Future[A] {
      def apply(callback: (A) ⇒ Unit): Unit =
        eval(es)(a(es)(callback))
    }

  def eval(es: ExecutorService)(r: ⇒ Unit): Unit =
    es.submit(new Callable[Unit] {
      def call = r
    })

} 
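The imports above pull in CountDownLatch and AtomicReference even though this snippet never uses them; in the fpinscala book they power the blocking run interpreter. A sketch of that function, assuming the reconstructed definitions above (it must live in the same package to reach the private[parallelism] apply):

package com.satansk.fpinscala.parallelism

import java.util.concurrent.atomic.AtomicReference
import java.util.concurrent.{CountDownLatch, ExecutorService}

object NonblockingParRunner {
  import NonblockingPar._

  // Blocks the calling thread until the Par delivers its value to the callback.
  def run[A](es: ExecutorService)(p: Par[A]): A = {
    val ref   = new AtomicReference[A]
    val latch = new CountDownLatch(1)
    p(es) { a => ref.set(a); latch.countDown() }
    latch.await()
    ref.get
  }
}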
Example 44
Source File: NamedExecutors.scala    From mango   with Apache License 2.0
package com.kakao.mango.concurrent

import java.util.concurrent.Executors._
import java.util.concurrent.ForkJoinPool.ForkJoinWorkerThreadFactory
import java.util.concurrent.atomic.AtomicInteger
import java.util.concurrent.{ForkJoinWorkerThread, ExecutorService, ScheduledExecutorService, ForkJoinPool}
import scala.language.implicitConversions


object NamedExecutors {

  implicit def toRich(e: ExecutorService): RichExecutorService = new RichExecutorService(e)

  implicit def toRich(e: ScheduledExecutorService): RichScheduledExecutorService = new RichScheduledExecutorService(e)

  def scheduled(name: String, daemon: Boolean = true): RichScheduledExecutorService = {
    newSingleThreadScheduledExecutor(NamedThreadFactory(name, daemon))
  }

  def scheduledPool(name: String, size: Int, daemon: Boolean = true): RichScheduledExecutorService = {
    newScheduledThreadPool(size, NamedThreadFactory(name, daemon))
  }

  def cached(name: String, daemon: Boolean = true): RichExecutorService = {
    newCachedThreadPool(NamedThreadFactory(name, daemon))
  }

  def fixed(name: String, size: Int, daemon: Boolean = true): RichExecutorService = {
    newFixedThreadPool(size, NamedThreadFactory(name, daemon))
  }

  def single(name: String, daemon: Boolean = true): RichExecutorService = {
    newSingleThreadExecutor(NamedThreadFactory(name, daemon))
  }

  def forkJoin(name: String, size: Int, daemon: Boolean = true, asyncMode: Boolean = false): RichExecutorService = {
    val counter = new AtomicInteger()
    new ForkJoinPool(size, new ForkJoinWorkerThreadFactory {
      override def newThread(pool: ForkJoinPool): ForkJoinWorkerThread = {
        val thread = new ForkJoinWorkerThread(pool) {}
        thread.setName(s"$name-${counter.incrementAndGet()}")
        thread.setDaemon(daemon)
        thread
      }
    }, null, asyncMode)
  }

} 
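The factories above all funnel through NamedThreadFactory, which is referenced but not shown. The sketch below illustrates the naming pattern with a minimal, hypothetical stand-in for that factory:

import java.util.concurrent.{Executors, ThreadFactory}
import java.util.concurrent.atomic.AtomicInteger

// Hypothetical minimal stand-in for the NamedThreadFactory referenced above.
case class NamedThreadFactory(name: String, daemon: Boolean = true) extends ThreadFactory {
  private val counter = new AtomicInteger()
  override def newThread(r: Runnable): Thread = {
    val t = new Thread(r, s"$name-${counter.incrementAndGet()}")
    t.setDaemon(daemon)
    t
  }
}

val pool = Executors.newFixedThreadPool(4, NamedThreadFactory("indexer"))
pool.execute(() => println(Thread.currentThread.getName)) // prints e.g. "indexer-1"
pool.shutdown()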
Example 45
Source File: Gen.scala    From arrows   with Apache License 2.0
package benchmarks

import scala.annotation.tailrec
import scala.util.Random
import java.util.concurrent.ExecutorService

trait Gen[T] {

  def apply(dist: List[(Gen.Op, Int)])(implicit s: ExecutorService): T = {
    val depth = 100
    val rnd = new Random(1)
    import rnd._

    val values = dist.collect { case (g: Gen.Value, i) => (g, i) }
    val transforms = dist.collect { case (g: Gen.Transform, i) => (g, i) }

    require(values.nonEmpty)

    def choose[O <: Gen.Op](l: List[(O, Int)]): O = {
      @tailrec
      def find(n: Int, prev: Int, l: List[(O, Int)]): O = {
        l match {
          case Nil => ???
          case (o, i) :: tail =>
            if (prev + i > n) o
            else find(n, prev + i, tail)
        }
      }

      val max = l.map(_._2).sum
      find(nextInt(max), 0, l)
    }

    val ex = new Exception

    def genValue: T =
      choose(values) match {
        case Gen.Async =>
          async(s.submit(_))
        case Gen.Sync =>
          sync
        case Gen.Failure =>
          failure(ex)
      }

    def genTransform(depth: Int, t: T): T =
      depth match {
        case 0 => t
        case _ =>
          choose(transforms) match {
            case Gen.Map =>
              val i = nextInt
              genTransform(depth - 1, map(t, _ + i))
            case Gen.FlatMap =>
              val d = nextInt(depth)
              val n = genTransform(depth - d, genValue)
              genTransform(d, flatMap(t, n))
            case Gen.Handle =>
              val i = nextInt
              genTransform(depth - 1, handle(t, i))
          }
      }

    genTransform(depth, genValue)
  }

  def sync: T
  def async(schedule: Runnable => Unit): T
  def failure(ex: Throwable): T
  def map(t: T, f: Int => Int): T
  def flatMap(t: T, f: T): T
  def handle(t: T, i: Int): T
}

object Gen {
  sealed trait Op

  sealed trait Value extends Op
  case object Async extends Value
  case object Sync extends Value
  case object Failure extends Value

  sealed trait Transform extends Op
  case object Map extends Transform
  case object FlatMap extends Transform
  case object Handle extends Transform
} 
Example 46
Source File: ExecutorCloser.scala    From openwhisk   with Apache License 2.0
package org.apache.openwhisk.common
import java.io.Closeable
import java.util.concurrent.{ExecutorService, TimeUnit}

import akka.event.slf4j.SLF4JLogging

import scala.concurrent.duration._

case class ExecutorCloser(service: ExecutorService, timeout: FiniteDuration = 5.seconds)
    extends Closeable
    with SLF4JLogging {
  override def close(): Unit = {
    try {
      service.shutdown()
      service.awaitTermination(timeout.toSeconds, TimeUnit.SECONDS)
    } catch {
      case e: InterruptedException =>
        log.error("Error while shutting down the ExecutorService", e)
        Thread.currentThread.interrupt()
    } finally {
      if (!service.isShutdown) {
        log.warn(s"ExecutorService `$service` didn't shutdown property. Will be forced now.")
      }
      service.shutdownNow()
    }
  }
} 
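A small usage sketch: wrap pool teardown in ExecutorCloser so the two-phase shutdown (graceful, then forced) lives in one place.

import java.util.concurrent.Executors
import scala.concurrent.duration._

val es = Executors.newFixedThreadPool(4)
try {
  es.execute(() => println("work"))
} finally {
  ExecutorCloser(es, 2.seconds).close()
}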
Example 47
Source File: BasicShabondiTest.scala    From ohara   with Apache License 2.0
package oharastream.ohara.shabondi

import java.util
import java.util.concurrent.{ExecutorService, Executors}

import com.google.common.util.concurrent.ThreadFactoryBuilder
import com.typesafe.scalalogging.Logger
import oharastream.ohara.common.data.Row
import oharastream.ohara.common.setting.TopicKey
import oharastream.ohara.common.util.{CommonUtils, Releasable}
import oharastream.ohara.kafka.TopicAdmin
import oharastream.ohara.shabondi.common.ShabondiUtils
import oharastream.ohara.shabondi.sink.SinkConfig
import oharastream.ohara.shabondi.source.SourceConfig
import oharastream.ohara.testing.WithBroker
import org.junit.After

import scala.collection.{immutable, mutable}
import scala.concurrent.{ExecutionContext, Future}
import scala.jdk.CollectionConverters._

private[shabondi] abstract class BasicShabondiTest extends WithBroker {
  protected val log = Logger(this.getClass())

  protected val brokerProps            = testUtil.brokersConnProps
  protected val topicAdmin: TopicAdmin = TopicAdmin.of(brokerProps)

  protected val newThreadPool: () => ExecutorService = () =>
    Executors.newCachedThreadPool(new ThreadFactoryBuilder().setNameFormat(this.getClass.getSimpleName + "-").build())

  protected val countRows: (util.Queue[Row], Long, ExecutionContext) => Future[Long] =
    (queue, executionTime, ec) =>
      Future {
        log.debug("countRows begin...")
        val baseTime = System.currentTimeMillis()
        var count    = 0L
        var running  = true
        while (running) {
          val row = queue.poll()
          if (row != null) count += 1 else Thread.sleep(100)
          running = (System.currentTimeMillis() - baseTime) < executionTime
        }
        log.debug("countRows done")
        count
      }(ec)

  protected def createTopicKey = TopicKey.of("default", CommonUtils.randomString(5))

  protected def createTestTopic(topicKey: TopicKey): Unit =
    topicAdmin.topicCreator
      .numberOfPartitions(1)
      .numberOfReplications(1.toShort)
      .topicKey(topicKey)
      .create

  protected def defaultSourceConfig(
    sourceToTopics: Seq[TopicKey] = Seq.empty[TopicKey]
  ): SourceConfig = {
    import ShabondiDefinitions._
    val args = mutable.ArrayBuffer(
      GROUP_DEFINITION.key + "=" + CommonUtils.randomString(5),
      NAME_DEFINITION.key + "=" + CommonUtils.randomString(3),
      SHABONDI_CLASS_DEFINITION.key + "=" + classOf[ShabondiSource].getName,
      CLIENT_PORT_DEFINITION.key + "=8080",
      BROKERS_DEFINITION.key + "=" + testUtil.brokersConnProps
    )
    if (sourceToTopics.nonEmpty)
      args += s"${SOURCE_TO_TOPICS_DEFINITION.key}=${TopicKey.toJsonString(sourceToTopics.asJava)}"

    val rawConfig = ShabondiUtils.parseArgs(args.toArray)
    new SourceConfig(rawConfig)
  }

  protected def defaultSinkConfig(
    sinkFromTopics: Seq[TopicKey] = Seq.empty[TopicKey]
  ): SinkConfig = {
    import ShabondiDefinitions._
    val args = mutable.ArrayBuffer(
      GROUP_DEFINITION.key + "=" + CommonUtils.randomString(5),
      NAME_DEFINITION.key + "=" + CommonUtils.randomString(3),
      SHABONDI_CLASS_DEFINITION.key + "=" + classOf[ShabondiSink].getName,
      CLIENT_PORT_DEFINITION.key + "=8080",
      BROKERS_DEFINITION.key + "=" + testUtil.brokersConnProps
    )
    if (sinkFromTopics.nonEmpty)
      args += s"${SINK_FROM_TOPICS_DEFINITION.key}=${TopicKey.toJsonString(sinkFromTopics.asJava)}"
    val rawConfig = ShabondiUtils.parseArgs(args.toArray)
    new SinkConfig(rawConfig)
  }

  protected def singleRow(columnSize: Int, rowId: Int = 0): Row =
    KafkaSupport.singleRow(columnSize, rowId)

  protected def multipleRows(rowSize: Int): immutable.Iterable[Row] =
    KafkaSupport.multipleRows(rowSize)

  @After
  def tearDown(): Unit = {
    Releasable.close(topicAdmin)
  }
} 
Example 48
Source File: HttpServer.scala    From temperature-machine   with Apache License 2.0
package bad.robot.temperature.server

import java.lang.Math._
import java.time.Clock
import java.util.concurrent.Executors._
import java.util.concurrent.ExecutorService

import bad.robot.temperature.JsonToCsv
import bad.robot.temperature.ds18b20.{SensorFile, SensorReader}
import bad.robot.temperature.rrd.Host
import bad.robot.temperature.task.TemperatureMachineThreadFactory
import fs2.Stream
import fs2.Scheduler
import fs2.StreamApp._
import cats.implicits._

import scala.concurrent.ExecutionContext
import cats.effect.IO
import org.http4s.HttpService
import org.http4s.server.blaze.BlazeBuilder
import org.http4s.server.middleware.{CORS, GZip}

object HttpServer {
  def apply(port: Int, monitored: List[Host]): HttpServer = {
    new HttpServer(port, monitored)
  }
}

class HttpServer(port: Int, monitored: List[Host]) {

  private val DefaultHttpExecutorService: ExecutorService = {
    newFixedThreadPool(max(4, Runtime.getRuntime.availableProcessors), TemperatureMachineThreadFactory("http-server"))
  }

  def asStream(temperatures: AllTemperatures, connections: Connections): Stream[IO, ExitCode] = {
    import scala.concurrent.ExecutionContext.Implicits.global // todo replace with explicit one

    for {
      scheduler <- Scheduler[IO](corePoolSize = 1)
      exitCode  <- build(temperatures, connections, scheduler).serve
    } yield exitCode
  }

  private[server] def build(temperatures: AllTemperatures, connections: Connections, scheduler: Scheduler): BlazeBuilder[IO] = {
    BlazeBuilder[IO]
      .withWebSockets(true)
      .withExecutionContext(ExecutionContext.fromExecutorService(DefaultHttpExecutorService))
      .bindHttp(port, "0.0.0.0")
      .mountService(services(scheduler, temperatures, connections), "/")
  }

  private def services(scheduler: Scheduler, temperatures: AllTemperatures, connections: Connections): HttpService[IO] = {
    GZip(
      CORS(
        TemperatureEndpoint(scheduler, SensorReader(Host.local, SensorFile.find()), temperatures, connections) <+>
          ConnectionsEndpoint(connections)(Clock.systemDefaultZone) <+>
          LogEndpoint() <+>
          ExportEndpoint(JsonFile.load, JsonToCsv.DefaultTimeFormatter) <+>
          VersionEndpoint() <+>
          StaticFiles() <+>
          StaticResources()
      )
    )
  }
} 
Example 49
Source File: RerunnableBenchmark.scala    From catbird   with Apache License 2.0
package io.catbird.benchmark

import com.twitter.util.{ Await, Future, FuturePool }
import io.catbird.util.Rerunnable
import java.util.concurrent.{ ExecutorService, Executors, TimeUnit }
import org.openjdk.jmh.annotations._


@State(Scope.Thread)
@BenchmarkMode(Array(Mode.Throughput))
@OutputTimeUnit(TimeUnit.SECONDS)
class RerunnableBenchmark {
  val count: Int = 100000
  val numbers: IndexedSeq[Int] = 0 to count
  var es: ExecutorService = _
  var pool: FuturePool = _

  @Setup
  def initPool(): Unit = {
    es = Executors.newFixedThreadPool(4)
    pool = FuturePool(es)
  }

  @TearDown
  def shutdownPool(): Unit = es.shutdown()

  @Benchmark
  def sumIntsF: Int = Await.result(
    numbers.foldLeft(Future(0)) {
      case (acc, i) => acc.flatMap(prev => Future(prev + i))
    }
  )

  @Benchmark
  def sumIntsR: Int = Await.result(
    numbers
      .foldLeft(Rerunnable(0)) {
        case (acc, i) => acc.flatMap(prev => Rerunnable(prev + i))
      }
      .run
  )

  @Benchmark
  def sumIntsPF: Int = Await.result(
    numbers.foldLeft(pool(0)) {
      case (acc, i) => acc.flatMap(prev => pool(prev + i))
    }
  )

  @Benchmark
  def sumIntsPR: Int = Await.result(
    numbers
      .foldLeft(Rerunnable.withFuturePool(pool)(0)) {
        case (acc, i) => acc.flatMap(prev => Rerunnable.withFuturePool(pool)(prev + i))
      }
      .run
  )
} 
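Outside the benchmark, the FuturePool wiring is the same: the pool merely wraps an ExecutorService, so shutdown stays with the caller. A minimal sketch:

import java.util.concurrent.Executors
import com.twitter.util.{Await, FuturePool}

val es = Executors.newFixedThreadPool(2)
val pool = FuturePool(es)

val answer = pool { 21 * 2 }  // runs on the wrapped pool
println(Await.result(answer)) // 42
es.shutdown()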
Example 50
Source File: RerunnableContextShift.scala    From catbird   with Apache License 2.0
package io.catbird.util.effect

import cats.effect.ContextShift
import com.twitter.util.{ Future, FuturePool, Promise }
import io.catbird.util.Rerunnable

import scala.Unit
import java.lang.Runnable
import java.util.concurrent.ExecutorService

import scala.concurrent.{ ExecutionContext, ExecutionContextExecutorService }

// NOTE: the companion object header was elided in the original listing; this is a
// minimal reconstruction consistent with the `RerunnableContextShift.global` reference below.
object RerunnableContextShift {

  final def fromExecutionContext(ec: ExecutionContext): ContextShift[Rerunnable] =
    new RerunnableContextShift(ec)

  final def global: ContextShift[Rerunnable] =
    fromExecutionContext(ExecutionContext.global)

  object Implicits {
    final implicit def global: ContextShift[Rerunnable] = RerunnableContextShift.global
  }
}

final private[effect] class RerunnableContextShift private (ec: ExecutionContext) extends ContextShift[Rerunnable] {
  private final lazy val futurePool = FuturePool.interruptible(ec.asInstanceOf[ExecutionContextExecutorService])

  override def shift: Rerunnable[Unit] =
    Rerunnable.withFuturePool(futurePool)(()) // This is a bit of a hack, but it will have to do

  override def evalOn[A](targetEc: ExecutionContext)(fa: Rerunnable[A]): Rerunnable[A] =
    for {
      r <- executeOn(targetEc)(fa).liftToTry
      _ <- shift
      a <- Rerunnable.fromFuture(Future.value(r).lowerFromTry)
    } yield a

  private def executeOn[A](targetEc: ExecutionContext)(fa: Rerunnable[A]): Rerunnable[A] =
    Rerunnable.fromFuture {
      val p = Promise[A]()

      targetEc.execute(new Runnable {
        override def run(): Unit =
          fa.run.proxyTo[A](p)
      })

      p
    }
} 
Example 51
Source File: TraceFriendlyThreadPoolExecutorSpec.scala    From money   with Apache License 2.0
package com.comcast.money.core.concurrent

import java.util.concurrent.{ Callable, ExecutorService }

import com.comcast.money.api.SpanId
import com.comcast.money.core.SpecHelpers
import com.comcast.money.core.internal.SpanLocal
import org.scalatest.mockito.MockitoSugar
import org.scalatest.{ Matchers, OneInstancePerTest, WordSpecLike }
import org.slf4j.MDC

class TraceFriendlyThreadPoolExecutorSpec
  extends WordSpecLike
  with MockitoSugar with Matchers with ConcurrentSupport with OneInstancePerTest with SpecHelpers {

  val executor: ExecutorService = TraceFriendlyThreadPoolExecutor.newCachedThreadPool

  "TraceFriendlyThreadPoolExecutor cachedThreadPool" should {
    "propagate the current span local value" in {
      val traceId = new SpanId("1", 2L, 3L)
      SpanLocal.push(testSpan(traceId))

      val future = executor.submit(testCallable)

      future.get shouldEqual Some(traceId)
      SpanLocal.clear()
    }
    "propagate no span value if none is present" in {
      SpanLocal.clear()

      val future = executor.submit(testCallable)

      future.get shouldEqual None
      SpanLocal.current shouldEqual None
    }
    "propagate only the current span id value" in {
      val traceId1 = new SpanId()
      val traceId2 = new SpanId()
      SpanLocal.push(testSpan(traceId1))
      SpanLocal.push(testSpan(traceId2))

      val future = executor.submit(testCallable)
      future.get shouldEqual Some(traceId2)
    }
    "propagate MDC" in {
      val traceId = new SpanId("1", 2L, 3L)
      SpanLocal.push(testSpan(traceId))
      MDC.put("foo", "bar")

      val mdcCallable = new Callable[String] {
        override def call(): String = MDC.get("foo")
      }

      val future = executor.submit(mdcCallable)

      future.get shouldEqual "bar"
      SpanLocal.clear()
    }
  }
  "TraceFriendlyThreadPoolExecutor fixedThreadPool" should {
    val threadPool: TraceFriendlyThreadPoolExecutor = TraceFriendlyThreadPoolExecutor.newFixedThreadPool(1)
      .asInstanceOf[TraceFriendlyThreadPoolExecutor]

    "created the pool with the specified number of threads" in {
      threadPool.getCorePoolSize shouldEqual 1
    }
  }
} 
Example 52
Source File: KeyVaultADALAuthenticator.scala    From azure-kusto-spark   with Apache License 2.0
package com.microsoft.kusto.spark.utils

import java.net.MalformedURLException
import java.util.concurrent.{ExecutionException, ExecutorService, Executors, Future}

import com.microsoft.aad.adal4j.{AuthenticationContext, AuthenticationResult, ClientCredential}
import com.microsoft.azure.keyvault.KeyVaultClient
import com.microsoft.azure.keyvault.authentication.KeyVaultCredentials


class KeyVaultADALAuthenticator(clientId: String, clientKey: String) {

  def getAuthenticatedClient: KeyVaultClient = {
    // Creates the KeyVaultClient using the created credentials.
    new KeyVaultClient(createCredentials)
  }

  private def createCredentials: KeyVaultCredentials = {
    new KeyVaultCredentials() { //Callback that supplies the token type and access token on request.
      override def doAuthenticate(authorization: String, resource: String, scope: String): String = {
        try {
          val authResult = getAccessToken(authorization, resource)
          authResult.getAccessToken
        } catch {
          case e: Exception =>
            KustoDataSourceUtils.logError("KeyVaultADALAuthenticator", "Exception trying to access Key Vault:" + e.getMessage)
            ""
        }
      }
    }
  }

  @throws[InterruptedException]
  @throws[ExecutionException]
  @throws[MalformedURLException]
  private def getAccessToken(authorization: String, resource: String): AuthenticationResult  = {
    var result: AuthenticationResult = null
    var service: ExecutorService = null

    //Starts a service to fetch access token.
    try {
      service = Executors.newFixedThreadPool(1)
      val context = new AuthenticationContext(authorization, false, service)

      //Acquires token based on client ID and client secret.
      var future: Future[AuthenticationResult] = null
      if (clientId != null && clientKey != null) {
        val credentials = new ClientCredential(clientId, clientKey)
        future = context.acquireToken(resource, credentials, null)
      }

      result = future.get
    } finally service.shutdown()
    if (result == null) throw new RuntimeException("Authentication results were null.")
    result
  }
} 
Example 53
Source File: OapRuntimeSuite.scala    From OAP   with Apache License 2.0
package org.apache.spark.sql.oap

import java.util.concurrent.{Executors, ExecutorService, TimeUnit}

import org.apache.spark.sql.QueryTest
import org.apache.spark.sql.test.oap.SharedOapLocalClusterContext

class OapRuntimeSuite extends QueryTest with SharedOapLocalClusterContext {

  test("OapRuntime is created once") {
    val oapruntime = new Array[OapRuntime](2)
    val threadPool: ExecutorService = Executors.newFixedThreadPool(2)
    try {
      for (i <- 0 to 1) {
        threadPool.execute(new Runnable {
          override def run(): Unit = {
            oapruntime(i) = OapRuntime.getOrCreate
          }
        })
      }
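      // awaitTermination has not been preceded by shutdown(), so it simply waits
      // up to 1s here, giving both tasks time to finish; shutdown happens in finally.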
      threadPool.awaitTermination(1000, TimeUnit.MILLISECONDS)
    } finally {
      threadPool.shutdown()
    }
    assert(oapruntime(0) == oapruntime(1))
  }

  test("get sparkSession from OapRuntime") {
    assert(OapRuntime.getOrCreate.sparkSession == spark)
  }
} 
Example 54
Source File: RpcServerImpl.scala    From finagle-protobuf   with Apache License 2.0
package com.twitter.finagle.protobuf.rpc.impl

import com.twitter.finagle.protobuf.rpc.channel.ProtoBufCodec
import com.twitter.finagle.protobuf.rpc.{RpcServer, Util}
import com.twitter.util._
import com.twitter.util.Duration
import com.twitter.util.FuturePool
import com.twitter.finagle.builder.{Server, ServerBuilder, ServerConfig}
import java.net.InetSocketAddress
import java.util.logging.Logger
import scala.None
import java.util.concurrent.Executors
import java.util.concurrent.ExecutorService
import com.google.common.base.Preconditions
import com.twitter.finagle.protobuf.rpc.ServiceExceptionHandler
import com.google.protobuf.DynamicMessage
import com.google.protobuf.DynamicMessage.Builder
import com.google.protobuf._
import com.google.protobuf.Descriptors._
import com.twitter.util.Promise

class RpcServerImpl(sb: ServerBuilder[(String, Message), (String, Message), Any, Any, Any], port: Int, service: Service, handler: ServiceExceptionHandler[Message], executorService: ExecutorService) extends RpcServer {

  private val log = Logger.getLogger(getClass.toString)

  Preconditions.checkNotNull(executorService)
  Preconditions.checkNotNull(handler)

  private val execFuturePool = new ExecutorServiceFuturePool(executorService)

  private val server: Server = ServerBuilder.safeBuild(ServiceDispatcher(service, handler, execFuturePool),
    sb
      .codec(new ProtoBufCodec(service))
      .name(getClass().getName())
      .bindTo(new InetSocketAddress(port)))

  def close(d: Duration) = {
    server.close(d)
  }
}

class ServiceDispatcher(service: com.google.protobuf.Service, handler: ServiceExceptionHandler[Message], futurePool: FuturePool) extends com.twitter.finagle.Service[(String, Message), (String, Message)] {

  private val log = Logger.getLogger(getClass.toString)

  def apply(request: (String, Message)) = {

    val methodName = request._1
    val reqMessage = request._2

    Util.log("Request", methodName, reqMessage)
    val m = service.getDescriptorForType().findMethodByName(methodName)
    if (m == null) {
      throw new java.lang.AssertionError("Should never happen, we already decoded " + methodName)
    }

    val promise = new Promise[(String, Message)]()

    // dispatch to the service method
    val task = () => {
      try {
        service.callMethod(m, null, reqMessage, new RpcCallback[Message]() {

          def run(msg: Message) = {
            Util.log("Response", methodName, msg)
            promise.setValue((methodName, msg))
          }

        })
      } catch {
        case e: RuntimeException => {
          log.warning("#apply# Exception: "+e.getMessage)
          if (handler.canHandle(e)) {
            promise.setValue((methodName, handler.handle(e, constructEmptyResponseMessage(m))))
          }
        }
      }
    }
    futurePool(task())
    promise
  }

  def constructEmptyResponseMessage(m: MethodDescriptor): Message = {
    val outputType = m.getOutputType()
    DynamicMessage.newBuilder(outputType).build()
  }
}

object ServiceDispatcher {
  def apply(service: com.google.protobuf.Service, handler: ServiceExceptionHandler[Message], futurePool: FuturePool): ServiceDispatcher = {
    new ServiceDispatcher(service, handler, futurePool)
  }
} 
Example 55
Source File: RpcFactoryImpl.scala    From finagle-protobuf   with Apache License 2.0
package com.twitter.finagle.protobuf.rpc.impl

import com.twitter.finagle.protobuf.rpc.RpcFactory
import com.twitter.finagle.protobuf.rpc.RpcControllerWithOnFailureCallback
import com.twitter.finagle.protobuf.rpc.RpcServer
import com.twitter.finagle.builder.ServerBuilder
import com.twitter.finagle.builder.ClientBuilder
import com.twitter.util.Duration
import com.google.protobuf.RpcController
import com.google.protobuf.RpcChannel
import com.google.protobuf.Message
import com.google.protobuf.Service
import java.util.concurrent.ExecutorService
import com.twitter.finagle.protobuf.rpc.ServiceExceptionHandler
import com.twitter.finagle.protobuf.rpc.ExceptionResponseHandler

class RpcFactoryImpl extends RpcFactory {

  def createServer(
      sb: ServerBuilder[(String, Message), (String, Message), Any, Any, Any],
      port: Int,
      service: Service,
      handler: ServiceExceptionHandler[Message],
      executorService: ExecutorService): RpcServer =
    new RpcServerImpl(sb, port, service, handler, executorService)

  def createStub[T <: Service](
      cb: ClientBuilder[(String, Message), (String, Message), Any, Any, Any],
      service: { def newStub(c: RpcChannel): T },
      handler: ExceptionResponseHandler[Message],
      executorService: ExecutorService): T = {
    service.newStub(new RpcChannelImpl(cb, service.asInstanceOf[T], handler, executorService))
  }

  def createController(): RpcController = {
    new RpcControllerWithOnFailureCallback()
  }

  def release(stub: {def getChannel(): RpcChannel}) {
    stub.getChannel().asInstanceOf[RpcChannelImpl].release()
  }
} 
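createStub and release accept structural types ({ def newStub(c: RpcChannel): T }, { def getChannel(): RpcChannel }), which Scala resolves via reflection at the call site; modern compilers expect scala.language.reflectiveCalls to be enabled for this. A small stand-alone sketch of the same mechanism (names are illustrative only):

import java.util.concurrent.Executors

import scala.language.reflectiveCalls

object StructuralTypeSketch extends App {
  // Any value exposing a matching shutdown() member satisfies this type.
  def quietShutdown(pool: { def shutdown(): Unit }): Unit = pool.shutdown()

  quietShutdown(Executors.newSingleThreadExecutor())
}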
Example 56
Source File: RpcChannelImpl.scala    From finagle-protobuf   with Apache License 2.0
package com.twitter.finagle.protobuf.rpc.impl

import java.util.concurrent.ExecutorService
import java.util.logging.Logger

import com.google.protobuf.Descriptors.MethodDescriptor
import com.google.protobuf.{Message, RpcCallback, RpcChannel, RpcController, Service}
import com.twitter.finagle.ChannelClosedException
import com.twitter.finagle.builder.ClientBuilder
import com.twitter.finagle.protobuf.rpc.channel.ProtoBufCodec
import com.twitter.finagle.protobuf.rpc.{ExceptionResponseHandler, RpcControllerWithOnFailureCallback, Util}
import com.twitter.util.FuturePool

class RpcChannelImpl(
    cb: ClientBuilder[(String, Message), (String, Message), Any, Any, Any],
    s: Service,
    handler: ExceptionResponseHandler[Message],
    executorService: ExecutorService) extends RpcChannel {

  private val log = Logger.getLogger(getClass.toString)

  private val futurePool = FuturePool(executorService)

  private val client: com.twitter.finagle.Service[(String, Message), (String, Message)] = cb
    .codec(new ProtoBufCodec(s))
    .unsafeBuild()

  def callMethod(m: MethodDescriptor, controller: RpcController,
                 request: Message, responsePrototype: Message,
                 done: RpcCallback[Message]): Unit = {
    // retries is a workaround for ChannelClosedException raised when servers shut down.
    val retries = 3

    callMethod(m, controller, request, responsePrototype, done, retries)
  }

  def callMethod(m: MethodDescriptor, controller: RpcController,
                 request: Message, responsePrototype: Message,
                 done: RpcCallback[Message], retries: Int): Unit = {

    Util.log("Request", m.getName(), request)
    val req = (m.getName(), request)

    client(req) onSuccess {
      result =>
        Util.log("Response", m.getName(), result._2)
        futurePool({
          handle(done, controller, result._2)
        })
    } onFailure {
      e =>
        log.warning("#callMethod# Failed. "+ e.getMessage)
        e match {
          case cc: ChannelClosedException => if (retries > 1) {
            log.warning("#callMethod# Retrying.")
            callMethod(m, controller, request, responsePrototype, done, retries - 1)
          } else {
            controller.asInstanceOf[RpcControllerWithOnFailureCallback].setFailed(e)
          }
          case _ => controller.asInstanceOf[RpcControllerWithOnFailureCallback].setFailed(e)
        }
    }
  }

  def handle(done: RpcCallback[Message], controller: RpcController, m: Message) {
    if (handler.canHandle(m)) {
      controller.asInstanceOf[RpcControllerWithOnFailureCallback].setFailed(handler.handle(m))
    } else {
      done.run(m)
    }
  }

  def release() {
     client.close()
  }
} 
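The bounded retry on ChannelClosedException above is a recursive count-down. The same shape, expressed against plain scala.concurrent Futures, as a hedged generic sketch (the retry-on-any-exception policy here is an assumption for brevity, not the connector's actual policy):

import scala.concurrent.{ExecutionContext, Future}

object Retry {
  // Re-runs `f` on failure until `attempts` is exhausted, then propagates the error.
  def withRetries[A](attempts: Int)(f: => Future[A])(implicit ec: ExecutionContext): Future[A] =
    f.recoverWith { case _ if attempts > 1 => withRetries(attempts - 1)(f) }
}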
Example 57
Source File: ExecutorServiceWrapper.scala    From monix-nio   with Apache License 2.0
package monix.nio.internal

import java.util
import java.util.concurrent.{ AbstractExecutorService, ExecutorService, TimeUnit }

import monix.execution.schedulers.{ ReferenceScheduler, SchedulerService }
import monix.execution.{ Cancelable, ExecutionModel, Scheduler }

import scala.collection.JavaConverters._
import scala.concurrent.duration.Duration
import scala.concurrent.{ Await, ExecutionContextExecutorService }

// Note: the wrapper class body was elided in this excerpt; only the
// companion object's helper scheduler is shown.
private[nio] object ExecutorServiceWrapper {

  private val currentThread: Scheduler =
    new ReferenceScheduler {
      import monix.execution.Scheduler.global
      def execute(r: Runnable): Unit = r.run()
      def reportFailure(t: Throwable): Unit = throw t
      def scheduleOnce(initialDelay: Long, unit: TimeUnit, r: Runnable): Cancelable =
        global.scheduleOnce(initialDelay, unit, r)
      def executionModel: ExecutionModel =
        ExecutionModel.Default
    }
} 
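Going in the opposite direction to this wrapper — adapting a JDK ExecutorService into a Scala ExecutionContext — needs no custom code; the standard library covers it. A minimal sketch:

import java.util.concurrent.Executors

import scala.concurrent.duration._
import scala.concurrent.{Await, ExecutionContext, ExecutionContextExecutorService, Future}

object EcFromExecutorService extends App {
  val underlying = Executors.newFixedThreadPool(2)
  implicit val ec: ExecutionContextExecutorService =
    ExecutionContext.fromExecutorService(underlying)

  val sum = Future(1 + 1)
  println(Await.result(sum, 1.second))

  ec.shutdown() // delegates to the underlying pool
}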
Example 58
Source File: Compactor.scala    From carbondata   with Apache License 2.0
package org.apache.carbondata.spark.rdd

import java.util
import java.util.concurrent.ExecutorService

import scala.collection.JavaConverters._

import org.apache.spark.sql.SQLContext
import org.apache.spark.sql.execution.command.CompactionModel

import org.apache.carbondata.common.logging.{LogService, LogServiceFactory}
import org.apache.carbondata.core.statusmanager.LoadMetadataDetails
import org.apache.carbondata.processing.loading.TableProcessingOperations
import org.apache.carbondata.processing.loading.model.CarbonLoadModel
import org.apache.carbondata.processing.merger.CarbonDataMergerUtil

abstract class Compactor(carbonLoadModel: CarbonLoadModel,
    compactionModel: CompactionModel,
    executor: ExecutorService,
    sqlContext: SQLContext,
    storeLocation: String) {

  val LOGGER = LogServiceFactory.getLogService(this.getClass.getCanonicalName)

  def executeCompaction(): Unit

  def identifySegmentsToBeMerged(): java.util.List[LoadMetadataDetails] = {
    val customSegmentIds: util.List[String] = if (compactionModel.customSegmentIds.isDefined) {
      compactionModel.customSegmentIds.get.asJava
    } else {
      new util.ArrayList[String]()
    }
    CarbonDataMergerUtil
      .identifySegmentsToBeMerged(carbonLoadModel,
        compactionModel.compactionSize,
        new util.ArrayList(
          carbonLoadModel.getLoadMetadataDetails.asScala.filter(_.isCarbonFormat).asJava),
        compactionModel.compactionType,
        customSegmentIds)
  }

  def deletePartialLoadsInCompaction(): Unit = {
    // Delete any partially loaded data if present. In some cases a segment
    // folder exists in the store without a corresponding entry in the table
    // status file, so those folders are removed here.
    try {
      TableProcessingOperations
        .deletePartialLoadDataIfExist(carbonLoadModel.getCarbonDataLoadSchema.getCarbonTable, true)
    } catch {
      case e: Exception =>
        LOGGER.error(s"Exception in compaction thread while clean up of stale segments" +
                     s" ${ e.getMessage }")
    }
  }

} 
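Compactor subclasses fan merge work out over the injected ExecutorService and wait for all pieces to finish. A generic sketch of that fan-out/join shape using plain JDK primitives (the segment names are invented for illustration):

import java.util.concurrent.{Callable, Executors, TimeUnit}

import scala.collection.JavaConverters._

object FanOutJoinSketch extends App {
  val executor = Executors.newFixedThreadPool(3)

  val tasks = (1 to 3).map { i =>
    new Callable[String] { override def call(): String = s"segment-$i merged" }
  }

  // invokeAll blocks until every task completes, preserving submission order.
  executor.invokeAll(tasks.asJava).asScala.foreach(f => println(f.get()))

  executor.shutdown()
  executor.awaitTermination(10, TimeUnit.SECONDS)
}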
Example 59
Source File: CompactionFactory.scala    From carbondata   with Apache License 2.0
package org.apache.carbondata.spark.rdd

import java.util.concurrent.ExecutorService

import org.apache.spark.sql.SQLContext
import org.apache.spark.sql.execution.command.CompactionModel

import org.apache.carbondata.events.OperationContext
import org.apache.carbondata.processing.loading.model.CarbonLoadModel

object CompactionFactory {

  
  def getCompactor(carbonLoadModel: CarbonLoadModel,
      compactionModel: CompactionModel,
      executor: ExecutorService,
      sqlContext: SQLContext,
      storeLocation: String,
      mergedLoads: java.util.List[String],
      operationContext: OperationContext): Compactor = {
    new CarbonTableCompactor(
      carbonLoadModel,
      compactionModel,
      executor,
      sqlContext,
      storeLocation,
      mergedLoads,
      operationContext)
  }
} 
Example 60
Source File: TestCreateTableIfNotExists.scala    From carbondata   with Apache License 2.0
package org.apache.carbondata.spark.testsuite.createTable

import java.util.concurrent.{Callable, Executors, ExecutorService, Future, TimeUnit}

import org.apache.spark.sql.test.util.QueryTest
import org.apache.spark.sql.AnalysisException
import org.scalatest.BeforeAndAfterAll

class TestCreateTableIfNotExists extends QueryTest with BeforeAndAfterAll {

  override def beforeAll {
    sql("use default")
    sql("drop table if exists test")
    sql("drop table if exists sourceTable")
    sql("drop table if exists targetTable")
  }

  test("test create table if not exists") {
    sql("create table test(a int, b string) STORED AS carbondata")
    try {
      // table creation should be successful
      sql("create table if not exists test(a int, b string) STORED AS carbondata")
      assert(true)
    } catch {
      case _: Exception =>
        assert(false)
    }
  }

  test("test create table if not exist concurrently") {

    val executorService: ExecutorService = Executors.newFixedThreadPool(10)
    var futures: List[Future[_]] = List()
    for (_ <- 0 until 3) {
      futures = futures :+ runAsync()
    }

    executorService.shutdown()
    executorService.awaitTermination(30L, TimeUnit.SECONDS)

    futures.foreach { future =>
      assertResult("PASS")(future.get.toString)
    }

    def runAsync(): Future[String] = {
      executorService.submit(new Callable[String] {
        override def call() = {
          // Create table
          var result = "PASS"
          try {
            sql("create table IF NOT EXISTS TestIfExists(name string) STORED AS carbondata")
          } catch {
            case exception: Exception =>
              result = exception.getMessage
              exception.printStackTrace()
          }
          result
        }
      })
    }
  }

  test("test create table without column specified") {
    val exception = intercept[AnalysisException] {
      sql("create table TableWithoutColumn STORED AS carbondata tblproperties('sort_columns'='')")
    }
    assert(exception.getMessage.contains("Unable to infer the schema"))
  }

  override def afterAll {
    sql("use default")
    sql("drop table if exists test")
    sql("drop table if exists sourceTable")
    sql("drop table if exists targetTable")
    sql("drop table if exists TestIfExists")
  }

} 
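The test above shuts its pool down with shutdown() followed by awaitTermination. The hardened version of that idiom, as documented in the ExecutorService javadoc, adds a shutdownNow() fallback when the grace period expires; a brief sketch:

import java.util.concurrent.{ExecutorService, Executors, TimeUnit}

object GracefulShutdownSketch extends App {
  val pool: ExecutorService = Executors.newFixedThreadPool(10)
  pool.submit(new Runnable { override def run(): Unit = Thread.sleep(200) })

  pool.shutdown() // stop accepting new tasks
  if (!pool.awaitTermination(30, TimeUnit.SECONDS)) {
    pool.shutdownNow() // cancel anything still running after the grace period
  }
}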
Example 61
Source File: package.scala    From ionroller   with MIT License
import java.util.concurrent.{ExecutorService, Executors, ScheduledExecutorService}

import com.amazonaws.services.elasticbeanstalk.model.ConfigurationOptionSetting
import com.typesafe.scalalogging.StrictLogging
import ionroller.aws.Dynamo
import ionroller.tracking.Event
import play.api.libs.functional.syntax._
import play.api.libs.json._

import scala.concurrent.duration.FiniteDuration
import scalaz.concurrent.Task
import scalaz.{-\/, \/-}

package object ionroller extends StrictLogging {
  val ionrollerExecutorService: ExecutorService = Executors.newFixedThreadPool(4)

  // Two ambiguous implicits of the same type: any implicit ExecutorService
  // lookup fails to compile, and the backticked names make the ambiguity error
  // read as "| Implicit executor service | is disabled - define explicitly |".
  implicit val `| Implicit executor service        |`: ExecutorService = ionrollerExecutorService
  implicit val ` | is disabled - define explicitly  |`: ExecutorService = ionrollerExecutorService

  implicit val timer: ScheduledExecutorService = scalaz.concurrent.Strategy.DefaultTimeoutScheduler

  def ionrollerRole(awsAccountId: String) = s"arn:aws:iam::$awsAccountId:role/ionroller"

  implicit lazy val finiteDurationFormat: Format[FiniteDuration] = {

    def applyFiniteDuration(l: Long, u: String): FiniteDuration = {
      FiniteDuration(l, u.toLowerCase)
    }

    def unapplyFiniteDuration(d: FiniteDuration): (Long, String) = {
      (d.length, d.unit.toString)
    }

    ((JsPath \ "length").format[Long] and
      (JsPath \ "unit").format[String])(applyFiniteDuration, unapplyFiniteDuration)
  }

  implicit lazy val configurationOptionSettingFormat: Format[ConfigurationOptionSetting] = {
    def applyConfigOptionSetting(ns: String, optionName: String, value: String) =
      new ConfigurationOptionSetting(ns, optionName, value)

    def unapplyConfigOptionSetting(o: ConfigurationOptionSetting): Option[(String, String, String)] = {
      for {
        ns <- Option(o.getNamespace)
        n <- Option(o.getOptionName)
        v <- Option(o.getValue)
      } yield (ns, n, v)
    }

    ((JsPath \ "Namespace").format[String] and
      (JsPath \ "OptionName").format[String] and
      (JsPath \ "Value").format[String])(applyConfigOptionSetting _, unlift(unapplyConfigOptionSetting))
  }

  def enabled(name: TimelineName) = {
    ConfigurationManager.modifyEnvironments &&
      (ConfigurationManager.modifyEnvironmentsWhitelist.isEmpty || ConfigurationManager.modifyEnvironmentsWhitelist.contains(name)) &&
      !ConfigurationManager.modifyEnvironmentsBlacklist.contains(name)
  }

  def logEvent(evt: Event) = {
    logger.info(s"$evt (enabled = ${enabled(evt.service)})")
    if (enabled(evt.service))
      Dynamo.EventLogger.log(evt)
        .flatMap({
          case \/-(s) => Task.now(())
          case -\/(f) => Task.delay(logger.error(f.getMessage, f))
        })
    else Task.now(())
  }

}
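Pinning a named ExecutorService in the package object matters because scalaz.concurrent.Task.apply takes one implicitly; the ambiguity trick above forces each call site to choose a pool explicitly. A hedged sketch of the explicit form (assuming scalaz 7.2, where the blocking runner is unsafePerformSync):

import java.util.concurrent.{ExecutorService, Executors}

import scalaz.concurrent.Task

object TaskPoolSketch extends App {
  implicit val pool: ExecutorService = Executors.newFixedThreadPool(4)

  val work = Task { 21 * 2 } // scheduled on `pool` via the implicit parameter
  println(work.unsafePerformSync)

  pool.shutdown()
}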