scala.concurrent.blocking Scala Examples
The following examples show how to use scala.concurrent.blocking. The blocking construct marks a region of code as potentially blocking, so that a BlockContext-aware ExecutionContext (such as scala.concurrent.ExecutionContext.Implicits.global) can compensate, typically by spawning a replacement thread to prevent thread starvation. Each example below names its original project and source file.
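A minimal sketch of the pattern most of the examples share (the file path is an arbitrary placeholder); note that blocking is only a hint, and it has no effect on ExecutionContexts that do not implement BlockContext:

import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.{Future, blocking}

// Mark the file read as blocking so the global fork-join pool can
// spawn a replacement thread instead of starving its worker threads.
val contents: Future[String] = Future {
  blocking {
    scala.io.Source.fromFile("/etc/hostname").mkString
  }
}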
Example 1
Source File: ScalajHttpClient.scala From telegram with Apache License 2.0
package com.bot4s.telegram.clients

import java.net.Proxy
import java.nio.file.Files

import cats.instances.future._
import com.bot4s.telegram.api.RequestHandler
import com.bot4s.telegram.methods.{Request, JsonRequest, MultipartRequest, Response}
import com.bot4s.telegram.models.InputFile
import com.bot4s.telegram.marshalling
import io.circe.parser.parse
import io.circe.{Decoder, Encoder}
import scalaj.http.{Http, MultiPart}
import slogging.StrictLogging

import scala.concurrent.{ExecutionContext, Future, blocking}

class ScalajHttpClient(token: String, proxy: Proxy = Proxy.NO_PROXY, telegramHost: String = "api.telegram.org")
                      (implicit ec: ExecutionContext)
  extends RequestHandler[Future] with StrictLogging {

  val connectionTimeoutMs = 10000
  val readTimeoutMs = 50000

  private val apiBaseUrl = s"https://$telegramHost/bot$token/"

  def sendRequest[R, T <: Request[_]](request: T)(implicit encT: Encoder[T], decR: Decoder[R]): Future[R] = {
    val url = apiBaseUrl + request.methodName

    val scalajRequest = request match {
      case r: JsonRequest[_] =>
        Http(url)
          .postData(marshalling.toJson(request))
          .header("Content-Type", "application/json")

      case r: MultipartRequest[_] =>
        // InputFile.FileIds are encoded as query params.
        val (fileIds, files) = r.getFiles.partition {
          case (key, _: InputFile.FileId) => true
          case _ => false
        }

        val parts = files.map {
          case (camelKey, inputFile) =>
            val key = marshalling.snakenize(camelKey)
            inputFile match {
              case InputFile.FileId(id) =>
                throw new RuntimeException("InputFile.FileId must be encoded as a query param")
              case InputFile.Contents(filename, contents) =>
                MultiPart(key, filename, "application/octet-stream", contents)
              case InputFile.Path(path) =>
                MultiPart(key, path.getFileName.toString(),
                  "application/octet-stream",
                  Files.newInputStream(path),
                  Files.size(path),
                  _ => ())
              case other =>
                throw new RuntimeException(s"InputFile $other not supported")
            }
        }

        val fields = parse(marshalling.toJson(request)).fold(throw _, _.asObject.map {
          _.toMap.mapValues { json =>
            json.asString.getOrElse(marshalling.printer.pretty(json))
          }
        })

        val fileIdsParams = fileIds.map {
          case (key, inputFile: InputFile.FileId) =>
            marshalling.snakenize(key) -> inputFile.fileId
        }

        val params = fields.getOrElse(Map())

        Http(url).params(params ++ fileIdsParams).postMulti(parts: _*)
    }

    import marshalling.responseDecoder

    Future {
      blocking {
        scalajRequest
          .timeout(connectionTimeoutMs, readTimeoutMs)
          .proxy(proxy)
          .asString
      }
    } map { x =>
      if (x.isSuccess) marshalling.fromJson[Response[R]](x.body)
      else throw new RuntimeException(s"Error ${x.code} on request")
    } map (processApiResponse[R])
  }
}
Example 2
Source File: AliasSwitching.scala From elastic-indexer4s with MIT License
package com.yannick_cw.elastic_indexer4s.elasticsearch.index_ops

import cats.data.EitherT
import cats.implicits._
import com.yannick_cw.elastic_indexer4s.Index_results.{IndexError, StageSucceeded}

import scala.concurrent.{ExecutionContext, Future, blocking}
import scala.util.control.NonFatal

class AliasSwitching(esClient: EsOpsClientApi, waitForElastic: Long, minThreshold: Double, maxThreshold: Double)(
    implicit ec: ExecutionContext) {

  import esClient._

  def switchAlias(alias: String, newIndexName: String): Future[Either[IndexError, StageSucceeded]] =
    trySwitching(alias, newIndexName)
      .recover { case NonFatal(ex) => Left(IndexError("Could not switch alias.", Some(ex))) }

  private def trySwitching(alias: String, newIndexName: String): Future[Either[IndexError, StageSucceeded]] =
    (for {
      _       <- EitherT.liftF[Future, IndexError, Unit](Future(blocking(Thread.sleep(waitForElastic))))
      oldSize <- latestIndexWithAliasSize(alias)
      newSize <- sizeFor(newIndexName)
      optSwitchRes <- oldSize
        .traverse(oldIndexSize => switchAliasBetweenIndices(oldIndexSize, newSize, alias, newIndexName))
      switchRes <- optSwitchRes match {
        case None =>
          addAliasToIndex(newIndexName, alias)
            .map(_ => NewAliasCreated(s"Added alias $alias to index $newIndexName"): StageSucceeded)
        case Some(x) => EitherT.pure[Future, IndexError](x)
      }
    } yield switchRes).value

  private def switchAliasBetweenIndices(oldSize: Long,
                                        newSize: Long,
                                        alias: String,
                                        newIndexName: String): OpsResult[StageSucceeded] = {
    val percentage = newSize / oldSize.toDouble
    if (checkThreshold(percentage))
      switchAliasToIndex(alias, newIndexName)
        .map(_ => AliasSwitched(s"Switched alias, new index size is ${(percentage * 100).toInt}% of old index"))
    else
      EitherT.leftT(
        IndexError(
          s"Switching failed, new index size is ${(percentage * 100).toInt}% of old index,\n" +
            s" $oldSize documents in old index with alias $alias, $newSize documents in new index $newIndexName.\n\n" +
            s"If you think the size of the new index is not correct, try to increase the `waitForElasticTimeout` property in the config." +
            s"This run spent ${waitForElastic / 1000} seconds waiting"))
  }

  private def checkThreshold(percentage: Double): Boolean =
    minThreshold < percentage && percentage <= maxThreshold
}

object AliasSwitching {
  def apply(esClient: EsOpsClientApi, minThreshold: Double, maxThreshold: Double, waitForElastic: Long)(
      implicit ec: ExecutionContext): AliasSwitching =
    new AliasSwitching(esClient, waitForElastic, minThreshold, maxThreshold)
}

case class AliasSwitched(override val msg: String) extends StageSucceeded

case class NewAliasCreated(override val msg: String) extends StageSucceeded
Example 3
Source File: InMemoryState.scala From daml with Apache License 2.0
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.ledger.on.memory

import java.util.concurrent.locks.StampedLock

import com.daml.ledger.on.memory.InMemoryState._
import com.daml.ledger.participant.state.kvutils.Bytes
import com.daml.ledger.participant.state.kvutils.api.LedgerRecord
import com.daml.ledger.participant.state.v1.Offset
import com.google.protobuf.ByteString

import scala.collection.mutable
import scala.concurrent.{ExecutionContext, Future, blocking}

private[memory] class InMemoryState private (log: MutableLog, state: MutableState) {
  private val lockCurrentState = new StampedLock()
  @volatile private var lastLogEntryIndex = 0

  def readLog[A](action: ImmutableLog => A): A =
    action(log) // `log` is mutable, but the interface is immutable

  def newHeadSinceLastWrite(): Int = lastLogEntryIndex

  def write[A](action: (MutableLog, MutableState) => Future[A])(
      implicit executionContext: ExecutionContext
  ): Future[A] =
    for {
      stamp <- Future {
        blocking {
          lockCurrentState.writeLock()
        }
      }
      result <- action(log, state)
        .andThen {
          case _ =>
            lastLogEntryIndex = log.size - 1
            lockCurrentState.unlock(stamp)
        }
    } yield result
}

object InMemoryState {
  type ImmutableLog = IndexedSeq[LedgerRecord]
  type ImmutableState = collection.Map[StateKey, StateValue]

  type MutableLog = mutable.Buffer[LedgerRecord] with ImmutableLog
  type MutableState = mutable.Map[StateKey, StateValue] with ImmutableState

  type StateKey = Bytes
  type StateValue = Bytes

  // The first element will never be read because begin offsets are exclusive.
  private val Beginning = LedgerRecord(Offset.beforeBegin, ByteString.EMPTY, ByteString.EMPTY)

  def empty =
    new InMemoryState(
      log = mutable.ArrayBuffer(Beginning),
      state = mutable.Map.empty,
    )
}
Example 4
Source File: TrampolineECTests.scala From cats-effect with Apache License 2.0
package cats.effect
package internals

import org.scalatest.matchers.should.Matchers
import org.scalatest.funsuite.AnyFunSuite
import cats.effect.internals.TrampolineEC.immediate
import scala.concurrent.ExecutionContext
import cats.effect.internals.IOPlatform.isJVM
import scala.collection.immutable.Queue

class TrampolineECTests extends AnyFunSuite with Matchers with TestUtils {
  implicit val ec: ExecutionContext = immediate

  def executeImmediate(f: => Unit): Unit =
    ec.execute(new Runnable { def run(): Unit = f })

  test("execution should be immediate") {
    var effect = 0

    executeImmediate {
      effect += 1
      executeImmediate {
        effect += 2
        executeImmediate {
          effect += 3
        }
      }
    }

    effect shouldEqual 1 + 2 + 3
  }

  test("concurrent execution") {
    var effect = List.empty[Int]

    executeImmediate {
      executeImmediate { effect = 1 :: effect }
      executeImmediate { effect = 2 :: effect }
      executeImmediate { effect = 3 :: effect }
    }

    effect shouldEqual List(1, 2, 3)
  }

  test("stack safety") {
    var effect = 0
    def loop(n: Int, acc: Int): Unit =
      executeImmediate {
        if (n > 0) loop(n - 1, acc + 1)
        else effect = acc
      }

    val n = if (isJVM) 100000 else 5000
    loop(n, 0)

    effect shouldEqual n
  }

  test("on blocking it should fork") {
    assume(isJVM, "test relevant only for the JVM")
    import scala.concurrent.blocking

    var effects = Queue.empty[Int]
    executeImmediate {
      executeImmediate { effects = effects.enqueue(4) }
      executeImmediate { effects = effects.enqueue(4) }

      effects = effects.enqueue(1)
      blocking { effects = effects.enqueue(2) }
      effects = effects.enqueue(3)
    }

    effects shouldBe Queue(1, 4, 4, 2, 3)
  }

  test("thrown exceptions should get logged to System.err (immediate)") {
    val dummy1 = new RuntimeException("dummy1")
    val dummy2 = new RuntimeException("dummy2")
    var effects = 0

    val output = catchSystemErr {
      executeImmediate {
        executeImmediate(effects += 1)
        executeImmediate(effects += 1)
        executeImmediate {
          executeImmediate(effects += 1)
          executeImmediate(effects += 1)
          throw dummy2
        }
        throw dummy1
      }
    }

    output should include("dummy1")
    output should include("dummy2")
    effects shouldBe 4
  }
}
Example 5
Source File: ComponentController.scala From CM-Well with Apache License 2.0
package cmwell.ctrl.controllers

import akka.actor.ActorSelection
import cmwell.ctrl.controllers.CassandraController._
import cmwell.ctrl.config.Config
import cmwell.ctrl.utils.ProcUtil
import com.typesafe.scalalogging.LazyLogging
import k.grid.Grid

import scala.concurrent.{blocking, Future}
import scala.util.{Failure, Success}
import scala.concurrent.duration._
import scala.concurrent.ExecutionContext.Implicits.global

abstract class ComponentController(startScriptLocation: String, psIdentifier: String, dirIdentifier: Set[String]) {
  object ComponentControllerLogger extends LazyLogging {
    lazy val l = logger
  }

  protected val startScriptPattern: String = "start[0-9]*.sh"

  def getStartScriptLocation = startScriptLocation

  def getStartScripts(location: String): Set[String] = {
    ProcUtil.executeCommand(s"ls -1 $location/ | grep $startScriptPattern") match {
      case Success(str) => str.trim.split("\n").toSet
      case Failure(err) => Set.empty[String]
    }
  }

  def getDataDirs(location: String, id: String): Set[String] = {
    ProcUtil.executeCommand(s"ls -1 $location | grep $id[0-9]*") match {
      case Success(str) => str.trim.split("\n").toSet
      case Failure(err) => Set.empty[String]
    }
  }

  private def doStart: Unit = {
    getStartScripts(startScriptLocation).foreach { sScript =>
      val runScript = s"HAL=9000 $startScriptLocation/$sScript"
      ProcUtil.executeCommand(runScript)
    }
  }

  // Run the blocking start scripts off-thread, matching stop/restart below.
  def start(): Unit = {
    Future {
      blocking {
        doStart
      }
    }
  }

  private def doStop(forceKill: Boolean = false, tries: Int = 5): Unit = {
    val cmd =
      s"ps aux | grep $psIdentifier | egrep -v 'grep|starter' | awk '{print $$2}' | xargs kill ${if (forceKill) "-9" else ""}"
    ComponentControllerLogger.l.info(s"executing $cmd")
    ProcUtil.executeCommand(cmd)

    val isDead =
      ProcUtil.executeCommand(s"ps aux | grep $psIdentifier | egrep -v 'grep|starter' | awk '{print $$2}'").get.isEmpty
    if (!isDead) {
      if (tries > 1) doStop(false, tries - 1) else doStop(true, tries - 1)
    }
  }

  def stop(): Unit = {
    Future {
      blocking {
        doStop()
      }
    }
  }

  def restart(): Unit = {
    Future {
      blocking {
        doStop()
        doStart
      }
    }
  }

  def clearData(): Unit = {
    Future {
      blocking {
        dirIdentifier.foreach { id =>
          getDataDirs(s"${Config.cmwellHome}/data/", id).foreach { dir =>
            ProcUtil.executeCommand(s"rm -rf ${Config.cmwellHome}/data/$dir/")
          }
        }
      }
    }
  }
}
Example 6
Source File: Gateway.scala From reactive-microservices with MIT License
import akka.actor.ActorSystem
import akka.http.scaladsl.Http
import akka.http.scaladsl.client.RequestBuilding
import akka.http.scaladsl.marshallers.sprayjson.SprayJsonSupport._
import akka.http.scaladsl.model.StatusCodes._
import akka.http.scaladsl.model.{HttpRequest, HttpResponse}
import akka.http.scaladsl.unmarshalling.Unmarshal
import akka.stream.FlowMaterializer
import akka.stream.scaladsl.{Sink, Source}
import com.restfb.DefaultFacebookClient
import com.restfb.types.User
import java.io.IOException
import scala.concurrent.{blocking, ExecutionContext, Future}
import scala.util.Try

case class InternalLoginRequest(identityId: Long, authMethod: String = "fb")
case class InternalReloginRequest(tokenValue: String, authMethod: String = "fb")

class Gateway(implicit actorSystem: ActorSystem, materializer: FlowMaterializer, ec: ExecutionContext)
  extends JsonProtocols with Config {

  private val identityManagerConnectionFlow = Http().outgoingConnection(identityManagerHost, identityManagerPort)
  private val tokenManagerConnectionFlow = Http().outgoingConnection(tokenManagerHost, tokenManagerPort)

  private def requestIdentityManager(request: HttpRequest): Future[HttpResponse] = {
    Source.single(request).via(identityManagerConnectionFlow).runWith(Sink.head)
  }

  private def requestTokenManager(request: HttpRequest): Future[HttpResponse] = {
    Source.single(request).via(tokenManagerConnectionFlow).runWith(Sink.head)
  }

  def requestToken(tokenValue: String): Future[Either[String, Token]] = {
    requestTokenManager(RequestBuilding.Get(s"/tokens/$tokenValue")).flatMap { response =>
      response.status match {
        case Success(_) => Unmarshal(response.entity).to[Token].map(Right(_))
        case NotFound => Future.successful(Left("Token expired or not found"))
        case _ =>
          Future.failed(new IOException(s"Token request failed with status ${response.status} and error ${response.entity}"))
      }
    }
  }

  def requestNewIdentity(): Future[Identity] = {
    requestIdentityManager(RequestBuilding.Post("/identities")).flatMap { response =>
      response.status match {
        case Success(_) => Unmarshal(response.entity).to[Identity]
        case _ =>
          Future.failed(new IOException(s"Identity request failed with status ${response.status} and error ${response.entity}"))
      }
    }
  }

  def requestLogin(identityId: Long): Future[Token] = {
    val loginRequest = InternalLoginRequest(identityId)
    requestTokenManager(RequestBuilding.Post("/tokens", loginRequest)).flatMap { response =>
      response.status match {
        case Success(_) => Unmarshal(response.entity).to[Token]
        case _ =>
          Future.failed(new IOException(s"Login request failed with status ${response.status} and error ${response.entity}"))
      }
    }
  }

  def requestRelogin(tokenValue: String): Future[Option[Token]] = {
    requestTokenManager(RequestBuilding.Patch("/tokens", InternalReloginRequest(tokenValue))).flatMap { response =>
      response.status match {
        case Success(_) => Unmarshal(response.entity).to[Token].map(Option(_))
        case NotFound => Future.successful(None)
        case _ =>
          Future.failed(new IOException(s"Relogin request failed with status ${response.status} and error ${response.entity}"))
      }
    }
  }

  def getFbUserDetails(accessToken: String): Try[User] = {
    Try {
      blocking {
        val client = new DefaultFacebookClient(accessToken)
        client.fetchObject("me", classOf[User])
      }
    }
  }
}
Example 7
Source File: Repository.scala From reactive-microservices with MIT License
import scala.concurrent.blocking
import scala.slick.driver.PostgresDriver.simple._
import scala.slick.jdbc.meta.MTable
import scala.slick.lifted.{ProvenShape, Tag}

case class EmailAddress(address: String) extends MappedTo[String] {
  override val value: String = address
  require(EmailAddress.isValid(address), "Invalid email address format")
}

object EmailAddress {
  def isValid(email: String): Boolean = EmailRegex.pattern.matcher(email.toUpperCase).matches()

  private val EmailRegex =
    """\b[a-zA-Z0-9.!#$%&’*+/=?^_`{|}~-]+@[a-zA-Z0-9-]+(?:\.[a-zA-Z0-9-]+)*\b""".r
}

case class AuthEntry(id: Option[Long], identityId: Long, createdAt: Long, email: EmailAddress, password: String)

class AuthEntries(tag: Tag) extends Table[AuthEntry](tag, "auth_entry") {
  def id = column[Long]("id", O.PrimaryKey, O.AutoInc)
  def identityId = column[Long]("identity_id", O.NotNull)
  def createdAt = column[Long]("created_at", O.NotNull)
  def email = column[EmailAddress]("email", O.NotNull)
  def password = column[String]("password", O.NotNull)

  override def * : ProvenShape[AuthEntry] =
    (id.?, identityId, createdAt, email, password) <> (AuthEntry.tupled, AuthEntry.unapply)
}

class Repository extends Config {
  def createAuthEntry(entry: AuthEntry) = {
    blocking {
      db.withSession { implicit session =>
        authEntries.insert(entry)
      }
    }
  }

  def updateAuthEntry(entry: AuthEntry) = {
    blocking {
      db.withSession { implicit session =>
        authEntries.filter(_.id === entry.id.get).update(entry)
      }
    }
  }

  def findAuthEntry(email: EmailAddress): Option[AuthEntry] = {
    blocking {
      db.withSession { implicit session =>
        byEmailCompiled(email).firstOption
      }
    }
  }

  private def byEmailQuery(email: Column[EmailAddress]) = authEntries.filter(_.email === email)

  private val byEmailCompiled = Compiled(byEmailQuery _)

  private val authEntries = TableQuery[AuthEntries]

  private val db = Database.forURL(url = dbUrl, user = dbUser, password = dbPassword, driver = "org.postgresql.Driver")
}
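The repository methods above are synchronous; a hypothetical caller could move them onto a thread pool while keeping the blocking hint that is already applied inside each method. A minimal sketch (findAuthEntryAsync is an illustrative helper, not part of the project):

import scala.concurrent.{ExecutionContext, Future}

// Hypothetical async wrapper; `findAuthEntry` already wraps its JDBC
// session in `blocking`, so a BlockContext-aware pool can compensate.
def findAuthEntryAsync(repo: Repository, email: EmailAddress)
                      (implicit ec: ExecutionContext): Future[Option[AuthEntry]] =
  Future {
    repo.findAuthEntry(email)
  }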
Example 8
Source File: IdentityManager.scala From reactive-microservices with MIT License
import akka.actor.ActorSystem
import akka.http.scaladsl.Http
import akka.http.scaladsl.marshallers.sprayjson.SprayJsonSupport._
import akka.http.scaladsl.model.StatusCodes._
import akka.http.scaladsl.server.Directives._
import akka.stream.ActorFlowMaterializer
import com.typesafe.config.ConfigFactory
import scala.concurrent.blocking
import scala.slick.driver.PostgresDriver.simple._
import scala.slick.lifted.{ProvenShape, Tag}
import spray.json.DefaultJsonProtocol

case class Identity(id: Option[Long], createdAt: Long)

class Identities(tag: Tag) extends Table[Identity](tag, "identity") {
  def id = column[Long]("id", O.PrimaryKey, O.AutoInc)
  def createdAt = column[Long]("created_at", O.NotNull)

  override def * : ProvenShape[Identity] =
    (id.?, createdAt) <> ((Identity.apply _).tupled, Identity.unapply)
}

object IdentityManager extends App with DefaultJsonProtocol {
  val config = ConfigFactory.load()
  val interface = config.getString("http.interface")
  val port = config.getInt("http.port")
  val dbUrl = config.getString("db.url")
  val dbUser = config.getString("db.user")
  val dbPassword = config.getString("db.password")

  implicit val actorSystem = ActorSystem()
  implicit val materializer = ActorFlowMaterializer()
  implicit val dispatcher = actorSystem.dispatcher

  implicit val identityFormat = jsonFormat2(Identity.apply)

  val db = Database.forURL(url = dbUrl, user = dbUser, password = dbPassword, driver = "org.postgresql.Driver")
  val identities = TableQuery[Identities]

  def getAllIdentities(): List[Identity] = {
    blocking {
      db.withSession { implicit s =>
        identities.list
      }
    }
  }

  def saveIdentity(identity: Identity): Identity = {
    blocking {
      db.withSession { implicit s =>
        identities returning identities.map(_.id) into ((_, id) => identity.copy(id = Option(id))) += identity
      }
    }
  }

  Http().bindAndHandle(interface = interface, port = port, handler = {
    logRequestResult("identity-manager") {
      path("identities") {
        pathEndOrSingleSlash {
          post {
            complete {
              val newIdentity = Identity(id = None, createdAt = System.currentTimeMillis())
              Created -> saveIdentity(newIdentity)
            }
          } ~
          get {
            complete {
              getAllIdentities()
            }
          }
        }
      }
    }
  })
}
Example 9
Source File: RESTHelpers.scala From mmlspark with MIT License
// Copyright (C) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See LICENSE in project root for information.

package com.microsoft.ml.spark.cognitive

import org.apache.commons.io.IOUtils
import org.apache.http.client.config.RequestConfig
import org.apache.http.client.methods._
import org.apache.http.impl.client.{CloseableHttpClient, HttpClientBuilder}
import org.apache.http.impl.conn.PoolingHttpClientConnectionManager

import scala.concurrent.blocking
import scala.util.Try

object RESTHelpers {
  lazy val RequestTimeout = 60000

  lazy val RequestConfigVal: RequestConfig = RequestConfig.custom()
    .setConnectTimeout(RequestTimeout)
    .setConnectionRequestTimeout(RequestTimeout)
    .setSocketTimeout(RequestTimeout)
    .build()

  lazy val ConnectionManager = {
    val cm = new PoolingHttpClientConnectionManager()
    cm.setDefaultMaxPerRoute(Int.MaxValue)
    cm.setMaxTotal(Int.MaxValue)
    cm
  }

  lazy val Client: CloseableHttpClient = HttpClientBuilder
    .create().setConnectionManager(ConnectionManager)
    .setDefaultRequestConfig(RequestConfigVal).build()

  def retry[T](backoffs: List[Int], f: () => T): T = {
    try {
      f()
    } catch {
      case t: Throwable =>
        val waitTime = backoffs.headOption.getOrElse(throw t)
        println(s"Caught error: $t with message ${t.getMessage}, waiting for $waitTime")
        blocking { Thread.sleep(waitTime.toLong) }
        retry(backoffs.tail, f)
    }
  }

  //TODO use this elsewhere
  def safeSend(request: HttpRequestBase,
               backoffs: List[Int] = List(100, 500, 1000),
               expectedCodes: Set[Int] = Set(),
               close: Boolean = true): CloseableHttpResponse = {
    retry(backoffs, { () =>
      val response = Client.execute(request)
      try {
        if (response.getStatusLine.getStatusCode.toString.startsWith("2") ||
            expectedCodes(response.getStatusLine.getStatusCode)) {
          response
        } else {
          val requestBodyOpt = Try(request match {
            case er: HttpEntityEnclosingRequestBase => IOUtils.toString(er.getEntity.getContent)
            case _ => ""
          }).get
          val responseBodyOpt = Try(IOUtils.toString(response.getEntity.getContent)).getOrElse("")
          throw new RuntimeException(
            s"Failed: " +
              s"\n\t response: $response " +
              s"\n\t requestUrl: ${request.getURI}" +
              s"\n\t requestBody: $requestBodyOpt" +
              s"\n\t responseBody: $responseBodyOpt")
        }
      } catch {
        case e: Exception =>
          response.close()
          throw e
      } finally {
        if (close) {
          response.close()
        }
      }
    })
  }
}
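A small usage sketch for the retry helper above (the URL and backoff schedule are illustrative):

// Retries the thunk after 100 ms, 500 ms, and 1000 ms waits between
// attempts; once the backoff list is exhausted the last failure is rethrown.
val health: String = RESTHelpers.retry(List(100, 500, 1000), { () =>
  scala.io.Source.fromURL("https://example.com/health").mkString
})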
Example 10
Source File: FileMonitor.scala From better-files with MIT License
package better.files

import java.nio.file._

import scala.concurrent.{blocking, ExecutionContext}
import scala.util.Try
import scala.util.control.NonFatal

// Restored from context: the original listing omitted the class declaration
// and the watch-service field that the members below rely on.
abstract class FileMonitor(val root: File, maxDepth: Int) extends File.Monitor {
  protected[this] val service = root.newWatchService

  protected[this] def reactTo(target: File) = root.isDirectory || root.isSamePathAs(target)

  protected[this] def process(key: WatchKey) = {
    val path = key.watchable().asInstanceOf[Path]

    import scala.collection.JavaConverters._
    key.pollEvents().asScala foreach {
      case event: WatchEvent[Path] @unchecked if (event.context() != null) =>
        val target: File = path.resolve(event.context())
        if (reactTo(target)) {
          if (event.kind() == StandardWatchEventKinds.ENTRY_CREATE) {
            val depth = root.relativize(target).getNameCount
            watch(target, (maxDepth - depth) max 0) // auto-watch new files in a directory
          }
          onEvent(event.kind(), target, event.count())
        }
      case event => if (reactTo(path)) onUnknownEvent(event)
    }
    key.reset()
  }

  protected[this] def watch(file: File, depth: Int): Unit = {
    def toWatch: Iterator[File] =
      if (file.isDirectory) {
        file.walk(depth).filter(f => f.isDirectory && f.exists)
      } else {
        when(file.exists)(file.parent).iterator // There is no way to watch a regular file; so watch its parent instead
      }
    try {
      toWatch.foreach(f => Try[Unit](f.register(service)).recover { case e => onException(e) }.get)
    } catch {
      case NonFatal(e) => onException(e)
    }
  }

  override def start()(implicit executionContext: ExecutionContext) = {
    watch(root, maxDepth)
    executionContext.execute(new Runnable {
      override def run() = blocking {
        Iterator.continually(service.take()).foreach(process)
      }
    })
  }

  override def close() = service.close()

  // Although this class is abstract, we provide noop implementations so user can choose to implement a subset of these
  override def onCreate(file: File, count: Int) = {}
  override def onModify(file: File, count: Int) = {}
  override def onDelete(file: File, count: Int) = {}
  override def onUnknownEvent(event: WatchEvent[_]) = {}
  override def onException(exception: Throwable) = {}
}
Example 11
Source File: NomadHttpClientIntegrationSpec.scala From cluster-broccoli with Apache License 2.0
package de.frosner.broccoli.nomad

import cats.instances.future._
import com.netaporter.uri.Uri
import com.netaporter.uri.dsl._
import de.frosner.broccoli.nomad.models.{Allocation, Job, WithId}
import de.frosner.broccoli.test.contexts.WSClientContext
import de.frosner.broccoli.test.contexts.docker.BroccoliDockerContext
import de.frosner.broccoli.test.contexts.docker.BroccoliTestService.{Broccoli, Nomad}
import org.scalacheck.Gen
import org.specs2.concurrent.ExecutionEnv
import org.specs2.mutable.Specification
import org.specs2.specification.mutable.ExecutionEnvironment
import play.api.libs.json.Json
import play.api.libs.ws.WSClient

import scala.collection.immutable
import scala.concurrent.blocking
import scala.concurrent.duration._

class NomadHttpClientIntegrationSpec
    extends Specification
    with WSClientContext
    with BroccoliDockerContext
    with ExecutionEnvironment {

  override def broccoliDockerConfig: BroccoliDockerContext.Configuration =
    BroccoliDockerContext.Configuration.services(Broccoli, Nomad)

  private val broccoliApi = "http://localhost:9000/api/v1"

  override def is(implicit executionEnv: ExecutionEnv): Any =
    "The NomadHttpClient" should {
      "get allocations for a running nomad job" >> { wsClient: WSClient =>
        // Generate a random identifier for the instance
        val identifier = Gen.resize(10, Gen.identifier).sample.get
        val client = new NomadHttpClient(Uri.parse("http://localhost:4646"), wsClient)
        (for {
          // Create and start a simple instance to look at its allocations
          _ <- wsClient
            .url(broccoliApi / "instances")
            .post(
              Json.obj("templateId" -> "http-server",
                       "parameters" -> Json.obj(
                         "id" -> identifier
                       )))
            .map(response => {
              // Ensure that the instance was created
              response.status must beEqualTo(201)
              response
            })
          _ <- wsClient
            .url(broccoliApi / "instances" / identifier)
            .post(Json.obj("status" -> "running"))
            .map(response => {
              response.status must beEqualTo(200)
              // Wait until the service is up
              blocking(Thread.sleep(1.seconds.toMillis))
              response
            })
          allocations <- client.getAllocationsForJob(shapeless.tag[Job.Id](identifier)).value
        } yield {
          allocations must beRight(
            (v: WithId[immutable.Seq[Allocation]]) => (v.jobId === identifier) and (v.payload must have length 1))
        }).await(5, broccoliDockerConfig.startupPatience + 2.seconds)
      }
    }
}
Example 12
Source File: PosixPluginFrontend.scala From protoc-bridge with Apache License 2.0
package protocbridge.frontend

import java.nio.file.{Files, Path}
import java.nio.file.attribute.PosixFilePermission

import protocbridge.ProtocCodeGenerator

import scala.concurrent.blocking
import scala.concurrent.Future
import scala.concurrent.ExecutionContext.Implicits.global
import scala.sys.process._
import java.{util => ju}

object PosixPluginFrontend extends PluginFrontend {
  case class InternalState(
      inputPipe: Path,
      outputPipe: Path,
      tempDir: Path,
      shellScript: Path
  )

  override def prepare(plugin: ProtocCodeGenerator): (Path, InternalState) = {
    val tempDirPath = Files.createTempDirectory("protopipe-")
    val inputPipe = createPipe(tempDirPath, "input")
    val outputPipe = createPipe(tempDirPath, "output")
    val sh = createShellScript(inputPipe, outputPipe)

    Future {
      blocking {
        val fsin = Files.newInputStream(inputPipe)
        val response = PluginFrontend.runWithInputStream(plugin, fsin)
        fsin.close()

        val fsout = Files.newOutputStream(outputPipe)
        fsout.write(response)
        fsout.close()
      }
    }
    (sh, InternalState(inputPipe, outputPipe, tempDirPath, sh))
  }

  override def cleanup(state: InternalState): Unit = {
    Files.delete(state.inputPipe)
    Files.delete(state.outputPipe)
    Files.delete(state.tempDir)
    Files.delete(state.shellScript)
  }

  private def createPipe(tempDirPath: Path, name: String): Path = {
    val pipeName = tempDirPath.resolve(name)
    Seq("mkfifo", "-m", "600", pipeName.toAbsolutePath.toString).!!
    pipeName
  }

  private def createShellScript(inputPipe: Path, outputPipe: Path): Path = {
    val scriptName = PluginFrontend.createTempFile(
      "",
      s"""|#!/usr/bin/env sh
          |set -e
          |cat /dev/stdin > "$inputPipe"
          |cat "$outputPipe"
      """.stripMargin
    )
    val perms = new ju.HashSet[PosixFilePermission]
    perms.add(PosixFilePermission.OWNER_EXECUTE)
    perms.add(PosixFilePermission.OWNER_READ)
    Files.setPosixFilePermissions(
      scriptName,
      perms
    )
    scriptName
  }
}
Example 13
Source File: WindowsPluginFrontend.scala From protoc-bridge with Apache License 2.0
package protocbridge.frontend

import java.net.ServerSocket
import java.nio.file.{Files, Path, Paths}

import protocbridge.ProtocCodeGenerator

import scala.concurrent.blocking
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.Future

object WindowsPluginFrontend extends PluginFrontend {

  case class InternalState(batFile: Path)

  override def prepare(plugin: ProtocCodeGenerator): (Path, InternalState) = {
    val ss = new ServerSocket(0)
    val state = createWindowsScript(ss.getLocalPort)

    Future {
      blocking {
        val client = ss.accept()
        val response = PluginFrontend.runWithInputStream(plugin, client.getInputStream)
        client.getOutputStream.write(response)
        client.close()
        ss.close()
      }
    }

    (state.batFile, state)
  }

  override def cleanup(state: InternalState): Unit = {
    Files.delete(state.batFile)
  }

  private def createWindowsScript(port: Int): InternalState = {
    val classPath = Paths.get(getClass.getProtectionDomain.getCodeSource.getLocation.toURI)
    val classPathBatchString = classPath.toString.replace("%", "%%")
    val batchFile = PluginFrontend.createTempFile(
      ".bat",
      s"""@echo off
         |"${sys.props("java.home")}\\bin\\java.exe" -cp "$classPathBatchString" ${classOf[BridgeApp].getName} $port
      """.stripMargin
    )
    InternalState(batchFile)
  }
}
Example 14
Source File: package.scala From milan with Apache License 2.0
package com.amazon.milan.compiler.flink

import java.time.Duration
import java.util.concurrent.TimeoutException

import com.amazon.milan.testing.Concurrent
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment

import scala.concurrent.{Await, ExecutionContext, Future, blocking}
import scala.language.implicitConversions

package object testing {
  implicit def extendStreamExecutionEnvironment(env: StreamExecutionEnvironment): StreamExecutionEnvironmentExtensions =
    new StreamExecutionEnvironmentExtensions(env)

  implicit def extendFuture[T](future: Future[T]): FutureExtensions[T] =
    new FutureExtensions[T](future)

  implicit class DurationExtensions(d: Duration) {
    def toConcurrent: scala.concurrent.duration.Duration =
      scala.concurrent.duration.Duration(this.d.toMillis, scala.concurrent.duration.MILLISECONDS)
  }
}

class StreamExecutionEnvironmentExtensions(env: StreamExecutionEnvironment) {
  def executeThenWaitFor(predicate: () => Boolean, secondsToWait: Int): Unit = {
    if (!Concurrent.executeAndWait(
      () => env.execute(),
      predicate,
      Duration.ofSeconds(secondsToWait))) {
      throw new TimeoutException("Timed out waiting for stop condition.")
    }
  }

  def executeAsync(maxSeconds: Int): Future[Boolean] = {
    Concurrent.executeAsync(() => env.execute(), () => true, Duration.ofSeconds(maxSeconds))
  }

  def executeUntilAsync(predicate: () => Boolean, secondsToWait: Int): Future[Unit] = {
    val result = Concurrent.executeAsync(
      () => env.execute(),
      predicate,
      Duration.ofSeconds(secondsToWait))

    result.transform(
      success =>
        if (!success) {
          throw new TimeoutException("Timed out waiting for stop condition.")
        },
      ex => throw ex)(ExecutionContext.global)
  }

  def executeAtMost(maxSeconds: Int): Unit = {
    if (!Concurrent.executeUntil(
      () => env.execute(),
      () => true,
      Duration.ofSeconds(maxSeconds))) {
      throw new TimeoutException("Timed out waiting for stop condition.")
    }
  }
}

class FutureExtensions[T](future: Future[T]) {
  def thenWaitFor(duration: Duration)(implicit context: ExecutionContext): Future[T] = {
    Future {
      blocking {
        val result = Await.result(this.future, scala.concurrent.duration.Duration.Inf)
        Thread.sleep(duration.toMillis)
        result
      }
    }
  }
}
Example 15
Source File: DockerClientWithFileAccess.scala From openwhisk with Apache License 2.0
package org.apache.openwhisk.core.containerpool.docker

import java.io.File
import java.nio.file.Paths

import akka.actor.ActorSystem
import akka.stream.alpakka.file.scaladsl.FileTailSource
import akka.stream.scaladsl.{FileIO, Source => AkkaSource}
import akka.util.ByteString

import scala.concurrent.ExecutionContext
import scala.concurrent.Future
import scala.concurrent.blocking

import spray.json.DefaultJsonProtocol._
import spray.json._

import org.apache.openwhisk.common.Logging
import org.apache.openwhisk.common.TransactionId
import org.apache.openwhisk.core.containerpool.ContainerId
import org.apache.openwhisk.core.containerpool.ContainerAddress

import scala.io.Source
import scala.concurrent.duration.FiniteDuration

class DockerClientWithFileAccess(dockerHost: Option[String] = None,
                                 containersDirectory: File = Paths.get("containers").toFile)(
  executionContext: ExecutionContext)(implicit log: Logging, as: ActorSystem)
    extends DockerClient(dockerHost)(executionContext)
    with DockerApiWithFileAccess {

  implicit private val ec = executionContext

  // The body of this method and the remainder of the class were cut off
  // in the original listing.
  def rawContainerLogs(containerId: ContainerId,
                       fromPos: Long,
                       pollInterval: Option[FiniteDuration]): AkkaSource[ByteString, Any]
}
Example 16
Source File: KafkaProducerConnector.scala From openwhisk with Apache License 2.0
package org.apache.openwhisk.connector.kafka

import akka.actor.ActorSystem
import akka.pattern.after
import org.apache.kafka.clients.producer._
import org.apache.kafka.common.errors._
import org.apache.kafka.common.serialization.StringSerializer
import pureconfig._
import pureconfig.generic.auto._
import org.apache.openwhisk.common.{Counter, Logging, TransactionId}
import org.apache.openwhisk.connector.kafka.KafkaConfiguration._
import org.apache.openwhisk.core.ConfigKeys
import org.apache.openwhisk.core.connector.{Message, MessageProducer}
import org.apache.openwhisk.core.entity.{ByteSize, UUIDs}
import org.apache.openwhisk.utils.Exceptions

import scala.collection.JavaConverters._
import scala.concurrent.duration._
import scala.concurrent.{blocking, ExecutionContext, Future, Promise}
import scala.util.{Failure, Success}

class KafkaProducerConnector(
  kafkahosts: String,
  id: String = UUIDs.randomUUID().toString,
  maxRequestSize: Option[ByteSize] = None)(implicit logging: Logging, actorSystem: ActorSystem)
    extends MessageProducer
    with Exceptions {

  implicit val ec: ExecutionContext = actorSystem.dispatcher
  private val gracefulWaitTime = 100.milliseconds

  override def sentCount(): Long = sentCounter.cur

  override def close(): Unit = {
    logging.info(this, "closing producer")
    producer.close()
  }

  private val sentCounter = new Counter()

  private def createProducer(): KafkaProducer[String, String] = {
    val config = Map(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG -> kafkahosts) ++
      configMapToKafkaConfig(loadConfigOrThrow[Map[String, String]](ConfigKeys.kafkaCommon)) ++
      configMapToKafkaConfig(loadConfigOrThrow[Map[String, String]](ConfigKeys.kafkaProducer)) ++
      (maxRequestSize map { max =>
        Map("max.request.size" -> max.size.toString)
      } getOrElse Map.empty)

    verifyConfig(config, ProducerConfig.configNames().asScala.toSet)

    tryAndThrow("creating producer")(new KafkaProducer(config, new StringSerializer, new StringSerializer))
  }

  private def recreateProducer(): Unit = {
    logging.info(this, s"recreating producer")
    tryAndSwallow("closing old producer")(producer.close())
    logging.info(this, s"old producer closed")
    producer = createProducer()
  }

  @volatile private var producer = createProducer()
}
Example 17
Source File: AmazonS3Extensions.scala From gfc-aws-s3 with Apache License 2.0
package com.gilt.gfc.aws.s3.akka

import java.util.Date

import com.amazonaws.services.s3.AmazonS3
import com.amazonaws.services.s3.model.{S3Object, S3ObjectSummary}

import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.Future

object AmazonS3Extensions {

  implicit class S3Extensions(val amazonS3: AmazonS3) extends AnyVal {

    import scala.concurrent.blocking

    def mostRecentObject(bucketName: String, prefix: String): Future[Option[S3Object]] = {
      Future {
        mostRecentObjectSummary(bucketName, prefix)
      }.map { objectSummaryOpt =>
        objectSummaryOpt.map { summary =>
          val key = summary.getKey
          amazonS3.getObject(bucketName, key)
        }
      }
    }

    private def mostRecentObjectSummary(bucketName: String, prefix: String): Option[S3ObjectSummary] = {
      import scala.collection.JavaConversions._
      blocking {
        amazonS3.listObjects(bucketName, prefix).getObjectSummaries.toList
      }.sortBy(_.getLastModified)(Ordering[Date].reverse).headOption
    }
  }
}
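A usage sketch for the implicit extension above; the bucket and prefix are placeholders, and the client construction assumes the standard AWS SDK v1 builder:

import com.amazonaws.services.s3.AmazonS3ClientBuilder
import com.gilt.gfc.aws.s3.akka.AmazonS3Extensions._

val s3 = AmazonS3ClientBuilder.defaultClient()
// Future of the most recently modified object under the prefix, if any;
// the blocking list call runs off the caller's thread.
val newest = s3.mostRecentObject("my-bucket", "logs/")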
Example 18
Source File: EmbeddedCassandra.scala From phantom-activator-template with Apache License 2.0
package controllers

import java.io.File
import java.util.concurrent.atomic.AtomicBoolean

import org.cassandraunit.utils.EmbeddedCassandraServerHelper
import org.slf4j.Logger

import scala.concurrent.blocking
import scala.util.control.NonFatal
import scala.util.{Failure, Success, Try}

// Restored from context: the original listing omitted the enclosing object
// and this flag, which the methods below rely on.
object EmbeddedCassandra {

  private[this] val started = new AtomicBoolean(false)

  def start(logger: Logger, config: Option[File] = None, timeout: Option[Int] = None): Unit = {
    this.synchronized {
      if (started.compareAndSet(false, true)) {
        blocking {
          val configFile = config.map(_.toURI.toString) getOrElse EmbeddedCassandraServerHelper.DEFAULT_CASSANDRA_YML_FILE
          System.setProperty("cassandra.config", configFile)

          Try {
            EmbeddedCassandraServerHelper.mkdirs()
          } match {
            case Success(value) =>
              logger.info("Successfully created directories for embedded Cassandra.")
            case Failure(NonFatal(e)) =>
              logger.error(s"Error creating Embedded cassandra directories: ${e.getMessage}")
          }

          (config, timeout) match {
            case (Some(file), None) =>
              logger.info(s"Starting Cassandra in embedded mode with configuration from $file.")
              EmbeddedCassandraServerHelper.startEmbeddedCassandra(
                file,
                EmbeddedCassandraServerHelper.DEFAULT_TMP_DIR,
                EmbeddedCassandraServerHelper.DEFAULT_STARTUP_TIMEOUT
              )
            case (Some(file), Some(time)) =>
              logger.info(s"Starting Cassandra in embedded mode with configuration from $file and timeout set to $time ms.")
              EmbeddedCassandraServerHelper.startEmbeddedCassandra(
                file,
                EmbeddedCassandraServerHelper.DEFAULT_TMP_DIR,
                time
              )
            case (None, Some(time)) =>
              logger.info(s"Starting Cassandra in embedded mode with default configuration and timeout set to $time ms.")
              EmbeddedCassandraServerHelper.startEmbeddedCassandra(time)
            case (None, None) =>
              logger.info("Starting Cassandra in embedded mode with default configuration.")
              EmbeddedCassandraServerHelper.startEmbeddedCassandra()
              logger.info("Successfully started embedded Cassandra")
          }
        }
      } else {
        logger.info("Embedded Cassandra has already been started")
      }
    }
  }

  def cleanup(logger: Logger): Unit = {
    this.synchronized {
      if (started.compareAndSet(true, false)) {
        logger.info("Cleaning up embedded Cassandra")
        EmbeddedCassandraServerHelper.cleanEmbeddedCassandra()
      } else {
        logger.info("Cassandra is not running, not cleaning up")
      }
    }
  }
}
Example 19
Source File: BlockingIO.scala From gbf-raidfinder with MIT License
package walfie.gbf.raidfinder.util

import java.util.concurrent.atomic.AtomicLong
import java.util.concurrent.{Executors, ThreadFactory}
import scala.concurrent.{ExecutionContext, ExecutionContextExecutor, Future, Promise, blocking}
import scala.util.control.NonFatal
import monix.execution.Scheduler

// https://github.com/alexandru/scala-best-practices/blob/master/sections/4-concurrency-parallelism.md
object BlockingIO {
  private val ioThreadPool = Scheduler.io(name = "io-thread")

  def future[T](t: => T): Future[T] = {
    val p = Promise[T]()

    val runnable = new Runnable {
      def run() = try {
        p.success(blocking(t))
      } catch {
        case NonFatal(ex) => p.failure(ex)
      }
    }

    ioThreadPool.execute(runnable)

    p.future
  }
}
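A usage sketch for the helper above (the sleep stands in for any blocking call):

import scala.concurrent.Future
import walfie.gbf.raidfinder.util.BlockingIO

// Runs on the dedicated io-thread scheduler rather than the caller's
// ExecutionContext, so blocking work does not starve CPU-bound tasks.
val answer: Future[Int] = BlockingIO.future {
  Thread.sleep(1000)
  42
}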