scala.util.Success Scala Examples
The following examples show how to use scala.util.Success.
Each example is taken from an open-source project; the source file, project, and license are noted above it.
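All of the snippets below revolve around the same small pattern, so here is a minimal, self-contained sketch of Try / Success / Failure first. The names in it are illustrative, not taken from any project below:

import scala.util.{Failure, Success, Try}

object SuccessBasics extends App {
  // Try(...) runs a computation and captures any non-fatal exception as a Failure.
  def parsePort(s: String): Try[Int] = Try(s.toInt)

  // The two outcomes are usually consumed by pattern matching.
  parsePort("8080") match {
    case Success(port) => println(s"parsed port $port")
    case Failure(e)    => println(s"invalid port: ${e.getMessage}")
  }

  // Success is also constructed directly, e.g. as a "nothing to do" result.
  val noOp: Try[Unit] = Success(())

  // Try is monadic: a for-comprehension short-circuits on the first Failure.
  val sum: Try[Int] = for {
    a <- parsePort("80")
    b <- parsePort("443")
  } yield a + b
  println(sum) // Success(523)
}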
Example 1
Source File: DateTimeTools.scala From pertax-frontend with Apache License 2.0

package util

import com.google.inject.{Inject, Singleton}
import org.joda.time.format.{DateTimeFormat, DateTimeFormatter}
import org.joda.time.{DateTime, _}
import play.api.Logger
import uk.gov.hmrc.time.CurrentTaxYear

import scala.util.{Failure, Success, Try}
import java.time.{LocalDateTime => JavaLDT}

object DateTimeTools extends CurrentTaxYear {

  //Timezone causing problem on dev server
  val defaultTZ = DateTimeZone.forID("Europe/London")
  val unixDateFormat = "yyyy-MM-dd"
  val unixDateTimeFormat = "yyyy-MM-dd'T'HH:mm:ss"
  val humanDateFormat = "dd MMMMM yyyy"

  //Returns for example 1516 in March 2016
  def previousAndCurrentTaxYear = previousAndCurrentTaxYearFromGivenYear(current.currentYear)

  def previousAndCurrentTaxYearFromGivenYear(year: Int) = {
    def y = year
    (y - 1).toString.takeRight(2) + (y).toString.takeRight(2)
  }

  private def formatter(pattern: String): DateTimeFormatter =
    DateTimeFormat.forPattern(pattern).withZone(defaultTZ)

  def short(dateTime: LocalDate) = formatter("dd/MM/yyy").print(dateTime)

  def asHumanDateFromUnixDate(unixDate: String): String =
    Try(DateTimeFormat.forPattern(humanDateFormat).print(DateTime.parse(unixDate))) match {
      case Success(v) => v
      case Failure(e) => {
        Logger.warn("Invalid date parse in DateTimeTools.asHumanDateFromUnixDate: " + e)
        unixDate
      }
    }

  def toPaymentDate(dateTime: JavaLDT): LocalDate =
    new LocalDate(dateTime.getYear, dateTime.getMonthValue, dateTime.getDayOfMonth)

  override def now: () => DateTime = DateTime.now
}

@Singleton
class DateTimeTools @Inject()() {

  def showSendTaxReturnByPost = {
    val start = new DateTime(s"${DateTime.now().getYear}-11-01T00:00:00Z")
    val end = new DateTime(s"${DateTime.now().getYear + 1}-01-31T23:59:59Z")
    !DateTime.now().isAfter(start) && DateTime.now().isBefore(end)
  }
}
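Two illustrative calls for the helper above (inputs chosen arbitrarily; the first result assumes the humanDateFormat pattern shown in the source):

DateTimeTools.asHumanDateFromUnixDate("2016-03-15") // "15 March 2016"
DateTimeTools.asHumanDateFromUnixDate("not-a-date") // logs a warning and returns "not-a-date"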
Example 2
Source File: AbstractWebServer.scala From ohara with Apache License 2.0

package oharastream.ohara.shabondi.common

import akka.Done
import akka.actor.ActorSystem
import akka.event.Logging
import akka.http.scaladsl.Http
import akka.http.scaladsl.Http.ServerBinding
import akka.http.scaladsl.server.{Directives, Route}
import akka.http.scaladsl.settings.ServerSettings
import oharastream.ohara.common.util.Releasable

import scala.concurrent._
import scala.concurrent.duration.Duration
import scala.io.StdIn
import scala.util.{Failure, Success}

private[shabondi] abstract class AbstractWebServer extends Directives with Releasable {
  implicit protected val actorSystem: ActorSystem =
    ActorSystem(Logging.simpleName(this).replaceAll("\\$", ""))

  protected def routes: Route

  protected def postBinding(binding: ServerBinding): Unit = {
    val hostname = binding.localAddress.getHostName
    val port = binding.localAddress.getPort
    actorSystem.log.info(s"Server online at http://$hostname:$port/")
  }

  protected def postBindingFailure(cause: Throwable): Unit = {
    actorSystem.log.error(cause, s"Error starting the server ${cause.getMessage}")
  }

  protected def waitForShutdownSignal()(implicit ec: ExecutionContext): Future[Done] = {
    val promise = Promise[Done]()
    sys.addShutdownHook {
      promise.trySuccess(Done)
    }
    Future {
      blocking {
        if (StdIn.readLine("Press <RETURN> to stop Shabondi WebServer...\n") != null)
          promise.trySuccess(Done)
      }
    }
    promise.future
  }

  protected def postServerShutdown(): Unit = actorSystem.log.info("Shutting down the server")

  def start(bindInterface: String, port: Int): Unit = {
    start(bindInterface, port, ServerSettings(actorSystem))
  }

  def start(bindInterface: String, port: Int, settings: ServerSettings): Unit = {
    implicit val executionContext: ExecutionContextExecutor = actorSystem.dispatcher

    val bindingFuture: Future[Http.ServerBinding] = Http().bindAndHandle(
      handler = routes,
      interface = bindInterface,
      port = port,
      settings = settings
    )

    bindingFuture.onComplete {
      case Success(binding) => postBinding(binding)
      case Failure(cause)   => postBindingFailure(cause)
    }

    Await.ready(
      bindingFuture.flatMap(_ => waitForShutdownSignal()),
      Duration.Inf
    )

    bindingFuture
      .flatMap(_.unbind())
      .onComplete { _ =>
        postServerShutdown()
        actorSystem.terminate()
      }
  }

  override def close(): Unit = actorSystem.terminate()
}
Example 3
Source File: ComponentsFixture.scala From daml with Apache License 2.0

// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.navigator.test

import java.util.concurrent.atomic.AtomicReference

import com.daml.navigator.test.config.Arguments
import com.daml.navigator.test.runner.{HeadNavigator, PackagedDamlc, PackagedSandbox}
import com.typesafe.scalalogging.LazyLogging

import scala.io.Source
import scala.util.{Failure, Success, Try}

class ComponentsFixture(
    val args: Arguments,
    val navigatorPort: Int,
    val sandboxPort: Int,
    val scenario: String
) extends LazyLogging {

  // A list of commands on how to destroy started processes
  private val killProcs: AtomicReference[List[Unit => Unit]] = new AtomicReference(List.empty)

  private val onlineUrl = s"http://localhost:$navigatorPort/api/about"

  private def get(
      url: String,
      connectTimeout: Int = 1000,
      readTimeout: Int = 1000,
      requestMethod: String = "GET"
  ): String = {
    import java.net.{URL, HttpURLConnection}
    val connection = (new URL(url)).openConnection.asInstanceOf[HttpURLConnection]
    connection.setConnectTimeout(connectTimeout)
    connection.setReadTimeout(readTimeout)
    connection.setRequestMethod(requestMethod)
    val inputStream = connection.getInputStream
    val content = Source.fromInputStream(inputStream).mkString
    if (inputStream != null) inputStream.close()
    content
  }

  def startup(): Try[Unit] = {
    if (args.startComponents) {
      logger.info("Starting the sandbox and the Navigator")
      for {
        (darFile, tempFiles) <- Try(PackagedDamlc.run(args.damlPath))
        sandbox <- Try(PackagedSandbox.runAsync(sandboxPort, darFile, scenario))
        _ = killProcs.updateAndGet(s => sandbox :: s)
        navigator <- Try(
          HeadNavigator.runAsync(args.navConfPAth, args.navigatorDir, navigatorPort, sandboxPort))
        _ = killProcs.updateAndGet(s => navigator :: s)
      } yield {
        ()
      }
    } else {
      Success(())
    }
  }

  private def retry[R](action: => R, maxRetries: Int, delayMillis: Int): Try[R] = {
    def retry0(count: Int): Try[R] = {
      Try(action) match {
        case Success(r) => Success(r)
        case Failure(e) =>
          if (count > maxRetries) {
            logger.error(
              s"Navigator is not available after $maxRetries retries with $delayMillis millis interval.")
            Failure(e)
          } else {
            logger.info(s"Navigator is not available yet, waiting $delayMillis millis ")
            Thread.sleep(delayMillis.toLong)
            retry0(count + 1)
          }
      }
    }
    retry0(0)
  }

  def waitForNavigator(): Try[Unit] = {
    logger.info(s"Waiting for the Navigator to start up (waiting for $onlineUrl)")
    retry({ get(onlineUrl); () }, 120, 1000)
  }

  def shutdown(): Unit = {
    killProcs.getAndUpdate(procs => {
      procs.foreach(killAction => Try { killAction(()) })
      List.empty
    })
    ()
  }
}
Example 4
Source File: AuthorizationInterceptor.scala From daml with Apache License 2.0

// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.ledger.api.auth.interceptor

import com.daml.ledger.api.auth.{AuthService, Claims}
import com.daml.platform.server.api.validation.ErrorFactories.unauthenticated
import io.grpc.{
  Context,
  Contexts,
  Metadata,
  ServerCall,
  ServerCallHandler,
  ServerInterceptor,
  Status
}
import org.slf4j.{Logger, LoggerFactory}

import scala.compat.java8.FutureConverters
import scala.concurrent.ExecutionContext
import scala.util.{Failure, Success, Try}

// NOTE: AsyncForwardingListener, referenced below, is defined elsewhere in this
// package and is not part of this excerpt.
final class AuthorizationInterceptor(protected val authService: AuthService, ec: ExecutionContext)
    extends ServerInterceptor {

  private val logger: Logger = LoggerFactory.getLogger(AuthorizationInterceptor.getClass)
  private val internalAuthenticationError =
    Status.INTERNAL.withDescription("Failed to get claims from request metadata")

  import AuthorizationInterceptor.contextKeyClaim

  override def interceptCall[ReqT, RespT](
      call: ServerCall[ReqT, RespT],
      headers: Metadata,
      nextListener: ServerCallHandler[ReqT, RespT]): ServerCall.Listener[ReqT] = {
    // Note: Context uses ThreadLocal storage, we need to capture it outside of the async block below.
    // Contexts are immutable and safe to pass around.
    val prevCtx = Context.current

    // The method interceptCall() must return a Listener.
    // The target listener is created by calling `Contexts.interceptCall()`.
    // However, this is only done after we have asynchronously received the claims.
    // Therefore, we need to return a listener that buffers all messages until the target listener is available.
    new AsyncForwardingListener[ReqT] {
      FutureConverters
        .toScala(authService.decodeMetadata(headers))
        .onComplete {
          case Failure(exception) =>
            logger.warn(s"Failed to get claims from request metadata: ${exception.getMessage}")
            call.close(internalAuthenticationError, new Metadata())
            new ServerCall.Listener[Nothing]() {}
          case Success(Claims.empty) =>
            logger.debug(s"Auth metadata decoded into empty claims, returning UNAUTHENTICATED")
            call.close(Status.UNAUTHENTICATED, new Metadata())
            new ServerCall.Listener[Nothing]() {}
          case Success(claims) =>
            val nextCtx = prevCtx.withValue(contextKeyClaim, claims)
            // Contexts.interceptCall() creates a listener that wraps all methods of `nextListener`
            // such that `Context.current` returns `nextCtx`.
            val nextListenerWithContext =
              Contexts.interceptCall(nextCtx, call, headers, nextListener)
            setNextListener(nextListenerWithContext)
            nextListenerWithContext
        }(ec)
    }
  }
}

object AuthorizationInterceptor {
  private val contextKeyClaim = Context.key[Claims]("AuthServiceDecodedClaim")

  def extractClaimsFromContext(): Try[Claims] =
    Option(contextKeyClaim.get()).fold[Try[Claims]](Failure(unauthenticated()))(Success(_))

  def apply(authService: AuthService, ec: ExecutionContext): AuthorizationInterceptor =
    new AuthorizationInterceptor(authService, ec)
}
Example 5
Source File: InfiniteRetries.scala From daml with Apache License 2.0

// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.platform.sandbox.perf

import akka.actor.ActorSystem

import scala.concurrent.duration.{DurationInt, FiniteDuration}
import scala.concurrent.{ExecutionContext, Future, Promise}
import scala.util.{Failure, Success}

trait InfiniteRetries {

  protected def retry[T](action: => Future[T], delay: FiniteDuration = 10.millis)(
      implicit system: ActorSystem): Future[T] = {
    implicit val ec: ExecutionContext = system.dispatcher
    action.transformWith {
      case Success(v) =>
        Future.successful(v)
      case Failure(t) =>
        val p = Promise[T]()
        system.scheduler.scheduleOnce(
          delay,
          () =>
            retry[T](action, delay).onComplete {
              case Success(s)         => p.success(s)
              case Failure(throwable) => p.failure(throwable)
            }
        )
        p.future
    }
  }
}
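A hypothetical call site for the trait above; every name in this sketch is an assumption for illustration. It simply retries a Future that fails twice before succeeding:

import akka.actor.ActorSystem
import scala.concurrent.Future

object RetryDemo extends InfiniteRetries {
  implicit val system: ActorSystem = ActorSystem("retry-demo")
  import system.dispatcher

  @volatile private var attempts = 0

  // Fails on the first two attempts, then succeeds.
  def flaky(): Future[Int] = Future {
    attempts += 1
    if (attempts < 3) throw new RuntimeException(s"attempt $attempts failed") else attempts
  }

  def main(args: Array[String]): Unit =
    retry(flaky()).foreach { n =>
      println(s"succeeded after $n attempts")
      system.terminate()
    }
}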
Example 6
Source File: CommandSubmissionFlow.scala From daml with Apache License 2.0

// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.ledger.client.services.commands

import akka.NotUsed
import akka.stream.scaladsl.Flow
import com.daml.dec.DirectExecutionContext
import com.daml.ledger.api.v1.command_submission_service.SubmitRequest
import com.daml.util.Ctx
import com.google.protobuf.empty.Empty

import scala.concurrent.Future
import scala.util.{Success, Try}

object CommandSubmissionFlow {

  def apply[Context](
      submit: SubmitRequest => Future[Empty],
      maxInFlight: Int): Flow[Ctx[Context, SubmitRequest], Ctx[Context, Try[Empty]], NotUsed] = {
    Flow[Ctx[Context, SubmitRequest]]
      .log("submission at client", _.value.commands.fold("")(_.commandId))
      .mapAsyncUnordered(maxInFlight) {
        case Ctx(context, request) =>
          submit(request)
            .transform { tryResponse =>
              Success(
                Ctx(
                  context,
                  tryResponse
                ))
            }(DirectExecutionContext)
      }
  }
}
Example 7
Source File: ApiOffset.scala From daml with Apache License 2.0

// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.platform

import com.daml.ledger.participant.state.v1.Offset
import com.daml.lf.data.Ref

import scala.util.{Failure, Success, Try}

// This utility object is used as a single point to encode and decode
// offsets sent over the API and received from the API.
object ApiOffset {

  def fromString(s: String): Try[Offset] =
    Ref.HexString
      .fromString(s)
      .fold(
        err => Failure(new IllegalArgumentException(err)),
        b => Success(Offset.fromHexString(b))
      )

  def assertFromString(s: String): Offset = fromString(s).get

  def toApiString(offset: Offset): Ref.LedgerString =
    offset.toHexString

  implicit class ApiOffsetConverter(val offset: Offset) {
    def toApiString: Ref.LedgerString = ApiOffset.toApiString(offset)
  }
}
Example 8
Source File: TrackerMap.scala From daml with Apache License 2.0

// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.platform.apiserver.services.tracking

import java.util.concurrent.atomic.AtomicReference

import com.daml.dec.DirectExecutionContext
import com.daml.ledger.api.v1.command_service.SubmitAndWaitRequest
import com.daml.ledger.api.v1.completion.Completion
import com.daml.logging.{ContextualizedLogger, LoggingContext}
import org.slf4j.LoggerFactory

import scala.collection.immutable.HashMap
import scala.concurrent.duration.{FiniteDuration, _}
import scala.concurrent.{ExecutionContext, Future}
import scala.util.{Failure, Success}

// NOTE: this excerpt is incomplete. The TrackerMap class itself and the
// AsyncResourceState hierarchy (Waiting / Ready / Closed) referenced below did
// not survive extraction; only the AsyncResource helper and the companion
// factory method are shown.
final class AsyncResource[T <: AutoCloseable](future: Future[T]) {
  private val logger = LoggerFactory.getLogger(this.getClass)

  // Must progress Waiting => Ready => Closed or Waiting => Closed.
  val state: AtomicReference[AsyncResourceState[T]] = new AtomicReference(Waiting)

  future.andThen({
    case Success(t) =>
      if (!state.compareAndSet(Waiting, Ready(t))) {
        // This is the punch line of AsyncResource.
        // If we've been closed in the meantime, we must close the underlying resource also.
        // This "on-failure-to-complete" behavior is not present in scala or java Futures.
        t.close()
      }
    // Someone should be listening to this failure downstream
    // TODO(mthvedt): Refactor so at least one downstream listener is always present,
    // and exceptions are never dropped.
    case Failure(ex) =>
      logger.error("failure to get async resource", ex)
      state.set(Closed)
  })(DirectExecutionContext)

  def flatMap[U](f: T => Future[U])(implicit ex: ExecutionContext): Future[U] = {
    state.get() match {
      case Waiting  => future.flatMap(f)
      case Closed   => throw new IllegalStateException()
      case Ready(t) => f(t)
    }
  }

  def map[U](f: T => U)(implicit ex: ExecutionContext): Future[U] =
    flatMap(t => Future.successful(f(t)))

  def ifPresent[U](f: T => U): Option[U] = state.get() match {
    case Ready(t) => Some(f(t))
    case _        => None
  }

  def close(): Unit = state.getAndSet(Closed) match {
    case Ready(t) => t.close()
    case _        =>
  }
}

object TrackerMap {
  // (remainder of the companion object elided in this excerpt)
  def apply(retentionPeriod: FiniteDuration)(implicit logCtx: LoggingContext): TrackerMap =
    new TrackerMap(retentionPeriod)
}
Example 9
Source File: HandleOfferResult.scala From daml with Apache License 2.0

// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.platform.apiserver.services.tracking

import akka.stream.QueueOfferResult
import com.daml.platform.server.api.ApiException
import com.google.rpc.status.Status
import io.grpc.{Status => GrpcStatus}

import scala.concurrent.Promise
import scala.util.{Failure, Success, Try}

private[tracking] object HandleOfferResult {

  val toGrpcStatus: PartialFunction[Try[QueueOfferResult], Option[GrpcStatus]] = {
    case Failure(t) =>
      t match {
        case i: IllegalStateException
            if i.getMessage == "You have to wait for previous offer to be resolved to send another request" =>
          Some(
            GrpcStatus.RESOURCE_EXHAUSTED
              .withDescription("Ingress buffer is full"))
        case _ =>
          Some(
            GrpcStatus.ABORTED
              .withDescription(s"Failure: ${t.getClass.getSimpleName}: ${t.getMessage}")
              .withCause(t))
      }
    case Success(QueueOfferResult.Failure(t)) =>
      Some(
        GrpcStatus.ABORTED
          .withDescription(s"Failed to enqueue: ${t.getClass.getSimpleName}: ${t.getMessage}")
          .withCause(t))
    case Success(QueueOfferResult.Dropped) =>
      Some(
        GrpcStatus.RESOURCE_EXHAUSTED
          .withDescription("Ingress buffer is full"))
    case Success(QueueOfferResult.QueueClosed) =>
      Some(GrpcStatus.ABORTED.withDescription("Queue closed"))
    case Success(QueueOfferResult.Enqueued) =>
      None // Promise will be completed downstream.
  }

  def toStatusMessage: PartialFunction[Try[QueueOfferResult], Status] =
    toGrpcStatus.andThen(_.fold(Status())(e => Status(e.getCode.value(), e.getDescription)))

  def completePromise(promise: Promise[_]): PartialFunction[Try[QueueOfferResult], Unit] =
    toGrpcStatus.andThen(_.foreach(s => promise.tryFailure(new ApiException(s))))
}
Example 10
Source File: SandboxBackend.scala From daml with Apache License 2.0

// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.platform.sandbox

import java.util.UUID

import com.daml.platform.sandbox.services.DbInfo
import com.daml.platform.store.DbType
import com.daml.resources.ResourceOwner
import com.daml.testing.postgresql.PostgresResource

import scala.util.Success

object SandboxBackend {

  trait Postgresql { this: AbstractSandboxFixture =>
    override protected final def database: Option[ResourceOwner[DbInfo]] =
      Some(PostgresResource.owner().map(database => DbInfo(database.url, DbType.Postgres)))
  }

  trait H2Database { this: AbstractSandboxFixture =>
    private def randomDatabaseName = UUID.randomUUID().toString
    private[this] def jdbcUrl = s"jdbc:h2:mem:$randomDatabaseName;db_close_delay=-1"

    override protected final def database: Option[ResourceOwner[DbInfo]] =
      Some(ResourceOwner.forTry(() => Success(DbInfo(jdbcUrl, DbType.H2Database))))
  }
}
Example 11
Source File: GrpcHealthService.scala From daml with Apache License 2.0

// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.platform.server.api.services.grpc

import akka.NotUsed
import akka.stream.Materializer
import akka.stream.scaladsl.Source
import com.daml.grpc.adapter.ExecutionSequencerFactory
import com.daml.dec.DirectExecutionContext
import com.daml.ledger.api.health.HealthChecks
import com.daml.platform.api.grpc.GrpcApiService
import com.daml.platform.server.api.DropRepeated
import com.daml.platform.server.api.services.grpc.GrpcHealthService._
import io.grpc.health.v1.health.{
  HealthAkkaGrpc,
  HealthCheckRequest,
  HealthCheckResponse,
  HealthGrpc
}
import io.grpc.{ServerServiceDefinition, Status, StatusException}

import scala.concurrent.duration.{DurationInt, FiniteDuration}
import scala.concurrent.{ExecutionContext, Future}
import scala.util.{Failure, Success, Try}

class GrpcHealthService(
    healthChecks: HealthChecks,
    maximumWatchFrequency: FiniteDuration = 1.second,
)(
    implicit protected val esf: ExecutionSequencerFactory,
    protected val mat: Materializer,
    executionContext: ExecutionContext,
) extends HealthAkkaGrpc
    with GrpcApiService {

  override def bindService(): ServerServiceDefinition =
    HealthGrpc.bindService(this, DirectExecutionContext)

  override def check(request: HealthCheckRequest): Future[HealthCheckResponse] =
    Future.fromTry(matchResponse(serviceFrom(request)))

  override def watchSource(request: HealthCheckRequest): Source[HealthCheckResponse, NotUsed] =
    Source
      .fromIterator(() => Iterator.continually(matchResponse(serviceFrom(request)).get))
      .throttle(1, per = maximumWatchFrequency)
      .via(DropRepeated())

  private def matchResponse(componentName: Option[String]): Try[HealthCheckResponse] =
    if (!componentName.forall(healthChecks.hasComponent))
      Failure(new StatusException(Status.NOT_FOUND))
    else if (healthChecks.isHealthy(componentName))
      Success(servingResponse)
    else
      Success(notServingResponse)
}

object GrpcHealthService {
  private[grpc] val servingResponse =
    HealthCheckResponse(HealthCheckResponse.ServingStatus.SERVING)

  private[grpc] val notServingResponse =
    HealthCheckResponse(HealthCheckResponse.ServingStatus.NOT_SERVING)

  private def serviceFrom(request: HealthCheckRequest): Option[String] = {
    Option(request.service).filter(_.nonEmpty)
  }
}
Example 12
Source File: BatchedValidatingCommitter.scala From daml with Apache License 2.0

// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.ledger.validator

import java.time.Instant

import akka.stream.Materializer
import com.daml.caching.Cache
import com.daml.ledger.participant.state.kvutils.Bytes
import com.daml.ledger.participant.state.kvutils.DamlKvutils.{DamlStateKey, DamlStateValue}
import com.daml.ledger.participant.state.v1.{ParticipantId, SubmissionResult}
import com.daml.ledger.validator.batch.{
  BatchedSubmissionValidator,
  BatchedSubmissionValidatorFactory
}
import com.daml.ledger.validator.caching.{CacheUpdatePolicy, ImmutablesOnlyCacheUpdatePolicy}

import scala.concurrent.{ExecutionContext, Future}
import scala.util.{Failure, Success}

class BatchedValidatingCommitter[LogResult](
    now: () => Instant,
    keySerializationStrategy: StateKeySerializationStrategy,
    validator: BatchedSubmissionValidator[LogResult],
    stateValueCache: Cache[DamlStateKey, DamlStateValue],
    cacheUpdatePolicy: CacheUpdatePolicy
)(implicit materializer: Materializer) {

  def commit(
      correlationId: String,
      envelope: Bytes,
      submittingParticipantId: ParticipantId,
      ledgerStateOperations: LedgerStateOperations[LogResult]
  )(implicit executionContext: ExecutionContext): Future[SubmissionResult] = {
    val (ledgerStateReader, commitStrategy) = readerAndCommitStrategyFrom(ledgerStateOperations)
    validator
      .validateAndCommit(
        envelope,
        correlationId,
        now(),
        submittingParticipantId,
        ledgerStateReader,
        commitStrategy
      )
      .transformWith {
        case Success(_) =>
          Future.successful(SubmissionResult.Acknowledged)
        case Failure(exception) =>
          Future.successful(SubmissionResult.InternalError(exception.getLocalizedMessage))
      }
  }

  private def readerAndCommitStrategyFrom(ledgerStateOperations: LedgerStateOperations[LogResult])(
      implicit executionContext: ExecutionContext)
    : (DamlLedgerStateReader, CommitStrategy[LogResult]) =
    if (stateValueCache == Cache.none) {
      BatchedSubmissionValidatorFactory
        .readerAndCommitStrategyFrom(ledgerStateOperations, keySerializationStrategy)
    } else {
      BatchedSubmissionValidatorFactory
        .cachingReaderAndCommitStrategyFrom(
          ledgerStateOperations,
          stateValueCache,
          cacheUpdatePolicy,
          keySerializationStrategy)
    }
}

object BatchedValidatingCommitter {
  def apply[LogResult](now: () => Instant, validator: BatchedSubmissionValidator[LogResult])(
      implicit materializer: Materializer): BatchedValidatingCommitter[LogResult] =
    new BatchedValidatingCommitter[LogResult](
      now,
      DefaultStateKeySerializationStrategy,
      validator,
      Cache.none,
      ImmutablesOnlyCacheUpdatePolicy)

  def apply[LogResult](
      now: () => Instant,
      validator: BatchedSubmissionValidator[LogResult],
      stateValueCache: Cache[DamlStateKey, DamlStateValue])(
      implicit materializer: Materializer): BatchedValidatingCommitter[LogResult] =
    new BatchedValidatingCommitter[LogResult](
      now,
      DefaultStateKeySerializationStrategy,
      validator,
      stateValueCache,
      ImmutablesOnlyCacheUpdatePolicy)
}
Example 13
Source File: Main.scala From daml with Apache License 2.0

// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.jwt

import java.io.File

import scala.util.{Failure, Success}

object Main {

  object ErrorCodes {
    val InvalidUsage = 100
    val GenerateKeysError = 101
    val GenerateJwtError = 102
  }

  final case class Config(
      generateKeys: Option[GenerateKeys] = None,
      generateJwt: Option[GenerateJwt] = None)

  final case class GenerateKeys(name: Option[String] = None)
  final case class GenerateJwt(publicKey: Option[File] = None, privateKey: Option[File] = None)

  def main(args: Array[String]): Unit = {
    parseConfig(args) match {
      case Some(Config(Some(GenerateKeys(Some(name))), None)) =>
        RsaKeysGenerator.generate(keyPair(name)) match {
          case Success(a) =>
            print(s"Generated keys: ${a: domain.KeyPair[File]}")
          case Failure(e) =>
            e.printStackTrace()
            sys.exit(ErrorCodes.GenerateKeysError)
        }
      case Some(Config(None, Some(GenerateJwt(Some(publicKey), Some(privateKey))))) =>
        JwtGenerator.generate(
          domain.KeyPair(publicKey = Seq.empty[Byte], privateKey = Seq.empty[Byte])) match {
          case Success(a) =>
            println(s"Generated JWT: $a")
          case Failure(e) =>
            e.printStackTrace()
            sys.exit(ErrorCodes.GenerateJwtError)
        }
      case Some(_) =>
        configParser.showUsage()
        sys.exit(ErrorCodes.InvalidUsage)
      case None =>
        // error is printed out by scopt... yeah I know... why?
        sys.exit(ErrorCodes.InvalidUsage)
    }
  }

  private def keyPair(name: String) =
    domain.KeyPair(
      publicKey = new File(s"./$name.pub").getAbsoluteFile,
      privateKey = new File(s"./$name.pvt").getAbsoluteFile)

  private def parseConfig(args: Seq[String]): Option[Config] = {
    configParser.parse(args, Config())
  }

  private val configParser = new scopt.OptionParser[Config]("ledger-service-jwt") {
    cmd("generate-keys")
      .text("generate public and private keys")
      .action((_, c) => c.copy(generateKeys = Some(GenerateKeys())))
      .children(
        opt[String]("name")
          .required()
          .valueName("<keys name>")
          .action((x, c) => c.copy(generateKeys = c.generateKeys.map(_.copy(name = Some(x)))))
      )

    cmd("generate-jwt")
      .text("generate JWT")
      .action((_, c) => c.copy(generateJwt = Some(GenerateJwt())))
      .children(
        opt[File]("public-key")
          .required()
          .valueName("<public key file path>")
          .action((x, c) => c.copy(generateJwt = c.generateJwt.map(_.copy(publicKey = Some(x))))),
        opt[File]("private-key")
          .required()
          .valueName("<private key file path>")
          .action((x, c) => c.copy(generateJwt = c.generateJwt.map(_.copy(privateKey = Some(x)))))
      )
  }
}
Example 14
Source File: RsaKeysGenerator.scala From daml with Apache License 2.0

// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.jwt

import java.io.{File, FileNotFoundException, FileOutputStream}

import com.daml.lf.data.TryOps.Bracket.bracket
import scalaz.std.option._
import scalaz.syntax.applicative._

import scala.util.{Failure, Success, Try}

object RsaKeysGenerator {

  private val keySize: Int = 2048

  def generate(destination: domain.KeyPair[File]): Try[domain.KeyPair[File]] =
    for {
      keyPair <- generate_(): Try[domain.KeyPair[Array[Byte]]]
      publicKeyFile <- writeKey(keyPair.publicKey, destination.publicKey)
      privateKeyFile <- writeKey(keyPair.privateKey, destination.privateKey)
    } yield domain.KeyPair(publicKey = publicKeyFile, privateKey = privateKeyFile)

  def generate(): Try[domain.KeyPair[Seq[Byte]]] =
    generate_().map(k => k.map(as => as.toSeq))

  private def generate_(): Try[domain.KeyPair[Array[Byte]]] =
    Try {
      val kpg = java.security.KeyPairGenerator.getInstance("RSA")
      kpg.initialize(keySize)
      Option(kpg.generateKeyPair()).flatMap(domainKeyPair)
    } flatMap {
      case Some(x) => Success(x)
      case None =>
        Failure(new IllegalStateException("Cannot generate RSA key pair, null returned"))
    }

  private def domainKeyPair(k: java.security.KeyPair): Option[domain.KeyPair[Array[Byte]]] =
    ^(Option(k.getPublic), Option(k.getPrivate)) { (pub, pvt) =>
      domain.KeyPair(publicKey = pub.getEncoded, privateKey = pvt.getEncoded)
    }

  private def writeKey(key: Array[Byte], file: File): Try[File] =
    bracket(Try(new FileOutputStream(file)))(close).flatMap { ostream =>
      for {
        encoder <- Try(java.util.Base64.getEncoder)
        _ <- Try(ostream.write(encoder.encode(key)))
        _ <- exists(file)
      } yield file
    }

  private def close(a: FileOutputStream): Try[Unit] = Try(a.close())

  private def exists(f: File): Try[File] =
    for {
      b <- Try(f.exists())
      x <- if (b) Success(f) else Failure(new FileNotFoundException(f.getAbsolutePath))
    } yield x
}
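A sketch of how this generator is driven, mirroring the keyPair helper in Example 13; the file names here are assumptions:

import java.io.File
import scala.util.{Failure, Success}

RsaKeysGenerator.generate(
  domain.KeyPair(
    publicKey = new File("./demo.pub").getAbsoluteFile,
    privateKey = new File("./demo.pvt").getAbsoluteFile)) match {
  case Success(pair) => println(s"wrote ${pair.publicKey} and ${pair.privateKey}")
  case Failure(e)    => e.printStackTrace()
}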
Example 15
Source File: DarManifestReader.scala From daml with Apache License 2.0

// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.lf
package archive

import java.io.InputStream
import java.util.jar.{Attributes, Manifest}

import scala.util.{Failure, Success, Try}

object DarManifestReader {

  private val supportedFormat = "daml-lf"

  def dalfNames(is: InputStream): Try[Dar[String]] = {
    val manifest = new Manifest(is)
    val attributes = value(manifest.getMainAttributes) _
    for {
      mainDalf <- attributes("Main-Dalf")
      allDalfs <- attributes("Dalfs")
      format <- attributes("Format")
      _ <- checkFormat(format)
    } yield Dar(mainDalf, dependencies(allDalfs, mainDalf))
  }

  private def dependencies(other: String, main: String): List[String] = {
    val deps = other.split(',').view.map(_.trim)
    deps.filter(x => x != main).toList
  }

  private def value(attributes: Attributes)(key: String): Try[String] =
    Option(attributes.getValue(key)) match {
      case None    => failure(s"Cannot find attribute: $key")
      case Some(x) => Success(x.trim)
    }

  private def checkFormat(format: String): Try[Unit] =
    if (format == supportedFormat) Success(())
    else failure(s"Unsupported format: $format")

  private def failure(msg: String) = Failure(DarManifestReaderException(msg))

  case class DarManifestReaderException(msg: String) extends IllegalStateException(msg)
}
Example 16
Source File: UniversalArchiveReader.scala From daml with Apache License 2.0

// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.lf
package archive

import java.io._
import java.util.zip.ZipInputStream

import com.daml.lf.data.Ref
import com.daml.lf.language.LanguageMajorVersion
import com.daml.daml_lf_dev.DamlLf

import scala.util.{Failure, Success, Try}

import com.daml.lf.data.TryOps.Bracket.bracket

// NOTE: this excerpt omits the UniversalArchiveReader class it references; only
// the versioned wrapper and the SupportedFileType helper survived extraction.
object UniversalArchiveReaderWithVersion {
  def apply()
    : UniversalArchiveReader[((Ref.PackageId, DamlLf.ArchivePayload), LanguageMajorVersion)] =
    UniversalArchiveReader(parseDalf)

  private def parseDalf(is: InputStream) = Try(Reader.readArchiveAndVersion(is))
}

object SupportedFileType {
  def supportedFileType(f: File): Try[SupportedFileType] =
    if (DarFile.matchesFileExtension(f)) Success(DarFile)
    else if (DalfFile.matchesFileExtension(f)) Success(DalfFile)
    else Failure(UnsupportedFileExtension(f))

  sealed abstract class SupportedFileType(fileExtension: String) extends Serializable with Product {
    def matchesFileExtension(f: File): Boolean = f.getName.endsWith(fileExtension)
  }

  final case object DarFile extends SupportedFileType(".dar")
  final case object DalfFile extends SupportedFileType(".dalf")

  case class UnsupportedFileExtension(file: File)
      extends RuntimeException(s"Unsupported file extension: ${file.getAbsolutePath}")
}
Example 17
Source File: DarManifestReaderTest.scala From daml with Apache License 2.0

// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.lf.archive

import java.io.{ByteArrayInputStream, InputStream}
import java.nio.charset.Charset

import com.daml.lf.archive.DarManifestReader.DarManifestReaderException
import org.scalatest.{Inside, Matchers, WordSpec}

import scala.util.{Failure, Success}

class DarManifestReaderTest extends WordSpec with Matchers with Inside {

  private val unicode = Charset.forName("UTF-8")

  "should read dalf names from manifest, real scenario with Dalfs line split" in {
    val manifest = """Manifest-Version: 1.0
      |Created-By: Digital Asset packager (DAML-GHC)
      |Main-Dalf: com.daml.lf.archive:DarReaderTest:0.1.dalf
      |Dalfs: com.daml.lf.archive:DarReaderTest:0.1.dalf, daml-pri
      | m.dalf
      |Format: daml-lf
      |Encryption: non-encrypted""".stripMargin

    val inputStream: InputStream = new ByteArrayInputStream(manifest.getBytes(unicode))
    val actual = DarManifestReader.dalfNames(inputStream)

    actual shouldBe Success(
      Dar("com.daml.lf.archive:DarReaderTest:0.1.dalf", List("daml-prim.dalf")))

    inputStream.close()
  }

  "should read dalf names from manifest, Main-Dalf returned in the head" in {
    val manifest = """Main-Dalf: A.dalf
      |Dalfs: B.dalf, C.dalf, A.dalf, E.dalf
      |Format: daml-lf
      |Encryption: non-encrypted""".stripMargin

    val inputStream: InputStream = new ByteArrayInputStream(manifest.getBytes(unicode))
    val actual = DarManifestReader.dalfNames(inputStream)

    actual shouldBe Success(Dar("A.dalf", List("B.dalf", "C.dalf", "E.dalf")))

    inputStream.close()
  }

  "should read dalf names from manifest, can handle one Dalf per manifest" in {
    val manifest = """Main-Dalf: A.dalf
      |Dalfs: A.dalf
      |Format: daml-lf
      |Encryption: non-encrypted""".stripMargin

    val inputStream: InputStream = new ByteArrayInputStream(manifest.getBytes(unicode))
    val actual = DarManifestReader.dalfNames(inputStream)

    actual shouldBe Success(Dar("A.dalf", List.empty))

    inputStream.close()
  }

  "should return failure if Format is not daml-lf" in {
    val manifest = """Main-Dalf: A.dalf
      |Dalfs: B.dalf, C.dalf, A.dalf, E.dalf
      |Format: anything-different-from-daml-lf
      |Encryption: non-encrypted""".stripMargin

    val inputStream: InputStream = new ByteArrayInputStream(manifest.getBytes(unicode))
    val actual = DarManifestReader.dalfNames(inputStream)

    inside(actual) {
      case Failure(DarManifestReaderException(msg)) =>
        msg shouldBe "Unsupported format: anything-different-from-daml-lf"
    }

    inputStream.close()
  }
}
Example 18
Source File: TryOps.scala From daml with Apache License 2.0

// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.lf.data

import scala.util.{Failure, Success, Try}

private[daml] object TryOps {

  def sequence[A](list: List[Try[A]]): Try[List[A]] = {
    val zero: Try[List[A]] = Success(List.empty[A])
    list.foldRight(zero)((a, as) => map2(a, as)(_ :: _))
  }

  def map2[A, B, C](ta: Try[A], tb: Try[B])(f: (A, B) => C): Try[C] =
    for {
      a <- ta
      b <- tb
    } yield f(a, b)

  object Bracket {

    def bracket[A, B](fa: Try[A])(cleanup: A => Try[B]): Bracket[A, B] =
      new Bracket(fa, cleanup)

    final class Bracket[A, B](fa: Try[A], cleanup: A => Try[B]) {
      def flatMap[C](f: A => Try[C]): Try[C] = {
        val fc = fa.flatMap(a => f(a))
        val fb = fa.flatMap(a => cleanup(a))
        (fc, fb) match {
          case (Success(_), Success(_)) => fc
          case (e @ Failure(_), _) => e
          case (Success(_), Failure(e)) => Failure(e)
        }
      }

      def map[C](f: A => C): Try[C] = flatMap(a => Try(f(a)))
    }
  }
}
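The bracket helper guarantees cleanup runs whether or not the body succeeds, and keeps the body's error when both fail. A minimal sketch of using it for resource handling (the file-reading scenario is assumed for illustration):

import com.daml.lf.data.TryOps.Bracket.bracket
import scala.io.Source
import scala.util.Try

// Read a file's first line, always closing the source afterwards.
def firstLine(path: String): Try[String] =
  bracket(Try(Source.fromFile(path)))(src => Try(src.close()))
    .flatMap(src => Try(src.getLines().next()))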
Example 19
Source File: TryOpsTest.scala From daml with Apache License 2.0

// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.lf.data

import org.scalatest.{Matchers, WordSpec}
import org.scalatest.prop.GeneratorDrivenPropertyChecks
import com.daml.lf.data.TryOps.Bracket.bracket

import scala.util.{Failure, Success, Try}

class TryOpsTest extends WordSpec with Matchers with GeneratorDrivenPropertyChecks {

  "bracket should call clean after successful calculation" in forAll { (a: Int, b: Int) =>
    var calls = List.empty[String]

    def clean(x: Int): Try[Unit] = {
      calls = s"clean $x" :: calls
      Success(())
    }

    def add(x: Int)(y: Int): Try[Int] = {
      calls = s"add $x $y" :: calls
      Success(x + y)
    }

    val actual = bracket(Try(a))(clean).flatMap(add(b))

    actual shouldBe Success(a + b)
    calls.reverse shouldBe List(s"add $b $a", s"clean $a")
  }

  "bracket should fail if clean failed" in forAll { (a: Int, b: Int, e: Throwable) =>
    var calls = List.empty[String]

    def clean(x: Int): Try[Unit] = {
      calls = s"clean $x $e" :: calls
      Failure(e)
    }

    def add(x: Int)(y: Int): Try[Int] = {
      calls = s"add $x $y" :: calls
      Success(x + y)
    }

    val actual = bracket(Try(a))(clean).flatMap(add(b))

    actual shouldBe Failure(e)
    calls.reverse shouldBe List(s"add $b $a", s"clean $a $e")
  }

  "bracket should call clean if calculation fails" in forAll { (a: Int, b: Int, e: Throwable) =>
    var calls = List.empty[String]

    def clean(x: Int): Try[Unit] = {
      calls = s"clean $x" :: calls
      Success(())
    }

    def add(x: Int)(y: Int): Try[Int] = {
      calls = s"add $x $y" :: calls
      Failure(e)
    }

    val actual = bracket(Try(a))(clean).flatMap(add(b))

    actual shouldBe Failure(e)
    calls.reverse shouldBe List(s"add $b $a", s"clean $a")
  }

  "bracket should return calculation error if if both calculation and clean fail" in forAll {
    (a: Int, b: Int, e1: Throwable, e2: Throwable) =>
      var calls = List.empty[String]

      def clean(x: Int): Try[Unit] = {
        calls = s"clean $x $e2" :: calls
        Failure(e2)
      }

      def add(x: Int)(y: Int): Try[Int] = {
        calls = s"add $x $y" :: calls
        Failure(e1)
      }

      val actual = bracket(Try(a))(clean).flatMap(add(b))

      actual shouldBe Failure(e1)
      calls.reverse shouldBe List(s"add $b $a", s"clean $a $e2")
  }
}
Example 20
Source File: CodegenMain.scala From daml with Apache License 2.0

// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.codegen

import com.daml.codegen.{Main => ScalaCodegen}
import com.daml.lf.codegen.conf.CodegenConfigReader.{CodegenDest, Java, Scala}
import com.daml.lf.codegen.conf.{CodegenConfigReader, Conf}
import com.daml.lf.codegen.{CodeGenRunner => JavaCodegen}

import scala.util.{Failure, Success, Try}

object CodegenMain {

  sealed abstract class ExitCode(val code: Int)
  object OK extends ExitCode(0)
  object UsageError extends ExitCode(101)
  object CodegenError extends ExitCode(201)

  private final case class FrontEndConfig(mode: Option[CodegenDest])

  def main(args: Array[String]): Unit = {
    val exitCode: ExitCode = parseFrontEndConfig(args) match {
      case Some(FrontEndConfig(Some(Java))) =>
        javaCodegen(args.tail)
      case Some(FrontEndConfig(Some(Scala))) =>
        scalaCodegen(args.tail)
      case Some(FrontEndConfig(None)) | None =>
        println("\n")
        cliConfigParser.showUsage()
        UsageError
    }
    sys.exit(exitCode.code)
  }

  private def javaCodegen(args: Array[String]): ExitCode = {
    println("Java codegen")
    runCodegen(JavaCodegen.run, codegenConfig(args, Java))
  }

  private def scalaCodegen(args: Array[String]): ExitCode = {
    println("Scala codegen")
    runCodegen(ScalaCodegen.generateCode, codegenConfig(args, Scala))
  }

  private def runCodegen(generate: Conf => Unit, configO: Option[Conf]): ExitCode =
    configO match {
      case None =>
        println("\n")
        Conf.parser.showUsage
        UsageError
      case Some(conf) =>
        Try(generate(conf)) match {
          case Success(_) =>
            OK
          case Failure(t) =>
            println(s"Error generating code: ${t.getMessage}")
            CodegenError
        }
    }

  private def codegenConfig(args: Array[String], mode: CodegenDest): Option[Conf] =
    if (args.nonEmpty) {
      println(s"Reading configuration from command line input: ${args.mkString(",")}")
      Conf.parse(args)
    } else {
      println(s"Reading configuration from project configuration file")
      CodegenConfigReader.readFromEnv(mode) match {
        case Left(e) => println(s"Error reading project configuration file: $e"); None
        case Right(c) => Some(c)
      }
    }

  private def parseFrontEndConfig(args: Seq[String]): Option[FrontEndConfig] = args match {
    case h +: _ => cliConfigParser.parse(Seq(h), FrontEndConfig(None))
    case _ => None
  }

  private val cliConfigParser = new scopt.OptionParser[FrontEndConfig]("codegen-front-end") {
    head("Codegen front end")

    override def showUsageOnError = false

    help("help").text("Prints this usage text")
    note("\n")

    cmd("java")
      .action((_, c) => c.copy(mode = Some(Java)))
      .text("To generate Java code:\n")
      .children(help("help").text("Java codegen help"))
    note("\n")

    cmd("scala")
      .action((_, c) => c.copy(mode = Some(Scala)))
      .text("To generate Scala code:\n")
      .children(help("help").text("Scala codegen help"))
    note("\n")
  }
}
Example 21
Source File: Positive.scala From daml with Apache License 2.0

// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.ledger.client.binding.config

import pureconfig.ConfigConvert

import scala.util.{Failure, Success, Try}

class Positive[T: Numeric] private (val value: T) {
  override def toString: String = value.toString
}

object Positive {
  def apply[T](num: T)(implicit numeric: Numeric[T]): Try[Positive[T]] = {
    if (numeric.lteq(num, numeric.zero)) {
      Failure(new IllegalArgumentException(s"$num must be positive."))
    } else {
      Success(new Positive(num))
    }
  }

  def unsafe[T](num: T)(implicit numeric: Numeric[T]): Positive[T] = Positive(num).get

  implicit val configConvertL: ConfigConvert[Positive[Long]] = convertPositive(_.toLong)

  implicit val configConvertI: ConfigConvert[Positive[Int]] = convertPositive(_.toInt)

  private def convertPositive[T: Numeric](readStr: String => T) = {
    ConfigConvert.viaStringTry[Positive[T]]({ s =>
      for {
        number <- Try(readStr(s))
        positive <- apply(number)
      } yield positive
    }, _.toString)
  }
}
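Illustrative uses of the smart constructor above (values arbitrary):

import scala.util.{Failure, Success}

Positive(42) match {
  case Success(p) => println(s"accepted: $p") // accepted: 42
  case Failure(e) => println(s"rejected: ${e.getMessage}")
}

Positive(-1)         // Failure(IllegalArgumentException: -1 must be positive.)
Positive.unsafe(10L) // Positive[Long]; throws for non-positive input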
Example 22
Source File: LedgerClientConfigTest.scala From daml with Apache License 2.0

// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.ledger.client.binding.config

import java.io.File

import com.daml.ledger.client.binding.LedgerClientConfigurationError.MalformedTypesafeConfig
import com.typesafe.config.ConfigFactory
import org.scalatest.{Matchers, WordSpec}

import scala.util.Success

class LedgerClientConfigTest extends WordSpec with Matchers {

  "TypeSafePlatformConfig" should {

    "parse the reference conf without errors" in {
      LedgerClientConfig.create() should be(a[Success[_]])
    }

    "parse the expected values out of the reference conf" in {
      val config = LedgerClientConfig.create().get

      config.ledgerId shouldEqual None
      config.commandClient.maxCommandsInFlight shouldEqual 256
      config.commandClient.maxParallelSubmissions shouldEqual 32
      config.commandClient.defaultDeduplicationTime.getSeconds shouldEqual 30
      config.maxRetryTime.getSeconds shouldEqual 60
      config.ssl shouldBe None
    }

    "parse the expected values out of the mock config" in {
      val configStr = """
        |ledger-client {
        |  ledger-id = "ledgerId_mock"
        |  command-client {
        |    max-commands-in-flight = 260
        |    max-parallel-submissions = 40
        |    default-deduplication-time = PT40S
        |  }
        |  max-retry-time = PT45S
        |  ssl {
        |    client-key-cert-chain-file = "file1"
        |    client-key-file = "file2"
        |    trusted-certs-file = "file3"
        |  }
        |}""".stripMargin

      val clientConfig = LedgerClientConfig.create(ConfigFactory.parseString(configStr)).get

      clientConfig.ledgerId shouldEqual Some("ledgerId_mock")
      clientConfig.commandClient.maxCommandsInFlight shouldEqual 260
      clientConfig.commandClient.maxParallelSubmissions shouldEqual 40
      clientConfig.commandClient.defaultDeduplicationTime.getSeconds shouldEqual 40
      clientConfig.maxRetryTime.getSeconds shouldEqual 45
      clientConfig.ssl.get.clientKeyCertChainFile shouldBe new File("file1")
      clientConfig.ssl.get.clientKeyFile shouldBe new File("file2")
      clientConfig.ssl.get.trustedCertsFile shouldBe new File("file3")
    }

    "return the expected type of Throwable on parse errors" in {
      LedgerClientConfig.create(ConfigFactory.empty()).failed.get should be(
        a[MalformedTypesafeConfig])
    }
  }
}
Example 23
Source File: HeadSandbox.scala From daml with Apache License 2.0

// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.navigator.test.runner

import java.io.File

import scala.concurrent._
import scala.concurrent.duration._
import scala.sys.process.{Process, ProcessLogger}
import scala.util.Success

// NOTE: this excerpt is truncated. The enclosing object and most of the
// SandboxLogger class (a ProcessLogger used below to detect sandbox startup)
// did not survive extraction; only the tail of that class and runAsync remain.

  def buffer[T](f: => T): T = f
}

def runAsync(port: Int, darFile: File, scenario: String): Unit => Unit = {
  // Run the sandbox.
  val logger = new SandboxLogger
  val sandbox = Process(
    Seq("sbt", s"sandbox/run ${darFile.getAbsolutePath} --port $port --scenario $scenario"),
    new File("../../../ledger"))
    .run(logger)

  // Sbt takes a long time to compile and start up, longer than Navigator keeps trying to connect.
  // Block for a while until the sandbox shows signs of being started up.
  logger.waitForStartup(300.seconds)

  val shutdown = (_: Unit) => {
    sandbox.destroy()
  }

  sys addShutdownHook shutdown(())

  _ => shutdown(())
}
}
Example 24
Source File: CommandStatusRow.scala From daml with Apache License 2.0

// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.navigator.data

import com.daml.ledger.api.refinements.ApiTypes
import com.daml.navigator.model._

import scala.util.{Failure, Success, Try}
import scalaz.syntax.tag._

final case class CommandStatusRow(
    commandId: String,
    isCompleted: Boolean,
    subclassType: String,
    code: Option[String],
    details: Option[String],
    transactionId: Option[String]
) {

  def toCommandStatus(
      transactionById: ApiTypes.TransactionId => Try[Option[Transaction]]): Try[CommandStatus] = {
    subclassType match {
      case "CommandStatusWaiting" =>
        Success(CommandStatusWaiting())
      case "CommandStatusError" =>
        (for {
          c <- code
          d <- details
        } yield {
          CommandStatusError(c, d)
        }).fold[Try[CommandStatus]](
          Failure(
            DeserializationFailed(s"Failed to deserialize CommandStatusError from row: $this"))
        )(
          Success(_)
        )
      case "CommandStatusSuccess" =>
        transactionId.map { tId =>
          transactionById(ApiTypes.TransactionId(tId))
        } match {
          case Some(Success(Some(tx: Transaction))) => Success(CommandStatusSuccess(tx))
          case Some(Failure(e)) =>
            Failure(RecordNotFound(
              s"Failed to load transaction $transactionId for CommandStatus with commandId: $commandId. Exception: ${e.getMessage}"))
          case Some(Success(None)) =>
            Failure(RecordNotFound(
              s"Failed to load transaction $transactionId for CommandStatus with commandId: $commandId"))
          case None =>
            Failure(
              DeserializationFailed(
                s"TransactionId is missing for CommandStatusSuccess row: $this"))
        }
      case "CommandStatusUnknown" =>
        Success(CommandStatusUnknown())
      case s =>
        Failure(DeserializationFailed(s"unknown subclass type for CommandStatus: $s"))
    }
  }
}

object CommandStatusRow {

  def fromCommandStatus(commandId: ApiTypes.CommandId, cs: CommandStatus): CommandStatusRow = {
    cs match {
      case w: CommandStatusWaiting =>
        CommandStatusRow(commandId.unwrap, w.isCompleted, "CommandStatusWaiting", None, None, None)
      case e: CommandStatusError =>
        CommandStatusRow(
          commandId.unwrap,
          e.isCompleted,
          "CommandStatusError",
          Some(e.code),
          Some(e.details),
          None)
      case s: CommandStatusSuccess =>
        CommandStatusRow(
          commandId.unwrap,
          s.isCompleted,
          "CommandStatusSuccess",
          None,
          None,
          Some(s.tx.id.unwrap))
      case u: CommandStatusUnknown =>
        CommandStatusRow(commandId.unwrap, u.isCompleted, "CommandStatusUnknown", None, None, None)
    }
  }
}
Example 25
Source File: Read.scala From daml with Apache License 2.0

// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.navigator

import scala.reflect.ClassTag
import scala.util.{Failure, Success, Try}

// NOTE: this excerpt is truncated; the beginning of the enclosing `object Read`
// (including the Read[To] type, fromFunction, and fail) is missing.

  def fromUnsafeFunction[To](f: String => To)(implicit classTag: ClassTag[To]): Read[To] =
    fromFunction[To] { str =>
      Try(f(str)) match {
        case Success(str) => Right(str)
        case Failure(f) => Read.fail[To](classTag)
      }
    }

  implicit val readString = Read.fromFunction[String](str => Right(str))
  implicit val readBoolean = Read.fromUnsafeFunction[Boolean](_.toBoolean)
  implicit val readInt = Read.fromUnsafeFunction[Int](_.toInt)
  implicit val readFloat = Read.fromUnsafeFunction[Float](_.toFloat)
  implicit val readDouble = Read.fromUnsafeFunction[Double](_.toDouble)
}
Example 26
Source File: ApiCodecVerboseSpec.scala From daml with Apache License 2.0

// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.navigator.json

import com.daml.navigator.model
import org.scalatest.{Matchers, WordSpec}

import scala.util.{Success, Try}

class ApiCodecVerboseSpec extends WordSpec with Matchers {
  import com.daml.navigator.{DamlConstants => C}

  private def serializeAndParse(
      value: model.ApiValue,
      typ: model.DamlLfType): Try[model.ApiValue] = {
    import com.daml.navigator.json.ApiCodecVerbose.JsonImplicits._
    import spray.json._

    for {
      serialized <- Try(value.toJson.prettyPrint)
      json <- Try(serialized.parseJson)
      parsed <- Try(json.convertTo[model.ApiValue])
    } yield parsed
  }

  "API verbose JSON codec" when {
    "serializing and parsing a value" should {

      "work for Text" in {
        serializeAndParse(C.simpleTextV, C.simpleTextT) shouldBe Success(C.simpleTextV)
      }
      "work for Int64" in {
        serializeAndParse(C.simpleInt64V, C.simpleInt64T) shouldBe Success(C.simpleInt64V)
      }
      "work for Decimal" in {
        serializeAndParse(C.simpleDecimalV, C.simpleDecimalT) shouldBe Success(C.simpleDecimalV)
      }
      "work for Unit" in {
        serializeAndParse(C.simpleUnitV, C.simpleUnitT) shouldBe Success(C.simpleUnitV)
      }
      "work for Date" in {
        serializeAndParse(C.simpleDateV, C.simpleDateT) shouldBe Success(C.simpleDateV)
      }
      "work for Timestamp" in {
        serializeAndParse(C.simpleTimestampV, C.simpleTimestampT) shouldBe Success(
          C.simpleTimestampV)
      }
      "work for Optional" in {
        serializeAndParse(C.simpleOptionalV, C.simpleOptionalT(C.simpleTextT)) shouldBe Success(
          C.simpleOptionalV)
      }
      "work for EmptyRecord" in {
        serializeAndParse(C.emptyRecordV, C.emptyRecordTC) shouldBe Success(C.emptyRecordV)
      }
      "work for SimpleRecord" in {
        serializeAndParse(C.simpleRecordV, C.simpleRecordTC) shouldBe Success(C.simpleRecordV)
      }
      "work for SimpleVariant" in {
        serializeAndParse(C.simpleVariantV, C.simpleVariantTC) shouldBe Success(C.simpleVariantV)
      }
      "work for ComplexRecord" in {
        serializeAndParse(C.complexRecordV, C.complexRecordTC) shouldBe Success(C.complexRecordV)
      }
      "work for Tree" in {
        serializeAndParse(C.treeV, C.treeTC) shouldBe Success(C.treeV)
      }
      "work for TextMap" in {
        serializeAndParse(C.simpleTextMapV, C.simpleTextMapT(C.simpleTextT)) shouldBe Success(
          C.simpleTextMapV)
      }
      "work for GenMap" in {
        serializeAndParse(C.complexGenMapV, C.complexGenMapT(C.treeNodeTC, C.simpleInt64T)) shouldBe Success(
          C.complexGenMapV)
      }
    }
  }
}
Example 27
Source File: DamlLfCodecSpec.scala From daml with Apache License 2.0

// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.navigator.json

import com.daml.navigator.json.DamlLfCodec.JsonImplicits._
import com.daml.navigator.model
import org.scalatest.{Matchers, WordSpec}

import scala.util.{Success, Try}

class DamlLfCodecSpec extends WordSpec with Matchers {
  import com.daml.navigator.{DamlConstants => C}

  private def serializeAndParse[T](value: T)(implicit fmt: spray.json.JsonFormat[T]): Try[T] = {
    import spray.json._

    for {
      serialized <- Try(value.toJson.prettyPrint)
      json <- Try(serialized.parseJson)
      parsed <- Try(json.convertTo[T])
    } yield parsed
  }

  "DAML-LF JSON codec" when {
    "serializing and parsing a DAML-LF object" should {

      "work for DamlLFIdentifier" in {
        serializeAndParse(C.ref0) shouldBe Success(C.ref0)
      }
      "work for DamlLfTypePrim(Text)" in {
        serializeAndParse[model.DamlLfType](C.simpleTextT) shouldBe Success(C.simpleTextT)
      }
      "work for DamlLfTypeCon(SimpleRecord)" in {
        serializeAndParse[model.DamlLfType](C.simpleRecordTC) shouldBe Success(C.simpleRecordTC)
      }
      "work for DamlLfTypeCon(Tree)" in {
        serializeAndParse[model.DamlLfType](C.treeTC) shouldBe Success(C.treeTC)
      }
      "work for DamlLfDefDataType(SimpleRecord)" in {
        serializeAndParse[model.DamlLfDefDataType](C.simpleRecordGC) shouldBe Success(
          C.simpleRecordGC)
      }
      "work for DamlLfDefDataType(Tree)" in {
        serializeAndParse[model.DamlLfDefDataType](C.treeGC) shouldBe Success(C.treeGC)
      }
      "work for DamlLfDefDataType(ComplexRecord)" in {
        serializeAndParse[model.DamlLfDefDataType](C.complexRecordGC) shouldBe Success(
          C.complexRecordGC)
      }
    }
  }
}
Example 28
Source File: ApiCodecCompressedSpec.scala From daml with Apache License 2.0

// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.navigator.json

import com.daml.navigator.model
import org.scalatest.{Matchers, WordSpec}

import scala.util.{Success, Try}

class ApiCodecCompressedSpec extends WordSpec with Matchers {
  import com.daml.navigator.{DamlConstants => C}

  private def serializeAndParse(
      value: model.ApiValue,
      typ: model.DamlLfType): Try[model.ApiValue] = {
    import com.daml.lf.value.json.ApiCodecCompressed
    import ApiCodecCompressed.JsonImplicits._
    import spray.json._

    for {
      serialized <- Try(value.toJson.prettyPrint)
      json <- Try(serialized.parseJson)
      parsed <- Try(ApiCodecCompressed.jsValueToApiValue(json, typ, C.allTypes.get _))
    } yield parsed
  }

  "API verbose JSON codec" when {
    "serializing and parsing a value" should {

      "work for SimpleRecord" in {
        serializeAndParse(C.simpleRecordV, C.simpleRecordTC) shouldBe Success(C.simpleRecordV)
      }
      "work for SimpleVariant" in {
        serializeAndParse(C.simpleVariantV, C.simpleVariantTC) shouldBe Success(C.simpleVariantV)
      }
      "work for ComplexRecord" in {
        serializeAndParse(C.complexRecordV, C.complexRecordTC) shouldBe Success(C.complexRecordV)
      }
      "work for Tree" in {
        serializeAndParse(C.treeV, C.treeTC) shouldBe Success(C.treeV)
      }
      "work for Enum" in {
        serializeAndParse(C.redV, C.redTC) shouldBe Success(C.redV)
      }
    }
  }
}
Example 29
Source File: Port.scala From daml with Apache License 2.0

// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.ports

import scala.util.{Failure, Success, Try}

final case class Port private (value: Int) extends AnyVal {
  override def toString: String = value.toString
}

object Port {
  private val ValidPorts: Range = 0 until 0x10000

  val Dynamic = new Port(0)

  def apply(value: Int): Port = create(value).get

  def validate(value: Int): Try[Unit] = create(value).map(_ => ())

  private def create(value: Int): Try[Port] =
    if (ValidPorts.contains(value))
      Success(new Port(value))
    else
      Failure(
        new IllegalArgumentException(
          s"Ports must be in the range ${ValidPorts.start}—${ValidPorts.last}."))
}
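Illustrative uses (port numbers arbitrary):

import scala.util.Success

Port(8080)                     // Port(8080)
Port.validate(8080)            // Success(())
Port.validate(70000).isFailure // true: 70000 is outside 0..65535
Port.Dynamic                   // Port(0), which lets the OS pick a free port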
Example 30
Source File: Spin.scala From iotchain with MIT License

package jbok.app.components

import com.thoughtworks.binding
import com.thoughtworks.binding.Binding
import org.scalajs.dom.Element

import scala.util.{Failure, Success, Try}

object Spin {
  @binding.dom
  def render(color: String = "blue"): Binding[Element] =
    <div class="spinner">
      <div class={s"rect1 $color"}></div>
      <div class={s"rect2 $color"}></div>
      <div class={s"rect3 $color"}></div>
      <div class={s"rect4 $color"}></div>
      <div class={s"rect5 $color"}></div>
    </div>

  @binding.dom
  def renderFuture[A](fb: Binding[Option[Try[A]]]): Binding[Element] =
    fb.bind match {
      case Some(Success(a)) => <div>{a.toString}</div>
      case Some(Failure(e)) => <div>{e.toString}</div>
      case None             => <div>{render().bind}</div>
    }
}
Example 31
Source File: ChaosActorInterface.scala From eventuate-chaos with Apache License 2.0

package com.rbmhtechnology.eventuate.chaos

import akka.actor.ActorRef
import akka.io.Tcp
import akka.util.ByteString
import akka.pattern.ask
import akka.util.Timeout
import com.rbmhtechnology.eventuate.chaos.ChaosActorInterface.HealthCheckResult
import com.rbmhtechnology.eventuate.chaos.ChaosActorInterface.HealthCheck

import scala.concurrent.duration._
import scala.util.Failure
import scala.util.Success

object ChaosActorInterface {
  case class HealthCheck(requester: ActorRef)
  case class HealthCheckResult(state: Int, requester: ActorRef)
}

class ChaosActorInterface(chaosActor: ActorRef) extends ChaosInterface {
  implicit val timeout = Timeout(1.seconds)

  def handleCommand = {
    case ("persist", None, recv) =>
      val check = HealthCheck(recv)

      (chaosActor ? check).mapTo[HealthCheckResult] onComplete {
        case Success(result) =>
          result.requester ! Tcp.Write(ByteString(result.state.toString))
          result.requester ! Tcp.Close
        case Failure(e) =>
          recv ! Tcp.Close
      }
  }
}
Example 32
Source File: TestSpec.scala From nanotest-strawman with Apache License 2.0

package verify

import scala.concurrent.{ ExecutionContext, Future, Promise }
import scala.util.control.NonFatal
import scala.util.{ Failure, Success }
import verify.sourcecode.SourceLocation

case class TestSpec[I, +O](name: String, f: I => Future[Result[O]])
    extends (I => Future[Result[O]]) {
  override def apply(v1: I): Future[Result[O]] = f(v1)
}

object TestSpec {
  def async[Env](name: String, cb: Env => Future[Unit])(
      implicit ec: ExecutionContext): TestSpec[Env, Unit] =
    TestSpec(
      name,
      { env =>
        val f: Future[Unit] =
          try cb(env)
          catch { case NonFatal(ex) => Future.failed(ex) }

        val p = Promise[Result[Unit]]()
        f.onComplete {
          case Success(_)  => p.success(Result.Success(()))
          case Failure(ex) => p.success(Result.from(ex))
        }
        p.future
      }
    )

  def sync[Env](name: String, cb: Env => Void): TestSpec[Env, Unit] =
    TestSpec(
      name,
      { env =>
        try {
          cb(env) match {
            case Void.UnitRef =>
              Future.successful(Result.Success(()))
            case Void.Caught(ref, loc) =>
              Future.successful(unexpected(ref, loc))
          }
        } catch {
          case NonFatal(ex) =>
            Future.successful(Result.from(ex))
        }
      }
    )

  private def unexpected[A](ref: A, loc: SourceLocation): Result[Nothing] =
    Result.Failure(
      s"Problem with test spec, expecting `Unit`, but received: $ref ",
      None,
      Some(loc)
    )
}
Example 33
Source File: ArangoWriteAheadLog.scala From scarango with MIT License | 5 votes |
package com.outr.arango import com.outr.arango.api.{APIWalTail, WALOperation, WALOperations} import io.youi.client.HttpClient import io.youi.util.Time import reactify.Channel import scala.concurrent.duration._ import scala.concurrent.{ExecutionContext, Future} import scala.util.{Failure, Success} class ArangoWriteAheadLog(client: HttpClient) { def tail(global: Boolean = false, from: Option[Long] = None, to: Option[Long] = None, lastScanned: Long = 0L, chunkSize: Option[Long] = None, syncerId: Option[Long] = None, serverId: Option[Long] = None, clientId: String = "scarango") (implicit ec: ExecutionContext): Future[WALOperations] = { APIWalTail.get( client = client, global = Some(global), from = from, to = to, lastScanned = lastScanned, chunkSize = chunkSize, syncerId = syncerId, serverId = serverId, clientId = Some(clientId) ) } def monitor(global: Boolean = false, from: Option[Long] = None, to: Option[Long] = None, lastScanned: Long = 0L, chunkSize: Option[Long] = None, syncerId: Option[Long] = None, serverId: Option[Long] = None, clientId: String = "scarango", delay: FiniteDuration = 5.seconds, skipHistory: Boolean = true, failureHandler: Throwable => Option[FiniteDuration] = t => { scribe.error("Monitor error", t) None }) (implicit ec: ExecutionContext): WriteAheadLogMonitor = { val m = new WriteAheadLogMonitor(delay, skipHistory, failureHandler) m.run(tail(global, from, to, lastScanned, chunkSize, syncerId, serverId, clientId)) m } } class WriteAheadLogMonitor(delay: FiniteDuration, skipHistory: Boolean, failureHandler: Throwable => Option[FiniteDuration]) extends Channel[WALOperation] { private var keepAlive = false private var last: Option[WALOperations] = None private var from: Long = 0L private var skipped: Boolean = false val tailed: Channel[WALOperations] = Channel[WALOperations] private[arango] def run(future: Future[WALOperations])(implicit ec: ExecutionContext): Unit = { keepAlive = true future.onComplete { complete => val d = complete match { case Success(operations) => try { if (skipHistory && !skipped) { if (operations.lastIncluded == 0L) { skipped = true } } else { operations.operations.foreach(static) } last = Some(operations) from = math.max(from, operations.lastIncluded) tailed @= operations Some(delay) } catch { case t: Throwable => failureHandler(t) } case Failure(exception) => failureHandler(exception) } d match { case Some(delay) if keepAlive => last.foreach { ops => Time.delay(delay).foreach(_ => run(ops.tail(from))) } case _ => // Error or keepAlive caused monitor to stop } } } def stop(): Unit = keepAlive = false }
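Wiring the monitor up might look like the sketch below; `client` is assumed to be an already-configured `io.youi.client.HttpClient`, and `attach` is the subscription method on reactify's `Channel`:

import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.duration._

val wal = new ArangoWriteAheadLog(client)            // client: HttpClient, assumed in scope
val monitor = wal.monitor(delay = 2.seconds)         // polls the WAL tail every 2 seconds
monitor.attach(op => println(s"WAL operation: $op")) // one callback per WALOperation
// ... later:
monitor.stop()                                       // stops rescheduling after the current poll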
Example 34
Source File: GoldenCodecLaws.scala From circe-golden with Apache License 2.0 | 5 votes |
package io.circe.testing.golden import cats.instances.list._, cats.instances.try_._ import cats.syntax.traverse._ import cats.laws._ import io.circe.{ Json, Printer } import io.circe.testing.CodecLaws import scala.util.{ Failure, Success, Try } trait GoldenCodecLaws[A] extends CodecLaws[A] { protected def goldenExamples: Try[List[(A, String)]] final def goldenDecoding: Try[List[IsEq[A]]] = goldenExamples.flatMap { _.traverse { case (value, encoded) => io.circe.parser.decode[A](encoded)(decode) match { case Left(error) => Failure(error) case Right(decoded) => Success(decoded <-> value) } } } final def goldenEncoding: Try[List[IsEq[String]]] = goldenExamples.map { _.map { case (value, encoded) => printJson(encode(value)) <-> encoded } } }
Example 35
Source File: GoldenCodecTests.scala From circe-golden with Apache License 2.0 | 5 votes |
package io.circe.testing.golden import cats.instances.string._ import cats.kernel.Eq import cats.laws.IsEq import cats.laws.discipline.catsLawsIsEqToProp import io.circe.{ Decoder, Encoder, Json, Printer } import io.circe.testing.CodecTests import org.scalacheck.{ Arbitrary, Prop, Shrink } import scala.reflect.runtime.universe.TypeTag import scala.util.{ Failure, Success, Try } trait GoldenCodecTests[A] extends CodecTests[A] { def laws: GoldenCodecLaws[A] private[this] def tryListToProp[A: Eq](result: Try[List[IsEq[A]]]): Prop = result match { case Failure(error) => Prop.exception(error) case Success(equalities) => Prop.all(equalities.map(catsLawsIsEqToProp(_)): _*) } def goldenCodec( implicit arbitraryA: Arbitrary[A], shrinkA: Shrink[A], eqA: Eq[A], arbitraryJson: Arbitrary[Json], shrinkJson: Shrink[Json] ): RuleSet = new DefaultRuleSet( name = "goldenCodec", parent = Some(codec), "decoding golden files" -> tryListToProp(laws.goldenDecoding), "encoding golden files" -> tryListToProp(laws.goldenEncoding) ) def unserializableGoldenCodec( implicit arbitraryA: Arbitrary[A], shrinkA: Shrink[A], eqA: Eq[A], arbitraryJson: Arbitrary[Json], shrinkJson: Shrink[Json] ): RuleSet = new DefaultRuleSet( name = "goldenCodec", parent = Some(unserializableCodec), "decoding golden files" -> tryListToProp(laws.goldenDecoding), "encoding golden files" -> tryListToProp(laws.goldenEncoding) ) } object GoldenCodecTests { def apply[A: Decoder: Encoder: Arbitrary: TypeTag]: GoldenCodecTests[A] = apply[A](ResourceFileGoldenCodecLaws[A]()) def apply[A: Decoder: Encoder: Arbitrary: TypeTag](printer: Printer): GoldenCodecTests[A] = apply[A](ResourceFileGoldenCodecLaws[A](printer = printer)) def apply[A: Decoder: Encoder: Arbitrary: TypeTag](count: Int): GoldenCodecTests[A] = apply[A](ResourceFileGoldenCodecLaws[A](count = count)) def apply[A: Decoder: Encoder: Arbitrary: TypeTag](count: Int, printer: Printer): GoldenCodecTests[A] = apply[A](ResourceFileGoldenCodecLaws[A](count = count, printer = printer)) def apply[A: Decoder: Encoder: Arbitrary](laws0: GoldenCodecLaws[A]): GoldenCodecTests[A] = new GoldenCodecTests[A] { val laws: GoldenCodecLaws[A] = laws0 } }
Example 36
Source File: EscapingTests.scala From circe-yaml with Apache License 2.0 | 5 votes |
package io.circe.yaml import io.circe.Encoder import org.scalatest.flatspec.AnyFlatSpec import org.scalatestplus.scalacheck.ScalaCheckDrivenPropertyChecks import scala.util.{ Success, Try } import org.scalatest.matchers.should.Matchers class EscapingTests extends AnyFlatSpec with Matchers with ScalaCheckDrivenPropertyChecks { import io.circe.syntax._ import io.circe.yaml.Printer.spaces2.pretty import io.circe.yaml.parser.parse // according to the YAML spec (section 5.1: character set) def isPrintable(c: Char): Boolean = ('\t' == c) || ('\n' == c) || ('\r' == c) || (' ' <= c && c <= '~') || ('\u0085' == c) || ('\u00a0' <= c && c <= '\ud7ff') || ('\ue000' <= c && c <= '\ufffd') def test1(c: Char): Unit = { val r = "'\\u%04X'".format(c.toInt) def repr[A](a: A): (String, A) = (r, a) val json = c.toString.asJson val s = pretty(json) if (s.contains(c)) repr(isPrintable(c)) shouldBe repr(true) else () // we do not enforce that printable chars are never escaped repr(s.forall(isPrintable)) shouldBe repr(true) repr(Try(parse(s))) shouldBe repr(Success(Right(json))) } "Escaping" should "properly escape JSON string values (all chars)" in { // exhaustive test: 65k test cases (Char.MinValue to Char.MaxValue).map(_.toChar).foreach(test1) } def test2(s0: String): Unit = { val json = s0.asJson val s1 = pretty(json) s1.forall(isPrintable) parse(s1) shouldBe Right(json) } it should "properly escape JSON string values" in { forAll { (s0: String) => test2(s0) } } def test3(c: Char): Unit = { val m = Map(c.toString -> c.toInt) val o = Encoder[Map[String, Int]].apply(m) parser.parse(printer.print(o)).right.flatMap(_.as[Map[String, Int]]) shouldBe Right(m) } it should "properly escape JSON object keys" in { // exhaustive test: 65k test cases (Char.MinValue to Char.MaxValue).map(_.toChar).foreach(test3) } }
Example 37
Source File: IterativeDeepeningSearch.scala From aima-scala with MIT License | 5 votes |
package aima.core.search.uninformed import aima.core.search.Problem import scala.annotation.tailrec import scala.util.{Failure, Success, Try} trait IterativeDeepeningSearch[State, Action] { def depthLimitedTreeSearch: DepthLimitedTreeSearch[State, Action] def search(problem: Problem[State, Action], noAction: Action): Try[DLSResult[Action]] = { @tailrec def searchHelper(currentDepth: Int): Try[DLSResult[Action]] = { val result = depthLimitedTreeSearch.search(problem, currentDepth, noAction) result match { case Success(Solution(_)) | Failure(_) => result case _ if currentDepth == Int.MaxValue => Failure[DLSResult[Action]](new Exception("Depth has reached Int.MaxValue")) case _ => searchHelper(currentDepth + 1) } } searchHelper(currentDepth = 0) } }
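The loop above is the textbook pattern: run a depth-limited search, and on cut-off try again with a larger limit. Stripped of the aima-scala types, the same control flow can be sketched on its own like this (Some stands for a solution, None for a cut-off):

// standalone sketch of the deepening loop, not part of the library
def iterativeDeepening[A](depthLimited: Int => Option[A], maxDepth: Int = Int.MaxValue): Option[A] = {
  @annotation.tailrec
  def go(depth: Int): Option[A] = depthLimited(depth) match {
    case found @ Some(_)           => found
    case None if depth == maxDepth => None
    case None                      => go(depth + 1)
  }
  go(0)
}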
Example 38
Source File: DepthLimitedTreeSearch.scala From aima-scala with MIT License | 5 votes |
package aima.core.search.uninformed import aima.core.search.{Problem, ProblemSearch, StateNode} import scala.annotation.tailrec import scala.util.{Failure, Success, Try} sealed trait DLSResult[Action] { def actions: List[Action] } final case class Solution[Action](actions: List[Action]) extends DLSResult[Action] final case class CutOff[Action](actions: List[Action]) extends DLSResult[Action] trait DepthLimitedTreeSearch[State, Action] extends ProblemSearch[State, Action, StateNode[State, Action]] { type Node = StateNode[State, Action] def search(problem: Problem[State, Action], initialLimit: Int, noAction: Action): Try[DLSResult[Action]] = Try { def recursiveDLS(node: Node, currentLimit: Int): Try[DLSResult[Action]] = Try { if (problem.isGoalState(node.state)) { Success(Solution(solution(node))) } else if (currentLimit == 0) { Success(CutOff(solution(node))) } else { val childNodes = for { action <- problem.actions(node.state) } yield newChildNode(problem, node, action) @tailrec def shortCircuitChildSearch(children: List[Node]): Try[DLSResult[Action]] = { children match { case Nil => Failure[DLSResult[Action]](new Exception("Exhausted child nodes")) case lastChild :: Nil => recursiveDLS(lastChild, currentLimit - 1) case firstChild :: rest => recursiveDLS(firstChild, currentLimit - 1) match { case result @ Success(Solution(_)) => result case _ => shortCircuitChildSearch(rest) } } } shortCircuitChildSearch(childNodes) } }.flatten recursiveDLS(makeNode(problem.initialState, noAction), initialLimit) }.flatten def makeNode(state: State, noAction: Action): Node = StateNode(state, noAction, None) def newChildNode(problem: Problem[State, Action], parent: Node, action: Action): Node = { val childState = problem.result(parent.state, action) StateNode(childState, action, Some(parent)) } }
Example 39
Source File: Node.scala From mantis with Apache License 2.0 | 5 votes |
package io.iohk.ethereum.network.discovery import java.net.{InetSocketAddress, _} import akka.util.ByteString import io.iohk.ethereum.network import io.iohk.ethereum.utils.Logger import org.spongycastle.util.encoders.Hex import scala.util.{Failure, Success, Try} case class Node(id: ByteString, addr: InetSocketAddress) { def toUri: URI = { val host = network.getHostName(addr.getAddress) val port = addr.getPort new URI(s"enode://${Hex.toHexString(id.toArray[Byte])}@$host:$port") } } object Node { def fromUri(uri: URI): Node = { val nodeId = ByteString(Hex.decode(uri.getUserInfo)) Node(nodeId, new InetSocketAddress(uri.getHost, uri.getPort)) } } object NodeParser extends Logger { val NodeScheme = "enode" val NodeIdSize = 64 def parseNodes(unParsedNodes: Set[String]): Set[Node] = unParsedNodes.foldLeft[Set[Node]](Set.empty) { case (parsedNodes, nodeString) => val maybeNode = NodeParser.parseNode(nodeString) maybeNode match { case Right(node) => parsedNodes + node case Left(errors) => log.warn(s"Unable to parse node: $nodeString due to: ${errors.map(_.getMessage).mkString("; ")}") parsedNodes } } }
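Note that `NodeParser.parseNode` is defined elsewhere in the file and not shown in this excerpt. A hypothetical round trip through the URI form of a node:

import java.net.URI

// a 64-byte node id is rendered as 128 hex characters (hypothetical id below)
val enode = new URI("enode://" + "ab" * 64 + "@127.0.0.1:30303")
val node = Node.fromUri(enode)
node.id.length // 64
node.toUri     // enode://abab...@<host as resolved by network.getHostName>:30303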
Example 40
Source File: Mantis.scala From mantis with Apache License 2.0 | 5 votes |
package io.iohk.ethereum import io.iohk.ethereum.blockchain.sync.SyncController import io.iohk.ethereum.mining.Miner import io.iohk.ethereum.network.discovery.DiscoveryListener import io.iohk.ethereum.network.{PeerManagerActor, ServerActor} import io.iohk.ethereum.nodebuilder.Node import io.iohk.ethereum.utils.Logger import scala.concurrent.Await import scala.util.{Failure, Success, Try} object Mantis { def main(args: Array[String]): Unit = { new Node with Logger { def tryAndLogFailure(f: () => Any): Unit = Try(f()) match { case Failure(e) => log.warn("Error while shutting down...", e) case Success(_) => } override def shutdown(): Unit = { tryAndLogFailure(() => Await.ready(actorSystem.terminate, shutdownTimeoutDuration)) tryAndLogFailure(() => storagesInstance.dataSources.closeAll()) } genesisDataLoader.loadGenesisData() peerManager ! PeerManagerActor.StartConnecting server ! ServerActor.StartServer(networkConfig.Server.listenAddress) if (discoveryConfig.discoveryEnabled) { discoveryListener ! DiscoveryListener.Start } syncController ! SyncController.Start if (miningConfig.miningEnabled) { miner ! Miner.StartMining } peerDiscoveryManager // unlazy maybeJsonRpcServer match { case Right(jsonRpcServer) if jsonRpcServerConfig.enabled => jsonRpcServer.run() case Left(error) if jsonRpcServerConfig.enabled => log.error(error) case _=> //Nothing } } } }
Example 41
Source File: JsonRpcHttpsServer.scala From mantis with Apache License 2.0 | 5 votes |
package io.iohk.ethereum.jsonrpc.server import java.io.{File, FileInputStream} import java.security.{KeyStore, SecureRandom} import javax.net.ssl.{KeyManagerFactory, SSLContext, TrustManagerFactory} import akka.actor.ActorSystem import akka.http.scaladsl.model.headers.HttpOriginRange import akka.http.scaladsl.{ConnectionContext, Http} import akka.stream.ActorMaterializer import io.iohk.ethereum.jsonrpc.JsonRpcController import io.iohk.ethereum.jsonrpc.server.JsonRpcHttpsServer.HttpsSetupResult import io.iohk.ethereum.jsonrpc.server.JsonRpcServer.JsonRpcServerConfig import io.iohk.ethereum.utils.Logger import scala.concurrent.ExecutionContext.Implicits.global import scala.io.Source import scala.util.{Failure, Success, Try} class JsonRpcHttpsServer(val jsonRpcController: JsonRpcController, config: JsonRpcServerConfig, secureRandom: SecureRandom)(implicit val actorSystem: ActorSystem) extends JsonRpcServer with Logger { def run(): Unit = { implicit val materializer = ActorMaterializer() val maybeSslContext = validateCertificateFiles(config.certificateKeyStorePath, config.certificateKeyStoreType, config.certificatePasswordFile).flatMap{ case (keystorePath, keystoreType, passwordFile) => val passwordReader = Source.fromFile(passwordFile) try { val password = passwordReader.getLines().mkString obtainSSLContext(keystorePath, keystoreType, password) } finally { passwordReader.close() } } val maybeHttpsContext = maybeSslContext.map(sslContext => ConnectionContext.https(sslContext)) maybeHttpsContext match { case Right(httpsContext) => Http().setDefaultServerHttpContext(httpsContext) val bindingResultF = Http().bindAndHandle(route, config.interface, config.port, connectionContext = httpsContext) bindingResultF onComplete { case Success(serverBinding) => log.info(s"JSON RPC HTTPS server listening on ${serverBinding.localAddress}") case Failure(ex) => log.error("Cannot start JSON HTTPS RPC server", ex) } case Left(error) => log.error(s"Cannot start JSON HTTPS RPC server due to: $error") } } private def validateCertificateFiles(maybeKeystorePath: Option[String], maybeKeystoreType: Option[String], maybePasswordFile: Option[String]): HttpsSetupResult[(String, String, String)] = (maybeKeystorePath, maybeKeystoreType, maybePasswordFile) match { case (Some(keystorePath), Some(keystoreType), Some(passwordFile)) => val keystoreDirMissing = !new File(keystorePath).isFile val passwordFileMissing = !new File(passwordFile).isFile if(keystoreDirMissing && passwordFileMissing) Left("Certificate keystore path and password file configured but files are missing") else if(keystoreDirMissing) Left("Certificate keystore path configured but file is missing") else if(passwordFileMissing) Left("Certificate password file configured but file is missing") else Right((keystorePath, keystoreType, passwordFile)) case _ => Left("HTTPS requires: certificate-keystore-path, certificate-keystore-type and certificate-password-file to be configured") } override def corsAllowedOrigins: HttpOriginRange = config.corsAllowedOrigins } object JsonRpcHttpsServer { type HttpsSetupResult[T] = Either[String, T] }
Example 42
Source File: JsonRpcHttpServer.scala From mantis with Apache License 2.0 | 5 votes |
package io.iohk.ethereum.jsonrpc.server import akka.actor.ActorSystem import akka.http.scaladsl.Http import akka.http.scaladsl.model.headers.HttpOriginRange import akka.stream.ActorMaterializer import io.iohk.ethereum.jsonrpc._ import io.iohk.ethereum.jsonrpc.server.JsonRpcServer.JsonRpcServerConfig import io.iohk.ethereum.utils.Logger import scala.concurrent.ExecutionContext.Implicits.global import scala.util.{Failure, Success} class JsonRpcHttpServer(val jsonRpcController: JsonRpcController, config: JsonRpcServerConfig) (implicit val actorSystem: ActorSystem) extends JsonRpcServer with Logger { def run(): Unit = { implicit val materializer = ActorMaterializer() val bindingResultF = Http(actorSystem).bindAndHandle(route, config.interface, config.port) bindingResultF onComplete { case Success(serverBinding) => log.info(s"JSON RPC HTTP server listening on ${serverBinding.localAddress}") case Failure(ex) => log.error("Cannot start JSON HTTP RPC server", ex) } } override def corsAllowedOrigins: HttpOriginRange = config.corsAllowedOrigins }
Example 43
Source File: FastSyncStateStorageActor.scala From mantis with Apache License 2.0 | 5 votes |
package io.iohk.ethereum.blockchain.sync import akka.actor.{Actor, ActorLogging} import akka.pattern.pipe import io.iohk.ethereum.blockchain.sync.FastSync.SyncState import io.iohk.ethereum.blockchain.sync.FastSyncStateStorageActor.GetStorage import io.iohk.ethereum.db.storage.FastSyncStateStorage import scala.concurrent.Future import scala.util.{Failure, Success, Try} class FastSyncStateStorageActor extends Actor with ActorLogging { def receive: Receive = { // after initialization send a valid Storage reference case storage: FastSyncStateStorage => context become idle(storage) } def idle(storage: FastSyncStateStorage): Receive = { // begin saving of the state to the storage and become busy case state: SyncState => persistState(storage, state) case GetStorage => sender() ! storage.getSyncState() } def busy(storage: FastSyncStateStorage, stateToPersist: Option[SyncState]): Receive = { // update state waiting to be persisted later. we only keep newest state case state: SyncState => context become busy(storage, Some(state)) // exception was thrown during persisting of a state. push case Failure(e) => throw e // state was saved in the storage. become idle case Success(s: FastSyncStateStorage) if stateToPersist.isEmpty => context become idle(s) // state was saved in the storage but new state is already waiting to be saved. case Success(s: FastSyncStateStorage) if stateToPersist.isDefined => stateToPersist.foreach(persistState(s, _)) case GetStorage => sender() ! storage.getSyncState() } private def persistState(storage: FastSyncStateStorage, syncState: SyncState): Unit = { import context.dispatcher val persistingQueues: Future[Try[FastSyncStateStorage]] = Future { lazy val result = Try { storage.putSyncState(syncState) } if (log.isDebugEnabled) { val now = System.currentTimeMillis() result val end = System.currentTimeMillis() log.debug(s"Saving snapshot of a fast sync took ${end - now} ms") result } else { result } } persistingQueues pipeTo self context become busy(storage, None) } } object FastSyncStateStorageActor { case object GetStorage }
Example 44
Source File: Logger.scala From scala-json-rpc with MIT License | 5 votes |
package io.github.shogowada.scala.jsonrpc.example.e2e import io.github.shogowada.scalajs.reactjs.React import io.github.shogowada.scalajs.reactjs.VirtualDOM._ import io.github.shogowada.scalajs.reactjs.events.{FormSyntheticEvent, SyntheticEvent} import org.scalajs.dom.raw.HTMLInputElement import scala.concurrent.ExecutionContext.Implicits.global import scala.util.Success object Logger { case class State(log: String, logs: Seq[String]) type Self = React.Self[Unit, State] } class Logger(loggerAPI: LoggerAPI) { import Logger._ def apply() = reactClass private lazy val reactClass = React.createClass[Unit, State]( getInitialState = (self) => State("", Seq()), render = (self) => <.div()( <.h2()("Logger"), <.form(^.onSubmit := onLog(self))( <.input( ^.id := ElementIds.LoggerLogText, ^.value := self.state.log, ^.onChange := onChange(self) )(), <.button( ^.id := ElementIds.LoggerLog, ^.`type` := "submit" )("Log") ), <.form(^.onSubmit := onGetLogs(self))( <.button( ^.id := ElementIds.LoggerGetLogs, ^.`type` := "submit" )("Get Logs") ), <.div(^.id := ElementIds.LoggerLogs)( self.state.logs.map(log => { <.div()(log) }) ) ).asReactElement ) private def onChange(self: Self) = (event: FormSyntheticEvent[HTMLInputElement]) => { val log = event.target.value self.setState(_.copy(log = log)) } private def onLog(self: Self) = (event: SyntheticEvent) => { event.preventDefault() loggerAPI.log(self.state.log) self.setState(_.copy(log = "")) } private def onGetLogs(self: Self) = (event: SyntheticEvent) => { event.preventDefault() loggerAPI.getAllLogs().onComplete { case Success(logs) => self.setState(_.copy(logs = logs)) case _ => } } }
Example 45
Source File: Calculator.scala From scala-json-rpc with MIT License | 5 votes |
package io.github.shogowada.scala.jsonrpc.example.e2e import io.github.shogowada.scalajs.reactjs.React import io.github.shogowada.scalajs.reactjs.VirtualDOM._ import io.github.shogowada.scalajs.reactjs.events.{FormSyntheticEvent, SyntheticEvent} import org.scalajs.dom.raw.HTMLInputElement import scala.concurrent.ExecutionContext.Implicits.global import scala.util.Success object Calculator { case class State(lhs: Int, rhs: Int, added: Option[Int], subtracted: Option[Int]) type Self = React.Self[Unit, State] } class Calculator(calculatorAPI: CalculatorAPI) { import Calculator._ def apply() = reactClass private lazy val reactClass = React.createClass[Unit, State]( getInitialState = (self) => Calculator.State(0, 0, None, None), render = (self) => <.div()( <.h2()("Calculator"), <.form(^.onSubmit := onSubmit(self))( <.input( ^.id := ElementIds.CalculatorLhs, ^.onChange := onLhsChange(self), ^.value := self.state.lhs )(), <.input( ^.id := ElementIds.CalculatorRhs, ^.onChange := onRhsChange(self), ^.value := self.state.rhs )(), <.button( ^.id := ElementIds.CalculatorCalculate, ^.`type` := "submit" )("Calculate") ), <.div(^.id := ElementIds.CalculatorAdded)( s"${self.state.lhs} + ${self.state.rhs} = ${self.state.added.getOrElse("?")}" ), <.div(^.id := ElementIds.CalculatorSubtracted)( s"${self.state.lhs} - ${self.state.rhs} = ${self.state.subtracted.getOrElse("?")}" ) ).asReactElement ) private def onLhsChange(self: Self) = (event: FormSyntheticEvent[HTMLInputElement]) => { val value = event.target.value self.setState(_.copy( lhs = value.toInt, added = None, subtracted = None )) } private def onRhsChange(self: Self) = (event: FormSyntheticEvent[HTMLInputElement]) => { val value = event.target.value self.setState(_.copy( rhs = value.toInt, added = None, subtracted = None )) } private def onSubmit(self: Self) = (event: SyntheticEvent) => { event.preventDefault() val lhs = self.state.lhs val rhs = self.state.rhs calculatorAPI.add(lhs, rhs).onComplete { case Success(added) if lhs == self.state.lhs && rhs == self.state.rhs => { self.setState(_.copy(added = Some(added))) } case _ => } calculatorAPI.subtract(lhs, rhs).onComplete { case Success(subtracted) if lhs == self.state.lhs && rhs == self.state.rhs => { self.setState(_.copy(subtracted = Some(subtracted))) } case _ => } } }
Example 46
Source File: Echo.scala From scala-json-rpc with MIT License | 5 votes |
package io.github.shogowada.scala.jsonrpc.example.e2e import io.github.shogowada.scalajs.reactjs.React import io.github.shogowada.scalajs.reactjs.VirtualDOM._ import io.github.shogowada.scalajs.reactjs.events.FormSyntheticEvent import org.scalajs.dom.raw.HTMLInputElement import scala.concurrent.ExecutionContext.Implicits.global import scala.util.Success object Echo { case class State(text: String, echoedText: Option[String]) type Self = React.Self[Unit, State] } class Echo(echoAPI: EchoAPI) { import Echo._ def apply() = reactClass private lazy val reactClass = React.createClass[Unit, State]( getInitialState = (self) => State(text = "", echoedText = Some("")), render = (self) => <.div()( <.h2()("Echo"), <.label(^.`for` := ElementIds.EchoText)("I say:"), <.input( ^.id := ElementIds.EchoText, ^.value := self.state.text, ^.onChange := onChange(self) )(), <.label(^.`for` := ElementIds.EchoEchoedText)("Server says:"), <.span(^.id := ElementIds.EchoEchoedText)(self.state.echoedText.getOrElse("")) ).asReactElement ) private def onChange(self: Self) = (event: FormSyntheticEvent[HTMLInputElement]) => { val text = event.target.value self.setState(_.copy( text = text, echoedText = None )) echoAPI.echo(text).onComplete { case Success(echoedText) if self.state.text == text => self.setState(_.copy(echoedText = Some(echoedText))) case _ => } } }
Example 47
Source File: RandomDataGenerator.scala From random-data-generator with Apache License 2.0 | 5 votes |
package com.danielasfregola.randomdatagenerator import com.danielasfregola.randomdatagenerator.utils.{SeedDetector, ShapelessLike} import org.scalacheck._ import scala.reflect.runtime.universe._ import scala.util.{Success, Try} object RandomDataGenerator extends RandomDataGenerator trait RandomDataGenerator extends ShapelessLike { protected[randomdatagenerator] val seed = SeedDetector.seed def random[T: WeakTypeTag: Arbitrary]: T = random(1).head def random[T: WeakTypeTag: Arbitrary](n: Int): Seq[T] = { val gen = Gen.infiniteStream(implicitly[Arbitrary[T]].arbitrary) Try(gen.apply(Gen.Parameters.default, seed)) match { case Success(Some(v)) => v.take(n) case _ => explode[T] } } private def explode[T: WeakTypeTag]() = { val tpe = implicitly[WeakTypeTag[T]].tpe val msg = s"""Could not generate a random value for $tpe. |Please, make use that the Arbitrary instance for type $tpe is not too restrictive""".stripMargin throw new RandomDataException(msg) } }
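Typical usage is a one-liner per type; the sketch below assumes `ShapelessLike` can derive the required `Arbitrary` instance for a plain case class:

import com.danielasfregola.randomdatagenerator.RandomDataGenerator._

final case class Person(name: String, age: Int)

val one: Person = random[Person]
val three: Seq[Person] = random[Person](3)
// the seed is logged at startup, so a failing run can be replayed by re-exporting it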
Example 48
Source File: SeedDetector.scala From random-data-generator with Apache License 2.0 | 5 votes |
package com.danielasfregola.randomdatagenerator.utils import org.scalacheck.rng.Seed import scala.util.{Failure, Success, Try} private[randomdatagenerator] object SeedDetector extends SeedDetector private[randomdatagenerator] trait SeedDetector { protected lazy val logger = new PrettyPrinter() lazy val seed: Seed = createSeedObj(seedValue) private def createSeedObj(seedValue: Long): Seed = { logger.info(s"Generating random data using seed $seedValue") logger.info(s"Replicate this session by setting ${SeedVariable.name}=$seedValue") Seed(seedValue) } private lazy val seedValue: Long = optLongVariable match { case Some(preSelectedSeed) => logger.info(s"Variable ${SeedVariable.name} detected: setting $preSelectedSeed as seed") preSelectedSeed case None => logger.info(s"No variable ${SeedVariable.name} detected: setting random seed") randomLong } private lazy val optLongVariable: Option[Long] = envVariable.map { value => Try(value.toLong) match { case Success(l) => l case Failure(ex) => throw new RuntimeException(s"Please, provide a numeric value for ${SeedVariable.name}", ex) } } protected lazy val envVariable: Option[String] = SeedVariable.value protected def randomLong = scala.util.Random.nextLong }
Example 49
Source File: RandomDataGenerator.scala From random-data-generator with Apache License 2.0 | 5 votes |
package com.danielasfregola.randomdatagenerator import com.danielasfregola.randomdatagenerator.utils.{SeedDetector, ShapelessLike} import org.scalacheck._ import scala.reflect.ClassTag import scala.util.{Success, Try} object RandomDataGenerator extends RandomDataGenerator trait RandomDataGenerator extends ShapelessLike { protected[randomdatagenerator] val seed = SeedDetector.seed def random[T: Arbitrary: ClassTag]: T = { val arbitrary = implicitly[Arbitrary[T]] val ct = implicitly[ClassTag[T]] random(1)(arbitrary, ct).head } def random[T: Arbitrary: ClassTag](n: Int): Seq[T] = { val gen = Gen.infiniteStream(implicitly[Arbitrary[T]].arbitrary) Try(gen.apply(Gen.Parameters.default, seed)) match { case Success(Some(v)) => v.take(n) case _ => explode[T] } } private def explode[T: ClassTag]() = { val classTag = implicitly[ClassTag[T]] val msg = s"""Could not generate a random value for $classTag. |Please, make use that the Arbitrary instance for type $classTag is not too restrictive""".stripMargin throw new RandomDataException(msg) } }
Example 50
Source File: Tests.scala From lolhttp with Apache License 2.0 | 5 votes |
package lol.http import cats.effect.IO import org.scalatest._ import scala.util.{Try, Success, Failure} import scala.concurrent.{Await, ExecutionContext} import scala.concurrent.duration._ abstract class Tests extends FunSuite with Matchers with OptionValues with Inside with Inspectors { val Pure = Tag("Pure") val Slow = Tag("Slow") def await[A](atMost: FiniteDuration = 30.seconds)(a: IO[A]): A = Await.result(a.unsafeToFuture, atMost) def withServer(server: Server)(test: Server => Unit) = try { test(server) } finally { server.stop() } def status(req: Request, atMost: FiniteDuration = 30.seconds, followRedirects: Boolean = true, protocol: String = HTTP)(implicit e: ExecutionContext, ssl: SSL.ClientConfiguration): Int = { await(atMost) { Client.run(req, followRedirects = followRedirects, timeout = atMost, protocol = protocol)(res => IO.pure(res.status)) } } def contentString(req: Request, atMost: FiniteDuration = 30.seconds, followRedirects: Boolean = true, protocol: String = HTTP)(implicit e: ExecutionContext, ssl: SSL.ClientConfiguration): String = { await(atMost) { Client.run(req, followRedirects = followRedirects, timeout = atMost, protocol = protocol)(_.readAs[String]) } } def headers(req: Request, atMost: FiniteDuration = 30.seconds, protocol: String = HTTP)(implicit e: ExecutionContext, ssl: SSL.ClientConfiguration): Map[HttpString,HttpString] = { await(atMost) { Client.run(req, timeout = atMost, protocol = protocol)(res => IO.pure(res.headers)) } } def header(req: Request, header: HttpString, atMost: FiniteDuration = 30.seconds, protocol: String = HTTP)(implicit e: ExecutionContext, ssl: SSL.ClientConfiguration): Option[HttpString] = { await(atMost) { Client.run(req, timeout = atMost, protocol = protocol)(res => IO.pure(res.headers.get(header))) } } def getString(content: Content, codec: String = "utf-8") = new String(getBytes(content).toArray, codec) def getBytes(content: Content): Vector[Byte] = content.stream.compile.toVector.unsafeRunSync() def bytes(data: Int*): Seq[Byte] = data.map(_.toByte) def eventually[A](assertion: => A, timeout: FiniteDuration = 5.seconds): A = { val start = System.currentTimeMillis def go(): A = Try(assertion) match { case Success(a) => a case Failure(e) => if(System.currentTimeMillis - start < timeout.toMillis) go() else throw e } go() } }
Example 51
Source File: Server.scala From opencensus-scala with Apache License 2.0 | 5 votes |
package io.opencensus.scala.examples.akka.http import akka.actor.ActorSystem import akka.http.scaladsl.Http import akka.http.scaladsl.server.Directives._ import akka.http.scaladsl.server.Route import com.typesafe.scalalogging.LazyLogging import io.opencensus.scala.akka.http.TracingDirective._ import io.opencensus.trace.AttributeValue import org.slf4j.bridge.SLF4JBridgeHandler import scala.util.{Failure, Success} object Server extends App with LazyLogging { // Forward java.util.Logging to slf4j SLF4JBridgeHandler.removeHandlersForRootLogger() SLF4JBridgeHandler.install() implicit val system: ActorSystem = ActorSystem() import system.dispatcher val routes: Route = traceRequest { span => complete { val attrValue = AttributeValue.stringAttributeValue("test") span.putAttribute("my-attribute", attrValue) "Hello opencensus" } } logger.info("Binding...") Http().bindAndHandle(routes, "0.0.0.0", 8080).onComplete { case Success(bound) => logger.info(s"Bound to ${bound.localAddress}") case Failure(e) => logger.error("Failed to bind", e) } }
Example 52
Source File: Client.scala From opencensus-scala with Apache License 2.0 | 5 votes |
package io.opencensus.scala.examples.akka.http import akka.actor.ActorSystem import akka.http.scaladsl.Http import akka.http.scaladsl.model.HttpRequest import akka.stream.scaladsl.{Sink, Source} import io.opencensus.scala.akka.http.TracingClient import org.slf4j.bridge.SLF4JBridgeHandler import scala.concurrent.duration._ import scala.concurrent.{Await, Future} import scala.util.{Failure, Success} object Client extends App { // Forward java.util.Logging to slf4j SLF4JBridgeHandler.removeHandlersForRootLogger() SLF4JBridgeHandler.install() implicit val system: ActorSystem = ActorSystem() import system.dispatcher def await[T](f: Future[T]) = Await.result(f, 3.seconds) // Request level client val pipeling = Http().singleRequest(_: HttpRequest) val r1 = await { TracingClient .traceRequest(pipeling)(HttpRequest(uri = "http://localhost:8080")) .flatMap(_.entity.toStrict(1.second)) .map(_.data.utf8String) } println(r1) // Host level client val pool = Http().cachedHostConnectionPool[Unit]("localhost", 8080) val hostFlow = TracingClient.traceRequestForPool(pool) val r2 = await { Source .single(HttpRequest(uri = "/")) .map((_, ())) .via(hostFlow) .map(_._1) .flatMapConcat { case Success(response) => response.entity.dataBytes case Failure(e) => throw e } .map(_.utf8String) .runWith(Sink.head) } println(r2) // Connection level client val connection = Http().outgoingConnection("localhost", 8080) val connectionFlow = TracingClient.traceRequestForConnection(connection) val r3 = await { Source .single(HttpRequest(uri = "/")) .via(connectionFlow) .flatMapConcat(_.entity.dataBytes) .map(_.utf8String) .runWith(Sink.head) } println(r3) }
Example 53
Source File: MockPropagation.scala From opencensus-scala with Apache License 2.0 | 5 votes |
package io.opencensus.scala.http.testSuite import io.opencensus.scala.http.propagation.Propagation import io.opencensus.trace._ import scala.collection.immutable import scala.util.{Failure, Success, Try} trait MockPropagation[Header, Request] extends Propagation[Header, Request] { def rawHeader(key: String, value: String): Header def path(request: Request): String val requestPathWithoutParent = "/no/parent/context" val fakeTraceId = "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" val fakeSpanId = "bbbbbbbbbbbbbbbb" val sampledSpanContext = SpanContext.create( TraceId.fromLowerBase16(fakeTraceId), SpanId.fromLowerBase16(fakeSpanId), TraceOptions.builder().setIsSampled(true).build(), Tracestate.builder.build() ) override def headersWithTracingContext(span: Span): immutable.Seq[Header] = List(rawHeader("X-Mock-Trace", "12345")) override def extractContext(request: Request): Try[SpanContext] = if (path(request) == requestPathWithoutParent) Failure(new Exception("test error")) else Success(sampledSpanContext) }
Example 54
Source File: MenuIO.scala From hacktoberfest-scala-algorithms with GNU General Public License v3.0 | 5 votes |
package io.github.sentenza.hacktoberfest import java.lang.System.out.println import java.lang.reflect.Method import java.util.concurrent.atomic.AtomicInteger import io.github.sentenza.hacktoberfest.algos.{ImmutableSorting, MutableSorting, Sorting} import scala.annotation.tailrec import scala.util.{Success, Try} object MenuIO { /* `heading` and `gplDisclaimer` (the license banner strings) are defined in the full source file and omitted from this excerpt */ def printDisclaimer() { println(heading + gplDisclaimer) } private val noOp = () => () def readNumberInputs = scala.io.StdIn.readLine().split(",").map(_.toInt) case class MenuEntry(selector: Int, display: String, code: () => Unit) private val entries = List( MenuEntry(1, "Sorting algorithms", () => { println("You chose sorting\n") renderInteractiveMenu(List( MenuEntry(2, "MutableSorting", () => { println("You chose mutable sorting.") renderInteractiveMenu(createMethodMenuEntries(MutableSorting)) }), MenuEntry(1, "ImmutableSorting", () => { println("You chose immutable sorting.") renderInteractiveMenu(createMethodMenuEntries(ImmutableSorting)) }), MenuEntry(0, "Quit sorting", () => noOp) )) }), MenuEntry(0, "Quit the program", () => System.exit(0)) ) private def createMethodMenuEntries(sorting: Sorting[_,_]) = { val count = new AtomicInteger() retrieveMethodNames(sorting) .map(mName => MenuEntry(count.incrementAndGet(), mName, () => executeSortMethod(sorting, mName)) ).toList } private def retrieveMethodNames(sorting:Sorting[_,_]) = sorting.getClass.getMethods.map(_.getName).filter(_.endsWith("Sort")).distinct private def executeSortMethod(sorting: Sorting[_,_], method: String) = { println("You've chosen " + method + "! Please enter a list of comma separated integers.") val numberInputs = readNumberInputs println(s"You entered:${numberInputs.mkString(",")}. They are going to be sorted by $method.\n Sorting...") val sorted = execute(sorting, method, numberInputs) println(s"Your number entries sorted are: ${sorted.mkString(",")}") } private def execute[F[_],T](sorting: Sorting[_,_], method: String, numberInputs: F[_]) = { findMethod(sorting, method) match { case Some(m:Method) => m.invoke(sorting, numberInputs).asInstanceOf[F[_]] case None => throw new RuntimeException(s"Method $method not found in $sorting") } } private def findMethod(sorting: Sorting[_,_], method: String) = sorting.getClass.getMethods.find(m => m.getName.compare(method) == 0) @tailrec def renderInteractiveMenu(entries:List[MenuEntry]=entries): Unit = { println("Please choose:") entries.foreach { case MenuEntry(num, label, _) => println(s"$num: $label") } Try(scala.io.StdIn.readInt()) match { case Success(0) => () case Success(choice) if entries.exists(_.selector == choice) => entries.find(_.selector == choice).foreach{ case MenuEntry(_, _, code) => code() } renderInteractiveMenu() case _ => println("Invalid selection\n") renderInteractiveMenu() } } }
Example 55
Source File: NativeFunctionRegistration.scala From spark-alchemy with Apache License 2.0 | 5 votes |
package com.swoop.alchemy.spark.expressions import org.apache.spark.sql.EncapsulationViolator.createAnalysisException import org.apache.spark.sql.SparkSession import org.apache.spark.sql.catalyst.FunctionIdentifier import org.apache.spark.sql.catalyst.analysis.FunctionRegistry import org.apache.spark.sql.catalyst.expressions.{Expression, ExpressionDescription, ExpressionInfo, RuntimeReplaceable} import scala.reflect.ClassTag import scala.util.{Failure, Success, Try} // based on Spark's FunctionRegistry @ossSpark trait NativeFunctionRegistration extends FunctionRegistration { type FunctionBuilder = Seq[Expression] => Expression def expressions: Map[String, (ExpressionInfo, FunctionBuilder)] def registerFunctions(fr: FunctionRegistry): Unit = { expressions.foreach { case (name, (info, builder)) => fr.registerFunction(FunctionIdentifier(name), info, builder) } } def registerFunctions(spark: SparkSession): Unit = { registerFunctions(spark.sessionState.functionRegistry) } protected def expressionInfo[T <: Expression : ClassTag](name: String): ExpressionInfo = { val clazz = scala.reflect.classTag[T].runtimeClass val df = clazz.getAnnotation(classOf[ExpressionDescription]) if (df != null) { new ExpressionInfo(clazz.getCanonicalName, null, name, df.usage(), df.extended()) } else { new ExpressionInfo(clazz.getCanonicalName, name) } } }
Example 56
Source File: MacroUtils.scala From refined with MIT License | 5 votes |
package eu.timepit.refined.macros import eu.timepit.refined.api.{Refined, RefType} import scala.reflect.macros.blackbox import scala.util.{Success, Try} import shapeless.tag.@@ trait MacroUtils { val c: blackbox.Context import c.universe.weakTypeOf def abort(msg: String): Nothing = c.abort(c.enclosingPosition, msg) def eval[T](t: c.Expr[T]): T = { // Duplicate and untypecheck before calling `eval`, see: // http://www.scala-lang.org/api/2.12.0/scala-reflect/scala/reflect/macros/Evals.html#eval[T]%28expr:Evals.this.Expr[T]%29:T val expr = c.Expr[T](c.untypecheck(t.tree.duplicate)) // Try evaluating expr twice before failing, see // https://github.com/fthomas/refined/issues/3 tryN(2, c.eval(expr)) } def tryN[T](n: Int, t: => T): T = Stream.fill(n)(Try(t)).collectFirst { case Success(r) => r }.getOrElse(t) protected def refTypeInstance[F[_, _]](rt: c.Expr[RefType[F]]): RefType[F] = if (rt.tree.tpe =:= weakTypeOf[RefType[Refined]]) RefType.refinedRefType.asInstanceOf[RefType[F]] else if (rt.tree.tpe =:= weakTypeOf[RefType[@@]]) RefType.tagRefType.asInstanceOf[RefType[F]] else eval(rt) }
Example 57
Source File: RefinedTypeOpsSpec.scala From refined with MIT License | 5 votes |
package eu.timepit.refined.api import eu.timepit.refined.types.numeric.NonNegInt import org.scalacheck.Prop._ import org.scalacheck.Properties import scala.util.{Failure, Success, Try} class RefinedTypeOpsSpec extends Properties("RefinedTypeOps") { property("from ~= unapply") = forAll { i: Int => NonNegInt.from(i).right.toOption ?= NonNegInt.unapply(i) } property("from ~= unsafeFrom") = forAll { i: Int => val stringOrNonNegInt = Try(NonNegInt.unsafeFrom(i)) match { case Success(n) => Right(n) case Failure(t) => Left(t.getMessage) } NonNegInt.from(i) ?= stringOrNonNegInt } }
Example 58
Source File: RefTypeMonadErrorSpec.scala From refined with MIT License | 5 votes |
package eu.timepit.refined.cats import _root_.cats.MonadError import eu.timepit.refined.types.numeric.PosInt import org.scalacheck.Prop._ import org.scalacheck.Properties import scala.annotation.tailrec import scala.util.{Failure, Success, Try} trait Decoder[A] { def decode(s: String): Either[String, A] } object Decoder { def apply[A](implicit d: Decoder[A]): Decoder[A] = d def instance[A](f: String => Either[String, A]): Decoder[A] = new Decoder[A] { override def decode(s: String): Either[String, A] = f(s) } implicit val decoderMonadError: MonadError[Decoder, String] = new MonadError[Decoder, String] { override def flatMap[A, B](fa: Decoder[A])(f: A => Decoder[B]): Decoder[B] = instance { s => fa.decode(s) match { case Right(a) => f(a).decode(s) case Left(err) => Left(err) } } override def tailRecM[A, B](a: A)(f: A => Decoder[Either[A, B]]): Decoder[B] = { @tailrec def step(s: String, a1: A): Either[String, B] = f(a1).decode(s) match { case Right(Right(b)) => Right(b) case Right(Left(a2)) => step(s, a2) case Left(err) => Left(err) } instance(s => step(s, a)) } override def raiseError[A](e: String): Decoder[A] = instance(_ => Left(e)) override def handleErrorWith[A](fa: Decoder[A])(f: String => Decoder[A]): Decoder[A] = instance { s => fa.decode(s) match { case Right(a) => Right(a) case Left(err) => f(err).decode(s) } } override def pure[A](x: A): Decoder[A] = instance(_ => Right(x)) } implicit val intDecoder: Decoder[Int] = instance(s => Try(s.toInt) match { case Success(i) => Right(i) case Failure(t) => Left(t.getMessage) } ) } class RefTypeMonadErrorSpec extends Properties("MonadError") { property("Decoder[Int]") = secure { Decoder[Int].decode("1") ?= Right(1) } property("derive Decoder[PosInt] via MonadError[Decoder, String]") = { // This import is needed because of https://github.com/scala/bug/issues/10753 import Decoder.decoderMonadError import eu.timepit.refined.cats.derivation._ val decoder = Decoder[PosInt] (decoder.decode("1") ?= Right(PosInt.unsafeFrom(1))) && (decoder.decode("-1") ?= Left("Predicate failed: (-1 > 0).")) } }
Example 59
Source File: CounterEtlItem.scala From incubator-s2graph with Apache License 2.0 | 5 votes |
package org.apache.s2graph.counter.loader.core import org.apache.s2graph.counter.util.UnitConverter import org.slf4j.LoggerFactory import play.api.libs.json._ import scala.util.{Failure, Success, Try} case class CounterEtlItem(ts: Long, service: String, action: String, item: String, dimension: JsValue, property: JsValue, useProfile: Boolean = false) { def toKafkaMessage: String = { s"$ts\t$service\t$action\t$item\t${dimension.toString()}\t${property.toString()}" } lazy val value = { (property \ "value").toOption match { case Some(JsNumber(n)) => n.longValue() case Some(JsString(s)) => s.toLong case None => 1L case _ => throw new Exception("wrong type") } } } object CounterEtlItem { val log = LoggerFactory.getLogger(this.getClass) def apply(line: String): Option[CounterEtlItem] = { Try { val Array(ts, service, action, item, dimension, property) = line.split('\t') CounterEtlItem(UnitConverter.toMillis(ts.toLong), service, action, item, Json.parse(dimension), Json.parse(property)) } match { case Success(item) => Some(item) case Failure(ex) => log.error(">>> failed") log.error(s"${ex.toString}: $line") None } } }
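Since the wire format is six tab-separated fields, parsing can be exercised like this (hypothetical field values):

val line = Seq("1530000000", "s2", "click", "item-1", "{}", """{"value": 2}""").mkString("\t")

CounterEtlItem(line).map(_.value) // Some(2): the timestamp is normalized to millis on the way in
CounterEtlItem("not-a-valid-line") // None, and the failure is logged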
Example 60
Source File: Retry.scala From incubator-s2graph with Apache License 2.0 | 5 votes |
package org.apache.s2graph.counter.util import scala.annotation.tailrec import scala.concurrent.{ExecutionContext, Future, Promise} import scala.util.{Failure, Success, Try} object Retry { @tailrec def apply[T](n: Int, withSleep: Boolean = true, tryCount: Int = 0)(fn: => T): T = { Try { fn } match { case Success(x) => x case Failure(e) if e.isInstanceOf[RetryStopException] => throw e.getCause case _ if n > 1 => // backoff if (withSleep) Thread.sleep(tryCount * 1000) apply(n - 1, withSleep, tryCount + 1)(fn) case Failure(e) => throw e } } } object RetryAsync { def apply[T](n: Int, withSleep: Boolean = true, tryCount: Int = 0)(fn: => Future[T])(implicit ex: ExecutionContext): Future[T] = { val promise = Promise[T]() fn onComplete { case Success(x) => promise.success(x) case Failure(e) if e.isInstanceOf[RetryStopException] => promise.failure(e.getCause) case _ if n > 1 => // backoff if (withSleep) Thread.sleep(tryCount * 1000) apply(n - 1, withSleep, tryCount + 1)(fn) case Failure(e) => promise.failure(e) } promise.future } } class RetryStopException(message: String, cause: Throwable) extends Exception(message, cause) { def this(message: String) = this(message, null) def this(cause: Throwable) = this(cause.toString, cause) }
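Usage is direct; in the sketch below `flakyCall` and `notWorthRetrying` are hypothetical stand-ins:

// up to 3 attempts, sleeping tryCount seconds between them (0s, then 1s)
val value: Int = Retry(3) { flakyCall() }

// wrapping a cause in RetryStopException aborts the loop and rethrows that cause
Retry(3) {
  if (notWorthRetrying) throw new RetryStopException(new IllegalStateException("fatal"))
  flakyCall()
}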
Example 61
Source File: Server.scala From incubator-s2graph with Apache License 2.0 | 5 votes |
package org.apache.s2graph.http import java.time.Instant import scala.language.postfixOps import scala.concurrent.{Await, ExecutionContext, Future} import scala.concurrent.duration.Duration import scala.util.{Failure, Success} import akka.actor.ActorSystem import akka.http.scaladsl.Http import akka.http.scaladsl.model.{ContentTypes, HttpEntity, HttpResponse, StatusCodes} import akka.http.scaladsl.server.Route import akka.http.scaladsl.server.Directives._ import akka.stream.ActorMaterializer import com.typesafe.config.ConfigFactory import org.apache.s2graph.core.S2Graph import org.slf4j.LoggerFactory object Server extends App with S2GraphTraversalRoute with S2GraphAdminRoute with S2GraphMutateRoute with S2GraphQLRoute { implicit val system: ActorSystem = ActorSystem("S2GraphHttpServer") implicit val materializer: ActorMaterializer = ActorMaterializer() implicit val executionContext: ExecutionContext = system.dispatcher val config = ConfigFactory.load() override val s2graph = new S2Graph(config) override val logger = LoggerFactory.getLogger(this.getClass) val port = sys.props.get("http.port").fold(8000)(_.toInt) val interface = sys.props.get("http.interface").fold("0.0.0.0")(identity) val startAt = System.currentTimeMillis() def uptime = System.currentTimeMillis() - startAt def serverHealth = s"""{ "port": ${port}, "interface": "${interface}", "started_at": ${Instant.ofEpochMilli(startAt)}, "uptime": "${uptime} millis" }""" def health = HttpResponse(status = StatusCodes.OK, entity = HttpEntity(ContentTypes.`application/json`, serverHealth)) // Allows you to determine routes to expose according to external settings. lazy val routes: Route = concat( pathPrefix("graphs")(traversalRoute), pathPrefix("mutate")(mutateRoute), pathPrefix("admin")(adminRoute), pathPrefix("graphql")(graphqlRoute), get(complete(health)) ) val binding: Future[Http.ServerBinding] = Http().bindAndHandle(routes, interface, port) binding.onComplete { case Success(bound) => logger.info(s"Server online at http://${bound.localAddress.getHostString}:${bound.localAddress.getPort}/") case Failure(e) => logger.error(s"Server could not start!", e) } scala.sys.addShutdownHook { () => s2graph.shutdown() system.terminate() logger.info("System terminated") } Await.result(system.whenTerminated, Duration.Inf) }
Example 62
Source File: WsConnection.scala From matcher with MIT License | 5 votes |
package com.wavesplatform.dex.api.ws.connection import java.util.concurrent.ConcurrentLinkedQueue import akka.Done import akka.actor.{ActorRef, ActorSystem, Status} import akka.http.scaladsl.Http import akka.http.scaladsl.model.ws.{BinaryMessage, Message, TextMessage, WebSocketRequest} import akka.stream.scaladsl.{Flow, Sink, Source} import akka.stream.{CompletionStrategy, Materializer, OverflowStrategy} import com.wavesplatform.dex.api.ws.protocol.{WsClientMessage, WsMessage, WsPingOrPong, WsServerMessage} import com.wavesplatform.dex.domain.utils.ScorexLogging import play.api.libs.json.Json import scala.collection.JavaConverters._ import scala.concurrent.Future import scala.concurrent.duration._ import scala.util.{Failure, Success, Try} class WsConnection(uri: String, keepAlive: Boolean = true)(implicit system: ActorSystem, materializer: Materializer) extends ScorexLogging { log.info(s"""Connecting to Matcher WS API: | URI = $uri | Keep alive = $keepAlive""".stripMargin) import materializer.executionContext private val wsHandlerRef = system.actorOf(TestWsHandlerActor props keepAlive) protected def stringifyClientMessage(cm: WsClientMessage): TextMessage.Strict = WsMessage.toStrictTextMessage(cm)(WsClientMessage.wsClientMessageWrites) // From test to server private val source: Source[TextMessage.Strict, ActorRef] = { val completionMatcher: PartialFunction[Any, CompletionStrategy] = { case akka.actor.Status.Success(_) => CompletionStrategy.draining } val failureMatcher: PartialFunction[Any, Throwable] = { case Status.Failure(cause) => cause } Source .actorRef[WsClientMessage](completionMatcher, failureMatcher, 10, OverflowStrategy.fail) .map(stringifyClientMessage) .mapMaterializedValue { source => wsHandlerRef.tell(TestWsHandlerActor.AssignSourceRef, source) source } } private val messagesBuffer: ConcurrentLinkedQueue[WsServerMessage] = new ConcurrentLinkedQueue[WsServerMessage]() // From server to test private val sink: Sink[Message, Future[Done]] = Sink.foreach { case tm: TextMessage => for { strictText <- tm.toStrict(1.second).map(_.getStrictText) clientMessage <- { log.trace(s"Got $strictText") Try { Json.parse(strictText).as[WsServerMessage] } match { case Failure(exception) => Future.failed(exception) case Success(x) => { messagesBuffer.add(x) if (keepAlive) x match { case value: WsPingOrPong => wsHandlerRef ! value case _ => } Future.successful(x) } } } } yield clientMessage case bm: BinaryMessage => bm.dataStream.runWith(Sink.ignore) Future.failed { new IllegalArgumentException("Binary messages are not supported") } } private val flow: Flow[Message, TextMessage.Strict, Future[Done]] = Flow.fromSinkAndSourceCoupled(sink, source).watchTermination() { case (_, f) => f.onComplete { case Success(_) => log.info(s"WebSocket connection to $uri successfully closed") case Failure(e) => log.error(s"WebSocket connection to $uri closed with an error", e) }(materializer.executionContext) f } val (connectionResponse, closed) = Http().singleWebSocketRequest(WebSocketRequest(uri), flow) val connectionOpenedTs: Long = System.currentTimeMillis val connectionClosedTs: Future[Long] = closed.map(_ => System.currentTimeMillis) val connectionLifetime: Future[FiniteDuration] = connectionClosedTs.map(cc => FiniteDuration(cc - connectionOpenedTs, MILLISECONDS)) def messages: List[WsServerMessage] = messagesBuffer.iterator().asScala.toList def clearMessages(): Unit = messagesBuffer.clear() def send(message: WsClientMessage): Unit = wsHandlerRef ! TestWsHandlerActor.SendToServer(message) def close(): Unit = if (!isClosed) wsHandlerRef ! TestWsHandlerActor.CloseConnection def isClosed: Boolean = closed.isCompleted }
Example 63
Source File: BlockchainCache.scala From matcher with MIT License | 5 votes |
package com.wavesplatform.dex.grpc.integration.caches import java.time.Duration import com.google.common.cache.{CacheBuilder, CacheLoader, LoadingCache} import com.wavesplatform.dex.domain.utils.ScorexLogging import scala.concurrent.{ExecutionContext, Future} import scala.util.{Failure, Success} abstract class BlockchainCache[K <: AnyRef, V <: AnyRef](loader: K => Future[V], expiration: Option[Duration], invalidationPredicate: V => Boolean)( implicit ec: ExecutionContext) extends ScorexLogging { lazy private val cache: LoadingCache[K, Future[V]] = { val builder = CacheBuilder.newBuilder expiration .fold(builder)(builder.expireAfterWrite) .build { new CacheLoader[K, Future[V]] { override def load(key: K): Future[V] = loader(key) andThen { case Success(value) if invalidationPredicate(value) => cache.invalidate(key) // value may persist for a little longer than expected due to the fact that all the threads in the EC may be busy case Failure(exception) => log.error(s"Error while value loading occurred: ", exception); cache.invalidate(key) } } } } def get(key: K): Future[V] = cache.get(key) def put(key: K, value: Future[V]): Unit = cache.put(key, value) } object BlockchainCache { def noCustomInvalidationLogic[V](value: V): Boolean = false }
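A concrete cache only needs to supply the loader and the two policies; a hypothetical subclass caching balances keyed by address string (java.lang.Long is used because V must be an AnyRef):

import java.time.Duration
import scala.concurrent.{ExecutionContext, Future}

class BalanceCache(load: String => Future[java.lang.Long])(implicit ec: ExecutionContext)
  extends BlockchainCache[String, java.lang.Long](
    loader = load,
    expiration = Some(Duration.ofMinutes(1)),
    invalidationPredicate = BlockchainCache.noCustomInvalidationLogic // keep every loaded value
  )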
Example 64
Source File: package.scala From matcher with MIT License | 5 votes |
package com.wavesplatform.dex.domain import scala.util.{Failure, Success, Try} package object utils { private val BytesMaxValue = 256 private val Base58MaxValue = 58 private val BytesLog = math.log(BytesMaxValue) private val BaseLog = math.log(Base58MaxValue) def base58Length(byteArrayLength: Int): Int = math.ceil(BytesLog / BaseLog * byteArrayLength).toInt implicit class EitherExt2[A, B](ei: Either[A, B]) { def explicitGet(): B = ei match { case Left(value) => throw makeException(value) case Right(value) => value } def foldToTry: Try[B] = ei.fold( left => Failure(makeException(left)), right => Success(right) ) @inline private[this] def makeException(value: Any): Throwable = value match { case err: Throwable => err case _ => new RuntimeException(value.toString) } } }
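A small sketch of what these helpers do:

import com.wavesplatform.dex.domain.utils._

base58Length(32) // 44: the maximum base58-encoded length of a 32-byte array

Right[String, Int](42).explicitGet()    // 42
Left[String, Int]("boom").explicitGet() // throws RuntimeException("boom")
Left[String, Int]("boom").foldToTry     // Failure(RuntimeException: boom)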
Example 65
Source File: KeyPair.scala From matcher with MIT License | 5 votes |
package com.wavesplatform.dex.domain.account import com.wavesplatform.dex.domain.bytes.ByteStr import com.wavesplatform.dex.domain.bytes.ByteStr._ import com.wavesplatform.dex.domain.bytes.codec.Base58 import com.wavesplatform.dex.domain.crypto import com.wavesplatform.dex.domain.error.ValidationError.GenericError import play.api.libs.json.{Format, Json, Writes} import scala.util.{Failure, Success} final case class KeyPair(seed: ByteStr) { lazy val (PrivateKey(privateKey), PublicKey(publicKey)) = crypto.createKeyPair(seed) } object KeyPair { def fromSeed(base58: String): Either[GenericError, KeyPair] = Base58.tryDecodeWithLimit(base58) match { case Success(x) => Right(KeyPair(x)) case Failure(e) => Left(GenericError(s"Unable to get a private key from the seed '$base58': ${e.getMessage}")) } implicit class KeyPairImplicitOps(private val kp: KeyPair) extends AnyVal { def toAddress: Address = PublicKey.toAddress(kp) } implicit def toPublicKey(kp: KeyPair): PublicKey = kp.publicKey implicit def toPrivateKey(kp: KeyPair): PrivateKey = kp.privateKey implicit def toAddress(keyPair: KeyPair): Address = keyPair.toAddress implicit val jsonFormat: Format[KeyPair] = Format( byteStrFormat.map(KeyPair(_)), Writes(v => Json.obj("seed" -> Base58.encode(v.seed), "publicKey" -> v.publicKey, "privateKey" -> v.privateKey)) ) }
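For example (hypothetical seed; decoding fails fast on characters outside the base58 alphabet):

import com.wavesplatform.dex.domain.bytes.ByteStr

val kp = KeyPair(ByteStr("test seed".getBytes("UTF-8")))
val address = kp.toAddress // via KeyPairImplicitOps

KeyPair.fromSeed("0OIl") // Left(GenericError(...)): 0, O, I and l are not base58 characters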
Example 66
Source File: AssetPair.scala From matcher with MIT License | 5 votes |
package com.wavesplatform.dex.domain.asset import com.wavesplatform.dex.domain.asset.Asset.{IssuedAsset, Waves} import com.wavesplatform.dex.domain.bytes.{ByteStr, deser} import com.wavesplatform.dex.domain.validation.Validation import com.wavesplatform.dex.domain.validation.Validation.booleanOperators import io.swagger.annotations.{ApiModel, ApiModelProperty} import net.ceedubs.ficus.readers.ValueReader import play.api.libs.functional.syntax._ import play.api.libs.json._ import scala.util.{Failure, Success, Try} @ApiModel( description = """A pair of assets sorted by two rules: 1. A price asset is chosen by a priority from priceAssets of /matcher/settings; 2. If both assets are not present among priceAssets, they are sorted lexicographically: price asset bytes < amount asset bytes""") case class AssetPair(@ApiModelProperty( value = "Base58 encoded amount asset ID. Waves is used if field isn't specified", dataType = "string", example = "8LQW8f7P5d5PZM7GtZEBgaqRPGSzS3DfPuiXrURJ4AJS", ) amountAsset: Asset, @ApiModelProperty( value = "Base58 encoded price asset ID. Waves is used if field isn't specified", dataType = "string", example = "DG2xFkPdDwKUoBkzGAhQtLpSGzfXLiCYPEzeKH2Ad24p", ) priceAsset: Asset) { @ApiModelProperty(hidden = true) lazy val priceAssetStr: String = priceAsset.toString @ApiModelProperty(hidden = true) lazy val amountAssetStr: String = amountAsset.toString def key: String = amountAssetStr + "-" + priceAssetStr override def toString: String = key def isValid: Validation = (amountAsset != priceAsset) :| "Invalid AssetPair" def bytes: Array[Byte] = amountAsset.byteRepr ++ priceAsset.byteRepr def reverse: AssetPair = AssetPair(priceAsset, amountAsset) def assets: Set[Asset] = Set(amountAsset, priceAsset) } object AssetPair { def extractAsset(a: String): Try[Asset] = a match { case Asset.WavesName => Success(Waves) case other => ByteStr.decodeBase58(other).map(IssuedAsset) } def extractAssetPair(s: String): Try[AssetPair] = s.split('-') match { case Array(amtAssetStr, prcAssetStr) => AssetPair.createAssetPair(amtAssetStr, prcAssetStr).recoverWith { case e => Failure(new Exception(s"$s (${e.getMessage})", e)) } case xs => Failure(new Exception(s"$s (incorrect assets count, expected 2 but got ${xs.length})")) } def createAssetPair(amountAsset: String, priceAsset: String): Try[AssetPair] = for { a1 <- extractAsset(amountAsset) a2 <- extractAsset(priceAsset) } yield AssetPair(a1, a2) def fromBytes(xs: Array[Byte]): AssetPair = { val (amount, offset) = deser.parseByteArrayOption(xs, 0, Asset.AssetIdLength) val (price, _) = deser.parseByteArrayOption(xs, offset, Asset.AssetIdLength) AssetPair( Asset.fromCompatId(amount.map(ByteStr(_))), Asset.fromCompatId(price.map(ByteStr(_))) ) } implicit val assetPairReader: ValueReader[AssetPair] = { (cfg, path) => val source = cfg.getString(path) extractAssetPair(source).fold(e => throw e, identity) } implicit val assetPairFormat: OFormat[AssetPair] = ( (JsPath \ "amountAsset").formatWithDefault[Asset](Waves) and (JsPath \ "priceAsset").formatWithDefault[Asset](Waves) )(AssetPair.apply, Function.unlift(AssetPair.unapply)) val assetPairKeyAsStringFormat: Format[AssetPair] = Format( fjs = Reads { case JsString(x) => AssetPair.extractAssetPair(x).fold(e => JsError(e.getMessage), JsSuccess(_)) case x => JsError(JsPath, s"Expected a string, but got ${x.toString().take(10)}...") }, tjs = Writes { x => JsString(x.key) } ) }
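The parser accepts the same AMOUNT-PRICE key format that `key` prints; a sketch, assuming `Asset.WavesName` is the string "WAVES":

AssetPair.extractAssetPair("WAVES-8LQW8f7P5d5PZM7GtZEBgaqRPGSzS3DfPuiXrURJ4AJS")
// Success(AssetPair(Waves, IssuedAsset(...))): amount asset first, price asset second

AssetPair.extractAssetPair("WAVES")
// Failure: "WAVES (incorrect assets count, expected 2 but got 1)"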
Example 67
Source File: WsConnection.scala From matcher with MIT License | 5 votes |
package com.wavesplatform.dex.load.ws

import akka.Done
import akka.actor.{ActorRef, ActorSystem, Status}
import akka.http.scaladsl.Http
import akka.http.scaladsl.model.ws.{BinaryMessage, Message, TextMessage, WebSocketRequest}
import akka.stream.scaladsl.{Flow, Sink, Source}
import akka.stream.{CompletionStrategy, Materializer, OverflowStrategy}
import com.wavesplatform.dex.api.ws.connection.TestWsHandlerActor
import com.wavesplatform.dex.api.ws.protocol.{WsClientMessage, WsMessage, WsServerMessage}
import com.wavesplatform.dex.domain.utils.ScorexLogging
import play.api.libs.json.Json

import scala.concurrent.Future
import scala.concurrent.duration.DurationInt
import scala.util.{Failure, Success, Try}

class WsConnection(uri: String, receive: WsServerMessage => Option[WsClientMessage])(implicit system: ActorSystem) extends ScorexLogging {

  import system.dispatcher
  private implicit val materializer = Materializer(system)
  private val wsHandlerRef = system.actorOf(TestWsHandlerActor.props(keepAlive = true))

  log.info(s"Connecting to Matcher WS API: $uri")

  protected def stringifyClientMessage(cm: WsClientMessage): TextMessage.Strict =
    WsMessage.toStrictTextMessage(cm)(WsClientMessage.wsClientMessageWrites)

  // To server
  private val source: Source[TextMessage.Strict, ActorRef] = {
    val completionMatcher: PartialFunction[Any, CompletionStrategy] = { case akka.actor.Status.Success(_) => CompletionStrategy.draining }
    val failureMatcher: PartialFunction[Any, Throwable]             = { case Status.Failure(cause) => cause }
    Source
      .actorRef[WsClientMessage](completionMatcher, failureMatcher, 10, OverflowStrategy.fail)
      .map(stringifyClientMessage)
      .mapMaterializedValue { source =>
        wsHandlerRef.tell(TestWsHandlerActor.AssignSourceRef, source)
        source
      }
  }

  // To client
  private val sink: Sink[Message, Future[Done]] = Sink.foreach {
    case tm: TextMessage => // TODO move to tests
      for {
        strictText <- tm.toStrict(1.second).map(_.getStrictText)
        clientMessage <- {
          log.trace(s"Got $strictText")
          Try { Json.parse(strictText).as[WsServerMessage] } match {
            case Failure(exception) => Future.failed(exception)
            case Success(x)         => Future.successful { receive(x).foreach(wsHandlerRef ! _) }
          }
        }
      } yield clientMessage

    case bm: BinaryMessage =>
      bm.dataStream.runWith(Sink.ignore)
      Future.failed { new IllegalArgumentException("Binary messages are not supported") }
  }

  private val flow: Flow[Message, TextMessage.Strict, Future[Done]] =
    Flow.fromSinkAndSourceCoupled(sink, source).watchTermination() { case (_, f) =>
      f.onComplete {
        case Success(_) => log.info(s"WebSocket connection to $uri successfully closed")
        case Failure(e) => log.error(s"WebSocket connection to $uri closed with an error", e)
      }(materializer.executionContext)
      f
    }

  val (connectionResponse, closed) = Http().singleWebSocketRequest(WebSocketRequest(uri), flow)

  def send(message: WsClientMessage): Unit = wsHandlerRef ! TestWsHandlerActor.SendToServer(message)

  def isClosed: Boolean = closed.isCompleted

  def close(): Future[Done] = {
    if (!isClosed) wsHandlerRef ! TestWsHandlerActor.CloseConnection
    closed
  }
}
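A connection sketch under assumed values: the URI is hypothetical, and the receive callback never replies (it returns None for every server message), which is enough to observe the logged traffic:

import akka.actor.ActorSystem

implicit val system: ActorSystem = ActorSystem("ws-example")
val connection = new WsConnection("ws://127.0.0.1:6886/ws/v0", _ => None)
// ... connection.send(someClientMessage) ...
connection.close() // completes the 'closed' future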
Example 68
Source File: package.scala From matcher with MIT License | 5 votes |
package com.wavesplatform.dex.it import com.wavesplatform.dex.domain.account.AddressScheme import com.wavesplatform.dex.domain.asset.{Asset, AssetPair} import com.wavesplatform.dex.domain.bytes.ByteStr import com.wavesplatform.dex.domain.order.Order import com.wavesplatform.wavesj.Transaction import com.wavesplatform.wavesj.json.WavesJsonMapper import com.wavesplatform.wavesj.transactions.ExchangeTransaction import play.api.libs.json._ import play.api.libs.json.jackson.PlayJsonModule import scala.util.{Failure, Success, Try} package object json { private val mapper = new WavesJsonMapper(AddressScheme.current.chainId) mapper.registerModule(new PlayJsonModule(JsonParserSettings())) private def wavesJDeserializeTx(json: JsValue): Transaction = mapper.readValue(json.toString, classOf[Transaction]) implicit val transactionFormat: Format[Transaction] = Format[Transaction]( Reads { json => Try(wavesJDeserializeTx(json)) match { case Success(x) => JsSuccess(x) case Failure(e) => JsError(e.getMessage) } }, Writes(tx => Json.parse(mapper.writeValueAsString(tx))) ) implicit val byteStrFormat: Format[ByteStr] = Format( Reads { case JsString(str) => ByteStr.decodeBase58(str) match { case Success(x) => JsSuccess(x) case Failure(e) => JsError(e.getMessage) } case _ => JsError("Can't read ByteStr") }, Writes(x => JsString(x.toString)) ) implicit val exchangeTxReads: Reads[ExchangeTransaction] = transactionFormat.map(_.asInstanceOf[ExchangeTransaction]) implicit val orderWrites: Writes[Order] = Writes(_.json()) implicit val assetPairFormat: Format[AssetPair] = AssetPair.assetPairFormat implicit val assetRatesReads: Reads[Map[Asset, Double]] = Reads { json => json.validate[Map[String, Double]].map { assetRates => assetRates.map { case (assetStr, rateValue) => AssetPair.extractAsset(assetStr).get -> rateValue } } } implicit val assetBalancesReads: Reads[Map[Asset, Long]] = Reads.map[Long].map { assetBalances => assetBalances.map { case (assetStr, balanceValue) => AssetPair.extractAsset(assetStr).get -> balanceValue } } implicit val assetPairOffsetsReads: Reads[Map[AssetPair, Long]] = Reads { json => json.validate[Map[String, Long]].map { _.map { case (assetPairStr, offset) => val assetPairStrArr = assetPairStr.split("-") val assetPair = ( assetPairStrArr match { case Array(amtAssetStr, prcAssetStr) => AssetPair.createAssetPair(amtAssetStr, prcAssetStr) case _ => throw new Exception(s"$assetPairStr (incorrect assets count, expected 2 but got ${assetPairStrArr.size})") } ) fold (ex => throw new Exception(s"$assetPairStr (${ex.getMessage})"), identity) assetPair -> offset } } } }
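Both Reads above repeat one move: run a Try, then turn Success into JsSuccess and Failure into JsError. A generic helper capturing that shape, assuming only play-json on the classpath:

import play.api.libs.json._
import scala.util.{Failure, Success, Try}

def readsFromTry[A](parse: JsValue => Try[A]): Reads[A] = Reads { json =>
  parse(json) match {
    case Success(a) => JsSuccess(a)
    case Failure(e) => JsError(e.getMessage)
  }
}

// e.g. a Reads[java.util.UUID] built from the helper
val uuidReads: Reads[java.util.UUID] =
  readsFromTry(js => Try(java.util.UUID.fromString(js.as[String])))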
Example 69
Source File: CanWait.scala From matcher with MIT License | 5 votes |
package com.wavesplatform.dex.it.fp import com.wavesplatform.dex.it.time.GlobalTimer import com.wavesplatform.dex.it.time.GlobalTimer.TimerOpsImplicits import scala.concurrent.Future import scala.concurrent.duration.FiniteDuration import scala.util.{Success, Try} trait CanWait[F[_]] { def wait(duration: FiniteDuration): F[Unit] } object CanWait { implicit val future: CanWait[Future] = (duration: FiniteDuration) => GlobalTimer.instance.sleep(duration) implicit val tryCanWait: CanWait[Try] = (duration: FiniteDuration) => { Thread.sleep(duration.toMillis) Success(()) } }
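The Try instance blocks the calling thread and wraps the unit result in Success, so it can stand in for the Future instance in synchronous test code. A usage sketch, assuming the trait and companion above are in scope:

import scala.concurrent.duration._
import scala.util.Try

def pause[F[_]](d: FiniteDuration)(implicit cw: CanWait[F]): F[Unit] = cw.wait(d)

val slept: Try[Unit] = pause[Try](50.millis) // Success(()) after roughly 50 ms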
Example 70
Source File: ResponseParsers.scala From matcher with MIT License | 5 votes |
package com.wavesplatform.dex.it.sttp import com.google.common.primitives.Longs import com.softwaremill.sttp.{DeserializationError, ResponseAs, MonadError => _, _} import com.typesafe.config.{Config, ConfigFactory} import play.api.libs.json.JsError import scala.util.{Failure, Success, Try} object ResponseParsers { val asUtf8String: ResponseAs[String, Nothing] = asString("UTF-8") def asLong: ResponseAs[Either[DeserializationError[JsError], Long], Nothing] = asUtf8String.map { string => val r = Longs.tryParse(string) if (r == null) Left(DeserializationError[JsError](string, JsError("Can't parse Long"), "Can't parse Long")) else Right(r) } def asConfig: ResponseAs[Either[DeserializationError[JsError], Config], Nothing] = asUtf8String.map { string => Try(ConfigFactory.parseString(string)) match { case Success(r) => Right(r) case Failure(e) => Left(DeserializationError[JsError](string, JsError("Can't parse Config"), s"Can't parse Config: ${e.getMessage}")) } } }
Example 71
Source File: InformativeTestStart.scala From matcher with MIT License | 5 votes |
package com.wavesplatform.dex.it.test import java.time.{LocalDateTime, ZoneId} import com.wavesplatform.dex.it.api.BaseContainersKit import mouse.any._ import org.scalatest.{Args, Status, Suite} import scala.util.{Failure, Success} trait InformativeTestStart extends Suite { self: BaseContainersKit => override protected def runTest(testName: String, args: Args): Status = { def print(text: String): Unit = writeGlobalLog(s"---------- [${LocalDateTime.now(ZoneId.of("UTC"))}] $text ----------") print(s"Test '$testName' started") super.runTest(testName, args) unsafeTap { _.whenCompleted { case Success(r) => print(s"Test '$testName' ${if (r) "succeeded" else "failed"}") case Failure(e) => print(s"Test '$testName' failed with exception '${e.getClass.getSimpleName}'") } } } protected def writeGlobalLog(x: String): Unit = { log.debug(x) knownContainers.get().foreach { _.printDebugMessage(x) } } }
Example 72
Source File: EmbeddedCassandra.scala From phantom-activator-template with Apache License 2.0 | 5 votes |
package controllers

import java.io.File
import java.util.concurrent.atomic.AtomicBoolean

import org.cassandraunit.utils.EmbeddedCassandraServerHelper
import org.slf4j.Logger

import scala.concurrent.blocking
import scala.util.control.NonFatal
import scala.util.{Failure, Success, Try}

// The enclosing object and the `started` flag were cut off in the snippet;
// they are restored here from how `start` and `cleanup` use them.
object EmbeddedCassandra {

  private val started = new AtomicBoolean(false)

  def start(logger: Logger, config: Option[File] = None, timeout: Option[Int] = None): Unit = {
    this.synchronized {
      if (started.compareAndSet(false, true)) {
        blocking {
          val configFile = config.map(_.toURI.toString) getOrElse EmbeddedCassandraServerHelper.DEFAULT_CASSANDRA_YML_FILE
          System.setProperty("cassandra.config", configFile)
          Try {
            EmbeddedCassandraServerHelper.mkdirs()
          } match {
            case Success(value)       => logger.info("Successfully created directories for embedded Cassandra.")
            case Failure(NonFatal(e)) => logger.error(s"Error creating Embedded cassandra directories: ${e.getMessage}")
          }
          (config, timeout) match {
            case (Some(file), None) =>
              logger.info(s"Starting Cassandra in embedded mode with configuration from $file.")
              EmbeddedCassandraServerHelper.startEmbeddedCassandra(
                file,
                EmbeddedCassandraServerHelper.DEFAULT_TMP_DIR,
                EmbeddedCassandraServerHelper.DEFAULT_STARTUP_TIMEOUT
              )
            case (Some(file), Some(time)) =>
              logger.info(s"Starting Cassandra in embedded mode with configuration from $file and timeout set to $time ms.")
              EmbeddedCassandraServerHelper.startEmbeddedCassandra(
                file,
                EmbeddedCassandraServerHelper.DEFAULT_TMP_DIR,
                time
              )
            case (None, Some(time)) =>
              logger.info(s"Starting Cassandra in embedded mode with default configuration and timeout set to $time ms.")
              EmbeddedCassandraServerHelper.startEmbeddedCassandra(time)
            case (None, None) =>
              logger.info("Starting Cassandra in embedded mode with default configuration.")
              EmbeddedCassandraServerHelper.startEmbeddedCassandra()
              logger.info("Successfully started embedded Cassandra")
          }
        }
      } else {
        logger.info("Embedded Cassandra has already been started")
      }
    }
  }

  def cleanup(logger: Logger): Unit = {
    this.synchronized {
      if (started.compareAndSet(true, false)) {
        logger.info("Cleaning up embedded Cassandra")
        EmbeddedCassandraServerHelper.cleanEmbeddedCassandra()
      } else {
        logger.info("Cassandra is not running, not cleaning up")
      }
    }
  }
}
Example 73
Source File: MetricsReporter.scala From akka-persistence-dynamodb with Apache License 2.0 | 5 votes |
package com.github.j5ik2o.akka.persistence.dynamodb.metrics import akka.actor.DynamicAccess import com.github.j5ik2o.akka.persistence.dynamodb.config.PluginConfig import com.github.j5ik2o.akka.persistence.dynamodb.exception.PluginException import scala.concurrent.duration.Duration import scala.collection.immutable._ import scala.util.{ Failure, Success } trait MetricsReporter { def setDynamoDBClientPutItemDuration(duration: Duration): Unit def setDynamoDBClientBatchWriteItemDuration(duration: Duration): Unit def setDynamoDBClientUpdateItemDuration(duration: Duration): Unit def setDynamoDBClientDeleteItemDuration(duration: Duration): Unit def setDynamoDBClientQueryDuration(duration: Duration): Unit def setDynamoDBClientScanDuration(duration: Duration): Unit } object MetricsReporter { class None(pluginConfig: PluginConfig) extends MetricsReporter { override def setDynamoDBClientPutItemDuration(duration: Duration): Unit = {} override def setDynamoDBClientBatchWriteItemDuration(duration: Duration): Unit = {} override def setDynamoDBClientUpdateItemDuration(duration: Duration): Unit = {} override def setDynamoDBClientDeleteItemDuration(duration: Duration): Unit = {} override def setDynamoDBClientQueryDuration(duration: Duration): Unit = {} override def setDynamoDBClientScanDuration(duration: Duration): Unit = {} } } trait MetricsReporterProvider { def create: Option[MetricsReporter] } object MetricsReporterProvider { def create(dynamicAccess: DynamicAccess, pluginConfig: PluginConfig): MetricsReporterProvider = { val className = pluginConfig.metricsReporterProviderClassName dynamicAccess .createInstanceFor[MetricsReporterProvider]( className, Seq(classOf[DynamicAccess] -> dynamicAccess, classOf[PluginConfig] -> pluginConfig) ) match { case Success(value) => value case Failure(ex) => throw new PluginException("Failed to initialize MetricsReporterProvider", Some(ex)) } } final class Default(dynamicAccess: DynamicAccess, pluginConfig: PluginConfig) extends MetricsReporterProvider { def create: Option[MetricsReporter] = { pluginConfig.metricsReporterClassName.map { className => dynamicAccess .createInstanceFor[MetricsReporter]( className, Seq(classOf[PluginConfig] -> pluginConfig) ) match { case Success(value) => value case Failure(ex) => throw new PluginException("Failed to initialize MetricsReporter", Some(ex)) } } } } }
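This provider and the ones in the following examples all repeat one shape: dynamicAccess.createInstanceFor returns a Try, which is collapsed to the instance on Success or rethrown as a PluginException on Failure. Extracted into a helper, the shape looks like this (a sketch; the helper name is illustrative):

import scala.util.{Failure, Success, Try}

def instanceOrFail[A](attempt: Try[A], what: String): A = attempt match {
  case Success(value) => value
  case Failure(ex)    => throw new PluginException(s"Failed to initialize $what", Some(ex))
}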
Example 74
Source File: DnsResolverProvider.scala From akka-persistence-dynamodb with Apache License 2.0 | 5 votes |
package com.github.j5ik2o.akka.persistence.dynamodb.client.v1 import akka.actor.DynamicAccess import com.amazonaws.DnsResolver import com.github.j5ik2o.akka.persistence.dynamodb.config.PluginConfig import com.github.j5ik2o.akka.persistence.dynamodb.exception.PluginException import scala.collection.immutable._ import scala.util.{ Failure, Success } trait DnsResolverProvider { def create: Option[DnsResolver] } object DnsResolverProvider { def create(dynamicAccess: DynamicAccess, pluginConfig: PluginConfig): DnsResolverProvider = { val className = pluginConfig.clientConfig.v1ClientConfig.clientConfiguration.dnsResolverProviderClassName dynamicAccess .createInstanceFor[DnsResolverProvider]( className, Seq( classOf[DynamicAccess] -> dynamicAccess, classOf[PluginConfig] -> pluginConfig ) ) match { case Success(value) => value case Failure(ex) => throw new PluginException("Failed to initialize DnsResolverProvider", Some(ex)) } } final class Default(dynamicAccess: DynamicAccess, pluginConfig: PluginConfig) extends DnsResolverProvider { override def create: Option[DnsResolver] = { val classNameOpt = pluginConfig.clientConfig.v1ClientConfig.clientConfiguration.dnsResolverClassName classNameOpt.map { className => dynamicAccess .createInstanceFor[DnsResolver]( className, Seq( classOf[PluginConfig] -> pluginConfig ) ) match { case Success(value) => value case Failure(ex) => throw new PluginException("Failed to initialize DnsResolver", Some(ex)) } } } } }
Example 75
Source File: ExecutionInterceptorsProvider.scala From akka-persistence-dynamodb with Apache License 2.0 | 5 votes |
package com.github.j5ik2o.akka.persistence.dynamodb.client.v2 import akka.actor.DynamicAccess import com.github.j5ik2o.akka.persistence.dynamodb.config.PluginConfig import com.github.j5ik2o.akka.persistence.dynamodb.exception.PluginException import software.amazon.awssdk.core.interceptor.ExecutionInterceptor import scala.collection.immutable._ import scala.util.{ Failure, Success } trait ExecutionInterceptorsProvider { def create: Seq[ExecutionInterceptor] } object ExecutionInterceptorsProvider { def create(dynamicAccess: DynamicAccess, pluginConfig: PluginConfig): ExecutionInterceptorsProvider = { val className = pluginConfig.clientConfig.v2ClientConfig.executionInterceptorsProviderClassName dynamicAccess .createInstanceFor[ExecutionInterceptorsProvider]( className, Seq(classOf[DynamicAccess] -> dynamicAccess, classOf[PluginConfig] -> pluginConfig) ) match { case Success(value) => value case Failure(ex) => throw new PluginException("Failed to initialize ExecutionInterceptorsProvider", Some(ex)) } } final class Default(dynamicAccess: DynamicAccess, pluginConfig: PluginConfig) extends ExecutionInterceptorsProvider { override def create: Seq[ExecutionInterceptor] = { val classNames = pluginConfig.clientConfig.v2ClientConfig.executionInterceptorClassNames classNames.map { className => dynamicAccess .createInstanceFor[ExecutionInterceptor](className, Seq(classOf[PluginConfig] -> pluginConfig)) match { case Success(value) => value case Failure(ex) => throw new PluginException("Failed to initialize ExecutionInterceptor", Some(ex)) } } } } }
Example 76
Source File: RetryPolicyProvider.scala From akka-persistence-dynamodb with Apache License 2.0 | 5 votes |
package com.github.j5ik2o.akka.persistence.dynamodb.client.v1 import akka.actor.DynamicAccess import com.amazonaws.retry.{ PredefinedRetryPolicies, RetryPolicy } import com.github.j5ik2o.akka.persistence.dynamodb.config.PluginConfig import com.github.j5ik2o.akka.persistence.dynamodb.exception.PluginException import scala.collection.immutable._ import scala.util.{ Failure, Success } trait RetryPolicyProvider { def create: RetryPolicy } object RetryPolicyProvider { def create(dynamicAccess: DynamicAccess, pluginConfig: PluginConfig): Option[RetryPolicyProvider] = { val classNameOpt = pluginConfig.clientConfig.v1ClientConfig.clientConfiguration.retryPolicyProviderClassName classNameOpt.map { className => dynamicAccess .createInstanceFor[RetryPolicyProvider]( className, Seq( classOf[DynamicAccess] -> dynamicAccess, classOf[PluginConfig] -> pluginConfig ) ) match { case Success(value) => value case Failure(ex) => throw new PluginException("Failed to initialize RetryPolicyProvider", Some(ex)) } } } final class Default(dynamicAccess: DynamicAccess, pluginConfig: PluginConfig) extends RetryPolicyProvider { override def create: RetryPolicy = { pluginConfig.clientConfig.v1ClientConfig.clientConfiguration.maxErrorRetry .fold(PredefinedRetryPolicies.getDynamoDBDefaultRetryPolicy) { maxErrorRetry => PredefinedRetryPolicies.getDynamoDBDefaultRetryPolicyWithCustomMaxRetries(maxErrorRetry) } } } }
Example 77
Source File: RequestHandlersProvider.scala From akka-persistence-dynamodb with Apache License 2.0 | 5 votes |
package com.github.j5ik2o.akka.persistence.dynamodb.client.v1 import akka.actor.DynamicAccess import com.amazonaws.handlers.RequestHandler2 import com.github.j5ik2o.akka.persistence.dynamodb.config.PluginConfig import com.github.j5ik2o.akka.persistence.dynamodb.exception.PluginException import scala.collection.immutable._ import scala.util.{ Failure, Success } trait RequestHandlersProvider { def create: Seq[RequestHandler2] } object RequestHandlersProvider { def create(dynamicAccess: DynamicAccess, pluginConfig: PluginConfig): RequestHandlersProvider = { val className = pluginConfig.clientConfig.v1ClientConfig.requestHandlersProviderClassName dynamicAccess .createInstanceFor[RequestHandlersProvider]( className, Seq( classOf[DynamicAccess] -> dynamicAccess, classOf[PluginConfig] -> pluginConfig ) ) match { case Success(value) => value case Failure(ex) => throw new PluginException("Failed to initialize RequestHandlersProvider", Some(ex)) } } final class Default(dynamicAccess: DynamicAccess, pluginConfig: PluginConfig) extends RequestHandlersProvider { override def create: Seq[RequestHandler2] = { val classNames = pluginConfig.clientConfig.v1ClientConfig.requestHandlerClassNames classNames.map { className => dynamicAccess .createInstanceFor[RequestHandler2]( className, Seq( classOf[DynamicAccess] -> dynamicAccess, classOf[PluginConfig] -> pluginConfig ) ) match { case Success(value) => value case Failure(ex) => throw new PluginException("Failed to initialize RequestHandler2", Some(ex)) } } } } }
Example 78
Source File: MonitoringListenerProvider.scala From akka-persistence-dynamodb with Apache License 2.0 | 5 votes |
package com.github.j5ik2o.akka.persistence.dynamodb.client.v1 import akka.actor.DynamicAccess import com.amazonaws.monitoring.MonitoringListener import com.github.j5ik2o.akka.persistence.dynamodb.config.PluginConfig import com.github.j5ik2o.akka.persistence.dynamodb.exception.PluginException import scala.collection.immutable._ import scala.util.{ Failure, Success } trait MonitoringListenerProvider { def create: Option[MonitoringListener] } object MonitoringListenerProvider { def create(dynamicAccess: DynamicAccess, pluginConfig: PluginConfig): MonitoringListenerProvider = { val className = pluginConfig.clientConfig.v1ClientConfig.monitoringListenerProviderClassName dynamicAccess .createInstanceFor[MonitoringListenerProvider]( className, Seq( classOf[DynamicAccess] -> dynamicAccess, classOf[PluginConfig] -> pluginConfig ) ) match { case Success(value) => value case Failure(ex) => throw new PluginException("Failed to initialize MonitoringListenerProvider", Some(ex)) } } final class Default(dynamicAccess: DynamicAccess, pluginConfig: PluginConfig) extends MonitoringListenerProvider { override def create: Option[MonitoringListener] = { val classNameOpt = pluginConfig.clientConfig.v1ClientConfig.monitoringListenerClassName classNameOpt.map { className => dynamicAccess .createInstanceFor[MonitoringListener]( className, Seq( classOf[DynamicAccess] -> dynamicAccess, classOf[PluginConfig] -> pluginConfig ) ) match { case Success(value) => value case Failure(ex) => throw new PluginException("Failed to initialize MonitoringListener", Some(ex)) } } } } }
Example 79
Source File: SecureRandomProvider.scala From akka-persistence-dynamodb with Apache License 2.0 | 5 votes |
package com.github.j5ik2o.akka.persistence.dynamodb.client.v1 import java.security.SecureRandom import akka.actor.DynamicAccess import com.github.j5ik2o.akka.persistence.dynamodb.config.PluginConfig import com.github.j5ik2o.akka.persistence.dynamodb.exception.PluginException import scala.collection.immutable._ import scala.util.{ Failure, Success } trait SecureRandomProvider { def create: SecureRandom } object SecureRandomProvider { def create(dynamicAccess: DynamicAccess, pluginConfig: PluginConfig): SecureRandomProvider = { val className = pluginConfig.clientConfig.v1ClientConfig.clientConfiguration.secureRandomProviderClassName dynamicAccess .createInstanceFor[SecureRandomProvider]( className, Seq( classOf[DynamicAccess] -> dynamicAccess, classOf[PluginConfig] -> pluginConfig ) ) match { case Success(value) => value case Failure(ex) => throw new PluginException("Failed to initialize SecureRandomProvider", Some(ex)) } } final class Default(dynamicAccess: DynamicAccess, pluginConfig: PluginConfig) extends SecureRandomProvider { override def create: SecureRandom = new SecureRandom() } }
Example 80
Source File: RequestMetricCollectorProvider.scala From akka-persistence-dynamodb with Apache License 2.0 | 5 votes |
package com.github.j5ik2o.akka.persistence.dynamodb.client.v1 import akka.actor.DynamicAccess import com.amazonaws.metrics.RequestMetricCollector import com.github.j5ik2o.akka.persistence.dynamodb.config.PluginConfig import com.github.j5ik2o.akka.persistence.dynamodb.exception.PluginException import scala.collection.immutable._ import scala.util.{ Failure, Success } trait RequestMetricCollectorProvider { def create: Option[RequestMetricCollector] } object RequestMetricCollectorProvider { def create(dynamicAccess: DynamicAccess, pluginConfig: PluginConfig): RequestMetricCollectorProvider = { val className = pluginConfig.clientConfig.v1ClientConfig.requestMetricCollectorProviderClassName dynamicAccess .createInstanceFor[RequestMetricCollectorProvider]( className, Seq(classOf[DynamicAccess] -> dynamicAccess, classOf[PluginConfig] -> pluginConfig) ) match { case Success(value) => value case Failure(ex) => throw new PluginException("Failed to initialize RequestMetricCollectorProvider", Some(ex)) } } final class Default(dynamicAccess: DynamicAccess, pluginConfig: PluginConfig) extends RequestMetricCollectorProvider { override def create: Option[RequestMetricCollector] = { val classNameOpt = pluginConfig.clientConfig.v1ClientConfig.requestMetricCollectorClassName classNameOpt.map { className => dynamicAccess .createInstanceFor[RequestMetricCollector]( className, Seq(classOf[DynamicAccess] -> dynamicAccess, classOf[PluginConfig] -> pluginConfig) ) match { case Success(value) => value case Failure(ex) => throw new PluginException("Failed to initialize RequestMetricCollector", Some(ex)) } } } } }
Example 81
Source File: ByteArrayJournalSerializer.scala From akka-persistence-dynamodb with Apache License 2.0 | 5 votes |
package com.github.j5ik2o.akka.persistence.dynamodb.serialization import akka.persistence.PersistentRepr import akka.serialization.Serialization import com.github.j5ik2o.akka.persistence.dynamodb.journal.JournalRow import com.github.j5ik2o.akka.persistence.dynamodb.model.{ PersistenceId, SequenceNumber } import scala.util.{ Failure, Success } class ByteArrayJournalSerializer(serialization: Serialization, separator: String) extends FlowPersistentReprSerializer[JournalRow] { override def serialize( persistentRepr: PersistentRepr, tags: Set[String], index: Option[Int] ): Either[Throwable, JournalRow] = { serialization .serialize(persistentRepr) .map( JournalRow( PersistenceId(persistentRepr.persistenceId), SequenceNumber(persistentRepr.sequenceNr), persistentRepr.deleted, _, System.currentTimeMillis(), encodeTags(tags, separator) ) ) match { case Success(value) => Right(value) case Failure(ex) => Left(ex) } } override def deserialize(journalRow: JournalRow): Either[Throwable, (PersistentRepr, Set[String], Long)] = { serialization .deserialize(journalRow.message, classOf[PersistentRepr]) .map((_, decodeTags(journalRow.tags, separator), journalRow.ordering)) match { case Success(value) => Right(value) case Failure(ex) => Left(ex) } } }
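On Scala 2.12 and later, the Success/Failure match used twice above is equivalent to Try#toEither, which sends Failure to Left and Success to Right directly:

import scala.util.{Failure, Success, Try}

val viaMatch: Either[Throwable, Int] = Try("7".toInt) match {
  case Success(v)  => Right(v)
  case Failure(ex) => Left(ex)
}
val viaToEither: Either[Throwable, Int] = Try("7".toInt).toEither // same result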
Example 82
Source File: FlowPersistentReprSerializer.scala From akka-persistence-dynamodb with Apache License 2.0 | 5 votes |
package com.github.j5ik2o.akka.persistence.dynamodb.serialization

import akka.NotUsed
import akka.persistence.PersistentRepr
import akka.stream.scaladsl.Flow

import scala.util.{ Failure, Success, Try }

trait FlowPersistentReprSerializer[T] extends PersistentReprSerializer[T] {

  def deserializeFlow: Flow[T, (PersistentRepr, Set[String], Long), NotUsed] = {
    Flow[T].map(deserialize).map {
      case Right(r) => r
      case Left(ex) => throw ex
    }
  }

  def deserializeFlowWithoutTags: Flow[T, PersistentRepr, NotUsed] = {
    deserializeFlow.map(keepPersistentRepr)
  }

  // ---

  def deserializeFlowAsEither: Flow[T, Either[Throwable, (PersistentRepr, Set[String], Long)], NotUsed] = {
    Flow[T].map(deserialize)
  }

  def deserializeFlowWithoutTagsAsEither: Flow[T, Either[Throwable, PersistentRepr], NotUsed] = {
    deserializeFlowAsEither.map {
      case Right(v) => Right(keepPersistentRepr(v))
      case Left(ex) => Left(ex)
    }
  }

  // ---

  def deserializeFlowAsTry: Flow[T, Try[(PersistentRepr, Set[String], Long)], NotUsed] = {
    Flow[T].map(deserialize).map {
      case Right(v) => Success(v)
      case Left(ex) => Failure(ex)
    }
  }

  def deserializeFlowWithoutTagsAsTry: Flow[T, Try[PersistentRepr], NotUsed] = {
    deserializeFlowAsTry.map(_.map(keepPersistentRepr))
  }

  private def keepPersistentRepr(tup: (PersistentRepr, Set[String], Long)): PersistentRepr = tup match {
    case (repr, _, _) => repr
  }
}
Example 83
Source File: ByteArraySnapshotSerializer.scala From akka-persistence-dynamodb with Apache License 2.0 | 5 votes |
package com.github.j5ik2o.akka.persistence.dynamodb.serialization import akka.persistence.SnapshotMetadata import akka.persistence.serialization.Snapshot import akka.serialization.Serialization import com.github.j5ik2o.akka.persistence.dynamodb.model.{ PersistenceId, SequenceNumber } import com.github.j5ik2o.akka.persistence.dynamodb.snapshot.dao.SnapshotRow import scala.util.{ Failure, Success } class ByteArraySnapshotSerializer(serialization: Serialization) extends SnapshotSerializer[SnapshotRow] { override def serialize( metadata: SnapshotMetadata, snapshot: Any ): Either[Throwable, SnapshotRow] = { serialization .serialize(Snapshot(snapshot)) .map( SnapshotRow(PersistenceId(metadata.persistenceId), SequenceNumber(metadata.sequenceNr), metadata.timestamp, _) ) match { case Success(value) => Right(value) case Failure(ex) => Left(ex) } } override def deserialize(snapshotRow: SnapshotRow): Either[Throwable, (SnapshotMetadata, Any)] = { serialization .deserialize(snapshotRow.snapshot, classOf[Snapshot]) .map(snapshot => { val snapshotMetadata = SnapshotMetadata(snapshotRow.persistenceId.asString, snapshotRow.sequenceNumber.value, snapshotRow.created) (snapshotMetadata, snapshot.data) }) match { case Success(value) => Right(value) case Failure(ex) => Left(ex) } } }
Example 84
Source File: ByteArraySnapshotSerializer.scala From akka-persistence-dynamodb with Apache License 2.0 | 5 votes |
package com.github.j5ik2o.akka.persistence.dynamodb.snapshot.dao import akka.persistence.SnapshotMetadata import akka.persistence.serialization.Snapshot import akka.serialization.Serialization import com.github.j5ik2o.akka.persistence.dynamodb.model.{ PersistenceId, SequenceNumber } import scala.util.{ Failure, Success } trait SnapshotSerializer[T] { def serialize(metadata: SnapshotMetadata, snapshot: Any): Either[Throwable, T] def deserialize(t: T): Either[Throwable, (SnapshotMetadata, Any)] } class ByteArraySnapshotSerializer(serialization: Serialization) extends SnapshotSerializer[SnapshotRow] { override def serialize( metadata: SnapshotMetadata, snapshot: Any ): Either[Throwable, SnapshotRow] = { serialization .serialize(Snapshot(snapshot)) .map( SnapshotRow(PersistenceId(metadata.persistenceId), SequenceNumber(metadata.sequenceNr), metadata.timestamp, _) ) match { case Success(value) => Right(value) case Failure(ex) => Left(ex) } } override def deserialize(snapshotRow: SnapshotRow): Either[Throwable, (SnapshotMetadata, Any)] = { serialization .deserialize(snapshotRow.snapshot, classOf[Snapshot]) .map(snapshot => { val snapshotMetadata = SnapshotMetadata(snapshotRow.persistenceId.asString, snapshotRow.sequenceNumber.value, snapshotRow.created) (snapshotMetadata, snapshot.data) }) match { case Success(value) => Right(value) case Failure(ex) => Left(ex) } } }
Example 85
Source File: Http.scala From AI with Apache License 2.0 | 5 votes |
package com.bigchange.http

import com.bigchange.log.CLogger
import dispatch.Defaults._
import dispatch._

import scala.collection.mutable
import scala.util.{Failure, Success}

// The enclosing object declaration was lost in the snippet (note the unmatched
// closing brace in the original); the name below is inferred and should be treated
// as a placeholder for whatever the source file actually declares.
object HttpClient extends CLogger {

  def post(strUrl: String, parameters: mutable.HashMap[String, String], parse: String): Unit = {
    val post = url(strUrl) << parameters
    val response: Future[String] = Http(post OK as.String)
    response onComplete {
      case Success(content) =>
        // parse(content)
        println("post Success content:" + content)
      case Failure(t) =>
        println("post Failure content:" + t)
    }
  }
}
Example 86
Source File: FlowErrorTest.scala From intro-to-akka-streams with Apache License 2.0 | 5 votes |
package com.github.dnvriend.streams.flow

import akka.stream.ActorAttributes.supervisionStrategy
import akka.stream.Supervision.resumingDecider
import akka.stream.scaladsl._
import akka.stream.testkit.scaladsl._
import com.github.dnvriend.streams.TestSpec

import scala.concurrent.Future
import scala.util.{ Failure, Success, Try }

class FlowErrorTest extends TestSpec {

  "Error stream" should "" in {
  }

  //  it should "stop the stream" in {
  //    Source(Future[String](throw new RuntimeException("Test")))
  //      .withAttributes(supervisionStrategy(resumingDecider))
  //      .map { x => println(x); x }
  //      .runWith(TestSink.probe[String])
  //      .request(1)
  //      .expectError()
  //  }

  it should "resume with no result for the failed future" in {
    val t = new RuntimeException("Test")
    Source(List(1, 2, 3))
      .log("before")
      .mapAsync(3) { x ⇒
        Future {
          if (x == 2) throw t else x
        }
      }
      .withAttributes(supervisionStrategy(resumingDecider))
      .log("after")
      .runWith(TestSink.probe[Int])
      .request(4)
      .expectNext(Success(1))
      .expectNext(Failure(t))
      .expectNext(Success(3))
      .expectComplete()
  }
}
Example 87
Source File: SimpleScalaRiakDataframesExample.scala From spark-riak-connector with Apache License 2.0 | 5 votes |
package com.basho.riak.spark.examples.dataframes

import com.basho.riak.client.core.query.indexes.LongIntIndex
import com.basho.riak.client.core.query.Namespace
import com.basho.riak.spark._
import com.basho.riak.spark.util.RiakObjectConversionUtil
import org.apache.spark.SparkConf
import org.apache.spark.SparkContext

import scala.reflect.runtime.universe
import scala.concurrent.Future
import scala.concurrent.ExecutionContext.Implicits.global
import scala.util.{ Failure, Success }
import com.basho.riak.client.core.query.RiakObject
import com.basho.riak.client.api.RiakClient
import com.basho.riak.client.core.query.Location
import com.basho.riak.spark.rdd.RiakFunctions

object SimpleScalaRiakDataframesExample {

  private val bucketName = "users"

  case class UserData(user_id: String, name: String, age: Int, category: String)

  val testData = Seq(
    UserData("u1", "Ben", 23, "CategoryA"),
    UserData("u2", "Clair", 19, "CategoryB"),
    UserData("u3", "John", 21, null),
    UserData("u4", "Chris", 50, "Categoryc"),
    UserData("u5", "Mary", 15, "CategoryB"),
    UserData("u6", "George", 31, "CategoryC")
  )

  def main(args: Array[String]) {
    val sparkConf = new SparkConf().setAppName("Riak Spark Dataframes Example")
    setSparkOpt(sparkConf, "spark.master", "local")
    setSparkOpt(sparkConf, "spark.riak.connection.host", "127.0.0.1:8087")

    val sc = new SparkContext(sparkConf)

    // Work with clear bucket
    clearBucket(sparkConf)

    val sqlContext = new org.apache.spark.sql.SQLContext(sc)

    // To enable toDF()
    import sqlContext.implicits._

    println(s" Saving data to Riak: \n ${println(testData)}")

    // Save test data from json file to riak bucket
    val inputRDD = sc.parallelize(testData).map { line =>
      val obj = RiakObjectConversionUtil.to(line)
      // RiakObjectConversionUtil.to() sets content type to text/plain if String is passed
      // Overwriting content type to application/json will allow automatic conversion to
      // User defined type when reading from Riak
      obj.setContentType("application/json")
      obj
    }.saveToRiak(bucketName)

    // Read from Riak with UDT to enable schema inference using reflection
    val df = sc.riakBucket[UserData](bucketName).queryAll.toDF

    println(s"Dataframe from Riak query: \n ${df.show()}")

    df.registerTempTable("users")

    println("count by category")
    df.groupBy("category").count.show

    println("sort by num of letters")
    // Register user defined function
    sqlContext.udf.register("stringLength", (s: String) => s.length)
    sqlContext.sql("select user_id, name, stringLength(name) nameLength from users order by nameLength").show

    println("filter age >= 21")
    sqlContext.sql("select * from users where age >= 21").show
  }

  private def clearBucket(sparkConf: SparkConf): Unit = {
    val rf = RiakFunctions(sparkConf)
    rf.withRiakDo(session => {
      rf.resetAndEmptyBucketByName(bucketName)
    })
  }

  private def setSparkOpt(sparkConf: SparkConf, option: String, defaultOptVal: String): SparkConf = {
    val optval = sparkConf.getOption(option).getOrElse(defaultOptVal)
    sparkConf.set(option, optval)
  }
}
Example 88
Source File: BackendConnector.scala From nisp-frontend with Apache License 2.0 | 5 votes |
package uk.gov.hmrc.nisp.connectors import play.api.data.validation.ValidationError import play.api.libs.json.{Format, JsObject, JsPath} import uk.gov.hmrc.http.cache.client.SessionCache import uk.gov.hmrc.nisp.models.enums.APIType._ import uk.gov.hmrc.nisp.services.MetricsService import uk.gov.hmrc.nisp.utils.JsonDepersonaliser import scala.concurrent.ExecutionContext.Implicits.global import scala.concurrent.Future import scala.util.{Failure, Success} import uk.gov.hmrc.http.{ HeaderCarrier, HttpGet, HttpResponse } trait BackendConnector { def http: HttpGet def serviceUrl: String def sessionCache: SessionCache val metricsService: MetricsService protected def retrieveFromCache[A](api: APIType, url: String)(implicit hc: HeaderCarrier, formats: Format[A]): Future[A] = { val keystoreTimerContext = metricsService.keystoreReadTimer.time() val sessionCacheF = sessionCache.fetchAndGetEntry[A](api.toString) sessionCacheF.onFailure { case _ => metricsService.keystoreReadFailed.inc() } sessionCacheF.flatMap { keystoreResult => keystoreTimerContext.stop() keystoreResult match { case Some(data) => metricsService.keystoreHitCounter.inc() Future.successful(data) case None => metricsService.keystoreMissCounter.inc() connectToMicroservice[A](url, api) map { data: A => cacheResult(data, api.toString) } } } } private def connectToMicroservice[A](urlToRead: String, apiType: APIType)(implicit hc: HeaderCarrier, formats: Format[A]): Future[A] = { val timerContext = metricsService.startTimer(apiType) val httpResponseF = http.GET[HttpResponse](urlToRead) httpResponseF onSuccess { case _ => timerContext.stop() } httpResponseF onFailure { case _ => metricsService.incrementFailedCounter(apiType) } httpResponseF.map { httpResponse => httpResponse.json.validate[A].fold( errs => { val json = JsonDepersonaliser.depersonalise(httpResponse.json) match { case Success(s) => s"Depersonalised JSON\n$s" case Failure(e) => s"JSON could not be depersonalised\n${e.toString()}" } throw new JsonValidationException(s"Unable to deserialise $apiType: ${formatJsonErrors(errs)}\n$json") }, valid => valid ) } } private def cacheResult[A](a:A,name: String)(implicit hc: HeaderCarrier, formats: Format[A]): A = { val timerContext = metricsService.keystoreWriteTimer.time() val cacheF = sessionCache.cache[A](name, a) cacheF.onSuccess { case _ => timerContext.stop() } cacheF.onFailure { case _ => metricsService.keystoreWriteFailed.inc() } a } private def formatJsonErrors(errors: Seq[(JsPath, Seq[ValidationError])]): String = { errors.map(p => p._1 + " - " + p._2.map(e => removeJson(e.message)).mkString(",")).mkString(" | ") } private def removeJson(message: String): String = { message.indexOf("{") match { case i if i != -1 => message.substring(0, i - 1) + " [JSON removed]" case _ => message } } private[connectors] class JsonValidationException(message: String) extends Exception(message) }
Example 89
Source File: BackendConnectorSpec.scala From nisp-frontend with Apache License 2.0 | 5 votes |
package uk.gov.hmrc.nisp.connectors import org.mockito.Mockito.when import org.scalatest.concurrent.ScalaFutures import org.scalatest.mock.MockitoSugar import play.api.libs.json.Json import uk.gov.hmrc.http.cache.client.SessionCache import uk.gov.hmrc.nisp.helpers.{MockMetricsService, MockSessionCache} import uk.gov.hmrc.nisp.models.NationalInsuranceRecord import uk.gov.hmrc.nisp.models.enums.APIType import uk.gov.hmrc.nisp.services.MetricsService import uk.gov.hmrc.nisp.utils.JsonDepersonaliser import uk.gov.hmrc.play.test.UnitSpec import scala.concurrent.Future import scala.concurrent.ExecutionContext.Implicits.global import scala.util.{Failure, Success} import uk.gov.hmrc.http.{HeaderCarrier, HttpGet, HttpResponse} class BackendConnectorSpec extends UnitSpec with MockitoSugar with ScalaFutures { val mockHttp: HttpGet = mock[HttpGet] object BackendConnectorImpl extends BackendConnector { override def http: HttpGet = mockHttp override def sessionCache: SessionCache = MockSessionCache override def serviceUrl: String = "national-insurance" override val metricsService: MetricsService = MockMetricsService def getNationalInsurance()(implicit headerCarrier: HeaderCarrier): Future[NationalInsuranceRecord] = { val urlToRead = s"$serviceUrl/ni" retrieveFromCache[NationalInsuranceRecord](APIType.NationalInsurance, urlToRead)(headerCarrier, NationalInsuranceRecord.formats) } } implicit val headerCarrier = HeaderCarrier(extraHeaders = Seq("Accept" -> "application/vnd.hmrc.1.0+json")) "connectToMicroservice" should { "should return depersonalised JSON" in { val json = Json.obj( "qualifyingYearsPriorTo1975" -> 0, "numberOfGaps" -> 6, "numberOfGapsPayable" -> 4, "dateOfEntry" -> "1975-08-01", "homeResponsibilitiesProtection" -> false, "earningsIncludedUpTo" -> "2016-04-05", "_embedded" -> Json.obj( "taxYears" -> Json.arr() ) ) val depersonalisedJson = JsonDepersonaliser.depersonalise(json) match { case Success(s) => s case Failure(_) => fail() } val Ok = 200 val response = Future(HttpResponse(Ok, Option.apply(json))) when(mockHttp.GET[HttpResponse]("national-insurance/ni")).thenReturn(response) val future: Future[NationalInsuranceRecord] = BackendConnectorImpl.getNationalInsurance() whenReady(future.failed) { t: Throwable => t.getMessage.contains(depersonalisedJson) shouldBe true t.getMessage.contains("2016-04-05") shouldBe false } } } }
Example 90
Source File: Main.scala From kafka-configurator with BSD 3-Clause "New" or "Revised" License | 5 votes |
package com.sky.kafka.configurator import cats.implicits._ import com.sky.BuildInfo import com.sky.kafka.configurator.error.ConfiguratorFailure import com.typesafe.scalalogging.LazyLogging import org.zalando.grafter._ import scala.util.{Failure, Success, Try} object Main extends LazyLogging { def main(args: Array[String]): Unit = { logger.info(s"Running ${BuildInfo.name} ${BuildInfo.version} with args: ${args.mkString(", ")}") run(args, sys.env) match { case Success((errors, infoLogs)) => errors.foreach(e => logger.warn(s"${e.getMessage}. Cause: ${e.getCause.getMessage}")) infoLogs.foreach(msg => logger.info(msg)) if (errors.isEmpty) System.exit(0) else System.exit(1) case Failure(t) => logger.error(t.getMessage) System.exit(1) } } def run(args: Array[String], envVars: Map[String, String]): Try[(List[ConfiguratorFailure], List[String])] = ConfigParsing.parse(args, envVars).flatMap { conf => val app = KafkaConfiguratorApp.reader(conf) val result = app.configureTopicsFrom(conf.files.toList) stop(app) result } private def stop(app: KafkaConfiguratorApp): Unit = Rewriter.stop(app).value.foreach { case StopOk(msg) => logger.debug(s"Component stopped: $msg") case StopError(msg, ex) => logger.warn(s"Error whilst stopping component: $msg", ex) case StopFailure(msg) => logger.warn(s"Failure whilst stopping component: $msg") } }
Example 91
Source File: TopicConfigurator.scala From kafka-configurator with BSD 3-Clause "New" or "Revised" License | 5 votes |
package com.sky.kafka.configurator import cats.Eq import cats.data.Reader import cats.instances.int._ import cats.instances.vector._ import cats.instances.try_._ import cats.syntax.eq._ import com.sky.kafka.configurator.error.{ReplicationChangeFound, TopicNotFound} import com.typesafe.scalalogging.LazyLogging import scala.util.control.NonFatal import scala.util.{Failure, Success} case class TopicConfigurator(topicReader: TopicReader, topicWriter: TopicWriter) extends LazyLogging { def configure(topic: Topic): Logger[Unit] = topicReader.fetch(topic.name) match { case Success(currentTopic) => updateTopic(currentTopic, topic) case Failure(TopicNotFound(_)) => topicWriter.create(topic) .withLog(s"Topic ${topic.name} was not found, so it has been created") case Failure(NonFatal(t)) => Failure(t).asWriter } private def updateTopic(oldTopic: Topic, newTopic: Topic): Logger[Unit] = { def ifDifferent[T: Eq](oldValue: T, newValue: T)(updateOperation: (Topic, Topic) => Logger[Unit])(messageIfSame: String): Logger[Unit] = if (oldValue =!= newValue) updateOperation(oldTopic, newTopic) else Success(()).withLog(messageIfSame) import TopicConfigurator._ for { _ <- ifDifferent(oldTopic.replicationFactor, newTopic.replicationFactor)(failReplicationChange)(s"Replication factor unchanged for ${newTopic.name}.") _ <- ifDifferent(oldTopic.partitions, newTopic.partitions)(updatePartitions)(s"No change in number of partitions for ${newTopic.name}") _ <- ifDifferent(oldTopic.config, newTopic.config)(updateConfig)(s"No change in config for ${newTopic.name}") } yield () } private def failReplicationChange(oldTopic: Topic, newTopic: Topic): Logger[Unit] = Failure(ReplicationChangeFound).asWriter private def updatePartitions(oldTopic: Topic, newTopic: Topic): Logger[Unit] = topicWriter .updatePartitions(newTopic.name, newTopic.partitions) .withLog(s"Updated topic ${newTopic.name} from ${oldTopic.partitions} to ${newTopic.partitions} partition(s)") private def updateConfig(oldTopic: Topic, newTopic: Topic): Logger[Unit] = topicWriter .updateConfig(newTopic.name, newTopic.config) .withLog(s"Updated configuration of topic ${newTopic.name}") } object TopicConfigurator { def reader: Reader[AppConfig, TopicConfigurator] = KafkaTopicAdmin.reader .map(kafkaAdminClient => TopicConfigurator(kafkaAdminClient, kafkaAdminClient)) private implicit val topicConfigIsContained: Eq[Map[String, String]] = Eq.instance { case (left, right) => left.toList.forall(right.toList.contains(_)) || right.toList.forall(left.toList.contains(_)) } }
Example 92
Source File: KafkaConfiguratorApp.scala From kafka-configurator with BSD 3-Clause "New" or "Revised" License | 5 votes |
package com.sky.kafka.configurator import java.io.{File, FileReader} import cats.data.Reader import cats.implicits._ import com.sky.kafka.configurator.error.ConfiguratorFailure import scala.util.{Failure, Success, Try} case class KafkaConfiguratorApp(configurator: TopicConfigurator) { def configureTopicsFrom(files: List[File]): Try[(List[ConfiguratorFailure], List[String])] = files.traverse { file => for { fileReader <- Try(new FileReader(file)) topics <- TopicConfigurationParser(fileReader).toTry } yield configureAll(topics) }.map(_.separate.bimap(_.flatten, _.flatten)) private def configureAll(topics: List[Topic]): (List[ConfiguratorFailure], List[String]) = { val (errors, allLogs) = topics.map { topic => configurator.configure(topic).run match { case Success((logs, _)) => Right(logs) case Failure(t) => Left(ConfiguratorFailure(topic.name, t)) } }.separate (errors, allLogs.flatten) } } object KafkaConfiguratorApp { def reader: Reader[AppConfig, KafkaConfiguratorApp] = TopicConfigurator.reader.map(KafkaConfiguratorApp.apply) }
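The .separate call from cats does the partitioning inside configureAll: a List[Either[A, B]] becomes a tuple of all the lefts and all the rights. In isolation:

import cats.implicits._

val results: List[Either[String, Int]] = List(Left("boom"), Right(1), Right(2))
val (errors, values) = results.separate
// errors == List("boom"), values == List(1, 2)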
Example 93
Source File: KafkaTopicAdmin.scala From kafka-configurator with BSD 3-Clause "New" or "Revised" License | 5 votes |
package com.sky.kafka.configurator import java.util.concurrent.ExecutionException import cats.data.Reader import com.sky.kafka.configurator.error.TopicNotFound import org.apache.kafka.clients.admin.AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG import org.apache.kafka.clients.admin._ import org.apache.kafka.common.config.ConfigResource import org.apache.kafka.common.errors.UnknownTopicOrPartitionException import org.zalando.grafter.{ Stop, StopResult } import scala.collection.JavaConverters._ import scala.language.postfixOps import scala.util.{ Failure, Success, Try } object KafkaTopicAdmin { def apply(adminClient: AdminClient): KafkaTopicAdmin = new KafkaTopicAdmin(adminClient) def reader: Reader[AppConfig, KafkaTopicAdmin] = Reader { config => import com.sky.kafka.utils.MapToJavaPropertiesConversion.mapToProperties KafkaTopicAdmin(AdminClient.create(Map(BOOTSTRAP_SERVERS_CONFIG -> config.bootstrapServers) ++ config.props)) } } class KafkaTopicAdmin(ac: AdminClient) extends TopicReader with TopicWriter with Stop { override def fetch(topicName: String) = { def topicDescription = Try { val allDescriptions = ac.describeTopics(Seq(topicName).asJava).all.get allDescriptions.get(topicName) } match { case Success(result) => Success(result) case Failure(e: ExecutionException) if e.getCause.isInstanceOf[UnknownTopicOrPartitionException] => Failure(TopicNotFound(topicName)) case other => other } def topicConfig = Try { val allConfigs = ac.describeConfigs(Seq(configResourceForTopic(topicName)).asJava).all.get allConfigs.get(configResourceForTopic(topicName)) } for { desc <- topicDescription partitions = desc.partitions().size() replicationFactor = desc.partitions().asScala.head.replicas().size() config <- topicConfig } yield Topic(desc.name(), partitions, replicationFactor, config) } override def create(topic: Topic) = Try { val newTopic = new NewTopic(topic.name, topic.partitions, topic.replicationFactor.toShort).configs(topic.config.asJava) ac.createTopics(Seq(newTopic).asJava).all().get } override def updateConfig(topicName: String, config: Map[String, Object]) = Try { val c = config.map { case (key, value) => new ConfigEntry(key, value.toString) }.toList.asJava ac.alterConfigs(Map(configResourceForTopic(topicName) -> new Config(c)).asJava).all().get } override def updatePartitions(topicName: String, numPartitions: Int) = Try { ac.createPartitions(Map(topicName -> NewPartitions.increaseTo(numPartitions)).asJava).all().get() } override def stop = StopResult.eval("KafkaAdminClient")(ac.close()) private def configResourceForTopic(topicName: String) = new ConfigResource(ConfigResource.Type.TOPIC, topicName) private implicit def kafkaConfigToMap(config: Config): Map[String, String] = config.entries().asScala.map { entry => entry.name() -> entry.value() } toMap }
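fetch surfaces a missing topic as the domain error TopicNotFound instead of the raw Kafka exception, so callers can branch on it directly. A handling sketch; the admin value and topic name are hypothetical:

import scala.util.{Failure, Success}

val admin: KafkaTopicAdmin = ??? // wired from an AdminClient in real code
admin.fetch("payments") match {
  case Success(topic)               => println(s"${topic.name}: ${topic.partitions} partition(s)")
  case Failure(TopicNotFound(name)) => println(s"$name does not exist yet")
  case Failure(other)               => throw other
}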
Example 94
Source File: KafkaConfiguratorAppSpec.scala From kafka-configurator with BSD 3-Clause "New" or "Revised" License | 5 votes |
package com.sky.kafka.configurator import java.io.{File, FileReader} import com.sky.kafka.configurator.error.{ConfiguratorFailure, TopicNotFound} import common.BaseSpec import io.circe.generic.AutoDerivation import org.mockito.Mockito._ import org.scalatest.mockito.MockitoSugar import scala.util.{Failure, Success} class KafkaConfiguratorAppSpec extends BaseSpec with MockitoSugar with AutoDerivation { val topicConfigurator = mock[TopicConfigurator] val kafkaConfiguratorApp = KafkaConfiguratorApp(topicConfigurator) it should "provide logs and errors when file has been parsed successfully" in { val file = new File(getClass.getResource("/topic-configuration-with-error.yml").getPath) val topics = TopicConfigurationParser(new FileReader(file)).right.value val error = TopicNotFound(topics(1).name) when(topicConfigurator.configure(topics.head)) .thenReturn(Success(()).withLog("foo")) when(topicConfigurator.configure(topics(1))) .thenReturn(Failure[Unit](error).asWriter) when(topicConfigurator.configure(topics(2))) .thenReturn(Success(()).withLog("bar")) kafkaConfiguratorApp.configureTopicsFrom(List(file)) shouldBe Success(( List(ConfiguratorFailure(topics.tail.head.name, error)), List("foo", "bar") )) } it should "succeed when given empty configuration file" in { val invalidFile = File.createTempFile("empty", "yml") invalidFile.deleteOnExit() kafkaConfiguratorApp.configureTopicsFrom(List(invalidFile)) shouldBe a[Success[_]] } it should "fail-fast when the file does not exist" in { kafkaConfiguratorApp.configureTopicsFrom(List(new File("does-not-exist"))) shouldBe a[Failure[_]] } }
Example 95
Source File: KafkaConfiguratorIntSpec.scala From kafka-configurator with BSD 3-Clause "New" or "Revised" License | 5 votes |
package com.sky.kafka.configurator import common.KafkaIntSpec import kafka.admin.AdminUtils import org.scalatest.concurrent.Eventually import scala.util.Success class KafkaConfiguratorIntSpec extends KafkaIntSpec with Eventually { "KafkaConfigurator" should "create new topics in Kafka from multiple input files" in { val topics = List("topic1", "topic2", "topic3") topics.map(AdminUtils.topicExists(zkUtils, _) shouldBe false) Main.run(testArgs(Seq("/topic-configuration.yml", "/topic-configuration-2.yml")), Map.empty) shouldBe a[Success[_]] eventually { withClue("Topic exists: ") { topics.map(AdminUtils.topicExists(zkUtils, _) shouldBe true) } } } it should "still configure all topics when one fails" in { val correctTopics = List("correctConfig1", "correctConfig2") val errorTopic = "errorConfig" (correctTopics :+ errorTopic).map(AdminUtils.topicExists(zkUtils, _) shouldBe false) Main.run(testArgs(Seq("/topic-configuration-with-error.yml")), Map.empty) shouldBe a[Success[_]] eventually { withClue("Topic exists: ") { correctTopics.map(AdminUtils.topicExists(zkUtils, _) shouldBe true) } withClue("Topic doesn't exist: ") { AdminUtils.topicExists(zkUtils, errorTopic) shouldBe false } } } it should "configure topics from correct files if another input file is empty" in { val topic = "topic4" AdminUtils.topicExists(zkUtils, topic) shouldBe false Main.run(testArgs(Seq("/topic-configuration-3.yml", "/no-topics.yml")), Map.empty) shouldBe a[Success[_]] eventually { withClue("Topic exists: ") { AdminUtils.topicExists(zkUtils, topic) shouldBe true } } } private def testArgs(filePaths: Seq[String]): Array[String] = Array( "-f", filePaths.map(path => getClass.getResource(path).getPath).mkString(","), "--bootstrap-servers", s"localhost:${kafkaServer.kafkaPort}" ) }
Example 96
Source File: JsonMatchers.scala From scalatest-json with Apache License 2.0 | 5 votes |
package com.stephenn.scalatest.jsonassert import org.scalatest.matchers.MatchResult import org.skyscreamer.jsonassert.{JSONCompare, JSONCompareMode} import scala.util.{Failure, Success, Try} import org.scalatest.matchers.Matcher trait JsonMatchers { def matchJson(right: String): Matcher[String] = Matcher[String] { left => Try( JSONCompare .compareJSON(right, left, JSONCompareMode.STRICT) ) match { case Failure(_) => MatchResult( matches = false, rawFailureMessage = "Could not parse json {0} did not equal {1}", rawNegatedFailureMessage = "Json should not have matched {0} {1}", args = IndexedSeq(left.trim, right.trim) ) case Success(jSONCompareResult) => MatchResult( matches = jSONCompareResult.passed(), rawFailureMessage = "Json did not match {0} did not match {1}\n\nJson Diff:\n{2}", rawNegatedFailureMessage = "Json should not have matched {0} matched {1}\n\nJson Diff:\n{2}", args = IndexedSeq(left.trim, right.trim, jSONCompareResult.getMessage) ) } } } object JsonMatchers extends JsonMatchers
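A usage sketch in a spec; the exact ScalaTest base classes depend on the ScalaTest version in use, so treat the imports as assumptions:

import org.scalatest.flatspec.AnyFlatSpec
import org.scalatest.matchers.should.Matchers

class PayloadSpec extends AnyFlatSpec with Matchers with JsonMatchers {
  "two json documents" should "match regardless of key order" in {
    """{"a": 1, "b": 2}""" should matchJson("""{"b": 2, "a": 1}""")
  }
}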
Example 97
Source File: akkaHttp.scala From sup with Apache License 2.0 | 5 votes |
package sup.modules import akka.http.scaladsl.marshalling.ToEntityMarshaller import akka.http.scaladsl.model.StatusCode import akka.http.scaladsl.model.StatusCodes import akka.http.scaladsl.server.Directives.{path => akkaPath, _} import akka.http.scaladsl.server.Route import cats.effect.Effect import cats.syntax.functor._ import cats.syntax.reducible._ import cats.~> import cats.Functor import cats.Reducible import sup.HealthCheck import sup.HealthResult import scala.concurrent.Future import scala.util.Failure import scala.util.Success import akka.http.scaladsl.model.HttpRequest object akkahttp { def healthCheckRoutes[F[_]: Effect, H[_]: Reducible]( healthCheck: HealthCheck[F, H], path: String = "health-check" )( implicit marshaller: ToEntityMarshaller[HealthResult[H]] ): Route = akkaPath(path) { get { onComplete(Effect[F].toIO(healthCheckResponse(healthCheck)).unsafeToFuture()) { case Success(response) => complete(response) case Failure(error) => failWith(error) } } } def healthCheckResponse[F[_]: Functor, H[_]: Reducible]( healthCheck: HealthCheck[F, H] ): F[(StatusCode, HealthResult[H])] = healthCheck.check.map { check => if (check.value.reduce.isHealthy) StatusCodes.OK -> check else StatusCodes.ServiceUnavailable -> check } def healthCheckRoutesWithContext[F[_]: Functor, H[_]: Reducible, R]( healthCheck: HealthCheck[F, H], path: String = "health-check" )( run: HttpRequest => F ~> Future )( implicit marshaller: ToEntityMarshaller[HealthResult[H]] ): Route = akkaPath(path) { get { extractRequest { request => onComplete(run(request)(healthCheckResponse(healthCheck))) { case Success(response) => complete(response) case Failure(error) => failWith(error) } } } } }
Example 98
Source File: AbstractController.scala From daf with BSD 3-Clause "New" or "Revised" License | 5 votes |
package controllers import it.gov.daf.common.authentication.Authentication import it.gov.daf.common.config.Read import javax.security.auth.login.LoginContext import org.apache.hadoop.security.UserGroupInformation import org.pac4j.play.store.PlaySessionStore import play.api.Configuration import play.api.mvc._ import scala.util.{ Failure, Success, Try } abstract class AbstractController(protected val configuration: Configuration, val playSessionStore: PlaySessionStore) extends Controller { private def prepareEnv() = Try { System.setProperty("javax.security.auth.useSubjectCredsOnly", "false") } private def loginUserFromConf = for { user <- Read.string { "kerberos.principal" }.! path <- Read.string { "kerberos.keytab" }.! } yield UserGroupInformation.loginUserFromKeytab(user, path) private def prepareAuth() = Try { Authentication(configuration, playSessionStore) } private def initUser() = for { _ <- prepareEnv() _ <- loginUserFromConf.read { configuration } _ <- prepareAuth() } yield UserGroupInformation.getLoginUser protected implicit val proxyUser = initUser() match { case Success(null) => throw new RuntimeException("Unable to initialize user for application") case Success(user) => user case Failure(error) => throw new RuntimeException("Unable to initialize user for application", error) } }
Example 99
Source File: QueryExecution.scala From daf with BSD 3-Clause "New" or "Revised" License | 5 votes |
package controllers

import akka.stream.scaladsl.Source
import cats.syntax.show.toShow
import daf.dataset._
import daf.dataset.query.jdbc.{ JdbcResult, QueryFragmentWriterSyntax, Writers }
import daf.dataset.query.Query
import daf.web._
import daf.filesystem._
import daf.instances.FileSystemInstance
import it.gov.daf.common.utils._
import org.apache.hadoop.fs.Path
import play.api.libs.json.JsValue

import scala.concurrent.Future
import scala.util.{ Failure, Success, Try }

trait QueryExecution { this: DatasetController with DatasetExport with FileSystemInstance =>

  private def extractDatabaseName(parent: String, params: FileDatasetParams) = parent.toLowerCase match {
    case "opendata" => params.extraParams.get("theme").map { s => s"opendata__${s.toLowerCase}" } getOrElse "opendata" // append __{theme} for opendata
    case other      => other // use the parent dir for other data
  }

  private def extractTableName(path: Path, params: FileDatasetParams): Try[String] = Try {
    s"${extractDatabaseName(path.getParent.getName, params)}.${path.getName.toLowerCase}"
  }

  private def extractTableName(params: DatasetParams, userId: String): Try[String] = params match {
    case kudu: KuduDatasetParams => (proxyUser as userId) { downloadService.tableInfo(kudu.table) } map { _ => kudu.table }
    case file: FileDatasetParams => (proxyUser as userId) { extractTableName(file.path.asHadoop.resolve, file) }
  }

  private def prepareQuery(params: DatasetParams, query: Query, userId: String) = for {
    tableName <- extractTableName(params, userId)
    fragment  <- Writers.sql(query, tableName).write
  } yield fragment.query[Unit].sql

  private def analyzeQuery(params: DatasetParams, query: Query, userId: String) = for {
    tableName <- extractTableName(params, userId)
    analysis  <- queryService.explain(query, tableName, userId)
  } yield analysis

  private def transform(jdbcResult: JdbcResult, targetFormat: FileDataFormat) = targetFormat match {
    case CsvFileFormat  => Try { Source[String](jdbcResult.toCsv).map { csv => s"$csv${System.lineSeparator}" } }
    case JsonFileFormat => Try { wrapJson { Source[JsValue](jdbcResult.toJson).map { _.toString } } }
    case _              => Failure { new IllegalArgumentException(s"Invalid target format [$targetFormat]; must be [csv | json]") }
  }

  // Web

  // Failure
  private def failQuickExec(params: DatasetParams, targetFormat: FileDataFormat) = Future.successful {
    TemporaryRedirect { s"${controllers.routes.DatasetController.queryDataset(params.catalogUri, targetFormat.show, "batch").url}" }
  }

  // Executions
  private def doBatchExec(params: DatasetParams, query: Query, targetFormat: FileDataFormat, userId: String) = prepareQuery(params, query, userId) match {
    case Success(sql)   => prepareQueryExport(sql, targetFormat).map { formatExport(_, targetFormat) }
    case Failure(error) => Future.failed { error }
  }

  private def doQuickExec(params: DatasetParams, query: Query, targetFormat: FileDataFormat, userId: String) = for {
    tableName  <- extractTableName(params, userId)
    jdbcResult <- queryService.exec(query, tableName, userId)
    data       <- transform(jdbcResult, targetFormat)
  } yield data

  // API
  protected def quickExec(params: DatasetParams, query: Query, targetFormat: FileDataFormat, userId: String) = analyzeQuery(params, query, userId) match {
    case Success(analysis) if analysis.memoryEstimation <= impalaConfig.memoryEstimationLimit => doQuickExec(params, query, targetFormat, userId).~>[Future].map { respond(_, params.name, targetFormat) }
    case Success(_)     => failQuickExec(params, targetFormat)
    case Failure(error) => Future.failed { error }
  }

  protected def batchExec(params: DatasetParams, query: Query, targetFormat: FileDataFormat, userId: String) =
    doBatchExec(params, query, targetFormat, userId).map { respond(_, params.name, targetFormat) }

  protected def exec(params: DatasetParams, query: Query, userId: String, targetFormat: FileDataFormat, method: DownloadMethod) = method match {
    case QuickDownloadMethod => quickExec(params, query, targetFormat, userId)
    case BatchDownloadMethod => batchExec(params, query, targetFormat, userId)
  }
}
Example 100
Source File: DownloadExecution.scala From daf with BSD 3-Clause "New" or "Revised" License | 5 votes |
package controllers

import cats.syntax.show.toShow
import daf.dataset._
import daf.filesystem.{ FileDataFormat, fileFormatShow }
import daf.web._
import daf.instances.FileSystemInstance
import it.gov.daf.common.utils._

import scala.concurrent.Future
import scala.util.{ Failure, Success }

trait DownloadExecution { this: DatasetController with DatasetExport with FileSystemInstance =>

  // Failures

  private def failQuickDownload(params: DatasetParams, targetFormat: FileDataFormat, limit: Option[Int]) = Future.successful {
    TemporaryRedirect {
      s"${controllers.routes.DatasetController.getDataset(params.catalogUri, targetFormat.show, "batch", limit).url}"
    }
  }

  // Retrievals

  private def retrieveFileInfo(path: String, userId: String) = (proxyUser as userId) { downloadService.fileInfo(path) }

  private def retrieveTableInfo(tableName: String, userId: String) = (proxyUser as userId) { downloadService.tableInfo(tableName) }

  // Executions

  private def doTableExport(params: KuduDatasetParams, userId: String, targetFormat: FileDataFormat, limit: Option[Int]) =
    retrieveTableInfo(params.table, userId) match {
      case Success(_)     => prepareTableExport(params.table, targetFormat, params.extraParams, limit).map { formatExport(_, targetFormat) }
      case Failure(error) => Future.failed { error }
    }

  private def doFileExport(params: FileDatasetParams, userId: String, targetFormat: FileDataFormat, limit: Option[Int]) =
    retrieveFileInfo(params.path, userId) match {
      case Success(pathInfo) => prepareFileExport(pathInfo, params.format, targetFormat, params.extraParams, limit).map { formatExport(_, targetFormat) }
      case Failure(error)    => Future.failed { error }
    }

  private def doQuickFile(params: DatasetParams, targetFormat: FileDataFormat, limit: Option[Int]) =
    prepareDirect(params, targetFormat, limit).map { respond(_, params.name, targetFormat) }.~>[Future]

  private def quickFileDownload(params: FileDatasetParams, userId: String, targetFormat: FileDataFormat, limit: Option[Int]) =
    retrieveFileInfo(params.path, userId) match {
      case Success(pathInfo) if pathInfo.estimatedSize <= exportConfig.sizeThreshold => doQuickFile(params, targetFormat, limit)
      case Success(pathInfo)                                                         => failQuickDownload(params, targetFormat, limit)
      case Failure(error)                                                            => Future.failed { error }
    }

  // API

  protected def quickDownload(params: DatasetParams, userId: String, targetFormat: FileDataFormat, limit: Option[Int] = None) = params match {
    case fileParams: FileDatasetParams => quickFileDownload(fileParams, userId, targetFormat, limit)
    case kuduParams: KuduDatasetParams => failQuickDownload(kuduParams, targetFormat, limit) // no quick download option for kudu
  }

  protected def batchDownload(params: DatasetParams, userId: String, targetFormat: FileDataFormat, limit: Option[Int] = None) = params match {
    case kuduParams: KuduDatasetParams => doTableExport(kuduParams, userId, targetFormat, limit).map { respond(_, kuduParams.table, targetFormat) }
    case fileParams: FileDatasetParams => doFileExport(fileParams, userId, targetFormat, limit).map { respond(_, fileParams.name, targetFormat) }
  }

  protected def download(params: DatasetParams, userId: String, targetFormat: FileDataFormat, method: DownloadMethod, limit: Option[Int] = None) = method match {
    case QuickDownloadMethod => quickDownload(params, userId, targetFormat, limit)
    case BatchDownloadMethod => batchDownload(params, userId, targetFormat, limit)
  }
}
Example 101
Source File: DatasetExport.scala From daf with BSD 3-Clause "New" or "Revised" License | 5 votes |
package controllers

import akka.stream.scaladsl.{ Source, StreamConverters }
import cats.syntax.show.toShow
import daf.dataset.{ DatasetParams, ExtraParams }
import daf.filesystem.{ CsvFileFormat, FileDataFormat, JsonFileFormat, PathInfo, fileFormatShow }
import daf.web.contentType

import scala.concurrent.Future
import scala.util.{ Failure, Success }

trait DatasetExport { this: DatasetController =>

  protected def prepareDirect(params: DatasetParams, targetFormat: FileDataFormat, limit: Option[Int]) = targetFormat match {
    case JsonFileFormat => datasetService.jsonData(params, limit)
    case CsvFileFormat  => datasetService.csvData(params, limit)
    case _              => Failure { new IllegalArgumentException("Unable to prepare download; only CSV and JSON are permitted") }
  }

  protected def prepareFileExport(pathInfo: PathInfo, sourceFormat: FileDataFormat, targetFormat: FileDataFormat, extraParams: ExtraParams, limit: Option[Int] = None) =
    fileExportService.exportFile(pathInfo.path, sourceFormat, targetFormat, extraParams, limit).map { downloadService.openPath }.flatMap {
      case Success(stream) => Future.successful {
        StreamConverters.fromInputStream { () => stream }
      }
      case Failure(error) => Future.failed { error }
    }

  protected def prepareTableExport(table: String, targetFormat: FileDataFormat, extraParams: ExtraParams, limit: Option[Int] = None) =
    fileExportService.exportTable(table, targetFormat, extraParams, limit).map { downloadService.openPath }.flatMap {
      case Success(stream) => Future.successful {
        StreamConverters.fromInputStream { () => stream }
      }
      case Failure(error) => Future.failed { error }
    }

  protected def prepareQueryExport(query: String, targetFormat: FileDataFormat) =
    fileExportService.exportQuery(query, targetFormat).map { downloadService.openPath }.flatMap {
      case Success(stream) => Future.successful {
        StreamConverters.fromInputStream { () => stream }
      }
      case Failure(error) => Future.failed { error }
    }

  protected def respond(data: Source[String, _], fileName: String, targetFormat: FileDataFormat) = Ok.chunked(data).withHeaders(
    CONTENT_DISPOSITION -> s"""attachment; filename="$fileName.${targetFormat.show}"""",
    CONTENT_TYPE        -> contentType(targetFormat)
  )
}
Example 102
Source File: FileExportJob.scala From daf with BSD 3-Clause "New" or "Revised" License | 5 votes |
package daf.dataset.export

import daf.dataset.ExtraParams
import daf.filesystem._
import org.apache.hadoop.fs.Path
import org.apache.livy.{ Job, JobContext }
import org.apache.spark.sql._

import scala.util.{ Failure, Success, Try }

class FileExportJob(val from: FileExportInfo, val to: FileExportInfo, val extraParams: Map[String, String], limit: Option[Int]) extends Job[String] {

  private val csvDelimiter     = extraParams.getOrElse("separator", ",")
  private val csvIncludeHeader = true
  private val csvInferSchema   = true

  // Export

  private def prepareCsvReader(reader: DataFrameReader) = reader
    .option("inferSchema", csvInferSchema)
    .option("header", csvIncludeHeader)
    .option("delimiter", csvDelimiter)

  private def prepareCsvWriter(writer: DataFrameWriter[Row]) = writer
    .option("header", csvIncludeHeader)
    .option("delimiter", csvDelimiter)

  private def read(session: SparkSession) = from match {
    case FileExportInfo(path, RawFileFormat | CsvFileFormat) => prepareCsvReader(session.read).csv(path)
    case FileExportInfo(path, ParquetFileFormat)             => session.read.parquet(path)
    case FileExportInfo(path, JsonFileFormat)                => session.read.json(path)
    case FileExportInfo(_, unsupported)                      => throw new IllegalArgumentException(s"Input file format [$unsupported] is invalid")
  }

  private def addLimit(data: DataFrame) = limit match {
    case Some(value) => data.limit(value)
    case None        => data
  }

  private def write(data: DataFrame) = to match {
    case FileExportInfo(path, CsvFileFormat)  => prepareCsvWriter(data.write).csv(path)
    case FileExportInfo(path, JsonFileFormat) => data.write.json(path)
    case FileExportInfo(_, unsupported)       => throw new IllegalArgumentException(s"Output file format [$unsupported] is invalid")
  }

  private def doExport(session: SparkSession) = for {
    data    <- Try { read(session) }
    limited <- Try { addLimit(data) }
    _       <- Try { write(limited) }
  } yield ()

  override def call(jobContext: JobContext) = doExport { jobContext.sqlctx().sparkSession } match {
    case Success(_)     => to.path
    case Failure(error) => throw new RuntimeException("Export Job execution failed", error)
  }
}

object FileExportJob {

  def create(inputPath: String, outputPath: String, from: FileDataFormat, to: FileDataFormat, extraParams: ExtraParams = Map.empty[String, String], limit: Option[Int]) = new FileExportJob(
    FileExportInfo(inputPath, from),
    FileExportInfo(outputPath, to),
    extraParams,
    limit
  )
}

case class FileExportInfo(path: String, format: FileDataFormat)

object FileExportInfo {
  def apply(path: Path, format: FileDataFormat): FileExportInfo = apply(path.toUri.getPath, format)
}
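Since FileExportJob implements Livy's Job interface, it is intended to be shipped to a Livy server for execution. A minimal submission sketch, assuming a reachable Livy endpoint and that the job's jar has already been added to the client (the URL and paths here are illustrative):

import java.net.URI

import daf.dataset.export.FileExportJob
import daf.filesystem.{ CsvFileFormat, ParquetFileFormat }
import org.apache.livy.LivyClientBuilder

object FileExportSubmission extends App {
  val livy = new LivyClientBuilder().setURI(new URI("http://livy-host:8998")).build()

  val job = FileExportJob.create(
    inputPath  = "/data/source.parquet",  // illustrative paths
    outputPath = "/data/export.csv",
    from       = ParquetFileFormat,
    to         = CsvFileFormat,
    limit      = Some(1000)
  )

  val handle = livy.submit(job) // JobHandle[String]; get() blocks until the output path is returned
  println(handle.get())
  livy.stop(true)
}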
Example 103
Source File: QueryExportJob.scala From daf with BSD 3-Clause "New" or "Revised" License | 5 votes |
package daf.dataset.export

import daf.dataset.ExtraParams
import daf.filesystem._
import org.apache.livy.{ Job, JobContext }
import org.apache.spark.sql._

import scala.util.{ Failure, Success, Try }

class QueryExportJob(val query: String, val to: FileExportInfo, val extraParams: Map[String, String]) extends Job[String] {

  private val csvDelimiter     = extraParams.getOrElse("separator", ",")
  private val csvIncludeHeader = true

  // Export

  private def prepareCsvWriter(writer: DataFrameWriter[Row]) = writer
    .option("header", csvIncludeHeader)
    .option("delimiter", csvDelimiter)

  private def write(data: DataFrame) = to match {
    case FileExportInfo(path, CsvFileFormat)  => prepareCsvWriter(data.write).csv(path)
    case FileExportInfo(path, JsonFileFormat) => data.write.json(path)
    case FileExportInfo(_, unsupported)       => throw new IllegalArgumentException(s"Output file format [$unsupported] is invalid")
  }

  private def doExport(session: SparkSession) = for {
    data <- Try { session.sql(query) }
    _    <- Try { write(data) }
  } yield ()

  override def call(jobContext: JobContext) = doExport { jobContext.sqlctx().sparkSession } match {
    case Success(_)     => to.path
    case Failure(error) => throw new RuntimeException("Export Job execution failed", error)
  }
}

object QueryExportJob {

  def create(query: String, outputPath: String, to: FileDataFormat, extraParams: ExtraParams = Map.empty[String, String]) = new QueryExportJob(
    query,
    FileExportInfo(outputPath, to),
    extraParams
  )
}
Example 104
Source File: KuduExportJob.scala From daf with BSD 3-Clause "New" or "Revised" License | 5 votes |
package daf.dataset.export

import daf.filesystem.{ CsvFileFormat, FileDataFormat, JsonFileFormat }
import org.apache.livy.{ Job, JobContext }
import org.apache.kudu.spark.kudu._
import org.apache.spark.sql._

import scala.util.{ Failure, Success, Try }

class KuduExportJob(val table: String, val master: String, val to: FileExportInfo, extraParams: Map[String, String], limit: Option[Int]) extends Job[String] {

  private val csvDelimiter     = extraParams.getOrElse("separator", ",")
  private val csvIncludeHeader = true

  private def prepareCsvWriter(writer: DataFrameWriter[Row]) = writer
    .option("header", csvIncludeHeader)
    .option("delimiter", csvDelimiter)

  private def prepareReader(reader: DataFrameReader) = reader
    .option("kudu.master", master)
    .option("kudu.table", table)

  private def read(session: SparkSession) = prepareReader { session.read }.kudu

  private def addLimit(data: DataFrame) = limit match {
    case Some(value) => data.limit(value)
    case None        => data
  }

  private def write(data: DataFrame) = to match {
    case FileExportInfo(path, CsvFileFormat)  => prepareCsvWriter(data.write).csv(path)
    case FileExportInfo(path, JsonFileFormat) => data.write.json(path)
    case FileExportInfo(_, unsupported)       => throw new IllegalArgumentException(s"Output file format [$unsupported] is invalid")
  }

  private def doExport(session: SparkSession) = for {
    data    <- Try { read(session) }
    limited <- Try { addLimit(data) }
    _       <- Try { write(limited) }
  } yield ()

  def call(jobContext: JobContext) = doExport { jobContext.sqlctx().sparkSession } match {
    case Success(_)     => to.path
    case Failure(error) => throw new RuntimeException("Export Job execution failed", error)
  }
}

object KuduExportJob {

  def create(table: String, master: String, outputPath: String, outputFormat: FileDataFormat, extraParams: Map[String, String] = Map.empty[String, String], limit: Option[Int]) = new KuduExportJob(
    table,
    master,
    FileExportInfo(outputPath, outputFormat),
    extraParams,
    limit
  )
}
Example 105
Source File: TestCatalogClient.scala From daf with BSD 3-Clause "New" or "Revised" License | 5 votes |
package controllers

import java.io.FileNotFoundException

import daf.catalogmanager._

import scala.util.{ Failure, Success }

trait TestCatalogClient { this: DatasetController =>

  override protected val catalogClient = new TestCatalogManagerClient
}

sealed class TestCatalogManagerClient extends CatalogManagerClient("") {

  private def makeCatalog(id: String) = MetaCatalog(
    dataschema = DatasetCatalog(
      avro = null,
      flatSchema = List.empty,
      kyloSchema = None
    ),
    operational = Operational(
      theme              = "",
      subtheme           = "",
      logical_uri        = id,
      physical_uri       = Some { s"test-dir/$id" },
      is_std             = true,
      group_own          = "test",
      group_access       = None,
      std_schema         = None,
      read_type          = "",
      georef             = None,
      input_src          = null,
      ingestion_pipeline = None,
      storage_info       = Some {
        StorageInfo(
          hdfs   = Some { StorageHdfs(s"test-dir/$id", Some(s"test-dir/$id"), None) },
          kudu   = None,
          hbase  = None,
          textdb = None,
          mongo  = None
        )
      },
      dataset_type       = ""
    ),
    dcatapit = null
  )

  override def getById(authorization: String, catalogId: String) = catalogId match {
    case "path/to/failure" => Failure { new FileNotFoundException("Encountered failure condition") }
    case other             => Success { makeCatalog(other) }
  }
}
Example 106
Source File: ColumnFragmentsSpec.scala From daf with BSD 3-Clause "New" or "Revised" License | 5 votes |
package daf.dataset.query.jdbc

import daf.dataset.query._
import org.scalatest.{ MustMatchers, WordSpec }
import doobie.implicits.toSqlInterpolator

import scala.annotation.tailrec
import scala.util.Success

class SelectFragmentSpec extends WordSpec with MustMatchers {

  "A [select] fragment writer" must {

    "serialize a [select] clause in SQL" in {
      ColumnFragments.select { SelectClauses.simple }.run.map { _._1.toString } must be {
        Success { fr"SELECT col1, col2 AS alias1, 1, 'string' AS alias2, MAX(col3) AS alias3, SUM(true)".toString }
      }
    }

    "create a column/alias reference set" in {
      ColumnFragments.select { SelectClauses.simple }.run.get._2 must have (
        ColumnReferenceMatchers hasColumn "col1",
        ColumnReferenceMatchers hasColumn "col2",
        ColumnReferenceMatchers hasColumn "1",
        ColumnReferenceMatchers hasColumn "'string'",
        ColumnReferenceMatchers hasColumn "col3",
        ColumnReferenceMatchers hasColumn "true",
        ColumnReferenceMatchers hasAlias "alias1",
        ColumnReferenceMatchers hasAlias "alias2",
        ColumnReferenceMatchers hasAlias "alias3"
      )
    }

    "serialize a very long [select] without stack overflow" in {
      ColumnFragments.select { SelectClauses.nested }.run must be { 'Success }
    }

    "fail serialization when sql is injected in a column name" in {
      ColumnFragments.select { SelectClauses.injectNamed }.run must be { 'Failure }
    }

    "escape quotes in value strings" in {
      ColumnFragments.select { SelectClauses.injectValue }.run.map { _._1.toString } must be {
        Success { fr"""SELECT '\' SELECT col2 FROM table WHERE \'\' == \''""".toString }
      }
    }
  }
}

object SelectClauses {

  val simple = SelectClause {
    Seq(
      NamedColumn("col1"),
      NamedColumn("col2") as "alias1",
      ValueColumn(1),
      ValueColumn("string") as "alias2",
      Max(NamedColumn("col3")) as "alias3",
      Sum(ValueColumn(true))
    )
  }

  val injectNamed = SelectClause {
    Seq(
      NamedColumn("SELECT col2 FROM table")
    )
  }

  val injectValue = SelectClause {
    Seq(
      ValueColumn("' SELECT col2 FROM table WHERE '' == '")
    )
  }

  @tailrec
  private def nest(column: Column, n: Int = 10000): Column = if (n == 0) column else nest(Sum(column), n - 1)

  val nested = SelectClause {
    Seq { nest(ValueColumn(true)) }
  }
}
Example 107
Source File: GroupingFragmentsSpec.scala From daf with BSD 3-Clause "New" or "Revised" License | 5 votes |
package daf.dataset.query.jdbc

import daf.dataset.query.{ GroupByClause, Max, NamedColumn }
import doobie.implicits.toSqlInterpolator
import org.scalatest.{ MustMatchers, WordSpec }

import scala.util.Success

class GroupingFragmentsSpec extends WordSpec with MustMatchers {

  "A [groupBy] fragment writer" must {

    "serialize a [groupBy] clause in SQL" in {
      GroupingFragments.groupBy(GroupByClauses.valid, GroupByClauses.validRef).run.map { _._1.toString } must be {
        Success { fr"GROUP BY col1, col2".toString }
      }
    }

    "throw an error" when {

      "a [groupBy] clause contains an alias column" in {
        GroupingFragments.groupBy(GroupByClauses.invalidAlias, GroupByClauses.validRef).run must be { 'Failure }
      }

      "a [groupBy] clause contains a function column" in {
        GroupingFragments.groupBy(GroupByClauses.invalidFunction, GroupByClauses.validRef).run must be { 'Failure }
      }

      "an invalid column reference is encountered" in {
        GroupingFragments.groupBy(GroupByClauses.valid, GroupByClauses.invalidRef).run must be { 'Failure }
      }
    }
  }
}

object GroupByClauses {

  val validRef = ColumnReference(
    Set("col1", "col2"),
    Set("alias1")
  )

  val invalidRef = ColumnReference(
    Set("col1", "col2", "col3"),
    Set.empty[String]
  )

  val valid = GroupByClause {
    Seq(NamedColumn("col1"), NamedColumn("col2"))
  }

  val invalidAlias = GroupByClause {
    Seq(NamedColumn("col1") as "alias1")
  }

  val invalidFunction = GroupByClause {
    Seq(Max(NamedColumn("col1")))
  }
}
Example 108
Source File: UriDataset.scala From daf with BSD 3-Clause "New" or "Revised" License | 5 votes |
package it.gov.daf.catalogmanager.utilities.uri

import catalog_manager.yaml.MetaCatalog
import com.typesafe.config.ConfigFactory
import it.gov.daf.catalogmanager.utilities.datastructures.DatasetType
import play.api.Logger

import scala.util.{Failure, Success, Try}

case class UriDataset(
  domain: String = "NO_DOMAIN",
  typeDs: DatasetType.Value = DatasetType.RAW,
  groupOwn: String = "NO_groupOwn",
  owner: String = "NO_owner",
  theme: String = "NO_theme",
  subtheme: String = "NO_theme",
  nameDs: String = "NO_nameDs") {

  val config = ConfigFactory.load()

  def getUri(): String =
    domain + "://" + "dataset/" + typeDs + "/" + groupOwn + "/" + owner + "/" + theme + "/" + subtheme + "/" + nameDs

  def getUrl(): String = {
    val basePath = config.getString("Inj-properties.hdfsBasePath")
    val baseDataPath = config.getString("Inj-properties.dataBasePath")
    typeDs match {
      case DatasetType.STANDARD => basePath + baseDataPath + "/" + typeDs + "/" + theme + "/" + subtheme + "/" + groupOwn + "/" + nameDs
      case DatasetType.ORDINARY => basePath + baseDataPath + "/" + typeDs + "/" + owner + "/" + theme + "/" + subtheme + "/" + groupOwn + "/" + nameDs
      case DatasetType.RAW      => basePath + baseDataPath + "/" + typeDs + "/" + owner + "/" + theme + "/" + subtheme + "/" + groupOwn + "/" + nameDs
      case _                    => "-1"
    }
  }
}

object UriDataset {

  def apply(uri: String): UriDataset =
    Try {
      val uri2split = uri.split("://")
      val uriParts = uri2split(1).split("/")
      new UriDataset(
        domain = uri2split(0),
        typeDs = DatasetType.withNameOpt(uriParts(1)).get,
        groupOwn = uriParts(2),
        owner = uriParts(3),
        theme = uriParts(4),
        subtheme = uriParts(5),
        nameDs = uriParts(6))
    } match {
      case Success(s) => s
      case Failure(err) =>
        Logger.error("Error while creating uri: " + uri + " - " + err.getMessage)
        UriDataset()
    }

  def convertToUriDataset(schema: MetaCatalog): UriDataset = {
    val typeDs =
      if (schema.operational.is_std) DatasetType.STANDARD
      else DatasetType.ORDINARY

    new UriDataset(
      domain = "daf",
      typeDs = typeDs,
      groupOwn = schema.operational.group_own,
      owner = schema.dcatapit.owner_org.get,
      theme = schema.operational.theme,
      subtheme = schema.operational.subtheme,
      nameDs = schema.dataschema.avro.name
    )
  }
}
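A round-trip sketch of the URI scheme above. The values are illustrative, and this assumes the DatasetType enum values print as their names (e.g. STANDARD), which is what the parsing side expects:

val ds = UriDataset(
  domain = "daf",
  typeDs = DatasetType.STANDARD,
  groupOwn = "istat",
  owner = "istat",
  theme = "population",
  subtheme = "census",
  nameDs = "pop2020"
)

val uri = ds.getUri() // "daf://dataset/STANDARD/istat/istat/population/census/pop2020"
UriDataset(uri) == ds // true: the companion apply parses the seven segments back into the case class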
Example 109
Source File: EventToKuduEventSpec.scala From daf with BSD 3-Clause "New" or "Revised" License | 5 votes |
package it.teamdigitale.events

import it.gov.daf.iotingestion.event.Event
import it.teamdigitale.EventModel.EventToKuduEvent
import it.teamdigitale.EventModel.EventToStorableEvent
import org.scalatest.{FlatSpec, Matchers}

import scala.util.{Success, Try}

class EventToKuduEventSpec extends FlatSpec with Matchers {

  val metrics = Range(0, 100).map(x =>
    Success(
      Event(
        version = 1L,
        id = x + "metric",
        ts = System.currentTimeMillis(),
        event_type_id = 0,
        location = "41.1260529:16.8692905",
        source = "http://domain/sensor/url",
        body = Option("""{"rowdata": "this json should contain row data"}""".getBytes()),
        event_subtype_id = Some("SPEED_Via_Cernaia_TO"),
        attributes = Map("value" -> x.toString)
      )))

  // this metric doesn't have any value
  val wrongMetric = Success(
    Event(
      version = 1L,
      id = "wrongmetric1",
      ts = System.currentTimeMillis(),
      event_type_id = 0,
      location = "41.1260529:16.8692905",
      source = "http://domain/sensor/url",
      body = Option("""{"rowdata": "this json should contain row data"}""".getBytes()),
      event_annotation = Some(s"This is a free text for a wrong metric"),
      event_subtype_id = Some("SPEED_Via_Cernaia_TO"),
      attributes = Map()
    ))

  // this metric doesn't have a correct value
  val wrongMetric2 = Success(
    Event(
      version = 1L,
      id = "wrongmetric2",
      ts = System.currentTimeMillis(),
      event_type_id = 0,
      location = "41.1260529:16.8692905",
      source = "http://domain/sensor/url",
      body = Option("""{"rowdata": "this json should contain row data"}""".getBytes()),
      event_annotation = Some(s"This is a free text ©"),
      event_subtype_id = Some("SPEED_Via_Cernaia_TO"),
      attributes = Map("value" -> "wrongValue")
    ))

  // this metric doesn't have the metric id
  val wrongMetric3 = Success(
    Event(
      version = 1L,
      id = "wrongmetric3",
      ts = System.currentTimeMillis(),
      event_type_id = 2,
      location = "41.1260529:16.8692905",
      source = "http://domain/sensor/url",
      body = Option("""{"rowdata": "this json should contain row data"}""".getBytes()),
      event_annotation = Some(s"This is a free text for a wrong metric"),
      attributes = Map("value" -> "100")
    ))

  "Correct events" should "be converted" in {
    val res = metrics.map(event => EventToStorableEvent(event)).flatMap(_.toOption).map(event => EventToKuduEvent(event)).filter(_.isSuccess)
    res.length shouldBe 100
    res.head.get.metric shouldBe 0D
    res.head.get.metric_id shouldBe "SPEED_Via_Cernaia_TO"
  }

  "Wrong events" should "be filtered" in {
    val seq = metrics ++ List(wrongMetric, wrongMetric2, wrongMetric3)
    val res = seq.map(event => EventToStorableEvent(event)).flatMap(_.toOption).map(event => EventToKuduEvent(event)).filter(_.isSuccess)
    res.length shouldBe 100
  }
}
Example 110
Source File: KuduEventsHandlerSpec.scala From daf with BSD 3-Clause "New" or "Revised" License | 5 votes |
package it.teamdigitale.storage

import java.io.File
import java.util.concurrent.TimeUnit

import org.apache.kudu.spark.kudu._
import it.teamdigitale.miniclusters.KuduMiniCluster
import it.teamdigitale.config.IotIngestionManagerConfig.KuduConfig
import it.teamdigitale.managers.IotIngestionManager
import org.scalatest.{BeforeAndAfterAll, FlatSpec, Matchers}
import it.gov.daf.iotingestion.event.Event
import it.teamdigitale.EventModel.{EventToKuduEvent, EventToStorableEvent}
import org.apache.logging.log4j.LogManager

import scala.util.{Failure, Success, Try}

class KuduEventsHandlerSpec extends FlatSpec with Matchers with BeforeAndAfterAll {

  val logger = LogManager.getLogger(this.getClass)
  val kuduCluster = new KuduMiniCluster()

  val metrics: Seq[Try[Event]] = Range(0, 100).map(x =>
    Success(
      Event(
        version = 1L,
        id = x + "metric",
        ts = System.currentTimeMillis() + x,
        event_type_id = 0,
        location = "41.1260529:16.8692905",
        source = "http://domain/sensor/url",
        body = Option("""{"rowdata": "this json should contain row data"}""".getBytes()),
        event_subtype_id = Some("Via Cernaia(TO)"),
        attributes = Map("value" -> x.toString)
      )))

  val rdd = kuduCluster.sparkSession.sparkContext.parallelize(metrics)

  "KuduEventsHandler" should "store data correctly" in {

    val metricsRDD = rdd
      .map(event => EventToStorableEvent(event))
      .flatMap(e => e.toOption)
      .map(se => EventToKuduEvent(se)).flatMap(e => e.toOption)

    val metricsDF = kuduCluster.sparkSession.createDataFrame(metricsRDD)

    val kuduConfig = KuduConfig(kuduCluster.kuduMiniCluster.getMasterAddresses, "TestEvents", 2)

    KuduEventsHandler.getOrCreateTable(kuduCluster.kuduContext, kuduConfig)
    KuduEventsHandler.write(metricsDF, kuduCluster.kuduContext, kuduConfig)

    val df = kuduCluster.sparkSession.sqlContext
      .read
      .options(Map("kudu.master" -> kuduConfig.masterAdresses, "kudu.table" -> kuduConfig.eventsTableName))
      .kudu

    df.count shouldBe 100
  }

  "KuduEventsHandler" should "handle redundant data" in {

    val metricsRDD = rdd
      .map(event => EventToStorableEvent(event))
      .flatMap(e => e.toOption)
      .map(se => EventToKuduEvent(se))
      .flatMap(e => e.toOption)

    val metricsDF = kuduCluster.sparkSession.createDataFrame(metricsRDD)

    val kuduConfig = KuduConfig(kuduCluster.kuduMiniCluster.getMasterAddresses, "TestEventsDuplicate", 2)

    KuduEventsHandler.getOrCreateTable(kuduCluster.kuduContext, kuduConfig)
    KuduEventsHandler.write(metricsDF, kuduCluster.kuduContext, kuduConfig)
    KuduEventsHandler.write(metricsDF, kuduCluster.kuduContext, kuduConfig)

    val df = kuduCluster.sparkSession.sqlContext
      .read
      .options(Map("kudu.master" -> kuduConfig.masterAdresses, "kudu.table" -> kuduConfig.eventsTableName))
      .kudu

    df.count shouldBe 100
  }

  override def beforeAll() {
    kuduCluster.start()
  }

  override def afterAll() {
    kuduCluster.stop() // teardown should stop, not restart, the mini cluster; assumes KuduMiniCluster exposes stop()
  }
}
Example 111
Source File: SonarFileSystem.scala From sonar-scala with GNU Lesser General Public License v3.0 | 5 votes |
package com.mwz.sonar.scala
package util
package syntax

import java.io.File
import java.nio.file.Path

import scala.util.{Failure, Success, Try}

import cats.syntax.flatMap._
import cats.{Monad, MonoidK}
import org.sonar.api.batch.fs.FileSystem

object SonarFileSystem {
  implicit final class FileSystemOps(private val fs: FileSystem) extends AnyVal {

    def resolve[F[_]: Monad: MonoidK](toResolve: F[Path]): F[File] =
      toResolve.flatMap[File] { path =>
        Try(fs.resolvePath(path.toString)) match {
          case Failure(_) => MonoidK[F].empty
          case Success(f) => Monad[F].pure(f)
        }
      }
  }
}
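A short usage sketch of the syntax above. Option and List both satisfy the Monad: MonoidK bounds (the instances come from cats), so a path that resolvePath rejects simply collapses to None or drops out of the list; the helper names here are illustrative:

import java.io.File
import java.nio.file.Paths

import cats.implicits._
import com.mwz.sonar.scala.util.syntax.SonarFileSystem._
import org.sonar.api.batch.fs.FileSystem

object FileSystemOpsDemo {
  def resolveSources(fs: FileSystem): Option[File] =
    fs.resolve(Option(Paths.get("src/main/scala"))) // None if fs.resolvePath throws

  def resolveModules(fs: FileSystem): List[File] =
    fs.resolve(List(Paths.get("module-a"), Paths.get("module-b"))) // failed paths drop out
}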
Example 112
Source File: PlayParSeq.scala From play-parseq with Apache License 2.0 | 5 votes |
package com.linkedin.playparseq.s

import com.linkedin.parseq.{Engine, Task}
import com.linkedin.parseq.promise.Promises
import com.linkedin.playparseq.s.PlayParSeqImplicits._
import com.linkedin.playparseq.s.stores.ParSeqTaskStore
import com.linkedin.playparseq.utils.PlayParSeqHelper
import javax.inject.{Inject, Singleton}
import play.api.mvc.RequestHeader

import scala.concurrent.{ExecutionContext, Future}
import scala.util.{Failure, Success}

  override def runTask[T](task: Task[T])(implicit requestHeader: RequestHeader): Future[T] = {
    // Bind a Future to the ParSeq Task
    val future: Future[T] = bindTaskToFuture(task)
    // Put the ParSeq Task into store
    parSeqTaskStore.put(task)
    // Run the ParSeq Task
    engine.run(task)
    // Return the Future
    future
  }
}
Example 113
Source File: Effects.scala From monadless with Apache License 2.0 | 5 votes |
package io.monadless.lst

import scala.util.Failure
import scala.util.Try
import scala.util.Success
import scala.concurrent.Future
import scala.concurrent.Promise

object Effects {

  val optionEffect = new SyncEffect[Option] {
    def point[T](v: T) = Some(v)
    def lift[T](v: => T) = Option(v)
    def apply[T](o: Option[T]) =
      o match {
        case Some(v) => Sync(Left(v))
        case None    => Sync(Right(None))
      }
  }

  val tryEffect = new SyncEffect[Try] {
    def point[T](v: T) = Success(v)
    def lift[T](v: => T) = Try(v)
    def apply[T](o: Try[T]) =
      o match {
        case Success(v)  => Sync(Left(v))
        case Failure(ex) => Sync(Right(Failure(ex)))
      }
  }

  val futureEffect = new AsyncEffect[Future] {
    import scala.concurrent.ExecutionContext.Implicits.global

    def point[T](v: T) = Future.successful(v)
    def lift[T](v: => T) = Future(v)

    def async[T](r: Async[Future[T]]): Future[T] = {
      val p = Promise[T]()
      r.cb(p.completeWith(_))
      p.future
    }

    def apply[T](o: Future[T]) =
      Async { f =>
        o.onComplete {
          case Success(v)  => f(Left(v))
          case Failure(ex) => f(Right(Future.failed(ex)))
        }
      }
  }
}
Example 114
Source File: _05_TryWithForComprehensions.scala From LearningScala with Apache License 2.0 | 5 votes |
package _090_failure_handling

import scala.util.{Success, Try}

object _05_TryWithForComprehensions {

  def adder(a: String, b: String): Try[Int] = {
    for {
      n <- Try(a.toInt)
      m <- Try(b.toInt)
    } yield n + m
  }

  def main(args: Array[String]): Unit = {
    println(adder("123", "112"))
    println(adder("123", "ab"))
    println(adder("12gf3", "562"))

    // random example
    val randomExample: Try[String] = for {
      language <- Success("Scala")
      behaviour <- Success("rocks")
    } yield s"$language $behaviour"

    println
    println(s"randomExample: $randomExample")
  }
}
Example 115
Source File: _03_TryWithPatternMatching.scala From LearningScala with Apache License 2.0 | 5 votes |
package _090_failure_handling

import scala.util.{Failure, Success, Try}

object _03_TryWithPatternMatching {

  def convertToInt(s: String): Try[Int] = Try(s.toInt)

  val printMyInteger: Try[_] => Unit = {
    case Success(n)  => println(n)
    case Failure(ex) => println(ex)
  }

  def main(args: Array[String]): Unit = {
    val a = convertToInt("123")
    val b = convertToInt("1a2b3")
    val list = List("12", "x", "23", "14", "y", "18")

    printMyInteger(a)
    printMyInteger(b)
    println(list.map(convertToInt))
  }
}
Example 116
Source File: DynamoActor.scala From scala-spark-cab-rides-predictions with MIT License | 5 votes |
package actors

import akka.actor.{Actor, ActorLogging, Status}
import com.amazonaws.services.dynamodbv2.model.BatchWriteItemResult
import dynamodb.{CabImpl, WeatherImp}
import models.{CabPriceBatch, WeatherBatch}

import scala.concurrent.Future
import scala.util.{Failure, Success}

  def putCabPrices(cabPriceBatch: CabPriceBatch): Unit = {
    val cabPrices = cabPriceBatch.cabPrices.toSeq
    log.info("received " + cabPrices.size + " number of cab price records")

    val result: Future[Seq[BatchWriteItemResult]] = CabImpl.put(cabPrices)
    result onComplete {
      case Success(_)         => log.info("Cab Prices Batch processed on DynamoDB")
      case Failure(exception) => log.error("error process Cab Prices batch on dynamoDB :" + exception.getStackTrace)
    }
  }
}
Example 117
Source File: Director.scala From nescala with GNU General Public License v2.0 | 5 votes |
package com.owlandrews.nescala.ui

import java.awt.Canvas

import com.owlandrews.nescala.BuildInfo
import org.lwjgl.opengl.{Display, GL11}

import scala.swing.Dialog
import scala.util.{Failure, Success, Try}

case class Director(gameWindow: Canvas, menuWindow: WrapPanel) {

  private var view: Option[View] = None
  private var pause = false

  def Menu() = setView(Some(MenuView(menuWindow)))

  def Reset() = view.foreach(_.Reset())

  def Close() = setView(None)

  def Pause() = pause = true

  def Resume() = pause = false

  def Save() = view.foreach(_.Save())

  def Start(path: String) = loadGameView(path) match {
    case Success(_) => run()
    case Failure(e) => Dialog.showMessage(new { def peer = gameWindow.getParent }, e.getMessage, BuildInfo.name, Dialog.Message.Warning)
  }

  private def loadGameView(path: String) =
    Try(com.owlandrews.nescala.Console(path)).map(console => setView(Some(GameView(console, gameWindow))))

  private def setView(view: Option[View]) {
    this.view.foreach(_.Close())
    this.view = view
    this.view.foreach(_.Open())
  }

  private def step(ts: Long) = {
    // Clear the screen and depth buffer
    GL11.glClear(GL11.GL_COLOR_BUFFER_BIT)
    val dt = System.nanoTime() - ts
    val nextTimeStamp = System.nanoTime()
    val seconds = dt / 1000000000.0F
    view.foreach(_.Update(seconds))
    nextTimeStamp
  }

  private def run() = {
    var timestamp = System.nanoTime()
    while (view.isDefined) {
      if (!pause) timestamp = step(timestamp)
      Display.update()
    }
    Display.destroy()
  }
}
Example 118
Source File: LogCollector.scala From pulse with Apache License 2.0 | 5 votes |
package io.phdata.pulse.logcollector

import java.io.FileInputStream
import java.util.Properties
import java.util.concurrent.TimeUnit

import akka.actor.ActorSystem
import akka.http.scaladsl.Http
import akka.stream.{ ActorMaterializer, Materializer }
import com.typesafe.scalalogging.LazyLogging
import io.phdata.pulse.common.SolrService
import io.phdata.pulse.solr.SolrProvider
import org.apache.kudu.client.KuduClient.KuduClientBuilder

import scala.concurrent.duration.Duration
import scala.concurrent.{ Await, Future }
import scala.util.{ Failure, Success }

  def main(args: Array[String]): Unit = {
    System.getProperty("java.security.auth.login.config") match {
      case null =>
        logger.info(
          "java.security.auth.login.config is not set, continuing without kerberos authentication")
      case _ =>
        KerberosContext.scheduleKerberosLogin(0, 9, TimeUnit.HOURS)
    }
    start(args)
  }

  private def start(args: Array[String]): Unit = {
    val cliParser = new LogCollectorCliParser(args)

    val solrService = SolrProvider.create(cliParser.zkHosts().split(",").toList)
    val solrStream  = new SolrCloudStream(solrService)

    val kuduClient =
      cliParser.kuduMasters.toOption.map(masters =>
        KerberosContext.runPrivileged(new KuduClientBuilder(masters).build()))

    val kuduService =
      kuduClient.map(client => KerberosContext.runPrivileged(new KuduService(client)))

    val routes = new LogCollectorRoutes(solrStream, kuduService)

    cliParser.mode() match {
      case "kafka" => kafka(solrService, cliParser.kafkaProps(), cliParser.topic())
      case _       => http(cliParser.port(), routes)
    }
  }

  // Starts Http Service
  def http(port: Int, routes: LogCollectorRoutes): Future[Unit] = {
    implicit val actorSystem: ActorSystem = ActorSystem()
    implicit val ec = actorSystem.dispatchers.lookup("akka.actor.http-dispatcher")
    implicit val materializer: Materializer = ActorMaterializer.create(actorSystem)

    val httpServerFuture = Http().bindAndHandle(routes.routes, "0.0.0.0", port)(materializer) map { binding =>
      logger.info(s"Log Collector interface bound to: ${binding.localAddress}")
    }

    httpServerFuture.onComplete {
      case Success(v) => ()
      case Failure(ex) =>
        logger.error("HTTP server failed, exiting. ", ex)
        System.exit(1)
    }

    Await.ready(httpServerFuture, Duration.Inf)
  }

  // Starts Kafka Consumer
  def kafka(solrService: SolrService, kafkaProps: String, topic: String): Unit = {
    val solrCloudStream = new SolrCloudStream(solrService)
    val kafkaConsumer = new PulseKafkaConsumer(solrCloudStream)

    val kafkaConsumerProps = new Properties()
    kafkaConsumerProps.load(new FileInputStream(kafkaProps))

    kafkaConsumer.read(kafkaConsumerProps, topic)
  }
Example 119
Source File: Stream.scala From pulse with Apache License 2.0 | 5 votes |
package io.phdata.pulse

import io.phdata.pulse.log.{ HttpManager, JsonParser }
import monix.reactive.subjects.ConcurrentSubject
import monix.execution.Scheduler.Implicits.global
import monix.reactive.OverflowStrategy
import org.apache.log4j.helpers.LogLog
import org.apache.log4j.spi.LoggingEvent

import scala.concurrent.duration.FiniteDuration
import scala.util.{ Failure, Success, Try }

abstract class Stream[E](flushDuration: FiniteDuration, flushSize: Int, maxBuffer: Int) {

  val overflowStrategy = OverflowStrategy.DropNewAndSignal(maxBuffer, (_: Long) => None)

  val subject = ConcurrentSubject.publish[E](overflowStrategy)

  subject
    .bufferTimedAndCounted(flushDuration, flushSize)
    .map(save)
    .subscribe()

  def append(value: E): Unit =
    Try {
      subject.onNext(value)
    } match {
      case Success(_) => ()
      case Failure(e) => LogLog.error("Error appending to stream", e)
    }

  def save(values: Seq[E])
}

class HttpStream(flushDuration: FiniteDuration, flushSize: Int, maxBuffer: Int, httpManager: HttpManager)
    extends Stream[LoggingEvent](flushDuration, flushSize, maxBuffer) {

  val jsonParser = new JsonParser

  override def save(values: Seq[LoggingEvent]): Unit = {
    val logArray = values.toArray
    LogLog.debug(s"Flushing ${logArray.length} messages")
    val logMessage = jsonParser.marshallArray(logArray)
    httpManager.send(logMessage)
  }
}
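A toy subclass illustrating how the buffering works, assuming it sits alongside the Stream class above; appends are batched by the Monix pipeline and handed to save once either the size or the time threshold is hit:

import scala.concurrent.duration._

// Illustrative sink that just prints each flushed batch.
class PrintlnStream extends Stream[String](flushDuration = 1.second, flushSize = 10, maxBuffer = 1000) {
  override def save(values: Seq[String]): Unit =
    println(s"flushing ${values.size} values")
}

object StreamDemo extends App {
  val stream = new PrintlnStream
  (1 to 25).foreach(i => stream.append(s"message-$i")) // two full batches of 10, then a timed flush of 5
  Thread.sleep(3000) // give the background subscription time to flush
}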
Example 120
Source File: ValidationImplicitsTest.scala From pulse with Apache License 2.0 | 5 votes |
package io.phdata.pulse.collectionroller.util

import org.scalatest.FunSuite

import scala.util.{ Failure, Success, Try }

class ValidationImplicitsTest extends FunSuite {
  import ValidationImplicits._

  val testKeyword = "worked"

  val success: Try[String] = Success(testKeyword)
  val failure: Try[String] = Failure(new Exception())

  val sequence = Seq(success, failure)

  test("map over a sequence of valid values") {
    val mapped = sequence.toValidated().mapValid(x => x.toUpperCase())
    assert(mapped.exists(x => x.exists(_ == testKeyword.toUpperCase())))
  }

  test("convert a Try into a Validated") {
    assert(Try(throw new Exception).toValidated().isInvalid)
    assert(Try(1).toValidated().isValid)
  }

  test("convert an Iterable[Try] to Iterable[Validated]") {
    assert(sequence.toValidated().exists(_.isValid))
    assert(sequence.toValidated().exists(_.isInvalid))
  }
}
Example 121
Source File: TemplateRendering.scala From avoin-voitto with MIT License | 5 votes |
package liigavoitto.journalist.utils

import liigavoitto.util.Logging

import scala.util.{Failure, Success, Try}

case class Template(template: String, weight: Double = 1.0) {
  require(weight > 0.0)
}

case class RenderedTemplate(text: String, weight: Double) {
  require(weight > 0.0)
}

object TemplateRendering extends Logging {

  def render(template: Template, attributes: Map[String, Any]): Option[RenderedTemplate] =
    Try {
      RenderedTemplate(Mustache(template.template).apply(attributes), template.weight)
    } match {
      case Success(rendered) => Some(rendered)
      case Failure(e) =>
        log.warn(s"Could not render '$template': " + e.getMessage)
        None
    }
}
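A usage sketch, assuming the project's Mustache wrapper performs standard {{placeholder}} substitution from the attribute map; a template that fails to render comes back as None rather than throwing:

val template = Template("{{home}} beat {{away}} {{score}}", weight = 2.0)
val attributes = Map("home" -> "HIFK", "away" -> "Kärpät", "score" -> "3-1")

TemplateRendering.render(template, attributes)
// => Some(RenderedTemplate("HIFK beat Kärpät 3-1", 2.0))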
Example 122
Source File: ScoresApiParser.scala From avoin-voitto with MIT License | 5 votes |
package liigavoitto.scores

import org.joda.time.DateTime
import org.json4s.DefaultFormats
import org.json4s.jackson.JsonMethods._
import liigavoitto.util.{ DateTimeNoMillisSerializer, Logging }

import scala.util.{ Failure, Success, Try }

case class Data(data: List[Sport])
case class Sport(id: String, series: List[Series])
case class Series(id: String, seasons: List[Season])
case class Season(id: String, stages: List[Stage])
case class Stage(id: String, matches: Option[List[Match]], standing: Option[List[LeagueTableEntry]], playerStatistics: Option[List[PlayerStatsEntry]])
case class Match(id: String, name: String, date: DateTime, status: String, teams: List[Team], feed: List[Feed] = List(), stats: GeneralMatchStats)
case class Feed(`type`: String, gameTime: Option[String], period: Option[String], player: Option[FeedPlayer], standing: Option[String], team: Option[Team], goalType: Option[String], saves: Option[String], timeInMins: Option[String], text: Option[String], beginTime: Option[String], endTime: Option[String])
case class FeedPlayer(id: String, name: PlayerName, meta: Option[PlayerMeta])
case class Team(id: String, name: String, abbr: String, meta: Meta, score: Option[Score], players: List[Player])
case class Score(now: Int, periods: List[Period], outcome: Outcome)
case class Period(id: String, score: Int)
case class Meta(images: List[Image], directives: Option[Map[String, Any]])
case class Image(id: String, imageType: String)
case class Outcome(wins: Int, draws: Int, losses: Int, otWins: Int, otLosses: Int)
case class Player(id: String, name: PlayerName, position: Option[String], specific: Map[String, Any], meta: Option[PlayerMeta])
case class PlayerName(first: String, last: String)
case class PlayerMeta(gender: Option[String] = None, country: Option[String] = None, tags: List[String] = Nil, directives: Map[String, String] = Map.empty)
case class LeagueTableEntry(
  team: Team,
  home: Option[LeagueTableResult] = None,
  away: Option[LeagueTableResult] = None,
  total: Option[LeagueTableResult] = None,
  specific: Option[Map[String, Any]] = None
)
case class LeagueTableResult(gamesPlayed: Int, outcome: Outcome, goals: Goals, points: Option[Int] = None, specific: Option[Map[String, Any]] = None)
case class Goals(score: Int, conceded: Int)
case class PlayerStatsEntry(player: PlayerStatsPlayer, teamStats: List[PlayerTeamStatsEntry])
case class PlayerTeamStatsEntry(team: Team, points: Int, goals: Int, assists: Int)
case class PlayerStatsPlayer(id: String, name: PlayerName)
case class GeneralMatchStats(attendance: Int)

trait ScoresApiParser extends Logging {

  implicit val formats = DefaultFormats + DateTimeNoMillisSerializer

  def parseMatchSport(json: String): Option[Sport] = extractData(json).map(_.head)

  def parseMatchList(json: String): Option[List[Match]] = extractData(json) match {
    case Some(sports) => if (sports.nonEmpty) Some(extractMatchesFromSport(sports.head).get) else Some(List())
    case None         => None
  }

  def parseLeagueTable(json: String): Option[List[LeagueTableEntry]] = extractData(json) match {
    case Some(sports) => if (sports.nonEmpty) Some(extractLeagueTableFromSport(sports.head).get) else Some(List())
    case None         => None
  }

  def parsePlayerStats(json: String): Option[List[PlayerStatsEntry]] = extractData(json) match {
    case Some(sports) => if (sports.nonEmpty) Some(extractPlayerStatsFromSport(sports.head).get) else Some(List())
    case None         => None
  }

  protected def extractMatchesFromSport(sport: Sport) = sport.series.head.seasons.head.stages.head.matches

  protected def extractLeagueTableFromSport(sport: Sport) = sport.series.head.seasons.head.stages.head.standing

  protected def extractPlayerStatsFromSport(sport: Sport) = sport.series.head.seasons.head.stages.head.playerStatistics

  protected def extractData(json: String) =
    Try {
      log.debug(s"Sport JSON: $json")
      parse(json).extract[Data]
    } match {
      case Success(s) => Some(s.data)
      case Failure(e) =>
        log.info(s"Failed to parse '$json': " + e)
        None
    }
}
Example 123
Source File: FacetIndex.scala From NSDb with Apache License 2.0 | 5 votes |
package io.radicalbit.nsdb.index

import io.radicalbit.nsdb.common.protocol.{Bit, DimensionFieldType}
import org.apache.lucene.document._
import org.apache.lucene.facet._
import org.apache.lucene.facet.taxonomy.SearcherTaxonomyManager
import org.apache.lucene.facet.taxonomy.directory.{DirectoryTaxonomyReader, DirectoryTaxonomyWriter}
import org.apache.lucene.index.IndexWriter
import org.apache.lucene.search._
import org.apache.lucene.store.Directory

import scala.collection.immutable
import scala.util.{Failure, Success, Try}

abstract class FacetIndex(val directory: Directory, val taxoDirectory: Directory) extends AbstractStructuredIndex {

  private lazy val searchTaxonomyManager: SearcherTaxonomyManager =
    new SearcherTaxonomyManager(directory, taxoDirectory, null)

  def write(bit: Bit)(implicit writer: IndexWriter, taxonomyWriter: DirectoryTaxonomyWriter): Try[Long]

  protected[this] def facetNamePrefix: String

  protected[this] def facetName(name: String): String = s"$facetNamePrefix$name"

  protected[index] def internalResult(query: Query,
                                      groupField: String,
                                      sort: Option[Sort],
                                      limit: Option[Int],
                                      valueIndexType: IndexType[_]): Option[FacetResult]

  protected[index] def result(query: Query,
                              groupField: String,
                              sort: Option[Sort],
                              limit: Option[Int],
                              groupFieldIndexType: IndexType[_],
                              valueIndexType: IndexType[_]): Seq[Bit]

  override def validateRecord(bit: Bit): Try[immutable.Iterable[Field]] =
    validateSchemaTypeSupport(bit)
      .map(se =>
        se.collect { case (_, t) if t.fieldClassType != DimensionFieldType => t }
          .flatMap(elem => elem.indexType.facetField(elem.name, elem.value)))

  protected[this] def commonWrite(bit: Bit,
                                  facetConfig: Seq[Field] => FacetsConfig,
                                  facetField: (Field, String, FacetsConfig) => Field)(
      implicit writer: IndexWriter,
      taxonomyWriter: DirectoryTaxonomyWriter): Try[Long] = {

    val allFields = validateRecord(bit)

    allFields match {
      case Success(fields) =>
        val doc = new Document
        val c   = facetConfig(fields.toSeq)

        fields
          .filterNot(f => f.name() == "value")
          .foreach { f =>
            doc.add(f)
            if (f.isInstanceOf[StringField] || f.isInstanceOf[FloatPoint] || f.isInstanceOf[DoublePoint] ||
                f.isInstanceOf[IntPoint] || f.isInstanceOf[LongPoint]) {
              val path = if (f.numericValue != null) f.numericValue.toString else f.stringValue
              doc.add(facetField(f, path, c))
            }
          }

        Try(writer.addDocument(c.build(taxonomyWriter, doc))).recoverWith {
          case t =>
            t.printStackTrace()
            Failure(t)
        }
      case Failure(t) =>
        t.printStackTrace()
        Failure(t)
    }
  }
}
Example 124
Source File: FutureRetryUtility.scala From NSDb with Apache License 2.0 | 5 votes |
package io.radicalbit.nsdb.util

import akka.actor.{Actor, ActorRef, Scheduler, Status}
import akka.event.LoggingAdapter
import akka.pattern.after

import scala.concurrent.duration.FiniteDuration
import scala.concurrent.{ExecutionContext, Future}
import scala.util.{Failure, Success}

trait FutureRetryUtility {

  implicit class FutureRetry[T](f: => Future[T]) {

    def retry(delay: FiniteDuration, retries: Int)(wasSuccessful: T => Boolean)(
        implicit ec: ExecutionContext,
        s: Scheduler,
        log: LoggingAdapter): Future[T] =
      (for {
        a <- f
        result <- if (wasSuccessful(a) || retries < 1) Future(a)
                  else {
                    log.warning("{}. Retrying...", a)
                    after(delay, s)(retry(delay, retries - 1)(wasSuccessful))
                  }
      } yield result) recoverWith {
        case t if retries > 0 =>
          log.warning("{}. Retrying...", t)
          after(delay, s)(retry(delay, retries - 1)(wasSuccessful))
      }
  }

  implicit class PipeToFutureRetry[T](f: => Future[T]) {

    def pipeTo(delay: FiniteDuration, retries: Int, recipient: ActorRef)(wasSuccessful: T => Boolean = _ => true)(
        implicit ec: ExecutionContext,
        s: Scheduler,
        log: LoggingAdapter,
        sender: ActorRef = Actor.noSender) =
      f.retry(delay, retries)(wasSuccessful) andThen {
        case Success(r) ⇒ recipient ! r
        case Failure(f) ⇒ recipient ! Status.Failure(f)
      }
  }
}
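A sketch of the retry syntax in use, assuming an ActorSystem supplies the implicit scheduler and logger (all names here are illustrative):

import akka.actor.{ActorSystem, Scheduler}
import akka.event.{Logging, LoggingAdapter}

import scala.concurrent.Future
import scala.concurrent.duration._

object RetryDemo extends App with FutureRetryUtility {
  implicit val system: ActorSystem = ActorSystem("retry-demo")
  implicit val scheduler: Scheduler = system.scheduler
  implicit val log: LoggingAdapter = Logging(system, "RetryDemo")
  import system.dispatcher

  // Retry up to 3 times, 500ms apart, until the result is positive.
  def flaky(): Future[Int] = Future(scala.util.Random.nextInt(10) - 5)

  flaky().retry(500.millis, 3)(_ > 0).foreach(n => println(s"eventually got $n"))
}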
Example 125
Source File: PipeableFutureWithSideEffect.scala From NSDb with Apache License 2.0 | 5 votes |
package io.radicalbit.nsdb.util

import akka.actor.{Actor, ActorRef, Status}

import scala.concurrent.{ExecutionContext, Future}
import scala.util.{Failure, Success}

final class PipeableFutureWithSideEffect[T](val future: Future[T])(implicit executionContext: ExecutionContext) {

  def pipeTo(recipient: ActorRef)(implicit sender: ActorRef = Actor.noSender): Future[T] = {
    future andThen {
      case Success(r) ⇒ recipient ! r
      case Failure(f) ⇒ recipient ! Status.Failure(f)
    }
  }

  def pipeToWithEffect(recipient: ActorRef)(effect: T => Unit)(
      implicit sender: ActorRef = Actor.noSender): Future[T] = {
    future andThen {
      case Success(r) ⇒
        effect(r)
        recipient ! r
      case Failure(f) ⇒ recipient ! Status.Failure(f)
    }
  }
}

object PipeableFutureWithSideEffect {
  implicit def pipe[T](future: Future[T])(
      implicit executionContext: ExecutionContext): PipeableFutureWithSideEffect[T] =
    new PipeableFutureWithSideEffect(future)
}
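A sketch of pipeToWithEffect from inside an actor, mirroring Akka's standard pipeTo pattern; the actor and message names are illustrative. The side effect runs before the result is delivered to the recipient:

import akka.actor.Actor
import io.radicalbit.nsdb.util.PipeableFutureWithSideEffect._

import scala.concurrent.Future

class QueryActor extends Actor {
  import context.dispatcher

  override def receive: Receive = {
    case query: String =>
      Future(s"result for $query")
        .pipeToWithEffect(sender()) { r =>
          context.system.log.debug("query completed with {}", r) // effect fires before the reply is sent
        }
  }
}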
Example 126
Source File: SequentialFutureProcessing.scala From NSDb with Apache License 2.0 | 5 votes |
package io.radicalbit.nsdb.cluster.actor

import akka.actor.{Actor, ActorRef, Stash, Status}
import io.radicalbit.nsdb.cluster.actor.SequentialFutureProcessing.{Continue, PipeableFutureWithContinue}
import io.radicalbit.nsdb.common.protocol.NSDbSerializable

import scala.concurrent.{ExecutionContext, Future}
import scala.util.{Failure, Success}

  def pipeToWithEffect(recipient: ActorRef)(effect: T => Unit)(
      implicit sender: ActorRef = Actor.noSender): Future[T] = {
    future andThen {
      case Success(r) =>
        effect(r)
        recipient ! r
        sender ! Continue
      case Failure(f) =>
        recipient ! Status.Failure(f)
        sender ! Continue
    }
    future
  }
}
}
Example 127
Source File: LocationIndex.scala From NSDb with Apache License 2.0 | 5 votes |
package io.radicalbit.nsdb.cluster.index

import io.radicalbit.nsdb.index.SimpleIndex
import io.radicalbit.nsdb.model.Location
import io.radicalbit.nsdb.statement.StatementParser.SimpleField
import org.apache.lucene.document.Field.Store
import org.apache.lucene.document._
import org.apache.lucene.index.{IndexWriter, Term}
import org.apache.lucene.search._
import org.apache.lucene.store.Directory

import scala.collection.JavaConverters._
import scala.util.{Failure, Success, Try}

class LocationIndex(override val directory: Directory) extends SimpleIndex[Location] {

  override val _keyField: String = "_metric"

  override def validateRecord(data: Location): Try[Seq[Field]] =
    Success(
      Seq(
        new StringField(_keyField, data.metric, Store.YES),
        new StringField("node", data.node, Store.YES),
        new LongPoint("from", data.from),
        new LongPoint("to", data.to),
        new NumericDocValuesField("from", data.from),
        new NumericDocValuesField("to", data.to),
        new StoredField("from", data.from),
        new StoredField("to", data.to)
      )
    )

  override def write(data: Location)(implicit writer: IndexWriter): Try[Long] = {
    val doc = new Document
    validateRecord(data) match {
      case Success(fields) =>
        Try {
          fields.foreach(doc.add)
          writer.addDocument(doc)
        }
      case Failure(t) => Failure(t)
    }
  }

  override def toRecord(document: Document, fields: Seq[SimpleField]): Location = {
    val fields = document.getFields.asScala.map(f => f.name() -> f).toMap
    Location(
      document.get(_keyField),
      document.get("node"),
      fields("from").numericValue().longValue(),
      fields("to").numericValue().longValue()
    )
  }

  def getLocationsForMetric(metric: String): Seq[Location] = {
    val queryTerm = new TermQuery(new Term(_keyField, metric))
    Try(query(queryTerm, Seq.empty, Integer.MAX_VALUE, None)(identity)) match {
      case Success(metadataSeq) => metadataSeq
      case Failure(_)           => Seq.empty
    }
  }

  def getLocationForMetricAtTime(metric: String, t: Long): Option[Location] = {
    val builder = new BooleanQuery.Builder()
    builder.add(LongPoint.newRangeQuery("to", t, Long.MaxValue), BooleanClause.Occur.SHOULD)
    builder.add(LongPoint.newRangeQuery("from", 0, t), BooleanClause.Occur.SHOULD).build()

    Try(query(builder.build(), Seq.empty, Integer.MAX_VALUE, None)(identity).headOption) match {
      case Success(metadataSeq) => metadataSeq
      case Failure(_)           => None
    }
  }

  def deleteByMetric(metric: String)(implicit writer: IndexWriter): Try[Long] = Try {
    val result = writer.deleteDocuments(new TermQuery(new Term(_keyField, metric)))
    writer.forceMergeDeletes(true)
    result
  }

  override def delete(data: Location)(implicit writer: IndexWriter): Try[Long] = Try {
    val builder = new BooleanQuery.Builder()
    builder.add(new TermQuery(new Term(_keyField, data.metric)), BooleanClause.Occur.MUST)
    builder.add(new TermQuery(new Term("node", data.node)), BooleanClause.Occur.MUST)
    builder.add(LongPoint.newExactQuery("from", data.from), BooleanClause.Occur.MUST)
    builder.add(LongPoint.newExactQuery("to", data.to), BooleanClause.Occur.MUST)
    val query = builder.build()
    val result = writer.deleteDocuments(query)
    writer.forceMergeDeletes(true)
    result
  }
}
Example 128
Source File: MetricInfoIndex.scala From NSDb with Apache License 2.0 | 5 votes |
package io.radicalbit.nsdb.cluster.index

import io.radicalbit.nsdb.common.model.MetricInfo
import io.radicalbit.nsdb.index.SimpleIndex
import io.radicalbit.nsdb.index.lucene.Index._
import io.radicalbit.nsdb.statement.StatementParser.SimpleField
import org.apache.lucene.document.Field.Store
import org.apache.lucene.document._
import org.apache.lucene.index.{IndexWriter, Term}
import org.apache.lucene.search._
import org.apache.lucene.store.Directory

import scala.collection.JavaConverters._
import scala.util.{Failure, Success, Try}

class MetricInfoIndex(override val directory: Directory) extends SimpleIndex[MetricInfo] {

  override val _keyField: String = "_metric"

  override def validateRecord(data: MetricInfo): Try[Seq[Field]] =
    Success(
      Seq(
        new StringField(_keyField, data.metric, Store.YES),
        new StoredField("shardInterval", data.shardInterval)
      )
    )

  override def write(data: MetricInfo)(implicit writer: IndexWriter): Try[Long] = {
    val doc = new Document
    validateRecord(data) match {
      case Success(fields) =>
        Try {
          fields.foreach(doc.add)
          writer.addDocument(doc)
        }
      case Failure(t) => Failure(t)
    }
  }

  override def toRecord(document: Document, fields: Seq[SimpleField]): MetricInfo = {
    val fields = document.getFields.asScala.map(f => f.name() -> f).toMap
    MetricInfo(
      "notPresent",
      "notPresent",
      document.get(_keyField),
      fields("shardInterval").numericValue().longValue()
    )
  }

  def getMetricInfo(metric: String): Option[MetricInfo] = {
    val results = handleNoIndexResults(Try {
      val queryTerm = new TermQuery(new Term(_keyField, metric))
      query(queryTerm, Seq.empty, Integer.MAX_VALUE, None)(identity)
    })
    results match {
      case Success(metricInfoes) => metricInfoes.headOption
      case Failure(_)            => None
    }
  }

  override def delete(data: MetricInfo)(implicit writer: IndexWriter): Try[Long] = Try {
    val queryTerm = new TermQuery(new Term(_keyField, data.metric))
    val result = writer.deleteDocuments(queryTerm)
    writer.forceMergeDeletes(true)
    result
  }

  def deleteByMetric(metric: String)(implicit writer: IndexWriter): Try[Long] = Try {
    val queryTerm = new TermQuery(new Term(_keyField, metric))
    val result = writer.deleteDocuments(queryTerm)
    writer.forceMergeDeletes(true)
    result
  }
}
Example 129
Source File: MonixAsyncHandler.scala From pulsar4s with Apache License 2.0 | 5 votes |
package com.sksamuel.pulsar4s.monixs

import java.util.concurrent.CompletableFuture

import com.sksamuel.pulsar4s.{AsyncHandler, ConsumerMessage, DefaultProducer, MessageId, Producer}
import monix.eval.Task
import org.apache.pulsar.client.api
import org.apache.pulsar.client.api.Consumer
import org.apache.pulsar.client.api.{ProducerBuilder, Reader, TypedMessageBuilder}

import scala.compat.java8.FutureConverters
import scala.concurrent.Future
import scala.language.implicitConversions
import scala.util.{Failure, Success, Try}

class MonixAsyncHandler extends AsyncHandler[Task] {

  implicit def completableTToFuture[T](f: => CompletableFuture[T]): Future[T] =
    FutureConverters.toScala(f)

  implicit def completableVoidToTask(f: => CompletableFuture[Void]): Task[Unit] =
    Task.deferFuture(FutureConverters.toScala(f)).map(_ => ())

  override def failed(e: Throwable): Task[Nothing] = Task.raiseError(e)

  override def createProducer[T](builder: ProducerBuilder[T]): Task[Producer[T]] =
    Task.deferFuture(FutureConverters.toScala(builder.createAsync())).map(new DefaultProducer(_))

  override def send[T](t: T, producer: api.Producer[T]): Task[MessageId] =
    Task.deferFuture {
      val future = producer.sendAsync(t)
      FutureConverters.toScala(future)
    }.map { id => MessageId.fromJava(id) }

  override def receive[T](consumer: api.Consumer[T]): Task[ConsumerMessage[T]] =
    Task.deferFuture {
      val future = consumer.receiveAsync()
      FutureConverters.toScala(future)
    }.map(ConsumerMessage.fromJava)

  override def getLastMessageId[T](consumer: api.Consumer[T]): Task[MessageId] =
    Task.deferFuture {
      val future = consumer.getLastMessageIdAsync()
      FutureConverters.toScala(future)
    }.map(MessageId.fromJava)

  def unsubscribeAsync(consumer: api.Consumer[_]): Task[Unit] = consumer.unsubscribeAsync()

  override def close(producer: api.Producer[_]): Task[Unit] = producer.closeAsync()

  override def close(consumer: api.Consumer[_]): Task[Unit] = consumer.closeAsync()

  override def seekAsync(consumer: api.Consumer[_], messageId: MessageId): Task[Unit] = consumer.seekAsync(messageId)

  override def seekAsync(reader: api.Reader[_], messageId: MessageId): Task[Unit] = reader.seekAsync(messageId)

  override def seekAsync(reader: api.Reader[_], timestamp: Long): Task[Unit] = reader.seekAsync(timestamp)

  override def transform[A, B](t: Task[A])(fn: A => Try[B]): Task[B] =
    t.flatMap { a =>
      fn(a) match {
        case Success(b) => Task.now(b)
        case Failure(e) => Task.raiseError(e)
      }
    }

  override def acknowledgeAsync[T](consumer: api.Consumer[T], messageId: MessageId): Task[Unit] =
    consumer.acknowledgeAsync(messageId)

  override def acknowledgeCumulativeAsync[T](consumer: api.Consumer[T], messageId: MessageId): Task[Unit] =
    consumer.acknowledgeCumulativeAsync(messageId)

  override def negativeAcknowledgeAsync[T](consumer: Consumer[T], messageId: MessageId): Task[Unit] =
    Task { consumer.negativeAcknowledge(messageId) }

  override def close(reader: Reader[_]): Task[Unit] = reader.closeAsync()

  override def flush(producer: api.Producer[_]): Task[Unit] = producer.flushAsync()

  override def nextAsync[T](reader: Reader[T]): Task[ConsumerMessage[T]] =
    Task.deferFuture(reader.readNextAsync()).map(ConsumerMessage.fromJava)

  override def send[T](builder: TypedMessageBuilder[T]): Task[MessageId] =
    Task.deferFuture(builder.sendAsync()).map(MessageId.fromJava)
}

object MonixAsyncHandler {
  implicit def handler: AsyncHandler[Task] = new MonixAsyncHandler
}
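With the implicit handler in scope, pulsar4s operations return Task instead of Future. A wiring sketch, assuming pulsar4s's usual client API (service URL and topic are illustrative):

import com.sksamuel.pulsar4s.{ProducerConfig, PulsarClient, Topic}
import com.sksamuel.pulsar4s.monixs.MonixAsyncHandler._
import monix.execution.Scheduler.Implicits.global
import org.apache.pulsar.client.api.Schema

object MonixProducerDemo extends App {
  implicit val schema: Schema[String] = Schema.STRING

  val client = PulsarClient("pulsar://localhost:6650")
  val producer = client.producer[String](ProducerConfig(Topic("persistent://public/default/demo")))

  val messageId = producer.sendAsync("hello").runSyncUnsafe() // Task[MessageId] run eagerly here
  println(messageId)
  client.close()
}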
Example 130
Source File: CatsAsyncHandler.scala From pulsar4s with Apache License 2.0 | 5 votes |
package com.sksamuel.pulsar4s.cats

import java.util.concurrent.CompletableFuture
import java.util.function.BiConsumer

import cats.implicits._
import cats.effect._
import com.sksamuel.pulsar4s.{AsyncHandler, ConsumerMessage, DefaultProducer, MessageId, Producer}
import org.apache.pulsar.client.api
import org.apache.pulsar.client.api.{ProducerBuilder, Reader, TypedMessageBuilder}

import scala.language.higherKinds
import scala.util.{Failure, Success, Try}

object CatsAsyncHandler extends CatsAsyncHandlerLowPriority {
  implicit def handler: AsyncHandler[IO] = asyncHandlerForCatsEffectAsync[IO]
}

trait CatsAsyncHandlerLowPriority {

  object CompletableFutureConverters {
    implicit class CompletableOps[T](f: => CompletableFuture[T]) {
      def toF[F[_]: Async]: F[T] =
        Async[F].async[T] { k =>
          f.whenCompleteAsync(new BiConsumer[T, Throwable] {
            override def accept(t: T, e: Throwable): Unit = {
              if (e != null) k.apply(Left(e))
              else k.apply(Right(t))
            }
          })
        }
    }
  }

  implicit def asyncHandlerForCatsEffectAsync[F[_]: Async]: AsyncHandler[F] = new AsyncHandler[F] {
    import CompletableFutureConverters._

    override def failed(e: Throwable): F[Nothing] = Async[F].raiseError(e)

    override def createProducer[T](builder: ProducerBuilder[T]): F[Producer[T]] =
      builder.createAsync().toF[F].map(new DefaultProducer(_))

    override def send[T](t: T, producer: api.Producer[T]): F[MessageId] =
      producer.sendAsync(t).toF[F].map(MessageId.fromJava)

    override def receive[T](consumer: api.Consumer[T]): F[ConsumerMessage[T]] =
      consumer.receiveAsync().toF[F].map(ConsumerMessage.fromJava)

    override def unsubscribeAsync(consumer: api.Consumer[_]): F[Unit] = consumer.unsubscribeAsync().toF[F].void

    override def getLastMessageId[T](consumer: api.Consumer[T]): F[MessageId] =
      consumer.getLastMessageIdAsync().toF[F].map(MessageId.fromJava)

    override def close(producer: api.Producer[_]): F[Unit] = producer.closeAsync().toF[F].void

    override def close(consumer: api.Consumer[_]): F[Unit] = consumer.closeAsync().toF[F].void

    override def seekAsync(consumer: api.Consumer[_], messageId: MessageId): F[Unit] =
      consumer.seekAsync(messageId).toF[F].void

    override def seekAsync(reader: api.Reader[_], messageId: MessageId): F[Unit] =
      reader.seekAsync(messageId).toF[F].void

    override def seekAsync(reader: api.Reader[_], timestamp: Long): F[Unit] =
      reader.seekAsync(timestamp).toF[F].void

    override def transform[A, B](t: F[A])(fn: A => Try[B]): F[B] =
      t.flatMap { a =>
        fn(a) match {
          case Success(b) => Async[F].pure(b)
          case Failure(e) => Async[F].raiseError(e)
        }
      }

    override def acknowledgeAsync[T](consumer: api.Consumer[T], messageId: MessageId): F[Unit] =
      consumer.acknowledgeAsync(messageId).toF[F].void

    override def acknowledgeCumulativeAsync[T](consumer: api.Consumer[T], messageId: MessageId): F[Unit] =
      consumer.acknowledgeCumulativeAsync(messageId).toF[F].void

    override def negativeAcknowledgeAsync[T](consumer: api.Consumer[T], messageId: MessageId): F[Unit] =
      Async[F].delay { consumer.negativeAcknowledge(messageId) }

    override def close(reader: Reader[_]): F[Unit] = reader.closeAsync().toF[F].void

    override def flush(producer: api.Producer[_]): F[Unit] = producer.flushAsync().toF[F].void

    override def nextAsync[T](reader: Reader[T]): F[ConsumerMessage[T]] =
      reader.readNextAsync().toF[F].map(ConsumerMessage.fromJava)

    override def send[T](builder: TypedMessageBuilder[T]): F[MessageId] =
      builder.sendAsync().toF[F].map(MessageId.fromJava)
  }
}
Example 131
Source File: ScalazAsyncHandler.scala From pulsar4s with Apache License 2.0 | 5 votes |
package com.sksamuel.pulsar4s.scalaz

import java.util.concurrent.CompletableFuture
import java.util.function.BiConsumer

import com.sksamuel.pulsar4s.{AsyncHandler, ConsumerMessage, DefaultProducer, MessageId, Producer}
import org.apache.pulsar.client.api
import org.apache.pulsar.client.api.Consumer
import org.apache.pulsar.client.api.{ProducerBuilder, Reader, TypedMessageBuilder}
import scalaz.concurrent.Task

import scala.language.implicitConversions
import scala.util.{Failure, Success, Try}

class ScalazAsyncHandler extends AsyncHandler[Task] {

  implicit def completableVoidToTask(f: => CompletableFuture[Void]): Task[Unit] =
    completableToTask(f).map(_ => ())

  implicit def completableToTask[T](f: => CompletableFuture[T]): Task[T] = {
    Task.async[T] { k =>
      f.whenCompleteAsync(new BiConsumer[T, Throwable] {
        override def accept(t: T, e: Throwable): Unit = {
          if (e != null) k.apply(scalaz.\/.left(e))
          else k.apply(scalaz.\/.right(t))
        }
      })
    }
  }

  override def failed(e: Throwable): Task[Nothing] = Task.fail(e)

  override def createProducer[T](builder: ProducerBuilder[T]): Task[Producer[T]] =
    completableToTask(builder.createAsync()).map(new DefaultProducer(_))

  override def send[T](t: T, producer: api.Producer[T]): Task[MessageId] =
    completableToTask(producer.sendAsync(t)).map(MessageId.fromJava)

  override def receive[T](consumer: api.Consumer[T]): Task[ConsumerMessage[T]] =
    completableToTask(consumer.receiveAsync).map(ConsumerMessage.fromJava)

  override def getLastMessageId[T](consumer: api.Consumer[T]): Task[MessageId] =
    completableToTask(consumer.getLastMessageIdAsync()).map(MessageId.fromJava)

  override def unsubscribeAsync(consumer: api.Consumer[_]): Task[Unit] = consumer.unsubscribeAsync()

  override def seekAsync(consumer: api.Consumer[_], messageId: MessageId): Task[Unit] =
    consumer.seekAsync(messageId)

  override def seekAsync(reader: api.Reader[_], messageId: MessageId): Task[Unit] =
    reader.seekAsync(messageId)

  override def seekAsync(reader: api.Reader[_], timestamp: Long): Task[Unit] =
    reader.seekAsync(timestamp)

  override def transform[A, B](f: Task[A])(fn: A => Try[B]): Task[B] =
    f.flatMap { a =>
      fn(a) match {
        case Success(b) => Task.now(b)
        case Failure(e) => Task.fail(e)
      }
    }

  override def acknowledgeAsync[T](consumer: api.Consumer[T], messageId: MessageId): Task[Unit] =
    consumer.acknowledgeAsync(messageId)

  override def acknowledgeCumulativeAsync[T](consumer: api.Consumer[T], messageId: MessageId): Task[Unit] =
    consumer.acknowledgeCumulativeAsync(messageId)

  override def negativeAcknowledgeAsync[T](consumer: Consumer[T], messageId: MessageId): Task[Unit] =
    Task { consumer.negativeAcknowledge(messageId) }

  override def close(reader: Reader[_]): Task[Unit] = reader.closeAsync()
  override def close(producer: api.Producer[_]): Task[Unit] = producer.closeAsync()
  override def close(consumer: api.Consumer[_]): Task[Unit] = consumer.closeAsync()

  override def flush(producer: api.Producer[_]): Task[Unit] = producer.flushAsync()

  override def nextAsync[T](reader: Reader[T]): Task[ConsumerMessage[T]] =
    reader.readNextAsync().map(ConsumerMessage.fromJava)

  override def send[T](builder: TypedMessageBuilder[T]): Task[MessageId] =
    builder.sendAsync().map(MessageId.fromJava)
}

object ScalazAsyncHandler {
  implicit def handler: AsyncHandler[Task] = new ScalazAsyncHandler
}
Example 132
Source File: FutureAsyncHandler.scala From pulsar4s with Apache License 2.0 | 5 votes |
package com.sksamuel.pulsar4s

import java.util.concurrent.CompletableFuture

import org.apache.pulsar.client.api
import org.apache.pulsar.client.api.TypedMessageBuilder

import scala.compat.java8.FutureConverters
import scala.compat.java8.FutureConverters.CompletionStageOps
import scala.concurrent.ExecutionContext
import scala.concurrent.Future
import scala.util.Failure
import scala.util.Success
import scala.util.Try

class FutureAsyncHandler(implicit ec: ExecutionContext) extends AsyncHandler[Future] {

  implicit class VoidCompletableFutureOps(val completableFuture: CompletableFuture[Void]) {
    def toScala: Future[Unit] = new CompletionStageOps(completableFuture).toScala.map(_ => ())
  }

  override def failed(e: Throwable): Future[Nothing] = Future.failed(e)

  override def createProducer[T](builder: api.ProducerBuilder[T]): Future[Producer[T]] = {
    builder.createAsync().thenApply[Producer[T]](new DefaultProducer(_)).toScala
  }

  override def send[T](t: T, producer: api.Producer[T]): Future[MessageId] = {
    val future = producer.sendAsync(t)
    FutureConverters.toScala(future).map(MessageId.fromJava)
  }

  override def receive[T](consumer: api.Consumer[T]): Future[ConsumerMessage[T]] = {
    val future = consumer.receiveAsync()
    FutureConverters.toScala(future).map(ConsumerMessage.fromJava)
  }

  override def unsubscribeAsync(consumer: api.Consumer[_]): Future[Unit] =
    consumer.unsubscribeAsync().toScala

  override def getLastMessageId[T](consumer: api.Consumer[T]): Future[MessageId] = {
    val future = consumer.getLastMessageIdAsync()
    FutureConverters.toScala(future).map(MessageId.fromJava)
  }

  override def close(producer: api.Producer[_]): Future[Unit] = producer.closeAsync().toScala

  override def close(consumer: api.Consumer[_]): Future[Unit] = consumer.closeAsync().toScala

  override def seekAsync(consumer: api.Consumer[_], messageId: MessageId): Future[Unit] =
    consumer.seekAsync(messageId).toScala

  override def seekAsync(reader: api.Reader[_], messageId: MessageId): Future[Unit] =
    reader.seekAsync(messageId).toScala

  override def seekAsync(reader: api.Reader[_], timestamp: Long): Future[Unit] =
    reader.seekAsync(timestamp).toScala

  override def transform[A, B](f: Future[A])(fn: A => Try[B]): Future[B] =
    f.flatMap { a =>
      fn(a) match {
        case Success(b) => Future.successful(b)
        case Failure(e) => Future.failed(e)
      }
    }

  override def acknowledgeAsync[T](consumer: api.Consumer[T], messageId: MessageId): Future[Unit] =
    consumer.acknowledgeAsync(messageId).toScala

  override def negativeAcknowledgeAsync[T](consumer: api.Consumer[T], messageId: MessageId): Future[Unit] =
    Future.successful(consumer.negativeAcknowledge(messageId))

  override def acknowledgeCumulativeAsync[T](consumer: api.Consumer[T], messageId: MessageId): Future[Unit] =
    consumer.acknowledgeCumulativeAsync(messageId).toScala

  override def close(reader: api.Reader[_]): Future[Unit] = reader.closeAsync().toScala

  override def flush(producer: api.Producer[_]): Future[Unit] = producer.flushAsync().toScala

  override def nextAsync[T](reader: api.Reader[T]): Future[ConsumerMessage[T]] =
    reader.readNextAsync().toScala.map(ConsumerMessage.fromJava)

  override def send[T](builder: TypedMessageBuilder[T]): Future[MessageId] =
    builder.sendAsync().toScala.map(MessageId.fromJava)
}
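A minimal usage sketch with the Future-based handler (not from the project; the broker address and topic are assumptions):

import com.sksamuel.pulsar4s.{AsyncHandler, FutureAsyncHandler, MessageId, ProducerConfig, PulsarClient, Topic}
import org.apache.pulsar.client.api.Schema

import scala.concurrent.{Await, Future}
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.duration._

object FutureHandlerUsage extends App {
  implicit val schema: Schema[String] = Schema.STRING
  implicit val handler: AsyncHandler[Future] = new FutureAsyncHandler
  val client = PulsarClient("pulsar://localhost:6650") // assumed broker address
  val producer = client.producer(ProducerConfig(Topic("sample-topic"))) // assumed topic
  val id: Future[MessageId] = producer.sendAsync("hello")
  println(Await.result(id, 10.seconds)) // blocks only for the demo
  client.close()
}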
Example 133
Source File: PulsarCommittableSourceGraphStage.scala From pulsar4s with Apache License 2.0 | 5 votes |
package com.sksamuel.pulsar4s.akka.streams

import akka.Done
import akka.stream.Attributes
import akka.stream.Outlet
import akka.stream.Shape
import akka.stream.SourceShape
import akka.stream.stage.AsyncCallback
import akka.stream.stage.GraphStageLogic
import akka.stream.stage.GraphStageWithMaterializedValue
import akka.stream.stage.OutHandler
import com.sksamuel.exts.Logging
import com.sksamuel.pulsar4s.Consumer
import com.sksamuel.pulsar4s.ConsumerMessage
import com.sksamuel.pulsar4s.MessageId
import org.apache.pulsar.client.api.ConsumerStats

import scala.concurrent.ExecutionContext
import scala.concurrent.Future
import scala.util.Failure
import scala.util.Success

trait CommittableMessage[T] {
  def ack(cumulative: Boolean = false): Future[Done]
  def nack(): Future[Done]
  def message: ConsumerMessage[T]
}

class PulsarCommittableSourceGraphStage[T](create: () => Consumer[T], seek: Option[MessageId])
  extends GraphStageWithMaterializedValue[SourceShape[CommittableMessage[T]], Control]
    with Logging {

  private val out = Outlet[CommittableMessage[T]]("pulsar.out")
  override def shape: SourceShape[CommittableMessage[T]] = SourceShape(out)

  private class PulsarCommittableSourceLogic(shape: Shape) extends GraphStageLogic(shape)
    with OutHandler with Control {

    setHandler(out, this)

    var consumer: Consumer[T] = _
    var receiveCallback: AsyncCallback[CommittableMessage[T]] = _

    override def preStart(): Unit = {
      implicit val context: ExecutionContext = super.materializer.executionContext
      consumer = create()
      seek foreach consumer.seek
      receiveCallback = getAsyncCallback(push(out, _))
    }

    override def onPull(): Unit = {
      implicit val context: ExecutionContext = super.materializer.executionContext
      logger.debug("Pull received; asking consumer for message")
      consumer.receiveAsync.onComplete {
        case Success(msg) =>
          logger.debug(s"Message received: $msg")
          receiveCallback.invoke(new CommittableMessage[T] {
            override def message: ConsumerMessage[T] = msg
            override def ack(cumulative: Boolean): Future[Done] = {
              logger.debug(s"Acknowledging message: $msg")
              val ackFuture = if (cumulative) {
                consumer.acknowledgeCumulativeAsync(msg.messageId)
              } else {
                consumer.acknowledgeAsync(msg.messageId)
              }
              ackFuture.map(_ => Done)
            }
            override def nack(): Future[Done] = {
              logger.debug(s"Negatively acknowledging message: $msg")
              consumer.negativeAcknowledgeAsync(msg.messageId).map(_ => Done)
            }
          })
        case Failure(e) =>
          logger.warn("Error when receiving message", e)
          failStage(e)
      }
    }

    override def stop(): Unit = completeStage()

    override def shutdown()(implicit ec: ExecutionContext): Future[Done] = {
      completeStage()
      consumer.closeAsync.map(_ => Done)
    }

    def stats: ConsumerStats = consumer.stats
  }

  override def createLogicAndMaterializedValue(inheritedAttributes: Attributes): (GraphStageLogic, Control) = {
    val logic = new PulsarCommittableSourceLogic(shape)
    (logic, logic)
  }
}
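A minimal stream wiring sketch (not from the project; the committableSource factory in the module's package object, the broker address, and the ConsumerConfig field names are assumptions), acking each message only after it has been processed:

import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.Sink
import com.sksamuel.pulsar4s.{ConsumerConfig, PulsarClient, Subscription, Topic}
import com.sksamuel.pulsar4s.akka.streams.committableSource // assumed factory in the package object
import org.apache.pulsar.client.api.Schema

import scala.concurrent.Future

object CommittableSourceUsage extends App {
  implicit val system: ActorSystem = ActorSystem()
  implicit val materializer: ActorMaterializer = ActorMaterializer()
  import system.dispatcher
  implicit val schema: Schema[String] = Schema.STRING

  val client = PulsarClient("pulsar://localhost:6650") // assumed broker address
  val consumerFn = () => client.consumer(
    ConsumerConfig(subscriptionName = Subscription("sub1"), topics = Seq(Topic("sample-topic"))))

  committableSource(consumerFn, seek = None)
    .mapAsync(1)(msg => Future(println(msg.message.value)).flatMap(_ => msg.ack()))
    .runWith(Sink.ignore)
}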
Example 134
Source File: PulsarSourceGraphStage.scala From pulsar4s with Apache License 2.0 | 5 votes |
package com.sksamuel.pulsar4s.akka.streams

import akka.Done
import akka.stream.Attributes
import akka.stream.Outlet
import akka.stream.SourceShape
import akka.stream.stage.AsyncCallback
import akka.stream.stage.GraphStageLogic
import akka.stream.stage.GraphStageWithMaterializedValue
import akka.stream.stage.OutHandler
import com.sksamuel.exts.Logging
import com.sksamuel.pulsar4s.Consumer
import com.sksamuel.pulsar4s.ConsumerMessage
import com.sksamuel.pulsar4s.MessageId
import org.apache.pulsar.client.api.ConsumerStats

import scala.concurrent.ExecutionContext
import scala.concurrent.Future
import scala.util.Failure
import scala.util.Success

trait Control {
  // stop() and shutdown() are restored here: both graph stage logics in these
  // examples override them, so the extract's trait declaration was missing them.
  def stop(): Unit
  def shutdown()(implicit ec: ExecutionContext): Future[Done]
  def stats: ConsumerStats
}

class PulsarSourceGraphStage[T](create: () => Consumer[T], seek: Option[MessageId])
  extends GraphStageWithMaterializedValue[SourceShape[ConsumerMessage[T]], Control]
    with Logging {

  private val out = Outlet[ConsumerMessage[T]]("pulsar.out")
  override def shape: SourceShape[ConsumerMessage[T]] = SourceShape(out)

  override def createLogicAndMaterializedValue(inheritedAttributes: Attributes): (GraphStageLogic, Control) = {
    val logic: GraphStageLogic with Control = new GraphStageLogic(shape) with OutHandler with Control {
      setHandler(out, this)

      var consumer: Consumer[T] = _
      var callback: AsyncCallback[ConsumerMessage[T]] = _

      override def preStart(): Unit = {
        consumer = create()
        seek foreach consumer.seek
        callback = getAsyncCallback(msg => push(out, msg))
      }

      override def onPull(): Unit = {
        implicit val context: ExecutionContext = super.materializer.executionContext
        logger.debug("Pull received; asking consumer for message")
        consumer.receiveAsync.onComplete {
          case Success(msg) =>
            logger.debug(s"Msg received $msg")
            callback.invoke(msg)
            consumer.acknowledge(msg.messageId)
          case Failure(e) =>
            logger.warn("Error when receiving message", e)
            failStage(e)
        }
      }

      override def stop(): Unit = completeStage()

      override def shutdown()(implicit ec: ExecutionContext): Future[Done] = {
        completeStage()
        consumer.closeAsync.map(_ => Done)
      }

      override def stats: ConsumerStats = consumer.stats
    }

    (logic, logic)
  }
}
Example 135
Source File: PulsarMultiSinkGraphStage.scala From pulsar4s with Apache License 2.0 | 5 votes |
package com.sksamuel.pulsar4s.akka.streams

import akka.Done
import akka.stream.stage.{AsyncCallback, GraphStageLogic, GraphStageWithMaterializedValue, InHandler}
import akka.stream.{Attributes, Inlet, SinkShape}
import com.sksamuel.exts.Logging
import com.sksamuel.pulsar4s.{Producer, ProducerMessage, Topic}

import scala.concurrent.duration._
import scala.concurrent.{Await, ExecutionContextExecutor, Future, Promise}
import scala.util.{Failure, Success}

class PulsarMultiSinkGraphStage[T](createFn: Topic => Producer[T], initTopics: Set[Topic] = Set.empty)
  extends GraphStageWithMaterializedValue[SinkShape[(Topic, ProducerMessage[T])], Future[Done]]
    with Logging {

  private val in = Inlet.create[(Topic, ProducerMessage[T])]("pulsar.in")

  override def shape: SinkShape[(Topic, ProducerMessage[T])] = SinkShape.of(in)

  override def createLogicAndMaterializedValue(inheritedAttributes: Attributes): (GraphStageLogic, Future[Done]) = {
    val promise = Promise[Done]()

    val logic: GraphStageLogic = new GraphStageLogic(shape) with InHandler {
      setHandler(in, this)

      implicit def context: ExecutionContextExecutor = super.materializer.executionContext

      var producers: Map[Topic, Producer[T]] = _
      var next: AsyncCallback[(Topic, ProducerMessage[T])] = _
      var error: Throwable = _

      override def preStart(): Unit = {
        producers = initTopics.map(t => t -> createFn(t)).toMap
        next = getAsyncCallback { _ => pull(in) }
        pull(in)
      }

      private def getProducer(topic: Topic): Producer[T] = producers.get(topic) match {
        case Some(p) => p
        case None =>
          logger.debug(s"creating new producer for topic $topic")
          val producer = createFn(topic)
          producers += topic -> producer
          producer
      }

      override def onPush(): Unit = {
        try {
          val (topic, message) = grab(in)
          logger.debug(s"Sending message $message to $topic")
          val producer = getProducer(topic)
          producer.sendAsync(message).onComplete {
            case Success(_) => next.invoke(topic -> message)
            case Failure(e) =>
              logger.error("Failing pulsar sink stage", e)
              failStage(e)
          }
        } catch {
          case e: Throwable =>
            logger.error("Failing pulsar sink stage", e)
            failStage(e)
        }
      }

      override def postStop(): Unit = {
        logger.debug("Graph stage stopping; closing producers")
        val fs = producers.flatMap { case (_, p) =>
          Seq(
            p.flushAsync,
            p.closeAsync
          )
        }
        Await.ready(Future.sequence(fs), 15.seconds)
      }

      override def onUpstreamFailure(ex: Throwable): Unit = {
        promise.tryFailure(ex)
      }

      override def onUpstreamFinish(): Unit = {
        promise.trySuccess(Done)
      }
    }

    (logic, promise.future)
  }
}
Example 136
Source File: PulsarSinkGraphStage.scala From pulsar4s with Apache License 2.0 | 5 votes |
package com.sksamuel.pulsar4s.akka.streams

import akka.Done
import akka.stream.stage.{AsyncCallback, GraphStageLogic, GraphStageWithMaterializedValue, InHandler}
import akka.stream.{Attributes, Inlet, SinkShape}
import com.sksamuel.exts.Logging
import com.sksamuel.pulsar4s.{Producer, ProducerMessage}

import scala.concurrent.{ExecutionContextExecutor, Future, Promise}
import scala.util.{Failure, Success}

class PulsarSinkGraphStage[T](createFn: () => Producer[T])
  extends GraphStageWithMaterializedValue[SinkShape[ProducerMessage[T]], Future[Done]]
    with Logging {

  private val in = Inlet.create[ProducerMessage[T]]("pulsar.in")

  override def shape: SinkShape[ProducerMessage[T]] = SinkShape.of(in)

  override def createLogicAndMaterializedValue(inheritedAttributes: Attributes): (GraphStageLogic, Future[Done]) = {
    val promise = Promise[Done]()

    val logic: GraphStageLogic = new GraphStageLogic(shape) with InHandler {
      setHandler(in, this)

      implicit def context: ExecutionContextExecutor = super.materializer.executionContext

      var producer: Producer[T] = _
      var next: AsyncCallback[ProducerMessage[T]] = _
      var error: Throwable = _

      override def preStart(): Unit = {
        producer = createFn()
        next = getAsyncCallback { _ => pull(in) }
        pull(in)
      }

      override def onPush(): Unit = {
        try {
          val t = grab(in)
          logger.debug(s"Sending message $t")
          producer.sendAsync(t).onComplete {
            case Success(_) => next.invoke(t)
            case Failure(e) =>
              logger.error("Failing pulsar sink stage", e)
              failStage(e)
          }
        } catch {
          case e: Throwable =>
            logger.error("Failing pulsar sink stage", e)
            failStage(e)
        }
      }

      override def postStop(): Unit = {
        logger.debug("Graph stage stopping; closing producer")
        producer.flush()
        producer.close()
      }

      override def onUpstreamFailure(ex: Throwable): Unit = {
        promise.tryFailure(ex)
      }

      override def onUpstreamFinish(): Unit = {
        promise.trySuccess(Done)
      }
    }

    (logic, promise.future)
  }
}
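A minimal wiring sketch for the sink (not from the project; the sink factory in the module's package object, the ProducerMessage companion apply, and the broker address are assumptions):

import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.Source
import com.sksamuel.pulsar4s.{ProducerConfig, ProducerMessage, PulsarClient, Topic}
import com.sksamuel.pulsar4s.akka.streams.sink // assumed factory in the package object
import org.apache.pulsar.client.api.Schema

object SinkUsage extends App {
  implicit val system: ActorSystem = ActorSystem()
  implicit val materializer: ActorMaterializer = ActorMaterializer()
  implicit val schema: Schema[String] = Schema.STRING

  val client = PulsarClient("pulsar://localhost:6650") // assumed broker address
  val producerFn = () => client.producer(ProducerConfig(Topic("sample-topic")))

  Source(List("a", "b", "c"))
    .map(s => ProducerMessage(s))
    .runWith(sink(producerFn)) // materializes a Future[Done] that completes when the stream finishes
}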
Example 137
Source File: ConvertProcessorsToOdinson.scala From odinson with Apache License 2.0 | 5 votes |
package ai.lum.odinson.extra

import java.io.File

import scala.util.{ Try, Success, Failure }
import com.typesafe.scalalogging.LazyLogging
import org.clulab.serialization.json.JSONSerializer
import ai.lum.common.ConfigUtils._
import ai.lum.common.ConfigFactory
import ai.lum.common.FileUtils._
import ai.lum.odinson.Document

object ConvertProcessorsToOdinson extends App with LazyLogging {

  val config = ConfigFactory.load()
  val docsDir = config[File]("odinson.docsDir")
  val procDir = config[File]("odinson.procDir")

  logger.info(s"processors documents at $procDir")
  logger.info(s"odinson documents at $docsDir")

  for (f <- procDir.listFilesByWildcard("*.json", recursive = true).par) {
    Try {
      val newFile = new File(docsDir, f.getName + ".gz")
      val processorsDoc = JSONSerializer.toDocument(f)
      val odinsonDoc = ProcessorsUtils.convertDocument(processorsDoc)
      newFile.writeString(odinsonDoc.toJson)
    } match {
      case Success(_) => logger.info(s"converted ${f.getName}")
      case Failure(e) => logger.error(s"failed to convert ${f.getName}", e)
    }
  }
}
Example 138
Source File: IndexDocuments.scala From odinson with Apache License 2.0 | 5 votes |
package ai.lum.odinson.extra

import java.io._

import scala.util.{ Try, Success, Failure }
import com.typesafe.scalalogging.LazyLogging
import ai.lum.common.ConfigFactory
import ai.lum.common.ConfigUtils._
import ai.lum.common.FileUtils._
import ai.lum.odinson.{ Document, Field, OdinsonIndexWriter, StringField }

import scala.collection.GenIterable

object IndexDocuments extends App with LazyLogging {

  val config = ConfigFactory.load()
  val docsDir = config[File]("odinson.docsDir")
  val synchronizeOrderWithDocumentId = config[Boolean]("odinson.index.synchronizeOrderWithDocumentId")
  val writer = OdinsonIndexWriter.fromConfig()
  val wildcards = Seq("*.json", "*.json.gz")

  logger.info(s"Gathering documents from $docsDir")

  val documentFiles =
    if (synchronizeOrderWithDocumentId) {
      // files ordered by the id of the document
      docsDir
        .listFilesByWildcards(wildcards, recursive = true)
        .map(f => (Document.fromJson(f).id.toInt, f))
        .toSeq
        .sortBy(_._1)
        .map(_._2)
    } else {
      docsDir
        .listFilesByWildcards(wildcards, recursive = true)
        .par
    }

  logger.info("Indexing documents")
  indexDocuments(writer, documentFiles)
  writer.close
  // fin

  // Note that documentFiles may or may not be parallel, hence the GenIterable
  def indexDocuments(writer: OdinsonIndexWriter, documentFiles: GenIterable[File]): Unit = {
    // index documents
    for (f <- documentFiles) {
      Try {
        val origDoc = Document.fromJson(f)
        // keep track of file name to retrieve sentence JSON,
        // but ignore the path to the docs directory to avoid issues encountered when moving `odinson.dataDir`.
        // NOTE: this assumes all files are located immediately under `odinson.docsDir`.
        // With large document collections, it may be necessary to split documents across many subdirectories
        // to avoid performance issues and limitations of certain file systems (ex. FAT32, ext2, etc.)
        val fileField: Field = StringField(name = "fileName", string = f.getName, store = true)
        val doc = origDoc.copy(metadata = origDoc.metadata ++ Seq(fileField))
        val block = writer.mkDocumentBlock(doc)
        writer.addDocuments(block)
      } match {
        case Success(_) => logger.info(s"Indexed ${f.getName}")
        case Failure(e) => logger.error(s"Failed to index ${f.getName}", e)
      }
    }
  }
}
Example 139
Source File: AnnotateText.scala From odinson with Apache License 2.0 | 5 votes |
package ai.lum.odinson.extra

import java.io.File

import scala.util.{ Failure, Success, Try }
import com.typesafe.scalalogging.LazyLogging
import org.clulab.processors.Processor
import org.clulab.processors.clu.{ BioCluProcessor, CluProcessor }
import org.clulab.processors.fastnlp.FastNLPProcessor
import ai.lum.common.FileUtils._
import ai.lum.common.ConfigUtils._
import ai.lum.common.ConfigFactory
import ai.lum.odinson.Document

object AnnotateText extends App with LazyLogging {

  val config = ConfigFactory.load()
  val textDir: File = config[File]("odinson.textDir")
  val docsDir: File = config[File]("odinson.docsDir")
  val processorType = config[String]("odinson.extra.processorType")

  val processor: Processor = processorType match {
    case "FastNLPProcessor" => new FastNLPProcessor
    case "CluProcessor" => new CluProcessor
    case "BioCluProcessor" => new BioCluProcessor
  }

  // create output directory if it does not exist
  if (!docsDir.exists) {
    logger.warn(s"Making directory $docsDir")
    docsDir.mkdirs()
  }

  processor.annotate("this") // load all required models

  def annotateTextFile(f: File): Document = {
    val text = f.readString()
    val doc = processor.annotate(text)
    // use file base name as document id
    doc.id = Some(f.getBaseName())
    ProcessorsUtils.convertDocument(doc)
  }

  // NOTE parses the documents in parallel
  for (f <- textDir.listFilesByWildcard("*.txt", caseInsensitive = true, recursive = true).par) {
    val docFile = new File(docsDir, f.getBaseName() + ".json.gz")
    if (docFile.exists) {
      logger.warn(s"${docFile.getCanonicalPath} already exists")
    } else {
      Try {
        val doc = annotateTextFile(f)
        docFile.writeString(doc.toJson)
      } match {
        case Success(_) => logger.info(s"Annotated ${f.getCanonicalPath}")
        case Failure(e) => logger.error(s"Failed to process ${f.getName}", e)
      }
    }
  }
}
Example 140
Source File: TryRDDFunctions.scala From spark-additions with Apache License 2.0 | 5 votes |
package it.nerdammer.spark.additions.tryfunctions

import org.apache.spark.rdd.RDD

import scala.collection.mutable
import scala.reflect.ClassTag
import scala.util.{Failure, Success, Try}

// NOTE: this extract is partial; the declaration of the enclosing class (which
// provides `rdd` and the `retrieveAccumulator` helper) was dropped from the source.

  def tryFlatMap[U](f: (T) ⇒ TraversableOnce[U],
                    accumulatorName: String = TryRDDAccumulatorHolder.ExceptionAccumulatorDefaultName)
                   (implicit ct: ClassTag[U]): RDD[U] = {
    val accumulator = retrieveAccumulator(accumulatorName)
    rdd.flatMap(e => {
      val fe = Try { f(e) }
      val trial = fe match {
        case Failure(t) =>
          accumulator += (e, t)
          Nil
        case Success(r) => r
      }
      trial
    })
  }
}
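A minimal usage sketch (not from the project; it assumes the library's package-object import wires tryFlatMap onto RDDs): failing elements are diverted to the exception accumulator instead of failing the job:

import it.nerdammer.spark.additions._ // assumed entry point for the implicit conversion
import org.apache.spark.{SparkConf, SparkContext}

object TryFlatMapUsage extends App {
  val sc = new SparkContext(new SparkConf().setMaster("local[2]").setAppName("try-flat-map"))
  val parsed = sc.parallelize(Seq("1", "2", "boom", "4")).tryFlatMap(s => Seq(s.toInt))
  println(parsed.collect().toSeq) // "boom" lands in the accumulator: List(1, 2, 4)
  sc.stop()
}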
Example 141
Source File: KafkaSecurityManager.scala From kafka-security-manager with MIT License | 5 votes |
package com.github.simplesteph.ksm

import java.util.concurrent.atomic.AtomicBoolean

import com.github.simplesteph.ksm.grpc.KsmGrpcServer
import com.github.simplesteph.ksm.parser.CsvAclParser
import com.typesafe.config.ConfigFactory
import org.slf4j.LoggerFactory

import scala.util.{Failure, Success, Try}
import java.util.concurrent.{
  ExecutionException,
  Executors,
  ScheduledExecutorService,
  TimeUnit
}

object KafkaSecurityManager extends App {

  val log = LoggerFactory.getLogger(KafkaSecurityManager.getClass)

  val config = ConfigFactory.load()
  val appConfig: AppConfig = new AppConfig(config)

  var isCancelled: AtomicBoolean = new AtomicBoolean(false)
  var grpcServer: KsmGrpcServer = _
  var aclSynchronizer: AclSynchronizer = _
  val aclParser = new CsvAclParser(appConfig.Parser.csvDelimiter)
  val scheduler: ScheduledExecutorService = Executors.newScheduledThreadPool(1)

  if (appConfig.KSM.extract) {
    new ExtractAcl(appConfig.Authorizer.authorizer, aclParser).extract()
  } else {
    aclSynchronizer = new AclSynchronizer(
      appConfig.Authorizer.authorizer,
      appConfig.Source.sourceAcl,
      appConfig.Notification.notification,
      aclParser,
      appConfig.KSM.readOnly
    )

    Try {
      grpcServer = new KsmGrpcServer(
        aclSynchronizer,
        appConfig.GRPC.port,
        appConfig.GRPC.gatewayPort,
        appConfig.Feature.grpc
      )
      grpcServer.start()
    } match {
      case Success(_) =>
      case Failure(e) =>
        log.error("gRPC Server failed to start", e)
        shutdown()
    }

    Runtime.getRuntime.addShutdownHook(new Thread() {
      override def run(): Unit = {
        log.info("Received stop signal")
        shutdown()
      }
    })

    try {
      // If appConfig.KSM.refreshFrequencyMs is less than or equal to 0, the aclSynchronizer runs just once.
      if (appConfig.KSM.refreshFrequencyMs <= 0) {
        log.info("Single run mode: ACL will be synchronized once.")
        aclSynchronizer.run()
      } else {
        log.info(
          "Continuous mode: ACL will be synchronized every " + appConfig.KSM.refreshFrequencyMs + " ms."
        )
        val handle = scheduler.scheduleAtFixedRate(
          aclSynchronizer,
          0,
          appConfig.KSM.refreshFrequencyMs,
          TimeUnit.MILLISECONDS
        )
        handle.get
      }
    } catch {
      case e: ExecutionException =>
        log.error("unexpected exception", e)
    } finally {
      shutdown()
    }
  }

  def shutdown(): Unit = {
    log.info("Kafka Security Manager is shutting down...")
    isCancelled = new AtomicBoolean(true)
    aclSynchronizer.close()
    grpcServer.stop()
    scheduler.shutdownNow()
  }
}
Example 142
Source File: ConsoleNotification.scala From kafka-security-manager with MIT License | 5 votes |
package com.github.simplesteph.ksm.notification

import com.github.simplesteph.ksm.parser.CsvParserException
import com.typesafe.config.Config
import kafka.security.auth.{Acl, Resource}
import org.slf4j.{Logger, LoggerFactory}

import scala.util.{Failure, Success, Try}

case class ConsoleNotification() extends Notification {

  val log: Logger = LoggerFactory.getLogger(classOf[ConsoleNotification].getSimpleName)

  override def configure(config: Config): Unit = ()

  override def notifyErrors(errs: List[Try[Throwable]]): Unit = {
    errs.foreach {
      case Failure(cPE: CsvParserException) =>
        log.error(s"${cPE.getLocalizedMessage} | Row: ${cPE.printRow()}")
      case Success(t) => log.error("refresh exception", t)
      case Failure(t) => log.error("refresh exception", t)
    }
  }

  override protected def notifyOne(action: String, acls: Set[(Resource, Acl)]): Unit = {
    if (acls.nonEmpty) {
      acls.foreach {
        case (resource, acl) =>
          val message = Notification.printAcl(acl, resource)
          log.info(s"$action $message")
      }
    }
  }

  override def close(): Unit = ()
}
Example 143
Source File: SlackNotification.scala From kafka-security-manager with MIT License | 5 votes |
package com.github.simplesteph.ksm.notification

import com.fasterxml.jackson.databind.ObjectMapper
import com.github.simplesteph.ksm.parser.CsvParserException
import com.typesafe.config.Config
import kafka.security.auth.{Acl, Resource}
import org.slf4j.LoggerFactory
import skinny.http.HTTP

import scala.util.{Failure, Success, Try}

class SlackNotification extends Notification {

  // NOTE: the declarations below were dropped by this extract and are reconstructed
  // for completeness; the original config key strings may differ.
  private val log = LoggerFactory.getLogger(classOf[SlackNotification].getSimpleName)
  private val objectMapper = new ObjectMapper()

  val WEBHOOK_CONFIG = "webhook"
  val USERNAME_CONFIG = "username"
  val ICON_CONFIG = "icon"
  val CHANNEL_CONFIG = "channel"

  var webhook: String = _
  var username: String = _
  var icon: String = _
  var channel: String = _

  override def configure(config: Config): Unit = {
    webhook = config.getString(WEBHOOK_CONFIG)
    username = config.getString(USERNAME_CONFIG)
    icon = config.getString(ICON_CONFIG)
    channel = config.getString(CHANNEL_CONFIG)
  }

  override def notifyOne(action: String, acls: Set[(Resource, Acl)]): Unit = {
    if (acls.nonEmpty) {
      val messages = acls.map {
        case (resource, acl) =>
          val message = Notification.printAcl(acl, resource)
          s"$action $message"
      }.toList
      sendToSlack(messages)
    }
  }

  def sendToSlack(messages: List[String], retries: Int = 5): Unit = {
    if (retries > 0) {
      messages
        .grouped(50)
        .foreach(msgChunks => {
          val text =
            s"""```
               |${msgChunks.mkString("\n")}
               |```
             """.stripMargin

          val payload = objectMapper
            .createObjectNode()
            .put("text", text)
            .put("username", username)
            .put("icon_url", icon)
            .put("channel", channel)

          val response = HTTP.post(webhook, payload.toString)
          response.status match {
            case 200 => ()
            case _ =>
              log.warn(response.asString)
              if (retries > 1) log.warn("Retrying...")
              Thread.sleep(300)
              sendToSlack(msgChunks, retries - 1)
          }
        })
    } else {
      log.error("Can't send notification to Slack after retries")
    }
  }

  override def notifyErrors(errs: List[Try[Throwable]]): Unit = {
    val messages = errs.map {
      case Failure(cPE: CsvParserException) =>
        s"${cPE.getLocalizedMessage} | Row: ${cPE.printRow()}"
      case Success(t) => s"refresh exception: ${t.getLocalizedMessage}"
      case Failure(t) => s"refresh exception: ${t.getLocalizedMessage}"
    }
    sendToSlack(messages)
  }

  override def close(): Unit = {}
}
Example 144
Source File: UISuite.scala From BigDatalog with Apache License 2.0 | 5 votes |
package org.apache.spark.ui

import java.net.ServerSocket

import scala.io.Source
import scala.util.{Failure, Success, Try}

import org.eclipse.jetty.servlet.ServletContextHandler
import org.scalatest.concurrent.Eventually._
import org.scalatest.time.SpanSugar._

import org.apache.spark.LocalSparkContext._
import org.apache.spark.{SparkConf, SparkContext, SparkFunSuite}

class UISuite extends SparkFunSuite {

  private def newSparkContext(): SparkContext = {
    val conf = new SparkConf()
      .setMaster("local")
      .setAppName("test")
      .set("spark.ui.enabled", "true")
    val sc = new SparkContext(conf)
    assert(sc.ui.isDefined)
    sc
  }

  ignore("basic ui visibility") {
    withSpark(newSparkContext()) { sc =>
      // test if the ui is visible, and all the expected tabs are visible
      eventually(timeout(10 seconds), interval(50 milliseconds)) {
        val html = Source.fromURL(sc.ui.get.appUIAddress).mkString
        assert(!html.contains("random data that should not be present"))
        assert(html.toLowerCase.contains("stages"))
        assert(html.toLowerCase.contains("storage"))
        assert(html.toLowerCase.contains("environment"))
        assert(html.toLowerCase.contains("executors"))
      }
    }
  }

  ignore("visibility at localhost:4040") {
    withSpark(newSparkContext()) { sc =>
      // test if visible from http://localhost:4040
      eventually(timeout(10 seconds), interval(50 milliseconds)) {
        val html = Source.fromURL("http://localhost:4040").mkString
        assert(html.toLowerCase.contains("stages"))
      }
    }
  }

  test("jetty selects different port under contention") {
    val server = new ServerSocket(0)
    val startPort = server.getLocalPort

    val serverInfo1 = JettyUtils.startJettyServer(
      "0.0.0.0", startPort, Seq[ServletContextHandler](), new SparkConf)
    val serverInfo2 = JettyUtils.startJettyServer(
      "0.0.0.0", startPort, Seq[ServletContextHandler](), new SparkConf)

    // Allow some wiggle room in case ports on the machine are under contention
    val boundPort1 = serverInfo1.boundPort
    val boundPort2 = serverInfo2.boundPort
    assert(boundPort1 != startPort)
    assert(boundPort2 != startPort)
    assert(boundPort1 != boundPort2)

    serverInfo1.server.stop()
    serverInfo2.server.stop()
    server.close()
  }

  test("jetty binds to port 0 correctly") {
    val serverInfo = JettyUtils.startJettyServer(
      "0.0.0.0", 0, Seq[ServletContextHandler](), new SparkConf)
    val server = serverInfo.server
    val boundPort = serverInfo.boundPort
    assert(server.getState === "STARTED")
    assert(boundPort != 0)
    Try { new ServerSocket(boundPort) } match {
      case Success(s) => fail("Port %s doesn't seem used by jetty server".format(boundPort))
      case Failure(e) =>
    }
  }

  test("verify appUIAddress contains the scheme") {
    withSpark(newSparkContext()) { sc =>
      val ui = sc.ui.get
      val uiAddress = ui.appUIAddress
      val uiHostPort = ui.appUIHostPort
      assert(uiAddress.equals("http://" + uiHostPort))
    }
  }

  test("verify appUIAddress contains the port") {
    withSpark(newSparkContext()) { sc =>
      val ui = sc.ui.get
      val splitUIAddress = ui.appUIAddress.split(':')
      val boundPort = ui.boundPort
      assert(splitUIAddress(2).toInt == boundPort)
    }
  }
}
Example 145
Source File: Image.scala From gospeak with Apache License 2.0 | 5 votes |
package gospeak.libs.scala.domain

import gospeak.libs.scala.Crypto
import gospeak.libs.scala.Extensions._

import scala.util.{Failure, Success, Try}

abstract class Image(val url: Url) {
  def value: String = url.value

  def thumbnail: String = transform(Seq("w_50", "c_scale"))

  def isCloudinary: Boolean = Image.CloudinaryUrl.parse(url.value).isSuccess

  def isAdorable: Boolean = Image.AdorableUrl.parse(url.value).isSuccess

  def isGravatar: Boolean = Image.GravatarUrl.parse(url.value).isSuccess

  def isDefault: Boolean = isAdorable || isGravatar

  private def transform(transformations: Seq[String]*): String =
    Image.CloudinaryUrl.parse(url.value).map(_.transform(transformations: _*).value).getOrElse(url.value)
}

object Image {

  final case class CloudinaryUrl(cloudName: String,
                                 resource: String,
                                 kind: String,
                                 transformations: Seq[Seq[String]],
                                 version: Option[Long],
                                 publicId: String,
                                 format: String) {
    def value: String = {
      val txs = transformations.map("/" + _.mkString(",")).mkString
      val v = version.map("/v" + _).getOrElse("")
      s"https://res.cloudinary.com/$cloudName/$resource/$kind$txs$v/$publicId.$format"
    }

    def toUrl: Url = Url.from(value).get

    def transform(txs: Seq[String]*): CloudinaryUrl = copy(transformations = transformations ++ txs)
  }

  object CloudinaryUrl {
    private val cloudinaryRegex =
      "https://res.cloudinary.com/([^/]+)/([^/]+)/([^/]+)((?:/[a-z]{1,3}_[^/]+)*)(?:/v([0-9]{10}))?/([^.]+)\\.([a-z]+)".r

    def parse(url: String): Try[CloudinaryUrl] = url match {
      case cloudinaryRegex(cloudName, resource, kind, transformations, version, id, format) =>
        val txs = Option(transformations).filter(_.nonEmpty)
          .map(_.stripPrefix("/").split("/").toSeq.map(_.split(",").toSeq)).getOrElse(Seq())
        Option(version).map(v => Try(v.toLong)).sequence.map { v =>
          CloudinaryUrl(cloudName, resource, kind, txs, v, id, format)
        }
      case _ => Failure(new IllegalArgumentException(s"Unable to parse '$url' as Image.CloudinaryUrl"))
    }
  }

  final case class AdorableUrl(hash: String, size: Option[Int]) {
    def value: String = s"https://api.adorable.io/avatars${size.map("/" + _).getOrElse("")}/$hash.png"

    def toUrl: Url = Url.from(value).get
  }

  object AdorableUrl {
    private val adorableRegex = "https://api.adorable.io/avatars(?:/([0-9]+))?/([^/]*).png".r

    def parse(url: String): Try[AdorableUrl] = url match {
      case adorableRegex(size, hash) => Option(size).map(s => Try(s.toInt)).sequence.map(s => AdorableUrl(hash, s))
      case _ => Failure(new IllegalArgumentException(s"Unable to parse '$url' as Image.AdorableUrl"))
    }
  }

  final case class GravatarUrl(hash: String, params: Seq[(String, String)]) {
    def value: String = {
      val queryParams = params.map { case (key, value) => s"$key=$value" }.mkString("&")
      s"https://secure.gravatar.com/avatar/$hash" + Some(queryParams).filter(_.nonEmpty).map("?" + _).getOrElse("")
    }

    def toUrl: Url = Url.from(value).get
  }

  object GravatarUrl {
    private val gravatarRegex = "https://secure.gravatar.com/avatar/([0-9a-f]{32})(\\?.*)?".r

    def apply(email: EmailAddress, params: Seq[(String, String)]): GravatarUrl =
      GravatarUrl(Crypto.md5(email.value.trim.toLowerCase), params)

    def parse(url: String): Try[GravatarUrl] = url match {
      case gravatarRegex(hash, queryParams) =>
        val params = Option(queryParams)
          .map(_.stripPrefix("?").split("&").toSeq).getOrElse(Seq())
          .map(p => p.splitAt(p.indexOf("=")))
          .map { case (key, value) => (key, value.stripPrefix("=")) }
        Success(GravatarUrl(hash, params))
      case _ => Failure(new IllegalArgumentException(s"Unable to parse '$url' as Image.GravatarUrl"))
    }
  }
}
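A small sketch showing one of the URL parsers at work (the hash and query values here are made up; the parse logic follows directly from the regex above):

import gospeak.libs.scala.domain.Image

object ImageUrlDemo extends App {
  val url = "https://secure.gravatar.com/avatar/00000000000000000000000000000000?s=100&d=mp"
  // Success(GravatarUrl(00000000000000000000000000000000, List((s,100), (d,mp))))
  println(Image.GravatarUrl.parse(url))
}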
Example 146
Source File: ExtensionsSpec.scala From gospeak with Apache License 2.0 | 5 votes |
package gospeak.libs.scala

import gospeak.libs.scala.Extensions._
import gospeak.libs.scala.domain.CustomException
import gospeak.libs.testingutils.BaseSpec

import scala.util.{Failure, Success}

class ExtensionsSpec extends BaseSpec {
  describe("Extensions") {
    describe("TraversableOnceExtension") {
      describe("swap") {
        val list = Seq(1, 2, 3, 4, 5)
        describe("before") {
          it("should move an element one place before") {
            list.swap(elt = 3) shouldBe Seq(1, 3, 2, 4, 5)
          }
          it("should do nothing for the first element") {
            list.swap(elt = 1) shouldBe Seq(1, 2, 3, 4, 5)
          }
          it("should work for the last element") {
            list.swap(elt = 5) shouldBe Seq(1, 2, 3, 5, 4)
          }
          it("should do nothing if element is not found") {
            list.swap(elt = 6) shouldBe Seq(1, 2, 3, 4, 5)
          }
          it("should move every element that match") {
            val list = Seq(1, 2, 3, 2, 2, 3)
            list.swap(elt = 2) shouldBe Seq(2, 1, 2, 3, 2, 3)
          }
        }
        describe("after") {
          it("should move an element one place after") {
            list.swap(elt = 3, before = false) shouldBe Seq(1, 2, 4, 3, 5)
          }
          it("should do nothing for the last element") {
            list.swap(elt = 5, before = false) shouldBe Seq(1, 2, 3, 4, 5)
          }
          it("should work for the first element") {
            list.swap(elt = 1, before = false) shouldBe Seq(2, 1, 3, 4, 5)
          }
          it("should do nothing if element is not found") {
            list.swap(elt = 6, before = false) shouldBe Seq(1, 2, 3, 4, 5)
          }
          it("should move every element that match") {
            val list = Seq(1, 2, 3, 2, 2, 3)
            list.swap(elt = 2, before = false) shouldBe Seq(1, 3, 2, 2, 2, 3)
          }
        }
      }
    }
    describe("TraversableOnceOptionExtension") {
      it("should invert Seq & Option with sequence") {
        Seq(Option(1), Option(2)).sequence shouldBe Option(Seq(1, 2))
        Seq(Option(1), Option(2), None).sequence shouldBe None
      }
    }
    describe("MapOptionExtension") {
      it("should invert Map & Option with sequence") {
        Map(1 -> Option(1), 2 -> Option(2)).sequence shouldBe Option(Map(1 -> 1, 2 -> 2))
        Map(1 -> Option(1), 2 -> Option(2), 3 -> None).sequence shouldBe None
      }
    }
    describe("OptionExtension") {
      describe("toTry") {
        it("should convert an Option to a Try") {
          val e = CustomException("")
          Some(1).toTry(e) shouldBe Success(1)
          None.toTry(e) shouldBe Failure(e)
        }
      }
    }
  }
}
Example 147
Source File: ResponseFactory.scala From akka-http-circe-json-template with Apache License 2.0 | 5 votes |
package com.vitorsvieira.http.routes

import akka.http.scaladsl.marshalling.ToResponseMarshallable
import akka.http.scaladsl.model.StatusCodes._
import akka.http.scaladsl.server.Directives._
import akka.http.scaladsl.server._
import com.vitorsvieira.http.config.ServerSettingsTemplate
import com.vitorsvieira.http.model.{ ApiMessage, ApiStatusMessages }
import de.heikoseeberger.akkahttpcirce.CirceSupport._

import scala.concurrent.Future
import scala.util.{ Failure, Success }

trait ResponseFactory {

  import ServerSettingsTemplate._
  import io.circe.generic.auto._

  def sendResponse[T](eventualResult: Future[T])(implicit marshaller: T ⇒ ToResponseMarshallable): Route = {
    onComplete(eventualResult) {
      case Success(result) ⇒
        complete(result)
      case Failure(e) ⇒
        log.error(s"Error: ${e.toString}")
        complete(ToResponseMarshallable(InternalServerError → ApiMessage(ApiStatusMessages.unknownException)))
    }
  }
}
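A minimal sketch of wiring the trait into a route (the User type and lookup function are illustrative, not from the project):

import akka.http.scaladsl.server.Directives._
import akka.http.scaladsl.server.Route
import de.heikoseeberger.akkahttpcirce.CirceSupport._
import io.circe.generic.auto._

import scala.concurrent.Future

final case class User(name: String) // illustrative payload type

object UserRoutes extends ResponseFactory {
  def findUser(name: String): Future[User] = Future.successful(User(name)) // stand-in lookup

  val route: Route = path("users" / Segment) { name =>
    get {
      sendResponse(findUser(name)) // 200 with circe JSON on success, 500 + ApiMessage on failure
    }
  }
}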
Example 148
Source File: MetadataLoaderGroup.scala From sbt-dependency-updates with Apache License 2.0 | 5 votes |
package org.jmotor.sbt.metadata

import org.apache.maven.artifact.versioning.ArtifactVersion
import org.jmotor.artifact.exception.ArtifactNotFoundException
import org.jmotor.artifact.metadata.MetadataLoader
import org.jmotor.artifact.metadata.loader.IvyPatternsMetadataLoader
import org.jmotor.sbt.concurrent.MultiFuture
import sbt.librarymanagement.Constant
import sbt.librarymanagement.Patch
import sbt.librarymanagement.{ Binary, Disabled, Full, ModuleID }

import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.{ ExecutionContext, Future, Promise }
import scala.util.{ Failure, Success }

class MetadataLoaderGroup(scalaVersion: String, scalaBinaryVersion: String, loaders: Seq[MetadataLoader]) {

  def getVersions(module: ModuleID, sbtSettings: Option[(String, String)]): Future[Seq[ArtifactVersion]] = {
    if (loaders.lengthCompare(1) > 0) {
      firstCompletedOf(loaders.map { loader ⇒
        val (artifactId, attrs) = getArtifactIdAndAttrs(loader, module, sbtSettings)
        loader.getVersions(module.organization, artifactId, attrs)
      })
    } else {
      loaders.headOption.fold(Future.successful(Seq.empty[ArtifactVersion])) { loader ⇒
        val (artifactId, attrs) = getArtifactIdAndAttrs(loader, module, sbtSettings)
        loader.getVersions(module.organization, artifactId, attrs)
      }
    }
  }

  private[metadata] def firstCompletedOf(futures: TraversableOnce[Future[Seq[ArtifactVersion]]])
                                        (implicit executor: ExecutionContext): Future[Seq[ArtifactVersion]] = {
    val p = Promise[Seq[ArtifactVersion]]()
    val multiFuture = new MultiFuture[Seq[ArtifactVersion]](p, futures.size, Seq.empty)
    futures foreach { future ⇒
      future.onComplete {
        case Success(r) if r.nonEmpty              ⇒ p trySuccess r
        case Success(_)                            ⇒ multiFuture.tryComplete()
        case Failure(_: ArtifactNotFoundException) ⇒ multiFuture.tryComplete()
        case Failure(t)                            ⇒ multiFuture.tryComplete(t)
      }(scala.concurrent.ExecutionContext.Implicits.global)
    }
    p.future
  }

  private[metadata] def getArtifactIdAndAttrs(loader: MetadataLoader, module: ModuleID,
                                              sbtSettings: Option[(String, String)]): (String, Map[String, String]) = {
    val remapVersion = module.crossVersion match {
      case _: Disabled        ⇒ None
      case _: Binary          ⇒ Option(scalaBinaryVersion)
      case _: Full            ⇒ Option(scalaVersion)
      case _: Patch           ⇒ Option(scalaVersion)
      case constant: Constant ⇒ Option(constant.value)
      case _                  ⇒ None
    }
    val name = remapVersion.map(v ⇒ s"${module.name}_$v").getOrElse(module.name)
    loader match {
      case _: IvyPatternsMetadataLoader if sbtSettings.isDefined ⇒
        val settings = sbtSettings.get
        name -> Map("sbtVersion" -> settings._1, "scalaVersion" -> settings._2)
      case _ ⇒ name -> Map.empty
    }
  }
}

object MetadataLoaderGroup {
  def apply(scalaVersion: String, scalaBinaryVersion: String, loaders: MetadataLoader*): MetadataLoaderGroup = {
    new MetadataLoaderGroup(scalaVersion, scalaBinaryVersion, loaders)
  }
}
Example 149
Source File: Reporter.scala From sbt-dependency-updates with Apache License 2.0 | 5 votes |
package org.jmotor.sbt

import java.nio.file.{ Files, Path, Paths }

import org.jmotor.sbt.dto.ModuleStatus
import org.jmotor.sbt.parser.PluginParser
import org.jmotor.sbt.service.VersionService
import sbt.{ ModuleID, ResolvedProject }

import scala.collection.JavaConverters._
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.Future
import scala.util.{ Failure, Success, Try }

class Reporter(versionService: VersionService) {

  def dependencyUpdates(dependencies: Seq[ModuleID]): Future[Seq[ModuleStatus]] = {
    Future.traverse(dependencies)(versionService.checkForUpdates).map(_.sortBy(_.status.id))
  }

  def pluginUpdates(sbtBinaryVersion: String, project: ResolvedProject): Future[Seq[ModuleStatus]] = {
    val dir = Paths.get(project.base.getPath, "project")
    val sbtScalaBinaryVersion = getSbtScalaBinaryVersion(sbtBinaryVersion)
    Future.traverse(plugins(dir)) { module ⇒
      versionService.checkPluginForUpdates(module, sbtBinaryVersion, sbtScalaBinaryVersion)
    }.map(_.sortBy(_.status.id))
  }

  def globalPluginUpdates(sbtBinaryVersion: String): Future[Seq[ModuleStatus]] = {
    val dir = Paths.get(System.getProperty("user.home"), ".sbt", sbtBinaryVersion, "plugins")
    val sbtScalaBinaryVersion = getSbtScalaBinaryVersion(sbtBinaryVersion)
    Future.traverse(plugins(dir)) { module ⇒
      versionService.checkPluginForUpdates(module, sbtBinaryVersion, sbtScalaBinaryVersion)
    }.map(_.sortBy(_.status.id))
  }

  def plugins(dir: Path): Seq[ModuleID] = {
    Try {
      Files.newDirectoryStream(dir, "*.sbt").asScala.toSeq.flatMap { path ⇒
        Files.readAllLines(path).asScala
      }
    } match {
      case Success(lines) ⇒ PluginParser.parse(lines)
      case Failure(_)     ⇒ Seq.empty[ModuleID]
    }
  }

  private[sbt] def getSbtScalaBinaryVersion(sbtBinaryVersion: String): String = {
    sbtBinaryVersion match {
      case "1.0" ⇒ "2.12"
      case _     ⇒ "2.10"
    }
  }
}

object Reporter {
  def apply(versionService: VersionService): Reporter = new Reporter(versionService)
}
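A short sketch of driving the reporter (constructing a VersionService is project-specific and elided; this only shows the call pattern):

import org.jmotor.sbt.Reporter
import org.jmotor.sbt.dto.ModuleStatus
import org.jmotor.sbt.service.VersionService
import sbt.ModuleID

import scala.concurrent.Await
import scala.concurrent.duration._

object ReporterUsage {
  // prints the update status of each dependency; blocks only for the demo
  def printUpdates(versionService: VersionService, deps: Seq[ModuleID]): Unit = {
    val reporter = Reporter(versionService)
    val statuses: Seq[ModuleStatus] = Await.result(reporter.dependencyUpdates(deps), 30.seconds)
    statuses.foreach(println)
  }
}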
Example 150
Source File: RedisJobState.scala From asura with MIT License | 5 votes |
package asura.core.redis

import java.util

import asura.common.util.LogUtils
import asura.core.concurrent.ExecutionContextManager.cachedExecutor
import asura.core.redis.RedisClient.{redisson, toScala}
import com.typesafe.scalalogging.Logger
import org.redisson.client.codec.StringCodec

import scala.collection.JavaConverters.setAsJavaSet
import scala.concurrent.Future
import scala.util.{Failure, Success}

object RedisJobState {

  val logger = Logger("RedisJobState")
  val KEY_JOB_STATE = "asura_job_state"

  def updateJobState(scheduler: String, jobGroup: String, jobName: String, state: String)(successBlock: => Unit): Unit = {
    val jobStates = redisson.getMap[String, String](KEY_JOB_STATE, StringCodec.INSTANCE)
    jobStates.fastPutAsync(jobName, state).onComplete {
      case Failure(t) => logger.error(LogUtils.stackTraceToString(t))
      case Success(_) =>
        logger.debug(s"update $jobName to $state successful.")
        successBlock
    }
  }

  def deleteJobState(scheduler: String, jobGroup: String, jobName: String)(successBlock: => Unit): Unit = {
    val jobStates = redisson.getMap[String, String](KEY_JOB_STATE, StringCodec.INSTANCE)
    jobStates.fastRemoveAsync(jobName).onComplete {
      case Failure(t) => logger.error(LogUtils.stackTraceToString(t))
      case Success(_) =>
        logger.debug(s"delete $jobName state.")
        successBlock
    }
  }

  def getJobState(keys: Set[String]): Future[util.Map[String, String]] = {
    val jobStates = redisson.getMap[String, String](KEY_JOB_STATE, StringCodec.INSTANCE)
    jobStates.getAllAsync(setAsJavaSet(keys))
  }
}
Example 151
Source File: WebService.scala From Neutrino with Apache License 2.0 | 5 votes |
package com.ebay.neutrino.www

import java.util.concurrent.TimeUnit

import akka.actor.{ActorRef, Props, ActorSystem, ScalaActorRef}
import akka.pattern.ask
import akka.util.Timeout
import com.ebay.neutrino.{SLB, NeutrinoPoolId}
import com.ebay.neutrino.api.ApiData
import com.ebay.neutrino.cluster.{SLBTopology, SystemConfiguration}
import com.ebay.neutrino.www.ui.SideMenu
import com.ebay.neutrino.www.ui.PageFormatting
import com.ebay.neutrino.cluster.SLBLoader
import com.typesafe.config.ConfigRenderOptions
import com.typesafe.scalalogging.slf4j.StrictLogging
import spray.http.StatusCodes

import scala.concurrent.Await
import scala.concurrent.duration.FiniteDuration
import scala.concurrent.ExecutionContext.Implicits.global
import scala.util.{Failure, Success}

trait WebService extends spray.routing.HttpService with ApiData with PageFormatting with StrictLogging {

  def system: ActorSystem
  def topology = SystemConfiguration(system).topology

  val poolPage = new SideMenu("Pools") with PoolsPage
  val serverPage = new SideMenu("Servers") with ServersPage

  def webRoutes =
    path("activity") {
      complete {
        import PageFormatting.ScalaTagsPrettyMarshaller
        ActivityPage.page()
      }
    } ~
    path("pools") {
      complete {
        import PageFormatting.ScalaTagsPrettyMarshaller
        poolPage.summary(topology.toSeq)
      }
    } ~
    path("pools" / Segment) { id =>
      complete {
        import PageFormatting.ScalaTagsPrettyMarshaller
        val pool = topology.getPool(NeutrinoPoolId(id))
        poolPage.detail(pool)
      }
    } ~
    path("servers") {
      complete {
        import PageFormatting.ScalaTagsPrettyMarshaller
        val pools = topology.toSeq
        val services = topology.asInstanceOf[SLBTopology].core.services
        val nodes = services flatMap (_.pools()) flatMap (_.nodes())
        serverPage.summary(pools, nodes.toSeq)
      }
    } ~
    path("refresh") {
      complete {
        import PageFormatting.ScalaTagsPrettyMarshaller
        implicit val timeout = Timeout(FiniteDuration(3, TimeUnit.SECONDS))

        // Wait for the result, since refresh api has to be synchronous
        val reloader = Await.result(system.actorSelection("user/loader").resolveOne(), timeout.duration)
        val future = reloader ? "reload"
        val result = Await.result(future, timeout.duration)
        if (result == "complete") {
          logger.warn("Config reloaded, Successfully completed")
        } else {
          logger.warn("Unable to load the configuration")
        }
        poolPage.summary(topology.toSeq)
      }
    } ~
    path("config") {
      complete {
        val sysconfig = SystemConfiguration(system)
        sysconfig.config.root.render(ConfigRenderOptions.defaults)
      }
    } ~
    pathEndOrSingleSlash {
      complete {
        import PageFormatting.ScalaTagsPrettyMarshaller
        Overview.generate(generateStatus())
      }
    } ~
    get {
      redirect("/", StatusCodes.PermanentRedirect)
    }
}
Example 152
Source File: PageFormatting.scala From Neutrino with Apache License 2.0 | 5 votes |
package com.ebay.neutrino.www.ui

import java.util.Date

import com.typesafe.scalalogging.slf4j.StrictLogging
import nl.grons.metrics.scala.{Counter, Meter}
import spray.http.ContentType
import spray.http.MediaTypes._
import spray.httpx.marshalling.Marshaller

import scala.concurrent.duration._
import scala.util.{Failure, Success, Try}
import scala.xml.Elem

trait PageFormatting extends StrictLogging {

  import com.twitter.conversions.storage._
  import scala.language.implicitConversions

  val pretty = true
  val prettyXml = false // Use XML parsing for prettyprinting
  val prettier = new scala.xml.PrettyPrinter(160, 4)
  val starttime = new Date()

  // Run the HTML format content through the pretty-printer
  def prettify(content: String): String = {
    if (pretty && prettyXml)
      Try(prettier.format(scala.xml.XML.loadString(content))) match {
        case Success(content) => content
        case Failure(ex) => logger.warn("Unable to pretty-print html", ex); content
      }
    else if (pretty)
      PrettyPrinter.format(content)
    else
      content
  }

  def prettyxml(content: String) = {
    Try(prettier.format(scala.xml.XML.loadString(content))) match {
      case Success(content) => content
      case Failure(ex) => logger.warn("Unable to pretty-print html", ex); content
    }
  }

  // Convert current time to uptime
  def uptime() = pretty((System.currentTimeMillis() - starttime.getTime).millis)

  // Convenience method: pretty print storage size
  def bytes(data: Long): String = data.bytes.toHuman

  // Convenience method: pretty print count size
  def count(data: Long): String = data match {
    case count if count < (2 << 10) => s"$count"
    case count if count < (2 << 18) => "%.1f K".format(count / 1000f)
    case count if count < (2 << 28) => "%.1f M".format(count / 1000000f)
    case count => "%.1f G".format(count / 1000000000f)
  }

  // Convenience method; pretty print time
  def pretty(duration: FiniteDuration): String = {
    if (duration.toDays > 0) duration.toDays + " days"
    else if (duration.toHours > 0) duration.toHours + " hours"
    else if (duration.toMinutes > 0) duration.toMinutes + " minutes"
    else duration.toSeconds + " seconds"
  }

  // Convenience method; ensure non-null string
  @inline def str(value: String) = if (value == null) "" else value
}

object PageFormatting extends PageFormatting {

  import scalatags.Text.all._

  val SupportedOutput: Seq[ContentType] = Seq(`text/xml`, `application/xml`, `text/html`, `application/xhtml+xml`)

  implicit val ScalaTagsMarshaller =
    Marshaller.delegate[Frag, String](SupportedOutput: _*) { frag =>
      "<!DOCTYPE html>\n" + frag.toString
    }

  implicit val ScalaTagsPrettyMarshaller =
    Marshaller.delegate[Frag, String](SupportedOutput: _*) { frag =>
      "<!DOCTYPE html>\n" + prettyxml(frag.toString)
    }

  implicit val XmlMarshaller =
    Marshaller.delegate[Elem, String](SupportedOutput: _*) { elem =>
      prettify(elem.toString)
    }
}
Example 153
Source File: SLBLoader.scala From Neutrino with Apache License 2.0 | 5 votes |
package com.ebay.neutrino.cluster

import akka.actor.Actor
import com.ebay.neutrino.config.{LoadBalancer, Configuration}
import com.typesafe.scalalogging.slf4j.StrictLogging

import scala.concurrent.duration._
import scala.util.{Failure, Success}
import com.ebay.neutrino.datasource.DataSource

class SLBLoader extends Actor with StrictLogging {

  import context.dispatcher

  // Create a new SLB Configuration based off the file
  // Note that the system configuration is pulled from common.conf
  val config = SystemConfiguration(context.system)
  val dataSourceReader = config.settings.dataSource.datasourceReader.getConstructor().newInstance()

  // Schedule a configuration reload
  override def preStart() {
    context.system.scheduler.schedule(5 seconds, config.settings.dataSource.refreshPeriod, self, "reload")
  }

  def receive: Receive = {
    case "reload" =>
      // Create a new SLB configuration
      val results = dataSourceReader.load()
      logger.info("Reloading the configuration: {}", results.toString)
      config.topology.update(results)
      sender ! "complete"

    case "complete" =>
      logger.info("Reloading of configuration complete")

    case msg =>
      logger.warn("Unexpected message received: {}", msg.toString)
  }
}
Example 154
Source File: PacketProxy.scala From Neutrino with Apache License 2.0 | 5 votes |
package com.ebay.neutrino

import java.net.{InetAddress, InetSocketAddress, SocketAddress}

import com.ebay.neutrino.util.Utilities

import scala.concurrent.Future
import scala.util.{Failure, Success}

import com.typesafe.scalalogging.slf4j.StrictLogging
import io.netty.bootstrap.{Bootstrap, ServerBootstrap}
import io.netty.channel.ChannelHandler.Sharable
import io.netty.channel._
import io.netty.channel.nio.NioEventLoopGroup
import io.netty.channel.socket.nio.{NioServerSocketChannel, NioSocketChannel}
import io.netty.util.AttributeKey

// NOTE: this extract is partial; only the downstream-side handler methods survived
// extraction, so a minimal enclosing handler class is reconstructed here for readability.
class DownstreamHandler(upstream: Channel) extends ChannelInboundHandlerAdapter {

  override def channelRead(ctx: ChannelHandlerContext, msg: AnyRef) {
    println("Writing packet from downstream to upstream...")
    upstream.writeAndFlush(msg)
    //ctx.fireChannelRead(msg)
  }

  override def channelInactive(ctx: ChannelHandlerContext): Unit = {
    println("Downstream closing..")
    upstream.close()
    ctx.fireChannelInactive()
  }
}
Example 155
Source File: FileSystemAnnotationDataService.scala From qamr with MIT License | 5 votes |
package qamr

import spacro.util._

import scala.util.{Try, Success}
import java.nio.file.Path
import java.nio.file.Files

import com.typesafe.scalalogging.StrictLogging

class FileSystemAnnotationDataService(dataPath: Path) extends AnnotationDataService {

  private[this] def getDataDirectoryPath = Try {
    val directory = dataPath
    if (!Files.exists(directory)) {
      Files.createDirectories(directory)
    }
    directory
  }

  private[this] def getFullFilename(name: String) = s"$name.txt"

  override def saveLiveData(name: String, contents: String): Try[Unit] = for {
    directory <- getDataDirectoryPath
    _ <- Try(Files.write(directory.resolve(getFullFilename(name)), contents.getBytes()))
  } yield ()

  import scala.collection.JavaConverters._

  override def loadLiveData(name: String): Try[List[String]] = for {
    directory <- getDataDirectoryPath
    lines <- Try(Files.lines(directory.resolve(getFullFilename(name))).iterator.asScala.toList)
  } yield lines
}
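A quick round-trip sketch (the data directory path is an assumption; save writes one file per name, load reads it back line by line):

import java.nio.file.Paths
import qamr.FileSystemAnnotationDataService

object AnnotationDataRoundTrip extends App {
  val service = new FileSystemAnnotationDataService(Paths.get("/tmp/qamr-live-data")) // assumed path
  service.saveLiveData("assignments", "line 1\nline 2").get
  println(service.loadLiveData("assignments").get) // List(line 1, line 2)
}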
Example 156
Source File: PackagePlatformExtensions.scala From qamr with MIT License | 5 votes |
package qamr.util

import java.io.StringWriter
import java.io.PrintWriter

import scala.util.{Try, Success, Failure}

import com.typesafe.scalalogging.Logger

trait PackagePlatformExtensions {

  implicit class RichTry[A](val t: Try[A]) {
    def toOptionLogging(logger: Logger): Option[A] = t match {
      case Success(a) => Some(a)
      case Failure(e) =>
        val sw = new StringWriter()
        val pw = new PrintWriter(sw, true)
        e.printStackTrace(pw)
        logger.error(e.getLocalizedMessage + "\n" + sw.getBuffer.toString)
        None
    }
  }
}
Example 157
Source File: AbstractOrchestrator.scala From squbs with Apache License 2.0 | 5 votes |
package org.squbs.pattern.orchestration.japi

import java.util.concurrent.CompletableFuture

import akka.actor.{Actor, ActorRef, ActorSelection}
import akka.pattern.{AskableActorRef, AskableActorSelection}
import akka.util.Timeout
import org.squbs.pattern.orchestration.Orchestrator

import scala.concurrent.Future
import scala.util.{Failure, Success, Try}

abstract class AbstractOrchestrator extends Actor with Orchestrator {

  override def receive = super.receive

  def ask(actor: ActorRef, message: Any, timeout: Timeout): Ask = {
    val future = new AskableActorRef(actor).ask(message)(timeout)
    new Ask(future)
  }

  def ask(actor: ActorSelection, message: Any, timeout: Timeout): Ask = {
    val future = new AskableActorSelection(actor).ask(message)(timeout)
    new Ask(future)
  }

  class Ask private[japi](private val future: Future[Any]) {

    def thenComplete[T](cFuture: CompletableFuture[T]): Unit = {
      // Dragons here: DO NOT call nextMessageId from inside future.onComplete as that executes
      // outside the context of the actor. Instead, obtain the (val) id eagerly inside the actor and
      // give it to the function so it becomes pre-assigned.
      val nextId = nextMessageId
      import context.dispatcher
      future onComplete { self ! UniqueTryWrapper(nextId, _) }

      expectOnce {
        case UniqueTryWrapper(`nextId`, tt: Try[_]) =>
          tt match {
            case Success(t) => cFuture.complete(t.asInstanceOf[T])
            case Failure(e) => cFuture.completeExceptionally(e)
          }
      }
    }
  }
}
Example 158
Source File: ClientFlowHttpsSpec.scala From squbs with Apache License 2.0 | 5 votes |
package org.squbs.httpclient

import java.io.InputStream
import java.security.{KeyStore, SecureRandom}
import javax.net.ssl.{KeyManagerFactory, SSLContext, TrustManagerFactory}

import akka.actor.ActorSystem
import akka.http.scaladsl.model._
import akka.http.scaladsl.{ConnectionContext, Http}
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.{Sink, Source}
import akka.util.ByteString
import com.typesafe.config.ConfigFactory
import org.scalatest.{AsyncFlatSpec, BeforeAndAfterAll, Matchers}
import org.squbs.resolver.ResolverRegistry
import org.squbs.testkit.Timeouts._

import scala.concurrent.{Await, Future}
import scala.util.{Success, Try}

object ClientFlowHttpsSpec {

  val config = ConfigFactory.parseString(
    """
      |helloHttps {
      |  type = squbs.httpclient
      |  akka.ssl-config.loose.disableHostnameVerification = true
      |}
    """.stripMargin)

  implicit val system = ActorSystem("ClientFlowHttpsSpec", config)
  implicit val materializer = ActorMaterializer()

  ResolverRegistry(system).register[HttpEndpoint]("LocalhostHttpsEndpointResolver") { (name, _) =>
    name match {
      case "helloHttps" =>
        Some(HttpEndpoint(s"https://localhost:$port", Some(sslContext("exampletrust.jks", "changeit")), None))
      case _ => None
    }
  }

  // NOTE: the sslContext helper was dropped by this extract; the version below is a
  // standard JKS-based reconstruction using the imports above.
  def sslContext(keyStoreResource: String, password: String): SSLContext = {
    val keyStore = KeyStore.getInstance("JKS")
    val in: InputStream = getClass.getClassLoader.getResourceAsStream(keyStoreResource)
    keyStore.load(in, password.toCharArray)
    val keyManagerFactory = KeyManagerFactory.getInstance(KeyManagerFactory.getDefaultAlgorithm)
    keyManagerFactory.init(keyStore, password.toCharArray)
    val trustManagerFactory = TrustManagerFactory.getInstance(TrustManagerFactory.getDefaultAlgorithm)
    trustManagerFactory.init(keyStore)
    val context = SSLContext.getInstance("TLS")
    context.init(keyManagerFactory.getKeyManagers, trustManagerFactory.getTrustManagers, new SecureRandom)
    context
  }

  import akka.http.scaladsl.server.Directives._
  import system.dispatcher

  val route =
    path("hello") {
      get {
        complete(HttpEntity(ContentTypes.`text/html(UTF-8)`, "Hello World!"))
      }
    }

  val serverBinding = Await.result(
    Http().bindAndHandle(route, "localhost", 0, ConnectionContext.https(sslContext("example.com.jks", "changeit"))),
    awaitMax)
  val port = serverBinding.localAddress.getPort
}

class ClientFlowHttpsSpec extends AsyncFlatSpec with Matchers with BeforeAndAfterAll {

  import ClientFlowHttpsSpec._

  override def afterAll: Unit = {
    serverBinding.unbind() map { _ => system.terminate() }
  }

  it should "make a call to Hello Service" in {
    val clientFlow = ClientFlow[Int]("helloHttps")
    val responseFuture: Future[(Try[HttpResponse], Int)] =
      Source.single(HttpRequest(uri = "/hello") -> 42)
        .via(clientFlow)
        .runWith(Sink.head)

    val (Success(response), _) = Await.result(responseFuture, awaitMax)
    response.status should be (StatusCodes.OK)
    val entity = response.entity.dataBytes.runFold(ByteString(""))(_ ++ _) map (_.utf8String)
    entity map { e => e shouldEqual "Hello World!" }
  }
}
Example 159
Source File: ClientFlowIdleTimeoutSpec.scala From squbs with Apache License 2.0 | 5 votes |
package org.squbs.httpclient

import akka.actor.ActorSystem
import akka.http.scaladsl.Http
import akka.http.scaladsl.model._
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.{Sink, Source, TcpIdleTimeoutException}
import com.typesafe.config.ConfigFactory
import org.scalatest.{AsyncFlatSpec, BeforeAndAfterAll, Matchers}
import org.squbs.resolver.ResolverRegistry
import org.squbs.testkit.Timeouts.awaitMax

import scala.concurrent.{Await, Promise}
import scala.util.{Failure, Success}

object ClientFlowIdleTimeoutSpec {

  val config = ConfigFactory.parseString(
    """
      |akka {
      |  loggers = [
      |    "akka.event.Logging$DefaultLogger"
      |  ]
      |
      |  loglevel = "DEBUG"
      |
      |  http {
      |    server {
      |      idle-timeout = 240 s
      |      request-timeout = 120 s
      |    }
      |
      |    client.idle-timeout = 1 s
      |
      |    host-connection-pool.max-retries = 0
      |  }
      |}
    """.stripMargin)

  implicit val system = ActorSystem("ClientFlowIdleTimeoutSpec", config)
  implicit val materializer = ActorMaterializer()

  ResolverRegistry(system).register[HttpEndpoint]("LocalhostEndpointResolver") { (svcName, _) =>
    svcName match {
      case "slow" => Some(HttpEndpoint(s"http://localhost:$port"))
      case _ => None
    }
  }

  import akka.http.scaladsl.server.Directives._
  import system.dispatcher

  val route =
    path("slow") {
      get {
        val promise = Promise[String]
        // Never completing the promise
        onComplete(promise.future) {
          case Success(value) => complete(HttpEntity(ContentTypes.`text/html(UTF-8)`, "Slow...!"))
          case Failure(ex)    => complete(HttpEntity(ContentTypes.`text/html(UTF-8)`, "Slow failed...!"))
        }
      }
    }

  val serverBinding = Await.result(Http().bindAndHandle(route, "localhost", 0), awaitMax)
  val port = serverBinding.localAddress.getPort
}

class ClientFlowIdleTimeoutSpec extends AsyncFlatSpec with Matchers with BeforeAndAfterAll {

  import ClientFlowIdleTimeoutSpec._

  override def afterAll: Unit = {
    serverBinding.unbind() map { _ => system.terminate() }
  }

  it should "drop the connection after idle-timeout and resume the stream with new connections" in {
    val clientFlow = ClientFlow[Int]("slow")

    val result =
      Source(1 to 10)
        .map(HttpRequest(uri = "/slow") -> _)
        .via(clientFlow)
        .runWith(Sink.seq)

    result map { r =>
      val failures = r.map(_._1).filter(_.isFailure).map(_.failed)
      failures should have size 10
      failures.forall(_.get.isInstanceOf[TcpIdleTimeoutException]) shouldBe true
      r.map(_._2) should contain theSameElementsAs (1 to 10)
    }
  }
}
Example 160
Source File: Bootstrap.scala From squbs with Apache License 2.0 | 5 votes |
package org.squbs.unicomplex

import akka.actor.ActorSystem
import akka.pattern.ask
import akka.util.Timeout
import org.squbs.lifecycle.GracefulStop
import org.squbs.util.ConfigUtil._

import scala.concurrent.Await
import scala.concurrent.duration._
import scala.util.{Failure, Success, Try}
import scala.language.postfixOps

object Bootstrap extends App {

  println("Booting unicomplex")

  // Note, the config directories may change during extension init. It is important to re-read the full config
  // for the actor system start.
  UnicomplexBoot { (name, config) => ActorSystem(name, config) }
    .scanResources()
    .initExtensions
    .stopJVMOnExit
    .start()

  sys.addShutdownHook { Shutdown.shutdown() }
}

object Shutdown extends App {

  shutdown(actorSystemName = args.headOption)

  def shutdown(delayParameter: Option[FiniteDuration] = None, actorSystemName: Option[String] = None): Unit = {
    val name = actorSystemName getOrElse {
      val preConfig = UnicomplexBoot.getFullConfig(None)
      preConfig.getString("squbs.actorsystem-name")
    }

    UnicomplexBoot.actorSystems.get(name) map { actorSystem =>
      val delay = delayParameter orElse
        actorSystem.settings.config.getOption[FiniteDuration]("squbs.shutdown-delay") getOrElse Duration.Zero

      implicit val squbsStopTimeout =
        Timeout(actorSystem.settings.config.get[FiniteDuration]("squbs.default-stop-timeout", 3.seconds))

      val systemState = (Unicomplex(actorSystem).uniActor ? SystemState).mapTo[LifecycleState]

      import actorSystem.dispatcher
      systemState.onComplete {
        case Success(Stopping | Stopped) | Failure(_) => // Termination already started/happened. Do nothing!
        case _ => actorSystem.scheduler.scheduleOnce(delay, Unicomplex(name), GracefulStop)
      }

      Try { Await.ready(actorSystem.whenTerminated, delay + squbsStopTimeout.duration + (1 second)) }
    }
  }
}
Example 161
Source File: TestingUtil.scala From infinispan-spark with Apache License 2.0 | 5 votes |
package org.infinispan.spark.test

import java.util.function.BooleanSupplier

import scala.annotation.tailrec
import scala.concurrent.duration.{Duration, _}
import scala.language.postfixOps
import scala.util.{Failure, Success, Try}

object TestingUtil {

  val DefaultDuration = 60 seconds
  val waitBetweenRetries = 500

  def waitForCondition(command: () => Boolean, duration: Duration): Unit = {
    val NumTimes = duration.toMillis.toInt / waitBetweenRetries

    @tailrec
    def waitForCondition(numTimes: Int, sleep: Boolean): Unit = {
      if (sleep) Thread.sleep(waitBetweenRetries)
      Try(command.apply()) match {
        case Success(true) =>
        case Success(false) if numTimes == 0 => throw new Exception("Timeout waiting for condition.")
        case Failure(e) if numTimes == 0 => throw new Exception("Given up trying to execute command.", e)
        case _ => waitForCondition(numTimes - 1, sleep = true)
      }
    }

    waitForCondition(NumTimes, sleep = false)
  }

  def waitForCondition(command: () => Boolean): Unit = waitForCondition(command, DefaultDuration)

  def waitForCondition(command: BooleanSupplier): Unit = waitForCondition(toScala(command), DefaultDuration)

  private def toScala(f: BooleanSupplier) = new (() => Boolean) {
    override def apply() = f.getAsBoolean
  }
}
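A quick usage sketch of the polling helper above; the server value is hypothetical, standing in for whatever asynchronous side effect the test needs to wait on:

import scala.concurrent.duration._

// poll until the hypothetical server reports itself started, using the default 60-second budget
TestingUtil.waitForCondition(() => server.isStarted)
// ...or with an explicit deadline
TestingUtil.waitForCondition(() => server.isStarted, 10.seconds)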
Example 162
Source File: MetronomeBuildInfo.scala From metronome with Apache License 2.0 | 5 votes |
package dcos.metronome

import java.util.jar.{Attributes, Manifest}
import mesosphere.marathon.io.IO

import scala.collection.JavaConverters._
import scala.io.Source
import scala.util.control.NonFatal
import scala.util.{Failure, Success, Try}

// NOTE: this excerpt omits the enclosing declaration and a few members of the original file
// (notably the `metronomeJar` regex and `devBuildVersion`); the object header below is a
// minimal reconstruction, named after the source file, so the remaining members parse.
object MetronomeBuildInfo {

  lazy val manifestPath: List[java.net.URL] =
    getClass.getClassLoader
      .getResources("META-INF/MANIFEST.MF")
      .asScala
      .filter { manifest => metronomeJar.findFirstMatchIn(manifest.getPath).nonEmpty }
      .toList

  lazy val manifest: Option[Manifest] = manifestPath match {
    case Nil => None
    case List(file) =>
      val mf = new Manifest()
      IO.using(file.openStream) { f =>
        mf.read(f)
        Some(mf)
      }
    case otherwise => throw new RuntimeException(s"Multiple metronome JAR manifests returned! $otherwise")
  }

  lazy val attributes: Option[Attributes] = manifest.map(_.getMainAttributes())

  def getAttribute(name: String): Option[String] = attributes.flatMap { attrs =>
    try {
      Option(attrs.getValue(name))
    } catch {
      case NonFatal(_) => None
    }
  }

  // IntelliJ has its own manifest.mf that will inject a version that doesn't necessarily match
  // our actual version. This can cause Migrations to fail since the version number doesn't correctly match up.
  lazy val version: String = getAttribute("Implementation-Version").getOrElse(devBuildVersion)

  lazy val scalaVersion: String = getAttribute("Scala-Version").getOrElse("2.x.x")

  lazy val marathonVersion: mesosphere.marathon.SemVer = MarathonBuildInfo.version
}
Example 163
Source File: LoadContentOnStartup.scala From metronome with Apache License 2.0 | 5 votes |
package dcos.metronome
package repository

import akka.actor.{Actor, ActorLogging, Stash}
import mesosphere.marathon.StoreCommandFailedException
import org.apache.zookeeper.KeeperException.NoNodeException

import scala.concurrent.Future
import scala.util.control.NonFatal
import scala.util.{Failure, Success}

trait LoadContentOnStartup[Id, Model] extends Actor with Stash with ActorLogging {
  import LoadContentOnStartup._

  //TODO: change me to zk ec
  import context.dispatcher

  override def preStart(): Unit = {
    super.preStart()
    context.become(waitForInit)
    loadAll()
  }

  def repo: Repository[Id, Model]
  def initialize(specs: List[Model]): Unit

  def waitForInit: Receive = {
    case init: Init[Model] =>
      initialize(init.result)
      context.become(receive)
      unstashAll()
    case _ => stash()
  }

  def loadAll(): Unit = {
    val loadAllFuture = repo.ids().flatMap { ids =>
      Future.sequence(ids.map(id => getModel(id))).map(_.flatten.toList)
    }
    val me = self
    loadAllFuture.onComplete {
      case Success(result) => me ! Init(result)
      case Failure(ex) =>
        log.error(ex, "Can not load initial data. Give up.")
        System.exit(-1)
    }
  }

  private def getModel(id: Id): Future[Option[Model]] = {
    repo.get(id).recoverWith {
      case ex: StoreCommandFailedException =>
        ex.getCause match {
          case cause: NoNodeException =>
            log.error(
              s"ID $id or job-specs znode missing. Zk will need to be manually repaired. Exception message: ${cause.getMessage}"
            )
            Future.successful(None)
          case NonFatal(cause) =>
            log.error(s"Unexpected exception occurred in reading zk at startup. Exception message: ${cause.getMessage}")
            // We need crash strategy similar to marathon, for now we can NOT continue with such a zk failure.
            System.exit(-1)
            Future.failed(cause)
        }
    }
  }
}

object LoadContentOnStartup {
  case class Init[T](result: List[T])
}
Example 164
Source File: NoConcurrentRepoChange.scala From metronome with Apache License 2.0 | 5 votes |
package dcos.metronome
package repository

import akka.actor.{Actor, ActorLogging, ActorRef, Stash}

import scala.concurrent.{ExecutionContext, Future}
import scala.util.control.NonFatal
import scala.util.{Failure, Success}

trait NoConcurrentRepoChange[Id, Model, Data] extends Actor with ActorLogging with Stash {
  import NoConcurrentRepoChange._

  final def repoChange(
      change: => Future[Model],
      data: Data,
      onSuccess: (ActorRef, Model, Data) => Change,
      onFailed: (ActorRef, Throwable, Data) => Failed
  )(implicit ec: ExecutionContext): Unit = {
    val from = sender()
    try {
      val changed = change //can throw an exception, so execute before we enter waiting state
      context.become(waitForPersisted, discardOld = false)
      changed.onComplete {
        case Success(result) => self ! onSuccess(from, result, data)
        case Failure(ex) => self ! onFailed(from, ex, data)
      }
    } catch {
      case NonFatal(ex) =>
        log.error(ex, "Could not apply repository change")
        notifySender(from, onFailed(from, ex, data))
    }
  }

  private[this] def waitForPersisted: Receive = {
    case event: Failed =>
      log.error(event.ex, "Repository change failed")
      notifySender(event.sender, event)
    case event: Change =>
      notifySender(event.sender, event)
    case _ => stash()
  }

  private[this] def notifySender(recipient: ActorRef, message: Any): Unit = {
    context.unbecome()
    recipient ! message
    unstashAll()
  }
}

object NoConcurrentRepoChange {

  trait Change {
    def sender: ActorRef
  }

  trait Failed {
    def sender: ActorRef
    def ex: Throwable
  }
}
Example 165
Source File: ChangedFilesBuilder.scala From mvn_scalafmt with Apache License 2.0 | 5 votes |
package org.antipathy.mvn_scalafmt.builder

import java.io.File
import java.nio.file.Paths

import org.apache.maven.plugin.logging.Log

import scala.sys.process.{Process, ProcessLogger}
import scala.util.{Failure, Success, Try}

// NOTE: the class declaration is elided in this excerpt. The header below is a minimal
// reconstruction inferred from the companion's `new ChangedFilesBuilder(log, diff, actualBranch,
// processFunction)` call; the parent type (which declares `build` and `isSupportedFile`) is assumed.
class ChangedFilesBuilder(log: Log, diff: Boolean, branch: String, changeFunction: () => Seq[File])
    extends Builder[Seq[File], Seq[File]] {

  override def build(input: Seq[File]): Seq[File] =
    if (diff) {
      log.info(s"Checking for files changed from $branch")
      Try {
        val changedFiles = changeFunction()
        log.info(changedFiles.mkString(s"Changed from $branch:\n", "\n", ""))
        changedFiles.filter(isSupportedFile)
      } match {
        case Success(value) => value
        case Failure(e) =>
          log.error("Could not obtain list of changed files", e)
          throw e
      }
    } else {
      input
    }
}

// $COVERAGE-OFF$
object ChangedFilesBuilder {

  def apply(log: Log, diff: Boolean, branch: String, workingDirectory: File): ChangedFilesBuilder = {
    val logger: ProcessLogger = ProcessLogger(_ => (), err => log.error(err))
    def run(cmd: String) = Process(cmd, workingDirectory).!!(logger).trim

    val prefix = ": "
    val actualBranch = if (!branch.startsWith(prefix)) branch else run(branch.substring(prefix.length))

    def processFunction(): Seq[File] = {
      val diffOutput = run(s"git diff --name-only --diff-filter=d $actualBranch")
      val gitRootOutput = run("git rev-parse --show-toplevel")
      val gitRootPath = Paths.get(gitRootOutput)
      diffOutput.linesIterator
        .map(gitRootPath.resolve)
        .map(_.toFile)
        .toSeq
    }

    new ChangedFilesBuilder(log, diff, actualBranch, processFunction)
  }
}
// $COVERAGE-ON$
Example 166
Source File: RemoteConfigReader.scala From mvn_scalafmt with Apache License 2.0 | 5 votes |
package org.antipathy.mvn_scalafmt.io

import java.net.URL

import org.antipathy.mvn_scalafmt.model.RemoteConfig
import org.apache.maven.plugin.logging.Log

import scala.util.{Failure, Success, Try}

// NOTE: the class declaration is elided in this excerpt; the header below is a minimal
// reconstruction, and the parent type (which declares `read`) is an assumption.
class RemoteConfigReader(log: Log) extends Reader[RemoteConfig] {

  override def read(location: String): RemoteConfig =
    Try {
      log.info(s"Reading config from $location")
      RemoteConfig(
        contents = scala.io.Source.fromURL(new URL(location)).mkString
      )
    } match {
      case Success(value) => value
      case Failure(exception) =>
        log.error(s"error retrieving remote config: ${exception.getMessage}", exception)
        throw exception
    }
}
Example 167
Source File: FunctionalAbstractions.scala From Mastering-Functional-Programming with MIT License | 5 votes |
package jvm

import scala.util.{ Try, Success, Failure }

object FunctionalAbstractions extends App {

  def divide(n1: Double, n2: Double): Try[Double] =
    if (n2 == 0) Failure(new RuntimeException("Division by zero!"))
    else Success(n1 / n2)

  def f1Match(x: Double): Try[Double] =
    divide(2, x) match {
      case Success(res) => Success(res + 3)
      case f@Failure(_) => f
    }

  def f1Map(x: Double): Try[Double] =
    divide(2, x).map(r => r + 3)

  def f2Match(x: Double, y: Double): Try[Double] =
    divide(2, x) match {
      case Success(r1) => divide(r1, y) match {
        case Success(r2) => Success(r2 + 3)
        case f@Failure(_) => f
      }
      case f@Failure(_) => f
    }

  def f2FlatMap(x: Double, y: Double): Try[Double] =
    divide(2, x).flatMap(r1 => divide(r1, y))
      .map(r2 => r2 + 3)

  println(f1Match(2))  // 4.0
  println(f1Match(0))  // Failure
  println(f1Map(2))    // 4.0
  println(f1Map(0))    // Failure

  println(f2Match(2, 2))    // 3.5
  println(f2Match(2, 0))    // Failure
  println(f2FlatMap(2, 2))  // 3.5
  println(f2FlatMap(2, 0))  // Failure
}
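The flatMap/map chain in f2FlatMap is exactly the shape a for-comprehension desugars to, so an equivalent version (not part of the original example) could be written as:

def f2For(x: Double, y: Double): Try[Double] =
  for {
    r1 <- divide(2, x)
    r2 <- divide(r1, y)
  } yield r2 + 3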
Example 168
Source File: DataUtils.scala From scalismo with Apache License 2.0 | 5 votes |
package scalismo.statisticalmodel.dataset

import scalismo.geometry.{_3D, Point}
import scalismo.mesh.TriangleMesh
import scalismo.registration.Transformation
import scalismo.mesh.TetrahedralMesh

import scala.util.{Failure, Success, Try}

private object DataUtils {

  def meshToTransformation(refMesh: TriangleMesh[_3D], targetMesh: TriangleMesh[_3D]): Try[Transformation[_3D]] = {
    if (refMesh.pointSet.numberOfPoints != targetMesh.pointSet.numberOfPoints)
      Failure(
        new Throwable(
          s"reference and target mesh do not have the same number of points (${refMesh.pointSet.numberOfPoints} != ${targetMesh.pointSet.numberOfPoints})"
        )
      )
    else {
      val t = new Transformation[_3D] {
        override val domain = refMesh.boundingBox
        override val f = (x: Point[_3D]) => {
          val ptId = refMesh.pointSet.findClosestPoint(x).id
          targetMesh.pointSet.point(ptId)
        }
      }
      Success(t)
    }
  }
}
Example 169
Source File: ModelMetrics.scala From scalismo with Apache License 2.0 | 5 votes |
package scalismo.statisticalmodel.dataset

import scalismo.geometry._3D
import scalismo.mesh.{MeshMetrics, TriangleMesh}
import scalismo.statisticalmodel.StatisticalMeshModel
import scalismo.utils.Random

import scala.util.{Failure, Success, Try}

// NOTE: the enclosing object declaration (and the file's other metrics) are elided in this
// excerpt; the header below is a minimal reconstruction named after the source file.
object ModelMetrics {

  def generalization(pcaModel: StatisticalMeshModel, dc: DataCollection): Try[Double] = {
    if (pcaModel.referenceMesh == dc.reference)
      Success {
        dc.dataItems.par.map { item =>
          val mesh = dc.reference.transform(item.transformation)
          val projection = pcaModel.project(mesh)
          MeshMetrics.avgDistance(projection, mesh)
        }.sum / dc.size.toDouble
      }
    else Failure(new Exception("pca model and test data collection must have the same reference"))
  }
}
Example 170
Source File: DataUtils.scala From scalismo with Apache License 2.0 | 5 votes |
package scalismo.statisticalmodel.experimental.dataset

import scalismo.geometry.{_3D, Point}
import scalismo.mesh.{TetrahedralMesh, TriangleMesh}
import scalismo.registration.Transformation

import scala.util.{Failure, Success, Try}

private object DataUtils {

  def volumeMeshToTransformation(refMesh: TetrahedralMesh[_3D],
                                 targetMesh: TetrahedralMesh[_3D]): Try[Transformation[_3D]] = {
    if (refMesh.pointSet.numberOfPoints != targetMesh.pointSet.numberOfPoints)
      Failure(
        new Throwable(
          s"reference and target mesh do not have the same number of points (${refMesh.pointSet.numberOfPoints} != ${targetMesh.pointSet.numberOfPoints})"
        )
      )
    else {
      val t = new Transformation[_3D] {
        override val domain = refMesh.boundingBox
        override val f = (x: Point[_3D]) => {
          val ptId = refMesh.pointSet.findClosestPoint(x).id
          targetMesh.pointSet.point(ptId)
        }
      }
      Success(t)
    }
  }
}
Example 171
Source File: 5-Future.scala From wow-spark with MIT License | 5 votes |
package com.sev7e0.wow.scala

import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.Future
import scala.util.{Failure, Random, Success}

object UseFuture {

  type CoffeeBeans = String
  type GroundCoffee = String
  type Milk = String
  type FrothedMilk = String
  type Espresso = String
  type Cappuccino = String

  def main(args: Array[String]): Unit = {
    // This import brings the global execution context into scope, ensuring the implicit value exists
    import scala.concurrent.ExecutionContext.Implicits.global

    // Callbacks (partial functions) can be registered for success or failure separately;
    // onSuccess is discouraged -- prefer onComplete instead
    grind("ara beans").onSuccess {
      case ground =>
        Thread.sleep(Random.nextInt(2000))
        println("okay, got my ground coffee")
    }

    // Register a callback with onComplete; the callback's input is a Try.
    grind("java beans").onComplete {
      case Success(message) => println(s"okay, got my ground coffee $message")
      case Failure(exception) => println(exception.getMessage)
    }

    val eventualBoolean: Future[Boolean] = heatWater(Water(50)) flatMap { water =>
      temperatureOkay(water)
    }
    eventualBoolean.foreach(println)

    // The three Futures created before the for-expression run in parallel; inside the
    // for-expression they compose sequentially. Note that a for-expression is just
    // syntactic sugar for flatMap/map.
    val eventualCoffee = grind("java beans")
    val eventualWater = heatWater(Water(20))
    val eventualMilk = frothMilk("milk")

    val coffee = for {
      ground <- eventualCoffee
      water <- eventualWater
      milk <- eventualMilk
      okay <- brew(ground, water)
    } yield combine(okay, milk)
    coffee.foreach(println)

    Thread.sleep(10000)
  }

  // Check the water temperature on an asynchronous thread
  def temperatureOkay(water: Water): Future[Boolean] = Future {
    (80 to 85) contains water.temperature
  }

  def grind(coffeeBeans: CoffeeBeans): Future[GroundCoffee] = Future {
    println("start grinding...")
    Thread.sleep(Random.nextInt(2000))
    if (coffeeBeans == "baked beans") throw GrindingException("are you joking?")
    println("finished grinding...")
    s"ground coffee of $coffeeBeans"
  }

  def heatWater(water: Water): Future[Water] = Future {
    println("heating the water now")
    Thread.sleep(Random.nextInt(2000))
    println("hot, it's hot!")
    water.copy(temperature = 85)
  }

  def frothMilk(milk: Milk): Future[FrothedMilk] = Future {
    println("milk frothing system engaged!")
    Thread.sleep(Random.nextInt(2000))
    println("shutting down milk frothing system")
    s"frothed $milk"
  }

  def brew(coffeeBeans: CoffeeBeans, water: Water): Future[Espresso] = Future {
    println("happy brewing :)")
    Thread.sleep(Random.nextInt(2000))
    println("it's brewed!")
    "espresso"
  }

  def combine(espresso: Espresso, frothedMilk: FrothedMilk): Cappuccino = "cappuccino"

  case class Water(temperature: Int)

  case class GrindingException(msg: String) extends Exception(msg)
  case class FrothingException(msg: String) extends Exception(msg)
  case class WaterBoilingException(msg: String) extends Exception(msg)
  case class BrewingException(msg: String) extends Exception(msg)
}
Example 172
Source File: package.scala From lighthouse with Apache License 2.0 | 5 votes |
package be.dataminded.lighthouse

import java.time.LocalDate
import java.time.format.DateTimeFormatter

import be.dataminded.lighthouse.common.DateTimeFormatters
import scopt.Read
import scopt.Read.reads

import scala.util.{Failure, Success, Try}

package object config {

  implicit val LocalDateSupport: Read[LocalDate] = reads { timestamp =>
    tryParseLocalDate(timestamp) match {
      case Success(localDate) => localDate
      case Failure(e) =>
        throw new IllegalArgumentException(s"The given timestamp: [$timestamp] could not be parsed", e)
    }
  }

  private def tryParseLocalDate(timestamp: String): Try[LocalDate] = {
    Try {
      LocalDate.parse(timestamp, DateTimeFormatter.ISO_LOCAL_DATE)
    } recover {
      case _ => LocalDate.parse(timestamp, DateTimeFormatters.SimpleDateFormat)
    }
  }
}
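With this implicit Read in scope, a scopt parser can accept LocalDate options directly. A minimal sketch under that assumption (the Config case class and the "app" parser are hypothetical):

import java.time.LocalDate
import scopt.OptionParser
import be.dataminded.lighthouse.config._ // brings LocalDateSupport into scope

case class Config(date: LocalDate = LocalDate.now())

val parser = new OptionParser[Config]("app") {
  opt[LocalDate]("date").action((d, c) => c.copy(date = d))
}

parser.parse(Seq("--date", "2020-01-31"), Config()) // Some(Config(2020-01-31))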
Example 173
Source File: ErgoTransactionValidator.scala From sigmastate-interpreter with MIT License | 5 votes |
package sigmastate.helpers

import org.ergoplatform._
import sigmastate.eval.IRContext
import sigmastate.interpreter.Interpreter.{ScriptNameProp, emptyEnv}
import sigmastate.utxo.CostTable

import scala.util.{Failure, Success}

class ErgoLikeTestInterpreter(implicit override val IR: IRContext) extends ErgoLikeInterpreter {
  override type CTX = ErgoLikeContext
}

class ErgoTransactionValidator(implicit IR: IRContext) {
  val verifier = new ErgoLikeTestInterpreter()

  def validate(tx: ErgoLikeTransaction,
               blockchainState: BlockchainState,
               minerPubkey: Array[Byte],
               boxesReader: ErgoBoxReader): Either[Throwable, Long] = {
    val msg = tx.messageToSign
    val inputs = tx.inputs

    val boxes: IndexedSeq[ErgoBox] = tx.inputs.map(_.boxId).map { id =>
      boxesReader.byId(id) match {
        case Success(box) => box
        case Failure(e) => return Left[Throwable, Long](e)
      }
    }

    val txCost = boxes.zipWithIndex.foldLeft(0L) { case (accCost, (box, idx)) =>
      val input = inputs(idx)
      val proof = input.spendingProof
      val proverExtension = tx.inputs(idx).spendingProof.extension

      val context = ErgoLikeContextTesting(blockchainState.currentHeight, blockchainState.lastBlockUtxoRoot,
        minerPubkey, boxes, tx, box, proverExtension)

      val verificationResult = verifier.verify(
        emptyEnv + (ScriptNameProp -> s"height_${blockchainState.currentHeight}_verify"),
        box.ergoTree, context, proof, msg)

      val scriptCost: Long = verificationResult match {
        case Success((res, cost)) =>
          if (!res) return Left[Throwable, Long](new Exception(s"Validation failed for input #$idx"))
          else cost
        case Failure(e) => return Left[Throwable, Long](e)
      }
      accCost + scriptCost
    }
    Right(txCost)
  }
}
Example 174
Source File: package.scala From cosmos with Apache License 2.0 | 5 votes |
package com.mesosphere.cosmos

import com.mesosphere.http.OriginHostScheme
import io.lemonlabs.uri.Uri

import scala.util.Failure
import scala.util.Success
import scala.util.Try

package object repository {

  def rewriteUrlWithProxyInfo(
    origin: OriginHostScheme
  )(
    value: String
  ): String = {
    Try(Uri.parse(value)) match {
      case Success(url) =>
        // TODO: This can throw!!
        Uri.parse(
          s"${origin.urlScheme}://${origin.rawHost}/package/resource?url=$url"
        ).toString
      case Failure(_) =>
        value
    }
  }
}
Example 175
Source File: MarathonPackageRunner.scala From cosmos with Apache License 2.0 | 5 votes |
package com.mesosphere.cosmos

import _root_.io.circe.JsonObject
import com.mesosphere.cosmos.circe.Decoders.decode
import com.mesosphere.cosmos.error.MarathonBadGateway
import com.mesosphere.cosmos.error.MarathonBadResponse
import com.mesosphere.cosmos.error.MarathonGenericError
import com.mesosphere.cosmos.error.ServiceAlreadyStarted
import com.mesosphere.cosmos.http.RequestSession
import com.mesosphere.cosmos.thirdparty.marathon.model.MarathonApp
import com.mesosphere.cosmos.thirdparty.marathon.model.MarathonError
import com.mesosphere.error.ResultOps
import com.twitter.finagle.http.Status
import com.twitter.util.Future
import io.netty.handler.codec.http.HttpResponseStatus

import scala.util.Failure
import scala.util.Success
import scala.util.Try

final class MarathonPackageRunner(adminRouter: AdminRouter) {

  private[this] lazy val logger = org.slf4j.LoggerFactory.getLogger(getClass)

  // Note: trimContentForPrinting, used below, is a helper defined elsewhere in the original
  // source file; this excerpt omits it.
  def launch(renderedConfig: JsonObject)(implicit session: RequestSession): Future[MarathonApp] = {
    adminRouter.createApp(renderedConfig)
      .map { response =>
        response.status match {
          case Status.Conflict =>
            throw ServiceAlreadyStarted().exception
          case status if (400 until 500).contains(status.code) =>
            logger.warn(s"Marathon returned [${status.code}]: " +
              s"${trimContentForPrinting(response.contentString)}")
            Try(decode[MarathonError](response.contentString).getOrThrow) match {
              case Success(marathonError) =>
                throw MarathonBadResponse(marathonError).exception
              case Failure(_) =>
                throw MarathonGenericError(HttpResponseStatus.valueOf(status.code)).exception
            }
          case status if (500 until 600).contains(status.code) =>
            logger.warn(s"Marathon returned [${status.code}]: " +
              s"${trimContentForPrinting(response.contentString)}")
            throw MarathonBadGateway(HttpResponseStatus.valueOf(status.code)).exception
          case _ =>
            decode[MarathonApp](response.contentString).getOrThrow
        }
      }
  }
}
Example 176
Source File: QualityValueSpec.scala From cosmos with Apache License 2.0 | 5 votes |
package com.mesosphere.http

import org.scalatest.FreeSpec

import scala.util.Failure
import scala.util.Success

class QualityValueSpec extends FreeSpec {

  "QualityValue.parse should" - {
    "error when input that is not a double in base10" in {
      val Failure(err) = QualityValue.parse("asd")
      assertResult("Unexpected q value 'asd'. Expected 0.0 <= q <= 1.0 with no more than 3 decimal places")(err.getMessage)
    }

    "error when the provided value is a double and is outside the valid range" in {
      val Failure(err) = QualityValue.parse("1.1")
      assertResult("Unexpected q value '1.1'. Expected 0.0 <= q <= 1.0 with no more than 3 decimal places")(err.getMessage)
    }

    "error when more than 3 decimal places" in {
      val Failure(err) = QualityValue.parse("0.1234")
      assertResult("Unexpected q value '0.1234'. Expected 0.0 <= q <= 1.0 with no more than 3 decimal places")(err.getMessage)
    }

    "error when does not start with 0 or 1" in {
      val Failure(err) = QualityValue.parse("7.1234")
      assertResult("Unexpected q value '7.1234'. Expected 0.0 <= q <= 1.0 with no more than 3 decimal places")(err.getMessage)
    }

    "succeed when input is" - {
      "0.0" in {
        assertResult(Success(QualityValue(0.0)))(QualityValue.parse("0.0"))
      }
      "0.25" in {
        assertResult(Success(QualityValue(0.25)))(QualityValue.parse("0.25"))
      }
      "1.0" in {
        assertResult(Success(QualityValue(1.0)))(QualityValue.parse("1.0"))
      }
    }
  }
}
Example 177
Source File: MediaTypeSpec.scala From cosmos with Apache License 2.0 | 5 votes |
package com.mesosphere.http

import com.mesosphere.Generators.Implicits._
import org.scalatest.FreeSpec
import org.scalatest.prop.PropertyChecks

import scala.util.Success

final class MediaTypeSpec extends FreeSpec with PropertyChecks {

  private val testMediaType = MediaType(
    "application",
    MediaTypeSubType("vnd.dcos.test", Some("json")),
    Map("charset" -> "utf-8", "version" -> "v1")
  )

  "MediaType.parse(string) should" - {

    "parse basic type" in {
      val Success(t) = MediaType.parse("text/html")
      assertResult("text/html")(t.show)
    }

    "parse with suffix" in {
      val Success(t) = MediaType.parse("""image/svg+xml""")
      assertResult("image")(t.`type`)
      assertResult("svg")(t.subType.value)
      assertResult(Some("xml"))(t.subType.suffix)
      assertResult("""image/svg+xml""")(t.show)
    }

    "parse parameters" in {
      val Success(t) = MediaType.parse("""text/html; charset=utf-8; foo=bar""")
      assertResult("text")(t.`type`)
      assertResult("html")(t.subType.value)
      assertResult(Map(
        "charset" -> "utf-8",
        "foo" -> "bar"
      ))(t.parameters)
    }

    "lower-case type" in {
      val Success(t) = MediaType.parse("""IMAGE/SVG+XML""")
      assertResult("""image/svg+xml""")(t.show)
    }

    "lower-case parameter names" in {
      val Success(t) = MediaType.parse("""text/html; Charset=utf-8""")
      assertResult("text")(t.`type`)
      assertResult("html")(t.subType.value)
      assertResult(Map(
        "charset" -> "utf-8"
      ))(t.parameters)
    }

    "parse a vendor type" in {
      val Success(t) = MediaType.parse("application/vnd.dcos.test+json; charset=utf-8; version=v1")
      assertResult(testMediaType)(t)
    }

    "unquote parameter values" in {
      val Success(t) = MediaType.parse("""text/html; charset="utf-8"""")
      assertResult("text")(t.`type`)
      assertResult("html")(t.subType.value)
      assertResult(Map(
        "charset" -> "utf-8"
      ))(t.parameters)
    }
  }

  "A MediaType should show correctly" - {
    "text/html" in {
      assertResult("text/html")(MediaType("text", MediaTypeSubType("html")).show)
    }

    "application/xhtml+xml" in {
      assertResult("application/xhtml+xml")(MediaType("application", MediaTypeSubType("xhtml", Some("xml"))).show)
    }

    "application/vnd.dcos.custom-request+json" in {
      assertResult("application/vnd.dcos.custom-request+json")(
        MediaType("application", MediaTypeSubType("vnd.dcos.custom-request", Some("json"))).show
      )
    }
  }

  "MediaType.show followed by MediaType.parse should be the identity function" in {
    forAll { (mediaType: MediaType) =>
      assertResult(Success(mediaType))(MediaType.parse(mediaType.show))
    }
  }

  // TODO package-add: Test for case-insensitive comparison
}
Example 178
Source File: ReleaseVersionConverterSpec.scala From cosmos with Apache License 2.0 | 5 votes |
package com.mesosphere.universe.bijection

import com.mesosphere.universe
import com.mesosphere.universe.bijection.UniverseConversions._
import com.twitter.bijection.Conversion.asMethod
import com.twitter.bijection.{Bijection, Injection}
import org.scalatest.FreeSpec

import scala.util.{Failure, Success, Try}

final class ReleaseVersionConverterSpec extends FreeSpec {

  "v2ReleaseVersionToString should" - {
    "always succeed in the forward direction" in {
      assertResult("0")(universe.v2.model.ReleaseVersion("0").as[String])
    }

    "always succeed in the reverse direction" in {
      assertResult(universe.v2.model.ReleaseVersion("1"))("1".as[universe.v2.model.ReleaseVersion])
    }
  }

  "v3ReleaseVersionToLong should" - {
    "always succeed in the forward direction" in {
      val version = universe.v3.model.ReleaseVersion(2)
      assertResult(2)(version.as[Long])
    }

    "succeed in the reverse direction on nonnegative numbers" in {
      val version = universe.v3.model.ReleaseVersion(0)
      assertResult(Success(version))(
        0L.as[Try[universe.v3.model.ReleaseVersion]]
      )
    }

    "fail in the reverse direction on negative numbers" in {
      val Failure(iae) = (-1L).as[Try[universe.v3.model.ReleaseVersion]]
      val expectedMessage = "Expected integer value >= 0 for release version, but found [-1]"
      assertResult(expectedMessage)(iae.getMessage)
    }
  }

  "v3ReleaseVersionToString should" - {
    behave like v3ReleaseVersionStringConversions[String]
  }

  "v3ReleaseVersionToV2ReleaseVersion should" - {
    behave like v3ReleaseVersionStringConversions[universe.v2.model.ReleaseVersion]
  }

  private[this] def v3ReleaseVersionStringConversions[A](implicit
    versionToA: Injection[universe.v3.model.ReleaseVersion, A],
    aToString: Bijection[A, String]
  ): Unit = {
    "always succeed in the forward direction" in {
      val version = 42L
      assertResult("42") {
        universe.v3.model.ReleaseVersion(version).as[A].as[String]
      }
    }

    "succeed in the reverse direction on nonnegative version numbers" in {
      val version = 24L
      assertResult(Success(universe.v3.model.ReleaseVersion(version))) {
        "24".as[A].as[Try[universe.v3.model.ReleaseVersion]]
      }
    }

    "fail in the reverse direction on negative version numbers" in {
      val Failure(iae) = "-2".as[A].as[Try[universe.v3.model.ReleaseVersion]]
      val message = "Expected integer value >= 0 for release version, but found [-2]"
      assertResult(message)(iae.getMessage)
    }

    "fail in the reverse direction on non-number values" in {
      val Failure(iae) = "foo".as[A].as[Try[universe.v3.model.ReleaseVersion]]
      val message = "Failed to invert: foo"
      assertResult(message)(iae.getMessage)
    }
  }
}
Example 179
Source File: CompoundMediaTypeParser.scala From cosmos with Apache License 2.0 | 5 votes |
package com.mesosphere.http

import cats.instances.list._
import cats.instances.try_._
import cats.syntax.traverse._

import scala.util.Success
import scala.util.Try

object CompoundMediaTypeParser {

  def parse(s: String): Try[CompoundMediaType] = {
    s.split(',').toList.filterNot(_.trim.isEmpty) match {
      case Nil => Success(new CompoundMediaType(Set.empty))
      case mts =>
        mts.map(MediaType.parse)
          .sequence
          .map { mediaTypes =>
            CompoundMediaType(backfillParams(mediaTypes)._2.toSet)
          }
    }
  }

  private[this] def backfillParams(
    mts: List[MediaType]
  ): (Map[String, String], List[MediaType]) = {
    mts match {
      case Nil => (Map.empty, Nil)
      case x :: xs =>
        // walk down the list of media types
        backfillParams(xs) match {
          case (m, l) =>
            if (x.parameters.isEmpty) {
              // if the current media type doesn't have parameters set on it, set it to be the parameters
              // returned from the next media type in the list
              m -> (x.copy(parameters = m) :: l)
            } else {
              // if the current media type does have parameters set on it, leave them intact and pass
              // them up for the previous media type in the list
              x.parameters -> (x :: l)
            }
        }
    }
  }
}
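Reading backfillParams bottom-up: an entry with no parameters inherits the parameter map of the next parameterized entry in the list. A hypothetical illustration of that behavior (the exact rendering of the result is indicative, not taken from the source):

// parameters propagate right-to-left onto parameter-less entries:
CompoundMediaTypeParser.parse("application/json, text/html;q=0.8")
// yields a CompoundMediaType containing, in effect:
//   application/json;q=0.8   (backfilled from the entry after it)
//   text/html;q=0.8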
Example 180
Source File: QualityValue.scala From cosmos with Apache License 2.0 | 5 votes |
package com.mesosphere.http

import scala.util.Failure
import scala.util.Success
import scala.util.Try

// NOTE: this excerpt omits the QualityValue case class and its parse method (exercised by the
// QualityValueSpec in Example 176 above); only the private validation helpers survive.
// The enclosing object header is reconstructed minimally so they parse.
object QualityValue {

  private[this] def validateString(s: String): String = {
    if (
      (s.charAt(0) == '0' || s.charAt(0) == '1') &&
      s.length <= 5 &&
      "0.0" <= s && s <= "1.000"
    ) {
      s
    } else {
      throw err(s)
    }
  }

  private[this] def err(value: String): IllegalArgumentException = {
    new IllegalArgumentException(
      s"Unexpected q value '$value'. Expected 0.0 <= q <= 1.0 with no more than 3 decimal places"
    )
  }
}
Example 181
Source File: BijectionUtils.scala From cosmos with Apache License 2.0 | 5 votes |
package com.mesosphere.universe.bijection

import scala.util.{Failure, Success}

object BijectionUtils {

  def twitterTryToScalaTry[A](a: com.twitter.util.Try[A]): scala.util.Try[A] = {
    a match {
      case com.twitter.util.Return(aaa) => Success(aaa)
      case com.twitter.util.Throw(e) => Failure(e)
    }
  }

  def scalaTryToTwitterTry[A](a: scala.util.Try[A]): com.twitter.util.Try[A] = {
    a match {
      case scala.util.Success(aaa) => com.twitter.util.Return(aaa)
      case scala.util.Failure(e) => com.twitter.util.Throw(e)
    }
  }
}
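Usage is symmetric in both directions; a quick sketch:

import com.twitter.util.Return

BijectionUtils.twitterTryToScalaTry(Return(42))              // Success(42)
BijectionUtils.scalaTryToTwitterTry(scala.util.Success(42))  // Return(42)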
Example 182
Source File: Detector.scala From cloudflow with Apache License 2.0 | 5 votes |
package cloudflow.installer

import java.util.concurrent.TimeUnit

import akka.event.LoggingAdapter
import org.zeroturnaround.exec._

import scala.annotation.tailrec
import scala.collection.JavaConverters._
import scala.util.{ Failure, Success, Try }

sealed trait ClusterFeature
final case object Scc extends ClusterFeature
final case class StorageClasses(classes: Set[StorageClass]) extends ClusterFeature

final case class StorageClass(name: String, provisioner: String)

case class ClusterFeatures(
    storageClasses: Set[StorageClass] = Set.empty,
    hasSecurityContextConstraints: Boolean = false
) {

  private val set = {
    var s = Set.empty[ClusterFeature]
    if (storageClasses.nonEmpty) s += StorageClasses(storageClasses)
    if (hasSecurityContextConstraints) s += Scc
    s
  }

  def contains(feature: ClusterFeature) = set.contains(feature)

  def print()(implicit log: LoggingAdapter): Unit = {
    val header = s"""+${"-" * 80}+"""
    log.info(header)
    log.info("Features detected:")
    log.info("")
    if (hasSecurityContextConstraints) log.info("Scc")
    storageClasses.foreach {
      case StorageClass(name, provisioner) => log.info(s"Storage class: $name - $provisioner")
    }
    log.info(header)
  }
}

object Detector {
  def apply(): Detector = Detector(executor)

  def executor(commandLine: Array[String], log: LoggingAdapter, settings: Settings): Try[String] = {
    val command = s"${commandLine.mkString(" ")}"
    log.info(s"Executing command '$command'")
    Try(
      new ProcessExecutor()
        .command(commandLine.toList.asJava)
        .readOutput(true)
        .exitValues(0)
        .timeout(settings.executionTimeout, TimeUnit.SECONDS)
        .execute()
        .outputUTF8()
    )
  }
}

case class Detector(executor: (Array[String], LoggingAdapter, Settings) => Try[String]) {

  def detectClusterFeatures()(implicit log: LoggingAdapter, settings: Settings): ClusterFeatures =
    ClusterFeatures(getStorageClasses(), hasSecurityContextConstraints())

  def hasSecurityContextConstraints()(implicit log: LoggingAdapter, settings: Settings): Boolean =
    executor("oc get scc".split(" "), log, settings).isSuccess

  def getStorageClasses()(implicit log: LoggingAdapter, settings: Settings): Set[StorageClass] = {
    @tailrec
    def extractStorageClass(a: List[String], b: Set[StorageClass] = Set.empty): Set[StorageClass] =
      a match {
        case name :: provisioner :: _ :: tail => extractStorageClass(tail, b + StorageClass(name, provisioner))
        case nil @ _                          => b
      }

    executor(
      "kubectl get sc --no-headers".split(" "),
      log,
      settings
    ) match {
      case Success(result) =>
        if (result.startsWith("error:")) Set.empty
        else if (result.contains("No resources found")) Set.empty
        else extractStorageClass(result.replaceAll("\n", " ").split(" ").filter(s => s != "(default)" && s != "").toList)
      case Failure(ex) =>
        log.error(s"Failed to query storage classes, ${ex.getMessage()}")
        Set.empty
    }
  }
}
Example 183
Source File: RunnerConfigUtils.scala From cloudflow with Apache License 2.0 | 5 votes |
package cloudflow.blueprint

import com.typesafe.config.{ Config, ConfigFactory }

import scala.io.Source
import scala.util.{ Failure, Success, Try }

object RunnerConfigUtils {
  val StorageMountPathKey = "storage.mountPath"
  val MetadataName = "metadata.name"
  val MetadataNamespace = "metadata.namespace"
  val MetadataUid = "metadata.uid"

  def addStorageConfig(config: Config, pvcVolumeMountPath: String): Config = {
    val storageConfig = ConfigFactory.parseString(s"""$StorageMountPathKey:"$pvcVolumeMountPath"""")
    config.withFallback(storageConfig)
  }

  def addPodRuntimeConfig(config: Config, downwardApiVolumeMountPath: String): Config = {
    val (name, namespace, uid) = getPodMetadata(downwardApiVolumeMountPath)
    val podRuntimeConfig = ConfigFactory.parseString(s"""
      |cloudflow.runner.pod: {
      |  $MetadataName:"$name"
      |  $MetadataNamespace:"$namespace"
      |  $MetadataUid:"$uid"
      |}
      |""".stripMargin)
    config.withFallback(podRuntimeConfig)
  }

  def getPodMetadata(downwardApiVolumeMountPath: String): (String, String, String) = {
    val name = readDownwardApi(downwardApiVolumeMountPath, MetadataName)
    val namespace = readDownwardApi(downwardApiVolumeMountPath, MetadataNamespace)
    val uid = readDownwardApi(downwardApiVolumeMountPath, MetadataUid)
    (name, namespace, uid)
  }

  private def readDownwardApi(downwardApiVolumeMountPath: String, filename: String): String = {
    val path = s"$downwardApiVolumeMountPath/$filename"
    Try(Source.fromFile(path).getLines.mkString) match {
      case Success(contents) ⇒ contents
      case Failure(ex) ⇒
        throw new Exception(s"An error occurred while attempting to access the downward API volume mount with path '$path'", ex)
    }
  }
}
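A usage sketch, assuming the standard Kubernetes downward-API volume layout the code expects (one file per metadata field; /etc/podinfo is a hypothetical mount path):

import com.typesafe.config.ConfigFactory

// /etc/podinfo must contain the files metadata.name, metadata.namespace and
// metadata.uid, written by the downward API volume; otherwise this throws
val config = RunnerConfigUtils.addPodRuntimeConfig(ConfigFactory.empty(), "/etc/podinfo")
config.getString("cloudflow.runner.pod.metadata.name") // the pod's name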
Example 184
Source File: StreamletScannerSpec.scala From cloudflow with Apache License 2.0 | 5 votes |
package cloudflow.sbt

import org.scalatest._

import scala.collection.JavaConverters._
import scala.util.Success

final class StreamletScannerSpec extends WordSpec with TryValues with OptionValues with MustMatchers {

  "StreamletScanner.scan" should {
    val classLoader = this.getClass.getClassLoader
    val results = StreamletScanner.scan(classLoader)
    val (valid, invalid) = results.partition { case (_, triedDiscoveredStreamlet) ⇒ triedDiscoveredStreamlet.isSuccess }
    val validStreamlets = valid.collect { case (k, Success(discovered)) ⇒ (k, discovered) }
    val invalidStreamlets = invalid.toMap

    // These are all valid streamlets defined in TestStreamlets.scala
    "find all valid test streamlets" in {
      valid must have size 6
    }

    // These are all invalid streamlets defined in TestStreamlets.scala
    "find all invalid test streamlets" in {
      invalid must have size 2
    }

    "find streamlets defined as objects" in {
      validStreamlets.keys must contain(BarFlow.getClass.getCanonicalName)
    }

    "find streamlets defined as companion objects" in {
      validStreamlets.keys must contain(ClassWithCompanionObject.getClass.getCanonicalName)
    }

    "find streamlets defined as classes with a default constructor" in {
      validStreamlets.keys must contain(classOf[CodeFlow].getCanonicalName)
    }

    "find streamlet with config parameters" in {
      val key = classOf[BarFlowWithConfig].getCanonicalName
      validStreamlets.keys must contain(key)

      val expected = new BarFlowWithConfig
      validStreamlets(key)
        .getConfigList("config_parameters")
        .asScala
        .map { confParConf ⇒
          confParConf.getString("description") mustBe expected.GoldPrice.description
          confParConf.getString("key") mustBe expected.GoldPrice.key
          confParConf.getString("validation_type") mustBe expected.GoldPrice.toDescriptor.validationType
        }
    }

    "find streamlet with Java config parameters" in {
      val key = classOf[BarFlowWithJavaConfig].getCanonicalName
      validStreamlets.keys must contain(key)

      val expected = new BarFlowWithJavaConfig
      validStreamlets(key)
        .getConfigList("config_parameters")
        .asScala
        .map { confParConf ⇒
          confParConf.getString("description") mustBe expected.GoldPrice.description
          confParConf.getString("key") mustBe expected.GoldPrice.key
          confParConf.getString("validation_type") mustBe expected.GoldPrice.toDescriptor.validationType
        }
    }

    "produce failures for classes with no default constructor" in {
      val noConstructorFailure = invalidStreamlets.get("cloudflow.sbt.NoDefaultConstructorStreamlet").value
      noConstructorFailure.failure.exception mustBe a[ConstructorMissing]
    }

    "produce failures for classes with constructors that throw exceptions" in {
      val noConstructorFailure = invalidStreamlets.get("cloudflow.sbt.StreamletThatThrowsAnExceptionInItsConstructor").value
      noConstructorFailure.failure.exception mustBe a[ConstructorFailure]
    }

    "produce no failures for abstract Streamlet classes" in {
      val abstractClassFailure = invalidStreamlets.get("cloudflow.sbt.AbstractStreamlet")
      abstractClassFailure mustBe None
    }
  }
}
Example 185
Source File: TaxiFareOps.scala From cloudflow with Apache License 2.0 | 5 votes |
package cloudflow.flink

import java.util.Locale

import org.joda.time.DateTime
import org.joda.time.format.DateTimeFormat

import scala.util.{ Failure, Success, Try }
import cloudflow.flink.avro._

object TaxiFareOps {

  @transient val timeFormatter =
    DateTimeFormat.forPattern("yyyy-MM-dd HH:mm:ss").withLocale(Locale.US).withZoneUTC()

  def fromString(fare: String): Try[TaxiFare] = {
    def parseFloat(s: String) =
      if (s.length() > 0) s.toFloat else 0.0f

    def parseDateTime(s: String) =
      DateTime.parse(s, timeFormatter)

    val tokens = fare.split(",")
    if (tokens.length != 8) Failure(new RuntimeException(s"Invalid record: $fare"))
    else
      Try {
        val rideId = tokens(0).toLong
        new TaxiFare(
          rideId,
          tokens(1).toLong,
          tokens(4),
          tokens(2).toLong,
          parseDateTime(tokens(3)).getMillis(),
          parseFloat(tokens(5)),
          parseFloat(tokens(6)),
          parseFloat(tokens(7))
        )
      }.transform(s ⇒ Success(s), e ⇒ Failure(new RuntimeException(s"Invalid record: $fare", e)))
  }

  def getEventTime(fare: TaxiFare): Long = fare.startTime
}
Example 186
Source File: TaxiRideOps.scala From cloudflow with Apache License 2.0 | 5 votes |
package cloudflow.flink

import java.util.Locale

import org.joda.time.DateTime
import org.joda.time.format.DateTimeFormat

import scala.util.{ Failure, Success, Try }
import cloudflow.flink.avro._

object TaxiRideOps {

  @transient val timeFormatter =
    DateTimeFormat.forPattern("yyyy-MM-dd HH:mm:ss").withLocale(Locale.US).withZoneUTC()

  def fromString(ride: String): Try[TaxiRide] = {
    def parseFloat(s: String) =
      if (s.length() > 0) s.toFloat else 0.0f

    def parseDateTime(s: String) =
      DateTime.parse(s, timeFormatter)

    val tokens = ride.split(",")
    if (tokens.length != 11) Failure(new RuntimeException(s"Invalid record: $ride"))
    else
      Try {
        val rideId = tokens(0).toLong

        val (isStart, startTime, endTime) = tokens(1) match {
          case "START" ⇒ (true, parseDateTime(tokens(2)), parseDateTime(tokens(3)))
          case "END" ⇒ (false, parseDateTime(tokens(3)), parseDateTime(tokens(2)))
          case _ ⇒ throw new RuntimeException(s"Invalid record: $ride")
        }

        new TaxiRide(
          rideId,
          isStart,
          tokens(9).toLong,
          tokens(8).toShort,
          tokens(10).toLong,
          parseFloat(tokens(4)),
          parseFloat(tokens(5)),
          parseFloat(tokens(6)),
          parseFloat(tokens(7)),
          startTime.getMillis(),
          endTime.getMillis()
        )
      }.transform(s ⇒ Success(s), e ⇒ Failure(new RuntimeException(s"Invalid record: $ride", e)))
  }

  def getEventTime(ride: TaxiRide): Long =
    if (ride.isStart) ride.startTime else ride.endTime
}
Example 187
Source File: AvroUtil.scala From cloudflow with Apache License 2.0 | 5 votes |
package cloudflow.streamlets.avro

import scala.util.{ Failure, Success, Try }
import scala.reflect.ClassTag
import scala.reflect._

import org.apache.avro.specific.SpecificRecordBase
import org.apache.avro.Schema

import cloudflow.streamlets._

object AvroUtil {
  val Format = "avro"

  def makeSchema[T <: SpecificRecordBase: ClassTag]: Schema =
    Try(classTag[T].runtimeClass.getDeclaredMethod("SCHEMA$")) match {
      case Success(schema) ⇒ schema.invoke(null).asInstanceOf[Schema]
      case Failure(_) ⇒
        Try(classTag[T].runtimeClass.getDeclaredField("SCHEMA$")) match {
          case Success(schema) ⇒ schema.get(null).asInstanceOf[Schema]
          case Failure(ex) ⇒
            throw new RuntimeException(s"Error fetching avro schema for class ${classTag[T].runtimeClass}", ex)
        }
    }

  def fingerprintSha256(schema: Schema): String = {
    import java.util.Base64
    import org.apache.avro.SchemaNormalization._

    Base64
      .getEncoder()
      .encodeToString(parsingFingerprint("SHA-256", schema))
  }

  def createSchemaDefinition(schema: Schema) = SchemaDefinition(
    name = schema.getFullName,
    schema = schema.toString(false),
    fingerprint = fingerprintSha256(schema),
    format = Format
  )
}
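A usage sketch; Rating is a hypothetical Avro-generated class (the Avro compiler emits a static SCHEMA$ member, which makeSchema reads reflectively):

// Rating stands in for any class generated by the Avro compiler
val schema = AvroUtil.makeSchema[Rating]
val definition = AvroUtil.createSchemaDefinition(schema)
definition.fingerprint // base64-encoded SHA-256 parsing fingerprint of the schema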
Example 188
Source File: Execution.scala From maze with Apache License 2.0 | 5 votes |
package fr.vsct.dt.maze.core

import scala.reflect.ClassTag
import scala.util.{Failure, Success, Try}

// NOTE: this excerpt omits the Execution trait itself (which declares `label` and `execute()`)
// as well as the enclosing declaration; the object header below is a minimal reconstruction.
object Execution {

  def sequence[A: ClassTag](executions: Traversable[Execution[A]]): Execution[Array[A]] = new Execution[Array[A]]() {
    override val label: String = s"execution of all of: ${executions.map(_.label).mkString("[", ", ", "]")}"

    override def execute(): Try[Array[A]] = {
      executions.toSeq
        .par.map(_.execute())
        .seq.foldLeft[Try[List[A]]](Success(List[A]())) { (acc, v) =>
          for {
            array <- acc
            nextElement <- v
          } yield nextElement :: array
        }.map(_.reverse.toArray)
    }
  }
}
Example 189
Source File: Predicate.scala From maze with Apache License 2.0 | 5 votes |
package fr.vsct.dt.maze.core import scala.util.{Failure, Success, Try} abstract class Predicate { self => val label: String def get(): PredicateResult def execute(): Boolean = self.get().result.getOrElse(false) def labeled(text: String): Predicate = { new Predicate { override val label: String = text override def get(): PredicateResult = self.get() } } def unary_! : Predicate = new Predicate { override val label: String = "!" + self.label override def get(): PredicateResult = self.get() match { case PredicateResult(Success(true), _) => Result.failure(s"Expected ${self.label} to be false") case PredicateResult(Failure(e), m) => Result(e, m) case _ => Result.success } } def &&(other: Predicate): Predicate = { new Predicate { override val label: String = self.label + " AND " + other.label override def get(): PredicateResult = self.get() match { case PredicateResult(Success(true), _) => other.get() case PredicateResult(Failure(e), m) => Result(e, m) case _ => Result.failure(s"${this.label} returned false") } } } def ||(other: Predicate): Predicate = new Predicate { override def get(): PredicateResult = self.get() match { case PredicateResult(Success(true), _) => Result.success case PredicateResult(Failure(e), m) => Result(e, m) case _ => other.get() } override val label: String = self.label + " OR " + other.label } override def toString: String = label } object Predicate { val `true` = new Predicate { override def get(): PredicateResult = Result.success override val label: String = "True predicate" } val `false` = new Predicate { override def get(): PredicateResult = Result.failure("") override val label: String = "False predicate" } } case class PredicateResult(result: Try[Boolean], message: String) object Result { def apply(value: Boolean, message: String = ""): PredicateResult = PredicateResult(Success(value), message) def apply(e: Throwable, message: String): PredicateResult = PredicateResult(Failure(e), message) val success: PredicateResult = Result(value = true) def failure(message: String): PredicateResult = Result(value = false, message) def exception(e: Throwable): PredicateResult = PredicateResult(Failure(e), s"Predicate failed: ${e.getMessage}") }
Example 190
Source File: OptionTest.scala From maze with Apache License 2.0 | 5 votes |
package fr.vsct.dt.maze.core

import fr.vsct.dt.maze.core.Predef._
import org.scalatest.FlatSpec

import scala.util.{Failure, Success}

class OptionTest extends FlatSpec {

  "a valued option" should "execute without errors" in {
    val execution: Execution[Option[String]] = Execution { Some("it works") }

    execution.get().execute() match {
      case Success("it works") => // ok
      case Success(somethingElse) => fail(s"Unexpected value : $somethingElse")
      case Failure(e) => fail(e)
    }

    execution.getOrElse("something else").execute() match {
      case Success("it works") => // ok
      case Success(somethingElse) => fail(s"Unexpected value : $somethingElse")
      case Failure(e) => fail(e)
    }

    execution.exists().get() match {
      case PredicateResult(Success(true), _) => // ok
      case PredicateResult(Success(false), message) => fail(s"Unexpected value : false, message : $message")
      case PredicateResult(Failure(e), _) => fail(e)
    }
  }

  "an empty option" should "execute without errors" in {
    val execution: Execution[Option[String]] = Execution { None }

    execution.get().execute() match {
      case Success(_) => fail("empty option not seen as empty")
      case Failure(e) => // expected: get() on an empty option fails
    }

    execution.getOrElse("something else").execute() match {
      case Success("something else") => // ok
      case Success(somethingElse) => fail(s"Unexpected value : $somethingElse")
      case Failure(e) => fail(e)
    }

    execution.exists().get() match {
      case PredicateResult(Success(false), _) => // ok
      case PredicateResult(Success(true), message) => fail(s"Unexpected value : true, message : $message")
      case PredicateResult(Failure(e), _) => fail(e)
    }
  }
}
Example 191
Source File: XpathTest.scala From maze with Apache License 2.0 | 5 votes |
package fr.vsct.dt.maze.core

import fr.vsct.dt.maze.core.Predef._
import net.sf.saxon.s9api.XdmValue
import org.scalatest.FlatSpec

import scala.util.{Failure, Success}

class XpathTest extends FlatSpec {

  "an xpath resulting to an array" should "return it properly" in {
    val xml =
      """<?xml version="1.0" ?>
        |<test>
        |  <value>0</value>
        |  <value>1</value>
        |  <value>2</value>
        |</test>
      """.stripMargin

    val base: Execution[String] = Execution { xml }
    val xpath: Execution[XdmValue] = base.xpath("//test/value")

    xpath.length().execute() match {
      case Success(3) => // ok
      case Failure(e) => fail(e)
      case Success(number) => fail(s"Found $number results, expected 3")
    }

    xpath.first().stringValue().execute() match {
      case Success("0") => // cool
      case Failure(e) => fail(e)
      case Success(other) => fail(s"Expected to find 0 but got $other")
    }

    xpath.itemAt(2).stringValue().execute() match {
      case Success("2") => // cool
      case Failure(e) => fail(e)
      case Success(other) => fail(s"Expected to find 2 but got $other")
    }
  }

  "an xpath resulting to an empty array" should "handle it properly" in {
    val xml =
      """<?xml version="1.0" ?>
        |<test>
        |  <value>0</value>
        |  <value>1</value>
        |  <value>2</value>
        |</test>
      """.stripMargin

    val base: Execution[String] = Execution { xml }
    val xpath: Execution[XdmValue] = base.xpath("//some/path")

    xpath.length().execute() match {
      case Success(0) => // ok
      case Failure(e) => fail(e)
      case Success(number) => fail(s"Found $number results, expected 0")
    }
  }

  "an xpath from a non-valid xml" should "generate an error" in {
    val xml = "some non-xml value"

    val base: Execution[String] = Execution { xml }
    val xpath: Execution[XdmValue] = base.xpath("//test/value")

    xpath.execute() match {
      case Failure(e) => // was expected
      case Success(_) => fail("was expecting an error but didn't get one.")
    }
  }
}
Example 192
Source File: KafkaOptionsLoaderUtils.scala From gimel with Apache License 2.0 | 5 votes |
package com.paypal.gimel.kafka2.utilities

import scala.collection.immutable.Map
import scala.util.{Failure, Success, Try}

import com.paypal.gimel.common.conf.{GimelConstants, KafkaOptionsLoader}
import com.paypal.gimel.kafka2.conf.{KafkaClientConfiguration, KafkaConfigs, KafkaConstants}
import com.paypal.gimel.kafka2.utilities.KafkaUtilities.logger

object KafkaOptionsLoaderUtils {

  def getEachKafkaTopicToOptionsMap(kafkaOptions: Map[String, Map[String, String]]): Map[String, Map[String, String]] = {
    def MethodName: String = new Exception().getStackTrace.apply(1).getMethodName
    logger.info(" @Begin --> " + MethodName)

    val kafkaOptionsWithKafkaKeyword: Map[String, Map[String, String]] = kafkaOptions.map {
      case (topicList, options) =>
        (topicList, options.map(eachOption => {
          if (!eachOption._1.startsWith(KafkaConstants.KAFKA_CONST)) {
            (s"${KafkaConstants.KAFKA_CONST}.${eachOption._1}", eachOption._2)
          } else {
            (eachOption._1, eachOption._2)
          }
        }))
    }
    kafkaOptionsWithKafkaKeyword.flatMap(x => {
      x._1.split(",").map(each => (each, x._2))
    })
  }
}
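A hypothetical input/output illustration, assuming KafkaConstants.KAFKA_CONST is "kafka" (not shown in this excerpt):

// input: one entry covering two topics, options missing the "kafka." prefix
val in = Map("topic1,topic2" -> Map("bootstrap.servers" -> "broker:9092"))
KafkaOptionsLoaderUtils.getEachKafkaTopicToOptionsMap(in)
// Map("topic1" -> Map("kafka.bootstrap.servers" -> "broker:9092"),
//     "topic2" -> Map("kafka.bootstrap.servers" -> "broker:9092"))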
Example 193
Source File: ExecutorManager.scala From marvin-engine-executor with Apache License 2.0 | 5 votes |
package org.marvin.executor.manager

import akka.actor.{Actor, ActorLogging}
import akka.util.Timeout
import org.marvin.executor.api.{GenericAPI, GenericAPIFunctions}
import org.marvin.executor.manager.ExecutorManager.{GetMetadata, StopActor}

import scala.concurrent.ExecutionContext
import scala.concurrent.duration._
import scala.util.{Failure, Success}

object ExecutorManager {
  case class StopActor(actorName: String)
  case class GetMetadata()
}

class ExecutorManager(api: GenericAPIFunctions) extends Actor with ActorLogging {
  implicit val ec = ExecutionContext.global
  implicit val futureTimeout = Timeout(30 seconds)

  override def preStart() = {
    log.info(s"Executor Manager enabled and starting!!!")
    log.info(s"Executor Manager path ${self.path}")
  }

  override def receive = {
    case StopActor(actorName) =>
      if (api.manageableActors.contains(actorName)) {
        val actorRef = api.manageableActors(actorName)

        log.info(s"Actor ${actorRef.path} found. Trying to stop selected actor..")

        context.stop(actorRef)

        log.info(s"Actor ${actorRef.path} successfully stopped!")

        // Note: this replies with the scala.util.Success companion object itself
        // (not a Success instance), mirroring the original source.
        sender ! Success
      } else {
        log.info(s"Actor related with the key ${actorName} is not a valid manageable actor.")
        sender ! Failure
      }

    case GetMetadata =>
      log.info(s"Getting Metadata object from engine ...")
      sender ! api.getMetadata
  }
}
Example 194
Source File: OnlineAction.scala From marvin-engine-executor with Apache License 2.0 | 5 votes |
package org.marvin.executor.actions

import akka.Done
import akka.actor.SupervisorStrategy._
import akka.actor.{Actor, ActorLogging, ActorRef, OneForOneStrategy, Props, Status}
import akka.pattern.{ask, pipe}
import akka.util.Timeout
import io.grpc.StatusRuntimeException
import org.marvin.artifact.manager.ArtifactSaver
import org.marvin.executor.actions.OnlineAction.{OnlineExecute, OnlineHealthCheck, OnlineReload}
import org.marvin.executor.proxies.EngineProxy.{ExecuteOnline, HealthCheck, Reload}
import org.marvin.executor.proxies.OnlineActionProxy
import org.marvin.artifact.manager.ArtifactSaver.SaveToLocal
import org.marvin.model.{EngineActionMetadata, EngineMetadata}
import org.marvin.util.ProtocolUtil

import scala.collection.mutable.ListBuffer
import scala.concurrent.Future
import scala.concurrent.duration._
import scala.util.{Failure, Success}

object OnlineAction {
  case class OnlineExecute(message: String, params: String)
  case class OnlineReload(protocol: String)
  case class OnlineHealthCheck()
}

class OnlineAction(actionName: String, metadata: EngineMetadata) extends Actor with ActorLogging {
  var onlineActionProxy: ActorRef = _
  var artifactSaver: ActorRef = _
  var engineActionMetadata: EngineActionMetadata = _
  var artifactsToLoad: String = _
  implicit val ec = context.dispatcher

  override def preStart() = {
    engineActionMetadata = metadata.actionsMap(actionName)
    artifactsToLoad = engineActionMetadata.artifactsToLoad.mkString(",")
    onlineActionProxy = context.actorOf(Props(new OnlineActionProxy(engineActionMetadata)), name = "onlineActionProxy")
    artifactSaver = context.actorOf(ArtifactSaver.build(metadata), name = "artifactSaver")
  }

  override val supervisorStrategy =
    OneForOneStrategy(maxNrOfRetries = 10, withinTimeRange = metadata.onlineActionTimeout milliseconds) {
      case _: StatusRuntimeException => Restart
      case _: Exception => Escalate
    }

  override def receive = {
    case OnlineExecute(message, params) =>
      implicit val futureTimeout = Timeout(metadata.onlineActionTimeout milliseconds)

      log.info(s"Starting to process execute to $actionName. Message: [$message] and params: [$params].")

      val originalSender = sender
      ask(onlineActionProxy, ExecuteOnline(message, params)) pipeTo originalSender

    case OnlineReload(protocol) =>
      implicit val futureTimeout = Timeout(metadata.reloadTimeout milliseconds)

      log.info(s"Starting to process reload to $actionName. Protocol: [$protocol].")

      if (protocol == null || protocol.isEmpty) {
        onlineActionProxy forward Reload()
      } else {
        val splitedProtocols = ProtocolUtil.splitProtocol(protocol, metadata)

        val futures: ListBuffer[Future[Any]] = ListBuffer[Future[Any]]()
        for (artifactName <- engineActionMetadata.artifactsToLoad) {
          futures += (artifactSaver ? SaveToLocal(artifactName, splitedProtocols(artifactName)))
        }

        val origSender = sender()
        Future.sequence(futures).onComplete {
          case Success(_) =>
            onlineActionProxy.ask(Reload(protocol)) pipeTo origSender
          case Failure(e) =>
            log.error(s"Failure to reload artifacts using protocol $protocol.")
            origSender ! Status.Failure(e)
        }
      }

    case OnlineHealthCheck =>
      implicit val futureTimeout = Timeout(metadata.healthCheckTimeout milliseconds)

      log.info(s"Starting to process health to $actionName.")

      val originalSender = sender
      ask(onlineActionProxy, HealthCheck) pipeTo originalSender

    case Done =>
      log.info("Work Done!")

    case _ =>
      log.warning(s"Not valid message !!")
  }
}
Example 195
Source File: PipelineAction.scala From marvin-engine-executor with Apache License 2.0 | 5 votes |
package org.marvin.executor.actions

import java.time.LocalDateTime
import java.util.NoSuchElementException

import akka.Done
import akka.actor.{Actor, ActorLogging, ActorRef, Props}
import akka.pattern.ask
import akka.util.Timeout
import org.marvin.artifact.manager.ArtifactSaver
import org.marvin.artifact.manager.ArtifactSaver.SaveToRemote
import org.marvin.exception.MarvinEExecutorException
import org.marvin.executor.actions.PipelineAction.{PipelineExecute, PipelineExecutionStatus}
import org.marvin.executor.proxies.BatchActionProxy
import org.marvin.executor.proxies.EngineProxy.{ExecuteBatch, Reload}
import org.marvin.model._
import org.marvin.util.{JsonUtil, LocalCache}

import scala.collection.mutable.ListBuffer
import scala.concurrent.duration._
import scala.concurrent.{Await, Future}
import scala.util.Success

object PipelineAction {
  case class PipelineExecute(protocol: String, params: String)
  case class PipelineExecutionStatus(protocol: String)
}

class PipelineAction(metadata: EngineMetadata) extends Actor with ActorLogging {
  implicit val ec = context.dispatcher

  var artifactSaver: ActorRef = _
  var cache: LocalCache[BatchExecution] = _

  override def preStart() = {
    artifactSaver = context.actorOf(ArtifactSaver.build(metadata), name = "artifactSaver")
    cache = new LocalCache[BatchExecution](maximumSize = 10000L, defaultTTL = 30.days)
  }

  override def receive = {
    case PipelineExecute(protocol, params) =>
      implicit val futureTimeout = Timeout(metadata.pipelineTimeout milliseconds)

      log.info(s"Starting to process pipeline process with. Protocol: [$protocol] and Params: [$params].")

      cache.save(protocol, new BatchExecution("pipeline", protocol, LocalDateTime.now, Working))

      try {
        for (actionName <- metadata.pipelineActions) {
          val engineActionMetadata = metadata.actionsMap(actionName)
          val _actor: ActorRef = context.actorOf(Props(new BatchActionProxy(engineActionMetadata)), name = actionName.concat("Actor"))
          Await.result((_actor ? Reload(protocol)), futureTimeout.duration)
          Await.result((_actor ? ExecuteBatch(protocol, params)), futureTimeout.duration)
          context stop _actor

          val futures: ListBuffer[Future[Done]] = ListBuffer[Future[Done]]()
          for (artifactName <- engineActionMetadata.artifactsToPersist) {
            futures += (artifactSaver ? SaveToRemote(artifactName, protocol)).mapTo[Done]
          }

          if (!futures.isEmpty) Future.sequence(futures).onComplete {
            case Success(response) =>
              log.info(s"All artifacts from [$actionName] were saved with success!! [$response]")
          }
        }
      } catch {
        case e: Exception =>
          cache.save(protocol, new BatchExecution("pipeline", protocol, LocalDateTime.now, Failed))
          throw e
      }

      cache.save(protocol, new BatchExecution("pipeline", protocol, LocalDateTime.now, Finished))

    case PipelineExecutionStatus(protocol) =>
      log.info(s"Getting pipeline execution status to protocol $protocol.")

      try {
        sender ! JsonUtil.toJson(cache.load(protocol).get)
      } catch {
        case _: NoSuchElementException =>
          sender ! akka.actor.Status.Failure(new MarvinEExecutorException(s"Protocol $protocol not found!"))
      }

    case Done =>
      log.info("Work Done!")

    case _ =>
      log.warning(s"Not valid message !!")
  }
}
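Again a minimal, assumed wiring sketch (the `system` and `metadata` values are hypothetical): since the pipeline blocks inside the actor with `Await.result`, firing it with tell and polling the cached status separately is the natural way to drive it.

import akka.actor.Props
import akka.pattern.ask
import akka.util.Timeout
import scala.concurrent.duration._

// Hypothetical parent wiring: `system` and `metadata` are assumed to exist.
val pipeline = system.actorOf(Props(new PipelineAction(metadata)), name = "pipelineAction")
implicit val timeout: Timeout = Timeout(metadata.pipelineTimeout.milliseconds)

// Fire-and-forget the whole pipeline for a protocol...
pipeline ! PipelineAction.PipelineExecute("abc123", "{}")

// ...then poll the cached BatchExecution; replies with a JSON string,
// or Status.Failure if the protocol is unknown.
val status = pipeline ? PipelineAction.PipelineExecutionStatus("abc123")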
Example 196
Source File: AkkaHttpWebsocketTest.scala From sttp with Apache License 2.0 | 5 votes |
package sttp.client.akkahttp

import java.util.concurrent.ConcurrentLinkedQueue

import akka.Done
import akka.http.scaladsl.model.ws.{Message, TextMessage}
import akka.stream.Materializer
import akka.stream.scaladsl._
import org.scalatest.BeforeAndAfterAll
import org.scalatest.concurrent.{Eventually, IntegrationPatience}
import sttp.client._

import scala.collection.JavaConverters._
import scala.concurrent.duration._
import scala.concurrent.{ExecutionContext, Future, Promise}
import scala.util.Success
import org.scalatest.flatspec.AsyncFlatSpec
import org.scalatest.matchers.should.Matchers
import sttp.client.testing.HttpTest.wsEndpoint

class AkkaHttpWebsocketTest
    extends AsyncFlatSpec
    with Matchers
    with BeforeAndAfterAll
    with Eventually
    with IntegrationPatience {
  implicit val ec: ExecutionContext = scala.concurrent.ExecutionContext.global

  implicit val backend: SttpBackend[Future, Nothing, Flow[Message, Message, *]] = AkkaHttpBackend()

  it should "send and receive ten messages" in {
    val received = new ConcurrentLinkedQueue[String]()
    val sink: Sink[Message, Future[Done]] = collectionSink(received)

    val n = 10
    val source: Source[Message, Promise[Option[Message]]] =
      Source((1 to n).map(i => TextMessage(s"test$i"))).concatMat(Source.maybe[Message])(Keep.right)

    val flow: Flow[Message, Message, (Future[Done], Promise[Option[Message]])] =
      Flow.fromSinkAndSourceMat(sink, source)(Keep.both)

    basicRequest.get(uri"$wsEndpoint/ws/echo").openWebsocket(flow).flatMap { r =>
      eventually {
        received.asScala.toList shouldBe (1 to n).map(i => s"echo: test$i").toList
      }

      r.result._2.complete(Success(None)) // the source should now complete
      r.result._1.map(_ => succeed) // the future should be completed once the stream completes (and the ws closes)
    }
  }

  it should "receive two messages" in {
    val received = new ConcurrentLinkedQueue[String]()
    val sink: Sink[Message, Future[Done]] = collectionSink(received)

    val source: Source[Message, Promise[Option[Message]]] = Source.maybe[Message]
    val flow: Flow[Message, Message, Promise[Option[Message]]] =
      Flow.fromSinkAndSourceMat(sink, source)(Keep.right)

    basicRequest.get(uri"$wsEndpoint/ws/send_and_wait").openWebsocket(flow).flatMap { r =>
      eventually {
        received.asScala.toList shouldBe List("test10", "test20")
      }

      r.result.success(None) // closing
      succeed
    }
  }

  it should "error if the endpoint is not a websocket" in {
    basicRequest.get(uri"$wsEndpoint/echo").openWebsocket(Flow.apply[Message]).failed.map { t =>
      t shouldBe a[NotAWebsocketException]
    }
  }

  def collectionSink(queue: ConcurrentLinkedQueue[String]): Sink[Message, Future[Done]] =
    Sink
      .setup[Message, Future[Done]] { (_materializer, _) =>
        Flow[Message]
          // mapping with parallelism 1 so that messages don't get reordered
          .mapAsync(1) {
            case m: TextMessage =>
              implicit val materializer: Materializer = _materializer
              m.toStrict(1.second).map(Some(_))
            case _ => Future.successful(None)
          }
          .collect { case Some(TextMessage.Strict(text)) => text }
          .toMat(Sink.foreach(queue.add))(Keep.right)
      }
      .mapMaterializedValue(_.flatMap(identity))

  override protected def afterAll(): Unit = {
    backend.close()
    super.afterAll()
  }
}
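Stripped of test scaffolding, the core pattern the test exercises looks roughly like the sketch below; the echo URL is an assumption, and the implicit `backend` and `ec` from the class above are required in scope.

import akka.http.scaladsl.model.ws.{Message, TextMessage}
import akka.stream.scaladsl.{Flow, Keep, Sink, Source}
import scala.concurrent.Promise
import scala.util.Success

// A source that emits one message, then stays open until the promise completes.
val source = Source.single[Message](TextMessage("hello")).concatMat(Source.maybe[Message])(Keep.right)
val sink   = Sink.foreach[Message](println)
val flow   = Flow.fromSinkAndSourceMat(sink, source)(Keep.both)

// The endpoint is illustrative; `uri"..."` comes from sttp.client._.
basicRequest.get(uri"ws://localhost:8080/ws/echo").openWebsocket(flow).map { r =>
  r.result._2.complete(Success(None)) // completing with None closes the client side
}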
Example 197
Source File: SttpPlayJsonApi.scala From sttp with Apache License 2.0 | 5 votes |
package sttp.client.playJson

import sttp.client._
import sttp.model._
import sttp.client.internal.Utf8
import play.api.libs.json.{JsError, Json, Reads, Writes}
import sttp.client.{IsOption, JsonInput, ResponseAs, ResponseError}
import sttp.model.MediaType

import scala.util.{Failure, Success, Try}

trait SttpPlayJsonApi {
  implicit def playJsonBodySerializer[B: Writes]: BodySerializer[B] =
    b => StringBody(Json.stringify(Json.toJson(b)), Utf8, Some(MediaType.ApplicationJson))

  def asJsonAlwaysUnsafe[B: Reads: IsOption]: ResponseAs[B, Nothing] =
    asStringAlways.map(ResponseAs.deserializeOrThrow(deserializeJson))

  // Note: None of the play-json utilities attempt to catch invalid
  // json, so Json.parse needs to be wrapped in Try
  def deserializeJson[B: Reads: IsOption]: String => Either[JsError, B] =
    JsonInput.sanitize[B].andThen { s =>
      Try(Json.parse(s)) match {
        case Failure(e: Exception) => Left(JsError(e.getMessage))
        case Failure(t: Throwable) => throw t
        case Success(json) =>
          Json.fromJson(json).asEither match {
            case Left(failures) => Left(JsError(failures))
            case Right(success) => Right(success)
          }
      }
    }
}
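A hedged sketch of how this trait is typically consumed (in sttp it is exposed through `import sttp.client.playJson._`); the `Echo` type, the endpoint, and the synchronous backend below are illustrative assumptions.

import play.api.libs.json.{Json, Reads, Writes}
import sttp.client._
import sttp.client.playJson._

// Hypothetical payload type with derived play-json codecs.
case class Echo(message: String)
implicit val echoReads: Reads[Echo]   = Json.reads[Echo]
implicit val echoWrites: Writes[Echo] = Json.writes[Echo]

implicit val backend = HttpURLConnectionBackend()

// playJsonBodySerializer serializes the request body; asJsonAlwaysUnsafe
// parses the response via deserializeJson and throws if the JSON is invalid.
val response = basicRequest
  .post(uri"http://example.org/echo") // illustrative endpoint
  .body(Echo("hi"))
  .response(asJsonAlwaysUnsafe[Echo])
  .send()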
Example 198
Source File: HttpClientMonixBackend.scala From sttp with Apache License 2.0 | 5 votes |
package sttp.client.httpclient.monix

import java.io.InputStream
import java.net.http.HttpRequest.BodyPublishers
import java.net.http.{HttpClient, HttpRequest}
import java.nio.ByteBuffer

import cats.effect.Resource
import monix.eval.Task
import monix.execution.Scheduler
import monix.reactive.Observable
import org.reactivestreams.FlowAdapters
import sttp.client.httpclient.HttpClientBackend.EncodingHandler
import sttp.client.httpclient.{HttpClientAsyncBackend, HttpClientBackend, WebSocketHandler}
import sttp.client.impl.monix.TaskMonadAsyncError
import sttp.client.testing.SttpBackendStub
import sttp.client.{FollowRedirectsBackend, SttpBackend, SttpBackendOptions}

import scala.util.{Success, Try}

class HttpClientMonixBackend private (
    client: HttpClient,
    closeClient: Boolean,
    customizeRequest: HttpRequest => HttpRequest,
    customEncodingHandler: EncodingHandler
)(implicit s: Scheduler)
    extends HttpClientAsyncBackend[Task, Observable[ByteBuffer]](
      client,
      TaskMonadAsyncError,
      closeClient,
      customizeRequest,
      customEncodingHandler
    ) {
  override def streamToRequestBody(stream: Observable[ByteBuffer]): Task[HttpRequest.BodyPublisher] = {
    monad.eval(BodyPublishers.fromPublisher(FlowAdapters.toFlowPublisher(stream.toReactivePublisher)))
  }

  override def responseBodyToStream(responseBody: InputStream): Try[Observable[ByteBuffer]] = {
    Success(
      Observable
        .fromInputStream(Task.now(responseBody))
        .map(ByteBuffer.wrap)
        .guaranteeCase(_ => Task(responseBody.close()))
    )
  }
}

object HttpClientMonixBackend {
  private def apply(
      client: HttpClient,
      closeClient: Boolean,
      customizeRequest: HttpRequest => HttpRequest,
      customEncodingHandler: EncodingHandler
  )(implicit
      s: Scheduler
  ): SttpBackend[Task, Observable[ByteBuffer], WebSocketHandler] =
    new FollowRedirectsBackend(
      new HttpClientMonixBackend(client, closeClient, customizeRequest, customEncodingHandler)(s)
    )

  def apply(
      options: SttpBackendOptions = SttpBackendOptions.Default,
      customizeRequest: HttpRequest => HttpRequest = identity,
      customEncodingHandler: EncodingHandler = PartialFunction.empty
  )(implicit
      s: Scheduler = Scheduler.global
  ): Task[SttpBackend[Task, Observable[ByteBuffer], WebSocketHandler]] =
    Task.eval(
      HttpClientMonixBackend(
        HttpClientBackend.defaultClient(options),
        closeClient = true,
        customizeRequest,
        customEncodingHandler
      )(s)
    )

  def resource(
      options: SttpBackendOptions = SttpBackendOptions.Default,
      customizeRequest: HttpRequest => HttpRequest = identity,
      customEncodingHandler: EncodingHandler = PartialFunction.empty
  )(implicit
      s: Scheduler = Scheduler.global
  ): Resource[Task, SttpBackend[Task, Observable[ByteBuffer], WebSocketHandler]] =
    Resource.make(apply(options, customizeRequest, customEncodingHandler))(_.close())

  def usingClient(
      client: HttpClient,
      customizeRequest: HttpRequest => HttpRequest = identity,
      customEncodingHandler: EncodingHandler = PartialFunction.empty
  )(implicit s: Scheduler = Scheduler.global): SttpBackend[Task, Observable[ByteBuffer], WebSocketHandler] =
    HttpClientMonixBackend(client, closeClient = false, customizeRequest, customEncodingHandler)(s)

  def stub: SttpBackendStub[Task, Observable[ByteBuffer], WebSocketHandler] =
    SttpBackendStub(TaskMonadAsyncError)
}
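A minimal sketch of using this backend, assuming a reachable URL; `guarantee` closes the backend whether or not the request succeeds.

import monix.eval.Task
import monix.execution.Scheduler.Implicits.global
import sttp.client._

val program: Task[Unit] =
  HttpClientMonixBackend().flatMap { implicit backend =>
    basicRequest
      .get(uri"https://example.org") // illustrative URL
      .send()
      .map(response => println(response.code))
      .guarantee(backend.close()) // close() returns Task[Unit] on this backend
  }

// program.runSyncUnsafe() // run once, at the edge of the application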
Example 199
Source File: package.scala From sttp with Apache License 2.0 | 5 votes |
package sttp.client.impl

import sttp.client.testing.ConvertToFuture
import _root_.scalaz.concurrent.Task
import _root_.scalaz.{-\/, \/-}

import scala.concurrent.{Future, Promise}
import scala.util.{Failure, Success}

package object scalaz {
  val convertScalazTaskToFuture: ConvertToFuture[Task] = new ConvertToFuture[Task] {
    // from https://github.com/Verizon/delorean
    override def toFuture[T](value: Task[T]): Future[T] = {
      val p = Promise[T]()

      value.unsafePerformAsync {
        case \/-(a) => p.complete(Success(a)); ()
        case -\/(t) => p.complete(Failure(t)); ()
      }

      p.future
    }
  }
}
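Usage is a one-liner; a minimal sketch:

import _root_.scalaz.concurrent.Task
import scala.concurrent.Future

// Run a scalaz Task asynchronously and observe the outcome as a standard Future.
val ok: Future[Int]     = convertScalazTaskToFuture.toFuture(Task.now(42))
val failed: Future[Int] = convertScalazTaskToFuture.toFuture(Task.fail(new RuntimeException("boom")))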
Example 200
Source File: package.scala From sttp with Apache License 2.0 | 5 votes |
package sttp.client.impl

import _root_.zio._
import _root_.zio.blocking.Blocking
import sttp.client.testing.ConvertToFuture

import scala.concurrent.{Future, Promise}
import scala.util.{Failure, Success}

package object zio {
  val runtime: Runtime[ZEnv] = Runtime.default

  type BlockingTask[A] = ZIO[Blocking, Throwable, A]

  val convertZioTaskToFuture: ConvertToFuture[Task] = new ConvertToFuture[Task] {
    override def toFuture[T](value: Task[T]): Future[T] = {
      val p = Promise[T]()

      runtime.unsafeRunSync(value) match {
        case Exit.Failure(c) =>
          p.complete(
            Failure(
              c.failures.headOption.orElse(c.defects.headOption).getOrElse(new RuntimeException(s"Unknown cause: $c"))
            )
          )
        case Exit.Success(v) => p.complete(Success(v))
      }

      p.future
    }
  }

  val convertZioBlockingTaskToFuture: ConvertToFuture[BlockingTask] = new ConvertToFuture[BlockingTask] {
    override def toFuture[T](value: BlockingTask[T]): Future[T] = {
      val p = Promise[T]()

      runtime.unsafeRunSync(value) match {
        case Exit.Failure(c) =>
          p.complete(
            Failure(
              c.failures.headOption.orElse(c.defects.headOption).getOrElse(new RuntimeException(s"Unknown cause: $c"))
            )
          )
        case Exit.Success(v) => p.complete(Success(v))
      }

      p.future
    }
  }
}
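And the matching one-liner usage, a minimal sketch under ZIO 1.x:

import _root_.zio.Task
import scala.concurrent.Future

// Run a ZIO Task on the shared runtime and observe the outcome as a Future.
val ok: Future[Int]     = convertZioTaskToFuture.toFuture(Task.succeed(42))
val failed: Future[Int] = convertZioTaskToFuture.toFuture(Task.fail(new RuntimeException("boom")))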