scala.concurrent.ExecutionContext Scala Examples
The following examples show how to use scala.concurrent.ExecutionContext.
Each example is taken from an open-source project; the source file and license are noted above the code.
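Before diving into the project examples, here is a minimal, self-contained sketch of the pattern they all build on (the object name is invented for illustration): an implicit ExecutionContext supplies the thread pool on which a Future's body and its callbacks run.

import java.util.concurrent.Executors
import scala.concurrent.{Await, ExecutionContext, Future}
import scala.concurrent.duration._

object Quickstart extends App {
  // An ExecutionContext decides which threads run a Future's body and its callbacks.
  val pool = Executors.newFixedThreadPool(4)
  implicit val ec: ExecutionContext = ExecutionContext.fromExecutorService(pool)

  // The computation below is submitted to the pool above.
  val sum: Future[Int] = Future(21 + 21)
  println(s"computed ${Await.result(sum, 5.seconds)}")

  pool.shutdown() // let the JVM exit; the pool's threads are not daemons
}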
Example 1
Source File: NaptimeModuleTest.scala From naptime with Apache License 2.0
package org.coursera.naptime

import java.util.Date
import javax.inject.Inject
import akka.stream.Materializer
import com.google.inject.Guice
import com.google.inject.Stage
import com.linkedin.data.schema.DataSchema
import com.linkedin.data.schema.DataSchemaUtil
import com.linkedin.data.schema.PrimitiveDataSchema
import com.linkedin.data.schema.RecordDataSchema
import org.coursera.naptime.model.KeyFormat
import org.coursera.naptime.resources.TopLevelCollectionResource
import org.coursera.naptime.router2.NaptimeRoutes
import org.junit.Test
import org.mockito.Mockito.mock
import org.scalatest.junit.AssertionsForJUnit
import play.api.libs.json.Json
import play.api.libs.json.OFormat

import scala.concurrent.ExecutionContext

object NaptimeModuleTest {
  case class User(name: String, createdAt: Date)

  object User {
    implicit val oFormat: OFormat[User] = Json.format[User]
  }

  class MyResource(implicit val executionContext: ExecutionContext, val materializer: Materializer)
      extends TopLevelCollectionResource[String, User] {
    override implicit def resourceFormat: OFormat[User] = User.oFormat
    override def keyFormat: KeyFormat[KeyType] = KeyFormat.stringKeyFormat
    override def resourceName: String = "myResource"
    implicit val fields = Fields

    def get(id: String) = Nap.get(ctx => ???)
  }

  object MyFakeModule extends NaptimeModule {
    override def configure(): Unit = {
      bindResource[MyResource]
      bind[MyResource].toInstance(mock(classOf[MyResource]))
      bindSchemaType[Date](DataSchemaUtil.dataSchemaTypeToPrimitiveDataSchema(DataSchema.Type.LONG))
    }
  }

  class OverrideTypesHelper @Inject()(val schemaOverrideTypes: NaptimeModule.SchemaTypeOverrides)
}

class NaptimeModuleTest extends AssertionsForJUnit {
  import NaptimeModuleTest._

  @Test
  def checkInferredOverrides(): Unit = {
    val injector = Guice.createInjector(Stage.DEVELOPMENT, MyFakeModule, NaptimeModule)
    val overrides = injector.getInstance(classOf[OverrideTypesHelper])
    assert(overrides.schemaOverrideTypes.size === 1)
    assert(overrides.schemaOverrideTypes.contains("java.util.Date"))
  }

  @Test
  def checkComputedOverrides(): Unit = {
    val injector = Guice.createInjector(Stage.DEVELOPMENT, MyFakeModule, NaptimeModule)
    val overrides = injector.getInstance(classOf[OverrideTypesHelper])
    val routes = injector.getInstance(classOf[NaptimeRoutes])
    assert(1 === routes.routerBuilders.size)
    val routerBuilder = routes.routerBuilders.head
    val inferredSchemaKeyed =
      routerBuilder.types.find(_.key == "org.coursera.naptime.NaptimeModuleTest.User").get
    assert(inferredSchemaKeyed.value.isInstanceOf[RecordDataSchema])
    val userSchema = inferredSchemaKeyed.value.asInstanceOf[RecordDataSchema]
    assert(2 === userSchema.getFields.size())
    val initialCreatedAtSchema = userSchema.getField("createdAt").getType.getDereferencedDataSchema
    assert(initialCreatedAtSchema.isInstanceOf[RecordDataSchema])
    assert(
      initialCreatedAtSchema
        .asInstanceOf[RecordDataSchema]
        .getDoc
        .contains("Unable to infer schema"))
    SchemaUtils.fixupInferredSchemas(userSchema, overrides.schemaOverrideTypes)
    val fixedCreatedAtSchema = userSchema.getField("createdAt").getType.getDereferencedDataSchema
    assert(fixedCreatedAtSchema.isInstanceOf[PrimitiveDataSchema])
  }
}
Example 2
Source File: ScalajHttpClient.scala From telegram with Apache License 2.0
package com.bot4s.telegram.clients

import java.net.Proxy
import java.nio.file.Files

import cats.instances.future._
import com.bot4s.telegram.api.RequestHandler
import com.bot4s.telegram.methods.{Request, JsonRequest, MultipartRequest, Response}
import com.bot4s.telegram.models.InputFile
import com.bot4s.telegram.marshalling
import io.circe.parser.parse
import io.circe.{Decoder, Encoder}
import scalaj.http.{Http, MultiPart}
import slogging.StrictLogging

import scala.concurrent.{ExecutionContext, Future, blocking}

class ScalajHttpClient(token: String, proxy: Proxy = Proxy.NO_PROXY, telegramHost: String = "api.telegram.org")
                      (implicit ec: ExecutionContext) extends RequestHandler[Future] with StrictLogging {

  val connectionTimeoutMs = 10000
  val readTimeoutMs = 50000

  private val apiBaseUrl = s"https://$telegramHost/bot$token/"

  def sendRequest[R, T <: Request[_]](request: T)(implicit encT: Encoder[T], decR: Decoder[R]): Future[R] = {
    val url = apiBaseUrl + request.methodName

    val scalajRequest = request match {
      case r: JsonRequest[_] =>
        Http(url)
          .postData(marshalling.toJson(request))
          .header("Content-Type", "application/json")

      case r: MultipartRequest[_] =>
        // InputFile.FileIds are encoded as query params.
        val (fileIds, files) = r.getFiles.partition {
          case (key, _: InputFile.FileId) => true
          case _ => false
        }

        val parts = files.map {
          case (camelKey, inputFile) =>
            val key = marshalling.snakenize(camelKey)
            inputFile match {
              case InputFile.FileId(id) =>
                throw new RuntimeException("InputFile.FileId must be encoded as a query param")
              case InputFile.Contents(filename, contents) =>
                MultiPart(key, filename, "application/octet-stream", contents)
              case InputFile.Path(path) =>
                MultiPart(key, path.getFileName.toString(),
                  "application/octet-stream",
                  Files.newInputStream(path),
                  Files.size(path),
                  _ => ())
              case other =>
                throw new RuntimeException(s"InputFile $other not supported")
            }
        }

        val fields = parse(marshalling.toJson(request)).fold(throw _, _.asObject.map {
          _.toMap.mapValues { json =>
            json.asString.getOrElse(marshalling.printer.pretty(json))
          }
        })

        val fileIdsParams = fileIds.map {
          case (key, inputFile: InputFile.FileId) =>
            marshalling.snakenize(key) -> inputFile.fileId
        }

        val params = fields.getOrElse(Map())

        Http(url).params(params ++ fileIdsParams).postMulti(parts: _*)
    }

    import marshalling.responseDecoder

    Future {
      blocking {
        scalajRequest
          .timeout(connectionTimeoutMs, readTimeoutMs)
          .proxy(proxy)
          .asString
      }
    } map { x =>
      if (x.isSuccess) marshalling.fromJson[Response[R]](x.body)
      else throw new RuntimeException(s"Error ${x.code} on request")
    } map (processApiResponse[R])
  }
}
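The Future { blocking { ... } } wrapper above is the detail worth noting: scala.concurrent.blocking marks a section as thread-blocking so that a BlockingContext-aware pool (such as the default global fork-join pool) can spawn compensating threads instead of starving. A minimal sketch of the same pattern, with a hypothetical fetch helper:

import scala.concurrent.{ExecutionContext, Future, blocking}

object BlockingIoExample {
  // Hypothetical helper: run a synchronous, thread-blocking HTTP read off the caller's thread.
  def fetch(url: String)(implicit ec: ExecutionContext): Future[String] =
    Future {
      blocking { // hints to a BlockingContext-aware pool that this task parks a thread
        scala.io.Source.fromURL(url).mkString
      }
    }
}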
Example 3
Source File: RateController.scala From drizzle-spark with Apache License 2.0
package org.apache.spark.streaming.scheduler

import java.io.ObjectInputStream
import java.util.concurrent.atomic.AtomicLong

import scala.concurrent.{ExecutionContext, Future}

import org.apache.spark.SparkConf
import org.apache.spark.streaming.scheduler.rate.RateEstimator
import org.apache.spark.util.{ThreadUtils, Utils}

// The scraped snippet omitted the enclosing class; the declaration and fields below are
// restored to match the upstream Spark source so the excerpt compiles.
private[streaming] abstract class RateController(val streamUID: Int, rateEstimator: RateEstimator)
    extends StreamingListener with Serializable {

  init()

  // Publishes the freshly computed rate to subscribers (e.g. receivers).
  protected def publish(rate: Long): Unit

  @transient
  implicit private var executionContext: ExecutionContext = _

  @transient
  private var rateLimit: AtomicLong = _

  // Called on construction and again after deserialization, since transient fields are not serialized.
  private def init() {
    executionContext = ExecutionContext.fromExecutorService(
      ThreadUtils.newDaemonSingleThreadExecutor("stream-rate-update"))
    rateLimit = new AtomicLong(-1L)
  }

  private def readObject(ois: ObjectInputStream): Unit = Utils.tryOrIOException {
    init()
  }

  private def computeAndPublish(time: Long, elems: Long, workDelay: Long, waitDelay: Long): Unit =
    Future[Unit] {
      val newRate = rateEstimator.compute(time, elems, workDelay, waitDelay)
      newRate.foreach { s =>
        rateLimit.set(s.toLong)
        publish(getLatestRate())
      }
    }

  def getLatestRate(): Long = rateLimit.get()

  override def onBatchCompleted(batchCompleted: StreamingListenerBatchCompleted) {
    val elements = batchCompleted.batchInfo.streamIdToInputInfo

    for {
      processingEnd <- batchCompleted.batchInfo.processingEndTime
      workDelay <- batchCompleted.batchInfo.processingDelay
      waitDelay <- batchCompleted.batchInfo.schedulingDelay
      elems <- elements.get(streamUID).map(_.numRecords)
    } computeAndPublish(processingEnd, elems, workDelay, waitDelay)
  }
}

object RateController {
  def isBackPressureEnabled(conf: SparkConf): Boolean =
    conf.getBoolean("spark.streaming.backpressure.enabled", false)
}
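The ExecutionContext usage to note here: computeAndPublish wraps each rate update in a Future on a dedicated single-threaded context, so the rate estimator runs off the listener-bus thread and successive updates are serialized rather than racing each other.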
Example 4
Source File: PreferencesFrontendService.scala From pertax-frontend with Apache License 2.0
package services

import com.kenshoo.play.metrics.Metrics
import config.ConfigDecorator
import controllers.auth.requests.UserRequest
import com.google.inject.{Inject, Singleton}
import metrics.HasMetrics
import models.{ActivatePaperlessActivatedResponse, ActivatePaperlessNotAllowedResponse, ActivatePaperlessRequiresUserActionResponse, ActivatePaperlessResponse}
import play.api.Mode.Mode
import play.api.i18n.{I18nSupport, Messages, MessagesApi}
import play.api.libs.json.{JsObject, Json}
import play.api.{Configuration, Environment, Logger}
import uk.gov.hmrc.play.bootstrap.http.DefaultHttpClient
import uk.gov.hmrc.crypto.PlainText
import uk.gov.hmrc.play.bootstrap.config.ServicesConfig
import uk.gov.hmrc.play.bootstrap.filters.frontend.crypto.SessionCookieCrypto
import uk.gov.hmrc.play.partials.HeaderCarrierForPartialsConverter
import util.Tools

import scala.concurrent.{ExecutionContext, Future}

@Singleton
class PreferencesFrontendService @Inject()(
  environment: Environment,
  runModeConfiguration: Configuration,
  val simpleHttp: DefaultHttpClient,
  val messagesApi: MessagesApi,
  val metrics: Metrics,
  val configDecorator: ConfigDecorator,
  val sessionCookieCrypto: SessionCookieCrypto,
  val tools: Tools,
  servicesConfig: ServicesConfig)(implicit ec: ExecutionContext)
    extends HeaderCarrierForPartialsConverter with HasMetrics with I18nSupport {

  val mode: Mode = environment.mode
  val preferencesFrontendUrl = servicesConfig.baseUrl("preferences-frontend")

  override def crypto: String => String = cookie => cookie

  def getPaperlessPreference()(implicit request: UserRequest[_]): Future[ActivatePaperlessResponse] = {
    def absoluteUrl = configDecorator.pertaxFrontendHost + request.uri

    def activatePaperless: Future[ActivatePaperlessResponse] =
      withMetricsTimer("get-activate-paperless") { timer =>
        val url =
          s"$preferencesFrontendUrl/paperless/activate?returnUrl=${tools.encryptAndEncode(absoluteUrl)}&returnLinkText=${tools
            .encryptAndEncode(Messages("label.continue"))}" // TODO remove ref to Messages

        simpleHttp.PUT[JsObject, ActivatePaperlessResponse](url, Json.obj("active" -> true)) map {
          case ActivatePaperlessActivatedResponse =>
            timer.completeTimerAndIncrementSuccessCounter()
            ActivatePaperlessActivatedResponse

          case response: ActivatePaperlessRequiresUserActionResponse =>
            timer.completeTimerAndIncrementSuccessCounter()
            response

          case ActivatePaperlessNotAllowedResponse =>
            timer.completeTimerAndIncrementFailedCounter()
            ActivatePaperlessNotAllowedResponse
        } recover {
          case e =>
            timer.completeTimerAndIncrementFailedCounter()
            Logger.warn("Error getting paperless preference record from preferences-frontend-service", e)
            ActivatePaperlessNotAllowedResponse
        }
      }

    if (request.isGovernmentGateway) {
      activatePaperless
    } else {
      Future.successful(ActivatePaperlessNotAllowedResponse)
    }
  }
}
Example 5
Source File: SessionAuditor.scala From pertax-frontend with Apache License 2.0
package controllers.auth

import com.google.inject.Inject
import controllers.auth.SessionAuditor._
import controllers.auth.requests.AuthenticatedRequest
import play.api.Logger
import play.api.libs.json.{Format, Json}
import play.api.mvc.Result
import uk.gov.hmrc.auth.core.retrieve.Credentials
import uk.gov.hmrc.auth.core.{ConfidenceLevel, Enrolment}
import uk.gov.hmrc.domain.{Nino, SaUtr}
import uk.gov.hmrc.http.HeaderCarrier
import uk.gov.hmrc.play.audit.http.connector.AuditConnector
import uk.gov.hmrc.play.audit.http.connector.AuditResult.{Failure, Success}
import uk.gov.hmrc.play.audit.model.ExtendedDataEvent
import util.AuditServiceTools

import scala.concurrent.{ExecutionContext, Future}

private[auth] class SessionAuditor @Inject()(auditConnector: AuditConnector)(implicit ec: ExecutionContext)
    extends AuditTags {

  val logger = Logger(this.getClass)

  def auditOnce[A](request: AuthenticatedRequest[A], result: Result)(implicit hc: HeaderCarrier): Future[Result] =
    request.session.get(sessionKey) match {
      case None =>
        logger.info(request.profile.toString)

        val eventDetail = UserSessionAuditEvent(request)

        val sendAuditEvent = auditConnector
          .sendExtendedEvent(
            ExtendedDataEvent(
              auditSource = AuditServiceTools.auditSource,
              auditType = auditType,
              detail = Json.toJson(eventDetail),
              tags = buildTags(request))
          )
          .recover {
            case e: Exception =>
              Logger.warn(s"Unable to audit: ${e.getMessage}")
              Failure("UserSessionAuditor.auditOncePerSession exception occurred whilst auditing", Some(e))
          }

        sendAuditEvent.map {
          case Success => result.addingToSession(sessionKey -> "true")(request)
          case _       => result
        }

      case _ => Future.successful(result)
    }
}

object SessionAuditor {
  val sessionKey = "sessionAudited"
  val auditType = "user-session-visit"

  case class UserSessionAuditEvent(
    nino: Option[Nino],
    credentials: Credentials,
    confidenceLevel: ConfidenceLevel,
    name: Option[String],
    saUtr: Option[SaUtr],
    allEnrolments: Set[Enrolment])

  object UserSessionAuditEvent {
    def apply[A](request: AuthenticatedRequest[A]): UserSessionAuditEvent = {
      val nino = request.nino
      val credentials = request.credentials
      val confidenceLevel = request.confidenceLevel
      val name = request.name map (_.toString)
      val saUtr = request.saEnrolment map (_.saUtr)
      val enrolments = request.enrolments

      UserSessionAuditEvent(nino, credentials, confidenceLevel, name, saUtr, enrolments)
    }

    implicit val credentialsFormats = Json.format[Credentials]
    implicit val formats: Format[UserSessionAuditEvent] = Json.format[UserSessionAuditEvent]
  }
}
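The ExecutionContext here drives a fire-then-map pipeline: recover converts a failed audit call into an AuditResult Failure instead of failing the user's request, and the final map only marks the session as audited when the connector reported Success, so a flaky audit backend never costs the user their response.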
Example 6
Source File: PersonalDetailsControllerSpec.scala From pertax-frontend with Apache License 2.0
package controllers.address

import config.ConfigDecorator
import controllers.auth.requests.UserRequest
import controllers.auth.{AuthJourney, WithActiveTabAction}
import controllers.controllershelpers.{AddressJourneyCachingHelper, PersonalDetailsCardGenerator}
import models.AddressJourneyTTLModel
import models.dto.AddressPageVisitedDto
import org.mockito.ArgumentCaptor
import org.mockito.Mockito.{times, verify, when}
import org.mockito.Matchers.{eq => meq, _}
import org.scalatestplus.mockito.MockitoSugar
import play.api.http.Status.OK
import play.api.libs.json.Json
import play.api.mvc.{MessagesControllerComponents, Request, Result}
import play.api.test.FakeRequest
import repositories.EditAddressLockRepository
import services.{LocalSessionCache, NinoDisplayService}
import uk.gov.hmrc.http.cache.client.CacheMap
import uk.gov.hmrc.play.audit.http.connector.{AuditConnector, AuditResult}
import uk.gov.hmrc.play.audit.model.DataEvent
import uk.gov.hmrc.renderer.TemplateRenderer
import util.UserRequestFixture.buildUserRequest
import util.{ActionBuilderFixture, BaseSpec, Fixtures, LocalPartialRetriever}
import views.html.interstitial.DisplayAddressInterstitialView
import views.html.personaldetails.{AddressAlreadyUpdatedView, CannotUseServiceView, PersonalDetailsView}

import scala.concurrent.{ExecutionContext, Future}

class PersonalDetailsControllerSpec extends AddressBaseSpec {
  val ninoDisplayService = mock[NinoDisplayService]

  trait LocalSetup extends AddressControllerSetup {
    when(ninoDisplayService.getNino(any(), any())).thenReturn {
      Future.successful(Some(Fixtures.fakeNino))
    }

    def currentRequest[A]: Request[A] = FakeRequest().asInstanceOf[Request[A]]

    def controller =
      new PersonalDetailsController(
        injected[PersonalDetailsCardGenerator],
        mockEditAddressLockRepository,
        ninoDisplayService,
        mockAuthJourney,
        addressJourneyCachingHelper,
        withActiveTabAction,
        mockAuditConnector,
        cc,
        displayAddressInterstitialView,
        injected[PersonalDetailsView]
      ) {}
  }

  "Calling AddressController.onPageLoad" should {
    "call citizenDetailsService.fakePersonDetails and return 200" in new LocalSetup {
      override def sessionCacheResponse: Option[CacheMap] =
        Some(CacheMap("id", Map("addressPageVisitedDto" -> Json.toJson(AddressPageVisitedDto(true)))))

      val result = controller.onPageLoad()(FakeRequest())

      status(result) shouldBe OK
      verify(mockLocalSessionCache, times(1))
        .cache(meq("addressPageVisitedDto"), meq(AddressPageVisitedDto(true)))(any(), any(), any())
      verify(mockEditAddressLockRepository, times(1)).get(any())
    }

    "send an audit event when user arrives on personal details page" in new LocalSetup {
      override def sessionCacheResponse: Option[CacheMap] =
        Some(CacheMap("id", Map("addressPageVisitedDto" -> Json.toJson(AddressPageVisitedDto(true)))))

      val result = controller.onPageLoad()(FakeRequest())
      val eventCaptor = ArgumentCaptor.forClass(classOf[DataEvent])

      status(result) shouldBe OK
      verify(mockAuditConnector, times(1)).sendEvent(eventCaptor.capture())(any(), any())
    }
  }
}
Example 7
Source File: TimeBoundObserver.scala From daml with Apache License 2.0
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.platform.testing

import com.daml.timer.Delayed
import io.grpc.Context
import io.grpc.stub.StreamObserver

import scala.concurrent.duration.FiniteDuration
import scala.concurrent.{ExecutionContext, Future, Promise}

final class TimeBoundObserver[T](duration: FiniteDuration)(
    implicit executionContext: ExecutionContext)
    extends StreamObserver[T] {

  private val promise = Promise[Vector[T]]
  private val buffer = Vector.newBuilder[T]

  Delayed.by(duration)(onCompleted())

  def result: Future[Vector[T]] = promise.future

  override def onNext(value: T): Unit = {
    buffer += value
  }

  override def onError(t: Throwable): Unit = {
    val _ = promise.tryFailure(t)
  }

  override def onCompleted(): Unit = {
    val _succeeded = promise.trySuccess(buffer.result())
    val _cancelled = Context.current().withCancellation().cancel(null)
  }
}
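The pattern above (a Promise completed either by incoming data or by a timer) generalizes beyond gRPC. A sketch under stated assumptions: timeBound, its parameters, and the injected ScheduledExecutorService are all invented for illustration, and whichever side finishes first wins the Promise.

import java.util.concurrent.{ScheduledExecutorService, TimeoutException}
import scala.concurrent.{ExecutionContext, Future, Promise}
import scala.concurrent.duration.FiniteDuration

object FutureTimeouts {
  // Fail `work` if it has not completed within `timeout`.
  def timeBound[T](work: Future[T], timeout: FiniteDuration)(
      implicit ec: ExecutionContext,
      scheduler: ScheduledExecutorService): Future[T] = {
    val p = Promise[T]()
    scheduler.schedule(
      new Runnable { def run(): Unit = { val _ = p.tryFailure(new TimeoutException) } },
      timeout.length,
      timeout.unit)
    work.onComplete(p.tryComplete) // tryComplete is a no-op if the timer fired first
    p.future
  }
}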
Example 8
Source File: MainWithEphemeralDirectory.scala From daml with Apache License 2.0
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.ledger.on.sql

import java.nio.file.Files

import akka.stream.Materializer
import com.daml.ledger.participant.state.kvutils.app.{
  Config,
  LedgerFactory,
  ParticipantConfig,
  ReadWriteService,
  Runner
}
import com.daml.lf.engine.Engine
import com.daml.logging.LoggingContext
import com.daml.resources.{ProgramResource, Resource, ResourceOwner}
import scopt.OptionParser

import scala.concurrent.ExecutionContext

object MainWithEphemeralDirectory {
  private val DirectoryPattern = "%DIR"

  def main(args: Array[String]): Unit = {
    new ProgramResource(new Runner("SQL Ledger", TestLedgerFactory).owner(args)).run()
  }

  object TestLedgerFactory extends LedgerFactory[ReadWriteService, ExtraConfig] {
    override val defaultExtraConfig: ExtraConfig = SqlLedgerFactory.defaultExtraConfig

    override def extraConfigParser(parser: OptionParser[Config[ExtraConfig]]): Unit =
      SqlLedgerFactory.extraConfigParser(parser)

    override def manipulateConfig(config: Config[ExtraConfig]): Config[ExtraConfig] =
      SqlLedgerFactory.manipulateConfig(config)

    override def readWriteServiceOwner(
        config: Config[ExtraConfig],
        participantConfig: ParticipantConfig,
        engine: Engine,
    )(
        implicit materializer: Materializer,
        logCtx: LoggingContext
    ): ResourceOwner[ReadWriteService] =
      new Owner(config, participantConfig, engine)

    class Owner(
        config: Config[ExtraConfig],
        participantConfig: ParticipantConfig,
        engine: Engine,
    )(implicit materializer: Materializer, logCtx: LoggingContext)
        extends ResourceOwner[ReadWriteService] {
      override def acquire()(
          implicit executionContext: ExecutionContext
      ): Resource[ReadWriteService] = {
        val directory = Files.createTempDirectory("ledger-on-sql-ephemeral-")
        val jdbcUrl = config.extra.jdbcUrl.map(_.replace(DirectoryPattern, directory.toString))
        SqlLedgerFactory
          .readWriteServiceOwner(
            config.copy(extra = config.extra.copy(jdbcUrl = jdbcUrl)),
            participantConfig,
            engine,
          )
          .acquire()
      }
    }
  }
}
Example 9
Source File: GrpcServerOwner.scala From daml with Apache License 2.0
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.platform.apiserver

import java.io.IOException
import java.net.{BindException, InetAddress, InetSocketAddress}
import java.util.concurrent.TimeUnit.SECONDS

import com.daml.metrics.Metrics
import com.daml.platform.apiserver.GrpcServerOwner._
import com.daml.ports.Port
import com.daml.resources.{Resource, ResourceOwner}
import com.google.protobuf.Message
import io.grpc.netty.NettyServerBuilder
import io.grpc._
import io.netty.channel.socket.nio.NioServerSocketChannel
import io.netty.handler.ssl.SslContext

import scala.concurrent.{ExecutionContext, Future}
import scala.util.control.NoStackTrace

final class GrpcServerOwner(
    address: Option[String],
    desiredPort: Port,
    maxInboundMessageSize: Int,
    sslContext: Option[SslContext] = None,
    interceptors: List[ServerInterceptor] = List.empty,
    metrics: Metrics,
    eventLoopGroups: ServerEventLoopGroups,
    services: Iterable[BindableService],
) extends ResourceOwner[Server] {

  override def acquire()(implicit executionContext: ExecutionContext): Resource[Server] = {
    val host = address.map(InetAddress.getByName).getOrElse(InetAddress.getLoopbackAddress)
    Resource(Future {
      val builder = NettyServerBuilder.forAddress(new InetSocketAddress(host, desiredPort.value))
      builder.sslContext(sslContext.orNull)
      builder.channelType(classOf[NioServerSocketChannel])
      builder.permitKeepAliveTime(10, SECONDS)
      builder.permitKeepAliveWithoutCalls(true)
      builder.directExecutor()
      builder.maxInboundMessageSize(maxInboundMessageSize)
      interceptors.foreach(builder.intercept)
      builder.intercept(new MetricsInterceptor(metrics))
      eventLoopGroups.populate(builder)
      services.foreach { service =>
        builder.addService(service)
        toLegacyService(service).foreach(builder.addService)
      }
      val server = builder.build()
      try {
        server.start()
      } catch {
        case e: IOException if e.getCause != null && e.getCause.isInstanceOf[BindException] =>
          throw new UnableToBind(desiredPort, e.getCause)
      }
      server
    })(server => Future(server.shutdown().awaitTermination()))
  }

  // This exposes the existing services under com.daml also under com.digitalasset.
  // This is necessary to allow applications built with an earlier version of the SDK
  // to still work.
  // The "proxy" services will not show up on the reflection service, because of the way it
  // processes service definitions via protobuf file descriptors.
  private def toLegacyService(service: BindableService): Option[ServerServiceDefinition] = {
    val `com.daml` = "com.daml"
    val `com.digitalasset` = "com.digitalasset"

    val damlDef = service.bindService()
    val damlDesc = damlDef.getServiceDescriptor
    // Only add "proxy" services if the service name actually contains com.daml.
    // Other registered services, like the reflection service, don't need the special treatment.
    if (damlDesc.getName.contains(`com.daml`)) {
      val digitalassetName = damlDesc.getName.replace(`com.daml`, `com.digitalasset`)
      val digitalassetDef = ServerServiceDefinition.builder(digitalassetName)
      damlDef.getMethods.forEach { methodDef =>
        val damlMethodDesc = methodDef.getMethodDescriptor
        val digitalassetMethodName =
          damlMethodDesc.getFullMethodName.replace(`com.daml`, `com.digitalasset`)
        val digitalassetMethodDesc =
          damlMethodDesc.toBuilder.setFullMethodName(digitalassetMethodName).build()
        val _ = digitalassetDef.addMethod(
          digitalassetMethodDesc.asInstanceOf[MethodDescriptor[Message, Message]],
          methodDef.getServerCallHandler.asInstanceOf[ServerCallHandler[Message, Message]]
        )
      }
      Option(digitalassetDef.build())
    } else None
  }
}

object GrpcServerOwner {
  final class UnableToBind(port: Port, cause: Throwable)
      extends RuntimeException(
        s"The API server was unable to bind to port $port. Terminate the process occupying the port, or choose a different one.",
        cause)
      with NoStackTrace
}
Example 10
Source File: JScheduledExecutorServiceWrapper.scala From gfc-concurrent with Apache License 2.0
package com.gilt.gfc.concurrent

import java.util.concurrent.{TimeUnit, ScheduledFuture, Delayed, Callable, ScheduledExecutorService => JScheduledExecutorService}

import scala.concurrent.{Future, ExecutionContext}
import scala.concurrent.duration.{Duration, FiniteDuration}

trait JScheduledExecutorServiceWrapper extends JExecutorServiceWrapper with AsyncScheduledExecutorService {
  override def executorService: JScheduledExecutorService

  override def scheduleWithFixedDelay(r: Runnable, initialDelay: Long, delay: Long, timeUnit: TimeUnit): ScheduledFuture[_] =
    executorService.scheduleWithFixedDelay(r, initialDelay, delay, timeUnit)

  override def scheduleAtFixedRate(r: Runnable, initialDelay: Long, period: Long, timeUnit: TimeUnit): ScheduledFuture[_] =
    executorService.scheduleAtFixedRate(r, initialDelay, period, timeUnit)

  override def schedule[V](c: Callable[V], delay: Long, timeUnit: TimeUnit): ScheduledFuture[V] =
    executorService.schedule(c, delay, timeUnit)

  override def schedule(r: Runnable, delay: Long, timeUnit: TimeUnit): ScheduledFuture[_] =
    executorService.schedule(r, delay, timeUnit)

  override def scheduleWithFixedDelay(initialDelay: FiniteDuration, delay: FiniteDuration)(f: => Unit): ScheduledFuture[_] = {
    scheduleWithFixedDelay(initialDelay.toMillis, delay.toMillis, TimeUnit.MILLISECONDS)(f)
  }

  override def scheduleWithFixedDelay(initialDelay: Long, delay: Long, timeUnit: TimeUnit)(f: => Unit): ScheduledFuture[_] = {
    scheduleWithFixedDelay(asRunnable(f), initialDelay, delay, timeUnit)
  }

  override def scheduleAtFixedRate(initialDelay: FiniteDuration, period: FiniteDuration)(f: => Unit): ScheduledFuture[_] = {
    scheduleAtFixedRate(initialDelay.toMillis, period.toMillis, TimeUnit.MILLISECONDS)(f)
  }

  override def scheduleAtFixedRate(initialDelay: Long, period: Long, timeUnit: TimeUnit)(f: => Unit): ScheduledFuture[_] = {
    scheduleAtFixedRate(asRunnable(f), initialDelay, period, timeUnit)
  }

  override def schedule[V](delay: FiniteDuration)(f: => V): ScheduledFuture[V] = {
    schedule(delay.toMillis, TimeUnit.MILLISECONDS)(f)
  }

  override def schedule[V](delay: Long, timeUnit: TimeUnit)(f: => V): ScheduledFuture[V] = {
    schedule(asCallable(f), delay, timeUnit)
  }

  override def asyncSchedule(initialDelay: FiniteDuration, delayUntilNext: FiniteDuration => FiniteDuration)
                            (futureTask: => Future[_])
                            (implicit executor: ExecutionContext): ScheduledFuture[_] = {
    val wrapper: ScheduledFutureWrapper[Unit] = new ScheduledFutureWrapper()
    def doSchedule(delay: FiniteDuration): Unit = {
      if (!wrapper.isCancelled) {
        val future: ScheduledFuture[Unit] = schedule(delay.max(Duration.Zero)) {
          val start = System.currentTimeMillis()
          try {
            futureTask.onComplete { _ =>
              // Task complete: Schedule again
              doSchedule(delayUntilNext(FiniteDuration(System.currentTimeMillis() - start, TimeUnit.MILLISECONDS)))
            }
          } catch {
            case e: Throwable =>
              // Exception in futureTask(): Schedule again
              doSchedule(delayUntilNext(FiniteDuration(System.currentTimeMillis() - start, TimeUnit.MILLISECONDS)))
              throw e
          }
        }
        // store future in wrapper so that it can be cancelled
        wrapper.set(future)
      }
    }
    doSchedule(initialDelay)
    wrapper
  }

  private class ScheduledFutureWrapper[V] extends ScheduledFuture[V] {
    @volatile private var delegate: ScheduledFuture[V] = _
    @volatile private var cancelled: Boolean = false

    def set(future: ScheduledFuture[V]): Unit = this.synchronized {
      if (!cancelled) {
        delegate = future
      } else {
        future.cancel(true)
      }
    }

    override def getDelay(p1: TimeUnit): Long = delegate.getDelay(p1)

    override def isCancelled: Boolean = cancelled

    override def get(): V = delegate.get

    override def get(p1: Long, p2: TimeUnit): V = delegate.get(p1, p2)

    override def cancel(p1: Boolean): Boolean = this.synchronized {
      cancelled = true
      delegate.cancel(p1)
    }

    override def isDone: Boolean = cancelled && delegate.isDone

    override def compareTo(p1: Delayed): Int = delegate.compareTo(p1)
  }
}
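For context, a hypothetical use of asyncSchedule from the trait above: the next run is scheduled only after the previous Future finishes, unlike scheduleAtFixedRate, which can overlap slow asynchronous work. The startPolling wrapper is invented; the scheduler parameter is assumed to be any instance mixing in the trait.

import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.Future
import scala.concurrent.duration._
import com.gilt.gfc.concurrent.AsyncScheduledExecutorService

object PollingExample {
  // Aim for one poll roughly every 5 seconds, measured from the start of each run,
  // but never begin a new poll before the previous Future has completed.
  def startPolling(scheduler: AsyncScheduledExecutorService) =
    scheduler.asyncSchedule(1.second, elapsed => 5.seconds - elapsed) {
      Future { println("polling...") }
    }
  // The returned ScheduledFuture can be cancelled to stop the whole chain.
}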
Example 11
Source File: BasicShabondiTest.scala From ohara with Apache License 2.0
package oharastream.ohara.shabondi

import java.util
import java.util.concurrent.{ExecutorService, Executors}

import com.google.common.util.concurrent.ThreadFactoryBuilder
import com.typesafe.scalalogging.Logger
import oharastream.ohara.common.data.Row
import oharastream.ohara.common.setting.TopicKey
import oharastream.ohara.common.util.{CommonUtils, Releasable}
import oharastream.ohara.kafka.TopicAdmin
import oharastream.ohara.shabondi.common.ShabondiUtils
import oharastream.ohara.shabondi.sink.SinkConfig
import oharastream.ohara.shabondi.source.SourceConfig
import oharastream.ohara.testing.WithBroker
import org.junit.After

import scala.collection.{immutable, mutable}
import scala.concurrent.{ExecutionContext, Future}
import scala.jdk.CollectionConverters._

private[shabondi] abstract class BasicShabondiTest extends WithBroker {
  protected val log = Logger(this.getClass())

  protected val brokerProps = testUtil.brokersConnProps
  protected val topicAdmin: TopicAdmin = TopicAdmin.of(brokerProps)

  protected val newThreadPool: () => ExecutorService = () =>
    Executors.newCachedThreadPool(new ThreadFactoryBuilder().setNameFormat(this.getClass.getSimpleName + "-").build())

  protected val countRows: (util.Queue[Row], Long, ExecutionContext) => Future[Long] =
    (queue, executionTime, ec) =>
      Future {
        log.debug("countRows begin...")
        val baseTime = System.currentTimeMillis()
        var count = 0L
        var running = true
        while (running) {
          val row = queue.poll()
          if (row != null) count += 1 else Thread.sleep(100)
          running = (System.currentTimeMillis() - baseTime) < executionTime
        }
        log.debug("countRows done")
        count
      }(ec)

  protected def createTopicKey = TopicKey.of("default", CommonUtils.randomString(5))

  protected def createTestTopic(topicKey: TopicKey): Unit =
    topicAdmin.topicCreator
      .numberOfPartitions(1)
      .numberOfReplications(1.toShort)
      .topicKey(topicKey)
      .create

  protected def defaultSourceConfig(
    sourceToTopics: Seq[TopicKey] = Seq.empty[TopicKey]
  ): SourceConfig = {
    import ShabondiDefinitions._
    val args = mutable.ArrayBuffer(
      GROUP_DEFINITION.key + "=" + CommonUtils.randomString(5),
      NAME_DEFINITION.key + "=" + CommonUtils.randomString(3),
      SHABONDI_CLASS_DEFINITION.key + "=" + classOf[ShabondiSource].getName,
      CLIENT_PORT_DEFINITION.key + "=8080",
      BROKERS_DEFINITION.key + "=" + testUtil.brokersConnProps
    )
    if (sourceToTopics.nonEmpty)
      args += s"${SOURCE_TO_TOPICS_DEFINITION.key}=${TopicKey.toJsonString(sourceToTopics.asJava)}"

    val rawConfig = ShabondiUtils.parseArgs(args.toArray)
    new SourceConfig(rawConfig)
  }

  protected def defaultSinkConfig(
    sinkFromTopics: Seq[TopicKey] = Seq.empty[TopicKey]
  ): SinkConfig = {
    import ShabondiDefinitions._
    val args = mutable.ArrayBuffer(
      GROUP_DEFINITION.key + "=" + CommonUtils.randomString(5),
      NAME_DEFINITION.key + "=" + CommonUtils.randomString(3),
      SHABONDI_CLASS_DEFINITION.key + "=" + classOf[ShabondiSink].getName,
      CLIENT_PORT_DEFINITION.key + "=8080",
      BROKERS_DEFINITION.key + "=" + testUtil.brokersConnProps
    )
    if (sinkFromTopics.nonEmpty)
      args += s"${SINK_FROM_TOPICS_DEFINITION.key}=${TopicKey.toJsonString(sinkFromTopics.asJava)}"

    val rawConfig = ShabondiUtils.parseArgs(args.toArray)
    new SinkConfig(rawConfig)
  }

  protected def singleRow(columnSize: Int, rowId: Int = 0): Row =
    KafkaSupport.singleRow(columnSize, rowId)

  protected def multipleRows(rowSize: Int): immutable.Iterable[Row] =
    KafkaSupport.multipleRows(rowSize)

  @After
  def tearDown(): Unit = {
    Releasable.close(topicAdmin)
  }
}
Example 12
Source File: ServiceCollieImpl.scala From ohara with Apache License 2.0
package oharastream.ohara.agent.docker

import java.util.concurrent.ExecutorService

import oharastream.ohara.agent.{ClusterKind, ClusterStatus, _}
import oharastream.ohara.client.configurator.ContainerApi.ContainerInfo
import oharastream.ohara.client.configurator.NodeApi.Node
import oharastream.ohara.common.setting.ObjectKey
import oharastream.ohara.common.util.Releasable

import scala.concurrent.duration.Duration
import scala.concurrent.{Await, ExecutionContext, Future}

// accessible to configurator
private[ohara] class ServiceCollieImpl(cacheTimeout: Duration, dataCollie: DataCollie, cacheThreadPool: ExecutorService)
    extends ServiceCollie {
  override val containerClient: DockerClient = DockerClient(dataCollie)

  private[this] val clusterCache: ServiceCache = ServiceCache.builder
    .frequency(cacheTimeout)
    // TODO: 5 * timeout is enough ??? by chia
    .supplier(() => Await.result(doClusters(ExecutionContext.fromExecutor(cacheThreadPool)), cacheTimeout * 5))
    // Give the process some time to complete the build before we remove it from the cache safely.
    .lazyRemove(cacheTimeout)
    .build()

  override val zookeeperCollie: ZookeeperCollie =
    new BasicCollieImpl(dataCollie, containerClient, clusterCache) with ZookeeperCollie
  override val brokerCollie: BrokerCollie =
    new BasicCollieImpl(dataCollie, containerClient, clusterCache) with BrokerCollie
  override val workerCollie: WorkerCollie =
    new BasicCollieImpl(dataCollie, containerClient, clusterCache) with WorkerCollie
  override val streamCollie: StreamCollie =
    new BasicCollieImpl(dataCollie, containerClient, clusterCache) with StreamCollie
  override val shabondiCollie: ShabondiCollie =
    new BasicCollieImpl(dataCollie, containerClient, clusterCache) with ShabondiCollie

  private[this] def doClusters(
    implicit executionContext: ExecutionContext
  ): Future[Seq[ClusterStatus]] =
    containerClient
      .containers()
      .map { allContainers =>
        def parse(
          kind: ClusterKind,
          toClusterStatus: (ObjectKey, Seq[ContainerInfo]) => ClusterStatus
        ): Seq[ClusterStatus] =
          allContainers
            .filter(container => Collie.matched(container.name, kind))
            .map(container => Collie.objectKeyOfContainerName(container.name) -> container)
            .groupBy(_._1)
            .map {
              case (clusterKey, value) => clusterKey -> value.map(_._2)
            }
            .map {
              case (clusterKey, containers) => toClusterStatus(clusterKey, containers)
            }
            .toSeq

        parse(ClusterKind.ZOOKEEPER, zookeeperCollie.toStatus) ++
          parse(ClusterKind.BROKER, brokerCollie.toStatus) ++
          parse(ClusterKind.WORKER, workerCollie.toStatus) ++
          parse(ClusterKind.STREAM, streamCollie.toStatus) ++
          parse(ClusterKind.SHABONDI, shabondiCollie.toStatus)
      }

  override def close(): Unit = {
    Releasable.close(containerClient)
    Releasable.close(clusterCache)
    Releasable.close(() => cacheThreadPool.shutdownNow())
  }

  override def verifyNode(node: Node)(implicit executionContext: ExecutionContext): Future[String] =
    containerClient
      .resources()
      .map { resources =>
        if (resources.getOrElse(node.hostname, Seq.empty).nonEmpty)
          s"succeed to check the docker resources on ${node.name}"
        else throw new IllegalStateException(s"the docker on ${node.hostname} is unavailable")
      }
}
Example 13
Source File: FakeK8SClient.scala From ohara with Apache License 2.0
package oharastream.ohara.agent.fake

import oharastream.ohara.agent.container.{ContainerClient, ContainerName, ContainerVolume}
import oharastream.ohara.agent.k8s._
import oharastream.ohara.client.configurator.ContainerApi.ContainerInfo
import oharastream.ohara.client.configurator.{BrokerApi, NodeApi, WorkerApi, ZookeeperApi}

import scala.concurrent.{ExecutionContext, Future}

class FakeK8SClient(isK8SNode: Boolean, k8sStatusInfo: Option[K8SStatusInfo], containerName: String) extends K8SClient {
  override def coordinatorUrl: String = "fake url"

  override def metricsUrl: Option[String] = None

  override def imageNames()(implicit executionContext: ExecutionContext): Future[Map[String, Seq[String]]] =
    Future.successful {
      Map("node1" -> Seq(ZookeeperApi.IMAGE_NAME_DEFAULT, BrokerApi.IMAGE_NAME_DEFAULT, WorkerApi.IMAGE_NAME_DEFAULT))
    }

  override def checkNode(nodeName: String)(implicit executionContext: ExecutionContext): Future[Report] =
    Future.successful(
      Report(nodeName, isK8SNode, k8sStatusInfo)
    )

  override def containers()(implicit executionContext: ExecutionContext): Future[Seq[ContainerInfo]] =
    Future.successful {
      Seq(
        ContainerInfo(
          nodeName = "node1",
          id = "0000",
          imageName = "fakeimage",
          state = "running",
          kind = "unknown",
          name = containerName,
          size = -1,
          portMappings = Seq.empty,
          environments = Map.empty,
          hostname = "host1"
        )
      )
    }

  override def remove(name: String)(implicit executionContext: ExecutionContext): Future[Unit] =
    Future.failed(new UnsupportedOperationException("FakeK8SClient does not support remove function"))

  override def log(name: String, sinceSeconds: Option[Long])(
    implicit executionContext: ExecutionContext
  ): Future[Map[ContainerName, String]] =
    containerNames(name).map(_.map(n => n -> s"fake k8s log for $name").toMap)

  override def nodeNameIPInfo()(implicit executionContext: ExecutionContext): Future[Seq[K8SJson.HostAliases]] =
    Future.successful(Seq.empty)

  override def containerCreator: K8SClient.ContainerCreator =
    throw new UnsupportedOperationException("FakeK8SClient does not support containerCreator function")

  override def forceRemove(name: String)(implicit executionContext: ExecutionContext): Future[Unit] =
    Future.failed(new UnsupportedOperationException("FakeK8SClient does not support force remove function"))

  override def nodes()(implicit executionContext: ExecutionContext): Future[Seq[K8SNodeReport]] =
    throw new UnsupportedOperationException("FakeK8SClient does not support nodes function")

  override def resources()(implicit executionContext: ExecutionContext): Future[Map[String, Seq[NodeApi.Resource]]] =
    Future.successful(Map.empty)

  override def volumeCreator: ContainerClient.VolumeCreator =
    throw new UnsupportedOperationException("FakeK8SClient does not support volumeCreator function")

  override def volumes()(
    implicit executionContext: ExecutionContext
  ): Future[Seq[ContainerVolume]] =
    throw new UnsupportedOperationException("FakeK8SClient does not support volumes function")

  override def removeVolumes(name: String)(implicit executionContext: ExecutionContext): Future[Unit] =
    throw new UnsupportedOperationException("FakeK8SClient does not support removeVolume function")

  override def close(): Unit = {
    // do nothing
  }
}
Example 14
Source File: K8SServiceCollieImpl.scala From ohara with Apache License 2.0
package oharastream.ohara.agent.k8s

import oharastream.ohara.agent._
import oharastream.ohara.client.configurator.NodeApi.Node

import scala.concurrent.{ExecutionContext, Future}

// accessible to configurator
private[ohara] class K8SServiceCollieImpl(dataCollie: DataCollie, val containerClient: K8SClient)
    extends ServiceCollie {
  override val zookeeperCollie: ZookeeperCollie =
    new K8SBasicCollieImpl(dataCollie, containerClient) with ZookeeperCollie
  override val brokerCollie: BrokerCollie =
    new K8SBasicCollieImpl(dataCollie, containerClient) with BrokerCollie
  override val workerCollie: WorkerCollie =
    new K8SBasicCollieImpl(dataCollie, containerClient) with WorkerCollie
  override val streamCollie: StreamCollie =
    new K8SBasicCollieImpl(dataCollie, containerClient) with StreamCollie
  override val shabondiCollie: ShabondiCollie =
    new K8SBasicCollieImpl(dataCollie, containerClient) with ShabondiCollie

  override def verifyNode(node: Node)(implicit executionContext: ExecutionContext): Future[String] =
    containerClient
      .checkNode(node.name)
      .map(report => {
        val statusInfo = report.statusInfo.getOrElse(K8SStatusInfo(false, s"${node.name} node doesn't exist."))
        if (statusInfo.isHealth) s"${node.name} node is running."
        else
          throw new IllegalStateException(s"${node.name} node isn't running a container. cause: ${statusInfo.message}")
      })

  override def close(): Unit = {
    // do nothing
  }
}
Example 15
Source File: K8SBasicCollieImpl.scala From ohara with Apache License 2.0
package oharastream.ohara.agent.k8s

import oharastream.ohara.agent.{ClusterStatus, Collie, DataCollie}
import oharastream.ohara.client.configurator.ClusterState
import oharastream.ohara.client.configurator.ContainerApi.ContainerInfo
import oharastream.ohara.client.configurator.NodeApi.Node
import oharastream.ohara.client.configurator.VolumeApi.Volume

import scala.concurrent.{ExecutionContext, Future}

private[this] abstract class K8SBasicCollieImpl(val dataCollie: DataCollie, val containerClient: K8SClient)
    extends Collie {
  override protected def doRemove(clusterInfo: ClusterStatus, beRemovedContainer: Seq[ContainerInfo])(
    implicit executionContext: ExecutionContext
  ): Future[Unit] =
    Future.sequence(beRemovedContainer.map(c => containerClient.remove(c.name))).map(_ => ())

  override protected def doForceRemove(clusterInfo: ClusterStatus, containerInfos: Seq[ContainerInfo])(
    implicit executionContext: ExecutionContext
  ): Future[Unit] =
    Future.sequence(containerInfos.map(c => containerClient.forceRemove(c.name))).map(_ => ())

  override def clusters()(
    implicit executionContext: ExecutionContext
  ): Future[Seq[ClusterStatus]] =
    containerClient
      .containers()
      .map(_.filter(container => Collie.matched(container.name, kind)))
      .map(
        _.map(container => Collie.objectKeyOfContainerName(container.name) -> container)
          .groupBy(_._1)
          .map {
            case (objectKey, value) => objectKey -> value.map(_._2)
          }
          .map {
            case (objectKey, containers) => toStatus(objectKey, containers)
          }
          .toSeq
      )

  override protected def toClusterState(containers: Seq[ContainerInfo]): Option[ClusterState] =
    if (containers.isEmpty) None
    else {
      // we use a "pod" as a container of ohara cluster, so it is more easy to define a cluster state than docker
      // since a "pod" in k8s is actually an application with multiple containers...
      if (containers.exists(_.state == K8sContainerState.RUNNING.name)) Some(ClusterState.RUNNING)
      else if (containers.exists(_.state == K8sContainerState.FAILED.name)) Some(ClusterState.FAILED)
      else if (containers.exists(_.state == K8sContainerState.PENDING.name)) Some(ClusterState.PENDING)
      // All containers in the pod have terminated in success, BUT it is still failed :(
      else if (containers.exists(_.state == K8sContainerState.SUCCEEDED.name)) Some(ClusterState.FAILED)
      else Some(ClusterState.UNKNOWN)
    }

  //----------------------------[override helper methods]----------------------------//
  override protected def doCreator(
    executionContext: ExecutionContext,
    containerInfo: ContainerInfo,
    node: Node,
    route: Map[String, String],
    arguments: Seq[String],
    volumeMaps: Map[Volume, String]
  ): Future[Unit] =
    containerClient.containerCreator
      .imageName(containerInfo.imageName)
      .portMappings(
        containerInfo.portMappings.map(portMapping => portMapping.hostPort -> portMapping.containerPort).toMap
      )
      .nodeName(containerInfo.nodeName)
      .hostname(containerInfo.hostname)
      .envs(containerInfo.environments)
      .name(containerInfo.name)
      .threadPool(executionContext)
      .arguments(arguments)
      .volumeMaps(volumeMaps)
      .create()

  override protected def postCreate(
    clusterStatus: ClusterStatus,
    existentNodes: Map[Node, ContainerInfo],
    routes: Map[String, String],
    volumeMaps: Map[Volume, String]
  )(implicit executionContext: ExecutionContext): Future[Unit] = Future.unit
}
Example 16
Source File: TestContainerCreator.scala From ohara with Apache License 2.0
package oharastream.ohara.agent.docker

import java.util.Objects

import oharastream.ohara.client.configurator.VolumeApi.Volume
import oharastream.ohara.common.rule.OharaTest
import oharastream.ohara.common.util.CommonUtils
import org.junit.Test
import org.scalatest.matchers.should.Matchers._

import scala.concurrent.{ExecutionContext, Future}

class TestContainerCreator extends OharaTest {
  private[this] def fake(): DockerClient.ContainerCreator =
    (
      nodeName: String,
      hostname: String,
      imageName: String,
      volumeMaps: Map[Volume, String],
      name: String,
      command: Option[String],
      arguments: Seq[String],
      ports: Map[Int, Int],
      envs: Map[String, String],
      routes: Map[String, String],
      _: ExecutionContext
    ) =>
      Future.successful {
        // we check only the required arguments
        CommonUtils.requireNonEmpty(nodeName)
        CommonUtils.requireNonEmpty(hostname)
        CommonUtils.requireNonEmpty(imageName)
        CommonUtils.requireNonEmpty(name)
        Objects.requireNonNull(command)
        Objects.requireNonNull(ports)
        Objects.requireNonNull(envs)
        Objects.requireNonNull(routes)
        Objects.requireNonNull(arguments)
        Objects.requireNonNull(volumeMaps)
      }

  @Test
  def nullHostname(): Unit = an[NullPointerException] should be thrownBy fake().hostname(null)

  @Test
  def emptyHostname(): Unit = an[IllegalArgumentException] should be thrownBy fake().hostname("")

  @Test
  def nullImageName(): Unit = an[NullPointerException] should be thrownBy fake().imageName(null)

  @Test
  def emptyImageName(): Unit = an[IllegalArgumentException] should be thrownBy fake().imageName("")

  @Test
  def nullName(): Unit = an[NullPointerException] should be thrownBy fake().name(null)

  @Test
  def emptyName(): Unit = an[IllegalArgumentException] should be thrownBy fake().name("")

  @Test
  def nullCommand(): Unit = an[NullPointerException] should be thrownBy fake().command(null)

  @Test
  def emptyCommand(): Unit = fake().command("")

  @Test
  def nullPorts(): Unit = an[NullPointerException] should be thrownBy fake().portMappings(null)

  @Test
  def emptyPorts(): Unit = fake().portMappings(Map.empty)

  @Test
  def nullEnvs(): Unit = an[NullPointerException] should be thrownBy fake().envs(null)

  @Test
  def emptyEnvs(): Unit = fake().envs(Map.empty)

  @Test
  def nullRoute(): Unit = an[NullPointerException] should be thrownBy fake().routes(null)

  @Test
  def emptyRoute(): Unit = fake().routes(Map.empty)

  @Test
  def nullArguments(): Unit = an[NullPointerException] should be thrownBy fake().arguments(null)

  @Test
  def emptyArguments(): Unit = fake().arguments(Seq.empty)

  @Test
  def testExecuteWithoutRequireArguments(): Unit =
    // At least assign imageName
    an[NullPointerException] should be thrownBy fake().create()
}
Example 17
Source File: TestK8SServiceCollieImpl.scala From ohara with Apache License 2.0
package oharastream.ohara.agent.k8s

import java.util.concurrent.TimeUnit

import oharastream.ohara.agent.DataCollie
import oharastream.ohara.agent.fake.FakeK8SClient
import oharastream.ohara.client.configurator.NodeApi
import oharastream.ohara.client.configurator.NodeApi.{Node, Resource}
import oharastream.ohara.common.rule.OharaTest
import org.junit.Test
import org.scalatest.matchers.should.Matchers._

import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.duration.Duration
import scala.concurrent.{Await, ExecutionContext, Future}

class TestK8SServiceCollieImpl extends OharaTest {
  @Test
  def testResource(): Unit = {
    val nodeCache = (1 to 3).map(x => Node(s"node$x", "user", "password"))
    val dataCollie = DataCollie(nodeCache)

    val k8sClient = new FakeK8SClient(false, None, "container1") {
      override def resources()(
        implicit executionContext: ExecutionContext
      ): Future[Map[String, Seq[NodeApi.Resource]]] =
        Future.successful {
          Map(
            "node1" -> Seq(Resource.cpu(8, Option(2.0)), Resource.memory(1024 * 1024 * 1024 * 100, Option(5.0))),
            "node2" -> Seq(Resource.cpu(8, Option(1.0)), Resource.memory(1024 * 1024 * 1024 * 100, Option(5.0))),
            "node3" -> Seq(Resource.cpu(8, Option(3.0)), Resource.memory(1024 * 1024 * 1024 * 100, Option(5.0)))
          )
        }
    }

    val k8sServiceCollieImpl = new K8SServiceCollieImpl(dataCollie, k8sClient)
    val resource = result(k8sServiceCollieImpl.resources())
    resource.size shouldBe 3
    val nodeNames = resource.keys.toSeq
    nodeNames(0) shouldBe "node1"
    nodeNames(1) shouldBe "node2"
    nodeNames(2) shouldBe "node3"

    val node1Resource: Seq[Resource] =
      resource.filter(x => x._1 == "node1").flatMap(x => x._2).toSeq

    node1Resource(0).name shouldBe "CPU"
    node1Resource(0).unit shouldBe "cores"
    node1Resource(0).used.get shouldBe 2.0
    node1Resource(0).value shouldBe 8

    node1Resource(1).name shouldBe "Memory"
    node1Resource(1).unit shouldBe "bytes"
    node1Resource(1).used.get shouldBe 5.0
    node1Resource(1).value shouldBe 1024 * 1024 * 1024 * 100
  }

  @Test
  def testEmptyResource(): Unit = {
    val nodeCache = (1 to 3).map(x => Node(s"node$x", "user", "password"))
    val dataCollie = DataCollie(nodeCache)

    val k8sClient = new FakeK8SClient(false, None, "container1") {
      override def resources()(
        implicit executionContext: ExecutionContext
      ): Future[Map[String, Seq[NodeApi.Resource]]] = Future.successful(Map.empty)
    }

    val k8sServiceCollieImpl = new K8SServiceCollieImpl(dataCollie, k8sClient)
    val resource = result(k8sServiceCollieImpl.resources())
    resource.size shouldBe 0
  }

  private[this] def result[T](future: Future[T]): T = Await.result(future, Duration(10, TimeUnit.SECONDS))
}
Example 18
Source File: FakeWorkerCollie.scala From ohara with Apache License 2.0
package oharastream.ohara.configurator.fake

import java.util.concurrent.ConcurrentSkipListMap

import oharastream.ohara.agent.container.ContainerClient
import oharastream.ohara.agent.{ClusterKind, DataCollie, NoSuchClusterException, WorkerCollie}
import oharastream.ohara.client.configurator.WorkerApi.WorkerClusterInfo
import oharastream.ohara.client.kafka.ConnectorAdmin
import oharastream.ohara.common.setting.ObjectKey

import scala.jdk.CollectionConverters._
import scala.concurrent.{ExecutionContext, Future}

private[configurator] class FakeWorkerCollie(
  val containerClient: ContainerClient,
  dataCollie: DataCollie,
  wkConnectionProps: String
) extends FakeCollie(dataCollie)
    with WorkerCollie {
  private[this] val fakeClientCache = new ConcurrentSkipListMap[WorkerClusterInfo, FakeConnectorAdmin](
    (o1: WorkerClusterInfo, o2: WorkerClusterInfo) => o1.key.compareTo(o2.key)
  )

  override def creator: WorkerCollie.ClusterCreator =
    (_, creation) =>
      Future.successful(
        addCluster(
          key = creation.key,
          kind = ClusterKind.WORKER,
          nodeNames = creation.nodeNames ++ clusterCache.asScala
            .find(_._1 == creation.key)
            .map(_._2.nodeNames)
            .getOrElse(Set.empty),
          imageName = creation.imageName,
          ports = creation.ports
        )
      )

  override def connectorAdmin(
    cluster: WorkerClusterInfo
  )(implicit executionContext: ExecutionContext): Future[ConnectorAdmin] =
    if (wkConnectionProps != null)
      Future.successful(
        ConnectorAdmin.builder.workerClusterKey(ObjectKey.of("fake", "fake")).connectionProps(wkConnectionProps).build
      )
    else if (clusterCache.keySet().asScala.contains(cluster.key)) {
      val fake = FakeConnectorAdmin()
      val r = fakeClientCache.putIfAbsent(cluster, fake)
      Future.successful(if (r == null) fake else r)
    } else Future.failed(new NoSuchClusterException(s"cluster:${cluster.key} is not running"))
}
Example 19
Source File: FakeBrokerCollie.scala From ohara with Apache License 2.0
package oharastream.ohara.configurator.fake

import java.util.concurrent.ConcurrentSkipListMap

import oharastream.ohara.agent.container.ContainerClient
import oharastream.ohara.agent.{BrokerCollie, ClusterKind, DataCollie, NoSuchClusterException}
import oharastream.ohara.client.configurator.BrokerApi.BrokerClusterInfo
import oharastream.ohara.common.annotations.VisibleForTesting
import oharastream.ohara.kafka.TopicAdmin

import scala.jdk.CollectionConverters._
import scala.concurrent.{ExecutionContext, Future}

private[configurator] class FakeBrokerCollie(
  val containerClient: ContainerClient,
  dataCollie: DataCollie,
  bkConnectionProps: String
) extends FakeCollie(dataCollie)
    with BrokerCollie {
  @VisibleForTesting
  private[configurator] val fakeAdminCache = new ConcurrentSkipListMap[BrokerClusterInfo, FakeTopicAdmin](
    (o1: BrokerClusterInfo, o2: BrokerClusterInfo) => o1.key.compareTo(o2.key)
  )

  override def creator: BrokerCollie.ClusterCreator =
    (_, creation) =>
      Future.successful(
        addCluster(
          key = creation.key,
          kind = ClusterKind.BROKER,
          nodeNames = creation.nodeNames ++ clusterCache.asScala
            .find(_._1 == creation.key)
            .map(_._2.nodeNames)
            .getOrElse(Set.empty),
          imageName = creation.imageName,
          ports = creation.ports
        )
      )

  override def topicAdmin(
    brokerClusterInfo: BrokerClusterInfo
  )(implicit executionContext: ExecutionContext): Future[TopicAdmin] =
    if (bkConnectionProps != null) Future.successful(TopicAdmin.of(bkConnectionProps))
    else if (clusterCache.keySet().asScala.contains(brokerClusterInfo.key)) {
      val fake = new FakeTopicAdmin
      val r = fakeAdminCache.putIfAbsent(brokerClusterInfo, fake)
      Future.successful(if (r == null) fake else r)
    } else Future.failed(new NoSuchClusterException(s"cluster:${brokerClusterInfo.key} is not running"))
}
Example 20
Source File: ObjectRoute.scala From ohara with Apache License 2.0
package oharastream.ohara.configurator.route

import akka.http.scaladsl.server
import oharastream.ohara.client.configurator.ObjectApi
import oharastream.ohara.client.configurator.ObjectApi.{Creation, OBJECTS_PREFIX_PATH, ObjectInfo, Updating}
import oharastream.ohara.common.setting.SettingDef
import oharastream.ohara.common.util.CommonUtils
import oharastream.ohara.configurator.store.DataStore

import scala.annotation.nowarn
import scala.concurrent.{ExecutionContext, Future}

private[configurator] object ObjectRoute {
  private[this] def toObject(creation: Creation): Future[ObjectInfo] =
    Future.successful(
      ObjectInfo(
        creation.raw,
        // add the last timestamp manually since there is no explicit field in ObjectInfo
        CommonUtils.current()
      )
    )

  @nowarn("cat=deprecation")
  def apply(implicit store: DataStore, executionContext: ExecutionContext): server.Route =
    RouteBuilder[Creation, Updating, ObjectInfo]()
      .prefixOfPlural(OBJECTS_PREFIX_PATH)
      .prefixOfSingular(SettingDef.Reference.OBJECT.name().toLowerCase)
      .hookOfCreation(toObject)
      .hookOfUpdating(
        (key, updating, previousOption) =>
          toObject(previousOption match {
            case None => new Creation(updating.raw)
            case Some(previous) =>
              ObjectApi.access.request.key(key).settings(previous.settings).settings(updating.raw).creation
          })
      )
      .hookOfGet(Future.successful(_))
      .hookOfList(Future.successful(_))
      .hookBeforeDelete(_ => Future.unit)
      .build()
}
Example 21
Source File: ValidationRoute.scala From ohara with Apache License 2.0
package oharastream.ohara.configurator.route

import akka.http.scaladsl.marshallers.sprayjson.SprayJsonSupport._
import akka.http.scaladsl.model.{ContentTypes, _}
import akka.http.scaladsl.server
import akka.http.scaladsl.server.Directives.{as, complete, entity, path, pathPrefix, put, _}
import oharastream.ohara.agent.WorkerCollie
import oharastream.ohara.client.configurator.ConnectorApi
import oharastream.ohara.client.configurator.ConnectorApi.Creation
import oharastream.ohara.client.configurator.ValidationApi._
import oharastream.ohara.configurator.store.DataStore

import scala.concurrent.ExecutionContext

private[configurator] object ValidationRoute {
  def apply(
    implicit dataStore: DataStore,
    workerCollie: WorkerCollie,
    executionContext: ExecutionContext
  ): server.Route =
    pathPrefix(VALIDATION_KIND) {
      path(ConnectorApi.KIND) {
        put {
          entity(as[Creation])(
            req =>
              complete(
                connectorAdmin(req.workerClusterKey) { (_, connectorAdmin) =>
                  connectorAdmin
                    .connectorValidator()
                    .settings(req.plain)
                    .className(req.className)
                    // the topic name is composed of group and name. However, the kafka topic is still a pure string.
                    // Hence, we can't just push an Ohara topic "key" into a kafka topic "name".
                    // The topic name is required for a connector, so we have to fill the field when starting the connector.
                    .topicKeys(req.topicKeys)
                    // add the connector key manually since the arguments exposed to the user are "group" and "name" rather than "key"
                    .connectorKey(req.key)
                    .run()
                }.map(settingInfo => HttpEntity(ContentTypes.`application/json`, settingInfo.toJsonString))
              )
          )
        }
      }
    }
}
Example 22
Source File: ContainerRoute.scala From ohara with Apache License 2.0
package oharastream.ohara.configurator.route

import akka.http.scaladsl.marshallers.sprayjson.SprayJsonSupport._
import akka.http.scaladsl.server
import akka.http.scaladsl.server.Directives._
import oharastream.ohara.agent.ServiceCollie
import oharastream.ohara.client.configurator.ContainerApi
import oharastream.ohara.client.configurator.ContainerApi._
import oharastream.ohara.common.setting.ObjectKey
import spray.json.DefaultJsonProtocol._

import scala.annotation.nowarn
import scala.concurrent.ExecutionContext

object ContainerRoute {
  @nowarn("cat=deprecation")
  def apply(implicit serviceCollie: ServiceCollie, executionContext: ExecutionContext): server.Route =
    path((ContainerApi.CONTAINER_PREFIX_PATH | ContainerApi.KIND) / Segment)({ clusterName =>
      parameter(GROUP_KEY ? GROUP_DEFAULT) { group =>
        get {
          complete(
            serviceCollie
              .clusters()
              .map(_.filter(_.key == ObjectKey.of(group, clusterName)).map { cluster =>
                ContainerGroup(
                  clusterKey = ObjectKey.of(group, clusterName),
                  clusterType = cluster.kind.toString.toLowerCase,
                  containers = cluster.containers
                )
              })
          )
        }
      }
    })
}
Example 23
Source File: VolumeRoute.scala From ohara with Apache License 2.0
package oharastream.ohara.configurator.route

import akka.http.scaladsl.server
import oharastream.ohara.client.configurator.VolumeApi
import oharastream.ohara.client.configurator.VolumeApi.{Creation, Updating, Volume}
import oharastream.ohara.common.util.CommonUtils
import oharastream.ohara.configurator.store.DataStore
import spray.json.DeserializationException

import scala.concurrent.{ExecutionContext, Future}

private[configurator] object VolumeRoute {
  private[this] def toVolume(creation: Creation): Future[Volume] =
    Future.successful(
      Volume(
        group = creation.group,
        name = creation.name,
        nodeNames = creation.nodeNames,
        path = creation.path,
        state = None,
        error = None,
        tags = creation.tags,
        lastModified = CommonUtils.current()
      )
    )

  def apply(implicit store: DataStore, executionContext: ExecutionContext): server.Route =
    RouteBuilder[Creation, Updating, Volume]()
      .prefixOfPlural("volumes")
      .prefixOfSingular(VolumeApi.KIND)
      .hookOfCreation(toVolume)
      .hookOfUpdating(
        (key, updating, previousOption) =>
          toVolume(previousOption match {
            case None =>
              if (updating.nodeNames.isEmpty)
                throw DeserializationException("nodeNames is required", fieldNames = List("nodeNames"))
              if (updating.path.isEmpty)
                throw DeserializationException("path is required", fieldNames = List("path"))
              Creation(
                group = key.group(),
                name = key.name(),
                nodeNames = updating.nodeNames.get,
                path = updating.path.get,
                tags = updating.tags.getOrElse(Map.empty)
              )
            case Some(previous) =>
              Creation(
                group = key.group(),
                name = key.name(),
                nodeNames = updating.nodeNames.getOrElse(previous.nodeNames),
                path = updating.path.getOrElse(previous.path),
                tags = updating.tags.getOrElse(previous.tags)
              )
          })
      )
      .hookOfGet(Future.successful(_))
      .hookOfList(Future.successful(_))
      .hookBeforeDelete(_ => Future.unit)
      .build()
}
Example 24
Source File: TestConfiguratorMain.scala From ohara with Apache License 2.0 | 5 votes |
package oharastream.ohara.configurator

import java.time.Duration
import java.util.concurrent.{Executors, TimeUnit}

import oharastream.ohara.common.rule.OharaTest
import oharastream.ohara.common.util.{CommonUtils, Releasable}
import oharastream.ohara.configurator.Configurator.Mode
import org.junit.{After, Test}
import org.scalatest.matchers.should.Matchers._

import scala.concurrent.{ExecutionContext, Future}

class TestConfiguratorMain extends OharaTest {
  @Test
  def illegalK8sUrl(): Unit =
    intercept[IllegalArgumentException] {
      Configurator.main(Array[String](Configurator.K8S_KEY, s"http://localhost:${CommonUtils.availablePort()}"))
    }.getMessage should include("unable to access")

  @Test
  def emptyK8sArgument(): Unit =
    an[IllegalArgumentException] should be thrownBy Configurator.main(Array[String](Configurator.K8S_KEY, ""))

  @Test
  def nullK8sArgument(): Unit =
    an[IllegalArgumentException] should be thrownBy Configurator.main(Array[String](Configurator.K8S_KEY))

  @Test
  def fakeWithK8s(): Unit =
    an[IllegalArgumentException] should be thrownBy Configurator.main(
      Array[String](Configurator.K8S_KEY, "http://localhost", Configurator.FAKE_KEY, "true")
    )

  @Test
  def k8sWithFake(): Unit =
    an[IllegalArgumentException] should be thrownBy Configurator.main(
      Array[String](Configurator.FAKE_KEY, "true", Configurator.K8S_KEY, "http://localhost")
    )

  @Test
  def testFakeMode(): Unit =
    runMain(
      Array[String](Configurator.HOSTNAME_KEY, "localhost", Configurator.PORT_KEY, "0", Configurator.FAKE_KEY, "true"),
      configurator => configurator.mode shouldBe Mode.FAKE
    )

  @Test
  def testDockerMode(): Unit =
    runMain(
      Array[String](Configurator.HOSTNAME_KEY, "localhost", Configurator.PORT_KEY, "0"),
      configurator => configurator.mode shouldBe Mode.DOCKER
    )

  private[this] def runMain(args: Array[String], action: Configurator => Unit): Unit = {
    Configurator.GLOBAL_CONFIGURATOR_SHOULD_CLOSE = false
    val service = ExecutionContext.fromExecutorService(Executors.newSingleThreadExecutor())
    Future[Unit](Configurator.main(args))(service)
    try {
      CommonUtils.await(() => Configurator.GLOBAL_CONFIGURATOR_RUNNING, Duration.ofSeconds(30))
      action(Configurator.GLOBAL_CONFIGURATOR)
    } finally {
      Configurator.GLOBAL_CONFIGURATOR_SHOULD_CLOSE = true
      service.shutdownNow()
      service.awaitTermination(60, TimeUnit.SECONDS)
    }
  }

  @After
  def tearDown(): Unit = {
    Configurator.GLOBAL_CONFIGURATOR_SHOULD_CLOSE = false
    Releasable.close(Configurator.GLOBAL_CONFIGURATOR)
    // clear the reference so the next test starts from a clean state
    Configurator.GLOBAL_CONFIGURATOR = null
  }
}
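The runMain helper above uses ExecutionContext.fromExecutorService so that the blocking Configurator.main call runs on a dedicated thread and the pool can be shut down deterministically afterwards. A minimal sketch of that lifecycle, assuming nothing beyond the standard library (the sleep and printed message are purely illustrative):

import java.util.concurrent.{Executors, TimeUnit}

import scala.concurrent.duration.Duration
import scala.concurrent.{Await, ExecutionContext, Future}

object SingleThreadMainSketch extends App {
  // a dedicated single-thread pool keeps the blocking task off the caller's thread
  val service = ExecutionContext.fromExecutorService(Executors.newSingleThreadExecutor())
  try {
    val task = Future { Thread.sleep(500); println("main finished") }(service)
    Await.ready(task, Duration(5, TimeUnit.SECONDS))
  } finally {
    // fromExecutorService returns an ExecutionContextExecutorService, so the
    // usual ExecutorService shutdown protocol applies
    service.shutdownNow()
    service.awaitTermination(10, TimeUnit.SECONDS)
  }
}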
Example 25
Source File: TestConcurrentAccess.scala From ohara with Apache License 2.0 | 5 votes |
package oharastream.ohara.configurator import java.util.concurrent.atomic.AtomicInteger import java.util.concurrent.{Executors, TimeUnit} import oharastream.ohara.client.configurator.NodeApi import oharastream.ohara.common.rule.OharaTest import oharastream.ohara.common.util.{CommonUtils, Releasable} import org.junit.{After, Test} import org.scalatest.matchers.should.Matchers._ import scala.concurrent.duration.Duration import scala.concurrent.{Await, ExecutionContext, ExecutionContextExecutor, Future} class TestConcurrentAccess extends OharaTest { private[this] val configurator = Configurator.builder.fake().build() private[this] val nodeApi = NodeApi.access.hostname(configurator.hostname).port(configurator.port) private[this] def result[T](f: Future[T]): T = Await.result(f, Duration(10, TimeUnit.SECONDS)) @Test def deletedObjectShouldDisappearFromGet(): Unit = { val threadCount = 10 val threadsPool = Executors.newFixedThreadPool(threadCount) val unmatchedCount = new AtomicInteger() implicit val executionContext: ExecutionContextExecutor = ExecutionContext.fromExecutor(threadsPool) (0 until threadCount).foreach { _ => threadsPool.execute { () => val nodeName = CommonUtils.randomString(10) val nodes = result( nodeApi.request .nodeName(nodeName) .user(CommonUtils.randomString(10)) .password(CommonUtils.randomString(10)) .create() .flatMap(node => nodeApi.delete(node.key)) .flatMap(_ => nodeApi.list()) ) if (nodes.exists(_.hostname == nodeName)) unmatchedCount.incrementAndGet() } } threadsPool.shutdown() threadsPool.awaitTermination(60, TimeUnit.SECONDS) shouldBe true unmatchedCount.get() shouldBe 0 } @After def tearDown(): Unit = Releasable.close(configurator) }
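The test above drives one JDK thread pool both directly, via threadsPool.execute, and as a Scala ExecutionContext, via ExecutionContext.fromExecutor. A trimmed-down sketch of that fan-out-then-await shape using only the JDK and an atomic counter:

import java.util.concurrent.atomic.AtomicInteger
import java.util.concurrent.{Executors, TimeUnit}

import scala.concurrent.{ExecutionContext, ExecutionContextExecutor}

object FanOutSketch extends App {
  val threadsPool = Executors.newFixedThreadPool(4)
  // the same pool can also back Future callbacks, exactly as in the test above
  implicit val executionContext: ExecutionContextExecutor = ExecutionContext.fromExecutor(threadsPool)

  val counter = new AtomicInteger()
  (0 until 100).foreach(_ => threadsPool.execute(() => counter.incrementAndGet()))

  threadsPool.shutdown()                             // stop accepting new work
  threadsPool.awaitTermination(10, TimeUnit.SECONDS) // drain in-flight tasks
  assert(counter.get() == 100)
}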
Example 26
Source File: ValidationApi.scala From ohara with Apache License 2.0 | 5 votes |
package oharastream.ohara.client.configurator

import oharastream.ohara.common.util.CommonUtils
import oharastream.ohara.kafka.connector.json.SettingInfo
import spray.json.{RootJsonFormat, _}

import scala.concurrent.{ExecutionContext, Future}

object ValidationApi {
  val VALIDATION_KIND: String = "validate"

  private[this] implicit val SETTING_INFO_FORMAT: RootJsonFormat[SettingInfo] = new RootJsonFormat[SettingInfo] {
    override def write(obj: SettingInfo): JsValue = obj.toJsonString.parseJson
    override def read(json: JsValue): SettingInfo = SettingInfo.ofJson(json.toString())
  }

  // the surrounding definitions were truncated in this excerpt: ConnectorRequest is a
  // builder that accumulates the connector `creation` payload used below, and Access
  // is the abstract entry point exposing it
  trait ConnectorRequest {
    def verify()(implicit executionContext: ExecutionContext): Future[SettingInfo]
  }

  abstract class Access private[configurator] (prefix: String) extends BasicAccess(prefix) {
    def connectorRequest: ConnectorRequest
  }

  def access: Access = new Access(VALIDATION_KIND) {
    override def connectorRequest: ConnectorRequest = new ConnectorRequest {
      override def verify()(implicit executionContext: ExecutionContext): Future[SettingInfo] =
        exec.put[oharastream.ohara.client.configurator.ConnectorApi.Creation, SettingInfo, ErrorApi.Error](
          s"$url/${ConnectorApi.KIND}",
          creation // assembled by the builder methods elided from this excerpt
        )
    }
  }
}
Example 27
Source File: BasicAccess.scala From ohara with Apache License 2.0 | 5 votes |
package oharastream.ohara.client.configurator

import java.util.Objects

import oharastream.ohara.client.HttpExecutor
import oharastream.ohara.client.configurator.BasicAccess.UrlBuilder
import oharastream.ohara.common.setting.ObjectKey
import oharastream.ohara.common.util.CommonUtils

import scala.concurrent.{ExecutionContext, Future}

// the class header and its connection fields were cut from this excerpt; they are
// restored minimally here so that `url` and `urlBuilder` have their dependencies.
// The builder-style setters that populate hostname/port/version, and the HttpExecutor
// member used by the subclasses, remain elided.
abstract class BasicAccess private[configurator] (val prefixPath: String) {
  protected var hostname: String = _
  protected var port: Int = -1
  protected var version: String = _

  protected final def url: String =
    s"http://${CommonUtils.requireNonEmpty(hostname)}:${CommonUtils
      .requireConnectionPort(port)}/${CommonUtils.requireNonEmpty(version)}/${CommonUtils.requireNonEmpty(prefixPath)}"

  // GROUP_KEY is a package-level constant shared by the configurator APIs
  protected def urlBuilder: UrlBuilder = (prefix, key, postfix, params) => {
    var url = BasicAccess.this.url
    prefix.foreach(s => url = s"$url/$s")
    key.foreach(k => url = s"$url/${k.name()}")
    postfix.foreach(s => url = s"$url/$s")
    key.foreach(k => url = s"$url?$GROUP_KEY=${k.group()}")
    val divider = key match {
      case None    => "?"
      case Some(_) => "&"
    }

    if (params.nonEmpty)
      url = url + divider + params
        .map {
          case (key, value) => s"$key=$value"
        }
        .mkString("&")
    url
  }
}

object BasicAccess {
  trait UrlBuilder extends oharastream.ohara.common.pattern.Builder[String] {
    private[this] var prefix: Option[String]      = None
    private[this] var key: Option[ObjectKey]      = None
    private[this] var postfix: Option[String]     = None
    private[this] var params: Map[String, String] = Map.empty

    def prefix(prefix: String): UrlBuilder = {
      this.prefix = Some(CommonUtils.requireNonEmpty(prefix))
      this
    }

    def key(key: ObjectKey): UrlBuilder = {
      this.key = Some(key)
      this
    }

    def postfix(postfix: String): UrlBuilder = {
      this.postfix = Some(CommonUtils.requireNonEmpty(postfix))
      this
    }

    def param(key: String, value: String): UrlBuilder = {
      this.params += (key -> value)
      this
    }

    def params(params: Map[String, String]): UrlBuilder = {
      this.params ++= Objects.requireNonNull(params)
      this
    }

    override def build(): String = doBuild(
      prefix = prefix,
      key = key,
      postfix = postfix,
      params = params
    )

    protected def doBuild(
      prefix: Option[String],
      key: Option[ObjectKey],
      postfix: Option[String],
      params: Map[String, String]
    ): String
  }
}
Example 28
Source File: ContainerApi.scala From ohara with Apache License 2.0 | 5 votes |
package oharastream.ohara.client.configurator import oharastream.ohara.common.setting.ObjectKey import spray.json.DefaultJsonProtocol._ import spray.json.RootJsonFormat import scala.concurrent.{ExecutionContext, Future} object ContainerApi { val KIND: String = "container" @deprecated(message = s"replaced by $KIND", since = "0.11.0") val CONTAINER_PREFIX_PATH: String = "containers" final case class PortMapping(hostIp: String, hostPort: Int, containerPort: Int) implicit val PORT_MAPPING_FORMAT: RootJsonFormat[PortMapping] = jsonFormat3(PortMapping) final case class ContainerInfo( nodeName: String, id: String, imageName: String, state: String, kind: String, name: String, size: Long, portMappings: Seq[PortMapping], environments: Map[String, String], hostname: String ) implicit val CONTAINER_INFO_FORMAT: RootJsonFormat[ContainerInfo] = jsonFormat10(ContainerInfo) final case class ContainerGroup(clusterKey: ObjectKey, clusterType: String, containers: Seq[ContainerInfo]) implicit val CONTAINER_GROUP_FORMAT: RootJsonFormat[ContainerGroup] = jsonFormat3(ContainerGroup) class Access private[configurator] extends BasicAccess(KIND) { def get(key: ObjectKey)(implicit executionContext: ExecutionContext): Future[Seq[ContainerGroup]] = exec.get[Seq[ContainerGroup], ErrorApi.Error](urlBuilder.key(key).build()) } def access: Access = new Access }
Example 29
Source File: TaxCalculationService.scala From pertax-frontend with Apache License 2.0 | 5 votes |
package services import com.kenshoo.play.metrics.Metrics import com.google.inject.{Inject, Singleton} import metrics._ import models.{TaxCalculation, TaxYearReconciliation} import play.api.Mode.Mode import play.api.http.Status._ import play.api.{Configuration, Environment, Logger} import services.http.SimpleHttp import uk.gov.hmrc.domain.Nino import uk.gov.hmrc.http.{HeaderCarrier, HttpResponse} import uk.gov.hmrc.play.bootstrap.config.ServicesConfig import uk.gov.hmrc.play.bootstrap.http.HttpClient import scala.concurrent.{ExecutionContext, Future} import scala.util.control.NonFatal sealed trait TaxCalculationResponse case class TaxCalculationSuccessResponse(taxCalculation: TaxCalculation) extends TaxCalculationResponse case object TaxCalculationNotFoundResponse extends TaxCalculationResponse case class TaxCalculationUnexpectedResponse(r: HttpResponse) extends TaxCalculationResponse case class TaxCalculationErrorResponse(cause: Exception) extends TaxCalculationResponse @Singleton class TaxCalculationService @Inject()( environment: Environment, configuration: Configuration, val simpleHttp: SimpleHttp, val metrics: Metrics, val http: HttpClient, servicesConfig: ServicesConfig)(implicit ec: ExecutionContext) extends HasMetrics { val mode: Mode = environment.mode val runModeConfiguration: Configuration = configuration lazy val taxCalcUrl = servicesConfig.baseUrl("taxcalc") def getTaxCalculation(nino: Nino, year: Int)(implicit hc: HeaderCarrier): Future[TaxCalculationResponse] = withMetricsTimer("get-taxcalc-summary") { t => simpleHttp.get[TaxCalculationResponse](s"$taxCalcUrl/taxcalc/$nino/taxSummary/$year")( onComplete = { case r if r.status >= 200 && r.status < 300 => Logger.debug(r.body) t.completeTimerAndIncrementSuccessCounter() TaxCalculationSuccessResponse(r.json.as[TaxCalculation]) case r if r.status == NOT_FOUND => Logger.debug(r.body) t.completeTimerAndIncrementSuccessCounter() TaxCalculationNotFoundResponse case r => Logger.debug(r.body) t.completeTimerAndIncrementFailedCounter() Logger.debug(s"Unexpected ${r.status} response getting tax calculation from tax-calculation-service") TaxCalculationUnexpectedResponse(r) }, onError = { e => Logger.debug(e.toString) t.completeTimerAndIncrementFailedCounter() Logger.warn("Error getting tax calculation from tax-calculation-service", e) TaxCalculationErrorResponse(e) } ) } def getTaxYearReconciliations(nino: Nino)( implicit headerCarrier: HeaderCarrier): Future[List[TaxYearReconciliation]] = http .GET[List[TaxYearReconciliation]](s"$taxCalcUrl/taxcalc/$nino/reconciliations") .recover { case NonFatal(e) => Logger.debug(s"An exception was thrown by taxcalc reconciliations: ${e.getMessage}") Nil } }
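getTaxYearReconciliations above swallows non-fatal failures and degrades to an empty list rather than failing the caller. A generic sketch of that recover-to-default pattern; the Fallbacks object and the orDefault name are hypothetical:

import scala.concurrent.{ExecutionContext, Future}
import scala.util.control.NonFatal

object Fallbacks {
  // turn any non-fatal failure into a safe default so callers never see the exception
  def orDefault[A](f: Future[A], default: A)(implicit ec: ExecutionContext): Future[A] =
    f.recover {
      case NonFatal(e) =>
        println(s"falling back to default: ${e.getMessage}") // a real service would log here
        default
    }
}

Applied to the call above, Fallbacks.orDefault(http.GET[List[TaxYearReconciliation]](url), Nil) would behave the same way.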
Example 30
Source File: EnrolmentStoreCachingService.scala From pertax-frontend with Apache License 2.0 | 5 votes |
package services import com.google.inject.Inject import connectors.EnrolmentsConnector import models.{NonFilerSelfAssessmentUser, NotEnrolledSelfAssessmentUser, SelfAssessmentUserType, WrongCredentialsSelfAssessmentUser} import play.api.Logger import uk.gov.hmrc.domain.SaUtr import uk.gov.hmrc.http.HeaderCarrier import scala.concurrent.{ExecutionContext, Future} class EnrolmentStoreCachingService @Inject()( val sessionCache: LocalSessionCache, enrolmentsConnector: EnrolmentsConnector) { private def addSaUserTypeToCache( user: SelfAssessmentUserType)(implicit hc: HeaderCarrier, ec: ExecutionContext): Future[SelfAssessmentUserType] = sessionCache.cache[SelfAssessmentUserType](SelfAssessmentUserType.cacheId, user).map(_ => user) def getSaUserTypeFromCache( saUtr: SaUtr)(implicit hc: HeaderCarrier, ec: ExecutionContext): Future[SelfAssessmentUserType] = sessionCache.fetchAndGetEntry[SelfAssessmentUserType](SelfAssessmentUserType.cacheId).flatMap { case Some(user) => Future.successful(user) case _ => enrolmentsConnector .getUserIdsWithEnrolments(saUtr.utr) .flatMap[SelfAssessmentUserType]( (response: Either[String, Seq[String]]) => response.fold( error => { Logger.warn(error) addSaUserTypeToCache(NonFilerSelfAssessmentUser) }, ids => if (ids.nonEmpty) { addSaUserTypeToCache(WrongCredentialsSelfAssessmentUser(saUtr)) } else { addSaUserTypeToCache(NotEnrolledSelfAssessmentUser(saUtr)) } ) ) } }
Example 31
Source File: SimpleHttp.scala From pertax-frontend with Apache License 2.0 | 5 votes |
package services.http import com.google.inject.{Inject, Singleton} import play.api.libs.json.Writes import uk.gov.hmrc.http.{HeaderCarrier, HttpReads, HttpResponse} import uk.gov.hmrc.play.bootstrap.http.HttpClient import scala.concurrent.{ExecutionContext, Future} @Singleton class SimpleHttp @Inject()(http: HttpClient)(implicit executionContext: ExecutionContext) { implicit val r = new HttpReads[HttpResponse] { override def read(method: String, url: String, response: HttpResponse): HttpResponse = response } def get[T](url: String)(onComplete: HttpResponse => T, onError: Exception => T)( implicit hc: HeaderCarrier): Future[T] = http.GET[HttpResponse](url) map { response => onComplete(response) } recover { case e: Exception => onError(e) } def post[I, T](url: String, body: I)(onComplete: HttpResponse => T, onError: Exception => T)( implicit hc: HeaderCarrier, w: Writes[I]): Future[T] = http.POST[I, HttpResponse](url, body) map { response => onComplete(response) } recover { case e: Exception => onError(e) } def put[I, T](url: String, body: I)(onComplete: HttpResponse => T, onError: Exception => T)( implicit hc: HeaderCarrier, w: Writes[I]): Future[T] = http.PUT[I, HttpResponse](url, body) map { response => onComplete(response) } recover { case e: Exception => onError(e) } }
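Because both callbacks passed to SimpleHttp return the same type, a caller can collapse an HTTP exchange into a plain domain value and never observe a failed Future. A hypothetical caller built on the get signature above (StatusClient is not part of pertax-frontend):

import com.google.inject.Inject
import services.http.SimpleHttp
import uk.gov.hmrc.http.HeaderCarrier

import scala.concurrent.Future

class StatusClient @Inject()(simpleHttp: SimpleHttp) {
  // any 2xx response counts as "up"; transport errors map to false instead of a failed Future
  def isUp(url: String)(implicit hc: HeaderCarrier): Future[Boolean] =
    simpleHttp.get[Boolean](url)(
      onComplete = response => response.status >= 200 && response.status < 300,
      onError = _ => false
    )
}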
Example 32
Source File: SaPartialService.scala From pertax-frontend with Apache License 2.0 | 5 votes |
package services.partials import com.google.inject.{Inject, Singleton} import com.kenshoo.play.metrics.Metrics import config.ConfigDecorator import metrics.HasMetrics import play.api.Mode.Mode import play.api.mvc.RequestHeader import play.api.{Configuration, Environment} import uk.gov.hmrc.play.bootstrap.config.ServicesConfig import uk.gov.hmrc.play.bootstrap.filters.frontend.crypto.SessionCookieCrypto import uk.gov.hmrc.play.bootstrap.http.HttpClient import uk.gov.hmrc.play.partials.HtmlPartial import util.{EnhancedPartialRetriever, Tools} import scala.concurrent.{ExecutionContext, Future} @Singleton class SaPartialService @Inject()( environment: Environment, runModeConfiguration: Configuration, override val http: HttpClient, val metrics: Metrics, val configDecorator: ConfigDecorator, sessionCookieCrypto: SessionCookieCrypto, val tools: Tools, servicesConfig: ServicesConfig)(implicit executionContext: ExecutionContext) extends EnhancedPartialRetriever(sessionCookieCrypto) with HasMetrics { val mode: Mode = environment.mode private val returnUrl = configDecorator.pertaxFrontendHomeUrl private val returnLinkText = configDecorator.saPartialReturnLinkText def getSaAccountSummary(implicit request: RequestHeader): Future[HtmlPartial] = loadPartial( configDecorator.businessTaxAccountService + s"/business-account/partial/sa/account-summary?returnUrl=${tools .urlEncode(returnUrl)}&returnLinkText=${tools.urlEncode(returnLinkText)}") }
Example 33
Source File: MessageFrontendService.scala From pertax-frontend with Apache License 2.0 | 5 votes |
package services.partials import com.google.inject.{Inject, Singleton} import com.kenshoo.play.metrics.Metrics import metrics.HasMetrics import models.MessageCount import play.api.Mode.Mode import play.api.mvc.RequestHeader import play.api.{Configuration, Environment, Logger} import uk.gov.hmrc.http.HeaderCarrier import uk.gov.hmrc.play.bootstrap.config.ServicesConfig import uk.gov.hmrc.play.bootstrap.filters.frontend.crypto.SessionCookieCrypto import uk.gov.hmrc.play.bootstrap.http.HttpClient import uk.gov.hmrc.play.partials.HtmlPartial import util.EnhancedPartialRetriever import scala.concurrent.{ExecutionContext, Future} @Singleton class MessageFrontendService @Inject()( environment: Environment, runModeConfiguration: Configuration, override val http: HttpClient, val metrics: Metrics, val sessionCookieCrypto: SessionCookieCrypto, servicesConfig: ServicesConfig)(implicit executionContext: ExecutionContext) extends EnhancedPartialRetriever(sessionCookieCrypto) with HasMetrics { val mode: Mode = environment.mode lazy val messageFrontendUrl: String = servicesConfig.baseUrl("message-frontend") def getMessageListPartial(implicit request: RequestHeader): Future[HtmlPartial] = loadPartial(messageFrontendUrl + "/messages") def getMessageDetailPartial(messageToken: String)(implicit request: RequestHeader): Future[HtmlPartial] = loadPartial(messageFrontendUrl + "/messages/" + messageToken) def getMessageInboxLinkPartial(implicit request: RequestHeader): Future[HtmlPartial] = loadPartial( messageFrontendUrl + "/messages/inbox-link?messagesInboxUrl=" + controllers.routes.MessageController .messageList()) def getUnreadMessageCount(implicit request: RequestHeader): Future[Option[Int]] = loadJson(messageFrontendUrl + "/messages/count?read=No").map(_.map(_.count)) private def loadJson(url: String)(implicit hc: HeaderCarrier): Future[Option[MessageCount]] = withMetricsTimer("load-json") { t => http.GET[Option[MessageCount]](url) recover { case e => t.completeTimerAndIncrementFailedCounter() Logger.warn(s"Failed to load json", e) None } } }
Example 34
Source File: PreferencesFrontendPartialService.scala From pertax-frontend with Apache License 2.0 | 5 votes |
package services.partials import com.google.inject.{Inject, Singleton} import com.kenshoo.play.metrics.Metrics import metrics.HasMetrics import play.api.Mode.Mode import play.api.mvc.RequestHeader import play.api.{Configuration, Environment} import uk.gov.hmrc.play.bootstrap.config.ServicesConfig import uk.gov.hmrc.play.bootstrap.filters.frontend.crypto.SessionCookieCrypto import uk.gov.hmrc.play.bootstrap.http.HttpClient import uk.gov.hmrc.play.partials.HtmlPartial import util.{EnhancedPartialRetriever, Tools} import scala.concurrent.{ExecutionContext, Future} @Singleton class PreferencesFrontendPartialService @Inject()( environment: Environment, runModeConfiguration: Configuration, val http: HttpClient, val metrics: Metrics, sessionCookieCrypto: SessionCookieCrypto, val tools: Tools, servicesConfig: ServicesConfig)(implicit executionContext: ExecutionContext) extends EnhancedPartialRetriever(sessionCookieCrypto) with HasMetrics { val mode: Mode = environment.mode val preferencesFrontendUrl = servicesConfig.baseUrl("preferences-frontend") def getManagePreferencesPartial(returnUrl: String, returnLinkText: String)( implicit request: RequestHeader): Future[HtmlPartial] = loadPartial(s"$preferencesFrontendUrl/paperless/manage?returnUrl=${tools .encryptAndEncode(returnUrl)}&returnLinkText=${tools.encryptAndEncode(returnLinkText)}") }
Example 35
Source File: FormPartialService.scala From pertax-frontend with Apache License 2.0 | 5 votes |
package services.partials import com.google.inject.{Inject, Singleton} import com.kenshoo.play.metrics.Metrics import config.ConfigDecorator import metrics.HasMetrics import play.api.Mode.Mode import play.api.mvc.RequestHeader import play.api.{Configuration, Environment} import uk.gov.hmrc.play.bootstrap.config.ServicesConfig import uk.gov.hmrc.play.bootstrap.filters.frontend.crypto.SessionCookieCrypto import uk.gov.hmrc.play.bootstrap.http.HttpClient import uk.gov.hmrc.play.partials.HtmlPartial import util.EnhancedPartialRetriever import scala.concurrent.{ExecutionContext, Future} @Singleton class FormPartialService @Inject()( environment: Environment, runModeConfiguration: Configuration, override val http: HttpClient, val metrics: Metrics, val configDecorator: ConfigDecorator, sessionCookieCrypto: SessionCookieCrypto, servicesConfig: ServicesConfig)(implicit executionContext: ExecutionContext) extends EnhancedPartialRetriever(sessionCookieCrypto) with HasMetrics { val mode: Mode = environment.mode def getNationalInsurancePartial(implicit request: RequestHeader): Future[HtmlPartial] = loadPartial(configDecorator.nationalInsuranceFormPartialLinkUrl) def getSelfAssessmentPartial(implicit request: RequestHeader): Future[HtmlPartial] = loadPartial(configDecorator.selfAssessmentFormPartialLinkUrl) }
Example 36
Source File: AddressMovedService.scala From pertax-frontend with Apache License 2.0 | 5 votes |
package services import com.google.inject.Inject import models.{AddressChanged, AnyOtherMove, MovedFromScotland, MovedToScotland} import uk.gov.hmrc.http.HeaderCarrier import scala.concurrent.{ExecutionContext, Future} class AddressMovedService @Inject()(addressLookupService: AddressLookupService) { def moved(fromAddressId: String, toAddressId: String)( implicit hc: HeaderCarrier, ec: ExecutionContext): Future[AddressChanged] = withAddressExists(fromAddressId, toAddressId) { for { fromResponse <- addressLookupService.lookup(fromAddressId) toResponse <- addressLookupService.lookup(toAddressId) } yield { (fromResponse, toResponse) match { case (AddressLookupSuccessResponse(fromRecordSet), AddressLookupSuccessResponse(toRecordSet)) => val fromSubdivision = fromRecordSet.addresses.headOption.flatMap(_.address.subdivision) val toSubdivision = toRecordSet.addresses.headOption.flatMap(_.address.subdivision) if (hasMovedFromScotland(fromSubdivision, toSubdivision)) MovedFromScotland else if (hasMovedToScotland(fromSubdivision, toSubdivision)) MovedToScotland else AnyOtherMove case _ => AnyOtherMove } } } def toMessageKey(addressChanged: AddressChanged): Option[String] = addressChanged match { case MovedFromScotland => Some("label.moved_from_scotland") case MovedToScotland => Some("label.moved_to_scotland") case AnyOtherMove => None } private val scottishSubdivision = "GB-SCT" private def hasMovedFromScotland(fromSubdivision: Option[String], toSubdivision: Option[String]): Boolean = fromSubdivision.contains(scottishSubdivision) && !toSubdivision.contains(scottishSubdivision) private def hasMovedToScotland(fromSubdivision: Option[String], toSubdivision: Option[String]): Boolean = !fromSubdivision.contains(scottishSubdivision) && toSubdivision.contains(scottishSubdivision) private def withAddressExists(fromAddressId: String, toAddressId: String)( f: => Future[AddressChanged]): Future[AddressChanged] = if (fromAddressId.trim.isEmpty || toAddressId.trim.isEmpty) Future.successful(AnyOtherMove) else f }
Example 37
Source File: NinoDisplayService.scala From pertax-frontend with Apache License 2.0 | 5 votes |
package services import com.google.inject.{Inject, Singleton} import config.ConfigDecorator import controllers.auth.requests.UserRequest import uk.gov.hmrc.domain.Nino import uk.gov.hmrc.http.HeaderCarrier import scala.concurrent.{ExecutionContext, Future} @Singleton class NinoDisplayService @Inject()(configDecorator: ConfigDecorator, citizenDetailsService: CitizenDetailsService)( implicit ec: ExecutionContext) { def getNino(implicit request: UserRequest[_], hc: HeaderCarrier): Future[Option[Nino]] = if (configDecorator.getNinoFromCID) { request.nino match { case Some(nino) => for { result <- citizenDetailsService.personDetails(nino) } yield { result match { case PersonDetailsSuccessResponse(personDetails) => personDetails.person.nino case _ => None } } case _ => Future.successful(None) } } else { Future.successful(request.nino) } }
Example 38
Source File: MessageController.scala From pertax-frontend with Apache License 2.0 | 5 votes |
package controllers import com.google.inject.Inject import config.ConfigDecorator import controllers.auth._ import error.RendersErrors import models.Breadcrumb import play.api.i18n.Messages import play.api.mvc.{Action, AnyContent, MessagesControllerComponents} import play.twirl.api.Html import services.partials.MessageFrontendService import uk.gov.hmrc.play.partials.HtmlPartial import uk.gov.hmrc.renderer.{ActiveTabMessages, TemplateRenderer} import util.LocalPartialRetriever import views.html.message.{MessageDetailView, MessageInboxView} import scala.concurrent.ExecutionContext class MessageController @Inject()( val messageFrontendService: MessageFrontendService, authJourney: AuthJourney, withActiveTabAction: WithActiveTabAction, withBreadcrumbAction: WithBreadcrumbAction, cc: MessagesControllerComponents, messageInboxView: MessageInboxView, messageDetailView: MessageDetailView)( implicit val partialRetriever: LocalPartialRetriever, val configDecorator: ConfigDecorator, val templateRenderer: TemplateRenderer, ec: ExecutionContext) extends PertaxBaseController(cc) with RendersErrors { def messageBreadcrumb: Breadcrumb = "label.all_messages" -> routes.MessageController.messageList().url :: baseBreadcrumb def messageList: Action[AnyContent] = (authJourney.authWithPersonalDetails andThen withActiveTabAction.addActiveTab(ActiveTabMessages) andThen withBreadcrumbAction .addBreadcrumb(baseBreadcrumb)).async { implicit request => messageFrontendService.getMessageListPartial map { p => Ok( messageInboxView( messageListPartial = p successfulContentOrElse Html( Messages("label.sorry_theres_been_a_technical_problem_retrieving_your_messages"))) ) } } def messageDetail(messageToken: String): Action[AnyContent] = (authJourney.authWithPersonalDetails andThen withActiveTabAction.addActiveTab(ActiveTabMessages) andThen withBreadcrumbAction .addBreadcrumb(messageBreadcrumb)).async { implicit request => messageFrontendService.getMessageDetailPartial(messageToken).map { case HtmlPartial.Success(Some(title), content) => Ok(messageDetailView(message = content, title = title)) case HtmlPartial.Success(None, content) => Ok(messageDetailView(message = content, title = Messages("label.message"))) case HtmlPartial.Failure(_, _) => Ok( messageDetailView( message = Html(Messages("label.sorry_theres_been_a_techinal_problem_retrieving_your_message")), title = Messages("label.message") ) ) } } }
Example 39
Source File: PaperlessPreferencesController.scala From pertax-frontend with Apache License 2.0 | 5 votes |
package controllers

import com.google.inject.Inject
import config.ConfigDecorator
import controllers.auth._
import controllers.auth.requests.UserRequest
import play.api.Mode.Mode
import play.api.i18n.{Messages, MessagesApi}
import play.api.mvc.{Action, AnyContent, MessagesControllerComponents}
import services.partials.PreferencesFrontendPartialService
import play.api.{Configuration, Environment}
import uk.gov.hmrc.renderer.{ActiveTabMessages, TemplateRenderer}
import util.{LocalPartialRetriever, Tools}

import scala.concurrent.{ExecutionContext, Future}

class PaperlessPreferencesController @Inject()(
  val preferencesFrontendPartialService: PreferencesFrontendPartialService,
  authJourney: AuthJourney,
  withActiveTabAction: WithActiveTabAction,
  withBreadcrumbAction: WithBreadcrumbAction,
  cc: MessagesControllerComponents,
  tools: Tools)(
  implicit partialRetriever: LocalPartialRetriever,
  configDecorator: ConfigDecorator,
  templateRenderer: TemplateRenderer,
  ec: ExecutionContext)
    extends PertaxBaseController(cc) {

  def managePreferences: Action[AnyContent] =
    (authJourney.authWithPersonalDetails andThen withActiveTabAction
      .addActiveTab(ActiveTabMessages) andThen withBreadcrumbAction.addBreadcrumb(baseBreadcrumb)).async {
      implicit request: UserRequest[_] =>
        if (request.isVerify) {
          Future.successful(
            BadRequest(
              views.html.error(
                "global.error.BadRequest.title",
                Some("global.error.BadRequest.heading"),
                List("global.error.BadRequest.message"))))
        } else {
          Future.successful(
            Redirect(
              getManagePreferencesUrl(configDecorator.pertaxFrontendHomeUrl, Messages("label.back_to_account_home"))))
        }
    }

  private def getManagePreferencesUrl(returnUrl: String, returnLinkText: String): String =
    s"${configDecorator.preferencesFrontendService}/paperless/check-settings?returnUrl=${tools.encryptAndEncode(returnUrl)}&returnLinkText=${tools
      .encryptAndEncode(returnLinkText)}"
}
Example 40
Source File: PertaxBaseController.scala From pertax-frontend with Apache License 2.0 | 5 votes |
package controllers import controllers.controllershelpers.ControllerLikeHelpers import models.Breadcrumb import play.api.i18n.I18nSupport import play.api.mvc._ import uk.gov.hmrc.play.bootstrap.controller.{FrontendController, Utf8MimeTypes} import scala.concurrent.{ExecutionContext, Future} abstract class PertaxBaseController(cc: MessagesControllerComponents)(implicit ec: ExecutionContext) extends FrontendController(cc) with I18nSupport with ControllerLikeHelpers { implicit class SessionKeyRemover(result: Future[Result]) { def removeSessionKey(key: String)(implicit request: Request[_]): Future[Result] = result.map { _.withSession(request.session - key) } } val baseBreadcrumb: Breadcrumb = List("label.account_home" -> routes.HomeController.index().url) } trait PertaxBaseControllerTrait extends PertaxBaseController
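The SessionKeyRemover enrichment above lets any Future[Result] drop a session entry in one chained call. A fragment sketching how an action inside a PertaxBaseController subclass might use it; the action and the "addressJourney" key are hypothetical:

// inside a controller extending PertaxBaseController, with an implicit request in scope:
def confirm: Action[AnyContent] = Action.async { implicit request =>
  Future
    .successful(Redirect(routes.HomeController.index()))
    .removeSessionKey("addressJourney") // hypothetical session key
}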
Example 41
Source File: PaymentsController.scala From pertax-frontend with Apache License 2.0 | 5 votes |
package controllers import config.ConfigDecorator import connectors.PayApiConnector import controllers.auth.{AuthJourney, WithBreadcrumbAction} import error.RendersErrors import com.google.inject.Inject import models.{NonFilerSelfAssessmentUser, PaymentRequest, SelfAssessmentUser} import org.joda.time.DateTime import play.api.Logger import play.api.mvc.{Action, AnyContent, MessagesControllerComponents} import uk.gov.hmrc.renderer.TemplateRenderer import uk.gov.hmrc.time.CurrentTaxYear import util.LocalPartialRetriever import scala.concurrent.ExecutionContext class PaymentsController @Inject()( val payApiConnector: PayApiConnector, authJourney: AuthJourney, withBreadcrumbAction: WithBreadcrumbAction, cc: MessagesControllerComponents)( implicit partialRetriever: LocalPartialRetriever, configDecorator: ConfigDecorator, val templateRenderer: TemplateRenderer, ec: ExecutionContext) extends PertaxBaseController(cc) with CurrentTaxYear with RendersErrors { override def now: () => DateTime = () => DateTime.now() def makePayment: Action[AnyContent] = (authJourney.authWithPersonalDetails andThen withBreadcrumbAction.addBreadcrumb(baseBreadcrumb)).async { implicit request => if (request.isSa) { request.saUserType match { case saUser: SelfAssessmentUser => { val paymentRequest = PaymentRequest(configDecorator, saUser.saUtr.toString()) for { response <- payApiConnector.createPayment(paymentRequest) } yield { response match { case Some(createPayment) => Redirect(createPayment.nextUrl) case None => error(BAD_REQUEST) } } } case NonFilerSelfAssessmentUser => { Logger.warn("User had no sa account when one was required") futureError(INTERNAL_SERVER_ERROR) } } } else { Logger.warn("User had no sa account when one was required") futureError(INTERNAL_SERVER_ERROR) } } }
Example 42
Source File: PartialsController.scala From pertax-frontend with Apache License 2.0 | 5 votes |
package controllers import config.ConfigDecorator import error.LocalErrorHandler import com.google.inject.Inject import models.Breadcrumb import org.joda.time.DateTime import play.api.mvc.{Action, AnyContent, MessagesControllerComponents} import uk.gov.hmrc.auth.core.AuthConnector import uk.gov.hmrc.play.audit.http.connector.AuditConnector import util.LocalPartialRetriever import scala.concurrent.{ExecutionContext, Future} class PartialsController @Inject()( val localErrorHandler: LocalErrorHandler, auditConnector: AuditConnector, authConnector: AuthConnector, cc: MessagesControllerComponents)( implicit partialRetriever: LocalPartialRetriever, configDecorator: ConfigDecorator, ex: ExecutionContext) extends PertaxBaseController(cc) { def mainContentHeader( name: Option[String], lastLogin: Option[Long], itemText: List[String], itemUrl: List[String], showBetaBanner: Option[Boolean], deskProToken: Option[String], langReturnUrl: Option[String], lang: Option[String], showLastItem: Boolean): Action[AnyContent] = Action.async { implicit request => Future.successful { val breadcrumb: Breadcrumb = (itemText zip itemUrl).dropRight(if (showLastItem) 0 else 1) Ok( views.html.integration.mainContentHeader( name, lastLogin.map(new DateTime(_)), breadcrumb, showBetaBanner.getOrElse(false), deskProToken, langReturnUrl.filter(x => configDecorator.welshLangEnabled) ) ) } } }
Example 43
Source File: WithBreadcrumbAction.scala From pertax-frontend with Apache License 2.0 | 5 votes |
package controllers.auth import controllers.auth.requests.UserRequest import models.Breadcrumb import play.api.mvc.{ActionRefiner, Result} import com.google.inject.Inject import scala.concurrent.{ExecutionContext, Future} class WithBreadcrumbAction @Inject()(implicit ec: ExecutionContext) { def addBreadcrumb(breadcrumb: Breadcrumb): ActionRefiner[UserRequest, UserRequest] = new ActionRefiner[UserRequest, UserRequest] { override protected def refine[A](request: UserRequest[A]): Future[Either[Result, UserRequest[A]]] = Future.successful( Right( UserRequest( request.nino, request.retrievedName, request.saUserType, request.credentials, request.confidenceLevel, request.personDetails, request.trustedHelper, request.profile, request.unreadMessageCount, request.activeTab, Some(breadcrumb), request.request ) ) ) override protected def executionContext: ExecutionContext = ec } }
Example 44
Source File: SelfAssessmentStatusAction.scala From pertax-frontend with Apache License 2.0 | 5 votes |
package controllers.auth import com.google.inject.Inject import controllers.auth.requests._ import models._ import play.api.mvc.{ActionFunction, ActionRefiner, ControllerComponents, Result} import services.{CitizenDetailsService, EnrolmentStoreCachingService, MatchingDetailsSuccessResponse} import uk.gov.hmrc.domain.{Nino, SaUtr} import uk.gov.hmrc.http.HeaderCarrier import uk.gov.hmrc.play.HeaderCarrierConverter import scala.concurrent.{ExecutionContext, Future} class SelfAssessmentStatusAction @Inject()( citizenDetailsService: CitizenDetailsService, enrolmentsCachingService: EnrolmentStoreCachingService, cc: ControllerComponents)(implicit ec: ExecutionContext) extends ActionRefiner[AuthenticatedRequest, UserRequest] with ActionFunction[AuthenticatedRequest, UserRequest] { private def getSaUtrFromCitizenDetailsService(nino: Nino)(implicit hc: HeaderCarrier): Future[Option[SaUtr]] = citizenDetailsService.getMatchingDetails(nino) map { case MatchingDetailsSuccessResponse(matchingDetails) => matchingDetails.saUtr case _ => None } private def getSelfAssessmentUserType[A]( implicit hc: HeaderCarrier, request: AuthenticatedRequest[A]): Future[SelfAssessmentUserType] = request.nino.fold[Future[SelfAssessmentUserType]](Future.successful(NonFilerSelfAssessmentUser)) { nino => request.saEnrolment match { case Some(SelfAssessmentEnrolment(saUtr, Activated)) => Future.successful(ActivatedOnlineFilerSelfAssessmentUser(saUtr)) case Some(SelfAssessmentEnrolment(saUtr, NotYetActivated)) => Future.successful(NotYetActivatedOnlineFilerSelfAssessmentUser(saUtr)) case None => getSaUtrFromCitizenDetailsService(nino).flatMap { case Some(saUtr) => enrolmentsCachingService.getSaUserTypeFromCache(saUtr) case None => Future.successful(NonFilerSelfAssessmentUser) } } } override protected def refine[A](request: AuthenticatedRequest[A]): Future[Either[Result, UserRequest[A]]] = { implicit val hc: HeaderCarrier = HeaderCarrierConverter.fromHeadersAndSession(request.headers, Some(request.session)) getSelfAssessmentUserType(hc, request).map { saType => Right( UserRequest( request.nino, request.name, saType, request.credentials, request.confidenceLevel, None, request.trustedHelper, request.profile, None, None, None, request.request ) ) } } override protected def executionContext: ExecutionContext = cc.executionContext }
Example 45
Source File: WithActiveTabAction.scala From pertax-frontend with Apache License 2.0 | 5 votes |
package controllers.auth import controllers.auth.requests.UserRequest import play.api.mvc.{ActionRefiner, MessagesControllerComponents, Result} import uk.gov.hmrc.renderer.ActiveTab import javax.inject.Inject import scala.concurrent.{ExecutionContext, Future} class WithActiveTabAction @Inject()(implicit ec: ExecutionContext) { def addActiveTab(currentActiveTab: ActiveTab): ActionRefiner[UserRequest, UserRequest] = new ActionRefiner[UserRequest, UserRequest] { override protected def refine[A](request: UserRequest[A]): Future[Either[Result, UserRequest[A]]] = Future.successful( Right( UserRequest( request.nino, request.retrievedName, request.saUserType, request.credentials, request.confidenceLevel, request.personDetails, request.trustedHelper, request.profile, request.unreadMessageCount, Some(currentActiveTab), request.breadcrumb, request.request ) ) ) override protected def executionContext: ExecutionContext = ec } }
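WithActiveTabAction above and WithBreadcrumbAction (Example 43) are both ActionRefiners over UserRequest, which is what lets the controllers in this listing chain them with andThen. A sketch of bundling that chain into one reusable ActionBuilder; the ActionChains object and method name are hypothetical:

import controllers.auth.requests.UserRequest
import controllers.auth.{AuthJourney, WithActiveTabAction, WithBreadcrumbAction}
import models.Breadcrumb
import play.api.mvc.{ActionBuilder, AnyContent}
import uk.gov.hmrc.renderer.ActiveTabMessages

object ActionChains {
  // one ActionBuilder that authenticates, then stamps the active tab and the
  // breadcrumb onto the UserRequest before any controller body runs
  def messagesAction(
    authJourney: AuthJourney,
    withActiveTabAction: WithActiveTabAction,
    withBreadcrumbAction: WithBreadcrumbAction,
    breadcrumb: Breadcrumb): ActionBuilder[UserRequest, AnyContent] =
    authJourney.authWithPersonalDetails andThen
      withActiveTabAction.addActiveTab(ActiveTabMessages) andThen
      withBreadcrumbAction.addBreadcrumb(breadcrumb)
}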
Example 46
Source File: GetPersonDetailsAction.scala From pertax-frontend with Apache License 2.0 | 5 votes |
package controllers.auth import com.google.inject.Inject import config.ConfigDecorator import controllers.auth.requests.UserRequest import models.PersonDetails import play.api.i18n.{I18nSupport, MessagesApi} import play.api.mvc.Results.Locked import play.api.mvc.{ActionFunction, ActionRefiner, ControllerComponents, MessagesControllerComponents, Result} import services.partials.MessageFrontendService import services.{CitizenDetailsService, PersonDetailsHiddenResponse, PersonDetailsSuccessResponse} import uk.gov.hmrc.http.HeaderCarrier import uk.gov.hmrc.play.HeaderCarrierConverter import uk.gov.hmrc.renderer.TemplateRenderer import util.LocalPartialRetriever import scala.concurrent.{ExecutionContext, Future} class GetPersonDetailsAction @Inject()( citizenDetailsService: CitizenDetailsService, messageFrontendService: MessageFrontendService, cc: ControllerComponents, val messagesApi: MessagesApi)( implicit configDecorator: ConfigDecorator, partialRetriever: LocalPartialRetriever, ec: ExecutionContext, templateRenderer: TemplateRenderer) extends ActionRefiner[UserRequest, UserRequest] with ActionFunction[UserRequest, UserRequest] with I18nSupport { override protected def refine[A](request: UserRequest[A]): Future[Either[Result, UserRequest[A]]] = populatingUnreadMessageCount()(request).flatMap { messageCount => if (!request.uri.contains("/signout")) { getPersonDetails()(request).map { a => a.fold( Left(_), pd => Right( UserRequest( request.nino, request.retrievedName, request.saUserType, request.credentials, request.confidenceLevel, pd, request.trustedHelper, request.profile, messageCount, request.activeTab, request.breadcrumb, request.request ) ) ) } } else { Future.successful( Right( UserRequest( request.nino, request.retrievedName, request.saUserType, request.credentials, request.confidenceLevel, None, request.trustedHelper, request.profile, messageCount, request.activeTab, request.breadcrumb, request.request ) ) ) } } def populatingUnreadMessageCount()(implicit request: UserRequest[_]): Future[Option[Int]] = messageFrontendService.getUnreadMessageCount private def getPersonDetails()(implicit request: UserRequest[_]): Future[Either[Result, Option[PersonDetails]]] = { implicit val hc: HeaderCarrier = HeaderCarrierConverter.fromHeadersAndSession(request.headers, Some(request.session)) request.nino match { case Some(nino) => citizenDetailsService.personDetails(nino).map { case PersonDetailsSuccessResponse(pd) => Right(Some(pd)) case PersonDetailsHiddenResponse => Left(Locked(views.html.manualCorrespondence())) case _ => Right(None) } case _ => Future.successful(Right(None)) } } override protected def executionContext: ExecutionContext = cc.executionContext }
Example 47
Source File: MinimumAuthAction.scala From pertax-frontend with Apache License 2.0 | 5 votes |
package controllers.auth import com.google.inject.Inject import config.ConfigDecorator import controllers.auth.requests.{AuthenticatedRequest, SelfAssessmentEnrolment, SelfAssessmentStatus} import controllers.routes import models.UserName import play.api.Configuration import play.api.mvc._ import uk.gov.hmrc.auth.core._ import uk.gov.hmrc.auth.core.retrieve.v2.Retrievals import uk.gov.hmrc.auth.core.retrieve.{Name, v2, ~} import uk.gov.hmrc.domain import uk.gov.hmrc.domain.SaUtr import uk.gov.hmrc.http.HeaderCarrier import uk.gov.hmrc.play.HeaderCarrierConverter import scala.concurrent.{ExecutionContext, Future} class MinimumAuthAction @Inject()( val authConnector: AuthConnector, configuration: Configuration, configDecorator: ConfigDecorator, sessionAuditor: SessionAuditor, cc: ControllerComponents)(implicit ec: ExecutionContext) extends AuthAction with AuthorisedFunctions { override def invokeBlock[A](request: Request[A], block: AuthenticatedRequest[A] => Future[Result]): Future[Result] = { implicit val hc: HeaderCarrier = HeaderCarrierConverter.fromHeadersAndSession(request.headers, Some(request.session)) authorised(ConfidenceLevel.L50) .retrieve( Retrievals.nino and Retrievals.allEnrolments and Retrievals.credentials and Retrievals.confidenceLevel and Retrievals.name and Retrievals.trustedHelper and Retrievals.profile) { case nino ~ Enrolments(enrolments) ~ Some(credentials) ~ confidenceLevel ~ name ~ trustedHelper ~ profile => val saEnrolment = enrolments.find(_.key == "IR-SA").flatMap { enrolment => enrolment.identifiers .find(id => id.key == "UTR") .map(key => SelfAssessmentEnrolment(SaUtr(key.value), SelfAssessmentStatus.fromString(enrolment.state))) } val trimmedRequest: Request[A] = request .map { case AnyContentAsFormUrlEncoded(data) => AnyContentAsFormUrlEncoded(data.map { case (key, vals) => (key, vals.map(_.trim)) }) case b => b } .asInstanceOf[Request[A]] val authenticatedRequest = AuthenticatedRequest[A]( nino.map(domain.Nino), saEnrolment, credentials, confidenceLevel, Some(UserName(name.getOrElse(Name(None, None)))), trustedHelper, profile, enrolments, trimmedRequest ) for { result <- block(authenticatedRequest) updatedResult <- sessionAuditor.auditOnce(authenticatedRequest, result) } yield updatedResult case _ => throw new RuntimeException("Can't find credentials for user") } } recover { case _: NoActiveSession => Results.Redirect(routes.PublicController.sessionTimeout()).withNewSession case _: InsufficientEnrolments => throw InsufficientEnrolments("") } override def parser: BodyParser[AnyContent] = cc.parsers.defaultBodyParser override protected def executionContext: ExecutionContext = cc.executionContext }
Example 48
Source File: SelfAssessmentController.scala From pertax-frontend with Apache License 2.0 | 5 votes |
package controllers

import com.google.inject.Inject
import config.ConfigDecorator
import controllers.auth.requests.UserRequest
import controllers.auth.{AuthJourney, WithBreadcrumbAction}
import error.RendersErrors
import models._
import org.joda.time.DateTime
import play.api.mvc.{Action, AnyContent, MessagesControllerComponents}
import uk.gov.hmrc.http.HeaderCarrier
import uk.gov.hmrc.play.audit.http.connector.{AuditConnector, AuditResult}
import uk.gov.hmrc.play.bootstrap.binders.SafeRedirectUrl
import uk.gov.hmrc.renderer.TemplateRenderer
import uk.gov.hmrc.time.CurrentTaxYear
import util.AuditServiceTools.buildEvent
import util.{DateTimeTools, LocalPartialRetriever}
import views.html.ActivatedSaFilerIntermediateView
import views.html.iv.failure.{CannotConfirmIdentityView, FailedIvContinueToActivateSaView}
import views.html.selfassessment.RequestAccessToSelfAssessmentView

import scala.concurrent.{ExecutionContext, Future}

class SelfAssessmentController @Inject()(
  authJourney: AuthJourney,
  withBreadcrumbAction: WithBreadcrumbAction,
  auditConnector: AuditConnector,
  cc: MessagesControllerComponents,
  activatedSaFilerIntermediateView: ActivatedSaFilerIntermediateView,
  failedIvContinueToActivateSaView: FailedIvContinueToActivateSaView,
  cannotConfirmIdentityView: CannotConfirmIdentityView,
  requestAccessToSelfAssessmentView: RequestAccessToSelfAssessmentView)(
  implicit partialRetriever: LocalPartialRetriever,
  configDecorator: ConfigDecorator,
  val templateRenderer: TemplateRenderer,
  ec: ExecutionContext)
    extends PertaxBaseController(cc) with CurrentTaxYear with RendersErrors {

  override def now: () => DateTime = () => DateTime.now()

  def handleSelfAssessment: Action[AnyContent] =
    (authJourney.authWithPersonalDetails andThen withBreadcrumbAction.addBreadcrumb(baseBreadcrumb)) {
      implicit request =>
        if (request.isGovernmentGateway) {
          request.saUserType match {
            case NotYetActivatedOnlineFilerSelfAssessmentUser(_) =>
              Redirect(configDecorator.ssoToActivateSaEnrolmentPinUrl)
            case WrongCredentialsSelfAssessmentUser(_) =>
              Redirect(routes.SaWrongCredentialsController.landingPage())
            case NotEnrolledSelfAssessmentUser(_) =>
              Redirect(routes.SelfAssessmentController.requestAccess())
            case _ => Redirect(routes.HomeController.index())
          }
        } else {
          error(INTERNAL_SERVER_ERROR)
        }
    }

  def ivExemptLandingPage(continueUrl: Option[SafeRedirectUrl]): Action[AnyContent] =
    authJourney.minimumAuthWithSelfAssessment { implicit request =>
      val retryUrl = routes.ApplicationController.uplift(continueUrl).url
      request.saUserType match {
        case ActivatedOnlineFilerSelfAssessmentUser(x) =>
          handleIvExemptAuditing("Activated online SA filer")
          Ok(activatedSaFilerIntermediateView(x.toString, DateTimeTools.previousAndCurrentTaxYear))
        case NotYetActivatedOnlineFilerSelfAssessmentUser(_) =>
          handleIvExemptAuditing("Not yet activated SA filer")
          Ok(failedIvContinueToActivateSaView())
        case WrongCredentialsSelfAssessmentUser(_) =>
          handleIvExemptAuditing("Wrong credentials SA filer")
          Redirect(routes.SaWrongCredentialsController.landingPage())
        case NotEnrolledSelfAssessmentUser(_) =>
          handleIvExemptAuditing("Never enrolled SA filer")
          Redirect(routes.SelfAssessmentController.requestAccess())
        case NonFilerSelfAssessmentUser =>
          Ok(cannotConfirmIdentityView(retryUrl))
      }
    }

  private def handleIvExemptAuditing(
    saUserType: String)(implicit hc: HeaderCarrier, request: UserRequest[_]): Future[AuditResult] =
    auditConnector.sendEvent(
      buildEvent(
        "saIdentityVerificationBypass",
        "sa17_exceptions_or_insufficient_evidence",
        Map("saUserType" -> Some(saUserType))))

  def requestAccess: Action[AnyContent] =
    authJourney.minimumAuthWithSelfAssessment { implicit request =>
      request.saUserType match {
        case NotEnrolledSelfAssessmentUser(saUtr) =>
          val deadlineYear = current.finishYear.toString
          Ok(requestAccessToSelfAssessmentView(saUtr.utr, deadlineYear))
        case _ => Redirect(routes.HomeController.index())
      }
    }
}
Example 49
Source File: AddressJourneyCachingHelper.scala From pertax-frontend with Apache License 2.0 | 5 votes |
package controllers.controllershelpers

import com.google.inject.{Inject, Singleton}
import controllers.bindable.AddrType
import controllers.routes
import models.{AddressFinderDtoId, AddressJourneyData, AddressPageVisitedDtoId, CacheIdentifier, SelectedAddressRecordId, SelectedRecordSetId, SubmittedAddressDtoId, SubmittedInternationalAddressChoiceId, SubmittedResidencyChoiceDtoId, SubmittedStartDateId, SubmittedTaxCreditsChoiceId}
import models.addresslookup.{AddressRecord, RecordSet}
import models.dto._
import play.api.libs.json.Writes
import play.api.mvc.{Result, Results}
import services.LocalSessionCache
import uk.gov.hmrc.http.cache.client.CacheMap
import uk.gov.hmrc.http.{HeaderCarrier, HttpResponse}

import scala.concurrent.{ExecutionContext, Future}

@Singleton
class AddressJourneyCachingHelper @Inject()(val sessionCache: LocalSessionCache)(implicit ec: ExecutionContext)
    extends Results {

  val addressLookupServiceDownKey = "addressLookupServiceDown"

  def addToCache[A: Writes](id: CacheIdentifier[A], record: A)(implicit hc: HeaderCarrier): Future[CacheMap] =
    sessionCache.cache(id.id, record)

  def cacheAddressLookupServiceDown()(implicit hc: HeaderCarrier): Future[CacheMap] =
    sessionCache.cache(addressLookupServiceDownKey, true)

  def clearCache()(implicit hc: HeaderCarrier): Future[HttpResponse] =
    sessionCache.remove()

  def gettingCachedAddressPageVisitedDto[T](block: Option[AddressPageVisitedDto] => Future[T])(
    implicit hc: HeaderCarrier): Future[T] =
    sessionCache.fetch() flatMap {
      case Some(cacheMap) =>
        block(cacheMap.getEntry[AddressPageVisitedDto](AddressPageVisitedDtoId.id))
      case None =>
        block(None)
    }

  def gettingCachedAddressLookupServiceDown[T](block: Option[Boolean] => T)(implicit hc: HeaderCarrier): Future[T] =
    sessionCache.fetch() map { cacheMap =>
      block(cacheMap.flatMap(_.getEntry[Boolean](addressLookupServiceDownKey)))
    }

  def gettingCachedTaxCreditsChoiceDto[T](block: Option[TaxCreditsChoiceDto] => T)(
    implicit hc: HeaderCarrier): Future[T] =
    sessionCache.fetch() map { cacheMap =>
      block(cacheMap.flatMap(_.getEntry[TaxCreditsChoiceDto](SubmittedTaxCreditsChoiceId.id)))
    }

  def gettingCachedJourneyData[T](typ: AddrType)(block: AddressJourneyData => Future[T])(
    implicit hc: HeaderCarrier): Future[T] =
    sessionCache.fetch() flatMap {
      case Some(cacheMap) =>
        block(
          AddressJourneyData(
            cacheMap.getEntry[AddressPageVisitedDto](AddressPageVisitedDtoId.id),
            cacheMap.getEntry[ResidencyChoiceDto](SubmittedResidencyChoiceDtoId(typ).id),
            cacheMap.getEntry[RecordSet](SelectedRecordSetId(typ).id),
            cacheMap.getEntry[AddressFinderDto](AddressFinderDtoId(typ).id),
            cacheMap.getEntry[AddressRecord](SelectedAddressRecordId(typ).id),
            cacheMap.getEntry[AddressDto](SubmittedAddressDtoId(typ).id),
            cacheMap.getEntry[InternationalAddressChoiceDto](SubmittedInternationalAddressChoiceId.id),
            cacheMap.getEntry[DateDto](SubmittedStartDateId(typ).id),
            cacheMap.getEntry[Boolean](addressLookupServiceDownKey).getOrElse(false)
          )
        )
      case None =>
        block(AddressJourneyData(None, None, None, None, None, None, None, None, addressLookupServiceDown = false))
    }

  def enforceDisplayAddressPageVisited(addressPageVisitedDto: Option[AddressPageVisitedDto])(block: => Future[Result])(
    implicit hc: HeaderCarrier): Future[Result] =
    addressPageVisitedDto match {
      case Some(_) => block
      case None =>
        Future.successful(Redirect(controllers.address.routes.PersonalDetailsController.onPageLoad()))
    }

  def enforceResidencyChoiceSubmitted(journeyData: AddressJourneyData)(
    block: AddressJourneyData => Future[Result]): Future[Result] =
    journeyData match {
      case AddressJourneyData(_, Some(_), _, _, _, _, _, _, _) => block(journeyData)
      case AddressJourneyData(_, None, _, _, _, _, _, _, _) =>
        Future.successful(Redirect(controllers.address.routes.PersonalDetailsController.onPageLoad()))
    }
}
Example 50
Source File: UserResearchDismissalController.scala From pertax-frontend with Apache License 2.0 | 5 votes |
package controllers import config.ConfigDecorator import controllers.auth.AuthJourney import controllers.controllershelpers.HomePageCachingHelper import error.LocalErrorHandler import com.google.inject.Inject import play.api.i18n.MessagesApi import play.api.mvc.{Action, AnyContent, MessagesControllerComponents} import services._ import services.partials.MessageFrontendService import uk.gov.hmrc.auth.core.AuthConnector import uk.gov.hmrc.play.audit.http.connector.AuditConnector import util.LocalPartialRetriever import scala.concurrent.ExecutionContext class UserResearchDismissalController @Inject()( val citizenDetailsService: CitizenDetailsService, val messageFrontendService: MessageFrontendService, val localErrorHandler: LocalErrorHandler, val homePageCachingHelper: HomePageCachingHelper, authJourney: AuthJourney, auditConnector: AuditConnector, authConnector: AuthConnector, cc: MessagesControllerComponents)( implicit partialRetriever: LocalPartialRetriever, configDecorator: ConfigDecorator, ec: ExecutionContext) extends PertaxBaseController(cc) { def dismissUrBanner: Action[AnyContent] = authJourney.authWithPersonalDetails { implicit request => homePageCachingHelper.storeUserUrDismissal() NoContent } }
Example 51
Source File: AddressController.scala From pertax-frontend with Apache License 2.0 | 5 votes |
package controllers.address import com.google.inject.Inject import config.ConfigDecorator import controllers.PertaxBaseController import controllers.auth.{AuthJourney, WithActiveTabAction} import controllers.auth.requests.UserRequest import models.{Address, PersonDetails} import play.api.mvc.{ActionBuilder, AnyContent, MessagesControllerComponents, Result} import uk.gov.hmrc.domain.Nino import uk.gov.hmrc.renderer.{ActiveTabYourAccount, TemplateRenderer} import util.LocalPartialRetriever import views.html.error import views.html.interstitial.DisplayAddressInterstitialView import scala.concurrent.{ExecutionContext, Future} abstract class AddressController @Inject()( authJourney: AuthJourney, withActiveTabAction: WithActiveTabAction, cc: MessagesControllerComponents, displayAddressInterstitialView: DisplayAddressInterstitialView)( implicit partialRetriever: LocalPartialRetriever, configDecorator: ConfigDecorator, templateRenderer: TemplateRenderer, ec: ExecutionContext) extends PertaxBaseController(cc) { def authenticate: ActionBuilder[UserRequest, AnyContent] = authJourney.authWithPersonalDetails andThen withActiveTabAction .addActiveTab(ActiveTabYourAccount) def addressJourneyEnforcer(block: Nino => PersonDetails => Future[Result])( implicit request: UserRequest[_]): Future[Result] = (for { payeAccount <- request.nino personDetails <- request.personDetails } yield { block(payeAccount)(personDetails) }).getOrElse { Future.successful { val continueUrl = configDecorator.pertaxFrontendHost + routes.PersonalDetailsController .onPageLoad() .url Ok(displayAddressInterstitialView(continueUrl)) } } def internalServerError(implicit userRequest: UserRequest[_]): Result = InternalServerError( error( "global.error.InternalServerError500.title", Some("global.error.InternalServerError500.title"), List("global.error.InternalServerError500.message") )) }
Example 52
Source File: AddressErrorController.scala From pertax-frontend with Apache License 2.0 | 5 votes |
package controllers.address import com.google.inject.Inject import config.ConfigDecorator import controllers.auth.{AuthJourney, WithActiveTabAction} import controllers.bindable.AddrType import controllers.controllershelpers.AddressJourneyCachingHelper import play.api.mvc.{Action, AnyContent, MessagesControllerComponents} import uk.gov.hmrc.renderer.TemplateRenderer import util.LocalPartialRetriever import views.html.interstitial.DisplayAddressInterstitialView import views.html.personaldetails.{AddressAlreadyUpdatedView, CannotUseServiceView} import scala.concurrent.{ExecutionContext, Future} class AddressErrorController @Inject()( authJourney: AuthJourney, cachingHelper: AddressJourneyCachingHelper, withActiveTabAction: WithActiveTabAction, cc: MessagesControllerComponents, displayAddressInterstitialView: DisplayAddressInterstitialView, cannotUseServiceView: CannotUseServiceView, addressAlreadyUpdatedView: AddressAlreadyUpdatedView )( implicit partialRetriever: LocalPartialRetriever, configDecorator: ConfigDecorator, templateRenderer: TemplateRenderer, ec: ExecutionContext) extends AddressController(authJourney, withActiveTabAction, cc, displayAddressInterstitialView) { def cannotUseThisService(typ: AddrType): Action[AnyContent] = authenticate.async { implicit request => addressJourneyEnforcer { _ => _ => cachingHelper.gettingCachedAddressPageVisitedDto { addressPageVisitedDto => cachingHelper.enforceDisplayAddressPageVisited(addressPageVisitedDto) { Future.successful(Ok(cannotUseServiceView(typ))) } } } } def showAddressAlreadyUpdated(typ: AddrType): Action[AnyContent] = authenticate.async { implicit request => addressJourneyEnforcer { _ => _ => Future.successful(Ok(addressAlreadyUpdatedView())) } } }
Example 53
Source File: PersonalDetailsController.scala From pertax-frontend with Apache License 2.0 | 5 votes |
package controllers.address

import com.google.inject.Inject
import config.ConfigDecorator
import controllers.auth.{AuthJourney, WithActiveTabAction}
import controllers.controllershelpers.{AddressJourneyCachingHelper, PersonalDetailsCardGenerator}
import models.{AddressJourneyTTLModel, AddressPageVisitedDtoId, PersonDetails}
import play.api.mvc.{Action, AnyContent, MessagesControllerComponents}
import play.twirl.api.Html
import repositories.EditAddressLockRepository
import services.NinoDisplayService
import uk.gov.hmrc.play.audit.http.connector.AuditConnector
import uk.gov.hmrc.renderer.TemplateRenderer
import util.AuditServiceTools.buildPersonDetailsEvent
import util.LocalPartialRetriever
import views.html.interstitial.DisplayAddressInterstitialView
import views.html.personaldetails.PersonalDetailsView

import scala.concurrent.{ExecutionContext, Future}

class PersonalDetailsController @Inject()(
  val personalDetailsCardGenerator: PersonalDetailsCardGenerator,
  val editAddressLockRepository: EditAddressLockRepository,
  ninoDisplayService: NinoDisplayService,
  authJourney: AuthJourney,
  cachingHelper: AddressJourneyCachingHelper,
  withActiveTabAction: WithActiveTabAction,
  auditConnector: AuditConnector,
  cc: MessagesControllerComponents,
  displayAddressInterstitialView: DisplayAddressInterstitialView,
  personalDetailsView: PersonalDetailsView
)(
  implicit partialRetriever: LocalPartialRetriever,
  configDecorator: ConfigDecorator,
  templateRenderer: TemplateRenderer,
  ec: ExecutionContext)
    extends AddressController(authJourney, withActiveTabAction, cc, displayAddressInterstitialView) {

  def onPageLoad: Action[AnyContent] = authenticate.async { implicit request =>
    import models.dto.AddressPageVisitedDto

    for {
      addressModel <- request.nino
                       .map { nino =>
                         editAddressLockRepository.get(nino.withoutSuffix)
                       }
                       .getOrElse(Future.successful(List[AddressJourneyTTLModel]()))
      ninoToDisplay <- ninoDisplayService.getNino
      personalDetailsCards: Seq[Html] = personalDetailsCardGenerator
        .getPersonalDetailsCards(addressModel, ninoToDisplay)
      personDetails: Option[PersonDetails] = request.personDetails
      _ <- personDetails
            .map { details =>
              auditConnector.sendEvent(buildPersonDetailsEvent("personalDetailsPageLinkClicked", details))
            }
            .getOrElse(Future.unit) // no-op when person details are absent
      _ <- cachingHelper.addToCache(AddressPageVisitedDtoId, AddressPageVisitedDto(true))
    } yield Ok(personalDetailsView(personalDetailsCards))
  }
}
Example 54
Source File: TaxCreditsChoiceController.scala From pertax-frontend with Apache License 2.0 | 5 votes |
package controllers.address

import com.google.inject.Inject
import config.ConfigDecorator
import controllers.auth.{AuthJourney, WithActiveTabAction}
import controllers.controllershelpers.AddressJourneyCachingHelper
import models.SubmittedTaxCreditsChoiceId
import models.dto.TaxCreditsChoiceDto
import play.api.mvc.{Action, AnyContent, MessagesControllerComponents}
import uk.gov.hmrc.renderer.TemplateRenderer
import util.LocalPartialRetriever
import views.html.interstitial.DisplayAddressInterstitialView
import views.html.personaldetails.TaxCreditsChoiceView

import scala.concurrent.{ExecutionContext, Future}

class TaxCreditsChoiceController @Inject()(
  authJourney: AuthJourney,
  withActiveTabAction: WithActiveTabAction,
  cc: MessagesControllerComponents,
  cachingHelper: AddressJourneyCachingHelper,
  taxCreditsChoiceView: TaxCreditsChoiceView,
  displayAddressInterstitialView: DisplayAddressInterstitialView)(
  implicit partialRetriever: LocalPartialRetriever,
  configDecorator: ConfigDecorator,
  templateRenderer: TemplateRenderer,
  ec: ExecutionContext)
    extends AddressController(authJourney, withActiveTabAction, cc, displayAddressInterstitialView) {

  def onPageLoad: Action[AnyContent] = authenticate.async { implicit request =>
    addressJourneyEnforcer { _ => _ =>
      cachingHelper.gettingCachedAddressPageVisitedDto { addressPageVisitedDto =>
        cachingHelper.enforceDisplayAddressPageVisited(addressPageVisitedDto) {
          Future.successful(
            Ok(taxCreditsChoiceView(TaxCreditsChoiceDto.form, configDecorator.tcsChangeAddressUrl))
          )
        }
      }
    }
  }

  def onSubmit: Action[AnyContent] = authenticate.async { implicit request =>
    addressJourneyEnforcer { _ => _ =>
      TaxCreditsChoiceDto.form.bindFromRequest.fold(
        formWithErrors => {
          Future.successful(BadRequest(taxCreditsChoiceView(formWithErrors, configDecorator.tcsChangeAddressUrl)))
        },
        taxCreditsChoiceDto => {
          cachingHelper.addToCache(SubmittedTaxCreditsChoiceId, taxCreditsChoiceDto) map { _ =>
            if (taxCreditsChoiceDto.value) {
              Redirect(configDecorator.tcsChangeAddressUrl)
            } else {
              Redirect(routes.ResidencyChoiceController.onPageLoad())
            }
          }
        }
      )
    }
  }
}
Example 55
Source File: InternationalAddressChoiceController.scala From pertax-frontend with Apache License 2.0 | 5 votes |
package controllers.address

import com.google.inject.Inject
import config.ConfigDecorator
import controllers.auth.{AuthJourney, WithActiveTabAction}
import controllers.bindable.AddrType
import controllers.controllershelpers.AddressJourneyCachingHelper
import models.SubmittedInternationalAddressChoiceId
import models.dto.InternationalAddressChoiceDto
import play.api.mvc.{Action, AnyContent, MessagesControllerComponents}
import uk.gov.hmrc.renderer.TemplateRenderer
import util.LocalPartialRetriever
import views.html.interstitial.DisplayAddressInterstitialView
import views.html.personaldetails.InternationalAddressChoiceView

import scala.concurrent.{ExecutionContext, Future}

class InternationalAddressChoiceController @Inject()(
  cachingHelper: AddressJourneyCachingHelper,
  authJourney: AuthJourney,
  withActiveTabAction: WithActiveTabAction,
  cc: MessagesControllerComponents,
  internationalAddressChoiceView: InternationalAddressChoiceView,
  displayAddressInterstitialView: DisplayAddressInterstitialView)(
  implicit partialRetriever: LocalPartialRetriever,
  configDecorator: ConfigDecorator,
  templateRenderer: TemplateRenderer,
  ec: ExecutionContext)
    extends AddressController(authJourney, withActiveTabAction, cc, displayAddressInterstitialView) {

  def onPageLoad(typ: AddrType): Action[AnyContent] = authenticate.async { implicit request =>
    addressJourneyEnforcer { _ => _ =>
      cachingHelper.gettingCachedAddressPageVisitedDto { addressPageVisitedDto =>
        cachingHelper.enforceDisplayAddressPageVisited(addressPageVisitedDto) {
          Future.successful(
            Ok(internationalAddressChoiceView(InternationalAddressChoiceDto.form, typ))
          )
        }
      }
    }
  }

  def onSubmit(typ: AddrType): Action[AnyContent] = authenticate.async { implicit request =>
    addressJourneyEnforcer { _ => _ =>
      InternationalAddressChoiceDto.form.bindFromRequest.fold(
        formWithErrors => {
          Future.successful(BadRequest(internationalAddressChoiceView(formWithErrors, typ)))
        },
        internationalAddressChoiceDto => {
          cachingHelper.addToCache(SubmittedInternationalAddressChoiceId, internationalAddressChoiceDto) map { _ =>
            if (internationalAddressChoiceDto.value) {
              Redirect(routes.PostcodeLookupController.onPageLoad(typ))
            } else {
              if (configDecorator.updateInternationalAddressInPta) {
                Redirect(routes.UpdateInternationalAddressController.onPageLoad(typ))
              } else {
                Redirect(routes.AddressErrorController.cannotUseThisService(typ))
              }
            }
          }
        }
      )
    }
  }
}
Example 56
Source File: ResidencyChoiceController.scala From pertax-frontend with Apache License 2.0 | 5 votes |
package controllers.address

import com.google.inject.Inject
import config.ConfigDecorator
import controllers.auth.{AuthJourney, WithActiveTabAction}
import controllers.controllershelpers.AddressJourneyCachingHelper
import models.SubmittedResidencyChoiceDtoId
import models.dto.{ResidencyChoiceDto, TaxCreditsChoiceDto}
import play.api.mvc.{Action, AnyContent, MessagesControllerComponents}
import uk.gov.hmrc.renderer.TemplateRenderer
import util.LocalPartialRetriever
import views.html.interstitial.DisplayAddressInterstitialView
import views.html.personaldetails.ResidencyChoiceView

import scala.concurrent.{ExecutionContext, Future}

class ResidencyChoiceController @Inject()(
  cachingHelper: AddressJourneyCachingHelper,
  authJourney: AuthJourney,
  withActiveTabAction: WithActiveTabAction,
  cc: MessagesControllerComponents,
  residencyChoiceView: ResidencyChoiceView,
  displayAddressInterstitialView: DisplayAddressInterstitialView)(
  implicit partialRetriever: LocalPartialRetriever,
  configDecorator: ConfigDecorator,
  templateRenderer: TemplateRenderer,
  ec: ExecutionContext)
    extends AddressController(authJourney, withActiveTabAction, cc, displayAddressInterstitialView) {

  def onPageLoad: Action[AnyContent] = authenticate.async { implicit request =>
    addressJourneyEnforcer { _ => _ =>
      cachingHelper.gettingCachedTaxCreditsChoiceDto {
        case Some(TaxCreditsChoiceDto(false)) =>
          Ok(residencyChoiceView(ResidencyChoiceDto.form))
        case _ =>
          if (configDecorator.taxCreditsEnabled) {
            Redirect(routes.PersonalDetailsController.onPageLoad())
          } else {
            Ok(residencyChoiceView(ResidencyChoiceDto.form))
          }
      }
    }
  }

  def onSubmit: Action[AnyContent] = authenticate.async { implicit request =>
    addressJourneyEnforcer { _ => _ =>
      ResidencyChoiceDto.form.bindFromRequest.fold(
        formWithErrors => {
          Future.successful(BadRequest(residencyChoiceView(formWithErrors)))
        },
        residencyChoiceDto => {
          cachingHelper
            .addToCache(SubmittedResidencyChoiceDtoId(residencyChoiceDto.residencyChoice), residencyChoiceDto) map {
            _ =>
              Redirect(routes.InternationalAddressChoiceController.onPageLoad(residencyChoiceDto.residencyChoice))
          }
        }
      )
    }
  }
}
Example 57
Source File: UpdateInternationalAddressController.scala From pertax-frontend with Apache License 2.0 | 5 votes |
package controllers.address

import com.google.inject.Inject
import config.ConfigDecorator
import controllers.auth.{AuthJourney, WithActiveTabAction}
import controllers.bindable.{AddrType, PostalAddrType}
import controllers.controllershelpers.{AddressJourneyCachingHelper, CountryHelper}
import models.dto.{AddressDto, DateDto}
import models.{SubmittedAddressDtoId, SubmittedStartDateId}
import org.joda.time.LocalDate
import play.api.mvc.{Action, AnyContent, MessagesControllerComponents, Result}
import uk.gov.hmrc.play.audit.http.connector.AuditConnector
import uk.gov.hmrc.renderer.TemplateRenderer
import util.AuditServiceTools.buildAddressChangeEvent
import util.LocalPartialRetriever
import views.html.interstitial.DisplayAddressInterstitialView
import views.html.personaldetails.UpdateInternationalAddressView

import scala.concurrent.{ExecutionContext, Future}

class UpdateInternationalAddressController @Inject()(
  countryHelper: CountryHelper,
  cachingHelper: AddressJourneyCachingHelper,
  auditConnector: AuditConnector,
  authJourney: AuthJourney,
  withActiveTabAction: WithActiveTabAction,
  cc: MessagesControllerComponents,
  updateInternationalAddressView: UpdateInternationalAddressView,
  displayAddressInterstitialView: DisplayAddressInterstitialView)(
  implicit partialRetriever: LocalPartialRetriever,
  configDecorator: ConfigDecorator,
  templateRenderer: TemplateRenderer,
  ec: ExecutionContext)
    extends AddressController(authJourney, withActiveTabAction, cc, displayAddressInterstitialView) {

  def onPageLoad(typ: AddrType): Action[AnyContent] = authenticate.async { implicit request =>
    cachingHelper.gettingCachedJourneyData[Result](typ) { journeyData =>
      addressJourneyEnforcer { _ => personDetails =>
        typ match {
          case PostalAddrType =>
            auditConnector.sendEvent(
              buildAddressChangeEvent("postalAddressChangeLinkClicked", personDetails, isInternationalAddress = true))
            cachingHelper.enforceDisplayAddressPageVisited(journeyData.addressPageVisitedDto) {
              Future.successful(
                Ok(
                  updateInternationalAddressView(
                    journeyData.submittedAddressDto.fold(AddressDto.internationalForm)(
                      AddressDto.internationalForm.fill),
                    typ,
                    countryHelper.countries
                  )
                )
              )
            }
          case _ =>
            auditConnector.sendEvent(
              buildAddressChangeEvent("mainAddressChangeLinkClicked", personDetails, isInternationalAddress = true))
            cachingHelper.enforceResidencyChoiceSubmitted(journeyData) { _ =>
              Future.successful(
                Ok(
                  updateInternationalAddressView(AddressDto.internationalForm, typ, countryHelper.countries)
                )
              )
            }
        }
      }
    }
  }

  def onSubmit(typ: AddrType): Action[AnyContent] = authenticate.async { implicit request =>
    cachingHelper.gettingCachedJourneyData[Result](typ) { _ =>
      addressJourneyEnforcer { _ => _ =>
        AddressDto.internationalForm.bindFromRequest.fold(
          formWithErrors => {
            Future.successful(
              BadRequest(updateInternationalAddressView(formWithErrors, typ, countryHelper.countries)))
          },
          addressDto => {
            cachingHelper.addToCache(SubmittedAddressDtoId(typ), addressDto) flatMap { _ =>
              typ match {
                case PostalAddrType =>
                  cachingHelper.addToCache(SubmittedStartDateId(typ), DateDto(LocalDate.now()))
                  Future.successful(Redirect(routes.AddressSubmissionController.onPageLoad(typ)))
                case _ =>
                  Future.successful(Redirect(routes.StartDateController.onPageLoad(typ)))
              }
            }
          }
        )
      }
    }
  }
}
Example 58
Source File: StartDateController.scala From pertax-frontend with Apache License 2.0 | 5 votes |
package controllers.address

import com.google.inject.Inject
import config.ConfigDecorator
import controllers.auth.{AuthJourney, WithActiveTabAction}
import controllers.bindable.{AddrType, PostalAddrType, ResidentialAddrType}
import controllers.controllershelpers.AddressJourneyCachingHelper
import models.dto.DateDto
import models.{Address, SubmittedStartDateId}
import play.api.data.Form
import play.api.mvc.{Action, AnyContent, MessagesControllerComponents, Result}
import uk.gov.hmrc.renderer.TemplateRenderer
import util.{LanguageHelper, LocalPartialRetriever}
import views.html.interstitial.DisplayAddressInterstitialView
import views.html.personaldetails.{CannotUpdateAddressView, EnterStartDateView}

import scala.concurrent.{ExecutionContext, Future}

class StartDateController @Inject()(
  authJourney: AuthJourney,
  withActiveTabAction: WithActiveTabAction,
  cc: MessagesControllerComponents,
  cachingHelper: AddressJourneyCachingHelper,
  enterStartDateView: EnterStartDateView,
  cannotUpdateAddressView: CannotUpdateAddressView,
  displayAddressInterstitialView: DisplayAddressInterstitialView)(
  implicit partialRetriever: LocalPartialRetriever,
  configDecorator: ConfigDecorator,
  templateRenderer: TemplateRenderer,
  ec: ExecutionContext)
    extends AddressController(authJourney, withActiveTabAction, cc, displayAddressInterstitialView) {

  def onPageLoad(typ: AddrType): Action[AnyContent] = authenticate.async { implicit request =>
    addressJourneyEnforcer { _ => personDetails =>
      nonPostalJourneyEnforcer(typ) {
        cachingHelper.gettingCachedJourneyData(typ) { journeyData =>
          val newPostcode = journeyData.submittedAddressDto.map(_.postcode).getOrElse("").toString
          val oldPostcode = personDetails.address.flatMap(add => add.postcode).getOrElse("")
          journeyData.submittedAddressDto map { _ =>
            val postcodesMatch =
              if (newPostcode.replace(" ", "").equalsIgnoreCase(oldPostcode.replace(" ", ""))) {
                journeyData.submittedStartDateDto.fold(dateDtoForm)(dateDtoForm.fill)
              } else {
                dateDtoForm
              }
            Future.successful(Ok(enterStartDateView(postcodesMatch, typ)))
          } getOrElse {
            Future.successful(Redirect(routes.PersonalDetailsController.onPageLoad()))
          }
        }
      }
    }
  }

  def onSubmit(typ: AddrType): Action[AnyContent] = authenticate.async { implicit request =>
    addressJourneyEnforcer { _ => personDetails =>
      nonPostalJourneyEnforcer(typ) {
        dateDtoForm.bindFromRequest.fold(
          formWithErrors => {
            Future.successful(BadRequest(enterStartDateView(formWithErrors, typ)))
          },
          dateDto => {
            cachingHelper.addToCache(SubmittedStartDateId(typ), dateDto) map { _ =>
              val proposedStartDate = dateDto.startDate
              personDetails.address match {
                case Some(Address(_, _, _, _, _, _, _, Some(currentStartDate), _, _)) =>
                  if (!currentStartDate.isBefore(proposedStartDate)) {
                    BadRequest(
                      cannotUpdateAddressView(typ, LanguageHelper.langUtils.Dates.formatDate(proposedStartDate)))
                  } else {
                    Redirect(routes.AddressSubmissionController.onPageLoad(typ))
                  }
                case _ => Redirect(routes.AddressSubmissionController.onPageLoad(typ))
              }
            }
          }
        )
      }
    }
  }

  private def dateDtoForm: Form[DateDto] = DateDto.form(configDecorator.currentLocalDate)

  private def nonPostalJourneyEnforcer(typ: AddrType)(block: => Future[Result]): Future[Result] =
    typ match {
      case _: ResidentialAddrType => block
      case PostalAddrType =>
        Future.successful(Redirect(controllers.address.routes.UpdateAddressController.onPageLoad(typ)))
    }
}
Example 59
Source File: PublicController.scala From pertax-frontend with Apache License 2.0 | 5 votes |
package controllers

import config.ConfigDecorator
import com.google.inject.Inject
import play.api.i18n.MessagesApi
import play.api.mvc.{Action, AnyContent, MessagesControllerComponents}
import uk.gov.hmrc.http.SessionKeys
import uk.gov.hmrc.play.binders.Origin
import uk.gov.hmrc.renderer.TemplateRenderer
import util.LocalPartialRetriever

import scala.concurrent.{ExecutionContext, Future}

class PublicController @Inject()(cc: MessagesControllerComponents)(
  implicit partialRetriever: LocalPartialRetriever,
  configDecorator: ConfigDecorator,
  templateRenderer: TemplateRenderer,
  ec: ExecutionContext)
    extends PertaxBaseController(cc) {

  def verifyEntryPoint: Action[AnyContent] = Action.async { implicit request =>
    Future.successful {
      Redirect(routes.HomeController.index).withNewSession.addingToSession(
        SessionKeys.authProvider -> configDecorator.authProviderVerify
      )
    }
  }

  def governmentGatewayEntryPoint: Action[AnyContent] = Action.async { implicit request =>
    Future.successful {
      Redirect(routes.HomeController.index).withNewSession.addingToSession(
        SessionKeys.authProvider -> configDecorator.authProviderGG
      )
    }
  }

  def sessionTimeout: Action[AnyContent] = Action.async { implicit request =>
    Future.successful {
      Ok(views.html.public.sessionTimeout())
    }
  }

  def redirectToExitSurvey(origin: Origin): Action[AnyContent] = Action.async { implicit request =>
    Future.successful {
      Redirect(configDecorator.getFeedbackSurveyUrl(origin))
    }
  }

  def redirectToTaxCreditsService(): Action[AnyContent] = Action.async { implicit request =>
    Future.successful {
      Redirect(configDecorator.tcsServiceRouterUrl, MOVED_PERMANENTLY)
    }
  }

  def redirectToPersonalDetails(): Action[AnyContent] = Action.async { implicit request =>
    Future.successful {
      Redirect(controllers.address.routes.PersonalDetailsController.onPageLoad())
    }
  }
}
Example 60
Source File: LocalTemplateRenderer.scala From pertax-frontend with Apache License 2.0 | 5 votes |
package config

import com.google.inject.Inject
import play.api.Mode.Mode
import play.api.{Configuration, Environment}
import uk.gov.hmrc.http.HeaderCarrier
import uk.gov.hmrc.play.bootstrap.config.ServicesConfig
import uk.gov.hmrc.play.bootstrap.http.HttpClient
import uk.gov.hmrc.renderer.TemplateRenderer

import scala.concurrent.duration._
import scala.concurrent.{ExecutionContext, Future}

class LocalTemplateRenderer @Inject()(
  environment: Environment,
  configuration: Configuration,
  wsHttp: HttpClient,
  servicesConfig: ServicesConfig)(implicit executionContext: ExecutionContext)
    extends TemplateRenderer {

  val mode: Mode = environment.mode
  val runModeConfiguration: Configuration = configuration

  override lazy val templateServiceBaseUrl = servicesConfig.baseUrl("frontend-template-provider")

  override lazy val refreshAfter: Duration =
    runModeConfiguration.getInt("template.refreshInterval").getOrElse(600).seconds

  private implicit val hc = HeaderCarrier()

  override def fetchTemplate(path: String): Future[String] =
    wsHttp.GET(path).map(_.body)
}
Example 61
Source File: EnhancedPartialRetriever.scala From pertax-frontend with Apache License 2.0 | 5 votes |
package util

import com.google.inject.Inject
import metrics.HasMetrics
import play.api.Logger
import uk.gov.hmrc.http.{HeaderCarrier, HttpException, HttpGet}
import uk.gov.hmrc.play.bootstrap.filters.frontend.crypto.SessionCookieCrypto
import uk.gov.hmrc.play.partials.HtmlPartial._
import uk.gov.hmrc.play.partials.{HeaderCarrierForPartialsConverter, HtmlPartial}

import scala.concurrent.{ExecutionContext, Future}

abstract class EnhancedPartialRetriever @Inject()(sessionCookieCrypto: SessionCookieCrypto)(
  implicit executionContext: ExecutionContext)
    extends HeaderCarrierForPartialsConverter with HasMetrics {

  def http: HttpGet

  override def crypto: String => String = cookie => cookie

  def loadPartial(url: String)(implicit hc: HeaderCarrier): Future[HtmlPartial] =
    withMetricsTimer("load-partial") { timer =>
      http.GET[HtmlPartial](url) map {
        case partial: HtmlPartial.Success =>
          timer.completeTimerAndIncrementSuccessCounter()
          partial
        case partial: HtmlPartial.Failure =>
          timer.completeTimerAndIncrementFailedCounter()
          partial
      } recover {
        case e =>
          timer.completeTimerAndIncrementFailedCounter()
          Logger.warn("Failed to load partial", e)
          e match {
            case ex: HttpException => HtmlPartial.Failure(Some(ex.responseCode))
            case _                 => HtmlPartial.Failure(None)
          }
      }
    }
}
Example 62
Source File: SessionIdFilter.scala From pertax-frontend with Apache License 2.0 | 5 votes |
package filters

import java.util.UUID

import akka.stream.Materializer
import com.google.inject.Inject
import play.api.mvc._
import play.api.mvc.request.{Cell, RequestAttrKey}
import uk.gov.hmrc.http.{SessionKeys, HeaderNames => HMRCHeaderNames}

import scala.concurrent.{ExecutionContext, Future}

class SessionIdFilter(
  override val mat: Materializer,
  uuid: => UUID,
  implicit val ec: ExecutionContext
) extends Filter {

  @Inject
  def this(mat: Materializer, ec: ExecutionContext) {
    this(mat, UUID.randomUUID(), ec)
  }

  override def apply(f: RequestHeader => Future[Result])(rh: RequestHeader): Future[Result] = {

    lazy val sessionId: String = s"session-$uuid"

    if (rh.session.get(SessionKeys.sessionId).isEmpty) {
      val headers = rh.headers.add(
        HMRCHeaderNames.xSessionId -> sessionId
      )

      val session = rh.session + (SessionKeys.sessionId -> sessionId)

      f(rh.withHeaders(headers).addAttr(RequestAttrKey.Session, Cell(session))).map { result =>
        val updatedSession = if (result.session(rh).get(SessionKeys.sessionId).isDefined) {
          result.session(rh)
        } else {
          result.session(rh) + (SessionKeys.sessionId -> sessionId)
        }
        result.withSession(updatedSession)
      }
    } else {
      f(rh)
    }
  }
}
Example 63
Source File: EnrolmentsConnector.scala From pertax-frontend with Apache License 2.0 | 5 votes |
package connectors

import com.google.inject.Inject
import config.ConfigDecorator
import play.api.http.Status._
import uk.gov.hmrc.http.{HeaderCarrier, HttpResponse}
import uk.gov.hmrc.play.bootstrap.http.HttpClient

import scala.concurrent.{ExecutionContext, Future}

class EnrolmentsConnector @Inject()(http: HttpClient, configDecorator: ConfigDecorator) {

  val baseUrl = configDecorator.enrolmentStoreProxyUrl

  def getUserIdsWithEnrolments(
    saUtr: String)(implicit hc: HeaderCarrier, ec: ExecutionContext): Future[Either[String, Seq[String]]] = {
    val url = s"$baseUrl/enrolment-store/enrolments/IR-SA~UTR~$saUtr/users"

    http.GET[HttpResponse](url) map { response =>
      response.status match {
        case OK =>
          Right((response.json \ "principalUserIds").as[Seq[String]])
        case NO_CONTENT => Right(Seq.empty)
        case errorCode  => Left(s"HttpError: $errorCode. Invalid call for getUserIdsWithEnrolments: $response")
      }
    }
  }
}
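Both the HeaderCarrier and the ExecutionContext are left implicit at the call site here, so the mapping over the HTTP response runs on whatever execution context the caller supplies. A minimal, hypothetical caller might look like the sketch below; `injectedHttp` and `injectedConfig` are placeholders for instances that would normally come from the Guice injector, and the UTR is made up.

import scala.concurrent.ExecutionContext.Implicits.global
import uk.gov.hmrc.http.HeaderCarrier

implicit val hc: HeaderCarrier = HeaderCarrier()

// `injectedHttp` and `injectedConfig` are assumed to be provided by dependency injection.
val connector = new EnrolmentsConnector(injectedHttp, injectedConfig)
connector.getUserIdsWithEnrolments("1234567890").foreach {
  case Right(userIds) => println(s"principal user IDs: $userIds")
  case Left(error)    => println(error)
}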
Example 64
Source File: PayApiConnector.scala From pertax-frontend with Apache License 2.0 | 5 votes |
package connectors

import com.google.inject.Inject
import config.ConfigDecorator
import models.{CreatePayment, PaymentRequest}
import play.api.http.Status._
import uk.gov.hmrc.http.{HeaderCarrier, HttpResponse}
import uk.gov.hmrc.play.bootstrap.http.HttpClient

import scala.concurrent.{ExecutionContext, Future}

class PayApiConnector @Inject()(http: HttpClient, configDecorator: ConfigDecorator) {

  def createPayment(
    request: PaymentRequest)(implicit hc: HeaderCarrier, ec: ExecutionContext): Future[Option[CreatePayment]] = {
    val postUrl = configDecorator.makeAPaymentUrl

    http.POST[PaymentRequest, HttpResponse](postUrl, request) flatMap { response =>
      response.status match {
        case CREATED =>
          Future.successful(Some(response.json.as[CreatePayment]))
        case _ => Future.successful(None)
      }
    }
  }
}
Example 65
Source File: SaWrongCredentialsControllerSpec.scala From pertax-frontend with Apache License 2.0 | 5 votes |
package controllers

import controllers.auth.FakeAuthJourney
import models.WrongCredentialsSelfAssessmentUser
import org.scalatestplus.mockito.MockitoSugar
import play.api.i18n.MessagesApi
import play.api.mvc.MessagesControllerComponents
import play.api.test.FakeRequest
import play.api.test.Helpers._
import uk.gov.hmrc.domain.{SaUtr, SaUtrGenerator}
import uk.gov.hmrc.renderer.TemplateRenderer
import util.{BaseSpec, LocalPartialRetriever}
import views.html.selfassessment.{
  DoYouKnowOtherCredentialsView,
  DoYouKnowUserIdView,
  FindYourUserIdView,
  NeedToResetPasswordView,
  SignInAgainView,
  SignedInWrongAccountView
}

import scala.concurrent.ExecutionContext

class SaWrongCredentialsControllerSpec extends BaseSpec with MockitoSugar {

  val fakeAuthJourney = new FakeAuthJourney(
    WrongCredentialsSelfAssessmentUser(SaUtr(new SaUtrGenerator().nextSaUtr.utr)))

  def controller =
    new SaWrongCredentialsController(
      fakeAuthJourney,
      injected[MessagesControllerComponents],
      injected[SignedInWrongAccountView],
      injected[DoYouKnowOtherCredentialsView],
      injected[SignInAgainView],
      injected[DoYouKnowUserIdView],
      injected[NeedToResetPasswordView],
      injected[FindYourUserIdView]
    )(injected[LocalPartialRetriever], config, injected[TemplateRenderer], injected[ExecutionContext])

  "processDoYouKnowOtherCredentials" should {
    "redirect to 'Sign in using Government Gateway' page when supplied with value Yes" in {
      val request = FakeRequest("POST", "").withFormUrlEncodedBody("wrongCredentialsFormChoice" -> "true")
      val result = controller.processDoYouKnowOtherCredentials(request)
      status(result) shouldBe SEE_OTHER
      redirectLocation(await(result)) shouldBe Some(routes.SaWrongCredentialsController.signInAgain().url)
    }

    "redirect to 'You need to use the creds you've created' page when supplied with value No (false)" in {
      val request = FakeRequest("POST", "").withFormUrlEncodedBody("wrongCredentialsFormChoice" -> "false")
      val result = controller.processDoYouKnowOtherCredentials(request)
      status(result) shouldBe SEE_OTHER
      redirectLocation(await(result)) shouldBe Some(routes.SaWrongCredentialsController.doYouKnowUserId().url)
    }

    "return a bad request when supplied no value" in {
      val request = FakeRequest("POST", "")
      val result = controller.processDoYouKnowOtherCredentials(request)
      status(result) shouldBe BAD_REQUEST
    }
  }

  "processDoYouKnowUserId" should {
    "redirect to 'Sign in using Government Gateway' page when supplied with value Yes" in {
      val request = FakeRequest("POST", "").withFormUrlEncodedBody("wrongCredentialsFormChoice" -> "true")
      val result = controller.processDoYouKnowUserId(request)
      status(result) shouldBe SEE_OTHER
      redirectLocation(await(result)) shouldBe Some(routes.SaWrongCredentialsController.needToResetPassword().url)
    }

    "redirect to 'You need to use the creds you've created' page when supplied with value No (false)" in {
      val request = FakeRequest("POST", "").withFormUrlEncodedBody("wrongCredentialsFormChoice" -> "false")
      val result = controller.processDoYouKnowUserId(request)
      status(result) shouldBe SEE_OTHER
      redirectLocation(await(result)) shouldBe Some(routes.SaWrongCredentialsController.findYourUserId().url)
    }

    "return a bad request when supplied no value" in {
      val request = FakeRequest("POST", "")
      val result = controller.processDoYouKnowUserId(request)
      status(result) shouldBe BAD_REQUEST
    }
  }

  "ggSignInUrl" should {
    "be the gg-sign in url" in {
      controller.ggSignInUrl shouldBe "/gg/sign-in?continue=/personal-account&accountType=individual&origin=PERTAX"
    }
  }
}
Example 66
Source File: PublicControllerSpec.scala From pertax-frontend with Apache License 2.0 | 5 votes |
package controllers

import config.ConfigDecorator
import org.scalatestplus.mockito.MockitoSugar
import play.api.mvc.{MessagesControllerComponents, Session}
import play.api.test.FakeRequest
import play.api.test.Helpers._
import uk.gov.hmrc.http.SessionKeys
import uk.gov.hmrc.play.binders.Origin
import uk.gov.hmrc.renderer.TemplateRenderer
import util.BaseSpec
import util.Fixtures._

import scala.concurrent.ExecutionContext

class PublicControllerSpec extends BaseSpec with MockitoSugar {

  private val mockTemplateRenderer = mock[TemplateRenderer]
  private val configDecorator = injected[ConfigDecorator]

  private def controller =
    new PublicController(injected[MessagesControllerComponents])(
      mockLocalPartialRetriever,
      configDecorator,
      mockTemplateRenderer,
      injected[ExecutionContext]
    )

  "Calling PublicController.sessionTimeout" should {
    "return 200" in {
      val r = controller.sessionTimeout(buildFakeRequestWithAuth("GET"))
      status(r) shouldBe OK
    }
  }

  "Calling PublicController.redirectToExitSurvey" should {
    "return 303" in {
      val r = controller.redirectToExitSurvey(Origin("PERTAX"))(buildFakeRequestWithAuth("GET"))
      status(r) shouldBe SEE_OTHER
      redirectLocation(r) shouldBe Some("/feedback/PERTAX")
    }
  }

  "Calling PublicController.redirectToTaxCreditsService" should {
    "redirect to tax-credits-service/renewals/service-router" in {
      val r = controller.redirectToTaxCreditsService()(buildFakeRequestWithAuth("GET"))
      status(r) shouldBe MOVED_PERMANENTLY
      redirectLocation(r) shouldBe Some("/tax-credits-service/renewals/service-router")
    }
  }

  "Calling PublicController.redirectToPersonalDetails" should {
    "redirect to /personal-details page" in {
      val r = controller.redirectToPersonalDetails()(buildFakeRequestWithAuth("GET"))
      status(r) shouldBe SEE_OTHER
      redirectLocation(r) shouldBe Some("/personal-account/personal-details")
    }
  }

  "Calling PublicController.verifyEntryPoint" should {
    "redirect to /personal-account page with Verify auth provider" in {
      val request = FakeRequest("GET", "/personal-account/start-verify")
      val r = controller.verifyEntryPoint()(request)
      status(r) shouldBe SEE_OTHER
      redirectLocation(r) shouldBe Some("/personal-account")
      session(r) shouldBe new Session(Map(SessionKeys.authProvider -> configDecorator.authProviderVerify))
    }
  }

  "Calling PublicController.governmentGatewayEntryPoint" should {
    "redirect to /personal-account page with GG auth provider" in {
      val request = FakeRequest("GET", "/personal-account/start-government-gateway")
      val r = controller.governmentGatewayEntryPoint()(request)
      status(r) shouldBe SEE_OTHER
      redirectLocation(r) shouldBe Some("/personal-account")
      session(r) shouldBe new Session(Map(SessionKeys.authProvider -> configDecorator.authProviderGG))
    }
  }
}
Example 67
Source File: PaymentsControllerSpec.scala From pertax-frontend with Apache License 2.0 | 5 votes |
package controllers

import config.ConfigDecorator
import connectors._
import controllers.auth.requests.UserRequest
import controllers.auth.{AuthJourney, WithBreadcrumbAction}
import models.CreatePayment
import org.joda.time.DateTime
import org.mockito.Matchers.any
import org.mockito.Mockito.when
import org.scalatestplus.mockito.MockitoSugar
import play.api.Application
import play.api.i18n.MessagesApi
import play.api.inject.bind
import play.api.mvc.{ActionBuilder, MessagesControllerComponents, Request, Result}
import play.api.test.FakeRequest
import play.api.test.Helpers.{redirectLocation, _}
import uk.gov.hmrc.renderer.TemplateRenderer
import uk.gov.hmrc.time.CurrentTaxYear
import util.UserRequestFixture.buildUserRequest
import util.{ActionBuilderFixture, BaseSpec}

import scala.concurrent.{ExecutionContext, Future}

class PaymentsControllerSpec extends BaseSpec with CurrentTaxYear with MockitoSugar {

  override def now: () => DateTime = DateTime.now

  lazy val fakeRequest = FakeRequest("", "")

  val mockPayConnector = mock[PayApiConnector]
  val mockAuthJourney = mock[AuthJourney]

  override implicit lazy val app: Application = localGuiceApplicationBuilder()
    .overrides(
      bind[PayApiConnector].toInstance(mockPayConnector),
      bind[AuthJourney].toInstance(mockAuthJourney)
    )
    .build()

  def controller =
    new PaymentsController(
      mockPayConnector,
      mockAuthJourney,
      injected[WithBreadcrumbAction],
      injected[MessagesControllerComponents]
    )(mockLocalPartialRetriever, injected[ConfigDecorator], mock[TemplateRenderer], injected[ExecutionContext])

  when(mockAuthJourney.authWithPersonalDetails).thenReturn(new ActionBuilderFixture {
    override def invokeBlock[A](request: Request[A], block: UserRequest[A] => Future[Result]): Future[Result] =
      block(
        buildUserRequest(
          request = request
        ))
  })

  "makePayment" should {
    "redirect to the response's nextUrl" in {
      val expectedNextUrl = "someNextUrl"
      val createPaymentResponse = CreatePayment("someJourneyId", expectedNextUrl)

      when(mockPayConnector.createPayment(any())(any(), any()))
        .thenReturn(Future.successful(Some(createPaymentResponse)))

      val result = controller.makePayment()(FakeRequest())
      status(result) shouldBe SEE_OTHER
      redirectLocation(result) shouldBe Some("someNextUrl")
    }

    "redirect to a BAD_REQUEST page if createPayment failed" in {
      when(mockPayConnector.createPayment(any())(any(), any()))
        .thenReturn(Future.successful(None))

      val result = controller.makePayment()(FakeRequest())
      status(result) shouldBe BAD_REQUEST
    }
  }
}
Example 68
Source File: PaperlessPreferencesControllerSpec.scala From pertax-frontend with Apache License 2.0 | 5 votes |
package controllers

import config.ConfigDecorator
import controllers.auth.requests.UserRequest
import controllers.auth.{AuthJourney, WithActiveTabAction, WithBreadcrumbAction}
import models.{ActivatedOnlineFilerSelfAssessmentUser, NonFilerSelfAssessmentUser}
import org.mockito.Matchers._
import org.mockito.Mockito._
import org.scalatestplus.mockito.MockitoSugar
import play.api.i18n.MessagesApi
import play.api.mvc.{ActionBuilder, MessagesControllerComponents, Request, Result}
import play.api.test.FakeRequest
import play.api.test.Helpers._
import play.twirl.api.Html
import services.partials.PreferencesFrontendPartialService
import uk.gov.hmrc.auth.core.ConfidenceLevel
import uk.gov.hmrc.auth.core.retrieve.Credentials
import uk.gov.hmrc.domain.SaUtr
import uk.gov.hmrc.play.partials.HtmlPartial
import uk.gov.hmrc.renderer.TemplateRenderer
import util.UserRequestFixture.buildUserRequest
import util.{ActionBuilderFixture, BaseSpec, BetterOptionValues, LocalPartialRetriever, Tools}

import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.{ExecutionContext, Future}

class PaperlessPreferencesControllerSpec extends BaseSpec with MockitoSugar {
  import BetterOptionValues._

  override implicit lazy val app = localGuiceApplicationBuilder().build()

  val mockPreferencesFrontendPartialService = mock[PreferencesFrontendPartialService]
  val mockAuthJourney = mock[AuthJourney]

  def controller: PaperlessPreferencesController =
    new PaperlessPreferencesController(
      mockPreferencesFrontendPartialService,
      mockAuthJourney,
      injected[WithActiveTabAction],
      injected[WithBreadcrumbAction],
      injected[MessagesControllerComponents],
      injected[Tools]
    )(mock[LocalPartialRetriever], injected[ConfigDecorator], injected[TemplateRenderer], injected[ExecutionContext]) {}

  "Calling PaperlessPreferencesController.managePreferences" should {
    "Redirect to preferences-frontend manage paperless url when a user is logged in using GG" in {

      when(mockAuthJourney.authWithPersonalDetails).thenReturn(new ActionBuilderFixture {
        override def invokeBlock[A](request: Request[A], block: UserRequest[A] => Future[Result]): Future[Result] =
          block(
            buildUserRequest(request = request)
          )
      })

      val r = controller.managePreferences(FakeRequest())
      status(r) shouldBe SEE_OTHER

      val redirectUrl = redirectLocation(r).getValue
      val configDecorator = app.injector.instanceOf[ConfigDecorator]
      redirectUrl should include regex s"${configDecorator.preferencesFrontendService}/paperless/check-settings\\?returnUrl=.*\\&returnLinkText=.*"
    }

    "Return 400 for Verify users" in {
      when(mockAuthJourney.authWithPersonalDetails).thenReturn(new ActionBuilderFixture {
        override def invokeBlock[A](request: Request[A], block: UserRequest[A] => Future[Result]): Future[Result] =
          block(
            buildUserRequest(
              credentials = Credentials("", "Verify"),
              confidenceLevel = ConfidenceLevel.L500,
              request = request
            ))
      })

      val r = controller.managePreferences(FakeRequest())
      status(r) shouldBe BAD_REQUEST
    }
  }
}
Example 69
Source File: ClientUtil.scala From daml with Apache License 2.0 | 5 votes |
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.quickstart.iou

import java.util.UUID

import akka.stream.Materializer
import akka.stream.scaladsl.{Sink, Source}
import akka.{Done, NotUsed}
import com.daml.ledger.api.domain.LedgerId
import com.daml.ledger.api.refinements.ApiTypes.{ApplicationId, WorkflowId}
import com.daml.ledger.api.v1.command_submission_service.SubmitRequest
import com.daml.ledger.api.v1.commands.{Command, Commands}
import com.daml.ledger.api.v1.ledger_offset.LedgerOffset
import com.daml.ledger.api.v1.transaction.Transaction
import com.daml.ledger.api.v1.transaction_filter.{Filters, TransactionFilter}
import com.daml.ledger.client.LedgerClient
import com.daml.quickstart.iou.FutureUtil.toFuture
import com.google.protobuf.empty.Empty

import scala.concurrent.{ExecutionContext, Future}

class ClientUtil(
    client: LedgerClient,
    applicationId: ApplicationId,
) {

  import ClientUtil._

  private val ledgerId = client.ledgerId
  private val packageClient = client.packageClient
  private val commandClient = client.commandClient
  private val transactionClient = client.transactionClient

  def listPackages(implicit ec: ExecutionContext): Future[Set[String]] =
    packageClient.listPackages().map(_.packageIds.toSet)

  def ledgerEnd(implicit ec: ExecutionContext): Future[LedgerOffset] =
    transactionClient.getLedgerEnd().flatMap(response => toFuture(response.offset))

  def submitCommand(party: String, workflowId: WorkflowId, cmd: Command.Command): Future[Empty] = {
    val commands = Commands(
      ledgerId = LedgerId.unwrap(ledgerId),
      workflowId = WorkflowId.unwrap(workflowId),
      applicationId = ApplicationId.unwrap(applicationId),
      commandId = uniqueId,
      party = party,
      commands = Seq(Command(cmd)),
    )

    commandClient.submitSingleCommand(SubmitRequest(Some(commands), None))
  }

  def nextTransaction(party: String, offset: LedgerOffset)(
      implicit mat: Materializer): Future[Transaction] =
    transactionClient
      .getTransactions(offset, None, transactionFilter(party))
      .take(1L)
      .runWith(Sink.head)

  def subscribe(party: String, offset: LedgerOffset, max: Option[Long])(f: Transaction => Unit)(
      implicit mat: Materializer): Future[Done] = {
    val source: Source[Transaction, NotUsed] =
      transactionClient.getTransactions(offset, None, transactionFilter(party))
    max.fold(source)(n => source.take(n)) runForeach f
  }

  override lazy val toString: String = s"ClientUtil{ledgerId=$ledgerId}"
}

object ClientUtil {
  def transactionFilter(parties: String*): TransactionFilter =
    TransactionFilter(parties.map((_, Filters.defaultInstance)).toMap)

  def uniqueId: String = UUID.randomUUID.toString

  def workflowIdFromParty(p: String): WorkflowId =
    WorkflowId(s"$p Workflow")
}
Example 70
Source File: ClientUtil.scala From daml with Apache License 2.0 | 5 votes |
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.quickstart.iou

import java.util.UUID

import akka.stream.Materializer
import akka.stream.scaladsl.{Sink, Source}
import akka.{Done, NotUsed}
import com.daml.ledger.api.refinements.ApiTypes.{ApplicationId, WorkflowId}
import com.daml.ledger.api.v1.command_submission_service.SubmitRequest
import com.daml.ledger.api.v1.commands.Commands
import com.daml.ledger.api.v1.ledger_offset.LedgerOffset
import com.daml.ledger.api.v1.transaction.Transaction
import com.daml.ledger.api.v1.transaction_filter.{Filters, TransactionFilter}
import com.daml.ledger.client.LedgerClient
import com.daml.ledger.client.binding.{Primitive => P}
import com.daml.quickstart.iou.FutureUtil.toFuture
import com.google.protobuf.empty.Empty
import scalaz.syntax.tag._

import scala.concurrent.{ExecutionContext, Future}

class ClientUtil(
    client: LedgerClient,
    applicationId: ApplicationId,
) {

  import ClientUtil._

  private val ledgerId = client.ledgerId
  private val commandClient = client.commandClient
  private val transactionClient = client.transactionClient

  def ledgerEnd(implicit ec: ExecutionContext): Future[LedgerOffset] =
    transactionClient.getLedgerEnd().flatMap(response => toFuture(response.offset))

  def submitCommand[T](
      sender: P.Party,
      workflowId: WorkflowId,
      command: P.Update[P.ContractId[T]]): Future[Empty] = {
    commandClient.submitSingleCommand(submitRequest(sender, workflowId, command))
  }

  def submitRequest[T](
      party: P.Party,
      workflowId: WorkflowId,
      seq: P.Update[P.ContractId[T]]*): SubmitRequest = {
    val commands = Commands(
      ledgerId = ledgerId.unwrap,
      workflowId = WorkflowId.unwrap(workflowId),
      applicationId = ApplicationId.unwrap(applicationId),
      commandId = uniqueId,
      party = P.Party.unwrap(party),
      commands = seq.map(_.command)
    )
    SubmitRequest(Some(commands), None)
  }

  def nextTransaction(party: P.Party, offset: LedgerOffset)(
      implicit mat: Materializer): Future[Transaction] =
    transactionClient
      .getTransactions(offset, None, transactionFilter(party))
      .take(1L)
      .runWith(Sink.head)

  def subscribe(party: P.Party, offset: LedgerOffset, max: Option[Long])(f: Transaction => Unit)(
      implicit mat: Materializer): Future[Done] = {
    val source: Source[Transaction, NotUsed] =
      transactionClient.getTransactions(offset, None, transactionFilter(party))
    max.fold(source)(n => source.take(n)) runForeach f
  }

  override lazy val toString: String = s"ClientUtil{ledgerId=$ledgerId}"
}

object ClientUtil {
  def transactionFilter(ps: P.Party*): TransactionFilter =
    TransactionFilter(P.Party.unsubst(ps).map((_, Filters.defaultInstance)).toMap)

  def uniqueId: String = UUID.randomUUID.toString

  def workflowIdFromParty(p: P.Party): WorkflowId =
    WorkflowId(s"${P.Party.unwrap(p): String} Workflow")
}
Example 71
Source File: InMemoryState.scala From daml with Apache License 2.0 | 5 votes |
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.ledger.on.memory

import java.util.concurrent.locks.StampedLock

import com.daml.ledger.on.memory.InMemoryState._
import com.daml.ledger.participant.state.kvutils.Bytes
import com.daml.ledger.participant.state.kvutils.api.LedgerRecord
import com.daml.ledger.participant.state.v1.Offset
import com.google.protobuf.ByteString

import scala.collection.mutable
import scala.concurrent.{ExecutionContext, Future, blocking}

private[memory] class InMemoryState private (log: MutableLog, state: MutableState) {
  private val lockCurrentState = new StampedLock()
  @volatile private var lastLogEntryIndex = 0

  def readLog[A](action: ImmutableLog => A): A =
    action(log) // `log` is mutable, but the interface is immutable

  def newHeadSinceLastWrite(): Int = lastLogEntryIndex

  def write[A](action: (MutableLog, MutableState) => Future[A])(
      implicit executionContext: ExecutionContext
  ): Future[A] =
    for {
      stamp <- Future {
        blocking {
          lockCurrentState.writeLock()
        }
      }
      result <- action(log, state)
        .andThen {
          case _ =>
            lastLogEntryIndex = log.size - 1
            lockCurrentState.unlock(stamp)
        }
    } yield result
}

object InMemoryState {
  type ImmutableLog = IndexedSeq[LedgerRecord]
  type ImmutableState = collection.Map[StateKey, StateValue]

  type MutableLog = mutable.Buffer[LedgerRecord] with ImmutableLog
  type MutableState = mutable.Map[StateKey, StateValue] with ImmutableState

  type StateKey = Bytes
  type StateValue = Bytes

  // The first element will never be read because begin offsets are exclusive.
  private val Beginning = LedgerRecord(Offset.beforeBegin, ByteString.EMPTY, ByteString.EMPTY)

  def empty =
    new InMemoryState(
      log = mutable.ArrayBuffer(Beginning),
      state = mutable.Map.empty,
    )
}
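All mutation funnels through write, which acquires the StampedLock's write stamp on the supplied ExecutionContext (wrapped in blocking so a fork-join pool can compensate) and only releases it once the action's Future completes. A minimal sketch of an append under the lock follows; it assumes code living in the same com.daml.ledger.on.memory package, since the class is package-private, and the record's offset and payload are made-up placeholders.

import com.daml.ledger.participant.state.kvutils.api.LedgerRecord
import com.daml.ledger.participant.state.v1.Offset
import com.google.protobuf.ByteString
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.Future

val state = InMemoryState.empty
val newHead: Future[Int] = state.write { (log, _) =>
  // Mutation is safe here: the stamped write lock is held until this Future completes.
  log += LedgerRecord(Offset.beforeBegin, ByteString.copyFromUtf8("key"), ByteString.copyFromUtf8("value")) // placeholder record
  Future.successful(log.size - 1)
}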
Example 72
Source File: InMemoryLedgerStateOperations.scala From daml with Apache License 2.0 | 5 votes |
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.ledger.on.memory

import com.daml.ledger.on.memory.InMemoryState.MutableLog
import com.daml.ledger.participant.state.kvutils.KVOffset
import com.daml.ledger.participant.state.kvutils.api.LedgerRecord
import com.daml.ledger.participant.state.v1.Offset
import com.daml.ledger.validator.BatchingLedgerStateOperations
import com.daml.ledger.validator.LedgerStateOperations.{Key, Value}

import scala.collection.mutable
import scala.concurrent.{ExecutionContext, Future}

private[memory] final class InMemoryLedgerStateOperations(
    log: InMemoryState.MutableLog,
    state: InMemoryState.MutableState,
)(implicit executionContext: ExecutionContext)
    extends BatchingLedgerStateOperations[Index] {

  import InMemoryLedgerStateOperations.appendEntry

  override def readState(keys: Seq[Key]): Future[Seq[Option[Value]]] =
    Future.successful(keys.map(state.get))

  override def writeState(keyValuePairs: Seq[(Key, Value)]): Future[Unit] = {
    state ++= keyValuePairs
    Future.unit
  }

  override def appendToLog(key: Key, value: Value): Future[Index] =
    Future.successful(appendEntry(log, LedgerRecord(_, key, value)))
}

object InMemoryLedgerStateOperations {

  def apply()(implicit executionContext: ExecutionContext): InMemoryLedgerStateOperations = {
    val inMemoryState = mutable.Map.empty[Key, Value]
    val inMemoryLog = mutable.ArrayBuffer[LedgerRecord]()
    new InMemoryLedgerStateOperations(inMemoryLog, inMemoryState)
  }

  private[memory] def appendEntry(log: MutableLog, createEntry: Offset => LedgerRecord): Index = {
    val entryAtIndex = log.size
    val offset = KVOffset.fromLong(entryAtIndex.toLong)
    val entry = createEntry(offset)
    log += entry
    entryAtIndex
  }
}
Example 73
Source File: InMemoryLedgerStateAccess.scala From daml with Apache License 2.0 | 5 votes |
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.ledger.on.memory

import com.daml.ledger.validator.{
  LedgerStateAccess,
  LedgerStateOperations,
  TimedLedgerStateOperations
}
import com.daml.metrics.Metrics

import scala.concurrent.{ExecutionContext, Future}

private[memory] class InMemoryLedgerStateAccess(state: InMemoryState, metrics: Metrics)(
    implicit executionContext: ExecutionContext)
    extends LedgerStateAccess[Index] {
  override def inTransaction[T](body: LedgerStateOperations[Index] => Future[T]): Future[T] =
    state.write { (log, state) =>
      body(new TimedLedgerStateOperations(new InMemoryLedgerStateOperations(log, state), metrics))
    }
}
Example 74
Source File: InMemoryLedgerReaderWriterSpec.scala From daml with Apache License 2.0 | 5 votes |
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.ledger.on.memory

import com.codahale.metrics.MetricRegistry
import com.daml.ledger.api.testing.utils.AkkaBeforeAndAfterAll
import com.daml.ledger.participant.state.kvutils.api.CommitMetadata
import com.daml.ledger.participant.state.v1.{ParticipantId, SubmissionResult}
import com.daml.ledger.validator.{BatchedValidatingCommitter, LedgerStateOperations}
import com.daml.lf.data.Ref
import com.daml.metrics.Metrics
import com.daml.platform.akkastreams.dispatcher.Dispatcher
import com.google.protobuf.ByteString
import org.mockito.ArgumentMatchers._
import org.mockito.Mockito.{times, verify, when}
import org.scalatest.mockito.MockitoSugar
import org.scalatest.{AsyncWordSpec, Matchers}

import scala.concurrent.{ExecutionContext, Future}

class InMemoryLedgerReaderWriterSpec
    extends AsyncWordSpec
    with AkkaBeforeAndAfterAll
    with Matchers
    with MockitoSugar {

  "commit" should {
    "not signal new head in case of failure" in {
      val mockDispatcher = mock[Dispatcher[Index]]
      val mockCommitter = mock[BatchedValidatingCommitter[Index]]
      when(
        mockCommitter.commit(
          anyString(),
          any[ByteString](),
          any[ParticipantId](),
          any[LedgerStateOperations[Index]])(any[ExecutionContext]()))
        .thenReturn(
          Future.successful(SubmissionResult.InternalError("Validation failed with an exception")))
      val instance = new InMemoryLedgerReaderWriter(
        Ref.ParticipantId.assertFromString("participant ID"),
        "ledger ID",
        mockDispatcher,
        InMemoryState.empty,
        mockCommitter,
        new Metrics(new MetricRegistry)
      )

      instance
        .commit("correlation ID", ByteString.copyFromUtf8("some bytes"), CommitMetadata.Empty)
        .map { actual =>
          verify(mockDispatcher, times(0)).signalNewHead(anyInt())
          actual should be(a[SubmissionResult.InternalError])
        }
    }
  }
}
Example 75
Source File: StreamConsumer.scala From daml with Apache License 2.0 | 5 votes |
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.platform.testing

import com.daml.dec.DirectExecutionContext
import io.grpc.stub.StreamObserver

import scala.concurrent.{ExecutionContext, Future}
import scala.concurrent.duration.FiniteDuration

final class StreamConsumer[A](attach: StreamObserver[A] => Unit) {

  def filterTake(p: A => Boolean)(n: Int): Future[Vector[A]] =
    if (n < 0) {
      Future.failed(new IllegalArgumentException(s"Bad argument $n, non-negative integer required"))
    } else if (n == 0) {
      Future.successful(Vector.empty[A])
    } else {
      val observer = new SizeBoundObserver[A](n, p)
      attach(observer)
      observer.result
    }

  def take(n: Int): Future[Vector[A]] = filterTake(_ => true)(n)

  def find(p: A => Boolean): Future[Option[A]] =
    filterTake(p)(1).map(_.headOption)(DirectExecutionContext)

  def first(): Future[Option[A]] = find(_ => true)

  def within(duration: FiniteDuration)(implicit ec: ExecutionContext): Future[Vector[A]] = {
    val observer = new TimeBoundObserver[A](duration)
    attach(observer)
    observer.result
  }
}
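Note the two ExecutionContext strategies: find avoids a thread hop by mapping on DirectExecutionContext, while within takes the caller's implicit ExecutionContext. A hypothetical caller that feeds the consumer from a synchronous in-memory source rather than a real gRPC stream could look like this sketch:

import io.grpc.stub.StreamObserver
import scala.concurrent.Future

// Push five elements into whatever observer the consumer attaches.
val consumer = new StreamConsumer[Int]({ observer: StreamObserver[Int] =>
  (1 to 5).foreach(observer.onNext)
  observer.onCompleted()
})
val firstThree: Future[Vector[Int]] = consumer.take(3)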
Example 76
Source File: SqlLedgerFactory.scala From daml with Apache License 2.0 | 5 votes |
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.ledger.on.sql

import java.time.Duration

import akka.stream.Materializer
import com.daml.caching
import com.daml.ledger.participant.state.kvutils.api.KeyValueParticipantState
import com.daml.ledger.participant.state.kvutils.app.{
  Config,
  LedgerFactory,
  ParticipantConfig,
  ReadWriteService
}
import com.daml.ledger.participant.state.kvutils.caching._
import com.daml.ledger.participant.state.v1.SeedService
import com.daml.lf.engine.Engine
import com.daml.logging.LoggingContext
import com.daml.platform.configuration.LedgerConfiguration
import com.daml.resources.{Resource, ResourceOwner}
import scopt.OptionParser

import scala.concurrent.ExecutionContext

object SqlLedgerFactory extends LedgerFactory[ReadWriteService, ExtraConfig] {

  override val defaultExtraConfig: ExtraConfig = ExtraConfig(
    jdbcUrl = None,
  )

  override def ledgerConfig(config: Config[ExtraConfig]): LedgerConfiguration =
    super.ledgerConfig(config).copy(initialConfigurationSubmitDelay = Duration.ZERO)

  override def extraConfigParser(parser: OptionParser[Config[ExtraConfig]]): Unit = {
    parser
      .opt[String]("jdbc-url")
      .required()
      .text("The URL used to connect to the database.")
      .action((jdbcUrl, config) => config.copy(extra = config.extra.copy(jdbcUrl = Some(jdbcUrl))))
    ()
  }

  override def manipulateConfig(config: Config[ExtraConfig]): Config[ExtraConfig] =
    config.copy(participants = config.participants.map(_.copy(allowExistingSchemaForIndex = true)))

  override def readWriteServiceOwner(
      config: Config[ExtraConfig],
      participantConfig: ParticipantConfig,
      engine: Engine,
  )(implicit materializer: Materializer, logCtx: LoggingContext): ResourceOwner[ReadWriteService] =
    new Owner(config, participantConfig, engine)

  class Owner(
      config: Config[ExtraConfig],
      participantConfig: ParticipantConfig,
      engine: Engine,
  )(implicit materializer: Materializer, logCtx: LoggingContext)
      extends ResourceOwner[KeyValueParticipantState] {
    override def acquire()(
        implicit executionContext: ExecutionContext
    ): Resource[KeyValueParticipantState] = {
      val jdbcUrl = config.extra.jdbcUrl.getOrElse {
        throw new IllegalStateException("No JDBC URL provided.")
      }
      val metrics = createMetrics(participantConfig, config)
      new SqlLedgerReaderWriter.Owner(
        config.ledgerId,
        participantConfig.participantId,
        metrics = metrics,
        engine,
        jdbcUrl,
        stateValueCache = caching.WeightedCache.from(
          configuration = config.stateValueCache,
          metrics = metrics.daml.kvutils.submission.validator.stateValueCache,
        ),
        seedService = SeedService(config.seeding),
        resetOnStartup = false
      ).acquire()
        .map(readerWriter => new KeyValueParticipantState(readerWriter, readerWriter, metrics))
    }
  }
}
Example 77
Source File: AuthorizationInterceptor.scala From daml with Apache License 2.0 | 5 votes |
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.ledger.api.auth.interceptor

import com.daml.ledger.api.auth.{AuthService, Claims}
import com.daml.platform.server.api.validation.ErrorFactories.unauthenticated
import io.grpc.{
  Context,
  Contexts,
  Metadata,
  ServerCall,
  ServerCallHandler,
  ServerInterceptor,
  Status
}
import org.slf4j.{Logger, LoggerFactory}

import scala.compat.java8.FutureConverters
import scala.concurrent.ExecutionContext
import scala.util.{Failure, Success, Try}

final class AuthorizationInterceptor(protected val authService: AuthService, ec: ExecutionContext)
    extends ServerInterceptor {

  private val logger: Logger = LoggerFactory.getLogger(AuthorizationInterceptor.getClass)
  private val internalAuthenticationError =
    Status.INTERNAL.withDescription("Failed to get claims from request metadata")

  import AuthorizationInterceptor.contextKeyClaim

  override def interceptCall[ReqT, RespT](
      call: ServerCall[ReqT, RespT],
      headers: Metadata,
      nextListener: ServerCallHandler[ReqT, RespT]): ServerCall.Listener[ReqT] = {
    // Note: Context uses ThreadLocal storage, we need to capture it outside of the async block below.
    // Contexts are immutable and safe to pass around.
    val prevCtx = Context.current

    // The method interceptCall() must return a Listener.
    // The target listener is created by calling `Contexts.interceptCall()`.
    // However, this is only done after we have asynchronously received the claims.
    // Therefore, we need to return a listener that buffers all messages until the target listener is available.
    new AsyncForwardingListener[ReqT] {
      FutureConverters
        .toScala(authService.decodeMetadata(headers))
        .onComplete {
          case Failure(exception) =>
            logger.warn(s"Failed to get claims from request metadata: ${exception.getMessage}")
            call.close(internalAuthenticationError, new Metadata())
            new ServerCall.Listener[Nothing]() {}
          case Success(Claims.empty) =>
            logger.debug(s"Auth metadata decoded into empty claims, returning UNAUTHENTICATED")
            call.close(Status.UNAUTHENTICATED, new Metadata())
            new ServerCall.Listener[Nothing]() {}
          case Success(claims) =>
            val nextCtx = prevCtx.withValue(contextKeyClaim, claims)
            // Contexts.interceptCall() creates a listener that wraps all methods of `nextListener`
            // such that `Context.current` returns `nextCtx`.
            val nextListenerWithContext =
              Contexts.interceptCall(nextCtx, call, headers, nextListener)
            setNextListener(nextListenerWithContext)
            nextListenerWithContext
        }(ec)
    }
  }
}

object AuthorizationInterceptor {

  private val contextKeyClaim = Context.key[Claims]("AuthServiceDecodedClaim")

  def extractClaimsFromContext(): Try[Claims] =
    Option(contextKeyClaim.get()).fold[Try[Claims]](Failure(unauthenticated()))(Success(_))

  def apply(authService: AuthService, ec: ExecutionContext): AuthorizationInterceptor =
    new AuthorizationInterceptor(authService, ec)
}
Example 78
Source File: LedgerFactories.scala From daml with Apache License 2.0 | 5 votes |
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.platform.sandbox.perf

import java.io.File

import com.daml.ledger.api.domain.LedgerId
import com.daml.ledger.api.testing.utils.{OwnedResource, Resource}
import com.daml.lf.archive.UniversalArchiveReader
import com.daml.lf.data.Ref
import com.daml.platform.common.LedgerIdMode
import com.daml.platform.sandbox.SandboxServer
import com.daml.platform.sandbox.config.SandboxConfig
import com.daml.platform.sandbox.services.GrpcClientResource
import com.daml.platform.services.time.TimeProviderType.Static
import com.daml.ports.Port
import com.daml.resources.ResourceOwner
import com.daml.testing.postgresql.PostgresResource

import scala.concurrent.ExecutionContext

object LedgerFactories {

  private def getPackageIdOrThrow(file: File): Ref.PackageId =
    UniversalArchiveReader().readFile(file).map(_.all.head._1).get

  private def sandboxConfig(jdbcUrl: Option[String], darFiles: List[File]) =
    SandboxConfig.default.copy(
      port = Port.Dynamic,
      damlPackages = darFiles,
      ledgerIdMode =
        LedgerIdMode.Static(LedgerId(Ref.LedgerString.assertFromString("ledger-server"))),
      jdbcUrl = jdbcUrl,
      timeProviderType = Some(Static),
    )

  val mem = "InMemory"
  val sql = "Postgres"

  def createSandboxResource(store: String, darFiles: List[File])(
      implicit executionContext: ExecutionContext
  ): Resource[LedgerContext] =
    new OwnedResource(
      for {
        jdbcUrl <- store match {
          case `mem` => ResourceOwner.successful(None)
          case `sql` => PostgresResource.owner().map(database => Some(database.url))
        }
        server <- SandboxServer.owner(sandboxConfig(jdbcUrl, darFiles))
        channel <- GrpcClientResource.owner(server.port)
      } yield new LedgerContext(channel, darFiles.map(getPackageIdOrThrow))
    )
}
Example 80
Source File: InfiniteRetries.scala From daml with Apache License 2.0 | 5 votes |
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.platform.sandbox.perf

import akka.actor.ActorSystem

import scala.concurrent.duration.{DurationInt, FiniteDuration}
import scala.concurrent.{ExecutionContext, Future, Promise}
import scala.util.{Failure, Success}

trait InfiniteRetries {

  protected def retry[T](action: => Future[T], delay: FiniteDuration = 10.millis)(
      implicit system: ActorSystem): Future[T] = {
    implicit val ec: ExecutionContext = system.dispatcher
    action.transformWith {
      case Success(v) =>
        Future.successful(v)
      case Failure(t) =>
        val p = Promise[T]()
        system.scheduler.scheduleOnce(
          delay,
          () =>
            retry[T](action, delay).onComplete {
              case Success(s) => p.success(s)
              case Failure(throwable) => p.failure(throwable)
            }
        )
        p.future
    }
  }
}
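A minimal usage sketch, assuming an ActorSystem is available; fetchValue stands in for any flaky Future-producing action.

import akka.actor.ActorSystem
import scala.concurrent.Future

object RetryDemo extends InfiniteRetries {
  implicit val system: ActorSystem = ActorSystem("retry-demo")

  def fetchValue: Future[Int] = Future.successful(42) // placeholder action

  // Re-invoked after the default 10ms delay each time the future fails.
  val eventualValue: Future[Int] = retry(fetchValue)
}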
Example 81
Source File: LedgerContext.scala From daml with Apache License 2.0 | 5 votes |
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.platform.sandbox.perf

import akka.actor.ActorSystem
import akka.pattern
import com.daml.lf.data.Ref.PackageId
import com.daml.ledger.api.domain
import com.daml.ledger.api.v1.active_contracts_service.ActiveContractsServiceGrpc
import com.daml.ledger.api.v1.active_contracts_service.ActiveContractsServiceGrpc.ActiveContractsServiceStub
import com.daml.ledger.api.v1.command_service.CommandServiceGrpc
import com.daml.ledger.api.v1.command_service.CommandServiceGrpc.CommandService
import com.daml.ledger.api.v1.ledger_identity_service.LedgerIdentityServiceGrpc.LedgerIdentityServiceStub
import com.daml.ledger.api.v1.ledger_identity_service.{
  GetLedgerIdentityRequest,
  LedgerIdentityServiceGrpc
}
import com.daml.ledger.api.v1.testing.reset_service.ResetServiceGrpc.ResetService
import com.daml.ledger.api.v1.testing.reset_service.{ResetRequest, ResetServiceGrpc}
import io.grpc.{Channel, StatusRuntimeException}
import org.slf4j.LoggerFactory
import scalaz.syntax.tag._

import scala.concurrent.duration._
import scala.concurrent.{ExecutionContext, Future}

final class LedgerContext(channel: Channel, packageIds: Iterable[PackageId])(
    implicit executionContext: ExecutionContext
) {
  private val logger = LoggerFactory.getLogger(this.getClass)

  val ledgerId: domain.LedgerId =
    domain.LedgerId(
      LedgerIdentityServiceGrpc
        .blockingStub(channel)
        .getLedgerIdentity(GetLedgerIdentityRequest())
        .ledgerId)

  def reset()(implicit system: ActorSystem): Future[LedgerContext] = {
    def waitForNewLedger(retries: Int): Future[domain.LedgerId] =
      if (retries <= 0)
        Future.failed(new RuntimeException("waitForNewLedger: out of retries"))
      else {
        ledgerIdentityService
          .getLedgerIdentity(GetLedgerIdentityRequest())
          .flatMap { resp =>
            // TODO: compare with current Ledger ID and retry when not changed
            Future.successful(domain.LedgerId(resp.ledgerId))
          }
          .recoverWith {
            case _: StatusRuntimeException =>
              logger.debug(
                "waitForNewLedger: retrying identity request in 1 second. {} retries remain",
                retries - 1)
              pattern.after(1.seconds, system.scheduler)(waitForNewLedger(retries - 1))
            case t: Throwable =>
              logger.warn("waitForNewLedger: failed to reconnect!")
              throw t
          }
      }
    for {
      _ <- resetService.reset(ResetRequest(ledgerId.unwrap))
      _ <- waitForNewLedger(10)
    } yield new LedgerContext(channel, packageIds)
  }

  def ledgerIdentityService: LedgerIdentityServiceStub =
    LedgerIdentityServiceGrpc.stub(channel)

  def commandService: CommandService =
    CommandServiceGrpc.stub(channel)

  def acsService: ActiveContractsServiceStub =
    ActiveContractsServiceGrpc.stub(channel)

  def resetService: ResetService =
    ResetServiceGrpc.stub(channel)
}
Example 82
Source File: PackageManagementClient.scala From daml with Apache License 2.0 | 5 votes |
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.ledger.client.services.admin

import com.daml.ledger.api.v1.admin.package_management_service.PackageManagementServiceGrpc.PackageManagementServiceStub
import com.daml.ledger.api.v1.admin.package_management_service.{
  ListKnownPackagesRequest,
  PackageDetails,
  UploadDarFileRequest
}
import com.daml.ledger.client.LedgerClient
import com.google.protobuf.ByteString

import scala.concurrent.{ExecutionContext, Future}

object PackageManagementClient {

  private val listKnownPackagesRequest = ListKnownPackagesRequest()

}

final class PackageManagementClient(service: PackageManagementServiceStub)(
    implicit ec: ExecutionContext) {

  def listKnownPackages(token: Option[String] = None): Future[Seq[PackageDetails]] =
    LedgerClient
      .stub(service, token)
      .listKnownPackages(PackageManagementClient.listKnownPackagesRequest)
      .map(_.packageDetails)

  def uploadDarFile(darFile: ByteString, token: Option[String] = None): Future[Unit] =
    LedgerClient
      .stub(service, token)
      .uploadDarFile(UploadDarFileRequest(darFile))
      .map(_ => ())

}
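A usage sketch, assuming a running ledger on a placeholder host and port; app.dar is a placeholder path. The implicit ExecutionContext passed to the client runs the `.map` calls on the gRPC responses.

import com.daml.ledger.api.v1.admin.package_management_service.PackageManagementServiceGrpc
import com.google.protobuf.ByteString
import io.grpc.ManagedChannelBuilder
import java.nio.file.{Files, Paths}
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.Future

object UploadDarDemo {
  val channel = ManagedChannelBuilder.forAddress("localhost", 6865).usePlaintext().build()
  val client = new PackageManagementClient(PackageManagementServiceGrpc.stub(channel))

  // Read the DAR bytes and upload them; the response body is discarded.
  val uploaded: Future[Unit] =
    client.uploadDarFile(ByteString.copyFrom(Files.readAllBytes(Paths.get("app.dar"))))
}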
Example 83
Source File: PartyManagementClient.scala From daml with Apache License 2.0 | 5 votes |
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.ledger.client.services.admin

import com.daml.lf.data.Ref
import com.daml.lf.data.Ref.Party
import com.daml.ledger.api.domain.{ParticipantId, PartyDetails}
import com.daml.ledger.api.v1.admin.party_management_service.PartyManagementServiceGrpc.PartyManagementServiceStub
import com.daml.ledger.api.v1.admin.party_management_service.{
  AllocatePartyRequest,
  GetParticipantIdRequest,
  GetPartiesRequest,
  ListKnownPartiesRequest,
  PartyDetails => ApiPartyDetails
}
import com.daml.ledger.client.LedgerClient
import scalaz.OneAnd

import scala.concurrent.{ExecutionContext, Future}

object PartyManagementClient {

  private def details(d: ApiPartyDetails): PartyDetails =
    PartyDetails(
      Party.assertFromString(d.party),
      if (d.displayName.isEmpty) None else Some(d.displayName),
      d.isLocal)

  private val getParticipantIdRequest = GetParticipantIdRequest()

  private val listKnownPartiesRequest = ListKnownPartiesRequest()

  private def getPartiesRequest(parties: OneAnd[Set, Ref.Party]) = {
    import scalaz.std.iterable._
    import scalaz.syntax.foldable._
    GetPartiesRequest(parties.toList)
  }
}

final class PartyManagementClient(service: PartyManagementServiceStub)(
    implicit ec: ExecutionContext) {

  def getParticipantId(token: Option[String] = None): Future[ParticipantId] =
    LedgerClient
      .stub(service, token)
      .getParticipantId(PartyManagementClient.getParticipantIdRequest)
      .map(r => ParticipantId(Ref.ParticipantId.assertFromString(r.participantId)))

  def listKnownParties(token: Option[String] = None): Future[List[PartyDetails]] =
    LedgerClient
      .stub(service, token)
      .listKnownParties(PartyManagementClient.listKnownPartiesRequest)
      .map(_.partyDetails.map(PartyManagementClient.details)(collection.breakOut))

  def getParties(
      parties: OneAnd[Set, Ref.Party],
      token: Option[String] = None): Future[List[PartyDetails]] =
    LedgerClient
      .stub(service, token)
      .getParties(PartyManagementClient.getPartiesRequest(parties))
      .map(_.partyDetails.map(PartyManagementClient.details)(collection.breakOut))

  def allocateParty(
      hint: Option[String],
      displayName: Option[String],
      token: Option[String] = None): Future[PartyDetails] =
    LedgerClient
      .stub(service, token)
      .allocateParty(new AllocatePartyRequest(hint.getOrElse(""), displayName.getOrElse("")))
      .map(_.partyDetails.getOrElse(sys.error("No PartyDetails in response.")))
      .map(PartyManagementClient.details)

}
Example 84
Source File: TransactionClient.scala From daml with Apache License 2.0 | 5 votes |
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.ledger.client.services.transactions

import akka.NotUsed
import akka.stream.scaladsl.Source
import com.daml.grpc.adapter.ExecutionSequencerFactory
import com.daml.ledger.api.domain.LedgerId
import com.daml.ledger.api.v1.ledger_offset.LedgerOffset
import com.daml.ledger.api.v1.transaction.{Transaction, TransactionTree}
import com.daml.ledger.api.v1.transaction_filter.TransactionFilter
import com.daml.ledger.api.v1.transaction_service.TransactionServiceGrpc.TransactionServiceStub
import com.daml.ledger.api.v1.transaction_service._
import com.daml.ledger.client.LedgerClient
import scalaz.syntax.tag._

import scala.concurrent.{ExecutionContext, Future}

final class TransactionClient(ledgerId: LedgerId, service: TransactionServiceStub)(
    implicit esf: ExecutionSequencerFactory) {

  def getTransactionTrees(
      start: LedgerOffset,
      end: Option[LedgerOffset],
      transactionFilter: TransactionFilter,
      verbose: Boolean = false,
      token: Option[String] = None
  ): Source[TransactionTree, NotUsed] =
    TransactionSource.trees(
      LedgerClient.stub(service, token).getTransactionTrees,
      GetTransactionsRequest(ledgerId.unwrap, Some(start), end, Some(transactionFilter), verbose))

  def getTransactions(
      start: LedgerOffset,
      end: Option[LedgerOffset],
      transactionFilter: TransactionFilter,
      verbose: Boolean = false,
      token: Option[String] = None
  ): Source[Transaction, NotUsed] =
    TransactionSource.flat(
      LedgerClient.stub(service, token).getTransactions,
      GetTransactionsRequest(ledgerId.unwrap, Some(start), end, Some(transactionFilter), verbose))

  def getTransactionById(transactionId: String, parties: Seq[String], token: Option[String] = None)(
      implicit ec: ExecutionContext): Future[GetTransactionResponse] =
    LedgerClient
      .stub(service, token)
      .getTransactionById(GetTransactionByIdRequest(ledgerId.unwrap, transactionId, parties))

  def getTransactionByEventId(eventId: String, parties: Seq[String], token: Option[String] = None)(
      implicit ec: ExecutionContext): Future[GetTransactionResponse] =
    LedgerClient
      .stub(service, token)
      .getTransactionByEventId(GetTransactionByEventIdRequest(ledgerId.unwrap, eventId, parties))

  def getFlatTransactionById(
      transactionId: String,
      parties: Seq[String],
      token: Option[String] = None)(
      implicit ec: ExecutionContext): Future[GetFlatTransactionResponse] =
    LedgerClient
      .stub(service, token)
      .getFlatTransactionById(GetTransactionByIdRequest(ledgerId.unwrap, transactionId, parties))

  def getFlatTransactionByEventId(
      eventId: String,
      parties: Seq[String],
      token: Option[String] = None)(
      implicit ec: ExecutionContext): Future[GetFlatTransactionResponse] =
    LedgerClient
      .stub(service, token)
      .getFlatTransactionByEventId(
        GetTransactionByEventIdRequest(ledgerId.unwrap, eventId, parties))

  def getLedgerEnd(token: Option[String] = None): Future[GetLedgerEndResponse] =
    LedgerClient.stub(service, token).getLedgerEnd(GetLedgerEndRequest(ledgerId.unwrap))

}
Example 85
Source File: LedgerIdentityClient.scala From daml with Apache License 2.0 | 5 votes |
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.ledger.client.services.identity

import com.daml.dec.DirectExecutionContext
import com.daml.ledger.api.domain.LedgerId
import com.daml.ledger.api.v1.ledger_identity_service.GetLedgerIdentityRequest
import com.daml.ledger.api.v1.ledger_identity_service.LedgerIdentityServiceGrpc.LedgerIdentityServiceStub
import com.daml.ledger.client.LedgerClient
import com.daml.ledger.client.configuration.LedgerIdRequirement

import scala.concurrent.{ExecutionContext, Future}

final class LedgerIdentityClient(service: LedgerIdentityServiceStub) {

  def satisfies(ledgerIdRequirement: LedgerIdRequirement, token: Option[String] = None)(
      implicit ec: ExecutionContext): Future[LedgerId] =
    for {
      ledgerId <- getLedgerId(token)
    } yield {
      val requirement = ledgerIdRequirement
      require(
        requirement.isAccepted(ledgerId),
        s"Required Ledger ID ${requirement.optionalLedgerId.get} does not match received Ledger ID $ledgerId"
      )
      LedgerId(ledgerId)
    }

  def getLedgerId(token: Option[String] = None): Future[String] =
    LedgerClient
      .stub(service, token)
      .getLedgerIdentity(new GetLedgerIdentityRequest())
      .map(_.ledgerId)(DirectExecutionContext)
}
Example 86
Source File: StaticTime.scala From daml with Apache License 2.0 | 5 votes |
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.ledger.client.services.testing.time

import java.time.Instant
import java.util.concurrent.atomic.AtomicReference

import akka.stream.scaladsl.{Broadcast, Flow, GraphDSL, RunnableGraph, Sink}
import akka.stream.{ClosedShape, KillSwitches, Materializer, UniqueKillSwitch}
import com.daml.api.util.{TimeProvider, TimestampConversion}
import com.daml.api.util.TimestampConversion._
import com.daml.grpc.adapter.ExecutionSequencerFactory
import com.daml.grpc.adapter.client.akka.ClientAdapter
import com.daml.dec.DirectExecutionContext
import com.daml.ledger.api.v1.testing.time_service.{GetTimeRequest, SetTimeRequest}
import com.daml.ledger.api.v1.testing.time_service.TimeServiceGrpc.{TimeService, TimeServiceStub}
import com.daml.ledger.client.LedgerClient

import scala.concurrent.{ExecutionContext, Future}

class StaticTime(
    timeService: TimeService,
    clock: AtomicReference[Instant],
    killSwitch: UniqueKillSwitch,
    ledgerId: String)
    extends TimeProvider
    with AutoCloseable {

  def getCurrentTime: Instant = clock.get

  def timeRequest(instant: Instant) =
    SetTimeRequest(
      ledgerId,
      Some(TimestampConversion.fromInstant(getCurrentTime)),
      Some(TimestampConversion.fromInstant(instant)))

  def setTime(instant: Instant)(implicit ec: ExecutionContext): Future[Unit] = {
    timeService.setTime(timeRequest(instant)).map { _ =>
      val _ = StaticTime.advanceClock(clock, instant)
    }
  }

  override def close(): Unit = killSwitch.shutdown()
}

object StaticTime {

  def advanceClock(clock: AtomicReference[Instant], instant: Instant): Instant = {
    clock.updateAndGet {
      case current if instant isAfter current => instant
      case current => current
    }
  }

  def updatedVia(timeService: TimeServiceStub, ledgerId: String, token: Option[String] = None)(
      implicit m: Materializer,
      esf: ExecutionSequencerFactory): Future[StaticTime] = {
    val clockRef = new AtomicReference[Instant](Instant.EPOCH)
    val killSwitchExternal = KillSwitches.single[Instant]
    val sinkExternal = Sink.head[Instant]

    RunnableGraph
      .fromGraph {
        GraphDSL.create(killSwitchExternal, sinkExternal) {
          case (killSwitch, futureOfFirstElem) =>
            // We serve this in a future which completes when the first element has passed through.
            // Thus we make sure that the object we serve already received time data from the ledger.
            futureOfFirstElem.map(_ => new StaticTime(timeService, clockRef, killSwitch, ledgerId))(
              DirectExecutionContext)
        } { implicit b => (killSwitch, sinkHead) =>
          import GraphDSL.Implicits._

          val instantSource = b.add(
            ClientAdapter
              .serverStreaming(
                GetTimeRequest(ledgerId),
                LedgerClient.stub(timeService, token).getTime)
              .map(r => toInstant(r.getCurrentTime)))

          val updateClock = b.add(Flow[Instant].map { i =>
            advanceClock(clockRef, i)
            i
          })

          val broadcastTimes = b.add(Broadcast[Instant](2))

          val ignore = b.add(Sink.ignore)

          // format: OFF
          instantSource ~> killSwitch ~> updateClock ~> broadcastTimes.in

          broadcastTimes.out(0) ~> sinkHead
          broadcastTimes.out(1) ~> ignore
          // format: ON

          ClosedShape
        }
      }
      .run()
  }
}
Example 87
Source File: MetricsReporting.scala From daml with Apache License 2.0 | 5 votes |
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.platform.sandbox.metrics

import java.time.Duration
import java.util.concurrent.TimeUnit

import com.codahale.metrics.Slf4jReporter.LoggingLevel
import com.codahale.metrics.jmx.JmxReporter
import com.codahale.metrics.{MetricRegistry, Reporter, Slf4jReporter}
import com.daml.metrics.{JvmMetricSet, Metrics}
import com.daml.platform.configuration.MetricsReporter
import com.daml.resources.{Resource, ResourceOwner}

import scala.concurrent.{ExecutionContext, Future}

final class MetricsReporting(
    jmxDomain: String,
    extraMetricsReporter: Option[MetricsReporter],
    extraMetricsReportingInterval: Duration,
) extends ResourceOwner[Metrics] {
  def acquire()(implicit executionContext: ExecutionContext): Resource[Metrics] = {
    val registry = new MetricRegistry
    registry.registerAll(new JvmMetricSet)
    for {
      slf4JReporter <- acquire(newSlf4jReporter(registry))
      _ <- acquire(newJmxReporter(registry))
        .map(_.start())
      _ <- extraMetricsReporter.fold(Resource.unit) { reporter =>
        acquire(reporter.register(registry))
          .map(_.start(extraMetricsReportingInterval.getSeconds, TimeUnit.SECONDS))
      }
      // Trigger a report to the SLF4J logger on shutdown.
      _ <- Resource(Future.successful(slf4JReporter))(reporter =>
        Future.successful(reporter.report()))
    } yield new Metrics(registry)
  }

  private def newJmxReporter(registry: MetricRegistry): JmxReporter =
    JmxReporter
      .forRegistry(registry)
      .inDomain(jmxDomain)
      .build()

  private def newSlf4jReporter(registry: MetricRegistry): Slf4jReporter =
    Slf4jReporter
      .forRegistry(registry)
      .convertRatesTo(TimeUnit.SECONDS)
      .convertDurationsTo(TimeUnit.MILLISECONDS)
      .withLoggingLevel(LoggingLevel.DEBUG)
      .build()

  private def acquire[T <: Reporter](reporter: => T)(
      implicit executionContext: ExecutionContext
  ): Resource[T] =
    ResourceOwner
      .forCloseable(() => reporter)
      .acquire()
}
Example 88
Source File: LedgerApiServer.scala From daml with Apache License 2.0 | 5 votes |
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.platform.apiserver

import akka.actor.ActorSystem
import akka.stream.Materializer
import com.daml.logging.{ContextualizedLogger, LoggingContext}
import com.daml.metrics.Metrics
import com.daml.ports.Port
import com.daml.resources.{Resource, ResourceOwner}
import io.grpc.ServerInterceptor
import io.netty.handler.ssl.SslContext

import scala.concurrent.{ExecutionContext, Future, Promise}

final class LedgerApiServer(
    apiServicesOwner: ResourceOwner[ApiServices],
    desiredPort: Port,
    maxInboundMessageSize: Int,
    address: Option[String],
    sslContext: Option[SslContext] = None,
    interceptors: List[ServerInterceptor] = List.empty,
    metrics: Metrics,
)(implicit actorSystem: ActorSystem, materializer: Materializer, logCtx: LoggingContext)
    extends ResourceOwner[ApiServer] {

  private val logger = ContextualizedLogger.get(this.getClass)

  override def acquire()(implicit executionContext: ExecutionContext): Resource[ApiServer] = {
    val servicesClosedPromise = Promise[Unit]()

    for {
      eventLoopGroups <- new ServerEventLoopGroups.Owner(
        actorSystem.name,
        workerParallelism = sys.runtime.availableProcessors(),
        bossParallelism = 1,
      ).acquire()
      apiServicesResource = apiServicesOwner.acquire()
      apiServices <- apiServicesResource
      server <- new GrpcServerOwner(
        address,
        desiredPort,
        maxInboundMessageSize,
        sslContext,
        interceptors,
        metrics,
        eventLoopGroups,
        apiServices.services,
      ).acquire()
      // Notify the caller that the services have been closed, so a reset request can complete
      // without blocking on the server terminating.
      _ <- Resource(Future.successful(()))(_ =>
        apiServicesResource.release().map(_ => servicesClosedPromise.success(())))
    } yield {
      val host = address.getOrElse("localhost")
      val actualPort = server.getPort
      val transportMedium = if (sslContext.isDefined) "TLS" else "plain text"
      logger.info(s"Listening on $host:$actualPort over $transportMedium.")
      new ApiServer {
        override val port: Port =
          Port(server.getPort)

        override def servicesClosed(): Future[Unit] =
          servicesClosedPromise.future
      }
    }
  }
}
Example 89
Source File: EventLoopGroupOwner.scala From daml with Apache License 2.0 | 5 votes |
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.platform.apiserver

import java.util.UUID
import java.util.concurrent.TimeUnit.MILLISECONDS

import com.daml.resources.{Resource, ResourceOwner}
import io.netty.channel.nio.NioEventLoopGroup
import io.netty.channel.socket.nio.{NioServerSocketChannel, NioSocketChannel}
import io.netty.channel.{Channel, EventLoopGroup, ServerChannel}
import io.netty.util.concurrent.DefaultThreadFactory

import scala.concurrent.{ExecutionContext, Future, Promise}
import scala.util.Try

final class EventLoopGroupOwner(threadPoolName: String, parallelism: Int)
    extends ResourceOwner[EventLoopGroup] {

  override def acquire()(implicit executionContext: ExecutionContext): Resource[EventLoopGroup] =
    Resource(
      Future(new NioEventLoopGroup(
        parallelism,
        new DefaultThreadFactory(s"$threadPoolName-grpc-event-loop-${UUID.randomUUID()}", true))))(
      group => {
        val promise = Promise[Unit]()
        val future = group.shutdownGracefully(0, 0, MILLISECONDS)
        future.addListener((f: io.netty.util.concurrent.Future[_]) =>
          promise.complete(Try(f.get).map(_ => ())))
        promise.future
      }
    )
}

object EventLoopGroupOwner {

  val clientChannelType: Class[_ <: Channel] = classOf[NioSocketChannel]

  val serverChannelType: Class[_ <: ServerChannel] = classOf[NioServerSocketChannel]

}
Example 90
Source File: TimedCommandExecutor.scala From daml with Apache License 2.0 | 5 votes |
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.platform.apiserver.execution

import com.daml.ledger.api.domain
import com.daml.lf.crypto
import com.daml.logging.LoggingContext
import com.daml.metrics.{Metrics, Timed}
import com.daml.platform.store.ErrorCause

import scala.concurrent.{ExecutionContext, Future}

class TimedCommandExecutor(
    delegate: CommandExecutor,
    metrics: Metrics,
) extends CommandExecutor {

  override def execute(
      commands: domain.Commands,
      submissionSeed: crypto.Hash,
  )(
      implicit ec: ExecutionContext,
      logCtx: LoggingContext,
  ): Future[Either[ErrorCause, CommandExecutionResult]] =
    Timed.future(metrics.daml.execution.total, delegate.execute(commands, submissionSeed))

}
Example 91
Source File: CommandExecutor.scala From daml with Apache License 2.0 | 5 votes |
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.platform.apiserver.execution

import com.daml.ledger.api.domain.{Commands => ApiCommands}
import com.daml.lf.crypto
import com.daml.logging.LoggingContext
import com.daml.platform.store.ErrorCause

import scala.concurrent.{ExecutionContext, Future}

trait CommandExecutor {
  def execute(
      commands: ApiCommands,
      submissionSeed: crypto.Hash,
  )(
      implicit ec: ExecutionContext,
      logCtx: LoggingContext,
  ): Future[Either[ErrorCause, CommandExecutionResult]]
}
Example 92
Source File: ServerEventLoopGroups.scala From daml with Apache License 2.0 | 5 votes |
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.platform.apiserver

import com.daml.resources.{Resource, ResourceOwner}
import io.grpc.netty.NettyServerBuilder
import io.netty.channel.{EventLoopGroup, ServerChannel}

import scala.concurrent.ExecutionContext

case class ServerEventLoopGroups(
    worker: EventLoopGroup,
    boss: EventLoopGroup,
    channelType: Class[_ <: ServerChannel],
) {
  def populate(builder: NettyServerBuilder): NettyServerBuilder =
    builder
      .channelType(channelType)
      .bossEventLoopGroup(boss)
      .workerEventLoopGroup(worker)
}

object ServerEventLoopGroups {

  final class Owner(name: String, workerParallelism: Int, bossParallelism: Int)
      extends ResourceOwner[ServerEventLoopGroups] {

    override def acquire()(
        implicit executionContext: ExecutionContext
    ): Resource[ServerEventLoopGroups] =
      Resource
        .sequence(
          Seq(
            new EventLoopGroupOwner(s"$name-worker", parallelism = workerParallelism).acquire(),
            new EventLoopGroupOwner(s"$name-boss", parallelism = bossParallelism).acquire(),
          ))
        .map {
          case Seq(worker, boss) =>
            ServerEventLoopGroups(
              worker = worker,
              boss = boss,
              channelType = EventLoopGroupOwner.serverChannelType,
            )
        }
  }
}
Example 93
Source File: ApiActiveContractsService.scala From daml with Apache License 2.0 | 5 votes |
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.platform.apiserver.services

import akka.NotUsed
import akka.stream.Materializer
import akka.stream.scaladsl.Source
import com.daml.ledger.participant.state.index.v2.{IndexActiveContractsService => ACSBackend}
import com.daml.dec.DirectExecutionContext
import com.daml.grpc.adapter.ExecutionSequencerFactory
import com.daml.ledger.api.domain.LedgerId
import com.daml.ledger.api.v1.active_contracts_service.ActiveContractsServiceGrpc.ActiveContractsService
import com.daml.ledger.api.v1.active_contracts_service._
import com.daml.ledger.api.validation.TransactionFilterValidator
import com.daml.logging.{ContextualizedLogger, LoggingContext}
import com.daml.platform.api.grpc.GrpcApiService
import com.daml.platform.server.api.validation.ActiveContractsServiceValidation
import io.grpc.{BindableService, ServerServiceDefinition}

import scala.concurrent.ExecutionContext

final class ApiActiveContractsService private (
    backend: ACSBackend,
)(
    implicit executionContext: ExecutionContext,
    protected val mat: Materializer,
    protected val esf: ExecutionSequencerFactory,
    logCtx: LoggingContext,
) extends ActiveContractsServiceAkkaGrpc
    with GrpcApiService {

  private val logger = ContextualizedLogger.get(this.getClass)

  override protected def getActiveContractsSource(
      request: GetActiveContractsRequest): Source[GetActiveContractsResponse, NotUsed] = {
    logger.trace("Serving an Active Contracts request...")

    TransactionFilterValidator
      .validate(request.getFilter, "filter")
      .fold(Source.failed, backend.getActiveContracts(_, request.verbose))
      .via(logger.logErrorsOnStream)
  }

  override def bindService(): ServerServiceDefinition =
    ActiveContractsServiceGrpc.bindService(this, DirectExecutionContext)
}

object ApiActiveContractsService {

  def create(ledgerId: LedgerId, backend: ACSBackend)(
      implicit ec: ExecutionContext,
      mat: Materializer,
      esf: ExecutionSequencerFactory,
      logCtx: LoggingContext): ActiveContractsService with GrpcApiService =
    new ActiveContractsServiceValidation(new ApiActiveContractsService(backend), ledgerId)
    with BindableService {
      override def bindService(): ServerServiceDefinition =
        ActiveContractsServiceGrpc.bindService(this, DirectExecutionContext)
    }
}
Example 94
Source File: ApiCommandCompletionService.scala From daml with Apache License 2.0 | 5 votes |
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.platform.apiserver.services

import java.util.concurrent.atomic.AtomicLong

import akka.NotUsed
import akka.stream.Materializer
import akka.stream.scaladsl.Source
import com.daml.ledger.participant.state.index.v2.IndexCompletionsService
import com.daml.dec.DirectExecutionContext
import com.daml.grpc.adapter.ExecutionSequencerFactory
import com.daml.ledger.api.domain
import com.daml.ledger.api.domain.{LedgerId, LedgerOffset}
import com.daml.ledger.api.messages.command.completion.CompletionStreamRequest
import com.daml.ledger.api.v1.command_completion_service._
import com.daml.ledger.api.validation.PartyNameChecker
import com.daml.logging.LoggingContext.withEnrichedLoggingContext
import com.daml.logging.{ContextualizedLogger, LoggingContext}
import com.daml.platform.api.grpc.GrpcApiService
import com.daml.platform.server.api.services.domain.CommandCompletionService
import com.daml.platform.server.api.services.grpc.GrpcCommandCompletionService
import io.grpc.ServerServiceDefinition

import scala.concurrent.{ExecutionContext, Future}

final class ApiCommandCompletionService private (completionsService: IndexCompletionsService)(
    implicit ec: ExecutionContext,
    protected val mat: Materializer,
    protected val esf: ExecutionSequencerFactory,
    logCtx: LoggingContext)
    extends CommandCompletionService {

  private val logger = ContextualizedLogger.get(this.getClass)

  private val subscriptionIdCounter = new AtomicLong()

  override def completionStreamSource(
      request: CompletionStreamRequest): Source[CompletionStreamResponse, NotUsed] =
    withEnrichedLoggingContext(logging.parties(request.parties), logging.offset(request.offset)) {
      implicit logCtx =>
        val subscriptionId = subscriptionIdCounter.getAndIncrement().toString
        logger.debug(s"Received request for completion subscription $subscriptionId: $request")

        val offset = request.offset.getOrElse(LedgerOffset.LedgerEnd)

        completionsService
          .getCompletions(offset, request.applicationId, request.parties)
          .via(logger.logErrorsOnStream)
    }

  override def getLedgerEnd(ledgerId: domain.LedgerId): Future[LedgerOffset.Absolute] =
    completionsService.currentLedgerEnd().andThen(logger.logErrorsOnCall[LedgerOffset.Absolute])

}

object ApiCommandCompletionService {

  def create(ledgerId: LedgerId, completionsService: IndexCompletionsService)(
      implicit ec: ExecutionContext,
      mat: Materializer,
      esf: ExecutionSequencerFactory,
      logCtx: LoggingContext): GrpcCommandCompletionService with GrpcApiService = {
    val impl: CommandCompletionService =
      new ApiCommandCompletionService(completionsService)

    new GrpcCommandCompletionService(ledgerId, impl, PartyNameChecker.AllowAllParties)
    with GrpcApiService {
      override def bindService(): ServerServiceDefinition =
        CommandCompletionServiceGrpc.bindService(this, DirectExecutionContext)
    }
  }
}
Example 95
Source File: TrackerMap.scala From daml with Apache License 2.0 | 5 votes |
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.platform.apiserver.services.tracking

import java.util.concurrent.atomic.AtomicReference

import com.daml.dec.DirectExecutionContext
import com.daml.ledger.api.v1.command_service.SubmitAndWaitRequest
import com.daml.ledger.api.v1.completion.Completion
import com.daml.logging.{ContextualizedLogger, LoggingContext}
import org.slf4j.LoggerFactory

import scala.collection.immutable.HashMap
import scala.concurrent.duration.{FiniteDuration, _}
import scala.concurrent.{ExecutionContext, Future}
import scala.util.{Failure, Success}

// Note: this excerpt shows only the companion object's AsyncResource helper and factory
// method. The TrackerMap class itself and the AsyncResourceState states it refers to
// (Waiting, Ready, Closed) are defined elsewhere in the original file and are omitted here.
object TrackerMap {

  final class AsyncResource[T <: AutoCloseable](future: Future[T]) {
    private val logger = LoggerFactory.getLogger(this.getClass)

    // Must progress Waiting => Ready => Closed or Waiting => Closed.
    val state: AtomicReference[AsyncResourceState[T]] = new AtomicReference(Waiting)

    future.andThen({
      case Success(t) =>
        if (!state.compareAndSet(Waiting, Ready(t))) {
          // This is the punch line of AsyncResource.
          // If we've been closed in the meantime, we must close the underlying resource also.
          // This "on-failure-to-complete" behavior is not present in scala or java Futures.
          t.close()
        }
      // Someone should be listening to this failure downstream
      // TODO(mthvedt): Refactor so at least one downstream listener is always present,
      // and exceptions are never dropped.
      case Failure(ex) =>
        logger.error("failure to get async resource", ex)
        state.set(Closed)
    })(DirectExecutionContext)

    def flatMap[U](f: T => Future[U])(implicit ex: ExecutionContext): Future[U] = {
      state.get() match {
        case Waiting => future.flatMap(f)
        case Closed => throw new IllegalStateException()
        case Ready(t) => f(t)
      }
    }

    def map[U](f: T => U)(implicit ex: ExecutionContext): Future[U] =
      flatMap(t => Future.successful(f(t)))

    def ifPresent[U](f: T => U): Option[U] = state.get() match {
      case Ready(t) => Some(f(t))
      case _ => None
    }

    def close(): Unit = state.getAndSet(Closed) match {
      case Ready(t) => t.close()
      case _ =>
    }
  }

  def apply(retentionPeriod: FiniteDuration)(implicit logCtx: LoggingContext): TrackerMap =
    new TrackerMap(retentionPeriod)
}
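A minimal sketch of AsyncResource in use, assuming the full TrackerMap.scala (with its AsyncResourceState states) is on the classpath; Conn is a throwaway stand-in for any AutoCloseable acquired asynchronously.

import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.Future

object AsyncResourceDemo {
  final class Conn extends AutoCloseable {
    def query(): Int = 1
    override def close(): Unit = ()
  }

  val resource = new TrackerMap.AsyncResource(Future(new Conn))

  // Until the resource is Ready, `map` chains onto the pending future;
  // afterwards it applies the function to the value directly.
  val answer: Future[Int] = resource.map(_.query())
}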
Example 96
Source File: ApiLedgerConfigurationService.scala From daml with Apache License 2.0 | 5 votes |
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.platform.apiserver.services

import akka.NotUsed
import akka.stream.Materializer
import akka.stream.scaladsl.Source
import com.daml.ledger.participant.state.index.v2.IndexConfigurationService
import com.daml.api.util.DurationConversion._
import com.daml.dec.DirectExecutionContext
import com.daml.grpc.adapter.ExecutionSequencerFactory
import com.daml.ledger.api.domain.LedgerId
import com.daml.ledger.api.v1.ledger_configuration_service._
import com.daml.logging.{ContextualizedLogger, LoggingContext}
import com.daml.platform.api.grpc.GrpcApiService
import com.daml.platform.server.api.validation.LedgerConfigurationServiceValidation
import io.grpc.{BindableService, ServerServiceDefinition}

import scala.concurrent.ExecutionContext

final class ApiLedgerConfigurationService private (configurationService: IndexConfigurationService)(
    implicit protected val esf: ExecutionSequencerFactory,
    protected val mat: Materializer,
    logCtx: LoggingContext)
    extends LedgerConfigurationServiceAkkaGrpc
    with GrpcApiService {

  private val logger = ContextualizedLogger.get(this.getClass)

  override protected def getLedgerConfigurationSource(
      request: GetLedgerConfigurationRequest): Source[GetLedgerConfigurationResponse, NotUsed] =
    configurationService
      .getLedgerConfiguration()
      .map(
        configuration =>
          GetLedgerConfigurationResponse(
            Some(LedgerConfiguration(
              Some(toProto(configuration.maxDeduplicationTime)),
            ))))
      .via(logger.logErrorsOnStream)

  override def bindService(): ServerServiceDefinition =
    LedgerConfigurationServiceGrpc.bindService(this, DirectExecutionContext)
}

object ApiLedgerConfigurationService {
  def create(ledgerId: LedgerId, configurationService: IndexConfigurationService)(
      implicit ec: ExecutionContext,
      esf: ExecutionSequencerFactory,
      mat: Materializer,
      logCtx: LoggingContext)
    : LedgerConfigurationServiceGrpc.LedgerConfigurationService with GrpcApiService =
    new LedgerConfigurationServiceValidation(
      new ApiLedgerConfigurationService(configurationService),
      ledgerId) with BindableService {
      override def bindService(): ServerServiceDefinition =
        LedgerConfigurationServiceGrpc.bindService(this, DirectExecutionContext)
    }
}
Example 97
Source File: ExecutionSequencerFactoryOwner.scala From daml with Apache License 2.0 | 5 votes |
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.platform.apiserver

import java.util.UUID

import akka.actor.ActorSystem
import com.daml.grpc.adapter.{AkkaExecutionSequencerPool, ExecutionSequencerFactory}
import com.daml.resources.{Resource, ResourceOwner}

import scala.concurrent.{ExecutionContext, Future}

final class ExecutionSequencerFactoryOwner(implicit actorSystem: ActorSystem)
    extends ResourceOwner[ExecutionSequencerFactory] {
  // NOTE: Pick a unique pool name as we want to allow multiple LedgerApiServer instances,
  // and it's pretty difficult to wait for the name to become available again.
  // The name deregistration is asynchronous and the close method does not wait, and it isn't
  // trivial to implement.
  // https://doc.akka.io/docs/akka/2.5/actors.html#graceful-stop
  private val poolName = s"ledger-api-server-rs-grpc-bridge-${UUID.randomUUID}"

  private val ActorCount = Runtime.getRuntime.availableProcessors() * 8

  override def acquire()(
      implicit executionContext: ExecutionContext
  ): Resource[ExecutionSequencerFactory] =
    Resource(Future(new AkkaExecutionSequencerPool(poolName, ActorCount)))(_.closeAsync())
}
Example 98
Source File: StandaloneIndexerServer.scala From daml with Apache License 2.0 | 5 votes |
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.platform.indexer

import akka.stream.Materializer
import com.daml.ledger.participant.state.v1.ReadService
import com.daml.logging.{ContextualizedLogger, LoggingContext}
import com.daml.metrics.Metrics
import com.daml.platform.configuration.ServerRole
import com.daml.platform.store.dao.events.LfValueTranslation
import com.daml.resources.{Resource, ResourceOwner}

import scala.concurrent.ExecutionContext

final class StandaloneIndexerServer(
    readService: ReadService,
    config: IndexerConfig,
    metrics: Metrics,
    lfValueTranslationCache: LfValueTranslation.Cache,
)(implicit materializer: Materializer, logCtx: LoggingContext)
    extends ResourceOwner[Unit] {

  private val logger = ContextualizedLogger.get(this.getClass)

  override def acquire()(implicit executionContext: ExecutionContext): Resource[Unit] = {
    val indexerFactory = new JdbcIndexerFactory(
      ServerRole.Indexer,
      config,
      readService,
      metrics,
      lfValueTranslationCache,
    )
    val indexer = new RecoveringIndexer(materializer.system.scheduler, config.restartDelay)
    config.startupMode match {
      case IndexerStartupMode.MigrateOnly =>
        Resource.successful(())
      case IndexerStartupMode.MigrateAndStart =>
        Resource
          .fromFuture(indexerFactory.migrateSchema(config.allowExistingSchema))
          .flatMap(startIndexer(indexer, _))
          .map { _ =>
            logger.debug("Waiting for the indexer to initialize the database.")
          }
      case IndexerStartupMode.ResetAndStart =>
        Resource
          .fromFuture(indexerFactory.resetSchema())
          .flatMap(startIndexer(indexer, _))
          .map { _ =>
            logger.debug("Waiting for the indexer to initialize the database.")
          }
      case IndexerStartupMode.ValidateAndStart =>
        Resource
          .fromFuture(indexerFactory.validateSchema())
          .flatMap(startIndexer(indexer, _))
          .map { _ =>
            logger.debug("Waiting for the indexer to initialize the database.")
          }
    }
  }

  private def startIndexer(
      indexer: RecoveringIndexer,
      initializedIndexerFactory: ResourceOwner[JdbcIndexer],
  )(implicit executionContext: ExecutionContext): Resource[Unit] =
    indexer
      .start(() => initializedIndexerFactory.flatMap(_.subscription(readService)).acquire())
      .map(_ => ())
}
Example 99
Source File: FlywayMigrations.scala From daml with Apache License 2.0 | 5 votes |
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.platform.store

import com.daml.logging.{ContextualizedLogger, LoggingContext}
import com.daml.platform.configuration.ServerRole
import com.daml.platform.store.FlywayMigrations._
import com.daml.platform.store.dao.HikariConnection
import com.daml.resources.ResourceOwner
import com.zaxxer.hikari.HikariDataSource
import org.flywaydb.core.Flyway
import org.flywaydb.core.api.MigrationVersion
import org.flywaydb.core.api.configuration.FluentConfiguration

import scala.concurrent.duration.DurationInt
import scala.concurrent.{ExecutionContext, Future}

class FlywayMigrations(jdbcUrl: String)(implicit logCtx: LoggingContext) {
  private val logger = ContextualizedLogger.get(this.getClass)

  private val dbType = DbType.jdbcType(jdbcUrl)

  def validate()(implicit executionContext: ExecutionContext): Future[Unit] =
    dataSource.use { ds =>
      Future {
        val flyway = configurationBase(dbType).dataSource(ds).load()
        logger.info("Running Flyway validation...")
        flyway.validate()
        logger.info("Flyway schema validation finished successfully.")
      }
    }

  def migrate(allowExistingSchema: Boolean = false)(
      implicit executionContext: ExecutionContext
  ): Future[Unit] =
    dataSource.use { ds =>
      Future {
        val flyway = configurationBase(dbType)
          .dataSource(ds)
          .baselineOnMigrate(allowExistingSchema)
          .baselineVersion(MigrationVersion.fromVersion("0"))
          .load()
        logger.info("Running Flyway migration...")
        val stepsTaken = flyway.migrate()
        logger.info(s"Flyway schema migration finished successfully, applying $stepsTaken steps.")
      }
    }

  def reset()(implicit executionContext: ExecutionContext): Future[Unit] =
    dataSource.use { ds =>
      Future {
        val flyway = configurationBase(dbType).dataSource(ds).load()
        logger.info("Running Flyway clean...")
        flyway.clean()
        logger.info("Flyway schema clean finished successfully.")
      }
    }

  private def dataSource: ResourceOwner[HikariDataSource] =
    HikariConnection.owner(
      serverRole = ServerRole.IndexMigrations,
      jdbcUrl = jdbcUrl,
      minimumIdle = 2,
      maxPoolSize = 2,
      connectionTimeout = 250.millis,
      metrics = None,
    )
}

object FlywayMigrations {
  def configurationBase(dbType: DbType): FluentConfiguration =
    Flyway.configure().locations("classpath:db/migration/" + dbType.name)
}
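A usage sketch, assuming a reachable PostgreSQL instance; the JDBC URL is a placeholder. The implicit ExecutionContext runs the blocking Flyway call inside the Future.

import com.daml.logging.LoggingContext.newLoggingContext
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.Future

object MigrateDemo {
  // Runs all pending migrations against the database behind the URL.
  val migrated: Future[Unit] = newLoggingContext { implicit logCtx =>
    new FlywayMigrations("jdbc:postgresql://localhost/ledger").migrate()
  }
}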
Example 100
Source File: SandboxNextFixture.scala From daml with Apache License 2.0 | 5 votes |
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.platform.sandboxnext

import com.daml.ledger.participant.state.v1.SeedService
import com.daml.ledger.api.testing.utils.{OwnedResource, Resource, SuiteResource}
import com.daml.platform.sandbox.AbstractSandboxFixture
import com.daml.platform.sandbox.config.SandboxConfig
import com.daml.platform.sandbox.services.GrpcClientResource
import com.daml.ports.Port
import com.daml.resources.ResourceOwner
import io.grpc.Channel
import org.scalatest.Suite

import scala.concurrent.ExecutionContext
import scala.concurrent.duration.DurationInt

trait SandboxNextFixture extends AbstractSandboxFixture with SuiteResource[(Port, Channel)] {
  self: Suite =>

  override protected def config: SandboxConfig =
    super.config.copy(
      seeding = Some(SeedService.Seeding.Weak),
    )

  override protected def serverPort: Port = suiteResource.value._1

  override protected def channel: Channel = suiteResource.value._2

  override protected lazy val suiteResource: Resource[(Port, Channel)] = {
    implicit val ec: ExecutionContext = system.dispatcher
    new OwnedResource[(Port, Channel)](
      for {
        jdbcUrl <- database
          .fold[ResourceOwner[Option[String]]](ResourceOwner.successful(None))(_.map(info =>
            Some(info.jdbcUrl)))
        port <- new Runner(config.copy(jdbcUrl = jdbcUrl))
        channel <- GrpcClientResource.owner(port)
      } yield (port, channel),
      acquisitionTimeout = 1.minute,
      releaseTimeout = 1.minute,
    )
  }
}
Example 101
Source File: LedgerResource.scala From daml with Apache License 2.0 | 5 votes |
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.platform.sandbox

import akka.stream.Materializer
import com.codahale.metrics.MetricRegistry
import com.daml.api.util.TimeProvider
import com.daml.ledger.api.domain.LedgerId
import com.daml.ledger.api.testing.utils.{OwnedResource, Resource}
import com.daml.ledger.participant.state.v1.ParticipantId
import com.daml.lf.data.ImmArray
import com.daml.lf.transaction.StandardTransactionCommitter
import com.daml.logging.LoggingContext
import com.daml.metrics.Metrics
import com.daml.platform.common.LedgerIdMode
import com.daml.platform.configuration.ServerRole
import com.daml.platform.packages.InMemoryPackageStore
import com.daml.platform.sandbox.stores.InMemoryActiveLedgerState
import com.daml.platform.sandbox.stores.ledger.Ledger
import com.daml.platform.sandbox.stores.ledger.ScenarioLoader.LedgerEntryOrBump
import com.daml.platform.sandbox.stores.ledger.inmemory.InMemoryLedger
import com.daml.platform.sandbox.stores.ledger.sql.{SqlLedger, SqlStartMode}
import com.daml.platform.store.dao.events.LfValueTranslation
import com.daml.resources.ResourceOwner
import com.daml.testing.postgresql.PostgresResource

import scala.concurrent.ExecutionContext

object LedgerResource {

  def inMemory(
      ledgerId: LedgerId,
      participantId: ParticipantId,
      timeProvider: TimeProvider,
      acs: InMemoryActiveLedgerState = InMemoryActiveLedgerState.empty,
      packages: InMemoryPackageStore = InMemoryPackageStore.empty,
      entries: ImmArray[LedgerEntryOrBump] = ImmArray.empty,
  )(implicit executionContext: ExecutionContext): Resource[Ledger] =
    new OwnedResource(
      ResourceOwner.forValue(() =>
        new InMemoryLedger(
          ledgerId = ledgerId,
          participantId = participantId,
          timeProvider = timeProvider,
          acs0 = acs,
          transactionCommitter = StandardTransactionCommitter,
          packageStoreInit = packages,
          ledgerEntries = entries,
        )))

  def postgres(
      testClass: Class[_],
      ledgerId: LedgerId,
      participantId: ParticipantId,
      timeProvider: TimeProvider,
      metrics: MetricRegistry,
      packages: InMemoryPackageStore = InMemoryPackageStore.empty,
  )(
      implicit executionContext: ExecutionContext,
      materializer: Materializer,
      logCtx: LoggingContext,
  ): Resource[Ledger] =
    new OwnedResource(
      for {
        database <- PostgresResource.owner()
        ledger <- new SqlLedger.Owner(
          serverRole = ServerRole.Testing(testClass),
          jdbcUrl = database.url,
          initialLedgerId = LedgerIdMode.Static(ledgerId),
          participantId = participantId,
          timeProvider = timeProvider,
          acs = InMemoryActiveLedgerState.empty,
          packages = packages,
          initialLedgerEntries = ImmArray.empty,
          queueDepth = 128,
          transactionCommitter = StandardTransactionCommitter,
          startMode = SqlStartMode.AlwaysReset,
          eventsPageSize = 100,
          metrics = new Metrics(metrics),
          lfValueTranslationCache = LfValueTranslation.Cache.none,
        )
      } yield ledger
    )
}
Example 102
Source File: SandboxFixture.scala From daml with Apache License 2.0 | 5 votes |
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.platform.sandbox.services

import com.daml.ledger.api.testing.utils.{OwnedResource, Resource, SuiteResource}
import com.daml.platform.sandbox.{AbstractSandboxFixture, SandboxServer}
import com.daml.ports.Port
import com.daml.resources.ResourceOwner
import io.grpc.Channel
import org.scalatest.Suite

import scala.concurrent.ExecutionContext
import scala.concurrent.duration.DurationInt

trait SandboxFixture extends AbstractSandboxFixture with SuiteResource[(SandboxServer, Channel)] {
  self: Suite =>

  protected def server: SandboxServer = suiteResource.value._1

  override protected def serverPort: Port = server.port

  override protected def channel: Channel = suiteResource.value._2

  override protected lazy val suiteResource: Resource[(SandboxServer, Channel)] = {
    implicit val ec: ExecutionContext = system.dispatcher
    new OwnedResource[(SandboxServer, Channel)](
      for {
        jdbcUrl <- database
          .fold[ResourceOwner[Option[String]]](ResourceOwner.successful(None))(_.map(info =>
            Some(info.jdbcUrl)))
        server <- SandboxServer.owner(config.copy(jdbcUrl = jdbcUrl))
        channel <- GrpcClientResource.owner(server.port)
      } yield (server, channel),
      acquisitionTimeout = 1.minute,
      releaseTimeout = 1.minute,
    )
  }
}
Example 103
Source File: GrpcClientResource.scala From daml with Apache License 2.0 | 5 votes |
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.platform.sandbox.services

import java.net.{InetAddress, InetSocketAddress}
import java.util.concurrent.TimeUnit

import com.daml.platform.apiserver.EventLoopGroupOwner
import com.daml.ports.Port
import com.daml.resources.{Resource, ResourceOwner}
import io.grpc.Channel
import io.grpc.netty.NettyChannelBuilder
import io.netty.channel.EventLoopGroup

import scala.concurrent.{ExecutionContext, Future}

object GrpcClientResource {
  def owner(port: Port): ResourceOwner[Channel] =
    for {
      eventLoopGroup <- new EventLoopGroupOwner("api-client", sys.runtime.availableProcessors())
      channel <- channelOwner(port, EventLoopGroupOwner.clientChannelType, eventLoopGroup)
    } yield channel

  private def channelOwner(
      port: Port,
      channelType: Class[_ <: io.netty.channel.Channel],
      eventLoopGroup: EventLoopGroup,
  ): ResourceOwner[Channel] =
    new ResourceOwner[Channel] {
      override def acquire()(implicit executionContext: ExecutionContext): Resource[Channel] = {
        Resource(Future {
          NettyChannelBuilder
            .forAddress(new InetSocketAddress(InetAddress.getLoopbackAddress, port.value))
            .channelType(channelType)
            .eventLoopGroup(eventLoopGroup)
            .usePlaintext()
            .directExecutor()
            .build()
        })(channel =>
          Future {
            channel.shutdownNow()
            if (!channel.awaitTermination(5, TimeUnit.SECONDS)) {
              sys.error(
                "Unable to shutdown channel to a remote API under tests. Unable to recover. Terminating.")
            }
          })
      }
    }
}
Example 104
Source File: JdbcLedgerDaoBackend.scala From daml with Apache License 2.0 | 5 votes |
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.platform.store.dao

import com.codahale.metrics.MetricRegistry
import com.daml.ledger.api.domain.LedgerId
import com.daml.ledger.api.testing.utils.AkkaBeforeAndAfterAll
import com.daml.logging.LoggingContext
import com.daml.logging.LoggingContext.newLoggingContext
import com.daml.metrics.Metrics
import com.daml.platform.configuration.ServerRole
import com.daml.platform.store.dao.events.LfValueTranslation
import com.daml.platform.store.{DbType, FlywayMigrations}
import com.daml.resources.{Resource, ResourceOwner}
import org.scalatest.Suite

import scala.concurrent.duration.DurationInt
import scala.concurrent.{Await, ExecutionContext}

private[dao] trait JdbcLedgerDaoBackend extends AkkaBeforeAndAfterAll { this: Suite =>

  protected def dbType: DbType
  protected def jdbcUrl: String

  protected def daoOwner(implicit logCtx: LoggingContext): ResourceOwner[LedgerDao] =
    JdbcLedgerDao
      .writeOwner(
        serverRole = ServerRole.Testing(getClass),
        jdbcUrl = jdbcUrl,
        eventsPageSize = 100,
        metrics = new Metrics(new MetricRegistry),
        lfValueTranslationCache = LfValueTranslation.Cache.none,
      )

  protected final var ledgerDao: LedgerDao = _

  // `dbDispatcher` and `ledgerDao` depend on the `postgresFixture` which is in turn initialized `beforeAll`
  private var resource: Resource[LedgerDao] = _

  override protected def beforeAll(): Unit = {
    super.beforeAll()
    implicit val executionContext: ExecutionContext = system.dispatcher
    resource = newLoggingContext { implicit logCtx =>
      for {
        _ <- Resource.fromFuture(new FlywayMigrations(jdbcUrl).migrate())
        dao <- daoOwner.acquire()
        _ <- Resource.fromFuture(dao.initializeLedger(LedgerId("test-ledger")))
      } yield dao
    }
    ledgerDao = Await.result(resource.asFuture, 10.seconds)
  }

  override protected def afterAll(): Unit = {
    Await.result(resource.release(), 10.seconds)
    super.afterAll()
  }
}
Example 105
Source File: GrpcHealthService.scala From daml with Apache License 2.0 | 5 votes |
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.platform.server.api.services.grpc

import akka.NotUsed
import akka.stream.Materializer
import akka.stream.scaladsl.Source
import com.daml.grpc.adapter.ExecutionSequencerFactory
import com.daml.dec.DirectExecutionContext
import com.daml.ledger.api.health.HealthChecks
import com.daml.platform.api.grpc.GrpcApiService
import com.daml.platform.server.api.DropRepeated
import com.daml.platform.server.api.services.grpc.GrpcHealthService._
import io.grpc.health.v1.health.{
  HealthAkkaGrpc,
  HealthCheckRequest,
  HealthCheckResponse,
  HealthGrpc
}
import io.grpc.{ServerServiceDefinition, Status, StatusException}

import scala.concurrent.duration.{DurationInt, FiniteDuration}
import scala.concurrent.{ExecutionContext, Future}
import scala.util.{Failure, Success, Try}

class GrpcHealthService(
    healthChecks: HealthChecks,
    maximumWatchFrequency: FiniteDuration = 1.second,
)(
    implicit protected val esf: ExecutionSequencerFactory,
    protected val mat: Materializer,
    executionContext: ExecutionContext,
) extends HealthAkkaGrpc
    with GrpcApiService {
  override def bindService(): ServerServiceDefinition =
    HealthGrpc.bindService(this, DirectExecutionContext)

  override def check(request: HealthCheckRequest): Future[HealthCheckResponse] =
    Future.fromTry(matchResponse(serviceFrom(request)))

  override def watchSource(request: HealthCheckRequest): Source[HealthCheckResponse, NotUsed] =
    Source
      .fromIterator(() => Iterator.continually(matchResponse(serviceFrom(request)).get))
      .throttle(1, per = maximumWatchFrequency)
      .via(DropRepeated())

  private def matchResponse(componentName: Option[String]): Try[HealthCheckResponse] =
    if (!componentName.forall(healthChecks.hasComponent))
      Failure(new StatusException(Status.NOT_FOUND))
    else if (healthChecks.isHealthy(componentName))
      Success(servingResponse)
    else
      Success(notServingResponse)
}

object GrpcHealthService {
  private[grpc] val servingResponse =
    HealthCheckResponse(HealthCheckResponse.ServingStatus.SERVING)

  private[grpc] val notServingResponse =
    HealthCheckResponse(HealthCheckResponse.ServingStatus.NOT_SERVING)

  private def serviceFrom(request: HealthCheckRequest): Option[String] = {
    Option(request.service).filter(_.nonEmpty)
  }
}
Example 106
Source File: DropRepeatedSpec.scala From daml with Apache License 2.0 | 5 votes |
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.platform.server.api

import akka.actor.ActorSystem
import akka.pattern.pipe
import akka.stream.Materializer
import akka.stream.scaladsl.{Sink, Source}
import akka.testkit.{TestKit, TestProbe}
import org.scalatest.{BeforeAndAfterAll, Matchers, WordSpecLike}

import scala.collection.immutable
import scala.concurrent.ExecutionContext

final class DropRepeatedSpec
    extends TestKit(ActorSystem(classOf[DropRepeatedSpec].getSimpleName))
    with WordSpecLike
    with Matchers
    with BeforeAndAfterAll {

  private[this] implicit val materializer: Materializer = Materializer(system)
  private[this] implicit val executionContext: ExecutionContext = materializer.executionContext

  override def afterAll: Unit = {
    TestKit.shutdownActorSystem(system)
  }

  "DropRepeated" should {
    "drop repeated elements" in {
      val probe = TestProbe()
      val input = immutable.Seq(1, 2, 2, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 5)

      val _ = Source(input)
        .via(DropRepeated())
        .runWith(Sink.seq)
        .pipeTo(probe.ref)
        .failed
        .foreach(fail(_))

      probe.expectMsg(Vector(1, 2, 3, 4, 5))
    }

    "does not drop duplicate elements that are not repeated" in {
      val probe = TestProbe()
      val input = immutable.Seq(1, 1, 2, 2, 1, 1, 2, 2)

      val _ = Source(input)
        .via(DropRepeated())
        .runWith(Sink.seq)
        .pipeTo(probe.ref)
        .failed
        .foreach(fail(_))

      probe.expectMsg(Vector(1, 2, 1, 2))
    }
  }
}
Example 107
Source File: LedgerTestSuite.scala From daml with Apache License 2.0 | 5 votes |
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.ledger.api.testtool.infrastructure

import com.daml.ledger.api.testtool.infrastructure.Allocation.{ParticipantAllocation, Participants}
import com.daml.lf.data.Ref

import scala.collection.mutable.ListBuffer
import scala.concurrent.{ExecutionContext, Future}

private[testtool] abstract class LedgerTestSuite {
  val name: String = getClass.getSimpleName

  private val testCaseBuffer: ListBuffer[LedgerTestCase] = ListBuffer()

  final lazy val tests: Vector[LedgerTestCase] = testCaseBuffer.toVector

  protected final def test(
      shortIdentifier: String,
      description: String,
      participants: ParticipantAllocation,
      timeoutScale: Double = 1.0,
  )(testCase: ExecutionContext => Participants => Future[Unit]): Unit = {
    val shortIdentifierRef = Ref.LedgerString.assertFromString(shortIdentifier)
    testCaseBuffer.append(
      new LedgerTestCase(
        this,
        shortIdentifierRef,
        description,
        timeoutScale,
        participants,
        testCase,
      ),
    )
  }
}
Example 108
Source File: Synchronize.scala From daml with Apache License 2.0 | 5 votes |
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.ledger.api.testtool.infrastructure

import com.daml.ledger.api.testtool.infrastructure.Eventually.eventually
import com.daml.ledger.api.testtool.infrastructure.participant.ParticipantTestContext
import com.daml.ledger.test.model.Test.AgreementFactory
import com.daml.ledger.test.model.Test.AgreementFactory._

import scala.concurrent.{ExecutionContext, Future}

object Synchronize {

  final def synchronize(alpha: ParticipantTestContext, beta: ParticipantTestContext)(
      implicit ec: ExecutionContext,
  ): Future[Unit] = {
    for {
      alice <- alpha.allocateParty()
      bob <- beta.allocateParty()
      _ <- alpha.waitForParties(Set(beta), Set(alice, bob))
      _ <- beta.waitForParties(Set(alpha), Set(alice, bob))
      factory <- alpha.create(alice, AgreementFactory(bob, alice))
      agreement <- eventually { beta.exercise(bob, factory.exerciseCreateAgreement) }
      _ <- eventually { alpha.transactionTreeById(agreement.transactionId, alice) }
    } yield {
      // Nothing to do, by flatmapping over this we know
      // the two participants are synchronized up to the
      // point before invoking this method
    }
  }
}
Example 109
Source File: Eventually.scala From daml with Apache License 2.0 | 5 votes |
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.ledger.api.testtool.infrastructure

import com.daml.timer.RetryStrategy

import scala.concurrent.duration.{Duration, DurationInt}
import scala.concurrent.{ExecutionContext, Future}

object Eventually {
  def eventually[A](
      runAssertion: => Future[A],
      attempts: Int = 10,
      firstWaitTime: Duration = 10.millis,
  )(implicit ec: ExecutionContext): Future[A] =
    RetryStrategy.exponentialBackoff(attempts, firstWaitTime) { (_, _) =>
      runAssertion
    }
}
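A usage sketch: poll an assertion until it succeeds or the attempts run out, with the wait growing between tries (exponential backoff, per the RetryStrategy name). checkReady is a placeholder assertion.

import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.Future

object EventuallyDemo {
  def checkReady: Future[Unit] = Future.unit // placeholder assertion

  // Up to 5 attempts, starting from the default 10ms first wait.
  val done: Future[Unit] = Eventually.eventually(checkReady, attempts = 5)
}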
Example 110
Source File: SidechainTransactionActor.scala From Sidechains-SDK with MIT License | 5 votes |
package com.horizen.api.http import akka.actor.{Actor, ActorRef, ActorSystem, Props} import com.horizen.SidechainTypes import com.horizen.api.http.SidechainTransactionActor.ReceivableMessages.BroadcastTransaction import scorex.core.NodeViewHolder.ReceivableMessages.LocallyGeneratedTransaction import scorex.core.network.NodeViewSynchronizer.ReceivableMessages.{FailedTransaction, SuccessfulTransaction} import scorex.util.{ModifierId, ScorexLogging} import scala.collection.concurrent.TrieMap import scala.concurrent.{ExecutionContext, Promise} class SidechainTransactionActor[T <: SidechainTypes#SCBT](sidechainNodeViewHolderRef: ActorRef)(implicit ec: ExecutionContext) extends Actor with ScorexLogging { private var transactionMap : TrieMap[String, Promise[ModifierId]] = TrieMap() override def preStart(): Unit = { context.system.eventStream.subscribe(self, classOf[SuccessfulTransaction[T]]) context.system.eventStream.subscribe(self, classOf[FailedTransaction]) } protected def broadcastTransaction: Receive = { case BroadcastTransaction(transaction) => val promise = Promise[ModifierId] val future = promise.future transactionMap(transaction.id) = promise sender() ! future sidechainNodeViewHolderRef ! LocallyGeneratedTransaction[SidechainTypes#SCBT](transaction) } protected def sidechainNodeViewHolderEvents: Receive = { case SuccessfulTransaction(transaction) => transactionMap.remove(transaction.id) match { case Some(promise) => promise.success(transaction.id) case None => } case FailedTransaction(transactionId, throwable, _) => transactionMap.remove(transactionId) match { case Some(promise) => promise.failure(throwable) case None => } } override def receive: Receive = { broadcastTransaction orElse sidechainNodeViewHolderEvents orElse { case message: Any => log.error("SidechainTransactionActor received strange message: " + message) } } } object SidechainTransactionActor { object ReceivableMessages { case class BroadcastTransaction[T <: SidechainTypes#SCBT](transaction: T) } } object SidechainTransactionActorRef { def props(sidechainNodeViewHolderRef: ActorRef) (implicit ec: ExecutionContext): Props = Props(new SidechainTransactionActor(sidechainNodeViewHolderRef)) def apply(sidechainNodeViewHolderRef: ActorRef) (implicit system: ActorSystem, ec: ExecutionContext): ActorRef = system.actorOf(props(sidechainNodeViewHolderRef)) }
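Because the actor replies to BroadcastTransaction with the Future itself, callers using the ask pattern receive a nested future that must be flattened. A sketch under that assumption:

import akka.actor.ActorRef
import akka.pattern.ask
import akka.util.Timeout
import com.horizen.SidechainTypes
import scorex.util.ModifierId

import scala.concurrent.{ExecutionContext, Future}

object BroadcastUsage {
  import SidechainTransactionActor.ReceivableMessages.BroadcastTransaction

  // The actor replies with a Future[ModifierId], so the ask itself yields
  // Future[Future[ModifierId]] and needs one flatten.
  def broadcast(actor: ActorRef, tx: SidechainTypes#SCBT)(
      implicit ec: ExecutionContext, timeout: Timeout): Future[ModifierId] =
    (actor ? BroadcastTransaction(tx)).mapTo[Future[ModifierId]].flatten
}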
Example 111
Source File: BoxStoreTest.scala From fs2-blobstore with Apache License 2.0 | 5 votes |
package blobstore.box import java.util.concurrent.Executors import blobstore.Path import cats.effect.{Blocker, ContextShift, IO} import com.box.sdk.BoxAPIConnection import org.scalatest.matchers.must.Matchers import org.scalatest.flatspec.AnyFlatSpec import scala.concurrent.ExecutionContext class BoxStoreTest extends AnyFlatSpec with Matchers { implicit val cs: ContextShift[IO] = IO.contextShift(ExecutionContext.global) val blocker = Blocker.liftExecutionContext(ExecutionContext.fromExecutor(Executors.newCachedThreadPool)) "splitPath" should "correctly split a long path" in { val boxStore = new BoxStore[IO](new BoxAPIConnection(""), "", blocker) val testPath = Path("long/path/to/filename") val (pathToParentFolder, key) = boxStore.splitPath(testPath) pathToParentFolder must be("long" :: "path" :: "to" :: Nil) key must be("filename") } it should "split a single element path into a single element list and empty string key" in { val boxStore = new BoxStore[IO](new BoxAPIConnection(""), "", blocker) val testPath = Path("filename") val (pathToParentFolder, key) = boxStore.splitPath(testPath) pathToParentFolder must be("filename"::Nil) key must be("") } it should "split an empty path into empty list, empty string key" in { val boxStore = new BoxStore[IO](new BoxAPIConnection(""), "", blocker) val testPath = Path("") val (pathToParentFolder, key) = boxStore.splitPath(testPath) pathToParentFolder must be(""::Nil) key must be("") } }
Example 112
Source File: StoreOpsTest.scala From fs2-blobstore with Apache License 2.0 | 5 votes |
package blobstore import java.nio.charset.Charset import java.nio.file.Files import java.util.concurrent.Executors import cats.effect.{Blocker, IO} import cats.effect.laws.util.TestInstances import cats.implicits._ import fs2.Pipe import org.scalatest.Assertion import org.scalatest.flatspec.AnyFlatSpec import implicits._ import org.scalatest.matchers.must.Matchers import scala.collection.mutable.ArrayBuffer import scala.concurrent.ExecutionContext class StoreOpsTest extends AnyFlatSpec with Matchers with TestInstances { implicit val cs = IO.contextShift(ExecutionContext.global) val blocker = Blocker.liftExecutionContext(ExecutionContext.fromExecutor(Executors.newCachedThreadPool)) behavior of "PutOps" it should "buffer contents and compute size before calling Store.put" in { val bytes: Array[Byte] = "AAAAAAAAAA".getBytes(Charset.forName("utf-8")) val store = DummyStore(_.size must be(Some(bytes.length))) fs2.Stream.emits(bytes).covary[IO].through(store.bufferedPut(Path("path/to/file.txt"), blocker)).compile.drain.unsafeRunSync() store.buf.toArray must be(bytes) } it should "upload a file from a nio Path" in { val bytes = "hello".getBytes(Charset.forName("utf-8")) val store = DummyStore(_.size must be(Some(bytes.length))) fs2.Stream.bracket(IO(Files.createTempFile("test-file", ".bin"))) { p => IO(p.toFile.delete).void }.flatMap { p => fs2.Stream.emits(bytes).covary[IO].through(fs2.io.file.writeAll(p, blocker)).drain ++ fs2.Stream.eval(store.put(p, Path("path/to/file.txt"), blocker)) }.compile.drain.unsafeRunSync() store.buf.toArray must be(bytes) } } final case class DummyStore(check: Path => Assertion) extends Store[IO] { val buf = new ArrayBuffer[Byte]() override def put(path: Path): Pipe[IO, Byte, Unit] = { check(path) in => { buf.appendAll(in.compile.toVector.unsafeRunSync()) fs2.Stream.emit(()) } } override def list(path: Path): fs2.Stream[IO, Path] = ??? override def get(path: Path, chunkSize: Int): fs2.Stream[IO, Byte] = ??? override def move(src: Path, dst: Path): IO[Unit] = ??? override def copy(src: Path, dst: Path): IO[Unit] = ??? override def remove(path: Path): IO[Unit] = ??? }
Example 113
Source File: GlobalTimer.scala From EncryCore with GNU General Public License v3.0 | 5 votes |
package encry.it.util import io.netty.util.{HashedWheelTimer, Timer} import scala.concurrent.{ExecutionContext, Future, Promise} import scala.concurrent.duration.FiniteDuration import scala.util.control.NonFatal object GlobalTimer { val timer: Timer = new HashedWheelTimer() sys.addShutdownHook { timer.stop() } implicit class TimerExt(val timer: Timer) extends AnyVal { def schedule[A](f: => Future[A], delay: FiniteDuration): Future[A] = { val p = Promise[A] try { timer.newTimeout(_ => p.completeWith(f), delay.length, delay.unit) } catch { case NonFatal(e) => p.failure(e) } p.future } def sleep(term: FiniteDuration): Future[Unit] = schedule(Future.successful(()), term) def retryUntil[A](f: => Future[A], cond: A => Boolean, retryInterval: FiniteDuration)(implicit ec: ExecutionContext): Future[A] = f.flatMap(v => if (cond(v)) Future.successful(v) else schedule(retryUntil(f, cond, retryInterval), retryInterval)) } }
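Usage sketch for retryUntil: poll a Future-returning query on a fixed interval until a predicate holds. The height query below is a hypothetical stand-in:

import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.Future
import scala.concurrent.duration._

object RetryUntilUsage {
  import GlobalTimer._

  // Hypothetical stand-in for a real node query.
  def nodeHeight: Future[Int] = ???

  // Poll every second until the node reaches height 10.
  val atHeight10: Future[Int] = timer.retryUntil[Int](nodeHeight, _ >= 10, 1.second)
}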
Example 114
Source File: ProcessStep.scala From process with Apache License 2.0 | 5 votes |
package processframework import scala.concurrent.duration.Duration import scala.concurrent.{ ExecutionContext, Future, Promise } import scala.reflect.ClassTag import akka.actor.{ Actor, ActorContext, ActorRef, Props } import akka.util.Timeout trait ProcessStep[S] { implicit def context: ActorContext private[processframework] val promise: Promise[Unit] = Promise[Unit]() type Execution = S ⇒ Unit type UpdateFunction = PartialFunction[Process.Event, S ⇒ S] type CommandToEvent = PartialFunction[Any, Process.Event] def execute()(implicit process: ActorRef): Execution def receiveCommand: CommandToEvent def updateState: UpdateFunction def retryInterval: Duration = Duration.Inf final def isCompleted = promise.isCompleted final def markDone(): Unit = promise.trySuccess(()) final def markDone(newState: S): S = { markDone() newState } private[processframework] def abort(): Unit = promise.tryFailure(new RuntimeException("Process aborted")) final def onComplete(completeFn: ((ActorContext, S)) ⇒ Unit)(implicit executionContext: ExecutionContext, process: ActorRef): Unit = promise.future.foreach { _ ⇒ process ! PersistentProcess.Perform(completeFn) } final def onCompleteAsync(completeFn: ⇒ Unit)(implicit executionContext: ExecutionContext): Unit = promise.future.foreach(_ ⇒ completeFn) final def ~>(next: ProcessStep[S]*)(implicit context: ActorContext): ProcessStep[S] = new Chain(this, next: _*) private[processframework] def run()(implicit process: ActorRef, executionContext: ExecutionContext, classTag: ClassTag[S]): Future[Unit] = runImpl private val innerActor = context.actorOf(Props(new Actor { def receive = { case msg if receiveCommand.isDefinedAt(msg) ⇒ val event = receiveCommand(msg) context.parent ! event } })) private[processframework] def handleUpdateState: UpdateFunction = if (isCompleted) PartialFunction.empty[Process.Event, S ⇒ S] else updateState private[processframework] def handleReceiveCommand: CommandToEvent = if (isCompleted) PartialFunction.empty[Any, Process.Event] else receiveCommand private[processframework] def executeWithPossibleRetry()(implicit process: ActorRef): Execution = { state ⇒ implicit val _ = context.dispatcher if (retryInterval.isFinite()) context.system.scheduler.scheduleOnce(Duration.fromNanos(retryInterval.toNanos)) { if (!isCompleted) executeWithPossibleRetry()(process)(state) } execute()(process)(state) } private[processframework] def runImpl()(implicit process: ActorRef, executionContext: ExecutionContext, classTag: ClassTag[S]): Future[Unit] = { import akka.pattern.ask import scala.concurrent.duration._ implicit val timeout: Timeout = 5 seconds if (!isCompleted) (process ? Process.GetState).mapTo[S].foreach(executeWithPossibleRetry()(innerActor)) promise.future } }
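For orientation, a concrete step supplies execute (the side effect), receiveCommand (external commands to events), and updateState (events to state transitions), and signals completion with markDone. A minimal sketch assuming Process.Event is the framework's event marker trait (the trait above references it as such); the domain types are invented:

import akka.actor.{ActorContext, ActorRef}

// Invented domain types for the sketch.
case class OrderState(emailSent: Boolean = false)
case object EmailSent extends Process.Event // assumes Process.Event is the framework's event trait

class SendEmailStep(implicit val context: ActorContext) extends ProcessStep[OrderState] {
  // Fire the side effect; completion arrives later as a command.
  def execute()(implicit process: ActorRef): Execution =
    state => println(s"sending confirmation email for $state")

  // Translate an external command into a domain event.
  def receiveCommand: CommandToEvent = {
    case "email-delivered" => EmailSent
  }

  // Apply the event and mark this step complete.
  def updateState: UpdateFunction = {
    case EmailSent => state => markDone(state.copy(emailSent = true))
  }
}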
Example 115
Source File: Choice.scala From process with Apache License 2.0 | 5 votes |
package processframework import scala.concurrent.{ ExecutionContext, Future, Promise } import scala.reflect.ClassTag import akka.actor.{ ActorContext, ActorRef } class Choice[S](condition: S ⇒ Boolean, processIfTrue: ProcessStep[S], processIfFalse: ProcessStep[S])(implicit val context: ActorContext, classTag: ClassTag[S]) extends ProcessStep[S] { private[processframework] val truePromise: Promise[Unit] = Promise[Unit]() private[processframework] val falsePromise: Promise[Unit] = Promise[Unit]() var result = Option.empty[Boolean] override private[processframework] def abort(): Unit = { processIfTrue.abort() processIfFalse.abort() super.abort() } def receiveCommand: CommandToEvent = { if (truePromise.isCompleted) processIfTrue.receiveCommand else if (falsePromise.isCompleted) processIfFalse.receiveCommand else PartialFunction.empty } def updateState: UpdateFunction = { case event if processIfFalse.handleUpdateState.isDefinedAt(event) || processIfTrue.handleUpdateState.isDefinedAt(event) ⇒ result match { case Some(true) ⇒ truePromise.trySuccess(()) processIfTrue.updateState.apply(event) case Some(false) ⇒ falsePromise.trySuccess(()) processIfFalse.updateState.apply(event) case None ⇒ { state ⇒ result = Some(condition(state)) updateState.apply(event)(state) } } } override private[processframework] def runImpl()(implicit self: ActorRef, executionContext: ExecutionContext, classTag: ClassTag[S]): Future[Unit] = { val trueFlow = truePromise.future flatMap { _ ⇒ processIfTrue.run() } val falseFlow = falsePromise.future flatMap { _ ⇒ processIfFalse.run() } super.runImpl() Future.firstCompletedOf(List(trueFlow, falseFlow)) } def execute()(implicit process: ActorRef): Execution = { state ⇒ val choiceResult = condition(state) result = Some(choiceResult) if (choiceResult) truePromise.trySuccess(()) else falsePromise.trySuccess(()) } }
Example 116
Source File: Chain.scala From process with Apache License 2.0 | 5 votes |
package processframework import scala.concurrent.Future import scala.reflect.ClassTag import scala.concurrent.ExecutionContext import akka.actor.{ ActorContext, ActorRef } private[processframework] class Chain[S](a: ProcessStep[S], b: ProcessStep[S]*)(implicit val context: ActorContext) extends ProcessStep[S] { override private[processframework] def abort(): Unit = { a.abort() b.foreach(_.abort()) super.abort() } override private[processframework] def runImpl()(implicit self: ActorRef, executionContext: ExecutionContext, classTag: ClassTag[S]): Future[Unit] = { a.run() flatMap { _ ⇒ Future.sequence(b.map(_.run())).flatMap { _ ⇒ markDone() promise.future } } } def execute()(implicit process: ActorRef) = throw new UnsupportedOperationException("This is a chain. It does not execute by itself. Please invoke run.") def receiveCommand: CommandToEvent = a.handleReceiveCommand orElse b.foldRight(PartialFunction.empty[Any, Process.Event]) { case (x, y) ⇒ x.handleReceiveCommand orElse y } def updateState: UpdateFunction = a.handleUpdateState orElse b.foldRight(PartialFunction.empty[Process.Event, S ⇒ S]) { case (x, y) ⇒ x.handleUpdateState orElse y } }
Example 117
Source File: AppError.scala From octopus with Apache License 2.0 | 5 votes |
package octopus import scala.concurrent.{ExecutionContext, Future} import language.higherKinds import scala.annotation.implicitNotFound @implicitNotFound("Implicit instance for octopus.AppError[${M}] not found in scope!") trait AppError[M[_]] extends Serializable { def pure[A](a: A): M[A] def failed[A](why: Throwable): M[A] def map[A, B](ma: M[A])(f: A => B): M[B] def map2[A, B, C](ma: M[A], mb: M[B])(f: (A, B) => C): M[C] def recover[A, B <: A](ma: M[A], f: PartialFunction[Throwable, B]): M[A] } object AppError extends Serializable { def apply[M[_]](implicit a: AppError[M]): AppError[M] = a implicit def futureAppError(implicit ec: ExecutionContext): AppError[Future] = new AppError[Future] { def pure[A](a: A): Future[A] = Future.successful(a) def failed[A](why: Throwable): Future[A] = Future.failed[A](why) def map[A, B](fa: Future[A])(f: A => B): Future[B] = fa.map(f) def map2[A, B, C](fa: Future[A], fb: Future[B])(f: (A, B) => C): Future[C] = fa.zip(fb).map { case (a, b) => f(a, b) } def recover[A, B <: A](fa: Future[A], f: PartialFunction[Throwable, B]): Future[A] = fa.recover(f) } }
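The trait is not Future-specific: any M[_] that can express success, failure, mapping, zipping, and recovery qualifies. A sketch of an instance for the synchronous Either[Throwable, *], assuming Scala 2.12's right-biased Either:

object EitherAppError {
  type ErrorOr[A] = Either[Throwable, A]

  // Mirrors the Future instance above, but evaluates synchronously.
  implicit val eitherAppError: AppError[ErrorOr] = new AppError[ErrorOr] {
    def pure[A](a: A): ErrorOr[A] = Right(a)
    def failed[A](why: Throwable): ErrorOr[A] = Left(why)
    def map[A, B](ma: ErrorOr[A])(f: A => B): ErrorOr[B] = ma.map(f)
    def map2[A, B, C](ma: ErrorOr[A], mb: ErrorOr[B])(f: (A, B) => C): ErrorOr[C] =
      for { a <- ma; b <- mb } yield f(a, b)
    def recover[A, B <: A](ma: ErrorOr[A], f: PartialFunction[Throwable, B]): ErrorOr[A] =
      ma match {
        case Left(t) if f.isDefinedAt(t) => Right(f(t))
        case other                       => other
      }
  }
}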
Example 118
Source File: CouchbaseStatements.scala From akka-persistence-couchbase with Apache License 2.0 | 5 votes |
package akka.persistence.couchbase.journal import java.util.concurrent.TimeUnit import akka.actor.{Actor, ActorLogging} import akka.persistence.couchbase.CouchbaseJournalConfig import com.couchbase.client.java.Bucket import com.couchbase.client.java.document.JsonDocument import com.couchbase.client.java.document.json.JsonArray import com.couchbase.client.java.view._ import rx.Observable import rx.functions.Func1 import scala.collection.immutable.Seq import scala.concurrent.ExecutionContext import scala.util.{Failure, Try} trait CouchbaseStatements extends Actor with ActorLogging { def config: CouchbaseJournalConfig def bucket: Bucket implicit def executionContext: ExecutionContext def bySequenceNr(persistenceId: String, from: Long, to: Long): ViewQuery = { ViewQuery .from("journal", "by_sequenceNr") .stale(config.stale) .startKey(JsonArray.from(persistenceId, from.asInstanceOf[AnyRef])) .endKey(JsonArray.from(persistenceId, to.asInstanceOf[AnyRef])) } def nextKey(name: String): Try[String] = { Try { val counterKey = s"counter::$name" val counter = bucket.counter(counterKey, 1L, 0L).content() s"$name-$counter" } } }
Example 119
Source File: CouchbaseStatements.scala From akka-persistence-couchbase with Apache License 2.0 | 5 votes |
package akka.persistence.couchbase.snapshot import java.util.concurrent.TimeUnit import akka.actor.{Actor, ActorLogging} import akka.persistence.couchbase.CouchbaseSnapshotStoreConfig import com.couchbase.client.java.Bucket import com.couchbase.client.java.document.JsonDocument import com.couchbase.client.java.document.json.JsonArray import com.couchbase.client.java.view.ViewQuery import scala.concurrent.ExecutionContext import scala.util.{Failure, Try} trait CouchbaseStatements extends Actor with ActorLogging { def config: CouchbaseSnapshotStoreConfig def bucket: Bucket implicit def executionContext: ExecutionContext def bySequenceNr(persistenceId: String, maxSequenceNr: Long): ViewQuery = { ViewQuery .from("snapshots", "by_sequenceNr") .stale(config.stale) .descending(true) .startKey(JsonArray.from(persistenceId, maxSequenceNr.asInstanceOf[AnyRef])) .endKey(JsonArray.from(persistenceId, Long.MinValue.asInstanceOf[AnyRef])) } def byTimestamp(persistenceId: String, maxTimestamp: Long): ViewQuery = { ViewQuery .from("snapshots", "by_timestamp") .stale(config.stale) .descending(true) .startKey(JsonArray.from(persistenceId, maxTimestamp.asInstanceOf[AnyRef])) .endKey(JsonArray.from(persistenceId, Long.MinValue.asInstanceOf[AnyRef])) } def all(persistenceId: String): ViewQuery = { ViewQuery .from("snapshots", "all") .stale(config.stale) .descending(true) .key(persistenceId) } def executeSave(snapshotMessage: SnapshotMessage): Try[Unit] = { Try(SnapshotMessageKey.fromMetadata(snapshotMessage.metadata).value).flatMap { key => Try { val jsonObject = SnapshotMessage.serialize(snapshotMessage) val jsonDocument = JsonDocument.create(key, jsonObject) bucket.upsert( jsonDocument, config.persistTo, config.replicateTo, config.timeout.toSeconds, TimeUnit.SECONDS ) log.debug("Wrote snapshot: {}", key) } recoverWith { case e => log.error(e, "Writing snapshot: {}", key) Failure(e) } } } }
Example 120
Source File: Execution.scala From wookiee with Apache License 2.0 | 5 votes |
package com.webtrends.harness.libs.iteratee

import java.util.ArrayDeque

import scala.annotation.tailrec
import scala.concurrent.{ ExecutionContextExecutor, ExecutionContext }

// Excerpt: this listing shows only the tail of the file, the trampoline's
// scheduling loop and failure reporter. `local` (a ThreadLocal holding the
// scheduled Runnables) and `Empty` (its "nothing queued" sentinel) are
// defined earlier in the enclosing object.
  @tailrec
  private def executeScheduled(): Unit = {
    local.get match {
      case Empty =>
        // Nothing to run
        ()
      case next: Runnable =>
        // Mark the queue of Runnables after this one as empty
        local.set(Empty)
        // Run the only scheduled Runnable
        next.run()
        // Recurse in case more Runnables were added
        executeScheduled()
      case arrayDeque: ArrayDeque[_] =>
        val runnables = arrayDeque.asInstanceOf[ArrayDeque[Runnable]]
        // Rather than recursing, we can use a more efficient
        // while loop. The value of the ThreadLocal will stay as
        // an ArrayDeque until all the scheduled Runnables have been
        // run.
        while (!runnables.isEmpty) {
          val runnable = runnables.removeFirst()
          runnable.run()
        }
      case illegal =>
        throw new IllegalStateException(s"Unsupported trampoline ThreadLocal value: $illegal")
    }
  }

  def reportFailure(t: Throwable): Unit = t.printStackTrace()
}
}
Example 121
Source File: TypedCommandExecution.scala From wookiee with Apache License 2.0 | 5 votes |
package com.webtrends.harness.command.typed import akka.pattern._ import akka.util.Timeout import scala.concurrent.duration._ import scala.concurrent.{ExecutionContext, Future} case class ExecuteTypedCommand(args: Any) object TypedCommandExecution { def execute[U, V](name: String, args: U)(implicit executionContext: ExecutionContext, timeout: Timeout): Future[V] = { TypedCommandManager.commands.get(name) match { case Some(commandActor) => (commandActor ? ExecuteTypedCommand(args)).map(_.asInstanceOf[V]) case None => Future.failed(new IllegalArgumentException(s"Command $name not found.")) } } }
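Usage sketch: the result type V is unchecked at the call site, so a mismatch with the registered command's actual result type surfaces only at runtime as a ClassCastException. The command name below is invented:

import akka.util.Timeout

import scala.concurrent.duration._
import scala.concurrent.{ExecutionContext, Future}

object TypedCommandUsage {
  // "user-lookup" and the String => String shape are invented for the sketch.
  def lookupUser(id: String)(implicit ec: ExecutionContext): Future[String] = {
    implicit val timeout: Timeout = Timeout(5.seconds)
    TypedCommandExecution.execute[String, String]("user-lookup", id)
  }
}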
Example 122
Source File: TypedCommand.scala From wookiee with Apache License 2.0 | 5 votes |
package com.webtrends.harness.command.typed import akka.actor.Actor import akka.pattern._ import com.webtrends.harness.logging.ActorLoggingAdapter import com.webtrends.harness.utils.FutureExtensions._ import scala.concurrent.{ExecutionContext, Future} import scala.util.{Failure, Success} trait TypedCommand[T, V] extends Actor with ActorLoggingAdapter { implicit def executionContext: ExecutionContext = context.dispatcher def commandName: String def receive: Receive = { case ExecuteTypedCommand(args) => pipe{ val startTime = System.currentTimeMillis() execute(args.asInstanceOf[T]) mapAll { case Success(t) => log.info(s"Command $commandName succeeded in ${System.currentTimeMillis() - startTime}ms") t case Failure(f) => log.info(s"Command $commandName failed in ${System.currentTimeMillis() - startTime}ms") throw f } } to sender } def execute(args: T): Future[V] }
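A concrete command only supplies commandName and execute; receive, timing, and logging come from the trait. A minimal sketch with invented logic (registration is covered by TypedCommandHelper in the next example):

import scala.concurrent.Future

// Invented example: replies with the length of the incoming string.
class LengthCommand extends TypedCommand[String, Int] {
  def commandName: String = "length"
  def execute(args: String): Future[Int] = Future.successful(args.length)
}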
Example 123
Source File: TypedCommandHelper.scala From wookiee with Apache License 2.0 | 5 votes |
package com.webtrends.harness.command.typed import akka.actor.{Actor, ActorRef, Props} import akka.pattern._ import akka.util.Timeout import com.webtrends.harness.HarnessConstants import scala.concurrent.duration._ import scala.concurrent.{ExecutionContext, Future} trait TypedCommandHelper { this: Actor => var typedCommandManager: Option[ActorRef] = None implicit def ec: ExecutionContext = context.dispatcher def registerTypedCommand[T<:TypedCommand[_,_]](name: String, actorClass: Class[T], checkHealth: Boolean = false): Future[ActorRef] = { implicit val timeout = Timeout(2 seconds) getManager().flatMap { cm => (cm ? RegisterCommand(name, Props(actorClass), checkHealth)).mapTo[ActorRef] } } protected def getManager(): Future[ActorRef] = { typedCommandManager match { case Some(cm) => Future.successful(cm) case None => context.system.actorSelection(HarnessConstants.TypedCommandFullName).resolveOne()(2 seconds).map { s => typedCommandManager = Some(s) s } } } }
Example 124
Source File: ExecutionSpecification.scala From wookiee with Apache License 2.0 | 5 votes |
package com.webtrends.harness.libs.iteratee import scala.concurrent.ExecutionContext import org.specs2.mutable.SpecificationLike trait ExecutionSpecification { self: SpecificationLike => def testExecution[A](f: TestExecutionContext => A): A = { val ec = TestExecutionContext() val result = ec.preparable(f(ec)) result } def testExecution[A](f: (TestExecutionContext, TestExecutionContext) => A): A = { testExecution(ec1 => testExecution(ec2 => f(ec1, ec2))) } def testExecution[A](f: (TestExecutionContext, TestExecutionContext, TestExecutionContext) => A): A = { testExecution(ec1 => testExecution(ec2 => testExecution(ec3 => f(ec1, ec2, ec3)))) } def mustExecute[A](expectedCount: => Int)(f: ExecutionContext => A): A = { testExecution { tec => val result = f(tec) tec.executionCount must equalTo(expectedCount) result } } def mustExecute[A](expectedCount1: Int, expectedCount2: Int)(f: (ExecutionContext, ExecutionContext) => A): A = { mustExecute(expectedCount1)(ec1 => mustExecute(expectedCount2)(ec2 => f(ec1, ec2))) } def mustExecute[A](expectedCount1: Int, expectedCount2: Int, expectedCount3: Int)(f: (ExecutionContext, ExecutionContext, ExecutionContext) => A): A = { mustExecute(expectedCount1)(ec1 => mustExecute(expectedCount2)(ec2 => mustExecute(expectedCount3)(ec3 => f(ec1, ec2, ec3)))) } }
Example 125
Source File: IterateeSpecification.scala From wookiee with Apache License 2.0 | 5 votes |
package com.webtrends.harness.libs.iteratee import com.webtrends.harness.libs.iteratee.internal.executeFuture import scala.concurrent.{ Await, ExecutionContext, Future, Promise } import scala.concurrent.duration.{ Duration, SECONDS, MILLISECONDS } import scala.util.Try def delayed(it: => Iteratee[String, String], delay: Duration = Duration(5, MILLISECONDS))(implicit ec: ExecutionContext): Iteratee[String, String] = { Iteratee.flatten(timeout(it, delay)) } val timer = new java.util.Timer(true) def timeout[A](a: => A, d: Duration)(implicit e: ExecutionContext): Future[A] = { val p = Promise[A]() timer.schedule(new java.util.TimerTask { def run() { p.complete(Try(a)) } }, d.toMillis) p.future } }
Example 126
Source File: ExecutionSpec.scala From wookiee with Apache License 2.0 | 5 votes |
package com.webtrends.harness.libs.iteratee

import scala.language.reflectiveCalls
import org.specs2.mutable._
import scala.concurrent.{ ExecutionContext, Future, Await }
import scala.concurrent.duration.{ Duration, SECONDS }
import scala.util.Try

object ExecutionSpec extends Specification {
  import Execution.trampoline

  val waitTime = Duration(5, SECONDS)

  "trampoline" should {

    "execute code in the same thread" in {
      val f = Future(Thread.currentThread())(trampoline)
      Await.result(f, waitTime) must equalTo(Thread.currentThread())
    }

    "not overflow the stack" in {
      def executeRecursively(ec: ExecutionContext, times: Int) {
        if (times > 0) {
          ec.execute(new Runnable {
            def run() = executeRecursively(ec, times - 1)
          })
        }
      }

      // Work out how deep to go to cause an overflow
      val overflowingExecutionContext = new ExecutionContext {
        def execute(runnable: Runnable): Unit = {
          runnable.run()
        }
        def reportFailure(t: Throwable): Unit = t.printStackTrace()
      }

      var overflowTimes = 1 << 10
      try {
        while (overflowTimes > 0) {
          executeRecursively(overflowingExecutionContext, overflowTimes)
          overflowTimes = overflowTimes << 1
        }
        sys.error("Can't get the stack to overflow")
      } catch {
        case _: StackOverflowError => ()
      }

      // Now verify that we don't overflow
      Try(executeRecursively(trampoline, overflowTimes)) must beSuccessfulTry[Unit]
    }

    "execute code in the order it was submitted" in {
      val runRecord = scala.collection.mutable.Buffer.empty[Int]
      case class TestRunnable(id: Int, children: Runnable*) extends Runnable {
        def run() = {
          runRecord += id
          for (c <- children) trampoline.execute(c)
        }
      }

      trampoline.execute(
        TestRunnable(0,
          TestRunnable(1),
          TestRunnable(2,
            TestRunnable(4,
              TestRunnable(6),
              TestRunnable(7)),
            TestRunnable(5,
              TestRunnable(8))),
          TestRunnable(3))
      )

      runRecord must equalTo(0 to 8)
    }

  }
}
Example 127
Source File: NonBlockingMutexSpec.scala From wookiee with Apache License 2.0 | 5 votes |
package com.webtrends.harness.libs.concurrent

import scala.language.reflectiveCalls
import org.specs2.mutable._
import java.util.concurrent.atomic.AtomicInteger
import scala.concurrent.{ ExecutionContext, Promise, Future, Await }
import scala.concurrent.duration.{ Duration, SECONDS }

object NonBlockingMutexSpec extends Specification {

  val waitTime = Duration(2, SECONDS)

  trait Tester {
    def run(body: => Unit): Unit
  }

  class MutexTester extends Tester {
    val mutex = new NonBlockingMutex()
    def run(body: => Unit) = mutex.exclusive(body)
  }

  class NaiveTester extends Tester {
    def run(body: => Unit) = body
  }

  def countOrderingErrors(runs: Int, tester: Tester)(implicit ec: ExecutionContext): Future[Int] = {
    val result = Promise[Int]()
    val runCount = new AtomicInteger(0)
    val orderingErrors = new AtomicInteger(0)

    for (i <- 0 until runs) {
      tester.run {
        val observedRunCount = runCount.getAndIncrement()

        // If we see observedRunCount != i then this task was run out of order
        if (observedRunCount != i) {
          orderingErrors.incrementAndGet() // Record the error
        }
        // If this is the last task, complete our result promise
        if ((observedRunCount + 1) >= runs) {
          result.success(orderingErrors.get)
        }
      }
    }
    result.future
  }

  "NonBlockingMutex" should {

    "run a single operation" in {
      val p = Promise[Int]()
      val mutex = new NonBlockingMutex()
      mutex.exclusive { p.success(1) }
      Await.result(p.future, waitTime) must_== (1)
    }

    "run two operations" in {
      val p1 = Promise[Unit]()
      val p2 = Promise[Unit]()
      val mutex = new NonBlockingMutex()
      mutex.exclusive { p1.success(()) }
      mutex.exclusive { p2.success(()) }
      Await.result(p1.future, waitTime) must_== (())
      Await.result(p2.future, waitTime) must_== (())
    }

    "run code in order" in {
      import ExecutionContext.Implicits.global

      def percentageOfRunsWithOrderingErrors(runSize: Int, tester: Tester): Int = {
        val results: Seq[Future[Int]] = for (i <- 0 until 9) yield {
          countOrderingErrors(runSize, tester)
        }
        Await.result(Future.sequence(results), waitTime).filter(_ > 0).size * 10
      }

      // Iteratively increase the run size until we get observable errors 90% of the time.
      // We want a high error rate because we want to then use the MutexTester
      // on the same run size and know that it is fixing up some problems. If the run size
      // is too small then the MutexTester probably isn't doing anything. We use
      // dynamic run sizing because the actual size that produces errors will vary
      // depending on the environment in which this test is run.
      var runSize = 8 // This usually reaches 8192 on my dev machine with 10 simultaneous queues
      var errorPercentage = 0
      while (errorPercentage < 90 && runSize < 1000000) {
        runSize = runSize << 1
        errorPercentage = percentageOfRunsWithOrderingErrors(runSize, new NaiveTester())
      }
      //println(s"Got $errorPercentage% ordering errors on run size of $runSize")

      // Now show that this run length works fine with the MutexTester
      percentageOfRunsWithOrderingErrors(runSize, new MutexTester()) must_== 0
    }

  }
}
Example 128
Source File: ConfluentSchemaRegistry.scala From hydra with Apache License 2.0 | 5 votes |
package hydra.avro.registry import com.google.common.cache.{CacheBuilder, CacheLoader} import com.typesafe.config.{Config, ConfigFactory} import hydra.common.logging.LoggingAdapter import io.confluent.kafka.schemaregistry.client.{ CachedSchemaRegistryClient, MockSchemaRegistryClient, SchemaMetadata, SchemaRegistryClient } import scala.collection.JavaConverters._ import scala.concurrent.{ExecutionContext, Future} case class ConfluentSchemaRegistry( registryClient: SchemaRegistryClient, registryUrl: String ) extends SchemaRegistryComponent { def getAllSubjects()(implicit ec: ExecutionContext): Future[Seq[String]] = Future( registryClient .getAllSubjects() .asScala .map(s => if (s.endsWith("-value")) s.dropRight(6) else s) .toSeq ) def getById(id: Int, suffix: String = "-value")( implicit ec: ExecutionContext ): Future[SchemaMetadata] = Future { val schema = registryClient.getById(id) val subject = schema.getFullName + suffix registryClient.getLatestSchemaMetadata(subject) } } object ConfluentSchemaRegistry extends LoggingAdapter { import hydra.common.config.ConfigSupport._ case class SchemaRegistryClientInfo( url: String, schemaRegistryMaxCapacity: Int ) private val cachedClients = CacheBuilder .newBuilder() .build( new CacheLoader[SchemaRegistryClientInfo, ConfluentSchemaRegistry] { def load(info: SchemaRegistryClientInfo): ConfluentSchemaRegistry = { log.debug(s"Creating new schema registry client for ${info.url}") val client = if (info.url == "mock") { mockRegistry } else { new CachedSchemaRegistryClient( info.url, info.schemaRegistryMaxCapacity ) } ConfluentSchemaRegistry(client, info.url) } } ) val mockRegistry = new MockSchemaRegistryClient() def registryUrl(config: Config): String = config.getStringOpt("schema.registry.url") .getOrElse(throw new IllegalArgumentException("A schema registry url is required.")) def forConfig( config: Config = ConfigFactory.load() ): ConfluentSchemaRegistry = { val identityMapCapacity = config.getIntOpt("max.schemas.per.subject").getOrElse(1000) cachedClients.get( SchemaRegistryClientInfo(registryUrl(config), identityMapCapacity) ) } }
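Usage sketch: clients are memoized per registry URL and capacity, so repeated forConfig calls with the same config share one underlying CachedSchemaRegistryClient, and setting schema.registry.url to "mock" swaps in the in-memory mock client:

import com.typesafe.config.ConfigFactory

import scala.concurrent.ExecutionContext.Implicits.global

object RegistryUsage {
  val registry = ConfluentSchemaRegistry.forConfig(ConfigFactory.load())

  // Subject names come back with the "-value" suffix already stripped.
  registry.getAllSubjects().foreach(subjects => println(subjects.mkString(", ")))
}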
Example 129
Source File: Routes.scala From hydra with Apache License 2.0 | 5 votes |
package hydra.ingest.modules

import akka.actor.ActorSystem
import akka.http.scaladsl.server.directives.RouteDirectives
import akka.http.scaladsl.server.{Route, RouteConcatenation}
import cats.effect.Sync
import hydra.common.config.ConfigSupport
import hydra.common.util.{ActorUtils, Futurable}
import hydra.ingest.app.AppConfig.AppConfig
import hydra.ingest.http._
import hydra.kafka.consumer.KafkaConsumerProxy
import hydra.kafka.endpoints.{BootstrapEndpoint, BootstrapEndpointV2, TopicMetadataEndpoint, TopicsEndpoint}
import hydra.kafka.util.KafkaUtils.TopicDetails

import scala.concurrent.ExecutionContext

final class Routes[F[_]: Sync: Futurable] private(programs: Programs[F], algebras: Algebras[F], cfg: AppConfig)
                                                 (implicit system: ActorSystem) extends RouteConcatenation with ConfigSupport {

  private implicit val ec: ExecutionContext = system.dispatcher

  private val bootstrapEndpointV2 =
    if (cfg.v2MetadataTopicConfig.createV2TopicsEnabled) {
      val topicDetails = TopicDetails(
        cfg.createTopicConfig.defaultNumPartions,
        cfg.createTopicConfig.defaultReplicationFactor
      )
      new BootstrapEndpointV2(programs.createTopic, topicDetails).route
    } else {
      RouteDirectives.reject
    }

  lazy val routes: F[Route] = Sync[F].delay {
    import ConfigSupport._

    //TODO: remove this lookup
    val consumerPath = applicationConfig
      .getStringOpt("actors.kafka.consumer_proxy.path")
      .getOrElse(
        s"/user/service/${ActorUtils.actorName(classOf[KafkaConsumerProxy])}"
      )

    val consumerProxy = system.actorSelection(consumerPath)

    new SchemasEndpoint().route ~
      new BootstrapEndpoint(system).route ~
      new TopicMetadataEndpoint(consumerProxy, algebras.metadata).route ~
      new IngestorRegistryEndpoint().route ~
      new IngestionWebSocketEndpoint().route ~
      new IngestionEndpoint(
        cfg.ingestConfig.alternateIngestEnabled,
        programs.ingestionFlow,
        programs.ingestionFlowV2,
        cfg.ingestConfig.useOldIngestIfUAContains
      ).route ~
      new TopicsEndpoint(consumerProxy)(system.dispatcher).route ~
      HealthEndpoint.route ~
      bootstrapEndpointV2
  }
}

object Routes {
  def make[F[_]: Sync: Futurable](programs: Programs[F], algebras: Algebras[F], config: AppConfig)
                                 (implicit system: ActorSystem): F[Routes[F]] =
    Sync[F].delay(new Routes[F](programs, algebras, config))
}
Example 130
Source File: MockEndpoint.scala From hydra with Apache License 2.0 | 5 votes |
package hydra.ingest.http.mock import akka.actor.ActorSystem import akka.http.scaladsl.model.StatusCodes._ import akka.http.scaladsl.server.Directives._ import akka.http.scaladsl.server.{ExceptionHandler, Route} import hydra.ingest.http.SchemasEndpoint import io.confluent.kafka.schemaregistry.client.rest.exceptions.RestClientException import scala.concurrent.{ExecutionContext, Future} class MockEndpoint( implicit system: ActorSystem, implicit val e: ExecutionContext ) { def throwRestClientException( statusCode: Int, errorCode: Int, errorMessage: String ): Future[Any] = { throw new RestClientException(errorMessage, statusCode, errorCode) } val schemaRouteExceptionHandler: ExceptionHandler = new SchemasEndpoint().excptHandler def route: Route = { pathPrefix("throwRestClientException") { handleExceptions(schemaRouteExceptionHandler) { get { parameters('statusCode, 'errorCode, 'errorMessage) { (statusCode, errorCode, errorMessage) => pathEndOrSingleSlash { onSuccess( throwRestClientException( statusCode.toInt, errorCode.toInt, errorMessage ) ) { _ => complete(OK) } } } } } } } }
Example 131
Source File: IngestionFlowSpec.scala From hydra with Apache License 2.0 | 5 votes |
package hydra.ingest.services

import cats.effect.{Concurrent, ContextShift, IO}
import hydra.avro.registry.SchemaRegistry
import hydra.core.ingest.HydraRequest
import hydra.core.ingest.RequestParams.{HYDRA_KAFKA_TOPIC_PARAM, HYDRA_RECORD_KEY_PARAM}
import hydra.ingest.services.IngestionFlow.MissingTopicNameException
import hydra.kafka.algebras.KafkaClientAlgebra
import org.apache.avro.{Schema, SchemaBuilder}
import org.scalatest.flatspec.AnyFlatSpec
import org.scalatest.matchers.should.Matchers

import scala.concurrent.ExecutionContext

class IngestionFlowSpec extends AnyFlatSpec with Matchers {

  private implicit val contextShift: ContextShift[IO] = IO.contextShift(ExecutionContext.global)
  private implicit val concurrentEffect: Concurrent[IO] = IO.ioConcurrentEffect
  private implicit val mode: scalacache.Mode[IO] = scalacache.CatsEffect.modes.async

  private val testSubject: String = "test_subject"
  private val testSubjectNoKey: String = "test_subject_no_key"
  private val testKey: String = "test"
  private val testPayload: String = s"""{"id": "$testKey", "testField": true}"""
  private val testSchema: Schema = SchemaBuilder.record("TestRecord")
    .prop("hydra.key", "id")
    .fields().requiredString("id").requiredBoolean("testField").endRecord()
  private val testSchemaNoKey: Schema = SchemaBuilder.record("TestRecordNoKey")
    .fields().requiredString("id").requiredBoolean("testField").endRecord()

  private def ingest(request: HydraRequest): IO[KafkaClientAlgebra[IO]] = for {
    schemaRegistry <- SchemaRegistry.test[IO]
    _ <- schemaRegistry.registerSchema(testSubject + "-value", testSchema)
    _ <- schemaRegistry.registerSchema(testSubjectNoKey + "-value", testSchemaNoKey)
    kafkaClient <- KafkaClientAlgebra.test[IO]
    ingestFlow <- IO(new IngestionFlow[IO](schemaRegistry, kafkaClient, "https://schemaRegistry.notreal"))
    _ <- ingestFlow.ingest(request)
  } yield kafkaClient

  it should "ingest a message" in {
    val testRequest = HydraRequest("correlationId", testPayload, metadata = Map(HYDRA_KAFKA_TOPIC_PARAM -> testSubject))
    ingest(testRequest).flatMap { kafkaClient =>
      kafkaClient.consumeStringKeyMessages(testSubject, "test-consumer").take(1).compile.toList.map { publishedMessages =>
        val firstMessage = publishedMessages.head
        (firstMessage._1, firstMessage._2.get.toString) shouldBe (Some(testKey), testPayload)
      }
    }.unsafeRunSync()
  }

  it should "ingest a message with a null key" in {
    val testRequest = HydraRequest("correlationId", testPayload, metadata = Map(HYDRA_KAFKA_TOPIC_PARAM -> testSubjectNoKey))
    ingest(testRequest).flatMap { kafkaClient =>
      kafkaClient.consumeStringKeyMessages(testSubjectNoKey, "test-consumer").take(1).compile.toList.map { publishedMessages =>
        val firstMessage = publishedMessages.head
        (firstMessage._1, firstMessage._2.get.toString) shouldBe (None, testPayload)
      }
    }.unsafeRunSync()
  }

  it should "return an error when no topic name is provided" in {
    val testRequest = HydraRequest("correlationId", testPayload)
    ingest(testRequest).attempt.unsafeRunSync() shouldBe Left(MissingTopicNameException(testRequest))
  }

  it should "take the key from the header if present" in {
    val headerKey = "someDifferentKey"
    val testRequest = HydraRequest("correlationId", testPayload, metadata = Map(HYDRA_RECORD_KEY_PARAM -> headerKey, HYDRA_KAFKA_TOPIC_PARAM -> testSubject))
    ingest(testRequest).flatMap { kafkaClient =>
      kafkaClient.consumeStringKeyMessages(testSubject, "test-consumer").take(1).compile.toList.map { publishedMessages =>
        val firstMessage = publishedMessages.head
        (firstMessage._1, firstMessage._2.get.toString) shouldBe (Some(headerKey), testPayload)
      }
    }.unsafeRunSync()
  }
}
Example 132
Source File: TestRecordFactory.scala From hydra with Apache License 2.0 | 5 votes |
package hydra.ingest.test import hydra.core.ingest.HydraRequest import hydra.core.transport.{AckStrategy, HydraRecord, RecordFactory} import scala.concurrent.{ExecutionContext, Future} object TestRecordFactory extends RecordFactory[String, String] { override def build(r: HydraRequest)(implicit ec: ExecutionContext) = { val timeout = r.metadataValueEquals("timeout", "true") if (timeout) { Future.successful( TimeoutRecord( "test-topic", r.correlationId.toString, r.payload, r.ackStrategy ) ) } else { Future.successful( TestRecord( "test-topic", r.correlationId.toString, r.payload, r.ackStrategy ) ) } } } case class TestRecord( destination: String, key: String, payload: String, ackStrategy: AckStrategy ) extends HydraRecord[String, String] case class TimeoutRecord( destination: String, key: String, payload: String, ackStrategy: AckStrategy ) extends HydraRecord[String, String]
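Usage sketch: a request carrying the timeout=true metadata flag builds a TimeoutRecord, anything else a TestRecord; the correlation id and payload are invented:

import scala.concurrent.ExecutionContext.Implicits.global

object TestRecordFactoryUsage {
  import hydra.core.ingest.HydraRequest

  // Invented request values; the "timeout" flag matches the factory's check above.
  val request = HydraRequest("correlation-1", "some payload", metadata = Map("timeout" -> "true"))

  TestRecordFactory.build(request).foreach {
    case _: TimeoutRecord => println("built a TimeoutRecord")
    case _: TestRecord    => println("built a TestRecord")
    case other            => println(s"unexpected record: $other")
  }
}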
Example 133
Source File: RabbitRecordFactory.scala From hydra with Apache License 2.0 | 5 votes |
package hydra.rabbit import hydra.common.config.ConfigSupport import hydra.core.ingest.HydraRequest import hydra.core.transport.{ AckStrategy, HydraRecord, RecordFactory, RecordMetadata } import hydra.rabbit.RabbitRecord.{ DESTINATION_TYPE_EXCHANGE, DESTINATION_TYPE_QUEUE, HYDRA_RABBIT_EXCHANGE, HYDRA_RABBIT_QUEUE } import org.apache.commons.lang3.StringUtils import scala.concurrent.{ExecutionContext, Future} object RabbitRecordFactory extends RecordFactory[String, String] with ConfigSupport { override def build( request: HydraRequest )(implicit ec: ExecutionContext): Future[RabbitRecord] = { val props = Seq( request.metadataValue(HYDRA_RABBIT_EXCHANGE), request.metadataValue(HYDRA_RABBIT_QUEUE) ).flatten Future { require( props.length == 1, "A single parameter for exchange or queue is required" ) val destination = request.metadataValue(HYDRA_RABBIT_EXCHANGE) match { case Some(exchange) => (exchange, DESTINATION_TYPE_EXCHANGE) case _ => ( request.metadataValue(HYDRA_RABBIT_QUEUE).get, DESTINATION_TYPE_QUEUE ) } RabbitRecord( destination._1, destination._2, request.payload, request.ackStrategy ) } } } case class RabbitRecord( destination: String, destinationType: String, payload: String, ackStrategy: AckStrategy ) extends HydraRecord[String, String] { override val key: String = StringUtils.EMPTY } object RabbitRecord { val HYDRA_RABBIT_EXCHANGE = "hydra-rabbit-exchange" val HYDRA_RABBIT_QUEUE = "hydra-rabbit-queue" val DESTINATION_TYPE_EXCHANGE = "exchange" val DESTINATION_TYPE_QUEUE = "queue" } case class RabbitRecordMetadata( timestamp: Long, id: Long, destination: String, destinationType: String, ackStrategy: AckStrategy ) extends RecordMetadata
Example 134
Source File: DeleteTombstoneRecordFactory.scala From hydra with Apache License 2.0 | 5 votes |
package hydra.kafka.producer import hydra.core.ingest.HydraRequest import scala.concurrent.{ExecutionContext, Future} import scala.util.{Failure, Success, Try} object DeleteTombstoneRecordFactory extends KafkaRecordFactory[String, Any] { override def build( request: HydraRequest )(implicit ex: ExecutionContext): Future[DeleteTombstoneRecord] = { val theKey = getKey(request) .fold[Try[String]]( Failure(new IllegalArgumentException("A key is required for deletes.")) )(Success(_)) for { key <- Future.fromTry(theKey) topic <- Future.fromTry(getTopic(request)) } yield DeleteTombstoneRecord(topic, key, request.ackStrategy) } }
Example 135
Source File: StringRecordFactory.scala From hydra with Apache License 2.0 | 5 votes |
package hydra.kafka.producer import hydra.core.ingest.HydraRequest import scala.concurrent.{ExecutionContext, Future} object StringRecordFactory extends KafkaRecordFactory[String, String] { override def build(request: HydraRequest)(implicit ex: ExecutionContext) = { for { topic <- Future.fromTry(getTopic(request)) } yield StringRecord( topic, getKey(request, request.payload), request.payload, request.ackStrategy ) } }
Example 136
Source File: JsonRecordFactory.scala From hydra with Apache License 2.0 | 5 votes |
package hydra.kafka.producer import com.fasterxml.jackson.databind.{JsonNode, ObjectMapper} import hydra.core.ingest.HydraRequest import scala.concurrent.{ExecutionContext, Future} object JsonRecordFactory extends KafkaRecordFactory[String, JsonNode] { val mapper = new ObjectMapper() override def build( request: HydraRequest )(implicit ec: ExecutionContext): Future[KafkaRecord[String, JsonNode]] = { for { topic <- Future.fromTry(getTopic(request)) payload <- parseJson(request.payload) } yield JsonRecord( topic, getKey(request, payload), payload, request.ackStrategy ) } private def parseJson(json: String)(implicit ec: ExecutionContext) = Future(mapper.reader().readTree(json)) }
Example 137
Source File: KafkaRecordFactories.scala From hydra with Apache License 2.0 | 5 votes |
package hydra.kafka.producer import akka.actor.ActorRef import hydra.core.ingest.HydraRequest import hydra.core.ingest.RequestParams.HYDRA_RECORD_FORMAT_PARAM import hydra.core.transport.{HydraRecord, RecordFactory} import scala.concurrent.{ExecutionContext, Future} import scala.util.{Failure, Success, Try} class KafkaRecordFactories(schemaLoader: ActorRef) extends RecordFactory[Any, Any] { private val avroRecordFactory = new AvroRecordFactory(schemaLoader) def factoryFor(request: HydraRequest): Try[KafkaRecordFactory[_, _]] = { deleteOrElse(request) { request.metadataValue(HYDRA_RECORD_FORMAT_PARAM) match { case Some(value) if (value.equalsIgnoreCase("string")) => StringRecordFactory case Some(value) if (value.equalsIgnoreCase("json")) => JsonRecordFactory case Some(value) if (value.equalsIgnoreCase("avro")) => avroRecordFactory case Some(value) if (value.equalsIgnoreCase("avro-key")) => new AvroKeyRecordFactory(schemaLoader) case Some(value) => throw new IllegalArgumentException(s"'$value' is not a valid format.") case _ => avroRecordFactory } } } private def deleteOrElse( r: HydraRequest )(orElse: => KafkaRecordFactory[_, _]): Try[KafkaRecordFactory[_, _]] = { Try { Option(r.payload) .map(_ => orElse) .getOrElse(DeleteTombstoneRecordFactory) } } override def build( request: HydraRequest )(implicit ec: ExecutionContext): Future[HydraRecord[_, _]] = { factoryFor(request) match { case Success(factory) => factory.build(request) case Failure(ex) => Future.failed(ex) } } }
Example 138
Source File: AvroRecordFactory.scala From hydra with Apache License 2.0 | 5 votes |
package hydra.kafka.producer import akka.actor.ActorRef import akka.pattern.ask import akka.util import com.pluralsight.hydra.avro.JsonConverter import hydra.avro.registry.ConfluentSchemaRegistry import hydra.avro.resource.SchemaResource import hydra.avro.util.AvroUtils import hydra.common.config.ConfigSupport import hydra.common.logging.LoggingAdapter import hydra.core.akka.SchemaRegistryActor.{FetchSchemaRequest, FetchSchemaResponse} import hydra.core.ingest.HydraRequest import hydra.core.transport.ValidationStrategy.Strict import org.apache.avro.generic.GenericRecord import scala.concurrent.duration._ import scala.concurrent.{ExecutionContext, Future} class AvroRecordFactory(schemaResourceLoader: ActorRef) extends KafkaRecordFactory[String, GenericRecord] with ConfigSupport with LoggingAdapter { private implicit val timeout = util.Timeout(3.seconds) override def build( request: HydraRequest )(implicit ec: ExecutionContext): Future[AvroRecord] = { for { (topic, subject) <- Future.fromTry(getTopicAndSchemaSubject(request)) schemaResource <- (schemaResourceLoader ? FetchSchemaRequest(subject)) .mapTo[FetchSchemaResponse] .map(_.schemaResource) record <- convert(schemaResource, request) } yield AvroRecord( topic, schemaResource.schema, getKey(request, record), record, request.ackStrategy ) } private def convert(schemaResource: SchemaResource, request: HydraRequest)( implicit ec: ExecutionContext ): Future[GenericRecord] = { val converter = new JsonConverter[GenericRecord]( schemaResource.schema, request.validationStrategy == Strict ) Future({ val converted = converter.convert(request.payload) converted }).recover { case ex => throw AvroUtils.improveException(ex, schemaResource, ConfluentSchemaRegistry.registryUrl(applicationConfig)) } } }
Example 139
Source File: BootstrapEndpointActors.scala From hydra with Apache License 2.0 | 5 votes |
package hydra.kafka.endpoints import akka.actor.{ActorRef, ActorSystem} import akka.stream.{ActorMaterializer, Materializer} import hydra.avro.registry.ConfluentSchemaRegistry import hydra.common.config.ConfigSupport import hydra.core.akka.SchemaRegistryActor import hydra.kafka.services.{StreamsManagerActor, TopicBootstrapActor} import hydra.kafka.util.KafkaUtils import scala.concurrent.ExecutionContext trait BootstrapEndpointActors extends ConfigSupport { implicit val system: ActorSystem private[kafka] val kafkaIngestor = system.actorSelection(path = applicationConfig.getString("kafka-ingestor-path") ) private[kafka] val schemaRegistryActor = system.actorOf(SchemaRegistryActor.props(applicationConfig)) private[kafka] val bootstrapKafkaConfig = applicationConfig.getConfig("bootstrap-config") private[kafka] val streamsManagerProps = StreamsManagerActor.props( bootstrapKafkaConfig, KafkaUtils.BootstrapServers, ConfluentSchemaRegistry.forConfig(applicationConfig).registryClient ) val bootstrapActor: ActorRef = system.actorOf( TopicBootstrapActor.props( schemaRegistryActor, kafkaIngestor, streamsManagerProps, Some(bootstrapKafkaConfig) ) ) }
Example 140
Source File: TopicsEndpoint.scala From hydra with Apache License 2.0 | 5 votes |
package hydra.kafka.endpoints import akka.actor.ActorSelection import akka.http.scaladsl.common.EntityStreamingSupport import akka.kafka.Subscriptions import akka.kafka.scaladsl.Consumer import akka.pattern.ask import akka.util.Timeout import hydra.core.http.RouteSupport import hydra.kafka.consumer.KafkaConsumerProxy.{GetLatestOffsets, LatestOffsetsResponse} import org.apache.kafka.clients.consumer.ConsumerRecord import org.apache.kafka.common.TopicPartition import scala.collection.immutable.Map import scala.concurrent.duration._ import scala.concurrent.{Await, ExecutionContext, Future} class TopicsEndpoint(consumerProxy:ActorSelection)(implicit ec:ExecutionContext) extends RouteSupport { import hydra.kafka.util.KafkaUtils._ implicit val jsonStreamingSupport = EntityStreamingSupport.json() override val route = path("transports" / "kafka" / "consumer" / "topics" / Segment) { topicName => get { extractRequestContext { ctx => parameters('format.?, 'group.?, 'n ? 10, 'start ? "earliest") { (format, groupId, n, startOffset) => val settings = loadConsumerSettings[Any, Any]( format.getOrElse("avro"), groupId.getOrElse("hydra"), startOffset ) val offsets = latestOffsets(topicName) val source = Consumer .plainSource(settings, Subscriptions.topics(topicName)) .initialTimeout(5.seconds) .zipWithIndex .takeWhile(rec => rec._2 <= n && !shouldCancel(offsets, rec._1) ) .map(rec => rec._1.value().toString) .watchTermination()((_, termination) => termination.failed.foreach { case cause => ctx.fail(cause) } ) complete(source) } } } } def shouldCancel( fpartitions: Future[Map[TopicPartition, Long]], record: ConsumerRecord[Any, Any] ): Boolean = { if (fpartitions.isCompleted) { val partitions = Await.result(fpartitions, 1.millis) val tp = new TopicPartition(record.topic(), record.partition()) partitions.get(tp) match { case Some(offset) => record.offset() >= offset case None => false } } else { false } } private def latestOffsets( topic: String ): Future[Map[TopicPartition, Long]] = { implicit val timeout = Timeout(5 seconds) (consumerProxy ? GetLatestOffsets(topic)) .mapTo[LatestOffsetsResponse] .map(_.offsets) } }
Example 141
Source File: KafkaAdminAlgebraSpec.scala From hydra with Apache License 2.0 | 5 votes |
package hydra.kafka.algebras import akka.actor.ActorSystem import cats.effect.{ContextShift, IO} import cats.implicits._ import hydra.kafka.util.KafkaUtils.TopicDetails import net.manub.embeddedkafka.{EmbeddedKafka, EmbeddedKafkaConfig} import org.scalatest.BeforeAndAfterAll import org.scalatest.matchers.should.Matchers import org.scalatest.wordspec.AnyWordSpecLike import scala.concurrent.ExecutionContext final class KafkaAdminAlgebraSpec extends AnyWordSpecLike with Matchers with BeforeAndAfterAll with EmbeddedKafka { private val port = 8023 implicit private val kafkaConfig: EmbeddedKafkaConfig = EmbeddedKafkaConfig(kafkaPort = port, zooKeeperPort = 3027) implicit private val contextShift: ContextShift[IO] = IO.contextShift(ExecutionContext.global) implicit private val system: ActorSystem = ActorSystem( "kafka-client-spec-system" ) override def beforeAll(): Unit = { super.beforeAll() EmbeddedKafka.start() } override def afterAll(): Unit = { super.afterAll() EmbeddedKafka.stop() } (for { live <- KafkaAdminAlgebra .live[IO](s"localhost:$port") test <- KafkaAdminAlgebra.test[IO] } yield { runTests(live) runTests(test, isTest = true) }).unsafeRunSync() private def runTests(kafkaClient: KafkaAdminAlgebra[IO], isTest: Boolean = false): Unit = { (if (isTest) "KafkaAdmin#test" else "KafkaAdmin#live") must { "create a topic" in { val topicName = "Topic1" val topicDetails = TopicDetails(3, 1.toShort) (kafkaClient.createTopic(topicName, topicDetails) *> kafkaClient .describeTopic(topicName) .map { case Some(topic) => topic.name shouldBe topicName topic.numberPartitions shouldBe topicDetails.numPartitions case None => fail("Found None when a Topic was Expected") }).unsafeRunSync() } "list all topics" in { kafkaClient.getTopicNames.unsafeRunSync() shouldBe List("Topic1") } "delete a topic" in { val topicToDelete = "topic_to_delete" (for { _ <- kafkaClient.createTopic(topicToDelete, TopicDetails(1, 1)) _ <- kafkaClient.deleteTopic(topicToDelete) maybeTopic <- kafkaClient.describeTopic(topicToDelete) } yield maybeTopic should not be defined).unsafeRunSync() } } } }
Example 142
Source File: MetadataAlgebraSpec.scala From hydra with Apache License 2.0 | 5 votes |
package hydra.kafka.algebras

import java.time.Instant

import cats.data.NonEmptyList
import cats.effect.{Concurrent, ContextShift, IO, Sync, Timer}
import cats.implicits._
import hydra.avro.registry.SchemaRegistry
import hydra.core.marshallers.History
import hydra.kafka.algebras.MetadataAlgebra.TopicMetadataContainer
import hydra.kafka.model.ContactMethod.Slack
import hydra.kafka.model.TopicMetadataV2Request.Subject
import hydra.kafka.model.{Public, StreamTypeV2, TopicMetadataV2, TopicMetadataV2Key, TopicMetadataV2Request, TopicMetadataV2Value}
import io.chrisdavenport.log4cats.SelfAwareStructuredLogger
import io.chrisdavenport.log4cats.slf4j.Slf4jLogger
import org.apache.avro.generic.GenericRecord
import org.scalatest.Assertion
import org.scalatest.matchers.should.Matchers
import org.scalatest.wordspec.AnyWordSpecLike
import retry.RetryPolicies._
import retry.syntax.all._
import retry.{RetryPolicy, _}

import scala.concurrent.ExecutionContext
import scala.concurrent.duration._

class MetadataAlgebraSpec extends AnyWordSpecLike with Matchers {

  implicit private val contextShift: ContextShift[IO] = IO.contextShift(ExecutionContext.global)
  private implicit val concurrentEffect: Concurrent[IO] = IO.ioConcurrentEffect
  private implicit val policy: RetryPolicy[IO] = limitRetries[IO](5) |+| exponentialBackoff[IO](500.milliseconds)
  private implicit val timer: Timer[IO] = IO.timer(ExecutionContext.global)
  private implicit def noop[A]: (A, RetryDetails) => IO[Unit] = retry.noop[IO, A]

  implicit private def unsafeLogger[F[_]: Sync]: SelfAwareStructuredLogger[F] =
    Slf4jLogger.getLogger[F]

  private implicit class RetryAndAssert[A](boolIO: IO[A]) {
    def retryIfFalse(check: A => Boolean): IO[Assertion] =
      boolIO.map(check).retryingM(identity, policy, noop).map(assert(_))
  }

  private val metadataTopicName = "_internal.metadataTopic"
  private val consumerGroup = "Consumer Group"

  (for {
    kafkaClient <- KafkaClientAlgebra.test[IO]
    schemaRegistry <- SchemaRegistry.test[IO]
    metadata <- MetadataAlgebra.make(metadataTopicName, consumerGroup, kafkaClient, schemaRegistry, consumeMetadataEnabled = true)
  } yield {
    runTests(metadata, kafkaClient)
  }).unsafeRunSync()

  private def runTests(metadataAlgebra: MetadataAlgebra[IO], kafkaClientAlgebra: KafkaClientAlgebra[IO]): Unit = {
    "MetadataAlgebraSpec" should {

      "retrieve none for non-existant topic" in {
        val subject = Subject.createValidated("Non-existantTopic").get
        metadataAlgebra.getMetadataFor(subject).unsafeRunSync() shouldBe None
      }

      "retrieve metadata" in {
        val subject = Subject.createValidated("subject1").get
        val (genericRecordsIO, key, value) = getMetadataGenericRecords(subject)

        (for {
          record <- genericRecordsIO
          _ <- kafkaClientAlgebra.publishMessage(record, metadataTopicName)
          _ <- metadataAlgebra.getMetadataFor(subject).retryIfFalse(_.isDefined)
          metadata <- metadataAlgebra.getMetadataFor(subject)
        } yield metadata shouldBe Some(TopicMetadataContainer(key, value, None, None))).unsafeRunSync()
      }

      "retrieve all metadata" in {
        val subject = Subject.createValidated("subject2").get
        val (genericRecordsIO, key, value) = getMetadataGenericRecords(subject)
        (for {
          record <- genericRecordsIO
          _ <- kafkaClientAlgebra.publishMessage(record, metadataTopicName)
          _ <- metadataAlgebra.getMetadataFor(subject).retryIfFalse(_.isDefined)
          allMetadata <- metadataAlgebra.getAllMetadata
        } yield allMetadata should have length 2).unsafeRunSync()
      }
    }
  }

  private def getMetadataGenericRecords(subject: Subject): (IO[(GenericRecord, Option[GenericRecord])], TopicMetadataV2Key, TopicMetadataV2Value) = {
    val key = TopicMetadataV2Key(subject)
    val value = TopicMetadataV2Value(
      StreamTypeV2.Entity,
      deprecated = false,
      Public,
      NonEmptyList.one(Slack.create("#channel").get),
      Instant.now,
      List(),
      None)
    (TopicMetadataV2.encode[IO](key, Some(value)), key, value)
  }
}
Example 143
Source File: TestRecordFactory.scala From hydra with Apache License 2.0 | 5 votes |
package hydra.kafka.test import hydra.core.ingest.HydraRequest import hydra.core.transport.{AckStrategy, HydraRecord, RecordFactory} import scala.concurrent.{ExecutionContext, Future} object TestRecordFactory extends RecordFactory[String, String] { override def build(r: HydraRequest)(implicit ec: ExecutionContext) = { val timeout = r.metadataValueEquals("timeout", "true") if (timeout) { Future.successful( TimeoutRecord( "test-topic", r.correlationId.toString, r.payload, r.ackStrategy ) ) } else { Future.successful( TestRecord( "test-topic", r.correlationId.toString, r.payload, r.ackStrategy ) ) } } } case class TestRecord( destination: String, key: String, payload: String, ackStrategy: AckStrategy ) extends HydraRecord[String, String] case class TimeoutRecord( destination: String, key: String, payload: String, ackStrategy: AckStrategy ) extends HydraRecord[String, String]
Example 144
Source File: HydraMetrics.scala From hydra with Apache License 2.0 | 5 votes |
package hydra.core.monitor import kamon.Kamon import kamon.metric.{Counter, Gauge, Histogram} import kamon.tag.TagSet import scalacache.guava.GuavaCache import scala.concurrent.{ExecutionContext, Future} object HydraMetrics { import scalacache.modes.scalaFuture._ type Tags = Seq[(String, String)] private[core] lazy val countersCache = GuavaCache[Counter] private[core] lazy val gaugesCache = GuavaCache[Gauge] private[core] lazy val histogramsCache = GuavaCache[Histogram] def getOrCreateCounter(lookupKey: String, metricName: String, tags: => Tags)( implicit ec: ExecutionContext ): Future[Counter] = { countersCache.caching(lookupKey)(ttl = None) { Kamon.counter(metricName).withTags(TagSet.from(tags.toMap)) } } def getOrCreateGauge(lookupKey: String, metricName: String, tags: => Tags)( implicit ec: ExecutionContext ): Future[Gauge] = { gaugesCache.caching(lookupKey)(ttl = None) { Kamon.gauge(metricName).withTags(TagSet.from(tags.toMap)) } } def getOrCreateHistogram( lookupKey: String, metricName: String, tags: => Tags )(implicit ec: ExecutionContext): Future[Histogram] = { histogramsCache.caching(lookupKey)(ttl = None) { Kamon.histogram(metricName).withTags(TagSet.from(tags.toMap)) } } def incrementCounter(lookupKey: String, metricName: String, tags: => Tags)( implicit ec: ExecutionContext ): Future[Unit] = { getOrCreateCounter(lookupKey, metricName, tags).map(_.increment()) } def incrementGauge(lookupKey: String, metricName: String, tags: => Tags)( implicit ec: ExecutionContext ): Future[Unit] = { getOrCreateGauge(lookupKey, metricName, tags).map(_.increment()) } def decrementGauge(lookupKey: String, metricName: String, tags: => Tags)( implicit ec: ExecutionContext ): Future[Unit] = { getOrCreateGauge(lookupKey, metricName, tags).map(_.decrement()) } def recordToHistogram( lookupKey: String, metricName: String, value: Long, tags: => Tags )(implicit ec: ExecutionContext): Future[Unit] = { getOrCreateHistogram(lookupKey, metricName, tags).map(_.record(value)) } }
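Every getter funnels through a Guava-backed cache keyed by lookupKey, so repeated calls with the same key reuse a single Kamon instrument, and the by-name tags are only evaluated when the instrument is first created. A minimal usage sketch (the metric and tag names are illustrative, not part of the original):

import scala.concurrent.ExecutionContext.Implicits.global

import hydra.core.monitor.HydraMetrics

object HydraMetricsUsage extends App {
  // Repeated calls with the same lookupKey hit the cached Counter.
  HydraMetrics.incrementCounter(
    lookupKey = "ingest.messages.topic-a",
    metricName = "ingest_messages",
    tags = Seq("topic" -> "topic-a", "result" -> "success")
  )

  // Histograms follow the same caching pattern, keyed independently.
  HydraMetrics.recordToHistogram(
    lookupKey = "ingest.latency.topic-a",
    metricName = "ingest_latency_ms",
    value = 42L,
    tags = Seq("topic" -> "topic-a")
  )
}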
Example 145
Source File: TestRecordFactory.scala From hydra with Apache License 2.0 | 5 votes |
package hydra.core.test import hydra.core.ingest.HydraRequest import hydra.core.transport._ import scala.concurrent.{ExecutionContext, Future} object TestRecordFactory extends RecordFactory[String, String] { override def build(r: HydraRequest)(implicit ec: ExecutionContext) = { val timeout = r.metadataValueEquals("timeout", "true") if (timeout) { Future.successful( TimeoutRecord( "test-topic", r.correlationId.toString, r.payload, r.ackStrategy ) ) } else { Future.successful( TestRecord( "test-topic", r.correlationId.toString, r.payload, r.ackStrategy ) ) } } } case class TestRecord( destination: String, key: String, payload: String, ackStrategy: AckStrategy ) extends HydraRecord[String, String] case class TestRecordMetadata( deliveryId: Long, timestamp: Long = System.currentTimeMillis, destination: String, ackStrategy: AckStrategy ) extends RecordMetadata case class TimeoutRecord( destination: String, key: String, payload: String, ackStrategy: AckStrategy ) extends HydraRecord[String, String]
Example 146
Source File: package.scala From eventuate with Apache License 2.0 | 5 votes |
package com.rbmhtechnology.example import io.vertx.core.{ AsyncResult, DeploymentOptions, Handler, Vertx } import scala.concurrent.{ ExecutionContext, Future, Promise } import scala.reflect.ClassTag package object vertx { object ExampleVertxExtensions { implicit class PromiseHandler[A](promise: Promise[A]) { def asVertxHandler: Handler[AsyncResult[A]] = new Handler[AsyncResult[A]] { override def handle(res: AsyncResult[A]): Unit = { if (res.succeeded()) { promise.success(res.result()) } else { promise.failure(res.cause()) } } } } implicit class RichVertxDeployment(vertx: Vertx) { def deploy[T](options: DeploymentOptions = new DeploymentOptions())(implicit t: ClassTag[T], ec: ExecutionContext): Future[String] = { val promise = Promise[String] vertx.deployVerticle(t.runtimeClass.getName, options, promise.asVertxHandler) promise.future } } } }
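RichVertxDeployment turns Vert.x's callback-style deployVerticle into a Future of the deployment id, resolving the verticle class from its ClassTag. A small sketch, assuming a hypothetical EchoVerticle:

import io.vertx.core.{ AbstractVerticle, Vertx }

import scala.concurrent.ExecutionContext.Implicits.global

import com.rbmhtechnology.example.vertx.ExampleVertxExtensions._

// A made-up verticle, just to have something to deploy.
class EchoVerticle extends AbstractVerticle {
  override def start(): Unit = println("EchoVerticle started")
}

object DeployUsage extends App {
  val vertx = Vertx.vertx()
  // deploy[T] completes the returned Future with the deployment id.
  vertx.deploy[EchoVerticle]().foreach(id => println(s"deployed: $id"))
}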
Example 147
Source File: Writer.scala From eventuate with Apache License 2.0 | 5 votes |
package com.rbmhtechnology.example.querydb

//#writer
import java.lang.{ Long => JLong }

import akka.actor.ActorRef
import com.datastax.driver.core._
import com.rbmhtechnology.eventuate.EventsourcedWriter

import scala.concurrent.Future

// NOTE: the body of the Writer class (an EventsourcedWriter that projects
// events into a Cassandra query table) did not survive extraction of this
// listing; only its trailing readSuccess override remained. It belongs to a
// class of roughly this shape (the constructor and type parameters below are
// an assumption):
//
//   class Writer(val id: String, val eventLog: ActorRef, session: Session)
//     extends EventsourcedWriter[Long, Unit] {
//     ...
//     override def readSuccess(result: Long): Option[Long] = Some(result + 1L)
//   }

object Writer {
  import java.util.concurrent.Executor

  import com.google.common.util.concurrent.ListenableFuture

  import scala.concurrent.{ ExecutionContext, Promise }
  import scala.language.implicitConversions
  import scala.util.Try

  implicit class ListenableFutureConverter[A](lf: ListenableFuture[A])(implicit executionContext: ExecutionContext) {

    def toFuture: Future[A] = {
      val promise = Promise[A]
      lf.addListener(new Runnable {
        def run() = promise.complete(Try(lf.get()))
      }, executionContext.asInstanceOf[Executor])
      promise.future
    }
  }
}
//#
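toFuture bridges a Guava ListenableFuture into a scala.concurrent.Future by completing a Promise from a listener. Note the cast of the ExecutionContext to an Executor: it only succeeds for contexts that really are executors, such as the global context or an Akka dispatcher. A small sketch using Guava's SettableFuture:

import com.google.common.util.concurrent.SettableFuture

import scala.concurrent.ExecutionContext.Implicits.global

import com.rbmhtechnology.example.querydb.Writer.ListenableFutureConverter

object ConverterUsage extends App {
  val lf = SettableFuture.create[String]()
  val f = lf.toFuture // adapt the Guava future to a scala.concurrent.Future
  lf.set("done")      // completing the Guava side completes the Scala side
  f.foreach(println)
}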
Example 148
Source File: CassandraEventLogStore.scala From eventuate with Apache License 2.0 | 5 votes |
package com.rbmhtechnology.eventuate.log.cassandra

import java.io.Closeable
import java.lang.{ Long => JLong }

import com.datastax.driver.core._
import com.rbmhtechnology.eventuate._
import com.rbmhtechnology.eventuate.log._

import scala.collection.JavaConverters._
import scala.collection.immutable.{ VectorBuilder, Seq }
import scala.concurrent.{ ExecutionContext, Future }

private[eventuate] class CassandraEventLogStore(cassandra: Cassandra, logId: String) {

  val preparedWriteEventStatement: PreparedStatement = cassandra.prepareWriteEvent(logId)
  val preparedReadEventsStatement: PreparedStatement = cassandra.prepareReadEvents(logId)

  def write(events: Seq[DurableEvent], partition: Long) = cassandra.executeBatch { batch =>
    events.foreach { event =>
      batch.add(preparedWriteEventStatement.bind(partition: JLong, event.localSequenceNr: JLong, cassandra.eventToByteBuffer(event)))
    }
  }

  def readAsync(fromSequenceNr: Long, toSequenceNr: Long, max: Int, fetchSize: Int)(implicit executor: ExecutionContext): Future[BatchReadResult] =
    readAsync(fromSequenceNr, toSequenceNr, max, Int.MaxValue, fetchSize, _ => true)

  def readAsync(fromSequenceNr: Long, toSequenceNr: Long, max: Int, scanLimit: Int, fetchSize: Int, filter: DurableEvent => Boolean)(implicit executor: ExecutionContext): Future[BatchReadResult] =
    Future(read(fromSequenceNr, toSequenceNr, max, scanLimit, fetchSize, filter))

  def read(fromSequenceNr: Long, toSequenceNr: Long, max: Int, scanLimit: Int, fetchSize: Int, filter: DurableEvent => Boolean): BatchReadResult = {
    val iter = eventIterator(fromSequenceNr, toSequenceNr, fetchSize)
    val builder = new VectorBuilder[DurableEvent]

    var lastSequenceNr = fromSequenceNr - 1L
    var scanned = 0
    var filtered = 0

    while (iter.hasNext && filtered < max && scanned < scanLimit) {
      val event = iter.next()
      if (filter(event)) {
        builder += event
        filtered += 1
      }
      scanned += 1
      lastSequenceNr = event.localSequenceNr
    }
    BatchReadResult(builder.result(), lastSequenceNr)
  }

  def eventIterator(fromSequenceNr: Long, toSequenceNr: Long, fetchSize: Int): Iterator[DurableEvent] with Closeable =
    new EventIterator(fromSequenceNr, toSequenceNr, fetchSize)

  private class EventIterator(fromSequenceNr: Long, toSequenceNr: Long, fetchSize: Int) extends Iterator[DurableEvent] with Closeable {
    import cassandra.settings._
    import EventLog._

    var currentSequenceNr = math.max(fromSequenceNr, 1L)
    var currentPartition = partitionOf(currentSequenceNr, partitionSize)

    var currentIter = newIter()
    var read = currentSequenceNr != firstSequenceNr(currentPartition, partitionSize)

    def newIter(): Iterator[Row] =
      if (currentSequenceNr > toSequenceNr) Iterator.empty
      else read(lastSequenceNr(currentPartition, partitionSize) min toSequenceNr).iterator.asScala

    def read(upperSequenceNr: Long): ResultSet =
      cassandra.session.execute(preparedReadEventsStatement.bind(currentPartition: JLong, currentSequenceNr: JLong, upperSequenceNr: JLong).setFetchSize(fetchSize))

    @annotation.tailrec
    final def hasNext: Boolean = {
      if (currentIter.hasNext) {
        true
      } else if (read) {
        // some events read from current partition, try next partition
        currentPartition += 1
        currentSequenceNr = firstSequenceNr(currentPartition, partitionSize)
        currentIter = newIter()
        read = false
        hasNext
      } else {
        // no events read from current partition, we're done
        false
      }
    }

    def next(): DurableEvent = {
      val row = currentIter.next()
      currentSequenceNr = row.getLong("sequence_nr")
      read = true
      cassandra.eventFromByteBuffer(row.getBytes("event"))
    }

    override def close(): Unit = ()
  }
}
Example 149
Source File: StorageProvider.scala From eventuate with Apache License 2.0 | 5 votes |
package com.rbmhtechnology.eventuate.adapter.vertx.japi.rx

import java.lang.{ Long => JLong }

import com.rbmhtechnology.eventuate.adapter.vertx.api.{ StorageProvider => SStorageProvider }
import rx.{ Observable, Observer }

import scala.concurrent.{ ExecutionContext, Future, Promise }

// NOTE: the opening of this trait did not survive extraction of the listing;
// the readProgress signature is reconstructed here from the converter below,
// which delegates to both methods.
trait StorageProvider {
  def readProgress(id: String): Observable[JLong]
  def writeProgress(id: String, sequenceNr: JLong): Observable[JLong]
}

object StorageProvider {

  private def futureObserver[A](p: Promise[A]): Observer[A] =
    new Observer[A] {
      override def onNext(v: A): Unit = {
        if (!p.isCompleted) {
          p.success(v)
        }
      }

      override def onError(e: Throwable): Unit = {
        p.failure(e)
      }

      override def onCompleted(): Unit = {
        if (!p.isCompleted) {
          p.failure(new IllegalStateException("No item emitted by Observable"))
        }
      }
    }

  implicit class StorageProviderConverter(delegate: StorageProvider) {

    def asScala: SStorageProvider = new SStorageProvider {
      override def readProgress(id: String)(implicit executionContext: ExecutionContext): Future[Long] = {
        val p = Promise[JLong]
        delegate.readProgress(id).subscribe(futureObserver(p))
        p.future.map(Long2long)
      }

      override def writeProgress(id: String, sequenceNr: Long)(implicit executionContext: ExecutionContext): Future[Long] = {
        val p = Promise[JLong]
        delegate.writeProgress(id, sequenceNr).subscribe(futureObserver(p))
        p.future.map(Long2long)
      }
    }
  }
}
Example 150
Source File: VertxProducer.scala From eventuate with Apache License 2.0 | 5 votes |
package com.rbmhtechnology.eventuate.adapter.vertx import com.rbmhtechnology.eventuate.DurableEvent import com.rbmhtechnology.eventuate.adapter.vertx.api.EventMetadata import io.vertx.core.Vertx import io.vertx.core.eventbus.{ DeliveryOptions, Message } import scala.concurrent.duration.FiniteDuration import scala.concurrent.{ ExecutionContext, Future, Promise } trait VertxProducer { def vertx: Vertx protected def deliveryOptions(event: DurableEvent): DeliveryOptions = new DeliveryOptions().setHeaders(EventMetadata(event).toHeaders) } trait VertxPublisher extends VertxProducer { def publish(address: String, evt: DurableEvent): Unit = vertx.eventBus().publish(address, evt.payload, deliveryOptions(evt)) } trait VertxSender extends VertxProducer { import VertxHandlerConverters._ def send[A](address: String, evt: DurableEvent, timeout: FiniteDuration)(implicit ec: ExecutionContext): Future[A] = { val promise = Promise[Message[A]] vertx.eventBus().send(address, evt.payload, deliveryOptions(evt).setSendTimeout(timeout.toMillis), promise.asVertxHandler) promise.future.map(_.body) } def send(address: String, evt: DurableEvent): Unit = vertx.eventBus().send(address, evt.payload, deliveryOptions(evt)) }
Example 151
Source File: VertxEventDispatcher.scala From eventuate with Apache License 2.0 | 5 votes |
package com.rbmhtechnology.eventuate.adapter.vertx import com.rbmhtechnology.eventuate.{ DurableEvent, EventsourcedWriter } import com.rbmhtechnology.eventuate.adapter.vertx.api.EndpointRouter import scala.collection.immutable.Seq import scala.concurrent.{ ExecutionContext, Future } case class EventEnvelope(address: String, evt: DurableEvent) trait VertxEventDispatcher[R, W] extends EventsourcedWriter[R, W] with ProgressStore[R, W] { import context.dispatcher def endpointRouter: EndpointRouter def dispatch(events: Seq[EventEnvelope])(implicit ec: ExecutionContext): Future[Unit] var events: Vector[EventEnvelope] = Vector.empty override def onCommand: Receive = { case _ => } override def onEvent: Receive = { case ev => events = endpointRouter.endpoint(ev) match { case Some(endpoint) => events :+ EventEnvelope(endpoint, lastHandledEvent) case None => events } } override def write(): Future[W] = { val snr = lastSequenceNr val ft = dispatch(events).flatMap(x => writeProgress(id, snr)) events = Vector.empty ft } override def read(): Future[R] = readProgress(id) override def readSuccess(result: R): Option[Long] = Some(progress(result) + 1L) }
Example 152
Source File: VertxNoConfirmationSender.scala From eventuate with Apache License 2.0 | 5 votes |
package com.rbmhtechnology.eventuate.adapter.vertx import akka.actor.{ ActorRef, Props } import com.rbmhtechnology.eventuate.adapter.vertx.api.{ EndpointRouter, StorageProvider } import io.vertx.core.Vertx import scala.collection.immutable.Seq import scala.concurrent.{ ExecutionContext, Future } private[eventuate] object VertxNoConfirmationSender { def props(id: String, eventLog: ActorRef, endpointRouter: EndpointRouter, vertx: Vertx, storageProvider: StorageProvider): Props = Props(new VertxNoConfirmationSender(id, eventLog, endpointRouter, vertx, storageProvider)) .withDispatcher("eventuate.log.dispatchers.write-dispatcher") } private[eventuate] class VertxNoConfirmationSender(val id: String, val eventLog: ActorRef, val endpointRouter: EndpointRouter, val vertx: Vertx, val storageProvider: StorageProvider) extends VertxEventDispatcher[Long, Long] with VertxSender with SequenceNumberProgressStore { override def dispatch(events: Seq[EventEnvelope])(implicit ec: ExecutionContext): Future[Unit] = Future(events.foreach(e => send(e.address, e.evt))) }
Example 153
Source File: VertxNoConfirmationPublisher.scala From eventuate with Apache License 2.0 | 5 votes |
package com.rbmhtechnology.eventuate.adapter.vertx import akka.actor.{ ActorRef, Props } import com.rbmhtechnology.eventuate.adapter.vertx.api.{ EndpointRouter, StorageProvider } import io.vertx.core.Vertx import scala.collection.immutable.Seq import scala.concurrent.{ ExecutionContext, Future } private[eventuate] object VertxNoConfirmationPublisher { def props(id: String, eventLog: ActorRef, endpointRouter: EndpointRouter, vertx: Vertx, storageProvider: StorageProvider): Props = Props(new VertxNoConfirmationPublisher(id, eventLog, endpointRouter, vertx, storageProvider)) .withDispatcher("eventuate.log.dispatchers.write-dispatcher") } private[eventuate] class VertxNoConfirmationPublisher(val id: String, val eventLog: ActorRef, val endpointRouter: EndpointRouter, val vertx: Vertx, val storageProvider: StorageProvider) extends VertxEventDispatcher[Long, Long] with VertxPublisher with SequenceNumberProgressStore { override def dispatch(events: Seq[EventEnvelope])(implicit ec: ExecutionContext): Future[Unit] = Future(events.foreach(e => publish(e.address, e.evt))) }
Example 154
Source File: ActorStorageProvider.scala From eventuate with Apache License 2.0 | 5 votes |
package com.rbmhtechnology.eventuate.adapter.vertx import akka.actor.{ ActorSystem, Status } import akka.pattern.ask import akka.testkit.TestProbe import akka.util.Timeout import com.rbmhtechnology.eventuate.adapter.vertx.api.StorageProvider import scala.concurrent.duration._ import scala.concurrent.{ ExecutionContext, Future } class ActorStorageProvider(defaultId: String)(implicit system: ActorSystem) extends StorageProvider { implicit val timeout = Timeout(20.seconds) val probe = TestProbe() override def readProgress(id: String)(implicit executionContext: ExecutionContext): Future[Long] = probe.ref.ask(read(id)).mapTo[Long] override def writeProgress(id: String, sequenceNr: Long)(implicit executionContext: ExecutionContext): Future[Long] = probe.ref.ask(write(id, sequenceNr)).mapTo[Long] def expectRead(replySequenceNr: Long, id: String = defaultId): Unit = { probe.expectMsg(read(id)) probe.reply(replySequenceNr) } def expectWrite(sequenceNr: Long, id: String = defaultId): Unit = { probe.expectMsg(write(id, sequenceNr)) probe.reply(sequenceNr) } def expectWriteAndFail(sequenceNr: Long, failure: Throwable, id: String = defaultId): Unit = { probe.expectMsg(write(id, sequenceNr)) probe.reply(Status.Failure(failure)) } def expectWriteAnyOf(sequenceNrs: Seq[Long], id: String = defaultId): Unit = { probe.expectMsgAnyOf(sequenceNrs.map(write(id, _)): _*) probe.reply(sequenceNrs.max) } def expectNoMsg(duration: FiniteDuration): Unit = { probe.expectNoMsg(duration) } private def read(id: String): String = s"read[$id]" private def write(id: String, sequenceNr: Long): String = s"write[$id]-$sequenceNr" }
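Because the provider answers every read and write from a TestKit probe, a test can interleave calls with expectations and control the replies. A sketch under the assumption that an ActorSystem and the eventuate test classes are on the classpath:

import akka.actor.ActorSystem

import com.rbmhtechnology.eventuate.adapter.vertx.ActorStorageProvider

object ProviderUsage extends App {
  implicit val system: ActorSystem = ActorSystem("storage-test")
  import system.dispatcher

  val provider = new ActorStorageProvider("processor-1")

  // The ask is parked on the probe until the test replies to it.
  val progress = provider.readProgress("processor-1")
  provider.expectRead(replySequenceNr = 42L)
  progress.foreach(snr => println(s"stored progress: $snr"))
}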
Example 155
Source File: ClickhouseQueryExecutor.scala From clickhouse-scala-client with GNU Lesser General Public License v3.0 | 5 votes |
package com.crobox.clickhouse.dsl.execution import akka.stream.scaladsl.Source import com.crobox.clickhouse.ClickhouseClient import com.crobox.clickhouse.dsl.language.{ClickhouseTokenizerModule, TokenizerModule} import com.crobox.clickhouse.dsl.{Query, Table} import com.crobox.clickhouse.internal.QuerySettings import com.crobox.clickhouse.internal.progress.QueryProgress.QueryProgress import spray.json.{JsonReader, _} import scala.concurrent.{ExecutionContext, Future} trait ClickhouseQueryExecutor extends QueryExecutor { self: TokenizerModule => implicit val client: ClickhouseClient def execute[V: JsonReader](query: Query)(implicit executionContext: ExecutionContext, settings: QuerySettings = QuerySettings()): Future[QueryResult[V]] = { import QueryResult._ val queryResult = client.query(toSql(query.internalQuery)) queryResult.map(_.parseJson.convertTo[QueryResult[V]]) } def executeWithProgress[V: JsonReader]( query: Query )(implicit executionContext: ExecutionContext, settings: QuerySettings = QuerySettings()): Source[QueryProgress, Future[QueryResult[V]]] = { import QueryResult._ val queryResult = client.queryWithProgress(toSql(query.internalQuery)) queryResult.mapMaterializedValue(_.map(_.parseJson.convertTo[QueryResult[V]])) } override def insert[V: JsonWriter]( table: Table, values: Seq[V] )(implicit executionContext: ExecutionContext, settings: QuerySettings = QuerySettings()): Future[String] = Future { values.map(_.toJson.compactPrint).mkString("\n") + "\n" }.flatMap( entity => client.execute(s"INSERT INTO ${table.quoted} FORMAT JSONEachRow", entity) ) } object ClickhouseQueryExecutor { def default(clickhouseClient: ClickhouseClient): ClickhouseQueryExecutor = new DefaultClickhouseQueryExecutor(clickhouseClient) } class DefaultClickhouseQueryExecutor(override val client: ClickhouseClient) extends ClickhouseQueryExecutor with ClickhouseTokenizerModule
Example 156
Source File: TestSchemaClickhouseQuerySpec.scala From clickhouse-scala-client with GNU Lesser General Public License v3.0 | 5 votes |
package com.crobox.clickhouse import com.crobox.clickhouse.dsl.TestSchema import com.crobox.clickhouse.dsl.execution.ClickhouseQueryExecutor import com.crobox.clickhouse.dsl.schemabuilder.{CreateTable, Engine} import com.crobox.clickhouse.testkit.ClickhouseSpec import org.scalatest.concurrent.ScalaFutures import org.scalatest.{BeforeAndAfterAll, Suite} import scala.concurrent.ExecutionContext trait TestSchemaClickhouseQuerySpec extends ClickhouseSpec with BeforeAndAfterAll with TestSchema with ScalaFutures { this: Suite => val table1Entries: Seq[Table1Entry] = Seq() val table2Entries: Seq[Table2Entry] = Seq() implicit val ec: ExecutionContext implicit lazy val chExecutor: ClickhouseQueryExecutor = ClickhouseQueryExecutor.default(clickClient) override def beforeAll(): Unit = { super.beforeAll() val tables = for { _ <- clickClient.execute( CreateTable(OneTestTable, Engine.Memory, ifNotExists = true).query ) _ <- clickClient.execute( CreateTable( TwoTestTable, Engine.Memory, ifNotExists = true ).query ) } yield {} whenReady(tables) { _ => val inserts = for { _ <- table1Entries.into(OneTestTable) _ <- table2Entries.into(TwoTestTable) } yield {} inserts.futureValue } } override def afterAll(): Unit = { super.afterAll() } }
Example 157
Source File: ClickhouseSink.scala From clickhouse-scala-client with GNU Lesser General Public License v3.0 | 5 votes |
package com.crobox.clickhouse.stream import akka.Done import akka.stream.scaladsl.{Flow, Keep, Sink} import com.crobox.clickhouse.ClickhouseClient import com.crobox.clickhouse.internal.QuerySettings import com.typesafe.config.Config import com.typesafe.scalalogging.LazyLogging import scala.concurrent.duration._ import scala.concurrent.{ExecutionContext, Future} case class ClickhouseIndexingException(msg: String, cause: Throwable, payload: Seq[String], table: String) extends RuntimeException(msg, cause) case class Insert(table: String, jsonRow: String) object ClickhouseSink extends LazyLogging { def insertSink(config: Config, client: ClickhouseClient, indexerName: Option[String] = None)( implicit ec: ExecutionContext, settings: QuerySettings = QuerySettings() ): Sink[Insert, Future[Done]] = { val indexerGeneralConfig = config.getConfig("crobox.clickhouse.indexer") val mergedIndexerConfig = indexerName .flatMap( theIndexName => if (indexerGeneralConfig.hasPath(theIndexName)) Some(indexerGeneralConfig.getConfig(theIndexName).withFallback(indexerGeneralConfig)) else None ) .getOrElse(indexerGeneralConfig) Flow[Insert] .groupBy(Int.MaxValue, _.table) .groupedWithin(mergedIndexerConfig.getInt("batch-size"), mergedIndexerConfig.getDuration("flush-interval").getSeconds.seconds) .mapAsyncUnordered(mergedIndexerConfig.getInt("concurrent-requests"))(inserts => { val table = inserts.head.table val insertQuery = s"INSERT INTO $table FORMAT JSONEachRow" val payload = inserts.map(_.jsonRow) val payloadSql = payload.mkString("\n") client.execute(insertQuery, payloadSql) recover { case ex => throw ClickhouseIndexingException("failed to index", ex, payload, table) } map (_ => inserts) }) .mergeSubstreams .toMat(Sink.ignore)(Keep.right) } }
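The sink groups inserts per target table, batches them by batch-size and flush-interval, and writes each batch as one JSONEachRow INSERT. A wiring sketch; the ClickhouseClient constructor shown is an assumption, since that class is not part of this listing:

import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.Source
import com.crobox.clickhouse.ClickhouseClient
import com.crobox.clickhouse.stream.{ ClickhouseSink, Insert }
import com.typesafe.config.ConfigFactory

object SinkUsage extends App {
  implicit val system: ActorSystem = ActorSystem("indexer")
  implicit val materializer: ActorMaterializer = ActorMaterializer()
  implicit val ec = system.dispatcher

  val config = ConfigFactory.load()
  val client = new ClickhouseClient(Some(config)) // constructor shape assumed

  // Both rows target the same table, so they are flushed as a single batch.
  Source(List(
    Insert("events", """{"name":"a"}"""),
    Insert("events", """{"name":"b"}""")
  )).runWith(ClickhouseSink.insertSink(config, client))
}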
Example 158
Source File: ClickhouseHostHealth.scala From clickhouse-scala-client with GNU Lesser General Public License v3.0 | 5 votes |
package com.crobox.clickhouse.balancing.discovery.health import akka.NotUsed import akka.actor.{ActorSystem, Cancellable} import akka.http.scaladsl.Http import akka.http.scaladsl.model._ import akka.http.scaladsl.settings.ConnectionPoolSettings import akka.http.scaladsl.unmarshalling.Unmarshaller import akka.stream.Materializer import akka.stream.scaladsl.{Flow, Source} import com.crobox.clickhouse.internal.ClickhouseResponseParser import scala.concurrent.duration._ import scala.concurrent.{ExecutionContext, Future} import scala.util.{Failure, Success, Try} object ClickhouseHostHealth extends ClickhouseResponseParser { sealed trait ClickhouseHostStatus { val host: Uri val code: String } case class Alive(host: Uri) extends ClickhouseHostStatus { override val code: String = "ok" } case class Dead(host: Uri, reason: Throwable) extends ClickhouseHostStatus { override val code: String = "nok" } def healthFlow(host: Uri)( implicit system: ActorSystem, materializer: Materializer, executionContext: ExecutionContext ): Source[ClickhouseHostStatus, Cancellable] = { val healthCheckInterval: FiniteDuration = system.settings.config .getDuration("connection.health-check.interval") .getSeconds.seconds val healthCheckTimeout: FiniteDuration = system.settings.config .getDuration("connection.health-check.timeout") .getSeconds.seconds val healthCachedPool = Http(system).cachedHostConnectionPool[Int]( host.authority.host.address(), host.effectivePort, settings = ConnectionPoolSettings(system) .withMaxConnections(1) .withMinConnections(1) .withMaxOpenRequests(2) .withMaxRetries(3) .withUpdatedConnectionSettings( _.withIdleTimeout(healthCheckTimeout + healthCheckInterval).withConnectingTimeout(healthCheckTimeout) ) ) Source .tick(0.milliseconds, healthCheckInterval, 0) .map(tick => { (HttpRequest(method = HttpMethods.GET, uri = host), tick) }) .via(healthCachedPool) .via(parsingFlow(host)) } private[health] def parsingFlow[T]( host: Uri )(implicit ec: ExecutionContext, mat: Materializer): Flow[(Try[HttpResponse], T), ClickhouseHostStatus, NotUsed] = Flow[(Try[HttpResponse], T)].mapAsync(1) { case (Success(response @ akka.http.scaladsl.model.HttpResponse(StatusCodes.OK, _, _, _)), _) => Unmarshaller.stringUnmarshaller(decodeResponse(response).entity) .map(splitResponse) .map( stringResponse => if (stringResponse.equals(Seq("Ok."))) { Alive(host) } else { Dead(host, new IllegalArgumentException(s"Got wrong result $stringResponse")) } ) case (Success(response), _) => Future.successful(Dead(host, new IllegalArgumentException(s"Got response with status code ${response.status}"))) case (Failure(ex), _) => Future.successful(Dead(host, ex)) } }
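healthFlow polls the host root over a dedicated single-connection pool at the configured interval and maps each response onto Alive or Dead. A sketch that just prints statuses; it assumes the actor system is created with a configuration that carries the connection.health-check.* settings, as in the client's test setup:

import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.Sink
import com.crobox.clickhouse.balancing.discovery.health.ClickhouseHostHealth
import com.crobox.clickhouse.internal.ClickhouseHostBuilder

object HealthUsage extends App {
  implicit val system: ActorSystem = ActorSystem("health-check")
  implicit val materializer: ActorMaterializer = ActorMaterializer()
  implicit val ec = system.dispatcher

  // Emits a status per health-check tick until the Cancellable is cancelled.
  ClickhouseHostHealth
    .healthFlow(ClickhouseHostBuilder.toHost("localhost", Some(8123)))
    .runWith(Sink.foreach(status => println(s"${status.host} -> ${status.code}")))
}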
Example 159
Source File: ClusterConnectionFlow.scala From clickhouse-scala-client with GNU Lesser General Public License v3.0 | 5 votes |
package com.crobox.clickhouse.balancing.discovery.cluster import akka.actor.{ActorSystem, Cancellable} import akka.http.scaladsl.Http import akka.http.scaladsl.model.Uri import akka.http.scaladsl.settings.ConnectionPoolSettings import akka.stream.Materializer import akka.stream.scaladsl.Source import com.crobox.clickhouse.balancing.discovery.ConnectionManagerActor.Connections import com.crobox.clickhouse.internal.QuerySettings.ReadQueries import com.crobox.clickhouse.internal.{ClickhouseHostBuilder, ClickhouseQueryBuilder, ClickhouseResponseParser, QuerySettings} import com.typesafe.scalalogging.LazyLogging import scala.concurrent.duration._ import scala.concurrent.{ExecutionContext, Future} private[clickhouse] object ClusterConnectionFlow extends ClickhouseQueryBuilder with ClickhouseResponseParser with LazyLogging { def clusterConnectionsFlow( targetHost: => Future[Uri], scanningInterval: FiniteDuration, cluster: String )(implicit system: ActorSystem, materializer: Materializer, ec: ExecutionContext): Source[Connections, Cancellable] = { val http = Http(system) val settings = ConnectionPoolSettings(system) .withMaxConnections(1) .withMinConnections(1) .withMaxOpenRequests(2) .withMaxRetries(3) .withUpdatedConnectionSettings( _.withIdleTimeout(scanningInterval.plus(1.second)) ) Source .tick(0.millis, scanningInterval, {}) .mapAsync(1)(_ => targetHost) .mapAsync(1)(host => { val query = s"SELECT host_address FROM system.clusters WHERE cluster='$cluster'" val request = toRequest(host, query, None, QuerySettings(readOnly = ReadQueries, idempotent = Some(true)), None)( system.settings.config ) processClickhouseResponse(http.singleRequest(request, settings = settings), query, host, None) .map(splitResponse) .map(_.toSet.filter(_.nonEmpty)) .map(result => { if (result.isEmpty) { throw new IllegalArgumentException( s"Could not determine clickhouse cluster hosts for cluster $cluster and host $host. " + s"This could indicate that you are trying to use the cluster balancer to connect to a non cluster based clickhouse server. " + s"Please use the `SingleHostQueryBalancer` in that case." ) } Connections(result.map(ClickhouseHostBuilder.toHost(_, Some(8123)))) }) }) } }
Example 160
Source File: HostBalancer.scala From clickhouse-scala-client with GNU Lesser General Public License v3.0 | 5 votes |
package com.crobox.clickhouse.balancing import akka.actor.ActorSystem import akka.http.scaladsl.model._ import akka.stream.Materializer import com.crobox.clickhouse.balancing.Connection.{BalancingHosts, ClusterAware, ConnectionType, SingleHost} import com.crobox.clickhouse.balancing.discovery.ConnectionManagerActor import com.crobox.clickhouse.balancing.discovery.health.ClickhouseHostHealth import com.crobox.clickhouse.internal.ClickhouseHostBuilder import com.typesafe.config.Config import com.typesafe.scalalogging.LazyLogging import scala.collection.JavaConverters._ import scala.concurrent.duration._ import scala.concurrent.{ExecutionContext, Future} trait HostBalancer extends LazyLogging { def nextHost: Future[Uri] } object HostBalancer extends ClickhouseHostBuilder { def apply( optionalConfig: Option[Config] = None )(implicit system: ActorSystem, materializer: Materializer, ec: ExecutionContext): HostBalancer = { val config = optionalConfig.getOrElse(system.settings.config) val connectionConfig = config.getConfig("connection") val connectionType = ConnectionType(connectionConfig.getString("type")) val connectionHostFromConfig = extractHost(connectionConfig) connectionType match { case SingleHost => SingleHostBalancer(connectionHostFromConfig) case BalancingHosts => val manager = system.actorOf( ConnectionManagerActor .props(ClickhouseHostHealth.healthFlow(_)) ) MultiHostBalancer(connectionConfig .getConfigList("hosts") .asScala .toSet .map((config: Config) => extractHost(config)), manager) case ClusterAware => val manager = system.actorOf( ConnectionManagerActor.props(ClickhouseHostHealth.healthFlow(_)) ) ClusterAwareHostBalancer( connectionHostFromConfig, connectionConfig.getString("cluster"), manager, connectionConfig.getDuration("scanning-interval").getSeconds.seconds )(system, config.getDuration("host-retrieval-timeout").getSeconds.seconds, ec, materializer) } } def extractHost(connectionConfig: Config): Uri = toHost(connectionConfig.getString("host"), if (connectionConfig.hasPath("port")) Option(connectionConfig.getInt("port")) else None) }
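HostBalancer.apply reads connection.type and returns a single-host, multi-host, or cluster-aware balancer accordingly; callers only ever see nextHost. A minimal sketch, assuming the actor system was created with the client's configuration block so that the connection.* keys resolve:

import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import com.crobox.clickhouse.balancing.HostBalancer

object BalancerUsage extends App {
  implicit val system: ActorSystem = ActorSystem("clickhouse-client")
  implicit val materializer: ActorMaterializer = ActorMaterializer()
  implicit val ec = system.dispatcher

  val balancer = HostBalancer()
  // Each query asks the balancer for the next healthy host.
  balancer.nextHost.foreach(uri => println(s"next query goes to $uri"))
}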
Example 161
Source File: ClusterAwareHostBalancer.scala From clickhouse-scala-client with GNU Lesser General Public License v3.0 | 5 votes |
package com.crobox.clickhouse.balancing import akka.actor.{ActorRef, ActorSystem} import akka.http.scaladsl.model.Uri import akka.pattern.ask import akka.stream.scaladsl.Sink import akka.stream.{ActorAttributes, Materializer, Supervision} import akka.util.Timeout import com.crobox.clickhouse.balancing.discovery.ConnectionManagerActor.{GetConnection, LogDeadConnections} import com.crobox.clickhouse.balancing.discovery.cluster.ClusterConnectionFlow import scala.concurrent.duration._ import scala.concurrent.{ExecutionContext, Future} case class ClusterAwareHostBalancer(host: Uri, cluster: String = "cluster", manager: ActorRef, scanningInterval: FiniteDuration)( implicit system: ActorSystem, connectionRetrievalTimeout: Timeout, ec: ExecutionContext, materializer: Materializer ) extends HostBalancer { ClusterConnectionFlow .clusterConnectionsFlow(Future.successful(host), scanningInterval, cluster) .withAttributes( ActorAttributes.supervisionStrategy({ case ex: IllegalArgumentException => logger.error("Failed resolving hosts for cluster, stopping the flow.", ex) Supervision.stop case ex => logger.error("Failed resolving hosts for cluster, resuming.", ex) Supervision.Resume }) ) .runWith(Sink.actorRef(manager, LogDeadConnections)) override def nextHost: Future[Uri] = (manager ? GetConnection()).mapTo[Uri] }
Example 162
Source File: ClickhouseResponseParser.scala From clickhouse-scala-client with GNU Lesser General Public License v3.0 | 5 votes |
package com.crobox.clickhouse.internal import akka.http.scaladsl.coding.{Deflate, Gzip, NoCoding} import akka.http.scaladsl.model._ import akka.http.scaladsl.model.headers.{HttpEncoding, HttpEncodings} import akka.http.scaladsl.unmarshalling.Unmarshaller import akka.stream.Materializer import akka.stream.scaladsl.SourceQueue import com.crobox.clickhouse.internal.progress.QueryProgress.{QueryProgress, _} import com.crobox.clickhouse.{ClickhouseChunkedException, ClickhouseException} import scala.concurrent.{ExecutionContext, Future} import scala.util.{Failure, Success} private[clickhouse] trait ClickhouseResponseParser { protected def processClickhouseResponse(responseFuture: Future[HttpResponse], query: String, host: Uri, progressQueue: Option[SourceQueue[QueryProgress]])( implicit materializer: Materializer, executionContext: ExecutionContext ): Future[String] = responseFuture.flatMap { response => decodeResponse(response) match { case HttpResponse(StatusCodes.OK, _, entity, _) => Unmarshaller.stringUnmarshaller(entity).map(content => { if (content.contains("DB::Exception")) { //FIXME this is quite a fragile way to detect failures, hopefully nobody will have a valid exception string in the result. Check https://github.com/yandex/ClickHouse/issues/2999 throw ClickhouseException("Found exception in the query return body", query, ClickhouseChunkedException(content), StatusCodes.OK) } content }) .andThen { case Success(_) => progressQueue.foreach(queue => { queue.offer(QueryFinished) }) case Failure(exception) => progressQueue.foreach(queue => { queue.offer(QueryFailed(exception)) }) } case HttpResponse(code, _, entity, _) => progressQueue.foreach(_.offer(QueryRejected)) Unmarshaller.stringUnmarshaller(entity).flatMap( response => Future.failed( ClickhouseException(s"Server [$host] returned code $code; $response", query, statusCode = code) ) ) } } protected def decodeResponse(response: HttpResponse): HttpResponse = { val decoder = response.encoding match { case HttpEncodings.gzip => Gzip case HttpEncodings.deflate => Deflate case HttpEncodings.identity => NoCoding case HttpEncoding(enc) => throw new IllegalArgumentException(s"Unsupported response encoding: $enc") } decoder.decodeMessage(response) } protected def splitResponse(response: String): Seq[String] = response.split("\n").toSeq }
Example 163
Source File: ClickhouseClientSpec.scala From clickhouse-scala-client with GNU Lesser General Public License v3.0 | 5 votes |
package com.crobox.clickhouse import java.util.UUID import akka.actor.ActorSystem import akka.stream.{ActorMaterializer, Materializer} import akka.testkit.TestKit import com.typesafe.config.{Config, ConfigFactory} import org.scalatest.BeforeAndAfterAll import org.scalatest.concurrent.ScalaFutures import org.scalatest.flatspec.AnyFlatSpecLike import org.scalatest.matchers.should.Matchers import scala.concurrent.duration._ import scala.concurrent.{Await, ExecutionContext} import scala.util.Random abstract class ClickhouseClientSpec(val config: Config = ConfigFactory.load()) extends TestKit(ActorSystem("clickhouseClientTestSystem", config.getConfig("crobox.clickhouse.client"))) with AnyFlatSpecLike with Matchers with BeforeAndAfterAll with ScalaFutures { implicit val materializer: Materializer = ActorMaterializer() implicit val ec: ExecutionContext = system.dispatcher override implicit def patienceConfig: PatienceConfig = PatienceConfig(1.seconds, 50.millis) override protected def afterAll(): Unit = { try super.afterAll() finally Await.result(system.terminate(), 10.seconds) } def randomUUID: UUID = UUID.randomUUID def randomString: String = Random.alphanumeric.take(10).mkString def randomInt: Int = Random.nextInt(100000) }
Example 164
Source File: Dhall.scala From http4s-jdk-http-client with Apache License 2.0 | 5 votes |
import cats.effect._ import java.nio.file.{Files, Paths} import org.dhallj.core.Expr import org.dhallj.core.converters.JsonConverter import org.dhallj.imports.syntax._ import org.dhallj.parser.DhallParser import org.dhallj.yaml.YamlConverter import org.http4s.client.Client import org.http4s.client.jdkhttpclient.JdkHttpClient import sbt.{IO => _, _} import scala.concurrent.ExecutionContext import upickle.default.{ReadWriter, macroRW} object Dhall { lazy val convertDhall = taskKey[Unit]("Generate YAML/JSON from Dhall.") private lazy val http = { implicit val cs: ContextShift[IO] = IO.contextShift(ExecutionContext.global) JdkHttpClient.simple[IO].unsafeRunSync() } private def loadDhall(expr: String): Expr = { implicit val c: Client[IO] = http DhallParser .parse(expr) .normalize() .resolveImports[IO] .unsafeRunSync() .normalize() } val convertDhallTask = convertDhall := { val baseDir = (Keys.baseDirectory in LocalRootProject).value.absolutePath def convertYaml(from: String, to: String): Unit = { val dhall = loadDhall(s"$baseDir/dhall/$from.dhall") val yaml = YamlConverter.toYamlString(dhall) Files.writeString(Paths.get(s"$baseDir/$to"), yaml) } List("ci", "release", "dhall").foreach { file => convertYaml(file, s".github/workflows/$file.yml") } convertYaml("mergify", s".mergify.yml") } case class ScalaVersions(default: String, all: List[String]) object ScalaVersions { implicit val rw: ReadWriter[ScalaVersions] = macroRW } val scalaVersions = settingKey[ScalaVersions]("Read the Scala versions via Dhall") val scalaVersionsImpl = scalaVersions := { val baseDir = (Keys.baseDirectory in LocalRootProject).value.absolutePath val dhall = loadDhall(s"$baseDir/dhall/scalaVersions.dhall") val json = JsonConverter.toCompactString(dhall) upickle.default.read[ScalaVersions](json) } }
Example 165
Source File: Shifting.scala From http4s-jdk-http-client with Apache License 2.0 | 5 votes |
package org.http4s.client.jdkhttpclient import java.util.concurrent.Executors import cats.effect._ import cats.effect.testing.specs2.CatsEffect import org.http4s.implicits._ import org.specs2.mutable.Specification import scala.concurrent.ExecutionContext class Shifting extends Specification with CatsEffect { val testThreadName = "test-thread-name" val ec: ExecutionContext = ExecutionContext.fromExecutor(Executors.newCachedThreadPool { r => val t = new Thread(r) t.setName(testThreadName) t }) implicit val timer: cats.effect.Timer[IO] = IO.timer(ec) implicit val cs: cats.effect.ContextShift[IO] = IO.contextShift(ec) "The clients" should { "shift back from the HTTP thread pool" in { for { http <- JdkHttpClient.simple[IO] ws <- JdkWSClient.simple[IO] threadName = IO(Thread.currentThread().getName) name1 <- http.expect[String](uri"https://example.org") *> threadName name2 <- ws.connectHighLevel(WSRequest(uri"wss://echo.websocket.org")).use(_ => threadName) } yield List(name1, name2).forall(_ == testThreadName) } } }
Example 166
Source File: BasicAuthAuthenticatedAction.scala From smui with Apache License 2.0 | 5 votes |
package controllers.auth import java.util.Base64 import play.api.{Configuration, Logging} import play.api.mvc._ import scala.concurrent.{ExecutionContext, Future} import scala.util.control.Exception.allCatch class BasicAuthAuthenticatedAction(parser: BodyParsers.Default, appConfig: Configuration)(implicit ec: ExecutionContext) extends ActionBuilderImpl(parser) with Logging { logger.debug("In BasicAuthAuthenticatedAction") val BASIC_AUTH_USER = appConfig.getOptional[String]("smui.BasicAuthAuthenticatedAction.user") match { case Some(strUser: String) => strUser case None => logger.error(":: No value for smui.BasicAuthAuthenticatedAction.user found. Setting user to super-default.") "smui" } val BASIC_AUTH_PASS = appConfig.getOptional[String]("smui.BasicAuthAuthenticatedAction.pass") match { case Some(strUser: String) => strUser case None => logger.error(":: No value for smui.BasicAuthAuthenticatedAction.pass found. Setting pass to super-default.") "smui" } override def invokeBlock[A](request: Request[A], block: Request[A] => Future[Result]): Future[Result] = { logger.debug(s":: invokeBlock :: request.path = ${request.path}") def requestAuthenticated(request: Request[A]): Boolean = { request.headers.get("Authorization") match { case Some(authorization: String) => authorization.split(" ").drop(1).headOption.filter { encoded => val authInfo = new String(Base64.getDecoder().decode(encoded.getBytes)).split(":").toList allCatch.opt { val (username, password) = (authInfo.head, authInfo(1)) username.equals(BASIC_AUTH_USER) && password.equals(BASIC_AUTH_PASS) } getOrElse false }.exists(_ => true) case None => false } } if (requestAuthenticated(request)) { block(request) } else { Future { // TODO return error JSON with authorization violation details, redirect target eventually (instead of empty 401 body) Results.Unauthorized("401 Unauthorized").withHeaders(("WWW-Authenticate", "Basic realm=SMUI")) } } } }
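The action splits the Authorization header on a space and Base64-decodes the credential pair, so a client must send the standard Basic scheme value. The expected header can be computed like this:

import java.util.Base64

object BasicAuthHeader extends App {
  val user = "smui" // the configured smui.BasicAuthAuthenticatedAction.user
  val pass = "smui" // the configured smui.BasicAuthAuthenticatedAction.pass

  val encoded = Base64.getEncoder.encodeToString(s"$user:$pass".getBytes)
  // Send with every request, e.g. curl -H "Authorization: Basic $encoded" ...
  println(s"Authorization: Basic $encoded")
}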
Example 167
Source File: JWTJsonAuthenticatedAction.scala From smui with Apache License 2.0 | 5 votes |
package controllers.auth import com.jayway.jsonpath.JsonPath import net.minidev.json.JSONArray import pdi.jwt.{JwtAlgorithm, JwtClaim, JwtJson} import play.api.mvc._ import play.api.{Configuration, Logging} import scala.concurrent.{ExecutionContext, Future} import scala.util.{Failure, Success, Try} class JWTJsonAuthenticatedAction(parser: BodyParsers.Default, appConfig: Configuration)(implicit ec: ExecutionContext) extends ActionBuilderImpl(parser) with Logging { logger.debug("In JWTJsonAuthenticatedAction") private val JWT_LOGIN_URL = getValueFromConfigWithFallback("smui.JWTJsonAuthenticatedAction.login.url", "") private val JWT_COOKIE = getValueFromConfigWithFallback("smui.JWTJsonAuthenticatedAction.cookie.name", "jwt") private val JWT_PUBLIC_KEY = getValueFromConfigWithFallback("smui.JWTJsonAuthenticatedAction.public.key", "") private val JWT_ALGORITHM = getValueFromConfigWithFallback("smui.JWTJsonAuthenticatedAction.algorithm", "rsa") private val JWT_AUTHORIZATION_ACTIVE = getValueFromConfigWithFallback("smui.JWTJsonAuthenticatedAction.authorization.active", "false").toBoolean private val JWT_ROLES_JSON_PATH = getValueFromConfigWithFallback("smui.JWTJsonAuthenticatedAction.authorization.json.path", "$.roles") private val JWT_AUTHORIZED_ROLES = getValueFromConfigWithFallback("smui.JWTJsonAuthenticatedAction.authorization.roles", "admin") private lazy val authorizedRoles = JWT_AUTHORIZED_ROLES.replaceAll("\\s", "").split(",").toSeq private def getValueFromConfigWithFallback(key: String, default: String): String = { appConfig.getOptional[String](key) match { case Some(value: String) => value case None => logger.error(s":: No value for $key found. Setting pass to super-default.") default } } private def decodeJwtToken(jwt: String): Try[JwtClaim] = { JWT_ALGORITHM match { case "hmac" => JwtJson.decode(jwt, JWT_PUBLIC_KEY, JwtAlgorithm.allHmac()) case "asymmetric" => JwtJson.decode(jwt, JWT_PUBLIC_KEY, JwtAlgorithm.allAsymmetric()) case "rsa" => JwtJson.decode(jwt, JWT_PUBLIC_KEY, JwtAlgorithm.allRSA()) case "ecdsa" => JwtJson.decode(jwt, JWT_PUBLIC_KEY, JwtAlgorithm.allECDSA()) case _ => JwtJson.decode(jwt, JWT_PUBLIC_KEY, JwtAlgorithm.allRSA()) } } private def getJwtCookie[A](request: Request[A]): Option[Cookie] = { request.cookies.get(JWT_COOKIE) } private def isAuthenticated(jwt: String): Option[JwtClaim] = { decodeJwtToken(jwt) match { case Success(token) => Some(token) case Failure(_) => None } } private def isAuthorized(token: String): Boolean = { if (JWT_AUTHORIZATION_ACTIVE) { val rolesInToken = Try(JsonPath.read[JSONArray](token, JWT_ROLES_JSON_PATH).toArray.toSeq) rolesInToken match { case Success(roles) => roles.forall(authorizedRoles.contains) case _ => false } } else true } private def redirectToLoginPage(): Future[Result] = { Future { Results.Redirect(JWT_LOGIN_URL) } } override def invokeBlock[A](request: Request[A], block: Request[A] => Future[Result]): Future[Result] = { logger.debug(s":: invokeBlock :: request.path = ${request.path}") getJwtCookie(request) match { case Some(cookie) => isAuthenticated(cookie.value) match { case Some(token) if isAuthorized(token.content) => block(request) case _ => redirectToLoginPage() } case None => redirectToLoginPage() } } }
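For the hmac setting, a cookie the action will accept can be minted with the same jwt-scala library. A sketch under the assumption that smui.JWTJsonAuthenticatedAction.public.key holds the shared HMAC secret and authorization reads roles from the default $.roles path:

import pdi.jwt.{ JwtAlgorithm, JwtClaim, JwtJson }

object JwtCookieValue extends App {
  val secret = "change-me" // assumed shared secret (public.key with algorithm = "hmac")

  // The claim body carries the roles array checked by isAuthorized.
  val claim = JwtClaim(content = """{"roles": ["admin"]}""")
  val token = JwtJson.encode(claim, secret, JwtAlgorithm.HS256)

  // Use this as the value of the cookie named by cookie.name (default "jwt").
  println(s"jwt=$token")
}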
Example 168
Source File: AuthActionFactory.scala From smui with Apache License 2.0 | 5 votes |
package controllers.auth

import javax.inject.Inject
import play.api.{Configuration, Logging}
import play.api.mvc._

import scala.concurrent.ExecutionContext

class AuthActionFactory @Inject()(parser: BodyParsers.Default, appConfig: Configuration)(implicit ec: ExecutionContext) extends Logging {

  private def instantiateAuthAction(strClazz: String, defaultAction: ActionBuilder[Request, AnyContent]): ActionBuilder[Request, AnyContent] = {
    try {

      // TODO if possible, instantiate authenticatedAction only once, not with every controller call

      def instantiate(clazz: java.lang.Class[_])(args: AnyRef*): AnyRef = {
        val constructor = clazz.getConstructors()(0)
        constructor.newInstance(args: _*).asInstanceOf[AnyRef]
      }

      val authenticatedAction = instantiate(
        java.lang.Class.forName(strClazz)
      )(parser, appConfig, ec)

      logger.debug(":: having instantiated " + authenticatedAction.toString)

      authenticatedAction.asInstanceOf[ActionBuilder[Request, AnyContent]]

    } catch {
      case e: Throwable =>
        // TODO consider no longer serving requests if an exception occurred during bootstrap of authAction. DO NOT return the defaultAction.
        logger.error(":: Exception during instantiation of smui.authAction :: " + e.getMessage)
        logger.error(":: Authentication protection IS NOT ACTIVE!")
        defaultAction
    }
  }

  def getAuthenticatedAction(defaultAction: ActionBuilder[Request, AnyContent]): ActionBuilder[Request, AnyContent] = {
    appConfig.getOptional[String]("smui.authAction") match {
      case Some(strClazz: String) =>
        if (strClazz.trim().equals("scala.None")) defaultAction
        else instantiateAuthAction(strClazz, defaultAction)
      case None => defaultAction
    }
  }
}
Example 169
Source File: HomeController.scala From smui with Apache License 2.0 | 5 votes |
package controllers import javax.inject.Inject import controllers.auth.AuthActionFactory import play.api.{Configuration, Logging} import play.api.mvc._ import scala.concurrent.{ExecutionContext, Future} import models.FeatureToggleModel._ class HomeController @Inject()(cc: MessagesControllerComponents, appConfig: Configuration, featureToggleService: FeatureToggleService, authActionFactory: AuthActionFactory)(implicit executionContext: ExecutionContext) extends MessagesAbstractController(cc) with Logging { def index(urlPath: String) = authActionFactory.getAuthenticatedAction(Action).async { Future { logger.debug("In HomeController :: index") Ok( views.html.home( featureToggleService.getJsFrontendToogleList ) ) }(executionContext) // TODO eval withSecurity ... because of play.filters.headers.contentSecurityPolicy (and resolve general setup in application.conf) } // TODO refactor authorizationTestControllerAction into a proper controller behaviour test }
Example 170
Source File: DatabaseTest.scala From watr-works with Apache License 2.0 | 5 votes |
package edu.umass.cs.iesl.watr package watrcolors import org.scalatest._ import corpora._ import corpora.database._ import workflow._ import cats.effect._ import scala.concurrent.ExecutionContext trait DatabaseBaseTest extends Matchers with CorpusTestingUtil { implicit def contextShift: ContextShift[IO] = IO.contextShift(ExecutionContext.global) lazy val corpusAccessDB = new CorpusAccessDB( dbname="watrdev", dbuser="watrworker", dbpass="watrpasswd" ) def workflowApi: WorkflowApi = corpusAccessDB.workflowApi def userbaseApi: UserbaseApi = corpusAccessDB.userbaseApi def annotApi: DocumentAnnotationApi = corpusAccessDB.annotApi def corpusLockApi: CorpusLockingApi = corpusAccessDB.corpusLockApi def createEmptyDocumentZoningApi(): DocumentZoningApi = { println("Dropping/recreating DB tables") corpusAccessDB.runqOnce { corpusAccessDB.veryUnsafeDropDatabase().run } corpusAccessDB.dropAndRecreate corpusAccessDB.docStore } } trait DatabaseFreeSpec extends FreeSpec with DatabaseBaseTest with BeforeAndAfterEach { override def beforeEach(): Unit = { // corpusAccessDB.reinit() } override def afterEach(): Unit = {} } trait DatabaseTest extends FlatSpec with DatabaseBaseTest with BeforeAndAfterEach { override def beforeEach(): Unit = { println("re-initing db connections") // corpusAccessDB.reinit() } override def afterEach(): Unit = { } }
Example 171
Source File: RokkuS3Proxy.scala From rokku with Apache License 2.0 | 5 votes |
package com.ing.wbaa.rokku.proxy import akka.Done import akka.actor.ActorSystem import akka.http.scaladsl.Http import akka.http.scaladsl.server.Directives._ import akka.stream.ActorMaterializer import com.ing.wbaa.rokku.proxy.api.{ AdminService, HealthService, PostRequestActions, ProxyServiceWithListAllBuckets } import com.ing.wbaa.rokku.proxy.config.HttpSettings import com.typesafe.scalalogging.LazyLogging import scala.concurrent.{ ExecutionContext, Future } import scala.util.{ Failure, Success } trait RokkuS3Proxy extends LazyLogging with ProxyServiceWithListAllBuckets with PostRequestActions with HealthService with AdminService { protected[this] implicit def system: ActorSystem implicit val materializer: ActorMaterializer = ActorMaterializer()(system) protected[this] def httpSettings: HttpSettings protected[this] implicit val executionContext: ExecutionContext = system.dispatcher // The routes we serve. final val allRoutes = adminRoute ~ healthRoute ~ proxyServiceRoute // Details about the server binding. lazy val startup: Future[Http.ServerBinding] = Http(system).bindAndHandle(allRoutes, httpSettings.httpBind, httpSettings.httpPort) .andThen { case Success(binding) => logger.info(s"Proxy service started listening: ${binding.localAddress}") case Failure(reason) => logger.error("Proxy service failed to start.", reason) } def shutdown(): Future[Done] = { startup.flatMap(_.unbind) .andThen { case Success(_) => logger.info("Proxy service stopped.") case Failure(reason) => logger.error("Proxy service failed to stop.", reason) } } }
Example 172
Source File: HealthService.scala From rokku with Apache License 2.0 | 5 votes |
package com.ing.wbaa.rokku.proxy.api import akka.http.scaladsl.model.StatusCodes import akka.http.scaladsl.server.Directives._ import akka.http.scaladsl.server.{ Route, StandardRoute } import com.ing.wbaa.rokku.proxy.data.HealthCheck.{ RGWListBuckets, S3ListBucket } import com.ing.wbaa.rokku.proxy.handler.radosgw.RadosGatewayHandler import com.ing.wbaa.rokku.proxy.provider.aws.S3Client import com.typesafe.scalalogging.LazyLogging import java.util.concurrent.ConcurrentHashMap import scala.collection.JavaConverters._ import scala.collection.mutable import scala.concurrent.{ ExecutionContext, Future } import scala.util.{ Failure, Success, Try } object HealthService { private def timestamp: Long = System.currentTimeMillis() private val statusMap = new ConcurrentHashMap[Long, StandardRoute]() private def clearStatus(): Unit = statusMap.clear() private def addStatus(probeResult: StandardRoute): StandardRoute = statusMap.put(timestamp, probeResult) private def getCurrentStatusMap: Future[mutable.Map[Long, StandardRoute]] = Future.successful(statusMap.asScala) private def getRouteStatus(implicit ec: ExecutionContext): Future[Option[StandardRoute]] = getCurrentStatusMap.map(_.headOption.map { case (_, r) => r }) } trait HealthService extends RadosGatewayHandler with S3Client with LazyLogging { protected[this] implicit def executionContext: ExecutionContext import HealthService.{ addStatus, getCurrentStatusMap, clearStatus, getRouteStatus, timestamp } private lazy val interval = storageS3Settings.hcInterval private def updateStatus: Future[StandardRoute] = Future { clearStatus() storageS3Settings.hcMethod match { case RGWListBuckets => addStatus(execProbe(listAllBuckets _)) case S3ListBucket => addStatus(execProbe(listBucket _)) } } private def updateStatusAndGet: Future[Option[StandardRoute]] = for { _ <- updateStatus s <- getRouteStatus } yield s def getStatus(currentTime: Long): Future[Option[StandardRoute]] = getCurrentStatusMap.flatMap(_ match { case m if m.isEmpty => logger.debug("Status cache empty, running probe") updateStatusAndGet case m => m.keys.map { case entryTime if (entryTime + interval) < currentTime => logger.debug("Status entry expired, renewing") updateStatusAndGet case _ => logger.debug("Serving status from cache") Future.successful(m.map { case (_, r) => r }.headOption) }.head }) private def execProbe[A](p: () => A): StandardRoute = Try { p() } match { case Success(_) => complete("pong") case Failure(ex) => complete(StatusCodes.InternalServerError -> s"storage not available - $ex") } final val healthRoute: Route = path("ping") { get { onComplete(getStatus(timestamp)) { case Success(opt) => opt.getOrElse(complete(StatusCodes.InternalServerError -> "Failed to read status cache")) case Failure(e) => complete(StatusCodes.InternalServerError -> "Failed to read status cache " + e.getMessage) } } } }
Example 173
Source File: PostRequestActions.scala From rokku with Apache License 2.0 | 5 votes |
package com.ing.wbaa.rokku.proxy.api

import akka.Done
import akka.http.scaladsl.model._
import com.ing.wbaa.rokku.proxy.data._
import com.ing.wbaa.rokku.proxy.handler.LoggerHandlerWithId
import com.ing.wbaa.rokku.proxy.handler.parsers.RequestParser.AWSRequestType
import com.ing.wbaa.rokku.proxy.metrics.MetricsFactory
import com.ing.wbaa.rokku.proxy.util.S3Utils
import com.typesafe.config.ConfigFactory

import scala.concurrent.{ ExecutionContext, Future }
import scala.util.Failure

trait PostRequestActions {

  protected[this] implicit def executionContext: ExecutionContext

  private val logger = new LoggerHandlerWithId

  private[this] def bucketNotificationEnabled = ConfigFactory.load().getBoolean("rokku.bucketNotificationEnabled")

  private[this] def atlasEnabled = ConfigFactory.load().getBoolean("rokku.atlas.enabled")

  protected[this] def createLineageFromRequest(httpRequest: HttpRequest, userSTS: User, userIPs: UserIps)(implicit id: RequestId): Future[Done]

  protected[this] def emitEvent(s3Request: S3Request, method: HttpMethod, principalId: String, awsRequest: AWSRequestType)(implicit id: RequestId): Future[Done]

  protected[this] def setDefaultBucketAclAndPolicy(bucketName: String)(implicit id: RequestId): Future[Unit]

  protected[this] def awsRequestFromRequest(request: HttpRequest): AWSRequestType

  private[this] def createAtlasLineage(response: HttpResponse, httpRequest: HttpRequest, userSTS: User, userIPs: UserIps)(implicit id: RequestId): Future[Done] =
    if (atlasEnabled && (response.status == StatusCodes.OK || response.status == StatusCodes.NoContent)) {
      // delete on AWS response 204
      logger.debug("Atlas integration enabled, about to create Lineage for the request")
      createLineageFromRequest(httpRequest, userSTS, userIPs) map (_ => Done)
    } else {
      Future.successful(Done)
    }

  private[this] def createBucketNotification(response: HttpResponse, httpRequest: HttpRequest, s3Request: S3Request, userSTS: User)(implicit id: RequestId): Future[Done] =
    httpRequest.method match {
      case HttpMethods.POST | HttpMethods.PUT | HttpMethods.DELETE if bucketNotificationEnabled && (response.status == StatusCodes.OK || response.status == StatusCodes.NoContent) =>
        MetricsFactory.incrementObjectsUploaded(httpRequest.method)
        emitEvent(s3Request, httpRequest.method, userSTS.userName.value, awsRequestFromRequest(httpRequest))
      case _ => Future.successful(Done)
    }

  private[this] def updateBucketPermissions(httpRequest: HttpRequest, s3Request: S3Request)(implicit id: RequestId): Future[Done] = {
    val fullPath = S3Utils.getPathNameFromUrlOrHost(httpRequest)
    val bucketName = S3Utils.getBucketName(fullPath)
    logger.debug("trying updateBucketPermissions for bucket={}, fullPath={}", bucketName, fullPath)
    val isPathOnlyWithBucketName = fullPath.split("/").length == 2
    if (httpRequest.method == HttpMethods.PUT && isPathOnlyWithBucketName) {
      setDefaultBucketAclAndPolicy(bucketName) map (_ => Done)
    } else {
      logger.debug("not a create-bucket command, so updateBucketPermissions is not needed")
      Future.successful(Done)
    }
  }

  protected[this] def handlePostRequestActions(response: HttpResponse, httpRequest: HttpRequest, s3Request: S3Request, userSTS: User)(implicit id: RequestId): Unit = {
    val lineage = createAtlasLineage(response, httpRequest, userSTS, s3Request.userIps)
    val notification = createBucketNotification(response, httpRequest, s3Request, userSTS)
    val permissions = updateBucketPermissions(httpRequest, s3Request)

    // Set handlers to log errors
    lineage.andThen({ case Failure(err) => logger.error(s"Error during lineage creation: $err") })
    notification.andThen({ case Failure(err) => logger.error(s"Error while emitting bucket notification: $err") })
    permissions.andThen({ case Failure(err) => logger.error(s"Error while setting bucket permissions: $err") })
  }
}
Example 174
Source File: RequestHandlerS3.scala From rokku with Apache License 2.0 | 5 votes |
package com.ing.wbaa.rokku.proxy.handler import akka.actor.ActorSystem import akka.http.scaladsl.Http import akka.http.scaladsl.model._ import akka.http.scaladsl.model.headers.RawHeader import com.ing.wbaa.rokku.proxy.config.StorageS3Settings import com.ing.wbaa.rokku.proxy.data.{ RequestId, S3Request, User } import com.ing.wbaa.rokku.proxy.handler.exception.RokkuThrottlingException import com.ing.wbaa.rokku.proxy.handler.radosgw.RadosGatewayHandler import com.ing.wbaa.rokku.proxy.provider.aws.S3Client import com.ing.wbaa.rokku.proxy.queue.UserRequestQueue import scala.concurrent.{ ExecutionContext, Future } import scala.util.Success trait RequestHandlerS3 extends RadosGatewayHandler with S3Client with UserRequestQueue { private val logger = new LoggerHandlerWithId protected[this] implicit def system: ActorSystem protected[this] implicit def executionContext: ExecutionContext protected[this] def storageS3Settings: StorageS3Settings protected[this] def filterResponse(request: HttpRequest, userSTS: User, s3request: S3Request, response: HttpResponse)(implicit id: RequestId): HttpResponse protected[this] def fireRequestToS3(request: HttpRequest, user: User)(implicit id: RequestId): Future[HttpResponse] = { if (storageS3Settings.isRequestUserQueueEnabled) { if (addIfAllowedUserToRequestQueue(user)) { fireRequestToS3(request).andThen { case _ => decrement(user) } } else { implicit val returnStatusCode: StatusCodes.ServerError = StatusCodes.ServiceUnavailable logger.warn("user {} is sending too many requests", user.userName.value) Future.failed(new RokkuThrottlingException("Throttling")) } } else { fireRequestToS3(request) } } }
Example 175
Source File: S3Client.scala From rokku with Apache License 2.0 | 5 votes |
package com.ing.wbaa.rokku.proxy.provider.aws

import com.amazonaws.auth.{ AWSStaticCredentialsProvider, BasicAWSCredentials }
import com.amazonaws.client.builder.AwsClientBuilder
import com.amazonaws.regions.Regions
import com.amazonaws.services.s3.model.{ AccessControlList, BucketPolicy, GroupGrantee, Permission }
import com.amazonaws.services.s3.{ AmazonS3, AmazonS3ClientBuilder }
import com.ing.wbaa.rokku.proxy.config.StorageS3Settings
import com.ing.wbaa.rokku.proxy.data.RequestId
import com.ing.wbaa.rokku.proxy.handler.LoggerHandlerWithId

import scala.concurrent.{ ExecutionContext, Future }
import scala.util.{ Failure, Success, Try }

trait S3Client {
  protected[this] implicit def executionContext: ExecutionContext

  private val logger = new LoggerHandlerWithId
  protected[this] def storageS3Settings: StorageS3Settings

  protected[this] lazy val s3Client: AmazonS3 = {
    val credentials = new BasicAWSCredentials(
      storageS3Settings.storageS3AdminAccesskey,
      storageS3Settings.storageS3AdminSecretkey)

    val endpointConfiguration = new AwsClientBuilder.EndpointConfiguration(
      s"http://${storageS3Settings.storageS3Authority.host.address()}:${storageS3Settings.storageS3Authority.port}",
      Regions.US_EAST_1.getName)

    AmazonS3ClientBuilder.standard()
      .withPathStyleAccessEnabled(true)
      .withCredentials(new AWSStaticCredentialsProvider(credentials))
      .withEndpointConfiguration(endpointConfiguration)
      .build()
  }

  protected[this] def setDefaultBucketAclAndPolicy(bucketName: String)(implicit id: RequestId): Future[Unit] = Future {
    Try {
      logger.info("setting bucket acls and policies for bucket {}", bucketName)
      val acl = s3Client.getBucketAcl(bucketName)
      acl.revokeAllPermissions(GroupGrantee.AuthenticatedUsers)
      acl.grantPermission(GroupGrantee.AuthenticatedUsers, Permission.Read)
      acl.grantPermission(GroupGrantee.AuthenticatedUsers, Permission.Write)
      s3Client.setBucketAcl(bucketName, acl)
      s3Client.setBucketPolicy(bucketName, """{"Statement": [{"Action": ["s3:GetObject"],"Effect": "Allow","Principal": "*","Resource": ["arn:aws:s3:::*"]}],"Version": "2012-10-17"}""")
    } match {
      case Failure(exception) => logger.error("setting bucket acls and policies ex={}", exception.getMessage)
      case Success(_)         => logger.info("acls and policies for bucket {} done", bucketName)
    }
  }

  def getBucketAcl(bucketName: String): Future[AccessControlList] = Future {
    s3Client.getBucketAcl(bucketName)
  }

  def getBucketPolicy(bucketName: String): Future[BucketPolicy] = Future {
    s3Client.getBucketPolicy(bucketName)
  }

  def listBucket: String = {
    s3Client.listObjects(storageS3Settings.bucketName).getBucketName
  }
}
Example 176
Source File: AuthenticationProviderSTS.scala From rokku with Apache License 2.0 | 5 votes |
package com.ing.wbaa.rokku.proxy.provider

import akka.actor.ActorSystem
import akka.http.scaladsl.Http
import akka.http.scaladsl.model.headers.RawHeader
import akka.http.scaladsl.model.{ HttpRequest, StatusCodes, Uri }
import akka.http.scaladsl.unmarshalling.Unmarshal
import akka.stream.Materializer
import com.ing.wbaa.rokku.proxy.config.StsSettings
import com.ing.wbaa.rokku.proxy.data.{ AwsRequestCredential, JsonProtocols, RequestId, User, UserRawJson }
import com.ing.wbaa.rokku.proxy.handler.LoggerHandlerWithId
import com.ing.wbaa.rokku.proxy.util.JwtToken

import scala.concurrent.{ ExecutionContext, Future }

trait AuthenticationProviderSTS extends JsonProtocols with JwtToken {

  private val logger = new LoggerHandlerWithId

  import AuthenticationProviderSTS.STSException
  import spray.json._

  protected[this] implicit def system: ActorSystem
  protected[this] implicit def executionContext: ExecutionContext
  protected[this] implicit def materializer: Materializer

  protected[this] def stsSettings: StsSettings

  protected[this] def areCredentialsActive(awsRequestCredential: AwsRequestCredential)(implicit id: RequestId): Future[Option[User]] = {
    val QueryParameters =
      Map("accessKey" -> awsRequestCredential.accessKey.value) ++
        awsRequestCredential.sessionToken.map(s => "sessionToken" -> s.value)

    val uri = stsSettings.stsBaseUri
      .withPath(Uri.Path("/isCredentialActive"))
      .withQuery(Uri.Query(QueryParameters))

    Http()
      .singleRequest(
        HttpRequest(uri = uri)
          .addHeader(RawHeader("Authorization", createInternalToken))
          .addHeader(RawHeader("x-rokku-request-id", id.value))
      )
      .flatMap { response =>
        response.status match {
          case StatusCodes.OK =>
            Unmarshal(response.entity).to[String].map { jsonString =>
              Some(User(jsonString.parseJson.convertTo[UserRawJson]))
            }

          case StatusCodes.Forbidden =>
            logger.error(s"User not authenticated " +
              s"with accessKey (${awsRequestCredential.accessKey.value}) " +
              s"and sessionToken (${awsRequestCredential.sessionToken})")
            Future.successful(None)

          case c =>
            val msg = s"Received unexpected StatusCode ($c) for " +
              s"accessKey (${awsRequestCredential.accessKey.value}) " +
              s"and sessionToken (${awsRequestCredential.sessionToken})"
            logger.error(msg)
            Future.failed(STSException(msg))
        }
      }
  }
}

object AuthenticationProviderSTS {
  final case class STSException(private val message: String, private val cause: Throwable = None.orNull)
    extends Exception(message, cause)
}
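Since areCredentialsActive is protected[this], callers reach it from inside the mixin rather than on a provider instance. A hypothetical usage sketch (the wrapper trait and credential values are placeholders, not rokku code):

import com.ing.wbaa.rokku.proxy.data.{ AwsAccessKey, AwsRequestCredential, AwsSessionToken, RequestId, User }
import scala.concurrent.Future

// Sketch: expose the protected credential check through a thin mixin.
trait CredentialChecks extends AuthenticationProviderSTS {
  def userFor(accessKey: String, token: String)(implicit id: RequestId): Future[Option[User]] =
    areCredentialsActive(AwsRequestCredential(AwsAccessKey(accessKey), Some(AwsSessionToken(token))))
}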
Example 177
Source File: EventProducer.scala From rokku with Apache License 2.0 | 5 votes |
package com.ing.wbaa.rokku.proxy.provider.kafka

import akka.Done
import akka.http.scaladsl.model.HttpMethod
import com.ing.wbaa.rokku.proxy.config.KafkaSettings
import com.ing.wbaa.rokku.proxy.data.RequestId
import com.ing.wbaa.rokku.proxy.handler.LoggerHandlerWithId
import com.ing.wbaa.rokku.proxy.metrics.MetricsFactory
import org.apache.kafka.clients.CommonClientConfigs
import org.apache.kafka.clients.producer.{ KafkaProducer, ProducerConfig, ProducerRecord, RecordMetadata }
import org.apache.kafka.common.serialization.StringSerializer

import scala.concurrent.{ ExecutionContext, Future }

trait EventProducer {

  private val logger = new LoggerHandlerWithId

  import scala.collection.JavaConverters._

  protected[this] implicit val kafkaSettings: KafkaSettings
  protected[this] implicit val executionContext: ExecutionContext

  private lazy val config: Map[String, Object] = Map[String, Object](
    "bootstrap.servers" -> kafkaSettings.bootstrapServers,
    ProducerConfig.RETRIES_CONFIG -> kafkaSettings.retries,
    ProducerConfig.RECONNECT_BACKOFF_MS_CONFIG -> kafkaSettings.retriesBackOff,
    ProducerConfig.RECONNECT_BACKOFF_MAX_MS_CONFIG -> kafkaSettings.retriesBackOffMax,
    CommonClientConfigs.SECURITY_PROTOCOL_CONFIG -> kafkaSettings.protocol,
    ProducerConfig.MAX_BLOCK_MS_CONFIG -> kafkaSettings.maxblock,
    ProducerConfig.REQUEST_TIMEOUT_MS_CONFIG -> kafkaSettings.requestTimeoutMs,
    "ssl.truststore.location" -> kafkaSettings.sslTruststoreLocation,
    "ssl.truststore.password" -> kafkaSettings.sslTruststorePassword,
    "ssl.keystore.location" -> kafkaSettings.sslKeystoreLocation,
    "ssl.keystore.password" -> kafkaSettings.sslKeystorePassword,
    "ssl.key.password" -> kafkaSettings.sslKeyPassword
  )

  private lazy val kafkaProducer: KafkaProducer[String, String] =
    new KafkaProducer(config.asJava, new StringSerializer, new StringSerializer)

  def sendSingleMessage(event: String, topic: String, httpMethod: Option[HttpMethod] = None)(implicit id: RequestId): Future[Done] = {
    kafkaProducer
      .send(new ProducerRecord[String, String](topic, event), (metadata: RecordMetadata, exception: Exception) => {
        exception match {
          case e: Exception =>
            MetricsFactory.incrementKafkaSendErrors
            logger.error("error in sending event {} to topic {}, error={}", event, topic, e)
            throw new Exception(e)
          case _ =>
            httpMethod.map { m => MetricsFactory.incrementKafkaNotificationsSent(m) }
            logger.debug("Message sent {} to kafka, offset {}", event, metadata.offset())
        }
      }) match {
        case _ => Future(Done)
      }
  }
}
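Note that sendSingleMessage completes its Future(Done) as soon as the record is handed to the producer; the broker's acknowledgement only drives the callback. If completion should instead reflect the ack, a Promise can bridge the callback into the returned future. A sketch of that alternative (not the project's code):

import akka.Done
import org.apache.kafka.clients.producer.{ Callback, KafkaProducer, ProducerRecord, RecordMetadata }
import scala.concurrent.{ Future, Promise }

// Sketch: complete the Future only when the broker acks (or fails) the record.
def sendAcked(producer: KafkaProducer[String, String], topic: String, event: String): Future[Done] = {
  val p = Promise[Done]()
  producer.send(new ProducerRecord[String, String](topic, event), new Callback {
    override def onCompletion(metadata: RecordMetadata, exception: Exception): Unit =
      if (exception != null) p.failure(exception) else p.success(Done)
  })
  p.future
}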
Example 178
Source File: AuditLogProviderItTest.scala From rokku with Apache License 2.0 | 5 votes |
package com.ing.wbaa.rokku.proxy.provider

import java.net.InetAddress

import akka.actor.ActorSystem
import akka.http.scaladsl.model.{HttpMethods, HttpRequest, RemoteAddress, StatusCodes}
import com.ing.wbaa.rokku.proxy.config.KafkaSettings
import com.ing.wbaa.rokku.proxy.data._
import com.ing.wbaa.rokku.proxy.handler.parsers.RequestParser.RequestTypeUnknown
import net.manub.embeddedkafka.{EmbeddedKafka, EmbeddedKafkaConfig}
import org.scalatest.diagrams.Diagrams
import org.scalatest.wordspec.AnyWordSpecLike

import scala.concurrent.ExecutionContext

class AuditLogProviderItTest extends AnyWordSpecLike with Diagrams with EmbeddedKafka with AuditLogProvider {

  implicit val testSystem: ActorSystem = ActorSystem("kafkaTest")

  private val testKafkaPort = 9093

  override def auditEnabled = true

  override implicit val kafkaSettings: KafkaSettings = new KafkaSettings(testSystem.settings.config) {
    override val bootstrapServers: String = s"localhost:$testKafkaPort"
  }

  override implicit val executionContext: ExecutionContext = testSystem.dispatcher

  implicit val requestId: RequestId = RequestId("test")

  val s3Request = S3Request(AwsRequestCredential(AwsAccessKey("a"), None), Some("demobucket"), Some("s3object"), Read())
    .copy(headerIPs = HeaderIPs(
      Some(RemoteAddress(InetAddress.getByName("127.0.0.1"))),
      Some(Seq(RemoteAddress(InetAddress.getByName("1.1.1.1")))),
      Some(RemoteAddress(InetAddress.getByName("2.2.2.2")))))

  "AuditLogProvider" should {
    "send audit" in {
      implicit val config = EmbeddedKafkaConfig(kafkaPort = testKafkaPort)

      withRunningKafka {
        Thread.sleep(3000)
        val createEventsTopic = "audit_events"
        createCustomTopic(createEventsTopic)
        auditLog(s3Request, HttpRequest(HttpMethods.PUT, "http://localhost", Nil), "testUser", RequestTypeUnknown(), StatusCodes.Processing)
        val result = consumeFirstStringMessageFrom(createEventsTopic)
        assert(result.contains("\"eventName\":\"PUT\""))
        assert(result.contains("\"sourceIPAddress\":\"ClientIp=unknown|X-Real-IP=127.0.0.1|X-Forwarded-For=1.1.1.1|Remote-Address=2.2.2.2\""))
        assert(result.contains("\"x-amz-request-id\":\"test\""))
        assert(result.contains("\"principalId\":\"testUser\""))
      }
    }
  }
}
Example 179
Source File: AuthenticationProviderSTSItTest.scala From rokku with Apache License 2.0 | 5 votes |
package com.ing.wbaa.rokku.proxy.provider

import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import com.amazonaws.services.securitytoken.model.{AssumeRoleRequest, GetSessionTokenRequest}
import com.ing.wbaa.rokku.proxy.config.StsSettings
import com.ing.wbaa.rokku.proxy.data._
import com.ing.wbaa.testkit.awssdk.StsSdkHelpers
import com.ing.wbaa.testkit.oauth.OAuth2TokenRequest
import org.scalatest.Assertion
import org.scalatest.diagrams.Diagrams
import org.scalatest.wordspec.AsyncWordSpec

import scala.concurrent.{ExecutionContext, Future}

class AuthenticationProviderSTSItTest extends AsyncWordSpec with Diagrams
  with AuthenticationProviderSTS
  with StsSdkHelpers
  with OAuth2TokenRequest {

  override implicit val testSystem: ActorSystem = ActorSystem.create("test-system")
  override implicit val system: ActorSystem = testSystem
  override implicit val executionContext: ExecutionContext = testSystem.dispatcher
  override implicit val materializer: ActorMaterializer = ActorMaterializer()(testSystem)

  override val stsSettings: StsSettings = StsSettings(testSystem)

  implicit val requestId: RequestId = RequestId("test")

  private val validKeycloakCredentials = Map(
    "grant_type" -> "password",
    "username" -> "testuser",
    "password" -> "password",
    "client_id" -> "sts-rokku"
  )
  private val userOneKeycloakCredentials = Map(
    "grant_type" -> "password",
    "username" -> "userone",
    "password" -> "password",
    "client_id" -> "sts-rokku"
  )

  def withAwsCredentialsValidInSTS(testCode: AwsRequestCredential => Future[Assertion]): Future[Assertion] = {
    val stsSdk = getAmazonSTSSdk(StsSettings(testSystem).stsBaseUri)
    retrieveKeycloackToken(validKeycloakCredentials).flatMap { keycloakToken =>
      val cred = stsSdk.getSessionToken(new GetSessionTokenRequest()
        .withTokenCode(keycloakToken.access_token))
        .getCredentials

      testCode(AwsRequestCredential(AwsAccessKey(cred.getAccessKeyId), Some(AwsSessionToken(cred.getSessionToken))))
    }
  }

  def withAssumeRoleInSTS(testCode: AwsRequestCredential => Future[Assertion]): Future[Assertion] = {
    val stsSdk = getAmazonSTSSdk(StsSettings(testSystem).stsBaseUri)
    retrieveKeycloackToken(userOneKeycloakCredentials).flatMap { keycloakToken =>
      val assumeRoleReq = new AssumeRoleRequest().withTokenCode(keycloakToken.access_token)
      assumeRoleReq.setRoleArn("arn:aws:iam::account-id:role/admin")
      assumeRoleReq.setRoleSessionName("testRole")
      val cred = stsSdk.assumeRole(assumeRoleReq).getCredentials

      testCode(AwsRequestCredential(AwsAccessKey(cred.getAccessKeyId), Some(AwsSessionToken(cred.getSessionToken))))
    }
  }

  "Authentication Provider STS" should {
    "check authentication" that {
      "succeeds for valid credentials" in {
        withAwsCredentialsValidInSTS { awsCredential =>
          areCredentialsActive(awsCredential).map { userResult =>
            assert(userResult.map(_.userName).contains(UserName("testuser")))
            assert(userResult.map(_.userGroups).head.contains(UserGroup("testgroup")))
            assert(userResult.map(_.userGroups).head.contains(UserGroup("group3")))
            assert(userResult.map(_.userGroups).head.size == 2)
            assert(userResult.exists(_.accessKey.value.length == 32))
            assert(userResult.exists(_.secretKey.value.length == 32))
          }
        }
      }

      "fail when user is not authenticated" in {
        areCredentialsActive(AwsRequestCredential(AwsAccessKey("notauthenticated"), Some(AwsSessionToken("okSessionToken")))).map { userResult =>
          assert(userResult.isEmpty)
        }
      }

      "succeeds for valid role" in {
        withAssumeRoleInSTS { awsCredential =>
          areCredentialsActive(awsCredential).map { roleResult =>
            assert(roleResult.map(_.userRole).contains(UserAssumeRole("admin")))
            assert(roleResult.map(_.userGroups).contains(Set()))
            assert(roleResult.exists(_.accessKey.value.length == 32))
            assert(roleResult.exists(_.secretKey.value.length == 32))
          }
        }
      }
    }
  }
}
Example 180
Source File: MessageProviderKafkaItTest.scala From rokku with Apache License 2.0 | 5 votes |
package com.ing.wbaa.rokku.proxy.provider

import java.net.InetAddress

import akka.actor.ActorSystem
import akka.http.scaladsl.model.{HttpMethods, RemoteAddress}
import com.ing.wbaa.rokku.proxy.config.KafkaSettings
import com.ing.wbaa.rokku.proxy.data._
import com.ing.wbaa.rokku.proxy.handler.parsers.RequestParser.RequestTypeUnknown
import net.manub.embeddedkafka.{EmbeddedKafka, EmbeddedKafkaConfig}
import org.scalatest.RecoverMethods._
import org.scalatest.diagrams.Diagrams
import org.scalatest.wordspec.AnyWordSpecLike

import scala.concurrent.ExecutionContext

class MessageProviderKafkaItTest extends AnyWordSpecLike with Diagrams with EmbeddedKafka with MessageProviderKafka {

  implicit val testSystem: ActorSystem = ActorSystem("kafkaTest")

  private val testKafkaPort = 9093

  override implicit val kafkaSettings: KafkaSettings = new KafkaSettings(testSystem.settings.config) {
    override val bootstrapServers: String = s"localhost:$testKafkaPort"
  }

  override implicit val executionContext: ExecutionContext = testSystem.dispatcher

  implicit val requestId: RequestId = RequestId("test")

  val s3Request = S3Request(AwsRequestCredential(AwsAccessKey("a"), None), Some("demobucket"), Some("s3object"), Read())
    .copy(clientIPAddress = RemoteAddress(InetAddress.getByName("127.0.0.1")))

  "KafkaMessageProvider" should {
    "Send message to correct topic with Put or Post" in {
      implicit val config = EmbeddedKafkaConfig(kafkaPort = testKafkaPort)

      withRunningKafka {
        Thread.sleep(3000)
        val createEventsTopic = "create_events"
        createCustomTopic(createEventsTopic)
        emitEvent(s3Request, HttpMethods.PUT, "testUser", RequestTypeUnknown())
        val result = consumeFirstStringMessageFrom(createEventsTopic)
        assert(result.contains("s3:ObjectCreated:PUT"))
      }
    }

    "Send message to correct topic with Delete" in {
      implicit val config = EmbeddedKafkaConfig(kafkaPort = testKafkaPort)

      withRunningKafka {
        Thread.sleep(3000)
        val deleteEventsTopic = "delete_events"
        createCustomTopic(deleteEventsTopic)
        emitEvent(s3Request, HttpMethods.DELETE, "testUser", RequestTypeUnknown())
        assert(consumeFirstStringMessageFrom(deleteEventsTopic).contains("s3:ObjectRemoved:DELETE"))
      }
    }

    "fail on incomplete data" in {
      recoverToSucceededIf[Exception](emitEvent(s3Request.copy(s3Object = None), HttpMethods.PUT, "testUser", RequestTypeUnknown()))
    }
  }
}
Example 181
Source File: RokkuFixtures.scala From rokku with Apache License 2.0 | 5 votes |
package com.ing.wbaa.testkit

import java.io.{File, RandomAccessFile}

import com.amazonaws.services.s3.AmazonS3
import com.ing.wbaa.testkit.awssdk.S3SdkHelpers
import org.scalatest.Assertion

import scala.concurrent.{ExecutionContext, Future}
import scala.util.{Random, Try}

trait RokkuFixtures extends S3SdkHelpers {

  def withHomeBucket(s3Client: AmazonS3, objects: Seq[String])(testCode: String => Future[Assertion])(implicit exCtx: ExecutionContext): Future[Assertion] = {
    val testBucket = "home"
    Try(s3Client.createBucket(testBucket))
    objects.foreach(obj => s3Client.putObject(testBucket, obj, ""))
    testCode(testBucket).andThen {
      case _ =>
        cleanBucket(s3Client, testBucket)
    }
  }

  private def cleanBucket(s3Client: AmazonS3, bucketName: String) = {
    import scala.collection.JavaConverters._
    s3Client.listObjectsV2(bucketName).getObjectSummaries.asScala.toList.map(_.getKey).foreach { key =>
      s3Client.deleteObject(bucketName, key)
    }
  }
}
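withHomeBucket is a loan fixture: acquire the bucket, lend it to the test, and clean up in andThen whether the assertion succeeds or fails. The general shape, abstracted into an illustrative helper (a sketch, not testkit code):

import scala.concurrent.{ ExecutionContext, Future }

// Sketch of the loan-fixture shape used above: set up, lend, always release.
def withResource[R, A](acquire: => R)(release: R => Unit)(use: R => Future[A])(implicit ec: ExecutionContext): Future[A] = {
  val r = acquire
  use(r).andThen { case _ => release(r) } // release runs on success and on failure
}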
Example 182
Source File: FilterRecursiveListBucketHandlerSpec.scala From rokku with Apache License 2.0 | 5 votes |
package com.ing.wbaa.rokku.proxy.handler

import akka.NotUsed
import akka.actor.ActorSystem
import akka.http.scaladsl.model.{ HttpMethods, MediaTypes, RemoteAddress, Uri }
import akka.stream.scaladsl.{ Sink, Source }
import akka.stream.{ ActorMaterializer, Materializer }
import akka.util.ByteString
import com.ing.wbaa.rokku.proxy.data._
import org.scalatest.diagrams.Diagrams
import org.scalatest.wordspec.AsyncWordSpec

import scala.concurrent.ExecutionContext

class FilterRecursiveListBucketHandlerSpec extends AsyncWordSpec with Diagrams with FilterRecursiveListBucketHandler {

  implicit val system: ActorSystem = ActorSystem.create("test-system")
  override implicit val executionContext: ExecutionContext = system.dispatcher
  implicit val requestId: RequestId = RequestId("test")

  implicit def materializer: Materializer = ActorMaterializer()(system)

  def isUserAuthorizedForRequest(request: S3Request, user: User)(implicit id: RequestId): Boolean = {
    user match {
      case User(userName, _, _, _, _) if userName.value == "admin" => true
      case User(userName, _, _, _, _) if userName.value == "user1" =>
        request match {
          case S3Request(_, s3BucketPath, _, _, _, _, _) =>
            if (s3BucketPath.get.startsWith("/demobucket/user/user2")) false else true
        }
      case _ => true
    }
  }

  val listBucketXmlResponse: String = scala.io.Source.fromResource("listBucket.xml").mkString.stripMargin.trim

  val adminUser = User(UserRawJson("admin", Some(Set.empty[String]), "a", "s", None))
  val user1 = User(UserRawJson("user1", Some(Set.empty[String]), "a", "s", None))
  val s3Request = S3Request(AwsRequestCredential(AwsAccessKey(""), None), Uri.Path("/demobucket/user"), HttpMethods.GET, RemoteAddress.Unknown, HeaderIPs(), MediaTypes.`text/plain`)
  val data: Source[ByteString, NotUsed] = Source.single(ByteString.fromString(listBucketXmlResponse))

  "List bucket object response" should {
    "returns all objects to admin" in {
      data.via(filterRecursiveListObjects(adminUser, s3Request)).map(_.utf8String).runWith(Sink.seq).map(x => {
        assert(x.mkString.stripMargin.equals(listBucketXmlResponse))
      })
    }

    val filteredXml: String = scala.io.Source.fromResource("filteredListBucket.xml").mkString.stripMargin.trim
    "returns filtered object for user 1" in {
      data.via(filterRecursiveListObjects(user1, s3Request)).map(_.utf8String).runWith(Sink.seq).map(x => {
        assert(x.mkString.stripMargin.replaceAll("[\n\r\\s]", "")
          .equals(filteredXml.replaceAll("[\n\r\\s]", "")))
      })
    }
  }
}
Example 183
Source File: FilterRecursiveMultiDeleteSpec.scala From rokku with Apache License 2.0 | 5 votes |
package com.ing.wbaa.rokku.proxy.handler

import akka.NotUsed
import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.Source
import akka.util.ByteString
import com.ing.wbaa.rokku.proxy.handler.FilterRecursiveMultiDelete._
import org.scalatest.diagrams.Diagrams
import org.scalatest.wordspec.AsyncWordSpec

import scala.collection.mutable.ListBuffer
import scala.concurrent.ExecutionContext
import scala.util.Random

class FilterRecursiveMultiDeleteSpec extends AsyncWordSpec with Diagrams {

  implicit val system: ActorSystem = ActorSystem.create("test-system")
  override implicit val executionContext: ExecutionContext = system.dispatcher

  implicit def materializer: ActorMaterializer = ActorMaterializer()(system)

  val multiDeleteRequestXml: String = scala.io.Source.fromResource("multiDeleteRequest.xml").mkString.stripMargin.trim
  val multiDeleteRequestV4Xml: String = scala.io.Source.fromResource("multiDeleteRequestV4.xml").mkString.stripMargin.trim
  val multiPartComplete: String = scala.io.Source.fromResource("multipartUploadComplete.xml").mkString.stripMargin.trim
  val data: Source[ByteString, NotUsed] = Source.single(ByteString.fromString(multiDeleteRequestXml))
  val dataV4: Source[ByteString, NotUsed] = Source.single(ByteString.fromString(multiDeleteRequestV4Xml))
  val otherData: Source[ByteString, NotUsed] = Source.single(ByteString.fromString(multiPartComplete))

  val numberOfObjects = 1000

  "multiDelete request" should {
    "should be parsed to objects list" in {
      exctractMultideleteObjectsFlow(data).map { r =>
        assert(r.contains("testuser/file1"))
        assert(r.contains("testuser/file2"))
        assert(r.contains("testuser/file3"))
      }
    }

    "v4 should be parsed to objects list" in {
      exctractMultideleteObjectsFlow(dataV4).map { r =>
        assert(r.contains("testuser/issue"))
        assert(!r.contains("true"))
      }
    }

    "should return empty list" in {
      exctractMultideleteObjectsFlow(otherData).map(r => assert(r == Vector()))
    }

    "should return correct size for large xml objects" in {
      val rand = new Random()
      val doc = new ListBuffer[String]()
      for (c <- 1 to numberOfObjects)
        doc += s"<Object><Key>testuser/one/two/three/four/five/six/seven/eight/nine/ten/eleven/twelve/sub$c/${rand.alphanumeric.take(32).mkString}=${rand.alphanumeric.take(12).mkString}.txt</Key></Object>"

      exctractMultideleteObjectsFlow(Source.single(ByteString("<Delete>" + doc.mkString + "</Delete>"))).map { r =>
        assert(r.length == numberOfObjects)
      }
    }
  }
}
Example 184
Source File: RequestHandlerS3Spec.scala From rokku with Apache License 2.0 | 5 votes |
package com.ing.wbaa.rokku.proxy.handler

import akka.actor.ActorSystem
import akka.http.scaladsl.model._
import com.ing.wbaa.rokku.proxy.config.StorageS3Settings
import com.ing.wbaa.rokku.proxy.data._
import com.ing.wbaa.rokku.proxy.queue.MemoryUserRequestQueue
import org.scalatest.diagrams.Diagrams
import org.scalatest.wordspec.AsyncWordSpec

import scala.concurrent.{ ExecutionContext, Future }

class RequestHandlerS3Spec extends AsyncWordSpec with Diagrams with RequestHandlerS3 with MemoryUserRequestQueue {

  override implicit val system: ActorSystem = ActorSystem.create("test-system")
  override implicit val executionContext: ExecutionContext = system.dispatcher
  override val storageS3Settings: StorageS3Settings = new StorageS3Settings(system.settings.config) {
    override val storageS3Authority: Uri.Authority = Uri.Authority(Uri.Host("1.2.3.4"), 1234)
  }

  implicit val requestId: RequestId = RequestId("test")

  var numFiredRequests = 0

  override def fireRequestToS3(request: HttpRequest)(implicit id: RequestId): Future[HttpResponse] = {
    numFiredRequests = numFiredRequests + 1
    Future.successful(HttpResponse(status = StatusCodes.Forbidden))
  }

  override def handleUserCreationRadosGw(userSTS: User)(implicit id: RequestId): Boolean = true

  def isUserAuthorizedForRequest(request: S3Request, user: User): Boolean = true

  override protected def filterResponse(request: HttpRequest, userSTS: User, s3request: S3Request, response: HttpResponse)(implicit id: RequestId): HttpResponse = null

  "Request Handler" should {
    "execute a request" that {
      "retries a request when forbidden and user needs to be created" in {
        val initialNumFiredRequests = numFiredRequests
        executeRequest(
          HttpRequest(),
          User(UserRawJson("u", Some(Set.empty[String]), "a", "s", None)),
          S3Request(AwsRequestCredential(AwsAccessKey(""), None), Uri.Path("/demobucket/user"), HttpMethods.GET, RemoteAddress.Unknown, HeaderIPs(), MediaTypes.`text/plain`)
        ).map(_ => assert(numFiredRequests - initialNumFiredRequests == 2))
      }
    }
  }
}
Example 185
Source File: LineageHelperSpec.scala From rokku with Apache License 2.0 | 5 votes |
package com.ing.wbaa.rokku.proxy.provider

import akka.http.scaladsl.model.HttpRequest
import akka.http.scaladsl.model.headers.RawHeader
import com.ing.wbaa.rokku.proxy.config.KafkaSettings
import com.ing.wbaa.rokku.proxy.data.{ BucketClassification, DirClassification, ObjectClassification, RequestId }
import com.ing.wbaa.rokku.proxy.provider.atlas.LineageHelpers
import org.scalatest.PrivateMethodTester
import org.scalatest.diagrams.Diagrams
import org.scalatest.wordspec.AnyWordSpec

import scala.concurrent.ExecutionContext

class LineageHelperSpec extends AnyWordSpec with Diagrams with PrivateMethodTester {

  object LineageHelpersTest extends LineageHelpers {
    override protected[this] implicit val kafkaSettings: KafkaSettings = null
    override protected[this] implicit val executionContext: ExecutionContext = null
  }

  implicit val id = RequestId("1")

  "extractMetadataFromHeader" that {
    "return None for empty header" in {
      val result = LineageHelpersTest.extractMetadataHeader(None)
      assert(result.isEmpty)
    }

    "return None for wrong header" in {
      val result = LineageHelpersTest.extractMetadataHeader(Some("k,v"))
      assert(result.isEmpty)
      val result2 = LineageHelpersTest.extractMetadataHeader(Some("k=v,k2"))
      assert(result2.isEmpty)
      val result3 = LineageHelpersTest.extractMetadataHeader(Some("kv,=k2,v2"))
      assert(result3.isEmpty)
    }

    "return key and value for metadata header" in {
      val result = LineageHelpersTest.extractMetadataHeader(Some("k=v"))
      assert(result.contains(Map("k" -> "v")))
    }

    "return keys and values for metadata header" in {
      val result = LineageHelpersTest.extractMetadataHeader(Some("k1=v1,k2=v2"))
      assert(result.contains(Map("k1" -> "v1", "k2" -> "v2")))
    }
  }

  "extractClassifications" that {
    "returns bucket classifications" in {
      val request = HttpRequest().withUri("bucket").withHeaders(RawHeader(LineageHelpersTest.CLASSIFICATIONS_HEADER, "classification1"))
      val result = LineageHelpersTest.extractClassifications(request)
      assert(result.size == 1)
      assert(result contains BucketClassification())
      assert(result(BucketClassification()) == List("classification1"))
    }

    "returns dir classifications" in {
      val request = HttpRequest().withUri("bucket/dir1/").withHeaders(RawHeader(LineageHelpersTest.CLASSIFICATIONS_HEADER, "classification1,classification2"))
      val result = LineageHelpersTest.extractClassifications(request)
      assert(result.size == 1)
      assert(result contains DirClassification())
      assert(result(DirClassification()) == List("classification1", "classification2"))
    }

    "returns object classifications" in {
      val request = HttpRequest().withUri("bucket/obj").withHeaders(RawHeader(LineageHelpersTest.CLASSIFICATIONS_HEADER, "classification1,classification2,classification3"))
      val result = LineageHelpersTest.extractClassifications(request)
      assert(result.size == 1)
      assert(result contains ObjectClassification())
      assert(result(ObjectClassification()) == List("classification1", "classification2", "classification3"))

      val request2 = HttpRequest().withUri("bucket/dir1/obj").withHeaders(RawHeader(LineageHelpersTest.CLASSIFICATIONS_HEADER, "classification1"))
      val result2 = LineageHelpersTest.extractClassifications(request2)
      assert(result2.size == 1)
      assert(result2 contains ObjectClassification())
      assert(result2(ObjectClassification()) == List("classification1"))
    }
  }
}
Example 186
Source File: Imap.scala From gatling-imap with GNU Affero General Public License v3.0 | 5 votes |
package com.linagora.gatling.imap

import com.sun.mail.imap.protocol.IMAPResponse
import com.yahoo.imapnio.async.client.{ImapAsyncSession, ImapFuture}
import com.yahoo.imapnio.async.request._
import com.yahoo.imapnio.async.response.ImapAsyncResponse

import scala.concurrent.{ExecutionContext, Future}

object Imap {
  def login(login: String, password: String)(implicit session: ImapAsyncSession, executionContext: ExecutionContext): Future[List[IMAPResponse]] =
    executeCommand(session.execute(new LoginCommand(login, password)))

  def rawCommand(command: ImapRequest)(implicit session: ImapAsyncSession, executionContext: ExecutionContext): Future[List[IMAPResponse]] =
    executeCommand(session.execute(command))

  def disconnect()(implicit session: ImapAsyncSession, executionContext: ExecutionContext): Future[Unit] =
    Future.successful(session.close())

  private def executeCommand(command: ImapFuture[ImapAsyncResponse])(implicit executionContext: ExecutionContext) = {
    Future {
      import collection.JavaConverters._
      command.get().getResponseLines.asScala.toList
    }
  }
}
Example 187
Source File: CyrusServer.scala From gatling-imap with GNU Affero General Public License v3.0 | 5 votes |
package com.linagora.gatling.imap

import org.slf4j.{Logger, LoggerFactory}
import org.testcontainers.containers.GenericContainer

import com.yahoo.imapnio.async.request.CreateFolderCommand

import scala.concurrent.duration._
import scala.concurrent.{Await, ExecutionContext, ExecutionContextExecutor}

object CyrusServer extends Server {

  private val imapPort = 143
  private val logger: Logger = LoggerFactory.getLogger(CyrusServer.getClass)

  class RunningCyrusServer(val container: GenericContainer[_]) extends RunningServer with ImapTestUtils {
    protected val logger: Logger = CyrusServer.logger
    lazy val mappedImapPort: Integer = container.getMappedPort(imapPort)

    def addUser(login: String, password: String): Unit = {
      container.execInContainer("bash", "-c", s"echo $password | saslpasswd2 -u test -c $login -p")
      implicit val executionContext: ExecutionContextExecutor = ExecutionContext.global
      Await.result(
        connect(mappedImapPort)
          .flatMap(implicit session =>
            for {
              _ <- Imap.login("cyrus", "cyrus")
              _ <- Imap.rawCommand(new CreateFolderCommand(s"user.$login"))
              _ <- Imap.disconnect()
            } yield ()),
        1.minute)
    }

    def stop(): Unit = container.stop()
  }

  def start(): RunningServer = {
    val cyrus = new GenericContainer("linagora/cyrus-imap")
    cyrus.addExposedPort(imapPort)
    cyrus.start()
    new RunningCyrusServer(cyrus)
  }
}
Example 188
Source File: HttpApp.scala From darwin with Apache License 2.0 | 5 votes |
package it.agilelab.darwin.server.rest

import java.util.concurrent.Executor

import akka.actor.ActorSystem
import akka.http.scaladsl.Http
import akka.http.scaladsl.server.RouteConcatenation
import akka.stream.ActorMaterializer
import com.typesafe.config.Config
import it.agilelab.darwin.common.Logging

import scala.concurrent.duration.Duration
import scala.concurrent.{Await, ExecutionContext, ExecutionContextExecutor}

class HttpApp(config: Config, services: Service*)
             (implicit system: ActorSystem, materializer: ActorMaterializer) extends Logging {
  def run(): Unit = {
    val interface = config.getString("interface")
    val port = config.getInt("port")

    val route = RouteConcatenation.concat(services.map(_.route): _*)

    log.info("Starting http server on {}:{}", interface, port)
    val eventuallyBinding = Http().bindAndHandle(route, interface, port)
    val binding = Await.result(eventuallyBinding, Duration.Inf)
    log.info("Started http server on {}:{}", interface, port)

    val shutdownThread = new Thread(new Runnable {
      override def run(): Unit = {
        implicit val ec: ExecutionContext = newSameThreadExecutor
        log.info("Received shutdown hook")

        val termination = for {
          _ <- binding.unbind()
          terminated <- system.terminate()
        } yield terminated

        Await.ready(termination, Duration.Inf)
        log.info("Shutdown")
      }
    })

    shutdownThread.setName("shutdown")
    Runtime.getRuntime.addShutdownHook(shutdownThread)
    log.info("registered shutdown hook")
  }

  private def newSameThreadExecutor: ExecutionContextExecutor =
    ExecutionContext.fromExecutor(new Executor {
      override def execute(command: Runnable): Unit = command.run()
    })
}

object HttpApp {
  def apply(config: Config, services: Service*)(implicit system: ActorSystem, materializer: ActorMaterializer): HttpApp =
    new HttpApp(config, services: _*)
}
Example 189
Source File: DruidClient.scala From XSQL with Apache License 2.0 | 5 votes |
package org.apache.spark.sql.execution.datasources.druid

import com.ning.http.client.{ AsyncCompletionHandler, AsyncHttpClient, AsyncHttpClientConfig, Response }
import org.json4s._
import org.json4s.jackson._
import org.json4s.jackson.JsonMethods._

import scala.concurrent.{ExecutionContext, Future, Promise}
import scala.util.{Failure, Success}

import org.apache.spark.internal.Logging

// NOTE: this listing is an excerpt; the enclosing DruidClient class, its
// `client` field and its `execute` method are not shown here.
def descTable(datasouceName: String): Seq[(String, Any)] = {
  val future = execute(DescTableRequest(datasouceName).toJson, DescTableResponse.parse)
  var data: Seq[(String, Any)] = null
  future.onComplete {
    case Success(resp) => data = resp.data
    case Failure(ex)   => ex.printStackTrace()
  }
  // Busy-waits for completion; a bounded Await.result would be simpler (see the sketch below).
  while (!future.isCompleted) {
    Thread.sleep(500)
  }
  data
}

def close(): Unit = {
  client.close()
}
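As flagged in the excerpt, the polling loop can be collapsed into a bounded blocking wait. A sketch under the assumption that blocking is acceptable at this call site (the helper name and timeout are illustrative):

import scala.concurrent.duration._
import scala.concurrent.{ Await, Future }

// Sketch: block for at most `atMost` instead of polling every 500 ms;
// a timeout surfaces as a TimeoutException rather than a silent null.
def awaitResult[A](f: Future[A], atMost: FiniteDuration = 30.seconds): A =
  Await.result(f, atMost)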
Example 190
Source File: S3Client.scala From akka-persistence-s3 with MIT License | 5 votes |
package akka.persistence.s3

import java.io.InputStream

import com.amazonaws.auth.{ BasicAWSCredentials, DefaultAWSCredentialsProviderChain }
import com.amazonaws.services.s3.{ S3ClientOptions, AmazonS3Client }
import com.amazonaws.services.s3.model._

import scala.concurrent.{ Future, ExecutionContext }

trait S3Client {
  val s3ClientConfig: S3ClientConfig

  lazy val client: AmazonS3Client = {
    val client =
      if (s3ClientConfig.awsUseDefaultCredentialsProviderChain)
        new AmazonS3Client(new DefaultAWSCredentialsProviderChain).withRegion(s3ClientConfig.region)
      else
        new AmazonS3Client(new BasicAWSCredentials(s3ClientConfig.awsKey, s3ClientConfig.awsSecret))

    s3ClientConfig.endpoint.foreach { endpoint =>
      client.withEndpoint(endpoint)
      ()
    }
    client.setS3ClientOptions(new S3ClientOptions()
      .withPathStyleAccess(s3ClientConfig.options.pathStyleAccess)
      .withChunkedEncodingDisabled(s3ClientConfig.options.chunkedEncodingDisabled))
    client
  }

  def createBucket(bucketName: String)(implicit ec: ExecutionContext): Future[Bucket] = Future {
    client.createBucket(bucketName)
  }

  def deleteBucket(bucketName: String)(implicit ec: ExecutionContext): Future[Unit] = Future {
    client.deleteBucket(bucketName)
  }

  def putObject(bucketName: String, key: String, input: InputStream, metadata: ObjectMetadata)(implicit ec: ExecutionContext): Future[PutObjectResult] = Future {
    client.putObject(new PutObjectRequest(bucketName, key, input, metadata))
  }

  def getObject(bucketName: String, key: String)(implicit ec: ExecutionContext): Future[S3Object] = Future {
    client.getObject(new GetObjectRequest(bucketName, key))
  }

  def listObjects(request: ListObjectsRequest)(implicit ec: ExecutionContext): Future[ObjectListing] = Future {
    client.listObjects(request)
  }

  def deleteObject(bucketName: String, key: String)(implicit ec: ExecutionContext): Future[Unit] = Future {
    client.deleteObject(bucketName, key)
  }

  def deleteObjects(request: DeleteObjectsRequest)(implicit ec: ExecutionContext): Future[Unit] = Future {
    client.deleteObjects(request)
  }
}
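Every method here wraps a blocking AWS SDK v1 call in Future, so the ExecutionContext the caller passes determines which threads block. One common choice (a sketch, not part of the plugin) is to isolate those calls on a dedicated pool rather than the default dispatcher:

import java.util.concurrent.Executors
import scala.concurrent.ExecutionContext

// Sketch: a fixed pool sized for blocking S3 I/O; pass it implicitly to the
// client methods so the default dispatcher never blocks. Pool size is illustrative.
implicit val s3BlockingEc: ExecutionContext =
  ExecutionContext.fromExecutorService(Executors.newFixedThreadPool(8))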
Example 191
Source File: ThrottlingConfig.scala From maha with Apache License 2.0 | 5 votes |
package com.yahoo.maha.worker.throttling

import java.util.concurrent.Executors

import com.yahoo.maha.core.Engine
import com.yahoo.maha.job.service.{JobMetadata, JobStatus, JobType}
import com.yahoo.maha.worker.request.MahaWorkerRequest
import grizzled.slf4j.Logging
import org.joda.time.{DateTime, DateTimeZone}

import scala.concurrent.{Await, ExecutionContext}
import scala.concurrent.duration._
import scala.util.{Failure, Success}

case class EngineBasedThrottler(throttlingConfig: EngineThrottlingConfig, jobMetadata: JobMetadata, jobMetaExecConfig: JobMetaExecConfig) extends Throttler with Logging {

  implicit val executor = ExecutionContext.fromExecutor(Executors.newFixedThreadPool(jobMetaExecConfig.poolSize))

  override def throttle(mahaWorkerRequest: MahaWorkerRequest): Boolean = {

    val engine: Engine = mahaWorkerRequest.engine

    val jobType: JobType = {
      val jobTypeOption = JobType.getJobType(engine)
      require(jobTypeOption.isDefined, s"Unable to get the job type for engine $engine")
      jobTypeOption.get
    }

    var timesChecked = 0
    var countOfRunningJobs = getRunningJobs(engine, jobType)
    while (countOfRunningJobs > throttlingConfig.countThreshold && timesChecked < throttlingConfig.maxChecks) {
      warn(s"Throttling: Number of running jobs ($countOfRunningJobs) exceeds threshold (${throttlingConfig.countThreshold}). Checked $timesChecked times.")
      Thread.sleep(throttlingConfig.checkDelayMs)
      countOfRunningJobs = getRunningJobs(engine, jobType)
      timesChecked += 1
    }
    if (timesChecked == throttlingConfig.maxChecks && countOfRunningJobs > throttlingConfig.countThreshold) {
      warn(s"Timeout: Count of running jobs exceeds threshold even after ${throttlingConfig.checkDelayMs * throttlingConfig.maxChecks} ms. Continuing to process to avoid increasing PULSAR/KAFKA backlog.")
      //monManager.incrementMetric(Metrics.ThrottleCheckTimeouts)
    }
    info(s"Number of running jobs ($countOfRunningJobs) below threshold (${throttlingConfig.countThreshold}), proceeding to process message.")
    // Returns true when the request was actually throttled, i.e. we had to wait at least once.
    timesChecked > 0
  }

  def getRunningJobs(engine: Engine, jobType: JobType): Int = {
    try {
      val jobCreatedTs = DateTime.now(DateTimeZone.UTC).minusMinutes(throttlingConfig.lookbackMins)
      val countOfRunningJobsFuture = jobMetadata.countJobsByTypeAndStatus(jobType, JobStatus.RUNNING, jobCreatedTs)

      Await.result(countOfRunningJobsFuture, jobMetaExecConfig.maxWaitMills millis)

      val runningJobCount: Int = if (countOfRunningJobsFuture.value.isEmpty) {
        warn(s"Failed to get the runningJobCount in ${jobMetaExecConfig.maxWaitMills} ms")
        0
      } else {
        countOfRunningJobsFuture.value.get match {
          case Success(count) => count
          case Failure(t) =>
            error(s"Failed to get the result from jobMeta ${t.getMessage}", t)
            0
        }
      }
      runningJobCount
    } catch {
      case e: Exception =>
        e.printStackTrace()
        0
    }
  }
}
Example 192
Source File: Error.scala From akka-http-oauth2-client with Apache License 2.0 | 5 votes |
package com.github.dakatsuka.akka.http.oauth2.client

import akka.http.scaladsl.model.HttpResponse
import akka.http.scaladsl.unmarshalling.Unmarshal
import akka.stream.Materializer
import com.github.dakatsuka.akka.http.oauth2.client.utils.JsonUnmarshaller
import io.circe.Decoder

import scala.concurrent.{ ExecutionContext, Future }

object Error {
  sealed abstract class Code(val value: String)
  case object InvalidRequest       extends Code("invalid_request")
  case object InvalidClient        extends Code("invalid_client")
  case object InvalidToken         extends Code("invalid_token")
  case object InvalidGrant         extends Code("invalid_grant")
  case object InvalidScope         extends Code("invalid_scope")
  case object UnsupportedGrantType extends Code("unsupported_grant_type")
  case object Unknown              extends Code("unknown")

  object Code {
    def fromString(code: String): Code = code match {
      case "invalid_request"        => InvalidRequest
      case "invalid_client"         => InvalidClient
      case "invalid_token"          => InvalidToken
      case "invalid_grant"          => InvalidGrant
      case "invalid_scope"          => InvalidScope
      case "unsupported_grant_type" => UnsupportedGrantType
      case _                        => Unknown
    }
  }

  class UnauthorizedException(val code: Code, val description: String, val response: HttpResponse)
    extends RuntimeException(s"$code: $description")

  object UnauthorizedException extends JsonUnmarshaller {
    case class UnauthorizedResponse(error: String, errorDescription: String)

    implicit def decoder: Decoder[UnauthorizedResponse] = Decoder.instance { c =>
      for {
        error       <- c.downField("error").as[String].right
        description <- c.downField("error_description").as[String].right
      } yield UnauthorizedResponse(error, description)
    }

    def fromHttpResponse(response: HttpResponse)(implicit ec: ExecutionContext, mat: Materializer): Future[UnauthorizedException] = {
      Unmarshal(response).to[UnauthorizedResponse].map { r =>
        new UnauthorizedException(Code.fromString(r.error), r.errorDescription, response)
      }
    }
  }
}
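Code.fromString is total: every input string maps to some Code, with unrecognized values collapsing into Unknown rather than throwing. For example:

// Usage: unknown strings collapse into Code.Unknown instead of throwing.
assert(Error.Code.fromString("invalid_grant") == Error.Code.InvalidGrant)
assert(Error.Code.fromString("not_a_known_code") == Error.Code.Unknown)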
Example 193
Source File: Client.scala From akka-http-oauth2-client with Apache License 2.0 | 5 votes |
package com.github.dakatsuka.akka.http.oauth2.client

import akka.actor.ActorSystem
import akka.http.scaladsl.Http
import akka.http.scaladsl.model.headers.OAuth2BearerToken
import akka.http.scaladsl.model.{ HttpRequest, HttpResponse, Uri }
import akka.stream.Materializer
import akka.stream.scaladsl.{ Flow, Sink }
import com.github.dakatsuka.akka.http.oauth2.client.Error.UnauthorizedException
import com.github.dakatsuka.akka.http.oauth2.client.strategy.Strategy

import scala.concurrent.{ ExecutionContext, Future }

class Client(config: ConfigLike, connection: Option[Flow[HttpRequest, HttpResponse, _]] = None)(implicit system: ActorSystem)
    extends ClientLike {
  def getAuthorizeUrl[A <: GrantType](grant: A, params: Map[String, String] = Map.empty)(implicit s: Strategy[A]): Option[Uri] =
    s.getAuthorizeUrl(config, params)

  def getAccessToken[A <: GrantType](
      grant: A,
      params: Map[String, String] = Map.empty
  )(implicit s: Strategy[A], ec: ExecutionContext, mat: Materializer): Future[Either[Throwable, AccessToken]] = {
    val source = s.getAccessTokenSource(config, params)
    source
      .via(connection.getOrElse(defaultConnection))
      .mapAsync(1)(handleError)
      .mapAsync(1)(AccessToken.apply)
      .runWith(Sink.head)
      .map(Right.apply)
      .recover { case ex => Left(ex) }
  }

  def getConnectionWithAccessToken(accessToken: AccessToken): Flow[HttpRequest, HttpResponse, _] =
    Flow[HttpRequest]
      .map(_.addCredentials(OAuth2BearerToken(accessToken.accessToken)))
      .via(connection.getOrElse(defaultConnection))

  private def defaultConnection: Flow[HttpRequest, HttpResponse, _] =
    config.site.getScheme match {
      case "http"  => Http().outgoingConnection(config.getHost, config.getPort)
      case "https" => Http().outgoingConnectionHttps(config.getHost, config.getPort)
    }

  private def handleError(response: HttpResponse)(implicit ec: ExecutionContext, mat: Materializer): Future[HttpResponse] = {
    if (response.status.isFailure()) UnauthorizedException.fromHttpResponse(response).flatMap(Future.failed(_))
    else Future.successful(response)
  }
}

object Client {
  def apply(config: ConfigLike)(implicit system: ActorSystem): Client =
    new Client(config)

  def apply(config: ConfigLike, connection: Flow[HttpRequest, HttpResponse, _])(implicit system: ActorSystem): Client =
    new Client(config, Some(connection))
}
Example 194
Source File: AccessTokenSpec.scala From akka-http-oauth2-client with Apache License 2.0 | 5 votes |
package com.github.dakatsuka.akka.http.oauth2.client

import akka.actor.ActorSystem
import akka.http.scaladsl.model.{ HttpEntity, HttpResponse, StatusCodes }
import akka.http.scaladsl.model.ContentTypes.`application/json`
import akka.stream.{ ActorMaterializer, Materializer }
import org.scalatest.concurrent.ScalaFutures
import org.scalatest.time.{ Millis, Seconds, Span }
import org.scalatest.{ BeforeAndAfterAll, DiagrammedAssertions, FlatSpec }

import scala.concurrent.{ Await, ExecutionContext }
import scala.concurrent.duration.Duration

class AccessTokenSpec extends FlatSpec with DiagrammedAssertions with ScalaFutures with BeforeAndAfterAll {
  implicit val system: ActorSystem        = ActorSystem()
  implicit val ec: ExecutionContext       = system.dispatcher
  implicit val materializer: Materializer = ActorMaterializer()
  implicit val defaultPatience: PatienceConfig =
    PatienceConfig(timeout = Span(5, Seconds), interval = Span(700, Millis))

  override def afterAll(): Unit = {
    Await.ready(system.terminate(), Duration.Inf)
  }

  behavior of "AccessToken"

  it should "apply from HttpResponse" in {
    val accessToken  = "xxx"
    val tokenType    = "bearer"
    val expiresIn    = 86400
    val refreshToken = "yyy"

    val httpResponse = HttpResponse(
      status = StatusCodes.OK,
      headers = Nil,
      entity = HttpEntity(
        `application/json`,
        s"""
           |{
           |  "access_token": "$accessToken",
           |  "token_type": "$tokenType",
           |  "expires_in": $expiresIn,
           |  "refresh_token": "$refreshToken"
           |}
        """.stripMargin
      )
    )

    val result = AccessToken(httpResponse)

    whenReady(result) { token =>
      assert(token.accessToken == accessToken)
      assert(token.tokenType == tokenType)
      assert(token.expiresIn == expiresIn)
      assert(token.refreshToken.contains(refreshToken))
    }
  }
}
Example 195
Source File: ReplicaCoordinatorActor.scala From JustinDB with Apache License 2.0 | 5 votes |
package justin.db.actors

import akka.actor.{Actor, Props}
import akka.routing.{DefaultResizer, RoundRobinPool}
import justin.db.actors.protocol.{ReadData, WriteData}
import justin.db.replica.read.ReplicaReadCoordinator
import justin.db.replica.write.ReplicaWriteCoordinator

import scala.concurrent.ExecutionContext

class ReplicaCoordinatorActor(readCoordinator: ReplicaReadCoordinator, writeCoordinator: ReplicaWriteCoordinator) extends Actor {

  private implicit val ec: ExecutionContext = context.dispatcher

  override def receive: Receive = {
    case rd: ReadData  => readCoordinator.apply(rd.cmd, rd.clusterMembers).foreach(rd.sender ! _)
    case wd: WriteData => writeCoordinator.apply(wd.cmd, wd.clusterMembers).foreach(wd.sender ! _)
  }
}

object ReplicaCoordinatorActor {

  def props(readCoordinator: ReplicaReadCoordinator, writeCoordinator: ReplicaWriteCoordinator): Props = {
    Props(new ReplicaCoordinatorActor(readCoordinator, writeCoordinator))
  }
}

object RoundRobinCoordinatorRouter {
  def routerName: String = "CoordinatorRouter"

  private val pool = RoundRobinPool(
    nrOfInstances = 5,
    resizer = Some(DefaultResizer(lowerBound = 2, upperBound = 15))
  )

  def props(readCoordinator: ReplicaReadCoordinator, writeCoordinator: ReplicaWriteCoordinator): Props = {
    pool.props(ReplicaCoordinatorActor.props(readCoordinator, writeCoordinator))
  }
}
Example 196
Source File: StorageNodeActor.scala From JustinDB with Apache License 2.0 | 5 votes |
package justin.db.actors

import akka.actor.{Actor, ActorRef, Props, RootActorPath, Terminated}
import akka.cluster.ClusterEvent.{CurrentClusterState, MemberUp}
import akka.cluster.{Cluster, Member, MemberStatus}
import com.typesafe.scalalogging.StrictLogging
import justin.db.actors.protocol.{RegisterNode, _}
import justin.db.cluster.ClusterMembers
import justin.db.cluster.datacenter.Datacenter
import justin.db.consistenthashing.{NodeId, Ring}
import justin.db.replica._
import justin.db.replica.read.{ReplicaLocalReader, ReplicaReadCoordinator, ReplicaRemoteReader}
import justin.db.replica.write.{ReplicaLocalWriter, ReplicaRemoteWriter, ReplicaWriteCoordinator}
import justin.db.storage.PluggableStorageProtocol

import scala.concurrent.ExecutionContext

class StorageNodeActor(nodeId: NodeId, datacenter: Datacenter, storage: PluggableStorageProtocol, ring: Ring, n: N)
  extends Actor with StrictLogging {

  private[this] implicit val ec: ExecutionContext = context.dispatcher
  private[this] val cluster = Cluster(context.system)

  private[this] var clusterMembers   = ClusterMembers.empty
  private[this] val readCoordinator  = new ReplicaReadCoordinator(nodeId, ring, n, new ReplicaLocalReader(storage), new ReplicaRemoteReader)
  private[this] val writeCoordinator = new ReplicaWriteCoordinator(nodeId, ring, n, new ReplicaLocalWriter(storage), new ReplicaRemoteWriter)

  private[this] val coordinatorRouter = context.actorOf(
    props = RoundRobinCoordinatorRouter.props(readCoordinator, writeCoordinator),
    name  = RoundRobinCoordinatorRouter.routerName
  )

  private[this] val name = self.path.name

  override def preStart(): Unit = cluster.subscribe(this.self, classOf[MemberUp])
  override def postStop(): Unit = cluster.unsubscribe(this.self)

  def receive: Receive = {
    receiveDataPF orElse receiveClusterDataPF orElse receiveRegisterNodePR orElse notHandledPF
  }

  private[this] def receiveDataPF: Receive = {
    case readReq: StorageNodeReadRequest =>
      coordinatorRouter ! ReadData(sender(), clusterMembers, readReq)
    case writeLocalDataReq: StorageNodeWriteDataLocal =>
      coordinatorRouter ! WriteData(sender(), clusterMembers, writeLocalDataReq)
    case writeClientReplicaReq: Internal.WriteReplica =>
      coordinatorRouter ! WriteData(sender(), clusterMembers, writeClientReplicaReq)
  }

  private[this] def receiveClusterDataPF: Receive = {
    case "members"                  => sender() ! clusterMembers
    case MemberUp(member)           => register(nodeId, ring, member)
    case state: CurrentClusterState => state.members.filter(_.status == MemberStatus.Up).foreach(member => register(nodeId, ring, member))
    case Terminated(actorRef)       => clusterMembers = clusterMembers.removeByRef(StorageNodeActorRef(actorRef))
  }

  private[this] def receiveRegisterNodePR: Receive = {
    case RegisterNode(senderNodeId) if clusterMembers.notContains(senderNodeId) =>
      val senderRef = sender()
      context.watch(senderRef)
      clusterMembers = clusterMembers.add(senderNodeId, StorageNodeActorRef(senderRef))
      senderRef ! RegisterNode(nodeId)
      logger.info(s"Actor[$name]: Successfully registered node [id-${senderNodeId.id}]")
    case RegisterNode(senderNodeId) =>
      logger.info(s"Actor[$name]: Node [id-${senderNodeId.id}] is already registered")
  }

  private[this] def register(nodeId: NodeId, ring: Ring, member: Member) = {
    (member.hasRole(StorageNodeActor.role), datacenter.name == member.dataCenter) match {
      case (true, true) => register()
      case (_, false)   => logger.info(s"Actor[$name]: $member doesn't belong to datacenter [${datacenter.name}]")
      case (false, _)   => logger.info(s"Actor[$name]: $member doesn't have [${StorageNodeActor.role}] role (it has roles ${member.roles})")
    }

    def register() = for {
      ringNodeId <- ring.nodesId
      nodeName    = StorageNodeActor.name(ringNodeId, Datacenter(member.dataCenter))
      nodeRef     = context.actorSelection(RootActorPath(member.address) / "user" / nodeName)
    } yield nodeRef ! RegisterNode(nodeId)
  }

  private[this] def notHandledPF: Receive = {
    case t => logger.warn(s"Actor[$name]: Not handled message [$t]")
  }
}

object StorageNodeActor {
  def role: String = "storagenode"
  def name(nodeId: NodeId, datacenter: Datacenter): String = s"${datacenter.name}-id-${nodeId.id}"
  def props(nodeId: NodeId, datacenter: Datacenter, storage: PluggableStorageProtocol, ring: Ring, n: N): Props = {
    Props(new StorageNodeActor(nodeId, datacenter, storage, ring, n))
  }
}

case class StorageNodeActorRef(ref: ActorRef) extends AnyVal
Example 197
Source File: ReplicaWriteCoordinator.scala From JustinDB with Apache License 2.0 | 5 votes |
package justin.db.replica.write

import java.util.UUID

import justin.db._
import justin.db.actors.protocol._
import justin.db.cluster.ClusterMembers
import justin.db.consistenthashing.{NodeId, Ring, UUID2RingPartitionId}
import justin.db.replica._

import scala.concurrent.{ExecutionContext, Future}

class ReplicaWriteCoordinator(
  nodeId: NodeId, ring: Ring, n: N,
  localDataWriter: ReplicaLocalWriter,
  remoteDataWriter: ReplicaRemoteWriter
)(implicit ec: ExecutionContext) extends ((StorageNodeWriteRequest, ClusterMembers) => Future[StorageNodeWriteResponse]) {

  override def apply(cmd: StorageNodeWriteRequest, clusterMembers: ClusterMembers): Future[StorageNodeWriteResponse] = cmd match {
    case StorageNodeWriteDataLocal(data) => writeLocal(data)
    case Internal.WriteReplica(w, data)  => coordinateReplicated(w, data, clusterMembers)
  }

  private def writeLocal(data: Data) = localDataWriter.apply(data, new IsPrimaryOrReplica(nodeId, ring))

  private def coordinateReplicated(w: W, data: Data, clusterMembers: ClusterMembers) = {
    val ringPartitionId = UUID2RingPartitionId.apply(data.id, ring)
    PreferenceList(ringPartitionId, n, ring).fold(onLeft(data.id), onRight(w, data, clusterMembers))
  }

  // TODO: rename to "onFailure"
  private def onLeft(id: UUID)(err: PreferenceList.Error) = Future.successful(StorageNodeFailedWrite(id))

  // TODO: rename to "onSuccess"
  private def onRight(w: W, data: Data, clusterMembers: ClusterMembers)(preferenceList: PreferenceList) = {
    val updatedData = Data.updateVclock(data, preferenceList)
    makeWrites(w, updatedData, clusterMembers, preferenceList)
      .map(new ReplicaWriteAgreement().reach(w))
      .map(consensus2WritingResult(updatedData.id))
  }

  private def makeWrites(w: W, updatedData: Data, clusterMembers: ClusterMembers, preferenceList: PreferenceList) = {
    ResolveNodeAddresses(nodeId, preferenceList, clusterMembers) match {
      case ResolvedNodeAddresses(true, remotes) if remotes.size + 1 >= w.w => (writeLocal(updatedData) zip remoteDataWriter(remotes, updatedData)).map(converge)
      case ResolvedNodeAddresses(false, remotes) if remotes.size >= w.w    => remoteDataWriter(remotes, updatedData)
      case _                                                               => Future.successful(List(StorageNodeFailedWrite(updatedData.id)))
    }
  }

  private def consensus2WritingResult(id: UUID): WriteAgreement => StorageNodeWriteResponse = {
    case WriteAgreement.NotEnoughWrites => StorageNodeFailedWrite(id)
    case WriteAgreement.Ok              => StorageNodeSuccessfulWrite(id)
  }
}
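makeWrites only attempts replication when enough nodes are reachable to possibly satisfy the write quorum; the local node counts toward w when it belongs to the preference list. The arithmetic in isolation (an illustrative model, not JustinDB's types):

// Sketch: a write succeeds once acknowledgements reach w out of the n replicas.
final case class WriteQuorum(n: Int, w: Int) {
  require(w >= 1 && w <= n, "need 1 <= w <= n")
  def canProceed(reachableRemotes: Int, localIsReplica: Boolean): Boolean =
    reachableRemotes + (if (localIsReplica) 1 else 0) >= w
}

// e.g. WriteQuorum(n = 3, w = 2).canProceed(reachableRemotes = 1, localIsReplica = true) == true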
Example 198
Source File: ReplicaLocalWriter.scala From JustinDB with Apache License 2.0 | 5 votes |
package justin.db.replica.write

import justin.db.Data
import justin.db.actors.protocol.{StorageNodeConflictedWrite, StorageNodeFailedWrite, StorageNodeSuccessfulWrite, StorageNodeWriteResponse}
import justin.db.replica.IsPrimaryOrReplica
import justin.db.storage.PluggableStorageProtocol.StorageGetData
import justin.db.storage.{GetStorageProtocol, PutStorageProtocol}
import justin.db.vectorclocks.VectorClockComparator
import justin.db.vectorclocks.VectorClockComparator.VectorClockRelation

import scala.concurrent.{ExecutionContext, Future}

class ReplicaLocalWriter(storage: GetStorageProtocol with PutStorageProtocol)(implicit ec: ExecutionContext) {

  def apply(newData: Data, isPrimaryOrReplica: IsPrimaryOrReplica): Future[StorageNodeWriteResponse] = {
    storage.get(newData.id)(isPrimaryOrReplica).flatMap {
      case StorageGetData.None            => putSingleSuccessfulWrite(newData, isPrimaryOrReplica)
      case StorageGetData.Single(oldData) => handleExistedSingleData(oldData, newData, isPrimaryOrReplica)
    } recover { case _ => StorageNodeFailedWrite(newData.id) }
  }

  private def handleExistedSingleData(oldData: Data, newData: Data, isPrimaryOrReplica: IsPrimaryOrReplica) = {
    new VectorClockComparator().apply(oldData.vclock, newData.vclock) match {
      case VectorClockRelation.Predecessor => Future.successful(StorageNodeFailedWrite(newData.id))
      case VectorClockRelation.Conflict    => Future.successful(StorageNodeConflictedWrite(oldData, newData))
      case VectorClockRelation.Consequent  => putSingleSuccessfulWrite(newData, isPrimaryOrReplica)
    }
  }

  private def putSingleSuccessfulWrite(newData: Data, resolveDataOriginality: IsPrimaryOrReplica) = {
    storage.put(newData)(resolveDataOriginality).map(_ => StorageNodeSuccessfulWrite(newData.id))
  }
}
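The write decision hinges entirely on the vector-clock relation between the stored and incoming data; summarized as a sketch:

// Relation of stored vclock vs. incoming vclock, and the resulting response:
//   Predecessor -> StorageNodeFailedWrite      (stale write, rejected)
//   Conflict    -> StorageNodeConflictedWrite  (concurrent versions surfaced to the caller)
//   Consequent  -> StorageNodeSuccessfulWrite  (strictly newer write, persisted)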
Example 199
Source File: ReplicaRemoteWriter.scala From JustinDB with Apache License 2.0 | 5 votes |
package justin.db.replica.write

import akka.pattern.ask
import akka.util.Timeout
import justin.db.Data
import justin.db.actors.StorageNodeActorRef
import justin.db.actors.protocol.{StorageNodeFailedWrite, StorageNodeWriteDataLocal, StorageNodeWriteResponse}

import scala.concurrent.duration._
import scala.concurrent.{ExecutionContext, Future}

class ReplicaRemoteWriter(implicit ec: ExecutionContext) {

  private implicit val timeout = Timeout(3.seconds) // TODO: tune this value

  def apply(storageNodeRefs: List[StorageNodeActorRef], data: Data): Future[List[StorageNodeWriteResponse]] = {
    Future.sequence(storageNodeRefs.map(putLocalValue(_, data)))
  }

  private def putLocalValue(node: StorageNodeActorRef, data: Data): Future[StorageNodeWriteResponse] = {
    (node.ref ? StorageNodeWriteDataLocal(data))
      .mapTo[StorageNodeWriteResponse]
      .recover { case _ => StorageNodeFailedWrite(data.id) }
  }
}
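Because each ask is individually recovered into StorageNodeFailedWrite, one slow or dead replica merely loses its vote; Future.sequence never fails wholesale. The shape in isolation (illustrative helper, not JustinDB code):

import scala.concurrent.{ ExecutionContext, Future }

// Sketch: turn per-node failures into fallback votes so the aggregate future survives.
def gather[A](calls: List[Future[A]], fallback: A)(implicit ec: ExecutionContext): Future[List[A]] =
  Future.sequence(calls.map(_.recover { case _ => fallback }))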
Example 200
Source File: ReplicaReadCoordinator.scala From JustinDB with Apache License 2.0 | 5 votes |
package justin.db.replica.read

import java.util.UUID

import justin.db._
import justin.db.actors.protocol._
import justin.db.cluster.ClusterMembers
import justin.db.consistenthashing.{NodeId, Ring, UUID2RingPartitionId}
import justin.db.replica._

import scala.concurrent.{ExecutionContext, Future}

class ReplicaReadCoordinator(
  nodeId: NodeId, ring: Ring, n: N,
  localDataReader: ReplicaLocalReader,
  remoteDataReader: ReplicaRemoteReader
)(implicit ec: ExecutionContext) extends ((StorageNodeReadRequest, ClusterMembers) => Future[StorageNodeReadResponse]) {

  override def apply(cmd: StorageNodeReadRequest, clusterMembers: ClusterMembers): Future[StorageNodeReadResponse] = cmd match {
    case StorageNodeLocalRead(id)    => readLocalData(id)
    case Internal.ReadReplica(r, id) => coordinateReplicated(r, id, clusterMembers)
  }

  private def readLocalData(id: UUID) = localDataReader.apply(id, new IsPrimaryOrReplica(nodeId, ring))

  private def coordinateReplicated(r: R, id: UUID, clusterMembers: ClusterMembers) = {
    val partitionId = UUID2RingPartitionId.apply(id, ring)
    PreferenceList(partitionId, n, ring).fold(onLeft(id), onRight(r, id, clusterMembers))
  }

  private def onLeft(id: UUID)(err: PreferenceList.Error) = Future.successful(StorageNodeFailedRead(id))

  private def onRight(r: R, id: UUID, clusterMembers: ClusterMembers)(preferenceList: PreferenceList) = {
    gatherReads(r, id, clusterMembers, preferenceList).map { reads =>
      val consensus = new ReplicaReadAgreement().reach(r)(reads)
      consensus2ReadingResult(id)(consensus)
    }
  }

  private def gatherReads(r: R, id: UUID, clusterMembers: ClusterMembers, preferenceList: PreferenceList) = {
    ResolveNodeAddresses(nodeId, preferenceList, clusterMembers) match {
      case ResolvedNodeAddresses(true, remotes) if remotes.size + 1 >= r.r => (readLocalData(id) zip remoteDataReader.apply(remotes, id)).map(converge)
      case ResolvedNodeAddresses(false, remotes) if remotes.size >= r.r    => remoteDataReader.apply(remotes, id)
      case _                                                               => Future.successful(List(StorageNodeFailedRead(id)))
    }
  }

  private def consensus2ReadingResult(id: => UUID): ReadAgreement => StorageNodeReadResponse = {
    case ReadAgreement.Consequent(data) => StorageNodeFoundRead(data)
    case ReadAgreement.Found(data)      => StorageNodeFoundRead(data)
    case ReadAgreement.Conflicts(data)  => StorageNodeConflictedRead(data)
    case ReadAgreement.NotEnoughFound   => StorageNodeNotFoundRead(id)
    case ReadAgreement.AllFailed        => StorageNodeFailedRead(id)
    case ReadAgreement.AllNotFound      => StorageNodeNotFoundRead(id)
  }
}