scala.concurrent.Future Scala Examples
The following examples show how to use scala.concurrent.Future.
You can vote up the ones you like or vote down the ones you don't like,
and go to the original project or source file by following the links above each example.
Example 1
Source File: TimeLimitedFutureSpec.scala From gfc-concurrent with Apache License 2.0 | 9 votes |
package com.gilt.gfc.concurrent import java.util.concurrent.TimeoutException import scala.concurrent.{ Future, Await } import scala.concurrent.ExecutionContext.Implicits.global import scala.concurrent.duration.Duration import org.scalatest.{WordSpec, Matchers} class TimeLimitedFutureSpec extends WordSpec with Matchers { import TimeLimitedFutureSpec._ "RichFuture" when { import ScalaFutures._ "waiting for a result to happen" should { "return the completed original Future if it completes before the given timeout" in { val now = System.currentTimeMillis val future: Future[String] = (Future { Thread.sleep(1000); "Here I am" }).withTimeout(Duration(5, "seconds")) val msg: String = Await.result(future, Duration(10, "seconds")) val elapsed = (System.currentTimeMillis - now) msg should equal ("Here I am") elapsed should be (2000L +- 1000L) } "return the failure of the original Future if it fails before the given timeout" in { val now = System.currentTimeMillis val future = (Future { Thread.sleep(1000); throw new NullPointerException("That hurts!") }).withTimeout(Duration(5, "seconds")) a [NullPointerException] should be thrownBy { Await.result(future, Duration(10, "seconds")) } val elapsed = (System.currentTimeMillis - now) elapsed should be (2000L +- 1000L) } "return the timeout of the original Future if it had one and it went off and was shorter than the given one" in { val now = System.currentTimeMillis val timingOutEarlier = Timeouts.timeout(Duration(1, "seconds")) val future = timingOutEarlier.withTimeout(Duration(5, "seconds")) a [TimeoutException] should be thrownBy { Await.result(future, Duration(10, "seconds")) } val elapsed: Long = (System.currentTimeMillis - now) elapsed should be >= 500l elapsed should be <= 4000l } "return the timeout if the original Future does not timeout of its own" in { val now = System.currentTimeMillis val timingOutLater = Timeouts.timeout(Duration(3, "seconds")) val future = timingOutLater.withTimeout(Duration(1, "seconds")) a [TimeoutException] should be thrownBy { Await.result(future, Duration(10, "seconds")) } val elapsed: Long = (System.currentTimeMillis - now) elapsed should be >= 1000l elapsed should be <= 2500l } } // an example of how it could be used "used in our most common use case" should { "fit nicely" in { val call: Future[String] = svcCall(1000).withTimeout(Duration(5000, "milliseconds")).recover { case _: TimeoutException => "recover.timeout" case other => s"recover.${other.getMessage}" } Await.result(call, Duration(10, "seconds")) should be ("data-1000") val call2: Future[String] = svcCall(5000).withTimeout(Duration(1000, "milliseconds")).recover { case _: TimeoutException => "recover.timeout" case other => s"recover.${other.getMessage}" } Await.result(call2, Duration(10, "seconds")) should be ("recover.timeout") } } } } object TimeLimitedFutureSpec { def svcCall(latency: Long): Future[String] = Future { Thread.sleep(latency); s"data-${latency}" } }
Example 2
Source File: Launcher.scala From sparkplug with MIT License | 7 votes |
package springnz.sparkplug.client import java.net.{ URLEncoder, InetAddress } import better.files._ import com.typesafe.config.{ ConfigRenderOptions, Config } import org.apache.spark.launcher.SparkLauncher import springnz.sparkplug.util.{ BuilderOps, ConfigUtils, Logging, Pimpers } import scala.concurrent.ExecutionContext.Implicits.global import scala.concurrent.Future import scala.util.{ Properties, Try } object Launcher extends Logging { import BuilderOps._ import Pimpers._ def startProcess(launcher: SparkLauncher): Future[Unit] = { val processFuture = Future { launcher.launch() }.withErrorLog("Failed to launch: ") processFuture.flatMap { process ⇒ executeProcess(process) } } private def executeProcess(process: Process): Future[Unit] = Future { val outStream = scala.io.Source.fromInputStream(process.getInputStream) for (line ← outStream.getLines()) { log.info(line) } val errorStream = scala.io.Source.fromInputStream(process.getErrorStream) for (line ← errorStream.getLines()) { log.info(line) } process.waitFor() } def launch(clientAkkaAddress: String, jarPath: File, mainJarPattern: String, mainClass: String, sparkConfig: Config, akkaRemoteConfig: Option[Config], sendJars: Boolean = true): Try[Future[Unit]] = Try { val fullExtraJarFolder = jarPath.pathAsString val sparkHome = Properties.envOrNone("SPARK_HOME") val sparkMaster = Properties.envOrElse("SPARK_MASTER", s"spark://${InetAddress.getLocalHost.getHostAddress}:7077") log.debug(s"Spark master set to: $sparkMaster") // TODO: enable this functionality (need Spark 1.5 for this) // val sparkArgs: Array[String] = config.getString("spark.submit.sparkargs").split(' ') if (!sparkMaster.startsWith("local[") && !sparkHome.isDefined) throw new RuntimeException("If 'SPARK_MASTER' is not set to local, 'SPARK_HOME' must be set.") val appName = mainClass.split('.').last val mainJar = jarPath.glob(mainJarPattern).collectFirst { case f ⇒ f.pathAsString } val configVars: Seq[(String, String)] = ConfigUtils.configFields(sparkConfig).toSeq val akkaRemoteConfigString = akkaRemoteConfig.map { config ⇒ val configString = config.root().render(ConfigRenderOptions.concise()) URLEncoder.encode(configString, "UTF-8") } val launcher = (new SparkLauncher) .setIfSome[String](mainJar) { (l, mj) ⇒ l.setAppResource(mj) } .setMainClass(mainClass) .setAppName(appName) .setMaster(sparkMaster) .setIfSome[String](sparkHome) { (l, sh) ⇒ l.setSparkHome(sh) } .addAppArgs("appName", appName) .addAppArgs("clientAkkaAddress", clientAkkaAddress) .setIfSome(akkaRemoteConfigString) { (l, config) ⇒ l.addAppArgs("remoteAkkaConfig", config) } .setFoldLeft(configVars) { case (launcher, (key, value)) ⇒ launcher.setConf(key, value) } .setDeployMode(sparkConfig.getString("spark.deploymode")) val extraJarFiles = jarPath.glob("*.jar") .map { case f ⇒ f.pathAsString } .filterNot(_.contains("/akka-")) val launcherWithJars = if (sendJars) extraJarFiles.foldLeft(launcher) { case (l, jarFile) ⇒ l.addJar(jarFile) } else if (extraJarFiles.length == 0) launcher else launcher .setConf(SparkLauncher.DRIVER_EXTRA_CLASSPATH, s"$fullExtraJarFolder/*") .setConf(SparkLauncher.EXECUTOR_EXTRA_CLASSPATH, s"$fullExtraJarFolder/*") startProcess(launcherWithJars) } }
Example 3
Source File: PreferencesFrontendService.scala From pertax-frontend with Apache License 2.0 | 6 votes |
package services import com.kenshoo.play.metrics.Metrics import config.ConfigDecorator import controllers.auth.requests.UserRequest import com.google.inject.{Inject, Singleton} import metrics.HasMetrics import models.{ActivatePaperlessActivatedResponse, ActivatePaperlessNotAllowedResponse, ActivatePaperlessRequiresUserActionResponse, ActivatePaperlessResponse} import play.api.Mode.Mode import play.api.i18n.{I18nSupport, Messages, MessagesApi} import play.api.libs.json.{JsObject, Json} import play.api.{Configuration, Environment, Logger} import uk.gov.hmrc.play.bootstrap.http.DefaultHttpClient import uk.gov.hmrc.crypto.PlainText import uk.gov.hmrc.play.bootstrap.config.ServicesConfig import uk.gov.hmrc.play.bootstrap.filters.frontend.crypto.SessionCookieCrypto import uk.gov.hmrc.play.partials.HeaderCarrierForPartialsConverter import util.Tools import scala.concurrent.{ExecutionContext, Future} @Singleton class PreferencesFrontendService @Inject()( environment: Environment, runModeConfiguration: Configuration, val simpleHttp: DefaultHttpClient, val messagesApi: MessagesApi, val metrics: Metrics, val configDecorator: ConfigDecorator, val sessionCookieCrypto: SessionCookieCrypto, val tools: Tools, servicesConfig: ServicesConfig)(implicit ec: ExecutionContext) extends HeaderCarrierForPartialsConverter with HasMetrics with I18nSupport { val mode: Mode = environment.mode val preferencesFrontendUrl = servicesConfig.baseUrl("preferences-frontend") override def crypto: String => String = cookie => cookie def getPaperlessPreference()(implicit request: UserRequest[_]): Future[ActivatePaperlessResponse] = { def absoluteUrl = configDecorator.pertaxFrontendHost + request.uri def activatePaperless: Future[ActivatePaperlessResponse] = withMetricsTimer("get-activate-paperless") { timer => val url = s"$preferencesFrontendUrl/paperless/activate?returnUrl=${tools.encryptAndEncode(absoluteUrl)}&returnLinkText=${tools .encryptAndEncode(Messages("label.continue"))}" //TODO remove ref to Messages simpleHttp.PUT[JsObject, ActivatePaperlessResponse](url, Json.obj("active" -> true)) map { case ActivatePaperlessActivatedResponse => timer.completeTimerAndIncrementSuccessCounter() ActivatePaperlessActivatedResponse case response: ActivatePaperlessRequiresUserActionResponse => timer.completeTimerAndIncrementSuccessCounter() response case ActivatePaperlessNotAllowedResponse => timer.completeTimerAndIncrementFailedCounter() ActivatePaperlessNotAllowedResponse } recover { case e => timer.completeTimerAndIncrementFailedCounter() Logger.warn("Error getting paperless preference record from preferences-frontend-service", e) ActivatePaperlessNotAllowedResponse } } if (request.isGovernmentGateway) { activatePaperless } else { Future.successful(ActivatePaperlessNotAllowedResponse) } } }
Example 4
Source File: SessionAuditor.scala From pertax-frontend with Apache License 2.0 | 6 votes |
package controllers.auth import com.google.inject.Inject import controllers.auth.SessionAuditor._ import controllers.auth.requests.AuthenticatedRequest import play.api.Logger import play.api.libs.json.{Format, Json} import play.api.mvc.Result import uk.gov.hmrc.auth.core.retrieve.Credentials import uk.gov.hmrc.auth.core.{ConfidenceLevel, Enrolment} import uk.gov.hmrc.domain.{Nino, SaUtr} import uk.gov.hmrc.http.HeaderCarrier import uk.gov.hmrc.play.audit.http.connector.AuditConnector import uk.gov.hmrc.play.audit.http.connector.AuditResult.{Failure, Success} import uk.gov.hmrc.play.audit.model.ExtendedDataEvent import util.AuditServiceTools import scala.concurrent.{ExecutionContext, Future} private[auth] class SessionAuditor @Inject()(auditConnector: AuditConnector)(implicit ec: ExecutionContext) extends AuditTags { val logger = Logger(this.getClass) def auditOnce[A](request: AuthenticatedRequest[A], result: Result)(implicit hc: HeaderCarrier): Future[Result] = request.session.get(sessionKey) match { case None => logger.info(request.profile.toString) val eventDetail = UserSessionAuditEvent(request) val sendAuditEvent = auditConnector .sendExtendedEvent( ExtendedDataEvent( auditSource = AuditServiceTools.auditSource, auditType = auditType, detail = Json.toJson(eventDetail), tags = buildTags(request)) ) .recover { case e: Exception => Logger.warn(s"Unable to audit: ${e.getMessage}") Failure("UserSessionAuditor.auditOncePerSession exception occurred whilst auditing", Some(e)) } sendAuditEvent.map { case Success => result.addingToSession(sessionKey -> "true")(request) case _ => result } case _ => Future.successful(result) } } object SessionAuditor { val sessionKey = "sessionAudited" val auditType = "user-session-visit" case class UserSessionAuditEvent( nino: Option[Nino], credentials: Credentials, confidenceLevel: ConfidenceLevel, name: Option[String], saUtr: Option[SaUtr], allEnrolments: Set[Enrolment]) object UserSessionAuditEvent { def apply[A](request: AuthenticatedRequest[A]): UserSessionAuditEvent = { val nino = request.nino val credentials = request.credentials val confidenceLevel = request.confidenceLevel val name = request.name map (_.toString) val saUtr = request.saEnrolment map (_.saUtr) val enrolments = request.enrolments UserSessionAuditEvent(nino, credentials, confidenceLevel, name, saUtr, enrolments) } implicit val credentialsFormats = Json.format[Credentials] implicit val formats: Format[UserSessionAuditEvent] = Json.format[UserSessionAuditEvent] } }
Example 5
Source File: PersonalDetailsControllerSpec.scala From pertax-frontend with Apache License 2.0 | 6 votes |
package controllers.address import config.ConfigDecorator import controllers.auth.requests.UserRequest import controllers.auth.{AuthJourney, WithActiveTabAction} import controllers.controllershelpers.{AddressJourneyCachingHelper, PersonalDetailsCardGenerator} import models.AddressJourneyTTLModel import models.dto.AddressPageVisitedDto import org.mockito.ArgumentCaptor import org.mockito.Mockito.{times, verify, when} import org.mockito.Matchers.{eq => meq, _} import org.scalatestplus.mockito.MockitoSugar import play.api.http.Status.OK import play.api.libs.json.Json import play.api.mvc.{MessagesControllerComponents, Request, Result} import play.api.test.FakeRequest import repositories.EditAddressLockRepository import services.{LocalSessionCache, NinoDisplayService} import uk.gov.hmrc.http.cache.client.CacheMap import uk.gov.hmrc.play.audit.http.connector.{AuditConnector, AuditResult} import uk.gov.hmrc.play.audit.model.DataEvent import uk.gov.hmrc.renderer.TemplateRenderer import util.UserRequestFixture.buildUserRequest import util.{ActionBuilderFixture, BaseSpec, Fixtures, LocalPartialRetriever} import views.html.interstitial.DisplayAddressInterstitialView import views.html.personaldetails.{AddressAlreadyUpdatedView, CannotUseServiceView, PersonalDetailsView} import scala.concurrent.{ExecutionContext, Future} class PersonalDetailsControllerSpec extends AddressBaseSpec { val ninoDisplayService = mock[NinoDisplayService] trait LocalSetup extends AddressControllerSetup { when(ninoDisplayService.getNino(any(), any())).thenReturn { Future.successful(Some(Fixtures.fakeNino)) } def currentRequest[A]: Request[A] = FakeRequest().asInstanceOf[Request[A]] def controller = new PersonalDetailsController( injected[PersonalDetailsCardGenerator], mockEditAddressLockRepository, ninoDisplayService, mockAuthJourney, addressJourneyCachingHelper, withActiveTabAction, mockAuditConnector, cc, displayAddressInterstitialView, injected[PersonalDetailsView] ) {} "Calling AddressController.onPageLoad" should { "call citizenDetailsService.fakePersonDetails and return 200" in new LocalSetup { override def sessionCacheResponse: Option[CacheMap] = Some(CacheMap("id", Map("addressPageVisitedDto" -> Json.toJson(AddressPageVisitedDto(true))))) val result = controller.onPageLoad()(FakeRequest()) status(result) shouldBe OK verify(mockLocalSessionCache, times(1)) .cache(meq("addressPageVisitedDto"), meq(AddressPageVisitedDto(true)))(any(), any(), any()) verify(mockEditAddressLockRepository, times(1)).get(any()) } "send an audit event when user arrives on personal details page" in new LocalSetup { override def sessionCacheResponse: Option[CacheMap] = Some(CacheMap("id", Map("addressPageVisitedDto" -> Json.toJson(AddressPageVisitedDto(true))))) val result = controller.onPageLoad()(FakeRequest()) val eventCaptor = ArgumentCaptor.forClass(classOf[DataEvent]) status(result) shouldBe OK verify(mockAuditConnector, times(1)).sendEvent(eventCaptor.capture())(any(), any()) } } } }
Example 6
Source File: TimeBoundObserver.scala From daml with Apache License 2.0 | 6 votes |
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. // SPDX-License-Identifier: Apache-2.0 package com.daml.platform.testing import com.daml.timer.Delayed import io.grpc.Context import io.grpc.stub.StreamObserver import scala.concurrent.duration.FiniteDuration import scala.concurrent.{ExecutionContext, Future, Promise} final class TimeBoundObserver[T](duration: FiniteDuration)( implicit executionContext: ExecutionContext) extends StreamObserver[T] { private val promise = Promise[Vector[T]] private val buffer = Vector.newBuilder[T] Delayed.by(duration)(onCompleted()) def result: Future[Vector[T]] = promise.future override def onNext(value: T): Unit = { buffer += value } override def onError(t: Throwable): Unit = { val _ = promise.tryFailure(t) } override def onCompleted(): Unit = { val _succeeded = promise.trySuccess(buffer.result()) val _cancelled = Context.current().withCancellation().cancel(null) } }
Example 7
Source File: ApiLedgerIdentityService.scala From daml with Apache License 2.0 | 6 votes |
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. // SPDX-License-Identifier: Apache-2.0 package com.daml.platform.apiserver.services import com.daml.dec.DirectExecutionContext import com.daml.ledger.api.domain.LedgerId import com.daml.ledger.api.v1.ledger_identity_service.LedgerIdentityServiceGrpc.{ LedgerIdentityService => GrpcLedgerIdentityService } import com.daml.ledger.api.v1.ledger_identity_service.{ GetLedgerIdentityRequest, GetLedgerIdentityResponse, LedgerIdentityServiceGrpc } import com.daml.logging.{ContextualizedLogger, LoggingContext} import com.daml.platform.api.grpc.GrpcApiService import com.daml.platform.server.api.ApiException import io.grpc.{BindableService, ServerServiceDefinition, Status} import scalaz.syntax.tag._ import scala.concurrent.Future final class ApiLedgerIdentityService private (getLedgerId: () => Future[LedgerId])( implicit logCtx: LoggingContext) extends GrpcLedgerIdentityService with GrpcApiService { @volatile var closed = false private val logger = ContextualizedLogger.get(this.getClass) override def getLedgerIdentity( request: GetLedgerIdentityRequest): Future[GetLedgerIdentityResponse] = if (closed) Future.failed( new ApiException( Status.UNAVAILABLE .withDescription("Ledger Identity Service closed."))) else { getLedgerId() .map(ledgerId => GetLedgerIdentityResponse(ledgerId.unwrap))(DirectExecutionContext) .andThen(logger.logErrorsOnCall[GetLedgerIdentityResponse])(DirectExecutionContext) } override def close(): Unit = closed = true override def bindService(): ServerServiceDefinition = LedgerIdentityServiceGrpc.bindService(this, DirectExecutionContext) } object ApiLedgerIdentityService { def create(getLedgerId: () => Future[LedgerId])( implicit logCtx: LoggingContext): ApiLedgerIdentityService with BindableService = { new ApiLedgerIdentityService(getLedgerId) } }
Example 8
Source File: GrpcServerOwner.scala From daml with Apache License 2.0 | 6 votes |
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. // SPDX-License-Identifier: Apache-2.0 package com.daml.platform.apiserver import java.io.IOException import java.net.{BindException, InetAddress, InetSocketAddress} import java.util.concurrent.TimeUnit.SECONDS import com.daml.metrics.Metrics import com.daml.platform.apiserver.GrpcServerOwner._ import com.daml.ports.Port import com.daml.resources.{Resource, ResourceOwner} import com.google.protobuf.Message import io.grpc.netty.NettyServerBuilder import io.grpc._ import io.netty.channel.socket.nio.NioServerSocketChannel import io.netty.handler.ssl.SslContext import scala.concurrent.{ExecutionContext, Future} import scala.util.control.NoStackTrace final class GrpcServerOwner( address: Option[String], desiredPort: Port, maxInboundMessageSize: Int, sslContext: Option[SslContext] = None, interceptors: List[ServerInterceptor] = List.empty, metrics: Metrics, eventLoopGroups: ServerEventLoopGroups, services: Iterable[BindableService], ) extends ResourceOwner[Server] { override def acquire()(implicit executionContext: ExecutionContext): Resource[Server] = { val host = address.map(InetAddress.getByName).getOrElse(InetAddress.getLoopbackAddress) Resource(Future { val builder = NettyServerBuilder.forAddress(new InetSocketAddress(host, desiredPort.value)) builder.sslContext(sslContext.orNull) builder.channelType(classOf[NioServerSocketChannel]) builder.permitKeepAliveTime(10, SECONDS) builder.permitKeepAliveWithoutCalls(true) builder.directExecutor() builder.maxInboundMessageSize(maxInboundMessageSize) interceptors.foreach(builder.intercept) builder.intercept(new MetricsInterceptor(metrics)) eventLoopGroups.populate(builder) services.foreach { service => builder.addService(service) toLegacyService(service).foreach(builder.addService) } val server = builder.build() try { server.start() } catch { case e: IOException if e.getCause != null && e.getCause.isInstanceOf[BindException] => throw new UnableToBind(desiredPort, e.getCause) } server })(server => Future(server.shutdown().awaitTermination())) } // This exposes the existing services under com.daml also under com.digitalasset. // This is necessary to allow applications built with an earlier version of the SDK // to still work. // The "proxy" services will not show up on the reflection service, because of the way it // processes service definitions via protobuf file descriptors. private def toLegacyService(service: BindableService): Option[ServerServiceDefinition] = { val `com.daml` = "com.daml" val `com.digitalasset` = "com.digitalasset" val damlDef = service.bindService() val damlDesc = damlDef.getServiceDescriptor // Only add "proxy" services if it actually contains com.daml in the service name. // There are other services registered like the reflection service, that doesn't need the special treatment. 
if (damlDesc.getName.contains(`com.daml`)) { val digitalassetName = damlDesc.getName.replace(`com.daml`, `com.digitalasset`) val digitalassetDef = ServerServiceDefinition.builder(digitalassetName) damlDef.getMethods.forEach { methodDef => val damlMethodDesc = methodDef.getMethodDescriptor val digitalassetMethodName = damlMethodDesc.getFullMethodName.replace(`com.daml`, `com.digitalasset`) val digitalassetMethodDesc = damlMethodDesc.toBuilder.setFullMethodName(digitalassetMethodName).build() val _ = digitalassetDef.addMethod( digitalassetMethodDesc.asInstanceOf[MethodDescriptor[Message, Message]], methodDef.getServerCallHandler.asInstanceOf[ServerCallHandler[Message, Message]] ) } Option(digitalassetDef.build()) } else None } } object GrpcServerOwner { final class UnableToBind(port: Port, cause: Throwable) extends RuntimeException( s"The API server was unable to bind to port $port. Terminate the process occupying the port, or choose a different one.", cause) with NoStackTrace }
Example 9
Source File: EventsByInterval.scala From spark-streaming-demo with Apache License 2.0 | 6 votes |
package com.datastax.examples.meetup import com.datastax.driver.core.{Cluster, Session, Row} import com.websudos.phantom.CassandraTable import com.websudos.phantom.Implicits._ import scala.concurrent.Future case class EventModel ( event: String, interval: String, dimension: String, subtotal: Long ) sealed class EventRecord extends CassandraTable[EventRecord, EventModel] { override val tableName = "events_by_interval" object event extends StringColumn(this) with PartitionKey[String] object interval extends StringColumn(this) with ClusteringOrder[String] with Descending object dimension extends StringColumn(this) with ClusteringOrder[String] with Ascending object subtotal extends CounterColumn(this) override def fromRow(row: Row): EventModel = { EventModel( event(row), interval(row), dimension(row), subtotal(row) ) } } object Event extends EventRecord { val keyspace = "demo" val cluster = Cluster.builder().addContactPoint("127.0.0.1").build() implicit val session = cluster.connect(keyspace) // def hourly(hashtag: String): Future[Seq[(String, Long)]] = { // select (_.interval, _.subtotal) where (_.event eqs hashtag) and (_.interval gte "M") and (_.interval lt "N") limit 60 fetch // } def dimensions(event: String, interval: String): Future[Seq[(String, Long)]] = { select (_.dimension, _.subtotal) where (_.event eqs event) and (_.interval eqs interval) limit 500 fetch } }
Example 10
Source File: Components.scala From gbf-raidfinder with MIT License | 6 votes |
package walfie.gbf.raidfinder.server import akka.actor.ActorSystem import akka.stream.Materializer import com.trueaccord.scalapb.json.JsonFormat import monix.execution.Scheduler import play.api.BuiltInComponents import play.api.http.{ContentTypes, DefaultHttpErrorHandler} import play.api.libs.json.Json import play.api.Mode.Mode import play.api.mvc._ import play.api.routing.Router import play.api.routing.sird._ import play.core.server._ import play.filters.cors.{CORSConfig, CORSFilter} import play.filters.gzip.GzipFilterComponents import scala.concurrent.duration.FiniteDuration import scala.concurrent.Future import walfie.gbf.raidfinder.protocol.{RaidBossesResponse, BinaryProtobuf} import walfie.gbf.raidfinder.RaidFinder import walfie.gbf.raidfinder.server.controller._ import walfie.gbf.raidfinder.server.syntax.ProtocolConverters.RaidBossDomainOps class Components( raidFinder: RaidFinder[BinaryProtobuf], translator: BossNameTranslator, port: Int, mode: Mode, websocketKeepAliveInterval: FiniteDuration, metricsCollector: MetricsCollector ) extends NettyServerComponents with BuiltInComponents with GzipFilterComponents with Controller { override lazy val serverConfig = ServerConfig(port = Some(port), mode = mode) private val corsFilter = new CORSFilter(corsConfig = CORSConfig().withAnyOriginAllowed) override lazy val httpFilters = List(gzipFilter, corsFilter) lazy val websocketController = new WebsocketController( raidFinder, translator, websocketKeepAliveInterval, metricsCollector )(actorSystem, materializer, Scheduler.Implicits.global) // The charset isn't necessary, but without it, Chrome displays Japanese incorrectly // if you try to view the JSON directly. // https://bugs.chromium.org/p/chromium/issues/detail?id=438464 private val ContentTypeJsonWithUtf8 = "application/json; charset=utf-8" lazy val router = Router.from { case GET(p"/") => controllers.Assets.at(path = "/public", "index.html") case GET(p"/api/bosses.json" ? q_s"name=$names") => val bosses = if (names.nonEmpty) { val knownBossesMap = raidFinder.getKnownBosses names.collect(knownBossesMap) } else raidFinder.getKnownBosses.values val responseProtobuf = RaidBossesResponse( raidBosses = bosses.map(_.toProtocol(translator)).toSeq ) val responseJson = JsonFormat.toJsonString(responseProtobuf) Action(Ok(responseJson).as(ContentTypeJsonWithUtf8)) case GET(p"/api/metrics.json") => val activeUsers = metricsCollector.getActiveWebSocketCount() val json = Json.obj("activeUsers" -> activeUsers) Action(Ok(json)) case GET(p"/ws/raids" ? q_o"keepAlive=${ bool(keepAlive) }") => websocketController.raids(keepAlive = keepAlive.getOrElse(false)) case GET(p"/$file*") => controllers.Assets.at(path = "/public", file = file) } override lazy val httpErrorHandler = new ErrorHandler override def serverStopHook = () => Future.successful { actorSystem.terminate() } }
Example 11
Source File: RateController.scala From drizzle-spark with Apache License 2.0 | 6 votes |
package org.apache.spark.streaming.scheduler import java.io.ObjectInputStream import java.util.concurrent.atomic.AtomicLong import scala.concurrent.{ExecutionContext, Future} import org.apache.spark.SparkConf import org.apache.spark.streaming.scheduler.rate.RateEstimator import org.apache.spark.util.{ThreadUtils, Utils} private def computeAndPublish(time: Long, elems: Long, workDelay: Long, waitDelay: Long): Unit = Future[Unit] { val newRate = rateEstimator.compute(time, elems, workDelay, waitDelay) newRate.foreach { s => rateLimit.set(s.toLong) publish(getLatestRate()) } } def getLatestRate(): Long = rateLimit.get() override def onBatchCompleted(batchCompleted: StreamingListenerBatchCompleted) { val elements = batchCompleted.batchInfo.streamIdToInputInfo for { processingEnd <- batchCompleted.batchInfo.processingEndTime workDelay <- batchCompleted.batchInfo.processingDelay waitDelay <- batchCompleted.batchInfo.schedulingDelay elems <- elements.get(streamUID).map(_.numRecords) } computeAndPublish(processingEnd, elems, workDelay, waitDelay) } } object RateController { def isBackPressureEnabled(conf: SparkConf): Boolean = conf.getBoolean("spark.streaming.backpressure.enabled", false) }
Example 12
Source File: ModelService.scala From reactive-machine-learning-systems with MIT License | 6 votes |
package com.reactivemachinelearning import akka.actor.ActorSystem import akka.event.{Logging, LoggingAdapter} import akka.http.scaladsl.Http import akka.http.scaladsl.marshalling.ToResponseMarshallable import akka.http.scaladsl.model.StatusCodes._ import akka.http.scaladsl.server.Directives._ import akka.stream.{ActorMaterializer, Materializer} //import spray.json._ import spray.json.DefaultJsonProtocol import scala.concurrent.{ExecutionContextExecutor, Future} case class Prediction(id: Long, timestamp: Long, value: Double) trait Protocols extends DefaultJsonProtocol { implicit val ipInfoFormat = jsonFormat3(Prediction.apply) } trait Service extends Protocols { implicit val system: ActorSystem implicit def executor: ExecutionContextExecutor implicit val materializer: Materializer val logger: LoggingAdapter // private def parseFeatures(features: String): Map[Long, Double] = { // features.parseJson.convertTo[Map[Long, Double]] // } def predict(features: String): Future[Prediction] = { Future(Prediction(123, 456, 0.5)) } val routes = { logRequestResult("predictive-service") { pathPrefix("ip") { (get & path(Segment)) { features => complete { predict(features).map[ToResponseMarshallable] { // case prediction: Prediction => prediction case _ => BadRequest } } } } } } } object PredictiveService extends App with Service { override implicit val system = ActorSystem() override implicit val executor = system.dispatcher override implicit val materializer = ActorMaterializer() override val logger = Logging(system, getClass) Http().bindAndHandle(routes, "0.0.0.0", 9000) }
Example 13
Source File: ScalajHttpClient.scala From telegram with Apache License 2.0 | 6 votes |
package com.bot4s.telegram.clients import java.net.Proxy import java.nio.file.Files import cats.instances.future._ import com.bot4s.telegram.api.RequestHandler import com.bot4s.telegram.methods.{Request, JsonRequest, MultipartRequest, Response} import com.bot4s.telegram.models.InputFile import com.bot4s.telegram.marshalling import io.circe.parser.parse import io.circe.{Decoder, Encoder} import scalaj.http.{Http, MultiPart} import slogging.StrictLogging import scala.concurrent.{ExecutionContext, Future, blocking} class ScalajHttpClient(token: String, proxy: Proxy = Proxy.NO_PROXY, telegramHost: String = "api.telegram.org") (implicit ec: ExecutionContext) extends RequestHandler[Future] with StrictLogging { val connectionTimeoutMs = 10000 val readTimeoutMs = 50000 private val apiBaseUrl = s"https://$telegramHost/bot$token/" def sendRequest[R, T <: Request[_]](request: T)(implicit encT: Encoder[T], decR: Decoder[R]): Future[R] = { val url = apiBaseUrl + request.methodName val scalajRequest = request match { case r: JsonRequest[_] => Http(url) .postData(marshalling.toJson(request)) .header("Content-Type", "application/json") case r: MultipartRequest[_] => // InputFile.FileIds are encoded as query params. val (fileIds, files) = r.getFiles.partition { case (key, _: InputFile.FileId) => true case _ => false } val parts = files.map { case (camelKey, inputFile) => val key = marshalling.snakenize(camelKey) inputFile match { case InputFile.FileId(id) => throw new RuntimeException("InputFile.FileId cannot must be encoded as a query param") case InputFile.Contents(filename, contents) => MultiPart(key, filename, "application/octet-stream", contents) case InputFile.Path(path) => MultiPart(key, path.getFileName.toString(), "application/octet-stream", Files.newInputStream(path), Files.size(path), _ => ()) case other => throw new RuntimeException(s"InputFile $other not supported") } } val fields = parse(marshalling.toJson(request)).fold(throw _, _.asObject.map { _.toMap.mapValues { json => json.asString.getOrElse(marshalling.printer.pretty(json)) } }) val fileIdsParams = fileIds.map { case (key, inputFile: InputFile.FileId) => marshalling.snakenize(key) -> inputFile.fileId } val params = fields.getOrElse(Map()) Http(url).params(params ++ fileIdsParams).postMulti(parts: _*) } import marshalling.responseDecoder Future { blocking { scalajRequest .timeout(connectionTimeoutMs, readTimeoutMs) .proxy(proxy) .asString } } map { x => if (x.isSuccess) marshalling.fromJson[Response[R]](x.body) else throw new RuntimeException(s"Error ${x.code} on request") } map (processApiResponse[R]) } }
Example 14
Source File: KVStore.scala From Freasy-Monad with MIT License | 6 votes |
package examples.cats import cats.Id import cats.free.Free import freasymonad.cats.free import scala.collection.mutable import scala.concurrent.duration.Duration import scala.concurrent.{Await, Future} @free trait KVStore { type KVStoreF[A] = Free[GrammarADT, A] sealed trait GrammarADT[A] def put[T](key: String, value: T): KVStoreF[Unit] def get[T](key: String): KVStoreF[Option[T]] def delete(key: String): KVStoreF[Unit] def update[T](key: String, f: T => T): KVStoreF[Unit] = for { vMaybe <- get[T](key) _ <- vMaybe.map(v => put[T](key, f(v))).getOrElse(Free.pure(())) } yield () } object Main extends App { import KVStore.ops._ def program: KVStoreF[Option[Int]] = for { _ <- put("wild-cats", 2) _ <- update[Int]("wild-cats", _ + 12) _ <- put("tame-cats", 5) n <- get[Int]("wild-cats") _ <- delete("tame-cats") } yield n val idInterpreter = new KVStore.Interp[Id] { val kvs = mutable.Map.empty[String, Any] def get[T](key: String): Id[Option[T]] = { println(s"get($key)") kvs.get(key).map(_.asInstanceOf[T]) } def put[T](key: String, value: T): Id[Unit] = { println(s"put($key, $value)") kvs(key) = value } def delete(key: String): Id[Unit] = { println(s"delete($key)") kvs.remove(key) } } val resId: Id[Option[Int]] = idInterpreter.run(program) import cats.implicits.catsStdInstancesForFuture import scala.concurrent.ExecutionContext.Implicits.global val futureInterpreter = new KVStore.Interp[Future] { val kvs = mutable.Map.empty[String, Any] def get[T](key: String): Future[Option[T]] = Future { println(s"get($key)") kvs.get(key).map(_.asInstanceOf[T]) } def put[T](key: String, value: T): Future[Unit] = Future { println(s"put($key, $value)") kvs(key) = value } def delete(key: String): Future[Unit] = Future { println(s"delete($key)") kvs.remove(key) } } val resFuture: Future[Option[Int]] = futureInterpreter.run(program) Await.ready(resFuture, Duration.Inf) }
Example 15
Source File: SelfEmploymentConnectorSpec.scala From self-assessment-api with Apache License 2.0 | 5 votes |
package router.connectors import mocks.MockHttp import mocks.config.MockAppConfig import mocks.httpParser.MockSelfAssessmentHttpParser import play.api.http.Status import play.api.libs.json.{JsValue, Json} import router.httpParsers.SelfAssessmentHttpParser.SelfAssessmentOutcome import support.UnitSpec import uk.gov.hmrc.http.HttpResponse import scala.concurrent.ExecutionContext.Implicits.global import scala.concurrent.Future class SelfEmploymentConnectorSpec extends UnitSpec with MockHttp with MockAppConfig with MockSelfAssessmentHttpParser { class Setup { object TestConnector extends SelfEmploymentConnector( mockHttp, mockSelfAssessmentHttpParser, mockAppConfig ) MockAppConfig.selfEmploymentUrl returns selfEmploymentUrl } lazy val selfEmploymentUrl = "test-sa-api-url" val path = "/2.0/test-path" "post" should { "return an HttpResponse" when { "a successful HttpResponse with no content is returned" in new Setup { val response = HttpResponse(Status.NO_CONTENT) val requestJson = Json.obj("test" -> "request json") MockHttp.POST[JsValue, SelfAssessmentOutcome](s"$selfEmploymentUrl$path", requestJson).returns(Future.successful(Right(response))) await(TestConnector.post(path, requestJson)(hc)) shouldBe Right(response) } } } }
Example 16
Source File: PropertyConnectorSpec.scala From self-assessment-api with Apache License 2.0 | 5 votes |
package router.connectors import mocks.MockHttp import mocks.config.MockAppConfig import mocks.httpParser.MockSelfAssessmentHttpParser import play.api.http.Status import play.api.libs.json.{JsValue, Json} import router.httpParsers.SelfAssessmentHttpParser.SelfAssessmentOutcome import support.UnitSpec import uk.gov.hmrc.http.HttpResponse import scala.concurrent.ExecutionContext.Implicits.global import scala.concurrent.Future class PropertyConnectorSpec extends UnitSpec with MockHttp with MockAppConfig with MockSelfAssessmentHttpParser { class Setup { object TestConnector extends PropertyConnector( mockHttp, mockSelfAssessmentHttpParser, mockAppConfig ) MockAppConfig.propertyUrl returns propertyUrl } lazy val propertyUrl = "test-sa-api-url" val path = "/2.0/test-path" "get" should { "return a HttpResponse" when { "a successful HttpResponse is returned" in new Setup { val response = HttpResponse(Status.OK, Some(Json.obj())) MockSelfAssessmentHttpParser.read.returns(Right(response)) MockHttp.GET[SelfAssessmentOutcome](s"$propertyUrl$path").returns(Future.successful(Right(response))) await(TestConnector.get(path)(hc)) shouldBe Right(response) } } } "post" should { "return an HttpResponse" when { "a successful HttpResponse with no content is returned" in new Setup { val response = HttpResponse(Status.NO_CONTENT) val requestJson = Json.obj("test" -> "request json") MockHttp.POST[JsValue, SelfAssessmentOutcome](s"$propertyUrl$path", requestJson).returns(Future.successful(Right(response))) await(TestConnector.post(path, requestJson)(hc)) shouldBe Right(response) } } } }
Example 17
Source File: SelfAssessmentConnectorSpec.scala From self-assessment-api with Apache License 2.0 | 5 votes |
package router.connectors import mocks.MockHttp import mocks.config.MockAppConfig import mocks.httpParser.MockSelfAssessmentHttpParser import play.api.http.Status import play.api.libs.json.Json import router.httpParsers.SelfAssessmentHttpParser.SelfAssessmentOutcome import support.UnitSpec import uk.gov.hmrc.http.HttpResponse import scala.concurrent.ExecutionContext.Implicits.global import scala.concurrent.Future class SelfAssessmentConnectorSpec extends UnitSpec with MockHttp with MockAppConfig with MockSelfAssessmentHttpParser { class Setup { object TestConnector extends SelfAssessmentConnector( mockHttp, mockSelfAssessmentHttpParser, mockAppConfig ) MockAppConfig.saApiUrl returns saApiUrl } lazy val saApiUrl = "test-sa-api-url" val path = "/test-path" "get" should { "return a HttpResponse" when { "a successful HttpResponse is returned" in new Setup { val response = HttpResponse(Status.OK, Some(Json.obj())) MockSelfAssessmentHttpParser.read.returns(Right(response)) MockHttp.GET[SelfAssessmentOutcome](s"$saApiUrl$path").returns(Future.successful(Right(response))) await(TestConnector.get(path)(hc)) shouldBe Right(response) } } } }
Example 18
Source File: RateController.scala From sparkoscope with Apache License 2.0 | 5 votes |
package org.apache.spark.streaming.scheduler import java.io.ObjectInputStream import java.util.concurrent.atomic.AtomicLong import scala.concurrent.{ExecutionContext, Future} import org.apache.spark.SparkConf import org.apache.spark.streaming.scheduler.rate.RateEstimator import org.apache.spark.util.{ThreadUtils, Utils} private def computeAndPublish(time: Long, elems: Long, workDelay: Long, waitDelay: Long): Unit = Future[Unit] { val newRate = rateEstimator.compute(time, elems, workDelay, waitDelay) newRate.foreach { s => rateLimit.set(s.toLong) publish(getLatestRate()) } } def getLatestRate(): Long = rateLimit.get() override def onBatchCompleted(batchCompleted: StreamingListenerBatchCompleted) { val elements = batchCompleted.batchInfo.streamIdToInputInfo for { processingEnd <- batchCompleted.batchInfo.processingEndTime workDelay <- batchCompleted.batchInfo.processingDelay waitDelay <- batchCompleted.batchInfo.schedulingDelay elems <- elements.get(streamUID).map(_.numRecords) } computeAndPublish(processingEnd, elems, workDelay, waitDelay) } } object RateController { def isBackPressureEnabled(conf: SparkConf): Boolean = conf.getBoolean("spark.streaming.backpressure.enabled", false) }
Example 19
Source File: BlockTransferService.scala From sparkoscope with Apache License 2.0 | 5 votes |
package org.apache.spark.network import java.io.Closeable import java.nio.ByteBuffer import scala.concurrent.{Future, Promise} import scala.concurrent.duration.Duration import scala.reflect.ClassTag import org.apache.spark.internal.Logging import org.apache.spark.network.buffer.{ManagedBuffer, NioManagedBuffer} import org.apache.spark.network.shuffle.{BlockFetchingListener, ShuffleClient} import org.apache.spark.storage.{BlockId, StorageLevel} import org.apache.spark.util.ThreadUtils private[spark] abstract class BlockTransferService extends ShuffleClient with Closeable with Logging { def uploadBlockSync( hostname: String, port: Int, execId: String, blockId: BlockId, blockData: ManagedBuffer, level: StorageLevel, classTag: ClassTag[_]): Unit = { val future = uploadBlock(hostname, port, execId, blockId, blockData, level, classTag) ThreadUtils.awaitResult(future, Duration.Inf) } }
Example 20
Source File: JobWaiter.scala From sparkoscope with Apache License 2.0 | 5 votes |
package org.apache.spark.scheduler import java.util.concurrent.atomic.AtomicInteger import scala.concurrent.{Future, Promise} import org.apache.spark.internal.Logging def cancel() { dagScheduler.cancelJob(jobId) } override def taskSucceeded(index: Int, result: Any): Unit = { // resultHandler call must be synchronized in case resultHandler itself is not thread safe. synchronized { resultHandler(index, result.asInstanceOf[T]) } if (finishedTasks.incrementAndGet() == totalTasks) { jobPromise.success(()) } } override def jobFailed(exception: Exception): Unit = { if (!jobPromise.tryFailure(exception)) { logWarning("Ignore failure", exception) } } }
Example 21
Source File: BlockManagerSlaveEndpoint.scala From sparkoscope with Apache License 2.0 | 5 votes |
package org.apache.spark.storage import scala.concurrent.{ExecutionContext, Future} import org.apache.spark.{MapOutputTracker, SparkEnv} import org.apache.spark.internal.Logging import org.apache.spark.rpc.{RpcCallContext, RpcEnv, ThreadSafeRpcEndpoint} import org.apache.spark.storage.BlockManagerMessages._ import org.apache.spark.util.{ThreadUtils, Utils} private[storage] class BlockManagerSlaveEndpoint( override val rpcEnv: RpcEnv, blockManager: BlockManager, mapOutputTracker: MapOutputTracker) extends ThreadSafeRpcEndpoint with Logging { private val asyncThreadPool = ThreadUtils.newDaemonCachedThreadPool("block-manager-slave-async-thread-pool") private implicit val asyncExecutionContext = ExecutionContext.fromExecutorService(asyncThreadPool) // Operations that involve removing blocks may be slow and should be done asynchronously override def receiveAndReply(context: RpcCallContext): PartialFunction[Any, Unit] = { case RemoveBlock(blockId) => doAsync[Boolean]("removing block " + blockId, context) { blockManager.removeBlock(blockId) true } case RemoveRdd(rddId) => doAsync[Int]("removing RDD " + rddId, context) { blockManager.removeRdd(rddId) } case RemoveShuffle(shuffleId) => doAsync[Boolean]("removing shuffle " + shuffleId, context) { if (mapOutputTracker != null) { mapOutputTracker.unregisterShuffle(shuffleId) } SparkEnv.get.shuffleManager.unregisterShuffle(shuffleId) } case RemoveBroadcast(broadcastId, _) => doAsync[Int]("removing broadcast " + broadcastId, context) { blockManager.removeBroadcast(broadcastId, tellMaster = true) } case GetBlockStatus(blockId, _) => context.reply(blockManager.getStatus(blockId)) case GetMatchingBlockIds(filter, _) => context.reply(blockManager.getMatchingBlockIds(filter)) case TriggerThreadDump => context.reply(Utils.getThreadDump()) } private def doAsync[T](actionMessage: String, context: RpcCallContext)(body: => T) { val future = Future { logDebug(actionMessage) body } future.onSuccess { case response => logDebug("Done " + actionMessage + ", response is " + response) context.reply(response) logDebug("Sent response: " + response + " to " + context.senderAddress) } future.onFailure { case t: Throwable => logError("Error in " + actionMessage, t) context.sendFailure(t) } } override def onStop(): Unit = { asyncThreadPool.shutdownNow() } }
Example 22
Source File: WatchServiceObservable.scala From monix-nio with Apache License 2.0 | 5 votes |
package monix.nio import java.nio.file.WatchEvent import monix.eval.Task import monix.execution.Ack.{ Continue, Stop } import monix.execution.atomic.Atomic import monix.execution.cancelables.SingleAssignCancelable import monix.execution.exceptions.APIContractViolationException import monix.execution.{ Callback, Cancelable, Scheduler } import monix.reactive.Observable import monix.reactive.observers.Subscriber import scala.concurrent.Future import scala.util.control.NonFatal abstract class WatchServiceObservable extends Observable[Array[WatchEvent[_]]] { def watchService: Option[WatchService] private[this] val wasSubscribed = Atomic(false) override def unsafeSubscribeFn(subscriber: Subscriber[Array[WatchEvent[_]]]): Cancelable = { if (wasSubscribed.getAndSet(true)) { subscriber.onError(APIContractViolationException(this.getClass.getName)) Cancelable.empty } else try startPolling(subscriber) catch { case NonFatal(e) => subscriber.onError(e) Cancelable.empty } } def init(subscriber: Subscriber[Array[WatchEvent[_]]]): Future[Unit] = Future.successful(()) private def startPolling(subscriber: Subscriber[Array[WatchEvent[_]]]): Cancelable = { import subscriber.scheduler val taskCallback = new Callback[Throwable, Array[WatchEvent[_]]]() { override def onSuccess(value: Array[WatchEvent[_]]): Unit = {} override def onError(ex: Throwable): Unit = { subscriber.onError(ex) } } val cancelable = Task .fromFuture(init(subscriber)) .flatMap { _ => loop(subscriber) } .executeWithOptions(_.enableAutoCancelableRunLoops) .runAsync(taskCallback) val extraCancelable = Cancelable(() => { cancelable.cancel() }) SingleAssignCancelable.plusOne(extraCancelable) } private def loop(subscriber: Subscriber[Array[WatchEvent[_]]])(implicit scheduler: Scheduler): Task[Array[WatchEvent[_]]] = { import collection.JavaConverters._ watchService.map { ws => ws.take() .doOnCancel(Task.defer(ws.close())) .flatMap { key => val events = key.pollEvents().asScala.toArray key.reset() Task.fromFuture(subscriber.onNext(events)).flatMap { case Continue => loop(subscriber) case Stop => emptyTask } } } }.getOrElse(emptyTask) private val emptyTask = Task.create[Array[WatchEvent[_]]]((_, _) => Cancelable.empty) }
Example 23
Source File: AsyncChannelConsumer.scala From monix-nio with Apache License 2.0 | 5 votes |
package monix.nio import java.nio.ByteBuffer import monix.execution.Ack.{ Continue, Stop } import monix.execution.{ Ack, Callback, Cancelable, Scheduler } import monix.execution.atomic.Atomic import monix.execution.cancelables.{ AssignableCancelable, SingleAssignCancelable } import monix.reactive.Consumer import monix.reactive.observers.Subscriber import scala.concurrent.{ Future, Promise } import scala.util.control.NonFatal private[nio] abstract class AsyncChannelConsumer extends Consumer[Array[Byte], Long] { def channel: Option[AsyncChannel] def withInitialPosition: Long = 0L def init(subscriber: AsyncChannelSubscriber): Future[Unit] = Future.successful(()) class AsyncChannelSubscriber(consumerCallback: Callback[Throwable, Long])(implicit val scheduler: Scheduler) extends Subscriber[Array[Byte]] { self => private[this] lazy val initFuture = init(self) private[this] val callbackCalled = Atomic(false) private[this] var position = withInitialPosition override def onNext(elem: Array[Byte]): Future[Ack] = { def write(): Future[Ack] = { val promise = Promise[Ack]() channel.foreach { sc => try { sc .write(ByteBuffer.wrap(elem), position) .runAsync( new Callback[Throwable, Int] { override def onError(exc: Throwable) = { closeChannel() sendError(exc) promise.success(Stop) } override def onSuccess(result: Int): Unit = { position += result promise.success(Continue) } }) } catch { case NonFatal(ex) => sendError(ex) promise.success(Stop) } } promise.future } if (initFuture.value.isEmpty) { initFuture.flatMap(_ => write()) } else { write() } } override def onComplete(): Unit = { channel.collect { case sc if sc.closeOnComplete => closeChannel() } if (callbackCalled.compareAndSet(expect = false, update = true)) consumerCallback.onSuccess(position) } override def onError(ex: Throwable): Unit = { closeChannel() sendError(ex) } private[nio] def onCancel(): Unit = { callbackCalled.set(true) closeChannel() } private[nio] def sendError(t: Throwable) = if (callbackCalled.compareAndSet(expect = false, update = true)) { scheduler.execute(new Runnable { def run() = consumerCallback.onError(t) }) } private[nio] final def closeChannel()(implicit scheduler: Scheduler) = channel.foreach(_.close().runToFuture) } override def createSubscriber(cb: Callback[Throwable, Long], s: Scheduler): (Subscriber[Array[Byte]], AssignableCancelable) = { val out = new AsyncChannelSubscriber(cb)(s) val extraCancelable = Cancelable(() => out.onCancel()) val conn = SingleAssignCancelable.plusOne(extraCancelable) (out, conn) } }
Example 24
Source File: AsyncChannelObservable.scala From monix-nio with Apache License 2.0 | 5 votes |
package monix.nio import java.nio.ByteBuffer import monix.eval.Task import monix.execution.Ack.{ Continue, Stop } import monix.execution.{ Callback, Cancelable, Scheduler } import monix.execution.atomic.Atomic import monix.execution.cancelables.SingleAssignCancelable import monix.execution.exceptions.APIContractViolationException import monix.nio.internal.{ Bytes, EmptyBytes, NonEmptyBytes } import monix.reactive.Observable import monix.reactive.observers.Subscriber import scala.concurrent.Future import scala.util.control.NonFatal private[nio] abstract class AsyncChannelObservable extends Observable[Array[Byte]] { def bufferSize: Int def channel: Option[AsyncChannel] def init(subscriber: Subscriber[Array[Byte]]): Future[Unit] = Future.successful(()) private[this] val wasSubscribed = Atomic(false) override def unsafeSubscribeFn(subscriber: Subscriber[Array[Byte]]): Cancelable = { import subscriber.scheduler if (wasSubscribed.getAndSet(true)) { subscriber.onError(APIContractViolationException(this.getClass.getName)) Cancelable.empty } else try startReading(subscriber) catch { case NonFatal(e) => subscriber.onError(e) closeChannel() Cancelable.empty } } private def startReading(subscriber: Subscriber[Array[Byte]]): Cancelable = { import subscriber.scheduler val taskCallback = new Callback[Throwable, Array[Byte]]() { override def onSuccess(value: Array[Byte]): Unit = { channel.collect { case sc if sc.closeOnComplete => closeChannel() } } override def onError(ex: Throwable): Unit = { closeChannel() subscriber.onError(ex) } } val cancelable = Task .fromFuture(init(subscriber)) .flatMap { _ => loop(subscriber, 0) } .executeWithOptions(_.enableAutoCancelableRunLoops) .runAsync(taskCallback) val extraCancelable = Cancelable(() => { cancelable.cancel() closeChannel() }) SingleAssignCancelable.plusOne(extraCancelable) } private[this] val buffer = ByteBuffer.allocate(bufferSize) private def loop(subscriber: Subscriber[Array[Byte]], position: Long)(implicit scheduler: Scheduler): Task[Array[Byte]] = { buffer.clear() channel.map { ch => ch .read(buffer, position) .doOnCancel(Task.defer(ch.close())) .flatMap { result => val bytes = Bytes(buffer, result) bytes match { case EmptyBytes => subscriber.onComplete() Task.now(Bytes.emptyBytes) case NonEmptyBytes(arr) => Task.fromFuture(subscriber.onNext(arr)).flatMap { case Continue => loop(subscriber, position + result) case Stop => Task.now(Bytes.emptyBytes) } } } }.getOrElse(Task.now(Bytes.emptyBytes)) } private[nio] final def closeChannel()(implicit scheduler: Scheduler) = channel.foreach(_.close().runToFuture) }
Example 25
Source File: WatchService.scala From monix-nio with Apache License 2.0 | 5 votes |
package monix.nio.file import java.nio.file.StandardWatchEventKinds.{ ENTRY_CREATE, ENTRY_DELETE, ENTRY_MODIFY } import java.nio.file.WatchEvent.Kind import java.nio.file.{ Path, WatchEvent, WatchKey } import com.sun.nio.file.SensitivityWatchEventModifier import monix.execution.{ Callback, Cancelable, Scheduler } import scala.concurrent.{ Future, Promise } import scala.concurrent.duration.TimeUnit import scala.util.control.NonFatal abstract class WatchService extends AutoCloseable { def poll(timeout: Long, timeUnit: TimeUnit, cb: Callback[Throwable, Option[WatchKey]]): Unit def poll(timeout: Long, timeUnit: TimeUnit): Future[Option[WatchKey]] = { val p = Promise[Option[WatchKey]]() poll(timeout, timeUnit, Callback.fromPromise(p)) p.future } def poll(cb: Callback[Throwable, Option[WatchKey]]): Unit def poll(): Future[Option[WatchKey]] = { val p = Promise[Option[WatchKey]]() poll(Callback.fromPromise(p)) p.future } def take(cb: Callback[Throwable, WatchKey]): Unit def take(): Future[WatchKey] = { val p = Promise[WatchKey]() take(Callback.fromPromise(p)) p.future } } object WatchService { val SupportedEvents: Set[Kind[_]] = Set(ENTRY_CREATE, ENTRY_DELETE, ENTRY_MODIFY) def apply(path: Path, events: Kind[_]*)(implicit scheduler: Scheduler): WatchService = { val watcher = path.getFileSystem.newWatchService() val watchFor = if (events.isEmpty) SupportedEvents else events path.register( watcher, watchFor.toArray, SensitivityWatchEventModifier.HIGH.asInstanceOf[WatchEvent.Modifier]) new NIOWatcherServiceImplementation(watcher) } private final class NIOWatcherServiceImplementation(watcher: java.nio.file.WatchService)(implicit scheduler: Scheduler) extends WatchService { override def poll(timeout: Long, timeUnit: TimeUnit, cb: Callback[Throwable, Option[WatchKey]]): Unit = { try { val key = Option(watcher.poll(timeout, timeUnit)) cb.onSuccess(key) } catch { case NonFatal(ex) => cb.onError(ex) } } override def poll(cb: Callback[Throwable, Option[WatchKey]]): Unit = { try { val key = Option(watcher.poll()) cb.onSuccess(key) } catch { case NonFatal(ex) => cb.onError(ex) } } override def take(cb: Callback[Throwable, WatchKey]): Unit = { try { val key = watcher.take() cb.onSuccess(key) } catch { case NonFatal(ex) => cb.onError(ex) } } override def close(): Unit = cancelable.cancel() private[this] val cancelable: Cancelable = Cancelable { () => try watcher.close() catch { case NonFatal(ex) => scheduler.reportFailure(ex) } } } }
Example 26
Source File: LmgtfyBot.scala From telegram with Apache License 2.0 | 5 votes |
import akka.http.scaladsl.model.Uri import akka.http.scaladsl.model.Uri.Query import cats.instances.future._ import cats.syntax.functor._ import com.bot4s.telegram.Implicits._ import com.bot4s.telegram.api.declarative.{Commands, InlineQueries} import com.bot4s.telegram.future.Polling import com.bot4s.telegram.methods.ParseMode import com.bot4s.telegram.models._ import scala.concurrent.Future class LmgtfyBot(token: String) extends ExampleBot(token) with Polling with InlineQueries[Future] with Commands[Future] { def lmgtfyBtn(query: String): InlineKeyboardMarkup = InlineKeyboardMarkup.singleButton( InlineKeyboardButton.url("\uD83C\uDDECoogle it now!", lmgtfyUrl(query))) onCommand('start | 'help) { implicit msg => reply( s"""Generates ${"Let me \uD83C\uDDECoogle that for you!".italic} links. | |/start | /help - list commands | |/lmgtfy args - generate link | |/lmgtfy2 | /btn args - clickable button | |@Bot args - Inline mode """.stripMargin, parseMode = ParseMode.Markdown).void } onCommand('lmgtfy) { implicit msg => withArgs { args => val query = args.mkString(" ") replyMd( query.altWithUrl(lmgtfyUrl(query)), disableWebPagePreview = true ).void } } def lmgtfyUrl(query: String): String = Uri("http://lmgtfy.com") .withQuery(Query("q" -> query)) .toString() onCommand('btn | 'lmgtfy2) { implicit msg => withArgs { args => val query = args.mkString(" ") reply(query, replyMarkup = lmgtfyBtn(query)).void } } onInlineQuery { implicit iq => val query = iq.query if (query.isEmpty) answerInlineQuery(Seq()).void else { val textMessage = InputTextMessageContent( query.altWithUrl(lmgtfyUrl(query)), disableWebPagePreview = true, parseMode = ParseMode.Markdown) val results = List( InlineQueryResultArticle( "btn:" + query, inputMessageContent = textMessage, title = iq.query, description = "Clickable button + link", replyMarkup = lmgtfyBtn(query) ), InlineQueryResultArticle( query, inputMessageContent = textMessage, description = "Clickable link", title = iq.query ) ) answerInlineQuery(results, cacheTime = 1).void } } }
Example 27
Source File: QrCodesBot.scala From telegram with Apache License 2.0 | 5 votes |
import java.net.URLEncoder import akka.http.scaladsl.Http import akka.http.scaladsl.model.{HttpRequest, Uri} import akka.http.scaladsl.unmarshalling.Unmarshal import akka.util.ByteString import com.bot4s.telegram.api.declarative.Commands import com.bot4s.telegram.api._ import com.bot4s.telegram.future.Polling import com.bot4s.telegram.methods._ import com.bot4s.telegram.models.AkkaInputFile import scala.concurrent.Future class QrCodesBot(token: String) extends AkkaExampleBot(token) with Polling with Commands[Future] with ChatActions[Future] { // Multiple variants onCommand('qr | 'qrcode | 'qr_code) { implicit msg => withArgs { args => val url = "https://api.qrserver.com/v1/create-qr-code/?data=" + URLEncoder.encode(args mkString " ", "UTF-8") for { response <- Http().singleRequest(HttpRequest(uri = Uri(url))) if response.status.isSuccess() bytes <- Unmarshal(response).to[ByteString] photo = AkkaInputFile("qrcode.png", bytes) _ <- uploadingPhoto // Hint the user _ <- request(SendPhoto(msg.source, photo)) } yield () } } }
Example 28
Source File: VoiceFileBot.scala From telegram with Apache License 2.0 | 5 votes |
import akka.http.scaladsl.Http import akka.http.scaladsl.model.{HttpRequest, Uri} import akka.http.scaladsl.unmarshalling.Unmarshal import akka.util.ByteString import cats.instances.future._ import cats.syntax.functor._ import com.bot4s.telegram.api.declarative.Commands import com.bot4s.telegram.future.Polling import com.bot4s.telegram.methods._ import scala.concurrent.Future import scala.util.{Failure, Success} class VoiceFileBot(token: String) extends AkkaExampleBot(token) with Polling with Commands[Future] { onMessage { implicit msg => using(_.voice) { voice => request(GetFile(voice.fileId)).andThen({ case Success(file) => file.filePath match { case Some(filePath) => // See https://core.telegram.org/bots/api#getfile val url = s"https://api.telegram.org/file/bot${token}/${filePath}" for { res <- Http().singleRequest(HttpRequest(uri = Uri(url))) if res.status.isSuccess() bytes <- Unmarshal(res).to[ByteString] _ <- reply(s"File with ${bytes.size} bytes received.") } yield () case None => reply("No file_path was returned") } case Failure(e) => logger.error("Exception: " + e) // poor man's logging }).void } } }
Example 29
Source File: DogeBot.scala From telegram with Apache License 2.0 | 5 votes |
import com.bot4s.telegram.api.declarative.Commands import com.bot4s.telegram.api.ChatActions import com.bot4s.telegram.future.Polling import com.bot4s.telegram.methods._ import com.bot4s.telegram.models.InputFile import scala.concurrent.Future class DogeBot(token: String) extends ExampleBot(token) with Polling with Commands[Future] with ChatActions[Future] { onCommand("/doge") { implicit msg => withArgs { args => val url = "http://dogr.io/" + (args mkString "/") + ".png?split=false" for { res <- Future { scalaj.http.Http(url).asBytes } if res.isSuccess bytes = res.body _ = println(bytes.length) photo = InputFile("doge.png", bytes) _ <- uploadingPhoto // Hint the user _ <- request(SendPhoto(msg.source, photo)) } yield () } } }
Example 30
Source File: GitHubHosted2048Bot.scala From telegram with Apache License 2.0 | 5 votes |
import akka.http.scaladsl.model.Uri import akka.http.scaladsl.model.Uri.{Path, Query} import akka.http.scaladsl.model.headers.{HttpOrigin, HttpOriginRange} import ch.megard.akka.http.cors.scaladsl.model.{HttpHeaderRange, HttpOriginMatcher} import akka.http.scaladsl.server.Directives._ import akka.http.scaladsl.server.Route import cats.instances.future._ import cats.syntax.functor._ import ch.megard.akka.http.cors.scaladsl.CorsDirectives.cors import ch.megard.akka.http.cors.scaladsl.settings.CorsSettings import com.bot4s.telegram.api.declarative.{Callbacks, Commands} import com.bot4s.telegram.api.{GameManager, Payload} import com.bot4s.telegram.future.Polling import com.bot4s.telegram.methods.SendGame import scala.concurrent.Future class GitHubHosted2048Bot(token: String, gameManagerHost: String) extends AkkaExampleBot(token) with Polling with Commands[Future] with Callbacks[Future] with GameManager { override val port: Int = 8080 val Play2048 = "play_2048" val GitHubPages = Uri("https://mukel.github.io") onCommand(Play2048 or "2048" or "start") { implicit msg => request( SendGame(msg.source, Play2048) ).void } onCallbackQuery { implicit cbq => val acked = cbq.gameShortName.collect { case Play2048 => val payload = Payload.forCallbackQuery(gameManagerHost) val url = GitHubPages .withPath(Path(s"/$Play2048/index.html")) .withQuery(Query("payload" -> payload.base64Encode)) ackCallback(url = Some(url.toString())) } acked.getOrElse(ackCallback()).void } // Enable CORS for GitHub Pages. // Allows GitHub Pages to call cross-domain getScores and setScore. private val allowGitHub = CorsSettings.defaultSettings .withAllowedOrigins(HttpOriginMatcher(HttpOrigin(GitHubPages.toString()))) override def routes: Route = super.routes ~ cors(allowGitHub) { gameManagerRoute } }
Example 31
Source File: PerChatRequestsBot.scala From telegram with Apache License 2.0 | 5 votes |
import akka.actor.{Actor, ActorRef, Props, Terminated} import cats.syntax.functor._ import cats.instances.future._ import com.bot4s.telegram.api.declarative.Commands import com.bot4s.telegram.api.{ActorBroker, AkkaDefaults} import com.bot4s.telegram.future.Polling import com.bot4s.telegram.methods.SendMessage import com.bot4s.telegram.models.{Message, Update} import scala.concurrent.Future trait PerChatRequests extends ActorBroker with AkkaDefaults { override val broker = Some(system.actorOf(Props(new Broker), "broker")) class Broker extends Actor { val chatActors = collection.mutable.Map[Long, ActorRef]() def receive = { case u: Update => u.message.foreach { m => val id = m.chat.id val handler = chatActors.getOrElseUpdate(m.chat.id, { val worker = system.actorOf(Props(new Worker), s"worker_$id") context.watch(worker) worker }) handler ! m } case Terminated(worker) => // This should be faster chatActors.find(_._2 == worker).foreach { case (k, _) => chatActors.remove(k) } case _ => } } // For every chat a new worker actor will be spawned. // All requests will be routed through this worker actor, allowing it to maintain per-chat state. class Worker extends Actor { def receive = { case m: Message => request(SendMessage(m.source, self.toString)) case _ => } } } class PerChatRequestsBot(token: String) extends ExampleBot(token) with Polling with Commands[Future] with PerChatRequests { // Commands work as usual. onCommand("/hello") { implicit msg => reply("Hello World!").void } }
Example 32
Source File: PollingWithWebRoutes.scala From telegram with Apache License 2.0 | 5 votes |
import cats.instances.future._ import cats.syntax.functor._ import com.bot4s.telegram.api.declarative.Commands import com.bot4s.telegram.api.WebRoutes import com.bot4s.telegram.future.Polling import scala.concurrent.Future class PollingWithWebRoutes(token: String) extends AkkaExampleBot(token) with Polling with WebRoutes with Commands[Future] { override val port: Int = 8080 onCommand("/hello") { implicit msg => reply("Hello").void } import akka.http.scaladsl.server.Directives._ override def routes = pathEndOrSingleSlash { complete("I'm running...") } ~ super.routes }
Example 33
Source File: WebhookBot.scala From telegram with Apache License 2.0 | 5 votes |
import java.net.URLEncoder import akka.http.scaladsl.Http import akka.http.scaladsl.model.{HttpRequest, Uri} import akka.http.scaladsl.unmarshalling.Unmarshal import com.bot4s.telegram.api.Webhook import com.bot4s.telegram.methods._ import com.bot4s.telegram.models.Message import scala.concurrent.Future class WebhookBot(token: String) extends AkkaExampleBot(token) with Webhook { val port = 8080 val webhookUrl = "https://88c444ab.ngrok.io" val baseUrl = "http://api.mathjs.org/v1/?expr=" override def receiveMessage(msg: Message): Future[Unit] = { msg.text.fold(Future.successful(())) { text => val url = baseUrl + URLEncoder.encode(text, "UTF-8") for { res <- Http().singleRequest(HttpRequest(uri = Uri(url))) if res.status.isSuccess() result <- Unmarshal(res).to[String] _ <- request(SendMessage(msg.source, result)) } yield () } } }
Example 34
Source File: SelfHosted2048Bot.scala From telegram with Apache License 2.0 | 5 votes |
import akka.http.scaladsl.model.Uri import akka.http.scaladsl.model.Uri.{Path, Query} import akka.http.scaladsl.server.Directives._ import akka.http.scaladsl.server.Route import cats.instances.future._ import cats.syntax.functor._ import com.bot4s.telegram.api.declarative.{Callbacks, Commands} import com.bot4s.telegram.api.{AkkaDefaults, GameManager, Payload} import com.bot4s.telegram.future.Polling import com.bot4s.telegram.methods.SendGame import scala.concurrent.Future class SelfHosted2048Bot(token: String, gameManagerHost: String) extends ExampleBot(token) with Polling with AkkaDefaults with Callbacks[Future] with GameManager with Commands[Future] { override val port: Int = 8080 val Play2048 = "play_2048" onCommand(Play2048 or "2048" or "start") { implicit msg => request( SendGame(msg.source, Play2048) ).void } onCallbackQuery { implicit cbq => val acked = cbq.gameShortName.collect { case Play2048 => val payload = Payload.forCallbackQuery(gameManagerHost) val url = Uri(gameManagerHost) .withPath(Path(s"/$Play2048/index.html")) .withQuery(Query("payload" -> payload.base64Encode)) ackCallback(url = Some(url.toString())) } acked.getOrElse(ackCallback()).void } override def routes: Route = super.routes ~ gameManagerRoute ~ { pathPrefix(Play2048) { getFromResourceDirectory(Play2048) } } }
Example 35
Source File: StatefulBot.scala From telegram with Apache License 2.0 | 5 votes |
import cats.instances.future._ import cats.syntax.functor._ import com.bot4s.telegram.api.declarative.Commands import com.bot4s.telegram.future.Polling import com.bot4s.telegram.models.Message import scala.concurrent.Future class StatefulBot(token: String) extends ExampleBot(token) with Polling with Commands[Future] with PerChatState[Int] { onCommand("/inc") { implicit msg => withChatState { s => val n = s.getOrElse(0) setChatState(n + 1) reply(s"Counter: $n").void } } }
Example 36
Source File: RegexBot.scala From telegram with Apache License 2.0 | 5 votes |
import cats.instances.future._ import cats.syntax.functor._ import com.bot4s.telegram.api.declarative.RegexCommands import com.bot4s.telegram.future.Polling import scala.concurrent.Future class RegexBot(token: String) extends ExampleBot(token) with Polling with RegexCommands[Future] { onRegex("""/regex\s+(\w+)""".r) { implicit msg => groups => reply(groups mkString ", ").void } onRegex("""1?|^(11+?)\1+""".r) { implicit msg => _ => reply("Not prime!").void } }
Example 37
Source File: CovfefeBot.scala From telegram with Apache License 2.0 | 5 votes |
import cats.instances.future._ import cats.syntax.functor._ import com.softwaremill.sttp._ import com.bot4s.telegram.future.Polling import com.bot4s.telegram.api.declarative.Commands import scala.concurrent.Future case class Body(message: String) class CovfefeBot(token: String) extends ExampleBot(token) with Polling with Commands[Future] { onCommand("/start") { implicit msg => reply("Make texting great again!\nUse /covfefe to get a Trump quote.").void } onCommand("/covfefe") { implicit msg => val url = "https://api.whatdoestrumpthink.com/api/v1/quotes/random" for { r <- sttp.get(uri"$url").response(asString).send[Future]() if r.isSuccess json = r.unsafeBody t = io.circe.parser.parse(json).fold(throw _, identity) quote = t.hcursor.downField("message").as[String].right.toOption.getOrElse("") _ <- reply(quote) } yield () } }
Example 38
Source File: PollBot.scala From telegram with Apache License 2.0 | 5 votes |
import java.util.concurrent.TimeUnit import cats.instances.future._ import cats.syntax.functor._ import com.bot4s.telegram.Implicits._ import com.bot4s.telegram.api.declarative.{Callbacks, Commands} import com.bot4s.telegram.future.Polling import com.bot4s.telegram.methods._ import com.bot4s.telegram.models._ import scala.concurrent.{Await, Future} import scala.util.Failure class PollBot(token: String) extends ExampleBot(token) with Polling with Commands[Future] with Callbacks[Future] { var pollMsgId = 0 onCommand("poll") { implicit msg => val f = request(SendPoll(ChatId(msg.chat.id), "Pick A or B", Array("A", "B"))) f.onComplete { case Failure(e) => println("Error " + e) case _ => } for { poll <- f } yield { println("Poll sent") pollMsgId = poll.messageId } } onCommand("stop") { implicit msg => request(StopPoll(ChatId(msg.chat.id), pollMsgId)).void } }
Example 39
Source File: RandomBot.scala From telegram with Apache License 2.0 | 5 votes |
import cats.instances.future._ import cats.syntax.functor._ import com.bot4s.telegram.api.declarative.Commands import com.bot4s.telegram.future.Polling import scala.util.Try import scala.concurrent.Future class RandomBot(token: String) extends ExampleBot(token) with Polling with Commands[Future] { val rng = new scala.util.Random(System.currentTimeMillis()) onCommand("coin" or "flip") { implicit msg => reply(if (rng.nextBoolean()) "Head!" else "Tail!").void } onCommand('real | 'double | 'float) { implicit msg => reply(rng.nextDouble().toString).void } onCommand("/die" | "roll") { implicit msg => reply("⚀⚁⚂⚃⚄⚅" (rng.nextInt(6)).toString).void } onCommand("random" or "rnd") { implicit msg => withArgs { case Seq(Int(n)) if n > 0 => reply(rng.nextInt(n).toString).void case _ => reply("Invalid argumentヽ(ಠ_ಠ)ノ").void } } onCommand('choose | 'pick | 'select) { implicit msg => withArgs { args => replyMd(if (args.isEmpty) "No arguments provided." else args(rng.nextInt(args.size))).void } } // Extractor object Int { def unapply(s: String): Option[Int] = Try(s.toInt).toOption } }
Example 40
Source File: CallbacksBot.scala From telegram with Apache License 2.0 | 5 votes |
import cats.instances.future._ import cats.syntax.functor._ import com.bot4s.telegram.Implicits._ import com.bot4s.telegram.api.declarative.{Callbacks, Commands} import com.bot4s.telegram.future.Polling import com.bot4s.telegram.methods.EditMessageReplyMarkup import com.bot4s.telegram.models.{ChatId, InlineKeyboardButton, InlineKeyboardMarkup} import scala.concurrent.Future import scala.util.Try class CallbacksBot(token: String) extends ExampleBot(token) with Polling with Commands[Future] with Callbacks[Future] { val TAG = "COUNTER_TAG" var requestCount = 0 def markupCounter(n: Int) = { requestCount += 1 InlineKeyboardMarkup.singleButton( InlineKeyboardButton.callbackData( s"Press me!!!\n$n - $requestCount", tag(n.toString))) } def tag = prefixTag(TAG) _ onCommand("/counter") { implicit msg => reply("Press to increment!", replyMarkup = markupCounter(0)).void } onCallbackWithTag(TAG) { implicit cbq => // Notification only shown to the user who pressed the button. val ackFuture = ackCallback(cbq.from.firstName + " pressed the button!") // Or just ackCallback() val maybeEditFuture = for { data <- cbq.data Int(n) = data msg <- cbq.message response <- request( EditMessageReplyMarkup( ChatId(msg.source), // msg.chat.id msg.messageId, replyMarkup = markupCounter(n + 1))) } yield response ackFuture.zip(maybeEditFuture.getOrElse(Future.successful(()))) .void } // Extractor object Int { def unapply(s: String): Option[Int] = Try(s.toInt).toOption } }
Example 41
Source File: ExceptionBot.scala From telegram with Apache License 2.0 | 5 votes |
import cats.instances.future._ import cats.syntax.functor._ import com.bot4s.telegram.api.declarative.Commands import com.bot4s.telegram.api.TelegramApiException import com.bot4s.telegram.future.Polling import scala.concurrent.Future import scala.util.Failure class ExceptionBot(token: String) extends ExampleBot(token) with Polling with Commands[Future] { onCommand("/hello") { implicit msg => reply("Hey there") andThen { case Failure(tex: TelegramApiException) => tex.errorCode match { case 429 => // Too many requests Thread.sleep(1000) reply("Houston, we have a (congestion) problem!") case 404 => println("Not found") case 401 => println("Unauthorized") case 400 => println("Bad Request") case e => println(s"Error code: $e") } case _ => } void } }
Example 42
Source File: SelfDestructBot.scala From telegram with Apache License 2.0 | 5 votes |
import java.time.Instant import cats.instances.future._ import cats.syntax.functor._ import com.bot4s.telegram.Implicits._ import com.bot4s.telegram.api.declarative.{Callbacks, InlineQueries} import com.bot4s.telegram.future.Polling import com.bot4s.telegram.methods._ import com.bot4s.telegram.models.UpdateType.Filters._ import com.bot4s.telegram.models._ import scala.concurrent.Future import scala.concurrent.duration._ class SelfDestructBot(token: String) extends ExampleBot(token) with Polling with InlineQueries[Future] with Callbacks[Future] { val timeouts = Seq(3, 5, 10, 30) override def allowedUpdates = InlineUpdates ++ CallbackUpdates def buildResult(timeout: Int, msg: String): InlineQueryResult = { InlineQueryResultArticle(s"$timeout", s"$timeout seconds", inputMessageContent = InputTextMessageContent(msg), description = s"Message will be deleted in $timeout seconds", replyMarkup = InlineKeyboardMarkup.singleButton(button(now))) } def now = Instant.now().getEpochSecond def button(timeLeft: Long) = InlineKeyboardButton.callbackData("⏳ left?", "" + timeLeft) onCallbackQuery { implicit cbq => val left = cbq.data.map(_.toLong - now).getOrElse(-1L) ackCallback(s"$left seconds remaining.", cacheTime = 0).void } onChosenInlineResult { implicit result => val delay = result.resultId.toInt Utils.after(delay.seconds) { request(EditMessageText( text = "⌛ Expired", inlineMessageId = result.inlineMessageId)) } request(EditMessageReplyMarkup( inlineMessageId = result.inlineMessageId, replyMarkup = InlineKeyboardMarkup.singleButton(button(now + delay)))).void } onInlineQuery { implicit q => val results = if (q.query.isEmpty) Seq.empty else timeouts.map(buildResult(_, q.query)) answerInlineQuery(results, 5).void } }
Example 43
Source File: ProcessBot.scala From telegram with Apache License 2.0 | 5 votes |
import cats.instances.future._ import cats.syntax.functor._ import com.bot4s.telegram.api.declarative.Commands import com.bot4s.telegram.future.Polling import scala.concurrent.Future import scala.util.control.NonFatal class ProcessBot(token: String) extends ExampleBot(token) with Polling with Commands[Future] { onCommand('run | 'exec | 'execute | 'cmd) { implicit msg => withArgs { args => try { import sys.process._ val result = args.mkString(" ") !! reply(result).void } catch { case NonFatal(e) => reply("Exception: " + e.getMessage).void } } } }
Example 44
Source File: Utils.scala From telegram with Apache License 2.0 | 5 votes |
import java.util.{Timer, TimerTask} import scala.concurrent.duration.Duration import scala.concurrent.{Future, Promise} import scala.util.Try object Utils { def after[T](duration: Duration)(block: => T): Future[T] = { val promise = Promise[T]() val t = new Timer() t.schedule(new TimerTask { override def run(): Unit = { promise.complete(Try(block)) } }, duration.toMillis) promise.future } }
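Utils.after schedules a block on a java.util.Timer and completes a Promise with its result, which is how SelfDestructBot delays the "Expired" edit. A brief sketch of the helper used standalone, assuming the Utils object above is on the classpath:

import scala.concurrent.Await
import scala.concurrent.duration._

object AfterSketch extends App {
  // Runs the block roughly two seconds from now and exposes the result as a Future.
  val delayed = Utils.after(2.seconds) {
    s"fired at ${System.currentTimeMillis()}"
  }
  println(Await.result(delayed, 5.seconds))
}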
Example 45
Source File: WebRoutes.scala From telegram with Apache License 2.0 | 5 votes |
package com.bot4s.telegram.api import akka.http.scaladsl.Http import akka.http.scaladsl.server.Directives._ import akka.http.scaladsl.server.Route import com.bot4s.telegram.future.BotExecutionContext import slogging.StrictLogging import scala.concurrent.{Future, Promise} trait WebRoutes extends BotBase[Future] with StrictLogging { _: BotExecutionContext with AkkaImplicits => val port: Int val interfaceIp: String = "::0" def routes: Route = reject private var bindingFuture: Future[Http.ServerBinding] = _ @volatile private var eol: Promise[Unit] = _ abstract override def run(): Future[Unit] = synchronized { if (eol != null) { throw new RuntimeException("Bot is already running") } bindingFuture = Http().bindAndHandle(routes, interfaceIp, port) bindingFuture.foreach { _ => logger.info(s"Listening on $interfaceIp:$port") } sys.addShutdownHook { shutdown() } eol = Promise[Unit]() val t = Future.sequence(Seq(eol.future, super.run())) t.map(_ => ()) } abstract override def shutdown(): Unit = synchronized { if (eol == null) { throw new RuntimeException("Bot is not running") } super.shutdown() for { b <- bindingFuture _ <- b.unbind() t <- system.terminate() } { eol.success(()) eol = null } } }
Example 46
Source File: Webhook.scala From telegram with Apache License 2.0 | 5 votes |
package com.bot4s.telegram.api import akka.http.scaladsl.model.StatusCodes import akka.http.scaladsl.server.Directives._ import akka.http.scaladsl.server.Route import com.bot4s.telegram.future.BotExecutionContext import com.bot4s.telegram.methods.SetWebhook import com.bot4s.telegram.models.{InputFile, Update} import slogging.StrictLogging import scala.concurrent.Future import scala.util.control.NonFatal def certificate: Option[InputFile] = None def webhookReceiver: Route = { entity(as[Update]) { update => try { receiveUpdate(update, None) } catch { case NonFatal(e) => logger.error("Caught exception in update handler", e) } complete(StatusCodes.OK) } } abstract override def routes: Route = webhookRoute ~ super.routes abstract override def run(): Future[Unit] = { request( SetWebhook( url = webhookUrl, certificate = certificate, allowedUpdates = allowedUpdates)).flatMap { case true => super.run() // spawn WebRoutes case false => logger.error("Failed to set webhook") throw new RuntimeException("Failed to set webhook") } } }
Example 47
Source File: GameManager.scala From telegram with Apache License 2.0 | 5 votes |
package com.bot4s.telegram.api import java.net.URLDecoder import java.nio.charset.StandardCharsets import java.util.Base64 import akka.http.scaladsl.model.StatusCodes import akka.http.scaladsl.server.Directives._ import akka.http.scaladsl.server.{Directive1, Route} import com.bot4s.telegram.marshalling import com.bot4s.telegram.methods.{GetGameHighScores, SetGameScore} import com.bot4s.telegram.models.{CallbackQuery, ChatId, User} import com.bot4s.telegram.future.BotExecutionContext import io.circe.generic.extras.semiauto._ import io.circe.generic.semiauto.deriveDecoder import io.circe.{Decoder, Encoder} import scala.concurrent.Future import scala.util.{Failure, Success} case class Payload( user : User, chatId : Option[ChatId] = None, messageId : Option[Int] = None, inlineMessageId : Option[String] = None, gameManagerHost : String, gameShortName : String) { def toGetGameHighScores = GetGameHighScores(user.id, chatId, messageId, inlineMessageId) def base64Encode: String = { val payloadJson = marshalling.toJson[Payload](this) val encodedPayload = Base64.getEncoder.encodeToString( payloadJson.getBytes(StandardCharsets.UTF_8)) encodedPayload } } object Payload { def base64Decode(encodedPayload: String): Payload = { val base64payload = URLDecoder.decode(encodedPayload, "UTF-8") val jsonPayload = new String(Base64.getDecoder.decode(base64payload), StandardCharsets.UTF_8) val payload = marshalling.fromJson[Payload](jsonPayload) payload } def forCallbackQuery(gameManagerHost: String)(implicit cbq: CallbackQuery): Payload = { Payload( cbq.from, cbq.message.map(_.source), cbq.message.map(_.messageId), cbq.inlineMessageId, gameManagerHost, cbq.gameShortName.get) // throws if not a game callback } import marshalling._ implicit val payloadEncoder: Encoder[Payload] = deriveEncoder[Payload] implicit val payloadDecoder: Decoder[Payload] = deriveDecoder[Payload] }
Example 48
Source File: YetAnotherAkkaClient.scala From telegram with Apache License 2.0 | 5 votes |
package com.bot4s.telegram.clients import akka.actor.ActorSystem import akka.http.scaladsl.Http import akka.http.scaladsl.marshalling.Marshal import akka.http.scaladsl.model.Uri.Path import akka.http.scaladsl.model._ import akka.http.scaladsl.unmarshalling.Unmarshal import akka.stream.Materializer import akka.stream.scaladsl.{Sink, Source} import cats.instances.future._ import com.bot4s.telegram.api.RequestHandler import com.bot4s.telegram.methods.{Request, Response} import io.circe.{Decoder, Encoder} import slogging.StrictLogging import com.bot4s.telegram.marshalling.responseDecoder import scala.concurrent.{ExecutionContext, Future} class YetAnotherAkkaClient(token: String, telegramHost: String = "api.telegram.org") (implicit system: ActorSystem, materializer: Materializer, ec: ExecutionContext) extends RequestHandler[Future] with StrictLogging { private val flow = Http().outgoingConnectionHttps(telegramHost) import com.bot4s.telegram.marshalling.AkkaHttpMarshalling._ override def sendRequest[R, T <: Request[_]](request: T)(implicit encT: Encoder[T], decR: Decoder[R]): Future[R] = { Source.fromFuture( Marshal(request).to[RequestEntity] .map { re => HttpRequest(HttpMethods.POST, Uri(path = Path(s"/bot$token/" + request.methodName)), entity = re) }) .via(flow) .mapAsync(1)(r => Unmarshal(r.entity).to[Response[R]]) .runWith(Sink.head) .map(processApiResponse[R]) } }
Example 49
Source File: AkkaHttpClient.scala From telegram with Apache License 2.0 | 5 votes |
package com.bot4s.telegram.clients import akka.actor.ActorSystem import akka.http.scaladsl.Http import akka.http.scaladsl.marshalling._ import akka.http.scaladsl.model._ import akka.http.scaladsl.unmarshalling.Unmarshal import akka.stream.Materializer import cats.instances.future._ import com.bot4s.telegram.api.RequestHandler import com.bot4s.telegram.marshalling.AkkaHttpMarshalling import com.bot4s.telegram.marshalling._ import com.bot4s.telegram.methods.{Request, Response} import io.circe.{Decoder, Encoder} import slogging.StrictLogging import scala.concurrent.{ExecutionContext, Future} class AkkaHttpClient(token: String, telegramHost: String = "api.telegram.org") (implicit system: ActorSystem, materializer: Materializer, ec: ExecutionContext) extends RequestHandler[Future] with StrictLogging { import AkkaHttpMarshalling._ private val apiBaseUrl = s"https://$telegramHost/bot$token/" private val http = Http() override def sendRequest[R, T <: Request[_]](request: T)(implicit encT: Encoder[T], decR: Decoder[R]): Future[R] = { Marshal(request).to[RequestEntity] .map { re => HttpRequest(HttpMethods.POST, Uri(apiBaseUrl + request.methodName), entity = re) } .flatMap(http.singleRequest(_)) .flatMap(r => Unmarshal(r.entity).to[Response[R]]) .map(t => processApiResponse[R](t)) } }
Example 50
Source File: Polling.scala From telegram with Apache License 2.0 | 5 votes |
package com.bot4s.telegram.future import com.bot4s.telegram.api.{Polling => BasePolling} import com.bot4s.telegram.methods.{DeleteWebhook, GetMe} import com.bot4s.telegram.models.{Update, User} import slogging.StrictLogging import scala.concurrent.Future import scala.util.control.NonFatal trait Polling extends BasePolling[Future] with BotExecutionContext with StrictLogging { private type OffsetUpdates = (Option[Long], Seq[Update], User) @volatile private var polling: Future[Unit] = _ private def poll(seed: Future[OffsetUpdates]): Future[OffsetUpdates] = { seed.flatMap { case (offset, updates, user) => val maxOffset = updates .map(_.updateId) .foldLeft(offset) { (acc, e) => Some(acc.fold(e)(e max _)) } // Spawn next request before processing updates. val f = if (polling == null) seed else poll( pollingGetUpdates(maxOffset.map(_ + 1)).recover { case NonFatal(e) => logger.error("GetUpdates failed", e) Seq.empty[Update] }.map((maxOffset, _, user)) ) for (u <- updates) { try { receiveUpdate(u, Some(user)) } catch { case NonFatal(e) => // Log and swallow, exception handling should happen on receiveUpdate. logger.error(s"receiveUpdate failed while processing: $u", e) } } f } } private def startPolling(user: User): Future[Unit] = { logger.info(s"Starting (long) polling: timeout=$pollingTimeout seconds") polling = poll(Future.successful((None, Seq(), user))).map(_ => ()) polling.onComplete { case _ => logger.info("Long polling terminated") } polling } override def run(): Future[Unit] = synchronized { if (polling != null) { throw new RuntimeException("Bot is already running") } for { deleted <- request(DeleteWebhook) if deleted getMe <- request(GetMe) p <- startPolling(getMe) } yield { p } } override def shutdown(): Unit = synchronized { if (polling == null) { throw new RuntimeException("Bot is not running") } super.shutdown() polling = null } }
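The Polling trait above drives the update loop: run() deletes any webhook, fetches the bot identity with GetMe, and then chains pollingGetUpdates calls until shutdown() is invoked. Starting one of the example bots therefore amounts to constructing it and awaiting run(). A minimal, hypothetical launcher (the environment variable name and the choice of RandomBot are illustrative assumptions):

import scala.concurrent.Await
import scala.concurrent.duration.Duration

object RandomBotApp extends App {
  // BOT_TOKEN is a hypothetical environment variable holding the Telegram token.
  val bot = new RandomBot(sys.env("BOT_TOKEN"))
  val eol = bot.run()                 // starts long polling; completes when shutdown() is called
  sys.addShutdownHook(bot.shutdown()) // stop polling on JVM exit
  Await.result(eol, Duration.Inf)
}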
Example 51
Source File: MessagesSuite.scala From telegram with Apache License 2.0 | 5 votes |
package com.bot4s.telegram.api import cats.instances.future._ import com.bot4s.telegram.api.declarative._ import com.bot4s.telegram.models.{Message, Update} import org.scalamock.scalatest.MockFactory import org.scalatest.FlatSpec import scala.concurrent.ExecutionContext.Implicits._ import scala.concurrent.Future class MessagesSuite extends FlatSpec with MockFactory with TestUtils { trait Fixture { val handler = mockFunction[Message, Future[Unit]] val bot = new TestBot with Messages[Future] } "A message filter " should "accept matches" in new Fixture { val helloMsg = textMessage("hello") handler.expects(helloMsg).returning(Future.successful(())).once() when[Future, Message](bot.onMessage, _.text.contains("hello"))(handler) bot.receiveMessage(helloMsg).get } it should "ignore non-matches" in new Fixture { handler.expects(*).never() when[Future, Message](bot.onMessage, _.text.contains("hello"))(handler) bot.receiveMessage(textMessage("abc")) } "onMessage" should "catch all messages" in new Fixture { val msgs = (0 until 100).map (t => textMessage(t.toString)) for (m <- msgs) handler.expects(m).returning(Future.successful(())).once() bot.onMessage(handler) val r = Future.traverse(msgs) { m => bot.receiveUpdate(Update(123, Some(m)), None) } r.get } "editedMessage filter " should "accept matches" in new Fixture { val helloMsg = textMessage("hello") handler.expects(helloMsg).returning(Future.successful(())).once() when[Future, Message](bot.onEditedMessage, _.text.contains("hello"))(handler) bot.receiveEditedMessage(helloMsg).get } it should "ignore non-matches" in new Fixture { handler.expects(*).never() when[Future, Message](bot.onEditedMessage, _.text.contains("hello"))(handler) bot.receiveEditedMessage(textMessage("abc")) } "onEditedMessage" should "catch all messages" in new Fixture { val msgs = (0 until 100).map (t => textMessage(t.toString)) for (m <- msgs) handler.expects(m).returning(Future.successful(())).once() bot.onEditedMessage(handler) val r = Future.traverse(msgs) { m => bot.receiveUpdate(Update(123, editedMessage = Some(m)), None) } r.get } }
Example 52
Source File: InlineQueriesSuite.scala From telegram with Apache License 2.0 | 5 votes |
package com.bot4s.telegram.api import cats.instances.future._ import com.bot4s.telegram.api.declarative._ import com.bot4s.telegram.models.{InlineQuery, Update} import org.scalamock.scalatest.MockFactory import org.scalatest.FlatSpec import scala.concurrent.ExecutionContext.Implicits._ import scala.concurrent.Future class InlineQueriesSuite extends FlatSpec with MockFactory with TestUtils { trait Fixture { val handler = mockFunction[InlineQuery, Future[Unit]] val bot = new TestBot with InlineQueries[Future] with RegexCommands[Future] } "Inline query filter" should "accept matches" in new Fixture { val q = inlineQuery("hello") handler.expects(q).returning(Future.successful(())).once() when[Future, InlineQuery](bot.onInlineQuery, _.query == "hello")(handler) bot.receiveInlineQuery(q).get } it should "ignore non-matches" in new Fixture { handler.expects(*).never() when[Future, InlineQuery](bot.onInlineQuery, _.query == "hello")(handler) bot.receiveInlineQuery(inlineQuery("abc")) } "onInlineQuery" should "catch all messages" in new Fixture { val queries = (0 until 100).map (t => inlineQuery(t.toString)) for (q <- queries) handler.expects(q).returning(Future.successful(())).once() bot.onInlineQuery(handler) val r = Future.traverse(queries) { q => bot.receiveUpdate(Update(123, inlineQuery = Some(q)), None) } r.get } "onRegexInline" should "pass matched groups" in new Fixture { val argsHandler = mockFunction[InlineQuery, Seq[String], Future[Unit]] argsHandler.expects(*, Seq("1234")).returning(Future.successful(())).once() bot.onRegexInline("""/cmd ([0-9]+)""".r)(argsHandler.curried) bot.receiveInlineQuery(inlineQuery("/cmd 1234")).get } }
Example 53
Source File: TestUtils.scala From telegram with Apache License 2.0 | 5 votes |
package com.bot4s.telegram.api import com.bot4s.telegram.models._ import scala.concurrent.duration._ import scala.concurrent.Await import scala.concurrent.Future trait TestUtils { def textMessage(text: String): Message = Message(0, chat = Chat(0, ChatType.Private), date = 0, text = Some(text)) def user(name: String): User = User(0, false, name) def inlineQuery(query: String): InlineQuery = { InlineQuery("0", from = user("Pepe"), query = query, offset = "") } implicit class FutureOps[A](f: Future[A]) { def get: A = Await.result(f, 10.seconds) } }
Example 54
Source File: RegexCommandsSuite.scala From telegram with Apache License 2.0 | 5 votes |
package com.bot4s.telegram.api import com.bot4s.telegram.api.declarative.RegexCommands import com.bot4s.telegram.models.Message import org.scalamock.scalatest.MockFactory import org.scalatest.FlatSpec import scala.concurrent.ExecutionContext.Implicits._ import scala.concurrent.Future class RegexCommandsSuite extends FlatSpec with MockFactory with TestUtils { trait Fixture { val handler = mockFunction[Message, Seq[String], Future[Unit]] val bot = new TestBot with RegexCommands[Future] } behavior of "RegexCommands" it should "match simple regex" in new Fixture { handler.expects(*, Seq("/pepe")).returning(Future.successful(())).once() handler.expects(*, Seq("/cojo")).returning(Future.successful(())).once() bot.onRegex("""(/pepe|/cojo)""".r)(handler.curried) (for { // Invalid _ <- bot.receiveMessage(textMessage("/cocou")) _ <- bot.receiveMessage(textMessage("/pepecojo")) _ <- bot.receiveMessage(textMessage("/cojopepe")) // Valid _ <- bot.receiveMessage(textMessage(" /pepe ")) _ <- bot.receiveMessage(textMessage("/cojo")) } yield ()).get } it should "pass regex groups" in new Fixture { handler.expects(*, List("1234")).returning(Future.successful(())).once bot.onRegex("""/cmd ([0-9]+)""".r)(handler.curried) bot.receiveMessage(textMessage("/cmd 1234")).get } }
Example 55
Source File: ExposedRpcInterfaces.scala From udash-demos with GNU General Public License v3.0 | 5 votes |
package io.udash.todo.rpc import io.udash.rpc._ import io.udash.todo.rpc.model.Todo import io.udash.todo.services.TodoStorage import scala.concurrent.Future import scala.concurrent.ExecutionContext.Implicits.global class ExposedRpcInterfaces(todoStorage: TodoStorage) extends MainServerRPC { override def store(todos: Seq[Todo]): Future[Boolean] = Future { if (todoStorage.store(todos)) { ClientRPC(AllClients).storeUpdated(todos) true } else false } override def load(): Future[Seq[Todo]] = Future { todoStorage.load() } }
Example 56
Source File: MainServerREST.scala From udash-demos with GNU General Public License v3.0 | 5 votes |
package io.udash.demos.rest import com.avsystem.commons.rpc.rpcName import io.udash.demos.rest.model._ import io.udash.rest._ import scala.concurrent.Future trait MainServerREST { @RESTName("book") def phoneBooks(): PhoneBooksREST @RESTName("book") @rpcName("selectBook") def phoneBooks(@URLPart id: PhoneBookId): PhoneBookManagementREST @RESTName("contact") def contacts(): ContactsREST @RESTName("contact") @rpcName("selectContact") def contacts(@URLPart id: ContactId): ContactManagementREST } object MainServerREST extends DefaultRESTFramework.RPCCompanion[MainServerREST] trait PhoneBooksREST { @GET @SkipRESTName @rpcName("loadAll") def load(): Future[Seq[PhoneBookInfo]] @POST @SkipRESTName def create(@Body book: PhoneBookInfo): Future[PhoneBookInfo] } object PhoneBooksREST extends DefaultRESTFramework.RPCCompanion[PhoneBooksREST] trait PhoneBookManagementREST { @GET @SkipRESTName def load(): Future[PhoneBookInfo] @PUT @SkipRESTName def update(@Body book: PhoneBookInfo): Future[PhoneBookInfo] @DELETE @SkipRESTName def remove(): Future[PhoneBookInfo] def contacts(): PhoneBookContactsREST } object PhoneBookManagementREST extends DefaultRESTFramework.RPCCompanion[PhoneBookManagementREST] trait PhoneBookContactsREST { @GET @SkipRESTName def load(): Future[Seq[ContactId]] @GET def count(): Future[Int] @POST @RESTName("manage") def add(@Body contactId: ContactId): Future[Unit] @DELETE @RESTName("manage") def remove(@Body contactId: ContactId): Future[Unit] } object PhoneBookContactsREST extends DefaultRESTFramework.RPCCompanion[PhoneBookContactsREST] trait ContactsREST { @GET @SkipRESTName @rpcName("loadAll") def load(): Future[Seq[Contact]] @POST @SkipRESTName def create(@Body contact: Contact): Future[Contact] } object ContactsREST extends DefaultRESTFramework.RPCCompanion[ContactsREST] trait ContactManagementREST { @GET @SkipRESTName def load(): Future[Contact] @PUT @SkipRESTName def update(@Body book: Contact): Future[Contact] @DELETE @SkipRESTName def remove(): Future[Contact] } object ContactManagementREST extends DefaultRESTFramework.RPCCompanion[ContactManagementREST]
Example 57
Source File: IndexPresenter.scala From udash-demos with GNU General Public License v3.0 | 5 votes |
package io.udash.demos.rest.views.index import io.udash._ import io.udash.demos.rest.IndexState import io.udash.demos.rest.model.{Contact, ContactId, PhoneBookId, PhoneBookInfo} import org.scalajs.dom import scala.concurrent.ExecutionContext.Implicits.global import scala.concurrent.Future import scala.util.{Failure, Success} class IndexPresenter(model: ModelProperty[IndexViewModel]) extends Presenter[IndexState.type] { import io.udash.demos.rest.ApplicationContext._ override def handleState(state: IndexState.type): Unit = refresh() def removeContact(id: ContactId): Unit = { restServer.contacts(id).remove() onComplete { case Success(removedContact) => model.subSeq(_.contacts.elements).remove(removedContact) refreshPhoneBooksSizes(model.subModel(_.books)) case Failure(ex) => dom.window.alert(s"Contact removing failed! ($ex)") } } def removePhoneBook(id: PhoneBookId): Unit = { restServer.phoneBooks(id).remove() onComplete { case Success(_) => val elements = model.subSeq(_.books.elements) val removed = elements.get.find(_.id == id) removed.foreach(elements.remove) case Failure(ex) => dom.window.alert(s"Phone book removing failed! ($ex)") } } def refresh(): Unit = { refreshPhoneBooks(model.subModel(_.books), restServer.phoneBooks().load(), "Loading phone books...") refreshContacts(model.subModel(_.contacts), restServer.contacts().load(), "Loading contacts...") } private def refreshContacts(model: ModelProperty[DataLoadingModel[Contact]], elements: Future[Seq[Contact]], loadingText: String) : Unit = { model.subProp(_.loaded).set(false) model.subProp(_.loadingText).set(loadingText) elements onComplete { case Success(elems) => model.subProp(_.loaded).set(true) model.subSeq(_.elements).set(elems) case Failure(ex) => model.subProp(_.loadingText).set(s"Error: $ex") } } private def refreshPhoneBooks(model: ModelProperty[DataLoadingModel[PhoneBookExtInfo]], elements: Future[Seq[PhoneBookInfo]], loadingText: String) : Unit = { model.subProp(_.loaded).set(false) model.subProp(_.loadingText).set(loadingText) elements onComplete { case Success(elems) => model.subProp(_.loaded).set(true) model.subSeq(_.elements).clear() elems.foreach { el => model.subSeq(_.elements).append( PhoneBookExtInfo(el.id, el.name, el.description, 0) ) } refreshPhoneBooksSizes(model) case Failure(ex) => model.subProp(_.loadingText).set(s"Error: $ex") } } private def refreshPhoneBooksSizes(model: ModelProperty[DataLoadingModel[PhoneBookExtInfo]]): Unit = { model.subSeq(_.elements).elemProperties.foreach { el => val element = el.asModel restServer.phoneBooks(el.get.id).contacts().count() onComplete { case Success(count) => element.subProp(_.contactsCount).set(count) case Failure(ex) => dom.window.alert(s"Contacts count for book ${el.get.id} loading failed: $ex") element.subProp(_.contactsCount).set(-1) } } } }
Example 58
Source File: AppLoader.scala From slim-play with MIT License | 5 votes |
import play.api.ApplicationLoader.Context import play.api._ import play.api.mvc.Results._ import play.api.routing.Router import play.api.routing.sird._ import scala.concurrent.Future class AppComponents(context: Context) extends BuiltInComponentsFromContext(context) { val httpFilters = Nil override val Action = defaultActionBuilder val router: Router = Router.from { // Essentially copied verbatim from the SIRD example case GET(p"/hello/$to") => Action { Ok(s"Hello $to") } case GET(p"/sqrt/${double(num)}") => Action.async { Future { Ok(Math.sqrt(num).toString) } } } } class AppLoader extends ApplicationLoader { def load(context: Context) = new AppComponents(context).application }
Example 59
Source File: JwtOptionallyAuthenticatedActionBuilder.scala From scala-play-realworld-example-app with MIT License | 5 votes |
package users.controllers import authentication.exceptions.ExceptionWithCode import authentication.jwt.services.JwtAuthenticator import authentication.models.NotAuthenticatedUserRequest import commons.services.ActionRunner import play.api.mvc import play.api.mvc._ import users.authentication.AuthenticatedUser import users.repositories.UserRepo import scala.concurrent.{ExecutionContext, Future} private[users] class JwtOptionallyAuthenticatedActionBuilder( parsers: PlayBodyParsers, jwtAuthenticator: JwtAuthenticator, userRepo: UserRepo, actionRunner: ActionRunner )(implicit ec: ExecutionContext) extends BaseActionBuilder(jwtAuthenticator, userRepo) with OptionallyAuthenticatedActionBuilder { override val parser: BodyParser[AnyContent] = new mvc.BodyParsers.Default(parsers) override protected def executionContext: ExecutionContext = ec override def invokeBlock[A](request: Request[A], block: OptionallyAuthenticatedUserRequest[A] => Future[Result]): Future[Result] = { val authenticateAction = actionRunner.runTransactionally(authenticate(request)) authenticateAction .map(securityUserIdAndToken => new AuthenticatedUserRequest(AuthenticatedUser(securityUserIdAndToken), request)) .recover({ case _: ExceptionWithCode => new NotAuthenticatedUserRequest(request) }) .flatMap(block) } }
Example 60
Source File: JwtAuthenticatedActionBuilder.scala From scala-play-realworld-example-app with MIT License | 5 votes |
package users.controllers import authentication.exceptions.{AuthenticationExceptionCode, ExceptionWithCode} import authentication.jwt.services.JwtAuthenticator import authentication.models.NotAuthenticatedUserRequest import commons.services.ActionRunner import play.api.libs.json.Json import play.api.mvc import play.api.mvc.Results._ import play.api.mvc._ import users.authentication.AuthenticatedUser import users.repositories.UserRepo import scala.concurrent.{ExecutionContext, Future} private[users] class JwtAuthenticatedActionBuilder( parsers: PlayBodyParsers, jwtAuthenticator: JwtAuthenticator, userRepo: UserRepo, actionRunner: ActionRunner ) (implicit ec: ExecutionContext) extends BaseActionBuilder(jwtAuthenticator, userRepo) with AuthenticatedActionBuilder { override val parser: BodyParser[AnyContent] = new mvc.BodyParsers.Default(parsers) override protected def executionContext: ExecutionContext = ec private def onUnauthorized(exceptionCode: AuthenticationExceptionCode, requestHeader: RequestHeader) = { val response = HttpExceptionResponse(exceptionCode) Unauthorized(Json.toJson(response)) } override def invokeBlock[A](request: Request[A], block: AuthenticatedUserRequest[A] => Future[Result]): Future[Result] = { actionRunner.runTransactionally(authenticate(request)) .flatMap(userAndToken => { val authenticatedRequest = new AuthenticatedUserRequest(AuthenticatedUser(userAndToken), request) block(authenticatedRequest) }) .recover({ case e: ExceptionWithCode => onUnauthorized(e.exceptionCode, request) }) } }
Example 61
Source File: CommentTestHelper.scala From scala-play-realworld-example-app with MIT License | 5 votes |
package articles.test_helpers import articles.models._ import commons_test.test_helpers.WsScalaTestClientWithHost.TestWsClient import commons_test.test_helpers.{ResponseTransformer, WsScalaTestClientWithHost} import play.api.http.HeaderNames import play.api.libs.json.Json import play.api.libs.ws.WSResponse import scala.concurrent.{ExecutionContext, Future} class CommentTestHelper(executionContext: ExecutionContext) extends WsScalaTestClientWithHost { implicit private val ec: ExecutionContext = executionContext def delete(comment: CommentWithAuthor, article: ArticleWithTags, token: String) (implicit testWsClient: TestWsClient): Future[WSResponse] = { wsUrl(s"/articles/${article.slug}/comments/${comment.id.value}") .addHttpHeaders(HeaderNames.AUTHORIZATION -> s"Token $token") .delete() } def list[ReturnType](article: ArticleWithTags) (implicit testWsClient: TestWsClient, responseTransformer: ResponseTransformer[ReturnType]): Future[ReturnType] = { wsUrl(s"/articles/${article.slug}/comments") .get() .map(responseTransformer(_)) } def create[ReturnType](article: ArticleWithTags, newComment: NewComment, token: String) (implicit testWsClient: TestWsClient, responseTransformer: ResponseTransformer[ReturnType]): Future[ReturnType] = { wsUrl(s"/articles/${article.slug}/comments") .addHttpHeaders(HeaderNames.AUTHORIZATION -> s"Token $token") .post(Json.toJson(NewCommentWrapper(newComment))) .map(responseTransformer(_)) } }
Example 62
Source File: ArticleTestHelper.scala From scala-play-realworld-example-app with MIT License | 5 votes |
package articles.test_helpers import articles.models._ import commons_test.test_helpers.WsScalaTestClientWithHost.TestWsClient import commons_test.test_helpers.{ResponseTransformer, WsScalaTestClientWithHost} import play.api.http.HeaderNames import play.api.libs.json.{JsObject, Json} import play.api.libs.ws.{EmptyBody, WSResponse} import scala.concurrent.{ExecutionContext, Future} class ArticleTestHelper(executionContext: ExecutionContext) extends WsScalaTestClientWithHost { def update(article: ArticleWithTags, articleUpdate: ArticleUpdate, token: String) (implicit testWsClient: TestWsClient): Future[WSResponse] = { wsUrl(s"/articles/${article.slug}") .addHttpHeaders(HeaderNames.AUTHORIZATION -> s"Token $token") .put(JsObject(Map("article" -> Json.toJson(articleUpdate)))) } implicit private val ex: ExecutionContext = executionContext private def getQueryParams(articlesPageRequest: ArticlesPageRequest) = { import articlesPageRequest._ val requiredParams = Seq("limit" -> limit.toString, "offset" -> offset.toString) val maybeFilterParam = articlesPageRequest match { case pg: ArticlesByTag => Some("tag" -> pg.tag) case pg: ArticlesByAuthor => Some("author" -> pg.author.value) case pg: ArticlesByFavorited => Some("favorited " -> pg.favoritedBy.value) case _: ArticlesAll => None } maybeFilterParam.map(requiredParams.prepended) .getOrElse(requiredParams) } def findAll[ReturnType](mainFeedPageRequest: ArticlesPageRequest, maybeToken: Option[String] = None) (implicit testWsClient: TestWsClient, responseTransformer: ResponseTransformer[ReturnType]): Future[ReturnType] = { val queryParams = getQueryParams(mainFeedPageRequest) val request = wsUrl( s"/articles").addQueryStringParameters(queryParams: _*) maybeToken .map(token => request.addHttpHeaders(HeaderNames.AUTHORIZATION -> s"Token $token")) .getOrElse(request) .get() .map(responseTransformer(_)) } def favorite[ReturnType](slug: String, token: String) (implicit testWsClient: TestWsClient, responseTransformer: ResponseTransformer[ReturnType]): Future[ReturnType] = { wsUrl(s"/articles/$slug/favorite") .addHttpHeaders(HeaderNames.AUTHORIZATION -> s"Token $token") .post(EmptyBody) .map(responseTransformer(_)) } def getBySlug[ReturnType](slug: String)(implicit testWsClient: TestWsClient, responseTransformer: ResponseTransformer[ReturnType]): Future[ReturnType] = { wsUrl(s"/articles/$slug") .get() .map(responseTransformer(_)) } def delete(articleWithTags: ArticleWithTags, token: String)(implicit testWsClient: TestWsClient): Future[WSResponse] = { wsUrl(s"/articles/${articleWithTags.slug}") .addHttpHeaders(HeaderNames.AUTHORIZATION -> s"Token $token") .delete() } def create[ReturnType](newArticle: NewArticle, token: String) (implicit testWsClient: TestWsClient, responseTransformer: ResponseTransformer[ReturnType]): Future[ReturnType] = { val body = Json.toJson(NewArticleWrapper(newArticle)) wsUrl("/articles") .addHttpHeaders(HeaderNames.AUTHORIZATION -> s"Token $token") .post(body) .map(responseTransformer(_)) } }
Example 63
Source File: ProfileTestHelper.scala From scala-play-realworld-example-app with MIT License | 5 votes |
package users.test_helpers import commons.models.Username import commons_test.test_helpers.{ResponseTransformer, WsScalaTestClientWithHost} import commons_test.test_helpers.WsScalaTestClientWithHost.TestWsClient import play.api.http.HeaderNames import play.api.libs.ws.EmptyBody import scala.concurrent.{ExecutionContext, Future} class ProfileTestHelper(executionContext: ExecutionContext) extends WsScalaTestClientWithHost { def follow[ReturnType](username: Username, token: String) (implicit testWsClient: TestWsClient, responseTransformer: ResponseTransformer[ReturnType]): Future[ReturnType] = { wsUrl(s"/profiles/${username.value}/follow") .addHttpHeaders(HeaderNames.AUTHORIZATION -> s"Token $token") .post(EmptyBody) .map(responseTransformer(_))(executionContext) } }
Example 64
Source File: UserTestHelper.scala From scala-play-realworld-example-app with MIT License | 5 votes |
package users.test_helpers import authentication.models.PlainTextPassword import commons.models.Email import commons_test.test_helpers.{ResponseTransformer, WsScalaTestClientWithHost} import commons_test.test_helpers.WsScalaTestClientWithHost.TestWsClient import play.api.http.HeaderNames import play.api.libs.json.{JsObject, JsString, JsValue, Json} import play.api.libs.ws.WSResponse import users.models._ import scala.concurrent.{ExecutionContext, Future} class UserTestHelper(executionContext: ExecutionContext) extends WsScalaTestClientWithHost { def update(updateUser: UserUpdate, token: String)(implicit testWsClient: TestWsClient): Future[WSResponse] = { val registrationRequestBody = Json.toJson(UpdateUserWrapper(updateUser)) wsUrl(s"/user") .addHttpHeaders(HeaderNames.AUTHORIZATION -> s"Token $token") .put(registrationRequestBody) } def login(email: Email, password: PlainTextPassword)(implicit testWsClient: TestWsClient): Future[WSResponse] = { val requestBody: JsValue = buildEmailAndPasswordRequestBody(email, password) wsUrl("/users/login") .post(requestBody) } private def buildEmailAndPasswordRequestBody(email: Email, password: PlainTextPassword) = { val rawEmail = email.value val rawPassword = password.value val userJsonObj = JsObject(Map("email" -> JsString(rawEmail), "password" -> JsString(rawPassword))) JsObject(Map("user" -> userJsonObj)) } implicit private val ex: ExecutionContext = executionContext def register[ReturnType](userRegistration: UserRegistration) (implicit testWsClient: TestWsClient, responseTransformer: ResponseTransformer[ReturnType]): Future[ReturnType] = { require(userRegistration != null) wsUrl("/users") .post(Json.toJson(UserRegistrationWrapper(userRegistration))) .map(responseTransformer(_)) } }
Example 65
Source File: TestUtils.scala From scala-play-realworld-example-app with MIT License | 5 votes |
package commons_test.test_helpers import commons.services.ActionRunner import slick.dbio.DBIO import scala.concurrent.duration.{Duration, DurationInt} import scala.concurrent.{Await, Future} object TestUtils { val config: Map[String, String] = Map( "play.evolutions.enabled" -> "true", "play.evolutions.autoApply" -> "true", "slick.dbs.default.profile" -> "slick.jdbc.H2Profile$", "slick.dbs.default.db.driver" -> "org.h2.Driver", "slick.dbs.default.db.url" -> "jdbc:h2:mem:play;DATABASE_TO_UPPER=false", "slick.dbs.default.db.user" -> "user", "slick.dbs.default.db.password" -> "" ) def runAndAwaitResult[T](action: DBIO[T])(implicit actionRunner: ActionRunner, duration: Duration = new DurationInt(1).minute): T = { val future: Future[T] = actionRunner.runTransactionally(action) Await.result(future, duration) } }
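runAndAwaitResult wraps a Slick DBIO in ActionRunner.runTransactionally and blocks for the result, which keeps test code linear. A sketch under the assumption that an ActionRunner can be supplied (here via an implicit constructor parameter, normally obtained from the test application's injector); the DBIO is a trivial stand-in:

import commons.services.ActionRunner
import commons_test.test_helpers.TestUtils
import slick.dbio.DBIO

class ExampleDbSpec(implicit actionRunner: ActionRunner) {
  // Blocks for up to one minute (the default timeout) and returns the DBIO's value.
  val answer: Int = TestUtils.runAndAwaitResult(DBIO.successful(42))
}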
Example 66
Source File: BlockTransferService.scala From SparkCore with Apache License 2.0 | 5 votes |
package org.apache.spark.network import java.io.Closeable import java.nio.ByteBuffer import scala.concurrent.{Promise, Await, Future} import scala.concurrent.duration.Duration import org.apache.spark.Logging import org.apache.spark.network.buffer.{NioManagedBuffer, ManagedBuffer} import org.apache.spark.network.shuffle.{ShuffleClient, BlockFetchingListener} import org.apache.spark.storage.{BlockManagerId, BlockId, StorageLevel} private[spark] abstract class BlockTransferService extends ShuffleClient with Closeable with Logging { def uploadBlockSync( hostname: String, port: Int, execId: String, blockId: BlockId, blockData: ManagedBuffer, level: StorageLevel): Unit = { Await.result(uploadBlock(hostname, port, execId, blockId, blockData, level), Duration.Inf) } }
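uploadBlockSync illustrates the common Future-blocking wrapper: the asynchronous uploadBlock call is turned into a synchronous one with Await.result. The same pattern in isolation, as a generic sketch rather than Spark API:

import scala.concurrent.{Await, Future}
import scala.concurrent.duration.Duration

object BlockingWrapperSketch {
  // Wraps an asynchronous call in a blocking one, mirroring uploadBlockSync above.
  def blockingCall[T](asyncCall: => Future[T], atMost: Duration = Duration.Inf): T =
    Await.result(asyncCall, atMost)
}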
Example 67
Source File: BlockManagerSlaveActor.scala From SparkCore with Apache License 2.0 | 5 votes |
package org.apache.spark.storage import scala.concurrent.Future import akka.actor.{ActorRef, Actor} import org.apache.spark.{Logging, MapOutputTracker, SparkEnv} import org.apache.spark.storage.BlockManagerMessages._ import org.apache.spark.util.ActorLogReceive private[storage] class BlockManagerSlaveActor( blockManager: BlockManager, mapOutputTracker: MapOutputTracker) extends Actor with ActorLogReceive with Logging { import context.dispatcher // Operations that involve removing blocks may be slow and should be done asynchronously override def receiveWithLogging = { case RemoveBlock(blockId) => doAsync[Boolean]("removing block " + blockId, sender) { blockManager.removeBlock(blockId) true } case RemoveRdd(rddId) => doAsync[Int]("removing RDD " + rddId, sender) { blockManager.removeRdd(rddId) } case RemoveShuffle(shuffleId) => doAsync[Boolean]("removing shuffle " + shuffleId, sender) { if (mapOutputTracker != null) { mapOutputTracker.unregisterShuffle(shuffleId) } SparkEnv.get.shuffleManager.unregisterShuffle(shuffleId) } case RemoveBroadcast(broadcastId, _) => doAsync[Int]("removing broadcast " + broadcastId, sender) { blockManager.removeBroadcast(broadcastId, tellMaster = true) } case GetBlockStatus(blockId, _) => sender ! blockManager.getStatus(blockId) case GetMatchingBlockIds(filter, _) => sender ! blockManager.getMatchingBlockIds(filter) } private def doAsync[T](actionMessage: String, responseActor: ActorRef)(body: => T) { val future = Future { logDebug(actionMessage) body } future.onSuccess { case response => logDebug("Done " + actionMessage + ", response is " + response) responseActor ! response logDebug("Sent response: " + response + " to " + responseActor) } future.onFailure { case t: Throwable => logError("Error in " + actionMessage, t) responseActor ! null.asInstanceOf[T] } } }
Example 68
Source File: LoadTest.scala From ws_to_kafka with MIT License | 5 votes |
package com.pkinsky import java.util.concurrent.atomic.AtomicInteger import akka.http.scaladsl.model.ws.{InvalidUpgradeResponse, WebsocketUpgradeResponse, WebsocketRequest, TextMessage} import akka.http.scaladsl.Http import akka.http.scaladsl.model.Uri import akka.stream.ThrottleMode import akka.stream.scaladsl.{Keep, Sink, RunnableGraph, Source} import play.api.libs.json.Json import scala.concurrent.{Future, Await} import scala.concurrent.duration._ import scala.language.postfixOps object LoadTest extends App with AppContext { val clients = 256 val eventsPerClient = 256 val eventsSent = new AtomicInteger(0) def testData(clientId: String): Source[Event, Unit] = Source.unfoldInf(1) { n => val event = Event(s"msg number $n", clientId, System.currentTimeMillis()) (n + 1, event) }.take(eventsPerClient).throttle(1, 100 millis, 1, ThrottleMode.Shaping) def wsClient(clientId: String): RunnableGraph[Future[WebsocketUpgradeResponse]] = testData(clientId).map(e => TextMessage.Strict(Json.toJson(e).toString)) .map { x => eventsSent.incrementAndGet(); x } .viaMat(Http().websocketClientFlow(WebsocketRequest(Uri(s"ws://localhost:$port/ws"))))(Keep.right).to(Sink.ignore) //set up websocket connections (1 to clients).foreach { id => wsClient(s"client $id").run() } //watch kafka for messages sent via websocket val kafkaConsumerGraph: RunnableGraph[Future[Seq[Event]]] = kafka.consume[Event](eventTopic, "group_new") .take(clients * eventsPerClient).takeWithin(2 minutes) .toMat(Sink.seq)(Keep.right) val res = Await.result(kafkaConsumerGraph.run, 5 minutes) println(s"sent ${eventsSent.get()} events total") println(s"res size: ${res.length}") }
Example 69
Source File: StreamingUpload.scala From ws_to_kafka with MIT License | 5 votes |
package com.pkinsky import akka.http.scaladsl.Http import akka.http.scaladsl.model.{HttpResponse, HttpRequest} import akka.http.scaladsl.model.ws.{TextMessage, Message} import akka.http.scaladsl.server.Directives._ import akka.http.scaladsl.server.PathMatchers.PathEnd import akka.stream._ import akka.stream.scaladsl._ import com.softwaremill.react.kafka.ReactiveKafka import play.api.libs.json.Json import scala.concurrent.{Future, ExecutionContext} import scala.concurrent.duration._ import scala.util.Success import scala.language.postfixOps object StreamingUpload extends App with AppContext { val kafkaPublisherGraph: RunnableGraph[SourceQueue[Event]] = Source.queue[Event](1024, OverflowStrategy.backpressure).to(kafka.publish[Event](eventTopic)) val sourceQueue: SourceQueue[Event] = kafkaPublisherGraph.run val queueWriter: Sink[Event, Unit] = Flow[Event].mapAsync(1){ elem => sourceQueue.offer(elem) .andThen{ case Success(false) => println(s"failed to publish $elem to topic $eventTopic") case Success(true) => println(s"published $elem to topic $eventTopic") } }.to(Sink.ignore) val parseMessages: Flow[Message, Event, Unit] = Flow[Message] .collect{ case TextMessage.Strict(t) => val js = Json.parse(t) Json.fromJson[Event](js).get } val wsHandlerFlow: Flow[Message, Message, Unit] = Flow.fromSinkAndSource( sink = parseMessages.to(queueWriter), source = Source.maybe ) val routes: Flow[HttpRequest, HttpResponse, Unit] = get { path(PathEnd) { getFromResource("test.html") } ~ path("ws") { println("ws connection accepted") handleWebsocketMessages(wsHandlerFlow) } } Http().bindAndHandle(routes, "localhost", port).andThen{ case util.Failure(t) => println(s"error binding to localhost: $t")} println(s"listening on port $port, press ENTER to stop") awaitTermination() } object KafkaListener extends App with AppContext { val graph = kafka.consume[Event](eventTopic, "kafka_listener").toMat(Sink.foreach(println))(Keep.right) graph.run.onComplete(println) println(s"listening to Kafka topic $eventTopic, press ENTER to stop") awaitTermination() }
Example 70
Source File: TestData.scala From sangria with Apache License 2.0 | 5 votes |
package sangria.starWars import sangria.execution.deferred.{Deferred, DeferredResolver} import scala.concurrent.{ExecutionContext, Future} import scala.util.Try object TestData { object Episode extends Enumeration { val NEWHOPE, EMPIRE, JEDI = Value } trait Character { def id: String def name: Option[String] def friends: List[String] def appearsIn: List[Episode.Value] } case class Human(id: String, name: Option[String], friends: List[String], appearsIn: List[Episode.Value], homePlanet: Option[String]) extends Character case class Droid(id: String, name: Option[String], friends: List[String], appearsIn: List[Episode.Value], primaryFunction: Option[String]) extends Character case class DeferFriends(friends: List[String]) extends Deferred[List[Option[Character]]] val characters = List[Character]( Human( id = "1000", name = Some("Luke Skywalker"), friends = List("1002", "1003", "2000", "2001"), appearsIn = List(Episode.NEWHOPE, Episode.EMPIRE, Episode.JEDI), homePlanet = Some("Tatooine")), Human( id = "1001", name = Some("Darth Vader"), friends = List("1004"), appearsIn = List(Episode.NEWHOPE, Episode.EMPIRE, Episode.JEDI), homePlanet = Some("Tatooine")), Human( id = "1002", name = Some("Han Solo"), friends = List("1000", "1003", "2001"), appearsIn = List(Episode.NEWHOPE, Episode.EMPIRE, Episode.JEDI), homePlanet = None), Human( id = "1003", name = Some("Leia Organa"), friends = List("1000", "1002", "2000", "2001"), appearsIn = List(Episode.NEWHOPE, Episode.EMPIRE, Episode.JEDI), homePlanet = Some("Alderaan")), Human( id = "1004", name = Some("Wilhuff Tarkin"), friends = List("1001"), appearsIn = List(Episode.NEWHOPE, Episode.EMPIRE, Episode.JEDI), homePlanet = None), Droid( id = "2000", name = Some("C-3PO"), friends = List("1000", "1002", "1003", "2001"), appearsIn = List(Episode.NEWHOPE, Episode.EMPIRE, Episode.JEDI), primaryFunction = Some("Protocol")), Droid( id = "2001", name = Some("R2-D2"), friends = List("1000", "1002", "1003"), appearsIn = List(Episode.NEWHOPE, Episode.EMPIRE, Episode.JEDI), primaryFunction = Some("Astromech")) ) class FriendsResolver extends DeferredResolver[Any] { override def resolve(deferred: Vector[Deferred[Any]], ctx: Any, queryState: Any)(implicit ec: ExecutionContext) = deferred map { case DeferFriends(friendIds) => Future.fromTry(Try(friendIds map (id => characters.find(_.id == id)))) } } class CharacterRepo { def getHero(episode: Option[Episode.Value]) = episode flatMap (_ => getHuman("1000")) getOrElse characters.last def getHuman(id: String): Option[Human] = characters.find(c => c.isInstanceOf[Human] && c.id == id).asInstanceOf[Option[Human]] def getDroid(id: String): Option[Droid] = characters.find(c => c.isInstanceOf[Droid] && c.id == id).asInstanceOf[Option[Droid]] def getCharacters(ids: Seq[String]): Seq[Character] = ids.flatMap(id => characters.find(_.id == id)) } }
Example 71
Source File: Fs2SubscriptionStream.scala From sangria with Apache License 2.0 | 5 votes |
package sangria.util import cats.effect.{ContextShift, IO} import fs2.Stream import sangria.streaming.SubscriptionStream import scala.concurrent.Future import scala.language.higherKinds object Fs2Support { type IOS[A] = Stream[IO, A] class Fs2SubscriptionStream(implicit CS: ContextShift[IO]) extends SubscriptionStream[IOS] { def supported[T[_]](other: SubscriptionStream[T]) = other.isInstanceOf[Fs2SubscriptionStream] def map[A, B](source: IOS[A])(fn: A => B) = source.map(fn) def singleFuture[T](value: Future[T]) = Stream.eval(IO.fromFuture(IO(value))) def single[T](value: T) = Stream.emit(value) def mapFuture[A, B](source: IOS[A])(fn: A => Future[B]) = source.evalMap(a => IO.fromFuture(IO(fn(a)))) def first[T](s: IOS[T]) = s.compile.toVector.map(_.head).unsafeToFuture def failed[T](e: Throwable) = Stream.raiseError[IO](e) def onComplete[Ctx, Res](result: IOS[Res])(op: => Unit) = result.onFinalize(IO(op)) def flatMapFuture[Ctx, Res, T](future: Future[T])(resultFn: T => IOS[Res]) = Stream.eval(IO.fromFuture(IO(future))).flatMap(resultFn) def merge[T](streams: Vector[IOS[T]]) = if (streams.nonEmpty) streams.tail.foldLeft(streams.head)(_.merge(_)) else throw new IllegalStateException("No streams produced!") def recover[T](stream: IOS[T])(fn: Throwable => T) = stream.handleErrorWith { case e => Stream.emit(fn(e)) } } implicit def observableSubscriptionStream(implicit CS: ContextShift[IO]): SubscriptionStream[IOS] = new Fs2SubscriptionStream }
Example 72
Source File: FutureResultSupport.scala From sangria with Apache License 2.0 | 5 votes |
package sangria.util import sangria.execution.{ErrorWithResolver, QueryAnalysisError} import sangria.marshalling.ResultMarshallerForType import language.postfixOps import scala.concurrent.{Await, ExecutionContext, Future} import scala.concurrent.duration._ import scala.concurrent.ExecutionContext.Implicits.global trait FutureResultSupport { implicit class FutureResult[T](f: Future[T]) { def await = Await.result(f, 10 seconds) def await(duration: Duration) = Await.result(f, duration) def awaitAndRecoverQueryAnalysis(implicit m: ResultMarshallerForType[T]): T = Await.result(recoverQueryAnalysis, 10 seconds) def recoverQueryAnalysis(implicit m: ResultMarshallerForType[T]): Future[T] = f.recover { case analysisError: QueryAnalysisError => analysisError.resolveError(m.marshaller).asInstanceOf[T] } def awaitAndRecoverQueryAnalysisScala(implicit ev: T =:= Any) = Await.result(recoverQueryAnalysisScala, 10 seconds) def recoverQueryAnalysisScala(implicit ev: T =:= Any) = f.recover { case analysisError: ErrorWithResolver => analysisError.resolveError } } object sync { val executionContext = ExecutionContext.fromExecutor(new java.util.concurrent.Executor { def execute(command: Runnable) = command.run() }) } }
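A minimal sketch of how this trait might be mixed into a test (the spec class, ScalaTest flavour and assertions are illustrative and not part of the original source):

import scala.concurrent.Future
import scala.concurrent.duration._
import org.scalatest.wordspec.AnyWordSpec
import sangria.util.FutureResultSupport

class AwaitExampleSpec extends AnyWordSpec with FutureResultSupport {
  "FutureResult" should {
    "block until the wrapped future completes" in {
      // uses the default 10 second timeout defined in the trait
      assert(Future.successful(42).await == 42)
      // an explicit timeout can be supplied as well
      assert(Future.successful("ok").await(1.second) == "ok")
    }
  }
}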
Example 73
Source File: Redis4CatsFunSuite.scala From redis4cats with Apache License 2.0 | 5 votes |
package dev.profunktor.redis4cats import cats.effect._ import cats.implicits._ import dev.profunktor.redis4cats.connection._ import dev.profunktor.redis4cats.data.RedisCodec import dev.profunktor.redis4cats.effect.Log.NoOp._ import munit.FunSuite import scala.concurrent.{ Await, ExecutionContext, Future } import scala.concurrent.duration.Duration abstract class Redis4CatsFunSuite(isCluster: Boolean) extends FunSuite { implicit val cs: ContextShift[IO] = IO.contextShift(ExecutionContext.global) implicit val timer: Timer[IO] = IO.timer(ExecutionContext.global) implicit val clock: Clock[IO] = timer.clock val flushAllFixture = new Fixture[Unit]("FLUSHALL") { def apply(): Unit = () override def afterEach(context: AfterEach): Unit = Await.result(flushAll(), Duration.Inf) } override def munitFixtures = List(flushAllFixture) override def munitFlakyOK: Boolean = true private val stringCodec = RedisCodec.Utf8 def withAbstractRedis[A, K, V](f: RedisCommands[IO, K, V] => IO[A])(codec: RedisCodec[K, V]): Future[Unit] = Redis[IO].simple("redis://localhost", codec).use(f).as(assert(true)).unsafeToFuture() def withRedis[A](f: RedisCommands[IO, String, String] => IO[A]): Future[Unit] = withAbstractRedis[A, String, String](f)(stringCodec) private def flushAll(): Future[Unit] = if (isCluster) withRedisCluster(_.flushAll) else withRedis(_.flushAll) // --- Cluster --- lazy val redisUri = List( "redis://localhost:30001", "redis://localhost:30002", "redis://localhost:30003" ).traverse(RedisURI.make[IO](_)) private def mkRedisCluster[K, V](codec: RedisCodec[K, V]): Resource[IO, RedisCommands[IO, K, V]] = for { uris <- Resource.liftF(redisUri) client <- RedisClusterClient[IO](uris: _*) cluster <- Redis[IO].fromClusterClient(client, codec) } yield cluster def withAbstractRedisCluster[A, K, V]( f: RedisCommands[IO, K, V] => IO[A] )(codec: RedisCodec[K, V]): Future[Unit] = mkRedisCluster(codec).use(f).as(assert(true)).unsafeToFuture() def withRedisCluster[A](f: RedisCommands[IO, String, String] => IO[A]): Future[Unit] = withAbstractRedisCluster[A, String, String](f)(stringCodec) }
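A concrete suite built on this base class might look roughly like the following (the test name, key and value are placeholders, and the set/get calls assume the usual redis4cats string command API):

import cats.implicits._
import dev.profunktor.redis4cats.Redis4CatsFunSuite

class StringCommandsSpec extends Redis4CatsFunSuite(isCluster = false) {

  test("SET followed by GET returns the stored value") {
    // withRedis returns a Future[Unit], which munit awaits for us
    withRedis { cmd =>
      cmd.set("example-key", "example-value") *>
        cmd.get("example-key").map(v => assert(v.contains("example-value")))
    }
  }
}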
Example 74
Source File: FutureSrc.scala From scala-library-compat with Apache License 2.0 | 5 votes |
package fix import scala.concurrent.{Future, ExecutionContext} import java.lang.Throwable import scala.concurrent.Future class FutureSrc(fs: Future[Int])(implicit ec: ExecutionContext){ class E1 extends Throwable case class E2(v: Int) extends Throwable case object E3 extends Throwable fs.onComplete { case scala.util.Failure(_: E1) => println("type pattern") case scala.util.Failure(E2(_)) => println("constructor pattern") case scala.util.Failure(E3) => println("singleton pattern") case _ => () } fs.onComplete { case scala.util.Success(x) if x > 0 => println("x > 0") case scala.util.Success(x) if x < 0 => println("x < 0") case _ => () }(ec) fs.map(identity).onComplete { case scala.util.Failure(x) => x case _ => () } fs.map(identity).onComplete { case scala.util.Success(x) => x case _ => () } val f = Future { 1 } }
Example 75
Source File: FutureSrc.scala From scala-library-compat with Apache License 2.0 | 5 votes |
package fix import scala.concurrent.{Future, ExecutionContext, future} import java.lang.Throwable class FutureSrc(fs: Future[Int])(implicit ec: ExecutionContext){ class E1 extends Throwable case class E2(v: Int) extends Throwable case object E3 extends Throwable fs.onFailure { case _: E1 => println("type pattern") case E2(_) => println("constructor pattern") case E3 => println("singleton pattern") } fs.onSuccess { case x if x > 0 => println("x > 0") case x if x < 0 => println("x < 0") }(ec) fs.map(identity).onFailure { case x => x } fs.map(identity).onSuccess { case x => x } val f = future { 1 } }
Example 76
Source File: MarathonApiServiceDiscovery.scala From akka-management with Apache License 2.0 | 5 votes |
package akka.discovery.marathon import java.net.InetAddress import akka.actor.ActorSystem import akka.discovery._ import akka.http.scaladsl._ import akka.http.scaladsl.model._ import akka.http.scaladsl.unmarshalling.Unmarshal import akka.stream.ActorMaterializer import scala.collection.immutable.Seq import scala.concurrent.Future import scala.concurrent.duration.FiniteDuration import scala.util.Try import AppList._ import JsonFormat._ import akka.annotation.ApiMayChange import akka.discovery.ServiceDiscovery.{ Resolved, ResolvedTarget } import akka.event.Logging @ApiMayChange object MarathonApiServiceDiscovery { @ApiMayChange class MarathonApiServiceDiscovery(system: ActorSystem) extends ServiceDiscovery { import MarathonApiServiceDiscovery._ import system.dispatcher private val log = Logging(system, getClass) private val http = Http()(system) private val settings = Settings(system) private implicit val mat: ActorMaterializer = ActorMaterializer()(system) override def lookup(lookup: Lookup, resolveTimeout: FiniteDuration): Future[Resolved] = { val uri = Uri(settings.appApiUrl).withQuery( Uri.Query( "embed" -> "apps.tasks", "embed" -> "apps.deployments", "label" -> settings.appLabelQuery.format(lookup.serviceName))) val request = HttpRequest(uri = uri) log.info("Requesting seed nodes by: {}", request.uri) val portName = lookup.portName match { case Some(name) => name case None => settings.appPortName } for { response <- http.singleRequest(request) entity <- response.entity.toStrict(resolveTimeout) appList <- { log.debug("Marathon API entity: [{}]", entity.data.utf8String) val unmarshalled = Unmarshal(entity).to[AppList] unmarshalled.failed.foreach { _ => log.error( "Failed to unmarshal Marathon API response status [{}], entity: [{}], uri: [{}]", response.status.value, entity.data.utf8String, uri) } unmarshalled } } yield Resolved(lookup.serviceName, targets(appList, portName)) } }
Example 77
Source File: KubernetesLease.scala From akka-management with Apache License 2.0 | 5 votes |
package akka.coordination.lease.kubernetes import java.util.concurrent.atomic.{ AtomicBoolean, AtomicInteger } import akka.actor.ExtendedActorSystem import akka.coordination.lease.{ LeaseException, LeaseSettings, LeaseTimeoutException } import akka.coordination.lease.scaladsl.Lease import akka.coordination.lease.kubernetes.LeaseActor._ import akka.coordination.lease.kubernetes.internal.KubernetesApiImpl import akka.dispatch.ExecutionContexts import akka.pattern.AskTimeoutException import akka.util.{ ConstantFun, Timeout } import scala.concurrent.Future object KubernetesLease { val configPath = "akka.coordination.lease.kubernetes" private val leaseCounter = new AtomicInteger(1) } class KubernetesLease private[akka] (system: ExtendedActorSystem, leaseTaken: AtomicBoolean, settings: LeaseSettings) extends Lease(settings) { private val k8sSettings = KubernetesSettings(settings.leaseConfig, settings.timeoutSettings) private val k8sApi = new KubernetesApiImpl(system, k8sSettings) private val leaseActor = system.systemActorOf( LeaseActor.props(k8sApi, settings, leaseTaken), s"kubernetesLease${KubernetesLease.leaseCounter.incrementAndGet}-${settings.leaseName}-${settings.ownerName}" ) def this(leaseSettings: LeaseSettings, system: ExtendedActorSystem) = this(system, new AtomicBoolean(false), leaseSettings) import akka.pattern.ask import system.dispatcher private implicit val timeout: Timeout = Timeout(settings.timeoutSettings.operationTimeout) override def checkLease(): Boolean = leaseTaken.get() override def release(): Future[Boolean] = { // replace with transform once 2.11 dropped (leaseActor ? Release()) .flatMap { case LeaseReleased => Future.successful(true) case InvalidRequest(msg) => Future.failed(new LeaseException(msg)) }(ExecutionContexts.sameThreadExecutionContext) .recoverWith { case _: AskTimeoutException => Future.failed(new LeaseTimeoutException( s"Timed out trying to release lease [${settings.leaseName}, ${settings.ownerName}]. It may still be taken.")) } } override def acquire(): Future[Boolean] = { acquire(ConstantFun.scalaAnyToUnit) } override def acquire(leaseLostCallback: Option[Throwable] => Unit): Future[Boolean] = { // replace with transform once 2.11 dropped (leaseActor ? Acquire(leaseLostCallback)) .flatMap { case LeaseAcquired => Future.successful(true) case LeaseTaken => Future.successful(false) case InvalidRequest(msg) => Future.failed(new LeaseException(msg)) } .recoverWith { case _: AskTimeoutException => Future.failed[Boolean](new LeaseTimeoutException( s"Timed out trying to acquire lease [${settings.leaseName}, ${settings.ownerName}]. It may still be taken.")) }(ExecutionContexts.sameThreadExecutionContext) } }
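In application code the lease is normally obtained through the LeaseProvider extension rather than constructed directly, as the contention spec below also does; a short sketch (system, lease and owner names are placeholders):

import akka.actor.ActorSystem
import akka.coordination.lease.kubernetes.KubernetesLease
import akka.coordination.lease.scaladsl.LeaseProvider
import scala.concurrent.Future

val system = ActorSystem("example")
val lease = LeaseProvider(system).getLease("example-lease", KubernetesLease.configPath, "node-1")

val granted: Future[Boolean] = lease.acquire()   // true if this owner now holds the lease
// ... later ...
val released: Future[Boolean] = lease.release()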
Example 78
Source File: LeaseContentionSpec.scala From akka-management with Apache License 2.0 | 5 votes |
package akka.coordination.lease.kubernetes import java.util.concurrent.Executors import akka.actor.ActorSystem import akka.coordination.lease.TimeoutSettings import akka.coordination.lease.kubernetes.internal.KubernetesApiImpl import akka.coordination.lease.scaladsl.LeaseProvider import akka.testkit.TestKit import com.typesafe.config.ConfigFactory import org.scalatest.concurrent.ScalaFutures import org.scalatest.{BeforeAndAfterAll, Matchers, WordSpecLike} import scala.collection.immutable import scala.concurrent.{ExecutionContext, Future} class LeaseContentionSpec extends TestKit(ActorSystem("LeaseContentionSpec", ConfigFactory.parseString( """ akka.loglevel = INFO akka.coordination.lease.kubernetes { api-service-host = localhost api-service-port = 8080 namespace = "lease" namespace-path = "" secure-api-server = false } """ ))) with WordSpecLike with Matchers with ScalaFutures with BeforeAndAfterAll { implicit val patience: PatienceConfig = PatienceConfig(testKitSettings.DefaultTimeout.duration) // for cleanup val k8sApi = new KubernetesApiImpl(system, KubernetesSettings(system, TimeoutSettings(system.settings.config.getConfig("akka.coordination.lease.kubernetes")))) val lease1 = "contended-lease" val lease2 = "contended-lease-2" override protected def beforeAll(): Unit = { k8sApi.removeLease(lease1).futureValue k8sApi.removeLease(lease2).futureValue } override protected def afterAll(): Unit ={ TestKit.shutdownActorSystem(system) } "A lease under contention" should { "only allow one client to get acquire lease" in { val underTest = LeaseProvider(system) val nrClients = 30 implicit val ec = ExecutionContext.fromExecutor(Executors.newFixedThreadPool(nrClients)) // too many = HTTP request queue of pool fills up // could make this more contended with a countdown latch so they all start at the same time val leases: immutable.Seq[(String, Boolean)] = Future.sequence((0 until nrClients).map(i => { val clientName = s"client$i" val lease = underTest.getLease(lease1, KubernetesLease.configPath, clientName) Future { lease.acquire() }.flatMap(identity).map(granted => (clientName, granted)) })).futureValue val numberGranted = leases.count { case (_, granted) => granted } withClue(s"More than one lease granted $leases") { numberGranted shouldEqual 1 } } } }
Example 79
Source File: ClusterBootstrap.scala From akka-management with Apache License 2.0 | 5 votes |
package akka.management.cluster.bootstrap import java.util.concurrent.atomic.AtomicReference import akka.AkkaVersion import scala.concurrent.{ Future, Promise, TimeoutException } import scala.concurrent.duration._ import akka.actor.ActorSystem import akka.actor.ClassicActorSystemProvider import akka.actor.ExtendedActorSystem import akka.actor.Extension import akka.actor.ExtensionId import akka.actor.ExtensionIdProvider import akka.annotation.InternalApi import akka.cluster.Cluster import akka.discovery.{ Discovery, ServiceDiscovery } import akka.event.Logging import akka.http.scaladsl.model.Uri import akka.http.scaladsl.server.Route import akka.management.cluster.bootstrap.contactpoint.HttpClusterBootstrapRoutes import akka.management.cluster.bootstrap.internal.BootstrapCoordinator import akka.management.scaladsl.ManagementRouteProviderSettings import akka.management.scaladsl.ManagementRouteProvider final class ClusterBootstrap(implicit system: ExtendedActorSystem) extends Extension with ManagementRouteProvider { import ClusterBootstrap.Internal._ import system.dispatcher private val log = Logging(system, classOf[ClusterBootstrap]) private final val bootstrapStep = new AtomicReference[BootstrapStep](NotRunning) AkkaVersion.require("cluster-bootstrap", "2.5.27") val settings: ClusterBootstrapSettings = ClusterBootstrapSettings(system.settings.config, log) // used for initial discovery of contact points lazy val discovery: ServiceDiscovery = settings.contactPointDiscovery.discoveryMethod match { case "akka.discovery" => val discovery = Discovery(system).discovery log.info("Bootstrap using default `akka.discovery` method: {}", Logging.simpleName(discovery)) discovery case otherDiscoveryMechanism => log.info("Bootstrap using `akka.discovery` method: {}", otherDiscoveryMechanism) Discovery(system).loadServiceDiscovery(otherDiscoveryMechanism) } private val joinDecider: JoinDecider = { system.dynamicAccess .createInstanceFor[JoinDecider]( settings.joinDecider.implClass, List((classOf[ActorSystem], system), (classOf[ClusterBootstrapSettings], settings)) ) .get } private[this] val _selfContactPointUri: Promise[Uri] = Promise() override def routes(routeProviderSettings: ManagementRouteProviderSettings): Route = { log.info(s"Using self contact point address: ${routeProviderSettings.selfBaseUri}") this.setSelfContactPoint(routeProviderSettings.selfBaseUri) new HttpClusterBootstrapRoutes(settings).routes } def start(): Unit = if (Cluster(system).settings.SeedNodes.nonEmpty) { log.warning( "Application is configured with specific `akka.cluster.seed-nodes`: {}, bailing out of the bootstrap process! " + "If you want to use the automatic bootstrap mechanism, make sure to NOT set explicit seed nodes in the configuration. " + "This node will attempt to join the configured seed nodes.", Cluster(system).settings.SeedNodes.mkString("[", ", ", "]") ) } else if (bootstrapStep.compareAndSet(NotRunning, Initializing)) { log.info("Initiating bootstrap procedure using {} method...", settings.contactPointDiscovery.discoveryMethod) ensureSelfContactPoint() val bootstrapProps = BootstrapCoordinator.props(discovery, joinDecider, settings) val bootstrap = system.systemActorOf(bootstrapProps, "bootstrapCoordinator") // Bootstrap already logs in several other execution points when it can't form a cluster, and why. selfContactPoint.foreach { uri => bootstrap ! BootstrapCoordinator.Protocol.InitiateBootstrapping(uri) } } else log.warning("Bootstrap already initiated, yet start() method was called again. Ignoring.") private[bootstrap] object Internal { sealed trait BootstrapStep case object NotRunning extends BootstrapStep case object Initializing extends BootstrapStep } }
Example 80
Source File: MockDiscovery.scala From akka-management with Apache License 2.0 | 5 votes |
package akka.discovery import java.util.concurrent.atomic.AtomicReference import akka.actor.ActorSystem import akka.annotation.InternalApi import akka.discovery.ServiceDiscovery.Resolved import akka.event.Logging import scala.concurrent.Future import scala.concurrent.duration.FiniteDuration @InternalApi object MockDiscovery { private val data = new AtomicReference[Map[Lookup, () => Future[Resolved]]](Map.empty) def set(name: Lookup, to: () => Future[Resolved]): Unit = { val d = data.get() if (data.compareAndSet(d, d.updated(name, to))) () else set(name, to) // retry } def remove(name: Lookup): Unit = { val d = data.get() if (data.compareAndSet(d, d - name)) () else remove(name) // retry } } @InternalApi final class MockDiscovery(system: ActorSystem) extends ServiceDiscovery { private val log = Logging(system, getClass) override def lookup(query: Lookup, resolveTimeout: FiniteDuration): Future[Resolved] = { MockDiscovery.data.get().get(query) match { case Some(res) => val items = res() log.info("Mock-resolved [{}] to [{}:{}]", query, items, items.value) items case None => log.info("No mock-data for [{}], resolving as 'Nil'. Current mocks: {}", query, MockDiscovery.data.get()) Future.successful(Resolved(query.serviceName, Nil)) } } }
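A small sketch of registering mock lookup data before exercising discovery-based code (service name, host and port are placeholders):

import akka.discovery.{Lookup, MockDiscovery}
import akka.discovery.ServiceDiscovery.{Resolved, ResolvedTarget}
import scala.concurrent.Future

val lookup = Lookup("example-service")

MockDiscovery.set(lookup, () =>
  Future.successful(
    Resolved("example-service", List(ResolvedTarget(host = "127.0.0.1", port = Some(8558), address = None)))
  )
)

// ... run the code under test, then clean up the registration
MockDiscovery.remove(lookup)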
Example 81
Source File: HealthCheckRoutesSpec.scala From akka-management with Apache License 2.0 | 5 votes |
package akka.management import akka.actor.ExtendedActorSystem import akka.http.scaladsl.model.{ StatusCodes, Uri } import akka.http.scaladsl.server._ import akka.http.scaladsl.testkit.ScalatestRouteTest import akka.management.scaladsl.{ HealthChecks, ManagementRouteProviderSettings } import org.scalatest.{ Matchers, WordSpec } import scala.concurrent.Future class HealthCheckRoutesSpec extends WordSpec with Matchers with ScalatestRouteTest { private val eas = system.asInstanceOf[ExtendedActorSystem] private def testRoute( readyResultValue: Future[Either[String, Unit]] = Future.successful(Right(())), aliveResultValue: Future[Either[String, Unit]] = Future.successful(Right(())) ): Route = { new HealthCheckRoutes(eas) { override protected val healthChecks: HealthChecks = new HealthChecks { override def readyResult(): Future[Either[String, Unit]] = readyResultValue override def ready(): Future[Boolean] = readyResultValue.map(_.isRight) override def aliveResult(): Future[Either[String, Unit]] = aliveResultValue override def alive(): Future[Boolean] = aliveResultValue.map(_.isRight) } }.routes(ManagementRouteProviderSettings(Uri("http://whocares"), readOnly = false)) } tests("/ready", result => testRoute(readyResultValue = result)) tests("/alive", result => testRoute(aliveResultValue = result)) def tests(endpoint: String, route: Future[Either[String, Unit]] => Route) = { s"Health check ${endpoint} endpoint" should { "return 200 for Right" in { Get(endpoint) ~> route(Future.successful(Right(()))) ~> check { status shouldEqual StatusCodes.OK } } "return 500 for Left" in { Get(endpoint) ~> route(Future.successful(Left("com.someclass.MyCheck"))) ~> check { status shouldEqual StatusCodes.InternalServerError responseAs[String] shouldEqual "Not Healthy: com.someclass.MyCheck" } } "return 500 for fail" in { Get(endpoint) ~> route(Future.failed(new RuntimeException("darn it"))) ~> check { status shouldEqual StatusCodes.InternalServerError responseAs[String] shouldEqual "Health Check Failed: darn it" } } } } }
Example 82
Source File: CompileOnly.scala From akka-management with Apache License 2.0 | 5 votes |
package akka.management import akka.actor.ActorSystem import akka.http.scaladsl.server.directives.Credentials import scala.concurrent.Future import akka.management.scaladsl.AkkaManagement object CompileOnly { val system: ActorSystem = null implicit val ec = system.dispatcher //#basic-auth def myUserPassAuthenticator(credentials: Credentials): Future[Option[String]] = credentials match { case p @ Credentials.Provided(id) => Future { // potentially if (p.verify("p4ssw0rd")) Some(id) else None } case _ => Future.successful(None) } // ... val management = AkkaManagement(system) management.start(_.withAuth(myUserPassAuthenticator)) //#basic-auth object stopping { //#stopping val management = AkkaManagement(system) management.start() //... val bindingFuture = management.stop() bindingFuture.onComplete { _ => println("It's stopped") } //#stopping } }
Example 83
Source File: ExampleHealthCheck.scala From akka-management with Apache License 2.0 | 5 votes |
package doc.akka.management import akka.actor.ActorSystem import akka.cluster.{ Cluster, MemberStatus } import scala.concurrent.Future //#basic class ExampleHealthCheck(system: ActorSystem) extends (() => Future[Boolean]) { override def apply(): Future[Boolean] = { Future.successful(true) } } //#basic //#cluster class ClusterHealthCheck(system: ActorSystem) extends (() => Future[Boolean]) { private val cluster = Cluster(system) override def apply(): Future[Boolean] = { Future.successful(cluster.selfMember.status == MemberStatus.Up) } } //#cluster
Example 84
Source File: ClusterMembershipCheck.scala From akka-management with Apache License 2.0 | 5 votes |
package akka.management.cluster.scaladsl import akka.actor.ActorSystem import akka.annotation.InternalApi import akka.cluster.{ Cluster, MemberStatus } import akka.util.Helpers import com.typesafe.config.Config import scala.collection.JavaConverters._ import scala.concurrent.Future @InternalApi private[akka] object ClusterMembershipCheckSettings { def memberStatus(status: String): MemberStatus = Helpers.toRootLowerCase(status) match { case "weaklyup" => MemberStatus.WeaklyUp case "up" => MemberStatus.Up case "exiting" => MemberStatus.Exiting case "down" => MemberStatus.Down case "joining" => MemberStatus.Joining case "leaving" => MemberStatus.Leaving case "removed" => MemberStatus.Removed case invalid => throw new IllegalArgumentException( s"'$invalid' is not a valid MemberStatus. See reference.conf for valid values" ) } def apply(config: Config): ClusterMembershipCheckSettings = new ClusterMembershipCheckSettings(config.getStringList("ready-states").asScala.map(memberStatus).toSet) } final class ClusterMembershipCheckSettings(val readyStates: Set[MemberStatus]) final class ClusterMembershipCheck @InternalApi private[akka] ( system: ActorSystem, selfStatus: () => MemberStatus, settings: ClusterMembershipCheckSettings) extends (() => Future[Boolean]) { def this(system: ActorSystem) = this( system, () => Cluster(system).selfMember.status, ClusterMembershipCheckSettings(system.settings.config.getConfig("akka.management.cluster.health-check"))) override def apply(): Future[Boolean] = { Future.successful(settings.readyStates.contains(selfStatus())) } }
Example 85
Source File: ParallelIteratorExecutor.scala From codepropertygraph with Apache License 2.0 | 5 votes |
package io.shiftleft.passes import scala.concurrent.ExecutionContext.Implicits.global import scala.concurrent.duration.Duration import scala.concurrent.{Await, Future} class ParallelIteratorExecutor[T](iterator: Iterator[T]) { def map[D](func: T => D): Iterator[D] = { val futures = Future.traverse(iterator) { element => Future { func(element) } } Await.result(futures, Duration.Inf) } }
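For illustration, a tiny usage of the class above (the values are arbitrary):

import io.shiftleft.passes.ParallelIteratorExecutor

// each element is mapped inside its own Future; the call blocks until all of them finish
val doubled: List[Int] =
  new ParallelIteratorExecutor(Iterator(1, 2, 3, 4)).map(_ * 2).toList
// doubled == List(2, 4, 6, 8)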
Example 86
Source File: DistributedCountRDD.scala From carbondata with Apache License 2.0 | 5 votes |
package org.apache.carbondata.indexserver import java.util.concurrent.Executors import scala.collection.JavaConverters._ import scala.concurrent.{Await, ExecutionContext, ExecutionContextExecutor, Future} import scala.concurrent.duration.Duration import org.apache.hadoop.mapred.TaskAttemptID import org.apache.hadoop.mapreduce.{InputSplit, TaskType} import org.apache.hadoop.mapreduce.task.TaskAttemptContextImpl import org.apache.spark.{Partition, SparkEnv, TaskContext} import org.apache.spark.sql.SparkSession import org.apache.carbondata.common.logging.LogServiceFactory import org.apache.carbondata.core.cache.CacheProvider import org.apache.carbondata.core.datastore.impl.FileFactory import org.apache.carbondata.core.index.{IndexInputFormat, IndexStoreManager} import org.apache.carbondata.core.index.dev.expr.IndexInputSplitWrapper import org.apache.carbondata.core.util.{CarbonProperties, CarbonThreadFactory} import org.apache.carbondata.spark.rdd.CarbonRDD class DistributedCountRDD(@transient ss: SparkSession, indexInputFormat: IndexInputFormat) extends CarbonRDD[(String, String)](ss, Nil) { @transient private val LOGGER = LogServiceFactory.getLogService(classOf[DistributedPruneRDD] .getName) override protected def getPreferredLocations(split: Partition): Seq[String] = { if (split.asInstanceOf[IndexRDDPartition].getLocations != null) { split.asInstanceOf[IndexRDDPartition].getLocations.toSeq } else { Seq() } } override def internalCompute(split: Partition, context: TaskContext): Iterator[(String, String)] = { val attemptId = new TaskAttemptID(DistributedRDDUtils.generateTrackerId, id, TaskType.MAP, split.index, 0) val attemptContext = new TaskAttemptContextImpl(FileFactory.getConfiguration, attemptId) val inputSplits = split.asInstanceOf[IndexRDDPartition].inputSplit val numOfThreads = CarbonProperties.getInstance().getNumOfThreadsForExecutorPruning val service = Executors .newFixedThreadPool(numOfThreads, new CarbonThreadFactory("IndexPruningPool", true)) implicit val ec: ExecutionContextExecutor = ExecutionContext .fromExecutor(service) if (indexInputFormat.ifAsyncCall()) { // to clear cache of invalid segments during pre-priming in index server IndexStoreManager.getInstance().clearInvalidSegments(indexInputFormat.getCarbonTable, indexInputFormat.getInvalidSegments) } val futures = if (inputSplits.length <= numOfThreads) { inputSplits.map { split => generateFuture(Seq(split)) } } else { DistributedRDDUtils.groupSplits(inputSplits, numOfThreads).map { splits => generateFuture(splits) } } // scalastyle:off awaitresult val results = Await.result(Future.sequence(futures), Duration.Inf).flatten // scalastyle:on awaitresult val executorIP = s"${ SparkEnv.get.blockManager.blockManagerId.host }_${ SparkEnv.get.blockManager.blockManagerId.executorId }" val cacheSize = if (CacheProvider.getInstance().getCarbonCache != null) { CacheProvider.getInstance().getCarbonCache.getCurrentSize } else { 0L } Iterator((executorIP + "_" + cacheSize.toString, results.map(_._2.toLong).sum.toString)) } override protected def internalGetPartitions: Array[Partition] = { new DistributedPruneRDD(ss, indexInputFormat).partitions } private def generateFuture(split: Seq[InputSplit]) (implicit executionContext: ExecutionContext) = { Future { val segments = split.map { inputSplit => val distributable = inputSplit.asInstanceOf[IndexInputSplitWrapper] distributable.getDistributable.getSegment .setReadCommittedScope(indexInputFormat.getReadCommittedScope) distributable.getDistributable.getSegment } val defaultIndex = IndexStoreManager.getInstance .getIndex(indexInputFormat.getCarbonTable, split.head .asInstanceOf[IndexInputSplitWrapper].getDistributable.getIndexSchema) defaultIndex.getBlockRowCount(defaultIndex, segments.toList.asJava, indexInputFormat .getPartitions).asScala } } }
Example 87
Source File: TestSegmentReadingForMultiThreading.scala From carbondata with Apache License 2.0 | 5 votes |
package org.apache.carbondata.spark.testsuite.segmentreading import java.util.concurrent.TimeUnit import scala.concurrent.ExecutionContext.Implicits.global import scala.concurrent.duration.Duration import scala.concurrent.{Await, Future} import org.apache.spark.sql.{CarbonUtils, Row} import org.apache.spark.sql.test.util.QueryTest import org.scalatest.BeforeAndAfterAll class TestSegmentReadingForMultiThreading extends QueryTest with BeforeAndAfterAll { override def beforeAll: Unit = { sql("DROP TABLE IF EXISTS carbon_table_MulTI_THread") sql( "CREATE TABLE carbon_table_MulTI_THread (empno int, empname String, designation String, doj " + "Timestamp, workgroupcategory int, workgroupcategoryname String, deptno int, deptname " + "String, projectcode int, projectjoindate Timestamp, projectenddate Timestamp,attendance " + "int,utilization int,salary int) STORED AS carbondata") sql( s"LOAD DATA LOCAL INPATH '$resourcesPath/data.csv' INTO TABLE carbon_table_MulTI_THread " + "OPTIONS('DELIMITER'= ',', 'QUOTECHAR'= '\"')") sql( s"LOAD DATA LOCAL INPATH '$resourcesPath/data1.csv' INTO TABLE carbon_table_MulTI_THread " + "OPTIONS('DELIMITER'= ',', 'QUOTECHAR'= '\"')") sql( s"LOAD DATA LOCAL INPATH '$resourcesPath/data.csv' INTO TABLE carbon_table_MulTI_THread " + "OPTIONS('DELIMITER'= ',', 'QUOTECHAR'= '\"')") sql( s"LOAD DATA LOCAL INPATH '$resourcesPath/data1.csv' INTO TABLE carbon_table_MulTI_THread " + "OPTIONS('DELIMITER'= ',', 'QUOTECHAR'= '\"')") } test("test multithreading for segment reading") { CarbonUtils.threadSet("carbon.input.segments.default.carbon_table_MulTI_THread", "1,2,3") val df = sql("select count(empno) from carbon_table_MulTI_THread") checkAnswer(df, Seq(Row(30))) val four = Future { CarbonUtils.threadSet("carbon.input.segments.default.carbon_table_MulTI_THread", "1,3") val df = sql("select count(empno) from carbon_table_MulTI_THread") checkAnswer(df, Seq(Row(20))) } val three = Future { CarbonUtils.threadSet("carbon.input.segments.default.carbon_table_MulTI_THread", "0,1,2") val df = sql("select count(empno) from carbon_table_MulTI_THread") checkAnswer(df, Seq(Row(30))) } val one = Future { CarbonUtils.threadSet("carbon.input.segments.default.carbon_table_MulTI_THread", "0,2") val df = sql("select count(empno) from carbon_table_MulTI_THread") checkAnswer(df, Seq(Row(20))) } val two = Future { CarbonUtils.threadSet("carbon.input.segments.default.carbon_table_MulTI_THread", "1") val df = sql("select count(empno) from carbon_table_MulTI_THread") checkAnswer(df, Seq(Row(10))) } Await.result(Future.sequence(Seq(one, two, three, four)), Duration(300, TimeUnit.SECONDS)) } override def afterAll: Unit = { sql("DROP TABLE IF EXISTS carbon_table_MulTI_THread") CarbonUtils.threadUnset("carbon.input.segments.default.carbon_table_MulTI_THread") } }
Example 88
Source File: package.scala From chronicler with Apache License 2.0 | 5 votes |
package com.github.fsanaulla.chronicler.ahc.shared import com.github.fsanaulla.chronicler.core.alias.Id import com.github.fsanaulla.chronicler.core.model.{Apply, Failable, FunctionK, Functor} import scala.concurrent.{ExecutionContext, Future} package object implicits { implicit def futureFunctor(implicit ec: ExecutionContext): Functor[Future] = new Functor[Future] { override def map[A, B](fa: Future[A])(f: A => B): Future[B] = fa.map(f) override def flatMap[A, B](fa: Future[A])(f: A => Future[B]): Future[B] = fa.flatMap(f) } implicit def futureFailable: Failable[Future] = new Failable[Future] { override def fail[A](ex: Throwable): Future[A] = Future.failed(ex) } implicit val futureApply: Apply[Future] = new Apply[Future] { override def pure[A](v: A): Future[A] = Future.successful(v) } implicit val fkId: FunctionK[Id, Future] = new FunctionK[Id, Future] { override def apply[A](fa: Id[A]): Future[A] = Future.successful(fa) } }
Example 89
Source File: AhcRequestExecutor.scala From chronicler with Apache License 2.0 | 5 votes |
package com.github.fsanaulla.chronicler.ahc.shared.handlers import com.github.fsanaulla.chronicler.ahc.shared.Uri import com.github.fsanaulla.chronicler.core.components.RequestExecutor import com.github.fsanaulla.chronicler.core.gzip import io.netty.handler.codec.http.HttpHeaderValues.GZIP_DEFLATE import org.asynchttpclient.{AsyncHttpClient, Response} import scala.compat.java8.FutureConverters._ import scala.concurrent.Future private[ahc] final class AhcRequestExecutor()(implicit client: AsyncHttpClient) extends RequestExecutor[Future, Response, Uri, String] { override def post(uri: Uri): Future[Response] = { client .preparePost(uri.mkUrl) .execute() .toCompletableFuture .toScala } }
Example 90
Source File: AhcManagementClient.scala From chronicler with Apache License 2.0 | 5 votes |
package com.github.fsanaulla.chronicler.ahc.management import com.github.fsanaulla.chronicler.ahc.shared.handlers.{ AhcJsonHandler, AhcQueryBuilder, AhcRequestExecutor } import com.github.fsanaulla.chronicler.ahc.shared.{InfluxAhcClient, Uri} import com.github.fsanaulla.chronicler.core.ManagementClient import com.github.fsanaulla.chronicler.core.alias.{ErrorOr, Id} import com.github.fsanaulla.chronicler.core.components.ResponseHandler import com.github.fsanaulla.chronicler.core.implicits.{applyId, functorId} import com.github.fsanaulla.chronicler.core.model.{ FunctionK, Functor, InfluxCredentials, InfluxDBInfo } import org.asynchttpclient.{AsyncHttpClientConfig, Response} import scala.concurrent.{ExecutionContext, Future} final class AhcManagementClient( host: String, port: Int, credentials: Option[InfluxCredentials], asyncClientConfig: Option[AsyncHttpClientConfig] )(implicit ex: ExecutionContext, val F: Functor[Future], val FK: FunctionK[Id, Future]) extends InfluxAhcClient(asyncClientConfig) with ManagementClient[Future, Id, Response, Uri, String] { val jsonHandler: AhcJsonHandler = new AhcJsonHandler(compress = false) implicit val qb: AhcQueryBuilder = new AhcQueryBuilder(schema, host, port, credentials) implicit val re: AhcRequestExecutor = new AhcRequestExecutor implicit val rh: ResponseHandler[Id, Response] = new ResponseHandler(jsonHandler) override def ping: Future[ErrorOr[InfluxDBInfo]] = { re.get(qb.buildQuery("/ping"), compress = false) .map(rh.pingResult) } }
Example 91
Source File: AhcIOClient.scala From chronicler with Apache License 2.0 | 5 votes |
package com.github.fsanaulla.chronicler.ahc.io import com.github.fsanaulla.chronicler.ahc.shared.handlers.{ AhcJsonHandler, AhcQueryBuilder, AhcRequestExecutor } import com.github.fsanaulla.chronicler.ahc.shared.implicits.{fkId, futureFailable, futureFunctor} import com.github.fsanaulla.chronicler.ahc.shared.{InfluxAhcClient, Uri} import com.github.fsanaulla.chronicler.core.IOClient import com.github.fsanaulla.chronicler.core.alias.{ErrorOr, Id} import com.github.fsanaulla.chronicler.core.api.{DatabaseApi, MeasurementApi} import com.github.fsanaulla.chronicler.core.components.ResponseHandler import com.github.fsanaulla.chronicler.core.implicits.{applyId, functorId} import com.github.fsanaulla.chronicler.core.model.{InfluxCredentials, InfluxDBInfo} import org.asynchttpclient.{AsyncHttpClientConfig, Response} import scala.concurrent.{ExecutionContext, Future} import scala.reflect.ClassTag final class AhcIOClient( host: String, port: Int, compress: Boolean, credentials: Option[InfluxCredentials], asyncClientConfig: Option[AsyncHttpClientConfig] )(implicit ex: ExecutionContext) extends InfluxAhcClient(asyncClientConfig) with IOClient[Future, Id, Response, Uri, String] { val jsonHandler: AhcJsonHandler = new AhcJsonHandler(compress) implicit val qb: AhcQueryBuilder = new AhcQueryBuilder(schema, host, port, credentials) implicit val re: AhcRequestExecutor = new AhcRequestExecutor implicit val rh: ResponseHandler[Id, Response] = new ResponseHandler(jsonHandler) override def database(dbName: String) = new DatabaseApi(dbName, compress) override def measurement[A: ClassTag](dbName: String, measurementName: String): Measurement[A] = new MeasurementApi(dbName, measurementName, compress) override def ping: Future[ErrorOr[InfluxDBInfo]] = re.get(qb.buildQuery("/ping"), compress = false) .map(rh.pingResult) }
Example 92
Source File: CompressionSpec.scala From chronicler with Apache License 2.0 | 5 votes |
package com.github.fsanaulla.chronicler.ahc.io.it import java.nio.file.Paths import com.github.fsanaulla.chronicler.ahc.io.InfluxIO import com.github.fsanaulla.chronicler.ahc.management.InfluxMng import com.github.fsanaulla.chronicler.ahc.shared.Uri import com.github.fsanaulla.chronicler.core.alias.Id import com.github.fsanaulla.chronicler.core.api.DatabaseApi import com.github.fsanaulla.chronicler.testing.it.DockerizedInfluxDB import org.asynchttpclient.Response import org.scalatest.concurrent.{Eventually, IntegrationPatience, ScalaFutures} import org.scalatest.{FlatSpec, Matchers} import scala.concurrent.ExecutionContext.Implicits.global import scala.concurrent.Future class CompressionSpec extends FlatSpec with Matchers with DockerizedInfluxDB with ScalaFutures with Eventually with IntegrationPatience { override def afterAll(): Unit = { mng.close() io.close() super.afterAll() } val testDB = "db" lazy val mng = InfluxMng(host, port, Some(creds), None) lazy val io = InfluxIO(host, port, Some(creds), compress = true) lazy val db: DatabaseApi[Future, Id, Response, Uri, String] = io.database(testDB) it should "ping database" in { eventually { io.ping.futureValue.right.get.version shouldEqual version } } it should "write data from file" in { mng.createDatabase(testDB).futureValue.right.get shouldEqual 200 db.writeFromFile(Paths.get(getClass.getResource("/large_batch.txt").getPath)) .futureValue .right .get shouldEqual 204 db.readJson("SELECT * FROM test1").futureValue.right.get.length shouldEqual 10000 } }
Example 93
Source File: InfluxAkkaClient.scala From chronicler with Apache License 2.0 | 5 votes |
package com.github.fsanaulla.chronicler.akka.shared import akka.actor.ActorSystem import akka.http.scaladsl.{Http, HttpExt, HttpsConnectionContext} import scala.concurrent.duration.Duration import scala.concurrent.{Await, ExecutionContext, Future} abstract class InfluxAkkaClient( terminateActorSystem: Boolean, httpsContext: Option[HttpsConnectionContext] )(implicit system: ActorSystem, ec: ExecutionContext) { self: AutoCloseable => private[akka] implicit val http: HttpExt = Http() private[akka] val (ctx, schema) = httpsContext .map(_ -> "https") .getOrElse(http.defaultClientHttpsContext -> "http") def close(): Unit = Await.ready(closeAsync(), Duration.Inf) def closeAsync(): Future[Unit] = { for { _ <- http.shutdownAllConnectionPools() _ <- if (terminateActorSystem) system.terminate().map(_ => {}) else Future.successful({}) } yield {} } }
Example 94
Source File: package.scala From chronicler with Apache License 2.0 | 5 votes |
package com.github.fsanaulla.chronicler.akka.shared import com.github.fsanaulla.chronicler.core.model.{Apply, Failable, Functor} import scala.concurrent.{ExecutionContext, Future} package object implicits { implicit def futureFunctor(implicit ec: ExecutionContext): Functor[Future] = new Functor[Future] { override def map[A, B](fa: Future[A])(f: A => B): Future[B] = fa.map(f) override def flatMap[A, B](fa: Future[A])(f: A => Future[B]): Future[B] = fa.flatMap(f) } implicit def futureFailable: Failable[Future] = new Failable[Future] { override def fail[A](ex: Throwable): Future[A] = Future.failed(ex) } implicit val futureApply: Apply[Future] = new Apply[Future] { override def pure[A](v: A): Future[A] = Future.successful(v) } }
Example 95
Source File: AkkaRequestExecutor.scala From chronicler with Apache License 2.0 | 5 votes |
package com.github.fsanaulla.chronicler.akka.shared.handlers import akka.http.scaladsl.coding.Gzip import akka.http.scaladsl.model._ import akka.http.scaladsl.model.headers.{`Accept-Encoding`, `Content-Encoding`, HttpEncodings} import akka.http.scaladsl.{HttpExt, HttpsConnectionContext} import com.github.fsanaulla.chronicler.core.components.RequestExecutor import scala.concurrent.Future override def post(uri: Uri): Future[HttpResponse] = { val request = HttpRequest( method = HttpMethods.GET, uri = uri ) http.singleRequest( request, connectionContext = ctx ) } }
Example 96
Source File: AkkaBodyUnmarshaller.scala From chronicler with Apache License 2.0 | 5 votes |
package com.github.fsanaulla.chronicler.akka.shared.handlers import akka.http.scaladsl.coding.Gzip import akka.http.scaladsl.model.{HttpCharsets, HttpEntity} import akka.http.scaladsl.unmarshalling.Unmarshaller import akka.stream.Materializer import akka.util.ByteString import com.github.fsanaulla.chronicler.core.alias.ErrorOr import com.github.fsanaulla.chronicler.core.jawn.RichJParser import org.typelevel.jawn.ast.{JParser, JValue} import scala.concurrent.{ExecutionContext, Future} final class AkkaBodyUnmarshaller(compressed: Boolean) extends Unmarshaller[HttpEntity, ErrorOr[JValue]] { override def apply( value: HttpEntity )(implicit ec: ExecutionContext, mat: Materializer ): Future[ErrorOr[JValue]] = { // get encoding from response content type, otherwise use UTF-8 as default val encoding = value.contentType.charsetOption .getOrElse(HttpCharsets.`UTF-8`) .nioCharset() val srcBody = if (compressed) value.dataBytes.via(Gzip.decoderFlow) else value.dataBytes srcBody .runFold(ByteString.empty)(_ ++ _) .map(_.decodeString(encoding)) .map(JParser.parseFromStringEither) } }
Example 97
Source File: AkkaJsonHandler.scala From chronicler with Apache License 2.0 | 5 votes |
package com.github.fsanaulla.chronicler.akka.shared.handlers import akka.http.scaladsl.model.HttpResponse import akka.stream.ActorMaterializer import com.github.fsanaulla.chronicler.akka.shared.implicits.futureFunctor import com.github.fsanaulla.chronicler.core.alias.ErrorOr import com.github.fsanaulla.chronicler.core.components.JsonHandler import org.typelevel.jawn.ast.JValue import scala.concurrent.{ExecutionContext, Future} final class AkkaJsonHandler( unm: AkkaBodyUnmarshaller )(implicit ex: ExecutionContext, mat: ActorMaterializer) extends JsonHandler[Future, HttpResponse] { override def responseBody(response: HttpResponse): Future[ErrorOr[JValue]] = unm(response.entity) override def responseHeader(response: HttpResponse): Seq[(String, String)] = response.headers.map(hd => hd.name() -> hd.value()) override def responseCode(response: HttpResponse): Int = response.status.intValue() }
Example 98
Source File: AkkaResponseHandler.scala From chronicler with Apache License 2.0 | 5 votes |
package com.github.fsanaulla.chronicler.akka.shared.handlers import akka.http.scaladsl.model.HttpResponse import akka.stream.scaladsl.{Framing, Source} import akka.util.ByteString import com.github.fsanaulla.chronicler.akka.shared.implicits._ import com.github.fsanaulla.chronicler.core.alias.ErrorOr import com.github.fsanaulla.chronicler.core.components.{JsonHandler, ResponseHandler} import com.github.fsanaulla.chronicler.core.either import com.github.fsanaulla.chronicler.core.either.EitherOps import com.github.fsanaulla.chronicler.core.jawn.RichJParser import com.github.fsanaulla.chronicler.core.model.{InfluxReader, ParsingException} import org.typelevel.jawn.ast.{JArray, JParser} import scala.concurrent.{ExecutionContext, Future} import scala.reflect.ClassTag class AkkaResponseHandler( jsonHandler: JsonHandler[Future, HttpResponse] )(implicit ex: ExecutionContext) extends ResponseHandler[Future, HttpResponse](jsonHandler) { final def queryChunkedResultJson(response: HttpResponse): Source[ErrorOr[Array[JArray]], Any] = { response.entity.dataBytes .via(Framing.delimiter(ByteString("\n"), Int.MaxValue)) .map(_.utf8String) .map(JParser.parseFromStringEither) .map( _.flatMapRight( jv => jsonHandler .queryResult(jv) .toRight[Throwable](new ParsingException("Can't extract query result from response")) ) ) } final def queryChunkedResult[T: ClassTag]( response: HttpResponse )(implicit rd: InfluxReader[T] ): Source[ErrorOr[Array[T]], Any] = { queryChunkedResultJson(response) .map(_.flatMapRight { arr => either.array(arr.map(rd.read)) }) } }
Example 99
Source File: AkkaManagementClient.scala From chronicler with Apache License 2.0 | 5 votes |
package com.github.fsanaulla.chronicler.akka.management import _root_.akka.actor.ActorSystem import _root_.akka.http.scaladsl.HttpsConnectionContext import akka.http.scaladsl.model.{HttpResponse, RequestEntity, Uri} import akka.stream.ActorMaterializer import com.github.fsanaulla.chronicler.akka.shared.InfluxAkkaClient import com.github.fsanaulla.chronicler.akka.shared.handlers._ import com.github.fsanaulla.chronicler.core.ManagementClient import com.github.fsanaulla.chronicler.core.alias.ErrorOr import com.github.fsanaulla.chronicler.core.model._ import scala.concurrent.{ExecutionContext, Future} final class AkkaManagementClient( host: String, port: Int, credentials: Option[InfluxCredentials], httpsContext: Option[HttpsConnectionContext], terminateActorSystem: Boolean )(implicit val ex: ExecutionContext, val system: ActorSystem, val F: Functor[Future], val FK: FunctionK[Future, Future]) extends InfluxAkkaClient(terminateActorSystem, httpsContext) with ManagementClient[Future, Future, HttpResponse, Uri, RequestEntity] { implicit val mat: ActorMaterializer = ActorMaterializer() implicit val qb: AkkaQueryBuilder = new AkkaQueryBuilder(schema, host, port, credentials) implicit val jh: AkkaJsonHandler = new AkkaJsonHandler(new AkkaBodyUnmarshaller(false)) implicit val re: AkkaRequestExecutor = new AkkaRequestExecutor(ctx) implicit val rh: AkkaResponseHandler = new AkkaResponseHandler(jh) override def ping: Future[ErrorOr[InfluxDBInfo]] = { re.get(qb.buildQuery("/ping"), compressed = false) .flatMap(rh.pingResult) } }
Example 100
Source File: AkkaMeasurementApi.scala From chronicler with Apache License 2.0 | 5 votes |
package com.github.fsanaulla.chronicler.akka.io import akka.http.scaladsl.model.{HttpResponse, RequestEntity, Uri} import akka.stream.scaladsl.Source import com.github.fsanaulla.chronicler.akka.shared.handlers.{ AkkaQueryBuilder, AkkaRequestExecutor, AkkaResponseHandler } import com.github.fsanaulla.chronicler.core.alias.ErrorOr import com.github.fsanaulla.chronicler.core.api.MeasurementApi import com.github.fsanaulla.chronicler.core.components.BodyBuilder import com.github.fsanaulla.chronicler.core.enums.{Epoch, Epochs} import com.github.fsanaulla.chronicler.core.model.{Failable, Functor, InfluxReader} import scala.concurrent.Future import scala.reflect.ClassTag final class AkkaMeasurementApi[T: ClassTag]( dbName: String, measurementName: String, gzipped: Boolean )(implicit qb: AkkaQueryBuilder, bd: BodyBuilder[RequestEntity], re: AkkaRequestExecutor, rh: AkkaResponseHandler, F: Functor[Future], FA: Failable[Future]) extends MeasurementApi[Future, Future, HttpResponse, Uri, RequestEntity, T]( dbName, measurementName, gzipped ) { def readChunked( query: String, epoch: Epoch = Epochs.None, pretty: Boolean = false, chunkSize: Int )(implicit rd: InfluxReader[T] ): Future[Source[ErrorOr[Array[T]], Any]] = { val uri = chunkedQuery(dbName, query, epoch, pretty, chunkSize) F.map(re.get(uri, compressed = false))(rh.queryChunkedResult[T]) } }
Example 101
Source File: AkkaIOClient.scala From chronicler with Apache License 2.0 | 5 votes |
package com.github.fsanaulla.chronicler.akka.io import akka.actor.ActorSystem import akka.http.scaladsl.HttpsConnectionContext import akka.http.scaladsl.model.{HttpResponse, RequestEntity, Uri} import akka.stream.ActorMaterializer import com.github.fsanaulla.chronicler.akka.shared.InfluxAkkaClient import com.github.fsanaulla.chronicler.akka.shared.handlers._ import com.github.fsanaulla.chronicler.akka.shared.implicits._ import com.github.fsanaulla.chronicler.core.IOClient import com.github.fsanaulla.chronicler.core.alias.ErrorOr import com.github.fsanaulla.chronicler.core.model.{InfluxCredentials, InfluxDBInfo} import scala.concurrent.{ExecutionContext, Future} import scala.reflect.ClassTag final class AkkaIOClient( host: String, port: Int, credentials: Option[InfluxCredentials], compress: Boolean, httpsContext: Option[HttpsConnectionContext], terminateActorSystem: Boolean )(implicit ex: ExecutionContext, system: ActorSystem) extends InfluxAkkaClient(terminateActorSystem, httpsContext) with IOClient[Future, Future, HttpResponse, Uri, RequestEntity] { implicit val mat: ActorMaterializer = ActorMaterializer() implicit val bb: AkkaBodyBuilder = new AkkaBodyBuilder() implicit val qb: AkkaQueryBuilder = new AkkaQueryBuilder(schema, host, port, credentials) implicit val jh: AkkaJsonHandler = new AkkaJsonHandler(new AkkaBodyUnmarshaller(compress)) implicit val re: AkkaRequestExecutor = new AkkaRequestExecutor(ctx) implicit val rh: AkkaResponseHandler = new AkkaResponseHandler(jh) override def database(dbName: String): AkkaDatabaseApi = new AkkaDatabaseApi(dbName, compress) override def measurement[A: ClassTag]( dbName: String, measurementName: String ): AkkaMeasurementApi[A] = new AkkaMeasurementApi[A](dbName, measurementName, compress) override def ping: Future[ErrorOr[InfluxDBInfo]] = { re.get(qb.buildQuery("/ping", Nil), compressed = false) .flatMap(rh.pingResult) } }
Example 102
Source File: AkkaDatabaseApi.scala From chronicler with Apache License 2.0 | 5 votes |
package com.github.fsanaulla.chronicler.akka.io import akka.http.scaladsl.model.{HttpResponse, RequestEntity, Uri} import akka.stream.scaladsl.Source import com.github.fsanaulla.chronicler.akka.shared.handlers.{ AkkaQueryBuilder, AkkaRequestExecutor, AkkaResponseHandler } import com.github.fsanaulla.chronicler.core.alias.{ErrorOr, JPoint} import com.github.fsanaulla.chronicler.core.api.DatabaseApi import com.github.fsanaulla.chronicler.core.components.BodyBuilder import com.github.fsanaulla.chronicler.core.enums.{Epoch, Epochs} import com.github.fsanaulla.chronicler.core.model.{FunctionK, Functor} import scala.concurrent.Future final class AkkaDatabaseApi( dbName: String, compressed: Boolean )(implicit qb: AkkaQueryBuilder, bd: BodyBuilder[RequestEntity], re: AkkaRequestExecutor, rh: AkkaResponseHandler, F: Functor[Future], FK: FunctionK[Future, Future]) extends DatabaseApi[Future, Future, HttpResponse, Uri, RequestEntity](dbName, compressed) { def readChunkedJson( query: String, epoch: Epoch = Epochs.None, pretty: Boolean = false, chunkSize: Int ): Future[Source[ErrorOr[Array[JPoint]], Any]] = { val uri = chunkedQuery(dbName, query, epoch, pretty, chunkSize) F.map(re.get(uri, compressed))(rh.queryChunkedResultJson) } }
Example 103
Source File: Main.scala From chronicler with Apache License 2.0 | 5 votes |
package com.github.fsanaulla.chronicler.example.akka.io import akka.actor.ActorSystem import com.github.fsanaulla.chronicler.akka.io.InfluxIO import com.github.fsanaulla.chronicler.macros.annotations.{field, tag} import com.github.fsanaulla.chronicler.macros.auto._ import scala.concurrent.Future import scala.util.{Failure, Success} object Main { def main(args: Array[String]): Unit = { final case class Girl(@tag name: String, @field age: Int) implicit val system: ActorSystem = ActorSystem() import system.dispatcher val t = Girl("f", 1) val host = args.headOption.getOrElse("localhost") val influx = InfluxIO(host) val meas = influx.measurement[Girl]("db", "cpu") val result = for { // write record to Influx _ <- meas.write(t) // retrieve written record from Influx girls <- meas.read("SELECT * FROM cpu") // close client _ <- Future.successful(influx.close()) } yield girls result.onComplete { case Success(Right(girls)) => girls.foreach(b => println(b.name)) case Success(Left(err)) => println(s"Can't retrieve boys coz of: $err") case Failure(exception) => println(s"Execution error: $exception") } } }
Example 104
Source File: ApplicationTimer.scala From play-webpack-typescript-react with MIT License | 5 votes |
package services import java.time.{Clock, Instant} import javax.inject._ import play.api.Logger import play.api.inject.ApplicationLifecycle import scala.concurrent.Future @Singleton class ApplicationTimer @Inject()(clock: Clock, appLifecycle: ApplicationLifecycle) { // This code is called when the application starts. private val start: Instant = clock.instant Logger.info(s"ApplicationTimer demo: Starting application at $start.") // When the application starts, register a stop hook with the // ApplicationLifecycle object. The code inside the stop hook will // be run when the application stops. appLifecycle.addStopHook { () => val stop: Instant = clock.instant val runningTime: Long = stop.getEpochSecond - start.getEpochSecond Logger.info(s"ApplicationTimer demo: Stopping application at ${clock.instant} after ${runningTime}s.") Future.successful(()) } }
Example 105
Source File: AsyncController.scala From play-webpack-typescript-react with MIT License | 5 votes |
package controllers import akka.actor.ActorSystem import javax.inject._ import play.api.mvc._ import scala.concurrent.{ExecutionContext, Future, Promise} import scala.concurrent.duration._ def message = Action.async { getFutureMessage(1.second).map { msg => Ok(msg) } } private def getFutureMessage(delayTime: FiniteDuration): Future[String] = { val promise: Promise[String] = Promise[String]() actorSystem.scheduler.scheduleOnce(delayTime) { promise.success("Hi!"); () }(actorSystem.dispatcher) promise.future } }
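The Promise-plus-scheduler pattern used in getFutureMessage can be factored into a reusable helper; a minimal sketch under the same assumptions (an ActorSystem is in implicit scope):

import akka.actor.ActorSystem
import scala.concurrent.{Future, Promise}
import scala.concurrent.duration.FiniteDuration

// completes the returned Future with `value` after `delay`, without blocking any thread
def delayed[T](value: T, delay: FiniteDuration)(implicit actorSystem: ActorSystem): Future[T] = {
  val promise = Promise[T]()
  actorSystem.scheduler.scheduleOnce(delay) { promise.success(value); () }(actorSystem.dispatcher)
  promise.future
}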
Example 106
Source File: ExampleFilter.scala From play-webpack-typescript-react with MIT License | 5 votes |
package filters import akka.stream.Materializer import javax.inject._ import play.api.mvc._ import scala.concurrent.{ExecutionContext, Future} @Singleton class ExampleFilter @Inject()(implicit override val mat: Materializer, exec: ExecutionContext) extends Filter { override def apply(nextFilter: RequestHeader => Future[Result])(requestHeader: RequestHeader): Future[Result] = { // Run the next filter in the chain. This will call other filters // and eventually call the action. Take the result and modify it // by adding a new header. nextFilter(requestHeader).map { result => result.withHeaders("X-ExampleFilter" -> "foo") } } }
Example 107
Source File: CouchbasePersistenceModule.scala From akka-persistence-couchbase with Apache License 2.0 | 5 votes |
package com.lightbend.lagom.javadsl.persistence.couchbase import java.net.URI import akka.actor.ActorSystem import akka.event.Logging import akka.stream.alpakka.couchbase.javadsl.CouchbaseSession import akka.stream.alpakka.couchbase.CouchbaseSessionSettings import com.google.inject.Provider import com.lightbend.lagom.internal.javadsl.persistence.couchbase.{ CouchbasePersistentEntityRegistry, CouchbaseReadSideImpl, JavadslCouchbaseOffsetStore } import com.lightbend.lagom.internal.persistence.couchbase.{ CouchbaseConfigValidator, CouchbaseOffsetStore, ServiceLocatorAdapter, ServiceLocatorHolder } import com.lightbend.lagom.javadsl.api.ServiceLocator import com.lightbend.lagom.javadsl.persistence.PersistentEntityRegistry import com.lightbend.lagom.spi.persistence.OffsetStore import com.typesafe.config.Config import javax.inject.Inject import play.api.inject.{Binding, Injector, Module} import play.api.{Configuration, Environment} import scala.compat.java8.FutureConverters._ import scala.concurrent.duration._ import scala.concurrent.{Await, Future} import scala.util.Try class CouchbasePersistenceModule extends Module { override def bindings(environment: Environment, configuration: Configuration): Seq[Binding[_]] = Seq( bind[CouchbasePersistenceModule.InitServiceLocatorHolder].toSelf.eagerly(), bind[PersistentEntityRegistry].to[CouchbasePersistentEntityRegistry], bind[CouchbaseSession].toProvider[CouchbaseProvider], bind[CouchbaseReadSide].to[CouchbaseReadSideImpl], //TODO: add other modules similar to Cassandra // bind[CassandraReadSideSettings].toSelf, bind[CouchbaseOffsetStore].to(bind[JavadslCouchbaseOffsetStore]), bind[OffsetStore].to(bind[CouchbaseOffsetStore]) ) } private[lagom] class CouchbaseProvider @Inject() (system: ActorSystem, cfg: Config) extends Provider[CouchbaseSession] { private val log = Logging(system, classOf[CouchbaseProvider]) CouchbaseConfigValidator.validateBucket("lagom.persistence.read-side.couchbase", cfg, log) private val readSideCouchbaseConfig: Config = cfg.getConfig("lagom.persistence.read-side.couchbase") private val sessionSettings = CouchbaseSessionSettings( readSideCouchbaseConfig.getConfig("connection") ) private val bucket = readSideCouchbaseConfig.getString("bucket") // FIXME is there a way to have async component creation in lagom instead of letting every component know that the thing is async? // if not we should pass Future[CouchbaseSession] around and let the use sites mix in AsyncCouchbaseSession - but if we use // that from Lagom it needs to be made public API // FIXME this should be the Java API of CouchbaseSession, when there is one lazy val couchbase: CouchbaseSession = Await.result(CouchbaseSession.create(sessionSettings, bucket, system.dispatcher).toScala, 30.seconds) override def get(): CouchbaseSession = couchbase } private[lagom] object CouchbasePersistenceModule { class InitServiceLocatorHolder @Inject() (system: ActorSystem, injector: Injector) { def init(): Unit = Try(injector.instanceOf[ServiceLocator]).foreach { locator => ServiceLocatorHolder(system).setServiceLocator(new ServiceLocatorAdapter { override def locateAll(name: String): Future[List[URI]] = { import system.dispatcher import scala.collection.JavaConverters._ import scala.compat.java8.FutureConverters._ locator.locateAll(name).toScala.map(_.asScala.toList) } }) } } }
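CouchbaseProvider bridges the Java CouchbaseSession API into Scala with scala-java8-compat's toScala and then blocks with Await because the injector needs a ready instance. The conversion step in isolation, as a sketch with a stand-in CompletableFuture rather than the real session factory:

import java.util.concurrent.CompletableFuture
import scala.compat.java8.FutureConverters._
import scala.concurrent.{Await, Future}
import scala.concurrent.duration._

object StageToFuture {
  // Stand-in for a Java async API such as CouchbaseSession.create(...)
  val javaSide: CompletableFuture[String] = CompletableFuture.completedFuture("session")

  // toScala turns any CompletionStage[A] into a Future[A] ...
  val scalaSide: Future[String] = javaSide.toScala

  // ... which can be composed further, or blocked on where a ready value is required
  val value: String = Await.result(scalaSide, 30.seconds)
}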
Example 108
Source File: CouchbaseReadSideHandler.scala From akka-persistence-couchbase with Apache License 2.0 | 5 votes |
package com.lightbend.lagom.internal.javadsl.persistence.couchbase import java.util.concurrent.CompletionStage import akka.Done import akka.japi.Pair import akka.stream.ActorAttributes import akka.stream.alpakka.couchbase.javadsl.CouchbaseSession import akka.stream.javadsl.Flow import com.lightbend.lagom.internal.javadsl.persistence.OffsetAdapter import com.lightbend.lagom.internal.persistence.couchbase.{CouchbaseOffsetDao, CouchbaseOffsetStore} import com.lightbend.lagom.javadsl.persistence.ReadSideProcessor.ReadSideHandler import com.lightbend.lagom.javadsl.persistence.{AggregateEvent, AggregateEventTag, Offset} import org.slf4j.LoggerFactory import scala.compat.java8.FutureConverters._ import scala.concurrent.{ExecutionContext, Future} private[couchbase] final class CouchbaseReadSideHandler[Event <: AggregateEvent[Event]]( couchbaseSession: CouchbaseSession, offsetStore: CouchbaseOffsetStore, handlers: Map[Class[_ <: Event], Handler[Event]], globalPrepareCallback: CouchbaseSession => CompletionStage[Done], prepareCallback: (CouchbaseSession, AggregateEventTag[Event]) => CompletionStage[Done], readProcessorId: String, dispatcher: String )(implicit ec: ExecutionContext) extends ReadSideHandler[Event] { private val log = LoggerFactory.getLogger(this.getClass) @volatile private var offsetDao: CouchbaseOffsetDao = _ protected def invoke(handler: Handler[Event], event: Event, offset: Offset): CompletionStage[Done] = handler .asInstanceOf[(CouchbaseSession, Event, Offset) => CompletionStage[Done]] .apply(couchbaseSession, event, offset) .toScala .flatMap { _ => val akkaOffset = OffsetAdapter.dslOffsetToOffset(offset) offsetDao.bindSaveOffset(akkaOffset).execute(couchbaseSession.asScala, ec) } .toJava override def globalPrepare(): CompletionStage[Done] = globalPrepareCallback.apply(couchbaseSession) override def prepare(tag: AggregateEventTag[Event]): CompletionStage[Offset] = (for { _ <- prepareCallback.apply(couchbaseSession, tag).toScala dao <- offsetStore.prepare(readProcessorId, tag.tag) } yield { offsetDao = dao OffsetAdapter.offsetToDslOffset(dao.loadedOffset) }).toJava override def handle(): Flow[Pair[Event, Offset], Done, _] = akka.stream.scaladsl .Flow[Pair[Event, Offset]] .mapAsync(parallelism = 1) { pair => val Pair(event, offset) = pair val eventClass = event.getClass val handler = handlers.getOrElse( // lookup handler eventClass, // fallback to empty handler if none { if (log.isDebugEnabled()) log.debug("Unhandled event [{}]", eventClass.getName) CouchbaseReadSideHandler.emptyHandler } ) invoke(handler, event, offset).toScala } .withAttributes(ActorAttributes.dispatcher(dispatcher)) .asJava }
Example 109
Source File: CouchbaseReadSideHandler.scala From akka-persistence-couchbase with Apache License 2.0 | 5 votes |
package com.lightbend.lagom.internal.scaladsl.persistence.couchbase import akka.persistence.query.Offset import akka.stream.ActorAttributes import akka.stream.alpakka.couchbase.scaladsl.CouchbaseSession import akka.stream.scaladsl.Flow import akka.{Done, NotUsed} import com.lightbend.lagom.internal.persistence.couchbase.{CouchbaseOffsetDao, CouchbaseOffsetStore} import com.lightbend.lagom.scaladsl.persistence.ReadSideProcessor.ReadSideHandler import com.lightbend.lagom.scaladsl.persistence._ import org.slf4j.LoggerFactory import scala.concurrent.{ExecutionContext, Future} private[couchbase] final class CouchbaseReadSideHandler[Event <: AggregateEvent[Event]]( couchbase: CouchbaseSession, offsetStore: CouchbaseOffsetStore, handlers: Map[Class[_ <: Event], CouchbaseReadSideHandler.Handler[Event]], globalPrepareCallback: CouchbaseSession => Future[Done], prepareCallback: (CouchbaseSession, AggregateEventTag[Event]) => Future[Done], readProcessorId: String, dispatcher: String )(implicit ec: ExecutionContext) extends ReadSideHandler[Event] { import CouchbaseReadSideHandler.Handler private val log = LoggerFactory.getLogger(this.getClass) @volatile private var offsetDao: CouchbaseOffsetDao = _ protected def invoke(handler: Handler[Event], element: EventStreamElement[Event]): Future[Done] = handler .apply(couchbase, element) .flatMap(_ => offsetDao.bindSaveOffset(element.offset).execute(couchbase, ec)) override def globalPrepare(): Future[Done] = globalPrepareCallback(couchbase) override def prepare(tag: AggregateEventTag[Event]): Future[Offset] = for { _ <- prepareCallback.apply(couchbase, tag) dao <- offsetStore.prepare(readProcessorId, tag.tag) } yield { offsetDao = dao dao.loadedOffset } override def handle(): Flow[EventStreamElement[Event], Done, NotUsed] = Flow[EventStreamElement[Event]] .mapAsync(parallelism = 1) { elem => val eventClass = elem.event.getClass val handler = handlers.getOrElse( // lookup handler eventClass, // fallback to empty handler if none { if (log.isDebugEnabled()) log.debug("Unhandled event [{}]", eventClass.getName) CouchbaseReadSideHandler.emptyHandler.asInstanceOf[Handler[Event]] } ) invoke(handler, elem) } .withAttributes(ActorAttributes.dispatcher(dispatcher)) }
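handle() applies each event handler with mapAsync(parallelism = 1), so events are processed strictly in order and the stream back-pressures until each Future completes. The same idiom reduced to its core, with a hypothetical String-typed handler:

import akka.{Done, NotUsed}
import akka.stream.scaladsl.Flow
import scala.concurrent.Future

object HandlerFlowSketch {
  // Stand-in for the per-event handler: any function producing a Future
  def handleEvent(event: String): Future[Done] = Future.successful(Done)

  // parallelism = 1 preserves event order and back-pressures until each Future completes
  val handlerFlow: Flow[String, Done, NotUsed] =
    Flow[String].mapAsync(parallelism = 1)(handleEvent)
}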
Example 110
Source File: CouchbaseReadSideImpl.scala From akka-persistence-couchbase with Apache License 2.0 | 5 votes |
package com.lightbend.lagom.internal.scaladsl.persistence.couchbase import CouchbaseReadSideHandler.Handler import akka.Done import akka.actor.ActorSystem import akka.dispatch.MessageDispatcher import akka.stream.alpakka.couchbase.scaladsl.CouchbaseSession import com.lightbend.lagom.internal.persistence.couchbase.CouchbaseOffsetStore import com.lightbend.lagom.scaladsl.persistence.ReadSideProcessor.ReadSideHandler import com.lightbend.lagom.scaladsl.persistence.couchbase.CouchbaseReadSide import com.lightbend.lagom.scaladsl.persistence.couchbase.CouchbaseReadSide.ReadSideHandlerBuilder import com.lightbend.lagom.scaladsl.persistence.{AggregateEvent, AggregateEventTag, EventStreamElement} import scala.concurrent.Future import scala.reflect.ClassTag private[lagom] final class CouchbaseReadSideImpl( system: ActorSystem, couchbaseSession: CouchbaseSession, offsetStore: CouchbaseOffsetStore ) extends CouchbaseReadSide { private val dispatcher = system.settings.config.getString("lagom.persistence.read-side.use-dispatcher") implicit val ec: MessageDispatcher = system.dispatchers.lookup(dispatcher) override def builder[Event <: AggregateEvent[Event]](readSideId: String): ReadSideHandlerBuilder[Event] = new ReadSideHandlerBuilder[Event] { var globalPrepare: CouchbaseSession => Future[Done] = (_) => Future.successful(Done) var prepare: (CouchbaseSession, AggregateEventTag[Event]) => Future[Done] = (_, _) => Future.successful(Done) private var handlers = Map.empty[Class[_ <: Event], Handler[Event]] override def setGlobalPrepare(callback: CouchbaseSession => Future[Done]): ReadSideHandlerBuilder[Event] = { globalPrepare = callback this } override def setPrepare( callback: (CouchbaseSession, AggregateEventTag[Event]) => Future[Done] ): ReadSideHandlerBuilder[Event] = { prepare = callback this } override def setEventHandler[E <: Event: ClassTag]( handler: (CouchbaseSession, EventStreamElement[E]) => Future[Done] ): ReadSideHandlerBuilder[Event] = { val eventClass = implicitly[ClassTag[E]].runtimeClass.asInstanceOf[Class[Event]] handlers += (eventClass -> handler.asInstanceOf[Handler[Event]]) this } override def build(): ReadSideHandler[Event] = new CouchbaseReadSideHandler[Event]( couchbaseSession, offsetStore, handlers, globalPrepare, prepare, readSideId, dispatcher ) } }
Example 111
Source File: CouchbaseClusteredPersistentEntitySpec.scala From akka-persistence-couchbase with Apache License 2.0 | 5 votes |
package com.lightbend.lagom.scaladsl.persistence.couchbase import java.io.File import akka.actor.{ActorSystem, CoordinatedShutdown} import akka.persistence.couchbase.CouchbaseClusterConnection import akka.stream.{ActorMaterializer, Materializer} import com.lightbend.lagom.internal.persistence.couchbase.TestConfig import com.lightbend.lagom.internal.persistence.testkit.AwaitPersistenceInit.awaitPersistenceInit import com.lightbend.lagom.scaladsl.api.ServiceLocator import com.lightbend.lagom.scaladsl.api.ServiceLocator.NoServiceLocator import com.lightbend.lagom.scaladsl.persistence.multinode.{ AbstractClusteredPersistentEntityConfig, AbstractClusteredPersistentEntitySpec } import com.lightbend.lagom.scaladsl.persistence.{ReadSideProcessor, TestEntity} import com.lightbend.lagom.scaladsl.playjson.JsonSerializerRegistry import com.typesafe.config.Config import play.api.{Configuration, Environment, Mode} import play.api.inject.DefaultApplicationLifecycle import scala.concurrent.{ExecutionContext, Future} object CouchbaseClusteredPersistentEntityConfig extends AbstractClusteredPersistentEntityConfig { override def additionalCommonConfig(databasePort: Int): Config = TestConfig.persistenceConfig } class CouchbaseClusteredPersistentEntitySpecMultiJvmNode1 extends CouchbaseClusteredPersistentEntitySpec class CouchbaseClusteredPersistentEntitySpecMultiJvmNode2 extends CouchbaseClusteredPersistentEntitySpec class CouchbaseClusteredPersistentEntitySpecMultiJvmNode3 extends CouchbaseClusteredPersistentEntitySpec class CouchbaseClusteredPersistentEntitySpec extends AbstractClusteredPersistentEntitySpec(CouchbaseClusteredPersistentEntityConfig) { import com.lightbend.lagom.scaladsl.persistence.couchbase.CouchbaseClusteredPersistentEntityConfig._ override protected def atStartup(): Unit = { runOn(node1) { CouchbaseClusterConnection.connect().cleanUp().close() awaitPersistenceInit(system) } enterBarrier("couchbase-started") super.atStartup() } lazy val defaultApplicationLifecycle = new DefaultApplicationLifecycle override lazy val components: CouchbasePersistenceComponents = new CouchbasePersistenceComponents { override def actorSystem: ActorSystem = system override def executionContext: ExecutionContext = system.dispatcher override def materializer: Materializer = ActorMaterializer()(system) override def configuration: Configuration = Configuration(system.settings.config) override def serviceLocator: ServiceLocator = NoServiceLocator override def environment: Environment = Environment(new File("."), getClass.getClassLoader, Mode.Test) override def jsonSerializerRegistry: JsonSerializerRegistry = ??? override def coordinatedShutdown: CoordinatedShutdown = CoordinatedShutdown(system) } def testEntityReadSide = new TestEntityReadSide(components.actorSystem, components.couchbase) override protected def readSideProcessor: () => ReadSideProcessor[TestEntity.Evt] = () => new TestEntityReadSide.TestEntityReadSideProcessor(system, components.couchbaseReadSide) override protected def getAppendCount(id: String): Future[Long] = testEntityReadSide.getAppendCount(id) }
Example 112
Source File: TestEntityReadSide.scala From akka-persistence-couchbase with Apache License 2.0 | 5 votes |
package com.lightbend.lagom.scaladsl.persistence.couchbase import akka.Done import akka.actor.ActorSystem import akka.stream.alpakka.couchbase.scaladsl.CouchbaseSession import com.couchbase.client.java.document.JsonDocument import com.couchbase.client.java.document.json.JsonObject import com.lightbend.lagom.scaladsl.persistence.ReadSideProcessor.ReadSideHandler import com.lightbend.lagom.scaladsl.persistence.{AggregateEventTag, EventStreamElement, ReadSideProcessor, TestEntity} import scala.concurrent.{ExecutionContext, Future} object TestEntityReadSide { class TestEntityReadSideProcessor(system: ActorSystem, readSide: CouchbaseReadSide) extends ReadSideProcessor[TestEntity.Evt] { @volatile private var prepared = false def buildHandler(): ReadSideHandler[TestEntity.Evt] = { import system.dispatcher def updateCount(cs: CouchbaseSession, element: EventStreamElement[TestEntity.Appended]): Future[Done] = getCount(cs, element.entityId) .flatMap((count: Long) => { if (!prepared) { throw new IllegalStateException("Prepare handler hasn't been called") } cs.upsert( JsonDocument.create(s"count-${element.entityId}", JsonObject.create().put("count", count + 1)) ) }) .map(_ => Done) readSide .builder[TestEntity.Evt]("testoffsets") .setGlobalPrepare(session => Future.successful(Done)) .setPrepare( (session, tag) => Future { prepared = true Done } ) .setEventHandler[TestEntity.Appended](updateCount) .build() } def aggregateTags: Set[AggregateEventTag[TestEntity.Evt]] = TestEntity.Evt.aggregateEventShards.allTags } def getCount(session: CouchbaseSession, entityId: String)(implicit ec: ExecutionContext): Future[Long] = session.get(s"count-$entityId").map { case Some(l) => l.content().getLong("count") case None => 0L } } class TestEntityReadSide(system: ActorSystem, couchbase: CouchbaseSession) { import system.dispatcher def getAppendCount(entityId: String): Future[Long] = TestEntityReadSide.getCount(couchbase, entityId) }
Example 113
Source File: CouchbaseReadSideSpec.scala From akka-persistence-couchbase with Apache License 2.0 | 5 votes |
package com.lightbend.lagom.scaladsl.persistence.couchbase import akka.persistence.couchbase.CouchbaseBucketSetup import com.lightbend.lagom.internal.persistence.ReadSideConfig import com.lightbend.lagom.internal.scaladsl.persistence.couchbase.{ CouchbasePersistentEntityRegistry, CouchbaseReadSideImpl, ScaladslCouchbaseOffsetStore } import com.lightbend.lagom.scaladsl.persistence.TestEntity.Evt import com.lightbend.lagom.scaladsl.persistence._ import com.typesafe.config.{Config, ConfigFactory} import scala.concurrent.Future import scala.concurrent.duration._ object CouchbaseReadSideSpec { val defaultConfig: Config = ConfigFactory.parseString("akka.loglevel = info") } class CouchbaseReadSideSpec extends CouchbasePersistenceSpec(CouchbaseReadSideSpec.defaultConfig, TestEntitySerializerRegistry) with AbstractReadSideSpec with CouchbaseBucketSetup { override protected lazy val persistentEntityRegistry = new CouchbasePersistentEntityRegistry(system) private lazy val offsetStore = new ScaladslCouchbaseOffsetStore(system, couchbaseSession, ReadSideConfig()) private lazy val couchbaseReadSide = new CouchbaseReadSideImpl(system, couchbaseSession, offsetStore) override def processorFactory(): ReadSideProcessor[Evt] = new TestEntityReadSide.TestEntityReadSideProcessor(system, couchbaseReadSide) private lazy val readSide = new TestEntityReadSide(system, couchbaseSession) override def getAppendCount(id: String): Future[Long] = readSide.getAppendCount(id) override def afterAll(): Unit = { persistentEntityRegistry.gracefulShutdown(5.seconds) super.afterAll() } }
Example 114
Source File: CouchbaseOffsetStore.scala From akka-persistence-couchbase with Apache License 2.0 | 5 votes |
package com.lightbend.lagom.internal.persistence.couchbase import java.util.UUID import akka.Done import akka.actor.ActorSystem import akka.annotation.InternalApi import akka.persistence.query.{NoOffset, Offset, Sequence, TimeBasedUUID} import akka.stream.alpakka.couchbase.scaladsl.CouchbaseSession import com.couchbase.client.java.document.JsonDocument import com.couchbase.client.java.document.json.JsonObject import com.lightbend.lagom.internal.persistence.ReadSideConfig import com.lightbend.lagom.scaladsl.persistence.couchbase.CouchbaseAction import com.lightbend.lagom.spi.persistence.{OffsetDao, OffsetStore} import scala.concurrent.{ExecutionContext, Future} @InternalApi private[lagom] final class CouchbaseOffsetDao( couchbase: CouchbaseSession, eventProcessorId: String, tag: String, override val loadedOffset: Offset, ec: ExecutionContext ) extends OffsetDao { override def saveOffset(offset: Offset): Future[Done] = bindSaveOffset(offset).execute(couchbase, ec) // FIXME write guarantees def bindSaveOffset(offset: Offset): CouchbaseAction = offset match { case NoOffset => new CouchbaseAction { override def execute(ab: CouchbaseSession, ec: ExecutionContext): Future[Done] = Future.successful(Done) } case uuid: TimeBasedUUID => new CouchbaseAction { override def execute(ab: CouchbaseSession, ec: ExecutionContext): Future[Done] = { val id = CouchbaseOffsetStore.offsetKey(eventProcessorId, tag) val json = JsonDocument.create( id, JsonObject.create().put(CouchbaseOffsetStore.UuidOffsetField, uuid.value.toString) ) ab.upsert(json).map(_ => Done)(ec) } } case seq: Sequence => new CouchbaseAction { override def execute(ab: CouchbaseSession, ec: ExecutionContext): Future[Done] = { val id = CouchbaseOffsetStore.offsetKey(eventProcessorId, tag) val json = JsonDocument.create(id, JsonObject.create().put(CouchbaseOffsetStore.SequenceOffsetField, seq.value)) ab.upsert(json).map(_ => Done)(ec) } } } }
Example 115
Source File: TagSequenceNumbering.scala From akka-persistence-couchbase with Apache License 2.0 | 5 votes |
package akka.persistence.couchbase.internal import java.util.concurrent.ConcurrentHashMap import akka.annotation.InternalApi import akka.event.LoggingAdapter import akka.persistence.couchbase.internal.CouchbaseSchema.{Fields, Queries} import com.couchbase.client.java.query.N1qlParams import scala.concurrent.{ExecutionContext, Future} def evictSeqNrsFor(pid: PersistenceId): Unit = { val keys = taggingPerPidSequenceNumbers.keySet.iterator() while (keys.hasNext) { val key @ (keyPid, _) = keys.next() if (keyPid == pid) keys.remove() } } protected def currentTagSeqNrFromDb(pid: PersistenceId, tag: Tag): Future[Option[Long]] = withCouchbaseSession { session => val query = highestTagSequenceNumberQuery(pid, tag, queryConsistency) log.debug("currentTagSeqNrFromDb: {}", query) session.singleResponseQuery(query).map { case Some(json) => Some(json.getLong(Fields.TagSeqNr)) case None => None } } }
Example 116
Source File: AsyncCouchbaseSession.scala From akka-persistence-couchbase with Apache License 2.0 | 5 votes |
package akka.persistence.couchbase.internal import akka.annotation.InternalApi import akka.stream.alpakka.couchbase.scaladsl.CouchbaseSession import akka.stream.scaladsl.Source import akka.{Done, NotUsed} import scala.concurrent.{ExecutionContext, Future} import scala.util.Success protected def asyncSession: Future[CouchbaseSession] // optimizations avoiding an extra execution context jump when the session is already created // (most of the time after startup completed) final def withCouchbaseSession[A](f: CouchbaseSession => Future[A])(implicit ec: ExecutionContext): Future[A] = asyncSession.value match { case Some(Success(c)) => f(c) case _ => asyncSession.flatMap(f) } final def sourceWithCouchbaseSession[Out]( f: CouchbaseSession => Source[Out, NotUsed] )(implicit ec: ExecutionContext): Source[Out, NotUsed] = asyncSession.value match { case Some(Success(c)) => f(c) case _ => Source.fromFutureSource(asyncSession.map(f)).mapMaterializedValue(_ => NotUsed) } final def closeCouchbaseSession()(implicit ec: ExecutionContext): Future[Done] = //leaving it closing behind for now asyncSession.value match { case Some(Success(session)) => session.close() case _ => asyncSession.flatMap(_.close()) } }
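withCouchbaseSession inspects asyncSession.value so that, once the session Future has completed, the callback runs without an extra hop onto the ExecutionContext. The fast-path idiom on its own, as an illustrative sketch:

import scala.concurrent.{ExecutionContext, Future}
import scala.util.Success

object FastPath {
  // If `fut` has already completed successfully, apply `f` on the calling thread;
  // otherwise fall back to flatMap, which schedules onto the ExecutionContext.
  def withCompleted[A, B](fut: Future[A])(f: A => Future[B])(implicit ec: ExecutionContext): Future[B] =
    fut.value match {
      case Some(Success(a)) => f(a)
      case _                => fut.flatMap(f)
    }
}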
Example 117
Source File: FutureUtilsSpec.scala From akka-persistence-couchbase with Apache License 2.0 | 5 votes |
package akka.persistence.couchbase.internal import org.scalatest.concurrent.ScalaFutures import org.scalatest.{Matchers, WordSpec} import org.scalatest.Inspectors._ import scala.concurrent.Future import scala.concurrent.ExecutionContext.Implicits.global class FutureUtilsSpec extends WordSpec with Matchers with ScalaFutures { "The future utils" must { "allow for sequential traversal" in { @volatile var counter = -1 val result = FutureUtils .traverseSequential(0 to 1000)( n => Future { counter += 1 (n, counter) } ) .futureValue forAll(result) { case (n, c) => n should ===(c) } } } }
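The spec exercises FutureUtils.traverseSequential, whose implementation is not shown in this listing; its assertion only holds if each Future is created after the previous one has finished. One possible shape for such a helper, offered as a sketch rather than the project's actual code:

import scala.concurrent.{ExecutionContext, Future}

object SequentialTraversal {
  // Runs `f` over the elements strictly one after another; Future.traverse,
  // by contrast, creates all the Futures up front.
  def traverseSequential[A, B](in: Seq[A])(f: A => Future[B])(implicit ec: ExecutionContext): Future[Seq[B]] =
    in.foldLeft(Future.successful(Vector.empty[B])) { (accF, a) =>
      accF.flatMap(acc => f(a).map(acc :+ _))
    }
}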
Example 118
Source File: TransactionCommands.scala From scredis with Apache License 2.0 | 5 votes |
package scredis.commands import scredis.TransactionBuilder import scredis.exceptions.RedisTransactionBuilderException import scredis.io.{Connection, NonBlockingConnection, TransactionEnabledConnection} import scredis.protocol.requests.TransactionRequests._ import scala.concurrent.Future import scala.util.Try def withTransaction[A](build: TransactionBuilder => A): A = { val builder = new TransactionBuilder() try { val result = build(builder) send(builder.result()) result } catch { case e: Throwable => throw RedisTransactionBuilderException(cause = e) } } }
Example 119
Source File: Request.scala From scredis with Apache License 2.0 | 5 votes |
package scredis.protocol import java.nio.ByteBuffer import scredis.exceptions._ import scredis.serialization.UTF8StringReader import scala.concurrent.{Future, Promise} private[scredis] def reset(): Unit = { promise = Promise[A]() _buffer = null _bytes = null } private[scredis] def encode(): Unit = if (_buffer == null && _bytes == null) { command match { case x: ZeroArgCommand => _bytes = x.encoded case _ => _buffer = command.encode(args.toList) } } private[scredis] def encoded: Either[Array[Byte], ByteBuffer] = if (_bytes != null) { Left(_bytes) } else { Right(_buffer) } private[scredis] def complete(response: Response): Unit = { response match { case SimpleStringResponse("QUEUED") => case ClusterErrorResponse(error,message) => failure(RedisClusterErrorResponseException(error,message)) case ErrorResponse(message) => failure(RedisErrorResponseException(message)) case response => try { success(decode(response)) } catch { case e @ RedisTransactionAbortedException => failure(e) case e: RedisReaderException => failure(e) case e: Throwable => failure( RedisProtocolException(s"Unexpected response for request '$this': $response", e) ) } } } private[scredis] def success(value: Any): Unit = { try { promise.success(value.asInstanceOf[A]) } catch { case e: IllegalStateException => } finally { Protocol.release() } } private[scredis] def failure(throwable: Throwable): Unit = { try { promise.failure(throwable) } catch { case e: IllegalStateException => } finally { Protocol.release() } } def decode: Decoder[A] def argsCount: Int = args.size def isReadOnly: Boolean = command.isReadOnly override def toString: String = (command +: args).map { case bytes: Array[Byte] => UTF8StringReader.read(bytes) case x => x.toString }.mkString(" ") }
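The success and failure methods above swallow the IllegalStateException thrown when the Promise is already completed. For comparison, the standard library's trySuccess/tryFailure give the same at-most-once completion without a try/catch:

import scala.concurrent.Promise

object PromiseCompletion {
  val promise = Promise[Int]()

  // trySuccess / tryFailure return false instead of throwing
  // when the Promise has already been completed
  val firstWins: Boolean   = promise.trySuccess(42)                            // true
  val secondLoses: Boolean = promise.tryFailure(new RuntimeException("late"))  // false
}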
Example 120
Source File: TransactionBuilder.scala From scredis with Apache License 2.0 | 5 votes |
package scredis import scredis.io.{ Connection, NonBlockingConnection } import scredis.commands._ import scredis.protocol.Request import scredis.exceptions.RedisTransactionBuilderException import scala.collection.mutable.ListBuffer import scala.concurrent.{ ExecutionContext, Future } final class TransactionBuilder private[scredis] ()( implicit val dispatcher: ExecutionContext ) extends Connection with NonBlockingConnection with ConnectionCommands with HashCommands with HyperLogLogCommands with KeyCommands with ListCommands with PubSubCommands with ScriptingCommands with ServerCommands with SetCommands with SortedSetCommands with StringCommands { private val requests = ListBuffer[Request[_]]() @volatile private var isClosed = false override protected[scredis] def send[A](request: Request[A]): Future[A] = { if (isClosed) { throw RedisTransactionBuilderException( s"Cannot re-use a closed transaction builder; cannot queue '$request'" ) } requests += request request.future } private[scredis] def result(): Transaction = { isClosed = true Transaction(requests.toList) } }
Example 121
Source File: Connection.scala From scredis with Apache License 2.0 | 5 votes |
package scredis.io import scredis.protocol._ import scredis.{Subscription, Transaction} import scala.concurrent.duration._ import scala.concurrent.{ExecutionContext, Future} import scala.util.Try trait Connection { implicit val dispatcher: ExecutionContext } trait NonBlockingConnection { protected[scredis] def send[A](request: Request[A]): Future[A] } trait TransactionEnabledConnection { protected[scredis] def send(transaction: Transaction): Future[Vector[Try[Any]]] } trait BlockingConnection { protected[scredis] def sendBlocking[A](request: Request[A])(implicit timeout: Duration): Try[A] } trait SubscriberConnection { protected[scredis] def sendAsSubscriber(request: Request[_]): Future[Int] }
Example 122
Source File: AkkaNonBlockingConnection.scala From scredis with Apache License 2.0 | 5 votes |
package scredis.io import akka.actor._ import scredis.Transaction import scredis.exceptions._ import scredis.protocol._ import scredis.util.UniqueNameGenerator import scala.concurrent.Future import scala.concurrent.duration._ import scala.util.Try abstract class AkkaNonBlockingConnection( system: ActorSystem, host: String, port: Int, passwordOpt: Option[String], database: Int, nameOpt: Option[String], decodersCount: Int, receiveTimeoutOpt: Option[FiniteDuration], connectTimeout: FiniteDuration, maxWriteBatchSize: Int, tcpSendBufferSizeHint: Int, tcpReceiveBufferSizeHint: Int, akkaListenerDispatcherPath: String, akkaIODispatcherPath: String, akkaDecoderDispatcherPath: String, failCommandOnConnecting: Boolean ) extends AbstractAkkaConnection( system = system, host = host, port = port, passwordOpt = passwordOpt, database = database, nameOpt = nameOpt, decodersCount = decodersCount, receiveTimeoutOpt = receiveTimeoutOpt, connectTimeout = connectTimeout, maxWriteBatchSize = maxWriteBatchSize, tcpSendBufferSizeHint = tcpSendBufferSizeHint, tcpReceiveBufferSizeHint = tcpReceiveBufferSizeHint, akkaListenerDispatcherPath = akkaListenerDispatcherPath, akkaIODispatcherPath = akkaIODispatcherPath, akkaDecoderDispatcherPath = akkaDecoderDispatcherPath ) with NonBlockingConnection with TransactionEnabledConnection { protected val listenerActor: ActorRef = system.actorOf( Props( classOf[ListenerActor], host, port, passwordOpt, database, nameOpt, decodersCount, receiveTimeoutOpt, connectTimeout, maxWriteBatchSize, tcpSendBufferSizeHint, tcpReceiveBufferSizeHint, akkaIODispatcherPath, akkaDecoderDispatcherPath, failCommandOnConnecting ).withDispatcher(akkaListenerDispatcherPath), UniqueNameGenerator.getUniqueName(s"${nameOpt.getOrElse(s"$host-$port")}-listener-actor") ) override protected[scredis] def send[A](request: Request[A]): Future[A] = { if (isShuttingDown) { Future.failed(RedisIOException("Connection has been shutdown")) } else { logger.debug(s"Sending request: $request") updateState(request) Protocol.send(request, listenerActor) } } override protected[scredis] def send(transaction: Transaction): Future[Vector[Try[Any]]] = { if (isShuttingDown) { Future.failed(RedisIOException("Connection has been shutdown")) } else { logger.debug(s"Sending transaction: $transaction") transaction.requests.foreach(updateState) Protocol.send(transaction, listenerActor) } } }
Example 123
Source File: DecoderActor.scala From scredis with Apache License 2.0 | 5 votes |
package scredis.io import akka.actor.{Actor, ActorLogging} import akka.util.ByteString import scredis.PubSubMessage.Message import scredis.exceptions.RedisProtocolException import scredis.protocol.{ErrorResponse, Protocol, Request} import scredis.{PubSubMessage, Subscription} import scala.concurrent.{ExecutionContext, Future} class DecoderActor(subscriptionOption: Option[Subscription]) extends Actor with ActorLogging { import DecoderActor._ def receive: Receive = { case Partition(data, requests, skip) => val buffer = data.asByteBuffer for (i <- 1 to skip) { try { Protocol.decode(buffer) } catch { case e: Throwable => log.error("Could not decode response", e) } } while (requests.hasNext) { val request = requests.next() try { val response = Protocol.decode(buffer) request.complete(response) } catch { case e: Throwable => log.error("Could not decode response", e) request.failure(RedisProtocolException("Could not decode response", e)) } } case SubscribePartition(data) => val buffer = data.asByteBuffer while (buffer.remaining > 0) { try { val result = Protocol.decodePubSubResponse(Protocol.decode(buffer)) result match { case Left(ErrorResponse(message)) => sender ! SubscriberListenerActor.Fail(message) case Right(msgEither) => msgEither match { case Right(m: PubSubMessage.Subscribe) => sender ! SubscriberListenerActor.Complete(m) case Right(m: PubSubMessage.PSubscribe) => sender ! SubscriberListenerActor.Complete(m) case Right(m: PubSubMessage.Unsubscribe) => sender ! SubscriberListenerActor.Complete(m) case Right(m: PubSubMessage.PUnsubscribe) => sender ! SubscriberListenerActor.Complete(m) case Right(m: PubSubMessage.Error) => sender ! SubscriberListenerActor.Complete(m) case Right(m: PubSubMessage.Message) => case Right(m: PubSubMessage.PMessage) => case Left(value) => sender ! SubscriberListenerActor.Confirm(value) } } result match { case Right(Right(message)) => subscriptionOption match { case Some(subscription) => Future {subscription.apply(message)}(ExecutionContext.global) case None => log.error("Received SubscribePartition without any subscription") } case _ => } } catch { case e: Throwable => val msg = data.decodeString("UTF-8").replace("\r\n", "\\r\\n") log.error(s"Could not decode PubSubMessage: $msg", e) } } case x => log.error(s"Received unexpected message: $x") } } object DecoderActor { case class Partition(data: ByteString, requests: Iterator[Request[_]], skip: Int) case class SubscribePartition(data: ByteString) }
Example 124
Source File: ClientBenchmark.scala From scredis with Apache License 2.0 | 5 votes |
package scredis import org.scalameter.api._ import org.scalameter.picklers.Implicits._ import akka.actor.ActorSystem import org.scalameter.execution.SeparateJvmsExecutor import scala.concurrent.{Await, Future} import scala.concurrent.duration._ object ClientBenchmark extends Bench[Double] { private var system: ActorSystem = _ private var client: Client = _ performance of "Client" in { measure method "PING" in { using(sizes) config { exec.maxWarmupRuns -> 3 exec.benchRuns -> 3 exec.independentSamples -> 3 } setUp { _ => system = ActorSystem() client = Client()(system) } tearDown { _ => Await.result(client.quit(), 2.seconds) Await.result(system.terminate(), 10.seconds) client = null system = null } in { i => implicit val ec = system.dispatcher val future = Future.traverse(1 to i) { _ => client.ping() } Await.result(future, 30.seconds) } } measure method "GET" in { using(sizes) config { exec.maxWarmupRuns -> 3 exec.benchRuns -> 3 exec.independentSamples -> 3 } setUp { _ => system = ActorSystem() client = Client()(system) Await.result(client.set("foo", "bar"), 2.seconds) } tearDown { _ => Await.result(client.del("foo"), 2.seconds) Await.result(client.quit(), 2.seconds) Await.result(system.terminate(), 10.seconds) client = null system = null } in { i => implicit val ec = system.dispatcher val future = Future.traverse(1 to i) { _ => client.get("foo") } Await.result(future, 30.seconds) } } measure method "SET" in { using(sizes) config { exec.maxWarmupRuns -> 3 exec.benchRuns -> 3 exec.independentSamples -> 3 } setUp { _ => system = ActorSystem() client = Client()(system) } tearDown { _ => Await.result(client.del("foo"), 2.seconds) Await.result(client.quit(), 2.seconds) Await.result(system.terminate(), 10.seconds) client = null system = null } in { i => implicit val ec = system.dispatcher val future = Future.traverse(1 to i) { _ => client.set("foo", "bar") } Await.result(future, 30.seconds) } } } }
Example 125
Source File: basic_auth_api_yaml.scala From play-swagger with MIT License | 5 votes |
package basic.auth.api.yaml import scala.concurrent.Future import play.api.mvc._ import de.zalando.play.controllers.{FutureAuthenticatedBuilder,PlayBodyParsing} trait BasicAuthApiYamlSecurity extends SecurityExtractors { import SecurityExtractorsExecutionContext.ec object getSecureAction extends FutureAuthenticatedBuilder( req => { val secureChecks: Seq[RequestHeader => Future[Option[_]]] = Seq(basicAuth_Extractor()) val individualChecks: Future[Seq[Option[_]]] = Future.sequence(secureChecks.map(_.apply(req))) individualChecks.map { checks => checks.find(_.isEmpty).getOrElse(Option(checks.map(_.get))) } }, unauthorizedContent) }
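The generated action runs every security extractor and combines the results with Future.sequence: any None fails authentication, otherwise the extracted values are kept. That combination step in isolation, as a sketch with hypothetical names:

import scala.concurrent.{ExecutionContext, Future}

object SecurityChecks {
  // Each check resolves to Some(principal) on success or None on failure
  def combine[A](checks: Seq[Future[Option[A]]])(implicit ec: ExecutionContext): Future[Option[Seq[A]]] =
    Future.sequence(checks).map { results =>
      if (results.exists(_.isEmpty)) None   // any failed check rejects the request
      else Some(results.map(_.get))         // all passed: keep the extracted values
    }
}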
Example 126
Source File: instagram_api_yaml.extractor.scala From play-swagger with MIT License | 5 votes |
package instagram.api.yaml import scala.concurrent.Future import play.api.mvc._ import de.zalando.play.controllers.SwaggerSecurityExtractors._ import scala.math.BigInt import scala.math.BigDecimal object SecurityExtractorsExecutionContext { // this ExecutionContext might be overridden if default configuration is not suitable for some reason implicit val ec = de.zalando.play.controllers.Contexts.tokenChecking } trait SecurityExtractors { def oauth_Extractor[User >: Any](scopes: String*): RequestHeader => Future[Option[User]] = header => oAuth(scopes)("https://instagram.com/oauth/authorize/?client_id=CLIENT-ID&redirect_uri=REDIRECT-URI&response_type=token")(header) { (token: play.api.libs.json.JsValue) => ??? } def key_Extractor[User >: Any](): RequestHeader => Future[Option[User]] = header => queryApiKey("access_token")(header) { (apiKey: String) => ??? } implicit val unauthorizedContentWriter = ??? def unauthorizedContent(req: RequestHeader) = Results.Unauthorized(???) }
Example 127
Source File: full_petstore_api_yaml.extractor.scala From play-swagger with MIT License | 5 votes |
package full.petstore.api.yaml import scala.concurrent.Future import play.api.mvc._ import de.zalando.play.controllers.SwaggerSecurityExtractors._ import de.zalando.play.controllers.ArrayWrapper import org.joda.time.DateTime object SecurityExtractorsExecutionContext { // this ExecutionContext might be overridden if default configuration is not suitable for some reason implicit val ec = de.zalando.play.controllers.Contexts.tokenChecking } trait SecurityExtractors { def api_key_Extractor[User >: Any](): RequestHeader => Future[Option[User]] = header => headerApiKey("api_key")(header) { (apiKey: String) => ??? } def petstore_auth_Extractor[User >: Any](scopes: String*): RequestHeader => Future[Option[User]] = header => oAuth(scopes)("http://petstore.swagger.wordnik.com/oauth/dialog")(header) { (token: play.api.libs.json.JsValue) => ??? } implicit val unauthorizedContentWriter = ??? def unauthorizedContent(req: RequestHeader) = Results.Unauthorized(???) }
Example 128
Source File: security_api_yaml.extractor.scala From play-swagger with MIT License | 5 votes |
package security.api.yaml import scala.concurrent.Future import play.api.mvc._ import de.zalando.play.controllers.SwaggerSecurityExtractors._ import de.zalando.play.controllers.ArrayWrapper object SecurityExtractorsExecutionContext { // this ExecutionContext might be overridden if default configuration is not suitable for some reason implicit val ec = de.zalando.play.controllers.Contexts.tokenChecking } trait SecurityExtractors { def petstoreImplicit_Extractor[User >: Any](scopes: String*): RequestHeader => Future[Option[User]] = header => oAuth(scopes)("http://petstore.swagger.wordnik.com/oauth/dialog")(header) { (token: play.api.libs.json.JsValue) => ??? } def githubAccessCode_Extractor[User >: Any](scopes: String*): RequestHeader => Future[Option[User]] = header => oAuth(scopes)("https://github.com/login/oauth/access_token")(header) { (token: play.api.libs.json.JsValue) => ??? } def petstorePassword_Extractor[User >: Any](scopes: String*): RequestHeader => Future[Option[User]] = header => oAuth(scopes)("http://petstore.swagger.wordnik.com/oauth/dialog")(header) { (token: play.api.libs.json.JsValue) => ??? } def justBasicStuff_Extractor[User >: Any](): RequestHeader => Future[Option[User]] = header => basicAuth(header) { (username: String, password: String) => ??? } def petstoreApplication_Extractor[User >: Any](scopes: String*): RequestHeader => Future[Option[User]] = header => oAuth(scopes)("http://petstore.swagger.wordnik.com/oauth/token")(header) { (token: play.api.libs.json.JsValue) => ??? } def internalApiKey_Extractor[User >: Any](): RequestHeader => Future[Option[User]] = header => headerApiKey("api_key")(header) { (apiKey: String) => ??? } implicit val unauthorizedContentWriter = ??? def unauthorizedContent(req: RequestHeader) = Results.Unauthorized(???) }
Example 129
Source File: security_api_yaml.scala From play-swagger with MIT License | 5 votes |
package security.api.yaml import scala.concurrent.Future import play.api.mvc._ import de.zalando.play.controllers.{FutureAuthenticatedBuilder,PlayBodyParsing} import de.zalando.play.controllers.ArrayWrapper trait SecurityApiYamlSecurity extends SecurityExtractors { import SecurityExtractorsExecutionContext.ec class getPetsByIdSecureAction(scopes: String*) extends FutureAuthenticatedBuilder( req => { val secureChecks: Seq[RequestHeader => Future[Option[_]]] = Seq(githubAccessCode_Extractor("user"), internalApiKey_Extractor()) val individualChecks: Future[Seq[Option[_]]] = Future.sequence(secureChecks.map(_.apply(req))) individualChecks.map { checks => checks.find(_.isEmpty).getOrElse(Option(checks.map(_.get))) } }, unauthorizedContent) }
Example 130
Source File: split_petstore_api_yaml.scala From play-swagger with MIT License | 5 votes |
package split.petstore.api.yaml import scala.concurrent.Future import play.api.mvc._ import de.zalando.play.controllers.{FutureAuthenticatedBuilder,PlayBodyParsing} import de.zalando.play.controllers.ArrayWrapper import org.joda.time.DateTime trait SplitPetstoreApiYamlSecurity extends SecurityExtractors { import SecurityExtractorsExecutionContext.ec class findPetsByTagsSecureAction(scopes: String*) extends FutureAuthenticatedBuilder( req => { val secureChecks: Seq[RequestHeader => Future[Option[_]]] = Seq(petstore_auth_Extractor("write_pets", "read_pets")) val individualChecks: Future[Seq[Option[_]]] = Future.sequence(secureChecks.map(_.apply(req))) individualChecks.map { checks => checks.find(_.isEmpty).getOrElse(Option(checks.map(_.get))) } }, unauthorizedContent) class updatePetSecureAction(scopes: String*) extends FutureAuthenticatedBuilder( req => { val secureChecks: Seq[RequestHeader => Future[Option[_]]] = Seq(petstore_auth_Extractor("write_pets", "read_pets")) val individualChecks: Future[Seq[Option[_]]] = Future.sequence(secureChecks.map(_.apply(req))) individualChecks.map { checks => checks.find(_.isEmpty).getOrElse(Option(checks.map(_.get))) } }, unauthorizedContent) class addPetSecureAction(scopes: String*) extends FutureAuthenticatedBuilder( req => { val secureChecks: Seq[RequestHeader => Future[Option[_]]] = Seq(petstore_auth_Extractor("write_pets", "read_pets")) val individualChecks: Future[Seq[Option[_]]] = Future.sequence(secureChecks.map(_.apply(req))) individualChecks.map { checks => checks.find(_.isEmpty).getOrElse(Option(checks.map(_.get))) } }, unauthorizedContent) class getPetByIdSecureAction(scopes: String*) extends FutureAuthenticatedBuilder( req => { val secureChecks: Seq[RequestHeader => Future[Option[_]]] = Seq(api_key_Extractor(), petstore_auth_Extractor("write_pets", "read_pets")) val individualChecks: Future[Seq[Option[_]]] = Future.sequence(secureChecks.map(_.apply(req))) individualChecks.map { checks => checks.find(_.isEmpty).getOrElse(Option(checks.map(_.get))) } }, unauthorizedContent) class updatePetWithFormSecureAction(scopes: String*) extends FutureAuthenticatedBuilder( req => { val secureChecks: Seq[RequestHeader => Future[Option[_]]] = Seq(petstore_auth_Extractor("write_pets", "read_pets")) val individualChecks: Future[Seq[Option[_]]] = Future.sequence(secureChecks.map(_.apply(req))) individualChecks.map { checks => checks.find(_.isEmpty).getOrElse(Option(checks.map(_.get))) } }, unauthorizedContent) class deletePetSecureAction(scopes: String*) extends FutureAuthenticatedBuilder( req => { val secureChecks: Seq[RequestHeader => Future[Option[_]]] = Seq(petstore_auth_Extractor("write_pets", "read_pets")) val individualChecks: Future[Seq[Option[_]]] = Future.sequence(secureChecks.map(_.apply(req))) individualChecks.map { checks => checks.find(_.isEmpty).getOrElse(Option(checks.map(_.get))) } }, unauthorizedContent) class findPetsByStatusSecureAction(scopes: String*) extends FutureAuthenticatedBuilder( req => { val secureChecks: Seq[RequestHeader => Future[Option[_]]] = Seq(petstore_auth_Extractor("write_pets", "read_pets")) val individualChecks: Future[Seq[Option[_]]] = Future.sequence(secureChecks.map(_.apply(req))) individualChecks.map { checks => checks.find(_.isEmpty).getOrElse(Option(checks.map(_.get))) } }, unauthorizedContent) }
Example 131
Source File: full_petstore_api_yaml.scala From play-swagger with MIT License | 5 votes |
package full.petstore.api.yaml import scala.concurrent.Future import play.api.mvc._ import de.zalando.play.controllers.{FutureAuthenticatedBuilder,PlayBodyParsing} import de.zalando.play.controllers.ArrayWrapper import org.joda.time.DateTime trait FullPetstoreApiYamlSecurity extends SecurityExtractors { import SecurityExtractorsExecutionContext.ec class findPetsByTagsSecureAction(scopes: String*) extends FutureAuthenticatedBuilder( req => { val secureChecks: Seq[RequestHeader => Future[Option[_]]] = Seq(petstore_auth_Extractor("write_pets", "read_pets")) val individualChecks: Future[Seq[Option[_]]] = Future.sequence(secureChecks.map(_.apply(req))) individualChecks.map { checks => checks.find(_.isEmpty).getOrElse(Option(checks.map(_.get))) } }, unauthorizedContent) class updatePetSecureAction(scopes: String*) extends FutureAuthenticatedBuilder( req => { val secureChecks: Seq[RequestHeader => Future[Option[_]]] = Seq(petstore_auth_Extractor("write_pets", "read_pets")) val individualChecks: Future[Seq[Option[_]]] = Future.sequence(secureChecks.map(_.apply(req))) individualChecks.map { checks => checks.find(_.isEmpty).getOrElse(Option(checks.map(_.get))) } }, unauthorizedContent) class addPetSecureAction(scopes: String*) extends FutureAuthenticatedBuilder( req => { val secureChecks: Seq[RequestHeader => Future[Option[_]]] = Seq(petstore_auth_Extractor("write_pets", "read_pets")) val individualChecks: Future[Seq[Option[_]]] = Future.sequence(secureChecks.map(_.apply(req))) individualChecks.map { checks => checks.find(_.isEmpty).getOrElse(Option(checks.map(_.get))) } }, unauthorizedContent) class getPetByIdSecureAction(scopes: String*) extends FutureAuthenticatedBuilder( req => { val secureChecks: Seq[RequestHeader => Future[Option[_]]] = Seq(api_key_Extractor(), petstore_auth_Extractor("write_pets", "read_pets")) val individualChecks: Future[Seq[Option[_]]] = Future.sequence(secureChecks.map(_.apply(req))) individualChecks.map { checks => checks.find(_.isEmpty).getOrElse(Option(checks.map(_.get))) } }, unauthorizedContent) class updatePetWithFormSecureAction(scopes: String*) extends FutureAuthenticatedBuilder( req => { val secureChecks: Seq[RequestHeader => Future[Option[_]]] = Seq(petstore_auth_Extractor("write_pets", "read_pets")) val individualChecks: Future[Seq[Option[_]]] = Future.sequence(secureChecks.map(_.apply(req))) individualChecks.map { checks => checks.find(_.isEmpty).getOrElse(Option(checks.map(_.get))) } }, unauthorizedContent) class deletePetSecureAction(scopes: String*) extends FutureAuthenticatedBuilder( req => { val secureChecks: Seq[RequestHeader => Future[Option[_]]] = Seq(petstore_auth_Extractor("write_pets", "read_pets")) val individualChecks: Future[Seq[Option[_]]] = Future.sequence(secureChecks.map(_.apply(req))) individualChecks.map { checks => checks.find(_.isEmpty).getOrElse(Option(checks.map(_.get))) } }, unauthorizedContent) class findPetsByStatusSecureAction(scopes: String*) extends FutureAuthenticatedBuilder( req => { val secureChecks: Seq[RequestHeader => Future[Option[_]]] = Seq(petstore_auth_Extractor("write_pets", "read_pets")) val individualChecks: Future[Seq[Option[_]]] = Future.sequence(secureChecks.map(_.apply(req))) individualChecks.map { checks => checks.find(_.isEmpty).getOrElse(Option(checks.map(_.get))) } }, unauthorizedContent) }
Example 132
Source File: basic_auth_api_yaml.extractor.scala From play-swagger with MIT License | 5 votes |
package basic.auth.api.yaml import scala.concurrent.Future import play.api.mvc._ import de.zalando.play.controllers.SwaggerSecurityExtractors._ object SecurityExtractorsExecutionContext { // this ExecutionContext might be overridden if default configuration is not suitable for some reason implicit val ec = de.zalando.play.controllers.Contexts.tokenChecking } trait SecurityExtractors { def basicAuth_Extractor[User >: Any](): RequestHeader => Future[Option[User]] = header => basicAuth(header) { (username: String, password: String) => ??? } implicit val unauthorizedContentWriter = ??? def unauthorizedContent(req: RequestHeader) = Results.Unauthorized(???) }
Example 133
Source File: split_petstore_api_yaml.extractor.scala From play-swagger with MIT License | 5 votes |
package split.petstore.api.yaml import scala.concurrent.Future import play.api.mvc._ import de.zalando.play.controllers.SwaggerSecurityExtractors._ import de.zalando.play.controllers.ArrayWrapper import org.joda.time.DateTime object SecurityExtractorsExecutionContext { // this ExecutionContext might be overridden if default configuration is not suitable for some reason implicit val ec = de.zalando.play.controllers.Contexts.tokenChecking } trait SecurityExtractors { def api_key_Extractor[User >: Any](): RequestHeader => Future[Option[User]] = header => headerApiKey("api_key")(header) { (apiKey: String) => ??? } def petstore_auth_Extractor[User >: Any](scopes: String*): RequestHeader => Future[Option[User]] = header => oAuth(scopes)("http://petstore.swagger.wordnik.com/oauth/dialog")(header) { (token: play.api.libs.json.JsValue) => ??? } implicit val unauthorizedContentWriter = ??? def unauthorizedContent(req: RequestHeader) = Results.Unauthorized(???) }
Example 134
Source File: KVStore.scala From Freasy-Monad with MIT License | 5 votes |
package examples.scalaz import scalaz._ import scalaz.Id.Id import freasymonad.scalaz._ import scala.collection.mutable import scala.concurrent.duration.Duration import scala.concurrent.{Await, Future} @free trait KVStore { type KVStoreF[A] = Free[GrammarADT, A] sealed trait GrammarADT[A] def put[T](key: String, value: T): KVStoreF[Unit] def get[T](key: String): KVStoreF[Option[T]] def delete(key: String): KVStoreF[Unit] def update[T](key: String, f: T => T): KVStoreF[Unit] = for { vMaybe <- get[T](key) _ <- vMaybe.map(v => put[T](key, f(v))).getOrElse(Free.pure(())) } yield () } object Main extends App { import KVStore.ops._ def program: KVStoreF[Option[Int]] = for { _ <- put("wild-cats", 2) _ <- update[Int]("wild-cats", _ + 12) _ <- put("tame-cats", 5) n <- get[Int]("wild-cats") _ <- delete("tame-cats") } yield n val idInterpreter = new KVStore.Interp[Id] { val kvs = mutable.Map.empty[String, Any] def get[T](key: String): Id[Option[T]] = { println(s"get($key)") kvs.get(key).map(_.asInstanceOf[T]) } def put[T](key: String, value: T): Id[Unit] = { println(s"put($key, $value)") kvs(key) = value } def delete(key: String): Id[Unit] = { println(s"delete($key)") kvs.remove(key) } } val resId: Id[Option[Int]] = idInterpreter.run(program) import scalaz.std.scalaFuture.futureInstance import scala.concurrent.ExecutionContext.Implicits.global val futureInterpreter = new KVStore.Interp[Future] { val kvs = mutable.Map.empty[String, Any] def get[T](key: String): Future[Option[T]] = Future { println(s"get($key)") kvs.get(key).map(_.asInstanceOf[T]) } def put[T](key: String, value: T): Future[Unit] = Future { println(s"put($key, $value)") kvs(key) = value } def delete(key: String): Future[Unit] = Future { println(s"delete($key)") kvs.remove(key) } } val resFuture: Future[Option[Int]] = futureInterpreter.run(program) Await.ready(resFuture, Duration.Inf) }
Example 135
Source File: UserManagementDAO.scala From silhouette-vuejs-app with Apache License 2.0 | 5 votes |
package models.daos import com.mohiva.play.silhouette.impl.providers.CredentialsProvider import com.mohiva.play.silhouette.impl.providers.oauth1.TwitterProvider import com.mohiva.play.silhouette.impl.providers.oauth2.{FacebookProvider, GoogleProvider} import javax.inject.Inject import models.UserManagementModel import play.api.db.slick.DatabaseConfigProvider import utils.{GridRequest, GridResponse, SlickGridQuerySupport} import scala.concurrent.{ExecutionContext, Future} class UserManagementDAO @Inject() (protected val dbConfigProvider: DatabaseConfigProvider)(implicit ec: ExecutionContext) extends DAOSlick with SlickGridQuerySupport { def fetchUsersGridData(gridRequest: GridRequest): Future[GridResponse[UserManagementModel]] = { import MyPostgresProfile.api._ import utils.DynamicGridQuerySupport.GridQuerySupportImplicits val baseQuery = slickUsers.filterOpt(gridRequest.filter)((user, key) => (user.firstName like s"%$key%") || (user.lastName like s"%$key%") || (user.email like s"%$key%") ) .join(slickUserLoginInfos).on(_.id === _.userID) .joinLeft(slickLoginInfos).on((q, li) => q._2.loginInfoId === li.id) .groupBy { case ((u, _), _) => u } .map { case (u, group) => (u, group.map(_._2.map(_.providerID)).arrayAgg[String]) } val gridQuery = baseQuery.toGridQuery.withSortableColumns { case "email" => { case (u, _) => u.email } case "lastName" => { case (u, _) => u.lastName } case "firstName" => { case (u, _) => u.firstName } case "role" => { case (u, _) => u.roleId } case "signedUpAt" => { case (u, _) => u.signedUpAt } } runGridQuery(gridQuery, gridRequest).map { gridResponse => val userManagementModelData = gridResponse.data.map { case (u, providerIds) => UserManagementModel( u.userID, u.firstName, u.lastName, u.email, u.roleId, u.activated, u.signedUpAt, providerIds.contains(CredentialsProvider.ID), providerIds.contains(GoogleProvider.ID), providerIds.contains(FacebookProvider.ID), providerIds.contains(TwitterProvider.ID)) } gridResponse.copy(data = userManagementModelData) } } }
Example 136
Source File: MailService.scala From silhouette-vuejs-app with Apache License 2.0 | 5 votes |
package models.services import com.sendgrid.{Method, Request, SendGrid} import com.sendgrid.helpers.mail.Mail import com.sendgrid.helpers.mail.objects.{Content, Email} import javax.inject.{Inject, Named} import play.api.Logging import scala.concurrent.{ExecutionContext, Future} import scala.util.Failure class MailService @Inject()(@Named("SendGridApiKey") apiKey: String)(implicit ec: ExecutionContext) extends Logging { private val sendgrid = new SendGrid(apiKey) private val from = "[email protected]" def sendResetPasswordEmail(email: String, url: String): Unit = { sendHtmlEmail( from, email, "Site password reset", "<html><body><p>Please <a href='" + url + "' rel='nofollow'>click here</a> to reset your password.</p><p>If you didn't request password reset, please ignore this mail.</p></body></html>", "Site password reset" ) } def sendActivateAccountEmail(email: String, url: String): Unit = { sendHtmlEmail( from, email, "Account confirmation", "<html><body><p>Please <a href='" + url + "' rel='nofollow'>click here</a> to confirm your account.</p><p>If you didn't create an account using this e-mail address, please ignore this message.</p></body></html>", "Account confirmation" ) } private def sendHtmlEmail(from: String, to: String, subject: String, htmlContent: String, loggerNote: String) = { val fromEmail = new Email(from) val toEmail = new Email(to) val content = new Content("text/html", htmlContent) val mail = new Mail(fromEmail, subject, toEmail, content) Future { val request = new Request() request.setMethod(Method.POST) request.setEndpoint("mail/send") request.setBody(mail.build) val response = sendgrid.api(request) logger.info(s"Sending $loggerNote email to $to. Status: ${response.getStatusCode}") }.onComplete { case Failure(e) => logger.error(s"Error on sending $loggerNote email to $to", e) case _ => logger.info(s"$loggerNote email has been sent to $to") } } }
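sendHtmlEmail wraps the blocking SendGrid call in Future { ... } and attaches onComplete purely for logging, a fire-and-forget pattern. A stripped-down sketch of that shape, with the SendGrid specifics replaced by a hypothetical blockingSend:

import scala.concurrent.{ExecutionContext, Future}
import scala.util.{Failure, Success}

object BackgroundSend {
  // Stand-in for the blocking provider call (e.g. the SendGrid request above)
  def blockingSend(to: String): Unit = ()

  // Fire-and-forget: nothing waits on the Future, onComplete only logs the outcome
  def sendInBackground(to: String)(implicit ec: ExecutionContext): Unit =
    Future(blockingSend(to)).onComplete {
      case Failure(e) => println(s"sending to $to failed: $e")
      case Success(_) => println(s"mail sent to $to")
    }
}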
Example 137
Source File: ReCaptchaService.scala From silhouette-vuejs-app with Apache License 2.0 | 5 votes |
package models.services.captcha import javax.inject.Inject import play.api.libs.json.{JsPath, Reads} import play.api.libs.functional.syntax._ import play.api.libs.ws.WSClient import scala.concurrent.{ExecutionContext, Future} trait CaptchaService { def validate(response: String, remoteIp: String): Future[Boolean] } class ReCaptchaService @Inject()(config: ReCaptchaConfig, ws: WSClient)(implicit ec: ExecutionContext) extends CaptchaService { def validate(recaptchaResponse: String, remoteIp: String) = { ws .url("https://www.google.com/recaptcha/api/siteverify") .withHttpHeaders("Accept" -> "application/json") .withQueryStringParameters( "secret" -> config.secretKey, "response" -> recaptchaResponse, "remoteip" -> remoteIp ) .get() .map(r => r.json.as[ReCaptchaValidationResponse]) .map { r => val e = r.errors.getOrElse(Vector()) if (e.isEmpty) { r.success } else { throw new Exception("Failed to retrieve reCaptcha confirmed response: " + e.mkString(";")) } } } } case class ReCaptchaConfig(secretKey: String) private[captcha] case class ReCaptchaValidationResponse(success: Boolean, errors: Option[Vector[String]]) private[captcha] object ReCaptchaValidationResponse { implicit val reads: Reads[ReCaptchaValidationResponse] = ( (JsPath \ "success").read[Boolean] and (JsPath \ "error-codes").readNullable[Vector[String]] ) (ReCaptchaValidationResponse.apply _) }
Example 138
Source File: AbstractAuthController.scala From silhouette-vuejs-app with Apache License 2.0 | 5 votes |
package controllers import com.mohiva.play.silhouette.api.Authenticator.Implicits._ import com.mohiva.play.silhouette.api._ import com.mohiva.play.silhouette.api.services.AuthenticatorResult import com.mohiva.play.silhouette.api.util.Clock import models.User import net.ceedubs.ficus.Ficus._ import play.api.Configuration import play.api.i18n.I18nSupport import play.api.libs.json.Json import play.api.mvc._ import utils.auth.DefaultEnv import scala.concurrent.duration._ import scala.concurrent.{ExecutionContext, Future} protected def authenticateUser(user: User, loginInfo: LoginInfo, rememberMe: Boolean)(implicit request: Request[_]): Future[AuthenticatorResult] = { val c = configuration.underlying silhouette.env.authenticatorService.create(loginInfo).map { case authenticator if rememberMe => authenticator.copy( expirationDateTime = clock.now + c.as[FiniteDuration]("silhouette.authenticator.rememberMe.authenticatorExpiry"), idleTimeout = c.getAs[FiniteDuration]("silhouette.authenticator.rememberMe.authenticatorIdleTimeout") ) case authenticator => authenticator }.flatMap { authenticator => silhouette.env.eventBus.publish(LoginEvent(user, request)) silhouette.env.authenticatorService.init(authenticator).flatMap { token => silhouette.env.authenticatorService.embed(token, Ok(Json.obj( "id" -> user.userID, "token" -> token, "firstName" -> user.firstName, "lastName" -> user.lastName, "role" -> user.role, "email" -> user.email ))) } } } }
Example 139
Source File: UserController.scala From silhouette-vuejs-app with Apache License 2.0 | 5 votes |
package controllers import java.util.UUID import com.mohiva.play.silhouette.api.Silhouette import javax.inject.Inject import models.services.AuthenticateService import play.api.libs.json.Json import play.api.mvc.{AbstractController, ControllerComponents} import utils.auth.DefaultEnv import scala.concurrent.{ExecutionContext, Future} class UserController @Inject()(silhouette: Silhouette[DefaultEnv], authenticateService: AuthenticateService, components: ControllerComponents)(implicit ec: ExecutionContext) extends AbstractController(components) { def fetchLinkedAccounts(userId: UUID) = silhouette.SecuredAction.async { implicit req => if (userId == req.identity.userID) { authenticateService.getAuthenticationProviders(req.identity.email.get).map(providers => Ok(Json.toJson(providers))) } else { Future.successful(Forbidden) } } }
Example 140
Source File: ChangePasswordController.scala From silhouette-vuejs-app with Apache License 2.0 | 5 votes |
package controllers import javax.inject.Inject import com.mohiva.play.silhouette.api._ import com.mohiva.play.silhouette.api.actions.SecuredRequest import com.mohiva.play.silhouette.api.exceptions.ProviderException import com.mohiva.play.silhouette.api.repositories.AuthInfoRepository import com.mohiva.play.silhouette.api.util.{Credentials, PasswordHasherRegistry, PasswordInfo} import com.mohiva.play.silhouette.impl.providers.CredentialsProvider import forms.ChangePasswordForm import play.api.i18n.{I18nSupport, Messages} import play.api.libs.json.Json import play.api.mvc.{AbstractController, AnyContent, ControllerComponents} import utils.auth.{DefaultEnv, HasSignUpMethod} import scala.concurrent.{ExecutionContext, Future} def submit = silhouette.SecuredAction(hasSignUpMethod[DefaultEnv#A](CredentialsProvider.ID)).async { implicit request: SecuredRequest[DefaultEnv, AnyContent] => ChangePasswordForm.form.bindFromRequest.fold( form => Future.successful(BadRequest), password => { val (currentPassword, newPassword) = password val credentials = Credentials(request.identity.email.getOrElse(""), currentPassword) credentialsProvider.authenticate(credentials).flatMap { loginInfo => val passwordInfo = passwordHasherRegistry.current.hash(newPassword) authInfoRepository.update[PasswordInfo](loginInfo, passwordInfo).map { _ => Ok(Json.obj("success" -> Messages("password.changed"))) } }.recover { case _: ProviderException => BadRequest(Json.obj("message" -> Messages("current.password.invalid"))) } } ) } }
Example 141
Source File: SignInController.scala From silhouette-vuejs-app with Apache License 2.0 | 5 votes |
package controllers import com.mohiva.play.silhouette.api._ import com.mohiva.play.silhouette.api.util.Clock import com.mohiva.play.silhouette.impl.providers.CredentialsProvider import forms.SignInForm import javax.inject.Inject import models.services._ import play.api.Configuration import play.api.i18n.I18nSupport import play.api.libs.json.Json import play.api.mvc.{AnyContent, Request} import utils.auth.DefaultEnv import scala.concurrent.{ExecutionContext, Future} def submit = silhouette.UnsecuredAction.async { implicit request: Request[AnyContent] => SignInForm.form.bindFromRequest.fold( _ => Future.successful(BadRequest), data => { authenticateService.credentials(data.email, data.password).flatMap { case Success(user) => val loginInfo = LoginInfo(CredentialsProvider.ID, user.email.get) authenticateUser(user, loginInfo, data.rememberMe) case InvalidPassword(attemptsAllowed) => Future.successful(Forbidden(Json.obj("errorCode" -> "InvalidPassword", "attemptsAllowed" -> attemptsAllowed))) case NonActivatedUserEmail => Future.successful(Forbidden(Json.obj("errorCode" -> "NonActivatedUserEmail"))) case UserNotFound => Future.successful(Forbidden(Json.obj("errorCode" -> "UserNotFound"))) case ToManyAuthenticateRequests(nextAllowedAttemptTime) => Future.successful(TooManyRequests(Json.obj("errorCode" -> "TooManyRequests", "nextAllowedAttemptTime" -> nextAllowedAttemptTime))) } .recover { case e => logger.error(s"Sign in error email = ${data.email}", e) InternalServerError(Json.obj("errorCode" -> "SystemError")) } } ) } }
Example 142
Source File: ResetPasswordController.scala From silhouette-vuejs-app with Apache License 2.0 | 5 votes |
package controllers import java.util.UUID import javax.inject.Inject import com.mohiva.play.silhouette.api._ import com.mohiva.play.silhouette.api.repositories.AuthInfoRepository import com.mohiva.play.silhouette.api.util.{PasswordHasherRegistry, PasswordInfo} import com.mohiva.play.silhouette.impl.providers.CredentialsProvider import forms.ResetPasswordForm import models.services.{AuthTokenService, UserService} import play.api.libs.json.Json import play.api.mvc.{AbstractController, AnyContent, ControllerComponents, Request} import utils.auth.DefaultEnv import scala.concurrent.{ExecutionContext, Future} def submit(token: UUID) = silhouette.UnsecuredAction.async { implicit request: Request[AnyContent] => authTokenService.validate(token).flatMap { case Some(authToken) => ResetPasswordForm.form.bindFromRequest.fold( _ => Future.successful(BadRequest), password => userService.retrieveUserLoginInfo(authToken.userID, CredentialsProvider.ID).flatMap { case Some((user, loginInfo)) => val passwordInfo = passwordHasherRegistry.current.hash(password) authInfoRepository.update[PasswordInfo](loginInfo, passwordInfo).map { _ => Ok } case _ => Future.successful(BadRequest(Json.obj("message" -> "Reset token is either invalid or has expired. Please reset your password again."))) } ) case None => Future.successful(BadRequest(Json.obj("message" -> "Reset token is either invalid or has expired. Please reset your password again."))) } } }
Example 143
Source File: SignUpController.scala From silhouette-vuejs-app with Apache License 2.0 | 5 votes |
package controllers import java.util.UUID import javax.inject.Inject import com.mohiva.play.silhouette.api._ import forms.SignUpForm import models.services._ import play.api.i18n.I18nSupport import play.api.mvc.{AbstractController, AnyContent, ControllerComponents, Request} import utils.auth.DefaultEnv import scala.concurrent.{ExecutionContext, Future} def submit = silhouette.UnsecuredAction.async { implicit request: Request[AnyContent] => SignUpForm.form.bindFromRequest.fold( _ => Future.successful(BadRequest), data => { val activationUrlProvider: UUID => String = authTokenId => routes.ActivateAccountController.activate(authTokenId).absoluteURL() signUpService.signUpByCredentials(data, request.remoteAddress, activationUrlProvider).map { case UserCreated(user) => silhouette.env.eventBus.publish(SignUpEvent(user, request)) Ok case UserAlreadyExists => Conflict case InvalidRecaptchaCode => BadRequest("Captcha code is not correct") } } ) } }
Example 144
Source File: ActivateAccountController.scala From silhouette-vuejs-app with Apache License 2.0 | 5 votes |
package controllers import java.net.URLDecoder import java.util.UUID import javax.inject.Inject import com.mohiva.play.silhouette.api._ import com.mohiva.play.silhouette.impl.providers.CredentialsProvider import models.services.{AuthTokenService, MailService, UserService} import play.api.mvc.{AbstractController, ControllerComponents} import utils.auth.DefaultEnv import scala.concurrent.{ExecutionContext, Future} def activate(token: UUID) = silhouette.UnsecuredAction.async { authTokenService.validate(token).flatMap { case Some(authToken) => userService.retrieveUserLoginInfo(authToken.userID, CredentialsProvider.ID).flatMap { case Some((user, _)) => userService.setEmailActivated(user).map { _ => Redirect("/signin?message=emailVerified") } case _ => Future.successful(Redirect("/error?message=activationTokenInvalid")) } case None => Future.successful(Redirect("/error?message=activationTokenInvalid")) } } }
Example 145
Source File: ForgotPasswordController.scala From silhouette-vuejs-app with Apache License 2.0 | 5 votes |
package controllers import javax.inject.Inject import com.mohiva.play.silhouette.api._ import com.mohiva.play.silhouette.impl.providers.CredentialsProvider import forms.ForgotPasswordForm import models.services.{AuthTokenService, MailService, UserService} import play.api.mvc._ import utils.auth.DefaultEnv import scala.concurrent.{ExecutionContext, Future} def submit = silhouette.UnsecuredAction.async { implicit request: Request[AnyContent] => ForgotPasswordForm.form.bindFromRequest.fold( _ => Future.successful(BadRequest), email => { val loginInfo = LoginInfo(CredentialsProvider.ID, email) userService.retrieve(loginInfo).flatMap { case Some(user) if user.email.isDefined => authTokenService.create(user.userID).map { authToken => val route = Call("GET", s"/reset-password?token=${authToken.id.toString}") mailService.sendResetPasswordEmail(user.email.get, route.absoluteURL()) Ok } case None => Future.successful(Ok) } } ) } }
Example 146
Source File: RetryingPubSubClient.scala From akka-cloudpubsub with Apache License 2.0 | 5 votes |
package com.qubit.pubsub.client.retry import atmos.RetryPolicy import atmos.dsl._ import com.qubit.pubsub.client.{GcpProject, _} import scala.concurrent.{ExecutionContext, Future} class RetryingPubSubClient(underlying: PubSubClient)( implicit val policy: RetryPolicy = RetryPolicyDefaults.retryPolicy, implicit val ec: ExecutionContext = RetryPolicyDefaults.retryExecCtx) extends PubSubClient { override def topicExists(pubSubTopic: PubSubTopic): Future[Boolean] = retryAsync() { underlying.topicExists(pubSubTopic) } override def createTopic(pubSubTopic: PubSubTopic): Future[Boolean] = retryAsync() { underlying.createTopic(pubSubTopic) } override def listTopics(project: GcpProject): Future[Seq[String]] = retryAsync() { underlying.listTopics(project) } override def listTopicSubscriptions( pubSubTopic: PubSubTopic): Future[Seq[String]] = retryAsync() { underlying.listTopicSubscriptions(pubSubTopic) } override def pull( subscription: PubSubSubscription, maxMessages: Int, returnImmediately: Boolean): Future[Seq[ReceivedPubSubMessage]] = retryAsync() { underlying.pull(subscription, maxMessages, returnImmediately) } override def deleteSubscription( subscription: PubSubSubscription): Future[Boolean] = retryAsync() { underlying.deleteSubscription(subscription) } override def publish(pubSubTopic: PubSubTopic, payload: Seq[PubSubMessage]): Future[Seq[String]] = retryAsync() { underlying.publish(pubSubTopic, payload) } override def subscriptionExists( subscription: PubSubSubscription): Future[Boolean] = retryAsync() { underlying.subscriptionExists(subscription) } override def deleteTopic(pubSubTopic: PubSubTopic): Future[Boolean] = retryAsync() { underlying.deleteTopic(pubSubTopic) } override def listSubscriptions(project: GcpProject): Future[Seq[String]] = retryAsync() { underlying.listSubscriptions(project) } override def createSubscription(subscription: PubSubSubscription, topic: PubSubTopic, ackDeadlineSeconds: Int): Future[Boolean] = retryAsync() { underlying.createSubscription(subscription, topic, ackDeadlineSeconds) } override def modifyAckDeadline(subscription: PubSubSubscription, ackDeadlineSeconds: Int, ackIds: Seq[String]): Future[Boolean] = retryAsync() { underlying.modifyAckDeadline(subscription, ackDeadlineSeconds, ackIds) } override def ack(subscription: PubSubSubscription, ackIds: Seq[String]): Future[Boolean] = retryAsync() { underlying.ack(subscription, ackIds) } } object RetryingPubSubClient { def apply(underlying: PubSubClient) = new RetryingPubSubClient(underlying) }
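Every method above simply wraps the corresponding call on the underlying client in retryAsync(), so the retry behaviour comes entirely from the atmos RetryPolicy rather than from the client itself. For readers unfamiliar with atmos, a rough standard-library-only sketch of the same idea (not the atmos API used here) is a recursive recoverWith:

import scala.concurrent.{ExecutionContext, Future}

object RetrySketch {
  // Re-run the Future-producing block up to maxAttempts times, keeping the last failure.
  def retry[A](maxAttempts: Int)(op: () => Future[A])(implicit ec: ExecutionContext): Future[A] =
    op().recoverWith {
      case _ if maxAttempts > 1 => retry(maxAttempts - 1)(op)
    }
}

// e.g. RetrySketch.retry(3)(() => underlying.topicExists(pubSubTopic))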
Example 147
Source File: controllers.scala From metarpheus with MIT License | 5 votes |
package io.buildo.baseexample package controllers import models._ import wiro.OperationParameters import wiro.annotation._ import scala.concurrent.{Future, ExecutionContext} @path("campings") trait CampingController { @command def create(camping: Camping, parameters: OperationParameters): Future[Either[String, Camping]] @command @metarpheusIgnore def ignoreMe(ignore: IgnoreMe): Future[Either[String, String]] @query def taglessFinalRouteV1( input: String ): F[String] @query def taglessFinalRouteV2( input: String ): F[E[Exception, String]] } class CampingControllerImpl( implicit executionContext: ExecutionContext ) extends CampingController { @query def getByCoolnessAndSize( coolness: String, size: Int, nickname: String ): Future[Either[String, List[Camping]]] = ??? @query def getBySizeAndDistance(size: Int, distance: Int): Future[Either[String, List[Camping]]] = ??? @query def getById(id: Int): Future[Either[String, Camping]] = ??? @query def getByTypedId(id: `Id`[Camping]): Future[Either[String, Camping]] = ??? @query def getByHasBeach(hasBeach: Boolean): Future[Either[String, List[Camping]]] = ??? @command def create(camping: Camping): Future[Either[String, Camping]] = ??? @command def ignoreMe(ignore: IgnoreMe): Future[Either[String, String]] = ??? @query def taglessFinalRouteV1(input: String): F[String] = ??? @query def taglessFinalRouteV2(input: String): F[E[Exception, String]] = ??? }
Example 148
package net.fehmicansaglam.pide import akka.actor.ActorRef import akka.stream.scaladsl.Source import akka.util.Timeout import net.fehmicansaglam.bson.BsonDocument import net.fehmicansaglam.bson.BsonDsl._ import net.fehmicansaglam.tepkin.MongoCollection import net.fehmicansaglam.tepkin.protocol.WriteConcern import net.fehmicansaglam.tepkin.protocol.result.{InsertResult, UpdateResult} import scala.concurrent.{ExecutionContext, Future} trait Dao[ID, E <: Entity[ID]] { def collection: MongoCollection def find(query: BsonDocument, fields: Option[BsonDocument] = None, skip: Int = 0, tailable: Boolean = false, batchMultiplier: Int = 1000) (implicit pide: Pide[ID, E], timeout: Timeout): Source[List[E], ActorRef] = { collection.find(query, fields, skip, tailable, batchMultiplier).map(_.map(pide.read)) } def findAndRemove(query: Option[BsonDocument] = None, sort: Option[BsonDocument] = None, fields: Option[Seq[String]] = None) (implicit pide: Pide[ID, E], ec: ExecutionContext, timeout: Timeout): Future[Option[E]] = { collection.findAndRemove(query, sort, fields).map(_.map(pide.read)) } def findAndUpdate(query: Option[BsonDocument] = None, sort: Option[BsonDocument] = None, update: BsonDocument, returnNew: Boolean = false, fields: Option[Seq[String]] = None, upsert: Boolean = false) (implicit pide: Pide[ID, E], ec: ExecutionContext, timeout: Timeout): Future[Option[E]] = { collection.findAndUpdate(query, sort, update, returnNew, fields, upsert).map(_.map(pide.read)) } def findOne(query: BsonDocument = BsonDocument.empty, skip: Int = 0) (implicit pide: Pide[ID, E], ec: ExecutionContext, timeout: Timeout): Future[Option[E]] = { collection.findOne(query, skip).map(_.map(pide.read)) } def findRandom(query: Option[BsonDocument] = None) (implicit pide: Pide[_, E], ec: ExecutionContext, timeout: Timeout): Future[Option[E]] = { collection.findRandom(query).map(_.map(pide.read)) } def insert(entity: E, writeConcern: Option[WriteConcern] = None) (implicit pide: Pide[ID, E], ec: ExecutionContext, timeout: Timeout): Future[InsertResult] = { val document = pide.write(entity) writeConcern match { case Some(wc) => collection.insert(document, wc) case None => collection.insert(document) } } def update(entity: E, writeConcern: Option[WriteConcern] = None) (implicit pide: Pide[ID, E], ec: ExecutionContext, timeout: Timeout): Future[UpdateResult] = { collection.update( query = "_id" := pide.id(entity.id), update = pide.write(entity), writeConcern = writeConcern) } }
Example 149
Source File: NotificationSender.scala From teamcity-slack with MIT License | 5 votes |
package com.fpd.teamcity.slack import com.fpd.teamcity.slack.ConfigManager.BuildSetting import com.fpd.teamcity.slack.ConfigManager.BuildSettingFlag.BuildSettingFlag import com.fpd.teamcity.slack.SlackGateway.{Destination, MessageSent, SlackChannel, SlackUser, attachmentToSlackMessage} import jetbrains.buildServer.serverSide.{SBuild, SQueuedBuild} import scala.collection.mutable import scala.concurrent.Future trait NotificationSender { val configManager: ConfigManager val gateway: SlackGateway val messageBuilderFactory: MessageBuilderFactory import Helpers.Implicits._ type SendResult = Vector[Future[MessageSent]] private def sendAsAttachment = configManager.sendAsAttachment.exists(x ⇒ x) def send(build: SBuild, flags: Set[BuildSettingFlag]): Future[Vector[MessageSent]] = { val settings = prepareSettings(build, flags) lazy val emails = build.committeeEmails lazy val messageBuilder = messageBuilderFactory.createForBuild(build) lazy val sendPersonal = shouldSendPersonal(build) val result = settings.foldLeft(Vector(): SendResult) { (acc, setting) ⇒ val attachment = messageBuilder.compile(setting.messageTemplate, setting) val destinations = mutable.Set.empty[Destination] if (build.isPersonal) { // If build is personal we need inform only build's owner if needed val email = build.getOwner.getEmail if (sendPersonal && email.length > 0) { destinations += SlackUser(email) } } else { if (setting.slackChannel.nonEmpty) { destinations += SlackChannel(setting.slackChannel) } if (setting.notifyCommitter || sendPersonal) { emails.foreach { email ⇒ destinations += SlackUser(email) } } } acc ++ destinations.toVector.map(x ⇒ gateway.sendMessage(x, attachmentToSlackMessage(attachment, sendAsAttachment)) ) } implicit val ec = scala.concurrent.ExecutionContext.global Future.sequence(result) } def send(build: SQueuedBuild, flags: Set[BuildSettingFlag]): Future[Vector[MessageSent]] = { val settings = prepareSettings(build, flags) lazy val messageBuilder = messageBuilderFactory.createForBuild(build) val result = settings.foldLeft(Vector(): SendResult) { (acc, setting) ⇒ val attachment = messageBuilder.compile(setting.messageTemplate, setting) val destinations = mutable.Set.empty[Destination] if (!build.isPersonal && setting.slackChannel.nonEmpty) { destinations += SlackChannel(setting.slackChannel) } acc ++ destinations.toVector.map(x ⇒ gateway.sendMessage(x, attachmentToSlackMessage(attachment, sendAsAttachment)) ) } implicit val ec = scala.concurrent.ExecutionContext.global Future.sequence(result) } def shouldSendPersonal(build: SBuild): Boolean = build.getBuildStatus.isFailed && configManager.personalEnabled.exists(x ⇒ x) def prepareSettings(build: SBuild, flags: Set[BuildSettingFlag]): Iterable[BuildSetting] = configManager.buildSettingList(build.getBuildTypeId).values.filter { x ⇒ x.pureFlags.intersect(flags).nonEmpty && build.matchBranch(x.branchMask) } def prepareSettings(build: SQueuedBuild, flags: Set[BuildSettingFlag]): Iterable[BuildSetting] = configManager.buildSettingList(build.getBuildTypeId).values.filter { x ⇒ x.pureFlags.intersect(flags).nonEmpty && build.matchBranch(x.branchMask) } }
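Both send variants accumulate one Future[MessageSent] per destination and then collapse the Vector with Future.sequence, so callers get a single Future that completes once every Slack message has been attempted. In isolation the combinator looks like this (the values are placeholders):

import scala.concurrent.{ExecutionContext, Future}

object SequenceSketch {
  implicit val ec: ExecutionContext = ExecutionContext.global

  val sends: Vector[Future[String]] =
    Vector(Future("sent to #builds"), Future("sent to dev@example.com"))

  // One Future that completes when all of the independent sends have completed.
  val all: Future[Vector[String]] = Future.sequence(sends)
}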
Example 150
Source File: PublishApi.scala From sns with Apache License 2.0 | 5 votes |
package me.snov.sns.api import akka.actor.ActorRef import akka.event.Logging import akka.http.scaladsl.model.HttpResponse import akka.http.scaladsl.server.Directives._ import akka.http.scaladsl.server.Route import akka.pattern.ask import akka.util.Timeout import me.snov.sns.actor.PublishActor.CmdPublish import me.snov.sns.model.{Message, MessageAttribute, TopicNotFoundException} import me.snov.sns.response.PublishResponse import spray.json.DefaultJsonProtocol._ import spray.json._ import scala.concurrent.{ExecutionContext, Future} case class InvalidTopicArnException(msg: String) extends Exception(msg) object PublishApi { private val arnPattern = """([\w+_:-]{1,512})""".r def route(actorRef: ActorRef)(implicit timeout: Timeout, ec: ExecutionContext): Route = { pathSingleSlash { formField('Action ! "Publish") { formFieldSeq { fields => val messageAttributes: Map[String, MessageAttribute] = MessageAttribute.parse(fields) formFields('TopicArn.?, 'TargetArn.?, 'MessageStructure.?, 'Message) { (topicArnMaybe, targetArnMaybe, messageStructure, message) => try { topicArn(topicArnMaybe, targetArnMaybe) match { case arnPattern(topic) => complete { val bodies = messageStructure match { case Some("json") => message.parseJson.asJsObject.convertTo[Map[String, String]] case Some(_) => throw new RuntimeException("Invalid MessageStructure value"); case None => Map("default" -> message) } (actorRef ? CmdPublish(topic, bodies, messageAttributes)).collect { case m: Message => PublishResponse.publish(m) }.recover { case t: TopicNotFoundException => PublishResponse.topicNotFound(t.getMessage) case t: Throwable => HttpResponse(500, entity = t.getMessage) } } case _ => complete(HttpResponse(400, entity = "Invalid topic ARN")) } } catch { case e: InvalidTopicArnException => complete(HttpResponse(400, entity = e.getMessage)) case e: RuntimeException => complete(HttpResponse(400, entity = e.getMessage)) } } } ~ complete(HttpResponse(400, entity = "TopicArn is required")) } } } private def topicArn(topicArnMaybe: Option[String], targetArnMaybe: Option[String]): String = { topicArnMaybe.getOrElse(targetArnMaybe.getOrElse(throw InvalidTopicArnException("Neither TopicArn nor TargetArn provided"))) } }
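The publish branch above treats the ask result as a Future[Any]: collect narrows it to the expected Message and builds the success response, while recover maps known failures to HTTP error responses. The same collect/recover shape with plain Futures (the Reply type below is hypothetical):

import scala.concurrent.{ExecutionContext, Future}

object CollectRecoverSketch {
  implicit val ec: ExecutionContext = ExecutionContext.global

  final case class Reply(text: String)

  val raw: Future[Any] = Future(Reply("published"))

  val handled: Future[String] =
    raw.collect { case Reply(text) => s"200 $text" }            // narrow the untyped result
       .recover { case t: Throwable => s"500 ${t.getMessage}" } // map failures to a response
}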
Example 151
Source File: DonutStore.scala From scala-for-beginners with Apache License 2.0 | 5 votes |
package com.allaboutscala.learn.scala.seven.days.day.six import scala.concurrent.Future class DonutStore { def favoriteDonut(): String = "vanilla donut" def donuts(): Seq[String] = Seq("vanilla donut", "plain donut", "glazed donut") def printName(): Unit = { throw new IllegalStateException("Some Error") } def donutPrice(donut: String): Option[Double] = { val prices = Map( "vanilla donut" -> 2.0, "plain donut" -> 1.0, "glazed donut" -> 3.0 ) val priceOfDonut = prices.get(donut) priceOfDonut.map { price => price * (1 - discountByDonut(donut)) } } private def discountByDonut(donut: String): Double = { val discounts = Map( "vanilla donut" -> 0.2, "plain donut" -> 0.1, "glazed donut" -> 0.3 ) discounts.getOrElse(donut, 0) } import scala.concurrent.ExecutionContext.Implicits.global def donutSalesTax(donut: String): Future[Double] = Future { Thread.sleep(3000) // assume an external call to calculate sales tax 0.15 } }
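A possible way to use the class above, combining the synchronous donutPrice lookup with the Future returned by donutSalesTax (this usage snippet is an illustrative sketch; the printed value is approximate):

import scala.concurrent.{Await, Future}
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.duration._

object DonutStoreUsage extends App {
  val store = new DonutStore()

  // Price after discount, multiplied by (1 + sales tax) once the async tax lookup completes.
  val totalPrice: Future[Option[Double]] =
    store.donutSalesTax("vanilla donut").map { tax =>
      store.donutPrice("vanilla donut").map(price => price * (1 + tax))
    }

  println(Await.result(totalPrice, 5.seconds)) // approximately Some(1.84) with the values above
}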
Example 152
Source File: DataApi.scala From scala-for-beginners with Apache License 2.0 | 5 votes |
package com.allaboutscala.donutstore.data import com.allaboutscala.donutstore.common.{Donut, Donuts} import com.typesafe.scalalogging.LazyLogging import scala.collection.concurrent.TrieMap import scala.concurrent.Future import scala.concurrent.ExecutionContext.Implicits.global private val donutDatabase = TrieMap.empty[String, Donut] override def createDonut(donut: Donut): Future[String] = Future { logger.info(s"Create donut = $donut") val donutExists = donutDatabase.putIfAbsent(donut.name, donut) donutExists match { case Some(d) => s"${d.name} already exists in database." case None => s"${donut.name} has been added to the database." } } override def fetchDonuts(): Future[Donuts] = Future { logger.info("Fetching all donuts") Donuts(donutDatabase.values.toSeq) } override def updateDonutIngredients(donut: Donut): Future[String] = Future { logger.info(s"Updating ingredients = ${donut.ingredients} for donutName = ${donut.name}") val someDonut = donutDatabase.get(donut.name) someDonut match { case Some(d) => donutDatabase.replace(d.name, donut) s"Updated donut ingredients for donutName = ${donut.name}" case None => s"Donut ${donut.name} does not exist in database. The update operation was not run." } } override def deleteDonut(donutName: String): Future[String] = Future { logger.info(s"Deleting donut = $donutName") val someDonut = donutDatabase.get(donutName) someDonut match { case Some(d) => donutDatabase.remove(d.name) s"Deleted $d from database." case None => s"$donutName does not exist in database. The delete operation was not run." } } }
Example 153
Source File: DataApi.scala From scala-for-beginners with Apache License 2.0 | 5 votes |
package com.allaboutscala.donutstore.data import com.allaboutscala.donutstore.common.{Donut, Donuts} import com.typesafe.scalalogging.LazyLogging import scala.collection.concurrent.TrieMap import scala.concurrent.ExecutionContext.Implicits.global import scala.concurrent.Future private val donutDatabase = TrieMap.empty[String, Donut] override def createDonut(donut: Donut): Future[String] = Future { logger.info(s"Create donut = $donut") val donutExists = donutDatabase.putIfAbsent(donut.name, donut) donutExists match { case Some(d) => s"${d.name} already exists in database." case None => s"${donut.name} has been added to the database." } } override def fetchDonuts(): Future[Donuts] = Future { logger.info("Fetching all donuts") Donuts(donutDatabase.values.toSeq) } override def updateDonutIngredients(donut: Donut): Future[String] = Future { logger.info(s"Updating ingredients = ${donut.ingredients} for donutName = ${donut.name}") val someDonut = donutDatabase.get(donut.name) someDonut match { case Some(d) => donutDatabase.replace(d.name, donut) s"Updated donut ingredients for donutName = ${donut.name}" case None => s"Donut ${donut.name} does not exist in database. The update operation was not run." } } override def deleteDonut(donutName: String): Future[String] = Future { logger.info(s"Deleting donut = $donutName") val someDonut = donutDatabase.get(donutName) someDonut match { case Some(d) => donutDatabase.remove(d.name) s"Deleted ${d.name} from database." case None => s"$donutName does not exist in database. The delete operation was not run." } } }
Example 154
Source File: LoggingTestsJournal.scala From freestyle with Apache License 2.0 | 5 votes |
package freestyle.tagless import cats._ import cats.syntax.flatMap._ import cats.syntax.functor._ import freestyle.tagless._ import freestyle.tagless.algebras._ import freestyle.tagless.logging.LoggingM import journal.Logger import org.scalatest.{AsyncWordSpec, Matchers} import scala.concurrent.{ExecutionContext, Future} import scala.util.control.NoStackTrace class LoggingTestsJournal extends AsyncWordSpec with Matchers { implicit override def executionContext = ExecutionContext.Implicits.global case object Cause extends Exception("kaboom") with NoStackTrace "Logging Freestyle tagless integration journal" should { import cats.instances.future._ import freestyle.tagless.loggingJVM.journal.implicits._ "allow a log message to be interleaved inside a program monadic flow" in { def program[F[_]: Monad](implicit app: App[F]) = for { a <- app.nonLogging.x _ <- app.loggingM.debug("Debug Message", sourceAndLineInfo = true) _ <- app.loggingM.debugWithCause("Debug Message", Cause) _ <- app.loggingM.error("Error Message") _ <- app.loggingM.errorWithCause("Error Message", Cause) _ <- app.loggingM.info("Info Message") _ <- app.loggingM.infoWithCause("Info Message", Cause) _ <- app.loggingM.warn("Warning Message") _ <- app.loggingM.warnWithCause("Warning Message", Cause) b <- Monad[F].pure(1) } yield a + b program[Future] map { _ shouldBe 2 } } "allow injecting a Logger instance" in { def program[F[_]: Monad](implicit app: App[F]) = for { a <- Monad[F].pure(1) _ <- app.loggingM.info("Info Message") _ <- app.loggingM.error("Error Message") b <- Monad[F].pure(1) } yield a + b implicit val logger: Logger = journal.Logger("Potatoes") program[Future] map { _ shouldBe 2 } } } }
Example 155
Source File: LoggingTestsLog4s.scala From freestyle with Apache License 2.0 | 5 votes |
package freestyle.tagless import cats._ import cats.syntax.flatMap._ import cats.syntax.functor._ import freestyle.tagless._ import freestyle.tagless.algebras._ import freestyle.tagless.logging.LoggingM import journal.Logger import org.scalatest.{AsyncWordSpec, Matchers} import scala.concurrent.{ExecutionContext, Future} import scala.util.control.NoStackTrace class LoggingTestsLog4s extends AsyncWordSpec with Matchers { implicit override def executionContext = ExecutionContext.Implicits.global case object Cause extends Exception("kaboom") with NoStackTrace "Logging Freestyle tagless integration log4s" should { import cats.instances.future._ import freestyle.tagless.loggingJVM.log4s.implicits._ "allow a log message to be interleaved inside a program monadic flow" in { def program[F[_]: Monad](implicit app: App[F]) = for { a <- app.nonLogging.x _ <- app.loggingM.debug("Debug Message", sourceAndLineInfo = true) _ <- app.loggingM.debugWithCause("Debug Message", Cause) _ <- app.loggingM.error("Error Message") _ <- app.loggingM.errorWithCause("Error Message", Cause) _ <- app.loggingM.info("Info Message") _ <- app.loggingM.infoWithCause("Info Message", Cause) _ <- app.loggingM.warn("Warning Message") _ <- app.loggingM.warnWithCause("Warning Message", Cause) b <- Monad[F].pure(1) } yield a + b program[Future] map { _ shouldBe 2 } } "allow injecting a Logger instance" in { def program[F[_]: Monad](implicit app: App[F]) = for { a <- Monad[F].pure(1) _ <- app.loggingM.info("Info Message") _ <- app.loggingM.error("Error Message") b <- Monad[F].pure(1) } yield a + b implicit val logger: Logger = journal.Logger("Potatoes") program[Future] map { _ shouldBe 2 } } } }
Example 156
Source File: LoggingTestsJournal.scala From freestyle with Apache License 2.0 | 5 votes |
package freestyle.free import cats.instances.future._ import cats.{Id, Monad} import freestyle.free.implicits._ import freestyle.free.loggingJVM.implicits._ import org.scalatest.{AsyncWordSpec, Matchers} import scala.concurrent.{ExecutionContext, Future} import scala.util.control.NoStackTrace class LoggingTestsJournal extends AsyncWordSpec with Matchers { implicit override def executionContext = ExecutionContext.Implicits.global import algebras._ "Logging Freestyle free integration journal" should { case object Cause extends Exception("kaboom") with NoStackTrace "allow a log message to be interleaved inside a program monadic flow" in { val program = for { a <- app.nonLogging.x _ <- app.loggingM.debug("Debug Message", sourceAndLineInfo = true) _ <- app.loggingM.debugWithCause("Debug Message", Cause) _ <- app.loggingM.error("Error Message") _ <- app.loggingM.errorWithCause("Error Message", Cause) _ <- app.loggingM.info("Info Message") _ <- app.loggingM.infoWithCause("Info Message", Cause) _ <- app.loggingM.warn("Warning Message") _ <- app.loggingM.warnWithCause("Warning Message", Cause) b <- FreeS.pure(1) } yield a + b program.interpret[Future] map { _ shouldBe 2 } } "not depend on MonadError, thus allowing use of Monads without MonadError, like Id, for test algebras" in { val program = for { a <- app.nonLogging.x _ <- app.loggingM.info("Info Message") _ <- app.loggingM.infoWithCause("Info Message", Cause) b <- FreeS.pure(1) } yield a + b program.interpret[TestAlgebra].run("configHere") shouldBe 2 } "allow injecting a Logger instance" in { val program = for { a <- FreeS.pure(1) _ <- app.loggingM.info("Info Message") _ <- app.loggingM.error("Error Message") b <- FreeS.pure(1) } yield a + b implicit val logger = journal.Logger("Potatoes") program .interpret[TestAlgebra] .run("configHere") shouldEqual 2 } } }
Example 157
Source File: LoggingTestsLog4s.scala From freestyle with Apache License 2.0 | 5 votes |
package freestyle.free import cats.instances.future._ import cats.{Id, Monad} import freestyle.free.implicits._ import freestyle.free.loggingJVM.implicits._ import org.scalatest.{AsyncWordSpec, Matchers} import scala.concurrent.{ExecutionContext, Future} import scala.util.control.NoStackTrace class LoggingTestsLog4s extends AsyncWordSpec with Matchers { implicit override def executionContext = ExecutionContext.Implicits.global import algebras._ "Logging Freestyle free integration log4s" should { case object Cause extends Exception("kaboom") with NoStackTrace "allow a log message to be interleaved inside a program monadic flow" in { val program = for { a <- app.nonLogging.x _ <- app.loggingM.debug("Debug Message", sourceAndLineInfo = true) _ <- app.loggingM.debugWithCause("Debug Message", Cause) _ <- app.loggingM.error("Error Message") _ <- app.loggingM.errorWithCause("Error Message", Cause) _ <- app.loggingM.info("Info Message") _ <- app.loggingM.infoWithCause("Info Message", Cause) _ <- app.loggingM.warn("Warning Message") _ <- app.loggingM.warnWithCause("Warning Message", Cause) b <- FreeS.pure(1) } yield a + b program.interpret[Future] map { _ shouldBe 2 } } "not depend on MonadError, thus allowing use of Monads without MonadError, like Id, for test algebras" in { val program = for { a <- app.nonLogging.x _ <- app.loggingM.info("Info Message") _ <- app.loggingM.infoWithCause("Info Message", Cause) b <- FreeS.pure(1) } yield a + b program.interpret[TestAlgebra].run("configHere") shouldBe 2 } "allow injecting a Logger instance" in { val program = for { a <- FreeS.pure(1) _ <- app.loggingM.info("Info Message") _ <- app.loggingM.error("Error Message") b <- FreeS.pure(1) } yield a + b implicit val logger = journal.Logger("Potatoes") program .interpret[TestAlgebra] .run("configHere") shouldEqual 2 } } }
Example 158
Source File: algebras.scala From freestyle with Apache License 2.0 | 5 votes |
package freestyle.tagless import cats.Id import cats.data.Kleisli import freestyle.tagless.logging.LoggingM import scala.concurrent.Future object algebras { @tagless(true) trait NonLogging { def x: FS[Int] } implicit def nonLoggingFutureHandler: NonLogging.Handler[Future] = new NonLogging.Handler[Future] { def x: Future[Int] = Future.successful(1) } type TestAlgebra[A] = Kleisli[Id, String, A] implicit def nonLoggingTestAlgebraHandler: NonLogging.Handler[TestAlgebra] = new NonLogging.Handler[TestAlgebra] { def x: TestAlgebra[Int] = Kleisli.pure(1) } @module trait App { val nonLogging: NonLogging val loggingM: LoggingM } }
Example 159
Source File: algebras.scala From freestyle with Apache License 2.0 | 5 votes |
package freestyle.free import cats.Id import cats.data.Kleisli import freestyle.free.logging.LoggingM import scala.concurrent.Future object algebras { @free trait NonLogging { def x: FS[Int] } implicit def nonLoggingFutureHandler: NonLogging.Handler[Future] = new NonLogging.Handler[Future] { def x: Future[Int] = Future.successful(1) } type TestAlgebra[A] = Kleisli[Id, String, A] implicit def nonLoggingTestAlgebraHandler: NonLogging.Handler[TestAlgebra] = new NonLogging.Handler[TestAlgebra] { def x: TestAlgebra[Int] = Kleisli.pure(1) } @module trait App { val nonLogging: NonLogging val loggingM: LoggingM } val app = App[App.Op] }
Example 160
Source File: LoggingTests.scala From freestyle with Apache License 2.0 | 5 votes |
package freestyle.tagless import cats.{Applicative, Monad} import cats.syntax.flatMap._ import cats.syntax.functor._ import freestyle.tagless.algebras._ import org.scalatest.{AsyncWordSpec, Matchers} import scala.concurrent.{ExecutionContext, Future} import scala.util.control.NoStackTrace class LoggingTests extends AsyncWordSpec with Matchers { import slogging._ LoggerConfig.factory = FilterLoggerFactory() LoggerConfig.level = LogLevel.TRACE FilterLogger.filter = { // use PrintLogger for all trace statements from sources starting with "foo.bar" case (LogLevel.TRACE, source) if source.startsWith("foo.bar") => PrintLogger // ignore all other trace statements case (LogLevel.TRACE, _) => NullLogger // log all other levels case _ => PrintLogger } implicit override def executionContext = ExecutionContext.Implicits.global case object Cause extends Exception("kaboom") with NoStackTrace "Logging Freestyle tagless integration" should { import cats.instances.future._ import freestyle.tagless.loggingJS.implicits._ "allow a log message to be interleaved inside a program monadic flow" in { def program[M[_]: Monad](implicit app: App[M]) = for { a <- app.nonLogging.x _ <- app.loggingM.debug("Debug Message", sourceAndLineInfo = true) _ <- app.loggingM.debugWithCause("Debug Message", Cause) _ <- app.loggingM.error("Error Message") _ <- app.loggingM.errorWithCause("Error Message", Cause) _ <- app.loggingM.info("Info Message") _ <- app.loggingM.infoWithCause("Info Message", Cause) _ <- app.loggingM.warn("Warning Message") _ <- app.loggingM.warnWithCause("Warning Message", Cause) b <- Applicative[M].pure(1) } yield a + b program[Future] map { _ shouldBe 2 } } "not depend on MonadError, thus allowing use of Monads without MonadError, like Id, for test algebras" in { def program[M[_]: Monad](implicit app: App[M]) = for { a <- app.nonLogging.x _ <- app.loggingM.info("Info Message") _ <- app.loggingM.infoWithCause("Info Message", Cause) b <- Applicative[M].pure(1) } yield a + b program[TestAlgebra].run("configHere") shouldBe 2 } } }
Example 161
Source File: LoggingTests.scala From freestyle with Apache License 2.0 | 5 votes |
package freestyle.free import cats.instances.future._ import freestyle.free.implicits._ import freestyle.free.loggingJS.implicits._ import org.scalatest.{AsyncWordSpec, Matchers} import scala.concurrent.{ExecutionContext, Future} import scala.util.control.NoStackTrace class LoggingTests extends AsyncWordSpec with Matchers { import slogging._ LoggerConfig.factory = FilterLoggerFactory() LoggerConfig.level = LogLevel.TRACE FilterLogger.filter = { // use PrintLogger for all trace statements from sources starting with "foo.bar" case (LogLevel.TRACE, source) if source.startsWith("foo.bar") => PrintLogger // ignore all other trace statements case (LogLevel.TRACE, _) => NullLogger // log all other levels case _ => PrintLogger } implicit override def executionContext = ExecutionContext.Implicits.global import algebras._ "Logging Freestyle free integration" should { case object Cause extends Exception("kaboom") with NoStackTrace "allow a log message to be interleaved inside a program monadic flow" in { val program = for { a <- app.nonLogging.x _ <- app.loggingM.debug("Debug Message", sourceAndLineInfo = true) _ <- app.loggingM.debugWithCause("Debug Message", Cause) _ <- app.loggingM.error("Error Message") _ <- app.loggingM.errorWithCause("Error Message", Cause) _ <- app.loggingM.info("Info Message") _ <- app.loggingM.infoWithCause("Info Message", Cause) _ <- app.loggingM.warn("Warning Message") _ <- app.loggingM.warnWithCause("Warning Message", Cause) b <- FreeS.pure(1) } yield a + b program.interpret[Future] map { _ shouldBe 2 } } "not depend on MonadError, thus allowing use of Monads without MonadError, like Id, for test algebras" in { val program = for { a <- app.nonLogging.x _ <- app.loggingM.info("Info Message") _ <- app.loggingM.infoWithCause("Info Message", Cause) b <- FreeS.pure(1) } yield a + b program.interpret[TestAlgebra].run("configHere") shouldBe 2 } } }
Example 162
Source File: Kleislis.scala From freestyle with Apache License 2.0 | 5 votes |
package freestyle.free.cache.redis.rediscala import cats.data.Kleisli import scala.concurrent.Future import _root_.redis.{ Cursor, ByteStringDeserializer => Deserializer, ByteStringSerializer => Serializer } import _root_.redis.commands.{ Keys => KeyCommands, Server => ServerCommands, Strings => StringCommands } private[rediscala] trait StringCommandsCont { def append[Key, Value](key: Key, value: Value)( implicit format: Format[Key], writer: Serializer[Value] ): Ops[Future, Long] = Kleisli((client: StringCommands) => client.append(format(key), value)) def get[Key, Value](key: Key)( implicit format: Format[Key], writer: Deserializer[Value] ): Ops[Future, Option[Value]] = Kleisli((client: StringCommands) => client.get[Value](format(key))) def set[Key: Format, Value: Serializer](key: Key, value: Value): Ops[Future, Boolean] = Kleisli((client: StringCommands) => client.set(key, value)) def mset[Key, Value: Serializer](keyValues: Map[Key, Value])( implicit format: Format[Key]): Ops[Future, Boolean] = { val b = keyValues.map { case (k, v) => (format(k), v) } Kleisli((client: StringCommands) => client.mset(b)) } def setnx[Key: Format, Value: Serializer](key: Key, value: Value): Ops[Future, Boolean] = Kleisli((client: StringCommands) => client.setnx(key, value)) def setxx[Key: Format, Value: Serializer](key: Key, value: Value): Ops[Future, Boolean] = Kleisli((client: StringCommands) => client.set(key, value, XX = true)) } private[rediscala] trait KeyCommandsCont { def del[Key](keys: List[Key])(implicit format: Format[Key]): Ops[Future, Long] = Kleisli((client: KeyCommands) => client.del(keys.map(format): _*)) def exists[Key](key: Key)(implicit format: Format[Key]): Ops[Future, Boolean] = Kleisli((client: KeyCommands) => client.exists(format(key))) def keys[Key]: Ops[Future, Seq[String]] = Kleisli((client: KeyCommands) => client.keys("*")) def scan[Key]: Ops[Future, Cursor[Seq[String]]] = Kleisli((client: KeyCommands) => client.scan(0, Option(1), None)) } private[rediscala] trait ServerCommandsCont { def flushDB: Ops[Future, Boolean] = Kleisli((client: ServerCommands) => client.flushdb) } private[rediscala] object RediscalaCont extends StringCommandsCont with KeyCommandsCont with ServerCommandsCont
Example 163
Source File: RedisMap.scala From freestyle with Apache License 2.0 | 5 votes |
package freestyle.free.cache.redis.rediscala import cats.{~>, Functor} import cats.syntax.functor._ import scala.concurrent.Future import _root_.redis.{ByteStringDeserializer, ByteStringSerializer, Cursor} import freestyle.free.cache.KeyValueMap class MapWrapper[M[_], Key, Value]( implicit formatKey: Format[Key], parseKey: Parser[Key], formatVal: Format[Value], parseVal: Parser[Value], toM: Future ~> M, funcM: Functor[M] ) extends KeyValueMap[Ops[M, ?], Key, Value] { private[this] implicit val serial: ByteStringDeserializer[Option[Value]] = ByteStringDeserializer.String.map(parseVal) private[this] implicit val deserial: ByteStringSerializer[Value] = ByteStringSerializer.String.contramap(formatVal) override def get(key: Key): Ops[M, Option[Value]] = RediscalaCont.get[Key, Option[Value]](key).mapK(toM).map(_.flatten) override def put(key: Key, value: Value): Ops[M, Unit] = RediscalaCont.set(key, value).mapK(toM).void override def putAll(keyValues: Map[Key, Value]): Ops[M, Unit] = RediscalaCont.mset(keyValues).mapK(toM).void override def putIfAbsent(key: Key, newVal: Value): Ops[M, Unit] = RediscalaCont.setnx(key, newVal).mapK(toM).void override def delete(key: Key): Ops[M, Unit] = RediscalaCont.del(List(key)).mapK(toM).void override def hasKey(key: Key): Ops[M, Boolean] = RediscalaCont.exists(key).mapK(toM) override def keys: Ops[M, List[Key]] = RediscalaCont.keys.mapK(toM).map(_.toList.flatMap(parseKey.apply)) override def clear(): Ops[M, Unit] = RediscalaCont.flushDB.mapK(toM).void override def replace(key: Key, newVal: Value): Ops[M, Unit] = RediscalaCont.setxx(key, newVal).mapK(toM).void override def isEmpty: Ops[M, Boolean] = RediscalaCont.scan.mapK(toM).map(_.data.isEmpty) } object MapWrapper { implicit def apply[M[_], Key, Value]( implicit formatKey: Format[Key], parseKey: Parser[Key], formatValue: Format[Value], parseValue: Parser[Value], toM: Future ~> M, funcM: Functor[M] ): MapWrapper[M, Key, Value] = new MapWrapper }
Example 164
Source File: TestUtil.scala From freestyle with Apache License 2.0 | 5 votes |
package freestyle.free.cache.redis import cats.arrow.FunctionK import cats.instances.future import scala.concurrent.{ExecutionContext, Future} import freestyle.free.cache.redis.rediscala._ object TestUtil { def redisMap(implicit ec: ExecutionContext): MapWrapper[Future, String, Int] = new MapWrapper()( formatKey = Format((key: String) => key), parseKey = Parser((str: String) => Some(str)), formatVal = Format((age: Int) => age.toString), parseVal = Parser((str: String) => scala.util.Try(Integer.parseInt(str)).toOption), toM = FunctionK.id[Future], funcM = future.catsStdInstancesForFuture(ec) ) }
Example 165
Source File: slick.scala From freestyle with Apache License 2.0 | 5 votes |
package freestyle.free import _root_.slick.dbio.{DBIO, DBIOAction} import _root_.slick.jdbc.JdbcBackend import freestyle.free.async._ import freestyle.async.AsyncContext import scala.util.{Failure, Success} import scala.concurrent.{ExecutionContext, Future} object slick { @free sealed trait SlickM { def run[A](f: DBIO[A]): FS[A] } trait Implicits { implicit def freeStyleSlickHandler[M[_]]( implicit asyncContext: AsyncContext[M], db: JdbcBackend#DatabaseDef, ec: ExecutionContext): SlickM.Handler[M] = new SlickM.Handler[M] { def run[A](fa: DBIO[A]): M[A] = asyncContext.runAsync { cb => db.run(fa).onComplete { case Success(x) => cb(Right(x)) case Failure(e) => cb(Left(e)) } } } implicit def freeStyleSlickFutureHandler( implicit db: JdbcBackend#DatabaseDef, ec: ExecutionContext): SlickM.Handler[Future] = new SlickM.Handler[Future] { def run[A](fa: DBIO[A]): Future[A] = db.run(fa) } implicit def freeSLiftSlick[F[_]: SlickM]: FreeSLift[F, DBIO] = new FreeSLift[F, DBIO] { def liftFSPar[A](dbio: DBIO[A]): FreeS.Par[F, A] = SlickM[F].run(dbio) } } object implicits extends Implicits }
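The Future handler above is trivial, but the generic handler shows the interesting part: db.run returns a Future, and onComplete translates its Try outcome into the Either-based callback expected by asyncContext.runAsync. Stripped of the freestyle and Slick machinery, the two directions of that bridge look roughly like this (standard library only):

import scala.concurrent.{ExecutionContext, Future, Promise}
import scala.util.{Failure, Success}

object CallbackBridgeSketch {
  // Future -> callback, mirroring what the SlickM handler does inside runAsync.
  def toCallback[A](fa: Future[A])(cb: Either[Throwable, A] => Unit)(implicit ec: ExecutionContext): Unit =
    fa.onComplete {
      case Success(a) => cb(Right(a))
      case Failure(e) => cb(Left(e))
    }

  // Callback -> Future, the reverse direction, via a Promise.
  def fromCallback[A](register: (Either[Throwable, A] => Unit) => Unit): Future[A] = {
    val p = Promise[A]()
    register {
      case Right(a) => p.success(a)
      case Left(e)  => p.failure(e)
    }
    p.future
  }
}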
Example 166
Source File: SlickTests.scala From freestyle with Apache License 2.0 | 5 votes |
package freestyle.free import _root_.slick.dbio.DBIO import _root_.slick.jdbc.H2Profile.api._ import org.scalatest.{AsyncWordSpec, Matchers} import freestyle.free.implicits._ import freestyle.free.slick._ import freestyle.free.slick.implicits._ import cats.instances.future._ import scala.concurrent.Future class SlickTests extends AsyncWordSpec with Matchers { import algebras._ implicit override val executionContext = scala.concurrent.ExecutionContext.Implicits.global implicit val db = Database.forURL("jdbc:h2:mem:test", driver = "org.h2.Driver") val query: DBIO[Int] = sql"SELECT 1 + 1".as[Int].head "Slick Freestyle integration" should { "allow a Slick DBIO program to be interleaved inside a program monadic flow" in { val program = for { a <- app.nonSlick.x b <- app.slickM.run(query).freeS c <- FreeS.pure(1) } yield a + b + c program.interpret[Future] map { _ shouldBe 4 } } "allow slick syntax to lift to FreeS" in { val program: FreeS[App.Op, Int] = for { a <- app.nonSlick.x b <- query.liftFS[App.Op] c <- app.nonSlick.x } yield a + b + c program.interpret[Future] map { _ shouldBe 4 } } "allow slick syntax to lift to FreeS.Par" in { val program: FreeS[App.Op, Int] = for { a <- app.nonSlick.x b <- query.liftFSPar[App.Op].freeS c <- app.nonSlick.x } yield a + b + c program.interpret[Future] map { _ shouldBe 4 } } } } object algebras { @free trait NonSlick { def x: FS[Int] } implicit def nonSlickHandler: NonSlick.Handler[Future] = new NonSlick.Handler[Future] { def x: Future[Int] = Future.successful(1) } @module trait App { val nonSlick: NonSlick val slickM: SlickM } val app = App[App.Op] }
Example 167
Source File: play.scala From freestyle with Apache License 2.0 | 5 votes |
package freestyle.free.http import cats.instances.future._ import freestyle.free._ import scala.concurrent.{ExecutionContext, Future} package play { object implicits { implicit def seqToFuture[F[_], A](prog: FreeS[F, A])( implicit I: ParInterpreter[F, Future], EC: ExecutionContext ): Future[A] = prog.parInterpret[Future] implicit def parSeqToFuture[F[_], A](prog: FreeS[F, A])( implicit I: FSHandler[F, Future], EC: ExecutionContext ): Future[A] = prog.interpret[Future] implicit def parToFuture[F[_], A](prog: FreeS.Par[F, A])( implicit I: FSHandler[F, Future], EC: ExecutionContext ): Future[A] = prog.interpret[Future] } }
Example 168
Source File: nondeterminism.scala From freestyle with Apache License 2.0 | 5 votes |
package freestyle package free import cats.Eq import cats.instances.int._ import cats.instances.string._ import cats.instances.tuple._ import cats.syntax.eq._ import cats.syntax.either._ import cats.laws.discipline.arbitrary._ import cats.laws.discipline.{ApplicativeTests, FunctorTests, MonadTests} import org.scalatest.{FunSuite, Matchers} import org.typelevel.discipline.scalatest.Discipline import scala.concurrent.{Await, Future} import scala.concurrent.ExecutionContext.Implicits.global import scala.concurrent.duration._ import freestyle.free.nondeterminism._ class NonDeterminismTests extends FunSuite with Discipline with Matchers { // Eq[Future[A]] / Eq[Throwable] -> cats/jvm/src/test/scala/cats/tests/FutureTests.scala val timeout = 3.seconds def futureEither[A](f: Future[A]): Future[Either[Throwable, A]] = f.map(Either.right[Throwable, A]).recover { case t => Either.left(t) } implicit def eqfa[A: Eq]: Eq[Future[A]] = new Eq[Future[A]] { def eqv(fx: Future[A], fy: Future[A]): Boolean = { val fz = futureEither(fx) zip futureEither(fy) Await.result(fz.map { case (tx, ty) => tx === ty }, timeout) } } implicit val throwableEq: Eq[Throwable] = Eq.by(_.toString) checkAll("FutureNondeterminism", FunctorTests[Future].functor[Int, Int, Int]) // fails // - flatMap consistent apply // - ap consistent with product + map // checkAll("FutureNondeterminism", ApplicativeTests[Future].applicative[Int, Int, Int]) // checkAll("FutureNondeterminism", MonadTests[Future].monad[Int, Int, Int]) }
Example 169
Source File: Capture.scala From freestyle with Apache License 2.0 | 5 votes |
package freestyle package free import cats.Id import simulacrum.typeclass import scala.concurrent.{ExecutionContext, Future} import scala.util.Try import annotation.implicitNotFound @typeclass @implicitNotFound(msg = AnnotationMessages.captureInstanceNotFoundMsg) trait Capture[F[_]] { def capture[A](a: => A): F[A] } object Capture extends CaptureInstances trait CaptureInstances { implicit def freeStyleFutureCaptureInstance(implicit ec: ExecutionContext): Capture[Future] = new Capture[Future] { override def capture[A](a: => A): Future[A] = Future(a) } implicit val freeStyleIdCaptureInstance: Capture[Id] = new Capture[Id] { override def capture[A](a: => A): Id[A] = a } implicit val freeStyleTryCaptureInstance: Capture[Try] = new Capture[Try] { override def capture[A](a: => A): Try[A] = Try(a) } }
Example 170
Source File: AsyncTests.scala From freestyle with Apache License 2.0 | 5 votes |
package freestyle.tagless import cats.Monad import cats.instances.future._ import cats.syntax.flatMap._ import cats.syntax.functor._ import freestyle.async.AsyncContext import freestyle.tagless._ import freestyle.tagless.async._ import freestyle.tagless.async.implicits._ import org.scalatest._ import scala.concurrent.{ExecutionContext, Future} class AsyncTests extends AsyncWordSpec with Matchers { implicit override def executionContext = ExecutionContext.Implicits.global "Async Freestyle integration" should { "allow an Async to be interleaved inside a program monadic flow" in { def program[F[_]: Monad: AsyncM] = { for { a <- Monad[F].pure(1) b <- AsyncM[F].async[Int](cb => cb(Right(42))) c <- Monad[F].pure(1) } yield a + b + c } program[Future] map { _ shouldBe 44 } } "allow multiple Async to be interleaved inside a program monadic flow" in { def program[F[_]: Monad: AsyncM] = for { a <- Monad[F].pure(1) b <- AsyncM[F].async[Int](cb => cb(Right(42))) c <- Monad[F].pure(1) d <- AsyncM[F].async[Int](cb => cb(Right(10))) } yield a + b + c + d program[Future] map { _ shouldBe 54 } } case class OhNoException() extends Exception "allow Async errors to short-circuit a program" in { def program[F[_]: Monad: AsyncM] = for { a <- Monad[F].pure(1) b <- AsyncM[F].async[Int](cb => cb(Left(OhNoException()))) c <- Monad[F].pure(3) } yield a + b + c program[Future] recover { case OhNoException() => 42 } map { _ shouldBe 42 } } } }
Example 171
Source File: AsyncTests.scala From freestyle with Apache License 2.0 | 5 votes |
package freestyle.free import org.scalatest._ import cats.instances.future._ import freestyle.free._ import freestyle.free.implicits._ import freestyle.free.async._ import freestyle.free.async.implicits._ import scala.concurrent.{ExecutionContext, Future} class AsyncTests extends AsyncWordSpec with Matchers { implicit override def executionContext = ExecutionContext.Implicits.global "Async Freestyle integration" should { "allow an Async to be interleaved inside a program monadic flow" in { def program[F[_]: AsyncM] = for { a <- FreeS.pure(1) b <- AsyncM[F].async[Int](cb => cb(Right(42))) c <- FreeS.pure(1) } yield a + b + c program[AsyncM.Op].interpret[Future] map { _ shouldBe 44 } } "allow multiple Async to be interleaved inside a program monadic flow" in { def program[F[_]: AsyncM] = for { a <- FreeS.pure(1) b <- AsyncM[F].async[Int](cb => cb(Right(42))) c <- FreeS.pure(1) d <- AsyncM[F].async[Int](cb => cb(Right(10))) } yield a + b + c + d program[AsyncM.Op].interpret[Future] map { _ shouldBe 54 } } case class OhNoException() extends Exception "allow Async errors to short-circuit a program" in { def program[F[_]: AsyncM] = for { a <- FreeS.pure(1) b <- AsyncM[F].async[Int](cb => cb(Left(OhNoException()))) c <- FreeS.pure(3) } yield a + b + c program[AsyncM.Op].interpret[Future] recover { case OhNoException() => 42 } map { _ shouldBe 42 } } } }
Example 172
Source File: FederationServer.scala From scala-stellar-sdk with Apache License 2.0 | 5 votes |
package stellar.sdk import java.net.HttpURLConnection.HTTP_NOT_FOUND import com.typesafe.scalalogging.LazyLogging import okhttp3.{Headers, HttpUrl, OkHttpClient, Request} import org.json4s.native.{JsonMethods, Serialization} import org.json4s.{Formats, NoTypeHints} import stellar.sdk.inet.RestException import stellar.sdk.model.response.{FederationResponse, FederationResponseDeserialiser} import scala.concurrent.{ExecutionContext, Future} import scala.util.{Failure, Success, Try} case class FederationServer(base: HttpUrl) extends LazyLogging { implicit val formats: Formats = Serialization.formats(NoTypeHints) + FederationResponseDeserialiser private val client = new OkHttpClient() private val headers = Headers.of( "X-Client-Name", BuildInfo.name, "X-Client-Version", BuildInfo.version) def byName(name: String)(implicit ec: ExecutionContext): Future[Option[FederationResponse]] = fetchFederationResponse(base.newBuilder() .addQueryParameter("q", name) .addQueryParameter("type", "name") .build(), _.copy(address = name)) def byAccount(account: PublicKey)(implicit ec: ExecutionContext): Future[Option[FederationResponse]] = fetchFederationResponse(base.newBuilder() .addQueryParameter("q", account.accountId) .addQueryParameter("type", "id") .build(), _.copy(account = account)) private def fetchFederationResponse(url: HttpUrl, fillIn: FederationResponse => FederationResponse) (implicit ec: ExecutionContext): Future[Option[FederationResponse]] = Future(client.newCall(new Request.Builder().url(url).headers(headers).build()).execute()) .map { response => response.code() match { case HTTP_NOT_FOUND => None case e if e >= 500 => throw RestException(response.body().string()) case _ => Try(response.body().string()) .map(JsonMethods.parse(_)) .map(_.extract[FederationResponse]) .map(fillIn) .map(validate) match { case Success(fr) => Some(fr) case Failure(t) => throw RestException("Could not parse document as FederationResponse.", t) } } } private def validate(fr: FederationResponse): FederationResponse = { if (fr.account == null) throw RestException(s"Document did not contain account_id") if (fr.address == null) throw RestException(s"Document did not contain stellar_address") fr } } object FederationServer { def apply(uriString: String): FederationServer = new FederationServer(HttpUrl.parse(uriString)) }
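A sketch of how the server above might be called, assuming the stellar.sdk classes shown here are on the classpath; the endpoint and address are made up for illustration. byName returns a Future[Option[FederationResponse]], so the result is handled with foreach on the default ExecutionContext:

import scala.concurrent.ExecutionContext.Implicits.global

object FederationLookupSketch extends App {
  // Hypothetical federation endpoint and address, for illustration only.
  val server = FederationServer("https://federation.example.com/federation")

  server.byName("alice*example.com").foreach {
    case Some(resp) => println(s"Resolved to account ${resp.account.accountId}")
    case None       => println("No federation record found")
  }
}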
Example 173
Source File: DoNothingNetwork.scala From scala-stellar-sdk with Apache License 2.0 | 5 votes |
package stellar.sdk.util import org.json4s.CustomSerializer import stellar.sdk.Network import stellar.sdk.inet.HorizonAccess import stellar.sdk.model.response.{DataValueResponse, TransactionPostResponse} import stellar.sdk.model.{HorizonCursor, HorizonOrder, SignedTransaction} import scala.concurrent.{ExecutionContext, Future} import scala.reflect.ClassTag class DoNothingNetwork(override val passphrase: String = "Scala SDK do-nothing network") extends Network { override val horizon: HorizonAccess = new HorizonAccess { override def post(txn: SignedTransaction)(implicit ec: ExecutionContext): Future[TransactionPostResponse] = ??? override def get[T: ClassTag](path: String, params: Map[String, String]) (implicit ec: ExecutionContext, m: Manifest[T]): Future[T] = if (path.endsWith("data/data_key")) { Future(DataValueResponse("00").asInstanceOf[T])(ec) } else ??? override def getStream[T: ClassTag](path: String, de: CustomSerializer[T], cursor: HorizonCursor, order: HorizonOrder, params: Map[String, String] = Map.empty) (implicit ec: ExecutionContext, m: Manifest[T]): Future[LazyList[T]] = ??? override def getSeq[T: ClassTag](path: String, de: CustomSerializer[T], params: Map[String, String]) (implicit ec: ExecutionContext, m: Manifest[T]): Future[LazyList[T]] = Future.successful(LazyList.empty[T]) } }
Example 174
Source File: AuthenticatedUserAction.scala From PlayFrameworkLoginAuthenticationExample with GNU General Public License v3.0 | 5 votes |
package controllers import javax.inject.Inject import play.api.mvc.Results._ import play.api.mvc._ import scala.concurrent.{ExecutionContext, Future} class AuthenticatedUserAction @Inject() (parser: BodyParsers.Default)(implicit ec: ExecutionContext) extends ActionBuilderImpl(parser) { private val logger = play.api.Logger(this.getClass) override def invokeBlock[A](request: Request[A], block: (Request[A]) => Future[Result]) = { logger.info("ENTERED AuthenticatedUserAction::invokeBlock ...") val maybeUsername = request.session.get(models.Global.SESSION_USERNAME_KEY) maybeUsername match { case None => { Future.successful(Forbidden("Dude, you’re not logged in.")) } case Some(u) => { val res: Future[Result] = block(request) res } } } }
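The action builder above short-circuits with Future.successful(Forbidden(...)) when no username is found in the session, and otherwise returns the downstream block's Future untouched. Stripped of the Play types, the Future logic is just the following sketch (the string results are hypothetical stand-ins for Result):

import scala.concurrent.Future

object AuthFilter {
  // Short-circuit with an already-completed Future when the session check fails;
  // otherwise return the downstream block's Future unchanged.
  def invoke(session: Map[String, String])(block: => Future[String]): Future[String] =
    session.get("username") match {
      case None    => Future.successful("403 Forbidden")
      case Some(_) => block
    }
}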
Example 175
Source File: DistShellAppMaster.scala From incubator-retired-gearpump with Apache License 2.0 | 5 votes |
package org.apache.gearpump.examples.distributedshell import org.apache.gearpump.cluster.MasterToAppMaster.WorkerList import scala.concurrent.Future import akka.actor.{Deploy, Props} import akka.pattern.{ask, pipe} import akka.remote.RemoteScope import com.typesafe.config.Config import org.slf4j.Logger import org.apache.gearpump.cluster.ClientToMaster.ShutdownApplication import org.apache.gearpump.cluster.appmaster.ExecutorSystemScheduler.{ExecutorSystemJvmConfig, ExecutorSystemStarted, StartExecutorSystemTimeout} import org.apache.gearpump.cluster._ import org.apache.gearpump.examples.distributedshell.DistShellAppMaster._ import org.apache.gearpump.util.{ActorUtil, Constants, LogUtil, Util} class DistShellAppMaster(appContext: AppMasterContext, app: AppDescription) extends ApplicationMaster { import appContext._ import context.dispatcher implicit val timeout = Constants.FUTURE_TIMEOUT private val LOG: Logger = LogUtil.getLogger(getClass, app = appId) protected var currentExecutorId = 0 private var workerNum: Option[Int] = None override def preStart(): Unit = { LOG.info(s"Distributed Shell AppMaster started") ActorUtil.launchExecutorOnEachWorker(masterProxy, getExecutorJvmConfig, self) } override def receive: Receive = { case ExecutorSystemStarted(executorSystem, _) => import executorSystem.{address, resource => executorResource, worker} val executorContext = ExecutorContext(currentExecutorId, worker, appId, app.name, self, executorResource) // Start executor val executor = context.actorOf(Props(classOf[ShellExecutor], executorContext, app.userConfig) .withDeploy(Deploy(scope = RemoteScope(address))), currentExecutorId.toString) executorSystem.bindLifeCycleWith(executor) currentExecutorId += 1 ActorUtil.tellMasterIfApplicationReady(workerNum, currentExecutorId, appContext) case WorkerList(workers) => workerNum = Some(workers.length) ActorUtil.tellMasterIfApplicationReady(workerNum, currentExecutorId, appContext) case StartExecutorSystemTimeout => LOG.error(s"Failed to allocate resource in time") masterProxy ! ShutdownApplication(appId) context.stop(self) case msg: ShellCommand => Future.fold(context.children.map(_ ? msg))(new ShellCommandResultAggregator) { (aggregator, response) => { aggregator.aggregate(response.asInstanceOf[ShellCommandResult]) } }.map(_.toString()) pipeTo sender } private def getExecutorJvmConfig: ExecutorSystemJvmConfig = { val config: Config = app.clusterConfig val jvmSetting = Util.resolveJvmSetting(config.withFallback(context.system.settings.config)) .executor ExecutorSystemJvmConfig(jvmSetting.classPath, jvmSetting.vmargs, appJar, username, config) } } object DistShellAppMaster { case class ShellCommand(command: String) case class ShellCommandResult(executorId: Int, msg: Any) class ShellCommandResultAggregator { val result: StringBuilder = new StringBuilder def aggregate(response: ShellCommandResult): ShellCommandResultAggregator = { result.append(s"Execute results from executor ${response.executorId} : \n") result.append(response.msg + "\n") this } override def toString: String = result.toString() } }
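The ShellCommand handler asks every child executor, folds the replies with Future.fold into one aggregate, and pipes the result back to the sender. A dependency-free sketch of the same fan-out/aggregate step, using Future.sequence instead of Future.fold; the workers here are hypothetical functions returning Future[String]:

import scala.concurrent.{ExecutionContext, Future}

object FanOut {
  // Send one command to several asynchronous workers and combine their replies
  // into a single report once all of them have answered.
  def broadcast(workers: Seq[String => Future[String]], cmd: String)
               (implicit ec: ExecutionContext): Future[String] =
    Future.sequence(workers.map(worker => worker(cmd))).map { replies =>
      replies.zipWithIndex
        .map { case (reply, i) => s"Execute results from executor $i : $reply" }
        .mkString("\n")
    }
}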
Example 176
Source File: DistributedShellClientSpec.scala From incubator-retired-gearpump with Apache License 2.0 | 5 votes |
package org.apache.gearpump.examples.distributedshell import scala.concurrent.Future import scala.util.{Success, Try} import akka.testkit.TestProbe import org.scalatest.{BeforeAndAfter, Matchers, PropSpec} import org.apache.gearpump.cluster.ClientToMaster.ResolveAppId import org.apache.gearpump.cluster.MasterToClient.ResolveAppIdResult import org.apache.gearpump.cluster.{MasterHarness, TestUtil} import org.apache.gearpump.examples.distributedshell.DistShellAppMaster.ShellCommand import org.apache.gearpump.util.LogUtil class DistributedShellClientSpec extends PropSpec with Matchers with BeforeAndAfter with MasterHarness { private val LOG = LogUtil.getLogger(getClass) before { startActorSystem() } after { shutdownActorSystem() } protected override def config = TestUtil.DEFAULT_CONFIG property("DistributedShellClient should succeed to submit application with required arguments") { val command = "ls /" val requiredArgs = Array("-appid", "0", "-command", command) val masterReceiver = createMockMaster() assert(Try(DistributedShellClient.main(Array.empty[String])).isFailure, "missing required arguments, print usage") Future { DistributedShellClient.main(masterConfig, requiredArgs) } masterReceiver.expectMsg(PROCESS_BOOT_TIME, ResolveAppId(0)) val mockAppMaster = TestProbe()(getActorSystem) masterReceiver.reply(ResolveAppIdResult(Success(mockAppMaster.ref))) LOG.info(s"Reply back ResolveAppIdResult, current actorRef: ${mockAppMaster.ref.path.toString}") mockAppMaster.expectMsg(PROCESS_BOOT_TIME, ShellCommand(command)) mockAppMaster.reply("result") } }
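This spec, and the nearly identical gearpump specs that follow, all use the same idiom: the blocking main(...) call is wrapped in Future { ... } so the test thread stays free to drive the mock master with expectMsg/reply. A plain-scala sketch of that fire-and-forget launch, with a blocking queue standing in for the mock master:

import java.util.concurrent.LinkedBlockingQueue
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.Future

object FireAndForgetLaunch {
  def main(args: Array[String]): Unit = {
    val mockMaster = new LinkedBlockingQueue[String]()

    // Stand-in for an application main() that submits itself to the master.
    def appMain(): Unit = mockMaster.put("SubmitApplication")

    Future { appMain() }                 // run the blocking call off the test thread
    val msg = mockMaster.take()          // the test thread observes the submission
    assert(msg == "SubmitApplication")
  }
}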
Example 177
Source File: DistributedShellSpec.scala From incubator-retired-gearpump with Apache License 2.0 | 5 votes |
package org.apache.gearpump.examples.distributedshell import scala.concurrent.Future import scala.util.Success import com.typesafe.config.Config import org.scalatest.prop.PropertyChecks import org.scalatest.{BeforeAndAfter, Matchers, PropSpec} import org.apache.gearpump.cluster.ClientToMaster.SubmitApplication import org.apache.gearpump.cluster.MasterToClient.SubmitApplicationResult import org.apache.gearpump.cluster.{MasterHarness, TestUtil} class DistributedShellSpec extends PropSpec with PropertyChecks with Matchers with BeforeAndAfter with MasterHarness { before { startActorSystem() } after { shutdownActorSystem() } override def config: Config = TestUtil.DEFAULT_CONFIG property("DistributedShell should succeed to submit application with required arguments") { val requiredArgs = Array.empty[String] val masterReceiver = createMockMaster() Future { DistributedShell.main(masterConfig, requiredArgs) } masterReceiver.expectMsgType[SubmitApplication](PROCESS_BOOT_TIME) masterReceiver.reply(SubmitApplicationResult(Success(0))) } }
Example 178
Source File: SequenceFileIOSpec.scala From incubator-retired-gearpump with Apache License 2.0 | 5 votes |
package org.apache.gearpump.streaming.examples.fsio import scala.concurrent.Future import scala.util.{Success, Try} import com.typesafe.config.Config import org.scalatest.prop.PropertyChecks import org.scalatest.{BeforeAndAfterAll, Matchers, PropSpec} import org.apache.gearpump.cluster.ClientToMaster.SubmitApplication import org.apache.gearpump.cluster.MasterToClient.SubmitApplicationResult import org.apache.gearpump.cluster.{MasterHarness, TestUtil} class SequenceFileIOSpec extends PropSpec with PropertyChecks with Matchers with BeforeAndAfterAll with MasterHarness { override def beforeAll { startActorSystem() } override def afterAll { shutdownActorSystem() } override def config: Config = TestUtil.DEFAULT_CONFIG property("SequenceFileIO should succeed to submit application with required arguments") { val requiredArgs = Array( "-input", "/tmp/input", "-output", "/tmp/output" ) val optionalArgs = Array( "-source", "1", "-sink", "1" ) val validArgs = { Table( ("requiredArgs", "optionalArgs"), (requiredArgs, optionalArgs) ) } val masterReceiver = createMockMaster() forAll(validArgs) { (requiredArgs: Array[String], optionalArgs: Array[String]) => val args = requiredArgs ++ optionalArgs Future { SequenceFileIO.main(masterConfig, args) } masterReceiver.expectMsgType[SubmitApplication](PROCESS_BOOT_TIME) masterReceiver.reply(SubmitApplicationResult(Success(0))) } val invalidArgs = { Table( ("requiredArgs", "optionalArgs"), (requiredArgs.take(0), optionalArgs), (requiredArgs.take(2), optionalArgs) ) } forAll(invalidArgs) { (requiredArgs: Array[String], optionalArgs: Array[String]) => val args = optionalArgs assert(Try(SequenceFileIO.main(args)).isFailure, "missing required arguments, print usage") } } }
Example 179
Source File: SOLSpec.scala From incubator-retired-gearpump with Apache License 2.0 | 5 votes |
package org.apache.gearpump.streaming.examples.sol import scala.concurrent.Future import scala.util.Success import com.typesafe.config.Config import org.scalatest.prop.PropertyChecks import org.scalatest.{BeforeAndAfterAll, Matchers, PropSpec} import org.apache.gearpump.cluster.ClientToMaster.SubmitApplication import org.apache.gearpump.cluster.MasterToClient.SubmitApplicationResult import org.apache.gearpump.cluster.{MasterHarness, TestUtil} class SOLSpec extends PropSpec with PropertyChecks with Matchers with BeforeAndAfterAll with MasterHarness { override def beforeAll { startActorSystem() } override def afterAll { shutdownActorSystem() } override def config: Config = TestUtil.DEFAULT_CONFIG property("SOL should succeed to submit application with required arguments") { val requiredArgs = Array.empty[String] val optionalArgs = Array( "-streamProducer", "1", "-streamProcessor", "1", "-bytesPerMessage", "100", "-stages", "10") val args = { Table( ("requiredArgs", "optionalArgs"), (requiredArgs, optionalArgs) ) } val masterReceiver = createMockMaster() forAll(args) { (requiredArgs: Array[String], optionalArgs: Array[String]) => val args = requiredArgs ++ optionalArgs Future { SOL.main(masterConfig, args) } masterReceiver.expectMsgType[SubmitApplication](PROCESS_BOOT_TIME) masterReceiver.reply(SubmitApplicationResult(Success(0))) } } }
Example 180
Source File: DagSpec.scala From incubator-retired-gearpump with Apache License 2.0 | 5 votes |
package org.apache.gearpump.streaming.examples.complexdag import org.apache.gearpump.cluster.ClientToMaster.SubmitApplication import org.apache.gearpump.cluster.MasterToClient.SubmitApplicationResult import org.apache.gearpump.cluster.{MasterHarness, TestUtil} import org.scalatest._ import org.scalatest.prop.PropertyChecks import scala.concurrent.Future import scala.util.Success class DagSpec extends PropSpec with PropertyChecks with Matchers with BeforeAndAfterAll with MasterHarness { override def beforeAll { startActorSystem() } override def afterAll { shutdownActorSystem() } protected override def config = TestUtil.DEFAULT_CONFIG property("Dag should succeed to submit application with required arguments") { val requiredArgs = Array.empty[String] val masterReceiver = createMockMaster() val args = requiredArgs Future { Dag.main(masterConfig, args) } masterReceiver.expectMsgType[SubmitApplication](PROCESS_BOOT_TIME) masterReceiver.reply(SubmitApplicationResult(Success(0))) } }
Example 181
Source File: WindowAverageAppSpec.scala From incubator-retired-gearpump with Apache License 2.0 | 5 votes |
package org.apache.gearpump.streaming.examples.state import scala.concurrent.Future import scala.util.Success import com.typesafe.config.Config import org.scalatest.prop.PropertyChecks import org.scalatest.{BeforeAndAfter, Matchers, PropSpec} import org.apache.gearpump.cluster.ClientToMaster.SubmitApplication import org.apache.gearpump.cluster.MasterToClient.SubmitApplicationResult import org.apache.gearpump.cluster.{MasterHarness, TestUtil} class WindowAverageAppSpec extends PropSpec with PropertyChecks with Matchers with BeforeAndAfter with MasterHarness { before { startActorSystem() } after { shutdownActorSystem() } override def config: Config = TestUtil.DEFAULT_CONFIG property("WindowAverage should succeed to submit application with required arguments") { val requiredArgs = Array.empty[String] val optionalArgs = Array( "-gen", "2", "-window", "2", "-window_size", "5000", "-window_step", "5000" ) val args = { Table( ("requiredArgs", "optionalArgs"), (requiredArgs, optionalArgs.take(0)), (requiredArgs, optionalArgs.take(2)), (requiredArgs, optionalArgs.take(4)), (requiredArgs, optionalArgs.take(6)), (requiredArgs, optionalArgs) ) } val masterReceiver = createMockMaster() forAll(args) { (requiredArgs: Array[String], optionalArgs: Array[String]) => val args = requiredArgs ++ optionalArgs Future { WindowAverageApp.main(masterConfig, args) } masterReceiver.expectMsgType[SubmitApplication](PROCESS_BOOT_TIME) masterReceiver.reply(SubmitApplicationResult(Success(0))) } } }
Example 182
Source File: DefaultMessageCountAppSpec.scala From incubator-retired-gearpump with Apache License 2.0 | 5 votes |
package org.apache.gearpump.streaming.examples.state import scala.concurrent.Future import scala.util.Success import org.scalatest.prop.PropertyChecks import org.scalatest.{BeforeAndAfter, Matchers, PropSpec} import org.apache.gearpump.cluster.ClientToMaster.SubmitApplication import org.apache.gearpump.cluster.MasterToClient.SubmitApplicationResult import org.apache.gearpump.cluster.{MasterHarness, TestUtil} import org.apache.gearpump.streaming.examples.state.MessageCountApp._ class DefaultMessageCountAppSpec extends PropSpec with PropertyChecks with Matchers with BeforeAndAfter with MasterHarness { before { startActorSystem() } after { shutdownActorSystem() } protected override def config = TestUtil.DEFAULT_CONFIG property("MessageCount should succeed to submit application with required arguments") { val requiredArgs = Array( s"-$SOURCE_TOPIC", "source", s"-$SINK_TOPIC", "sink", s"-$ZOOKEEPER_CONNECT", "localhost:2181", s"-$BROKER_LIST", "localhost:9092", s"-$DEFAULT_FS", "hdfs://localhost:9000" ) val optionalArgs = Array( s"-$SOURCE_TASK", "2", s"-$COUNT_TASK", "2", s"-$SINK_TASK", "2" ) val args = { Table( ("requiredArgs", "optionalArgs"), (requiredArgs, optionalArgs.take(0)), (requiredArgs, optionalArgs.take(2)), (requiredArgs, optionalArgs.take(4)), (requiredArgs, optionalArgs) ) } val masterReceiver = createMockMaster() forAll(args) { (requiredArgs: Array[String], optionalArgs: Array[String]) => val args = requiredArgs ++ optionalArgs Future { MessageCountApp.main(masterConfig, args) } masterReceiver.expectMsgType[SubmitApplication](PROCESS_BOOT_TIME) masterReceiver.reply(SubmitApplicationResult(Success(0))) } } }
Example 183
Source File: WordCountSpec.scala From incubator-retired-gearpump with Apache License 2.0 | 5 votes |
package org.apache.gearpump.streaming.examples.wordcount import scala.concurrent.Future import scala.util.Success import org.scalatest.prop.PropertyChecks import org.scalatest.{BeforeAndAfter, Matchers, PropSpec} import org.apache.gearpump.cluster.ClientToMaster.SubmitApplication import org.apache.gearpump.cluster.MasterToClient.SubmitApplicationResult import org.apache.gearpump.cluster.{MasterHarness, TestUtil} class WordCountSpec extends PropSpec with PropertyChecks with Matchers with BeforeAndAfter with MasterHarness { before { startActorSystem() } after { shutdownActorSystem() } protected override def config = TestUtil.DEFAULT_CONFIG property("WordCount should succeed to submit application with required arguments") { val requiredArgs = Array.empty[String] val optionalArgs = Array( "-split", "1", "-sum", "1") val args = { Table( ("requiredArgs", "optionalArgs"), (requiredArgs, optionalArgs) ) } val masterReceiver = createMockMaster() forAll(args) { (requiredArgs: Array[String], optionalArgs: Array[String]) => val args = requiredArgs ++ optionalArgs Future { WordCount.main(masterConfig, args) } masterReceiver.expectMsgType[SubmitApplication](PROCESS_BOOT_TIME) masterReceiver.reply(SubmitApplicationResult(Success(0))) } } }
Example 184
Source File: WordCountSpec.scala From incubator-retired-gearpump with Apache License 2.0 | 5 votes |
package org.apache.gearpump.streaming.examples.wordcountjava import org.apache.gearpump.cluster.ClientToMaster.SubmitApplication import org.apache.gearpump.cluster.MasterToClient.SubmitApplicationResult import org.apache.gearpump.cluster.{MasterHarness, TestUtil} import org.apache.gearpump.streaming.examples.wordcountjava.dsl.WordCount import org.scalatest.prop.PropertyChecks import org.scalatest.{BeforeAndAfter, Matchers, PropSpec} import scala.concurrent.Future import scala.util.Success class WordCountSpec extends PropSpec with PropertyChecks with Matchers with BeforeAndAfter with MasterHarness { before { startActorSystem() } after { shutdownActorSystem() } protected override def config = TestUtil.DEFAULT_CONFIG property("WordCount should succeed to submit application with required arguments") { val requiredArgs = Array.empty[String] val masterReceiver = createMockMaster() val args = requiredArgs Future { WordCount.main(masterConfig, args) } masterReceiver.expectMsgType[SubmitApplication](PROCESS_BOOT_TIME) masterReceiver.reply(SubmitApplicationResult(Success(0))) } }
Example 185
Source File: KafkaWordCountSpec.scala From incubator-retired-gearpump with Apache License 2.0 | 5 votes |
package org.apache.gearpump.streaming.examples.kafka.wordcount import scala.concurrent.Future import scala.util.Success import com.typesafe.config.Config import org.scalatest.prop.PropertyChecks import org.scalatest.{BeforeAndAfter, Matchers, PropSpec} import org.apache.gearpump.cluster.ClientToMaster.SubmitApplication import org.apache.gearpump.cluster.MasterToClient.SubmitApplicationResult import org.apache.gearpump.cluster.{MasterHarness, TestUtil} class KafkaWordCountSpec extends PropSpec with PropertyChecks with Matchers with BeforeAndAfter with MasterHarness { before { startActorSystem() } after { shutdownActorSystem() } override def config: Config = TestUtil.DEFAULT_CONFIG property("KafkaWordCount should succeed to submit application with required arguments") { val requiredArgs = Array.empty[String] val optionalArgs = Array( "-source", "1", "-split", "1", "-sum", "1", "-sink", "1") val args = { Table( ("requiredArgs", "optionalArgs"), (requiredArgs, optionalArgs) ) } val masterReceiver = createMockMaster() forAll(args) { (requiredArgs: Array[String], optionalArgs: Array[String]) => val args = requiredArgs ++ optionalArgs Future { KafkaWordCount.main(masterConfig, args) } masterReceiver.expectMsgType[SubmitApplication](PROCESS_BOOT_TIME) masterReceiver.reply(SubmitApplicationResult(Success(0))) } } }
Example 186
Source File: DistServiceAppMaster.scala From incubator-retired-gearpump with Apache License 2.0 | 5 votes |
package org.apache.gearpump.experiments.distributeservice import java.io.File import org.apache.gearpump.cluster.MasterToAppMaster.WorkerList import scala.concurrent.Future import akka.actor.{Deploy, Props} import akka.pattern.{ask, pipe} import akka.remote.RemoteScope import com.typesafe.config.Config import org.slf4j.Logger import org.apache.gearpump.cluster.ClientToMaster.ShutdownApplication import org.apache.gearpump.cluster.appmaster.ExecutorSystemScheduler.{ExecutorSystemJvmConfig, ExecutorSystemStarted, StartExecutorSystemTimeout} import org.apache.gearpump.cluster.{AppDescription, AppMasterContext, ApplicationMaster, ExecutorContext} import org.apache.gearpump.experiments.distributeservice.DistServiceAppMaster.{FileContainer, GetFileContainer, InstallService} import org.apache.gearpump.util._ class DistServiceAppMaster(appContext: AppMasterContext, app: AppDescription) extends ApplicationMaster { import appContext._ import context.dispatcher implicit val timeout = Constants.FUTURE_TIMEOUT private val LOG: Logger = LogUtil.getLogger(getClass, app = appId) private var currentExecutorId = 0 private var workerNum: Option[Int] = None private var fileServerPort = -1 val rootDirectory = new File("/") val host = context.system.settings.config.getString(Constants.GEARPUMP_HOSTNAME) val server = context.actorOf(Props(classOf[FileServer], rootDirectory, host, 0)) override def preStart(): Unit = { LOG.info(s"Distribute Service AppMaster started") ActorUtil.launchExecutorOnEachWorker(masterProxy, getExecutorJvmConfig, self) } (server ? FileServer.GetPort).asInstanceOf[Future[FileServer.Port]] pipeTo self override def receive: Receive = { case ExecutorSystemStarted(executorSystem, _) => import executorSystem.{address, resource => executorResource, worker} val executorContext = ExecutorContext(currentExecutorId, worker, appId, app.name, self, executorResource) // start executor val executor = context.actorOf(Props(classOf[DistServiceExecutor], executorContext, app.userConfig).withDeploy( Deploy(scope = RemoteScope(address))), currentExecutorId.toString) executorSystem.bindLifeCycleWith(executor) currentExecutorId += 1 ActorUtil.tellMasterIfApplicationReady(workerNum, currentExecutorId, appContext) case WorkerList(workers) => workerNum = Some(workers.length) ActorUtil.tellMasterIfApplicationReady(workerNum, currentExecutorId, appContext) case StartExecutorSystemTimeout => LOG.error(s"Failed to allocate resource in time") masterProxy ! ShutdownApplication(appId) context.stop(self) case FileServer.Port(port) => this.fileServerPort = port case GetFileContainer => val name = Math.abs(new java.util.Random().nextLong()).toString sender ! new FileContainer(s"http://$host:$fileServerPort/$name") case installService: InstallService => context.children.foreach(_ ! installService) } private def getExecutorJvmConfig: ExecutorSystemJvmConfig = { val config: Config = app.clusterConfig val jvmSetting = Util.resolveJvmSetting( config.withFallback(context.system.settings.config)).executor ExecutorSystemJvmConfig(jvmSetting.classPath, jvmSetting.vmargs, appJar, username, config) } } object DistServiceAppMaster { case object GetFileContainer case class FileContainer(url: String) case class InstallService( url: String, zipFileName: String, targetPath: String, script: Array[Byte], serviceName: String, serviceSettings: Map[String, Any]) }
Example 187
Source File: DistributeServiceClient.scala From incubator-retired-gearpump with Apache License 2.0 | 5 votes |
package org.apache.gearpump.experiments.distributeservice import java.io.File import scala.concurrent.Future import scala.util.{Failure, Success} import akka.pattern.ask import org.apache.commons.io.FileUtils import org.apache.gearpump.cluster.client.ClientContext import org.apache.gearpump.cluster.main.{ArgumentsParser, CLIOption} import org.apache.gearpump.experiments.distributeservice.DistServiceAppMaster.{FileContainer, GetFileContainer, InstallService} import org.apache.gearpump.util.{AkkaApp, Constants} object DistributeServiceClient extends AkkaApp with ArgumentsParser { implicit val timeout = Constants.FUTURE_TIMEOUT override val options: Array[(String, CLIOption[Any])] = Array( "appid" -> CLIOption[Int]("<the distributed shell appid>", required = true), "file" -> CLIOption[String]("<service zip file path>", required = true), "script" -> CLIOption[String]( "<file path of service script that will be installed to /etc/init.d>", required = true), "serviceName" -> CLIOption[String]("<service name>", required = true), "target" -> CLIOption[String]("<target path on each machine>", required = true) ) override def help(): Unit = { super.help() // scalastyle:off println Console.err.println(s"-D<name>=<value> set a property to the service") // scalastyle:on println } override def main(akkaConf: Config, args: Array[String]): Unit = { val config = parse(filterCustomOptions(args)) val context = ClientContext(akkaConf) implicit val system = context.system implicit val dispatcher = system.dispatcher val appid = config.getInt("appid") val zipFile = new File(config.getString("file")) val script = new File(config.getString("script")) val serviceName = config.getString("serviceName") val appMaster = context.resolveAppID(appid) (appMaster ? GetFileContainer).asInstanceOf[Future[FileContainer]].map { container => val bytes = FileUtils.readFileToByteArray(zipFile) val result = FileServer.newClient.save(container.url, bytes) result match { case Success(_) => appMaster ! InstallService(container.url, zipFile.getName, config.getString("target"), FileUtils.readFileToByteArray(script), serviceName, parseServiceConfig(args)) context.close() case Failure(ex) => throw ex } } } private def filterCustomOptions(args: Array[String]): Array[String] = { args.filter(!_.startsWith("-D")) } private def parseServiceConfig(args: Array[String]): Map[String, Any] = { val result = Map.empty[String, Any] args.foldLeft(result) { (result, argument) => if (argument.startsWith("-D") && argument.contains("=")) { val fixedKV = argument.substring(2).split("=") result + (fixedKV(0) -> fixedKV(1)) } else { result } } } }
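The client asks the app master for a file container, then maps over the resulting Future to perform the upload and pattern-match its Try result, rethrowing on failure so the enclosing Future fails. A compact sketch of that flow with hypothetical requestUploadUrl and upload steps:

import scala.concurrent.{ExecutionContext, Future}
import scala.util.{Failure, Success, Try}

object UploadFlow {
  // Hypothetical asynchronous and synchronous steps.
  def requestUploadUrl()(implicit ec: ExecutionContext): Future[String] =
    Future.successful("http://host/container-1")
  def upload(url: String, bytes: Array[Byte]): Try[Unit] = Success(())

  def run(bytes: Array[Byte])(implicit ec: ExecutionContext): Future[Unit] =
    requestUploadUrl().map { url =>
      upload(url, bytes) match {
        case Success(_)  => ()          // done; a real client would close its context here
        case Failure(ex) => throw ex    // rethrow so the enclosing Future fails
      }
    }
}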
Example 188
Source File: SupervisorService.scala From incubator-retired-gearpump with Apache License 2.0 | 5 votes |
package org.apache.gearpump.services import scala.concurrent.Future import scala.util.{Failure, Success} import akka.actor.{ActorRef, ActorSystem} import akka.http.scaladsl.server.Directives._ import akka.http.scaladsl.server.Route import akka.stream.Materializer import org.apache.gearpump.cluster.AppMasterToMaster.{GetWorkerData, WorkerData} import org.apache.gearpump.cluster.ClientToMaster._ import org.apache.gearpump.cluster.worker.WorkerId import org.apache.gearpump.services.SupervisorService.{Path, Status} import org.apache.gearpump.util.ActorUtil._ // NOTE: This cannot be removed!!! import org.apache.gearpump.services.util.UpickleUtil._ private def authorize(internal: Route): Route = { if (supervisor == null) { failWith(new Exception("API not enabled, cannot find a valid supervisor! " + "Please make sure Gearpump is running on top of YARN or other resource managers")) } else { internal } } protected override def doRoute(implicit mat: Materializer) = pathPrefix("supervisor") { pathEnd { get { val path = if (supervisor == null) { null } else { supervisor.path.toString } complete(write(Path(path))) } } ~ path("status") { post { if (supervisor == null) { complete(write(Status(enabled = false))) } else { complete(write(Status(enabled = true))) } } } ~ path("addworker" / IntNumber) { workerCount => post { authorize { onComplete(askActor[CommandResult](supervisor, AddWorker(workerCount))) { case Success(value) => complete(write(value)) case Failure(ex) => failWith(ex) } } } } ~ path("removeworker" / Segment) { workerIdString => post { authorize { val workerId = WorkerId.parse(workerIdString) def future(): Future[CommandResult] = { askWorker[WorkerData](master, workerId, GetWorkerData(workerId)).flatMap{workerData => val containerId = workerData.workerDescription.resourceManagerContainerId askActor[CommandResult](supervisor, RemoveWorker(containerId)) } } onComplete[CommandResult](future()) { case Success(value) => complete(write(value)) case Failure(ex) => failWith(ex) } } } } } } object SupervisorService { case class Status(enabled: Boolean) case class Path(path: String) }
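The removeworker route chains two asks: it first fetches the worker's data, then flatMaps into a second ask that removes the container, and finally hands the combined Future to onComplete to build the HTTP response. The core composition, without the Akka HTTP plumbing (both lookups here are hypothetical):

import scala.concurrent.{ExecutionContext, Future}

object ChainedAsks {
  // Two hypothetical asynchronous lookups that must run in sequence.
  def workerData(workerId: String)(implicit ec: ExecutionContext): Future[String] =
    Future.successful(s"container-of-$workerId")
  def removeContainer(containerId: String)(implicit ec: ExecutionContext): Future[Boolean] =
    Future.successful(true)

  // flatMap chains the second call on the result of the first.
  def removeWorker(workerId: String)(implicit ec: ExecutionContext): Future[Boolean] =
    workerData(workerId).flatMap(containerId => removeContainer(containerId))
}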
Example 189
Source File: MockOAuth2Server.scala From incubator-retired-gearpump with Apache License 2.0 | 5 votes |
package org.apache.gearpump.services.security.oauth2 import scala.concurrent.{Await, Future} import akka.actor.ActorSystem import akka.http.scaladsl.Http import akka.http.scaladsl.Http.ServerBinding import akka.http.scaladsl.model.{HttpRequest, HttpResponse} import akka.stream.ActorMaterializer import akka.stream.scaladsl.Sink import org.apache.gearpump.util.Util // NOTE: This cannot be removed!! import org.apache.gearpump.services.util.UpickleUtil._ class MockOAuth2Server( actorSystem: ActorSystem, var requestHandler: HttpRequest => HttpResponse) { implicit val system: ActorSystem = actorSystem implicit val materializer = ActorMaterializer() implicit val ec = system.dispatcher private var _port: Int = 0 private var bindingFuture: Future[ServerBinding] = null def port: Int = _port def start(): Unit = { _port = Util.findFreePort().get val serverSource = Http().bind(interface = "127.0.0.1", port = _port) bindingFuture = { serverSource.to(Sink.foreach { connection => connection handleWithSyncHandler requestHandler }).run() } } def stop(): Unit = { import scala.concurrent.duration._ Await.result(bindingFuture.map(_.unbind()), 120.seconds) } }
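The mock server keeps the Future returned by bind as a handle and later maps over it to unbind, blocking with Await.result in stop(). The same start/stop handle pattern in miniature, with a hypothetical Binding type in place of Akka HTTP's ServerBinding:

import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.duration._
import scala.concurrent.{Await, Future}

object StartStop {
  // Hypothetical binding handle with an asynchronous unbind.
  final case class Binding(port: Int) { def unbind(): Future[Unit] = Future.successful(()) }

  private var bindingFuture: Future[Binding] = _

  def start(): Unit = { bindingFuture = Future(Binding(8080)) }                    // bind asynchronously
  def stop(): Unit = Await.result(bindingFuture.flatMap(_.unbind()), 30.seconds)   // block until unbound
}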
Example 190
Source File: Test12.scala From incubator-retired-gearpump with Apache License 2.0 | 5 votes |
package org.apache.gearpump.akkastream.example import akka.stream.{ClosedShape, UniformFanInShape} import org.apache.gearpump.akkastream.GearpumpMaterializer import org.apache.gearpump.cluster.main.ArgumentsParser import org.apache.gearpump.util.AkkaApp import scala.concurrent.{Await, Future} object Test12 extends AkkaApp with ArgumentsParser{ // scalastyle:off println override def main(akkaConf: Config, args: Array[String]): Unit = { import akka.actor.ActorSystem import akka.stream.scaladsl._ import scala.concurrent.duration._ implicit val system = ActorSystem("Test12", akkaConfig) // implicit val materializer = ActorMaterializer( // ActorMaterializerSettings(system).withAutoFusing(false) // ) implicit val materializer = GearpumpMaterializer() implicit val ec = system.dispatcher val pickMaxOfThree = GraphDSL.create() { implicit b => import GraphDSL.Implicits._ val zip1 = b.add(ZipWith[Int, Int, Int](math.max)) val zip2 = b.add(ZipWith[Int, Int, Int](math.max)) zip1.out ~> zip2.in0 UniformFanInShape(zip2.out, zip1.in0, zip1.in1, zip2.in1) } val resultSink = Sink.head[Int] val g = RunnableGraph.fromGraph(GraphDSL.create(resultSink) { implicit b => sink => import GraphDSL.Implicits._ // Importing the partial shape will return its shape (inlets & outlets) val pm3 = b.add(pickMaxOfThree) Source.single(1) ~> pm3.in(0) Source.single(2) ~> pm3.in(1) Source.single(3) ~> pm3.in(2) pm3.out ~> sink.in ClosedShape }) val max: Future[Int] = g.run() max.map(x => println(s"maximum of three numbers : $x")) Await.result(system.whenTerminated, 60.minutes) } // scalastyle:on println }
Example 191
Source File: Test8.scala From incubator-retired-gearpump with Apache License 2.0 | 5 votes |
package org.apache.gearpump.akkastream.example import akka.NotUsed import akka.actor.ActorSystem import akka.stream.{ActorMaterializer, ActorMaterializerSettings} import akka.stream.scaladsl._ import org.apache.gearpump.akkastream.GearpumpMaterializer import org.apache.gearpump.cluster.main.{ArgumentsParser, CLIOption} import org.apache.gearpump.util.AkkaApp import scala.concurrent.{Await, Future} import scala.concurrent.duration._ object Test8 extends AkkaApp with ArgumentsParser { // scalastyle:off println override val options: Array[(String, CLIOption[Any])] = Array( "gearpump" -> CLIOption[Boolean]("<boolean>", required = false, defaultValue = Some(false)) ) override def main(akkaConf: Config, args: Array[String]): Unit = { val config = parse(args) implicit val system = ActorSystem("Test8", akkaConf) implicit val materializer: ActorMaterializer = config.getBoolean("gearpump") match { case true => GearpumpMaterializer() case false => ActorMaterializer( ActorMaterializerSettings(system).withAutoFusing(false) ) } implicit val ec = system.dispatcher // Source gives 1 to 100 elements val source: Source[Int, NotUsed] = Source(Stream.from(1).take(100)) val sink: Sink[Int, Future[Int]] = Sink.fold[Int, Int](0)(_ + _) val result: Future[Int] = source.runWith(sink) result.map(sum => { println(s"Sum of stream elements => $sum") }) Await.result(system.whenTerminated, 60.minutes) } // scalastyle:on println }
Example 192
Source File: MapAsyncTask.scala From incubator-retired-gearpump with Apache License 2.0 | 5 votes |
package org.apache.gearpump.akkastream.task import org.apache.gearpump.Message import org.apache.gearpump.cluster.UserConfig import org.apache.gearpump.streaming.task.TaskContext import scala.concurrent.Future class MapAsyncTask[In, Out](context: TaskContext, userConf : UserConfig) extends GraphTask(context, userConf) { val f = userConf.getValue[In => Future[Out]](MapAsyncTask.MAPASYNC_FUNC) implicit val ec = context.system.dispatcher override def onNext(msg: Message) : Unit = { val data = msg.value.asInstanceOf[In] val time = msg.timestamp f match { case Some(func) => val fout = func(data) fout.onComplete(value => { value.foreach(out => { val msg = Message(out, time) context.output(msg) }) }) case None => } } } object MapAsyncTask { val MAPASYNC_FUNC = "MAPASYNC_FUNC" }
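MapAsyncTask applies a user-supplied In => Future[Out] to each message and emits downstream from onComplete; note that only successful results are forwarded (value.foreach), so failures are silently dropped, and completion order rather than input order decides emission order. A stripped-down sketch of that per-element pattern:

import scala.concurrent.{ExecutionContext, Future}

object MapAsyncSketch {
  // Apply an asynchronous function to each element and hand successful results
  // to a downstream callback; failed Futures are simply not emitted.
  def process[In, Out](elements: Seq[In], f: In => Future[Out])(emit: Out => Unit)
                      (implicit ec: ExecutionContext): Unit =
    elements.foreach { in =>
      f(in).onComplete(result => result.foreach(emit))
    }
}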
Example 193
Source File: UIService.scala From incubator-retired-gearpump with Apache License 2.0 | 5 votes |
package org.apache.gearpump.experiments.yarn.appmaster import akka.actor._ import com.typesafe.config.{ConfigFactory, ConfigValueFactory} import org.apache.gearpump.cluster.ClusterConfig import org.apache.gearpump.services.main.Services import org.apache.gearpump.transport.HostPort import org.apache.gearpump.util.{ActorUtil, Constants, LogUtil} import scala.concurrent.Future trait UIFactory { def props(masters: List[HostPort], host: String, port: Int): Props } class UIService(masters: List[HostPort], host: String, port: Int) extends Actor { private val LOG = LogUtil.getLogger(getClass) private val supervisor = ActorUtil.getFullPath(context.system, context.parent.path) private var configFile: java.io.File = null private implicit val dispatcher = context.dispatcher override def postStop(): Unit = { if (configFile != null) { configFile.delete() configFile = null } // TODO: fix this // Hack around to Kill the UI server Services.kill() } override def preStart(): Unit = { Future(start()) } def start(): Unit = { val mastersArg = masters.mkString(",") LOG.info(s"Launching services -master $mastersArg") configFile = java.io.File.createTempFile("uiserver", ".conf") val config = context.system.settings.config. withValue(Constants.GEARPUMP_SERVICE_HOST, ConfigValueFactory.fromAnyRef(host)). withValue(Constants.GEARPUMP_SERVICE_HTTP, ConfigValueFactory.fromAnyRef(port.toString)). withValue(Constants.NETTY_TCP_HOSTNAME, ConfigValueFactory.fromAnyRef(host)) ClusterConfig.saveConfig(config, configFile) val master = masters.head ConfigFactory.invalidateCaches() launch(supervisor, master.host, master.port, configFile.toString) } // Launch the UI server def launch(supervisor: String, masterHost: String, masterPort: Int, configFile: String): Unit = { Services.main(Array("-supervisor", supervisor, "-master", s"$masterHost:$masterPort" , "-conf", configFile)) } override def receive: Receive = { case _ => LOG.error(s"Unknown message received") } } object UIService extends UIFactory { override def props(masters: List[HostPort], host: String, port: Int): Props = { Props(new UIService(masters, host, port)) } }
Example 194
Source File: InMemoryAppStoreOnMaster.scala From incubator-retired-gearpump with Apache License 2.0 | 5 votes |
package org.apache.gearpump.streaming.storage import scala.concurrent.Future import akka.actor.ActorRef import akka.pattern.ask import org.apache.gearpump.cluster.AppMasterToMaster.{GetAppData, GetAppDataResult, SaveAppData} import org.apache.gearpump.util.Constants class InMemoryAppStoreOnMaster(appId: Int, master: ActorRef) extends AppDataStore { implicit val timeout = Constants.FUTURE_TIMEOUT import scala.concurrent.ExecutionContext.Implicits.global override def put(key: String, value: Any): Future[Any] = { master.ask(SaveAppData(appId, key, value)) } override def get(key: String): Future[Any] = { master.ask(GetAppData(appId, key)).asInstanceOf[Future[GetAppDataResult]].map { result => if (result.key.equals(key)) { result.value } else { null } } } }
Example 195
Source File: JarSchedulerSpec.scala From incubator-retired-gearpump with Apache License 2.0 | 5 votes |
package org.apache.gearpump.streaming.appmaster import akka.actor.ActorSystem import org.apache.gearpump.cluster.scheduler.{Resource, ResourceRequest} import org.apache.gearpump.cluster.worker.WorkerId import org.apache.gearpump.cluster.{AppJar, TestUtil} import org.apache.gearpump.jarstore.FilePath import org.apache.gearpump.streaming.partitioner.{HashPartitioner, Partitioner} import org.apache.gearpump.streaming.appmaster.TaskSchedulerSpec.{TestTask1, TestTask2} import org.apache.gearpump.streaming.task.TaskId import org.apache.gearpump.streaming.{DAG, ProcessorDescription, _} import org.apache.gearpump.util.Graph import org.apache.gearpump.util.Graph._ import org.scalatest.{Matchers, WordSpec} import scala.concurrent.{Await, Future} class JarSchedulerSpec extends WordSpec with Matchers { val mockJar1 = AppJar("jar1", FilePath("path")) val mockJar2 = AppJar("jar2", FilePath("path")) val task1 = ProcessorDescription(id = 0, taskClass = classOf[TestTask1].getName, parallelism = 1, jar = mockJar1) val task2 = ProcessorDescription(id = 1, taskClass = classOf[TestTask2].getName, parallelism = 1, jar = mockJar1) val task3 = ProcessorDescription(id = 2, taskClass = classOf[TestTask2].getName, parallelism = 2, jar = mockJar2) val dag = DAG(Graph(task1 ~ Partitioner[HashPartitioner] ~> task2)) import scala.concurrent.duration._ "JarScheduler" should { "schedule tasks depends on app jar" in { val system = ActorSystem("JarSchedulerSpec") implicit val dispatcher = system.dispatcher val manager = new JarScheduler(0, "APP", TestUtil.DEFAULT_CONFIG, system) manager.setDag(dag, Future { 0L }) val requests = Array(ResourceRequest(Resource(2), WorkerId.unspecified)) val result = Await.result(manager.getResourceRequestDetails(), 15.seconds) assert(result.length == 1) assert(result.head.jar == mockJar1) assert(result.head.requests.deep == requests.deep) val tasks = Await.result(manager.scheduleTask(mockJar1, WorkerId(0, 0L), 0, Resource(2)), 15.seconds) assert(tasks.contains(TaskId(0, 0))) assert(tasks.contains(TaskId(1, 0))) val newDag = replaceDAG(dag, 1, task3, 1) manager.setDag(newDag, Future { 0 }) val requestDetails = Await.result(manager.getResourceRequestDetails(). map(_.sortBy(_.jar.name)), 15.seconds) assert(requestDetails.length == 2) assert(requestDetails.last.jar == mockJar2) assert(requestDetails.last.requests.deep == requests.deep) system.terminate() Await.result(system.whenTerminated, Duration.Inf) } } def replaceDAG( dag: DAG, oldProcessorId: ProcessorId, newProcessor: ProcessorDescription, newVersion: Int) : DAG = { val oldProcessorLife = LifeTime(dag.processors(oldProcessorId).life.birth, newProcessor.life.birth) val newProcessorMap = dag.processors ++ Map(oldProcessorId -> dag.processors(oldProcessorId).copy(life = oldProcessorLife), newProcessor.id -> newProcessor) val newGraph = dag.graph.subGraph(oldProcessorId). replaceVertex(oldProcessorId, newProcessor.id).addGraph(dag.graph) new DAG(newVersion, newProcessorMap, newGraph) } }
Example 196
Source File: ConfigFileBasedAuthenticator.scala From incubator-retired-gearpump with Apache License 2.0 | 5 votes |
package org.apache.gearpump.security import scala.concurrent.{ExecutionContext, Future} import com.typesafe.config.Config import org.apache.gearpump.security.Authenticator.AuthenticationResult import org.apache.gearpump.security.ConfigFileBasedAuthenticator._ object ConfigFileBasedAuthenticator { private val ROOT = "gearpump.ui-security.config-file-based-authenticator" private val ADMINS = ROOT + "." + "admins" private val USERS = ROOT + "." + "users" private val GUESTS = ROOT + "." + "guests" private case class Credentials( admins: Map[String, String], users: Map[String, String], guests: Map[String, String]) { def verify(user: String, password: String): AuthenticationResult = { if (admins.contains(user)) { if (verify(user, password, admins)) { Authenticator.Admin } else { Authenticator.UnAuthenticated } } else if (users.contains(user)) { if (verify(user, password, users)) { Authenticator.User } else { Authenticator.UnAuthenticated } } else if (guests.contains(user)) { if (verify(user, password, guests)) { Authenticator.Guest } else { Authenticator.UnAuthenticated } } else { Authenticator.UnAuthenticated } } private def verify(user: String, password: String, map: Map[String, String]): Boolean = { val storedPass = map(user) PasswordUtil.verify(password, storedPass) } } } class ConfigFileBasedAuthenticator(config: Config) extends Authenticator { private val credentials = loadCredentials(config) override def authenticate(user: String, password: String, ec: ExecutionContext) : Future[AuthenticationResult] = { implicit val ctx = ec Future { credentials.verify(user, password) } } private def loadCredentials(config: Config): Credentials = { val admins = configToMap(config, ADMINS) val users = configToMap(config, USERS) val guests = configToMap(config, GUESTS) new Credentials(admins, users, guests) } private def configToMap(config: Config, path: String) = { import scala.collection.JavaConverters._ config.getConfig(path).root.unwrapped.asScala.toMap map { case (k, v) => k -> v.toString } } }
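authenticate takes the ExecutionContext as an explicit argument and makes it implicit locally, so the caller chooses the thread pool on which the password verification runs inside Future { ... }. The same idea in isolation, with a trivial stand-in for the hash check:

import scala.concurrent.{ExecutionContext, Future}

object ExplicitEc {
  // The caller supplies the ExecutionContext, so it decides which thread pool
  // runs the (potentially expensive) verification.
  def verify(user: String, password: String, ec: ExecutionContext): Future[Boolean] = {
    implicit val ctx: ExecutionContext = ec
    Future {
      user.nonEmpty && password.nonEmpty   // stand-in for a bcrypt-style hash comparison
    }
  }
}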
Example 197
Source File: RunningApplication.scala From incubator-retired-gearpump with Apache License 2.0 | 5 votes |
package org.apache.gearpump.cluster.client import akka.actor.ActorRef import akka.pattern.ask import akka.util.Timeout import org.apache.gearpump.cluster.ClientToMaster.{RegisterAppResultListener, ResolveAppId, ShutdownApplication} import org.apache.gearpump.cluster.MasterToClient._ import org.apache.gearpump.cluster.client.RunningApplication._ import org.apache.gearpump.util.{ActorUtil, LogUtil} import org.slf4j.Logger import java.time.Duration import java.util.concurrent.TimeUnit import scala.concurrent.ExecutionContext.Implicits.global import scala.concurrent.Future import scala.concurrent.duration._ import scala.util.{Failure, Success} class RunningApplication(val appId: Int, master: ActorRef, timeout: Timeout) { lazy val appMaster: Future[ActorRef] = resolveAppMaster(appId) def shutDown(): Unit = { val result = ActorUtil.askActor[ShutdownApplicationResult](master, ShutdownApplication(appId), timeout) result.appId match { case Success(_) => case Failure(ex) => throw ex } } def waitUntilFinish(): Unit = { this.waitUntilFinish(INF_DURATION) } def waitUntilFinish(duration: Duration): Unit = { val result = ActorUtil.askActor[ApplicationResult](master, RegisterAppResultListener(appId), new Timeout(duration.getSeconds, TimeUnit.SECONDS)) if (result.appId == appId) { result match { case failed: ApplicationFailed => throw failed.error case _: ApplicationSucceeded => LOG.info(s"Application $appId succeeded") case _: ApplicationTerminated => LOG.info(s"Application $appId terminated") } } else { LOG.warn(s"Received unexpected result $result for application $appId") } } def askAppMaster[T](msg: Any): Future[T] = { appMaster.flatMap(_.ask(msg)(timeout).asInstanceOf[Future[T]]) } private def resolveAppMaster(appId: Int): Future[ActorRef] = { master.ask(ResolveAppId(appId))(timeout). asInstanceOf[Future[ResolveAppIdResult]].map(_.appMaster.get) } } object RunningApplication { private val LOG: Logger = LogUtil.getLogger(getClass) // This magic number is derived from Akka's configuration, which is the maximum delay private val INF_DURATION = Duration.ofSeconds(2147482) }
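appMaster is a lazy val Future[ActorRef]: the ResolveAppId ask runs once, on first use, and every later askAppMaster call reuses the cached Future via flatMap (the spec in Example 199 checks that ResolveAppId is not sent twice). A small sketch of that cache-then-compose pattern, with a hypothetical lookup function:

import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.Future

class CachedResolution(lookup: () => Future[String]) {
  // lazy val: the lookup runs once, on first use; later calls reuse the same Future.
  lazy val endpoint: Future[String] = lookup()

  // Every request piggybacks on the cached Future.
  def send(msg: String): Future[String] =
    endpoint.map(address => s"$msg sent to $address")
}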
Example 198
Source File: JarStoreClient.scala From incubator-retired-gearpump with Apache License 2.0 | 5 votes |
package org.apache.gearpump.jarstore import java.io.File import java.util.concurrent.TimeUnit import scala.collection.JavaConverters._ import scala.concurrent.duration.Duration import scala.concurrent.Await import akka.pattern.ask import akka.actor.{ActorSystem, ActorRef} import com.typesafe.config.Config import org.apache.gearpump.cluster.master.MasterProxy import org.apache.gearpump.util.{Util, Constants, LogUtil} import org.slf4j.Logger import org.apache.gearpump.cluster.ClientToMaster.{GetJarStoreServer, JarStoreServerAddress} import scala.concurrent.{Future, ExecutionContext} class JarStoreClient(config: Config, system: ActorSystem) { private def LOG: Logger = LogUtil.getLogger(getClass) private implicit val timeout = Constants.FUTURE_TIMEOUT private implicit def dispatcher: ExecutionContext = system.dispatcher private val master: ActorRef = { val masters = config.getStringList(Constants.GEARPUMP_CLUSTER_MASTERS) .asScala.flatMap(Util.parseHostList) system.actorOf(MasterProxy.props(masters), s"masterproxy${Util.randInt()}") } private lazy val client = (master ? GetJarStoreServer).asInstanceOf[Future[JarStoreServerAddress]] .map { address => val client = new FileServer.Client(system, address.url) client } def copyFromLocal(localFile: File): FilePath = { val future = client.flatMap(_.upload(localFile)) Await.result(future, Duration(60, TimeUnit.SECONDS)) } }
Example 199
Source File: RunningApplicationSpec.scala From incubator-retired-gearpump with Apache License 2.0 | 5 votes |
package org.apache.gearpump.cluster.client import java.util.concurrent.TimeUnit import akka.actor.ActorSystem import akka.testkit.TestProbe import akka.util.Timeout import org.apache.gearpump.cluster.ClientToMaster.{ResolveAppId, ShutdownApplication} import org.apache.gearpump.cluster.MasterToClient.{ResolveAppIdResult, ShutdownApplicationResult} import org.apache.gearpump.cluster.TestUtil import org.apache.gearpump.cluster.client.RunningApplicationSpec.{MockAskAppMasterRequest, MockAskAppMasterResponse} import org.scalatest.{BeforeAndAfterAll, FlatSpec, Matchers} import scala.concurrent.{Await, Future} import scala.concurrent.duration.Duration import scala.util.{Failure, Success} import scala.concurrent.ExecutionContext.Implicits.global class RunningApplicationSpec extends FlatSpec with Matchers with BeforeAndAfterAll { implicit var system: ActorSystem = _ override def beforeAll(): Unit = { system = ActorSystem("test", TestUtil.DEFAULT_CONFIG) } override def afterAll(): Unit = { system.terminate() Await.result(system.whenTerminated, Duration.Inf) } "RunningApplication" should "be able to shutdown application" in { val errorMsg = "mock exception" val master = TestProbe() val timeout = Timeout(90, TimeUnit.SECONDS) val application = new RunningApplication(1, master.ref, timeout) Future { application.shutDown() } master.expectMsg(ShutdownApplication(1)) master.reply(ShutdownApplicationResult(Success(1))) val result = Future { intercept[Exception] { application.shutDown() } } master.expectMsg(ShutdownApplication(1)) master.reply(ShutdownApplicationResult(Failure(new Exception(errorMsg)))) val exception = Await.result(result, Duration.Inf) assert(exception.getMessage.equals(errorMsg)) } "RunningApplication" should "be able to ask appmaster" in { val master = TestProbe() val appMaster = TestProbe() val appId = 1 val timeout = Timeout(90, TimeUnit.SECONDS) val request = MockAskAppMasterRequest("request") val application = new RunningApplication(appId, master.ref, timeout) val future = application.askAppMaster[MockAskAppMasterResponse](request) master.expectMsg(ResolveAppId(appId)) master.reply(ResolveAppIdResult(Success(appMaster.ref))) appMaster.expectMsg(MockAskAppMasterRequest("request")) appMaster.reply(MockAskAppMasterResponse("response")) val result = Await.result(future, Duration.Inf) assert(result.res.equals("response")) // ResolveAppId should not be called multiple times val future2 = application.askAppMaster[MockAskAppMasterResponse](request) appMaster.expectMsg(MockAskAppMasterRequest("request")) appMaster.reply(MockAskAppMasterResponse("response")) val result2 = Await.result(future2, Duration.Inf) assert(result2.res.equals("response")) } } object RunningApplicationSpec { case class MockAskAppMasterRequest(req: String) case class MockAskAppMasterResponse(res: String) }
Example 200
Source File: MiniCluster.scala From incubator-retired-gearpump with Apache License 2.0 | 5 votes |
package org.apache.gearpump.cluster import akka.actor.{Actor, ActorRef, ActorSystem, Props} import akka.pattern.ask import akka.testkit.TestActorRef import com.typesafe.config.ConfigValueFactory import org.apache.gearpump.cluster.AppMasterToMaster.GetAllWorkers import org.apache.gearpump.cluster.MasterToAppMaster.WorkerList import org.apache.gearpump.cluster.master.Master import org.apache.gearpump.cluster.worker.Worker import org.apache.gearpump.util.Constants import scala.concurrent.duration.Duration import scala.concurrent.{Await, Future} class MiniCluster { private val mockMasterIP = "127.0.0.1" implicit val system = ActorSystem("system", TestUtil.MASTER_CONFIG. withValue(Constants.NETTY_TCP_HOSTNAME, ConfigValueFactory.fromAnyRef(mockMasterIP))) val (mockMaster, worker) = { val master = system.actorOf(Props(classOf[Master]), "master") val worker = system.actorOf(Props(classOf[Worker], master), "worker") // Wait until worker register itself to master waitUtilWorkerIsRegistered(master) (master, worker) } def launchActor(props: Props): TestActorRef[Actor] = { TestActorRef(props) } private def waitUtilWorkerIsRegistered(master: ActorRef): Unit = { while (!isWorkerRegistered(master)) {} } private def isWorkerRegistered(master: ActorRef): Boolean = { import scala.concurrent.duration._ implicit val dispatcher = system.dispatcher implicit val futureTimeout = Constants.FUTURE_TIMEOUT val workerListFuture = (master ? GetAllWorkers).asInstanceOf[Future[WorkerList]] // Waits until the worker is registered. val workers = Await.result[WorkerList](workerListFuture, 15.seconds) workers.workers.size > 0 } def shutDown(): Unit = { system.terminate() Await.result(system.whenTerminated, Duration.Inf) } }
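The worker-registration check above converts the ask into a typed Future[WorkerList] and blocks on it with Await.result, and shutDown() awaits system termination the same way. As a standalone reminder, Await.result blocks the calling thread until the Future completes, or fails with a TimeoutException when the timeout elapses:

import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.duration._
import scala.concurrent.{Await, Future}

object AwaitBridge {
  def main(args: Array[String]): Unit = {
    val workers: Future[List[String]] = Future { List("worker-0") }
    // Block the calling thread until the Future completes, or fail with a
    // TimeoutException after 15 seconds.
    val registered = Await.result(workers, 15.seconds)
    assert(registered.nonEmpty)
  }
}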