scala.concurrent.duration.TimeUnit Scala Examples
The following examples show how to use scala.concurrent.duration.TimeUnit.
Follow the links above each example to view the original project or source file.
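scala.concurrent.duration.TimeUnit is a type alias for java.util.concurrent.TimeUnit, so the JDK constants (NANOSECONDS, MILLISECONDS, SECONDS, ...) are what the examples below pass around. A minimal sketch of how the alias is typically paired with FiniteDuration:

import scala.concurrent.duration.{ FiniteDuration, SECONDS, TimeUnit }

// TimeUnit is the JDK enum re-exported by scala.concurrent.duration.
def toMillis(length: Long, unit: TimeUnit): Long =
  FiniteDuration(length, unit).toMillis

toMillis(3, SECONDS) // 3000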
Example 1
Source File: WatchService.scala From monix-nio with Apache License 2.0
package monix.nio.file

import java.nio.file.StandardWatchEventKinds.{ ENTRY_CREATE, ENTRY_DELETE, ENTRY_MODIFY }
import java.nio.file.WatchEvent.Kind
import java.nio.file.{ Path, WatchEvent, WatchKey }
import com.sun.nio.file.SensitivityWatchEventModifier
import monix.execution.{ Callback, Cancelable, Scheduler }

import scala.concurrent.{ Future, Promise }
import scala.concurrent.duration.TimeUnit
import scala.util.control.NonFatal

abstract class WatchService extends AutoCloseable {
  def poll(timeout: Long, timeUnit: TimeUnit, cb: Callback[Throwable, Option[WatchKey]]): Unit

  def poll(timeout: Long, timeUnit: TimeUnit): Future[Option[WatchKey]] = {
    val p = Promise[Option[WatchKey]]()
    poll(timeout, timeUnit, Callback.fromPromise(p))
    p.future
  }

  def poll(cb: Callback[Throwable, Option[WatchKey]]): Unit

  def poll(): Future[Option[WatchKey]] = {
    val p = Promise[Option[WatchKey]]()
    poll(Callback.fromPromise(p))
    p.future
  }

  def take(cb: Callback[Throwable, WatchKey]): Unit

  def take(): Future[WatchKey] = {
    val p = Promise[WatchKey]()
    take(Callback.fromPromise(p))
    p.future
  }
}

object WatchService {
  val SupportedEvents: Set[Kind[_]] = Set(ENTRY_CREATE, ENTRY_DELETE, ENTRY_MODIFY)

  def apply(path: Path, events: Kind[_]*)(implicit scheduler: Scheduler): WatchService = {
    val watcher = path.getFileSystem.newWatchService()
    val watchFor = if (events.isEmpty) SupportedEvents else events

    path.register(
      watcher,
      watchFor.toArray,
      SensitivityWatchEventModifier.HIGH.asInstanceOf[WatchEvent.Modifier])

    new NIOWatcherServiceImplementation(watcher)
  }

  private final class NIOWatcherServiceImplementation(watcher: java.nio.file.WatchService)(implicit scheduler: Scheduler) extends WatchService {
    override def poll(timeout: Long, timeUnit: TimeUnit, cb: Callback[Throwable, Option[WatchKey]]): Unit = {
      try {
        val key = Option(watcher.poll(timeout, timeUnit))
        cb.onSuccess(key)
      } catch {
        case NonFatal(ex) => cb.onError(ex)
      }
    }

    override def poll(cb: Callback[Throwable, Option[WatchKey]]): Unit = {
      try {
        val key = Option(watcher.poll())
        cb.onSuccess(key)
      } catch {
        case NonFatal(ex) => cb.onError(ex)
      }
    }

    override def take(cb: Callback[Throwable, WatchKey]): Unit = {
      try {
        val key = watcher.take()
        cb.onSuccess(key)
      } catch {
        case NonFatal(ex) => cb.onError(ex)
      }
    }

    override def close(): Unit = cancelable.cancel()

    private[this] val cancelable: Cancelable = Cancelable { () =>
      try watcher.close() catch {
        case NonFatal(ex) => scheduler.reportFailure(ex)
      }
    }
  }
}
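A hedged usage sketch of the abstraction above; the watched directory and the global scheduler are illustrative assumptions, not part of the original file:

import java.nio.file.Paths
import monix.execution.Scheduler.Implicits.global
import monix.nio.file.WatchService
import scala.concurrent.duration.MILLISECONDS

// Watch a hypothetical directory and poll for the next event with a 500 ms timeout.
val service = WatchService(Paths.get("/tmp/watched-dir"))
val nextKey = service.poll(500L, MILLISECONDS) // Future[Option[WatchKey]]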
Example 2
Source File: FOpsTest.scala From scala-server-toolkit with MIT License
package com.avast.sst.catseffect.syntax

import cats.effect.concurrent.Ref
import cats.effect.{Clock, IO, Timer}
import com.avast.sst.catseffect.syntax.time._
import org.scalatest.funsuite.AsyncFunSuite

import scala.concurrent.ExecutionContext
import scala.concurrent.duration.{Duration, TimeUnit}

class FOpsTest extends AsyncFunSuite {

  implicit private val timer: Timer[IO] = IO.timer(ExecutionContext.global)

  test("time") {
    val sleepTime = Duration.fromNanos(500000000)
    implicit val mockClock: Clock[IO] = new Clock[IO] {
      var values = List(0L, sleepTime.toNanos)

      override def monotonic(unit: TimeUnit): IO[Long] = {
        val time = values.head
        values = values.tail
        IO.pure(time)
      }

      override def realTime(unit: TimeUnit): IO[Long] = ???
    }

    val io = for {
      ref <- Ref.of[IO, Option[Duration]](None)
      _ <- IO.sleep(sleepTime).time(d => ref.set(Some(d)))
      result <- ref.get
    } yield assert(result.isDefined && result.get.toMillis === sleepTime.toMillis)

    io.unsafeToFuture()
  }
}
Example 3
Source File: Generators.scala From gospeak with Apache License 2.0
package gospeak.libs.scala.testingutils

import gospeak.libs.scala.domain
import gospeak.libs.scala.domain.TimePeriod
import gospeak.libs.scala.domain.TimePeriod.PeriodUnit
import org.scalacheck.{Arbitrary, Gen}

import scala.concurrent.duration.{FiniteDuration, TimeUnit}
import scala.util.Try

object Generators {
  private def buildDuration(length: Long, unit: TimeUnit): FiniteDuration =
    Try(new FiniteDuration(length, unit)).getOrElse(buildDuration(length / 2, unit))

  implicit val aFiniteDuration: Arbitrary[FiniteDuration] = Arbitrary(for {
    length <- implicitly[Arbitrary[Long]].arbitrary
    unit <- implicitly[Arbitrary[TimeUnit]].arbitrary
  } yield buildDuration(length, unit))

  implicit val aPeriodUnit: Arbitrary[PeriodUnit] = Arbitrary(Gen.oneOf(PeriodUnit.all))

  implicit val aTimePeriod: Arbitrary[TimePeriod] = Arbitrary(for {
    length <- implicitly[Arbitrary[Long]].arbitrary
    unit <- implicitly[Arbitrary[PeriodUnit]].arbitrary
  } yield domain.TimePeriod(length, unit))
}
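Assuming the implicit instances above (plus an Arbitrary[TimeUnit] from the rest of the file) are in scope, a sketch of drawing a sample duration from the generator:

import org.scalacheck.Arbitrary
import gospeak.libs.scala.testingutils.Generators._
import scala.concurrent.duration.FiniteDuration

// buildDuration halves the length until it fits FiniteDuration's bounds,
// so every sampled value is a valid finite duration.
val sample: Option[FiniteDuration] = Arbitrary.arbitrary[FiniteDuration].sample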
Example 4
Source File: ResponseTimeRecorder.scala From prometheus-akka-http with MIT License
package com.lonelyplanet.prometheus

import io.prometheus.client.{CollectorRegistry, Histogram}

import scala.concurrent.duration
import scala.concurrent.duration.{FiniteDuration, TimeUnit}

trait ResponseTimeRecorder {
  def recordResponseTime(endpoint: String, responseTime: FiniteDuration): Unit
}

class PrometheusResponseTimeRecorder(
    metricName: String,
    metricHelp: String,
    buckets: List[Double],
    endpointLabelName: String,
    registry: CollectorRegistry,
    timeUnit: TimeUnit) extends ResponseTimeRecorder {

  private val responseTimes = buildHistogram.register(registry)

  override def recordResponseTime(endpoint: String, responseTime: FiniteDuration): Unit = {
    responseTimes.labels(endpoint).observe(responseTime.toUnit(timeUnit))
  }

  private def buildHistogram = Histogram
    .build()
    .name(metricName)
    .help(metricHelp)
    .labelNames(endpointLabelName)
    .buckets(buckets: _*)
}

object PrometheusResponseTimeRecorder {
  val DefaultBuckets = List(.01, .025, .05, .075, .10, .125, .15, .175, .20, .225, .25, .275, .30, .325, .35, .40, .45, .50, .60, .70, 1.0, 2.0, 3.0, 5.0, 10.0)
  val DefaultMetricName = "request_processing_seconds"
  val DefaultMetricHelp = "Time spent processing request"
  val DefaultEndpointLabel = "endpoint"
  val DefaultTimeUnit = duration.SECONDS

  lazy val DefaultRegistry = CollectorRegistry.defaultRegistry

  lazy val Default = {
    new PrometheusResponseTimeRecorder(
      DefaultMetricName,
      DefaultMetricHelp,
      DefaultBuckets,
      DefaultEndpointLabel,
      DefaultRegistry,
      DefaultTimeUnit)
  }
}

class NoOpResponseTimeRecorder extends ResponseTimeRecorder {
  def recordResponseTime(endpoint: String, responseTime: FiniteDuration): Unit = ()
}
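A short sketch of recording a measurement with the default recorder; the endpoint label and the 150 ms duration are illustrative values only:

import com.lonelyplanet.prometheus.PrometheusResponseTimeRecorder
import scala.concurrent.duration._

// Observe a 150 ms response time for a hypothetical endpoint; the configured
// TimeUnit (SECONDS by default) decides the unit written to the histogram.
val recorder = PrometheusResponseTimeRecorder.Default
recorder.recordResponseTime("/health", 150.millis)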
Example 5
Source File: LoggerMonoidSpec.scala From odin with Apache License 2.0
package io.odin.loggers

import java.util.UUID

import cats.data.WriterT
import cats.effect.{Clock, IO, Timer}
import cats.instances.list._
import cats.instances.tuple._
import cats.instances.unit._
import cats.instances.uuid._
import cats.kernel.laws.discipline.MonoidTests
import cats.syntax.all._
import io.odin.{Level, Logger, LoggerMessage, OdinSpec}
import org.scalacheck.{Arbitrary, Gen}

import scala.concurrent.duration.{FiniteDuration, TimeUnit}

class LoggerMonoidSpec extends OdinSpec {
  type F[A] = WriterT[IO, List[(UUID, LoggerMessage)], A]

  checkAll("Logger", MonoidTests[Logger[F]].monoid)

  it should "(logger1 |+| logger2).log <-> (logger1.log |+| logger2.log)" in {
    forAll { (uuid1: UUID, uuid2: UUID, msg: LoggerMessage) =>
      val logger1: Logger[F] = NamedLogger(uuid1)
      val logger2: Logger[F] = NamedLogger(uuid2)
      val a = (logger1 |+| logger2).log(msg)
      val b = logger1.log(msg) |+| logger2.log(msg)
      a.written.unsafeRunSync() shouldBe b.written.unsafeRunSync()
    }
  }

  it should "(logger1 |+| logger2).log(list) <-> (logger1.log |+| logger2.log(list))" in {
    forAll { (uuid1: UUID, uuid2: UUID, msg: List[LoggerMessage]) =>
      val logger1: Logger[F] = NamedLogger(uuid1)
      val logger2: Logger[F] = NamedLogger(uuid2)
      val a = (logger1 |+| logger2).log(msg)
      val b = logger1.log(msg) |+| logger2.log(msg)
      a.written.unsafeRunSync() shouldBe b.written.unsafeRunSync()
    }
  }

  it should "set minimal level for underlying loggers" in {
    forAll { (uuid1: UUID, uuid2: UUID, level: Level, msg: List[LoggerMessage]) =>
      val logger1: Logger[F] = NamedLogger(uuid1)
      val logger2: Logger[F] = NamedLogger(uuid2)
      val a = (logger1 |+| logger2).withMinimalLevel(level).log(msg)
      val b = (logger1.withMinimalLevel(level) |+| logger2.withMinimalLevel(level)).log(msg)
      a.written.unsafeRunSync() shouldBe b.written.unsafeRunSync()
    }
  }

  case class NamedLogger(loggerId: UUID) extends DefaultLogger[F] {
    def log(msg: LoggerMessage): F[Unit] = WriterT.tell(List(loggerId -> msg))
  }

  implicit def timer: Timer[IO] = new Timer[IO] {
    def clock: Clock[IO] = new Clock[IO] {
      def realTime(unit: TimeUnit): IO[Long] = IO.pure(0)
      def monotonic(unit: TimeUnit): IO[Long] = IO.pure(0)
    }

    def sleep(duration: FiniteDuration): IO[Unit] = ???
  }

  implicit def arbitraryWriterLogger: Arbitrary[Logger[F]] = Arbitrary(
    Gen.uuid.map(NamedLogger)
  )
}
Example 6
Source File: LoggerNatTransformSpec.scala From odin with Apache License 2.0
package io.odin.loggers

import cats.data.{Writer, WriterT}
import cats.effect.{Clock, IO, Timer}
import cats.{~>, Id}
import io.odin.{Level, Logger, LoggerMessage, OdinSpec}

import scala.concurrent.duration.{FiniteDuration, TimeUnit}

class LoggerNatTransformSpec extends OdinSpec {
  type F[A] = Writer[List[LoggerMessage], A]
  type FF[A] = WriterT[IO, List[LoggerMessage], A]

  it should "transform each method" in {
    forAll { (msg: String, ctx: Map[String, String], throwable: Throwable, timestamp: Long) =>
      implicit val clk: Timer[Id] = clock(timestamp)
      val logF = logger.withMinimalLevel(Level.Trace)
      val logFF = logF.mapK(nat).withMinimalLevel(Level.Trace)
      check(logF.trace(msg), logFF.trace(msg))
      check(logF.trace(msg, throwable), logFF.trace(msg, throwable))
      check(logF.trace(msg, ctx), logFF.trace(msg, ctx))
      check(logF.trace(msg, ctx, throwable), logFF.trace(msg, ctx, throwable))
      check(logF.debug(msg), logFF.debug(msg))
      check(logF.debug(msg, throwable), logFF.debug(msg, throwable))
      check(logF.debug(msg, ctx), logFF.debug(msg, ctx))
      check(logF.debug(msg, ctx, throwable), logFF.debug(msg, ctx, throwable))
      check(logF.info(msg), logFF.info(msg))
      check(logF.info(msg, throwable), logFF.info(msg, throwable))
      check(logF.info(msg, ctx), logFF.info(msg, ctx))
      check(logF.info(msg, ctx, throwable), logFF.info(msg, ctx, throwable))
      check(logF.warn(msg), logFF.warn(msg))
      check(logF.warn(msg, throwable), logFF.warn(msg, throwable))
      check(logF.warn(msg, ctx), logFF.warn(msg, ctx))
      check(logF.warn(msg, ctx, throwable), logFF.warn(msg, ctx, throwable))
      check(logF.error(msg), logFF.error(msg))
      check(logF.error(msg, throwable), logFF.error(msg, throwable))
      check(logF.error(msg, ctx), logFF.error(msg, ctx))
      check(logF.error(msg, ctx, throwable), logFF.error(msg, ctx, throwable))
    }
  }

  private val nat: F ~> FF = new (F ~> FF) {
    private val idToIo = new (Id ~> IO) {
      def apply[A](fa: Id[A]): IO[A] = IO.pure(fa)
    }

    def apply[A](fa: F[A]): FF[A] = fa.mapK(idToIo)
  }

  private def clock(timestamp: Long): Timer[Id] = new Timer[Id] {
    def clock: Clock[Id] = new Clock[Id] {
      def realTime(unit: TimeUnit): Id[Long] = timestamp
      def monotonic(unit: TimeUnit): Id[Long] = timestamp
    }

    def sleep(duration: FiniteDuration): Id[Unit] = ???
  }

  private def logger(implicit timer: Timer[Id]): Logger[F] = new WriterTLogger[Id]

  private def check(fnF: => F[Unit], fnFF: => FF[Unit]) = {
    val List(loggerMessageF) = fnF.written
    val List(loggerMessageFF) = fnFF.written.unsafeRunSync()
    loggerMessageEq.eqv(loggerMessageF, loggerMessageFF) shouldBe true
  }
}
Example 7
Source File: ContextualLoggerSpec.scala From odin with Apache License 2.0
package io.odin.loggers

import cats.arrow.FunctionK
import cats.data.{ReaderT, WriterT}
import cats.effect.{Clock, IO, Timer}
import cats.instances.list._
import cats.mtl.instances.all._
import io.odin.syntax._
import io.odin.{LoggerMessage, OdinSpec}

import scala.concurrent.duration.{FiniteDuration, TimeUnit}

class ContextualLoggerSpec extends OdinSpec {
  type W[A] = WriterT[IO, List[LoggerMessage], A]
  type F[A] = ReaderT[W, Map[String, String], A]

  implicit val hasContext: HasContext[Map[String, String]] = (env: Map[String, String]) => env

  implicit val timer: Timer[IO] = new Timer[IO] {
    def clock: Clock[IO] = new Clock[IO] {
      def realTime(unit: TimeUnit): IO[Long] = IO.pure(0)
      def monotonic(unit: TimeUnit): IO[Long] = IO.pure(0)
    }

    def sleep(duration: FiniteDuration): IO[Unit] = ???
  }

  private val logger = new WriterTLogger[IO].mapK(λ[FunctionK[W, F]](ReaderT.liftF(_))).withContext

  checkAll("ContContextLogger", LoggerTests[F](logger, reader => reader.run(Map()).written.unsafeRunSync()).all)

  it should "pick up context from F[_]" in {
    forAll { (loggerMessage: LoggerMessage, ctx: Map[String, String]) =>
      val List(written) = logger.log(loggerMessage).apply(ctx).written.unsafeRunSync()
      written.context shouldBe loggerMessage.context ++ ctx
    }
  }

  it should "embed context in all messages" in {
    forAll { (msgs: List[LoggerMessage], ctx: Map[String, String]) =>
      val written = logger.log(msgs).apply(ctx).written.unsafeRunSync()
      written.map(_.context) shouldBe msgs.map(_.context ++ ctx)
    }
  }
}
Example 8
Source File: OdinSpec.scala From odin with Apache License 2.0
package io.odin

import java.time.LocalDateTime

import cats.effect.{Clock, Timer}
import cats.{Applicative, Eval}
import io.odin.formatter.Formatter
import io.odin.meta.Position
import org.scalacheck.{Arbitrary, Cogen, Gen}
import org.scalatest.flatspec.AnyFlatSpec
import org.scalatest.matchers.should.Matchers
import org.scalatestplus.scalacheck.{Checkers, ScalaCheckDrivenPropertyChecks}
import org.typelevel.discipline.Laws

import scala.concurrent.duration.{FiniteDuration, TimeUnit}

trait OdinSpec extends AnyFlatSpec with Matchers with Checkers with ScalaCheckDrivenPropertyChecks with EqInstances {
  def checkAll(name: String, ruleSet: Laws#RuleSet): Unit = {
    for ((id, prop) <- ruleSet.all.properties)
      it should (name + "." + id) in {
        check(prop)
      }
  }

  def zeroTimer[F[_]](implicit F: Applicative[F]): Timer[F] = new Timer[F] {
    def clock: Clock[F] = new Clock[F] {
      def realTime(unit: TimeUnit): F[Long] = F.pure(0L)

      def monotonic(unit: TimeUnit): F[Long] = F.pure(0L)
    }

    def sleep(duration: FiniteDuration): F[Unit] = ???
  }

  val lineSeparator: String = System.lineSeparator()

  val nonEmptyStringGen: Gen[String] = Gen.nonEmptyListOf(Gen.alphaNumChar).map(_.mkString)

  val levelGen: Gen[Level] = Gen.oneOf(Level.Trace, Level.Debug, Level.Info, Level.Warn, Level.Error)
  implicit val levelArbitrary: Arbitrary[Level] = Arbitrary(levelGen)

  val positionGen: Gen[Position] = for {
    fileName <- nonEmptyStringGen
    enclosureName <- Gen.uuid.map(_.toString)
    packageName <- nonEmptyStringGen
    line <- Gen.posNum[Int]
  } yield {
    Position(fileName, enclosureName, packageName, line)
  }
  implicit val positionArbitrary: Arbitrary[Position] = Arbitrary(positionGen)

  val loggerMessageGen: Gen[LoggerMessage] = {
    val startTime = System.currentTimeMillis()
    for {
      level <- levelGen
      msg <- Gen.alphaNumStr
      context <- Gen.mapOfN(20, nonEmptyStringGen.flatMap(key => nonEmptyStringGen.map(key -> _)))
      exception <- Gen.option(Arbitrary.arbitrary[Throwable])
      position <- positionGen
      threadName <- nonEmptyStringGen
      timestamp <- Gen.choose(0, startTime)
    } yield {
      LoggerMessage(
        level = level,
        message = Eval.now(msg),
        context = context,
        exception = exception,
        position = position,
        threadName = threadName,
        timestamp = timestamp
      )
    }
  }
  implicit val loggerMessageArbitrary: Arbitrary[LoggerMessage] = Arbitrary(loggerMessageGen)

  implicit val cogenLoggerMessage: Cogen[LoggerMessage] =
    Cogen[LoggerMessage]((msg: LoggerMessage) => msg.level.hashCode().toLong + msg.message.value.hashCode().toLong)

  val formatterGen: Gen[Formatter] = Gen.oneOf(Formatter.default, Formatter.colorful)
  implicit val formatterArbitrary: Arbitrary[Formatter] = Arbitrary(formatterGen)

  val localDateTimeGen: Gen[LocalDateTime] = for {
    year <- Gen.choose(0, LocalDateTime.now().getYear)
    month <- Gen.choose(1, 12)
    day <- Gen.choose(1, 28)
    hour <- Gen.choose(0, 23)
    minute <- Gen.choose(0, 59)
    second <- Gen.choose(0, 59)
  } yield {
    LocalDateTime.of(year, month, day, hour, minute, second)
  }
  implicit val localDateTimeArbitrary: Arbitrary[LocalDateTime] = Arbitrary(localDateTimeGen)
}
Example 9
Source File: TaskWatchService.scala From monix-nio with Apache License 2.0
package monix.nio.file

import java.nio.file.WatchEvent.Kind
import java.nio.file.{ Path, WatchKey }

import monix.eval.Task
import monix.execution.{ Callback, Scheduler }

import scala.concurrent.duration.TimeUnit

abstract class TaskWatchService {
  protected val watchService: WatchService

  def poll(timeout: Long, timeUnit: TimeUnit): Task[Option[WatchKey]] =
    Task.create { (scheduler, cb) =>
      watchService.poll(timeout, timeUnit, Callback.forked(cb)(scheduler))
    }

  def poll(): Task[Option[WatchKey]] =
    Task.create { (scheduler, cb) =>
      watchService.poll(Callback.forked(cb)(scheduler))
    }

  def take(): Task[WatchKey] =
    Task.create { (scheduler, cb) =>
      watchService.take(Callback.forked(cb)(scheduler))
    }

  def close(): Task[Unit] =
    Task.now(watchService.close())
}

object TaskWatchService {
  def apply(path: Path, events: Kind[_]*)(implicit s: Scheduler): TaskWatchService = {
    new TaskWatchService {
      override val watchService: WatchService = WatchService.apply(path, events: _*)
    }
  }
}
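A hedged sketch of the Task-based variant; the directory path and the global scheduler are assumptions for illustration:

import java.nio.file.Paths
import monix.execution.Scheduler.Implicits.global
import monix.nio.file.TaskWatchService
import scala.concurrent.duration.SECONDS

// Describe (without running) a poll with a 1-second timeout.
val watcher = TaskWatchService(Paths.get("/tmp/watched-dir"))
val firstKey = watcher.poll(1L, SECONDS) // Task[Option[WatchKey]], lazy until executed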
Example 10
Source File: DWSClientConfig.scala From Linkis with Apache License 2.0
package com.webank.wedatasphere.linkis.httpclient.dws.config

import com.webank.wedatasphere.linkis.common.utils.RetryHandler
import com.webank.wedatasphere.linkis.httpclient.authentication.AuthenticationStrategy
import com.webank.wedatasphere.linkis.httpclient.config.ClientConfig
import com.webank.wedatasphere.linkis.httpclient.loadbalancer.LoadBalancerStrategy

import scala.concurrent.duration.TimeUnit

class DWSClientConfig private[config](serverUrl: String, discoveryEnabled: Boolean, discoveryPeriod: Long, discoveryTimeUnit: TimeUnit,
                                      loadbalancerEnabled: Boolean, loadbalancerStrategy: LoadBalancerStrategy,
                                      authenticationStrategy: AuthenticationStrategy, connectTimeout: Long, readTimeout: Long,
                                      maxConnection: Int, retryEnabled: Boolean, retryHandler: RetryHandler,
                                      authTokenKey: String, authTokenValue: String)
  extends ClientConfig(serverUrl, discoveryEnabled, discoveryPeriod, discoveryTimeUnit, loadbalancerEnabled, loadbalancerStrategy,
    authenticationStrategy, connectTimeout, readTimeout, maxConnection, retryEnabled, retryHandler, authTokenKey, authTokenValue) {

  def this(clientConfig: ClientConfig) = this(clientConfig.getServerUrl, clientConfig.isDiscoveryEnabled, clientConfig.getDiscoveryPeriod,
    clientConfig.getDiscoveryTimeUnit, clientConfig.isLoadbalancerEnabled, clientConfig.getLoadbalancerStrategy,
    clientConfig.getAuthenticationStrategy, clientConfig.getConnectTimeout, clientConfig.getReadTimeout, clientConfig.getMaxConnection,
    clientConfig.isRetryEnabled, clientConfig.getRetryHandler, clientConfig.getAuthTokenKey, clientConfig.getAuthTokenValue)

  private var dwsVersion: String = _

  def setDWSVersion(dwsVersion: String): Unit = this.dwsVersion = dwsVersion

  def getDWSVersion: String = dwsVersion
}
Example 11
Source File: AsyncWatchServiceObservable.scala From monix-nio with Apache License 2.0
package monix.nio.file

import java.nio.file.WatchKey

import monix.eval.Task
import monix.nio.WatchServiceObservable

import scala.concurrent.duration.TimeUnit

final class AsyncWatchServiceObservable(taskWatchService: TaskWatchService) extends WatchServiceObservable {
  override def watchService = Option {
    asyncWatchServiceWrapper(taskWatchService)
  }

  private[file] def asyncWatchServiceWrapper(taskWatchService: TaskWatchService) = new monix.nio.WatchService {
    override def poll(timeout: Long, timeUnit: TimeUnit): Task[Option[WatchKey]] = taskWatchService.poll(timeout, timeUnit)
    override def poll(): Task[Option[WatchKey]] = taskWatchService.poll()
    override def take(): Task[WatchKey] = taskWatchService.take()
    override def close(): Task[Unit] = taskWatchService.close()
  }
}
Example 12
Source File: S3Repository.scala From mleap with Apache License 2.0
package ml.combust.mleap.repository.s3

import java.net.URI
import java.nio.file.{Files, Path}
import java.util.concurrent.Executors

import akka.actor.ActorSystem
import com.amazonaws.services.s3.{AmazonS3ClientBuilder, AmazonS3URI}
import com.typesafe.config.Config
import ml.combust.mleap.executor.repository.{Repository, RepositoryProvider}

import scala.concurrent.{ExecutionContext, Future}
import scala.concurrent.duration.TimeUnit
import scala.util.Try

class S3RepositoryConfig(config: Config) {
  val threads: Int = config.getInt("threads")
}

class S3Repository(config: S3RepositoryConfig) extends Repository {
  private val client = AmazonS3ClientBuilder.defaultClient()
  private val threadPool = Executors.newFixedThreadPool(config.threads)
  implicit val diskEc: ExecutionContext = ExecutionContext.fromExecutor(threadPool)

  override def downloadBundle(uri: URI): Future[Path] = Future {
    val s3Uri = new AmazonS3URI(uri)
    val bucket = s3Uri.getBucket
    val key = s3Uri.getKey

    val tmpFile = Files.createTempFile("mleap", ".bundle.zip")
    Files.copy(client.getObject(bucket, key).getObjectContent, tmpFile)
    tmpFile
  }

  override def canHandle(uri: URI): Boolean = Try(new AmazonS3URI(uri)).isSuccess

  override def shutdown(): Unit = threadPool.shutdown()
  override def awaitTermination(timeout: Long, unit: TimeUnit): Unit = threadPool.awaitTermination(timeout, unit)
}

class S3RepositoryProvider extends RepositoryProvider {
  override def create(config: Config)
                     (implicit system: ActorSystem): S3Repository = {
    new S3Repository(new S3RepositoryConfig(config))
  }
}
Example 13
Source File: FileRepository.scala From mleap with Apache License 2.0
package ml.combust.mleap.executor.repository

import java.io.File
import java.net.URI
import java.nio.file.{Files, Path, StandardCopyOption}
import java.util.concurrent.Executors

import akka.actor.ActorSystem
import com.typesafe.config.{Config, ConfigFactory}
import ml.combust.mleap.executor.error.BundleException

import scala.concurrent.duration.TimeUnit
import scala.concurrent.{ExecutionContext, Future}

object FileRepositoryConfig {
  val defaults: Config = ConfigFactory.load().getConfig("ml.combust.mleap.executor.repository-defaults.file")
}

class FileRepositoryConfig(_config: Config) {
  val config: Config = _config.withFallback(FileRepositoryConfig.defaults)

  val move: Boolean = config.getBoolean("move")
  val threads: Int = config.getInt("threads")
}

class FileRepository(config: FileRepositoryConfig) extends Repository {
  private val threadPool = Executors.newFixedThreadPool(config.threads)
  implicit val diskEc: ExecutionContext = ExecutionContext.fromExecutor(threadPool)

  def this() = this(new FileRepositoryConfig(FileRepositoryConfig.defaults))

  override def downloadBundle(uri: URI): Future[Path] = Future {
    if (uri.getPath.isEmpty) {
      throw new BundleException("file path cannot be empty")
    }

    val local = new File(uri.getPath).toPath

    if (!Files.exists(local)) {
      throw new BundleException(s"file does not exist $local")
    }

    if (config.move) {
      val tmpFile = Files.createTempFile("mleap", ".bundle.zip")
      Files.copy(local, tmpFile, StandardCopyOption.REPLACE_EXISTING)
      tmpFile.toFile.deleteOnExit()
      tmpFile
    } else {
      local
    }
  }

  override def canHandle(uri: URI): Boolean = uri.getScheme == "file" || uri.getScheme == "jar:file"

  override def shutdown(): Unit = threadPool.shutdown()
  override def awaitTermination(timeout: Long, unit: TimeUnit): Unit = threadPool.awaitTermination(timeout, unit)
}

object FileRepositoryProvider extends RepositoryProvider {
  override def create(tConfig: Config)
                     (implicit system: ActorSystem): Repository = {
    val config = new FileRepositoryConfig(tConfig)

    new FileRepository(config)
  }
}
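A sketch of the repository lifecycle with a placeholder bundle path; note that awaitTermination takes the timeout as a plain Long plus a TimeUnit:

import java.net.URI
import ml.combust.mleap.executor.repository.FileRepository
import scala.concurrent.duration.SECONDS

// Resolve a local bundle (hypothetical path), then shut the thread pool down.
val repository = new FileRepository()
val bundle = repository.downloadBundle(new URI("file:///tmp/model.bundle.zip")) // Future[Path]
repository.shutdown()
repository.awaitTermination(10L, SECONDS)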
Example 14
Source File: HttpRepository.scala From mleap with Apache License 2.0
package ml.combust.mleap.executor.repository

import java.net.URI
import java.nio.file.{Files, Path}
import java.util.concurrent.Executors

import akka.actor.ActorSystem
import com.typesafe.config.{Config, ConfigFactory}

import scala.concurrent.{ExecutionContext, Future}
import scala.concurrent.duration.TimeUnit

object HttpRepositoryConfig {
  val defaults: Config = ConfigFactory.load().getConfig("ml.combust.mleap.executor.repository-defaults.http")
}

class HttpRepositoryConfig(_config: Config) {
  val config: Config = _config.withFallback(FileRepositoryConfig.defaults)

  val threads: Int = config.getInt("threads")
}

class HttpRepository(config: HttpRepositoryConfig) extends Repository {
  private val threadPool = Executors.newFixedThreadPool(config.threads)
  implicit val diskEc: ExecutionContext = ExecutionContext.fromExecutor(threadPool)

  override def downloadBundle(uri: URI): Future[Path] = Future {
    val tmpFile = Files.createTempFile("mleap", ".bundle.zip")
    Files.copy(uri.toURL.openStream(), tmpFile)
    tmpFile
  }

  override def canHandle(uri: URI): Boolean = uri.getScheme == "http" || uri.getScheme == "https"

  override def shutdown(): Unit = threadPool.shutdown()
  override def awaitTermination(timeout: Long, unit: TimeUnit): Unit = threadPool.awaitTermination(timeout, unit)
}

object HttpRepositoryProvider extends RepositoryProvider {
  override def create(config: Config)
                     (implicit system: ActorSystem): HttpRepository = {
    new HttpRepository(new HttpRepositoryConfig(config))
  }
}
Example 15
Source File: TimeFormat.scala From flint with Apache License 2.0
package com.twosigma.flint.timeseries.time

import java.util.concurrent.TimeUnit

import org.joda.time.format.{ DateTimeFormat, DateTimeFormatter, ISODateTimeFormat }
import org.joda.time.{ DateTime, DateTimeZone }

import scala.concurrent.duration.TimeUnit
import scala.util.Try

object TimeFormat {

  protected[flint] def parseNano(text: String, timeZone: DateTimeZone = DateTimeZone.UTC): Long =
    parse(text, timeZone, timeUnit = TimeUnit.NANOSECONDS)

  private val formatters: List[DateTimeFormatter] = List(
    // Double `HH` formatter
    DateTimeFormat.forPattern("yyyyMMdd HH:mm:ss.SSS Z"),
    DateTimeFormat.forPattern("yyyyMMdd HH:mm:ss Z"),
    DateTimeFormat.forPattern("yyyyMMdd HH:mm Z"),
    DateTimeFormat.forPattern("yyyyMMdd HH:mm:ss.SSS"),
    DateTimeFormat.forPattern("yyyyMMdd HH:mm:ss"),
    DateTimeFormat.forPattern("yyyyMMdd HH:mm"),
    DateTimeFormat.forPattern("yyyyMMdd"),
    DateTimeFormat.forPattern("yyyy-MM-dd HH:mm:ss.SSS Z"),
    DateTimeFormat.forPattern("yyyy-MM-dd HH:mm:ss Z"),
    DateTimeFormat.forPattern("yyyy-MM-dd HH:mm Z"),
    DateTimeFormat.forPattern("yyyy-MM-dd HH:mm:ss.SSS"),
    DateTimeFormat.forPattern("yyyy-MM-dd HH:mm:ss"),
    DateTimeFormat.forPattern("yyyy-MM-dd HH:mm"),
    DateTimeFormat.forPattern("yyyy-MM-dd"),
    // Single `H` formatter
    DateTimeFormat.forPattern("yyyyMMdd H:mm:ss.SSS"),
    DateTimeFormat.forPattern("yyyyMMdd H:mm:ss.SSS Z"),
    DateTimeFormat.forPattern("yyyy-MM-dd H:mm:ss.SSS"),
    DateTimeFormat.forPattern("yyyy-MM-dd H:mm:ss.SSS Z"),
    // ISO DateTime
    ISODateTimeFormat.dateTimeParser()
  )
}
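The parse helper referenced by parseNano is not shown above. Purely as an illustration of the unit conversion involved, a timestamp matching one of the listed patterns can be parsed with Joda-Time and the resulting epoch milliseconds converted to nanoseconds:

import java.util.concurrent.TimeUnit
import org.joda.time.DateTimeZone
import org.joda.time.format.DateTimeFormat

// Illustrative only: parse a timestamp, then convert epoch millis to nanos.
val millis = DateTimeFormat.forPattern("yyyy-MM-dd HH:mm:ss")
  .withZone(DateTimeZone.UTC)
  .parseDateTime("2020-01-01 00:00:00")
  .getMillis
val nanos = TimeUnit.MILLISECONDS.toNanos(millis)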
Example 16
Source File: ClientConfig.scala From Linkis with Apache License 2.0
package com.webank.wedatasphere.linkis.httpclient.config

import com.webank.wedatasphere.linkis.common.utils.RetryHandler
import com.webank.wedatasphere.linkis.httpclient.authentication.{AbstractAuthenticationStrategy, AuthenticationStrategy}
import com.webank.wedatasphere.linkis.httpclient.loadbalancer.LoadBalancerStrategy

import scala.concurrent.duration.TimeUnit

class ClientConfig private() {
  private var serverUrl: String = _
  private var discoveryEnabled: Boolean = false
  private var discoveryPeriod: Long = _
  private var discoveryTimeUnit: TimeUnit = _
  private var loadbalancerEnabled: Boolean = false
  private var loadbalancerStrategy: LoadBalancerStrategy = _
  private var authenticationStrategy: AuthenticationStrategy = _
  private var authTokenKey: String = _
  private var authTokenValue: String = _
  private var connectTimeout: Long = _
  private var readTimeout: Long = _
  private var maxConnection: Int = _
  private var retryEnabled: Boolean = _
  private var retryHandler: RetryHandler = _

  protected[config] def this(serverUrl: String, discoveryEnabled: Boolean, discoveryPeriod: Long, discoveryTimeUnit: TimeUnit,
                             loadbalancerEnabled: Boolean, loadbalancerStrategy: LoadBalancerStrategy,
                             authenticationStrategy: AuthenticationStrategy, connectTimeout: Long, readTimeout: Long,
                             maxConnection: Int, retryEnabled: Boolean, retryHandler: RetryHandler,
                             authTokenKey: String, authTokenValue: String) = {
    this()
    this.serverUrl = serverUrl
    this.discoveryEnabled = discoveryEnabled
    this.discoveryPeriod = discoveryPeriod
    this.discoveryTimeUnit = discoveryTimeUnit
    this.loadbalancerEnabled = loadbalancerEnabled
    this.loadbalancerStrategy = loadbalancerStrategy
    this.authenticationStrategy = authenticationStrategy
    this.connectTimeout = connectTimeout
    this.readTimeout = readTimeout
    this.maxConnection = maxConnection
    this.retryEnabled = retryEnabled
    this.retryHandler = retryHandler
    this.authTokenKey = authTokenKey
    this.authTokenValue = authTokenValue
    authenticationStrategy match {
      case ab: AbstractAuthenticationStrategy => ab.setClientConfig(this)
      case _ =>
    }
  }

  def getServerUrl = serverUrl

  def isDiscoveryEnabled = discoveryEnabled

  def getDiscoveryPeriod = discoveryPeriod

  def getDiscoveryTimeUnit = discoveryTimeUnit

  def isLoadbalancerEnabled = loadbalancerEnabled

  def getLoadbalancerStrategy = loadbalancerStrategy

  def getAuthenticationStrategy = authenticationStrategy

  def getAuthTokenKey: String = authTokenKey

  def getAuthTokenValue: String = authTokenValue

  def getConnectTimeout = connectTimeout

  def getReadTimeout = readTimeout

  def getMaxConnection = maxConnection

  def isRetryEnabled = retryEnabled

  def getRetryHandler = retryHandler
}
Example 17
Source File: ClientConfigBuilder.scala From Linkis with Apache License 2.0
package com.webank.wedatasphere.linkis.httpclient.config

import com.webank.wedatasphere.linkis.common.utils.RetryHandler
import com.webank.wedatasphere.linkis.httpclient.authentication.AuthenticationStrategy
import com.webank.wedatasphere.linkis.httpclient.loadbalancer.LoadBalancerStrategy

import scala.concurrent.duration.TimeUnit

class ClientConfigBuilder protected() {

  protected var serverUrl: String = _
  protected var discoveryEnabled: Boolean = false
  protected var discoveryPeriod: Long = _
  protected var discoveryTimeUnit: TimeUnit = _
  protected var loadbalancerEnabled: Boolean = false
  protected var loadbalancerStrategy: LoadBalancerStrategy = _
  protected var authenticationStrategy: AuthenticationStrategy = _
  protected var authTokenKey: String = _
  protected var authTokenValue: String = _
  protected var connectTimeout: Long = _
  protected var readTimeout: Long = _
  protected var maxConnection: Int = _
  protected var retryEnabled: Boolean = _
  protected var retryHandler: RetryHandler = _

  def addUJESServerUrl(serverUrl: String): this.type = {
    this.serverUrl = serverUrl
    this
  }

  def discoveryEnabled(isDiscoveryEnabled: Boolean): this.type = {
    this.discoveryEnabled = isDiscoveryEnabled
    this
  }

  def discoveryFrequency(period: Long, timeUnit: TimeUnit): this.type = {
    this.discoveryPeriod = period
    this.discoveryTimeUnit = timeUnit
    this
  }

  def loadbalancerEnabled(isBalanceEnabled: Boolean): this.type = {
    this.loadbalancerEnabled = isBalanceEnabled
    this
  }

  def setBalancerStrategy(loadbalancerStrategy: LoadBalancerStrategy): this.type = {
    this.loadbalancerStrategy = loadbalancerStrategy
    this
  }

  def setAuthenticationStrategy(authenticationStrategy: AuthenticationStrategy): this.type = {
    this.authenticationStrategy = authenticationStrategy
    this
  }

  def connectionTimeout(connectTimeout: Long): this.type = {
    this.connectTimeout = connectTimeout
    this
  }

  def readTimeout(readTimeout: Long): this.type = {
    this.readTimeout = readTimeout
    this
  }

  def maxConnectionSize(maxConnection: Int): this.type = {
    this.maxConnection = maxConnection
    this
  }

  def retryEnabled(isRetryEnabled: Boolean): this.type = {
    this.retryEnabled = isRetryEnabled
    this
  }

  def setRetryHandler(retryHandler: RetryHandler): this.type = {
    this.retryHandler = retryHandler
    this
  }

  def setAuthTokenKey(authTokenKey: String): this.type = {
    this.authTokenKey = authTokenKey
    this
  }

  def setAuthTokenValue(authTokenValue: String): this.type = {
    this.authTokenValue = authTokenValue
    this
  }

  def build(): ClientConfig = new ClientConfig(serverUrl, discoveryEnabled, discoveryPeriod, discoveryTimeUnit, loadbalancerEnabled,
    loadbalancerStrategy, authenticationStrategy, connectTimeout, readTimeout, maxConnection, retryEnabled, retryHandler,
    authTokenKey, authTokenValue)
}

object ClientConfigBuilder {
  def newBuilder(): ClientConfigBuilder = new ClientConfigBuilder
}
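A sketch of assembling a ClientConfig through the builder; the gateway URL, timeouts, and connection limit are placeholder values:

import com.webank.wedatasphere.linkis.httpclient.config.{ClientConfig, ClientConfigBuilder}
import scala.concurrent.duration.MINUTES

// The TimeUnit passed to discoveryFrequency tells the client how to interpret the period.
val clientConfig: ClientConfig = ClientConfigBuilder.newBuilder()
  .addUJESServerUrl("http://127.0.0.1:9001")
  .discoveryEnabled(true)
  .discoveryFrequency(1L, MINUTES)
  .connectionTimeout(30000L)
  .readTimeout(30000L)
  .maxConnectionSize(10)
  .retryEnabled(false)
  .build()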