java.time.Duration Scala Examples
The following examples show how to use java.time.Duration.
Each example is taken from an open-source project and notes its source file and license.
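Before the project-sized examples below, here is a minimal standalone sketch (not taken from any of the projects) of the core java.time.Duration operations they rely on: factory methods, arithmetic, numeric conversions, and measuring the span between two Instants.

import java.time.{Duration, Instant}

object DurationBasics extends App {
  // Factory methods, as used throughout the examples below
  val oneSecond: Duration = Duration.ofSeconds(1)
  val oneDay: Duration = Duration.ofDays(1)

  // Arithmetic and sign checks
  val total: Duration = oneDay.plus(oneSecond)
  println(total.isNegative) // false

  // Conversions to plain numbers
  println(oneSecond.toMillis) // 1000

  // Measuring the span between two instants
  val start = Instant.now()
  val elapsed = Duration.between(start, start.plusSeconds(90))
  println(elapsed.getSeconds) // 90
}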
Example 1
Source File: IntegrationTest.scala From kmq with Apache License 2.0
package com.softwaremill.kmq.redelivery

import java.time.Duration
import java.util.Random

import akka.actor.ActorSystem
import akka.kafka.scaladsl.{Consumer, Producer}
import akka.kafka.{ConsumerSettings, ProducerMessage, ProducerSettings, Subscriptions}
import akka.stream.ActorMaterializer
import akka.testkit.TestKit
import com.softwaremill.kmq._
import com.softwaremill.kmq.redelivery.infrastructure.KafkaSpec
import org.apache.kafka.clients.consumer.ConsumerConfig
import org.apache.kafka.clients.producer.{ProducerConfig, ProducerRecord}
import org.apache.kafka.common.serialization.StringDeserializer
import org.scalatest.concurrent.Eventually
import org.scalatest.time.{Seconds, Span}
import org.scalatest.{BeforeAndAfterAll, FlatSpecLike, Matchers}

import scala.collection.mutable.ArrayBuffer

class IntegrationTest extends TestKit(ActorSystem("test-system")) with FlatSpecLike with KafkaSpec
    with BeforeAndAfterAll with Eventually with Matchers {

  implicit val materializer = ActorMaterializer()
  import system.dispatcher

  "KMQ" should "resend message if not committed" in {
    val bootstrapServer = s"localhost:${testKafkaConfig.kafkaPort}"
    val kmqConfig = new KmqConfig("queue", "markers", "kmq_client", "kmq_redelivery",
      Duration.ofSeconds(1).toMillis, 1000)

    val consumerSettings = ConsumerSettings(system, new StringDeserializer, new StringDeserializer)
      .withBootstrapServers(bootstrapServer)
      .withGroupId(kmqConfig.getMsgConsumerGroupId)
      .withProperty(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest")

    val markerProducerSettings = ProducerSettings(system,
      new MarkerKey.MarkerKeySerializer(), new MarkerValue.MarkerValueSerializer())
      .withBootstrapServers(bootstrapServer)
      .withProperty(ProducerConfig.PARTITIONER_CLASS_CONFIG, classOf[ParititionFromMarkerKey].getName)
    val markerProducer = markerProducerSettings.createKafkaProducer()

    val random = new Random()

    lazy val processedMessages = ArrayBuffer[String]()
    lazy val receivedMessages = ArrayBuffer[String]()

    val control = Consumer.committableSource(consumerSettings, Subscriptions.topics(kmqConfig.getMsgTopic)) // 1. get messages from topic
      .map { msg =>
        ProducerMessage.Message(
          new ProducerRecord[MarkerKey, MarkerValue](kmqConfig.getMarkerTopic,
            MarkerKey.fromRecord(msg.record), new StartMarker(kmqConfig.getMsgTimeoutMs)), msg)
      }
      .via(Producer.flow(markerProducerSettings, markerProducer)) // 2. write the "start" marker
      .map(_.message.passThrough)
      .mapAsync(1) { msg =>
        msg.committableOffset.commitScaladsl().map(_ => msg.record) // this should be batched
      }
      .map { msg =>
        receivedMessages += msg.value
        msg
      }
      .filter(_ => random.nextInt(5) != 0)
      .map { processedMessage =>
        processedMessages += processedMessage.value
        new ProducerRecord[MarkerKey, MarkerValue](kmqConfig.getMarkerTopic,
          MarkerKey.fromRecord(processedMessage), EndMarker.INSTANCE)
      }
      .to(Producer.plainSink(markerProducerSettings, markerProducer)) // 5. write "end" markers
      .run()

    val redeliveryHook = RedeliveryTracker.start(new KafkaClients(bootstrapServer), kmqConfig)

    val messages = (0 to 20).map(_.toString)
    messages.foreach(msg => sendToKafka(kmqConfig.getMsgTopic, msg))

    eventually {
      receivedMessages.size should be > processedMessages.size
      processedMessages.sortBy(_.toInt).distinct shouldBe messages
    }(PatienceConfig(timeout = Span(15, Seconds)), implicitly)

    redeliveryHook.close()
    control.shutdown()
  }

  override def afterAll(): Unit = {
    super.afterAll()
    TestKit.shutdownActorSystem(system)
  }
}
Example 2
Source File: TestConfiguratorMain.scala From ohara with Apache License 2.0
package oharastream.ohara.configurator

import java.util.concurrent.{Executors, TimeUnit}

import oharastream.ohara.common.rule.OharaTest
import oharastream.ohara.common.util.{CommonUtils, Releasable}
import oharastream.ohara.configurator.Configurator.Mode
import org.junit.{After, Test}
import org.scalatest.matchers.should.Matchers._

import scala.concurrent.{ExecutionContext, Future}

class TestConfiguratorMain extends OharaTest {
  @Test
  def illegalK8sUrl(): Unit =
    intercept[IllegalArgumentException] {
      Configurator.main(Array[String](Configurator.K8S_KEY, s"http://localhost:${CommonUtils.availablePort()}"))
    }.getMessage should include("unable to access")

  @Test
  def emptyK8sArgument(): Unit =
    an[IllegalArgumentException] should be thrownBy Configurator.main(Array[String](Configurator.K8S_KEY, ""))

  @Test
  def nullK8sArgument(): Unit =
    an[IllegalArgumentException] should be thrownBy Configurator.main(Array[String](Configurator.K8S_KEY))

  @Test
  def fakeWithK8s(): Unit =
    an[IllegalArgumentException] should be thrownBy Configurator.main(
      Array[String](Configurator.K8S_KEY, "http://localhost", Configurator.FAKE_KEY, "true")
    )

  @Test
  def k8sWithFake(): Unit =
    an[IllegalArgumentException] should be thrownBy Configurator.main(
      Array[String](Configurator.FAKE_KEY, "true", Configurator.K8S_KEY, "http://localhost")
    )

  @Test
  def testFakeMode(): Unit =
    runMain(
      Array[String](Configurator.HOSTNAME_KEY, "localhost", Configurator.PORT_KEY, "0", Configurator.FAKE_KEY, "true"),
      configurator => configurator.mode shouldBe Mode.FAKE
    )

  @Test
  def testDockerMode(): Unit =
    runMain(
      Array[String](Configurator.HOSTNAME_KEY, "localhost", Configurator.PORT_KEY, "0"),
      configurator => configurator.mode shouldBe Mode.DOCKER
    )

  private[this] def runMain(args: Array[String], action: Configurator => Unit): Unit = {
    Configurator.GLOBAL_CONFIGURATOR_SHOULD_CLOSE = false
    val service = ExecutionContext.fromExecutorService(Executors.newSingleThreadExecutor())
    Future[Unit](Configurator.main(args))(service)
    import java.time.Duration
    try {
      CommonUtils.await(() => Configurator.GLOBAL_CONFIGURATOR_RUNNING, Duration.ofSeconds(30))
      action(Configurator.GLOBAL_CONFIGURATOR)
    } finally {
      Configurator.GLOBAL_CONFIGURATOR_SHOULD_CLOSE = true
      service.shutdownNow()
      service.awaitTermination(60, TimeUnit.SECONDS)
    }
  }

  @After
  def tearDown(): Unit = {
    Configurator.GLOBAL_CONFIGURATOR_SHOULD_CLOSE = false
    Releasable.close(Configurator.GLOBAL_CONFIGURATOR)
    Configurator.GLOBAL_CONFIGURATOR == null
  }
}
Example 3
Source File: package.scala From daml with Apache License 2.0
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.ledger.participant.state.index

import java.time.{Duration, Instant}

import akka.NotUsed
import akka.stream.scaladsl.Source
import com.daml.lf.data.Ref
import com.daml.lf.value.Value
import com.daml.ledger.api.domain._

package object v2 {

  object AcsUpdateEvent {

    final case class Create(
        transactionId: TransactionId,
        eventId: EventId,
        contractId: Value.ContractId,
        templateId: Ref.Identifier,
        argument: Value.VersionedValue[Value.ContractId],
        // TODO(JM,SM): understand witnessing parties
        stakeholders: Set[Ref.Party],
        contractKey: Option[Value.VersionedValue[Value.ContractId]],
        signatories: Set[Ref.Party],
        observers: Set[Ref.Party],
        agreementText: String
    )
  }

  final case class ActiveContractSetSnapshot(
      takenAt: LedgerOffset.Absolute,
      activeContracts: Source[(Option[WorkflowId], AcsUpdateEvent.Create), NotUsed])

  final case class CommandDeduplicationDuplicate(deduplicateUntil: Instant)
      extends CommandDeduplicationResult
}
Example 4
Source File: SqlLedgerFactory.scala From daml with Apache License 2.0
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.ledger.on.sql

import java.time.Duration

import akka.stream.Materializer
import com.daml.caching
import com.daml.ledger.participant.state.kvutils.api.KeyValueParticipantState
import com.daml.ledger.participant.state.kvutils.app.{
  Config,
  LedgerFactory,
  ParticipantConfig,
  ReadWriteService
}
import com.daml.ledger.participant.state.kvutils.caching._
import com.daml.ledger.participant.state.v1.SeedService
import com.daml.lf.engine.Engine
import com.daml.logging.LoggingContext
import com.daml.platform.configuration.LedgerConfiguration
import com.daml.resources.{Resource, ResourceOwner}
import scopt.OptionParser

import scala.concurrent.ExecutionContext

object SqlLedgerFactory extends LedgerFactory[ReadWriteService, ExtraConfig] {
  override val defaultExtraConfig: ExtraConfig = ExtraConfig(
    jdbcUrl = None,
  )

  override def ledgerConfig(config: Config[ExtraConfig]): LedgerConfiguration =
    super.ledgerConfig(config).copy(initialConfigurationSubmitDelay = Duration.ZERO)

  override def extraConfigParser(parser: OptionParser[Config[ExtraConfig]]): Unit = {
    parser
      .opt[String]("jdbc-url")
      .required()
      .text("The URL used to connect to the database.")
      .action((jdbcUrl, config) => config.copy(extra = config.extra.copy(jdbcUrl = Some(jdbcUrl))))
    ()
  }

  override def manipulateConfig(config: Config[ExtraConfig]): Config[ExtraConfig] =
    config.copy(participants = config.participants.map(_.copy(allowExistingSchemaForIndex = true)))

  override def readWriteServiceOwner(
      config: Config[ExtraConfig],
      participantConfig: ParticipantConfig,
      engine: Engine,
  )(implicit materializer: Materializer, logCtx: LoggingContext): ResourceOwner[ReadWriteService] =
    new Owner(config, participantConfig, engine)

  class Owner(
      config: Config[ExtraConfig],
      participantConfig: ParticipantConfig,
      engine: Engine,
  )(implicit materializer: Materializer, logCtx: LoggingContext)
      extends ResourceOwner[KeyValueParticipantState] {
    override def acquire()(
        implicit executionContext: ExecutionContext
    ): Resource[KeyValueParticipantState] = {
      val jdbcUrl = config.extra.jdbcUrl.getOrElse {
        throw new IllegalStateException("No JDBC URL provided.")
      }
      val metrics = createMetrics(participantConfig, config)
      new SqlLedgerReaderWriter.Owner(
        config.ledgerId,
        participantConfig.participantId,
        metrics = metrics,
        engine,
        jdbcUrl,
        stateValueCache = caching.WeightedCache.from(
          configuration = config.stateValueCache,
          metrics = metrics.daml.kvutils.submission.validator.stateValueCache,
        ),
        seedService = SeedService(config.seeding),
        resetOnStartup = false
      ).acquire()
        .map(readerWriter => new KeyValueParticipantState(readerWriter, readerWriter, metrics))
    }
  }
}
Example 5
Source File: LedgerConfiguration.scala From daml with Apache License 2.0
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.platform.configuration

import java.time.Duration

import com.daml.ledger.participant.state.v1.{Configuration, TimeModel}

// Companion object (other members of the original file are elided in this excerpt)
object LedgerConfiguration {

  val defaultRemote: LedgerConfiguration = LedgerConfiguration(
    initialConfiguration = Configuration(
      generation = 1,
      timeModel = TimeModel.reasonableDefault,
      maxDeduplicationTime = Duration.ofDays(1)
    ),
    initialConfigurationSubmitDelay = Duration.ofSeconds(5),
    configurationLoadTimeout = Duration.ofSeconds(10),
  )
}
Example 6
Source File: MetricsReporting.scala From daml with Apache License 2.0
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.platform.sandbox.metrics

import java.time.Duration
import java.util.concurrent.TimeUnit

import com.codahale.metrics.Slf4jReporter.LoggingLevel
import com.codahale.metrics.jmx.JmxReporter
import com.codahale.metrics.{MetricRegistry, Reporter, Slf4jReporter}
import com.daml.metrics.{JvmMetricSet, Metrics}
import com.daml.platform.configuration.MetricsReporter
import com.daml.resources.{Resource, ResourceOwner}

import scala.concurrent.{ExecutionContext, Future}

final class MetricsReporting(
    jmxDomain: String,
    extraMetricsReporter: Option[MetricsReporter],
    extraMetricsReportingInterval: Duration,
) extends ResourceOwner[Metrics] {
  def acquire()(implicit executionContext: ExecutionContext): Resource[Metrics] = {
    val registry = new MetricRegistry
    registry.registerAll(new JvmMetricSet)
    for {
      slf4JReporter <- acquire(newSlf4jReporter(registry))
      _ <- acquire(newJmxReporter(registry))
        .map(_.start())
      _ <- extraMetricsReporter.fold(Resource.unit) { reporter =>
        acquire(reporter.register(registry))
          .map(_.start(extraMetricsReportingInterval.getSeconds, TimeUnit.SECONDS))
      }
      // Trigger a report to the SLF4J logger on shutdown.
      _ <- Resource(Future.successful(slf4JReporter))(reporter =>
        Future.successful(reporter.report()))
    } yield new Metrics(registry)
  }

  private def newJmxReporter(registry: MetricRegistry): JmxReporter =
    JmxReporter
      .forRegistry(registry)
      .inDomain(jmxDomain)
      .build()

  private def newSlf4jReporter(registry: MetricRegistry): Slf4jReporter =
    Slf4jReporter
      .forRegistry(registry)
      .convertRatesTo(TimeUnit.SECONDS)
      .convertDurationsTo(TimeUnit.MILLISECONDS)
      .withLoggingLevel(LoggingLevel.DEBUG)
      .build()

  private def acquire[T <: Reporter](reporter: => T)(
      implicit executionContext: ExecutionContext
  ): Resource[T] =
    ResourceOwner
      .forCloseable(() => reporter)
      .acquire()
}
Example 7
Source File: SandboxConfig.scala From daml with Apache License 2.0
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.platform.sandbox.config

import java.io.File
import java.nio.file.Path
import java.time.Duration

import ch.qos.logback.classic.Level
import com.daml.caching.SizedCache
import com.daml.ledger.api.auth.AuthService
import com.daml.ledger.api.tls.TlsConfiguration
import com.daml.ledger.participant.state.v1.SeedService.Seeding
import com.daml.platform.common.LedgerIdMode
import com.daml.platform.configuration.{CommandConfiguration, LedgerConfiguration, MetricsReporter}
import com.daml.platform.services.time.TimeProviderType
import com.daml.ports.Port

final case class SandboxConfig(
    address: Option[String],
    port: Port,
    portFile: Option[Path],
    damlPackages: List[File],
    timeProviderType: Option[TimeProviderType],
    commandConfig: CommandConfiguration,
    ledgerConfig: LedgerConfiguration,
    tlsConfig: Option[TlsConfiguration],
    scenario: Option[String],
    implicitPartyAllocation: Boolean,
    ledgerIdMode: LedgerIdMode,
    maxInboundMessageSize: Int,
    jdbcUrl: Option[String],
    eagerPackageLoading: Boolean,
    logLevel: Option[Level],
    authService: Option[AuthService],
    seeding: Option[Seeding],
    metricsReporter: Option[MetricsReporter],
    metricsReportingInterval: Duration,
    eventsPageSize: Int,
    lfValueTranslationEventCacheConfiguration: SizedCache.Configuration,
    lfValueTranslationContractCacheConfiguration: SizedCache.Configuration,
    profileDir: Option[Path],
    stackTraces: Boolean,
)

object SandboxConfig {
  val DefaultPort: Port = Port(6865)

  val DefaultMaxInboundMessageSize: Int = 4 * 1024 * 1024

  val DefaultEventsPageSize: Int = 1000

  val DefaultTimeProviderType: TimeProviderType = TimeProviderType.WallClock

  val DefaultLfValueTranslationCacheConfiguration: SizedCache.Configuration =
    SizedCache.Configuration.none

  lazy val nextDefault: SandboxConfig =
    SandboxConfig(
      address = None,
      port = DefaultPort,
      portFile = None,
      damlPackages = Nil,
      timeProviderType = None,
      commandConfig = CommandConfiguration.default,
      ledgerConfig = LedgerConfiguration.defaultLocalLedger,
      tlsConfig = None,
      scenario = None,
      implicitPartyAllocation = true,
      ledgerIdMode = LedgerIdMode.Dynamic,
      maxInboundMessageSize = DefaultMaxInboundMessageSize,
      jdbcUrl = None,
      eagerPackageLoading = false,
      logLevel = None, // the default is in logback.xml
      authService = None,
      seeding = Some(Seeding.Strong),
      metricsReporter = None,
      metricsReportingInterval = Duration.ofSeconds(10),
      eventsPageSize = DefaultEventsPageSize,
      lfValueTranslationEventCacheConfiguration = DefaultLfValueTranslationCacheConfiguration,
      lfValueTranslationContractCacheConfiguration = DefaultLfValueTranslationCacheConfiguration,
      profileDir = None,
      stackTraces = true,
    )

  lazy val default: SandboxConfig =
    nextDefault.copy(
      seeding = None,
      ledgerConfig = LedgerConfiguration.defaultLedgerBackedIndex,
    )
}
Example 8
Source File: ServiceCallWithMainActorAuthTests.scala From daml with Apache License 2.0
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.platform.sandbox.auth

import java.time.Duration
import java.util.UUID

trait ServiceCallWithMainActorAuthTests extends ServiceCallAuthTests {

  protected val mainActor: String = UUID.randomUUID.toString

  private val signedIncorrectly =
    Option(toHeader(readWriteToken(mainActor), UUID.randomUUID.toString))

  it should "deny calls authorized to read/write as the wrong party" in {
    expectPermissionDenied(serviceCallWithToken(canActAsRandomParty))
  }
  it should "deny calls authorized to read-only as the wrong party" in {
    expectPermissionDenied(serviceCallWithToken(canReadAsRandomParty))
  }
  it should "deny calls with an invalid signature" in {
    expectUnauthenticated(serviceCallWithToken(signedIncorrectly))
  }

  protected val canReadAsMainActor =
    Option(toHeader(readOnlyToken(mainActor)))
  protected val canReadAsMainActorExpired =
    Option(toHeader(expiringIn(Duration.ofDays(-1), readOnlyToken(mainActor))))
  protected val canReadAsMainActorExpiresTomorrow =
    Option(toHeader(expiringIn(Duration.ofDays(1), readOnlyToken(mainActor))))

  protected val canActAsMainActor =
    Option(toHeader(readWriteToken(mainActor)))
  protected val canActAsMainActorExpired =
    Option(toHeader(expiringIn(Duration.ofDays(-1), readWriteToken(mainActor))))
  protected val canActAsMainActorExpiresTomorrow =
    Option(toHeader(expiringIn(Duration.ofDays(1), readWriteToken(mainActor))))

  // Note: lazy val, because the ledger ID is only known after the sandbox start
  protected lazy val canReadAsMainActorActualLedgerId =
    Option(toHeader(forLedgerId(unwrappedLedgerId, readOnlyToken(mainActor))))
  protected val canReadAsMainActorRandomLedgerId =
    Option(toHeader(forLedgerId(UUID.randomUUID.toString, readOnlyToken(mainActor))))
  protected val canReadAsMainActorActualParticipantId =
    Option(toHeader(forParticipantId("sandbox-participant", readOnlyToken(mainActor))))
  protected val canReadAsMainActorRandomParticipantId =
    Option(toHeader(forParticipantId(UUID.randomUUID.toString, readOnlyToken(mainActor))))
  protected val canReadAsMainActorActualApplicationId =
    Option(toHeader(forApplicationId(serviceCallName, readOnlyToken(mainActor))))
  protected val canReadAsMainActorRandomApplicationId =
    Option(toHeader(forApplicationId(UUID.randomUUID.toString, readOnlyToken(mainActor))))

  // Note: lazy val, because the ledger ID is only known after the sandbox start
  protected lazy val canActAsMainActorActualLedgerId =
    Option(toHeader(forLedgerId(unwrappedLedgerId, readWriteToken(mainActor))))
  protected val canActAsMainActorRandomLedgerId =
    Option(toHeader(forLedgerId(UUID.randomUUID.toString, readWriteToken(mainActor))))
  protected val canActAsMainActorActualParticipantId =
    Option(toHeader(forParticipantId("sandbox-participant", readWriteToken(mainActor))))
  protected val canActAsMainActorRandomParticipantId =
    Option(toHeader(forParticipantId(UUID.randomUUID.toString, readWriteToken(mainActor))))
  protected val canActAsMainActorActualApplicationId =
    Option(toHeader(forApplicationId(serviceCallName, readWriteToken(mainActor))))
  protected val canActAsMainActorRandomApplicationId =
    Option(toHeader(forApplicationId(UUID.randomUUID.toString, readWriteToken(mainActor))))
}
Example 9
Source File: ExpiringStreamServiceCallAuthTests.scala From daml with Apache License 2.0
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.platform.sandbox.auth

import java.time.Duration

import com.daml.grpc.{GrpcException, GrpcStatus}
import com.daml.platform.sandbox.services.SubmitAndWaitDummyCommand
import com.daml.platform.testing.StreamConsumer
import com.daml.timer.Delayed
import io.grpc.Status
import io.grpc.stub.StreamObserver

import scala.concurrent.duration.DurationInt
import scala.concurrent.{Future, Promise}
import scala.util.control.NonFatal

trait ExpiringStreamServiceCallAuthTests[T]
    extends ReadOnlyServiceCallAuthTests
    with SubmitAndWaitDummyCommand {

  protected def stream: Option[String] => StreamObserver[T] => Unit

  private def expectExpiration(token: String): Future[Unit] = {
    val promise = Promise[Unit]()
    stream(Option(token))(new StreamObserver[T] {
      @volatile private[this] var gotSomething = false
      def onNext(value: T): Unit = {
        gotSomething = true
      }
      def onError(t: Throwable): Unit = {
        t match {
          case GrpcException(GrpcStatus(Status.Code.PERMISSION_DENIED, _), _) if gotSomething =>
            val _ = promise.trySuccess(())
          case NonFatal(e) =>
            val _ = promise.tryFailure(e)
        }
      }
      def onCompleted(): Unit = {
        val _ = promise.tryFailure(new RuntimeException("stream completed before token expiration"))
      }
    })
    promise.future
  }

  private def canActAsMainActorExpiresInFiveSeconds =
    toHeader(expiringIn(Duration.ofSeconds(5), readWriteToken(mainActor)))

  private def canReadAsMainActorExpiresInFiveSeconds =
    toHeader(expiringIn(Duration.ofSeconds(5), readOnlyToken(mainActor)))

  it should "break a stream in flight upon read-only token expiration" in {
    val _ = Delayed.Future.by(10.seconds)(submitAndWait())
    expectExpiration(canReadAsMainActorExpiresInFiveSeconds).map(_ => succeed)
  }

  it should "break a stream in flight upon read/write token expiration" in {
    val _ = Delayed.Future.by(10.seconds)(submitAndWait())
    expectExpiration(canActAsMainActorExpiresInFiveSeconds).map(_ => succeed)
  }

  override def serviceCallWithToken(token: Option[String]): Future[Any] =
    submitAndWait().flatMap(_ => new StreamConsumer[T](stream(token)).first())
}
Example 10
Source File: SandboxFixtureWithAuth.scala From daml with Apache License 2.0
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.platform.sandbox.services

import java.time.{Duration, Instant}
import java.util.UUID

import com.daml.jwt.domain.DecodedJwt
import com.daml.jwt.{HMAC256Verifier, JwtSigner}
import com.daml.ledger.api.auth.{AuthServiceJWT, AuthServiceJWTCodec, AuthServiceJWTPayload}
import com.daml.platform.sandbox.config.SandboxConfig
import org.scalatest.Suite
import scalaz.syntax.tag.ToTagOps

trait SandboxFixtureWithAuth extends SandboxFixture { self: Suite =>

  val emptyToken = AuthServiceJWTPayload(
    ledgerId = None,
    participantId = None,
    applicationId = None,
    exp = None,
    admin = false,
    actAs = Nil,
    readAs = Nil
  )

  val adminToken: AuthServiceJWTPayload = emptyToken.copy(admin = true)

  def readOnlyToken(party: String): AuthServiceJWTPayload =
    emptyToken.copy(readAs = List(party))

  def readWriteToken(party: String): AuthServiceJWTPayload =
    emptyToken.copy(actAs = List(party))

  def expiringIn(t: Duration, p: AuthServiceJWTPayload): AuthServiceJWTPayload =
    p.copy(exp = Option(Instant.now().plusNanos(t.toNanos)))

  def forLedgerId(id: String, p: AuthServiceJWTPayload): AuthServiceJWTPayload =
    p.copy(ledgerId = Some(id))

  def forParticipantId(id: String, p: AuthServiceJWTPayload): AuthServiceJWTPayload =
    p.copy(participantId = Some(id))

  def forApplicationId(id: String, p: AuthServiceJWTPayload): AuthServiceJWTPayload =
    p.copy(applicationId = Some(id))

  override protected def config: SandboxConfig =
    super.config.copy(
      authService = Some(
        AuthServiceJWT(HMAC256Verifier(jwtSecret)
          .getOrElse(sys.error("Failed to create HMAC256 verifier")))))

  protected lazy val wrappedLedgerId = ledgerId(Some(toHeader(adminToken)))
  protected lazy val unwrappedLedgerId = wrappedLedgerId.unwrap

  private val jwtHeader = """{"alg": "HS256", "typ": "JWT"}"""
  private val jwtSecret = UUID.randomUUID.toString

  private def signed(payload: AuthServiceJWTPayload, secret: String): String =
    JwtSigner.HMAC256
      .sign(DecodedJwt(jwtHeader, AuthServiceJWTCodec.compactPrint(payload)), secret)
      .getOrElse(sys.error("Failed to generate token"))
      .value

  def toHeader(payload: AuthServiceJWTPayload, secret: String = jwtSecret): String =
    signed(payload, secret)
}
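Examples 8 through 10 all derive JWT expiry times by adding a Duration to an Instant (expiringIn above uses Instant.plusNanos(t.toNanos)). A small standalone sketch of that idiom; note that Instant.plus accepts a Duration directly and is equivalent here:

import java.time.{Duration, Instant}

object ExpiryTimes extends App {
  val ttl: Duration = Duration.ofDays(1)
  val viaPlus: Instant = Instant.now().plus(ttl)               // direct form
  val viaNanos: Instant = Instant.now().plusNanos(ttl.toNanos) // form used in the examples above
  println(viaPlus.isAfter(Instant.now()))                      // true for a positive TTL
}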
Example 11
Source File: SubmitRequestValidator.scala From daml with Apache License 2.0
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.ledger.api.validation

import java.time.{Duration, Instant}

import com.daml.ledger.api.messages.command.submission
import com.daml.ledger.api.v1.command_submission_service.SubmitRequest
import com.daml.platform.server.api.validation.FieldValidations.requirePresence
import com.daml.platform.server.util.context.TraceContextConversions.toBrave
import io.grpc.StatusRuntimeException

class SubmitRequestValidator(commandsValidator: CommandsValidator) {

  def validate(
      req: SubmitRequest,
      currentLedgerTime: Instant,
      currentUtcTime: Instant,
      maxDeduplicationTime: Option[Duration])
    : Either[StatusRuntimeException, submission.SubmitRequest] =
    for {
      commands <- requirePresence(req.commands, "commands")
      validatedCommands <- commandsValidator.validateCommands(
        commands,
        currentLedgerTime,
        currentUtcTime,
        maxDeduplicationTime)
    } yield submission.SubmitRequest(validatedCommands, req.traceContext.map(toBrave))
}
Example 12
Source File: SubmitAndWaitRequestValidator.scala From daml with Apache License 2.0
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.ledger.api.validation

import java.time.{Duration, Instant}

import com.daml.ledger.api.messages.command.submission
import com.daml.ledger.api.v1.command_service.SubmitAndWaitRequest
import com.daml.platform.server.api.validation.FieldValidations.requirePresence
import com.daml.platform.server.util.context.TraceContextConversions.toBrave
import io.grpc.StatusRuntimeException

class SubmitAndWaitRequestValidator(commandsValidator: CommandsValidator) {

  def validate(
      req: SubmitAndWaitRequest,
      currentLedgerTime: Instant,
      currentUtcTime: Instant,
      maxDeduplicationTime: Option[Duration])
    : Either[StatusRuntimeException, submission.SubmitRequest] =
    for {
      commands <- requirePresence(req.commands, "commands")
      validatedCommands <- commandsValidator.validateCommands(
        commands,
        currentLedgerTime,
        currentUtcTime,
        maxDeduplicationTime)
    } yield submission.SubmitRequest(validatedCommands, req.traceContext.map(toBrave))
}
Example 13
Source File: GrpcCommandService.scala From daml with Apache License 2.0
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.platform.server.api.services.grpc

import java.time.{Duration, Instant}

import com.daml.ledger.api.domain.LedgerId
import com.daml.ledger.api.v1.command_service.CommandServiceGrpc.CommandService
import com.daml.ledger.api.v1.command_service._
import com.daml.ledger.api.validation.{CommandsValidator, SubmitAndWaitRequestValidator}
import com.daml.platform.api.grpc.GrpcApiService
import com.daml.dec.DirectExecutionContext
import com.daml.platform.server.api.ProxyCloseable
import com.google.protobuf.empty.Empty
import io.grpc.ServerServiceDefinition
import org.slf4j.{Logger, LoggerFactory}

import scala.concurrent.Future

class GrpcCommandService(
    protected val service: CommandService with AutoCloseable,
    val ledgerId: LedgerId,
    currentLedgerTime: () => Instant,
    currentUtcTime: () => Instant,
    maxDeduplicationTime: () => Option[Duration]
) extends CommandService
    with GrpcApiService
    with ProxyCloseable {

  protected val logger: Logger = LoggerFactory.getLogger(CommandService.getClass)

  private[this] val validator = new SubmitAndWaitRequestValidator(new CommandsValidator(ledgerId))

  override def submitAndWait(request: SubmitAndWaitRequest): Future[Empty] =
    validator
      .validate(request, currentLedgerTime(), currentUtcTime(), maxDeduplicationTime())
      .fold(Future.failed, _ => service.submitAndWait(request))

  override def submitAndWaitForTransactionId(
      request: SubmitAndWaitRequest): Future[SubmitAndWaitForTransactionIdResponse] =
    validator
      .validate(request, currentLedgerTime(), currentUtcTime(), maxDeduplicationTime())
      .fold(Future.failed, _ => service.submitAndWaitForTransactionId(request))

  override def submitAndWaitForTransaction(
      request: SubmitAndWaitRequest): Future[SubmitAndWaitForTransactionResponse] =
    validator
      .validate(request, currentLedgerTime(), currentUtcTime(), maxDeduplicationTime())
      .fold(Future.failed, _ => service.submitAndWaitForTransaction(request))

  override def submitAndWaitForTransactionTree(
      request: SubmitAndWaitRequest): Future[SubmitAndWaitForTransactionTreeResponse] =
    validator
      .validate(request, currentLedgerTime(), currentUtcTime(), maxDeduplicationTime())
      .fold(Future.failed, _ => service.submitAndWaitForTransactionTree(request))

  override def bindService(): ServerServiceDefinition =
    CommandServiceGrpc.bindService(this, DirectExecutionContext)
}
Example 14
Source File: GrpcCommandSubmissionService.scala From daml with Apache License 2.0
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.platform.server.api.services.grpc

import java.time.{Duration, Instant}

import com.daml.dec.DirectExecutionContext
import com.daml.ledger.api.domain.LedgerId
import com.daml.ledger.api.v1.command_submission_service.CommandSubmissionServiceGrpc.{
  CommandSubmissionService => ApiCommandSubmissionService
}
import com.daml.ledger.api.v1.command_submission_service.{
  CommandSubmissionServiceGrpc,
  SubmitRequest => ApiSubmitRequest
}
import com.daml.ledger.api.validation.{CommandsValidator, SubmitRequestValidator}
import com.daml.metrics.{Metrics, Timed}
import com.daml.platform.api.grpc.GrpcApiService
import com.daml.platform.server.api.ProxyCloseable
import com.daml.platform.server.api.services.domain.CommandSubmissionService
import com.google.protobuf.empty.Empty
import io.grpc.ServerServiceDefinition
import org.slf4j.{Logger, LoggerFactory}

import scala.concurrent.Future

class GrpcCommandSubmissionService(
    override protected val service: CommandSubmissionService with AutoCloseable,
    ledgerId: LedgerId,
    currentLedgerTime: () => Instant,
    currentUtcTime: () => Instant,
    maxDeduplicationTime: () => Option[Duration],
    metrics: Metrics,
) extends ApiCommandSubmissionService
    with ProxyCloseable
    with GrpcApiService {

  protected val logger: Logger = LoggerFactory.getLogger(ApiCommandSubmissionService.getClass)

  private val validator = new SubmitRequestValidator(new CommandsValidator(ledgerId))

  override def submit(request: ApiSubmitRequest): Future[Empty] =
    Timed.future(
      metrics.daml.commands.submissions,
      Timed
        .value(
          metrics.daml.commands.validation,
          validator
            .validate(request, currentLedgerTime(), currentUtcTime(), maxDeduplicationTime()))
        .fold(
          Future.failed,
          service.submit(_).map(_ => Empty.defaultInstance)(DirectExecutionContext))
    )

  override def bindService(): ServerServiceDefinition =
    CommandSubmissionServiceGrpc.bindService(this, DirectExecutionContext)
}
Example 15
Source File: TimeModel.scala From daml with Apache License 2.0
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.ledger.participant.state.v1

import java.time.{Duration, Instant}

import scala.util.Try

// Companion object (the TimeModel case class itself is elided in this excerpt)
object TimeModel {

  val reasonableDefault: TimeModel =
    TimeModel(
      avgTransactionLatency = Duration.ofSeconds(0L),
      minSkew = Duration.ofSeconds(30L),
      maxSkew = Duration.ofSeconds(30L),
    ).get

  def apply(avgTransactionLatency: Duration, minSkew: Duration, maxSkew: Duration): Try[TimeModel] =
    Try {
      require(!avgTransactionLatency.isNegative, "Negative average transaction latency")
      require(!minSkew.isNegative, "Negative min skew")
      require(!maxSkew.isNegative, "Negative max skew")
      new TimeModel(avgTransactionLatency, minSkew, maxSkew)
    }
}
Example 16
Source File: TestConfig.scala From daml with Apache License 2.0
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.lf.engine.script

import java.io.File
import java.time.Duration

case class TestConfig(
    darPath: File,
    ledgerHost: Option[String],
    ledgerPort: Option[Int],
    participantConfig: Option[File],
    timeMode: ScriptTimeMode,
    commandTtl: Duration,
    maxInboundMessageSize: Int,
)

object TestConfig {
  private val parser = new scopt.OptionParser[TestConfig]("test-script") {
    head("test-script")

    opt[File]("dar")
      .required()
      .action((f, c) => c.copy(darPath = f))
      .text("Path to the dar file containing the script")

    opt[String]("ledger-host")
      .optional()
      .action((t, c) => c.copy(ledgerHost = Some(t)))
      .text("Ledger hostname")

    opt[Int]("ledger-port")
      .optional()
      .action((t, c) => c.copy(ledgerPort = Some(t)))
      .text("Ledger port")

    opt[File]("participant-config")
      .optional()
      .action((t, c) => c.copy(participantConfig = Some(t)))
      .text("File containing the participant configuration in JSON format")

    opt[Unit]('w', "wall-clock-time")
      .action { (t, c) =>
        c.copy(timeMode = ScriptTimeMode.WallClock)
      }
      .text("Use wall clock time (UTC). When not provided, static time is used.")

    opt[Long]("ttl")
      .action { (t, c) =>
        c.copy(commandTtl = Duration.ofSeconds(t))
      }
      .text("TTL in seconds used for commands emitted by the trigger. Defaults to 30s.")

    opt[Int]("max-inbound-message-size")
      .action((x, c) => c.copy(maxInboundMessageSize = x))
      .optional()
      .text(
        s"Optional max inbound message size in bytes. Defaults to ${RunnerConfig.DefaultMaxInboundMessageSize}")

    help("help").text("Print this usage text")

    checkConfig(c => {
      if (c.ledgerHost.isDefined != c.ledgerPort.isDefined) {
        failure("Must specify both --ledger-host and --ledger-port")
      } else if (c.ledgerHost.isDefined && c.participantConfig.isDefined) {
        failure("Cannot specify both --ledger-host and --participant-config")
      } else {
        success
      }
    })
  }

  def parse(args: Array[String]): Option[TestConfig] =
    parser.parse(
      args,
      TestConfig(
        darPath = null,
        ledgerHost = None,
        ledgerPort = None,
        participantConfig = None,
        timeMode = ScriptTimeMode.Static,
        commandTtl = Duration.ofSeconds(30L),
        maxInboundMessageSize = RunnerConfig.DefaultMaxInboundMessageSize,
      )
    )
}
Example 17
Source File: LedgerClientConfig.scala From daml with Apache License 2.0
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.ledger.client.binding.config

import java.io.File
import java.time.Duration

import com.daml.ledger.client.binding.LedgerClientConfigurationError.MalformedTypesafeConfig
import com.daml.ledger.client.binding.config.LedgerClientConfig.ClientSslConfig
import com.daml.ledger.client.configuration.{
  CommandClientConfiguration,
  LedgerClientConfiguration,
  LedgerIdRequirement
}
import com.typesafe.config.{Config, ConfigFactory}
import io.grpc.netty.GrpcSslContexts
import io.netty.handler.ssl.SslContext
import pureconfig._

import scala.util.Try

case class LedgerClientConfig(
    ledgerId: Option[String],
    commandClient: CommandClientConfiguration,
    maxRetryTime: Duration,
    ssl: Option[ClientSslConfig]
) {
  def toBindingConfig(applicationId: String) =
    LedgerClientConfiguration(
      applicationId,
      ledgerIdRequirement,
      commandClient,
      ssl.map(_.sslContext)
    )

  private val ledgerIdRequirement = LedgerIdRequirement(ledgerId)
}

object LedgerClientConfig {

  case class ClientSslConfig(
      clientKeyCertChainFile: File,
      clientKeyFile: File,
      trustedCertsFile: File) {

    def sslContext: SslContext =
      GrpcSslContexts
        .forClient()
        .keyManager(clientKeyCertChainFile, clientKeyFile)
        .trustManager(trustedCertsFile)
        .build()
  }

  def create(config: Config = ConfigFactory.load()): Try[LedgerClientConfig] = {
    wrapError(loadConfig[LedgerClientConfig](config, "ledger-client"))
  }

  private def wrapError[T](
      failuresOrConfig: Either[pureconfig.error.ConfigReaderFailures, T]): Try[T] = {
    failuresOrConfig.left.map(MalformedTypesafeConfig).toTry
  }
}
Example 18
Source File: package.scala From daml with Apache License 2.0
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.lf.engine

import java.time.Duration
import java.util.UUID

import com.daml.lf.data.Ref.Identifier
import com.daml.platform.services.time.TimeProviderType

import scala.concurrent.duration.FiniteDuration

package object trigger {

  case class LedgerConfig(
      host: String,
      port: Int,
      timeProvider: TimeProviderType,
      commandTtl: Duration,
      maxInboundMessageSize: Int,
  )

  case class TriggerRestartConfig(
      minRestartInterval: FiniteDuration,
      maxRestartInterval: FiniteDuration,
      restartIntervalRandomFactor: Double = 0.2,
  )

  final case class SecretKey(value: String)
  final case class UserCredentials(token: EncryptedToken)

  final case class RunningTrigger(
      triggerInstance: UUID,
      triggerName: Identifier,
      credentials: UserCredentials,
      // TODO(SF, 2020-0610): Add access token field here in the
      // presence of authentication.
  )
}
Example 19
Source File: TimeProviderFactory.scala From daml with Apache License 2.0
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.navigator.time

import java.time.Duration

import com.daml.api.util.TimeProvider
import com.daml.ledger.client.services.testing.time.StaticTime

import scala.concurrent.ExecutionContext

case class TimeProviderWithType(time: TimeProvider, `type`: TimeProviderType)

object TimeProviderFactory {

  def apply(timeProviderType: TimeProviderType, ledgerTime: Option[StaticTime])(
      implicit ec: ExecutionContext): Option[TimeProviderWithType] =
    timeProviderType match {
      case TimeProviderType.Auto =>
        ledgerTime.fold(
          Some(TimeProviderWithType(TimeProvider.UTC, TimeProviderType.WallClock))
        )(
          t => Some(TimeProviderWithType(t, TimeProviderType.Static))
        )
      case TimeProviderType.WallClock =>
        Some(TimeProviderWithType(TimeProvider.UTC, TimeProviderType.WallClock))
      case TimeProviderType.Static =>
        ledgerTime.map(t => TimeProviderWithType(t, TimeProviderType.Static))
      case TimeProviderType.Simulated =>
        ledgerTime.map(lt => {
          val utc: TimeProvider = TimeProvider.UTC
          val diff = Duration.between(lt.getCurrentTime, utc.getCurrentTime)
          TimeProviderWithType(
            TimeProvider.MappedTimeProvider(utc, i => i minus diff),
            TimeProviderType.Simulated)
        })
    }
}
Example 20
Source File: HmacAuthMiddleware.scala From iotchain with MIT License
package jbok.network.http.server.middleware

import java.time.{Duration, Instant}

import cats.data.{Kleisli, OptionT}
import cats.effect.Sync
import jbok.network.http.server.authentication.HMAC
import org.http4s.headers.Authorization
import org.http4s.util.CaseInsensitiveString
import org.http4s.{AuthScheme, Credentials, HttpRoutes, Request, Response, Status}
import tsec.mac.jca.{HMACSHA256, MacSigningKey}

import scala.concurrent.duration.{FiniteDuration, _}

sealed abstract class HmacAuthError(val message: String) extends Exception(message)
object HmacAuthError {
  case object NoAuthHeader extends HmacAuthError("Could not find an Authorization header")
  case object NoDatetimeHeader extends HmacAuthError("Could not find an X-Datetime header")
  case object BadMAC extends HmacAuthError("Bad MAC")
  case object InvalidMacFormat extends HmacAuthError("The MAC is not a valid Base64 string")
  case object InvalidDatetime extends HmacAuthError("The datetime is not a valid UTC datetime string")
  case object Timeout extends HmacAuthError("The request time window is closed")
}

object HmacAuthMiddleware {
  val defaultDuration: FiniteDuration = 5.minutes

  private def verifyFromHeader[F[_]](
      req: Request[F],
      key: MacSigningKey[HMACSHA256],
      duration: FiniteDuration
  ): Either[HmacAuthError, Unit] =
    for {
      authHeader <- req.headers
        .get(Authorization)
        .flatMap { t =>
          t.credentials match {
            case Credentials.Token(scheme, token) if scheme == AuthScheme.Bearer =>
              Some(token)
            case _ => None
          }
        }
        .toRight(HmacAuthError.NoAuthHeader)
      datetimeHeader <- req.headers
        .get(CaseInsensitiveString("X-Datetime"))
        .toRight(HmacAuthError.NoDatetimeHeader)
      instant <- HMAC.http.verifyFromHeader(
        req.method.name,
        req.uri.renderString,
        datetimeHeader.value,
        authHeader,
        key
      )
      _ <- Either.cond(
        Instant.now().isBefore(instant.plus(Duration.ofNanos(duration.toNanos))),
        (),
        HmacAuthError.Timeout
      )
    } yield ()

  def apply[F[_]: Sync](key: MacSigningKey[HMACSHA256], duration: FiniteDuration = defaultDuration)(
      routes: HttpRoutes[F]): HttpRoutes[F] =
    Kleisli { req: Request[F] =>
      verifyFromHeader(req, key, duration) match {
        case Left(error) =>
          OptionT.some[F](Response[F](Status.Forbidden).withEntity(error.message))
        case Right(_) => routes(req)
      }
    }
}
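A recurring pattern in Examples 20, 25, and 31 is bridging between scala.concurrent.duration.FiniteDuration and java.time.Duration through a numeric unit (toNanos or toMillis). A minimal sketch of both directions; the helper names here are ours, not from those projects:

import java.time.{Duration => JDuration}

import scala.concurrent.duration._

object DurationBridge {
  // FiniteDuration -> java.time.Duration, as in Duration.ofNanos(duration.toNanos) above
  def toJava(d: FiniteDuration): JDuration = JDuration.ofNanos(d.toNanos)

  // java.time.Duration -> FiniteDuration; JDuration.toNanos can overflow for spans
  // longer than ~292 years, which is acceptable for timeout-sized values
  def toScala(d: JDuration): FiniteDuration = d.toNanos.nanos
}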
Example 21
Source File: PostgresInstanceSpec.scala From fuuid with MIT License
package io.chrisdavenport.fuuid.doobie.postgres

import cats.effect.{ContextShift, IO}
import cats.implicits._
import doobie._
import doobie.implicits._
import doobie.postgres.implicits._
import doobie.specs2._
import io.chrisdavenport.fuuid.FUUID
import io.chrisdavenport.fuuid.doobie.implicits._
import io.chrisdavenport.testcontainersspecs2.ForAllTestContainer
import com.dimafeng.testcontainers.GenericContainer
import org.testcontainers.containers.wait.strategy.LogMessageWaitStrategy
import java.time.Duration
import java.time.temporal.ChronoUnit.SECONDS
import org.specs2._

import scala.concurrent.ExecutionContext.Implicits.global

class PostgresInstanceSpec extends mutable.Specification with IOChecker with ForAllTestContainer {
  sequential

  implicit val contextShiftIO: ContextShift[IO] = IO.contextShift(global)

  override lazy val container = GenericContainer(
    "postgres",
    List(5432),
    Map(
      "POSTGRES_DB" -> dbName,
      "POSTGRES_USER" -> dbUserName,
      "POSTGRES_PASSWORD" -> dbPassword
    ),
    waitStrategy = new LogMessageWaitStrategy()
      .withRegEx(".*database system is ready to accept connections.*\\s")
      .withTimes(2)
      .withStartupTimeout(Duration.of(60, SECONDS))
  )

  lazy val driverName = "org.postgresql.Driver"
  lazy val jdbcUrl =
    s"jdbc:postgresql://${container.container.getContainerIpAddress()}:${container.container.getMappedPort(5432)}/${dbName}"
  lazy val dbUserName = "user"
  lazy val dbPassword = "password"
  lazy val dbName = "db"

  lazy val transactor = Transactor.fromDriverManager[IO](
    driverName,
    jdbcUrl,
    dbUserName,
    dbPassword
  )

  override def afterStart(): Unit = {
    sql"""
    CREATE TABLE IF NOT EXISTS PostgresInstanceSpec (
      id UUID NOT NULL
    )
    """.update.run.transact(transactor).void.unsafeRunSync()
  }

  def insertId(fuuid: FUUID): Update0 = {
    sql"""INSERT into PostgresInstanceSpec (id) VALUES ($fuuid)""".update
  }

  val fuuid = FUUID.randomFUUID[IO].unsafeRunSync

  check(sql"SELECT id from PostgresInstanceSpec".query[FUUID])
  check(insertId(fuuid))
}
Example 22
Source File: PostgresTraversalSpec.scala From fuuid with MIT License
package io.chrisdavenport.fuuid.doobie.postgres

import cats.effect.{ContextShift, IO}
import cats.implicits._
import doobie._
import doobie.implicits._
import doobie.postgres.implicits._
import io.chrisdavenport.fuuid.doobie.implicits._
import io.chrisdavenport.fuuid._
import io.chrisdavenport.testcontainersspecs2.ForAllTestContainer
import com.dimafeng.testcontainers.GenericContainer
import org.testcontainers.containers.wait.strategy.LogMessageWaitStrategy
import java.time.Duration
import java.time.temporal.ChronoUnit.SECONDS
import org.specs2._

import scala.concurrent.ExecutionContext.Implicits.global

class PostgresTraversalSpec extends mutable.Specification
    with ScalaCheck with FUUIDArbitraries with ForAllTestContainer {
  sequential

  implicit val contextShiftIO: ContextShift[IO] = IO.contextShift(global)

  override lazy val container = GenericContainer(
    "postgres",
    List(5432),
    Map(
      "POSTGRES_DB" -> dbName,
      "POSTGRES_USER" -> dbUserName,
      "POSTGRES_PASSWORD" -> dbPassword
    ),
    waitStrategy = new LogMessageWaitStrategy()
      .withRegEx(".*database system is ready to accept connections.*\\s")
      .withTimes(2)
      .withStartupTimeout(Duration.of(60, SECONDS))
  )

  lazy val driverName = "org.postgresql.Driver"
  lazy val jdbcUrl =
    s"jdbc:postgresql://${container.container.getContainerIpAddress()}:${container.container.getMappedPort(5432)}/${dbName}"
  lazy val dbUserName = "user"
  lazy val dbPassword = "password"
  lazy val dbName = "db"

  lazy val transactor = Transactor.fromDriverManager[IO](
    driverName,
    jdbcUrl,
    dbUserName,
    dbPassword
  )
  // lazy val transactor = Transactor.fromDriverManager[IO](
  //   "org.postgresql.Driver",
  //   "jdbc:postgresql:world",
  //   "postgres", ""
  // )

  override def afterStart(): Unit = {
    sql"""
    CREATE TABLE IF NOT EXISTS PostgresTraversalSpec (
      id UUID NOT NULL
    )
    """.update.run.transact(transactor).void.unsafeRunSync()
  }

  def queryBy(fuuid: FUUID): Query0[FUUID] = {
    sql"""SELECT id from PostgresTraversalSpec where id = ${fuuid}""".query[FUUID]
  }

  def insertId(fuuid: FUUID): Update0 = {
    sql"""INSERT into PostgresTraversalSpec (id) VALUES ($fuuid)""".update
  }

  "Doobie Postgres Meta" should {
    "traverse input and then extraction" in prop { fuuid: FUUID =>
      val action = for {
        _ <- insertId(fuuid).run.transact(transactor)
        fuuid <- queryBy(fuuid).unique.transact(transactor)
      } yield fuuid
      action.unsafeRunSync must_=== fuuid
    }
    "fail on a non-present value" in prop { fuuid: FUUID =>
      queryBy(fuuid)
        .unique
        .transact(transactor)
        .attempt
        .map(_.isLeft)
        .unsafeRunSync must_=== true
    }
  }
}
Example 23
Source File: StatusActor.scala From devbox with Apache License 2.0
package devbox.logger

import java.time.Duration

object StatusActor {
  sealed trait Msg
  case class SetIcon(iconName: String, msg: Seq[String]) extends Msg
  case class Debounce() extends Msg
}

class StatusActor(setImage: String => Unit, setTooltip: String => Unit)
                 (implicit ac: castor.Context)
    extends castor.StateMachineActor[StatusActor.Msg] {

  def initialState = StatusState(
    StatusActor.SetIcon("blue-tick", Seq("Devbox initializing")),
    DebounceIdle(),
  )

  sealed trait DebounceState
  case class DebounceIdle() extends DebounceState
  case class DebounceCooldown() extends DebounceState
  case class DebounceFull(value: StatusActor.SetIcon) extends DebounceState

  case class StatusState(icon: StatusActor.SetIcon, debounced: DebounceState) extends State {
    override def run = {
      case msg: StatusActor.SetIcon => debounceReceive(msg)
      case StatusActor.Debounce() =>
        debounced match {
          case DebounceFull(n) => statusMsgToState(DebounceIdle(), n)
          case ds => this.copy(debounced = DebounceIdle())
        }
    }

    def debounceReceive(statusMsg: StatusActor.SetIcon): State = {
      if (debounced == DebounceIdle()) {
        ac.scheduleMsg(StatusActor.this, StatusActor.Debounce(), Duration.ofMillis(100))
        statusMsgToState(DebounceCooldown(), statusMsg)
      } else {
        StatusState(icon, DebounceFull(statusMsg))
      }
    }

    def statusMsgToState(newDebounced: DebounceState, statusMsg: StatusActor.SetIcon): StatusState = {
      setIcon(icon, statusMsg)
      this.copy(debounced = newDebounced, icon = statusMsg)
    }
  }

  def setIcon(icon: StatusActor.SetIcon, nextIcon: StatusActor.SetIcon) = {
    if (icon.iconName != nextIcon.iconName) setImage(nextIcon.iconName)
    if (icon.msg != nextIcon.msg) setTooltip(nextIcon.msg.mkString("\n"))
  }
}
Example 24
Source File: IntervalSpec.scala From chronoscala with MIT License
package jp.ne.opt.chronoscala

import java.time.{Duration, Instant}

import org.scalacheck.{Gen, Prop, Properties}

object IntervalSpec extends Properties("Interval") with Gens {
  import Prop.forAll

  val startEndGen: Gen[(Instant, Instant)] = for {
    startEpochMillis <- Gen.choose(0L, Long.MaxValue)
    endEpochMillis <- Gen.choose(startEpochMillis, Long.MaxValue)
  } yield {
    val start = Instant.ofEpochMilli(startEpochMillis)
    val end = Instant.ofEpochMilli(endEpochMillis)
    (start, end)
  }

  property("empty interval") = forAll(instantGen) { instant =>
    Interval(instant, instant).duration == Duration.ZERO
  }

  property("contains itself") = forAll(startEndGen) {
    case (start, end) =>
      val interval = Interval(start, end)
      interval.contains(interval)
  }

  property("contains start and end") = forAll(startEndGen) {
    case (start, end) =>
      val interval = Interval(start, end)
      interval.contains(start) && interval.contains(end)
  }

  property("contains instant between start and end") = forAll(for {
    (start, end) <- startEndGen
    middleMillis <- Gen.choose(start.toEpochMilli, end.toEpochMilli)
  } yield (start, Instant.ofEpochMilli(middleMillis), end)) {
    case (start, middle, end) =>
      val interval = Interval(start, end)
      interval.contains(middle)
  }
}
Example 25
Source File: ExpiringMap.scala From mantis with Apache License 2.0
package io.iohk.ethereum.jsonrpc

import java.time.temporal.ChronoUnit
import java.time.Duration

import io.iohk.ethereum.jsonrpc.ExpiringMap.ValueWithDuration

import scala.collection.mutable
import scala.util.Try

object ExpiringMap {
  case class ValueWithDuration[V](value: V, expiration: Duration)

  def empty[K, V](defaultElementRetentionTime: Duration): ExpiringMap[K, V] =
    new ExpiringMap(mutable.Map.empty, defaultElementRetentionTime)
}

//TODO: Make class thread safe
class ExpiringMap[K, V] private (val underlying: mutable.Map[K, ValueWithDuration[V]],
                                 val defaultRetentionTime: Duration) {
  private val maxHoldDuration = ChronoUnit.CENTURIES.getDuration

  def addFor(k: K, v: V, duration: Duration): ExpiringMap[K, V] = {
    underlying += k -> ValueWithDuration(v, Try(currentPlus(duration)).getOrElse(currentPlus(maxHoldDuration)))
    this
  }

  def add(k: K, v: V, duration: Duration): ExpiringMap[K, V] = {
    addFor(k, v, duration)
  }

  def addForever(k: K, v: V): ExpiringMap[K, V] =
    addFor(k, v, maxHoldDuration)

  def add(k: K, v: V): ExpiringMap[K, V] =
    addFor(k, v, defaultRetentionTime)

  def remove(k: K): ExpiringMap[K, V] = {
    underlying -= k
    this
  }

  def get(k: K): Option[V] = {
    underlying.get(k).flatMap(value =>
      if (isNotExpired(value))
        Some(value.value)
      else {
        remove(k)
        None
      }
    )
  }

  private def isNotExpired(value: ValueWithDuration[V]) =
    currentNanoDuration().minus(value.expiration).isNegative

  private def currentPlus(duration: Duration) =
    currentNanoDuration().plus(duration)

  private def currentNanoDuration() =
    Duration.ofNanos(System.nanoTime())
}
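A possible usage sketch for the ExpiringMap above, assuming it is on the classpath; it exercises the Duration-based retention API shown (empty, add, addFor, get):

import java.time.Duration

import io.iohk.ethereum.jsonrpc.ExpiringMap

object ExpiringMapExample extends App {
  val cache = ExpiringMap.empty[String, Int](defaultElementRetentionTime = Duration.ofSeconds(30))

  cache.add("answer", 42)                              // kept for the default 30 seconds
  cache.addFor("short-lived", 1, Duration.ofMillis(5)) // kept for 5 milliseconds

  println(cache.get("answer"))      // Some(42), still within its window
  Thread.sleep(10)
  println(cache.get("short-lived")) // None: the 5 ms window has passed
}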
Example 26
Source File: DateLogicalType.scala From embulk-output-s3_parquet with MIT License
package org.embulk.output.s3_parquet.parquet

import java.time.{Duration, Instant}

import org.apache.parquet.io.api.RecordConsumer
import org.apache.parquet.schema.{LogicalTypeAnnotation, PrimitiveType, Types}
import org.apache.parquet.schema.PrimitiveType.PrimitiveTypeName
import org.embulk.config.ConfigException
import org.embulk.output.s3_parquet.catalog.GlueDataType
import org.embulk.spi.`type`.{
  BooleanType,
  DoubleType,
  JsonType,
  LongType,
  StringType,
  TimestampType
}
import org.embulk.spi.time.{Timestamp, TimestampFormatter}
import org.embulk.spi.Column
import org.msgpack.value.Value

object DateLogicalType extends ParquetColumnType {
  override def primitiveType(column: Column): PrimitiveType = {
    column.getType match {
      case _: LongType | _: TimestampType =>
        Types
          .optional(PrimitiveTypeName.INT32)
          .as(LogicalTypeAnnotation.dateType())
          .named(column.getName)
      case _: BooleanType | _: DoubleType | _: StringType | _: JsonType | _ =>
        throw new ConfigException(s"Unsupported column type: ${column.getName}")
    }
  }

  override def glueDataType(column: Column): GlueDataType =
    column.getType match {
      case _: LongType | _: TimestampType => GlueDataType.DATE
      case _: BooleanType | _: DoubleType | _: StringType | _: JsonType | _ =>
        throw new ConfigException(s"Unsupported column type: ${column.getName}")
    }

  override def consumeBoolean(consumer: RecordConsumer, v: Boolean): Unit =
    throw newUnsupportedMethodException("consumeBoolean")

  override def consumeString(consumer: RecordConsumer, v: String): Unit =
    throw newUnsupportedMethodException("consumeString")

  override def consumeLong(consumer: RecordConsumer, v: Long): Unit =
    consumeLongAsInteger(consumer, v)

  override def consumeDouble(consumer: RecordConsumer, v: Double): Unit =
    throw newUnsupportedMethodException("consumeDouble")

  override def consumeTimestamp(
      consumer: RecordConsumer,
      v: Timestamp,
      formatter: TimestampFormatter
  ): Unit =
    consumeLongAsInteger(
      consumer,
      Duration.between(Instant.EPOCH, v.getInstant).toDays
    )

  override def consumeJson(consumer: RecordConsumer, v: Value): Unit =
    throw newUnsupportedMethodException("consumeJson")
}
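The date handling in Example 26 comes down to one Duration idiom: counting days since the Unix epoch, which is what Parquet's DATE logical type stores. A standalone sketch of that computation:

import java.time.{Duration, Instant}

object DaysSinceEpoch extends App {
  // Parquet DATE stores the day count since 1970-01-01
  val instant = Instant.parse("2020-06-15T00:00:00Z")
  val days: Long = Duration.between(Instant.EPOCH, instant).toDays
  println(days) // 18428
}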
Example 27
Source File: Music.scala From akka-serialization-test with Apache License 2.0
package com.github.dnvriend.domain

import java.time.Duration

import akka.actor.ActorLogging
import akka.event.LoggingReceive
import akka.persistence.PersistentActor

object Music {
  type Title = String
  type Year = Int

  final case class Song(title: Title, duration: Duration)

  sealed trait AlbumEvent
  final case class TitleChanged(title: Title) extends AlbumEvent
  final case class YearChanged(year: Year) extends AlbumEvent
  final case class SongAdded(song: Song) extends AlbumEvent
  final case class SongRemoved(song: Song) extends AlbumEvent

  sealed trait AlbumCommand
  final case class ChangeAlbumTitle(title: Title) extends AlbumCommand
  final case class ChangeAlbumYear(year: Year) extends AlbumCommand
  final case class AddSong(song: Song) extends AlbumCommand
  final case class RemoveSong(song: Song) extends AlbumCommand
}

class Album(val persistenceId: String) extends PersistentActor with ActorLogging {
  import Music._

  var title: Title = _
  var year: Year = _
  var songs: Set[Song] = Set[Song]()

  override def receiveRecover: Receive = LoggingReceive {
    case e: TitleChanged ⇒ handleEvent(e)
    case e: YearChanged  ⇒ handleEvent(e)
    case e: SongAdded    ⇒ handleEvent(e)
    case e: SongRemoved  ⇒ handleEvent(e)
  }

  def handleEvent(event: TitleChanged): Unit = {
    this.title = event.title
    log.debug(s"[TitleChanged]: Album $persistenceId => title: $title, year: $year songs: $songs")
  }

  def handleEvent(event: YearChanged): Unit = {
    this.year = event.year
    log.debug(s"[YearChanged]: Album $persistenceId => title: $title, year: $year songs: $songs")
  }

  def handleEvent(event: SongAdded): Unit = {
    this.songs = this.songs + event.song
    log.debug(s"[SongAdded]: Album $persistenceId => title: $title, year: $year songs: $songs")
  }

  def handleEvent(event: SongRemoved): Unit = {
    this.songs = this.songs - event.song
    log.debug(s"[SongRemoved]: Album $persistenceId => title: $title, year: $year songs: $songs")
  }

  override def receiveCommand: Receive = LoggingReceive {
    case ChangeAlbumTitle(newTitle) ⇒
      persistAll(List(TitleChanged(newTitle))) { e ⇒
        handleEvent(e)
        sender() ! akka.actor.Status.Success("")
      }
    case ChangeAlbumYear(newYear) ⇒
      persistAll(List(YearChanged(newYear))) { e ⇒
        handleEvent(e)
        sender() ! akka.actor.Status.Success("")
      }
    case AddSong(newSong) ⇒
      persistAll(List(SongAdded(newSong))) { e ⇒
        handleEvent(e)
        sender() ! akka.actor.Status.Success("")
      }
    case RemoveSong(oldSong) ⇒
      persistAll(List(SongRemoved(oldSong))) { e ⇒
        handleEvent(e)
        sender() ! akka.actor.Status.Success("")
      }
  }

  override def postStop(): Unit = {
    log.debug(s"Stopped $persistenceId")
    super.postStop()
  }
}
Example 28
Source File: SongRemovedSerializerTest.scala From akka-serialization-test with Apache License 2.0
package com.github.dnvriend.serializer.avro4s

import java.time.Duration

import com.github.dnvriend.TestSpec
import com.github.dnvriend.domain.Music.{ Song, SongRemoved }

class SongRemovedSerializerTest extends TestSpec {

  "SongRemoved" should "be serialized to a byte array" in {
    val obj = SongRemoved(Song("Money", Duration.ofSeconds(390)))
    val serializer = serialization.findSerializerFor(obj)
    val bytes: Array[Byte] = serializer.toBinary(obj)
    bytes.toList should not be 'empty
  }

  it should "turn a byte array back into an object" in {
    val obj = SongRemoved(Song("Money", Duration.ofSeconds(390)))
    val serializer = serialization.findSerializerFor(obj)
    val bytes = serializer.toBinary(obj)
    serializer.fromBinary(bytes, Option(obj.getClass)) should matchPattern {
      case SongRemoved(Song("Money", x: Duration)) ⇒
    }
  }
}
Example 29
Source File: SongAddedSerializerTest.scala From akka-serialization-test with Apache License 2.0
package com.github.dnvriend.serializer.avro4s

import java.time.Duration

import com.github.dnvriend.TestSpec
import com.github.dnvriend.domain.Music.{ Song, SongAdded }

class SongAddedSerializerTest extends TestSpec {

  "SongAdded" should "be serialized to a byte array" in {
    val obj = SongAdded(Song("Money", Duration.ofSeconds(390)))
    val serializer = serialization.findSerializerFor(obj)
    val bytes: Array[Byte] = serializer.toBinary(obj)
    bytes.toList should not be 'empty
  }

  it should "turn a byte array back into an object" in {
    val obj = SongAdded(Song("Money", Duration.ofSeconds(390)))
    val serializer = serialization.findSerializerFor(obj)
    val bytes = serializer.toBinary(obj)
    serializer.fromBinary(bytes, Option(obj.getClass)) should matchPattern {
      case SongAdded(Song("Money", x: Duration)) ⇒
    }
  }
}
Example 30
Source File: AlbumTest.scala From akka-serialization-test with Apache License 2.0 | 5 votes |
package com.github.dnvriend.domain import java.time.Duration import akka.pattern.ask import akka.stream.scaladsl.{ Sink, Source } import com.github.dnvriend.TestSpec import com.github.dnvriend.domain.Music._ import com.github.dnvriend.repository.AlbumRepository class AlbumTest extends TestSpec { "Album" should "register a title" in { val album = AlbumRepository.forId("album-1") val xs = List(ChangeAlbumTitle("Dark side of the Moon")) Source(xs).mapAsync(1)(album ? _).runWith(Sink.ignore).futureValue eventsForPersistenceIdSource("album-1").map(_.event).testProbe { tp ⇒ tp.request(Int.MaxValue) tp.expectNextN(xs.map(cmd ⇒ TitleChanged(cmd.title))) tp.expectComplete() } killActors(album) } it should "update its title and year and songs" in { val album = AlbumRepository.forId("album-2") val xs = List( ChangeAlbumTitle("Dark side of the Moon"), ChangeAlbumYear(1973), AddSong(Song("Money", Duration.ofSeconds(390))), AddSong(Song("Redemption Song", Duration.ofSeconds(227))), RemoveSong(Song("Redemption Song", Duration.ofSeconds(227))) ) val expectedEvents = xs.map { case ChangeAlbumTitle(title) ⇒ TitleChanged(title) case ChangeAlbumYear(year) ⇒ YearChanged(year) case AddSong(song) ⇒ SongAdded(song) case RemoveSong(song) ⇒ SongRemoved(song) } Source(xs).mapAsync(1)(album ? _).runWith(Sink.ignore).futureValue eventsForPersistenceIdSource("album-2").map(_.event).testProbe { tp ⇒ tp.request(Int.MaxValue) tp.expectNextN(expectedEvents) tp.expectComplete() } } }
Example 31
Source File: WavesBlockchainCachingClient.scala From matcher with MIT License | 5 votes |
package com.wavesplatform.dex.grpc.integration.clients import java.net.InetAddress import java.time.Duration import com.wavesplatform.dex.domain.account.Address import com.wavesplatform.dex.domain.asset.Asset import com.wavesplatform.dex.domain.bytes.ByteStr import com.wavesplatform.dex.domain.order.Order import com.wavesplatform.dex.domain.transaction.ExchangeTransaction import com.wavesplatform.dex.domain.utils.ScorexLogging import com.wavesplatform.dex.grpc.integration.caches.{AssetDescriptionsCache, FeaturesCache} import com.wavesplatform.dex.grpc.integration.clients.WavesBlockchainClient.SpendableBalanceChanges import com.wavesplatform.dex.grpc.integration.dto.BriefAssetDescription import monix.execution.Scheduler import monix.reactive.Observable import scala.concurrent.duration.FiniteDuration import scala.concurrent.{ExecutionContext, Future} class WavesBlockchainCachingClient(underlying: WavesBlockchainClient[Future], defaultCacheExpiration: FiniteDuration, monixScheduler: Scheduler)( implicit grpcExecutionContext: ExecutionContext) extends WavesBlockchainClient[Future] with ScorexLogging { private val cacheExpiration: Duration = Duration.ofMillis(defaultCacheExpiration.toMillis) private val featuresCache = new FeaturesCache(underlying.isFeatureActivated, invalidationPredicate = !_) // we don't keep knowledge about unactivated features private val assetDescriptionsCache = new AssetDescriptionsCache(underlying.assetDescription, cacheExpiration) // TODO remove after release 2.1.3 override def spendableBalance(address: Address, asset: Asset): Future[Long] = underlying.spendableBalance(address, asset) override def spendableBalanceChanges: Observable[SpendableBalanceChanges] = underlying.spendableBalanceChanges override def realTimeBalanceChanges: Observable[WavesBlockchainClient.BalanceChanges] = underlying.realTimeBalanceChanges override def spendableBalances(address: Address, assets: Set[Asset]): Future[Map[Asset, Long]] = underlying.spendableBalances(address, assets) override def allAssetsSpendableBalance(address: Address): Future[Map[Asset, Long]] = underlying.allAssetsSpendableBalance(address) override def isFeatureActivated(id: Short): Future[Boolean] = featuresCache.get(id) map Boolean2boolean override def assetDescription(asset: Asset.IssuedAsset): Future[Option[BriefAssetDescription]] = assetDescriptionsCache.get(asset) override def hasScript(asset: Asset.IssuedAsset): Future[Boolean] = underlying.hasScript(asset) override def runScript(asset: Asset.IssuedAsset, input: ExchangeTransaction): Future[RunScriptResult] = underlying.runScript(asset, input) override def hasScript(address: Address): Future[Boolean] = underlying.hasScript(address) override def runScript(address: Address, input: Order): Future[RunScriptResult] = underlying.runScript(address, input) override def wereForged(txIds: Seq[ByteStr]): Future[Map[ByteStr, Boolean]] = underlying.wereForged(txIds) override def broadcastTx(tx: ExchangeTransaction): Future[Boolean] = underlying.broadcastTx(tx) override def forgedOrder(orderId: ByteStr): Future[Boolean] = underlying.forgedOrder(orderId) override def getNodeAddress: Future[InetAddress] = underlying.getNodeAddress override def close(): Future[Unit] = underlying.close() }
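The client above is configured with a Scala FiniteDuration while its caches expect java.time.Duration; the bridge is a round trip through milliseconds. A sketch of that conversion (illustrative values, not part of the original file):

import java.time.{Duration => JDuration}
import scala.concurrent.duration._

val fromConfig: FiniteDuration = 5.minutes
val forCache: JDuration = JDuration.ofMillis(fromConfig.toMillis)
val backAgain: FiniteDuration = forCache.toMillis.millis
assert(fromConfig == backAgain) // millisecond precision survives both directions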
Example 32
Source File: BlockchainCache.scala From matcher with MIT License | 5 votes |
package com.wavesplatform.dex.grpc.integration.caches import java.time.Duration import com.google.common.cache.{CacheBuilder, CacheLoader, LoadingCache} import com.wavesplatform.dex.domain.utils.ScorexLogging import scala.concurrent.{ExecutionContext, Future} import scala.util.{Failure, Success} abstract class BlockchainCache[K <: AnyRef, V <: AnyRef](loader: K => Future[V], expiration: Option[Duration], invalidationPredicate: V => Boolean)( implicit ec: ExecutionContext) extends ScorexLogging { lazy private val cache: LoadingCache[K, Future[V]] = { val builder = CacheBuilder.newBuilder expiration .fold(builder)(builder.expireAfterWrite) .build { new CacheLoader[K, Future[V]] { override def load(key: K): Future[V] = loader(key) andThen { case Success(value) if invalidationPredicate(value) => cache.invalidate(key) // the value may persist a little longer than expected, because all threads in the EC may be busy case Failure(exception) => log.error(s"Error occurred while loading the value for key '$key'", exception); cache.invalidate(key) } } } } def get(key: K): Future[V] = cache.get(key) def put(key: K, value: Future[V]): Unit = cache.put(key, value) } object BlockchainCache { def noCustomInvalidationLogic[V](value: V): Boolean = false }
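The `expiration.fold(builder)(builder.expireAfterWrite)` line above works because Guava's CacheBuilder accepts a java.time.Duration directly (an overload available since Guava 25.0, to the best of my knowledge). A minimal standalone sketch of that overload, with illustrative key/value types:

import java.time.Duration
import com.google.common.cache.{CacheBuilder, CacheLoader, LoadingCache}

val cache: LoadingCache[String, String] = CacheBuilder.newBuilder()
  .expireAfterWrite(Duration.ofSeconds(30))
  .build(new CacheLoader[String, String] {
    override def load(key: String): String = s"value = $key"
  })

cache.get("a") // loaded once, then served from the cache for up to 30 seconds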
Example 33
Source File: BlockchainCacheSpecification.scala From matcher with MIT License | 5 votes |
package com.wavesplatform.dex.grpc.integration.caches import java.time.Duration import java.util.concurrent.{ConcurrentHashMap, ExecutorService, Executors} import mouse.any.anySyntaxMouse import org.scalatest.BeforeAndAfterAll import org.scalatest.matchers.should.Matchers import org.scalatest.wordspec.AnyWordSpecLike import scala.collection.JavaConverters._ import scala.concurrent._ class BlockchainCacheSpecification extends AnyWordSpecLike with Matchers with BeforeAndAfterAll { private val executor: ExecutorService = Executors.newCachedThreadPool implicit private val blockingContext: ExecutionContextExecutor = ExecutionContext.fromExecutor(executor) private class BlockchainCacheTest(loader: String => Future[String], expiration: Option[Duration], invalidationPredicate: String => Boolean) extends BlockchainCache[String, String](loader, expiration, invalidationPredicate) private def createCache(loader: String => Future[String], expiration: Option[Duration] = None, invalidationPredicate: String => Boolean = _ => false): BlockchainCacheTest = { new BlockchainCacheTest(loader, expiration, invalidationPredicate) } override def afterAll(): Unit = { super.afterAll() executor.shutdownNow() } private val andThenAwaitTimeout = 300 "BlockchainCache" should { "not keep failed futures" in { val goodKey = "good key" val badKey = "gRPC Error" val keyAccessMap = new ConcurrentHashMap[String, Int] unsafeTap (m => { m.put(goodKey, 0); m.put(badKey, 0) }) val gRPCError = new RuntimeException("gRPC Error occurred") val cache = createCache( key => { (if (key == badKey) Future.failed(gRPCError) else Future.successful(s"value = $key")) unsafeTap { _ => keyAccessMap.computeIfPresent(key, (_, prev) => prev + 1) } } ) val badKeyAccessCount = 10 Await.result( (1 to badKeyAccessCount).foldLeft { Future.successful("") } { (prev, _) => for { _ <- prev _ <- cache get goodKey r <- cache get badKey recover { case _ => "sad" } } yield { Thread.sleep(andThenAwaitTimeout); r } }, scala.concurrent.duration.Duration.Inf ) keyAccessMap.get(goodKey) shouldBe 1 keyAccessMap.get(badKey) should be > 1 } "not keep values according to the predicate" in { val goodKey = "111" val badKey = "222" val keyAccessMap = new ConcurrentHashMap[String, Int](Map(goodKey -> 0, badKey -> 0).asJava) val cache = createCache( key => { keyAccessMap.computeIfPresent(key, (_, prev) => prev + 1); Future.successful(key) }, invalidationPredicate = _.startsWith("2") ) Await.result( (1 to 10).foldLeft { Future.successful("") } { (prev, _) => for { _ <- prev _ <- cache get goodKey r <- cache get badKey } yield blocking { Thread.sleep(andThenAwaitTimeout); r } }, scala.concurrent.duration.Duration.Inf ) keyAccessMap.get(goodKey) shouldBe 1 keyAccessMap.get(badKey) should be > 1 } } }
Example 34
Source File: BigtableTypeSpec.scala From magnolify with Apache License 2.0 | 5 votes |
package magnolify.bigtable.test import java.net.URI import java.time.Duration import cats._ import cats.instances.all._ import com.google.bigtable.v2.Row import com.google.protobuf.ByteString import magnolify.bigtable._ import magnolify.cats.auto._ import magnolify.scalacheck.auto._ import magnolify.shared.CaseMapper import magnolify.test.Simple._ import magnolify.test._ import org.scalacheck._ import scala.reflect._ object BigtableTypeSpec extends MagnolifySpec("BigtableType") { private def test[T: Arbitrary: ClassTag](implicit t: BigtableType[T], eq: Eq[T]): Unit = { val tpe = ensureSerializable(t) property(className[T]) = Prop.forAll { t: T => val mutations = tpe(t, "cf") val row = BigtableType.mutationsToRow(ByteString.EMPTY, mutations) val copy = tpe(row, "cf") val rowCopy = BigtableType.mutationsToRow(ByteString.EMPTY, BigtableType.rowToMutations(row)) Prop.all( eq.eqv(t, copy), row == rowCopy ) } } test[Numbers] test[Required] test[Nullable] test[BigtableNested] { implicit val arbByteString: Arbitrary[ByteString] = Arbitrary(Gen.alphaNumStr.map(ByteString.copyFromUtf8)) implicit val eqByteString: Eq[ByteString] = Eq.instance(_ == _) implicit val eqByteArray: Eq[Array[Byte]] = Eq.by(_.toList) test[BigtableTypes] } { import Custom._ implicit val btfUri: BigtableField[URI] = BigtableField.from[String](x => URI.create(x))(_.toString) implicit val btfDuration: BigtableField[Duration] = BigtableField.from[Long](Duration.ofMillis)(_.toMillis) test[Custom] } { val it = BigtableType[DefaultInner] ensureSerializable(it) require(it(Row.getDefaultInstance, "cf") == DefaultInner()) val inner = DefaultInner(2, Some(2)) require(it(BigtableType.mutationsToRow(ByteString.EMPTY, it(inner, "cf")), "cf") == inner) val ot = BigtableType[DefaultOuter] ensureSerializable(ot) require(ot(Row.getDefaultInstance, "cf") == DefaultOuter()) val outer = DefaultOuter(DefaultInner(3, Some(3)), Some(DefaultInner(3, Some(3)))) require(ot(BigtableType.mutationsToRow(ByteString.EMPTY, ot(outer, "cf")), "cf") == outer) } { implicit val bt = BigtableType[LowerCamel](CaseMapper(_.toUpperCase)) test[LowerCamel] val fields = LowerCamel.fields .map(_.toUpperCase) .map(l => if (l == "INNERFIELD") "INNERFIELD.INNERFIRST" else l) val record = bt(LowerCamel.default, "cf") require(record.map(_.getSetCell.getColumnQualifier.toStringUtf8) == fields) } } // Collections are not supported case class BigtableNested(b: Boolean, i: Int, s: String, r: Required, o: Option[Required]) case class BigtableTypes(b: Byte, c: Char, s: Short, bs: ByteString, ba: Array[Byte]) // Collections are not supported case class DefaultInner(i: Int = 1, o: Option[Int] = Some(1)) case class DefaultOuter( i: DefaultInner = DefaultInner(2, Some(2)), o: Option[DefaultInner] = Some(DefaultInner(2, Some(2))) )
Example 35
Source File: TimestampConverter.scala From magnolify with Apache License 2.0 | 5 votes |
package magnolify.datastore import java.time.{Duration, Instant} import com.google.datastore.v1.Value import com.google.protobuf.Timestamp object TimestampConverter { private val millisPerSecond = Duration.ofSeconds(1).toMillis def toInstant(v: Value): Instant = { val t = v.getTimestampValue Instant.ofEpochMilli(t.getSeconds * millisPerSecond + t.getNanos / 1000000) } def fromInstant(i: Instant): Value.Builder = { val t = Timestamp .newBuilder() .setSeconds(i.toEpochMilli / millisPerSecond) .setNanos((i.toEpochMilli % 1000).toInt * 1000000) Value.newBuilder().setTimestampValue(t) } }
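A round-trip check for the converter above (a sketch; it assumes non-negative epoch instants and accepts that sub-millisecond precision is dropped by design):

import java.time.Instant

val i = Instant.ofEpochMilli(1234567890123L)      // 1234567890 s + 123 ms
val v = TimestampConverter.fromInstant(i).build() // fromInstant returns a Value.Builder
assert(TimestampConverter.toInstant(v) == i)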
Example 36
Source File: ExampleTypeSpec.scala From magnolify with Apache License 2.0 | 5 votes |
package magnolify.tensorflow.test import java.net.URI import java.time.Duration import cats._ import cats.instances.all._ import com.google.protobuf.ByteString import magnolify.cats.auto._ import magnolify.scalacheck.auto._ import magnolify.shared.CaseMapper import magnolify.shims.JavaConverters._ import magnolify.tensorflow._ import magnolify.tensorflow.unsafe._ import magnolify.test.Simple._ import magnolify.test._ import org.scalacheck._ import scala.reflect._ object ExampleTypeSpec extends MagnolifySpec("ExampleType") { private def test[T: Arbitrary: ClassTag](implicit t: ExampleType[T], eq: Eq[T]): Unit = { val tpe = ensureSerializable(t) property(className[T]) = Prop.forAll { t: T => val r = tpe(t) val copy = tpe(r) eq.eqv(t, copy) } } test[Integers] test[Required] test[Nullable] test[Repeated] test[ExampleNested] { // workaround for Double to Float precision loss implicit val arbDouble: Arbitrary[Double] = Arbitrary(Arbitrary.arbFloat.arbitrary.map(_.toDouble)) test[Unsafe] } { import Collections._ test[Collections] test[MoreCollections] } { import Custom._ implicit val efUri: ExampleField.Primitive[URI] = ExampleField.from[ByteString](x => URI.create(x.toStringUtf8))(x => ByteString.copyFromUtf8(x.toString) ) implicit val efDuration: ExampleField.Primitive[Duration] = ExampleField.from[Long](Duration.ofMillis)(_.toMillis) test[Custom] } { implicit val arbByteString: Arbitrary[ByteString] = Arbitrary(Gen.alphaNumStr.map(ByteString.copyFromUtf8)) implicit val eqByteString: Eq[ByteString] = Eq.instance(_ == _) implicit val eqByteArray: Eq[Array[Byte]] = Eq.by(_.toList) test[ExampleTypes] } { implicit val et = ExampleType[LowerCamel](CaseMapper(_.toUpperCase)) test[LowerCamel] val fields = LowerCamel.fields .map(_.toUpperCase) .map(l => if (l == "INNERFIELD") "INNERFIELD.INNERFIRST" else l) val record = et(LowerCamel.default) require(record.getFeatures.getFeatureMap.keySet().asScala == fields.toSet) } } // Option[T] and Seq[T] not supported case class ExampleNested(b: Boolean, i: Int, s: String, r: Required, o: Option[Required]) case class ExampleTypes(f: Float, bs: ByteString, ba: Array[Byte]) case class Unsafe(b: Byte, c: Char, s: Short, i: Int, d: Double, bool: Boolean, str: String)
Example 37
Source File: Simple.scala From magnolify with Apache License 2.0 | 5 votes |
package magnolify.test import java.net.URI import java.time.Duration import org.scalacheck._ import cats._ import cats.instances.all._ object Simple { case class Integers(i: Int, l: Long) case class Numbers(i: Int, l: Long, f: Float, d: Double, bi: BigInt, bd: BigDecimal) case class Required(b: Boolean, i: Int, s: String) case class Nullable(b: Option[Boolean], i: Option[Int], s: Option[String]) case class Repeated(b: List[Boolean], i: List[Int], s: List[String]) case class Nested( b: Boolean, i: Int, s: String, r: Required, o: Option[Required], l: List[Required] ) case class Collections(a: Array[Int], l: List[Int], v: Vector[Int]) case class MoreCollections(i: Iterable[Int], s: Seq[Int], is: IndexedSeq[Int]) case class Custom(u: URI, d: Duration) object Collections { implicit def eqIterable[T, C[_]](implicit eq: Eq[T], tt: C[T] => Iterable[T]): Eq[C[T]] = Eq.instance { (x, y) => val xs = x.toList val ys = y.toList xs.size == ys.size && (x.iterator zip y.iterator).forall((eq.eqv _).tupled) } } object Custom { implicit val arbUri: Arbitrary[URI] = Arbitrary(Gen.alphaNumStr.map(URI.create)) implicit val arbDuration: Arbitrary[Duration] = Arbitrary(Gen.chooseNum(0, Int.MaxValue).map(Duration.ofMillis(_))) implicit val coUri: Cogen[URI] = Cogen(_.toString.hashCode()) implicit val coDuration: Cogen[Duration] = Cogen(_.toMillis) implicit val hashUri: Hash[URI] = Hash.fromUniversalHashCode[URI] implicit val hashDuration: Hash[Duration] = Hash.fromUniversalHashCode[Duration] implicit val showUri: Show[URI] = Show.fromToString implicit val showDuration: Show[Duration] = Show.fromToString } case class LowerCamel(firstField: String, secondField: String, innerField: LowerCamelInner) case class LowerCamelInner(innerFirst: String) object LowerCamel { val fields: Seq[String] = Seq("firstField", "secondField", "innerField") val default: LowerCamel = LowerCamel("first", "second", LowerCamelInner("inner.first")) } }
Example 38
Source File: MonoidDerivationSpec.scala From magnolify with Apache License 2.0 | 5 votes |
package magnolify.cats.test import java.net.URI import java.time.Duration import cats._ import cats.instances.all._ import cats.kernel.laws.discipline._ import magnolify.cats.auto._ import magnolify.scalacheck.auto._ import magnolify.test.Simple._ import magnolify.test._ import org.scalacheck._ import scala.reflect._ object MonoidDerivationSpec extends MagnolifySpec("MonoidDerivation") { private def test[T: Arbitrary: ClassTag: Eq: Monoid]: Unit = { ensureSerializable(implicitly[Monoid[T]]) include(MonoidTests[T].monoid.all, className[T] + ".") } import Types.MiniInt implicit val mMiniInt: Monoid[MiniInt] = new Monoid[MiniInt] { override def empty: MiniInt = MiniInt(0) override def combine(x: MiniInt, y: MiniInt): MiniInt = MiniInt(x.i + y.i) } case class Record(i: Int, m: MiniInt) test[Record] { implicit val mBool: Monoid[Boolean] = Monoid.instance(false, _ || _) test[Required] test[Nullable] test[Repeated] // FIXME: breaks 2.1.1: ambiguous implicit values catsKernelStdMonoidForString vs genGroup // test[Nested] } { import Custom._ implicit val mUri: Monoid[URI] = Monoid.instance(URI.create(""), (x, y) => URI.create(x.toString + y.toString)) implicit val mDuration: Monoid[Duration] = Monoid.instance(Duration.ZERO, _ plus _) test[Custom] } }
Example 39
Source File: SemigroupDerivationSpec.scala From magnolify with Apache License 2.0 | 5 votes |
package magnolify.cats.test import java.net.URI import java.time.Duration import cats._ import cats.instances.all._ import cats.kernel.laws.discipline._ import magnolify.cats.auto._ import magnolify.scalacheck.auto._ import magnolify.test.Simple._ import magnolify.test._ import org.scalacheck._ import scala.reflect._ object SemigroupDerivationSpec extends MagnolifySpec("SemigroupDerivation") { private def test[T: Arbitrary: ClassTag: Eq: Semigroup]: Unit = { ensureSerializable(implicitly[Semigroup[T]]) include(SemigroupTests[T].semigroup.all, className[T] + ".") } import Types.MiniInt implicit val sgMiniInt: Semigroup[MiniInt] = Semigroup.instance((x, y) => MiniInt(x.i + y.i)) case class Record(i: Int, m: MiniInt) test[Record] test[Integers] { implicit val sgBool: Semigroup[Boolean] = Semigroup.instance(_ ^ _) test[Required] test[Nullable] test[Repeated] // FIXME: breaks 2.1.1: ambiguous implicit values catsKernelStdMonoidForString vs genGroup // test[Nested] } { import Custom._ implicit val sgUri: Semigroup[URI] = Semigroup.instance((x, y) => URI.create(x.toString + y.toString)) implicit val sgDuration: Semigroup[Duration] = Semigroup.instance(_ plus _) test[Custom] } }
Example 40
Source File: MomentDateTime.scala From dtc with Apache License 2.0 | 5 votes |
package dtc.js import java.time.temporal.ChronoField import java.time.{DayOfWeek, Duration, LocalDate, LocalTime} import dtc._ import moment.{Date, Moment, Units} import scala.scalajs.js.Array def underlyingMoment: Date = copy def dayOfWeek: DayOfWeek = DayOfWeek.of(dayOfWeekJSToJVM(underlying.day())) def dayOfMonth: Int = underlying.date() def month: Int = underlying.month() + 1 def year: Int = underlying.year() def hour: Int = underlying.hour() def minute: Int = underlying.minute() def second: Int = underlying.second() def millisecond: Int = underlying.millisecond() def withYear(year: Int): T = updated(_.year(year.toDouble)) def withMonth(month: Int): T = updated(_.month(month.toDouble - 1)) def withDayOfMonth(dayOfMonth: Int): T = updated(_.date(dayOfMonth.toDouble)) def withHour(hour: Int): T = updated(_.hour(hour.toDouble)) def withMinute(minute: Int): T = updated(_.minute(minute.toDouble)) def withSecond(second: Int): T = updated(_.second(second.toDouble)) def withMillisecond(millisecond: Int): T = updated(_.millisecond(millisecond.toDouble)) def withTime(time: LocalTime): T = updated(_ .hour(time.getHour.toDouble) .minute(time.getMinute.toDouble) .second(time.getSecond.toDouble) .millisecond(time.get(ChronoField.MILLI_OF_SECOND).toDouble) ) def withDate(date: LocalDate): T = updated(_ .year(date.getYear.toDouble) .month(date.getMonthValue.toDouble - 1) .date(date.getDayOfMonth.toDouble) ) def toLocalDate: LocalDate = LocalDate.of(year, month, dayOfMonth) def toLocalTime: LocalTime = LocalTime.of(hour, minute, second, millisToNanos(millisecond)) def yearsUntil(other: T): Long = other.underlying.diff(underlying, Units.Year).toLong def monthsUntil(other: T): Long = other.underlying.diff(underlying, Units.Month).toLong def daysUntil(other: T): Long = other.underlying.diff(underlying, Units.Day).toLong def millisecondsUntil(other: T): Long = other.underlying.diff(underlying, Units.Millisecond).toLong def secondsUntil(other: T): Long = other.underlying.diff(underlying, Units.Second).toLong def minutesUntil(other: T): Long = other.underlying.diff(underlying, Units.Minute).toLong def hoursUntil(other: T): Long = other.underlying.diff(underlying, Units.Hour).toLong def plus(d: Duration): T = plusMillis(d.toMillis) def minus(d: Duration): T = plusMillis(-d.toMillis) def plusDays(n: Int): T = updated(_.add(n.toDouble, Units.Day)) def plusMonths(n: Int): T = updated(_.add(n.toDouble, Units.Month)) def plusYears(n: Int): T = updated(_.add(n.toDouble, Units.Year)) def plusMillis(n: Long): T def format(formatString: String): String = underlying.format(formatString) override def toString: String = underlying.toString } object MomentDateTime { def compare[T <: MomentDateTime[T]](x: T, y: T): Int = Ordering.Double.compare(x.underlying.value(), y.underlying.value()) private[js] def constructorArray(date: LocalDate, time: LocalTime): Array[Int] = Array( date.getYear, date.getMonthValue - 1, date.getDayOfMonth, time.getHour, time.getMinute, time.getSecond, time.get(ChronoField.MILLI_OF_SECOND) ) private[js] def utcMoment(date: LocalDate, time: LocalTime): Date = Moment.utc(constructorArray(date, time)) }
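Note that plus and minus above route every Duration through toMillis: moment.js carries only millisecond precision, so any finer component of a Duration is silently truncated. A one-line illustration:

import java.time.Duration

val d = Duration.ofNanos(1500000) // 1.5 ms
assert(d.toMillis == 1L)          // plusMillis(d.toMillis) drops the half millisecond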
Example 41
Source File: MomentZonedDateTimeInstanceWithoutOrder.scala From dtc with Apache License 2.0 | 5 votes |
package dtc.instances.moment import java.time.{DayOfWeek, Duration, LocalDate, LocalTime} import dtc.js.MomentZonedDateTime import dtc.{Offset, TimeZoneId, Zoned} trait MomentZonedDateTimeInstanceWithoutOrder extends Zoned[MomentZonedDateTime] { def capture(date: LocalDate, time: LocalTime, zone: TimeZoneId): MomentZonedDateTime = MomentZonedDateTime.of(date, time, zone) def withZoneSameInstant(x: MomentZonedDateTime, zone: TimeZoneId): MomentZonedDateTime = x.withZoneSameInstant(zone) def withZoneSameLocal(x: MomentZonedDateTime, zone: TimeZoneId): MomentZonedDateTime = x.withZoneSameLocal(zone) def zone(x: MomentZonedDateTime): TimeZoneId = x.zone def date(x: MomentZonedDateTime): LocalDate = x.toLocalDate def time(x: MomentZonedDateTime): LocalTime = x.toLocalTime def plus(x: MomentZonedDateTime, d: Duration): MomentZonedDateTime = x.plus(d) def minus(x: MomentZonedDateTime, d: Duration): MomentZonedDateTime = x.minus(d) def plusDays(x: MomentZonedDateTime, days: Int): MomentZonedDateTime = x.plusDays(days) def plusMonths(x: MomentZonedDateTime, months: Int): MomentZonedDateTime = x.plusMonths(months) def plusYears(x: MomentZonedDateTime, years: Int): MomentZonedDateTime = x.plusYears(years) def offset(x: MomentZonedDateTime): Offset = x.offset def withYear(x: MomentZonedDateTime, year: Int): MomentZonedDateTime = x.withYear(year) def withMonth(x: MomentZonedDateTime, month: Int): MomentZonedDateTime = x.withMonth(month) def withDayOfMonth(x: MomentZonedDateTime, dayOfMonth: Int): MomentZonedDateTime = x.withDayOfMonth(dayOfMonth) def withHour(x: MomentZonedDateTime, hour: Int): MomentZonedDateTime = x.withHour(hour) def withMinute(x: MomentZonedDateTime, minute: Int): MomentZonedDateTime = x.withMinute(minute) def withSecond(x: MomentZonedDateTime, second: Int): MomentZonedDateTime = x.withSecond(second) def withMillisecond(x: MomentZonedDateTime, millisecond: Int): MomentZonedDateTime = x.withMillisecond(millisecond) def withTime(x: MomentZonedDateTime, time: LocalTime): MomentZonedDateTime = x.withTime(time) def withDate(x: MomentZonedDateTime, date: LocalDate): MomentZonedDateTime = x.withDate(date) def dayOfWeek(x: MomentZonedDateTime): DayOfWeek = x.dayOfWeek def dayOfMonth(x: MomentZonedDateTime): Int = x.dayOfMonth def month(x: MomentZonedDateTime): Int = x.month def year(x: MomentZonedDateTime): Int = x.year def millisecond(x: MomentZonedDateTime): Int = x.millisecond def second(x: MomentZonedDateTime): Int = x.second def minute(x: MomentZonedDateTime): Int = x.minute def hour(x: MomentZonedDateTime): Int = x.hour def yearsUntil(x: MomentZonedDateTime, until: MomentZonedDateTime): Long = x.yearsUntil(until) def monthsUntil(x: MomentZonedDateTime, until: MomentZonedDateTime): Long = x.monthsUntil(until) def daysUntil(x: MomentZonedDateTime, until: MomentZonedDateTime): Long = x.daysUntil(until) def hoursUntil(x: MomentZonedDateTime, until: MomentZonedDateTime): Long = x.hoursUntil(until) def minutesUntil(x: MomentZonedDateTime, until: MomentZonedDateTime): Long = x.minutesUntil(until) def secondsUntil(x: MomentZonedDateTime, until: MomentZonedDateTime): Long = x.secondsUntil(until) def millisecondsUntil(x: MomentZonedDateTime, until: MomentZonedDateTime): Long = x.millisecondsUntil(until) def utc(x: MomentZonedDateTime): (LocalDate, LocalTime) = { val utcTime = x.withZoneSameInstant(TimeZoneId.UTC) utcTime.toLocalDate -> utcTime.toLocalTime } }
Example 42
Source File: package.scala From dtc with Apache License 2.0 | 5 votes |
package dtc.instances import java.time.{DayOfWeek, Duration, LocalDate, LocalTime} import dtc.{Local, Capture, TimeZoneId, Zoned} import dtc.js.{MomentDateTime, MomentLocalDateTime, MomentZonedDateTime} package object moment { implicit val momentZonedWithStrictEquality: Zoned[MomentZonedDateTime] = new MomentZonedDateTimeInstanceWithoutOrder { def compare(x: MomentZonedDateTime, y: MomentZonedDateTime): Int = MomentZonedDateTime.compareStrict(x, y) } implicit val momentLocalDTC: Local[MomentLocalDateTime] = new Local[MomentLocalDateTime] { def date(x: MomentLocalDateTime): LocalDate = x.toLocalDate def time(x: MomentLocalDateTime): LocalTime = x.toLocalTime def plus(x: MomentLocalDateTime, d: Duration): MomentLocalDateTime = x.plus(d) def minus(x: MomentLocalDateTime, d: Duration): MomentLocalDateTime = x.minus(d) def plusDays(x: MomentLocalDateTime, days: Int): MomentLocalDateTime = x.plusDays(days) def plusMonths(x: MomentLocalDateTime, months: Int): MomentLocalDateTime = x.plusMonths(months) def plusYears(x: MomentLocalDateTime, years: Int): MomentLocalDateTime = x.plusYears(years) def compare(x: MomentLocalDateTime, y: MomentLocalDateTime): Int = MomentDateTime.compare(x, y) def of(date: LocalDate, time: LocalTime): MomentLocalDateTime = MomentLocalDateTime.of(date, time) def of( year: Int, month: Int, day: Int, hour: Int, minute: Int, second: Int, millisecond: Int): MomentLocalDateTime = MomentLocalDateTime.of(year, month, day, hour, minute, second, millisecond) def withYear(x: MomentLocalDateTime, year: Int): MomentLocalDateTime = x.withYear(year) def withMonth(x: MomentLocalDateTime, month: Int): MomentLocalDateTime = x.withMonth(month) def withDayOfMonth(x: MomentLocalDateTime, dayOfMonth: Int): MomentLocalDateTime = x.withDayOfMonth(dayOfMonth) def withHour(x: MomentLocalDateTime, hour: Int): MomentLocalDateTime = x.withHour(hour) def withMinute(x: MomentLocalDateTime, minute: Int): MomentLocalDateTime = x.withMinute(minute) def withSecond(x: MomentLocalDateTime, second: Int): MomentLocalDateTime = x.withSecond(second) def withMillisecond(x: MomentLocalDateTime, millisecond: Int): MomentLocalDateTime = x.withMillisecond(millisecond) def withTime(x: MomentLocalDateTime, time: LocalTime): MomentLocalDateTime = x.withTime(time) def withDate(x: MomentLocalDateTime, date: LocalDate): MomentLocalDateTime = x.withDate(date) def dayOfWeek(x: MomentLocalDateTime): DayOfWeek = x.dayOfWeek def dayOfMonth(x: MomentLocalDateTime): Int = x.dayOfMonth def month(x: MomentLocalDateTime): Int = x.month def year(x: MomentLocalDateTime): Int = x.year def millisecond(x: MomentLocalDateTime): Int = x.millisecond def second(x: MomentLocalDateTime): Int = x.second def minute(x: MomentLocalDateTime): Int = x.minute def hour(x: MomentLocalDateTime): Int = x.hour def yearsUntil(x: MomentLocalDateTime, until: MomentLocalDateTime): Long = x.yearsUntil(until) def monthsUntil(x: MomentLocalDateTime, until: MomentLocalDateTime): Long = x.monthsUntil(until) def daysUntil(x: MomentLocalDateTime, until: MomentLocalDateTime): Long = x.daysUntil(until) def hoursUntil(x: MomentLocalDateTime, until: MomentLocalDateTime): Long = x.hoursUntil(until) def minutesUntil(x: MomentLocalDateTime, until: MomentLocalDateTime): Long = x.minutesUntil(until) def secondsUntil(x: MomentLocalDateTime, until: MomentLocalDateTime): Long = x.secondsUntil(until) def millisecondsUntil(x: MomentLocalDateTime, until: MomentLocalDateTime): Long = x.millisecondsUntil(until) } implicit val captureMomentLocalDateTime: Capture[MomentLocalDateTime] 
= new Capture[MomentLocalDateTime] { def capture(date: LocalDate, time: LocalTime, zone: TimeZoneId): MomentLocalDateTime = MomentZonedDateTime.of(date, time, zone).withZoneSameInstant(TimeZoneId.UTC).toLocal } }
Example 43
Source File: Main.scala From dtc with Apache License 2.0 | 5 votes |
package dtc.examples import java.time.{Duration, LocalDate, LocalTime} import dtc.instances.jsDate._ import dtc.js.JSDate import scala.scalajs.js.annotation.JSExportTopLevel // scalastyle:off object Main { @JSExportTopLevel("Main") def main() = { val calendar = Calendar(List( CalendarEvent( JSDate.of(LocalDate.now(), LocalTime.of(10, 0)), JSDate.of(LocalDate.now(), LocalTime.of(11, 0)), "Breakfast" ), CalendarEvent( JSDate.of(LocalDate.now().plusDays(2), LocalTime.of(12, 0)), JSDate.of(LocalDate.now().plusDays(2), LocalTime.of(14, 0)), "Meeting" ), CalendarEvent( JSDate.of(2016, 10, 9, 11, 0), JSDate.of(2016, 10, 9, 11, 0), "Birthday party" ) )) println(calendar.eventsAfter(JSDate.now).mkString(", ")) println(calendar.onlyWorkDays.mkString(", ")) val period = Period(JSDate.now, JSDate.now.plus(Duration.ofDays(1L))) println(period.durationInMinutes) println(period.durationInSeconds) println(period.hours.mkString("\n")) } }
Example 44
Source File: ZonedDateTimeLaws.scala From dtc with Apache License 2.0 | 5 votes |
package dtc.laws import java.time.temporal.ChronoUnit import java.time.{Duration, LocalDate, LocalTime} import cats.kernel.laws.discipline.{catsLawsIsEqToProp => p} import cats.kernel.laws._ import cats.instances.long._ import dtc._ import dtc.syntax.zoned._ import org.scalacheck.Prop._ import org.scalacheck.{Arbitrary, Gen, Prop} trait ZonedDateTimeLaws[A] { implicit def D: Zoned[A] val genA: Gen[A] val genDateAndDurationWithinSameOffset: Gen[(A, Duration)] val genDataSuite: Gen[ZonedDateTimeTestData[A]] val genLocalDate: Gen[LocalDate] val genLocalTime: Gen[LocalTime] val genValidYear: Gen[Int] val genTimeZone: Gen[TimeZoneId] def crossOffsetAddition: Prop = forAll(genDataSuite) { data => val target = D.plus(data.source, data.diff) p(D.offset(target) <-> data.targetOffset) && (D.date(target) <-> data.targetDate) && (D.time(target) <-> data.targetTime.truncatedTo(ChronoUnit.MILLIS)) } def localTimeAndOffsetCorrelation: Prop = forAll(genA, genTimeZone) { (date: A, zone: TimeZoneId) => val target = D.withZoneSameInstant(date, zone) D.time(date) <-> D.time(target).plusSeconds((date.offset.seconds - target.offset.seconds).toLong) } def withZoneSameInstantGivesSameInstant: Prop = forAll(genA, genTimeZone) { (date: A, zone: TimeZoneId) => val target = D.withZoneSameInstant(date, zone) p(D.zone(target) <-> zone) && (D.millisecondsUntil(date, target) <-> 0L) } } object ZonedDateTimeLaws { def apply[A]( gDateAndDurationWithinSameDST: Gen[(A, Duration)], gDataSuite: Gen[ZonedDateTimeTestData[A]], gLocalTime: Gen[LocalTime], gLocalDate: Gen[LocalDate], gValidYear: Gen[Int], gTimeZone: Gen[TimeZoneId])( implicit ev: Zoned[A], arbA: Arbitrary[A]): ZonedDateTimeLaws[A] = new ZonedDateTimeLaws[A] { def D: Zoned[A] = ev val genTimeZone: Gen[TimeZoneId] = gTimeZone val genDateAndDurationWithinSameOffset: Gen[(A, Duration)] = gDateAndDurationWithinSameDST val genDataSuite: Gen[ZonedDateTimeTestData[A]] = gDataSuite val genLocalDate: Gen[LocalDate] = gLocalDate val genLocalTime: Gen[LocalTime] = gLocalTime val genValidYear: Gen[Int] = gValidYear val genA: Gen[A] = arbA.arbitrary } }
Example 45
Source File: ZonedDateTimeTests.scala From dtc with Apache License 2.0 | 5 votes |
package dtc.laws import java.time.{Duration, LocalDate, LocalTime} import dtc.{TimeZoneId, Zoned} import org.scalacheck.{Arbitrary, Gen} import org.typelevel.discipline.Laws trait ZonedDateTimeTests[A] extends Laws { def generalLocalDateTimeLaws: GeneralLocalDateTimeLaws[A] def laws: ZonedDateTimeLaws[A] def zonedDateTime(implicit arbA: Arbitrary[A], arbD: Arbitrary[Duration]): RuleSet = { new DefaultRuleSet( name = "ZonedDateTime", parent = None, "[within same offset] seconds addition laws" -> generalLocalDateTimeLaws.secondsAddition, "[within same offset] minutes addition laws" -> generalLocalDateTimeLaws.minutesAddition, "[within same offset] hours addition laws" -> generalLocalDateTimeLaws.hoursAddition, "[within same offset] withYear laws" -> generalLocalDateTimeLaws.withYear, "[within same offset] withMonth laws" -> generalLocalDateTimeLaws.withMonth, "[within same offset] withDayOfMonth laws" -> generalLocalDateTimeLaws.withDayOfMonth, "[within same offset] withHour laws" -> generalLocalDateTimeLaws.withHour, "[within same offset] withMinute laws" -> generalLocalDateTimeLaws.withMinute, "[within same offset] withSecond laws" -> generalLocalDateTimeLaws.withSecond, "[within same offset] withMillisecond laws" -> generalLocalDateTimeLaws.withMillisecond, "[within same offset] withTime laws" -> generalLocalDateTimeLaws.withTime, "[within same offset] withDate laws" -> generalLocalDateTimeLaws.withDate, "[within same offset] daysUntil is consistent with addition" -> generalLocalDateTimeLaws.daysUntilIsConsistentWithPlus, "[within same offset] monthsUntil is consistent with addition" -> generalLocalDateTimeLaws.monthsUntilIsConsistentWithPlus, "[within same offset] yearsUntil counts only number of full years" -> generalLocalDateTimeLaws.yearsUntilCountsOnlyFullUnits, "cross-offset addition" -> laws.crossOffsetAddition, "withZoneSameInstant gives the same instant" -> laws.withZoneSameInstantGivesSameInstant, "local time difference is the offset" -> laws.localTimeAndOffsetCorrelation ) } } object ZonedDateTimeTests { def apply[A: Zoned]( gDateAndDurationWithinSameDST: Gen[(A, Duration)], gDataSuite: Gen[ZonedDateTimeTestData[A]], gValidYear: Gen[Int], gTimeZone: Gen[TimeZoneId])( implicit arbA: Arbitrary[A], arbLocalTime: Arbitrary[LocalTime], arbLocalDate: Arbitrary[LocalDate]): ZonedDateTimeTests[A] = new ZonedDateTimeTests[A] { def generalLocalDateTimeLaws: GeneralLocalDateTimeLaws[A] = GeneralLocalDateTimeLaws[A]( gDateAndDurationWithinSameDST, arbLocalTime.arbitrary, arbLocalDate.arbitrary, gValidYear ) def laws: ZonedDateTimeLaws[A] = ZonedDateTimeLaws[A]( gDateAndDurationWithinSameDST, gDataSuite, arbLocalTime.arbitrary, arbLocalDate.arbitrary, gValidYear, gTimeZone ) } }
Example 46
Source File: DateTimeLaws.scala From dtc with Apache License 2.0 | 5 votes |
package dtc.laws import java.time.{Duration, LocalDate, LocalTime} import dtc._ import cats.kernel.instances.int._ import cats.kernel.instances.long._ import cats.kernel.laws.discipline.catsLawsIsEqToProp import dtc.TimePoint import org.scalacheck.Prop._ import org.scalacheck.{Arbitrary, Gen, Prop} import dtc.syntax.all._ import cats.kernel.laws._ trait DateTimeLaws[A] { implicit def D: TimePoint[A] val genA: Gen[A] val genAdditionSafeDateAndDuration: Gen[(A, Duration)] // take into account that nanos are always positive in the Duration. private def fullNumberOfSeconds(d: Duration) = { val seconds = d.getSeconds if (seconds >= 0 || d.getNano == 0) seconds else seconds + 1 } def additionAndSubtractionOfSameDuration: Prop = forAll(genAdditionSafeDateAndDuration) { case (x, d) => D.plus(D.plus(x, d), d.negated()) <-> x } def additionOfZero: Prop = forAll(genAdditionSafeDateAndDuration) { case (x, _) => D.plus(x, Duration.ZERO) <-> x } def additionOfNonZero: Prop = forAll(genAdditionSafeDateAndDuration) { case (x, d) => Prop(d.isZero || (d.isNegative && D.lt(D.plus(x, d), x)) || D.gt(D.plus(x, d), x)) } def millisAddition: Prop = forAll(genAdditionSafeDateAndDuration) { case (x, d) => D.plus(x, d).millisecond <-> ((x.millisecond + d.toMillis) %% 1000) } def untilSelfIsAlwaysZero: Prop = forAll(genA) { x: A => (D.millisecondsUntil(x, x) <-> 0L) && (D.secondsUntil(x, x) <-> 0L) && (D.minutesUntil(x, x) <-> 0L) && (D.hoursUntil(x, x) <-> 0L) && (D.daysUntil(x, x) <-> 0L) && (D.monthsUntil(x, x) <-> 0L) && (D.yearsUntil(x, x) <-> 0L) } def untilIsConsistentWithPlus: Prop = forAll(genAdditionSafeDateAndDuration) { case (x, d) => val altered = D.plus(x, d) val truncated = truncateToMillis(d) (D.millisecondsUntil(x, altered) <-> truncated.toMillis) && (D.secondsUntil(x, altered) <-> fullNumberOfSeconds(truncated)) && (D.minutesUntil(x, altered) <-> fullNumberOfSeconds(truncated) / SecondsInMinute) && (D.hoursUntil(x, altered) <-> fullNumberOfSeconds(truncated) / (SecondsInMinute * MinutesInHour)) } def dateMustNotThrow: Prop = forAll(genA) { x: A => D.date(x) proved } def timeMustNotThrow: Prop = forAll(genA) { x: A => D.time(x) proved } def dateFieldsAreConsistentWithToLocalDate: Prop = forAll(genA) { x: A => catsLawsIsEqToProp(x.date.getDayOfWeek <-> x.dayOfWeek) && (LocalDate.of(x.year, x.month, x.dayOfMonth) <-> x.date) } def timeFieldsAreConsistentWithToLocalTime: Prop = forAll(genA) { x: A => LocalTime.of(x.hour, x.minute, x.second, millisToNanos(x.millisecond)) <-> x.time } } object DateTimeLaws { def apply[A](gDateAndDuration: Gen[(A, Duration)])( implicit ev: TimePoint[A], arbA: Arbitrary[A]): DateTimeLaws[A] = new DateTimeLaws[A] { def D: TimePoint[A] = ev val genA: Gen[A] = arbA.arbitrary val genAdditionSafeDateAndDuration: Gen[(A, Duration)] = gDateAndDuration } }
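The fullNumberOfSeconds helper above exists because java.time.Duration normalizes to a (possibly negative) second count plus a non-negative nano adjustment; a short illustration of the representation it corrects for:

import java.time.Duration

val d = Duration.ofMillis(-1500) // minus one and a half seconds
assert(d.getSeconds == -2L)      // seconds round toward negative infinity
assert(d.getNano == 500000000)   // nanos always lie in [0, 999999999]
// fullNumberOfSeconds(d) yields -1, the number of whole elapsed seconds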
Example 47
Source File: ProviderTests.scala From dtc with Apache License 2.0 | 5 votes |
package dtc.laws import java.time.Duration import cats.Order import dtc.{Provider, TimeZoneId} import org.scalacheck.{Arbitrary, Gen} import org.typelevel.discipline.Laws trait ProviderTests[A] extends Laws { def laws: ProviderLaws[A] def provider(implicit arbA: Arbitrary[A], arbD: Arbitrary[Duration]): RuleSet = { new DefaultRuleSet( name = "Provider", parent = None, "two consequent now calls preserve order" -> laws.twoConsequentNowCalls ) } } object ProviderTests { def apply[A: Provider : Order]( gTimeZone: Gen[TimeZoneId])( implicit arbA: Arbitrary[A]): ProviderTests[A] = new ProviderTests[A] { def laws: ProviderLaws[A] = ProviderLaws(gTimeZone) } }
Example 48
Source File: DateTimeTests.scala From dtc with Apache License 2.0 | 5 votes |
package dtc.laws import java.time.Duration import dtc.TimePoint import org.scalacheck.{Arbitrary, Gen} import org.typelevel.discipline.Laws trait DateTimeTests[A] extends Laws { def laws: DateTimeLaws[A] def dateTime(implicit arbA: Arbitrary[A]): RuleSet = { new DefaultRuleSet( name = "DateTime", parent = None, "add and substract the same duration gives original value" -> laws.additionAndSubtractionOfSameDuration, "add zero gives same value" -> laws.additionOfZero, "add non zero changes value" -> laws.additionOfNonZero, "millis addition laws" -> laws.millisAddition, "until self is always zero" -> laws.untilSelfIsAlwaysZero, "until methods are consistent with addition" -> laws.untilIsConsistentWithPlus, "date is always defined" -> laws.dateMustNotThrow, "time is always defined" -> laws.timeMustNotThrow, "date fields are consistent with toLocalDate" -> laws.dateFieldsAreConsistentWithToLocalDate, "time fields are consistent with toLocalTime" -> laws.timeFieldsAreConsistentWithToLocalTime ) } } object DateTimeTests { def apply[A: TimePoint]( gDateAndDuration: Gen[(A, Duration)])( implicit arbA: Arbitrary[A]): DateTimeTests[A] = new DateTimeTests[A] { def laws: DateTimeLaws[A] = DateTimeLaws[A](gDateAndDuration) } }
Example 49
Source File: LocalDateTimeTests.scala From dtc with Apache License 2.0 | 5 votes |
package dtc.laws import java.time.{Duration, LocalDate, LocalTime} import dtc.Local import org.scalacheck.{Arbitrary, Gen} import org.typelevel.discipline.Laws trait LocalDateTimeTests[A] extends Laws { def generalLaws: GeneralLocalDateTimeLaws[A] def laws: LocalDateTimeLaws[A] def localDateTime(implicit arbA: Arbitrary[A], arbD: Arbitrary[Duration]): RuleSet = { new DefaultRuleSet( name = "LocalDateTime", parent = None, "seconds addition laws" -> generalLaws.secondsAddition, "minutes addition laws" -> generalLaws.minutesAddition, "hours addition laws" -> generalLaws.hoursAddition, "constructor consistency" -> laws.constructorConsistency, "plain constructor consistency" -> laws.plainConstructorConsistency, "withYear laws" -> generalLaws.withYear, "withMonth laws" -> generalLaws.withMonth, "withDayOfMonth laws" -> generalLaws.withDayOfMonth, "withHour laws" -> generalLaws.withHour, "withMinute laws" -> generalLaws.withMinute, "withSecond laws" -> generalLaws.withSecond, "withMillisecond laws" -> generalLaws.withMillisecond, "withTime laws" -> generalLaws.withTime, "withDate laws" -> generalLaws.withDate, "daysUntil is consistent with addition" -> generalLaws.daysUntilIsConsistentWithPlus, "monthsUntil is consistent with addition" -> generalLaws.monthsUntilIsConsistentWithPlus, "yearsUntil counts only number of full years" -> generalLaws.yearsUntilCountsOnlyFullUnits ) } // see: https://github.com/moment/moment/issues/3029 def monthUntilFractionHandling(implicit arbA: Arbitrary[A], arbD: Arbitrary[Duration]): RuleSet = { new DefaultRuleSet( name = "LocalDateTime", parent = None, "monthsUntil counts only number of full months" -> generalLaws.monthsUntilCountsOnlyFullUnits ) } } object LocalDateTimeTests { def apply[A: Local]( gDateAndDuration: Gen[(A, Duration)], gValidYear: Gen[Int])( implicit arbA: Arbitrary[A], arbLocalTime: Arbitrary[LocalTime], arbLocalDate: Arbitrary[LocalDate]): LocalDateTimeTests[A] = new LocalDateTimeTests[A] { def laws: LocalDateTimeLaws[A] = LocalDateTimeLaws[A]( arbLocalTime.arbitrary, arbLocalDate.arbitrary ) def generalLaws: GeneralLocalDateTimeLaws[A] = GeneralLocalDateTimeLaws[A]( gDateAndDuration, arbLocalTime.arbitrary, arbLocalDate.arbitrary, gValidYear ) } }
Example 50
Source File: LocalDateTimeTests.scala From dtc with Apache License 2.0 | 5 votes |
package dtc.tests import java.time.{Duration, LocalDateTime, ZoneOffset} import cats.instances.option._ import cats.kernel.laws.discipline.OrderTests import com.fortysevendeg.scalacheck.datetime.jdk8.ArbitraryJdk8.genZonedDateTime import dtc.instances.localDateTime._ import dtc.laws.{DateTimeTests, LocalDateTimeTests, ProviderTests} import org.scalacheck.Arbitrary.arbitrary import org.scalacheck.{Arbitrary, Cogen} import dtc.instances.providers.realLocalDateTimeProvider class JVMLocalDateTimeTests extends DTCSuiteJVM { implicit val arbT: Arbitrary[LocalDateTime] = Arbitrary(genZonedDateTime.map(_.toLocalDateTime)) implicit val cogenT: Cogen[LocalDateTime] = Cogen(_.toEpochSecond(ZoneOffset.UTC)) val overflowSafePairGen = for { dt <- arbitrary[LocalDateTime] dur <- arbitrary[Duration] } yield (dt, dur) val ldtTests = LocalDateTimeTests[LocalDateTime](overflowSafePairGen, genYear) checkAll("java.time.LocalDateTime", DateTimeTests[LocalDateTime](overflowSafePairGen).dateTime) checkAll("java.time.LocalDateTime", ldtTests.localDateTime) checkAll("java.time.LocalDateTime", ldtTests.monthUntilFractionHandling) checkAll("java.time.LocalDateTime", OrderTests[LocalDateTime].order) checkAll("java.time.LocalDateTime", OrderTests[LocalDateTime].partialOrder) checkAll("java.time.LocalDateTime", OrderTests[LocalDateTime].eqv) checkAll("java.time.LocalDateTime", ProviderTests[LocalDateTime](genTimeZone).provider) }
Example 51
Source File: JVMZonedDateTimeTests.scala From dtc with Apache License 2.0 | 5 votes |
package dtc.tests import java.time.temporal.ChronoUnit import java.time.{Duration, ZonedDateTime} import cats.instances.option._ import cats.kernel.laws.discipline.OrderTests import com.fortysevendeg.scalacheck.datetime.jdk8.ArbitraryJdk8 import dtc.{Offset, Zoned} import dtc.laws.{DateTimeTests, ProviderTests, ZonedDateTimeTestData, ZonedDateTimeTests} import dtc.syntax.timeZone._ import org.scalacheck.Arbitrary.arbitrary import org.scalacheck.{Arbitrary, Cogen, Gen} import dtc.instances.providers.realZonedDateTimeProvider abstract class JVMZonedDateTimeTests(instance: Zoned[ZonedDateTime]) extends DTCSuiteJVM { implicit val zonedInstance: Zoned[ZonedDateTime] = instance implicit val arbT: Arbitrary[ZonedDateTime] = ArbitraryJdk8.arbZonedDateTimeJdk8 implicit val cogenT: Cogen[ZonedDateTime] = Cogen(_.toEpochSecond) val overflowSafePairGen: Gen[(ZonedDateTime, Duration)] = for { dt <- arbitrary[ZonedDateTime] dur <- arbitrary[Duration] } yield (dt, dur) def genDateFromPeriod(period: SameZoneOffsetPeriod): Gen[ZonedDateTime] = genDateTimeFromSameOffsetPeriod(period).map(tpl => ZonedDateTime.of(tpl._1, tpl._2, tpl._3.zoneId)) val overflowSafePairGenWithinSameOffset: Gen[(ZonedDateTime, Duration)] = for { period <- arbitrary[SameZoneOffsetPeriod] dateTime <- genDateFromPeriod(period) duration <- genDateFromPeriod(period) .map(other => dateTime.until(other, ChronoUnit.NANOS)) .map(Duration.ofNanos) } yield (dateTime, duration) val genZonedTestDataSuite: Gen[ZonedDateTimeTestData[ZonedDateTime]] = overflowSafePairGen.map { case (date, duration) => val target = date.plus(duration) ZonedDateTimeTestData(date, duration, Offset(date.plus(duration).getOffset.getTotalSeconds), target.toLocalTime, target.toLocalDate) } checkAll("java.time.ZonedDateTime", DateTimeTests[ZonedDateTime](overflowSafePairGen).dateTime) checkAll("java.time.ZonedDateTime", ZonedDateTimeTests[ZonedDateTime]( overflowSafePairGenWithinSameOffset, genZonedTestDataSuite, genYear, genTimeZone ).zonedDateTime) checkAll("java.time.ZonedDateTime", OrderTests[ZonedDateTime].order) checkAll("java.time.ZonedDateTime", OrderTests[ZonedDateTime].partialOrder) checkAll("java.time.ZonedDateTime", OrderTests[ZonedDateTime].eqv) checkAll("java.time.ZonedDateTime", ProviderTests[ZonedDateTime](genTimeZone).provider) } class ZonedDateTimeWithStrictEqualityTests extends JVMZonedDateTimeTests(dtc.instances.zonedDateTime.zonedDateTimeWithStrictEquality) class ZonedDateTimeWithCrossZoneEqualityTests extends JVMZonedDateTimeTests(dtc.instances.zonedDateTime.zonedDateTimeWithCrossZoneEquality)
Example 52
Source File: DTCSuite.scala From dtc with Apache License 2.0 | 5 votes |
package dtc.tests import java.time.temporal.ChronoField import java.time.temporal.ChronoUnit._ import java.time.{Duration, LocalDate, LocalTime} import dtc.TimeZoneId import org.scalacheck.{Arbitrary, Gen} import org.scalatest.funspec.AnyFunSpecLike import org.scalatest.matchers.should.Matchers import org.scalatestplus.scalacheck.ScalaCheckDrivenPropertyChecks import org.typelevel.discipline.scalatest.FunSpecDiscipline trait DTCSuite extends AnyFunSpecLike with Matchers with ScalaCheckDrivenPropertyChecks with FunSpecDiscipline { override implicit val generatorDrivenConfig: PropertyCheckConfiguration = PropertyCheckConfiguration( minSuccessful = 100 ) private val nanoOfDayRange = ChronoField.NANO_OF_DAY.range() val genLocalTime: Gen[LocalTime] = Gen.choose(nanoOfDayRange.getMinimum, nanoOfDayRange.getMaximum).map(LocalTime.ofNanoOfDay) implicit val arbLocalTime: Arbitrary[LocalTime] = Arbitrary(genLocalTime) val genDuration: Gen[Duration] = Gen.choose(Long.MinValue / 1000, Long.MaxValue / 1000) .map(l => Duration.of(l, MILLIS)) implicit val arbDuration = Arbitrary(genDuration) def genDateTimeFromSameOffsetPeriod(period: SameZoneOffsetPeriod): Gen[(LocalDate, LocalTime, TimeZoneId)] = for { date <- Gen.choose(period.startDate.toEpochDay + 1L, period.endDate.toEpochDay - 1L).map(LocalDate.ofEpochDay) timeBounds <- Gen.const( if (date == period.startDate && date == period.endDate) (period.startTime, period.endTime) else if (date == period.startDate) (period.startTime, LocalTime.MAX) else if (date == period.endDate) (LocalTime.MAX, period.endTime) else (LocalTime.MIN, LocalTime.MAX) ) time <- Gen.choose(timeBounds._1.toNanoOfDay, timeBounds._2.toNanoOfDay).map(LocalTime.ofNanoOfDay) } yield (date, time, period.zone) }
Example 53
Source File: MomentZonedDateTimeTests.scala From dtc with Apache License 2.0 | 5 votes |
package dtc.tests import java.time.{Duration, LocalDate, LocalTime} import cats.instances.option._ import cats.kernel.laws.discipline.OrderTests import dtc.{TimeZoneId, Zoned} import dtc.js.MomentZonedDateTime import dtc.laws.{DateTimeTests, ProviderTests, ZonedDateTimeTestData, ZonedDateTimeTests} import org.scalacheck.Arbitrary.arbitrary import org.scalacheck.{Arbitrary, Cogen, Gen} import dtc.instances.moment.providers.realMomentZonedDateTimeProvider abstract class MomentZonedDateTimeTests(instance: Zoned[MomentZonedDateTime]) extends DTCSuiteJS { implicit val zonedInstance: Zoned[MomentZonedDateTime] = instance implicit val arbT: Arbitrary[MomentZonedDateTime] = Arbitrary(for { date <- arbitrary[LocalDate] time <- arbitrary[LocalTime] zone <- arbitrary[TimeZoneId] } yield MomentZonedDateTime.of(date, time, zone)) implicit val cogenT: Cogen[MomentZonedDateTime] = cogenMomentDateTime[MomentZonedDateTime] val pairGen: Gen[(MomentZonedDateTime, Duration)] = for { zone <- arbitrary[TimeZoneId] pair <- overflowSafePairGen } yield (MomentZonedDateTime.of(pair._1, pair._2, zone), pair._3) def genDateFromPeriod(period: SameZoneOffsetPeriod): Gen[MomentZonedDateTime] = genDateTimeFromSameOffsetPeriod(period).map(tpl => MomentZonedDateTime.of(tpl._1, tpl._2, tpl._3)) val overflowSafePairGenWithinSameOffset: Gen[(MomentZonedDateTime, Duration)] = for { period <- arbitrary[SameZoneOffsetPeriod] dateTime <- genDateFromPeriod(period) duration <- genDateFromPeriod(period) .map(other => dateTime.millisecondsUntil(other)) .map(Duration.ofMillis) } yield (dateTime, duration) val genZonedTestDataSuite: Gen[ZonedDateTimeTestData[MomentZonedDateTime]] = pairGen.map { case (date, duration) => val target = date.plus(duration) ZonedDateTimeTestData(date, duration, target.offset, target.toLocalTime, target.toLocalDate) } checkAll("MomentZonedDateTime", DateTimeTests[MomentZonedDateTime](pairGen).dateTime) checkAll("MomentZonedDateTime", ZonedDateTimeTests[MomentZonedDateTime]( overflowSafePairGenWithinSameOffset, genZonedTestDataSuite, genJSValidYear, genTimeZone ).zonedDateTime) checkAll("MomentZonedDateTime", OrderTests[MomentZonedDateTime].order) checkAll("MomentZonedDateTime", OrderTests[MomentZonedDateTime].partialOrder) checkAll("MomentZonedDateTime", OrderTests[MomentZonedDateTime].eqv) checkAll("MomentZonedDateTime", ProviderTests[MomentZonedDateTime](genTimeZone).provider) } class MomentZonedDateTimeWithStrictEqualityTests extends MomentZonedDateTimeTests(dtc.instances.moment.momentZonedWithStrictEquality) class MomentZonedDateTimeWithCrossZoneEqualityTests extends MomentZonedDateTimeTests(dtc.instances.moment.momentZonedWithCrossZoneEquality)
Example 54
Source File: package.scala From dtc with Apache License 2.0 | 5 votes |
import java.time.Duration package object dtc { private[dtc] def millisToNanos(millis: Int): Int = millis * NanosInMilli private[dtc] def roundNanosToMillis(nanos: Int): Int = (nanos / NanosInMilli) * NanosInMilli private[dtc] def truncateToMillis(d: Duration): Duration = d.withNanos(roundNanosToMillis(d.getNano)) private[dtc] val NanosInMilli: Int = 1000000 private[dtc] val MillisInSecond: Int = 1000 private[dtc] val SecondsInMinute: Int = 60 private[dtc] val MinutesInHour: Int = 60 private[dtc] val HoursInDay: Int = 24 private[dtc] val MillisInMinute: Int = MillisInSecond * SecondsInMinute private[dtc] val MillisInHour: Int = MillisInMinute * MinutesInHour private[dtc] val MillisInDay: Int = MillisInHour * HoursInDay private[dtc] val SecondsInDay: Int = SecondsInMinute * MinutesInHour * HoursInDay private[dtc] implicit class TimeIntOps(n: Long) { def %%(b: Int) = absMod(n, b) } // % with "time fraction" behaviour: negative numbers are translated to adjacent positive // e.g. -3s => 57s private[dtc] def absMod(a: Long, b: Int): Int = { val m = (a % b).toInt if (a >= 0 || m == 0) m else b + m } object Ordering { object Double { def compare(x1: Double, x2: Double): Int = if (x1 < x2) -1 else if (x1 > x2) 1 else 0 } } }
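A few worked values for absMod above, which gives % the wrap-around behaviour time fields need (this sketch assumes it runs inside the dtc package, since the helper is package-private):

assert(absMod(-3L, 60) == 57) // -3 seconds reads as 57 seconds
assert(absMod(63L, 60) == 3)
assert(absMod(-60L, 60) == 0) // exact multiples stay at zero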
Example 55
Source File: zoned.scala From dtc with Apache License 2.0 | 5 votes |
package dtc.cats.instances import java.time.{DayOfWeek, Duration, LocalDate, LocalTime} import cats.Invariant import dtc.{Offset, TimeZoneId, Zoned} object zoned extends CatsZonedInstances trait CatsZonedInstances { implicit val zonedInvariant: Invariant[Zoned] = new Invariant[Zoned] { def imap[A, B](ev: Zoned[A])(f: A => B)(g: B => A): Zoned[B] = new Zoned[B] { def compare(x: B, y: B): Int = ev.compare(g(x), g(y)) def zone(t: B): TimeZoneId = ev.zone(g(t)) def millisecond(x: B): Int = ev.millisecond(g(x)) def second(t: B): Int = ev.second(g(t)) def minute(t: B): Int = ev.minute(g(t)) def hour(t: B): Int = ev.hour(g(t)) def dayOfMonth(t: B): Int = ev.dayOfMonth(g(t)) def dayOfWeek(x: B): DayOfWeek = ev.dayOfWeek(g(x)) def month(t: B): Int = ev.month(g(t)) def year(t: B): Int = ev.year(g(t)) def capture(date: LocalDate, time: LocalTime, zone: TimeZoneId): B = f(ev.capture(date, time, zone)) def withZoneSameInstant(x: B, zone: TimeZoneId): B = f(ev.withZoneSameInstant(g(x), zone)) def withZoneSameLocal(x: B, zone: TimeZoneId): B = f(ev.withZoneSameLocal(g(x), zone)) def offset(x: B): Offset = ev.offset(g(x)) def date(x: B): LocalDate = ev.date(g(x)) def time(x: B): LocalTime = ev.time(g(x)) def plus(x: B, d: Duration): B = f(ev.plus(g(x), d)) def minus(x: B, d: Duration): B = f(ev.minus(g(x), d)) def plusDays(x: B, days: Int): B = f(ev.plusDays(g(x), days)) def plusMonths(x: B, months: Int): B = f(ev.plusMonths(g(x), months)) def plusYears(x: B, years: Int): B = f(ev.plusYears(g(x), years)) def withYear(x: B, year: Int): B = f(ev.withYear(g(x), year)) def withMonth(x: B, month: Int): B = f(ev.withMonth(g(x), month)) def withDayOfMonth(x: B, dayOfMonth: Int): B = f(ev.withDayOfMonth(g(x), dayOfMonth)) def withHour(x: B, hour: Int): B = f(ev.withHour(g(x), hour)) def withMinute(x: B, minute: Int): B = f(ev.withMinute(g(x), minute)) def withSecond(x: B, second: Int): B = f(ev.withSecond(g(x), second)) def withMillisecond(x: B, millisecond: Int): B = f(ev.withMillisecond(g(x), millisecond)) def withTime(x: B, time: LocalTime): B = f(ev.withTime(g(x), time)) def withDate(x: B, date: LocalDate): B = f(ev.withDate(g(x), date)) def yearsUntil(x: B, until: B): Long = ev.yearsUntil(g(x), g(until)) def monthsUntil(x: B, until: B): Long = ev.monthsUntil(g(x), g(until)) def daysUntil(x: B, until: B): Long = ev.daysUntil(g(x), g(until)) def hoursUntil(x: B, until: B): Long = ev.hoursUntil(g(x), g(until)) def minutesUntil(x: B, until: B): Long = ev.minutesUntil(g(x), g(until)) def secondsUntil(x: B, until: B): Long = ev.secondsUntil(g(x), g(until)) def millisecondsUntil(x: B, until: B): Long = ev.millisecondsUntil(g(x), g(until)) def utc(x: B): (LocalDate, LocalTime) = ev.utc(g(x)) } } }
Example 56
Source File: SingletonMemorySink.scala From milan with Apache License 2.0 | 5 votes |
package com.amazon.milan.application.sinks import java.time.{Duration, Instant} import java.util.concurrent.{ConcurrentHashMap, ConcurrentLinkedQueue} import java.util.function import com.amazon.milan.Id import com.amazon.milan.application.DataSink import com.amazon.milan.typeutil.TypeDescriptor import com.fasterxml.jackson.annotation.JsonIgnore import com.fasterxml.jackson.databind.annotation.{JsonDeserialize, JsonSerialize} import scala.collection.mutable import scala.collection.mutable.ArrayBuffer import scala.concurrent.TimeoutException object SingletonMemorySink { private val values = new ConcurrentHashMap[String, ArrayBuffer[MemorySinkRecord[_]]]() private val nextSeqNum = new mutable.HashMap[String, Int]() private val locks = new ConcurrentHashMap[String, Object]() private def makeCreateBufferFunction[T]: java.util.function.Function[String, ArrayBuffer[MemorySinkRecord[_]]] = new function.Function[String, ArrayBuffer[MemorySinkRecord[_]]] { override def apply(t: String): ArrayBuffer[MemorySinkRecord[_]] = (new ArrayBuffer[MemorySinkRecord[T]]()).asInstanceOf[ArrayBuffer[MemorySinkRecord[_]]] } private val createLocker = new java.util.function.Function[String, Object] { override def apply(t: String): AnyRef = new Object() } @JsonIgnore def getRecordCount: Int = SingletonMemorySink.getBuffer(this.sinkId).size @JsonIgnore def getValues: List[T] = { SingletonMemorySink.getBuffer[T](this.sinkId).map(_.value).toList } @JsonIgnore def getRecords: List[MemorySinkRecord[T]] = { SingletonMemorySink.getBuffer[T](this.sinkId).toList } def waitForItems(itemCount: Int, timeout: Duration = null): Unit = { val endTime = if (timeout == null) Instant.MAX else Instant.now().plus(timeout) while (SingletonMemorySink.getBuffer(this.sinkId).size < itemCount) { if (Instant.now().isAfter(endTime)) { throw new TimeoutException() } Thread.sleep(1) } } override def equals(obj: Any): Boolean = { obj match { case o: SingletonMemorySink[_] => this.sinkId.equals(o.sinkId) case _ => false } } } class MemorySinkRecord[T](val seqNum: String, val createdTime: Instant, val value: T) extends Serializable
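waitForItems above maps a null timeout to Instant.MAX and then polls until the deadline; a condensed sketch of that idiom (names here are illustrative, not part of the original file):

import java.time.{Duration, Instant}

def deadline(timeout: Duration): Instant =
  if (timeout == null) Instant.MAX else Instant.now().plus(timeout)

def waitUntil(condition: () => Boolean, timeout: Duration = null): Unit = {
  val endTime = deadline(timeout)
  while (!condition()) {
    if (Instant.now().isAfter(endTime)) throw new java.util.concurrent.TimeoutException()
    Thread.sleep(1)
  }
}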
Example 57
Source File: TestWindow.scala From milan with Apache License 2.0 | 5 votes |
package com.amazon.milan.lang import java.time.Duration import com.amazon.milan.lang.aggregation._ import com.amazon.milan.program import com.amazon.milan.program.{GroupBy, _} import com.amazon.milan.test.{DateIntRecord, DateKeyValueRecord} import com.amazon.milan.typeutil.{FieldDescriptor, types} import org.junit.Assert._ import org.junit.Test import scala.language.existentials @Test class TestWindow { @Test def test_TumblingWindow_ReturnsStreamWithCorrectInputNodeAndWindowProperties(): Unit = { val stream = Stream.of[DateIntRecord] val windowed = stream.tumblingWindow(r => r.dateTime, Duration.ofHours(1), Duration.ofMinutes(30)) val TumblingWindow(_, dateExtractorFunc, period, offset) = windowed.expr // If this extraction doesn't throw an exception then the formula is correct. val FunctionDef(List(ValueDef("r", _)), SelectField(SelectTerm("r"), "dateTime")) = dateExtractorFunc assertEquals(Duration.ofHours(1), period.asJava) assertEquals(Duration.ofMinutes(30), offset.asJava) } @Test def test_TumblingWindow_ThenSelectToTuple_ReturnsStreamWithCorrectFieldComputationExpression(): Unit = { val stream = Stream.of[DateIntRecord] val grouped = stream.tumblingWindow(r => r.dateTime, Duration.ofHours(1), Duration.ofMinutes(30)) val selected = grouped.select((key, r) => fields(field("max", max(r.i)))) val Aggregate(source, FunctionDef(_, NamedFields(fieldList))) = selected.expr assertEquals(1, selected.recordType.fields.length) assertEquals(FieldDescriptor("max", types.Int), selected.recordType.fields.head) assertEquals(1, fieldList.length) assertEquals("max", fieldList.head.fieldName) // If this extraction statement doesn't crash then we're good. val Max(SelectField(SelectTerm("r"), "i")) = fieldList.head.expr } @Test def test_SlidingWindow_ReturnsStreamWithCorrectInputNodeAndWindowProperties(): Unit = { val stream = Stream.of[DateIntRecord] val windowed = stream.slidingWindow(r => r.dateTime, Duration.ofHours(1), Duration.ofMinutes(10), Duration.ofMinutes(30)) val SlidingWindow(_, dateExtractorFunc, size, slide, offset) = windowed.expr val FunctionDef(List(ValueDef("r", _)), SelectField(SelectTerm("r"), "dateTime")) = dateExtractorFunc assertEquals(Duration.ofHours(1), size.asJava) assertEquals(Duration.ofMinutes(10), slide.asJava) assertEquals(Duration.ofMinutes(30), offset.asJava) } @Test def test_GroupBy_ThenTumblingWindow_ThenSelect_ReturnsStreamWithCorrectInputNodeAndWindowProperties(): Unit = { val input = Stream.of[DateKeyValueRecord].withId("input") val output = input.groupBy(r => r.key) .tumblingWindow(r => r.dateTime, Duration.ofMinutes(5), Duration.ZERO) .select((windowStart, r) => any(r)) val Aggregate(windowExpr, FunctionDef(List(ValueDef("windowStart", _), ValueDef("r", _)), First(SelectTerm("r")))) = output.expr val TumblingWindow(groupExpr, FunctionDef(List(ValueDef("r", _)), SelectField(SelectTerm("r"), "dateTime")), program.Duration(300000), program.Duration(0)) = windowExpr val GroupBy(ExternalStream("input", "input", _), FunctionDef(List(ValueDef("r", _)), SelectField(SelectTerm("r"), "key"))) = groupExpr } }
Example 58
Source File: ReflectionTypeProvider.scala From milan with Apache License 2.0 | 5 votes |
package com.amazon.milan.typeutil import java.time.{Duration, Instant} import com.typesafe.scalalogging.Logger import org.slf4j.LoggerFactory class ReflectionTypeProvider(classLoader: ClassLoader) extends TypeProvider { private lazy val logger = Logger(LoggerFactory.getLogger(getClass)) private val knownTypesByClass = Map[Class[_], TypeDescriptor[_]]( classOf[Int] -> types.Int, classOf[Long] -> types.Long, classOf[Double] -> types.Double, classOf[Float] -> types.Float, classOf[Boolean] -> types.Boolean, classOf[Instant] -> types.Instant, classOf[Duration] -> types.Duration, classOf[String] -> types.String, classOf[Nothing] -> types.Nothing ) private val knownTypesByName = Map[String, TypeDescriptor[_]](this.knownTypesByClass.values.map(td => td.typeName -> td).toList: _*) override def getTypeDescriptor[T](typeName: String, genericArguments: List[TypeDescriptor[_]]): TypeDescriptor[T] = { this.knownTypesByName.get(typeName) match { case Some(typeDesc) => typeDesc.asInstanceOf[TypeDescriptor[T]] case None => val alternatives = Seq( typeName, this.replaceLastDotWithDollar(typeName), s"scala.$typeName") // Return the first Class we find in the sequence of alternative class names. alternatives .map(this.tryFindClass) .filter(_.nonEmpty) .map(_.get) .headOption match { case Some(cls) => this.generateTypeDescriptor[T](typeName, cls, genericArguments) case None => this.logger.error(s"Couldn't generate TypeDescriptor for type '$typeName'.") null } } } private def tryFindClass(className: String): Option[Class[_]] = { try { Some(this.classLoader.loadClass(className)) } catch { case _: ClassNotFoundException => None } } private def replaceLastDotWithDollar(className: String): String = { className.lastIndexOf('.') match { case i if (i < 0) || (i == className.length - 1) => className case i => className.substring(0, i) + "$" + className.substring(i + 1) } } private def generateTypeDescriptor[T](typeName: String, cls: Class[_], genericArguments: List[TypeDescriptor[_]]): TypeDescriptor[T] = { this.logger.debug(s"Generating type descriptor for '$typeName'.") if (TypeDescriptor.isTupleTypeName(typeName)) { new TupleTypeDescriptor[T](typeName, genericArguments, List()) } else { val fieldFields = cls.getDeclaredFields .map(field => FieldDescriptor(field.getName, this.getTypeDescriptor(field.getType, List()))) .toList new ObjectTypeDescriptor[T](typeName, genericArguments, fieldFields) } } private def getTypeDescriptor(cls: Class[_], genericArguments: List[TypeDescriptor[_]]): TypeDescriptor[_] = { this.knownTypesByClass.get(cls) match { case Some(typeDesc) => typeDesc case None => this.generateTypeDescriptor[Any](cls.getCanonicalName, cls, genericArguments) } } }
Example 59
Source File: package.scala From milan with Apache License 2.0 | 5 votes |
package com.amazon.milan.typeutil import java.time.{Duration, Instant} package object types { val Boolean = new BasicTypeDescriptor[Boolean]("Boolean") val Double = new NumericTypeDescriptor[Double]("Double") val Float = new NumericTypeDescriptor[Float]("Float") val Instant = new BasicTypeDescriptor[Instant]("java.time.Instant") val Duration = new BasicTypeDescriptor[Duration]("java.time.Duration") val Int = new NumericTypeDescriptor[Int]("Int") val Long = new NumericTypeDescriptor[Long]("Long") val String = new BasicTypeDescriptor[String]("String") val Nothing = new BasicTypeDescriptor[Nothing]("Nothing") val EmptyTuple = new TupleTypeDescriptor[Product]("Tuple0", List(), List()) }
Example 60
Source File: package.scala From milan with Apache License 2.0 | 5 votes |
package com.amazon.milan.compiler.flink import java.time.Duration import java.util.concurrent.TimeoutException import com.amazon.milan.testing.Concurrent import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment import scala.concurrent.{Await, ExecutionContext, Future, blocking} import scala.language.implicitConversions package object testing { implicit def extendStreamExecutionEnvironment(env: StreamExecutionEnvironment): StreamExecutionEnvironmentExtensions = new StreamExecutionEnvironmentExtensions(env) implicit def extendFuture[T](future: Future[T]): FutureExtensions[T] = new FutureExtensions[T](future) implicit class DurationExtensions(d: Duration) { def toConcurrent: scala.concurrent.duration.Duration = scala.concurrent.duration.Duration(this.d.toMillis, scala.concurrent.duration.MILLISECONDS) } } class StreamExecutionEnvironmentExtensions(env: StreamExecutionEnvironment) { def executeThenWaitFor(predicate: () => Boolean, secondsToWait: Int): Unit = { if (!Concurrent.executeAndWait( () => env.execute(), predicate, Duration.ofSeconds(secondsToWait))) { throw new TimeoutException("Timed out waiting for stop condition.") } } def executeAsync(maxSeconds: Int): Future[Boolean] = { Concurrent.executeAsync(() => env.execute(), () => true, Duration.ofSeconds(maxSeconds)) } def executeUntilAsync(predicate: () => Boolean, secondsToWait: Int): Future[Unit] = { val result = Concurrent.executeAsync( () => env.execute(), predicate, Duration.ofSeconds(secondsToWait)) result.transform( success => if (!success) { throw new TimeoutException("Timed out waiting for stop condition.") }, ex => throw ex)(ExecutionContext.global) } def executeAtMost(maxSeconds: Int): Unit = { if (!Concurrent.executeUntil( () => env.execute(), () => true, Duration.ofSeconds(maxSeconds))) { throw new TimeoutException("Timed out waiting for stop condition.") } } } class FutureExtensions[T](future: Future[T]) { def thenWaitFor(duration: Duration)(implicit context: ExecutionContext): Future[T] = { Future { blocking { val result = Await.result(this.future, scala.concurrent.duration.Duration.Inf) Thread.sleep(duration.toMillis) result } } } }
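The DurationExtensions implicit above converts java.time.Duration to Scala's duration type via milliseconds, which silently drops sub-millisecond precision. A sketch of both directions using nanoseconds instead (toScala and toJava are our helper names; note toNanos overflows for durations longer than roughly 292 years):

import java.time.{Duration => JDuration}
import scala.concurrent.duration.{FiniteDuration, NANOSECONDS}

def toScala(d: JDuration): FiniteDuration = FiniteDuration(d.toNanos, NANOSECONDS)
def toJava(d: FiniteDuration): JDuration  = JDuration.ofNanos(d.toNanos)

// On Scala 2.13+ the standard library ships equivalent converters:
// import scala.jdk.DurationConverters._  // javaDuration.toScala / scalaDuration.toJava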
Example 61
Source File: TimeWindowSample.scala From milan with Apache License 2.0 | 5 votes |
package com.amazon.milan.samples import java.time.{Duration, Instant} import com.amazon.milan.SemanticVersion import com.amazon.milan.application.sinks.LogSink import com.amazon.milan.application.sources.ListDataSource import com.amazon.milan.application.{Application, ApplicationConfiguration, ApplicationInstance} import com.amazon.milan.lang._ import com.amazon.milan.lang.aggregation._ import com.amazon.milan.tools.ApplicationInstanceProvider class TimeWindowSample extends ApplicationInstanceProvider { override def getApplicationInstance(params: List[(String, String)]): ApplicationInstance = { val input = Stream.of[DateValueRecord] // Create sliding windows that are five seconds long and start every second. // In each window compute the sum of the record values. val output = input .slidingWindow(r => r.dateTime, Duration.ofSeconds(5), Duration.ofSeconds(1), Duration.ZERO) .select((windowStart, r) => fields(field("windowStart", windowStart), field("sum", sum(r.value)))) val graph = new StreamGraph(output) val config = new ApplicationConfiguration() // Create some sample data for the input streams. val now = Instant.now() val inputRecords = List.tabulate(10)(i => DateValueRecord(now.plusSeconds(i), 1)) config.setSource(input, new ListDataSource(inputRecords)) val outputSink = new LogSink[output.RecordType]() config.addSink(output, outputSink) new ApplicationInstance( new Application("TimeWindowSample", graph, SemanticVersion.ZERO), config) } }
Example 62
Source File: package.scala From azure-event-hubs-spark with Apache License 2.0 | 5 votes |
package org.apache.spark import java.time.Duration import com.microsoft.azure.eventhubs.{ EventHubClient, EventHubClientOptions, PartitionReceiver } import org.json4s.NoTypeHints import org.json4s.jackson.Serialization package object eventhubs { implicit val formats = Serialization.formats(NoTypeHints) val StartOfStream: String = "-1" val EndOfStream: String = "@latest" val DefaultEventPosition: EventPosition = EventPosition.fromEndOfStream val DefaultEndingPosition: EventPosition = EventPosition.fromEndOfStream val DefaultMaxRatePerPartition: Rate = 1000 val DefaultReceiverTimeout: Duration = Duration.ofSeconds(60) val DefaultMaxSilentTime: Duration = EventHubClientOptions.SILENT_OFF val MinSilentTime: Duration = EventHubClientOptions.SILENT_MINIMUM val DefaultOperationTimeout: Duration = Duration.ofSeconds(300) val DefaultConsumerGroup: String = EventHubClient.DEFAULT_CONSUMER_GROUP_NAME val PrefetchCountMinimum: Int = PartitionReceiver.MINIMUM_PREFETCH_COUNT val PrefetchCountMaximum: Int = PartitionReceiver.MAXIMUM_PREFETCH_COUNT val DefaultPrefetchCount: Int = PartitionReceiver.DEFAULT_PREFETCH_COUNT val DefaultFailOnDataLoss = "true" val DefaultUseSimulatedClient = "false" val DefaultPartitionPreferredLocationStrategy = "Hash" val DefaultUseExclusiveReceiver = "true" val StartingSequenceNumber = 0L val DefaultThreadPoolSize = 16 val DefaultEpoch = 0L val RetryCount = 10 val WaitInterval = 5000 val OffsetAnnotation = "x-opt-offset" val EnqueuedTimeAnnotation = "x-opt-enqueued-time" val SequenceNumberAnnotation = "x-opt-sequence-number" val SparkConnectorVersion = "2.3.16" type PartitionId = Int val PartitionId: Int.type = Int type Rate = Int val Rate: Int.type = Int type Offset = Long val Offset: Long.type = Long type EnqueueTime = Long val EnqueueTime: Long.type = Long type SequenceNumber = Long val SequenceNumber: Long.type = Long object PartitionPreferredLocationStrategy extends Enumeration { type PartitionPreferredLocationStrategy = Value val Hash, BalancedHash = Value } // Allow Strings to be converted to types defined in this library. implicit class EventHubsString(val str: String) extends AnyVal { def toPartitionId: PartitionId = str.toInt def toRate: Rate = str.toInt def toOffset: Offset = str.toLong def toEnqueueTime: EnqueueTime = str.toLong def toSequenceNumber: SequenceNumber = str.toLong } }
Example 63
Source File: ConnectionStringBuilderSuite.scala From azure-event-hubs-spark with Apache License 2.0 | 5 votes |
package org.apache.spark.eventhubs import java.time.Duration import org.scalatest.FunSuite class ConnectionStringBuilderSuite extends FunSuite { import ConnectionStringBuilderSuite._ private val validateConnStrBuilder = (connStrBuilder: ConnectionStringBuilder) => { assert(connStrBuilder.getEventHubName == CorrectEntityPath) assert(connStrBuilder.getEndpoint.getHost == CorrectEndpoint) assert(connStrBuilder.getSasKey == CorrectKey) assert(connStrBuilder.getSasKeyName == CorrectKeyName) assert(connStrBuilder.getOperationTimeout == CorrectOperationTimeout) } test("parse invalid connection string") { intercept[Exception] { ConnectionStringBuilder("something") } } test("throw on unrecognized parts") { intercept[Exception] { ConnectionStringBuilder(correctConnectionString + ";" + "something") } } test("parse valid connection string") { val connStrBuilder = ConnectionStringBuilder(correctConnectionString) validateConnStrBuilder(connStrBuilder) } test("exchange connection string across constructors") { val connStrBuilder = ConnectionStringBuilder(correctConnectionString) val secondConnStr = ConnectionStringBuilder() .setEndpoint(connStrBuilder.getEndpoint) .setEventHubName(connStrBuilder.getEventHubName) .setSasKeyName(connStrBuilder.getSasKeyName) .setSasKey(connStrBuilder.getSasKey) secondConnStr.setOperationTimeout(connStrBuilder.getOperationTimeout) validateConnStrBuilder(ConnectionStringBuilder(secondConnStr.toString)) } test("property setters") { val connStrBuilder = ConnectionStringBuilder(correctConnectionString) val testConnStrBuilder = ConnectionStringBuilder(connStrBuilder.toString) validateConnStrBuilder(testConnStrBuilder) connStrBuilder.setOperationTimeout(Duration.ofSeconds(8)) val testConnStrBuilder1 = ConnectionStringBuilder(connStrBuilder.toString) assert(testConnStrBuilder1.getOperationTimeout.getSeconds == 8) } } object ConnectionStringBuilderSuite { private val CorrectEndpoint = "endpoint1" private val CorrectEntityPath = "eventhub1" private val CorrectKeyName = "somekeyname" private val CorrectKey = "somekey" private val CorrectOperationTimeout = Duration.ofSeconds(5) private val correctConnectionString = s"Endpoint=sb://$CorrectEndpoint;EntityPath=$CorrectEntityPath;SharedAccessKeyName=$CorrectKeyName;" + s"SharedAccessKey=$CorrectKey;OperationTimeout=$CorrectOperationTimeout;" }
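The interpolated OperationTimeout=$CorrectOperationTimeout in the test fixture works because Duration.toString emits the ISO-8601 form that Duration.parse (and hence the connection string builder) reads back:

import java.time.Duration

assert(Duration.ofSeconds(5).toString == "PT5S")
assert(Duration.parse("PT5S") == Duration.ofSeconds(5))
assert(Duration.parse("PT1H30M") == Duration.ofMinutes(90))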
Example 64
Source File: ShufflePerfToolTest.scala From splash with Apache License 2.0 | 5 votes |
package com.memverge.splash import java.time.Duration import org.assertj.core.api.Assertions.assertThat import org.testng.annotations.{AfterMethod, BeforeMethod, Test} @Test(groups = Array("UnitTest", "IntegrationTest")) class ShufflePerfToolTest { @BeforeMethod private def beforeMethod(): Unit = afterMethod() @AfterMethod private def afterMethod(): Unit = StorageFactoryHolder.getFactory.reset() def testUsage(): Unit = { val ret = ShufflePerfTool.parse(Array("-h")) assertThat(ret.isLeft).isTrue } def testWriteReadShuffleWithDefaultConfig(): Unit = { val start1 = System.nanoTime() ShufflePerfTool.execute(Array("-b", "1024")) val duration1 = Duration.ofNanos(System.nanoTime() - start1) assertThat(duration1.toMillis).isLessThan(2000) val start2 = System.nanoTime() ShufflePerfTool.execute(Array("-b", "1024", "-ro")) val duration2 = Duration.ofNanos(System.nanoTime() - start2) assertThat(duration2.toMillis).isLessThan(duration1.toMillis) } def testInvalidIntParameter(): Unit = { val ret = ShufflePerfTool.parse(Array("-b", "block")) assertThat(ret.isLeft).isTrue ret.left.map(msg => assertThat(msg).contains("invalid integer")) } def testToSizeStr(): Unit = { val scale = ShufflePerfTool.toSizeStrDouble(1000L)(_) assertThat(scale(987)).isEqualTo("987.0") assertThat(scale(512 * 1e3)).isEqualTo("512.00K") assertThat(scale(1010 * 1e3)).isEqualTo("1.01M") assertThat(scale(1237 * 1e6)).isEqualTo("1.24G") assertThat(scale(5678 * 1e9)).isEqualTo("5.68T") } }
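The timing pattern above wraps a System.nanoTime delta in Duration.ofNanos. Extracted into a reusable helper (timed is hypothetical, not part of splash):

import java.time.Duration

def timed[A](block: => A): (A, Duration) = {
  val start = System.nanoTime()
  val result = block
  (result, Duration.ofNanos(System.nanoTime() - start))
}

val (_, elapsed) = timed(Thread.sleep(50))
println(s"took ${elapsed.toMillis} ms")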
Example 65
Source File: UsesPostgreSQLMultipleDatabases.scala From testcontainers-specs2 with MIT License | 5 votes |
package io.chrisdavenport.testcontainersspecs2 import java.time.Duration import java.time.temporal.ChronoUnit.SECONDS import com.dimafeng.testcontainers.{Container, GenericContainer} import org.testcontainers.containers.wait.strategy.LogMessageWaitStrategy trait UsesPostgreSQLMultipleDatabases { private[this] lazy val multiple = new PostgreSQLMultipleDatabases( name = "christopherdavenport/postgres-multi-db:10.3", exposedPort = 5432, dbName = dbName, dbUserName = dbUserName, dbPassword = dbPassword ) lazy val container: Container = multiple.container lazy val driverName: String = "org.postgresql.Driver" lazy val dbUserName: String = "user" lazy val dbPassword: String = "password" lazy val dbName: String = "db" lazy val jdbcUrl: String = multiple.jdbcUrl lazy val ipAddress: String = multiple.ipAddress lazy val port: Int = multiple.mappedPort final class PostgreSQLMultipleDatabases( name: String, exposedPort: Int, dbName: String, dbUserName: String, dbPassword: String ) { lazy val container: GenericContainer = GenericContainer( name, exposedPorts = Seq(exposedPort), env = Map( "REPO" -> "https://github.com/mrts/docker-postgresql-multiple-databases", "POSTGRES_USER" -> dbUserName, "POSTGRES_PASSWORD" -> dbPassword, "POSTGRES_MULTIPLE_DATABASES" -> dbName ), waitStrategy = new LogMessageWaitStrategy() .withRegEx(".*database system is ready to accept connections.*\\s") .withTimes(2) .withStartupTimeout(Duration.of(60, SECONDS)) ) lazy val ipAddress = container.containerIpAddress lazy val mappedPort = container.mappedPort(exposedPort) lazy val jdbcUrl: String = s"jdbc:postgresql://$ipAddress:$mappedPort/$dbName" } }
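Duration.of(60, SECONDS) with a statically imported ChronoUnit, as used for the startup timeout above, is simply the generic form of the dedicated factories:

import java.time.Duration
import java.time.temporal.ChronoUnit.{MINUTES, SECONDS}

assert(Duration.of(60, SECONDS) == Duration.ofSeconds(60))
assert(Duration.of(2, MINUTES) == Duration.ofMinutes(2))
// Units with an estimated length (e.g. MONTHS, YEARS) are rejected by Duration.of.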
Example 66
Source File: TwitterExamples.scala From incubator-retired-gearpump with Apache License 2.0 | 5 votes |
package org.apache.gearpump.streaming.examples.twitter import java.time.Duration import org.apache.gearpump.cluster.client.ClientContext import org.apache.gearpump.cluster.main.{ArgumentsParser, CLIOption} import org.apache.gearpump.streaming.dsl.scalaapi.{LoggerSink, StreamApp} import org.apache.gearpump.streaming.dsl.window.api.{EventTimeTrigger, FixedWindows} import org.apache.gearpump.streaming.twitter.TwitterSource import org.apache.gearpump.util.AkkaApp import twitter4j.conf.ConfigurationBuilder object TwitterExamples extends AkkaApp with ArgumentsParser { val CONSUMER_KEY = "consumer-key" val CONSUMER_SECRET = "consumer-secret" val TOKEN = "token" val TOKEN_SECRET = "token-secret" override val options: Array[(String, CLIOption[Any])] = Array( CONSUMER_KEY -> CLIOption[String]("consumer key", required = true), CONSUMER_SECRET -> CLIOption[String]("consumer secret", required = true), TOKEN -> CLIOption[String]("token", required = true), TOKEN_SECRET -> CLIOption[String]("token secret", required = true) ) override def main(akkaConf: Config, args: Array[String]): Unit = { val config = parse(args) val twitterConf = new ConfigurationBuilder() .setOAuthConsumerKey(config.getString(CONSUMER_KEY)) .setOAuthConsumerSecret(config.getString(CONSUMER_SECRET)) .setOAuthAccessToken(config.getString(TOKEN)) .setOAuthAccessTokenSecret(config.getString(TOKEN_SECRET)) .build() val twitterSource = TwitterSource(twitterConf) val context: ClientContext = ClientContext(akkaConf) val app = StreamApp("TwitterExample", context) app.source[String](twitterSource) .flatMap(tweet => tweet.split("[\\s]+")) .filter(_.startsWith("#")) .map((_, 1)) .window(FixedWindows.apply(Duration.ofMinutes(1)).triggering(EventTimeTrigger)) .groupBy(_._1) .sum .sink(new LoggerSink) context.submit(app).waitUntilFinish() context.close() } }
Example 67
Source File: WindowedWordCount.scala From incubator-retired-gearpump with Apache License 2.0 | 5 votes |
package org.apache.gearpump.streaming.examples.wordcount.dsl import java.time.{Duration, Instant} import org.apache.gearpump.Message import org.apache.gearpump.cluster.client.ClientContext import org.apache.gearpump.cluster.main.{ArgumentsParser, CLIOption} import org.apache.gearpump.streaming.dsl.scalaapi.{LoggerSink, StreamApp} import org.apache.gearpump.streaming.dsl.window.api.{EventTimeTrigger, FixedWindows} import org.apache.gearpump.streaming.source.{DataSource, Watermark} import org.apache.gearpump.streaming.task.TaskContext import org.apache.gearpump.util.AkkaApp object WindowedWordCount extends AkkaApp with ArgumentsParser { override val options: Array[(String, CLIOption[Any])] = Array.empty override def main(akkaConf: Config, args: Array[String]): Unit = { val context = ClientContext(akkaConf) val app = StreamApp("dsl", context) app.source[String](new TimedDataSource). // word => (word, count) flatMap(line => line.split("[\\s]+")).map((_, 1)). // fix window window(FixedWindows.apply(Duration.ofMillis(5L)) .triggering(EventTimeTrigger)). // (word, count1), (word, count2) => (word, count1 + count2) groupBy(_._1). sum.sink(new LoggerSink) context.submit(app).waitUntilFinish() context.close() } private class TimedDataSource extends DataSource { private var data = List( Message("foo", Instant.ofEpochMilli(1L)), Message("bar", Instant.ofEpochMilli(2L)), Message("foo", Instant.ofEpochMilli(3L)), Message("foo", Instant.ofEpochMilli(5L)), Message("bar", Instant.ofEpochMilli(7L)), Message("bar", Instant.ofEpochMilli(8L)) ) private var watermark: Instant = Instant.ofEpochMilli(0) override def read(): Message = { if (data.nonEmpty) { val msg = data.head data = data.tail watermark = msg.timestamp msg } else { null } } override def open(context: TaskContext, startTime: Instant): Unit = {} override def close(): Unit = {} override def getWatermark: Instant = { if (data.isEmpty) { watermark = Watermark.MAX } watermark } } }
Example 68
Source File: DefaultWindowRunnerSpec.scala From incubator-retired-gearpump with Apache License 2.0 | 5 votes |
package org.apache.gearpump.streaming.dsl.window.impl import java.time.{Duration, Instant} import org.apache.gearpump.Message import org.apache.gearpump.streaming.dsl.api.functions.ReduceFunction import org.apache.gearpump.streaming.MockUtil import org.apache.gearpump.streaming.dsl.plan.functions.FoldRunner import org.apache.gearpump.streaming.dsl.window.api.SessionWindows import org.apache.gearpump.streaming.source.Watermark import org.scalatest.{Matchers, PropSpec} import org.scalatest.mock.MockitoSugar import org.scalatest.prop.PropertyChecks class DefaultWindowRunnerSpec extends PropSpec with PropertyChecks with Matchers with MockitoSugar { property("DefaultWindowRunner should handle SessionWindow") { val data = List( Message(("foo", 1L), Instant.ofEpochMilli(1L)), Message(("foo", 1L), Instant.ofEpochMilli(15L)), Message(("foo", 1L), Instant.ofEpochMilli(25L)), Message(("foo", 1L), Instant.ofEpochMilli(26L)) ) type KV = (String, Long) implicit val system = MockUtil.system val reduce = ReduceFunction[KV]((kv1, kv2) => (kv1._1, kv1._2 + kv2._2)) val windows = SessionWindows.apply(Duration.ofMillis(4L)) val windowRunner = new WindowOperator[KV, Option[KV]](windows, new FoldRunner[KV, Option[KV]](reduce, "reduce")) data.foreach(m => windowRunner.foreach(TimestampedValue(m.value.asInstanceOf[KV], m.timestamp))) windowRunner.trigger(Watermark.MAX).outputs.toList shouldBe List( TimestampedValue(Some(("foo", 1)), Instant.ofEpochMilli(4)), TimestampedValue(Some(("foo", 1)), Instant.ofEpochMilli(18)), TimestampedValue(Some(("foo", 2)), Instant.ofEpochMilli(29)) ) } }
Example 69
Source File: RunningApplication.scala From incubator-retired-gearpump with Apache License 2.0 | 5 votes |
package org.apache.gearpump.cluster.client import akka.actor.ActorRef import akka.pattern.ask import akka.util.Timeout import org.apache.gearpump.cluster.ClientToMaster.{RegisterAppResultListener, ResolveAppId, ShutdownApplication} import org.apache.gearpump.cluster.MasterToClient._ import org.apache.gearpump.cluster.client.RunningApplication._ import org.apache.gearpump.util.{ActorUtil, LogUtil} import org.slf4j.Logger import java.time.Duration import java.util.concurrent.TimeUnit import scala.concurrent.ExecutionContext.Implicits.global import scala.concurrent.Future import scala.concurrent.duration._ import scala.util.{Failure, Success} class RunningApplication(val appId: Int, master: ActorRef, timeout: Timeout) { lazy val appMaster: Future[ActorRef] = resolveAppMaster(appId) def shutDown(): Unit = { val result = ActorUtil.askActor[ShutdownApplicationResult](master, ShutdownApplication(appId), timeout) result.appId match { case Success(_) => case Failure(ex) => throw ex } } def waitUntilFinish(): Unit = { this.waitUntilFinish(INF_DURATION) } def waitUntilFinish(duration: Duration): Unit = { val result = ActorUtil.askActor[ApplicationResult](master, RegisterAppResultListener(appId), new Timeout(duration.getSeconds, TimeUnit.SECONDS)) if (result.appId == appId) { result match { case failed: ApplicationFailed => throw failed.error case _: ApplicationSucceeded => LOG.info(s"Application $appId succeeded") case _: ApplicationTerminated => LOG.info(s"Application $appId terminated") } } else { LOG.warn(s"Received unexpected result $result for application $appId") } } def askAppMaster[T](msg: Any): Future[T] = { appMaster.flatMap(_.ask(msg)(timeout).asInstanceOf[Future[T]]) } private def resolveAppMaster(appId: Int): Future[ActorRef] = { master.ask(ResolveAppId(appId))(timeout). asInstanceOf[Future[ResolveAppIdResult]].map(_.appMaster.get) } } object RunningApplication { private val LOG: Logger = LogUtil.getLogger(getClass) // This magic number is derived from Akka's configuration, which is the maximum delay private val INF_DURATION = Duration.ofSeconds(2147482) }
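waitUntilFinish builds the Akka Timeout from duration.getSeconds, so any sub-second part is discarded: Duration.ofMillis(1500) becomes a 1-second timeout. A millisecond-based conversion avoids that loss (toTimeout is a hypothetical helper):

import java.time.Duration
import java.util.concurrent.TimeUnit
import akka.util.Timeout

def toTimeout(d: Duration): Timeout = Timeout(d.toMillis, TimeUnit.MILLISECONDS)

assert(Duration.ofMillis(1500).getSeconds == 1) // the truncation described above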
Example 70
Source File: HttpFeeRateProvider.scala From bitcoin-s with MIT License | 5 votes |
package org.bitcoins.feeprovider import java.time.{Duration, Instant} import akka.actor.ActorSystem import akka.http.scaladsl.Http import akka.http.scaladsl.model.{HttpRequest, Uri} import akka.util.ByteString import org.bitcoins.core.api.FeeRateApi import org.bitcoins.core.util.TimeUtil import org.bitcoins.core.wallet.fee.FeeUnit import scala.concurrent.{ExecutionContextExecutor, Future} import scala.util.Try object HttpFeeRateProvider { def makeApiCall(uri: Uri)(implicit system: ActorSystem): Future[String] = { implicit val ec: ExecutionContextExecutor = system.dispatcher Http() .singleRequest(HttpRequest(uri = uri)) .flatMap(response => response.entity.dataBytes .runFold(ByteString.empty)(_ ++ _) .map(payload => payload.decodeString(ByteString.UTF_8))) } } abstract class HttpFeeRateProvider extends FeeRateApi { implicit protected val system: ActorSystem protected def uri: Uri protected def converter(str: String): Try[FeeUnit] def getFeeRate: Future[FeeUnit] = { HttpFeeRateProvider .makeApiCall(uri) .flatMap(ret => Future.fromTry(converter(ret)))(system.dispatcher) } } abstract class CachedHttpFeeRateProvider extends HttpFeeRateProvider { private var cachedFeeRateOpt: Option[(FeeUnit, Instant)] = None val cacheDuration: Duration = Duration.ofMinutes(5) private def updateFeeRate(): Future[FeeUnit] = { implicit val ec: ExecutionContextExecutor = system.dispatcher super.getFeeRate.map { feeRate => cachedFeeRateOpt = Some(feeRate, TimeUtil.now) feeRate } } override def getFeeRate: Future[FeeUnit] = { cachedFeeRateOpt match { case None => updateFeeRate() case Some((cachedFeeRate, time)) => val now = TimeUtil.now if (time.plus(cacheDuration).isAfter(now)) { updateFeeRate() } else { Future.successful(cachedFeeRate) } } } }
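Note the freshness test in getFeeRate: time.plus(cacheDuration).isAfter(now) holds while the cached entry is still within cacheDuration, yet that branch calls updateFeeRate() and the other branch serves the cached value. Under the usual cache semantics the branches read as if they should be swapped. A self-contained sketch of the conventional check (our interpretation, not the quoted source; Cached and serveOrRefresh are hypothetical):

import java.time.{Duration, Instant}

final case class Cached[A](value: A, at: Instant)

def serveOrRefresh[A](cached: Option[Cached[A]], ttl: Duration, now: Instant)(refresh: () => A): A =
  cached match {
    case Some(Cached(v, at)) if at.plus(ttl).isAfter(now) => v // still fresh: serve the cache
    case _ => refresh()                                        // missing or expired: refetch
  }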
Example 71
Source File: KafkaConsumer.scala From eidos with Apache License 2.0 | 5 votes |
package org.clulab.wm.wmexchanger.wmconsumer import java.io.File import java.time.Duration import java.util.Collections import java.util.ConcurrentModificationException import java.util.Properties import org.apache.kafka.clients.consumer.{KafkaConsumer => ApacheKafkaConsumer} import org.clulab.wm.wmexchanger.utils.Closer.AutoCloser import org.clulab.wm.wmexchanger.utils.FileUtils import org.clulab.wm.wmexchanger.utils.FileEditor import org.json4s._ import org.slf4j.Logger import org.slf4j.LoggerFactory class KafkaConsumer(properties: Properties, closeDuration: Int, topic: String, outputDir: String) { import KafkaConsumer._ implicit val formats: DefaultFormats.type = org.json4s.DefaultFormats logger.info("Opening consumer...") protected val consumer: ApacheKafkaConsumer[String, String] = { val consumer = new ApacheKafkaConsumer[String, String](properties) consumer.subscribe(Collections.singletonList(topic)) consumer } def poll(duration: Int): Unit = { val records = consumer.poll(Duration.ofSeconds(duration)) logger.info(s"Polling ${records.count} records...") records.forEach { record => val key = record.key val value = record.value // Imply an extension on the file so that it can be replaced. val file = FileEditor(new File(key + ".")).setDir(outputDir).setExt("json").get logger.info("Consuming " + file.getName) FileUtils.printWriterFromFile(file).autoClose { printWriter => printWriter.print(value) } } } def close(): Unit = { logger.info("Closing consumer...") try { consumer.close(Duration.ofSeconds(closeDuration)) } catch { case _: ConcurrentModificationException => // KafkaConsumer is not safe for multi-threaded access } } } object KafkaConsumer { val logger: Logger = LoggerFactory.getLogger(this.getClass) }
Example 72
Source File: JwtTokenGenerator.scala From scala-play-realworld-example-app with MIT License | 5 votes |
package authentication.jwt.services import java.time.Duration import java.util.Date import authentication.api.TokenGenerator import authentication.models.{IdProfile, JwtToken} import commons.repositories.DateTimeProvider import io.jsonwebtoken.{Jwts, SignatureAlgorithm} private[authentication] class JwtTokenGenerator(dateTimeProvider: DateTimeProvider, secretProvider: SecretProvider) extends TokenGenerator[IdProfile, JwtToken] { private val tokenDuration = Duration.ofHours(1) override def generate(profile: IdProfile): JwtToken = { val signedToken = Jwts.builder .setExpiration(Date.from(expiredAt)) .claim(JwtTokenGenerator.securityUserIdClaimName, profile.securityUserId.value.toString) .signWith(SignatureAlgorithm.HS256, secretProvider.get) .compact() JwtToken(signedToken) } private def expiredAt = { val now = dateTimeProvider.now now.plus(tokenDuration) } } private[authentication] object JwtTokenGenerator { val securityUserIdClaimName: String = "security_user_id" }
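The expiry computation above, in isolation: adding the token Duration to an Instant yields the expiration moment, and Date.from bridges to the java.util.Date that the JWT builder expects:

import java.time.{Duration, Instant}
import java.util.Date

val now       = Instant.now()
val expiresAt = now.plus(Duration.ofHours(1))
val forJwt: Date = Date.from(expiresAt)
assert(Duration.between(now, expiresAt) == Duration.ofHours(1))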
Example 73
Source File: ArrayOfDurationsBenchmark.scala From jsoniter-scala with MIT License | 5 votes |
package com.github.plokhotnyuk.jsoniter_scala.benchmark import java.nio.charset.StandardCharsets.UTF_8 import java.time.Duration import org.openjdk.jmh.annotations.{Param, Setup} abstract class ArrayOfDurationsBenchmark extends CommonParams { @Param(Array("1", "10", "100", "1000", "10000", "100000", "1000000")) var size: Int = 1000 var obj: Array[Duration] = _ var jsonString: String = _ var jsonBytes: Array[Byte] = _ var preallocatedBuf: Array[Byte] = _ @Setup def setup(): Unit = { obj = (1 to size).map { i => val x = Math.abs((i * 6971258582664805397L) / Math.pow(10, i % 19).toLong) val y = Math.abs(i * Math.pow(10, i % 10)).toInt Duration.ofSeconds(x, y) }.toArray jsonString = obj.mkString("[\"", "\",\"", "\"]") jsonBytes = jsonString.getBytes(UTF_8) preallocatedBuf = new Array[Byte](jsonBytes.length + 100) } }
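The two-argument factory used in setup() normalizes its nanosecond adjustment into the seconds field, so getNano always lands in [0, 999999999]:

import java.time.Duration

val d = Duration.ofSeconds(3, 2500000000L) // 3 s plus 2.5 s worth of nanos
assert(d.getSeconds == 5 && d.getNano == 500000000)
assert(d == Duration.ofSeconds(5).plusMillis(500))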
Example 74
Source File: ArrayOfDurationsReading.scala From jsoniter-scala with MIT License | 5 votes |
package com.github.plokhotnyuk.jsoniter_scala.benchmark import java.nio.charset.StandardCharsets.UTF_8 import java.time.Duration import com.avsystem.commons.serialization.json._ import com.github.plokhotnyuk.jsoniter_scala.benchmark.AVSystemCodecs._ import com.github.plokhotnyuk.jsoniter_scala.benchmark.BorerJsonEncodersDecoders._ import com.github.plokhotnyuk.jsoniter_scala.benchmark.JacksonSerDesers._ import com.github.plokhotnyuk.jsoniter_scala.benchmark.JsoniterScalaCodecs._ import com.github.plokhotnyuk.jsoniter_scala.benchmark.SprayFormats._ import com.github.plokhotnyuk.jsoniter_scala.benchmark.UPickleReaderWriters._ import com.github.plokhotnyuk.jsoniter_scala.core._ import io.circe.parser._ import org.openjdk.jmh.annotations.Benchmark import play.api.libs.json.Json import spray.json._ class ArrayOfDurationsReading extends ArrayOfDurationsBenchmark { @Benchmark def avSystemGenCodec(): Array[Duration] = JsonStringInput.read[Array[Duration]](new String(jsonBytes, UTF_8)) @Benchmark def borer(): Array[Duration] = io.bullet.borer.Json.decode(jsonBytes).to[Array[Duration]].value @Benchmark def circe(): Array[Duration] = decode[Array[Duration]](new String(jsonBytes, UTF_8)).fold(throw _, identity) @Benchmark def jacksonScala(): Array[Duration] = jacksonMapper.readValue[Array[Duration]](jsonBytes) @Benchmark def jsoniterScala(): Array[Duration] = readFromArray[Array[Duration]](jsonBytes) @Benchmark def playJson(): Array[Duration] = Json.parse(jsonBytes).as[Array[Duration]] @Benchmark def sprayJson(): Array[Duration] = JsonParser(jsonBytes).convertTo[Array[Duration]] @Benchmark def uPickle(): Array[Duration] = read[Array[Duration]](jsonBytes) }
Example 75
Source File: HoconInputTest.scala From scala-commons with MIT License | 5 votes |
package com.avsystem.commons package hocon import java.time.{Duration, Period} import com.avsystem.commons.serialization.json.JsonStringOutput import com.avsystem.commons.serialization.{GenCodecRoundtripTest, Input, Output} import com.typesafe.config.{ConfigFactory, ConfigMemorySize, ConfigValue, ConfigValueFactory, ConfigValueType} class HoconInputTest extends GenCodecRoundtripTest { type Raw = ConfigValue def writeToOutput(write: Output => Unit): ConfigValue = { val sb = new JStringBuilder write(new JsonStringOutput(sb)) val config = ConfigFactory.parseString(s"""{"f":${sb.toString}}""") if (config.getIsNull("f")) ConfigValueFactory.fromAnyRef(null) else config.getValue("f") } def createInput(raw: ConfigValue): Input = new HoconInput(raw) def rawInput(any: Any): HoconInput = new HoconInput(ConfigValueFactory.fromAnyRef(any)) test("value type reading") { assert(rawInput(null).valueType == ConfigValueType.NULL) assert(rawInput("kek").valueType == ConfigValueType.STRING) assert(rawInput(42).valueType == ConfigValueType.NUMBER) assert(rawInput(true).valueType == ConfigValueType.BOOLEAN) assert(rawInput(JMap()).valueType == ConfigValueType.OBJECT) assert(rawInput(JList()).valueType == ConfigValueType.LIST) } test("duration reading") { assert(rawInput("34s").readDuration() == Duration.ofSeconds(34)) } test("period reading") { assert(rawInput("5m").readPeriod() == Period.ofMonths(5)) } test("temporal amount reading") { assert(rawInput("5 minutes").readTemporal() == Duration.ofMinutes(5)) assert(rawInput("5 months").readTemporal() == Period.ofMonths(5)) } test("size in bytes reading") { assert(rawInput("100M").readSizeInBytes() == 100 * 1024 * 1024L) } test("memory size reading") { assert(rawInput("100M").readMemorySize() == ConfigMemorySize.ofBytes(100 * 1024 * 1024L)) } test("number reading") { assert(rawInput(42.0).readNumber().doubleValue == 42.0) } }
Example 76
Source File: BfsStrategyStopWatchDecorator.scala From apalache with Apache License 2.0 | 5 votes |
package at.forsyte.apalache.tla.bmcmt.search import java.io.{FileWriter, PrintWriter, Writer} import java.time.{Duration, LocalDateTime} import at.forsyte.apalache.tla.bmcmt.search.SearchStrategy.{Finish, FinishOnDeadlock, NextStep} class BfsStrategyStopWatchDecorator(strategy: SearchStrategy, filename: String) extends SearchStrategy { private var currentStep: Int = 0 private var printWriter: Option[PrintWriter] = None private var startTime: LocalDateTime = LocalDateTime.now() override def getCommand: SearchStrategy.Command = { val command = strategy.getCommand command match { case NextStep(stepNo, _, _) => if (stepNo == 0) { currentStep = 0 // create a log file and add a header printWriter = Some(new PrintWriter(new FileWriter(filename, false))) printWriter.get.println("step,total_sec,nanosec_adjustment") // start the timer startTime = LocalDateTime.now() } else { appendCsvEntry() currentStep = stepNo } case Finish() | FinishOnDeadlock() => appendCsvEntry() printWriter.get.close() } command } private def appendCsvEntry(): Unit = { val currentTime = LocalDateTime.now() val duration = Duration.between(startTime, currentTime) printWriter.get.println("%d,%d,%d".format(currentStep, duration.getSeconds, duration.getNano)) printWriter.get.flush() // get the results as soon as possible } override def registerResponse(response: SearchStrategy.Response): Unit = { strategy.registerResponse(response) } }
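Duration.between, used above to turn two LocalDateTime readings into the seconds/nanos pair written to the CSV, measures the exact elapsed amount between any two compatible temporals:

import java.time.{Duration, LocalDateTime}

val start = LocalDateTime.of(2020, 1, 1, 12, 0, 0)
val end   = start.plusSeconds(90).plusNanos(250)
val d     = Duration.between(start, end)
assert(d.getSeconds == 90 && d.getNano == 250)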
Example 77
Source File: WordCount.scala From kafka-streams with Apache License 2.0 | 5 votes |
import java.time.Duration import java.util.Properties import org.apache.kafka.streams.kstream.Materialized import org.apache.kafka.streams.scala.ImplicitConversions._ import org.apache.kafka.streams.scala._ import org.apache.kafka.streams.scala.kstream._ import org.apache.kafka.streams.{KafkaStreams, StreamsConfig} object WordCount extends App { import Serdes._ val props: Properties = { val p = new Properties() p.put(StreamsConfig.APPLICATION_ID_CONFIG, "wordcount-modified") p.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092") p } val builder: StreamsBuilder = new StreamsBuilder val textLines: KStream[String, String] = builder.stream[String, String]("text_lines") val wordCounts: KTable[String, Long] = textLines .flatMapValues(textLine => textLine.toLowerCase.split("\\W+")) .groupBy((_, word) => word) .count() wordCounts.toStream.to("word_count_results") val streams: KafkaStreams = new KafkaStreams(builder.build(), props) streams.start() sys.ShutdownHookThread { streams.close(Duration.ofSeconds(10)) } }
Example 78
Source File: WordCountTestable.scala From kafka-streams with Apache License 2.0 | 5 votes |
package com.supergloo import java.time.Duration import java.util.Properties import org.apache.kafka.streams.kstream.Materialized import org.apache.kafka.streams.scala.ImplicitConversions._ import org.apache.kafka.streams.{KafkaStreams, StreamsConfig, Topology} import org.apache.kafka.streams.scala.{Serdes, StreamsBuilder} import org.apache.kafka.streams.scala.kstream.{KStream, KTable} class WordCountTestable { import Serdes._ val props: Properties = { val p = new Properties() p.put(StreamsConfig.APPLICATION_ID_CONFIG, "wordcount-modified") p.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092") p } def countNumberOfWords(inputTopic: String, outputTopic: String, storeName: String): Topology = { val builder: StreamsBuilder = new StreamsBuilder val textLines: KStream[String, String] = builder.stream[String, String](inputTopic) val wordCounts: KTable[String, Long] = textLines .flatMapValues(textLine => textLine.toLowerCase.split("\\W+")) .groupBy((_, word) => word) .count()(Materialized.as("counts-store")) wordCounts.toStream.to(outputTopic) builder.build() } def toLowerCaseStream(inputTopic: String, outputTopic: String): Topology = { val builder: StreamsBuilder = new StreamsBuilder() val textLines: KStream[String, String] = builder.stream(inputTopic) val wordCounts: KStream[String, String] = textLines .mapValues(textLine => textLine.toLowerCase) wordCounts.to(outputTopic) builder.build() } } object WordCountTestable extends WordCountTestable { def main(args: Array[String]): Unit = { val builder: Topology = countNumberOfWords("input-topic", "output-topic", "counts-store") val streams: KafkaStreams = new KafkaStreams(builder, props) streams.start() sys.ShutdownHookThread { streams.close(Duration.ofSeconds(10)) } } }
Example 79
Source File: TypeMappers.scala From ticket-booking-aecor with Apache License 2.0 | 5 votes |
package ru.pavkin.booking.common.protobuf import java.time.{ Duration, Instant } import ru.pavkin.booking.common.models._ import scalapb.TypeMapper import shapeless._ import scala.util.Try trait AnyValTypeMapper { implicit def anyValTypeMapper[V, U](implicit ev: V <:< AnyVal, V: Unwrapped.Aux[V, U]): TypeMapper[U, V] = { val _ = ev TypeMapper[U, V](V.wrap)(V.unwrap) } } trait CaseClassTypeMapper { implicit def caseClassTypeMapper[A, B, Repr <: HList]( implicit aGen: Generic.Aux[A, Repr], bGen: Generic.Aux[B, Repr] ): TypeMapper[A, B] = TypeMapper { x: A => bGen.from(aGen.to(x)) } { x => aGen.from(bGen.to(x)) } } trait BaseTypeMapper { implicit val bigDecimal: TypeMapper[String, BigDecimal] = TypeMapper[String, BigDecimal] { x => val value = if (x.isEmpty) "0" else x BigDecimal(value) }(_.toString()) implicit val instant: TypeMapper[Long, Instant] = TypeMapper[Long, Instant](Instant.ofEpochMilli)(_.toEpochMilli) implicit val instantOpt: TypeMapper[Long, Option[Instant]] = instant.map2(i => if (i.toEpochMilli == 0) None else Some(i))( _.getOrElse(Instant.ofEpochMilli(0)) ) implicit val duration: TypeMapper[String, java.time.Duration] = TypeMapper[String, Duration] { s => Try(Duration.parse(s)).getOrElse(Duration.ZERO) } { _.toString } } trait TypeMapperInstances extends BaseTypeMapper with AnyValTypeMapper with CaseClassTypeMapper { implicit class TypeMapperOps[A <: Any](a: A) { def toCustom[B](implicit tm: TypeMapper[A, B]): B = tm.toCustom(a) def toBase[B](implicit tm: TypeMapper[B, A]): B = tm.toBase(a) } } object TypeMappers extends TypeMapperInstances { implicit val money: TypeMapper[String, Money] = bigDecimal.map2(Money(_))(_.amount) }
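The string-backed duration mapper above round-trips through the ISO-8601 form and degrades to Duration.ZERO on malformed input. The same behavior in isolation (parseOrZero is a hypothetical helper mirroring the TypeMapper):

import java.time.Duration
import scala.util.Try

def parseOrZero(s: String): Duration = Try(Duration.parse(s)).getOrElse(Duration.ZERO)

assert(parseOrZero("PT2H30M") == Duration.ofMinutes(150))
assert(parseOrZero("not a duration") == Duration.ZERO)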
Example 80
Source File: StubConfirmationService.scala From ticket-booking-aecor with Apache License 2.0 | 5 votes |
package ru.pavkin.booking.booking.service import java.time.temporal.ChronoUnit import java.time.{ Duration, Instant } import java.util.concurrent.TimeUnit import cats.Monad import cats.data.NonEmptyList import cats.effect.{ Clock, Sync } import cats.effect.concurrent.Ref import cats.implicits._ import ru.pavkin.booking.booking.service.TicketReservationService._ import ru.pavkin.booking.booking.service.StubConfirmationService.ConcertState import ru.pavkin.booking.common.models._ class StubConfirmationService[F[_]: Monad](clock: Clock[F], state: Ref[F, Map[ConcertId, ConcertState]]) extends TicketReservationService[F] { val expireAfter: Duration = Duration.of(6, ChronoUnit.HOURS) def reserve(bookingId: BookingKey, concertId: ConcertId, seats: NonEmptyList[Seat]): F[Either[ReservationFailure, Reservation]] = clock .realTime(TimeUnit.MILLISECONDS) .map(Instant.ofEpochMilli) .flatMap( now => state.modify[Either[ReservationFailure, Reservation]]( concerts => concerts.get(concertId) match { case None => concerts -> Left(UnknownSeats) case Some(concertState) => concertState .book(bookingId, seats) .fold(e => concerts -> Left(e), { case (c, t) => concerts.updated(concertId, c) -> Right( Reservation(t, Some(now.plus(expireAfter))) ) }) } ) ) def release(bookingId: BookingKey): F[Either[ReleaseFailure, Unit]] = state.modify[Either[ReleaseFailure, Unit]]( concerts => Either .fromOption(concerts.find(_._2.bookedSeats.contains(bookingId)), UnknownBooking) .flatMap { case (concertId, concertState) => concertState.release(bookingId).map(concertId -> _) } match { case Left(value) => concerts -> Left(value) case Right((concertId, newState)) => concerts.updated(concertId, newState) -> Right(()) } ) } object StubConfirmationService { def apply[F[_]: Sync](clock: Clock[F], initial: Map[ConcertId, ConcertState]): F[StubConfirmationService[F]] = Ref.of(initial).map(new StubConfirmationService(clock, _)) case class ConcertState(prices: Map[Seat, Money], availableSeats: Set[Seat], bookedSeats: Map[BookingKey, NonEmptyList[Seat]]) { def book( bookingId: BookingKey, seats: NonEmptyList[Seat] ): Either[ReservationFailure, (ConcertState, NonEmptyList[Ticket])] = if (bookedSeats.contains(bookingId)) Left(SeatsAlreadyBooked) else if (!seats.forall(availableSeats)) Left(SeatsAlreadyBooked) else if (!seats.forall(prices.contains)) Left(UnknownSeats) else Right( copy( availableSeats = availableSeats.diff(seats.toList.toSet), bookedSeats = bookedSeats.updated(bookingId, seats) ) -> seats.map(s => Ticket(s, prices(s))) ) def release(bookingId: BookingKey): Either[ReleaseFailure, ConcertState] = bookedSeats.get(bookingId) match { case Some(booked) => Right( copy( availableSeats = availableSeats ++ booked.toList.toSet, bookedSeats = bookedSeats - bookingId ) ) case None => Left(UnknownBooking) } } }
Example 81
Source File: EventHubReceiver.scala From toketi-kafka-connect-iothub with MIT License | 5 votes |
// Copyright (c) Microsoft. All rights reserved. package com.microsoft.azure.iot.kafka.connect.source import java.time.{Duration, Instant} import java.util.concurrent.Executors import com.microsoft.azure.eventhubs.{EventHubClient, EventPosition, PartitionReceiver} import scala.collection.JavaConverters._ import scala.collection.mutable.ListBuffer class EventHubReceiver(val connectionString: String, val receiverConsumerGroup: String, val partition: String, var offset: Option[String], val startTime: Option[Instant], val receiveTimeout: Duration) extends DataReceiver { private[this] var isClosing = false private val executorService = Executors.newSingleThreadExecutor() private val eventHubClient = EventHubClient.createSync(connectionString, executorService) if (eventHubClient == null) { throw new IllegalArgumentException("Unable to create EventHubClient from the input parameters.") } private val eventPosition = if (startTime.isDefined) { EventPosition.fromEnqueuedTime(startTime.get) } else { EventPosition.fromOffset(offset.get) } private val eventHubReceiver: PartitionReceiver = eventHubClient.createReceiverSync( receiverConsumerGroup, partition.toString, eventPosition) if (this.eventHubReceiver == null) { throw new IllegalArgumentException("Unable to create PartitionReceiver from the input parameters.") } this.eventHubReceiver.setReceiveTimeout(receiveTimeout) override def close(): Unit = { if (this.eventHubReceiver != null) { this.eventHubReceiver.synchronized { this.isClosing = true eventHubReceiver.close().join() } } } override def receiveData(batchSize: Int): Iterable[IotMessage] = { var iotMessages = ListBuffer.empty[IotMessage] var curBatchSize = batchSize var endReached = false // Synchronize on the eventHubReceiver object, and make sure the task is not closing, // in which case, the eventHubReceiver might be closed. while (curBatchSize > 0 && !endReached && !this.isClosing) { this.eventHubReceiver.synchronized { if(!this.isClosing) { val batch = this.eventHubReceiver.receiveSync(curBatchSize) if (batch != null) { val batchIterable = batch.asScala iotMessages ++= batchIterable.map(e => { val content = new String(e.getBytes) val iotDeviceData = IotMessage(content, e.getSystemProperties.asScala, e.getProperties.asScala) iotDeviceData }) curBatchSize -= batchIterable.size } else { endReached = true } } } } iotMessages } }
Example 82
Source File: MockDataReceiver.scala From toketi-kafka-connect-iothub with MIT License | 5 votes |
// Copyright (c) Microsoft. All rights reserved. package com.microsoft.azure.iot.kafka.connect.source.testhelpers import java.text.SimpleDateFormat import java.time.{Duration, Instant} import com.microsoft.azure.eventhubs.impl.AmqpConstants import com.microsoft.azure.iot.kafka.connect.source.{DataReceiver, IotMessage, JsonSerialization} import org.json4s.jackson.Serialization.write import scala.collection.mutable import scala.util.Random class MockDataReceiver(val connectionString: String, val receiverConsumerGroup: String, val partition: String, var offset: Option[String], val startTime: Option[Instant], val receiveTimeout: Duration ) extends DataReceiver with JsonSerialization { private val random: Random = new Random override def receiveData(batchSize: Int): Iterable[IotMessage] = { val list = scala.collection.mutable.ListBuffer.empty[IotMessage] for (i <- 0 until batchSize) { list += generateIotMessage(i) } list } def generateIotMessage(index: Int): IotMessage = { val temp = 70 + random.nextInt(10) + random.nextDouble() val deviceTemp = DeviceTemperature(temp, "F") val deviceTempStr = write(deviceTemp) val systemProperties = mutable.Map[String, Object]( "iothub-connection-device-id" → s"device$index", AmqpConstants.SEQUENCE_NUMBER_ANNOTATION_NAME → index.toLong.asInstanceOf[Object], AmqpConstants.AMQP_PROPERTY_CORRELATION_ID → random.nextString(10), AmqpConstants.OFFSET_ANNOTATION_NAME → random.nextString(10), AmqpConstants.ENQUEUED_TIME_UTC_ANNOTATION_NAME → new SimpleDateFormat("MM/dd/yyyy").parse("12/01/2016")) val messageProperties = mutable.Map[String, Object]( "timestamp" → Instant.now().toString, "contentType" → "temperature" ) val iotMessage = IotMessage(deviceTempStr, systemProperties, messageProperties) iotMessage } override def close(): Unit = {} }
Example 83
Source File: IotHubSourceTaskTest.scala From toketi-kafka-connect-iothub with MIT License | 5 votes |
// Copyright (c) Microsoft. All rights reserved. package com.microsoft.azure.iot.kafka.connect.source import java.time.{Duration, Instant} import java.util import com.microsoft.azure.iot.kafka.connect.source.testhelpers.{DeviceTemperature, MockDataReceiver, TestConfig, TestIotHubSourceTask} import org.apache.kafka.connect.data.Struct import org.json4s.jackson.Serialization.read import org.scalatest.{FlatSpec, GivenWhenThen} class IotHubSourceTaskTest extends FlatSpec with GivenWhenThen with JsonSerialization { "IotHubSourceTask poll" should "return a list of SourceRecords with the right format" in { Given("IotHubSourceTask instance") val iotHubSourceTask = new TestIotHubSourceTask iotHubSourceTask.start(TestConfig.sourceTaskTestProps) When("IotHubSourceTask.poll is called") val sourceRecords = iotHubSourceTask.poll() Then("It returns a list of SourceRecords") assert(sourceRecords != null) assert(sourceRecords.size() == 15) for (i <- 0 until 15) { val record = sourceRecords.get(i) assert(record.topic() == TestConfig.sourceTaskTestProps.get(IotHubSourceConfig.KafkaTopic)) assert(record.valueSchema() == IotMessageConverter.schema) val messageStruct = record.value().asInstanceOf[Struct] assert(messageStruct.getString("deviceId").startsWith("device")) assert(messageStruct.getString("contentType") == "temperature") val enqueuedTime = Instant.parse(messageStruct.getString("enqueuedTime")) assert(enqueuedTime.isAfter(Instant.parse("2016-11-20T00:00:00Z"))) val systemProperties = messageStruct.getMap[String, String]("systemProperties") assert(systemProperties != null) assert(systemProperties.get("sequenceNumber") != "") assert(systemProperties.get("correlationId") != "") val properties = messageStruct.getMap[String, String]("properties") assert(properties != null) assert(properties.get("timestamp") != "") val deviceTemperature = read[DeviceTemperature](messageStruct.get("content").asInstanceOf[String]) assert(deviceTemperature != null) assert(deviceTemperature.unit == "F") assert(deviceTemperature.value != 0) } } "IotHubSourceTask start" should "initialize all properties" in { Given("A list of properties for IotHubSourceTask") val props: util.Map[String, String] = TestConfig.sourceTaskTestProps When("IotHubSourceTask is started") val task = new TestIotHubSourceTask task.start(props) Then("Data receiver should be properly initialized") assert(task.partitionSources.length == 3) assert(!task.partitionSources.exists(s => s.dataReceiver == null)) for (ps ← task.partitionSources) { val dataReceiver = ps.dataReceiver.asInstanceOf[MockDataReceiver] assert(dataReceiver.offset.isDefined) assert(dataReceiver.startTime.isEmpty) assert(dataReceiver.connectionString != "") assert(dataReceiver.receiverConsumerGroup != "") assert(dataReceiver.receiveTimeout == Duration.ofSeconds(5)) } } it should "initialize start time correctly on the data receiver when it is passed in the config" in { Given("A list of properties with StartTime for IotHubSourceTask") val props: util.Map[String, String] = TestConfig.sourceTaskTestPropsStartTime When("IotHubSourceTask is started") val task = new TestIotHubSourceTask task.start(props) Then("Data receiver should be properly initialized, with StartTime, while Offsets value should be ignored") assert(task.partitionSources.length == 3) assert(!task.partitionSources.exists(s => s.dataReceiver == null)) for (ps ← task.partitionSources) { val dataReceiver = ps.dataReceiver.asInstanceOf[MockDataReceiver] assert(dataReceiver.offset.isEmpty) assert(dataReceiver.startTime.isDefined) assert(dataReceiver.startTime.get == Instant.parse("2016-12-10T00:00:00Z")) assert(dataReceiver.connectionString != "") assert(dataReceiver.receiverConsumerGroup != "") } } }
Example 84
Source File: JettyConfig.scala From seahorse with Apache License 2.0 | 5 votes |
package ai.deepsense.commons.service.server import java.time.Duration import com.typesafe.config.Config case class JettyConfig( port: Int, stopTimeout: Duration, connectorIdleTimeout: Duration, maxFormContentSize: Int) object JettyConfig { def apply(jettyConfig: Config): JettyConfig = new JettyConfig( port = jettyConfig.getInt("port"), stopTimeout = jettyConfig.getDuration("stopTimeout"), connectorIdleTimeout = jettyConfig.getDuration("connectorIdleTimeout"), maxFormContentSize = jettyConfig.getInt("maxFormContentSize")) }
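Config.getDuration, used above, returns java.time.Duration directly, parsing HOCON's human-friendly duration syntax. A self-contained sketch with keys mirroring the config this class reads:

import java.time.Duration
import com.typesafe.config.ConfigFactory

val cfg = ConfigFactory.parseString(
  """|port = 8080
     |stopTimeout = 30s
     |connectorIdleTimeout = 5m
     |maxFormContentSize = 1048576
     |""".stripMargin)

assert(cfg.getDuration("stopTimeout") == Duration.ofSeconds(30))
assert(cfg.getDuration("connectorIdleTimeout") == Duration.ofMinutes(5))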
Example 85
Source File: KafkaConsumer.scala From aecor with MIT License | 5 votes |
package aecor.kafkadistributedprocessing.internal import java.time.Duration import java.util.Properties import java.util.concurrent.Executors import cats.effect.{ Async, ContextShift, Resource } import cats.~> import org.apache.kafka.clients.consumer.{ Consumer, ConsumerRebalanceListener, ConsumerRecords } import org.apache.kafka.common.PartitionInfo import org.apache.kafka.common.serialization.Deserializer import scala.collection.JavaConverters._ import scala.concurrent.ExecutionContext import scala.concurrent.duration.FiniteDuration private[kafkadistributedprocessing] final class KafkaConsumer[F[_], K, V]( withConsumer: (Consumer[K, V] => *) ~> F ) { def subscribe(topics: Set[String], listener: ConsumerRebalanceListener): F[Unit] = withConsumer(_.subscribe(topics.asJava, listener)) def subscribe(topics: Set[String]): F[Unit] = withConsumer(_.subscribe(topics.asJava)) val unsubscribe: F[Unit] = withConsumer(_.unsubscribe()) def partitionsFor(topic: String): F[Set[PartitionInfo]] = withConsumer(_.partitionsFor(topic).asScala.toSet) def close: F[Unit] = withConsumer(_.close()) def poll(timeout: FiniteDuration): F[ConsumerRecords[K, V]] = withConsumer(_.poll(Duration.ofNanos(timeout.toNanos))) } private[kafkadistributedprocessing] object KafkaConsumer { final class Create[F[_]] { def apply[K, V]( config: Properties, keyDeserializer: Deserializer[K], valueDeserializer: Deserializer[V] )(implicit F: Async[F], contextShift: ContextShift[F]): Resource[F, KafkaConsumer[F, K, V]] = { val create = F.suspend { val executor = Executors.newSingleThreadExecutor() def eval[A](a: => A): F[A] = contextShift.evalOn(ExecutionContext.fromExecutor(executor)) { F.async[A] { cb => executor.execute(new Runnable { override def run(): Unit = cb { try Right(a) catch { case e: Throwable => Left(e) } } }) } } eval { val original = Thread.currentThread.getContextClassLoader Thread.currentThread.setContextClassLoader(null) val consumer = new org.apache.kafka.clients.consumer.KafkaConsumer[K, V]( config, keyDeserializer, valueDeserializer ) Thread.currentThread.setContextClassLoader(original) val withConsumer = new ((Consumer[K, V] => *) ~> F) { def apply[A](f: Consumer[K, V] => A): F[A] = eval(f(consumer)) } new KafkaConsumer[F, K, V](withConsumer) } } Resource.make(create)(_.close) } } def create[F[_]]: Create[F] = new Create[F] }
Example 86
Source File: duration.scala From cats-time with MIT License | 5 votes |
package io.chrisdavenport.cats.time.instances import cats._ import java.time.Duration import cats.kernel.CommutativeMonoid trait duration { implicit final val durationInstances: Show[Duration] with Hash[Duration] with Order[Duration] with CommutativeMonoid[Duration] = new Hash[Duration] with Order[Duration] with Show[Duration] with CommutativeMonoid[Duration] { override def hash(x: Duration): Int = x.hashCode override def compare(x: Duration, y: Duration): Int = x.compareTo(y) override def show(x: Duration): String = x.toString override def empty: Duration = Duration.ZERO override def combine(x: Duration, y: Duration): Duration = x.plus(y) } } object duration extends duration
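With this instance in scope, Durations combine through the usual cats Monoid machinery, for example when folding a collection of timings:

import java.time.Duration
import cats.Monoid
import cats.syntax.semigroup._
import io.chrisdavenport.cats.time.instances.duration._

val total = Duration.ofMinutes(1) |+| Duration.ofSeconds(30)
assert(total == Duration.ofSeconds(90))
assert(Monoid[Duration].combineAll(List.empty[Duration]) == Duration.ZERO)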
Example 87
Source File: LshFunctionCache.scala From elastiknn with Apache License 2.0 | 5 votes |
package com.klibisz.elastiknn.query

import java.time.Duration

import com.google.common.cache.{CacheBuilder, CacheLoader, LoadingCache}
import com.klibisz.elastiknn.api.{Mapping, Vec}
import com.klibisz.elastiknn.models.LshFunction
import com.klibisz.elastiknn.storage.StoredVec

// The Lsh Functions tend to be expensive to instantiate (i.e. initializing hashing parameters), hence a cache.
sealed trait LshFunctionCache[M <: Mapping, V <: Vec, S <: StoredVec] extends (M => LshFunction[M, V, S]) { self =>
  private val cache: LoadingCache[M, LshFunction[M, V, S]] = CacheBuilder.newBuilder
    .expireAfterWrite(Duration.ofSeconds(60))
    .build(new CacheLoader[M, LshFunction[M, V, S]] {
      override def load(m: M): LshFunction[M, V, S] = self.load(m)
    })
  override final def apply(mapping: M): LshFunction[M, V, S] = cache.get(mapping)
  protected def load(m: M): LshFunction[M, V, S]
}

object LshFunctionCache {
  implicit object Jaccard extends LshFunctionCache[Mapping.JaccardLsh, Vec.SparseBool, StoredVec.SparseBool] {
    def load(m: Mapping.JaccardLsh): LshFunction[Mapping.JaccardLsh, Vec.SparseBool, StoredVec.SparseBool] = new LshFunction.Jaccard(m)
  }
  implicit object Hamming extends LshFunctionCache[Mapping.HammingLsh, Vec.SparseBool, StoredVec.SparseBool] {
    def load(m: Mapping.HammingLsh): LshFunction[Mapping.HammingLsh, Vec.SparseBool, StoredVec.SparseBool] = new LshFunction.Hamming(m)
  }
  implicit object Angular extends LshFunctionCache[Mapping.AngularLsh, Vec.DenseFloat, StoredVec.DenseFloat] {
    def load(m: Mapping.AngularLsh): LshFunction[Mapping.AngularLsh, Vec.DenseFloat, StoredVec.DenseFloat] = new LshFunction.Angular(m)
  }
  implicit object L2 extends LshFunctionCache[Mapping.L2Lsh, Vec.DenseFloat, StoredVec.DenseFloat] {
    def load(m: Mapping.L2Lsh): LshFunction[Mapping.L2Lsh, Vec.DenseFloat, StoredVec.DenseFloat] = new LshFunction.L2(m)
  }
}
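Guava's CacheBuilder has accepted java.time.Duration directly since Guava 25. A self-contained sketch of the same expire-after-write pattern, with plain strings standing in for the expensive values:

import java.time.Duration
import com.google.common.cache.{CacheBuilder, CacheLoader, LoadingCache}

val cache: LoadingCache[String, Integer] = CacheBuilder.newBuilder()
  .expireAfterWrite(Duration.ofSeconds(60))
  .build(new CacheLoader[String, Integer] {
    override def load(key: String): Integer = key.length // stand-in for an expensive construction
  })

cache.get("hello") // computed once, then served from the cache for 60 seconds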
Example 88
Source File: NsdbMiniCluster.scala From NSDb with Apache License 2.0 | 5 votes |
package io.radicalbit.nsdb.minicluster

import java.io.File
import java.time.Duration
import java.util.UUID

import com.typesafe.scalalogging.LazyLogging
import org.apache.commons.io.FileUtils

trait NsdbMiniCluster extends LazyLogging {

  protected[this] val instanceId = { UUID.randomUUID }

  protected[this] val startingHostname = "127.0.0."

  protected[this] def rootFolder: String
  protected[this] def nodesNumber: Int
  protected[this] def passivateAfter: Duration
  protected[this] def replicationFactor: Int

  lazy val nodes: Set[NSDbMiniClusterNode] =
    (for {
      i <- 0 until nodesNumber
    } yield
      new NSDbMiniClusterNode(
        hostname = s"$startingHostname${i + 1}",
        storageDir = s"$rootFolder/data$i",
        passivateAfter = passivateAfter,
        replicationFactor = replicationFactor
      )).toSet

  def start(cleanup: Boolean = false): Unit = {
    if (cleanup)
      FileUtils.deleteDirectory(new File(rootFolder))
    nodes.foreach(_.start())
  }

  def stop(): Unit = nodes.foreach(n => n.stop())
}
Example 89
Source File: NSDbMiniClusterConfigProvider.scala From NSDb with Apache License 2.0 | 5 votes |
package io.radicalbit.nsdb.minicluster

import java.time.Duration

import com.typesafe.config.{Config, ConfigFactory, ConfigValueFactory}
import io.radicalbit.nsdb.common.configuration.NSDbConfigProvider

trait NSDbMiniClusterConfigProvider extends NSDbConfigProvider {

  def hostname: String
  def storageDir: String
  def passivateAfter: Duration
  def replicationFactor: Int

  override lazy val userDefinedConfig: Config =
    ConfigFactory
      .parseResources("nsdb-minicluster.conf")
      .withValue("nsdb.node.hostname", ConfigValueFactory.fromAnyRef(hostname))
      .withValue("nsdb.grpc.interface", ConfigValueFactory.fromAnyRef(hostname))
      .withValue("nsdb.http.interface", ConfigValueFactory.fromAnyRef(hostname))
      .withValue("nsdb.storage.base-path", ConfigValueFactory.fromAnyRef(storageDir))
      .withValue("nsdb.cluster.replication-factor", ConfigValueFactory.fromAnyRef(replicationFactor))
      .resolve()

  override lazy val lowLevelTemplateConfig: Config =
    mergeConf(userDefinedConfig,
              ConfigFactory.parseResources("application-native.conf"),
              ConfigFactory.parseResources("application-common.conf"))
}
Example 90
Source File: LdapAuthenticator.scala From asura with MIT License | 5 votes |
package asura.app.api.auth

import java.time.{Duration, LocalDate, ZoneId}
import java.util.Date

import org.ldaptive._
import org.ldaptive.auth.{Authenticator, BindAuthenticationHandler, SearchDnResolver}
import org.pac4j.core.context.WebContext
import org.pac4j.core.credentials.UsernamePasswordCredentials
import org.pac4j.core.profile.CommonProfile
import org.pac4j.jwt.config.signature.SecretSignatureConfiguration
import org.pac4j.jwt.profile.JwtGenerator
import org.pac4j.ldap.profile.service.LdapProfileService
import play.api.Configuration

object LdapAuthenticator {

  def apply(configuration: Configuration): LdapProfileService = {
    val connConfig = new ConnectionConfig()
    connConfig.setConnectTimeout(Duration.ofMillis(configuration.get[Long]("asura.ldap.connectionTimeout")))
    connConfig.setResponseTimeout(Duration.ofMillis(configuration.get[Long]("asura.ldap.responseTimeout")))
    connConfig.setLdapUrl(configuration.get[String]("asura.ldap.url"))
    connConfig.setConnectionInitializer(
      new BindConnectionInitializer(
        configuration.get[String]("asura.ldap.bindDn"),
        new Credential(configuration.get[String]("asura.ldap.password"))))
    val connFactory = new DefaultConnectionFactory(connConfig)
    val handler = new BindAuthenticationHandler(connFactory)
    val dnResolver = new SearchDnResolver(connFactory)
    dnResolver.setBaseDn(configuration.get[String]("asura.ldap.searchbase"))
    dnResolver.setSubtreeSearch(true)
    dnResolver.setUserFilter(configuration.get[String]("asura.ldap.userFilter"))
    val authenticator = new Authenticator()
    authenticator.setDnResolver(dnResolver)
    authenticator.setAuthenticationHandler(handler)
    new CustomLdapProfileService(configuration, connFactory, authenticator, configuration.get[String]("asura.ldap.searchbase"))
  }

  class CustomLdapProfileService(
      configuration: Configuration,
      connectionFactory: ConnectionFactory,
      authenticator: Authenticator,
      usersDn: String) extends LdapProfileService(connectionFactory, authenticator, usersDn) {

    this.setIdAttribute(configuration.get[String]("asura.ldap.userIdAttr"))
    this.setAttributes(s"${configuration.get[String]("asura.ldap.userRealNameAttr")},${configuration.get[String]("asura.ldap.userEmailAttr")}")

    override def validate(credentials: UsernamePasswordCredentials, context: WebContext): Unit = {
      super.validate(credentials, context)
      val jwtGenerator = new JwtGenerator[CommonProfile](new SecretSignatureConfiguration(configuration.get[String]("asura.jwt.secret")))
      val tomorrow = LocalDate.now().plusDays(1).atStartOfDay().plusHours(3)
      jwtGenerator.setExpirationTime(Date.from(tomorrow.atZone(ZoneId.systemDefault()).toInstant()))
      val profile = credentials.getUserProfile
      val token = jwtGenerator.generate(profile)
      profile.addAttribute("token", token)
    }
  }
}
Example 91
Source File: SettableClock.scala From metronome with Apache License 2.0 | 5 votes |
package dcos.metronome

import java.time.{Clock, Instant, LocalDateTime, ZoneOffset, ZoneId, Duration}

import scala.concurrent.duration.FiniteDuration

object SettableClock {
  private val defaultJavaClock =
    Clock.fixed(LocalDateTime.of(2015, 4, 9, 12, 30, 0).toInstant(ZoneOffset.UTC), ZoneOffset.UTC)

  def ofNow() = new SettableClock(Clock.fixed(Instant.now(), ZoneOffset.UTC))
}

class SettableClock(private[this] var clock: Clock = SettableClock.defaultJavaClock) extends Clock {
  private[this] var subscribers: List[() => Unit] = Nil

  def onChange(fn: () => Unit): Unit = synchronized {
    subscribers = fn :: subscribers
  }

  override def getZone: ZoneId = clock.getZone

  override def instant(): Instant = clock.instant()

  override def withZone(zoneId: ZoneId): Clock = new SettableClock(clock.withZone(zoneId))

  def +=(duration: FiniteDuration): Unit = plus(duration)

  def plus(duration: FiniteDuration): this.type = plus(Duration.ofMillis(duration.toMillis))

  def plus(duration: Duration): this.type = {
    clock = Clock.offset(clock, duration)
    subscribers.foreach(_())
    this
  }

  def at(instant: Instant): this.type = {
    clock = Clock.fixed(instant, clock.getZone)
    subscribers.foreach(_())
    this
  }
}
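A minimal sketch of how a settable clock like this is driven from a test; plus wraps the current clock with Clock.offset, so time advances deterministically:

import java.time.Duration

val clock = new SettableClock() // starts at the fixed default instant
val before = clock.instant()
clock.plus(Duration.ofHours(2))
assert(Duration.between(before, clock.instant()) == Duration.ofHours(2))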
Example 92
Source File: GraphiteLauncher.scala From slab with Apache License 2.0 | 5 votes |
package com.criteo.slab.example

import java.time.Duration

import com.criteo.slab.app.WebServer
import com.criteo.slab.lib.graphite.{GraphiteStore, GraphiteCodecs}
import org.slf4j.LoggerFactory

object GraphiteLauncher {
  import scala.concurrent.ExecutionContext.Implicits.global
  import SimpleBoard._
  import GraphiteCodecs._

  val logger = LoggerFactory.getLogger(this.getClass)

  def main(args: Array[String]): Unit = {
    val maybeStore = for {
      host <- sys.env.get("GRAPHITE_HOST")
      port <- sys.env.get("GRAPHITE_PORT").map(_.toInt)
      webHost <- sys.env.get("GRAPHITE_WEB_HOST")
    } yield new GraphiteStore(host, port, webHost, Duration.ofSeconds(60), Some("slab.example"), Some("slab.example.slo"))

    implicit val store = maybeStore match {
      case Some(s) =>
        logger.info("[Slab Example] using Graphite store")
        s
      case None =>
        logger.error("Graphite store is not set up")
        sys.exit(1)
    }

    WebServer(statsDays = 14)
      .attach(board)
      .apply(8080)
  }
}
Example 93
Source File: GraphiteStoreSpec.scala From slab with Apache License 2.0 | 5 votes |
package com.criteo.slab.lib

import java.net._
import java.time.Duration
import java.util.concurrent._

import com.criteo.slab.core.Context
import com.criteo.slab.helper.FutureTests
import com.criteo.slab.lib.Values.Latency
import com.criteo.slab.lib.graphite.{DataPoint, GraphiteMetric, GraphiteStore}
import org.scalatest.{FlatSpec, Matchers}

import scala.io._
import com.criteo.slab.lib.graphite.GraphiteCodecs._

class GraphiteStoreSpec extends FlatSpec with Matchers with FutureTests {
  val port = 5000
  val server = new ServerSocket(port)
  val store = new GraphiteStore("localhost", port, "http://localhost", Duration.ofSeconds(60))
  val pool = Executors.newFixedThreadPool(1)

  "value store" should "be able to send metrics to Graphite server" in {
    val f = pool.submit(new Echo(server))
    store.upload("metrics", Context.now, Latency(200))
    f.get should startWith("metrics.latency 200")
  }

  "upload" should "return the exception when failed" in {
    whenReady(
      store.upload("metrics", Context.now, Latency(200)).failed
    ) { res =>
      res shouldBe a[java.net.ConnectException]
    }
  }

  "transform metrics" should "turn Graphite metrics into pairs" in {
    val metrics = List(
      GraphiteMetric(
        "metric.one",
        List(
          DataPoint(None, 2000),
          DataPoint(Some(1), 2060)
        )
      ),
      GraphiteMetric(
        "metric.two",
        List(
          DataPoint(None, 2000),
          DataPoint(Some(2), 2060)
        )
      )
    )
    GraphiteStore.transformMetrics("metric", metrics) shouldEqual Map("one" -> 1.0, "two" -> 2.0)
  }

  "transform metrics" should "return empty if some metrics are missing" in {
    val metrics = List(
      GraphiteMetric(
        "metric.one",
        List(DataPoint(Some(1), 2000))
      ),
      GraphiteMetric(
        "metric.two",
        List(DataPoint(None, 2000))
      )
    )
    GraphiteStore.transformMetrics("metric", metrics) shouldEqual Map.empty
  }

  "group metrics" should "group metrics" in {
    val metrics = List(
      GraphiteMetric(
        "metric.one",
        List(DataPoint(Some(1), 1000), DataPoint(Some(2), 2000))
      ),
      GraphiteMetric(
        "metric.two",
        List(DataPoint(Some(3), 1000), DataPoint(Some(4), 2000))
      )
    )
    GraphiteStore.groupMetrics("metric", metrics) shouldEqual Map(
      1000000 -> Map("one" -> 1, "two" -> 3),
      2000000 -> Map("one" -> 2, "two" -> 4)
    )
  }

  class Echo(server: ServerSocket) extends Callable[String] {
    def call() = {
      val s = server.accept
      val lines = new BufferedSource(s.getInputStream).getLines
      val result = lines.mkString
      s.close
      server.close
      result
    }
  }
}
Example 94
Source File: JavaInstantGenerators.scala From scalacheck-ops with Apache License 2.0 | 5 votes |
package org.scalacheck.ops.time

import java.time.{Clock, Duration, Instant}

import org.scalacheck.Gen

object JavaInstantGenerators extends JavaInstantGenerators

trait JavaInstantGenerators extends AbstractTimeGenerators {
  override type InstantType = Instant
  override type DurationType = Duration
  override type ParamsType = Clock

  override val defaultRange: Duration = Duration.ofDays(365)

  override val defaultParams: Clock = Clock.systemUTC()

  override protected[time] def now(implicit clock: Clock): Instant = Instant.now(clock)

  import JavaLocalTimeGenerators.MAX_NANOS

  override def between(start: Instant, end: Instant)(implicit params: Clock): Gen[Instant] = {
    val startSeconds = start.getEpochSecond
    val endSeconds = end.getEpochSecond
    if (startSeconds == endSeconds) {
      for {
        nanos <- Gen.choose(start.getNano, end.getNano)
      } yield Instant.ofEpochSecond(startSeconds, nanos)
    } else {
      for {
        seconds <- Gen.choose(startSeconds, endSeconds)
        nanos <- seconds match {
          case `startSeconds` => Gen.choose(start.getNano, MAX_NANOS)
          case `endSeconds` => Gen.choose(0, end.getNano)
          case _ => Gen.choose(0, MAX_NANOS)
        }
      } yield Instant.ofEpochSecond(seconds, nanos)
    }
  }

  override protected[time] def addToCeil(
    instant: Instant,
    duration: Duration
  )(implicit params: Clock): Instant = {
    try instant plus duration
    catch {
      case ex: ArithmeticException => Instant.MAX
    }
  }

  override protected[time] def subtractToFloor(
    instant: Instant,
    duration: Duration
  )(implicit params: Clock): Instant = {
    try instant minus duration
    catch {
      case ex: ArithmeticException => Instant.MIN
    }
  }
}
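The addToCeil/subtractToFloor overrides saturate at Instant.MAX/MIN instead of overflowing. A standalone sketch of the saturating-add idea (this version also catches DateTimeException, which Instant.plus throws when the result leaves the supported range):

import java.time.{DateTimeException, Duration, Instant}

def plusSaturating(instant: Instant, duration: Duration): Instant =
  try instant.plus(duration)
  catch { case _: ArithmeticException | _: DateTimeException => Instant.MAX }

plusSaturating(Instant.MAX.minusSeconds(1), Duration.ofDays(1)) // Instant.MAX, no exception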
Example 95
Source File: JavaLocalTimeGenerators.scala From scalacheck-ops with Apache License 2.0 | 5 votes |
package org.scalacheck.ops.time

import java.time.{Clock, Duration, LocalTime}

import org.scalacheck.Gen

trait JavaLocalTimeGenerators extends AbstractTimeGenerators {
  override type InstantType = LocalTime
  override type DurationType = Duration
  override type ParamsType = Clock

  override val defaultParams: Clock = Clock.systemUTC()

  override val defaultRange: Duration = Duration.ofHours(24)

  override protected[time] def now(implicit params: Clock): LocalTime = LocalTime.now(params)

  override def between(start: LocalTime, end: LocalTime)(implicit params: Clock): Gen[LocalTime] = {
    for {
      nanoOfDay <- Gen.choose(start.toNanoOfDay, end.toNanoOfDay)
    } yield LocalTime.ofNanoOfDay(nanoOfDay)
  }

  override protected[time] def addToCeil(instant: LocalTime, duration: Duration)
                                        (implicit params: Clock): LocalTime = {
    instant plus duration
  }

  override protected[time] def subtractToFloor(instant: LocalTime, duration: Duration)
                                              (implicit params: Clock): LocalTime = {
    instant minus duration
  }
}

object JavaLocalTimeGenerators extends JavaLocalTimeGenerators {
  final val MAX_NANOS = 999999999
}
Example 96
Source File: ActivityStreamServiceImpl.scala From lagom-scala-chirper with Apache License 2.0 | 5 votes |
package sample.chirper.activity.impl

import java.time.{Duration, Instant}

import com.lightbend.lagom.scaladsl.api.ServiceCall
import sample.chirper.activity.api.ActivityStreamService
import sample.chirper.chirp.api.{ChirpService, HistoricalChirpsRequest, LiveChirpRequest}
import sample.chirper.friend.api.FriendService

import scala.concurrent.{ExecutionContext, Future}

class ActivityStreamServiceImpl(
    friendService: FriendService,
    chirpService: ChirpService
)(implicit val ec: ExecutionContext) extends ActivityStreamService {

  override def health() = ServiceCall { _ =>
    Future.successful("OK")
  }

  override def getLiveActivityStream(userId: String) = ServiceCall { _ =>
    for {
      user <- friendService.getUser(userId).invoke()
      userIds = user.friends :+ userId
      chirpsReq = LiveChirpRequest(userIds)
      // Note that this stream will not include changes to friend associates,
      // e.g. adding a new friend.
      results <- chirpService.getLiveChirps().invoke(chirpsReq)
    } yield results
  }

  override def getHistoricalActivityStream(userId: String) = ServiceCall { _ =>
    for {
      user <- friendService.getUser(userId).invoke()
      userIds = user.friends :+ userId
      // FIXME we should use HistoricalActivityStreamReq request parameter
      fromTime = Instant.now().minus(Duration.ofDays(7))
      chirpsReq = HistoricalChirpsRequest(fromTime, userIds)
      results <- chirpService.getHistoricalChirps().invoke(chirpsReq)
    } yield results
  }
}
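A standalone sketch of the fromTime calculation used above; subtracting a Duration from an Instant yields the lower bound for the historical query:

import java.time.{Duration, Instant}

val now = Instant.now()
val weekAgo = now.minus(Duration.ofDays(7))
assert(Duration.between(weekAgo, now) == Duration.ofDays(7))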
Example 97
Source File: ParameterMappers.scala From neotypes with MIT License | 5 votes |
package neotypes
package implicits.mappers

import java.time.{Duration, LocalDate, LocalDateTime, LocalTime, Period, OffsetDateTime, OffsetTime, ZonedDateTime}
import java.util.UUID

import mappers.ParameterMapper

import org.neo4j.driver.v1.Value
import org.neo4j.driver.v1.types.{IsoDuration, Point}

import scala.collection.Iterable
import scala.jdk.CollectionConverters._

trait ParameterMappers {
  implicit final val BooleanParameterMapper: ParameterMapper[Boolean] =
    ParameterMapper.fromCast(Boolean.box)

  implicit final val ByteArrayParameterMapper: ParameterMapper[Array[Byte]] =
    ParameterMapper.identity

  implicit final val DoubleParameterMapper: ParameterMapper[Double] =
    ParameterMapper.fromCast(Double.box)

  implicit final val DurationParameterMapper: ParameterMapper[Duration] =
    ParameterMapper.identity

  implicit final val FloatParameterMapper: ParameterMapper[Float] =
    ParameterMapper.fromCast(Float.box)

  implicit final val IntParameterMapper: ParameterMapper[Int] =
    ParameterMapper.fromCast(Int.box)

  implicit final val IsoDurationParameterMapper: ParameterMapper[IsoDuration] =
    ParameterMapper.identity

  implicit final val LocalDateParameterMapper: ParameterMapper[LocalDate] =
    ParameterMapper.identity

  implicit final val LocalDateTimeParameterMapper: ParameterMapper[LocalDateTime] =
    ParameterMapper.identity

  implicit final val LocalTimeParameterMapper: ParameterMapper[LocalTime] =
    ParameterMapper.identity

  implicit final val LongParameterMapper: ParameterMapper[Long] =
    ParameterMapper.fromCast(Long.box)

  implicit final val OffsetDateTimeParameterMapper: ParameterMapper[OffsetDateTime] =
    ParameterMapper.identity

  implicit final val OffsetTimeParameterMapper: ParameterMapper[OffsetTime] =
    ParameterMapper.identity

  implicit final val PeriodParameterMapper: ParameterMapper[Period] =
    ParameterMapper.identity

  implicit final val PointParameterMapper: ParameterMapper[Point] =
    ParameterMapper.identity

  implicit final val StringParameterMapper: ParameterMapper[String] =
    ParameterMapper.identity

  implicit final val UUIDParameterMapper: ParameterMapper[UUID] =
    ParameterMapper[String].contramap(_.toString)

  implicit final val ValueParameterMapper: ParameterMapper[Value] =
    ParameterMapper.identity

  implicit final val ZonedDateTimeParameterMapper: ParameterMapper[ZonedDateTime] =
    ParameterMapper.identity

  private final def iterableParameterMapper[T](mapper: ParameterMapper[T]): ParameterMapper[Iterable[T]] =
    ParameterMapper.fromCast { col =>
      col.iterator.map(v => mapper.toQueryParam(v).underlying).asJava
    }

  implicit final def collectionParameterMapper[T, C[_]](implicit mapper: ParameterMapper[T], ev: C[T] <:< Iterable[T]): ParameterMapper[C[T]] =
    iterableParameterMapper(mapper).contramap(ev)

  private final def iterableMapParameterMapper[V](mapper: ParameterMapper[V]): ParameterMapper[Iterable[(String, V)]] =
    ParameterMapper.fromCast { col =>
      col.iterator.map {
        case (key, v) => key -> mapper.toQueryParam(v).underlying
      }.toMap.asJava
    }

  implicit final def mapParameterMapper[V, M[_, _]](implicit mapper: ParameterMapper[V], ev: M[String, V] <:< Iterable[(String, V)]): ParameterMapper[M[String, V]] =
    iterableMapParameterMapper(mapper).contramap(ev)

  implicit final def optionAnyRefParameterMapper[T](implicit mapper: ParameterMapper[T]): ParameterMapper[Option[T]] =
    ParameterMapper.fromCast { optional =>
      optional.map(v => mapper.toQueryParam(v).underlying).orNull
    }
}
Example 98
Source File: BaseIntegrationSpec.scala From neotypes with MIT License | 5 votes |
package neotypes

import java.time.Duration

import com.dimafeng.testcontainers.{ForAllTestContainer, Neo4jContainer}
import org.neo4j.driver.{v1 => neo4j}
import org.scalatest.flatspec.AsyncFlatSpecLike
import org.testcontainers.containers.BindMode
import org.testcontainers.containers.wait.strategy.HostPortWaitStrategy
import org.testcontainers.images.PullPolicy

import scala.concurrent.Future

import com.dimafeng.testcontainers.Container
import com.dimafeng.testcontainers.ContainerDef
import org.testcontainers.utility.MountableFile

abstract class BaseIntegrationSpec[F[_]](testkit: EffectTestkit[F])
    extends BaseEffectSpec(testkit) with AsyncFlatSpecLike with ForAllTestContainer {

  protected def initQuery: String

  override final val container = Neo4jContainer(neo4jImageVersion = "neo4j:3.5")
    .configure(_.withoutAuthentication())
    .configure(_.addEnv("NEO4JLABS_PLUGINS", "[\"graph-data-science\"]"))
    .configure(_.withImagePullPolicy(PullPolicy.alwaysPull()))

  protected lazy final val driver = neo4j.GraphDatabase.driver(container.boltUrl)
  protected lazy final val session = driver.session()

  private final def runQuery(query: String): Unit = {
    session.writeTransaction(
      new neo4j.TransactionWork[Unit] {
        override def execute(tx: neo4j.Transaction): Unit =
          tx.run(query)
      }
    )
  }

  override final def afterStart(): Unit = {
    if (initQuery != null) {
      runQuery(initQuery)
    }
  }

  override final def beforeStop(): Unit = {
    session.close()
    driver.close()
  }

  protected final def cleanDb(): Unit = {
    runQuery("MATCH (n) DETACH DELETE n")
  }

  protected final def execute[T](work: Session[F] => F[T])(implicit F: Async[F]): F[T] =
    work(new Session[F](session))

  protected final def executeAsFuture[T](work: Session[F] => F[T]): Future[T] =
    fToFuture(execute(work))
}

object BaseIntegrationSpec {
  final val DEFAULT_INIT_QUERY: String =
    """CREATE (Charlize:Person {name:'Charlize Theron', born:1975})
      |CREATE (ThatThingYouDo:Movie {title:'That Thing You Do', released:1996, tagline:'In every life there comes a time when that thing you dream becomes that thing you do'})
      |CREATE (Charlize)-[:ACTED_IN {roles:['Tina']}]->(ThatThingYouDo)
      |CREATE (t:Test {added: date('2018-11-26')})
      |CREATE (ThatThingYouDo)-[:TEST_EDGE]->(t)""".stripMargin

  final val MULTIPLE_VALUES_INIT_QUERY: String =
    (0 to 10).map(n => s"CREATE (:Person {name: $n})").mkString("\n")

  final val EMPTY_INIT_QUERY: String =
    null
}
Example 99
Source File: TimeScheduleSpec.scala From vamp with Apache License 2.0 | 5 votes |
package io.vamp.model.workflow

import java.time.{ Duration, Period }

import io.vamp.model.artifact.TimeSchedule
import io.vamp.model.reader.ReaderSpec
import io.vamp.model.artifact.TimeSchedule.{ RepeatForever, RepeatPeriod }
import org.junit.runner.RunWith
import org.scalatest._
import org.scalatest.junit.JUnitRunner

@RunWith(classOf[JUnitRunner])
class TimeScheduleSpec extends FlatSpec with Matchers with ReaderSpec {

  "TimeSchedule" should "read an empty period" in {
    TimeSchedule("") should have(
      'period(RepeatPeriod(None, None)),
      'repeat(RepeatForever),
      'start(None)
    )
  }

  it should "read days" in {
    TimeSchedule("P1Y2M3D") should have(
      'period(RepeatPeriod(Some(Period.parse("P1Y2M3D")), None)),
      'repeat(RepeatForever),
      'start(None)
    )
  }

  it should "read time" in {
    TimeSchedule("PT1H2M3S") should have(
      'period(RepeatPeriod(None, Some(Duration.parse("PT1H2M3S")))),
      'repeat(RepeatForever),
      'start(None)
    )
  }

  it should "read days and time" in {
    TimeSchedule("P1Y2M3DT1H2M3S") should have(
      'period(RepeatPeriod(Some(Period.parse("P1Y2M3D")), Some(Duration.parse("PT1H2M3S")))),
      'repeat(RepeatForever),
      'start(None)
    )
  }
}
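TimeSchedule splits ISO-8601 amounts between Period (date-based) and Duration (time-based). A minimal parsing sketch:

import java.time.{Duration, Period}

val dates = Period.parse("P1Y2M3D")    // 1 year, 2 months, 3 days
val times = Duration.parse("PT1H2M3S") // 1 hour, 2 minutes, 3 seconds
assert(times.getSeconds == 3723)       // 3600 + 120 + 3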
Example 100
Source File: FtpFileLister.scala From stream-reactor with Apache License 2.0 | 5 votes |
package com.datamountaineer.streamreactor.connect.ftp.source

import java.nio.file.{FileSystems, Paths}
import java.time.{Duration, Instant}

import com.typesafe.scalalogging.StrictLogging
import org.apache.commons.net.ftp.{FTPClient, FTPFile}

// org.apache.commons.net.ftp.FTPFile only contains the relative path
case class AbsoluteFtpFile(ftpFile: FTPFile, parentDir: String) {
  def name() = ftpFile.getName
  def size() = ftpFile.getSize
  def timestamp() = ftpFile.getTimestamp.toInstant
  def path() = Paths.get(parentDir, name).toString
  def age(): Duration = Duration.between(timestamp, Instant.now)
}

case class FtpFileLister(ftp: FTPClient) extends StrictLogging {

  def pathMatch(pattern: String, path: String): Boolean = {
    val g = s"glob:$pattern"
    FileSystems.getDefault.getPathMatcher(g).matches(Paths.get(path))
  }

  def isGlobPattern(pattern: String): Boolean =
    List("*", "?", "[", "{").exists(pattern.contains(_))

  def listFiles(path: String): Seq[AbsoluteFtpFile] = {
    val pathParts: Seq[String] = path.split("/")

    val (basePath, patterns) = pathParts.zipWithIndex.view.find { case (part, _) => isGlobPattern(part) } match {
      case Some((_, index)) => pathParts.splitAt(index)
      case _ => (pathParts.init, Seq[String](pathParts.last))
    }

    def iter(basePath: String, patterns: List[String]): Seq[AbsoluteFtpFile] = {
      Option(ftp.listFiles(basePath + "/")) match {
        case Some(files) => patterns match {
          case pattern :: Nil => {
            files.filter(f => f.isFile && pathMatch(pattern, f.getName))
              .map(AbsoluteFtpFile(_, basePath + "/"))
          }
          case pattern :: rest => {
            files.filter(f => f.getName() != "." && f.getName() != ".." && pathMatch(pattern, f.getName))
              .flatMap(f => iter(Paths.get(basePath, f.getName).toString, rest))
          }
          case _ => Seq()
        }
        case _ => Seq()
      }
    }

    iter(Paths.get("/", basePath: _*).toString, patterns.toList)
  }
}
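The age calculation above is just Duration.between from the file timestamp to now. A standalone sketch, with a hypothetical timestamp:

import java.time.{Duration, Instant}

val lastModified = Instant.parse("2020-01-01T00:00:00Z") // hypothetical file timestamp
val age = Duration.between(lastModified, Instant.now)
val olderThanHour = age.compareTo(Duration.ofHours(1)) > 0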
Example 101
Source File: DefaultMetricsOps.scala From datadog4s with MIT License | 5 votes |
package com.avast.datadog4s.extension.http4s.impl

import java.time.Duration

import cats.effect.Sync
import cats.effect.concurrent.Ref
import cats.syntax.flatMap._
import com.avast.datadog4s.api.MetricFactory
import com.avast.datadog4s.api.tag.Tagger
import com.avast.datadog4s.extension.http4s.DatadogMetricsOps.ClassifierTags
import com.avast.datadog4s.extension.http4s._
import com.github.ghik.silencer.silent
import org.http4s.metrics.{ MetricsOps, TerminationType }
import org.http4s.{ Method, Status }

private[http4s] class DefaultMetricsOps[F[_]](
  metricFactory: MetricFactory[F],
  classifierTags: ClassifierTags,
  activeConnectionsRef: Ref[F, ActiveConnections]
)(implicit
  F: Sync[F]
) extends MetricsOps[F] {
  private[this] val methodTagger = Tagger.make[Method]("method")
  @deprecated("please use terminationTypeTagger - this will be removed in next release 0.8.0", "0.6.3")
  private[this] val typeTagger = Tagger.make[TerminationType]("type")
  private[this] val terminationTypeTagger = Tagger.make[TerminationType]("termination_type")
  private[this] val statusCodeTagger = Tagger.make[Status]("status_code")
  private[this] val statusBucketTagger = Tagger.make[String]("status_bucket")
  private[this] val activeRequests = metricFactory.gauge.long("active_requests")

  override def increaseActiveRequests(classifier: Option[String]): F[Unit] =
    modifyActiveRequests(classifier, 0, 1)

  override def decreaseActiveRequests(classifier: Option[String]): F[Unit] =
    // if we try to decrement non existing classifier, make sure it's zero
    modifyActiveRequests(classifier, 1, -1)

  private def modifyActiveRequests(classifier: Option[String], default: Int, delta: Int): F[Unit] =
    activeConnectionsRef.modify { activeConnections =>
      val current = activeConnections.getOrElse(classifier, default)
      val next = current + delta
      val nextActiveConnections = activeConnections.updated(classifier, next)
      val action = activeRequests.set(
        next.toLong,
        classifier.toList.flatMap(classifierTags): _*
      )
      (nextActiveConnections, action)
    }.flatten

  private[this] val headersTime = metricFactory.timer("headers_time")

  override def recordHeadersTime(method: Method, elapsed: Long, classifier: Option[String]): F[Unit] =
    headersTime
      .record(
        Duration.ofNanos(elapsed),
        methodTagger.tag(method) :: classifier.toList.flatMap(classifierTags): _*
      )

  private[this] val requestCount = metricFactory.count("requests_count")
  private[this] val requestLatency = metricFactory.timer("requests_latency")

  override def recordTotalTime(method: Method, status: Status, elapsed: Long, classifier: Option[String]): F[Unit] = {
    val tags = methodTagger.tag(method) ::
      statusBucketTagger.tag(s"${status.code / 100}xx") ::
      statusCodeTagger.tag(status) :: classifier.toList.flatMap(classifierTags)
    requestCount.inc(tags: _*) >> requestLatency.record(Duration.ofNanos(elapsed), tags: _*)
  }

  private[this] val abnormalCount = metricFactory.count("abnormal_count")
  private[this] val abnormalLatency = metricFactory.timer("abnormal_latency")

  override def recordAbnormalTermination(
    elapsed: Long,
    terminationType: TerminationType,
    classifier: Option[String]
  ): F[Unit] = {
    val terminationTpe = terminationTypeTagger.tag(terminationType)
    @silent("deprecated")
    val tpe = typeTagger.tag(terminationType)
    val tags = tpe :: terminationTpe :: classifier.toList.flatMap(classifierTags)
    abnormalCount.inc(tags: _*) >> abnormalLatency.record(Duration.ofNanos(elapsed), tags: _*)
  }
}
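http4s hands elapsed time to MetricsOps as raw nanoseconds, and the code above wraps it with Duration.ofNanos before recording. The conversion in isolation:

import java.time.Duration

val elapsedNanos = 1234567890L // as passed by http4s
val elapsed = Duration.ofNanos(elapsedNanos)
assert(elapsed.toMillis == 1234)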
Example 102
Source File: JvmMonitoring.scala From datadog4s with MIT License | 5 votes |
package com.avast.datadog4s.extension.jvm

import java.time.Duration

import cats.effect.{ ConcurrentEffect, Resource, Sync, Timer }
import com.avast.cloud.datadog4s.helpers.Repeated
import com.avast.datadog4s.api.MetricFactory

object JvmMonitoring {
  type ErrorHandler[F[_]] = Throwable => F[Unit]

  case class Config(
    delay: Duration = Duration.ofSeconds(60),
    timeout: Duration = Duration.ofSeconds(10)
  )

  def default[F[_]: ConcurrentEffect: Timer](factory: MetricFactory[F]): Resource[F, Unit] =
    configured(factory, Config(), defaultErrorHandler)

  def configured[F[_]: ConcurrentEffect: Timer](
    factory: MetricFactory[F],
    config: Config,
    errorHandler: ErrorHandler[F]
  ): Resource[F, Unit] = {
    val reporter = new JvmReporter[F](factory)

    Repeated.run[F](config.delay, config.timeout, errorHandler)(reporter.collect).map(_ => ())
  }

  private def defaultErrorHandler[F[_]: Sync]: ErrorHandler[F] =
    err =>
      Sync[F].delay {
        println(s"Error during metrics collection: ${err.getMessage}")
        err.printStackTrace()
      }
}
Example 103
Source File: JvmMonitoringTest.scala From datadog4s with MIT License | 5 votes |
package com.avast.datadog4s.extension.jvm

import java.time.Duration

import cats.effect.{ ContextShift, IO, Timer }
import com.avast.cloud.datadog4s.inmemory.MockMetricsFactory
import com.avast.datadog4s.extension.jvm.JvmMonitoring.Config
import org.scalatest.flatspec.AnyFlatSpec
import org.scalatest.matchers.must.Matchers
import cats.syntax.flatMap._

import scala.concurrent.ExecutionContext
import scala.concurrent.duration._

class JvmMonitoringTest extends AnyFlatSpec with Matchers {
  private val ec: ExecutionContext = scala.concurrent.ExecutionContext.Implicits.global
  implicit val contextShift: ContextShift[IO] = cats.effect.IO.contextShift(ec)
  implicit val timer: Timer[IO] = IO.timer(ec)

  val noopErrHandler: Throwable => IO[Unit] = (_: Throwable) => IO.unit

  "JvmMonitoring" should "create all expected metrics and update them periodically" in {
    val testEffect = MockMetricsFactory.make[IO].flatMap { inmemory =>
      val runTest =
        JvmMonitoring
          .configured(inmemory, Config().copy(delay = Duration.ofMillis(10)), noopErrHandler)
          .use(_ => IO.never)
          .timeout(100.millis)
          .attempt

      runTest >> inmemory.state.get
    }

    val result = testEffect.unsafeRunSync()
    result.keySet must equal(expectedAspects)
    result.values.foreach { vector =>
      vector.groupBy(_.tags).foreach {
        case (_, records) =>
          records.size must be > 0
          records.size must be < 15
      }
    }
  }

  val minorGcParams =
    if (System.getProperty("java.version").startsWith("1.8.")) Set.empty
    else Set("jvm.gc.minor_collection_time", "jvm.gc.minor_collection_count")

  val expectedAspects: Set[String] = Set(
    "jvm.cpu.load",
    "jvm.cpu.time",
    "jvm.filedescriptor.open",
    "jvm.heap_memory",
    "jvm.heap_memory_committed",
    "jvm.heap_memory_init",
    "jvm.heap_memory_max",
    "jvm.heap_memory.eden",
    "jvm.heap_memory.eden_committed",
    "jvm.heap_memory.eden_max",
    "jvm.heap_memory.survivor",
    "jvm.heap_memory.survivor_committed",
    "jvm.heap_memory.survivor_max",
    "jvm.heap_memory.old_gen",
    "jvm.heap_memory.old_gen_committed",
    "jvm.heap_memory.old_gen_max",
    "jvm.non_heap_memory",
    "jvm.non_heap_memory_committed",
    "jvm.non_heap_memory_init",
    "jvm.non_heap_memory_max",
    "jvm.non_heap_memory.code_cache",
    "jvm.non_heap_memory.code_cache_committed",
    "jvm.non_heap_memory.code_cache_max",
    "jvm.non_heap_memory.metaspace",
    "jvm.non_heap_memory.metaspace_committed",
    "jvm.non_heap_memory.metaspace_max",
    "jvm.non_heap_memory.compressed_class_space",
    "jvm.non_heap_memory.compressed_class_space_committed",
    "jvm.non_heap_memory.compressed_class_space_max",
    "jvm.uptime",
    "jvm.thread_count",
    "jvm.thread_daemon",
    "jvm.thread_started",
    "jvm.loaded_classes",
    "jvm.bufferpool.instances",
    "jvm.bufferpool.bytes",
    "jvm.gc.major_collection_time",
    "jvm.gc.major_collection_count"
  ) ++ minorGcParams
}
Example 104
Source File: TimerImpl.scala From datadog4s with MIT License | 5 votes |
package com.avast.datadog4s.statsd.metric

import java.time.Duration
import java.util.concurrent.TimeUnit

import cats.effect.{ Clock, Sync }
import cats.syntax.flatMap._
import cats.syntax.functor._
import com.avast.datadog4s.api.Tag
import com.avast.datadog4s.api.metric.Timer
import com.avast.datadog4s.api.tag.Tagger
import com.timgroup.statsd.StatsDClient

import scala.collection.immutable.Seq

class TimerImpl[F[_]: Sync](
  clock: Clock[F],
  statsDClient: StatsDClient,
  aspect: String,
  sampleRate: Double,
  defaultTags: Seq[Tag]
) extends Timer[F] {
  private[this] val F = Sync[F]
  private[this] val successTagger: Tagger[Boolean] = Tagger.make("success")
  private[this] val failedTag: Tag = successTagger.tag(false)
  private[this] val succeededTag: Tag = successTagger.tag(true)
  private[this] val exceptionTagger: Tagger[Throwable] = Tagger.make("exception")

  override def time[A](value: F[A], tags: Tag*): F[A] =
    for {
      start <- clock.monotonic(TimeUnit.NANOSECONDS)
      a <- F.recoverWith(value)(measureFailed(start))
      stop <- clock.monotonic(TimeUnit.NANOSECONDS)
      _ <- record(Duration.ofNanos(stop - start), (tags :+ succeededTag): _*)
    } yield a

  private def measureFailed[A](startTime: Long, tags: Tag*): PartialFunction[Throwable, F[A]] = {
    case thr: Throwable =>
      val finalTags = tags :+ exceptionTagger.tag(thr) :+ failedTag
      val computation = for {
        stop <- clock.monotonic(TimeUnit.NANOSECONDS)
        _ <- record(Duration.ofNanos(stop - startTime), finalTags: _*)
      } yield ()
      computation >> F.raiseError(thr)
  }

  override def record(duration: Duration, tags: Tag*): F[Unit] =
    F.delay {
      statsDClient.recordExecutionTime(aspect, duration.toMillis, sampleRate, (tags ++ defaultTags): _*)
    }
}
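A minimal sketch of the measure-then-record pattern used in time above, assuming cats-effect 2's Clock for IO:

import java.time.Duration
import java.util.concurrent.TimeUnit
import cats.effect.{Clock, IO}

def timed[A](clock: Clock[IO])(fa: IO[A]): IO[(A, Duration)] =
  for {
    start <- clock.monotonic(TimeUnit.NANOSECONDS) // monotonic time is safe for intervals
    a     <- fa
    stop  <- clock.monotonic(TimeUnit.NANOSECONDS)
  } yield (a, Duration.ofNanos(stop - start))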
Example 105
Source File: Repeated.scala From datadog4s with MIT License | 5 votes |
package com.avast.cloud.datadog4s.helpers

import java.time.Duration

import cats.effect.{ Concurrent, Resource, Timer }
import cats.syntax.applicativeError._
import cats.syntax.flatMap._
import cats.syntax.apply._
import cats.syntax.applicative._

import scala.concurrent.duration._

object Repeated {

  def run[F[_]: Concurrent: Timer](
    delay: Duration,
    iterationTimeout: Duration,
    errorHandler: Throwable => F[Unit]
  )(task: F[Unit]): Resource[F, F[Unit]] = {
    val safeTask = Concurrent.timeout(task, toScala(iterationTimeout)).attempt.flatMap {
      case Right(a) => a.pure[F]
      case Left(e)  => errorHandler(e)
    }

    val snooze = Timer[F].sleep(toScala(delay))
    val process = (safeTask *> snooze).foreverM[Unit]

    Concurrent[F].background(process)
  }

  private def toScala(duration: Duration): FiniteDuration =
    duration.toMillis.millis
}
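Conversions between java.time.Duration and Scala's FiniteDuration recur throughout these examples. A round-trip sketch (the millisecond route used above drops sub-millisecond precision; the nanosecond route keeps it):

import java.time.Duration
import scala.concurrent.duration._

def toScala(d: Duration): FiniteDuration = d.toMillis.millis
def toJava(d: FiniteDuration): Duration = Duration.ofNanos(d.toNanos)

assert(toScala(Duration.ofSeconds(2)) == 2.seconds)
assert(toJava(1500.millis) == Duration.ofMillis(1500))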
Example 106
Source File: MockMetricsFactory.scala From datadog4s with MIT License | 5 votes |
package com.avast.cloud.datadog4s.inmemory

import java.time.Duration

import cats.effect.Sync
import cats.effect.concurrent.Ref
import cats.syntax.flatMap._
import cats.syntax.functor._
import com.avast.datadog4s.api.metric._
import com.avast.datadog4s.api.{ DistributionFactory, GaugeFactory, HistogramFactory, MetricFactory, Tag }

class MockMetricsFactory[F[_]: Sync](val state: Ref[F, Map[String, Vector[Record[Any]]]]) extends MetricFactory[F] {

  private def updateState[A](aspect: String, value: A, tags: Tag*): F[Unit] =
    state.update { oldState =>
      val updatedField = oldState.getOrElse(aspect, Vector.empty) :+ Record[Any](value, tags)
      oldState.updated(aspect, updatedField)
    }.void

  override def histogram: HistogramFactory[F] = new HistogramFactory[F] {
    override def long(aspect: String, sampleRate: Option[Double]): Histogram[F, Long] = new Histogram[F, Long] {
      override def record(value: Long, tags: Tag*): F[Unit] =
        updateState(aspect, value, tags: _*)
    }

    override def double(aspect: String, sampleRate: Option[Double]): Histogram[F, Double] = new Histogram[F, Double] {
      override def record(value: Double, tags: Tag*): F[Unit] =
        updateState(aspect, value, tags: _*)
    }
  }

  override def gauge: GaugeFactory[F] = new GaugeFactory[F] {
    override def long(aspect: String, sampleRate: Option[Double]): Gauge[F, Long] = new Gauge[F, Long] {
      override def set(value: Long, tags: Tag*): F[Unit] =
        updateState(aspect, value, tags: _*)
    }

    override def double(aspect: String, sampleRate: Option[Double]): Gauge[F, Double] = new Gauge[F, Double] {
      override def set(value: Double, tags: Tag*): F[Unit] =
        updateState(aspect, value, tags: _*)
    }
  }

  override def timer(aspect: String, sampleRate: Option[Double]): Timer[F] = new Timer[F] {
    override def time[A](f: F[A], tags: Tag*): F[A] =
      f.flatMap(a => updateState(aspect, a, tags: _*).as(a))

    override def record(duration: Duration, tags: Tag*): F[Unit] =
      updateState[Duration](aspect, duration, tags: _*)
  }

  override def count(aspect: String, sampleRate: Option[Double]): Count[F] = new Count[F] {
    override def modify(delta: Int, tags: Tag*): F[Unit] =
      updateState(aspect, delta, tags: _*)
  }

  override def uniqueSet(aspect: String): UniqueSet[F] = new UniqueSet[F] {
    override def record(value: String, tags: Tag*): F[Unit] =
      updateState(aspect, value, tags: _*)
  }

  override def distribution: DistributionFactory[F] = new DistributionFactory[F] {
    override def long(aspect: String, sampleRate: Option[Double]): Distribution[F, Long] = new Distribution[F, Long] {
      override def record(value: Long, tags: Tag*): F[Unit] =
        updateState(aspect, value, tags: _*)
    }

    override def double(aspect: String, sampleRate: Option[Double]): Distribution[F, Double] = new Distribution[F, Double] {
      override def record(value: Double, tags: Tag*): F[Unit] =
        updateState(aspect, value, tags: _*)
    }
  }

  override def withTags(tags: Tag*): MetricFactory[F] = this

  override def withScope(name: String): MetricFactory[F] = this
}

object MockMetricsFactory {

  def make[F[_]: Sync]: F[MockMetricsFactory[F]] =
    Ref.of(Map.empty[String, Vector[Record[Any]]]).map(state => new MockMetricsFactory[F](state))
}
Example 107
Source File: CronExpression.scala From cuttle with Apache License 2.0 | 5 votes |
package com.criteo.cuttle.cron

import cron4s.Cron
import cron4s.lib.javatime._

import java.time.{Duration, Instant, ZoneId, ZoneOffset}
import java.time.temporal.ChronoUnit

import io.circe.{Encoder, Json}
import io.circe.syntax._

import scala.concurrent.duration._

case class CronExpression(cronExpression: String, tz: ZoneId = ZoneOffset.UTC) {

  // https://www.baeldung.com/cron-expressions
  // https://www.freeformatter.com/cron-expression-generator-quartz.html
  private val cronExpr = Cron.unsafeParse(cronExpression)

  private def toZonedDateTime(instant: Instant) =
    instant.atZone(tz)

  def nextEvent(): Option[ScheduledAt] = {
    val instant = Instant.now()
    cronExpr.next(toZonedDateTime(instant)).map { next =>
      // add 1 second as between doesn't include the end of the interval
      val delay = Duration.between(instant, next).get(ChronoUnit.SECONDS).seconds.plus(1.second)
      ScheduledAt(next.toInstant, delay)
    }
  }
}

object CronExpression {
  implicit val encodeUser: Encoder[CronExpression] = new Encoder[CronExpression] {
    override def apply(cronExpression: CronExpression) =
      Json.obj("expression" -> cronExpression.cronExpression.asJson, "tz" -> cronExpression.tz.getId.asJson)
  }
}
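The delay computed in nextEvent is the whole-second gap between now and the next fire time, plus one second to make the interval end-inclusive. A standalone sketch with a hypothetical fire time:

import java.time.{Duration, Instant}
import java.time.temporal.ChronoUnit
import scala.concurrent.duration._

val now = Instant.now()
val next = now.plusSeconds(90) // hypothetical next cron fire time
val delay = Duration.between(now, next).get(ChronoUnit.SECONDS).seconds.plus(1.second)
assert(delay == 91.seconds)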
Example 108
Source File: StructuredStreams.scala From kafka-examples with Apache License 2.0 | 5 votes |
package com.cloudera.streaming.refapp

import java.time.Duration

object StructuredStreams {

  def main(args: Array[String]) {
    if (args.length != 3) {
      sys.error(
        """Usage:
          |com.cloudera.streaming.refapp.StructuredStreams inputDir outputDir kudu-master
          |inputDir should have the same structure as the src/main/resources/samples directory of this project
          |outputDir is created if it does not exist and it's purged if it exists
          |kudu-master host:port pair pointing to a kudu master instance""".stripMargin)
    }
    val Array(inputDir, outputDir, kuduMaster) = args

    val spark = EmbeddedSpark.sparkSession

    val fileSource = new FileSources(spark, inputDir)
    val fileSink = new FileSinks(outputDir, defaultCheckpointLocation)

    val kafkaConfig = EmbeddedKafkaBroker.defaultKafkaConfig
    val kafkaSource = new KafkaSource(spark, kafkaConfig)

    val kuduDatabase = "streaming_ref"
    val kuduSource = new KuduSource(spark, kuduMaster, kuduDatabase)
    val kuduSink = new KuduSink(kuduMaster, kuduDatabase, defaultCheckpointLocation)

    val application = new Application(
      spark,
      Sources(
        statesFromCluster = kuduSource.loadTable("states"),
        customersFromCluster = fileSource.jsonFile("customers"),
        vendorsFromCluster = kuduSource.loadTable("vendors"),
        // customersFromStream = fileSource.jsonStream("customers", "update_timestamp"),
        // vendorsFromStream = fileSource.jsonStream("vendors", "update_timestamp"),
        // transactionsFromStream = fileSource.jsonStream("transactions", "event_timestamp")
        customersFromStream = kafkaSource.jsonStreamWithKafkaTimestamp("customer"),
        vendorsFromStream = kafkaSource.jsonStreamWithTimestampFromMessage("vendor", "update_timestamp"),
        transactionsFromStream = kafkaSource.jsonStreamWithTimestampFromMessage("transaction", "event_timestamp")
      ),
      Sinks(
        // invalidTransactions = fileSink.csv("invalidTransactions"),
        // validTransactions = fileSink.csv("validTransactions"),
        // customerOrphans = fileSink.csv("customerOrphans"),
        // vendorOrphans = fileSink.csv("vendorOrphans"),
        // customers = fileSink.csv("customers"),
        // vendors = fileSink.csv("vendors"),
        // transactionsOperationalMetadata = fileSink.csv("transactionsOperationalMetadata")
        validTransactions = kuduSink.writeTable("valid_transactions"),
        invalidTransactions = kuduSink.writeTable("invalid_transactions"),
        customerOrphans = kuduSink.writeTable("customer_orphans"),
        vendorOrphans = kuduSink.writeTable("vendor_orphans"),
        customers = kuduSink.writeTable("customers"),
        vendors = kuduSink.writeTable("vendors"),
        transactionsOperationalMetadata = kuduSink.writeTable("transactions_operational_metadata")
      ),
      clusterStartup = EmbeddedKafkaBroker.start(),
      initSources = {
        CustomerGenerator(kafkaConfig, "customer").start()
        VendorGenerator(kafkaConfig, "vendor").start()
        TransactionGenerator(kafkaConfig, "transaction").start()
      },
      cleanOutput = fileSink.cleanOutputs,
      queryRestartDurations = Map("valid_transactions" -> Duration.ofMinutes(1))
    )
    application.start()
    spark.streams.awaitAnyTermination()
  }
}
Example 109
Source File: Util.scala From iep-apps with Apache License 2.0 | 5 votes |
package com.netflix.atlas.slotting

import java.nio.ByteBuffer
import java.time.Duration
import java.util.concurrent.ScheduledFuture

import com.netflix.iep.NetflixEnvironment
import com.netflix.spectator.api.Registry
import com.netflix.spectator.impl.Scheduler
import com.typesafe.config.Config
import com.typesafe.scalalogging.StrictLogging

object Util extends StrictLogging {

  def getLongOrDefault(config: Config, basePath: String): Long = {
    val env = NetflixEnvironment.accountEnv()
    val region = NetflixEnvironment.region()

    if (config.hasPath(s"$basePath.$env.$region"))
      config.getLong(s"$basePath.$env.$region")
    else
      config.getLong(s"$basePath.default")
  }

  def compress(s: String): ByteBuffer = {
    ByteBuffer.wrap(Gzip.compressString(s))
  }

  def decompress(buf: ByteBuffer): String = {
    Gzip.decompressString(toByteArray(buf))
  }

  def toByteArray(buf: ByteBuffer): Array[Byte] = {
    val bytes = new Array[Byte](buf.remaining)
    buf.get(bytes, 0, bytes.length)
    buf.clear()
    bytes
  }

  def startScheduler(
    registry: Registry,
    name: String,
    interval: Duration,
    fn: () => Unit
  ): ScheduledFuture[_] = {
    val scheduler = new Scheduler(registry, name, 2)

    val options = new Scheduler.Options()
      .withFrequency(Scheduler.Policy.FIXED_RATE_SKIP_IF_LONG, interval)

    scheduler.schedule(options, () => fn())
  }
}
Example 110
Source File: DynamoOpsSuite.scala From iep-apps with Apache License 2.0 | 5 votes |
package com.netflix.atlas.slotting

import java.nio.ByteBuffer
import java.nio.charset.StandardCharsets
import java.time.Duration

import org.scalatest.funsuite.AnyFunSuite

class DynamoOpsSuite extends AnyFunSuite with DynamoOps {

  def mkByteBuffer(s: String): ByteBuffer = {
    ByteBuffer.wrap(s.getBytes(StandardCharsets.UTF_8))
  }

  test("compress and decompress") {
    val input = "Atlas Slotting Service"
    val compressed = Util.compress(input)
    assert(input === Util.decompress(compressed))
  }

  test("active items spec") {
    val scanSpec = activeItemsScanSpec()
    assert(scanSpec.getFilterExpression === "#a = :v1")
    assert(scanSpec.getNameMap.toString === s"{#a=$Active}")
    assert(scanSpec.getValueMap.toString === "{:v1=true}")
  }

  test("old items spec") {
    val scanSpec = oldItemsScanSpec(Duration.ofDays(1))
    assert(scanSpec.getFilterExpression === "#t < :v1")
    assert(scanSpec.getProjectionExpression === "#n")
    assert(scanSpec.getNameMap.toString === s"{#n=$Name, #t=$Timestamp}")
  }

  test("new asg item") {
    val newData = mkByteBuffer("""{"name": "atlas_app-main-all-v001", "desiredCapacity": 3}""")
    val item = newAsgItem("atlas_app-main-all-v001", newData)
    assert(item.hasAttribute(Name))
    assert(item.hasAttribute(Data))
    assert(item.hasAttribute(Active))
    assert(item.hasAttribute(Timestamp))
  }

  test("update asg spec") {
    val oldData = mkByteBuffer("""{"name": "atlas_app-main-all-v001", "desiredCapacity": 3}""")
    val newData = mkByteBuffer("""{"name": "atlas_app-main-all-v001", "desiredCapacity": 6}""")
    val updateSpec = updateAsgItemSpec("atlas_app-main-all-v001", oldData, newData)
    assert(updateSpec.getConditionExpression === "#d = :v1")
    assert(updateSpec.getUpdateExpression === s"set #d = :v2, #a = :v3, #t = :v4")
    assert(updateSpec.getNameMap.toString === s"{#d=data, #a=$Active, #t=$Timestamp}")
  }

  test("update timestamp spec") {
    val updateSpec = updateTimestampItemSpec("atlas_app-main-all-v001", 1556568270713L)
    assert(updateSpec.getConditionExpression === "#t = :v1")
    assert(updateSpec.getUpdateExpression === s"set #a = :v2, #t = :v3")
    assert(updateSpec.getNameMap.toString === s"{#a=$Active, #t=$Timestamp}")
  }

  test("deactivate asg spec") {
    val updateSpec = deactivateAsgItemSpec("atlas_app-main-all-v001")
    assert(updateSpec.getConditionExpression === "#a = :v1")
    assert(updateSpec.getUpdateExpression === s"set #a = :v2, #t = :v3")
    assert(updateSpec.getNameMap.toString === s"{#a=$Active, #t=$Timestamp}")
  }
}
Example 111
Source File: LoadGenServiceSuite.scala From iep-apps with Apache License 2.0 | 5 votes |
package com.netflix.iep.loadgen

import java.time.Duration

import org.scalatest.funsuite.AnyFunSuite

class LoadGenServiceSuite extends AnyFunSuite {

  test("extract step from uri") {
    val actual = LoadGenService.extractStep("/graph?q=name,foo,:eq&step=60s")
    assert(actual === Some(Duration.ofSeconds(60)))
  }

  test("extract step from uri, not present") {
    val actual = LoadGenService.extractStep("/graph?q=name,foo,:eq")
    assert(actual === None)
  }

  test("extract step from uri, invalid uri") {
    val actual = LoadGenService.extractStep("/graph?q=name,{{ .SpinnakerApp }},:eq")
    assert(actual === None)
  }

  test("extract step from uri, invalid step") {
    val actual = LoadGenService.extractStep("/graph?q=name,foo,:eq&step=bad")
    assert(actual === None)
  }
}
Example 112
Source File: ActivityStreamServiceImpl.scala From activator-lagom-scala-chirper with Apache License 2.0 | 5 votes |
package sample.chirper.activity.impl

import java.time.Duration
import java.time.Instant

import scala.compat.java8.FutureConverters._
import scala.concurrent.ExecutionContext

import com.lightbend.lagom.javadsl.api.ServiceCall

import akka.NotUsed
import akka.stream.javadsl.Source
import javax.inject.Inject
import sample.chirper.activity.api.ActivityStreamService
import sample.chirper.chirp.api.Chirp
import sample.chirper.chirp.api.ChirpService
import sample.chirper.chirp.api.HistoricalChirpsRequest
import sample.chirper.chirp.api.LiveChirpsRequest
import sample.chirper.friend.api.FriendService

class ActivityStreamServiceImpl @Inject() (
    friendService: FriendService,
    chirpService: ChirpService)(implicit ec: ExecutionContext) extends ActivityStreamService {

  // Needed to convert some Scala types to Java
  import converter.ServiceCallConverter._

  override def getLiveActivityStream(userId: String): ServiceCall[NotUsed, Source[Chirp, _]] = {
    req =>
      for {
        user <- friendService.getUser(userId).invoke()
        userIds = user.friends :+ userId
        chirpsReq = LiveChirpsRequest(userIds)
        chirps <- chirpService.getLiveChirps().invoke(chirpsReq)
      } yield chirps
  }

  override def getHistoricalActivityStream(userId: String): ServiceCall[NotUsed, Source[Chirp, _]] = {
    req =>
      for {
        user <- friendService.getUser(userId).invoke()
        userIds = user.friends :+ userId
        // FIXME we should use HistoricalActivityStreamReq request parameter
        fromTime = Instant.now().minus(Duration.ofDays(7))
        chirpsReq = HistoricalChirpsRequest(fromTime, userIds)
        chirps <- chirpService.getHistoricalChirps().invoke(chirpsReq)
      } yield chirps
  }
}
Example 113
Source File: MesosAgentClient.scala From marathon-vault-plugin with MIT License | 5 votes |
package com.avast.marathon.plugin.vault

import java.lang.reflect.Type
import java.time.{Duration, Instant}

import feign.codec.{Decoder, StringDecoder}
import feign.{Feign, Param, RequestLine, Response}
import feign.gson.GsonDecoder

import scala.collection.JavaConverters._

object MesosAgentClient {
  private val contentTypeHeader = "Content-Type"
  private val gson = new GsonDecoder
  private val string = new StringDecoder

  def apply(agentUrl: String): MesosAgentClient = {
    Feign.builder()
      .decoder(new Decoder {
        override def decode(response: Response, `type`: Type): AnyRef = {
          if (response.headers().containsKey(contentTypeHeader)) {
            val value = response.headers().get(contentTypeHeader).asScala.head
            if (value.contains("json")) return gson.decode(response, `type`)
          }
          string.decode(response, `type`)
        }
      })
      .target(classOf[MesosAgentClient], agentUrl)
  }

  implicit class MesosAgentClientEx(agentClient: MesosAgentClient) {
    def waitForStdOutContentsMatch(envVarName: String, executor: MesosExecutor, fn: String => Option[String], timeout: Duration): String = {
      val stdOutPath = s"${executor.directory}/stdout"
      var matchOption: Option[String] = None
      var stdOut: String = null
      val maxTime = Instant.now().plus(timeout)
      do {
        // compareTo returns a positive value (not necessarily 1) once the deadline has passed
        if (Instant.now().compareTo(maxTime) > 0) {
          throw new RuntimeException("Timed out when waiting for task stdout to match.")
        }
        stdOut = agentClient.download(stdOutPath)
        matchOption = EnvAppCmd.extractEnvValue(envVarName, stdOut)
      } while (matchOption.isEmpty)
      matchOption.get
    }
  }
}

trait MesosAgentClient {
  @RequestLine("GET /state")
  def fetchState(): MesosAgentState

  @RequestLine("GET /files/download?path={path}")
  def download(@Param("path") path: String): String
}

case class MesosFramework(id: String, executors: Array[MesosExecutor])
case class MesosExecutor(id: String, name: String, directory: String)
case class MesosAgentState(frameworks: Array[MesosFramework])
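waitForStdOutContentsMatch implements a polling deadline by computing Instant.now().plus(timeout) once and comparing against it on each iteration. The idiom in isolation, as a minimal sketch:

import java.time.{Duration, Instant}

def retryUntil[A](timeout: Duration)(attempt: () => Option[A]): A = {
  val deadline = Instant.now().plus(timeout)
  var result = attempt()
  while (result.isEmpty) {
    if (Instant.now().isAfter(deadline)) throw new RuntimeException("Timed out")
    Thread.sleep(100) // brief back-off between attempts
    result = attempt()
  }
  result.get
}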
Example 114
Source File: MessageListener.scala From model-serving-tutorial with Apache License 2.0 | 5 votes |
package com.lightbend.modelserving.client

import java.time.Duration
import java.util.Properties

import org.apache.kafka.clients.consumer.{ConsumerConfig, KafkaConsumer}
import org.apache.kafka.common.serialization.ByteArrayDeserializer

class MessageListener[K, V](
    brokers: String,
    topic: String,
    group: String,
    keyDeserealizer: String,
    valueDeserealizer: String,
    processor: RecordProcessorTrait[K, V]) extends Runnable {

  import MessageListener._

  import scala.collection.JavaConverters._

  val consumer = new KafkaConsumer[K, V](consumerProperties(brokers, group, keyDeserealizer, valueDeserealizer))
  consumer.subscribe(Seq(topic).asJava)

  var completed = false

  def complete(): Unit = {
    completed = true
  }

  override def run(): Unit = {
    while (!completed) {
      val records = consumer.poll(Duration.ofMillis(100)).asScala
      for (record <- records) {
        processor.processRecord(record)
      }
    }
    consumer.close()
    System.out.println("Listener completes")
  }

  def start(): Unit = {
    val t = new Thread(this)
    t.start()
  }
}
Example 115
Source File: Kafka.scala From event-sourcing-kafka-streams with MIT License | 5 votes |
package org.amitayh.invoices.web

import java.time.Duration
import java.util.Collections.singletonList
import java.util.Properties

import cats.effect._
import cats.syntax.apply._
import cats.syntax.functor._
import fs2._
import org.amitayh.invoices.common.Config
import org.amitayh.invoices.common.Config.Topics.Topic
import org.apache.kafka.clients.consumer._
import org.apache.kafka.clients.producer.{KafkaProducer, ProducerConfig, ProducerRecord, RecordMetadata}
import org.log4s.{Logger, getLogger}

import scala.collection.JavaConverters._

object Kafka {

  trait Producer[F[_], K, V] {
    def send(key: K, value: V): F[RecordMetadata]
  }

  object Producer {
    def apply[F[_]: Async, K, V](producer: KafkaProducer[K, V], topic: Topic[K, V]): Producer[F, K, V] =
      (key: K, value: V) => Async[F].async { cb =>
        val record = new ProducerRecord(topic.name, key, value)
        producer.send(record, (metadata: RecordMetadata, exception: Exception) => {
          if (exception != null) cb(Left(exception))
          else cb(Right(metadata))
        })
      }
  }

  def producer[F[_]: Async, K, V](topic: Topic[K, V]): Resource[F, Producer[F, K, V]] = Resource {
    val create = Sync[F].delay {
      val props = new Properties
      props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, Config.BootstrapServers)
      new KafkaProducer[K, V](props, topic.keySerializer, topic.valueSerializer)
    }
    create.map(producer => (Producer(producer, topic), close(producer)))
  }

  def subscribe[F[_]: Sync, K, V](topic: Topic[K, V], groupId: String): Stream[F, (K, V)] = {
    val create = Sync[F].delay {
      val props = new Properties
      props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, Config.BootstrapServers)
      props.put(ConsumerConfig.GROUP_ID_CONFIG, groupId)
      val consumer = new KafkaConsumer(props, topic.keyDeserializer, topic.valueDeserializer)
      consumer.subscribe(singletonList(topic.name))
      consumer
    }
    Stream.bracket(create)(close[F]).flatMap(consume[F, K, V])
  }

  private val logger: Logger = getLogger

  def log[F[_]: Sync](msg: String): F[Unit] = Sync[F].delay(logger.info(msg))

  private def consume[F[_]: Sync, K, V](consumer: KafkaConsumer[K, V]): Stream[F, (K, V)] = for {
    records <- Stream.repeatEval(Sync[F].delay(consumer.poll(Duration.ofSeconds(1))))
    record <- Stream.emits(records.iterator.asScala.toSeq)
  } yield record.key -> record.value

  private def close[F[_]: Sync](producer: KafkaProducer[_, _]): F[Unit] =
    Sync[F].delay(producer.close()) *> log(s"Producer closed")

  private def close[F[_]: Sync](consumer: KafkaConsumer[_, _]): F[Unit] =
    Sync[F].delay(consumer.close()) *> log("Consumer closed")
}
Example 116
Source File: MicrometerJmxModule.scala From scala-server-toolkit with MIT License | 5 votes |
package com.avast.sst.micrometer.jmx

import java.time.Duration

import cats.effect.{Resource, Sync}
import com.codahale.metrics.MetricRegistry
import com.codahale.metrics.jmx.JmxReporter
import io.micrometer.core.instrument.Clock
import io.micrometer.core.instrument.config.NamingConvention
import io.micrometer.core.instrument.util.HierarchicalNameMapper
import io.micrometer.jmx.{JmxConfig, JmxMeterRegistry}

object MicrometerJmxModule {

  def make[F[_]: Sync](
    config: MicrometerJmxConfig,
    clock: Clock = Clock.SYSTEM,
    nameMapper: HierarchicalNameMapper = HierarchicalNameMapper.DEFAULT
  ): Resource[F, JmxMeterRegistry] = {
    Resource
      .make {
        Sync[F].delay {
          if (config.enableTypeScopeNameHierarchy) {
            val dropwizardRegistry = new MetricRegistry
            val registry = new JmxMeterRegistry(
              new CustomJmxConfig(config),
              clock,
              nameMapper,
              dropwizardRegistry,
              makeJmxReporter(dropwizardRegistry, config.domain)
            )
            registry.config.namingConvention(NamingConvention.dot)
            registry
          } else {
            new JmxMeterRegistry(new CustomJmxConfig(config), clock, nameMapper)
          }
        }
      }(registry => Sync[F].delay(registry.close()))
  }

  private def makeJmxReporter(metricRegistry: MetricRegistry, domain: String) = {
    JmxReporter
      .forRegistry(metricRegistry)
      .inDomain(domain)
      .createsObjectNamesWith(new TypeScopeNameObjectNameFactory())
      .build
  }

  private class CustomJmxConfig(c: MicrometerJmxConfig) extends JmxConfig {

    override val domain: String = c.domain
    override val step: Duration = Duration.ofMillis(c.step.toMillis)

    // the method is @Nullable and we don't need to implement it here
    @SuppressWarnings(Array("scalafix:DisableSyntax.null"))
    override def get(key: String): String = null
  }
}
Example 117
Source File: ResponseTimeArbiter.scala From warp-core with MIT License | 5 votes |
package com.workday.warp.arbiters

import java.time.Duration
import java.util.concurrent.TimeUnit

import com.workday.telemetron.RequirementViolationException
import com.workday.telemetron.utils.TimeUtils
import com.workday.warp.arbiters.traits.ArbiterLike
import com.workday.warp.common.utils.Implicits._
import com.workday.warp.persistence.CorePersistenceAware
import com.workday.warp.persistence.TablesLike.TestExecutionRowLikeType
import com.workday.warp.persistence.Tables._
import com.workday.warp.utils.Ballot

class ResponseTimeArbiter extends ArbiterLike with CorePersistenceAware {

  override def vote[T: TestExecutionRowLikeType](ballot: Ballot, testExecution: T): Option[Throwable] = {
    val testId: String = this.persistenceUtils.getMethodSignature(testExecution)
    val threshold: Duration = testExecution.responseTimeRequirement.seconds
    val responseTime: Duration = Duration.ofNanos(TimeUtils.toNanos(testExecution.responseTime, TimeUnit.SECONDS))

    if (threshold.isPositive && responseTime > threshold) {
      Option(new RequirementViolationException(
        s"$testId violated response time requirement: expected ${threshold.humanReadable} (${threshold.toMillis} ms)" +
          s", but measured ${responseTime.humanReadable} (${responseTime.toMillis} ms)"
      ))
    }
    else {
      None
    }
  }
}
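The comparison above uses warp's enriched Duration syntax (isPositive, >). With plain java.time the same check looks like this minimal sketch:

import java.time.Duration

val threshold = Duration.ofSeconds(2)
val responseTime = Duration.ofMillis(2500)
val violated = !(threshold.isZero || threshold.isNegative) && responseTime.compareTo(threshold) > 0
// violated == true: 2.5 s exceeds the 2 s requirement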
Example 118
Source File: RequirementSpec.scala From warp-core with MIT License | 5 votes |
package com.workday.telemetron.junit

import java.time.Duration
import java.util.concurrent.TimeUnit

import com.workday.telemetron.RequirementViolationException
import com.workday.telemetron.annotation.Required
import com.workday.telemetron.spec.TelemetronJUnitSpec
import com.workday.warp.common.category.UnitTest
import org.junit.experimental.categories.Category
import org.junit.Test

// Class header inferred from the file name; the original declaration is
// elided in this listing.
class RequirementSpec extends TelemetronJUnitSpec {

  @Test(expected = classOf[IllegalStateException])
  @Category(Array(classOf[UnitTest]))
  def setDurationTwice(): Unit = {
    this.telemetron.setResponseTime(Duration.ofMillis(1040))
    this.telemetron.setResponseTime(Duration.ofMillis(1050))
  }

  @Test
  @Category(Array(classOf[UnitTest]))
  @Required(maxResponseTime = 1, timeUnit = TimeUnit.SECONDS)
  def testName(): Unit = {
    classOf[RequirementSpec].getName + ".testName" should be (this.telemetron.getTestName)
  }

  @Test
  @Category(Array(classOf[UnitTest]))
  @Required(maxResponseTime = 10, timeUnit = TimeUnit.MILLISECONDS)
  def exceedsThreshold(): Unit = {
    val thrown = this.telemetron.getThrown
    thrown.expect(classOf[RequirementViolationException])
    thrown.expectMessage("Response time requirement exceeded, specified: 0:00:00.010 (10 ms)")
    Thread.sleep(11)
  }
}
Example 119
Source File: MeasuredStatementSpec.scala From warp-core with MIT License | 5 votes |
package com.workday.telemetron.junit

import java.time.Duration

import com.workday.telemetron.annotation.Measure
import com.workday.telemetron.spec.TelemetronJUnitSpec
import com.workday.warp.common.category.UnitTest
import com.workday.warp.persistence.CorePersistenceAware
import org.junit.experimental.categories.Category
import org.junit.{AfterClass, Test}
import org.scalatest.matchers.should.Matchers

// The enclosing declaration, the measured test methods, and the
// MEASURED_TEST_ID / UNMEASURED_TEST_ID constants are elided in this listing.
// A companion object is inferred below so the @AfterClass hook (which JUnit
// requires to be static) has a home; treat it as a sketch, not the original.
object MeasuredStatementSpec extends CorePersistenceAware with Matchers {

  @AfterClass
  def verifyPersistence(): Unit = {
    val measuredQuery = this.persistenceUtils.readTestExecutionQuery(this.MEASURED_TEST_ID)
    val maybeTestExecution = this.persistenceUtils.synchronously(measuredQuery).headOption
    maybeTestExecution shouldBe defined
    maybeTestExecution.get.responseTime should equal (10)

    val unmeasuredQuery = this.persistenceUtils.readTestExecutionQuery(this.UNMEASURED_TEST_ID)
    this.persistenceUtils.synchronously(unmeasuredQuery).headOption shouldBe None
  }
}
Example 120
Source File: AdapterSpec.scala From warp-core with MIT License | 5 votes |
package com.workday.warp.adapters

import java.time.Duration

import com.workday.warp.TrialResult
import com.workday.warp.common.spec.WarpJUnitSpec
import com.workday.warp.junit.UnitTest
import com.workday.warp.persistence.{ExecutionTag, Tag}

import scala.util.Try

object AdapterSpecConstants {
  val duration: Duration = Duration.ofNanos(1000000)
}

// NOTE: in this listing the two adapters were garbled together: the
// "successful" adapter was shown throwing, and FailureInvokationAdapter was
// missing entirely. They are restored below so each matches the spec; the
// exact TrialResult constructor shape is assumed.
class SuccessfulInvokationAdapter(override val tags: List[Tag] = List.empty[Tag])
    extends Adapter[String]("com.wday.warp.adap.spec", tags) {
  override def invoke(): Try[TrialResult[String]] =
    Try(TrialResult(maybeResponseTime = Some(AdapterSpecConstants.duration)))
}

class FailureInvokationAdapter(override val tags: List[Tag] = List.empty[Tag])
    extends Adapter[String]("com.wday.warp.adap.spec", tags) {
  override def invoke(): Try[TrialResult[String]] =
    Try(throw new RuntimeException("Failed!"))
}

class AdapterSpec extends WarpJUnitSpec {

  @UnitTest
  def measureSuccessSpec(): Unit = {
    val specAdapter: SuccessfulInvokationAdapter = new SuccessfulInvokationAdapter(List(ExecutionTag("key", "value")))
    specAdapter.measure().maybeResponseTime should be (Some(AdapterSpecConstants.duration))
  }

  @UnitTest
  def measureFailureSpec(): Unit = {
    val specAdapter: FailureInvokationAdapter = new FailureInvokationAdapter(List(ExecutionTag("key", "value")))
    Try(specAdapter.measure()).isFailure should be (true)
  }
}
Example 121
Source File: DecoratedIntSpec.scala From warp-core with MIT License | 5 votes |
package com.workday.warp.common.utils

import java.time.Duration

import com.workday.warp.common.spec.WarpJUnitSpec
import com.workday.warp.common.utils.Implicits._
import com.workday.warp.junit.UnitTest

// Class header inferred from the file name; the original declaration is
// elided in this listing.
class DecoratedIntSpec extends WarpJUnitSpec {

  @UnitTest
  def durationTest(): Unit = {
    (1 nano) should be (Duration.ofNanos(1))
    (5 nanos) should be (Duration.ofNanos(5))
    (5 nanoseconds) should be (Duration.ofNanos(5))

    (2000 microseconds) should be (Duration.ofMillis(2))
    (2000 micros) should be (Duration.ofMillis(2))
    (1 micro) should be (Duration.ofNanos(1000))

    (1 milli) should be (Duration.ofMillis(1))
    (5 millis) should be (Duration.ofMillis(5))
    (5 milliseconds) should be (Duration.ofMillis(5))

    (1 second) should be (Duration.ofSeconds(1))
    (5 seconds) should be (Duration.ofSeconds(5))

    (1 minute) should be (Duration.ofMinutes(1))
    (5 minutes) should be (Duration.ofMinutes(5))

    (1 hour) should be (Duration.ofHours(1))
    (5 hours) should be (Duration.ofHours(5))

    (1 day) should be (Duration.ofDays(1))
    (5 days) should be (Duration.ofDays(5))
  }
}
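The postfix forms above come from warp-core's Implicits. A minimal sketch of how such an Int-to-Duration DSL can be written (illustrative names only, not the actual warp-core implementation; postfix calls like `1 nano` also require scala.language.postfixOps at the use site):

import java.time.Duration

object IntDurationSyntax {
  // Value class: adds duration-builder methods to Int without boxing.
  implicit class IntDurationOps(val n: Int) extends AnyVal {
    def nano: Duration    = Duration.ofNanos(n.toLong)
    def nanos: Duration   = nano
    def micro: Duration   = Duration.ofNanos(n.toLong * 1000)
    def micros: Duration  = micro
    def milli: Duration   = Duration.ofMillis(n.toLong)
    def millis: Duration  = milli
    def second: Duration  = Duration.ofSeconds(n.toLong)
    def seconds: Duration = second
    def minutes: Duration = Duration.ofMinutes(n.toLong)
    def hours: Duration   = Duration.ofHours(n.toLong)
    def days: Duration    = Duration.ofDays(n.toLong)
  }
}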
Example 122
Source File: DecoratedLongSpec.scala From warp-core with MIT License | 5 votes |
package com.workday.warp.common.utils

import java.time.Duration
import java.util.concurrent.TimeUnit

import com.workday.telemetron.utils.TimeUtils
import com.workday.warp.common.spec.WarpJUnitSpec
import com.workday.warp.common.utils.Implicits.DecoratedLong
import com.workday.warp.junit.UnitTest

class DecoratedLongSpec extends WarpJUnitSpec {

  @UnitTest
  def decoratedLongSpec(): Unit = {
    val timeUnit: Long = 10000

    val expectedNanoDuration: Duration = Duration ofNanos TimeUtils.toNanos(timeUnit, TimeUnit.NANOSECONDS)
    timeUnit.nanoseconds should be (expectedNanoDuration)
    timeUnit.nanos should be (expectedNanoDuration)
    timeUnit.nano should be (expectedNanoDuration)

    val expectedMicroDuration: Duration = Duration ofNanos TimeUtils.toNanos(timeUnit, TimeUnit.MICROSECONDS)
    timeUnit.microseconds should be (expectedMicroDuration)
    timeUnit.micros should be (expectedMicroDuration)
    timeUnit.micro should be (expectedMicroDuration)

    val expectedMilliDuration: Duration = Duration ofNanos TimeUtils.toNanos(timeUnit, TimeUnit.MILLISECONDS)
    timeUnit.milliseconds should be (expectedMilliDuration)
    timeUnit.millis should be (expectedMilliDuration)
    timeUnit.milli should be (expectedMilliDuration)

    val expectedSecondDuration: Duration = Duration ofNanos TimeUtils.toNanos(timeUnit, TimeUnit.SECONDS)
    timeUnit.seconds should be (expectedSecondDuration)
    timeUnit.second should be (expectedSecondDuration)

    val expectedMinuteDuration: Duration = Duration ofNanos TimeUtils.toNanos(timeUnit, TimeUnit.MINUTES)
    timeUnit.minutes should be (expectedMinuteDuration)
    timeUnit.minute should be (expectedMinuteDuration)

    val expectedHourDuration: Duration = Duration ofNanos TimeUtils.toNanos(timeUnit, TimeUnit.HOURS)
    timeUnit.hours should be (expectedHourDuration)
    timeUnit.hour should be (expectedHourDuration)

    val expectedDayDuration: Duration = Duration ofNanos TimeUtils.toNanos(timeUnit, TimeUnit.DAYS)
    timeUnit.days should be (expectedDayDuration)
    timeUnit.day should be (expectedDayDuration)
  }
}
Example 123
Source File: DurationUtilsSpec.scala From warp-core with MIT License | 5 votes |
package com.workday.warp.common.utils

import java.time.Duration

import com.workday.warp.common.spec.WarpJUnitSpec
import com.workday.warp.common.utils.Implicits.DecoratedDuration
import com.workday.warp.junit.UnitTest

class DurationUtilsSpec extends WarpJUnitSpec {

  @UnitTest
  def durationComparison(): Unit = {
    (Duration.ofMillis(5) < Duration.ofMillis(4)) should be (false)
    (Duration.ofMillis(5) < Duration.ofMillis(5)) should be (false)
    (Duration.ofMillis(5) < Duration.ofMillis(6)) should be (true)

    (Duration.ofMillis(5) <= Duration.ofMillis(4)) should be (false)
    (Duration.ofMillis(5) <= Duration.ofMillis(5)) should be (true)
    (Duration.ofMillis(5) <= Duration.ofMillis(6)) should be (true)

    (Duration.ofMillis(5) >= Duration.ofMillis(4)) should be (true)
    (Duration.ofMillis(5) >= Duration.ofMillis(5)) should be (true)
    (Duration.ofMillis(5) >= Duration.ofMillis(6)) should be (false)

    (Duration.ofMillis(5) > Duration.ofMillis(4)) should be (true)
    (Duration.ofMillis(5) > Duration.ofMillis(5)) should be (false)
    (Duration.ofMillis(5) > Duration.ofMillis(6)) should be (false)

    (Duration.ofMillis(5) max Duration.ofMillis(4)) should be (Duration ofMillis 5)
    (Duration.ofMillis(4) max Duration.ofMillis(5)) should be (Duration ofMillis 5)
  }

  @UnitTest
  def durationArithmetic(): Unit = {
    (Duration.ofMillis(5) + Duration.ofMillis(4)) should be (Duration ofMillis 9)
    (Duration.ofMillis(5) - Duration.ofMillis(4)) should be (Duration ofMillis 1)
    (Duration.ofMillis(5) * 4.0) should be (Duration ofMillis 20) // Double
    (Duration.ofMillis(5) * 4) should be (Duration ofMillis 20)   // Long
  }
}
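The operators exercised above come from the DecoratedDuration implicit. A minimal sketch of how such comparison and arithmetic operators can be added to java.time.Duration with an implicit value class, using only the standard library (illustrative, not the warp-core implementation):

import java.time.Duration

object DurationSyntax {
  implicit class RichDuration(val self: Duration) extends AnyVal {
    // Comparisons delegate to Duration's natural ordering.
    def <(other: Duration): Boolean  = self.compareTo(other) < 0
    def <=(other: Duration): Boolean = self.compareTo(other) <= 0
    def >(other: Duration): Boolean  = self.compareTo(other) > 0
    def >=(other: Duration): Boolean = self.compareTo(other) >= 0

    // Arithmetic delegates to the built-in plus/minus/multipliedBy;
    // Double multiplication goes through nanoseconds.
    def +(other: Duration): Duration = self.plus(other)
    def -(other: Duration): Duration = self.minus(other)
    def *(factor: Long): Duration    = self.multipliedBy(factor)
    def *(factor: Double): Duration  = Duration.ofNanos((self.toNanos * factor).toLong)

    def max(other: Duration): Duration = if (self.compareTo(other) >= 0) self else other
  }
}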
Example 124
Source File: DecoratedDoubleSpec.scala From warp-core with MIT License | 5 votes |
package com.workday.warp.common.utils

import java.time.Duration
import java.util.concurrent.TimeUnit

import com.workday.telemetron.utils.TimeUtils
import com.workday.warp.common.spec.WarpJUnitSpec
import com.workday.warp.common.utils.Implicits.DecoratedDouble
import com.workday.warp.junit.UnitTest

class DecoratedDoubleSpec extends WarpJUnitSpec {

  @UnitTest
  def decoratedDoubleSpec(): Unit = {
    val timeUnit: Double = 10000.0

    val expectedNanoDuration: Duration = Duration ofNanos TimeUtils.toNanos(timeUnit, TimeUnit.NANOSECONDS)
    timeUnit.nanoseconds should be (expectedNanoDuration)
    timeUnit.nanos should be (expectedNanoDuration)
    timeUnit.nano should be (expectedNanoDuration)

    val expectedMicroDuration: Duration = Duration ofNanos TimeUtils.toNanos(timeUnit, TimeUnit.MICROSECONDS)
    timeUnit.microseconds should be (expectedMicroDuration)
    timeUnit.micros should be (expectedMicroDuration)
    timeUnit.micro should be (expectedMicroDuration)

    val expectedMilliDuration: Duration = Duration ofNanos TimeUtils.toNanos(timeUnit, TimeUnit.MILLISECONDS)
    timeUnit.milliseconds should be (expectedMilliDuration)
    timeUnit.millis should be (expectedMilliDuration)
    timeUnit.milli should be (expectedMilliDuration)

    val expectedSecondDuration: Duration = Duration ofNanos TimeUtils.toNanos(timeUnit, TimeUnit.SECONDS)
    timeUnit.seconds should be (expectedSecondDuration)
    timeUnit.second should be (expectedSecondDuration)

    val expectedMinuteDuration: Duration = Duration ofNanos TimeUtils.toNanos(timeUnit, TimeUnit.MINUTES)
    timeUnit.minutes should be (expectedMinuteDuration)
    timeUnit.minute should be (expectedMinuteDuration)

    val expectedHourDuration: Duration = Duration ofNanos TimeUtils.toNanos(timeUnit, TimeUnit.HOURS)
    timeUnit.hours should be (expectedHourDuration)
    timeUnit.hour should be (expectedHourDuration)

    val expectedDayDuration: Duration = Duration ofNanos TimeUtils.toNanos(timeUnit, TimeUnit.DAYS)
    timeUnit.days should be (expectedDayDuration)
    timeUnit.day should be (expectedDayDuration)
  }
}
Example 125
Source File: ExperimentVariantEventRedisServiceTest.scala From izanami with Apache License 2.0 | 5 votes |
package specs.redis.abtesting

import java.time.Duration

import domains.abtesting.events.impl.ExperimentVariantEventRedisService
import domains.abtesting.AbstractExperimentServiceTest
import domains.abtesting.events.ExperimentVariantEventService
import env.{DbDomainConfig, DbDomainConfigDetails, Master}
import org.scalactic.source.Position
import org.scalatest.{BeforeAndAfter, BeforeAndAfterAll}
import store.redis.{RedisClientBuilder, RedisWrapper}
import test.FakeApplicationLifecycle
import zio.{Exit, Reservation}

import scala.jdk.CollectionConverters._

class ExperimentVariantEventRedisServiceTest
    extends AbstractExperimentServiceTest("Redis")
    with BeforeAndAfter
    with BeforeAndAfterAll {

  import zio.interop.catz._

  val redisWrapper: Reservation[Any, Throwable, Option[RedisWrapper]] = runtime.unsafeRun(
    RedisClientBuilder
      .redisClient(
        Some(Master("localhost", 6380, 5)),
        system
      )
      .reserve
  )

  private val maybeRedisWrapper: Option[RedisWrapper] = runtime.unsafeRun(redisWrapper.acquire)

  override def dataStore(name: String): ExperimentVariantEventService.Service =
    ExperimentVariantEventRedisService(DbDomainConfig(env.Redis, DbDomainConfigDetails(name, None), None), maybeRedisWrapper)

  override protected def before(fun: => Any)(implicit pos: Position): Unit = {
    super.before(fun)
    deleteAllData
  }

  override protected def afterAll(): Unit = {
    super.afterAll()
    deleteAllData
    runtime.unsafeRun(redisWrapper.release(Exit.unit))
  }

  private def deleteAllData =
    maybeRedisWrapper.get.connection
      .sync()
      .del(maybeRedisWrapper.get.connection.sync().keys("*").asScala.toSeq: _*)
}
Example 126
Source File: RedisJsonDataStoreTest.scala From izanami with Apache License 2.0 | 5 votes |
package specs.redis.store

import java.time.Duration

import env.Master
import org.scalactic.source.Position
import org.scalatest.{BeforeAndAfter, BeforeAndAfterAll}
import store.AbstractJsonDataStoreTest
import test.FakeApplicationLifecycle

import scala.jdk.CollectionConverters._

import store.redis.RedisWrapper
import store.redis.RedisClientBuilder
import store.redis.RedisJsonDataStore
import zio.{Exit, Reservation}

class RedisJsonDataStoreTest extends AbstractJsonDataStoreTest("Redis") with BeforeAndAfter with BeforeAndAfterAll {

  val redisWrapper: Reservation[Any, Throwable, Option[RedisWrapper]] = runtime.unsafeRun(
    RedisClientBuilder
      .redisClient(
        Some(Master("localhost", 6380, 5)),
        system
      )
      .reserve
  )

  private val maybeRedisWrapper: Option[RedisWrapper] = runtime.unsafeRun(redisWrapper.acquire)

  override def dataStore(name: String): RedisJsonDataStore =
    RedisJsonDataStore(maybeRedisWrapper.get, name)

  override protected def before(fun: => Any)(implicit pos: Position): Unit = {
    super.before(fun)
    deleteAllData
  }

  override protected def afterAll(): Unit = {
    super.afterAll()
    deleteAllData
    runtime.unsafeRun(redisWrapper.release(Exit.unit))
  }

  private def deleteAllData =
    maybeRedisWrapper.get.connection.sync().del(maybeRedisWrapper.get.connection.sync().keys("*").asScala.toSeq: _*)
}
Example 127
Source File: ExponentialBackOffHandler.scala From kafka-connect-common with Apache License 2.0 | 5 votes |
package com.datamountaineer.streamreactor.connect.source

import java.time.Duration

import com.typesafe.scalalogging.StrictLogging

class ExponentialBackOffHandler(name: String, step: Duration, cap: Duration) extends StrictLogging {
  private var backoff = new ExponentialBackOff(step, cap)

  def ready = backoff.passed

  def failure = {
    backoff = backoff.nextFailure
    logger.info(s"$name: Next poll will be around ${backoff.endTime}")
  }

  def success = {
    backoff = backoff.nextSuccess
    logger.info(s"$name: Backing off. Next poll will be around ${backoff.endTime}")
  }

  def update(status: Boolean): Unit = {
    if (status) {
      success
    } else {
      failure
    }
  }

  def remaining = backoff.remaining
}
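A minimal polling-loop sketch for the handler above. `pollOnce` is a hypothetical source poll returning true on success, and the ExponentialBackOff class the handler wraps is not shown in this listing:

import java.time.Duration

object PollerExample {
  def pollOnce(): Boolean = ??? // hypothetical: poll the source, report success

  def main(args: Array[String]): Unit = {
    val backOff = new ExponentialBackOffHandler("my-source", Duration.ofSeconds(1), Duration.ofMinutes(5))
    while (true) {
      // Only poll once the current backoff interval has elapsed;
      // success resets the interval, failure grows it toward the cap.
      if (backOff.ready) backOff.update(pollOnce())
      Thread.sleep(100)
    }
  }
}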