org.scalatest.time.Millis Scala Examples

The following examples show how to use org.scalatest.time.Millis. They are extracted from open source projects; the source file, project, and license are noted above each example.
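Most of the examples below use Millis in one of three recurring ways: building a Span for a ScalaFutures/Eventually PatienceConfig, setting the polling interval of eventually, or bounding a block of code with failAfter. The minimal sketch below (a hypothetical MillisUsageSpec written against ScalaTest 3.x; the names and durations are illustrative, not taken from any project on this page) shows all three together:

import org.scalatest.concurrent.{Eventually, ScalaFutures, Signaler, ThreadSignaler, TimeLimits}
import org.scalatest.matchers.should.Matchers
import org.scalatest.time.{Millis, Seconds, Span}
import org.scalatest.wordspec.AnyWordSpec

import scala.concurrent.Future
import scala.concurrent.ExecutionContext.Implicits.global

// Hypothetical suite illustrating the common uses of Millis seen in the examples below.
class MillisUsageSpec extends AnyWordSpec with Matchers with ScalaFutures with Eventually with TimeLimits {

  // ScalaTest 3.x needs an explicit Signaler so failAfter can interrupt the running thread.
  implicit val signaler: Signaler = ThreadSignaler

  // Span(n, Millis) builds a time span; PatienceConfig controls how long futureValue,
  // whenReady and eventually wait (timeout) and how often they poll (interval).
  implicit override val patienceConfig: PatienceConfig =
    PatienceConfig(timeout = scaled(Span(2, Seconds)), interval = scaled(Span(50, Millis)))

  "Millis" should {
    "drive timeouts and polling intervals" in {
      Future(21 * 2).futureValue shouldBe 42            // waits up to the 2-second timeout
      eventually { (1 + 1) shouldBe 2 }                 // polls every 50 millis until it passes
      failAfter(Span(500, Millis)) { Thread.sleep(10) } // fails the test if the block exceeds 500 ms
    }
  }
}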
Example 1
Source File: XMLParserXMLExtractNamespaceSpec.scala    From akka-xml-parser   with Apache License 2.0
package uk.gov.hmrc.akka.xml

import akka.stream.scaladsl.{Keep, Source}
import akka.util.ByteString
import org.scalatest.{FlatSpec, Matchers}
import org.scalatest.concurrent.{Eventually, ScalaFutures}
import org.scalatest.mock.MockitoSugar
import org.scalatest.time.{Millis, Seconds, Span}

class XMLParserXMLExtractNamespaceSpec extends FlatSpec
  with Matchers
  with ScalaFutures
  with MockitoSugar
  with Eventually
  with XMLParserFixtures {

  val f = fixtures
  implicit override val patienceConfig =
    PatienceConfig(timeout = Span(5, Seconds), interval = Span(5, Millis))

  import f._

  behavior of "CompleteChunkStage#parser"


  it should "Parse and extract several non-default namespaces" in {

    val testXMLX =
      <ns5:GovTalkMessage
      xmlns:ns0="http://www.govtalk.gov.uk/taxation/PAYE/RTI/EmployerPaymentSummary/13-14/2"
      xmlns:ns2="http://www.govtalk.gov.uk/taxation/PAYE/RTI/EmployerPaymentSummary/15-16/1"
      xmlns:ns5="http://www.govtalk.gov.uk/CM/envelope"
      xmlns:ns1="http://www.govtalk.gov.uk/taxation/PAYE/RTI/EmployerPaymentSummary/14-15/1"
      xmlns:ns3="http://www.govtalk.gov.uk/taxation/PAYE/RTI/EmployerPaymentSummary/16-17/1"
      xmlns:ns4="http://www.govtalk.gov.uk/taxation/PAYE/RTI/EmployerPaymentSummary/17-18/1"
      xmlns="">
        <ns5:EnvelopeVersion>2.0</ns5:EnvelopeVersion>
        <ns5:Header></ns5:Header>
        <ns5:GovTalkDetails></ns5:GovTalkDetails>
      </ns5:GovTalkMessage>

    val source = Source(List(ByteString(testXMLX.toString())))


    val paths = Seq[XMLInstruction](
      XMLExtract(Seq("GovTalkMessage"), Map("xmlns:ns2" -> "http://www.govtalk.gov.uk/taxation/PAYE/RTI/EmployerPaymentSummary/15-16/1")),
      XMLExtract(Seq("GovTalkMessage"), Map("xmlns:BLABLA" -> "http://www.govtalk.gov.uk/taxation/PAYE/RTI/EmployerPaymentSummary/13-14/2")),
      XMLExtract(Seq("GovTalkMessage"), Map("xmlns" -> "http://www.govtalk.gov.uk/taxation/PAYE/RTI/EmployerPaymentSummary/17-18/1")),
      XMLExtract(Seq("GovTalkMessage"), Map("xmlns" -> "http://www.govtalk.gov.uk/CM/envelope"))
    )

    val expected = Set(
      XMLElement(List("GovTalkMessage"), Map("xmlns:ns2" -> "http://www.govtalk.gov.uk/taxation/PAYE/RTI/EmployerPaymentSummary/15-16/1"), Some("")),
      XMLElement(List("GovTalkMessage"), Map("xmlns:ns0" -> "http://www.govtalk.gov.uk/taxation/PAYE/RTI/EmployerPaymentSummary/13-14/2"), Some("")),
      XMLElement(List("GovTalkMessage"), Map("xmlns:ns4" -> "http://www.govtalk.gov.uk/taxation/PAYE/RTI/EmployerPaymentSummary/17-18/1"), Some("")),
      XMLElement(List("GovTalkMessage"), Map("xmlns:ns5" -> "http://www.govtalk.gov.uk/CM/envelope"), Some("")),
      XMLElement(List(), Map(CompleteChunkStage.STREAM_SIZE -> "681"), Some(CompleteChunkStage.STREAM_SIZE))
    )

    whenReady(source.runWith(parseToXMLElements(paths))) { r =>
      r shouldBe expected
    }

    whenReady(source.runWith(parseToByteString(paths))) { r =>
      whenReady(source.toMat(collectByteString)(Keep.right).run()) { t =>
        r shouldBe t
      }
    }
  }
} 
Example 2
Source File: PersistenceTestSupport.scala    From akka-cqrs   with Apache License 2.0
package com.productfoundry.support

import java.util.UUID

import akka.testkit.{ImplicitSender, TestKit}
import org.scalatest.concurrent.Eventually
import org.scalatest.time.{Millis, Span}
import org.scalatest.{BeforeAndAfterAll, Matchers, WordSpecLike}

abstract class PersistenceTestSupport
  extends TestKit(TestConfig.testSystem)
  with ImplicitSender
  with WordSpecLike
  with Matchers
  with BeforeAndAfterAll
  with Eventually {

  def randomPersistenceId = UUID.randomUUID.toString

  implicit override val patienceConfig = PatienceConfig(
    timeout = scaled(Span(500, Millis)),
    interval = scaled(Span(10, Millis))
  )

  override protected def afterAll(): Unit = {
    TestKit.shutdownActorSystem(system)
  }
} 
Example 3
Source File: UnpersistSuite.scala    From multi-tenancy-spark   with Apache License 2.0
package org.apache.spark

import org.scalatest.concurrent.Timeouts._
import org.scalatest.time.{Millis, Span}

class UnpersistSuite extends SparkFunSuite with LocalSparkContext {
  test("unpersist RDD") {
    sc = new SparkContext("local", "test")
    val rdd = sc.makeRDD(Array(1, 2, 3, 4), 2).cache()
    rdd.count
    assert(sc.persistentRdds.isEmpty === false)
    rdd.unpersist()
    assert(sc.persistentRdds.isEmpty === true)

    failAfter(Span(3000, Millis)) {
      try {
        while (! sc.getRDDStorageInfo.isEmpty) {
          Thread.sleep(200)
        }
      } catch {
        case _: Throwable => Thread.sleep(10)
          // Do nothing. We might see exceptions because block manager
          // is racing this thread to remove entries from the driver.
      }
    }
    assert(sc.getRDDStorageInfo.isEmpty === true)
  }
} 
Example 4
Source File: UnpersistSuite.scala    From iolap   with Apache License 2.0
package org.apache.spark

import org.scalatest.concurrent.Timeouts._
import org.scalatest.time.{Millis, Span}

class UnpersistSuite extends SparkFunSuite with LocalSparkContext {
  test("unpersist RDD") {
    sc = new SparkContext("local", "test")
    val rdd = sc.makeRDD(Array(1, 2, 3, 4), 2).cache()
    rdd.count
    assert(sc.persistentRdds.isEmpty === false)
    rdd.unpersist()
    assert(sc.persistentRdds.isEmpty === true)

    failAfter(Span(3000, Millis)) {
      try {
        while (! sc.getRDDStorageInfo.isEmpty) {
          Thread.sleep(200)
        }
      } catch {
        case _: Throwable => { Thread.sleep(10) }
          // Do nothing. We might see exceptions because block manager
          // is racing this thread to remove entries from the driver.
      }
    }
    assert(sc.getRDDStorageInfo.isEmpty === true)
  }
} 
Example 5
Source File: ExampleExternalStateSpec.scala    From affinity   with Apache License 2.0
package io.amient.affinity.example

import java.util.Properties

import com.typesafe.config.ConfigFactory
import io.amient.affinity.core.cluster.Node
import io.amient.affinity.core.util.AffinityTestBase
import io.amient.affinity.kafka.EmbeddedKafka
import org.apache.kafka.clients.producer.{KafkaProducer, ProducerRecord}
import org.scalatest.concurrent.TimeLimitedTests
import org.scalatest.time.{Millis, Span}
import org.scalatest.{BeforeAndAfterAll, FlatSpec, Matchers}

import scala.collection.JavaConverters._

class ExampleExternalStateSpec extends FlatSpec with AffinityTestBase with EmbeddedKafka with Matchers with BeforeAndAfterAll
  with TimeLimitedTests {

  override def numPartitions = 2

  val config = configure(ConfigFactory.load("example-external-state"))

  val topic = config.getString("affinity.keyspace.external.state.news.storage.kafka.topic")

  val node = new Node(configure(config, Some(zkConnect), Some(kafkaBootstrap)))

  override def beforeAll: Unit = try {
    createTopic(topic)
    val externalProducer = createKafkaAvroProducer[String, String]()
    try {
      externalProducer.send(new ProducerRecord(topic, "10:30", "the universe is expanding"))
      externalProducer.send(new ProducerRecord(topic, "11:00", "the universe is still expanding"))
      externalProducer.send(new ProducerRecord(topic, "11:30", "the universe briefly contracted but is expanding again"))
      externalProducer.flush()
    } finally {
      externalProducer.close()
    }
    //the external fixture is produced and the externalProducer is flushed() before the node is started
    node.start()
    node.awaitClusterReady()
    //at this point all stores have loaded everything available in the external topic so the test will be deterministic
  } finally {
    super.beforeAll()
  }

  override def afterAll: Unit = try {
    node.shutdown()
  } finally {
    super.afterAll()
  }

  behavior of "External State"

  val timeLimit = Span(5000, Millis) //it should be much faster but sometimes many tests are run at the same time

  it should "start automatically tailing state partitions on startup even when master" in {
    //we don't need an arbitrary sleep to ensure the tailing state catches up with the writes above
    //before we fetch the latest news because the watermark is built into the request to make the test fast and deterministic
    val response = node.get_text(node.http_get(s"/news/latest"))
    response should include("10:30\tthe universe is expanding")
    response should include("11:00\tthe universe is still expanding")
    response should include("11:30\tthe universe briefly contracted but is expanding again")

  }

  private def createKafkaAvroProducer[K, V]() = new KafkaProducer[K, V](new Properties {
    put("bootstrap.servers", kafkaBootstrap)
    put("acks", "1")
    put("key.serializer", "io.amient.affinity.kafka.KafkaAvroSerializer")
    put("value.serializer", "io.amient.affinity.kafka.KafkaAvroSerializer")
    //this simply adds all configs required by KafkaAvroSerializer
    config.getConfig("affinity.avro").entrySet().asScala.foreach { case (entry) =>
      put(entry.getKey, entry.getValue.unwrapped())
    }
  })


} 
Example 6
Source File: UnpersistSuite.scala    From spark1.52   with Apache License 2.0
package org.apache.spark

import org.scalatest.concurrent.Timeouts._
import org.scalatest.time.{Millis, Span}

class UnpersistSuite extends SparkFunSuite with LocalSparkContext {
  test("unpersist RDD") {
    sc = new SparkContext("local", "test")
    val rdd = sc.makeRDD(Array(1, 2, 3, 4), 2).cache()
    //after an action runs, the cached RDD is persisted
    rdd.count
    println("rddId:"+rdd.id+"==persistentRdds=="+sc.persistentRdds(rdd.id).collect().mkString(","))
    println("==getRDDStorageInfo=="+sc.getRDDStorageInfo.length)
    //assert(sc.persistentRdds.isEmpty === true)
    println("size:"+sc.persistentRdds.size)
    assert(sc.persistentRdds.isEmpty === false)
    sc.persistentRdds.foreach(println)
    rdd.unpersist()
    assert(sc.persistentRdds.isEmpty === true)
    println("==getRDDStorageInfo=="+sc.getRDDStorageInfo.mkString(","))
    failAfter(Span(3000, Millis)) {
      try {
        while (! sc.getRDDStorageInfo.isEmpty) {

          Thread.sleep(200)
        }
      } catch {
        case _: Throwable => { Thread.sleep(10) }
          // Do nothing. We might see exceptions because block manager
          // is racing this thread to remove entries from the driver.
      }
    }
    assert(sc.getRDDStorageInfo.isEmpty === true)
  }
} 
Example 7
Source File: GrpcExceptionHandlerSpec.scala    From akka-grpc   with Apache License 2.0
package akka.grpc.scaladsl

import akka.actor.ActorSystem
import akka.grpc.GrpcServiceException
import akka.grpc.internal.{ GrpcProtocolNative, GrpcResponseHelpers, Identity }
import akka.grpc.scaladsl.GrpcExceptionHandler.defaultMapper
import akka.http.scaladsl.model.HttpEntity._
import akka.http.scaladsl.model.HttpResponse
import akka.stream.ActorMaterializer
import io.grpc.Status
import org.scalatest._
import org.scalatest.concurrent.ScalaFutures
import org.scalatest.matchers.should.Matchers
import org.scalatest.time.{ Millis, Seconds, Span }
import org.scalatest.wordspec.AnyWordSpec

import scala.concurrent.{ ExecutionException, Future }

class GrpcExceptionHandlerSpec extends AnyWordSpec with Matchers with ScalaFutures with BeforeAndAfterAll {
  implicit val system = ActorSystem("Test")
  implicit val materializer = ActorMaterializer()
  implicit override val patienceConfig =
    PatienceConfig(timeout = scaled(Span(2, Seconds)), interval = scaled(Span(5, Millis)))
  implicit val writer = GrpcProtocolNative.newWriter(Identity)

  val expected: Function[Throwable, Status] = {
    case e: ExecutionException =>
      if (e.getCause == null) Status.INTERNAL
      else expected(e.getCause)
    case grpcException: GrpcServiceException => grpcException.status
    case _: NotImplementedError              => Status.UNIMPLEMENTED
    case _: UnsupportedOperationException    => Status.UNIMPLEMENTED
    case _                                   => Status.INTERNAL
  }

  val otherTypes: Seq[Throwable] = Seq(
    new GrpcServiceException(status = Status.DEADLINE_EXCEEDED),
    new NotImplementedError,
    new UnsupportedOperationException,
    new NullPointerException,
    new RuntimeException)

  val executionExceptions: Seq[Throwable] =
    otherTypes.map(new ExecutionException(_)) :+ new ExecutionException("doh", null)

  "defaultMapper" should {
    (otherTypes ++ executionExceptions).foreach { e =>
      val exp = expected(e)
      s"Map $e to $exp" in {
        defaultMapper(system)(e).status shouldBe exp
      }
    }
  }

  "default(defaultMapper)" should {
    (otherTypes ++ executionExceptions).foreach { e =>
      s"Correctly map $e" in {
        val exp = GrpcResponseHelpers.status(defaultMapper(system)(e))
        val expChunks = getChunks(exp)
        val act = GrpcExceptionHandler.from(defaultMapper(system))(system, writer)(e).futureValue
        val actChunks = getChunks(act)
        // Compare status and chunks separately because the HttpResponse instances themselves aren't equal
        act.status shouldBe exp.status
        actChunks.toString shouldEqual expChunks.toString
      }
    }
  }

  def getChunks(resp: HttpResponse): Seq[ChunkStreamPart] =
    (resp.entity match {
      case Chunked(_, chunks) =>
        chunks.runFold(Seq.empty[ChunkStreamPart]) { case (seq, chunk) => seq :+ chunk }
      case _ => Future.successful(Seq.empty[ChunkStreamPart])
    }).futureValue

  override def afterAll(): Unit = {
    super.afterAll()
    system.terminate()
  }
} 
Example 8
Source File: AkkaDiscoveryNameResolverSpec.scala    From akka-grpc   with Apache License 2.0
package akka.grpc.internal

import java.net.InetSocketAddress

import akka.actor.ActorSystem
import akka.grpc.{ GrpcClientSettings, GrpcServiceException }
import akka.testkit.TestKit
import io.grpc.Status
import org.scalatest.concurrent.ScalaFutures
import org.scalatest.matchers.should.Matchers
import org.scalatest.time.{ Millis, Seconds, Span }
import org.scalatest.wordspec.AnyWordSpecLike

import scala.collection.JavaConverters._

class AkkaDiscoveryNameResolverSpec
    extends TestKit(ActorSystem())
    with AnyWordSpecLike
    with Matchers
    with ScalaFutures {
  implicit val ex = system.dispatcher
  implicit override val patienceConfig =
    PatienceConfig(timeout = scaled(Span(2, Seconds)), interval = scaled(Span(5, Millis)))

  "The AkkaDiscovery-backed NameResolver" should {
    "correctly report an error for an unknown hostname" in {
      val host = "example.invalid"
      val resolver = AkkaDiscoveryNameResolver(GrpcClientSettings.connectToServiceAt(host, 80))
      val probe = new NameResolverListenerProbe()

      resolver.start(probe)

      val exception = probe.future.failed.futureValue.asInstanceOf[GrpcServiceException]
      exception shouldBe an[GrpcServiceException]
      exception.status.getCode shouldBe Status.UNKNOWN.getCode
      // FIXME: This description is not portable - it arises from native function response, which differs by OS
      // exception.status.getDescription should equal(host + ": Name or service not known")
    }

    "support serving a static host/port" in {
      // Unfortunately it needs to be an actually resolvable address...
      val host = "akka.io"
      val port = 4040
      val resolver = AkkaDiscoveryNameResolver(GrpcClientSettings.connectToServiceAt(host, port))
      val probe = new NameResolverListenerProbe()

      resolver.start(probe)

      val addresses = probe.future.futureValue match {
        case Seq(addressGroup) => addressGroup.getAddresses
        case _                 => fail("Expected a single address group")
      }
      addresses.asScala.toSeq match {
        case Seq(address: InetSocketAddress) =>
          address.getPort should be(port)
          address.getAddress.getHostName should be(host)
        case other =>
          fail(s"Expected a single InetSocketAddress, got $other")
      }
    }
  }
} 
Example 9
Source File: AkkaDiscoveryNameResolverProviderSpec.scala    From akka-grpc   with Apache License 2.0
package akka.grpc.internal

import java.net.URI
import java.net.InetSocketAddress
import java.util.{ List => JList }

import scala.concurrent.ExecutionContext.Implicits._
import scala.concurrent.Future
import scala.concurrent.Promise
import scala.concurrent.duration._
import scala.collection.immutable

import io.grpc.Attributes
import io.grpc.NameResolver.Listener
import io.grpc.EquivalentAddressGroup

import akka.actor.ActorSystem
import akka.discovery.Lookup
import akka.discovery.ServiceDiscovery
import akka.discovery.ServiceDiscovery.Resolved
import akka.discovery.ServiceDiscovery.ResolvedTarget
import akka.testkit.TestKit

import org.scalatest.concurrent.ScalaFutures
import org.scalatest.matchers.should.Matchers
import org.scalatest.time.{ Millis, Seconds, Span }
import org.scalatest.wordspec.AnyWordSpecLike

class AkkaDiscoveryNameResolverProviderSpec
    extends TestKit(ActorSystem())
    with AnyWordSpecLike
    with Matchers
    with ScalaFutures {

  implicit override val patienceConfig =
    PatienceConfig(timeout = scaled(Span(2, Seconds)), interval = scaled(Span(5, Millis)))

  "AkkaDiscoveryNameResolverProviderSpec" should {
    "provide a NameResolver that uses the supplied serviceName" in {
      val serviceName = "testServiceName"
      val discovery = new ServiceDiscovery() {
        override def lookup(lookup: Lookup, resolveTimeout: FiniteDuration): Future[Resolved] = {
          lookup.serviceName should be(serviceName)
          Future.successful(Resolved(serviceName, immutable.Seq(ResolvedTarget("10.0.0.3", Some(4312), None))))
        }
      }
      val provider = new AkkaDiscoveryNameResolverProvider(
        discovery,
        443,
        portName = None,
        protocol = None,
        resolveTimeout = 3.seconds)

      val resolver = provider.newNameResolver(new URI("//" + serviceName), null)

      val addressGroupsPromise = Promise[List[EquivalentAddressGroup]]
      val listener = new Listener() {
        override def onAddresses(addresses: JList[EquivalentAddressGroup], attributes: Attributes): Unit = {
          import scala.collection.JavaConverters._
          addressGroupsPromise.success(addresses.asScala.toList)
        }
        override def onError(error: io.grpc.Status): Unit = ???
      }
      resolver.start(listener)
      val addressGroups = addressGroupsPromise.future.futureValue
      addressGroups.size should be(1)
      val addresses = addressGroups(0).getAddresses()
      addresses.size should be(1)
      val address = addresses.get(0).asInstanceOf[InetSocketAddress]
      address.getHostString() should be("10.0.0.3")
      address.getPort() should be(4312)
    }
  }

} 
Example 10
Source File: UnpersistSuite.scala    From Spark-2.3.1   with Apache License 2.0
package org.apache.spark

import org.scalatest.concurrent.{Signaler, ThreadSignaler, TimeLimits}
import org.scalatest.time.{Millis, Span}

class UnpersistSuite extends SparkFunSuite with LocalSparkContext with TimeLimits {

  // Necessary to make ScalaTest 3.x interrupt a thread on the JVM like ScalaTest 2.2.x
  implicit val defaultSignaler: Signaler = ThreadSignaler

  test("unpersist RDD") {
    sc = new SparkContext("local", "test")
    val rdd = sc.makeRDD(Array(1, 2, 3, 4), 2).cache()
    rdd.count
    assert(sc.persistentRdds.isEmpty === false)
    rdd.unpersist()
    assert(sc.persistentRdds.isEmpty === true)

    failAfter(Span(3000, Millis)) {
      try {
        while (! sc.getRDDStorageInfo.isEmpty) {
          Thread.sleep(200)
        }
      } catch {
        case _: Throwable => Thread.sleep(10)
          // Do nothing. We might see exceptions because block manager
          // is racing this thread to remove entries from the driver.
      }
    }
    assert(sc.getRDDStorageInfo.isEmpty === true)
  }
} 
Example 11
Source File: UnpersistSuite.scala    From BigDatalog   with Apache License 2.0
package org.apache.spark

import org.scalatest.concurrent.Timeouts._
import org.scalatest.time.{Millis, Span}

class UnpersistSuite extends SparkFunSuite with LocalSparkContext {
  test("unpersist RDD") {
    sc = new SparkContext("local", "test")
    val rdd = sc.makeRDD(Array(1, 2, 3, 4), 2).cache()
    rdd.count
    assert(sc.persistentRdds.isEmpty === false)
    rdd.unpersist()
    assert(sc.persistentRdds.isEmpty === true)

    failAfter(Span(3000, Millis)) {
      try {
        while (! sc.getRDDStorageInfo.isEmpty) {
          Thread.sleep(200)
        }
      } catch {
        case _: Throwable => { Thread.sleep(10) }
          // Do nothing. We might see exceptions because block manager
          // is racing this thread to remove entries from the driver.
      }
    }
    assert(sc.getRDDStorageInfo.isEmpty === true)
  }
} 
Example 12
Source File: ServerSpec.scala    From metronome   with Apache License 2.0
package dcos.metronome
package api

import org.scalatest.concurrent.ScalaFutures
import org.scalatest.time.{Millis, Second, Span}
import org.scalatestplus.play._
import play.api.ApplicationLoader.Context
import play.api.libs.ws.ahc.AhcWSComponents
import play.api.mvc.Results
import play.api.test.Helpers._

class ServerSpec
    extends PlaySpec
    with OneServerPerSuiteWithComponents[MockApiComponents with AhcWSComponents]
    with Results
    with ScalaFutures {

  override implicit def patienceConfig: PatienceConfig = PatienceConfig(Span(1, Second), Span(50, Millis))
  override def createComponents(context: Context) = new MockApiComponents(context) with AhcWSComponents

  "Server query" should {
    "work" in {
      implicit val ec = app.materializer.executionContext
      val wsClient = components.wsClient

      whenReady(wsUrl("/ping")(portNumber, wsClient).get) { response =>
        response.status mustBe OK
        response.body mustBe "pong"
      }
    }
  }
} 
Example 13
Source File: ZkJobSpecRepositoryTest.scala    From metronome   with Apache License 2.0
package dcos.metronome
package repository.impl.kv

import dcos.metronome.model.{JobId, JobSpec}
import dcos.metronome.utils.state.PersistentStoreWithNestedPathsSupport
import dcos.metronome.utils.test.Mockito
import org.scalatest.FunSuite
import org.scalatest.concurrent.ScalaFutures
import org.scalatest.time.{Millis, Seconds, Span}

import concurrent.Future

class ZkJobSpecRepositoryTest extends FunSuite with Mockito with ScalaFutures {

  override implicit def patienceConfig: PatienceConfig =
    PatienceConfig(timeout = Span(5, Seconds), interval = Span(500, Millis))

  test("delete") {
    val f = new Fixture
    f.store.delete(any).returns(Future.successful(true))

    f.repository.delete(f.jobId).futureValue

    verify(f.store).delete("job-runs/foo.bar")
    verify(f.store).delete("job-specs/foo.bar")
  }

  test("create") {
    val f = new Fixture

    f.store.createPath(any).returns(Future.successful(()))

    f.repository.create(f.jobId, f.jobSpec).failed.futureValue

    verify(f.store).createPath("job-runs/foo.bar")
    verify(f.store).create(eq("job-specs/foo.bar"), any)
  }

  class Fixture {
    val ec = scala.concurrent.ExecutionContext.global

    val store: PersistentStoreWithNestedPathsSupport = mock[PersistentStoreWithNestedPathsSupport]
    val repository = new ZkJobSpecRepository(store, ec)

    val jobId = JobId("foo.bar")
    val jobSpec = JobSpec(jobId)
  }
} 
Example 14
package com.github.j5ik2o.bank.adaptor.useCase

import akka.actor.ActorSystem
import com.github.j5ik2o.bank.adaptor.aggregate.{ BankAccountAggregate, BankAccountAggregateFlowsImpl }
import com.github.j5ik2o.bank.adaptor.dao.BankAccountReadModelFlowsImpl
import com.github.j5ik2o.bank.adaptor.readJournal.JournalReaderImpl
import com.github.j5ik2o.bank.adaptor.util.{ ActorSpec, BankAccountSpecSupport, FlywayWithMySQLSpecSupport }
import com.github.j5ik2o.bank.domain.model.{ BankAccountId, BankAccountName }
import com.github.j5ik2o.bank.useCase.{ BankAccountAggregateUseCase, BankAccountReadModelUseCase }
import com.github.j5ik2o.bank.useCase.BankAccountAggregateUseCase.Protocol._
import com.github.j5ik2o.scalatestplus.db.{ MySQLdConfig, UserWithPassword }
import com.typesafe.config.ConfigFactory
import com.wix.mysql.distribution.Version.v5_6_21
import org.scalatest.time.{ Millis, Seconds, Span }
import org.sisioh.baseunits.scala.money.Money

import scala.concurrent.duration._

class BankAccountReadModelUseCaseImplSpec
    extends ActorSpec(
      ActorSystem("BankAccountReadModelUseCaseImplSpec", ConfigFactory.load("bank-account-use-case-spec.conf"))
    )
    with FlywayWithMySQLSpecSupport
    with BankAccountSpecSupport {

  override implicit val patienceConfig: PatienceConfig =
    PatienceConfig(timeout = Span(5, Seconds), interval = Span(200, Millis))

  override protected lazy val mySQLdConfig: MySQLdConfig = MySQLdConfig(
    version = v5_6_21,
    port = Some(12345),
    userWithPassword = Some(UserWithPassword("bank", "passwd")),
    timeout = Some((30 seconds) * sys.env.getOrElse("SBT_TEST_TIME_FACTOR", "1").toDouble)
  )

  import system.dispatcher

  "BankAccountReadModelUseCaseImpl" - {
    "should be able to read read-model" in {
      val id           = bankAccountIdGenerator.generateId().futureValue
      val aggregateRef = system.actorOf(BankAccountAggregate.props, BankAccountAggregate.name(id))
      val bankAccountReadModelUseCase = new BankAccountReadModelUseCase(
        new BankAccountReadModelFlowsImpl(dbConfig.profile, dbConfig.db),
        new JournalReaderImpl()
      )
      bankAccountReadModelUseCase.execute()
      createDomainEvents(id,
                         new BankAccountAggregateUseCase(
                           new BankAccountAggregateFlowsImpl(aggregateRef)
                         ))
      awaitAssert(
        {
          val resolveBankAccountEventsSucceeded = bankAccountReadModelUseCase
            .resolveBankAccountEventsById(ResolveBankAccountEventsRequest(id))
            .futureValue
            .asInstanceOf[ResolveBankAccountEventsSucceeded]
          resolveBankAccountEventsSucceeded.bankAccountId shouldBe id
          resolveBankAccountEventsSucceeded.events.head.`type` shouldBe "deposit"
          resolveBankAccountEventsSucceeded.events.head.amount shouldBe 1000
          resolveBankAccountEventsSucceeded.events.head.currencyCode shouldBe "JPY"
        },
        3 seconds,
        50 milliseconds
      )
    }
  }

  private def createDomainEvents(id: BankAccountId, bankAccountAggregateUseCase: BankAccountAggregateUseCase) = {
    val openBankAccountSucceeded = bankAccountAggregateUseCase
      .openBankAccount(OpenBankAccountRequest(id, BankAccountName("test-1")))
      .futureValue
      .asInstanceOf[OpenBankAccountSucceeded]
    openBankAccountSucceeded.bankAccountId shouldBe id
    val depositSucceeded =
      bankAccountAggregateUseCase
        .addBankAccountEvent(DepositRequest(id, Money.yens(1000L)))
        .futureValue
        .asInstanceOf[DepositSucceeded]
    depositSucceeded.bankAccountId shouldBe id
  }
} 
Example 15
Source File: ControllerSpec.scala    From akka-ddd-cqrs-es-example   with MIT License
package com.github.j5ik2o.bank.adaptor.util

import akka.http.scaladsl.model.{ HttpEntity, MediaTypes }
import akka.http.scaladsl.testkit.ScalatestRouteTest
import akka.testkit.TestKitBase
import akka.util.ByteString
import com.github.j5ik2o.scalatestplus.db.{ MySQLdConfig, UserWithPassword }
import com.wix.mysql.distribution.Version.v5_6_21
import io.circe.Encoder
import io.circe.syntax._
import org.scalatest.concurrent.ScalaFutures
import org.scalatest.prop.PropertyChecks
import org.scalatest.time.{ Millis, Seconds, Span }
import org.scalatest.{ BeforeAndAfterAll, FreeSpecLike, Matchers }

import scala.concurrent.duration._

object ControllerSpec {

  implicit class JsonOps[A](val self: A) extends AnyVal {
    def toEntity(implicit enc: Encoder[A]): HttpEntity.Strict =
      HttpEntity(MediaTypes.`application/json`, ByteString(self.asJson.noSpaces))
  }

}

abstract class ControllerSpec
    extends FreeSpecLike
    with PropertyChecks
    with Matchers
    with BeforeAndAfterAll
    with ScalaFutures
    with FlywayWithMySQLSpecSupport
    with ScalatestRouteTest
    with TestKitBase {
  override implicit val patienceConfig: PatienceConfig =
    PatienceConfig(timeout = Span(10, Seconds), interval = Span(200, Millis))

  override def afterAll: Unit = cleanUp()

  override protected lazy val mySQLdConfig: MySQLdConfig = MySQLdConfig(
    version = v5_6_21,
    port = Some(12345),
    userWithPassword = Some(UserWithPassword("bank", "passwd")),
    timeout = Some((30 seconds) * sys.env.getOrElse("SBT_TEST_TIME_FACTOR", "1").toDouble)
  )

} 
Example 16
Source File: EntitySupport.scala    From akka-cqrs   with Apache License 2.0
package com.productfoundry.akka.cqrs

import akka.actor.{ActorRef, ActorSystem, PoisonPill, Terminated}
import akka.testkit.{ImplicitSender, TestKit}
import akka.util.Timeout
import org.scalatest.concurrent.Eventually
import org.scalatest.time.{Millis, Second, Span}
import org.scalatest.{BeforeAndAfter, BeforeAndAfterAll, Matchers, WordSpecLike}

import scala.concurrent.duration._

abstract class EntitySupport(_system: ActorSystem)
  extends TestKit(_system)
  with ImplicitSender
  with WordSpecLike
  with Matchers
  with BeforeAndAfterAll
  with BeforeAndAfter
  with Eventually {

  
  override def afterAll(): Unit = {
    TestKit.shutdownActorSystem(system)
  }
} 
Example 17
Source File: XMLParsingStopSpec.scala    From akka-xml-parser   with Apache License 2.0
package uk.gov.hmrc.akka.xml

import akka.stream.scaladsl.{Keep, Source}
import org.scalatest.{FlatSpec, Matchers}
import org.scalatest.concurrent.{Eventually, ScalaFutures}
import org.scalatest.mock.MockitoSugar
import org.scalatest.time.{Millis, Seconds, Span}

class XMLParsingStopSpec extends FlatSpec
  with Matchers
  with ScalaFutures
  with MockitoSugar
  with Eventually
  with XMLParserFixtures {

  val f = fixtures
  implicit override val patienceConfig =
    PatienceConfig(timeout = Span(5, Seconds), interval = Span(5, Millis))

  import f._

  it should "Stop parsing when the passed in xPath is encountered" in {

    val source = Source(ParserTestHelpers.getBrokenMessage(ParserTestHelpers.sa100.toString, 100))

    val paths = Seq[XMLInstruction](
      XMLExtract(Seq("GovTalkMessage", "Header", "MessageDetails", "Class")),
      XMLExtract(Seq("GovTalkMessage", "Header", "MessageDetails", "Qualifier")),
      XMLExtract(Seq("GovTalkMessage", "Header", "MessageDetails", "Function")),
      XMLExtract(Seq("GovTalkMessage", "Body", "IRenvelope", "MTR", "SA100", "YourPersonalDetails", "NationalInsuranceNumber")), //This is in the body, will not be parsed
      XMLStopParsing(Seq("GovTalkMessage", "Body"))
    )

    val expected = Set(
      XMLElement(List("GovTalkMessage", "Header", "MessageDetails", "Class"), Map(), Some("HMRC-SA-SA100")),
      XMLElement(List("GovTalkMessage", "Header", "MessageDetails", "Function"), Map(), Some("submit")),
      XMLElement(List("GovTalkMessage", "Header", "MessageDetails", "Qualifier"), Map(), Some("request"))
    )

    whenReady(source.runWith(parseToXMLElements(paths))) { r =>
      r.filterNot(a => a.value == Some(FastParsingStage.STREAM_SIZE)) shouldBe expected
    }

    whenReady(source.runWith(parseToByteString(paths))) { r =>
      whenReady(source.toMat(collectByteString)(Keep.right).run()) { t =>
        r shouldBe t
      }
    }
  }

  it should "Notify if the payload exceeded the maximum allowed size" in {
    val source = Source(ParserTestHelpers.getBrokenMessage(ParserTestHelpers.sa100.toString, 100))

    val paths = Seq[XMLInstruction](XMLExtract(Seq("GovTalkMessage", "Header", "MessageDetails", "Class")))
    val expected = Set(
      XMLElement(List("GovTalkMessage", "Header", "MessageDetails", "Class"), Map(), Some("HMRC-SA-SA100")),
      XMLElement(List(), Map(), Some("Stream max size"))
    )

    whenReady(source.runWith(parseToXMLElements(paths, Some(200)))) { r =>
      r.filterNot(a => a.value == Some(FastParsingStage.STREAM_SIZE)) shouldBe expected
    }

    whenReady(source.runWith(parseToByteString(paths))) { r =>
      whenReady(source.toMat(collectByteString)(Keep.right).run()) { t =>
        r shouldBe t
      }
    }
  }


} 
Example 18
Source File: PaginationSupportSpec.scala    From vamp   with Apache License 2.0
package io.vamp.persistence

import io.vamp.common.akka.ExecutionContextProvider
import io.vamp.common.http.OffsetResponseEnvelope
import org.junit.runner.RunWith
import org.scalatest.concurrent.ScalaFutures
import org.scalatest.junit.JUnitRunner
import org.scalatest.time.{ Millis, Seconds, Span }
import org.scalatest.{ FlatSpec, Matchers }

import scala.concurrent.{ ExecutionContext, Future }

@RunWith(classOf[JUnitRunner])
class PaginationSupportSpec extends FlatSpec with Matchers with PaginationSupport with ScalaFutures with ExecutionContextProvider {

  case class ResponseEnvelope(response: List[Int], total: Long, page: Int, perPage: Int) extends OffsetResponseEnvelope[Int]

  implicit def executionContext = ExecutionContext.global

  implicit val defaultPatience = PatienceConfig(timeout = Span(3, Seconds), interval = Span(100, Millis))

  "PaginationSupport" should "collect all from single page" in {
    val list = (1 to 5).toList

    val source = (page: Int, perPage: Int) ⇒ Future {
      ResponseEnvelope(list.take(perPage), list.size, page, perPage)
    }

    whenReady(consume(allPages(source, 5)))(_ shouldBe list)
  }

  it should "collect all from multiple pages" in {
    val list = (1 to 15).toList

    val source = (page: Int, perPage: Int) ⇒ Future {
      ResponseEnvelope(list.slice((page - 1) * perPage, page * perPage), list.size, page, perPage)
    }

    whenReady(consume(allPages(source, 5)))(_ shouldBe list)
  }

  it should "collect all from multiple pages without round total / per page" in {
    val list = (1 to 17).toList

    val source = (page: Int, perPage: Int) ⇒ Future {
      ResponseEnvelope(list.slice((page - 1) * perPage, page * perPage), list.size, page, perPage)
    }

    whenReady(consume(allPages(source, 5)))(_ shouldBe list)
  }
} 
Example 19
Source File: HiveSchemaTest.scala    From stream-reactor   with Apache License 2.0
package com.landoop.streamreactor.hive.it

import java.util.concurrent.TimeUnit

import com.landoop.streamreactor.connect.hive.{DatabaseName, TableName}
import org.apache.kafka.connect.data.Schema
import org.scalatest.concurrent.Eventually
import org.scalatest.matchers.should.Matchers
import org.scalatest.time.{Millis, Span}
import org.scalatest.wordspec.AnyWordSpec

import scala.collection.JavaConverters._
import scala.io.Source
import scala.util.Random

class HiveSchemaTest extends AnyWordSpec with Matchers with PersonTestData with Eventually with HiveTests {

  private implicit val patience: PatienceConfig = PatienceConfig(Span(60000, Millis), Span(5000, Millis))

  case class Foo(s: String, l: Long, b: Boolean, d: Double)
  def foo = Foo("string", Random.nextLong, Random.nextBoolean, Random.nextDouble)

  "Hive" should {
    "create correct schema for table" in {

      val topic = createTopic()
      val taskDef = Source.fromInputStream(getClass.getResourceAsStream("/hive_sink_task_no_partitions.json")).getLines().mkString("\n")
        .replace("{{TOPIC}}", topic)
        .replace("{{TABLE}}", topic)
        .replace("{{NAME}}", topic)
      postTask(taskDef)

      val producer = stringStringProducer()
      writeRecords(producer, topic, JacksonSupport.mapper.writeValueAsString(foo), 2000)
      producer.close(30, TimeUnit.SECONDS)

      // wait for some data to have been flushed
      eventually {
        withConn { conn =>
          val stmt = conn.createStatement
          val rs = stmt.executeQuery(s"select count(*) FROM $topic")
          rs.next()
          rs.getLong(1) should be > 0L
        }
      }

      // check that the schema is correct
      val schema = com.landoop.streamreactor.connect.hive.schema(DatabaseName("default"), TableName(topic))
      schema.fields().asScala.map(_.name).toSet shouldBe Set("s", "b", "l", "d")
      schema.field("s").schema().`type`() shouldBe Schema.Type.STRING
      schema.field("l").schema().`type`() shouldBe Schema.Type.INT64
      schema.field("d").schema().`type`() shouldBe Schema.Type.FLOAT64
      schema.field("b").schema().`type`() shouldBe Schema.Type.BOOLEAN

      stopTask(topic)
    }
  }
} 
Example 20
Source File: HiveParquetWithPartitionTest.scala    From stream-reactor   with Apache License 2.0
package com.landoop.streamreactor.hive.it

import java.util.concurrent.TimeUnit

import org.apache.hadoop.fs.Path
import org.scalatest.concurrent.Eventually
import org.scalatest.matchers.should.Matchers
import org.scalatest.time.{Millis, Span}
import org.scalatest.wordspec.AnyWordSpec

import scala.io.Source

class HiveParquetWithPartitionTest extends AnyWordSpec with Matchers with PersonTestData with Eventually with HiveTests {

  private implicit val patience: PatienceConfig = PatienceConfig(Span(60000, Millis), Span(5000, Millis))

  "Hive" should {
    "write partitioned records" in {

      val count = 100000L

      val topic = createTopic()
      val taskDef = Source.fromInputStream(getClass.getResourceAsStream("/hive_sink_task_with_partitions.json")).getLines().mkString("\n")
        .replace("{{TOPIC}}", topic)
        .replace("{{TABLE}}", topic)
        .replace("{{NAME}}", topic)
      postTask(taskDef)

      val producer = stringStringProducer()
      writeRecords(producer, topic, JacksonSupport.mapper.writeValueAsString(person), count)
      producer.close(30, TimeUnit.SECONDS)

      // wait for some data to have been flushed
      eventually {
        withConn { conn =>
          val stmt = conn.createStatement
          val rs = stmt.executeQuery(s"select count(*) FROM $topic")
          if (rs.next()) {
            val count = rs.getLong(1)
            println(s"Current count for $topic is $count")
            count should be > 100L
          } else {
            fail()
          }
        }
      }

      // we should see every partition created
      eventually {
        withConn { conn =>
          val stmt = conn.createStatement
          val rs = stmt.executeQuery(s"select distinct state from $topic")
          var count = 0
          while (rs.next()) {
            count = count + 1
          }
          println(s"State count is $count")
          count shouldBe states.length
        }
      }

      // check for the presence of each partition directory
      val table = metastore.getTable("default", topic)
      for (state <- states) {
        fs.exists(new Path(table.getSd.getLocation, s"state=$state")) shouldBe true
      }

      stopTask(topic)
    }
  }
} 
Example 21
Source File: HiveSourceTest.scala    From stream-reactor   with Apache License 2.0
package com.landoop.streamreactor.hive.it

import java.util.Collections
import java.util.concurrent.TimeUnit

import org.scalatest.concurrent.Eventually
import org.scalatest.matchers.should.Matchers
import org.scalatest.time.{Millis, Span}
import org.scalatest.wordspec.AnyWordSpec

import scala.io.Source

class HiveSourceTest extends AnyWordSpec with Matchers with PersonTestData with Eventually with HiveTests {

  private implicit val patience: PatienceConfig = PatienceConfig(Span(60000, Millis), Span(5000, Millis))

  "Hive" should {
    "read non partitioned table" in {
      val count = 2000L

      val inputTopic = createTopic()
      val sinkTaskDef = Source.fromInputStream(getClass.getResourceAsStream("/hive_sink_task_no_partitions.json")).getLines().mkString("\n")
        .replace("{{TOPIC}}", inputTopic)
        .replace("{{TABLE}}", inputTopic)
        .replace("{{NAME}}", inputTopic)
      postTask(sinkTaskDef)

      val producer = stringStringProducer()
      writeRecords(producer, inputTopic, JacksonSupport.mapper.writeValueAsString(person), count)
      producer.close(30, TimeUnit.SECONDS)

      // we should now have 2000 records in hive which we can test via jdbc
      eventually {
        withConn { conn =>
          val stmt = conn.createStatement
          val rs = stmt.executeQuery(s"select count(*) from $inputTopic")
          rs.next()
          rs.getLong(1) shouldBe count
        }
      }

      stopTask(inputTopic)

      // now we can read them back in
      val outputTopic = createTopic()

      val sourceTaskDef = Source.fromInputStream(getClass.getResourceAsStream("/hive_source_task.json")).getLines().mkString("\n")
        .replace("{{TOPIC}}", outputTopic)
        .replace("{{TABLE}}", inputTopic)
        .replace("{{NAME}}", outputTopic)
      postTask(sourceTaskDef)

      // we should have all 2000 records on the outputTopic
      var records = 0L
      val consumer = stringStringConsumer("earliest")
      consumer.subscribe(Collections.singleton(outputTopic))
      eventually {
        records = records + readRecords(consumer, outputTopic, 2, TimeUnit.SECONDS).size
        records shouldBe count
      }

      stopTask(outputTopic)
    }
  }
} 
Example 22
Source File: HiveParquetTest.scala    From stream-reactor   with Apache License 2.0
package com.landoop.streamreactor.hive.it

import java.util.concurrent.TimeUnit

import org.scalatest.concurrent.Eventually
import org.scalatest.matchers.should.Matchers
import org.scalatest.time.{Millis, Span}
import org.scalatest.wordspec.AnyWordSpec

import scala.io.Source

class HiveParquetTest extends AnyWordSpec with Matchers with PersonTestData with Eventually with HiveTests {

  private implicit val patience: PatienceConfig = PatienceConfig(Span(30000, Millis), Span(2000, Millis))

  "Hive" should {
    "write records" in {

      val count = 10000L

      val topic = createTopic()
      val taskDef = Source.fromInputStream(getClass.getResourceAsStream("/hive_sink_task_no_partitions.json")).getLines().mkString("\n")
        .replace("{{TOPIC}}", topic)
        .replace("{{TABLE}}", topic)
        .replace("{{NAME}}", topic)
      postTask(taskDef)

      val producer = stringStringProducer()
      writeRecords(producer, topic, JacksonSupport.mapper.writeValueAsString(person), count)
      producer.close(30, TimeUnit.SECONDS)

      // we now should have 10000 records in hive which we can test via jdbc
      eventually {
        withConn { conn =>
          val stmt = conn.createStatement
          val rs = stmt.executeQuery(s"select count(*) from $topic")
          rs.next()
          rs.getLong(1) shouldBe count
        }
      }

      stopTask(topic)
    }
  }
} 
Example 23
Source File: AuthenticationRouterSpec.scala    From akka-http-rest-api   with MIT License
package authentication

import akka.actor.ActorSystem
import akka.http.scaladsl.model.StatusCodes
import akka.http.scaladsl.server.Route
import core.ErrorWrapper
import token.TokenRepository
import user.UserRepository
import utils.{FlatSpecWithRedis, FlatSpecWithSql, BaseRoutesSpec}
import akka.http.scaladsl.marshallers.sprayjson.SprayJsonSupport._
import UserAuthJsonProtocol._
import core.CommonJsonProtocol._
import org.scalatest.time.{Millis, Seconds, Span}

class AuthenticationRouterSpec extends BaseRoutesSpec with FlatSpecWithSql with FlatSpecWithRedis {
  spec =>

  override implicit val actorSystem: ActorSystem = spec.system

  val tokenRepo = new TokenRepository
  val userRepo = new UserRepository
  val userAuthRepo = new UserAuthRepository
  val userAuthService = new UserAuthService(tokenRepo, userAuthRepo, userRepo)

  val authRouter = new AuthenticationRouter with TestRoutesSupport {
    override val userAuthService = spec.userAuthService

    override implicit val redis = spec.redis
  }

  val routes = Route.seal(authRouter.authenticationRoutes)

  // how long to wait before declaring that a future has timed out, and how often to poll it
  implicit val defaultPatience = PatienceConfig(timeout = Span(2, Seconds), interval = Span(50, Millis))

  "POST /register" should "register new user" in {
    Post("/auth/register", UserRegistrationRequest("[email protected]", "password", "Jan", "Kowalski", 1, 0)) ~> routes ~> check {
      userAuthRepo.findByUserEmail("[email protected]").futureValue should be('defined)
      status should be(StatusCodes.Created)
    }
  }

  "POST /register with invalid data" should "result in an error" in {
    Post("/auth/register") ~> routes ~> check {
      status should be(StatusCodes.BadRequest)
    }
  }

  "POST /register with an existing email" should "return 409 with an error message" in {
    userAuthService.register(UserRegistrationRequest("[email protected]", "password", "Jan", "Kowalski", 1, 0)).futureValue

    Post("/auth/register", UserRegistrationRequest("[email protected]", "password", "Jan", "Kowalski", 1, 0)) ~> routes ~> check {
      status should be(StatusCodes.Conflict)
      entityAs[ErrorWrapper].userMessage should be("E-mail already in use")
    }
  }

  "POST /login with valid data" should "result in login user" in {
    userAuthService.register(UserRegistrationRequest("[email protected]", "password", "Jan", "Kowalski", 1, 0)).futureValue

    Post("/auth/login", UserLoginRequest("[email protected]", "password")) ~> routes ~> check {
      status should be(StatusCodes.OK)
    }
  }

  "POST /login with invalid data" should "result in an error" in {
    Post("/auth/login") ~> routes ~> check {
      status should be(StatusCodes.BadRequest)
    }
  }

  "POST /login with invalid password" should "result in an error" in {
    userAuthService.register(UserRegistrationRequest("[email protected]", "password", "Jan", "Kowalski", 1, 0)).futureValue

    Post("/auth/login", UserLoginRequest("[email protected]", "pass")) ~> routes ~> check {
      status should be(StatusCodes.BadRequest)
      entityAs[ErrorWrapper].userMessage should be("Password is invalid")
    }
  }

  "POST /login with unknown email" should "result in an error" in {
    Post("/auth/login", UserLoginRequest("[email protected]", "password")) ~> routes ~> check {
      status should be(StatusCodes.BadRequest)
      entityAs[ErrorWrapper].userMessage should be("E-mail does not exist")
    }
  }

  "POST /auth/whatever" should "not be bound to /auth - reject unmatchedPath request" in {
    Post("/auth/whatever") ~> routes ~> check {
      status should be(StatusCodes.NotFound)
    }
  }
} 
Example 24
Source File: FuturesTest.scala    From courscala   with Apache License 2.0
package org.coursera.common.concurrent

import org.junit.Test
import org.scalatest.concurrent.ScalaFutures
import org.scalatest.junit.AssertionsForJUnit
import org.scalatest.time.Millis
import org.scalatest.time.Seconds
import org.scalatest.time.Span

import scala.concurrent.Future

class FuturesTest extends AssertionsForJUnit with ScalaFutures {

  implicit override val patienceConfig =
    PatienceConfig(timeout = scaled(Span(1, Seconds)), interval = scaled(Span(20, Millis)))

  import scala.concurrent.ExecutionContext.Implicits.global

  @Test
  def map(): Unit = {
    val raw = Map("a" -> Future.successful(1), "b" -> Future.successful(2))
    val expected = Map("a" -> 1, "b" -> 2)

    assertResult(expected)(Futures.map(raw).futureValue)
  }

  @Test
  def extract(): Unit = {
    val Futures.Extract(future1, future2) = Futures.immediate((1, 2))

    assertResult(1)(future1.futureValue)
    assertResult(2)(future2.futureValue)
  }

} 
Example 25
Source File: SQLQuerySpec.scala    From scruid   with Apache License 2.0
package ing.wbaa.druid

import java.time.{ LocalDateTime, ZonedDateTime }

import akka.stream.scaladsl.Sink
import ing.wbaa.druid.SQL._
import ing.wbaa.druid.client.CirceDecoders
import io.circe.generic.auto._
import org.scalatest.concurrent.ScalaFutures
import org.scalatest.matchers.should.Matchers
import org.scalatest.time.{ Millis, Seconds, Span }
import org.scalatest.wordspec.AnyWordSpec

class SQLQuerySpec extends AnyWordSpec with Matchers with ScalaFutures with CirceDecoders {
  implicit override val patienceConfig =
    PatienceConfig(timeout = Span(20, Seconds), interval = Span(5, Millis))
  private val totalNumberOfEntries  = 39244
  private val usOnlyNumberOfEntries = 528

  implicit val config = DruidConfig()
  implicit val mat    = config.client.actorMaterializer

  case class Result(hourTime: ZonedDateTime, count: Int)

  "SQL query" should {

    val sqlQuery: SQLQuery = dsql"""
      |SELECT FLOOR(__time to HOUR) AS hourTime, count(*) AS "count"
      |FROM wikipedia
      |WHERE "__time" BETWEEN TIMESTAMP '2015-09-12 00:00:00' AND TIMESTAMP '2015-09-13 00:00:00'
      |GROUP BY 1
      |""".stripMargin

    "successfully be interpreted by Druid" in {
      val resultsF = sqlQuery.execute()
      whenReady(resultsF) { response =>
        response.list[Result].map(_.count).sum shouldBe totalNumberOfEntries
      }
    }

    "support streaming" in {
      val resultsF = sqlQuery.streamAs[Result]().runWith(Sink.seq)

      whenReady(resultsF) { results =>
        results.map(_.count).sum shouldBe totalNumberOfEntries
      }
    }
  }

  "SQL parameterized query" should {

    val fromDateTime   = LocalDateTime.of(2015, 9, 12, 0, 0, 0, 0)
    val untilDateTime  = fromDateTime.plusDays(1)
    val countryIsoCode = "US"

    val sqlQuery: SQLQuery =
      dsql"""
      |SELECT FLOOR(__time to HOUR) AS hourTime, count(*) AS "count"
      |FROM wikipedia
      |WHERE "__time" BETWEEN ${fromDateTime} AND ${untilDateTime} AND countryIsoCode = ${countryIsoCode}
      |GROUP BY 1
      |""".stripMargin

    "be expressed as a parameterized query with three parameters" in {
      sqlQuery.query.count(_ == '?') shouldBe 3
      sqlQuery.parameters.size shouldBe 3

      sqlQuery.parameters(0) shouldBe SQLQueryParameter(SQLQueryParameterType.Timestamp,
                                                        "2015-09-12 00:00:00")
      sqlQuery.parameters(1) shouldBe SQLQueryParameter(SQLQueryParameterType.Timestamp,
                                                        "2015-09-13 00:00:00")
      sqlQuery.parameters(2) shouldBe SQLQueryParameter(SQLQueryParameterType.Varchar, "US")
    }

    "successfully be interpreted by Druid" in {
      val resultsF = sqlQuery.execute()
      whenReady(resultsF) { response =>
        response.list[Result].map(_.count).sum shouldBe usOnlyNumberOfEntries
      }
    }

    "support streaming" in {
      val resultsF = sqlQuery.streamAs[Result]().runWith(Sink.seq)

      whenReady(resultsF) { results =>
        results.map(_.count).sum shouldBe usOnlyNumberOfEntries
      }

    }

  }
} 
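
Besides the implicit PatienceConfig used above, whenReady also accepts per-call timeout(...) and interval(...) arguments that override the implicit configuration for a single assertion. A minimal sketch of that variant (the pre-completed future is a stand-in for a real asynchronous result):

import org.scalatest.concurrent.ScalaFutures
import org.scalatest.time.{Millis, Seconds, Span}

import scala.concurrent.Future

class WhenReadySketch extends ScalaFutures {
  val response: Future[Int] = Future.successful(42)

  // The explicit arguments win over the implicit PatienceConfig for this call only.
  whenReady(response, timeout(Span(2, Seconds)), interval(Span(5, Millis))) { r =>
    assert(r == 42)
  }
}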
Example 30
Source File: FutureAwaitWithFailFastFnTest.scala    From kafka-connect-common   with Apache License 2.0
package com.datamountaineer.streamreactor.connect.concurrent

import java.util.concurrent.Executors

import com.datamountaineer.streamreactor.connect.concurrent.ExecutorExtension._
import org.scalactic.source.Position
import org.scalatest.concurrent.{Eventually, TimeLimits}
import org.scalatest.matchers.should.Matchers
import org.scalatest.time.{Millis, Span}
import org.scalatest.wordspec.AnyWordSpec

import scala.util.{Failure, Try}


class FutureAwaitWithFailFastFnTest extends AnyWordSpec with Matchers with Eventually with TimeLimits {


  "FutureAwaitWithFailFastFn" should {
    "return when all the futures have completed" in {
      val exec = Executors.newFixedThreadPool(10)
      val futures = (1 to 5).map(i => exec.submit {
        Thread.sleep(300)
        i
      })
      eventually {
        val result = FutureAwaitWithFailFastFn(exec, futures)
        exec.isTerminated shouldBe true
        result shouldBe Seq(1, 2, 3, 4, 5)
      }
    }

    "stop when the first futures times out" in {
      val exec = Executors.newFixedThreadPool(6)
      val futures = for (i <- 1 to 10) yield {
        exec.submit {
          if (i == 4) {
            Thread.sleep(1000)
            sys.error("this task failed.")
          } else {
            Thread.sleep(50000)
          }
        }
      }

      eventually {
        val t = Try(FutureAwaitWithFailFastFn(exec, futures))
        t.isFailure shouldBe true
        t.asInstanceOf[Failure[_]].exception.getMessage shouldBe "this task failed."
        exec.isTerminated shouldBe true
      }
    }
  }

} 
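
The eventually block used above retries its body until it stops throwing or the timeout elapses; like whenReady, it accepts explicit timeout and interval arguments. A self-contained sketch assuming nothing beyond ScalaTest itself:

import org.scalatest.concurrent.Eventually._
import org.scalatest.time.{Millis, Seconds, Span}

object EventuallySketch extends App {
  var attempts = 0
  eventually(timeout(Span(2, Seconds)), interval(Span(10, Millis))) {
    attempts += 1
    assert(attempts > 3) // throws (and is retried every ~10 ms) until the fourth attempt
  }
  println(s"succeeded after $attempts attempts")
}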
Example 31
Source File: HttpContactPointRoutesSpec.scala    From akka-management   with Apache License 2.0
package akka.management.cluster.bootstrap.contactpoint

import akka.cluster.{ Cluster, ClusterEvent }
import akka.event.NoLogging
import akka.http.scaladsl.testkit.ScalatestRouteTest
import akka.management.cluster.bootstrap.ClusterBootstrapSettings
import akka.testkit.{ SocketUtil, TestProbe }
import org.scalatest.concurrent.Eventually
import org.scalatest.time.{ Millis, Seconds, Span }
import org.scalatest.{ Matchers, WordSpecLike }

class HttpContactPointRoutesSpec
    extends WordSpecLike
    with Matchers
    with ScalatestRouteTest
    with HttpBootstrapJsonProtocol
    with Eventually {

  implicit override val patienceConfig: PatienceConfig =
    PatienceConfig(timeout = scaled(Span(3, Seconds)), interval = scaled(Span(50, Millis)))

  override def testConfigSource =
    s"""
    akka {
      remote {
        netty.tcp {
          hostname = "127.0.0.1"
          port = ${SocketUtil.temporaryServerAddress("127.0.0.1").getPort}
        }
      }
    }
    """.stripMargin

  "Http Bootstrap routes" should {

    val settings = ClusterBootstrapSettings(system.settings.config, NoLogging)
    val httpBootstrap = new HttpClusterBootstrapRoutes(settings)

    "empty list if node is not part of a cluster" in {
      ClusterBootstrapRequests.bootstrapSeedNodes("") ~> httpBootstrap.routes ~> check {
        responseAs[String] should include(""""seedNodes":[]""")
      }
    }

    "include seed nodes when part of a cluster" in {
      val cluster = Cluster(system)
      cluster.join(cluster.selfAddress)

      val p = TestProbe()
      cluster.subscribe(p.ref, ClusterEvent.InitialStateAsEvents, classOf[ClusterEvent.MemberUp])
      val up = p.expectMsgType[ClusterEvent.MemberUp]
      up.member should ===(cluster.selfMember)

      eventually {
        ClusterBootstrapRequests.bootstrapSeedNodes("") ~> httpBootstrap.routes ~> check {
          val response = responseAs[HttpBootstrapJsonProtocol.SeedNodes]
          response.seedNodes should !==(Set.empty)
          response.seedNodes.map(_.node) should contain(cluster.selfAddress)
        }
      }
    }
  }

} 
Example 32
Source File: UnpersistSuite.scala    From drizzle-spark   with Apache License 2.0
package org.apache.spark

import org.scalatest.concurrent.Timeouts._
import org.scalatest.time.{Millis, Span}

class UnpersistSuite extends SparkFunSuite with LocalSparkContext {
  test("unpersist RDD") {
    sc = new SparkContext("local", "test")
    val rdd = sc.makeRDD(Array(1, 2, 3, 4), 2).cache()
    rdd.count
    assert(sc.persistentRdds.isEmpty === false)
    rdd.unpersist()
    assert(sc.persistentRdds.isEmpty === true)

    failAfter(Span(3000, Millis)) {
      try {
        while (! sc.getRDDStorageInfo.isEmpty) {
          Thread.sleep(200)
        }
      } catch {
        case _: Throwable => Thread.sleep(10)
          // Do nothing. We might see exceptions because block manager
          // is racing this thread to remove entries from the driver.
      }
    }
    assert(sc.getRDDStorageInfo.isEmpty === true)
  }
} 
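
Unlike the polling helpers, failAfter bounds a block of code: the test fails if the block runs past the given Span. A small sketch using the same Timeouts import as the suite above (newer ScalaTest releases rename this trait to TimeLimits, as Example 30 shows):

import org.scalatest.concurrent.Timeouts._
import org.scalatest.time.{Millis, Span}

object FailAfterSketch extends App {
  failAfter(Span(100, Millis)) {
    Thread.sleep(10) // finishes well inside the limit; sleeping past 100 ms would fail the test
  }
}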
Example 33
Source File: KafkaIntSpec.scala    From kafka-configurator   with BSD 3-Clause "New" or "Revised" License
package common

import cakesolutions.kafka.testkit.KafkaServer
import kafka.utils.ZkUtils
import org.apache.kafka.clients.admin.AdminClient
import org.apache.kafka.clients.admin.AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG
import org.scalatest.BeforeAndAfterAll
import org.scalatest.concurrent.PatienceConfiguration
import org.scalatest.time.{ Millis, Seconds, Span }

import scala.collection.JavaConverters._
import scala.concurrent.duration._

abstract class KafkaIntSpec extends BaseSpec with BeforeAndAfterAll with PatienceConfiguration {

  override implicit val patienceConfig = PatienceConfig(Span(3, Seconds), Span(250, Millis))

  val kafkaServer = new KafkaServer()
  val kafkaPort = kafkaServer.kafkaPort

  val zkSessionTimeout = 30 seconds
  val zkConnectionTimeout = 30 seconds

  lazy val zkUtils = ZkUtils(s"localhost:${kafkaServer.zookeeperPort}", zkSessionTimeout.toMillis.toInt,
    zkConnectionTimeout.toMillis.toInt, isZkSecurityEnabled = false)

  lazy val kafkaAdminClient = AdminClient.create(Map[String, AnyRef](
    BOOTSTRAP_SERVERS_CONFIG -> s"localhost:$kafkaPort"
  ).asJava)

  override def beforeAll() = kafkaServer.startup()

  override def afterAll() = {
    kafkaAdminClient.close()
    zkUtils.close()
    kafkaServer.close()
  }

} 
Example 34
Source File: HydraMetricsSpec.scala    From hydra   with Apache License 2.0
package hydra.core.monitor

import akka.japi.Option.Some
import kamon.Kamon
import kamon.metric.{Counter, Gauge}
import org.scalamock.scalatest.proxy.MockFactory
import org.scalatest.{BeforeAndAfterAll, _}
import org.scalatest.concurrent.{Eventually, ScalaFutures}
import org.scalatest.flatspec.AnyFlatSpecLike
import org.scalatest.matchers.should.Matchers
import org.scalatest.time.{Millis, Seconds, Span}
import scalacache.guava.GuavaCache

import scala.concurrent.ExecutionContext.Implicits.global
import scala.util.{Random, Try}

class HydraMetricsSpec
    extends Matchers
    with AnyFlatSpecLike
    with Eventually
    with BeforeAndAfterAll
    with BeforeAndAfterEach
    with MockFactory
    with ScalaFutures {

  import HydraMetrics._
  import scalacache.modes.try_._

  implicit override val patienceConfig =
    PatienceConfig(
      timeout = scaled(Span(2, Seconds)),
      interval = scaled(Span(5, Millis))
    )

  override def beforeEach() = {
    gaugesCache.removeAll()
    countersCache.removeAll()
    histogramsCache.removeAll()
  }

  override def afterAll = Try(Kamon.stopModules())

  val lookup = "lookup.xyz"
  val lookup2 = "lookup.abc"

  def generateTags: Seq[(String, String)] = Seq("tag1" -> "Everything's fine.")

  "An object mixing in HydraMetrics" should
    "create new counters with new lookup keys + metric names" in {
    shouldCreateNewMetric[Counter](incrementCounter _, countersCache)
  }

  it should
    "create new gauges with new lookup keys + metric names" in {
    shouldCreateNewMetric[Gauge](incrementGauge _, gaugesCache)
  }

  it should "lookup existing counters" in {
    shouldLookupExistingMetric[Counter](incrementCounter _, countersCache)
  }

  it should
    "lookup an existing gauge" in {
    shouldLookupExistingMetric[Gauge](decrementGauge _, gaugesCache)
  }

  it should
    "lookup an existing histogram" in {
    val f = recordToHistogram _

    whenReady(f(lookup, "histogram.metric", 100, generateTags)) { r =>
      whenReady(f(lookup, "histogram.metric", 100, generateTags)) { x =>
        r shouldEqual x
      }
    }
  }

  private def shouldCreateNewMetric[A](
      f: (String, String, => Seq[(String, String)]) => Unit,
      cache: GuavaCache[A]
  ) = {
    cache.get(lookup).map { result => result shouldBe None }

    f(lookup, "metric" + Random.nextInt(Integer.MAX_VALUE), generateTags)

    cache.get(lookup).map { result => result shouldBe a[Some[_]] }
  }

  private def shouldLookupExistingMetric[A](
      f: (String, String, => Seq[(String, String)]) => Unit,
      cache: GuavaCache[A]
  ) = {
    val metric = "metric" + Random.nextInt(Integer.MAX_VALUE)

    f(lookup, metric, generateTags) shouldEqual f(lookup, metric, generateTags)
  }
} 
Example 35
Source File: KinesisProducerIntegrationSpec.scala    From reactive-kinesis   with Apache License 2.0
package com.weightwatchers.reactive.kinesis

import java.io.File

import com.amazonaws.services.kinesis.producer.{KinesisProducer => AWSKinesisProducer}
import com.typesafe.config.ConfigFactory
import com.weightwatchers.reactive.kinesis.common.{
  KinesisSuite,
  KinesisTestConsumer,
  TestCredentials
}
import com.weightwatchers.reactive.kinesis.consumer.KinesisConsumer.ConsumerConf
import com.weightwatchers.reactive.kinesis.models.ProducerEvent
import com.weightwatchers.reactive.kinesis.producer.{KinesisProducer, ProducerConf}
import org.scalatest.concurrent.Eventually
import org.scalatest.mockito.MockitoSugar
import org.scalatest.time.{Millis, Seconds, Span}
import org.scalatest.{BeforeAndAfterAll, FreeSpec, Matchers}

import scala.concurrent.duration._
import scala.language.postfixOps
import scala.util.Random

//scalastyle:off magic.number
class KinesisProducerIntegrationSpec
    extends FreeSpec
    with Matchers
    with MockitoSugar
    with BeforeAndAfterAll
    with Eventually
    with KinesisSuite {

  implicit val ece = scala.concurrent.ExecutionContext.global

  val TestStreamNrOfMessagesPerShard: Long = 0

  implicit override val patienceConfig: PatienceConfig =
    PatienceConfig(timeout = Span(5, Seconds), interval = Span(100, Millis))

  "The KinesisProducer" - {

    "Should publish a message to a stream" in new withKinesisConfForApp(
      "int-test-stream-producer-1"
    ) {

      val conf     = producerConf()
      val producer = KinesisProducer(conf)

      val existingRecordCount = testConsumer.retrieveRecords(conf.streamName, 10).size

      val event = ProducerEvent("1234", Random.alphanumeric.take(10).mkString)
      producer.addUserRecord(event)

      eventually {
        val records: Seq[String] = testConsumer.retrieveRecords(conf.streamName, 10)
        records.size shouldBe (existingRecordCount + 1)
        records should contain(
          new String(event.payload.array(), java.nio.charset.StandardCharsets.UTF_8)
        )
      }
    }
  }
}

//scalastyle:on 
Example 36
Source File: AggregationFunctionsIT.scala    From clickhouse-scala-client   with GNU Lesser General Public License v3.0
package com.crobox.clickhouse.dsl

import java.util.UUID

import com.crobox.clickhouse.TestSchemaClickhouseQuerySpec
import com.crobox.clickhouse.ClickhouseClientSpec
import org.scalatest.time.{Millis, Seconds, Span}
import spray.json.DefaultJsonProtocol._
import spray.json.RootJsonFormat

class AggregationFunctionsIT
    extends ClickhouseClientSpec
    with TestSchemaClickhouseQuerySpec {

  private val entries = 200145
  override val table1Entries: Seq[Table1Entry] =
    Seq.fill(entries)(Table1Entry(UUID.randomUUID(), numbers = Seq(1, 2, 3)))
  override val table2Entries: Seq[Table2Entry] =
    Seq.fill(entries)(Table2Entry(UUID.randomUUID(), randomString, randomInt, randomString, None))

  override implicit def patienceConfig =
    PatienceConfig(timeout = scaled(Span(10, Seconds)), interval = scaled(Span(20, Millis)))

  "Combinators" should "apply for aggregations" in {
    case class Result(columnResult: String) {
      def result = columnResult.toInt
    }
    implicit val resultFormat: RootJsonFormat[Result] = jsonFormat[String, Result](Result.apply, "result")
    val resultSimple = chExecutor
      .execute[Result](select(uniq(shieldId) as "result") from OneTestTable)
      .futureValue
    val resultExact = chExecutor
      .execute[Result](select(uniqExact(shieldId) as "result") from OneTestTable)
      .futureValue
    resultSimple.rows.head.result shouldBe (entries +- entries / 100)
    resultSimple.rows.head.result should not be entries
    resultExact.rows.head.result shouldBe entries
  }

  it should "run quantiles" in {
    case class Result(result: Seq[Int])
    implicit val resultFormat: RootJsonFormat[Result] = jsonFormat[Seq[Int], Result](Result.apply, "result")
    val result = chExecutor
      .execute[Result](
        select(quantiles(col2, 0.1F, 0.2F, 0.3F, 0.4F, 0.5F, 0.99F) as ref[Seq[Int]]("result")) from TwoTestTable
      )
      .futureValue
    result.rows.head.result should have length 6
  }

  it should "run for each" in {
    case class Result(result: Seq[String])
    implicit val resultFormat: RootJsonFormat[Result] = jsonFormat[Seq[String], Result](Result.apply, "result")
    val result = chExecutor
      .execute[Result](
        select(forEach[Int, TableColumn[Seq[Int]], Double](numbers) { column =>
          sum(column)
        } as "result") from OneTestTable
      )
      .futureValue
    val queryResult = result.rows.head.result.map(_.toInt)
    queryResult should have length 3
    queryResult should contain theSameElementsAs Seq(entries, entries * 2, entries * 3)
  }

} 
Example 37
Source File: StringFunctionsIT.scala    From clickhouse-scala-client   with GNU Lesser General Public License v3.0
package com.crobox.clickhouse.dsl

import java.util.UUID

import com.crobox.clickhouse.{ClickhouseClientSpec, TestSchemaClickhouseQuerySpec}
import org.scalatest.concurrent.ScalaFutures
import org.scalatest.time.{Millis, Seconds, Span}
import spray.json.DefaultJsonProtocol.{jsonFormat, _}
import spray.json.RootJsonFormat

class StringFunctionsIT
    extends ClickhouseClientSpec
    with TestSchemaClickhouseQuerySpec
    with ScalaFutures {

  private val columnString = "oneem,twoem,threeem"
  override val table2Entries: Seq[Table2Entry] =
    Seq(Table2Entry(UUID.randomUUID(), columnString, randomInt, randomString, None))

  override implicit def patienceConfig =
    PatienceConfig(timeout = scaled(Span(10, Seconds)), interval = scaled(Span(20, Millis)))

  case class Result(result: String)
  implicit val resultFormat: RootJsonFormat[Result] = jsonFormat[String, Result](Result.apply, "result")

  it should "split by character" in {
    val resultRows =
      chExecutor.execute[Result](select(arrayJoin(splitByChar(",", col1)) as "result") from TwoTestTable).futureValue.rows
    resultRows.length shouldBe 3
    resultRows.map(_.result) should contain theSameElementsAs Seq("oneem", "twoem", "threeem")
  }

  it should "split by string" in {
    val resultRows =
      chExecutor.execute[Result](select(arrayJoin(splitByString("em,", col1)) as "result") from TwoTestTable).futureValue.rows
    resultRows.length shouldBe 3
    resultRows.map(_.result) should contain theSameElementsAs Seq("one", "two", "threeem")
  }

  it should "concatenate string back" in {
    val resultRows =
      chExecutor
        .execute[Result](select(arrayStringConcat(splitByChar(",", col1), ",") as "result") from TwoTestTable)
        .futureValue
        .rows
    resultRows.length shouldBe 1
    resultRows.map(_.result).head shouldBe columnString

  }

} 
Example 38
Source File: ArrayFunctionsIT.scala    From clickhouse-scala-client   with GNU Lesser General Public License v3.0
package com.crobox.clickhouse.dsl

import com.crobox.clickhouse.ClickhouseClientSpec
import com.crobox.clickhouse.dsl.execution.ClickhouseQueryExecutor
import com.crobox.clickhouse.dsl.language.ClickhouseTokenizerModule
import com.crobox.clickhouse.internal.QuerySettings
import com.crobox.clickhouse.testkit.ClickhouseSpec
import org.scalatest.time.{Millis, Seconds, Span}

import scala.concurrent.Future

class ArrayFunctionsIT extends ClickhouseClientSpec with ClickhouseSpec {
  implicit lazy val chExecutor: ClickhouseQueryExecutor            = ClickhouseQueryExecutor.default(clickClient)
  implicit lazy val clickhouseTokenizer: ClickhouseTokenizerModule = new ClickhouseTokenizerModule {}

  override implicit def patienceConfig =
    PatienceConfig(timeout = scaled(Span(10, Seconds)), interval = scaled(Span(20, Millis)))

  private def execute(query: Query): Future[String] = {
    implicit val settings: QuerySettings = QuerySettings()
    clickClient.query(clickhouseTokenizer.toSql(query.internalQuery, None)).map(_.trim)
  }

  it should "arrayFunction: has" in {
    execute(select(has(Array(1, 2, 3, 4), 2))).futureValue.toInt should be(1)
  }

  it should "arrayFunction: hasAny" in {
    execute(select(hasAny(Array(1, 2, 3, 4), Array(2)))).futureValue.toInt should be(1)
    execute(select(hasAny(Array(1, 2, 3, 4), Array(5)))).futureValue.toInt should be(0)
    execute(select(hasAny(Array(1, 2, 3, 4), Array(1,2)))).futureValue.toInt should be(1)
    execute(select(hasAny(Array(1, 2, 3, 4), Array(1,5)))).futureValue.toInt should be(1)
  }

  it should "arrayFunction: hasAll" in {
    execute(select(hasAll(Array(1, 2, 3, 4), Array(1,2)))).futureValue.toInt should be(1)
    execute(select(hasAll(Array(1, 2, 3, 4), Array(1,5)))).futureValue.toInt should be(0)
  }

  it should "arrayFunction: resize" in {
    execute(select(arrayResize(Array(1, 2, 3, 4), 3, 0))).futureValue should be("[1,2,3]")
    execute(select(arrayResize(Array(1, 2, 3, 4), 4, 0))).futureValue should be("[1,2,3,4]")
    execute(select(arrayResize(Array(1, 2, 3, 4), 5, 0))).futureValue should be("[1,2,3,4,0]")

    execute(select(arrayResize(Array("a", "b", "c", "d"), 3, "z"))).futureValue should be("['a','b','c']")
    execute(select(arrayResize(Array("a", "b", "c", "d"), 4, "z"))).futureValue should be("['a','b','c','d']")
    execute(select(arrayResize(Array("a", "b", "c", "d"), 5, "z"))).futureValue should be("['a','b','c','d','z']")
  }
} 
Example 39
Source File: AccessTokenSpec.scala    From akka-http-oauth2-client   with Apache License 2.0
package com.github.dakatsuka.akka.http.oauth2.client

import akka.actor.ActorSystem
import akka.http.scaladsl.model.{ HttpEntity, HttpResponse, StatusCodes }
import akka.http.scaladsl.model.ContentTypes.`application/json`
import akka.stream.{ ActorMaterializer, Materializer }
import org.scalatest.concurrent.ScalaFutures
import org.scalatest.time.{ Millis, Seconds, Span }
import org.scalatest.{ BeforeAndAfterAll, DiagrammedAssertions, FlatSpec }

import scala.concurrent.{ Await, ExecutionContext }
import scala.concurrent.duration.Duration

class AccessTokenSpec extends FlatSpec with DiagrammedAssertions with ScalaFutures with BeforeAndAfterAll {
  implicit val system: ActorSystem        = ActorSystem()
  implicit val ec: ExecutionContext       = system.dispatcher
  implicit val materializer: Materializer = ActorMaterializer()
  implicit val defaultPatience: PatienceConfig =
    PatienceConfig(timeout = Span(5, Seconds), interval = Span(700, Millis))

  override def afterAll(): Unit = {
    Await.ready(system.terminate(), Duration.Inf)
  }

  behavior of "AccessToken"

  it should "apply from HttpResponse" in {
    val accessToken  = "xxx"
    val tokenType    = "bearer"
    val expiresIn    = 86400
    val refreshToken = "yyy"

    val httpResponse = HttpResponse(
      status = StatusCodes.OK,
      headers = Nil,
      entity = HttpEntity(
        `application/json`,
        s"""
           |{
           |  "access_token": "$accessToken",
           |  "token_type": "$tokenType",
           |  "expires_in": $expiresIn,
           |  "refresh_token": "$refreshToken"
           |}
         """.stripMargin
      )
    )

    val result = AccessToken(httpResponse)

    whenReady(result) { token =>
      assert(token.accessToken == accessToken)
      assert(token.tokenType == tokenType)
      assert(token.expiresIn == expiresIn)
      assert(token.refreshToken.contains(refreshToken))
    }
  }
} 
Example 40
Source File: SeleniumTest.scala    From udash-core   with Apache License 2.0
package io.udash.web

import java.util.concurrent.TimeUnit

import org.openqa.selenium.firefox.{FirefoxDriver, FirefoxOptions}
import org.openqa.selenium.remote.RemoteWebDriver
import org.openqa.selenium.{Dimension, WebElement}
import org.scalatest.concurrent.Eventually
import org.scalatest.time.{Millis, Seconds, Span}
import org.scalatest.{BeforeAndAfterAll, BeforeAndAfterEach}
import org.scalatest.matchers.should.Matchers
import org.scalatest.wordspec.AnyWordSpec

private trait ServerConfig {
  def init(): Unit
  def createUrl(part: String): String
  def destroy(): Unit
}

// Doesn't launch embedded guide app server
private final class ExternalServerConfig(urlPrefix: String) extends ServerConfig {
  require(!urlPrefix.endsWith("/"))

  override def createUrl(part: String): String = {
    require(part.startsWith("/"))
    urlPrefix + part
  }

  override def init(): Unit = {}
  override def destroy(): Unit = {}
}

// Launches embedded guide server
private final class InternalServerConfig extends ServerConfig {
  private val server = Launcher.createApplicationServer()

  override def init(): Unit = server.start()

  override def destroy(): Unit = server.stop()

  override def createUrl(part: String): String = {
    require(part.startsWith("/"))
    s"http://127.0.0.2:${server.port}$part"
  }
}

abstract class SeleniumTest extends AnyWordSpec with Matchers with BeforeAndAfterAll with BeforeAndAfterEach with Eventually {
  override implicit val patienceConfig: PatienceConfig = PatienceConfig(scaled(Span(10, Seconds)), scaled(Span(50, Millis)))

  protected final val driver: RemoteWebDriver = new FirefoxDriver(new FirefoxOptions().setHeadless(true))
  driver.manage().timeouts().implicitlyWait(200, TimeUnit.MILLISECONDS)
  driver.manage().window().setSize(new Dimension(1440, 800))

  protected final def findElementById(id: String): WebElement = eventually {
    driver.findElementById(id)
  }

  protected def url: String

  private val server: ServerConfig = new InternalServerConfig

  override protected def beforeAll(): Unit = {
    super.beforeAll()
    server.init()
  }

  override protected def beforeEach(): Unit = {
    super.beforeEach()
    driver.get(server.createUrl(url))
  }

  override protected def afterAll(): Unit = {
    super.afterAll()
    server.destroy()
    driver.close()
  }
} 
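
The findElementById helper above leans on a useful property of eventually: once the block stops throwing, eventually returns the block's value, so a flaky lookup can be wrapped and its result used directly. A contrived sketch of that property (the failing-then-succeeding lookup is purely illustrative):

import org.scalatest.concurrent.Eventually
import org.scalatest.time.{Millis, Seconds, Span}

object EventuallyValueSketch extends App with Eventually {
  implicit override val patienceConfig: PatienceConfig =
    PatienceConfig(timeout = Span(1, Seconds), interval = Span(20, Millis))

  var ready = 0
  val value: Int = eventually {
    ready += 1
    if (ready < 3) sys.error("not ready yet") // first two attempts throw and are retried
    ready
  }
  assert(value == 3)
}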
Example 41
Source File: XmlrpcConnection.scala    From xmlrpc   with MIT License
package xmlrpc

import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import akka.util.Timeout
import org.scalatest.FunSpec
import org.scalatest.concurrent.ScalaFutures
import org.scalatest.time.{Millis, Seconds, Span}
import xmlrpc.protocol.XmlrpcProtocol

import scala.concurrent.duration._
import scala.language.postfixOps
import scalaz.{Success, Failure}

class XmlrpcConnection extends FunSpec with ScalaFutures {
  // Xmlrpc imports
  import Xmlrpc._
  import XmlrpcProtocol._

  // Scalatest setup
  implicit val default: PatienceConfig = PatienceConfig(timeout = Span(5, Seconds), interval = Span(500, Millis))

  // Xmrpc setup, server is up but it is not mine, found on Internet
  implicit val testServer = XmlrpcServer("http://betty.userland.com/RPC2")

  // Spray setup
  implicit val system = ActorSystem()
  implicit val ma = ActorMaterializer()
  implicit val timeout = Timeout(5 seconds)
  import system.dispatcher

  describe("The connection with a XML-RPC server") {
    it("should invoke the test method successfully in the server") {
      val invocation = invokeMethod[Int, String]("examples.getStateName", 41).underlying
      val responseMessage = "South Dakota"

      whenReady(invocation) {
        case Success(value) => assertResult(responseMessage) {value}
        case Failure(errors) => fail("Errors when deserializing\n" + errors)
      }
    }
  }
} 
Example 42
Source File: SpecHelpers.scala    From money   with Apache License 2.0
package com.comcast.money.core

import com.comcast.money.api.{ SpanId, SpanInfo }
import com.comcast.money.core.handlers.LoggingSpanHandler
import com.typesafe.config.Config
import org.scalatest.Matchers
import org.scalatest.concurrent.Eventually
import org.scalatest.time.{ Millis, Span }

import scala.collection.{ Set, mutable }
import scala.concurrent.duration._

object LogRecord {
  private val spans = new mutable.ArrayBuffer[SpanInfo]
  private val messages = new mutable.HashMap[String, mutable.Set[String]] with mutable.MultiMap[String, String]

  def clear(): Unit = {
    messages.clear()
    spans.clear()
  }

  def add(log: String, message: String): Unit = messages.addBinding(log, message)

  def add(spanInfo: SpanInfo): Unit = spans.append(spanInfo)

  def contains(log: String)(cond: String => Boolean): Boolean = messages.entryExists(log, cond)

  def contains(cond: SpanInfo => Boolean): Boolean = spans.exists(cond)

  def log(name: String): Set[String] = messages.getOrElse(name, mutable.Set.empty)
}

class LogRecorderSpanHandler extends LoggingSpanHandler {

  override def configure(config: Config): Unit = {
    super.configure(config)
    logFunction = record
  }

  override def handle(spanInfo: SpanInfo): Unit = {
    LogRecord.add(spanInfo)
    super.handle(spanInfo)
  }

  def record(message: String): Unit = LogRecord.add("log", message)
}

trait SpecHelpers extends Eventually { this: Matchers =>

  def awaitCond(condition: => Boolean, max: FiniteDuration = 2.seconds, interval: Duration = 100.millis, message: String = "failed waiting"): Unit = {
    implicit val patienceConfig: PatienceConfig = PatienceConfig(Span(max.toMillis, Millis), Span(interval.toMillis, Millis))
    eventually {
      assert(condition, message)
    }
  }

  def expectSpanInfoThat(message: String, condition: SpanInfo => Boolean, wait: FiniteDuration = 2.seconds): Unit = {
    awaitCond(
      LogRecord.contains(condition), wait, 100 milliseconds,
      s"Expected span info that $message not found after $wait")
  }

  def dontExpectSpanInfoThat(message: String, condition: SpanInfo => Boolean, wait: FiniteDuration = 2.seconds): Unit = {
    awaitCond(
      !LogRecord.contains(condition), wait, 100 milliseconds,
      s"Not expected span info that $message found after $wait")
  }

  def expectLogMessageContaining(contains: String, wait: FiniteDuration = 2.seconds) {
    awaitCond(
      LogRecord.contains("log")(_.contains(contains)), wait, 100 milliseconds,
      s"Expected log message containing string $contains not found after $wait")
  }

  def expectLogMessageContainingStrings(strings: Seq[String], wait: FiniteDuration = 2.seconds) {
    awaitCond(
      LogRecord.contains("log")(s => strings.forall(s.contains)), wait, 100 milliseconds,
      s"Expected log message containing $strings not found after $wait")
  }

  def testSpan(id: SpanId) = Money.Environment.factory.newSpan(id, "test")
} 
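
The awaitCond helper above bridges the two time APIs that appear throughout these examples: scala.concurrent.duration values supplied by callers are converted into the ScalaTest Spans that PatienceConfig requires. That conversion is worth isolating; a minimal sketch:

import org.scalatest.time.{Millis, Span}

import scala.concurrent.duration._

object SpanConversionSketch extends App {
  val max: FiniteDuration = 2.seconds
  val span: Span = Span(max.toMillis, Millis) // Span(2000, Millis)
  println(span)
}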
Example 43
Source File: UnpersistSuite.scala    From sparkoscope   with Apache License 2.0
package org.apache.spark

import org.scalatest.concurrent.Timeouts._
import org.scalatest.time.{Millis, Span}

class UnpersistSuite extends SparkFunSuite with LocalSparkContext {
  test("unpersist RDD") {
    sc = new SparkContext("local", "test")
    val rdd = sc.makeRDD(Array(1, 2, 3, 4), 2).cache()
    rdd.count
    assert(sc.persistentRdds.isEmpty === false)
    rdd.unpersist()
    assert(sc.persistentRdds.isEmpty === true)

    failAfter(Span(3000, Millis)) {
      try {
        while (! sc.getRDDStorageInfo.isEmpty) {
          Thread.sleep(200)
        }
      } catch {
        case _: Throwable => Thread.sleep(10)
          // Do nothing. We might see exceptions because block manager
          // is racing this thread to remove entries from the driver.
      }
    }
    assert(sc.getRDDStorageInfo.isEmpty === true)
  }
} 
Example 44
Source File: UnpersistSuite.scala    From SparkCore   with Apache License 2.0
package org.apache.spark

import org.scalatest.FunSuite
import org.scalatest.concurrent.Timeouts._
import org.scalatest.time.{Millis, Span}

class UnpersistSuite extends FunSuite with LocalSparkContext {
  test("unpersist RDD") {
    sc = new SparkContext("local", "test")
    val rdd = sc.makeRDD(Array(1, 2, 3, 4), 2).cache()
    rdd.count
    assert(sc.persistentRdds.isEmpty === false)
    rdd.unpersist()
    assert(sc.persistentRdds.isEmpty === true)

    failAfter(Span(3000, Millis)) {
      try {
        while (! sc.getRDDStorageInfo.isEmpty) {
          Thread.sleep(200)
        }
      } catch {
        case _: Throwable => { Thread.sleep(10) }
          // Do nothing. We might see exceptions because block manager
          // is racing this thread to remove entries from the driver.
      }
    }
    assert(sc.getRDDStorageInfo.isEmpty === true)
  }
} 
Example 45
Source File: InitialSpec.scala    From embedded-kafka   with Apache License 2.0
package com.tuplejump.embedded.kafka

import java.util.concurrent.{TimeUnit, CountDownLatch}

import org.apache.kafka.common.serialization.StringDeserializer
import org.scalatest.concurrent.Eventually
import org.scalatest.concurrent.PatienceConfiguration.Timeout
import org.scalatest.time.{Millis, Span}

class InitialSpec extends AbstractSpec with Eventually with Logging {

  private val timeout = Timeout(Span(10000, Millis))

  "Initially, EmbeddedKafka" must {
    val kafka = new EmbeddedKafka()
    val topic = "test"
    val total = 1000
    val latch = new CountDownLatch(total)

    "start embedded zookeeper and embedded kafka" in {
      kafka.isRunning should be (false)
      kafka.start()
      eventually(timeout)(kafka.isRunning should be (true))
    }
    "create a topic" in {
      kafka.createTopic(topic, 1, 1)
    }
    "publish messages to the embedded kafka instance" in {
      val config = kafka.consumerConfig(
        group = "some.group",
        kafkaConnect = kafka.kafkaConfig.hostName + ":" + kafka.kafkaConfig.port,
        zkConnect = kafka.kafkaConfig.zkConnect,
        offsetPolicy = "largest", // "latest" with the new consumer API
        autoCommitEnabled = true,
        kDeserializer = classOf[StringDeserializer],
        vDeserializer = classOf[StringDeserializer])
      val consumer = new SimpleConsumer(latch, config, topic, "consumer.group", 1, 1)

      val batch1 = for (n <- 0 until total) yield s"message-test-$n"

      logger.info(s"Publishing ${batch1.size} messages...")

      kafka.sendMessages(topic, batch1)
      latch.await(10000, TimeUnit.MILLISECONDS)
      latch.getCount should be (0)

      consumer.shutdown()
    }
    "shut down relatively cleanly for now" in {
      kafka.shutdown()
      eventually(timeout)(kafka.isRunning should be (false))
    }
  }
} 
Example 46
Source File: ScenarioLoadingITDivulgence.scala    From daml   with Apache License 2.0
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.platform.sandbox

import akka.stream.scaladsl.Sink
import com.daml.ledger.api.domain.LedgerId
import com.daml.ledger.api.testing.utils.{SuiteResourceManagementAroundEach, MockMessages => M}
import com.daml.ledger.api.v1.active_contracts_service.ActiveContractsServiceGrpc
import com.daml.ledger.api.v1.transaction_filter._
import com.daml.ledger.client.services.acs.ActiveContractSetClient
import com.daml.dec.DirectExecutionContext
import com.daml.platform.sandbox.services.{SandboxFixture, TestCommands}
import org.scalatest.concurrent.ScalaFutures
import org.scalatest.time.{Millis, Span}
import org.scalatest.{Matchers, WordSpec}

@SuppressWarnings(Array("org.wartremover.warts.StringPlusAny"))
class ScenarioLoadingITDivulgence
    extends WordSpec
    with Matchers
    with ScalaFutures
    with TestCommands
    with SandboxFixture
    with SuiteResourceManagementAroundEach {

  override def scenario: Option[String] = Some("Test:testDivulgenceSuccess")

  private def newACClient(ledgerId: LedgerId) =
    new ActiveContractSetClient(ledgerId, ActiveContractsServiceGrpc.stub(channel))

  override implicit def patienceConfig: PatienceConfig =
    PatienceConfig(scaled(Span(15000, Millis)), scaled(Span(150, Millis)))

  private val allTemplatesForParty = M.transactionFilter

  private def getSnapshot(transactionFilter: TransactionFilter = allTemplatesForParty) =
    newACClient(ledgerId())
      .getActiveContracts(transactionFilter)
      .runWith(Sink.seq)

  implicit val ec = DirectExecutionContext

  "ScenarioLoading" when {
    "running a divulgence scenario" should {
      "not fail" in {
        // The testDivulgenceSuccess scenario uses divulgence
        // This test checks whether the scenario completes without failing
        whenReady(getSnapshot()) { resp =>
          resp.size should equal(1)
        }
      }
    }
  }

} 
Example 47
Source File: ConsulDiscoverySpec.scala    From akka-management   with Apache License 2.0
package akka.cluster.bootstrap.discovery

import java.net.InetAddress

import akka.actor.ActorSystem
import akka.discovery.ServiceDiscovery.ResolvedTarget
import akka.discovery.consul.ConsulServiceDiscovery
import akka.testkit.TestKitBase
import com.google.common.net.HostAndPort
import com.orbitz.consul.Consul
import com.orbitz.consul.model.catalog.ImmutableCatalogRegistration
import com.orbitz.consul.model.health.ImmutableService
import com.pszymczyk.consul.{ ConsulProcess, ConsulStarterBuilder }
import org.scalatest.concurrent.ScalaFutures
import org.scalatest.time.{ Millis, Seconds, Span }
import org.scalatest.{ BeforeAndAfterAll, Matchers, WordSpecLike }

import scala.concurrent.duration._

class ConsulDiscoverySpec extends WordSpecLike with Matchers with BeforeAndAfterAll with TestKitBase with ScalaFutures {

  private val consul: ConsulProcess = ConsulStarterBuilder.consulStarter().withHttpPort(8500).build().start()

  "Consul Discovery" should {
    "work for defaults" in {
      val consulAgent =
        Consul.builder().withHostAndPort(HostAndPort.fromParts(consul.getAddress, consul.getHttpPort)).build()
      consulAgent
        .catalogClient()
        .register(
          ImmutableCatalogRegistration
            .builder()
            .service(
              ImmutableService
                .builder()
                .addTags(s"system:${system.name}", "akka-management-port:1234")
                .address("127.0.0.1")
                .id("test")
                .service("test")
                .port(1235)
                .build()
            )
            .node("testNode")
            .address("localhost")
            .build()
        )

      val lookupService = new ConsulServiceDiscovery(system)
      val resolved = lookupService.lookup("test", 10.seconds).futureValue
      resolved.addresses should contain(
        ResolvedTarget(
          host = "127.0.0.1",
          port = Some(1234),
          address = Some(InetAddress.getByName("127.0.0.1"))
        )
      )
    }
  }

  override def afterAll(): Unit = {
    super.afterAll()
    consul.close()
  }

  override implicit lazy val system: ActorSystem = ActorSystem("test")

  implicit override val patienceConfig: PatienceConfig =
    PatienceConfig(timeout = scaled(Span(30, Seconds)), interval = scaled(Span(50, Millis)))

} 
Example 48
Source File: MultiDcSpec.scala    From akka-management   with Apache License 2.0
package akka.management.cluster

import akka.actor.ActorSystem
import akka.http.scaladsl.Http
import akka.http.scaladsl.model.{ HttpRequest, StatusCodes }
import akka.http.scaladsl.unmarshalling.Unmarshal
import akka.management.scaladsl.ManagementRouteProviderSettings
import akka.stream.ActorMaterializer
import akka.testkit.SocketUtil
import com.typesafe.config.ConfigFactory
import org.scalatest.{ Matchers, WordSpec }
import org.scalatest.concurrent.{ Eventually, ScalaFutures }
import org.scalatest.time.{ Millis, Seconds, Span }

class MultiDcSpec
    extends WordSpec
    with Matchers
    with ScalaFutures
    with ClusterHttpManagementJsonProtocol
    with Eventually {

  implicit val patience: PatienceConfig = PatienceConfig(timeout = Span(10, Seconds), interval = Span(50, Millis))

  val config = ConfigFactory.parseString(
    """
      |akka.actor.provider = "cluster"
      |akka.remote.log-remote-lifecycle-events = off
      |akka.remote.netty.tcp.hostname = "127.0.0.1"
      |#akka.loglevel = DEBUG
    """.stripMargin
  )

  "Http cluster management" must {
    "allow multiple DCs" in {
      val Vector(httpPortA, portA, portB) = SocketUtil.temporaryServerAddresses(3, "127.0.0.1").map(_.getPort)
      val dcA = ConfigFactory.parseString(
        s"""
           |akka.management.http.hostname = "127.0.0.1"
           |akka.management.http.port = $httpPortA
           |akka.cluster.seed-nodes = ["akka.tcp://[email protected]:$portA"]
           |akka.cluster.multi-data-center.self-data-center = "DC-A"
           |akka.remote.netty.tcp.port = $portA
          """.stripMargin
      )
      val dcB = ConfigFactory.parseString(
        s"""
           |akka.cluster.seed-nodes = ["akka.tcp://[email protected]:$portA"]
           |akka.cluster.multi-data-center.self-data-center = "DC-B"
           |akka.remote.netty.tcp.port = $portB
          """.stripMargin
      )

      implicit val dcASystem = ActorSystem("MultiDcSystem", config.withFallback(dcA))
      val dcBSystem = ActorSystem("MultiDcSystem", config.withFallback(dcB))
      implicit val materializer = ActorMaterializer()

      val routeSettings =
        ManagementRouteProviderSettings(selfBaseUri = s"http://127.0.0.1:$httpPortA", readOnly = false)

      try {
        Http()
          .bindAndHandle(ClusterHttpManagementRouteProvider(dcASystem).routes(routeSettings), "127.0.0.1", httpPortA)
          .futureValue

        eventually {
          val response =
            Http().singleRequest(HttpRequest(uri = s"http://127.0.0.1:$httpPortA/cluster/members")).futureValue
          response.status should equal(StatusCodes.OK)
          val members = Unmarshal(response.entity).to[ClusterMembers].futureValue
          members.members.size should equal(2)
          members.members.map(_.status) should equal(Set("Up"))
        }
      } finally {
        dcASystem.terminate()
        dcBSystem.terminate()
      }
    }
  }
} 
Example 49
Source File: KafkaEventLogSpec.scala    From akka-stream-eventsourcing   with Apache License 2.0
package com.github.krasserm.ases.log

import akka.actor.ActorSystem
import akka.stream.scaladsl.{Sink, Source}
import akka.testkit.TestKit
import com.github.krasserm.ases._
import org.apache.kafka.common.TopicPartition
import org.scalatest.concurrent.ScalaFutures
import org.scalatest.time.{Millis, Seconds, Span}
import org.scalatest.{Matchers, WordSpecLike}

import scala.collection.immutable.Seq

class KafkaEventLogSpec extends TestKit(ActorSystem("test")) with WordSpecLike with Matchers with ScalaFutures with StreamSpec with KafkaSpec {
  implicit val pc = PatienceConfig(timeout = Span(5, Seconds), interval = Span(10, Millis))

  val kafkaEventLog: KafkaEventLog = new KafkaEventLog(host, port)

  "A Kafka event log" must {
    "provide a sink for writing events and a source for delivering replayed events" in {
      val topicPartition = new TopicPartition("p-1", 0)
      val events = Seq("a", "b", "c").map(Emitted(_, emitterId))
      val expected = durables(events).map(Delivered(_)) :+ Recovered

      Source(events).runWith(kafkaEventLog.sink(topicPartition)).futureValue
      kafkaEventLog.source[String](topicPartition).take(4).runWith(Sink.seq).futureValue should be(expected)
    }
    "provide a flow with an input port for writing events and and output port for delivering replayed and live events" in {
      val topicPartition = new TopicPartition("p-2", 0)
      val events1 = Seq("a", "b", "c").map(Emitted(_, emitterId))
      val events2 = Seq("d", "e", "f").map(Emitted(_, emitterId))
      val expected = (durables(events1).map(Delivered(_)) :+ Recovered) ++ durables(events2, offset = 3).map(Delivered(_))

      Source(events1).runWith(kafkaEventLog.sink(topicPartition)).futureValue
      Source(events2).via(kafkaEventLog.flow(topicPartition)).take(7).runWith(Sink.seq).futureValue should be(expected)
    }
    "provide a source that only delivers events of compatible types" in {
      val topicPartition = new TopicPartition("p-3", 0)
      val events = Seq("a", "b", 1, 2).map(Emitted(_, emitterId))
      val expected = durables(events).drop(2).map(Delivered(_)) :+ Recovered

      Source(events).runWith(kafkaEventLog.sink(topicPartition)).futureValue
      kafkaEventLog.source[Int](topicPartition).take(3).runWith(Sink.seq).futureValue should be(expected)
    }
  }
} 
Example 50
Source File: EventCollaborationSpec.scala    From akka-stream-eventsourcing   with Apache License 2.0
package com.github.krasserm.ases

import akka.NotUsed
import akka.actor.ActorSystem
import akka.stream.scaladsl.{Flow, Sink}
import akka.testkit.TestKit
import com.github.krasserm.ases.log.{KafkaEventLog, KafkaSpec}
import org.apache.kafka.common.TopicPartition
import org.scalatest.concurrent.ScalaFutures
import org.scalatest.time.{Millis, Seconds, Span}
import org.scalatest.{Matchers, WordSpecLike}

import scala.collection.immutable.Seq

class EventCollaborationSpec extends TestKit(ActorSystem("test")) with WordSpecLike with Matchers with ScalaFutures with StreamSpec with KafkaSpec {
  import EventSourcingSpec._

  implicit val pc = PatienceConfig(timeout = Span(5, Seconds), interval = Span(10, Millis))

  val emitterId1 = "processor1"
  val emitterId2 = "processor2"

  val kafkaEventLog: KafkaEventLog =
    new log.KafkaEventLog(host, port)

  def processor(emitterId: String, topicPartition: TopicPartition): Flow[Request, Response, NotUsed] =
    EventSourcing(emitterId, 0, requestHandler, eventHandler).join(kafkaEventLog.flow(topicPartition))

  "A group of EventSourcing stages" when {
    "joined with a shared event log" can {
      "collaborate via publish-subscribe" in {
        val topicPartition = new TopicPartition("p-1", 0)    // shared topic partition
        val (pub1, sub1) = probes(processor(emitterId1, topicPartition)) // processor 1
        val (pub2, sub2) = probes(processor(emitterId2, topicPartition)) // processor 2

        pub1.sendNext(Increment(3))
        // Both processors receive event but
        // only processor 1 creates response
        sub1.requestNext(Response(3))

        pub2.sendNext(Increment(-4))
        // Both processors receive event but
        // only processor 2 creates response
        sub2.requestNext(Response(-1))

        // consume and verify events emitted by both processors
        kafkaEventLog.source[Incremented](topicPartition).via(log.replayed).map {
          case Durable(event, eid, _, sequenceNr) => (event, eid, sequenceNr)
        }.runWith(Sink.seq).futureValue should be(Seq(
          (Incremented(3), emitterId1, 0L),
          (Incremented(-4), emitterId2, 1L)
        ))
      }
    }
  }
} 
Example 51
Source File: ViewTestSupport.scala    From ddd-leaven-akka-v2   with MIT License
package ecommerce.sales.view

import com.typesafe.config.Config
import org.scalatest.concurrent.ScalaFutures
import org.scalatest.time.{Millis, Seconds, Span}
import org.scalatest.{BeforeAndAfterAll, Suite}
import org.slf4j.Logger
import org.slf4j.LoggerFactory.getLogger
import pl.newicom.dddd.view.sql.SqlViewStore
import slick.dbio._

import scala.concurrent.ExecutionContext
import slick.jdbc.H2Profile

trait ViewTestSupport extends BeforeAndAfterAll with ScalaFutures {
  this: Suite =>

  def config: Config
  lazy val viewStore = new SqlViewStore(config)
  val log: Logger = getLogger(getClass)

  implicit val profile = H2Profile

  implicit class ViewStoreAction[A](a: DBIO[A])(implicit ex: ExecutionContext) {
    private val future = viewStore.run(a)

    def run(): Unit = future.map(_ => ()).futureValue
    def result: A = future.futureValue
  }

  def ensureSchemaDropped: DBIO[Unit]
  def ensureSchemaCreated: DBIO[Unit]

  implicit override val patienceConfig = PatienceConfig(
    timeout = scaled(Span(10, Seconds)),
    interval = scaled(Span(200, Millis))
  )

  override def beforeAll() {
    viewStore.run {
      ensureSchemaDropped >> ensureSchemaCreated
    }.futureValue

  }

} 
Example 52
Source File: ViewTestSupport.scala    From ddd-leaven-akka-v2   with MIT License
package ecommerce.sales.view

import com.typesafe.config.Config
import org.scalatest.concurrent.ScalaFutures
import org.scalatest.time.{Millis, Seconds, Span}
import org.scalatest.{BeforeAndAfterAll, Suite}
import org.slf4j.Logger
import org.slf4j.LoggerFactory.getLogger
import pl.newicom.dddd.view.sql.SqlViewStore
import slick.dbio._
import slick.jdbc.H2Profile

import scala.concurrent.ExecutionContext

trait ViewTestSupport extends BeforeAndAfterAll with ScalaFutures {
  this: Suite =>

  def config: Config
  lazy val viewStore = new SqlViewStore(config)
  val log: Logger = getLogger(getClass)

  implicit val profile = H2Profile

  implicit override val patienceConfig = PatienceConfig(
    timeout = scaled(Span(5, Seconds)),
    interval = scaled(Span(200, Millis))
  )

  implicit class ViewStoreAction[A](a: DBIO[A])(implicit ex: ExecutionContext) {
    private val future = viewStore.run(a)

    def run(): Unit = future.map(_ => ()).futureValue
    def result: A = future.futureValue
  }

  def ensureSchemaDropped: DBIO[Unit]
  def ensureSchemaCreated: DBIO[Unit]

  override def beforeAll() {
    val setup = viewStore.run {
      ensureSchemaDropped >> ensureSchemaCreated
    }
    assert(setup.isReadyWithin(Span(5, Seconds)))

  }

} 
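
The isReadyWithin call in beforeAll above is the non-throwing sibling of futureValue: it blocks for at most the given span and returns a Boolean instead of failing the test, which suits setup checks. A sketch, with a pre-completed future standing in for real schema setup:

import org.scalatest.concurrent.ScalaFutures
import org.scalatest.time.{Seconds, Span}

import scala.concurrent.Future

object ReadyWithinSketch extends App with ScalaFutures {
  val setup: Future[Unit] = Future.successful(())
  assert(setup.isReadyWithin(Span(5, Seconds))) // true: the future is already complete
}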
Example 53
Source File: elasticsearchExtensionsSpec.scala    From akka-stream-extensions   with Apache License 2.0
package com.mfglabs.stream
package extensions.elasticsearch

import akka.actor.ActorSystem
import akka.stream._
import akka.stream.scaladsl.Sink

import org.elasticsearch.index.query.QueryBuilders
import org.scalatest.concurrent.ScalaFutures
import org.scalatest.time.{Millis, Minutes, Span}
import org.scalatest.{BeforeAndAfterAll, Matchers, FlatSpec}

import scala.concurrent.duration._
import scala.util.Try

import org.elasticsearch.common.settings.Settings
import org.elasticsearch.node.Node

class ElasticExtensionsSpec extends FlatSpec with Matchers with ScalaFutures with BeforeAndAfterAll {
  implicit override val patienceConfig = PatienceConfig(timeout = Span(1, Minutes), interval = Span(100, Millis))

  implicit val as = ActorSystem()
  implicit val fm = ActorMaterializer()
  implicit val blockingEc = ExecutionContextForBlockingOps(scala.concurrent.ExecutionContext.Implicits.global)

  val settings = Settings.builder()
    .put("path.data", "target/elasticsearch-data")
    .put("path.home", "/")
    .put("transport.type", "local")
    .put("http.enabled", false)
    .build()

  lazy val node = new Node(settings).start()
  implicit lazy val client = node.client()

  val index = "test"
  val `type` = "type"

  "EsStream" should "execute a query a get the result as a stream" in {
    Try(client.admin.indices().prepareDelete(index).get())

    val toIndex = for (i <- 1 to 5002) yield (i, s"""{i: $i}""")
    toIndex.foreach { case (i, json) =>
      client.prepareIndex(index, `type`).setSource(json).setId(i.toString).get()
    }

    client.admin.indices.prepareRefresh(index).get() // to be sure that the data is indexed

    val res = EsStream.queryAsStream(QueryBuilders.matchAllQuery(), index, `type`, 1 minutes, 50)
      .runWith(Sink.seq)
      .futureValue

    res.sorted shouldEqual toIndex.map(_._2).sorted
  }

  override def afterAll(): Unit = {
    client.close()
    node.close()
  }

} 
Example 54
Source File: HttpDeleteSpec.scala    From http-verbs   with Apache License 2.0
package uk.gov.hmrc.http

import akka.actor.ActorSystem
import com.typesafe.config.Config
import org.mockito.ArgumentCaptor
import org.mockito.Matchers.{any, eq => is}
import org.mockito.Mockito._
import org.scalatest.concurrent.PatienceConfiguration.{Interval, Timeout}
import org.scalatest.time.{Millis, Seconds, Span}
import org.scalatest.wordspec.AnyWordSpecLike
import org.scalatest.matchers.should.Matchers
import org.scalatestplus.mockito.MockitoSugar
import uk.gov.hmrc.http.hooks.HttpHook

import scala.concurrent.{ExecutionContext, Future}

import uk.gov.hmrc.http.HttpReads.Implicits._

class HttpDeleteSpec extends AnyWordSpecLike with Matchers with MockitoSugar with CommonHttpBehaviour {

  import ExecutionContext.Implicits.global

  class StubbedHttpDelete(doDeleteResult: Future[HttpResponse], doDeleteWithHeaderResult: Future[HttpResponse]) extends HttpDelete with ConnectionTracingCapturing {
    val testHook1                                   = mock[HttpHook]
    val testHook2                                   = mock[HttpHook]
    val hooks                                       = Seq(testHook1, testHook2)
    override def configuration: Option[Config]      = None
    override protected def actorSystem: ActorSystem = ActorSystem("test-actor-system")

    def appName: String = ???

    def doDelete(url: String, headers: Seq[(String, String)])(implicit hc: HeaderCarrier, ec: ExecutionContext) =
      doDeleteResult
  }

  "HttpDelete" should {
    "be able to return plain responses" in {
      val response   = HttpResponse(200, testBody)
      val testDelete = new StubbedHttpDelete(Future.successful(response), Future.successful(response))
      testDelete.DELETE[HttpResponse](url, Seq("foo" -> "bar")).futureValue shouldBe response
    }

    "be able to return objects deserialised from JSON" in {
      val testDelete = new StubbedHttpDelete(Future.successful(HttpResponse(200, """{"foo":"t","bar":10}""")),
        Future.successful(HttpResponse(200, """{"foo":"t","bar":10}""")))
      testDelete
        .DELETE[TestClass](url, Seq("foo" -> "bar"))
        .futureValue(Timeout(Span(2, Seconds)), Interval(Span(15, Millis))) shouldBe TestClass("t", 10)
    }

    behave like anErrorMappingHttpCall("DELETE", (url, responseF) => new StubbedHttpDelete(responseF, responseF).DELETE[HttpResponse](url, Seq("foo" -> "bar")))
    behave like aTracingHttpCall("DELETE", "DELETE", new StubbedHttpDelete(defaultHttpResponse, defaultHttpResponse)) { _.DELETE[HttpResponse](url, Seq("foo" -> "bar")) }

    "Invoke any hooks provided" in {
      val dummyResponse       = HttpResponse(200, testBody)
      val dummyResponseFuture = Future.successful(dummyResponse)
      val dummyHeader         = Future.successful(dummyResponse)
      val testDelete          = new StubbedHttpDelete(dummyResponseFuture, dummyHeader)

      testDelete.DELETE[HttpResponse](url, Seq("header" -> "foo")).futureValue

      val respArgCaptor1 = ArgumentCaptor.forClass(classOf[Future[HttpResponse]])
      val respArgCaptor2 = ArgumentCaptor.forClass(classOf[Future[HttpResponse]])

      verify(testDelete.testHook1).apply(is(url), is("DELETE"), is(None), respArgCaptor1.capture())(any(), any())
      verify(testDelete.testHook2).apply(is(url), is("DELETE"), is(None), respArgCaptor2.capture())(any(), any())

      // verifying directly without ArgumentCaptor didn't work as Futures were different instances
      // e.g. Future.successful(5) != Future.successful(5)
      respArgCaptor1.getValue.futureValue shouldBe dummyResponse
      respArgCaptor2.getValue.futureValue shouldBe dummyResponse
    }
  }
} 
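
The explicit Timeout/Interval arguments passed to futureValue in the test above are a third way to control patience, alongside the implicit PatienceConfig and the whenReady overloads. A minimal sketch of the same per-call override (the future is a placeholder):

import org.scalatest.concurrent.PatienceConfiguration.{Interval, Timeout}
import org.scalatest.concurrent.ScalaFutures
import org.scalatest.time.{Millis, Seconds, Span}

import scala.concurrent.Future

object PerCallPatienceSketch extends App with ScalaFutures {
  val f: Future[String] = Future.successful("ok")
  // Applies to this call only; the implicit configuration is left untouched.
  val v = f.futureValue(Timeout(Span(2, Seconds)), Interval(Span(15, Millis)))
  assert(v == "ok")
}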
Example 55
Source File: AcceptanceSpecPatience.scala    From renku   with Apache License 2.0
package ch.renku.acceptancetests.tooling

import org.scalatest.concurrent.{AbstractPatienceConfiguration, PatienceConfiguration}
import org.scalatest.time.{Millis, Seconds, Span}

trait AcceptanceSpecPatience extends AbstractPatienceConfiguration { this: PatienceConfiguration =>

  implicit abstract override val patienceConfig: PatienceConfig = PatienceConfig(
    timeout  = scaled(Span(AcceptanceSpecPatience.WAIT_SCALE * 30, Seconds)),
    interval = scaled(Span(150, Millis))
  )
}

object AcceptanceSpecPatience {
  // Scale all wait times by a constant value.
  val WAIT_SCALE = 2
} 
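
Scaling every wait by a constant, as WAIT_SCALE does above, can also be done through ScalaTest's own hook: scaled(...) multiplies its argument by spanScaleFactor, which defaults to 1.0 and can be overridden per suite (or set globally with the Runner's -F argument). A sketch of the override approach:

import org.scalatest.concurrent.Eventually
import org.scalatest.time.{Seconds, Span}

object ScaleFactorSketch extends App with Eventually {
  override def spanScaleFactor: Double = 2.0      // every scaled(...) span is doubled
  val effective: Span = scaled(Span(30, Seconds)) // Span(60, Seconds)
  println(effective)
}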
Example 56
Source File: SecurityTest.scala    From Conseil   with Apache License 2.0
package tech.cryptonomic.conseil.api.security

import akka.http.scaladsl.testkit.ScalatestRouteTest
import org.scalatest.concurrent.ScalaFutures
import org.scalatest.time.{Millis, Seconds, Span}
import org.scalatest.{Matchers, WordSpec}
import tech.cryptonomic.conseil.api.security.Security.SecurityApi

class SecurityTest extends WordSpec with Matchers with ScalatestRouteTest with ScalaFutures {

  implicit override val patienceConfig = PatienceConfig(timeout = Span(2, Seconds), interval = Span(20, Millis))

  "The SecurityApi" should {

      "valid itself" in {
        SecurityApi(Set.empty, None).isValid shouldBe false
        SecurityApi(Set.empty, Some(false)).isValid shouldBe false

        SecurityApi(Set("some-key"), Some(false)).isValid shouldBe true
        SecurityApi(Set("some-key"), None).isValid shouldBe true
        SecurityApi(Set.empty, Some(true)).isValid shouldBe true
        SecurityApi(Set("some-key"), Some(true)).isValid shouldBe true
      }

      "validate a given key" in {
        SecurityApi(Set("some-key"), None).validateApiKey(Some("some-key")).futureValue shouldBe true
        SecurityApi(Set("some-key"), Some(true)).validateApiKey(Some("some-key")).futureValue shouldBe true

        SecurityApi(Set.empty, None).validateApiKey(Some("some-key")).futureValue shouldBe false
        SecurityApi(Set.empty, Some(true)).validateApiKey(Some("some-key")).futureValue shouldBe false

        SecurityApi(Set.empty, None).validateApiKey(None).futureValue shouldBe false
        SecurityApi(Set.empty, Some(true)).validateApiKey(None).futureValue shouldBe true
      }

    }
} 
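As an aside on the patience settings used here: futureValue polls the future at the configured interval until either the future completes or the timeout elapses, at which point the test fails. A self-contained sketch of that behaviour (the object name is illustrative):

import org.scalatest.concurrent.ScalaFutures
import org.scalatest.time.{Millis, Seconds, Span}

import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.Future

object PatienceDemo extends ScalaFutures with App {
  implicit override val patienceConfig: PatienceConfig =
    PatienceConfig(timeout = Span(2, Seconds), interval = Span(20, Millis))

  // Completes after 500 millis, well inside the 2 second timeout, so
  // futureValue returns 42; a slower future would throw TestFailedException.
  val slow = Future { Thread.sleep(500); 42 }
  println(slow.futureValue)
}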
Example 57
Source File: FutureUtilSpec.scala    From graphcool-framework   with Apache License 2.0
package cool.graph.utils.future

import org.scalatest.{Matchers, WordSpec}
import cool.graph.utils.future.FutureUtils._
import org.scalatest.concurrent.ScalaFutures._
import org.scalatest.time.{Millis, Seconds, Span}
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.Future

class FutureUtilSpec extends WordSpec with Matchers {
  val patienceConfig = PatienceConfig(timeout = Span(5, Seconds), interval = Span(5, Millis))

  "runSequentially" should {
    "run all given futures in sequence" in {

      val testList = List[() => Future[Long]](
        () => { Thread.sleep(500); Future.successful(System.currentTimeMillis()) },
        () => { Thread.sleep(250); Future.successful(System.currentTimeMillis()) },
        () => { Thread.sleep(100); Future.successful(System.currentTimeMillis()) }
      )

      val values: Seq[Long] = testList.runSequentially.futureValue(patienceConfig)
      // Completion timestamps must be strictly increasing if the futures ran in order.
      (values, values.tail).zipped.forall((a, b) => a < b) shouldBe true
    }
  }

  "andThenFuture" should {

    "Should work correctly in error and success cases" in {
      val f1 = Future.successful(100)
      val f2 = Future.failed(new Exception("This is a test"))

      whenReady(
        f1.andThenFuture(
          handleSuccess = x => Future.successful("something"),
          handleFailure = e => Future.successful("another something")
        )) { res =>
        res should be(100)
      }

      whenReady(
        f2.andThenFuture(
            handleSuccess = (x: Int) => Future.successful("something"),
            handleFailure = e => Future.successful("another something")
          )
          .failed) { res =>
        res shouldBe an[Exception]
      }
    }
  }

} 
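The FutureUtils implementation itself is not shown on this page; a minimal sketch consistent with the behaviour the tests exercise might look like the following. The method names mirror the calls above, but the bodies are assumptions, not the project's actual code:

import scala.concurrent.{ExecutionContext, Future}
import scala.util.{Failure, Success}

object FutureUtilsSketch {

  implicit class RunSequentiallyOps[A](val thunks: List[() => Future[A]]) extends AnyVal {
    // Start each future only after the previous one completes, keeping result order.
    def runSequentially(implicit ec: ExecutionContext): Future[List[A]] =
      thunks.foldLeft(Future.successful(List.empty[A])) { (accF, next) =>
        accF.flatMap(acc => next().map(acc :+ _))
      }
  }

  implicit class AndThenFutureOps[A](val future: Future[A]) extends AnyVal {
    // Run a follow-up future for the success or failure case, then propagate
    // the original outcome, analogous to Future.andThen but future-returning.
    def andThenFuture[B](handleSuccess: A => Future[B],
                         handleFailure: Throwable => Future[B])(
        implicit ec: ExecutionContext): Future[A] =
      future.transformWith { result =>
        val followUp = result match {
          case Success(a) => handleSuccess(a)
          case Failure(e) => handleFailure(e)
        }
        // Swallow follow-up errors so the original result always wins.
        followUp.recover { case _ => () }.flatMap(_ => Future.fromTry(result))
      }
  }
}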
Example 58
Source File: ElasticsearchIntegrationTest.scala    From elasticsearch-client   with Apache License 2.0
package com.sumologic.elasticsearch.restlastic

import com.sumologic.elasticsearch.restlastic.RestlasticSearchClient.ReturnTypes
import com.sumologic.elasticsearch.restlastic.dsl.Dsl._
import org.junit.runner.RunWith
import org.scalatest.concurrent.ScalaFutures
import org.scalatest.time.{Millis, Span}
import org.scalatest.{BeforeAndAfterAll, Suite}
import org.scalatestplus.junit.JUnitRunner

import scala.util.{Random, Try}



@RunWith(classOf[JUnitRunner])
trait ElasticsearchIntegrationTest extends BeforeAndAfterAll with ScalaFutures {
  this: Suite =>
  private val indexPrefix = "test-index"

  def restClient: RestlasticSearchClient

  val IndexName = s"$indexPrefix-${math.abs(Random.nextLong())}"

  protected def createIndices(cnt: Int = 1): IndexedSeq[Index] = {
    (1 to cnt).map(idx => {
      val index = dsl.Dsl.Index(s"${IndexName}-${idx}")
      val analyzerName = Name("keyword_lowercase")
      val lowercaseAnalyzer = Analyzer(analyzerName, Keyword, Lowercase)
      val notAnalyzed = Analyzer(Name("not_analyzed"), Keyword)
      val analyzers = Analyzers(
        AnalyzerArray(lowercaseAnalyzer, notAnalyzed),
        FilterArray(),
        NormalizerArray(Normalizer(Name("lowercase"), Lowercase)))
      val indexSetting = IndexSetting(12, 1, analyzers, 30)
      val indexFut = restClient.createIndex(index, Some(indexSetting))
      indexFut.futureValue
      index
    })
  }

  override def beforeAll(): Unit = {
    super.beforeAll()
    Try(delete(Index(s"$indexPrefix*")))
  }

  override def afterAll(): Unit = {
    Try(delete(Index(s"$indexPrefix*")))
    super.afterAll()
  }

  private def delete(index: Index): ReturnTypes.RawJsonResponse = {
    implicit val patienceConfig = PatienceConfig(scaled(Span(1500, Millis)), scaled(Span(15, Millis)))
    restClient.deleteIndex(index).futureValue
  }
} 
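The scaled(...) wrapper seen above multiplies each Span by the suite's spanScaleFactor (1.0 by default, settable globally via the ScalaTest Runner's -F argument). A hypothetical trait raising the factor for slow CI machines instead of passing -F:

import org.scalatest.concurrent.ScalaFutures
import org.scalatest.time.{Millis, Span}

// Hypothetical: quadruple every scaled Span instead of passing -F 4.0 to the Runner.
trait SlowCiPatience extends ScalaFutures {
  override def spanScaleFactor: Double = 4.0

  implicit override val patienceConfig: PatienceConfig =
    PatienceConfig(scaled(Span(1500, Millis)), scaled(Span(15, Millis)))
}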
Example 59
Source File: TestAggregateSpec.scala    From akka-cqrs   with Apache License 2.0
package com.productfoundry.akka.cluster

import akka.actor.Props
import com.productfoundry.akka.PassivationConfig
import com.productfoundry.akka.cqrs.{AggregateStatus, AggregateFactory, AggregateIdResolution, EntityIdResolution}
import org.scalatest.concurrent.Eventually
import org.scalatest.time.{Seconds, Millis, Span}
import test.support.ClusterSpec
import test.support.ClusterConfig._

class TestAggregateSpecMultiJvmNode1 extends TestAggregateSpec
class TestAggregateSpecMultiJvmNode2 extends TestAggregateSpec

object TestActorFactory extends AggregateFactory[TestAggregate] {
  override def props(config: PassivationConfig): Props = {
    Props(classOf[TestAggregate], config)
  }
}

class TestAggregateSpec extends ClusterSpec with Eventually {

  implicit def entityIdResolution: EntityIdResolution[TestAggregate] = new AggregateIdResolution[TestAggregate]()

  implicit def aggregateFactory: AggregateFactory[TestAggregate] = TestActorFactory

  implicit override val patienceConfig = PatienceConfig(
    timeout = scaled(Span(5, Seconds)),
    interval = scaled(Span(100, Millis))
  )

  "Test aggregate" must {

    "given cluster joined" in {
      setupSharedJournal()
      joinCluster()
    }

    enterBarrier("when")

    val entityContext = new ClusterSingletonEntityContext(system)

    "send all commands to same aggregate" in {

      def test(): Unit = {
        val aggregate = entityContext.entitySupervisorFactory[TestAggregate].getOrCreate
        val id = TestId("1")

        aggregate ! Count(id)
        expectMsgType[AggregateStatus.Success]

        eventually {
          aggregate ! GetCount(id)
          expectMsgType[GetCountResult].count shouldBe 2
        }
      }

      on(node1)(test())

      on(node2)(test())
    }
  }
}
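Finally, the eventually block above retries until the enclosed assertions stop throwing, polling at the configured interval up to the timeout. A standalone sketch of that retry loop (the object name is illustrative):

import org.scalatest.concurrent.Eventually
import org.scalatest.time.{Millis, Seconds, Span}

object EventuallyDemo extends Eventually with App {
  var attempts = 0

  // Re-runs the block every 100 millis; the assertion first holds on the
  // third attempt, well inside the 5 second timeout.
  eventually(timeout(Span(5, Seconds)), interval(Span(100, Millis))) {
    attempts += 1
    assert(attempts >= 3)
  }
  println(s"passed after $attempts attempts")
}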