org.scalatest.concurrent.Eventually Scala Examples

The following examples show how to use org.scalatest.concurrent.Eventually. Each example is drawn from an open-source project; the source file, project name, and license are noted above each example.
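Before the project examples, here is a minimal, self-contained sketch of the core API, assuming ScalaTest 3.1+ (the suite name and timing values below are illustrative, not taken from any project on this page). eventually repeatedly evaluates its block until the assertions pass or the patience timeout expires; patience can be configured suite-wide by overriding patienceConfig, or per call with timeout and interval.

import org.scalatest.concurrent.Eventually
import org.scalatest.matchers.should.Matchers
import org.scalatest.time.{Millis, Seconds, Span}
import org.scalatest.wordspec.AnyWordSpec

// Illustrative suite, not taken from any of the projects below.
class EventuallyUsageSpec extends AnyWordSpec with Matchers with Eventually {

  // Suite-wide patience: retry for up to 5 seconds, checking every 50 millis.
  override implicit val patienceConfig: PatienceConfig =
    PatienceConfig(timeout = scaled(Span(5, Seconds)), interval = scaled(Span(50, Millis)))

  "eventually" should {
    "retry its block until the assertion passes" in {
      var attempts = 0
      eventually {
        attempts += 1
        attempts should be >= 3 // fails on the first two evaluations, passes on the third
      }
    }

    "accept a per-call timeout and interval" in {
      eventually(timeout(Span(2, Seconds)), interval(Span(20, Millis))) {
        System.currentTimeMillis() should be > 0L
      }
    }
  }
}

The examples below use these same two mechanisms, some in the pre-3.1 package layout (org.scalatest.Matchers, FlatSpecLike, WordSpecLike) and some in the 3.1+ layout shown here.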
Example 1
Source File: IntegrationTest.scala    From kmq   with Apache License 2.0
package com.softwaremill.kmq.redelivery

import java.time.Duration
import java.util.Random

import akka.actor.ActorSystem
import akka.kafka.scaladsl.{Consumer, Producer}
import akka.kafka.{ConsumerSettings, ProducerMessage, ProducerSettings, Subscriptions}
import akka.stream.ActorMaterializer
import akka.testkit.TestKit
import com.softwaremill.kmq._
import com.softwaremill.kmq.redelivery.infrastructure.KafkaSpec
import org.apache.kafka.clients.consumer.ConsumerConfig
import org.apache.kafka.clients.producer.{ProducerConfig, ProducerRecord}
import org.apache.kafka.common.serialization.StringDeserializer
import org.scalatest.concurrent.Eventually
import org.scalatest.time.{Seconds, Span}
import org.scalatest.{BeforeAndAfterAll, FlatSpecLike, Matchers}

import scala.collection.mutable.ArrayBuffer

class IntegrationTest extends TestKit(ActorSystem("test-system")) with FlatSpecLike with KafkaSpec with BeforeAndAfterAll with Eventually with Matchers {

  implicit val materializer = ActorMaterializer()
  import system.dispatcher

  "KMQ" should "resend message if not committed" in {
    val bootstrapServer = s"localhost:${testKafkaConfig.kafkaPort}"
    val kmqConfig = new KmqConfig("queue", "markers", "kmq_client", "kmq_redelivery",
      Duration.ofSeconds(1).toMillis, 1000)

    val consumerSettings = ConsumerSettings(system, new StringDeserializer, new StringDeserializer)
      .withBootstrapServers(bootstrapServer)
      .withGroupId(kmqConfig.getMsgConsumerGroupId)
      .withProperty(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest")

    val markerProducerSettings = ProducerSettings(system,
      new MarkerKey.MarkerKeySerializer(), new MarkerValue.MarkerValueSerializer())
      .withBootstrapServers(bootstrapServer)
      .withProperty(ProducerConfig.PARTITIONER_CLASS_CONFIG, classOf[ParititionFromMarkerKey].getName)
    val markerProducer = markerProducerSettings.createKafkaProducer()

    val random = new Random()

    lazy val processedMessages = ArrayBuffer[String]()
    lazy val receivedMessages = ArrayBuffer[String]()

    val control = Consumer.committableSource(consumerSettings, Subscriptions.topics(kmqConfig.getMsgTopic)) // 1. get messages from topic
      .map { msg =>
        ProducerMessage.Message(
          new ProducerRecord[MarkerKey, MarkerValue](kmqConfig.getMarkerTopic, MarkerKey.fromRecord(msg.record), new StartMarker(kmqConfig.getMsgTimeoutMs)), msg)
      }
      .via(Producer.flow(markerProducerSettings, markerProducer)) // 2. write the "start" marker
      .map(_.message.passThrough)
      .mapAsync(1) { msg =>
        msg.committableOffset.commitScaladsl().map(_ => msg.record) // this should be batched
      }
      .map { msg =>
        receivedMessages += msg.value
        msg
      }
      .filter(_ => random.nextInt(5) != 0) // 3. simulate a crashed processor by dropping roughly 1 in 5 messages
      .map { processedMessage =>
        processedMessages += processedMessage.value // 4. record the message as processed
        new ProducerRecord[MarkerKey, MarkerValue](kmqConfig.getMarkerTopic, MarkerKey.fromRecord(processedMessage), EndMarker.INSTANCE)
      }
      .to(Producer.plainSink(markerProducerSettings, markerProducer)) // 5. write "end" markers
      .run()

    val redeliveryHook = RedeliveryTracker.start(new KafkaClients(bootstrapServer), kmqConfig)

    val messages = (0 to 20).map(_.toString)
    messages.foreach(msg => sendToKafka(kmqConfig.getMsgTopic, msg))

    eventually {
      receivedMessages.size should be > processedMessages.size
      processedMessages.sortBy(_.toInt).distinct shouldBe messages
    }(PatienceConfig(timeout = Span(15, Seconds)), implicitly)

    redeliveryHook.close()
    control.shutdown()
  }

  override def afterAll(): Unit = {
    super.afterAll()
    TestKit.shutdownActorSystem(system)
  }
} 
Example 2
Source File: CacheEvictionSpecBase.scala    From daml   with Apache License 2.0
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.caching

import org.scalatest.{Matchers, WordSpecLike}
import org.scalatest.concurrent.Eventually
import org.scalatest.time.{Second, Span}

import scala.util.Random

trait CacheEvictionSpecBase
    extends CacheBehaviorSpecBase
    with WordSpecLike
    with Matchers
    with Eventually {
  override implicit def patienceConfig: PatienceConfig = PatienceConfig(scaled(Span(1, Second)))

  protected def newLargeCache(): Cache[Integer, String]

  name should {
    "evict values eventually, once the limit has been reached" in {
      val cache = newLargeCache()
      val values = Iterator.continually[Integer](Random.nextInt).take(1000).toSet.toVector

      values.foreach { value =>
        cache.get(value, _.toString)
      }

      // The cache may not evict straight away. We should keep trying.
      eventually {
        val cachedValues = values.map(cache.getIfPresent).filter(_.isDefined)
        // It may evict more than expected, and it might grow past the bounds again before we check.
        cachedValues.length should (be > 16 and be < 500)
      }
    }
  }
} 
Example 3
Source File: ActorSystemMetricsSpec.scala    From prometheus-akka   with Apache License 2.0
package com.workday.prometheus.akka

import scala.collection.JavaConverters._
import scala.concurrent.duration._

import org.scalatest.BeforeAndAfterEach
import org.scalatest.concurrent.Eventually

import com.workday.prometheus.akka.ActorSystemMetrics._

import akka.actor._
import io.prometheus.client.Collector

class ActorSystemMetricsSpec extends TestKitBaseSpec("ActorSystemMetricsSpec") with BeforeAndAfterEach with Eventually {

  override def beforeEach(): Unit = {
    super.beforeEach()
    clearSystemMetrics
  }

  "the actor system metrics" should {
    "count actors" in {
      val trackedActor = system.actorOf(Props[ActorMetricsTestActor])
      eventually(timeout(5 seconds)) {
        findSystemMetricsRecorder(system.name) should not be empty
        val map = findSystemMetricsRecorder(system.name)
        map.getOrElse(ActorCountMetricName, -1.0) shouldEqual 1.0
      }
      system.stop(trackedActor)
      eventually(timeout(5 seconds)) {
        val metrics = findSystemMetricsRecorder(system.name)
        metrics.getOrElse(ActorCountMetricName, -1.0) shouldEqual 0.0
      }
    }
    "count unhandled messages" in {
      val count = findSystemMetricsRecorder(system.name).getOrElse(UnhandledMessageCountMetricName, 0.0)
      val trackedActor = system.actorOf(Props[ActorMetricsTestActor])
      trackedActor ! "unhandled"
      eventually(timeout(5 seconds)) {
        findSystemMetricsRecorder(system.name).getOrElse(UnhandledMessageCountMetricName, -1.0) shouldEqual (count + 1.0)
      }
    }
    "count dead letters" in {
      val count = findSystemMetricsRecorder(system.name).getOrElse(DeadLetterCountMetricName, 0.0)
      val trackedActor = system.actorOf(Props[ActorMetricsTestActor])
      system.stop(trackedActor)
      eventually(timeout(5 seconds)) {
        trackedActor ! "dead"
        findSystemMetricsRecorder(system.name).getOrElse(DeadLetterCountMetricName, -1.0) shouldBe > (count)
      }
    }
  }

  def findSystemMetricsRecorder(name: String): Map[String, Double] = {
    val metrics: List[Collector.MetricFamilySamples] =
      ActorSystemMetrics.actorCount.collect().asScala.toList ++
        ActorSystemMetrics.deadLetterCount.collect().asScala.toList ++
        ActorSystemMetrics.unhandledMessageCount.collect().asScala.toList
    val values = for(samples <- metrics;
      sample <- samples.samples.asScala if sample.labelValues.contains(name))
      yield (sample.name, sample.value)
    values.toMap
  }

  def clearSystemMetrics: Unit = {
    ActorSystemMetrics.actorCount.clear()
  }
} 
Example 4
Source File: KafkaStreamSuite.scala    From drizzle-spark   with Apache License 2.0
package org.apache.spark.streaming.kafka

import scala.collection.mutable
import scala.concurrent.duration._
import scala.language.postfixOps
import scala.util.Random

import kafka.serializer.StringDecoder
import org.scalatest.BeforeAndAfterAll
import org.scalatest.concurrent.Eventually

import org.apache.spark.{SparkConf, SparkFunSuite}
import org.apache.spark.storage.StorageLevel
import org.apache.spark.streaming.{Milliseconds, StreamingContext}

class KafkaStreamSuite extends SparkFunSuite with Eventually with BeforeAndAfterAll {
  private var ssc: StreamingContext = _
  private var kafkaTestUtils: KafkaTestUtils = _

  override def beforeAll(): Unit = {
    kafkaTestUtils = new KafkaTestUtils
    kafkaTestUtils.setup()
  }

  override def afterAll(): Unit = {
    if (ssc != null) {
      ssc.stop()
      ssc = null
    }

    if (kafkaTestUtils != null) {
      kafkaTestUtils.teardown()
      kafkaTestUtils = null
    }
  }

  test("Kafka input stream") {
    val sparkConf = new SparkConf().setMaster("local[4]").setAppName(this.getClass.getSimpleName)
    ssc = new StreamingContext(sparkConf, Milliseconds(500))
    val topic = "topic1"
    val sent = Map("a" -> 5, "b" -> 3, "c" -> 10)
    kafkaTestUtils.createTopic(topic)
    kafkaTestUtils.sendMessages(topic, sent)

    val kafkaParams = Map("zookeeper.connect" -> kafkaTestUtils.zkAddress,
      "group.id" -> s"test-consumer-${Random.nextInt(10000)}",
      "auto.offset.reset" -> "smallest")

    val stream = KafkaUtils.createStream[String, String, StringDecoder, StringDecoder](
      ssc, kafkaParams, Map(topic -> 1), StorageLevel.MEMORY_ONLY)
    val result = new mutable.HashMap[String, Long]()
    stream.map(_._2).countByValue().foreachRDD { r =>
      r.collect().foreach { kv =>
        result.synchronized {
          val count = result.getOrElseUpdate(kv._1, 0) + kv._2
          result.put(kv._1, count)
        }
      }
    }

    ssc.start()

    eventually(timeout(10000 milliseconds), interval(100 milliseconds)) {
      assert(result.synchronized { sent === result })
    }
  }
} 
Example 5
Source File: FastSyncStateStorageActorSpec.scala    From mantis   with Apache License 2.0
package io.iohk.ethereum.blockchain.sync

import akka.actor.ActorSystem
import akka.pattern._
import akka.testkit.TestActorRef
import akka.util.ByteString
import io.iohk.ethereum.NormalPatience
import io.iohk.ethereum.blockchain.sync.FastSync.SyncState
import io.iohk.ethereum.blockchain.sync.FastSyncStateStorageActor.GetStorage
import io.iohk.ethereum.db.dataSource.EphemDataSource
import io.iohk.ethereum.db.storage.FastSyncStateStorage
import io.iohk.ethereum.domain.BlockHeader
import org.scalatest.concurrent.Eventually
import org.scalatest.{AsyncFlatSpec, Matchers}

class FastSyncStateStorageActorSpec extends AsyncFlatSpec with Matchers with Eventually with NormalPatience {

  "FastSyncStateActor" should "eventually persist a newest state of a fast sync" in {

    val dataSource = EphemDataSource()
    implicit val system = ActorSystem("FastSyncStateActorSpec_System")
    val syncStateActor = TestActorRef(new FastSyncStateStorageActor)
    val maxN = 10

    val targetBlockHeader = BlockHeader(ByteString(""), ByteString(""), ByteString(""), ByteString(""), ByteString(""),
      ByteString(""), ByteString(""), 0, 0, 0, 0, 0, ByteString(""), ByteString(""), ByteString(""))
    syncStateActor ! new FastSyncStateStorage(dataSource)
    (0 to maxN).foreach(n => syncStateActor ! SyncState(targetBlockHeader).copy(downloadedNodesCount = n))

    eventually {
      (syncStateActor ? GetStorage).mapTo[Option[SyncState]].map { syncState =>
        val expected = SyncState(targetBlockHeader).copy(downloadedNodesCount = maxN)
        syncState shouldEqual Some(expected)
      }
    }

  }

} 
Example 6
Source File: TodoListTest.scala    From scala-json-rpc   with MIT License
package io.github.shogowada.scala.jsonrpc.example.e2e.websocket.integrationtest

import io.github.shogowada.scala.jsonrpc.example.e2e.websocket.ElementIds
import org.openqa.selenium.support.ui.{ExpectedCondition, ExpectedConditions, WebDriverWait}
import org.openqa.selenium.{By, WebDriver}
import org.scalatest.concurrent.Eventually
import org.scalatest.selenium.{Chrome, Firefox}
import org.scalatest.{Matchers, path}

class TodoListTest extends path.FreeSpec
    with Chrome
    with Eventually
    with Matchers {

  def waitFor[T](condition: ExpectedCondition[T])(implicit webDriver: WebDriver): T = {
    new WebDriverWait(webDriver, 3).until[T](condition)
  }

  "given I am on TODO list" - {
    go to Target.url

    waitFor(ExpectedConditions.textToBe(By.id(ElementIds.Ready), "Ready!"))

    clearTodos()

    "when I add TODO item" - {
      val newTodoDescription = "Say hello"

      waitFor(ExpectedConditions.visibilityOfElementLocated(By.id(ElementIds.NewTodoDescription)))
      textField(id(ElementIds.NewTodoDescription)).value = newTodoDescription
      clickOn(id(ElementIds.AddTodo))

      "then it should add the item" in {
        verifyTodoExists(newTodoDescription)
      }

      "and I reload the page" - {
        reloadPage()

        "then it should still show the item" in {
          verifyTodoExists(newTodoDescription)
        }
      }

      "and removed the item" - {
        find(cssSelector("li>button")).foreach(element => clickOn(element))

        "then it should remove the item" in {
          eventually {
            findAll(tagName("li")) shouldBe empty
          }
        }
      }
    }
  }

  def clearTodos(): Unit = {
    findAll(cssSelector("li>button")).foreach(element => clickOn(element))
  }

  def verifyTodoExists(description: String): Unit = {
    eventually {
      findAll(tagName("li")).exists(element => element.text.contains(description)) should equal(true)
    }
  }

  quit()
} 
Example 7
Source File: LoggerTest.scala    From scala-json-rpc   with MIT License
package io.github.shogowada.scala.jsonrpc.example.e2e.integrationtest

import io.github.shogowada.scala.jsonrpc.example.e2e.ElementIds
import org.scalatest.concurrent.Eventually
import org.scalatest.selenium.{Chrome, Firefox}
import org.scalatest.{Matchers, path}

class LoggerTest extends path.FreeSpec
    with Matchers
    with Eventually
    with Chrome {

  "given I am on logger page" - {
    go to Target.url

    "when I log something" - {
      val log = "Ah, looks like something happened?"

      textField(ElementIds.LoggerLogText).value = log
      clickOn(ElementIds.LoggerLog)

      "and I get logs" - {
        clickOn(ElementIds.LoggerGetLogs)

        "then it should log the text" in {
          eventually {
            find(ElementIds.LoggerLogs).get.text should equal(log)
          }
        }
      }
    }
  }

  quit()
} 
Example 8
Source File: CalculatorTest.scala    From scala-json-rpc   with MIT License
package io.github.shogowada.scala.jsonrpc.example.e2e.integrationtest

import io.github.shogowada.scala.jsonrpc.example.e2e.ElementIds
import org.scalatest.concurrent.Eventually
import org.scalatest.selenium.{Chrome, Firefox}
import org.scalatest.{Matchers, path}

class CalculatorTest extends path.FreeSpec
    with Eventually
    with Matchers
    with Chrome {

  "given I am on the calculator page" - {
    go to Target.url

    "then it should display the page" in {
      eventually {
        find(ElementIds.CalculatorCalculate) shouldBe defined
      }
    }

    "and I entered 2 and 3" - {
      textField(ElementIds.CalculatorLhs).value = "2"
      textField(ElementIds.CalculatorRhs).value = "3"

      "when I clicked on calculate button" - {
        clickOn(ElementIds.CalculatorCalculate)

        "then it should add the numbers" in {
          eventually {
            find(ElementIds.CalculatorAdded).get.text should equal("2 + 3 = 5")
          }
        }

        "then it should subtract the numbers" in {
          eventually {
            find(ElementIds.CalculatorSubtracted).get.text should equal("2 - 3 = -1")
          }
        }
      }
    }
  }

  quit()
} 
Example 9
Source File: EchoTest.scala    From scala-json-rpc   with MIT License
package io.github.shogowada.scala.jsonrpc.example.e2e.integrationtest

import io.github.shogowada.scala.jsonrpc.example.e2e.ElementIds
import org.scalatest.concurrent.Eventually
import org.scalatest.selenium.{Chrome, Firefox}
import org.scalatest.{Matchers, path}

class EchoTest extends path.FreeSpec
    with Matchers
    with Eventually
    with Chrome {

  "given I am on the echo page" - {
    go to Target.url

    "when I typed in something" - {
      val text = "Hello, server!"

      textField(ElementIds.EchoText).value = text

      "then it should echo the text" in {
        eventually {
          find(ElementIds.EchoEchoedText).get.text should equal(text)
        }
      }
    }
  }

  quit()
} 
Example 10
Source File: TestSpec.scala    From akka-serialization-test   with Apache License 2.0
package com.github.dnvriend

import akka.actor.{ ActorRef, ActorSystem, PoisonPill }
import akka.event.{ Logging, LoggingAdapter }
import akka.serialization.SerializationExtension
import akka.stream.{ ActorMaterializer, Materializer }
import akka.testkit.TestProbe
import akka.util.Timeout
import org.scalatest.concurrent.{ Eventually, ScalaFutures }
import org.scalatest.prop.PropertyChecks
import org.scalatest.{ BeforeAndAfterAll, FlatSpec, GivenWhenThen, Matchers }

import scala.concurrent.duration._
import scala.concurrent.{ ExecutionContext, Future }
import scala.util.Try

trait TestSpec extends FlatSpec
    with Matchers
    with GivenWhenThen
    with ScalaFutures
    with BeforeAndAfterAll
    with Eventually
    with PropertyChecks
    with AkkaPersistenceQueries
    with AkkaStreamUtils
    with InMemoryCleanup {

  implicit val timeout: Timeout = Timeout(10.seconds)
  implicit val system: ActorSystem = ActorSystem()
  implicit val ec: ExecutionContext = system.dispatcher
  implicit val mat: Materializer = ActorMaterializer()
  implicit val log: LoggingAdapter = Logging(system, this.getClass)
  implicit val pc: PatienceConfig = PatienceConfig(timeout = 50.seconds)
  val serialization = SerializationExtension(system)

  implicit class FutureToTry[T](f: Future[T]) {
    def toTry: Try[T] = Try(f.futureValue)
  }

  def killActors(actors: ActorRef*): Unit = {
    val probe = TestProbe()
    actors.foreach { actor ⇒
      probe watch actor
      actor ! PoisonPill
      probe expectTerminated actor
    }
  }

  override protected def afterAll(): Unit = {
    system.terminate()
    system.whenTerminated.toTry should be a 'success
  }
} 
Example 11
Source File: SharedSparkSessionBase.scala    From spark-alchemy   with Apache License 2.0
package org.apache.spark.sql.test

import org.apache.spark.sql.internal.StaticSQLConf
import org.apache.spark.sql.{SQLContext, SparkSession}
import org.apache.spark.{DebugFilesystem, SparkConf}
import org.scalatest.Suite
import org.scalatest.concurrent.Eventually

import scala.concurrent.duration._



  protected override def afterAll(): Unit = {
    try {
      super.afterAll()
    } finally {
      try {
        if (_spark != null) {
          try {
            _spark.sessionState.catalog.reset()
          } finally {
            try {
              waitForTasksToFinish()
            } finally {
              _spark.stop()
              _spark = null
            }
          }
        }
      } finally {
        SparkSession.clearActiveSession()
        SparkSession.clearDefaultSession()
      }
    }
  }

  protected override def beforeEach(): Unit = {
    super.beforeEach()
    DebugFilesystem.clearOpenStreams()
  }

  protected override def afterEach(): Unit = {
    super.afterEach()
    // Clear all persistent datasets after each test
    spark.sharedState.cacheManager.clearCache()
    // files can be closed from other threads, so wait a bit
    // normally this doesn't take more than 1s
    eventually(timeout(30.seconds), interval(2.seconds)) {
      DebugFilesystem.assertNoOpenStreams()
    }
  }
} 
Example 12
Source File: MatcherSuiteBase.scala    From matcher   with MIT License
package com.wavesplatform.it

import java.nio.charset.StandardCharsets
import java.util.concurrent.ThreadLocalRandom

import cats.instances.FutureInstances
import com.wavesplatform.dex.asset.DoubleOps
import com.wavesplatform.dex.domain.account.KeyPair
import com.wavesplatform.dex.domain.asset.Asset
import com.wavesplatform.dex.domain.bytes.ByteStr
import com.wavesplatform.dex.domain.utils.ScorexLogging
import com.wavesplatform.dex.it.api.BaseContainersKit
import com.wavesplatform.dex.it.api.node.HasWavesNode
import com.wavesplatform.dex.it.config.{GenesisConfig, PredefinedAccounts, PredefinedAssets}
import com.wavesplatform.dex.it.dex.HasDex
import com.wavesplatform.dex.it.matchers.ItMatchers
import com.wavesplatform.dex.it.test.InformativeTestStart
import com.wavesplatform.dex.it.waves.{MkWavesEntities, ToWavesJConversions}
import com.wavesplatform.dex.test.matchers.DiffMatcherWithImplicits
import com.wavesplatform.dex.waves.WavesFeeConstants
import com.wavesplatform.it.api.ApiExtensions
import org.scalatest.concurrent.Eventually
import org.scalatest.freespec.AnyFreeSpec
import org.scalatest.matchers.should.Matchers
import org.scalatest.{BeforeAndAfterAll, BeforeAndAfterEach, CancelAfterFailure}

import scala.concurrent.duration.DurationInt

trait MatcherSuiteBase
    extends AnyFreeSpec
    with Matchers
    with CancelAfterFailure
    with BeforeAndAfterAll
    with BeforeAndAfterEach
    with Eventually
    with BaseContainersKit
    with HasDex
    with HasWavesNode
    with MkWavesEntities
    with ApiExtensions
    with ItMatchers
    with DoubleOps
    with WavesFeeConstants
    with PredefinedAssets
    with PredefinedAccounts
    with DiffMatcherWithImplicits
    with InformativeTestStart
    with FutureInstances
    with ToWavesJConversions
    with ScorexLogging {

  GenesisConfig.setupAddressScheme()

  override protected val moduleName: String = "dex-it"

  override implicit def patienceConfig: PatienceConfig = super.patienceConfig.copy(timeout = 30.seconds, interval = 1.second)

  override protected def beforeAll(): Unit = {
    log.debug(s"Perform beforeAll")
    kafkaServer.foreach { _ =>
      createKafkaTopic(dexRunConfig.getString("waves.dex.events-queue.kafka.topic"))
    }
    wavesNode1.start()
    dex1.start()
  }

  override protected def afterAll(): Unit = {
    log.debug(s"Perform afterAll")
    stopBaseContainers()
    super.afterAll()
  }

  def createAccountWithBalance(balances: (Long, Asset)*): KeyPair = {
    val account = KeyPair(ByteStr(s"account-test-${ThreadLocalRandom.current().nextInt()}".getBytes(StandardCharsets.UTF_8)))

    balances.foreach {
      case (balance, asset) =>
        assert(
          wavesNode1.api.balance(alice, asset) >= balance,
          s"Alice doesn't have enough balance in ${asset.toString} to make a transfer"
        )
        broadcastAndAwait(mkTransfer(alice, account.toAddress, balance, asset))
    }
    account
  }
} 
Example 13
Source File: IntegrationSuiteBase.scala    From matcher   with MIT License
package com.wavesplatform.dex.grpc.integration

import com.wavesplatform.dex.asset.DoubleOps
import com.wavesplatform.dex.domain.utils.ScorexLogging
import com.wavesplatform.dex.it.api.BaseContainersKit
import com.wavesplatform.dex.it.api.node.{HasWavesNode, NodeApiExtensions}
import com.wavesplatform.dex.it.config.{GenesisConfig, PredefinedAccounts, PredefinedAssets}
import com.wavesplatform.dex.it.test.InformativeTestStart
import com.wavesplatform.dex.it.waves.{MkWavesEntities, ToWavesJConversions}
import com.wavesplatform.dex.test.matchers.DiffMatcherWithImplicits
import com.wavesplatform.dex.waves.WavesFeeConstants
import org.scalatest.concurrent.Eventually
import org.scalatest.freespec.AnyFreeSpec
import org.scalatest.matchers.should.Matchers
import org.scalatest.{BeforeAndAfterAll, BeforeAndAfterEach}

import scala.concurrent.duration.DurationInt

trait IntegrationSuiteBase
    extends AnyFreeSpec
    with Matchers
    with BeforeAndAfterAll
    with BeforeAndAfterEach
    with Eventually
    with BaseContainersKit
    with HasWavesNode
    with MkWavesEntities
    with WavesFeeConstants
    with NodeApiExtensions
    with PredefinedAssets
    with PredefinedAccounts
    with DoubleOps
    with DiffMatcherWithImplicits
    with InformativeTestStart
    with ToWavesJConversions
    with ScorexLogging {

  GenesisConfig.setupAddressScheme()

  override protected val moduleName: String = "waves-integration-it"

  override implicit def patienceConfig: PatienceConfig = super.patienceConfig.copy(timeout = 30.seconds, interval = 1.second)

  override protected def beforeAll(): Unit = {
    log.debug(s"Perform beforeAll")
    wavesNode1.start()
  }

  override protected def afterAll(): Unit = {
    log.debug(s"Perform afterAll")
    stopBaseContainers()
    super.afterAll()
  }
} 
Example 14
Source File: HasWebSockets.scala    From matcher   with MIT License
package com.wavesplatform.dex.it.api.websockets

import java.lang
import java.util.concurrent.ConcurrentHashMap

import akka.actor.ActorSystem
import akka.stream.Materializer
import com.wavesplatform.dex.api.ws.connection.{WsConnection, WsConnectionOps}
import com.wavesplatform.dex.api.ws.entities.{WsBalances, WsOrder}
import com.wavesplatform.dex.api.ws.protocol.{WsAddressSubscribe, WsInitial, WsOrderBookSubscribe}
import com.wavesplatform.dex.domain.account.KeyPair
import com.wavesplatform.dex.domain.asset.{Asset, AssetPair}
import com.wavesplatform.dex.error.ErrorFormatterContext
import com.wavesplatform.dex.it.config.PredefinedAssets
import com.wavesplatform.dex.it.docker.DexContainer
import com.wavesplatform.dex.test.matchers.DiffMatcherWithImplicits
import mouse.any._
import org.scalatest.concurrent.Eventually
import org.scalatest.matchers.should.Matchers
import org.scalatest.{BeforeAndAfterAll, Suite}

import scala.concurrent.duration._

trait HasWebSockets extends BeforeAndAfterAll with HasJwt with WsConnectionOps with WsMessageOps {
  _: Suite with Eventually with Matchers with DiffMatcherWithImplicits with PredefinedAssets =>

  implicit protected val system: ActorSystem        = ActorSystem()
  implicit protected val materializer: Materializer = Materializer.matFromSystem(system)
  implicit protected val efc: ErrorFormatterContext = assetDecimalsMap.apply

  protected def getWsStreamUri(dex: DexContainer): String = s"ws://127.0.0.1:${dex.restApiAddress.getPort}/ws/v0"

  protected val knownWsConnections: ConcurrentHashMap.KeySetView[WsConnection, lang.Boolean] =
    ConcurrentHashMap.newKeySet[WsConnection]()

  protected def addConnection(connection: WsConnection): Unit = knownWsConnections.add(connection)

  protected def mkWsAddressConnection(client: KeyPair,
                                      dex: DexContainer,
                                      keepAlive: Boolean = true,
                                      subscriptionLifetime: FiniteDuration = 1.hour): WsConnection = {
    val jwt        = mkJwt(client, lifetime = subscriptionLifetime)
    val connection = mkDexWsConnection(dex, keepAlive)
    connection.send(WsAddressSubscribe(client.toAddress, WsAddressSubscribe.defaultAuthType, jwt))
    connection
  }

  protected def mkWsOrderBookConnection(assetPair: AssetPair, dex: DexContainer, depth: Int = 1): WsConnection = {
    val connection = mkDexWsConnection(dex)
    connection.send(WsOrderBookSubscribe(assetPair, depth))
    connection
  }

  protected def mkWsInternalConnection(dex: DexContainer, keepAlive: Boolean = true): WsConnection =
    mkWsConnection(s"${getWsStreamUri(dex)}/internal", keepAlive)

  protected def mkDexWsConnection(dex: DexContainer, keepAlive: Boolean = true): WsConnection =
    mkWsConnection(getWsStreamUri(dex), keepAlive)

  protected def mkWsConnection(uri: String, keepAlive: Boolean = true): WsConnection = {
    new WsConnection(uri, keepAlive) unsafeTap { wsc =>
      addConnection(wsc)
      eventually { wsc.collectMessages[WsInitial] should have size 1 }
      wsc.clearMessages()
    }
  }

  protected def assertChanges(c: WsConnection, squash: Boolean = true)(expBs: Map[Asset, WsBalances]*)(expOs: WsOrder*): Unit = {
    eventually {
      if (squash) {
        c.balanceChanges.size should be <= expBs.size
        c.balanceChanges.squashed should matchTo { expBs.toList.squashed }
        c.orderChanges.size should be <= expOs.size
        c.orderChanges.squashed should matchTo { expOs.toList.squashed }
      } else {
        c.balanceChanges should matchTo(expBs)
        c.orderChanges should matchTo(expOs)
      }
    }

    c.clearMessages()
  }

  protected def cleanupWebSockets(): Unit = {
    if (!knownWsConnections.isEmpty) {
      knownWsConnections.forEach { _.close() }
      materializer.shutdown()
    }
  }

  override def afterAll(): Unit = {
    super.afterAll()
    cleanupWebSockets()
  }
} 
Example 15
Source File: TestSpec.scala    From intro-to-akka-streams   with Apache License 2.0
package com.github.dnvriend.streams

import akka.NotUsed
import akka.actor.{ ActorRef, ActorSystem, PoisonPill }
import akka.event.{ Logging, LoggingAdapter }
import akka.stream.Materializer
import akka.stream.scaladsl.Source
import akka.stream.testkit.TestSubscriber
import akka.stream.testkit.scaladsl.TestSink
import akka.testkit.TestProbe
import akka.util.Timeout
import com.github.dnvriend.streams.util.ClasspathResources
import org.scalatest._
import org.scalatest.concurrent.{ Eventually, ScalaFutures }
import org.scalatestplus.play.guice.GuiceOneServerPerSuite
import play.api.inject.BindingKey
import play.api.libs.json.{ Format, Json }
import play.api.test.WsTestClient

import scala.collection.immutable._
import scala.concurrent.duration._
import scala.concurrent.{ ExecutionContext, Future }
import scala.reflect.ClassTag
import scala.util.Try

object Person {
  implicit val format: Format[Person] = Json.format[Person]
}

final case class Person(firstName: String, age: Int)

class TestSpec extends FlatSpec
    with Matchers
    with GivenWhenThen
    with OptionValues
    with TryValues
    with ScalaFutures
    with WsTestClient
    with BeforeAndAfterAll
    with BeforeAndAfterEach
    with Eventually
    with ClasspathResources
    with GuiceOneServerPerSuite {

  def getComponent[A: ClassTag] = app.injector.instanceOf[A]
  def getNamedComponent[A](name: String)(implicit ct: ClassTag[A]): A =
    app.injector.instanceOf[A](BindingKey(ct.runtimeClass.asInstanceOf[Class[A]]).qualifiedWith(name))

  // set the port number of the HTTP server
  override lazy val port: Int = 8081
  implicit val timeout: Timeout = 1.second
  implicit val pc: PatienceConfig = PatienceConfig(timeout = 30.seconds, interval = 300.millis)
  implicit val system: ActorSystem = getComponent[ActorSystem]
  implicit val ec: ExecutionContext = getComponent[ExecutionContext]
  implicit val mat: Materializer = getComponent[Materializer]
  val log: LoggingAdapter = Logging(system, this.getClass)

  // ================================== Supporting Operations ====================================
  def id: String = java.util.UUID.randomUUID().toString

  implicit class FutureToTry[T](f: Future[T]) {
    def toTry: Try[T] = Try(f.futureValue)
  }

  implicit class SourceOps[A](src: Source[A, NotUsed]) {
    def testProbe(f: TestSubscriber.Probe[A] ⇒ Unit): Unit =
      f(src.runWith(TestSink.probe(system)))
  }

  def withIterator[T](start: Int = 0)(f: Source[Int, NotUsed] ⇒ T): T =
    f(Source.fromIterator(() ⇒ Iterator from start))

  def fromCollection[A](xs: Iterable[A])(f: TestSubscriber.Probe[A] ⇒ Unit): Unit =
    f(Source(xs).runWith(TestSink.probe(system)))

  def killActors(refs: ActorRef*): Unit = {
    val tp = TestProbe()
    refs.foreach { ref ⇒
      tp watch ref
      tp.send(ref, PoisonPill)
      tp.expectTerminated(ref)
    }
  }
} 
Example 16
Source File: KafkaConfiguratorIntSpec.scala    From kafka-configurator   with BSD 3-Clause "New" or "Revised" License
package com.sky.kafka.configurator

import common.KafkaIntSpec
import kafka.admin.AdminUtils
import org.scalatest.concurrent.Eventually

import scala.util.Success

class KafkaConfiguratorIntSpec extends KafkaIntSpec with Eventually {

  "KafkaConfigurator" should "create new topics in Kafka from multiple input files" in {
    val topics = List("topic1", "topic2", "topic3")

    topics.map(AdminUtils.topicExists(zkUtils, _) shouldBe false)

    Main.run(testArgs(Seq("/topic-configuration.yml", "/topic-configuration-2.yml")), Map.empty) shouldBe a[Success[_]]

    eventually {
      withClue("Topic exists: ") {
        topics.map(AdminUtils.topicExists(zkUtils, _) shouldBe true)
      }
    }
  }

  it should "still configure all topics when one fails" in {
    val correctTopics = List("correctConfig1", "correctConfig2")
    val errorTopic = "errorConfig"

    (correctTopics :+ errorTopic).map(AdminUtils.topicExists(zkUtils, _) shouldBe false)

    Main.run(testArgs(Seq("/topic-configuration-with-error.yml")), Map.empty) shouldBe a[Success[_]]

    eventually {
      withClue("Topic exists: ") {
        correctTopics.map(AdminUtils.topicExists(zkUtils, _) shouldBe true)
      }
      withClue("Topic doesn't exist: ") {
        AdminUtils.topicExists(zkUtils, errorTopic) shouldBe false
      }
    }
  }

  it should "configure topics from correct files if another input file is empty" in {
    val topic = "topic4"

    AdminUtils.topicExists(zkUtils, topic) shouldBe false

    Main.run(testArgs(Seq("/topic-configuration-3.yml", "/no-topics.yml")), Map.empty) shouldBe a[Success[_]]

    eventually {
      withClue("Topic exists: ") {
        AdminUtils.topicExists(zkUtils, topic) shouldBe true
      }
    }
  }

  private def testArgs(filePaths: Seq[String]): Array[String] =
    Array(
      "-f", filePaths.map(path => getClass.getResource(path).getPath).mkString(","),
      "--bootstrap-servers", s"localhost:${kafkaServer.kafkaPort}"
    )
} 
Example 17
Source File: SKRSpec.scala    From spark-kafka-writer   with Apache License 2.0
package com.github.benfradet.spark.kafka.writer

import java.util.concurrent.atomic.AtomicInteger

import org.apache.kafka.common.serialization.{StringDeserializer, StringSerializer}
import org.apache.spark.SparkConf
import org.apache.spark.sql.SparkSession
import org.apache.spark.streaming.kafka010.{ConsumerStrategies, KafkaUtils, LocationStrategies}
import org.apache.spark.streaming.{Seconds, StreamingContext}
import org.scalatest.concurrent.Eventually
import org.scalatest.{BeforeAndAfterAll, BeforeAndAfterEach}

import scala.collection.mutable.ArrayBuffer
import scala.util.Random
import org.scalatest.matchers.should.Matchers
import org.scalatest.wordspec.AnyWordSpec

case class Foo(a: Int, b: String)

trait SKRSpec
  extends AnyWordSpec
  with Matchers
  with BeforeAndAfterEach
  with BeforeAndAfterAll
  with Eventually {

  val sparkConf = new SparkConf()
    .setMaster("local[1]")
    .setAppName(getClass.getSimpleName)

  var ktu: KafkaTestUtils = _
  override def beforeAll(): Unit = {
    ktu = new KafkaTestUtils
    ktu.setup()
  }
  override def afterAll(): Unit = {
    SKRSpec.callbackTriggerCount.set(0)
    if (ktu != null) {
      ktu.tearDown()
      ktu = null
    }
  }

  var topic: String = _
  var ssc: StreamingContext = _
  var spark: SparkSession = _
  override def afterEach(): Unit = {
    if (ssc != null) {
      ssc.stop()
      ssc = null
    }
    if (spark != null) {
      spark.stop()
      spark = null
    }
  }
  override def beforeEach(): Unit = {
    ssc = new StreamingContext(sparkConf, Seconds(1))
    spark = SparkSession.builder
      .config(sparkConf)
      .getOrCreate()
    topic = s"topic-${Random.nextInt()}"
    ktu.createTopics(topic)
  }

  def collect(ssc: StreamingContext, topic: String): ArrayBuffer[String] = {
    val kafkaParams = Map(
      "bootstrap.servers" -> ktu.brokerAddress,
      "auto.offset.reset" -> "earliest",
      "key.deserializer" -> classOf[StringDeserializer],
      "value.deserializer" -> classOf[StringDeserializer],
      "group.id" -> "test-collect"
    )
    val results = new ArrayBuffer[String]
    KafkaUtils.createDirectStream[String, String](
      ssc,
      LocationStrategies.PreferConsistent,
      ConsumerStrategies.Subscribe[String, String](Set(topic), kafkaParams)
    ).map(_.value())
      .foreachRDD { rdd =>
        results ++= rdd.collect()
        ()
      }
    results
  }

  val producerConfig = Map(
    "bootstrap.servers" -> "127.0.0.1:9092",
    "key.serializer" -> classOf[StringSerializer].getName,
    "value.serializer" -> classOf[StringSerializer].getName
  )
}

object SKRSpec {
  val callbackTriggerCount = new AtomicInteger()
} 
Example 18
Source File: BaseTest.scala    From scalajs-reactjs   with MIT License
package io.github.shogowada.scalajs.reactjs.example

import org.openqa.selenium.UnexpectedAlertBehaviour
import org.openqa.selenium.chrome.ChromeDriver
import org.openqa.selenium.remote.{CapabilityType, DesiredCapabilities}
import org.scalatest.concurrent.Eventually
import org.scalatest.selenium.{Driver, WebBrowser}
import org.scalatest.{Matchers, path}

object BaseTest {
  val webDriver = {
    val capabilities = new DesiredCapabilities()
    capabilities.setCapability(CapabilityType.UNEXPECTED_ALERT_BEHAVIOUR, UnexpectedAlertBehaviour.IGNORE)
    new ChromeDriver(capabilities)
  }

  Runtime.getRuntime.addShutdownHook(new Thread(() => webDriver.quit()))
}

trait BaseTest extends path.FreeSpec
    with WebBrowser with Driver
    with Matchers
    with Eventually {
  override implicit val webDriver = BaseTest.webDriver
} 
Example 19
Source File: BaseSpec.scala    From process   with Apache License 2.0
package processframework

import akka.actor.ActorSystem

import org.scalatest._
import org.scalatest.concurrent.Eventually

import akka.testkit.{ ImplicitSender, TestKit }

abstract class BaseSpec extends TestKit(ActorSystem(getClass.getSimpleName.stripSuffix("$")))
    with WordSpecLike
    with Suite
    with Matchers
    with BeforeAndAfterAll
    with BeforeAndAfterEach
    with ImplicitSender
    with Eventually {

  override def afterAll() {
    TestKit.shutdownActorSystem(system)
  }
} 
Example 20
Source File: ProcessTest.scala    From process   with Apache License 2.0
package processframework

import java.lang

import akka.actor.{ ActorContext, ActorRef, ActorSystem, Props }
import akka.testkit.{ ImplicitSender, TestKit, TestProbe }
import org.scalatest._
import org.scalatest.concurrent.Eventually

import scala.concurrent.duration._

object ProcessTest {
  case object Start
  case object Response
  case class Command(i: Int)
  case object Completed extends Process.Event

  class MockStep(service: ActorRef, retryInt: Duration)(implicit val context: ActorContext) extends ProcessStep[Int] {
    override val retryInterval = retryInt
    def execute()(implicit process: akka.actor.ActorRef) = { state ⇒
      service ! Command(state)
    }
    def receiveCommand = {
      case Response ⇒
        Completed
    }
    def updateState = {
      case Completed ⇒ state ⇒ markDone(state + 1)
    }
  }

  class Process1(service: ActorRef, retryInterval: Duration) extends Process[Int] {
    import context.dispatcher
    var state = 0
    val process = new MockStep(service, retryInterval)

    def receive = {
      case Start ⇒
        process.run()
    }
  }
}

class ProcessTest extends BaseSpec {
  import ProcessTest._

  "Process" should {
    "have a happy flow" in {
      val service = TestProbe()
      val process = system.actorOf(Props(new Process1(service.ref, Duration.Inf)), "Process1")
      process ! processframework.Process.GetState
      expectMsg(0)
      process ! Start

      service.expectMsg(Command(0))
      service.reply(Response)

      eventually {
        process ! processframework.Process.GetState
        expectMsg(1)
      }
      process ! Start
      expectNoMsg(250 millis)
      process ! processframework.Process.GetState
      expectMsg(1)
    }

    "does not retry by default" in {
      val service = TestProbe()
      val process = system.actorOf(Props(new Process1(service.ref, Duration.Inf)), "Process2")
      process ! processframework.Process.GetState
      expectMsg(0)
      process ! Start

      service.expectMsg(Command(0))
      expectNoMsg()
    }

    "retries execution until succeeded" in {
      val service = TestProbe()
      val process = system.actorOf(Props(new Process1(service.ref, 150 millis)), "Process3")
      process ! processframework.Process.GetState
      expectMsg(0)
      process ! Start

      service.expectMsg(Command(0))
      service.expectMsg(1000.millis, Command(0))
      service.expectMsg(1000.millis, Command(0))
      service.reply(Response)
      expectNoMsg()
    }
  }
} 
Example 21
Source File: KubernetesTestComponents.scala    From spark-integration   with Apache License 2.0
package org.apache.spark.deploy.k8s.integrationtest

import java.nio.file.{Path, Paths}
import java.util.UUID

import scala.collection.mutable
import scala.collection.JavaConverters._
import io.fabric8.kubernetes.client.DefaultKubernetesClient
import org.scalatest.concurrent.Eventually

private[spark] class KubernetesTestComponents(defaultClient: DefaultKubernetesClient) {

  val namespaceOption = Option(System.getProperty("spark.kubernetes.test.namespace"))
  val hasUserSpecifiedNamespace = namespaceOption.isDefined
  val namespace = namespaceOption.getOrElse(UUID.randomUUID().toString.replaceAll("-", ""))
  private val serviceAccountName =
    Option(System.getProperty("spark.kubernetes.test.serviceAccountName"))
      .getOrElse("default")
  val kubernetesClient = defaultClient.inNamespace(namespace)
  val clientConfig = kubernetesClient.getConfiguration

  def createNamespace(): Unit = {
    defaultClient.namespaces.createNew()
      .withNewMetadata()
      .withName(namespace)
      .endMetadata()
      .done()
  }

  def deleteNamespace(): Unit = {
    defaultClient.namespaces.withName(namespace).delete()
    Eventually.eventually(KubernetesSuite.TIMEOUT, KubernetesSuite.INTERVAL) {
      val namespaceList = defaultClient
        .namespaces()
        .list()
        .getItems
        .asScala
      require(!namespaceList.exists(_.getMetadata.getName == namespace))
    }
  }

  def newSparkAppConf(): SparkAppConf = {
    new SparkAppConf()
      .set("spark.master", s"k8s://${kubernetesClient.getMasterUrl}")
      .set("spark.kubernetes.namespace", namespace)
      .set("spark.executor.memory", "500m")
      .set("spark.executor.cores", "1")
      .set("spark.executors.instances", "1")
      .set("spark.app.name", "spark-test-app")
      .set("spark.ui.enabled", "true")
      .set("spark.testing", "false")
      .set("spark.kubernetes.submission.waitAppCompletion", "false")
      .set("spark.kubernetes.authenticate.driver.serviceAccountName", serviceAccountName)
  }
}

private[spark] class SparkAppConf {

  private val map = mutable.Map[String, String]()

  def set(key: String, value: String): SparkAppConf = {
    map.put(key, value)
    this
  }

  def get(key: String): String = map.getOrElse(key, "")

  def setJars(jars: Seq[String]): Unit = set("spark.jars", jars.mkString(","))

  override def toString: String = map.toString

  def toStringArray: Iterable[String] = map.toList.flatMap(t => List("--conf", s"${t._1}=${t._2}"))
}

private[spark] case class SparkAppArguments(
    mainAppResource: String,
    mainClass: String,
    appArgs: Array[String])

private[spark] object SparkAppLauncher extends Logging {

  def launch(
      appArguments: SparkAppArguments,
      appConf: SparkAppConf,
      timeoutSecs: Int,
      sparkHomeDir: Path): Unit = {
    val sparkSubmitExecutable = sparkHomeDir.resolve(Paths.get("bin", "spark-submit"))
    logInfo(s"Launching a spark app with arguments $appArguments and conf $appConf")
    val commandLine = mutable.ArrayBuffer(sparkSubmitExecutable.toFile.getAbsolutePath,
      "--deploy-mode", "cluster",
      "--class", appArguments.mainClass,
      "--master", appConf.get("spark.master")
    ) ++ appConf.toStringArray :+
      appArguments.mainAppResource
    if (appArguments.appArgs.nonEmpty) {
      commandLine += appArguments.appArgs.mkString(" ")
    }
    logInfo(s"Launching a spark app with command line: ${commandLine.mkString(" ")}")
    ProcessUtils.executeProcess(commandLine.toArray, timeoutSecs)
  }
} 
Example 22
Source File: IngestorRegistrarSpec.scala    From hydra   with Apache License 2.0
package hydra.ingest.services

import java.util.concurrent.TimeUnit

import akka.actor.{ActorSystem, Props}
import akka.pattern.ask
import akka.testkit.{ImplicitSender, TestKit}
import akka.util.Timeout
import hydra.common.util.ActorUtils
import hydra.ingest.services.IngestorRegistrar.UnregisterAll
import hydra.ingest.services.IngestorRegistry.{
  FindAll,
  FindByName,
  LookupResult
}
import hydra.ingest.test.TestIngestor
import org.scalatest.concurrent.{Eventually, ScalaFutures}
import org.scalatest.time.{Seconds, Span}
import org.scalatest.matchers.should.Matchers
import org.scalatest.funspec.AnyFunSpecLike
import org.scalatest.BeforeAndAfterAll

import scala.concurrent.duration._


class IngestorRegistrarSpec
    extends TestKit(ActorSystem("IngestorRegistrarSpec"))
    with Matchers
    with AnyFunSpecLike
    with ImplicitSender
    with ScalaFutures
    with BeforeAndAfterAll
    with Eventually {

  override def afterAll =
    TestKit.shutdownActorSystem(system, verifySystemShutdown = true)

  implicit override val patienceConfig =
    PatienceConfig(timeout = Span(10, Seconds), interval = Span(1, Seconds))

  val registry = system.actorOf(Props[IngestorRegistry], "ingestor_registry")

  val act = system.actorOf(Props[IngestorRegistrar])

  implicit val timeout = Timeout(3, TimeUnit.SECONDS)

  describe("The ingestor registrar actor") {
    it("registers from classpath on bootstrap") {
      eventually {
        whenReady(
          (registry ? FindByName(ActorUtils.actorName(classOf[TestIngestor])))
            .mapTo[LookupResult]
        ) { i =>
          i.ingestors.size shouldBe 1
          i.ingestors(0).name shouldBe ActorUtils.actorName(
            classOf[TestIngestor]
          )
        }
      }
    }

    it("unregisters") {
      act ! UnregisterAll
      eventually {
        whenReady((registry ? FindAll).mapTo[LookupResult]) { i =>
          i.ingestors.size shouldBe 0
        }
      }
    }
  }
} 
Example 23
Source File: HydraMetricsSpec.scala    From hydra   with Apache License 2.0
package hydra.core.monitor

import akka.japi.Option.Some
import kamon.Kamon
import kamon.metric.{Counter, Gauge}
import org.scalamock.scalatest.proxy.MockFactory
import org.scalatest.{BeforeAndAfterAll, _}
import org.scalatest.concurrent.{Eventually, ScalaFutures}
import org.scalatest.flatspec.AnyFlatSpecLike
import org.scalatest.matchers.should.Matchers
import org.scalatest.time.{Millis, Seconds, Span}
import scalacache.guava.GuavaCache

import scala.concurrent.ExecutionContext.Implicits.global
import scala.util.{Random, Try}

class HydraMetricsSpec
    extends Matchers
    with AnyFlatSpecLike
    with Eventually
    with BeforeAndAfterAll
    with BeforeAndAfterEach
    with MockFactory
    with ScalaFutures {

  import HydraMetrics._
  import scalacache.modes.try_._

  implicit override val patienceConfig =
    PatienceConfig(
      timeout = scaled(Span(2, Seconds)),
      interval = scaled(Span(5, Millis))
    )

  override def beforeEach() = {
    gaugesCache.removeAll()
    countersCache.removeAll()
    histogramsCache.removeAll()
  }

  override def afterAll = Try(Kamon.stopModules())

  val lookup = "lookup.xyz"
  val lookup2 = "lookup.abc"

  def generateTags: Seq[(String, String)] = Seq("tag1" -> "Everything's fine.")

  "An object mixing in HydraMetrics" should
    "create new counters with new lookup keys + metric names" in {
    shouldCreateNewMetric[Counter](incrementCounter _, countersCache)
  }

  it should
    "create new gauges with new lookup keys + metric names" in {
    shouldCreateNewMetric[Gauge](incrementGauge _, gaugesCache)
  }

  it should "lookup existing counters" in {
    shouldLookupExistingMetric[Counter](incrementCounter _, countersCache)
  }

  it should
    "lookup an existing gauge" in {
    shouldLookupExistingMetric[Gauge](decrementGauge _, gaugesCache)
  }

  it should
    "lookup an existing histogram" in {
    val f = recordToHistogram _

    whenReady(f(lookup, "histogram.metric", 100, generateTags)) { r =>
      whenReady(f(lookup, "histogram.metric", 100, generateTags)) { x =>
        r shouldEqual x
      }
    }
  }

  private def shouldCreateNewMetric[A](
      f: (String, String, => Seq[(String, String)]) => Unit,
      cache: GuavaCache[A]
  ) = {
    cache.get(lookup).map { result => result shouldBe None }

    f(lookup, "metric" + Random.nextInt(Integer.MAX_VALUE), generateTags)

    cache.get(lookup).map { result => result shouldBe a[Some[_]] }
  }

  private def shouldLookupExistingMetric[A](
      f: (String, String, => Seq[(String, String)]) => Unit,
      cache: GuavaCache[A]
  ) = {
    val metric = "metric" + Random.nextInt(Integer.MAX_VALUE)

    f(lookup, metric, generateTags) shouldEqual f(lookup, metric, generateTags)
  }
} 
Example 24
Source File: KinesisProducerIntegrationSpec.scala    From reactive-kinesis   with Apache License 2.0
package com.weightwatchers.reactive.kinesis

import java.io.File

import com.amazonaws.services.kinesis.producer.{KinesisProducer => AWSKinesisProducer}
import com.typesafe.config.ConfigFactory
import com.weightwatchers.reactive.kinesis.common.{
  KinesisSuite,
  KinesisTestConsumer,
  TestCredentials
}
import com.weightwatchers.reactive.kinesis.consumer.KinesisConsumer.ConsumerConf
import com.weightwatchers.reactive.kinesis.models.ProducerEvent
import com.weightwatchers.reactive.kinesis.producer.{KinesisProducer, ProducerConf}
import org.scalatest.concurrent.Eventually
import org.scalatest.mockito.MockitoSugar
import org.scalatest.time.{Millis, Seconds, Span}
import org.scalatest.{BeforeAndAfterAll, FreeSpec, Matchers}

import scala.concurrent.duration._
import scala.language.postfixOps
import scala.util.Random

//scalastyle:off magic.number
class KinesisProducerIntegrationSpec
    extends FreeSpec
    with Matchers
    with MockitoSugar
    with BeforeAndAfterAll
    with Eventually
    with KinesisSuite {

  implicit val ece = scala.concurrent.ExecutionContext.global

  val TestStreamNrOfMessagesPerShard: Long = 0

  implicit override val patienceConfig: PatienceConfig =
    PatienceConfig(timeout = Span(5, Seconds), interval = Span(100, Millis))

  "The KinesisProducer" - {

    "Should publish a message to a stream" in new withKinesisConfForApp(
      "int-test-stream-producer-1"
    ) {

      val conf     = producerConf()
      val producer = KinesisProducer(conf)

      val existingRecordCount = testConsumer.retrieveRecords(conf.streamName, 10).size

      val event = ProducerEvent("1234", Random.alphanumeric.take(10).mkString)
      producer.addUserRecord(event)

      eventually {
        val records: Seq[String] = testConsumer.retrieveRecords(conf.streamName, 10)
        records.size shouldBe (existingRecordCount + 1)
        records should contain(
          new String(event.payload.array(), java.nio.charset.StandardCharsets.UTF_8)
        )
      }
    }
  }
}

//scalastyle:on 
Example 25
Source File: ClickhouseIndexingSubscriberTest.scala    From clickhouse-scala-client   with GNU Lesser General Public License v3.0
package com.crobox.clickhouse.stream

import akka.stream.scaladsl._
import com.crobox.clickhouse.{ClickhouseClient, ClickhouseClientAsyncSpec}
import org.scalatest.concurrent.{Eventually, ScalaFutures}

import scala.concurrent.duration._
import scala.concurrent.{Await, Future, Promise}
import scala.util.{Random, Try}

class ClickhouseIndexingSubscriberTest extends ClickhouseClientAsyncSpec with ScalaFutures with Eventually {

  import system.dispatcher

  val client: ClickhouseClient = new ClickhouseClient(Some(config))

  var subscriberCompletes: Promise[Unit] = Promise[Unit]

  val createDb    = "CREATE DATABASE IF NOT EXISTS test"
  val dropDb      = "DROP DATABASE IF EXISTS test"
  val createTable = """CREATE TABLE test.insert
                      |(
                      |    i UInt64,
                      |    s String,
                      |    a Array(UInt32)
                      |) ENGINE = Memory""".stripMargin

  override protected def beforeEach(): Unit = {
    super.beforeEach()

    Await.ready(for {
      _      <- client.execute(createDb)
      create <- client.execute(createTable)
    } yield create, timeout.duration)

    subscriberCompletes = Promise[Unit]
  }

  override protected def afterEach(): Unit = {
    super.afterEach()

    Await.ready(client.execute(dropDb), timeout.duration)
  }

  def unparsedInserts(key: String): Seq[Map[String, Any]] = (1 to 10).map(
    _ =>
      Map(
        "i" -> Random.nextInt(100),
        "s" -> key,
        "a" -> (1 to Random.nextInt(20)).map(_ => Random.nextInt(200))
    )
  )

  def parsedInserts(key: String) = unparsedInserts(key).map(
    _.mapValues({
      case value: Int           => value.toString
      case value: String        => "\"" + value + "\""
      case value: IndexedSeq[_] => "[" + value.mkString(", ") + "]"
    }).map { case (k, v) => s""""$k" : $v""" }
      .mkString(", ")
  )

  it should "index items" in {
    val inserts = parsedInserts("two")
    val res = Source
      .fromIterator(() => inserts.toIterator)
      .map(data => Insert("test.insert", "{" + data + "}"))
      .runWith(ClickhouseSink.insertSink(config, client, Some("no-overrides")))
    Await.ready(res, 5.seconds)
    checkRowCount("two").map(_ shouldBe inserts.size)
  }

  private def checkRowCount(key: String): Future[Int] =
    client
      .query(s"SELECT count(*) FROM test.insert WHERE s = '$key'")
      .map(res => Try(res.stripLineEnd.toInt).getOrElse(0))
} 
Example 26
Source File: SharedSparkSession.scala    From XSQL   with Apache License 2.0
package org.apache.spark.sql.test

import scala.concurrent.duration._

import org.scalatest.{BeforeAndAfterEach, Suite}
import org.scalatest.concurrent.Eventually

import org.apache.spark.{DebugFilesystem, SparkConf}
import org.apache.spark.sql.{SparkSession, SQLContext}
import org.apache.spark.sql.catalyst.optimizer.ConvertToLocalRelation
import org.apache.spark.sql.internal.SQLConf


  protected override def afterAll(): Unit = {
    try {
      super.afterAll()
    } finally {
      try {
        if (_spark != null) {
          try {
            _spark.sessionState.catalog.reset()
          } finally {
            _spark.stop()
            _spark = null
          }
        }
      } finally {
        SparkSession.clearActiveSession()
        SparkSession.clearDefaultSession()
      }
    }
  }

  protected override def beforeEach(): Unit = {
    super.beforeEach()
    DebugFilesystem.clearOpenStreams()
  }

  protected override def afterEach(): Unit = {
    super.afterEach()
    // Clear all persistent datasets after each test
    spark.sharedState.cacheManager.clearCache()
    // files can be closed from other threads, so wait a bit
    // normally this doesn't take more than 1s
    eventually(timeout(10.seconds), interval(2.seconds)) {
      DebugFilesystem.assertNoOpenStreams()
    }
  }
} 
Example 27
Source File: ServiceBrokerIntegrationTest.scala    From reactive-consul   with MIT License
package stormlantern.consul.client

import java.net.URL

import org.scalatest._
import org.scalatest.concurrent.{ Eventually, IntegrationPatience, ScalaFutures }
import stormlantern.consul.client.dao.akka.AkkaHttpConsulClient
import stormlantern.consul.client.dao.{ ConsulHttpClient, ServiceRegistration }
import stormlantern.consul.client.discovery.{ ConnectionProvider, ConnectionProviderFactory, ConnectionStrategy, ServiceDefinition }
import stormlantern.consul.client.loadbalancers.RoundRobinLoadBalancer
import stormlantern.consul.client.util.{ ConsulDockerContainer, Logging, TestActorSystem }

import scala.concurrent.Future

class ServiceBrokerIntegrationTest extends FlatSpec with Matchers with ScalaFutures with Eventually with IntegrationPatience with ConsulDockerContainer with TestActorSystem with Logging {

  import scala.concurrent.ExecutionContext.Implicits.global

  "The ServiceBroker" should "provide a usable connection to consul" in withConsulHost { (host, port) ⇒
    withActorSystem { implicit actorSystem ⇒
      val akkaHttpClient = new AkkaHttpConsulClient(new URL(s"http://$host:$port"))
      // Register the HTTP interface
      akkaHttpClient.putService(ServiceRegistration("consul-http", Some("consul-http-1"), address = Some(host), port = Some(port)))
      akkaHttpClient.putService(ServiceRegistration("consul-http", Some("consul-http-2"), address = Some(host), port = Some(port)))
      val connectionProviderFactory = new ConnectionProviderFactory {
        override def create(host: String, port: Int): ConnectionProvider = new ConnectionProvider {
          logger.info(s"Asked to create connection provider for $host:$port")
          val httpClient: ConsulHttpClient = new AkkaHttpConsulClient(new URL(s"http://$host:$port"))
          override def getConnection: Future[Any] = Future.successful(httpClient)
        }
      }
      val connectionStrategy = ConnectionStrategy(ServiceDefinition("consul-http"), connectionProviderFactory, new RoundRobinLoadBalancer)
      val sut = ServiceBroker(actorSystem, akkaHttpClient, Set(connectionStrategy))
      eventually {
        sut.withService("consul-http") { connection: ConsulHttpClient ⇒
          connection.getService("bogus").map(_.resource should have size 0)
        }
        sut
      }
    }
  }
} 
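IntegrationPatience, mixed in above, replaces the default patience (150 ms timeout, 15 ms interval) with a more forgiving one (15 s timeout, 150 ms interval) for both eventually and ScalaFutures. The hand-rolled equivalent, should you need different numbers, looks like this (illustrative sketch):

import org.scalatest.concurrent.{Eventually, ScalaFutures}
import org.scalatest.flatspec.AnyFlatSpec
import org.scalatest.matchers.should.Matchers
import org.scalatest.time.{Millis, Seconds, Span}

class HandRolledPatienceSpec extends AnyFlatSpec with Matchers with Eventually with ScalaFutures {
  // Exactly what IntegrationPatience installs: 15 s timeout, 150 ms polling interval.
  implicit override val patienceConfig: PatienceConfig =
    PatienceConfig(timeout = scaled(Span(15, Seconds)), interval = scaled(Span(150, Millis)))

  "a slow integration resource" should "be awaited patiently" in {
    val ready = System.currentTimeMillis() + 500
    eventually { System.currentTimeMillis() should be >= ready }
  }
}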
Example 28
Source File: SeleniumTest.scala    From udash-core   with Apache License 2.0
package io.udash.web

import java.util.concurrent.TimeUnit

import org.openqa.selenium.firefox.{FirefoxDriver, FirefoxOptions}
import org.openqa.selenium.remote.RemoteWebDriver
import org.openqa.selenium.{Dimension, WebElement}
import org.scalatest.concurrent.Eventually
import org.scalatest.time.{Millis, Seconds, Span}
import org.scalatest.{BeforeAndAfterAll, BeforeAndAfterEach}
import org.scalatest.matchers.should.Matchers
import org.scalatest.wordspec.AnyWordSpec

private trait ServerConfig {
  def init(): Unit
  def createUrl(part: String): String
  def destroy(): Unit
}

// Doesn't launch embedded guide app server
private final class ExternalServerConfig(urlPrefix: String) extends ServerConfig {
  require(!urlPrefix.endsWith("/"))

  override def createUrl(part: String): String = {
    require(part.startsWith("/"))
    urlPrefix + part
  }

  override def init(): Unit = {}
  override def destroy(): Unit = {}
}

// Launches embedded guide server
private final class InternalServerConfig extends ServerConfig {
  private val server = Launcher.createApplicationServer()

  override def init(): Unit = server.start()

  override def destroy(): Unit = server.stop()

  override def createUrl(part: String): String = {
    require(part.startsWith("/"))
    s"http://127.0.0.2:${server.port}$part"
  }
}

abstract class SeleniumTest extends AnyWordSpec with Matchers with BeforeAndAfterAll with BeforeAndAfterEach with Eventually {
  override implicit val patienceConfig: PatienceConfig = PatienceConfig(scaled(Span(10, Seconds)), scaled(Span(50, Millis)))

  protected final val driver: RemoteWebDriver = new FirefoxDriver(new FirefoxOptions().setHeadless(true))
  driver.manage().timeouts().implicitlyWait(200, TimeUnit.MILLISECONDS)
  driver.manage().window().setSize(new Dimension(1440, 800))

  protected final def findElementById(id: String): WebElement = eventually {
    driver.findElementById(id)
  }

  protected def url: String

  private val server: ServerConfig = new InternalServerConfig

  override protected def beforeAll(): Unit = {
    super.beforeAll()
    server.init()
  }

  override protected def beforeEach(): Unit = {
    super.beforeEach()
    driver.get(server.createUrl(url))
  }

  override protected def afterAll(): Unit = {
    super.afterAll()
    server.destroy()
    driver.close()
  }
} 
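The scaled(...) wrapper used in the PatienceConfig above multiplies each span by the runner's span scale factor (set with the -F command-line flag), so the same suite can be given longer patience on a slow CI machine without code changes. A tiny sketch of the relationship:

import org.scalatest.concurrent.Eventually
import org.scalatest.flatspec.AnyFlatSpec
import org.scalatest.matchers.should.Matchers
import org.scalatest.time.{Seconds, Span}

class ScaledPatienceSpec extends AnyFlatSpec with Matchers with Eventually {
  "scaled spans" should "multiply by the runner's -F factor" in {
    // scaled(span) is defined as span scaledBy spanScaleFactor; with the default
    // factor of 1.0 it is the identity, with -F 2.0 it doubles every span.
    scaled(Span(10, Seconds)) shouldBe Span(10, Seconds).scaledBy(spanScaleFactor)
  }
}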
Example 29
Source File: HttpMetricsSpec.scala    From kamon-http4s   with Apache License 2.0
package kamon.http4s

import cats.effect._
import kamon.testkit.InstrumentInspection
import org.http4s.HttpRoutes
import org.http4s.dsl.io._
import org.http4s.server.Server
import org.http4s.server.blaze.BlazeServerBuilder
import org.scalatest.concurrent.Eventually
import org.scalatest.time.SpanSugar
import org.scalatest.{Matchers, OptionValues, WordSpec}
import cats.implicits._
import kamon.http4s.middleware.server.KamonSupport
import kamon.instrumentation.http.HttpServerMetrics
import org.http4s.client.blaze.BlazeClientBuilder
import org.http4s.client.Client

import scala.concurrent.ExecutionContext
import org.http4s.implicits._

class HttpMetricsSpec extends WordSpec
  with Matchers
  with Eventually
  with SpanSugar
  with InstrumentInspection.Syntax
  with OptionValues {

  implicit val contextShift: ContextShift[IO] = IO.contextShift(ExecutionContext.global)
  implicit val timer: Timer[IO] = IO.timer(ExecutionContext.global)

  val srv =
    BlazeServerBuilder[IO]
      .bindLocal(43567)
      .withHttpApp(KamonSupport(HttpRoutes.of[IO] {
        case GET -> Root / "tracing" / "ok" =>  Ok("ok")
        case GET -> Root / "tracing" / "not-found"  => NotFound("not-found")
        case GET -> Root / "tracing" / "error"  => InternalServerError("This page will generate an error!")
      }, "/127.0.0.1", 43567).orNotFound)
      .resource

  val client =
    BlazeClientBuilder[IO](ExecutionContext.global).withMaxTotalConnections(10).resource

  val metrics =
    Resource.liftF(IO(HttpServerMetrics.of("http4s.server", "/127.0.0.1", 43567)))


  def withServerAndClient[A](f: (Server[IO], Client[IO], HttpServerMetrics.HttpServerInstruments) => IO[A]): A =
   (srv, client, metrics).tupled.use(f.tupled).unsafeRunSync()

  private def get[F[_]: ConcurrentEffect](path: String)(server: Server[F], client: Client[F]): F[String] = {
    client.expect[String](s"http://127.0.0.1:${server.address.getPort}$path")
  }

  "The HttpMetrics" should {

    "track the total of active requests" in withServerAndClient { (server, client, serverMetrics) =>

      val requests = List
        .fill(100) {
          get("/tracing/ok")(server, client)
        }.parSequence_

      val test = IO {
        serverMetrics.activeRequests.distribution().max should be > 1L
        serverMetrics.activeRequests.distribution().min shouldBe 0L
      }
      requests *> test
    }

    "track the response time with status code 2xx" in withServerAndClient { (server, client, serverMetrics) =>
      val requests: IO[Unit] = List.fill(100)(get("/tracing/ok")(server, client)).sequence_

      val test = IO(serverMetrics.requestsSuccessful.value should be >= 0L)

      requests *> test
    }

    "track the response time with status code 4xx" in withServerAndClient { (server, client, serverMetrics) =>
      val requests: IO[Unit] = List.fill(100)(get("/tracing/not-found")(server, client).attempt).sequence_

      val test = IO(serverMetrics.requestsClientError.value should be >= 0L)

      requests *> test
    }

    "track the response time with status code 5xx" in withServerAndClient { (server, client, serverMetrics) =>
      val requests: IO[Unit] = List.fill(100)(get("/tracing/error")(server, client).attempt).sequence_

      val test = IO(serverMetrics.requestsServerError.value should be >= 0L)

      requests *> test
    }
  }
} 
Example 30
Source File: KafkaSpecBase.scala    From openwhisk   with Apache License 2.0
package org.apache.openwhisk.core.monitoring.metrics

import akka.kafka.testkit.scaladsl.{EmbeddedKafkaLike, ScalatestKafkaSpec}
import akka.stream.ActorMaterializer
import net.manub.embeddedkafka.EmbeddedKafka
import org.scalatest._
import org.scalatest.concurrent.{Eventually, IntegrationPatience, ScalaFutures}

import scala.concurrent.duration.{DurationInt, FiniteDuration}

abstract class KafkaSpecBase
    extends ScalatestKafkaSpec(6065)
    with Matchers
    with ScalaFutures
    with FlatSpecLike
    with EmbeddedKafka
    with EmbeddedKafkaLike
    with IntegrationPatience
    with Eventually
    with EventsTestHelper { this: Suite =>
  implicit val timeoutConfig: PatienceConfig = PatienceConfig(1.minute)
  implicit val materializer: ActorMaterializer = ActorMaterializer()
  override val sleepAfterProduce: FiniteDuration = 10.seconds
  override protected val topicCreationTimeout = 60.seconds
} 
Example 31
Source File: TestSpec.scala    From akka-http-test   with Apache License 2.0
package com.github.dnvriend

import akka.actor.{ ActorRef, ActorSystem, PoisonPill }
import akka.stream.Materializer
import akka.stream.scaladsl.Source
import akka.stream.testkit.TestSubscriber
import akka.stream.testkit.scaladsl.TestSink
import akka.testkit.TestProbe
import akka.util.Timeout
import org.scalatest._
import org.scalatest.concurrent.{ Eventually, ScalaFutures }
import org.scalatestplus.play.guice.GuiceOneServerPerSuite
import play.api.inject.BindingKey
import play.api.test.WsTestClient

import scala.concurrent.duration._
import scala.concurrent.{ ExecutionContext, Future }
import scala.reflect.ClassTag
import scala.util.Try

class TestSpec extends FlatSpec
    with Matchers
    with GivenWhenThen
    with OptionValues
    with TryValues
    with ScalaFutures
    with WsTestClient
    with BeforeAndAfterAll
    with BeforeAndAfterEach
    with Eventually
    with GuiceOneServerPerSuite {

  def getComponent[A: ClassTag] = app.injector.instanceOf[A]

  def getAnnotatedComponent[A](name: String)(implicit ct: ClassTag[A]): A =
    app.injector.instanceOf[A](BindingKey(ct.runtimeClass.asInstanceOf[Class[A]]).qualifiedWith(name))

  // set the port number of the HTTP server
  override lazy val port: Int = 8080
  implicit val timeout: Timeout = 10.seconds
  implicit val pc: PatienceConfig = PatienceConfig(timeout = 30.seconds, interval = 300.millis)
  implicit val system: ActorSystem = getComponent[ActorSystem]
  implicit val ec: ExecutionContext = getComponent[ExecutionContext]
  implicit val mat: Materializer = getComponent[Materializer]

  // ================================== Supporting Operations ====================================
  implicit class PimpedByteArray(self: Array[Byte]) {
    def getString: String = new String(self)
  }

  implicit class PimpedFuture[T](self: Future[T]) {
    def toTry: Try[T] = Try(self.futureValue)
  }

  implicit class SourceOps[A](src: Source[A, _]) {
    def testProbe(f: TestSubscriber.Probe[A] => Unit): Unit =
      f(src.runWith(TestSink.probe(system)))
  }

  def killActors(actors: ActorRef*): Unit = {
    val tp = TestProbe()
    actors.foreach { (actor: ActorRef) =>
      tp watch actor
      actor ! PoisonPill
      tp.expectTerminated(actor)
    }
  }

  override protected def beforeEach(): Unit = {
  }
} 
Example 32
Source File: RerunnableClockSuite.scala    From catbird   with Apache License 2.0
package io.catbird.util.effect

import java.time.Instant
import java.util.concurrent.TimeUnit

import cats.effect.Clock
import com.twitter.util.Await
import io.catbird.util.Rerunnable
import org.scalatest.Outcome
import org.scalatest.concurrent.Eventually
import org.scalatest.funsuite.FixtureAnyFunSuite


class RerunnableClockSuite extends FixtureAnyFunSuite with Eventually {

  protected final class FixtureParam {
    def now: Instant = Instant.now()

    val clock: Clock[Rerunnable] = RerunnableClock()
  }

  test("Retrieval of real time") { f =>
    eventually {
      val result = Await.result(
        f.clock.realTime(TimeUnit.MILLISECONDS).map(Instant.ofEpochMilli).run
      )

      assert(java.time.Duration.between(result, f.now).abs().toMillis < 50)
    }
  }

  test("Retrieval of monotonic time") { f =>
    eventually {
      val result = Await.result(
        f.clock.monotonic(TimeUnit.NANOSECONDS).run
      )

      val durationBetween = Math.abs(System.nanoTime() - result)
      assert(TimeUnit.MILLISECONDS.convert(durationBetween, TimeUnit.NANOSECONDS) < 5)
    }
  }

  override protected def withFixture(test: OneArgTest): Outcome = withFixture(test.toNoArgTest(new FixtureParam))
} 
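The clock deltas asserted above can transiently exceed their bounds (a GC pause is enough), which is precisely the failure mode eventually absorbs: it retries the block whenever it throws and only rethrows the last failure once the patience is exhausted. The retry semantics in isolation:

import org.scalatest.concurrent.Eventually
import org.scalatest.funsuite.AnyFunSuite

class RetrySemanticsSuite extends AnyFunSuite with Eventually {
  test("eventually retries a throwing block until it passes") {
    var attempts = 0
    eventually {
      attempts += 1
      assert(attempts >= 3) // throws (and is retried) on the first two attempts
    }
    assert(attempts >= 3)
  }
}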
Example 33
Source File: EmbeddedKafkaSpecSupport.scala    From embedded-kafka   with MIT License
package net.manub.embeddedkafka

import java.net.{InetAddress, Socket}

import net.manub.embeddedkafka.EmbeddedKafkaSpecSupport.{
  Available,
  NotAvailable,
  ServerStatus
}
import org.scalatest.Assertion
import org.scalatest.concurrent.{Eventually, IntegrationPatience}
import org.scalatest.matchers.should.Matchers
import org.scalatest.time.{Milliseconds, Seconds, Span}
import org.scalatest.wordspec.AnyWordSpecLike

import scala.util.{Failure, Success, Try}

trait EmbeddedKafkaSpecSupport
    extends AnyWordSpecLike
    with Matchers
    with Eventually
    with IntegrationPatience {

  implicit val config: PatienceConfig =
    PatienceConfig(Span(1, Seconds), Span(100, Milliseconds))

  def expectedServerStatus(port: Int, expectedStatus: ServerStatus): Assertion =
    eventually {
      status(port) shouldBe expectedStatus
    }

  private def status(port: Int): ServerStatus = {
    Try(new Socket(InetAddress.getByName("localhost"), port)) match {
      case Failure(_) => NotAvailable
      case Success(_) => Available
    }
  }
}

object EmbeddedKafkaSpecSupport {
  sealed trait ServerStatus
  case object Available    extends ServerStatus
  case object NotAvailable extends ServerStatus
} 
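expectedServerStatus works because the Socket constructor throws while nothing listens on the port, so the eventually wrapper keeps re-probing until a connection is accepted. The same polling idea, with each probe socket closed after use (the helper above leaves its successful probe open), might look like:

import java.net.{InetAddress, Socket}
import org.scalatest.concurrent.Eventually

trait PortProbe extends Eventually {
  // Retried by eventually because the Socket constructor throws until something listens.
  def waitForPort(port: Int): Unit = eventually {
    val probe = new Socket(InetAddress.getByName("localhost"), port)
    probe.close()
  }
}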
Example 34
Source File: SpecHelpers.scala    From money   with Apache License 2.0
package com.comcast.money.core

import com.comcast.money.api.{ SpanId, SpanInfo }
import com.comcast.money.core.handlers.LoggingSpanHandler
import com.typesafe.config.Config
import org.scalatest.Matchers
import org.scalatest.concurrent.Eventually
import org.scalatest.time.{ Millis, Span }

import scala.collection.{ Set, mutable }
import scala.concurrent.duration._

object LogRecord {
  private val spans = new mutable.ArrayBuffer[SpanInfo]
  private val messages = new mutable.HashMap[String, mutable.Set[String]] with mutable.MultiMap[String, String]

  def clear(): Unit = {
    messages.clear()
    spans.clear()
  }

  def add(log: String, message: String): Unit = messages.addBinding(log, message)

  def add(spanInfo: SpanInfo): Unit = spans.append(spanInfo)

  def contains(log: String)(cond: String => Boolean): Boolean = messages.entryExists(log, cond)

  def contains(cond: SpanInfo => Boolean): Boolean = spans.exists(cond)

  def log(name: String): Set[String] = messages.getOrElse(name, mutable.Set.empty)
}

class LogRecorderSpanHandler extends LoggingSpanHandler {

  override def configure(config: Config): Unit = {
    super.configure(config)
    logFunction = record
  }

  override def handle(spanInfo: SpanInfo): Unit = {
    LogRecord.add(spanInfo)
    super.handle(spanInfo)
  }

  def record(message: String): Unit = LogRecord.add("log", message)
}

trait SpecHelpers extends Eventually { this: Matchers =>

  def awaitCond(condition: => Boolean, max: FiniteDuration = 2.seconds, interval: Duration = 100.millis, message: String = "failed waiting"): Unit = {
    implicit val patienceConfig: PatienceConfig = PatienceConfig(Span(max.toMillis, Millis), Span(interval.toMillis, Millis))
    eventually {
      assert(condition, message)
    }
  }

  def expectSpanInfoThat(message: String, condition: SpanInfo => Boolean, wait: FiniteDuration = 2.seconds): Unit = {
    awaitCond(
      LogRecord.contains(condition), wait, 100 milliseconds,
      s"Expected span info that $message not found after $wait")
  }

  def dontExpectSpanInfoThat(message: String, condition: SpanInfo => Boolean, wait: FiniteDuration = 2.seconds): Unit = {
    awaitCond(
      !LogRecord.contains(condition), wait, 100 milliseconds,
      s"Not expected span info that $message found after $wait")
  }

  def expectLogMessageContaining(contains: String, wait: FiniteDuration = 2.seconds): Unit = {
    awaitCond(
      LogRecord.contains("log")(_.contains(contains)), wait, 100 milliseconds,
      s"Expected log message containing string $contains not found after $wait")
  }

  def expectLogMessageContainingStrings(strings: Seq[String], wait: FiniteDuration = 2.seconds): Unit = {
    awaitCond(
      LogRecord.contains("log")(s => strings.forall(s.contains)), wait, 100 milliseconds,
      s"Expected log message containing $strings not found after $wait")
  }

  def testSpan(id: SpanId) = Money.Environment.factory.newSpan(id, "test")
} 
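awaitCond above demonstrates a handy trick: constructing a one-off PatienceConfig inside the method, so every call site chooses its own timeout and interval while shadowing whatever implicit the suite defines. A reduced sketch of the same pattern:

import scala.concurrent.duration._
import org.scalatest.Assertions
import org.scalatest.concurrent.Eventually
import org.scalatest.time.{Millis, Span}

trait PollingSupport extends Eventually with Assertions {
  def pollUntil(cond: => Boolean, max: FiniteDuration = 2.seconds, every: FiniteDuration = 100.millis): Unit = {
    // The local implicit shadows any suite-wide PatienceConfig for this eventually call.
    implicit val patienceConfig: PatienceConfig =
      PatienceConfig(Span(max.toMillis, Millis), Span(every.toMillis, Millis))
    eventually(assert(cond, s"condition not met within $max"))
  }
}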
Example 35
Source File: OapSharedSQLContext.scala    From OAP   with Apache License 2.0
package org.apache.spark.sql.test

import scala.concurrent.duration._

import org.scalatest.{BeforeAndAfterEach, Suite}
import org.scalatest.concurrent.Eventually

import org.apache.spark.{DebugFilesystem, SparkConf}
import org.apache.spark.sql.{SparkSession, SQLContext}
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.oap.OapRuntime


trait OapSharedSQLContext extends SQLTestUtils with OapSharedSparkSession

// NOTE: the excerpt elides the header of the backing trait; minimally reconstructed here.
trait OapSharedSparkSession extends SQLTestUtils with BeforeAndAfterEach with Eventually { self: Suite =>
  // (the `_spark` var and OAP session-building members are also elided)
  protected override def afterAll(): Unit = {
    try {
      super.afterAll()
    } finally {
      try {
        if (_spark != null) {
          try {
            _spark.sessionState.catalog.reset()
          } finally {
            OapRuntime.stop()
            _spark.stop()
            _spark = null
          }
        }
      } finally {
        SparkSession.clearActiveSession()
        SparkSession.clearDefaultSession()
      }
    }
  }

  protected override def beforeEach(): Unit = {
    super.beforeEach()
    DebugFilesystem.clearOpenStreams()
  }

  protected override def afterEach(): Unit = {
    super.afterEach()
    // Clear all persistent datasets after each test
    spark.sharedState.cacheManager.clearCache()
    // files can be closed from other threads, so wait a bit
    // normally this doesn't take more than 1s
    eventually(timeout(10.seconds), interval(2.seconds)) {
      DebugFilesystem.assertNoOpenStreams()
    }
  }
} 
Example 36
Source File: StdinForSystemSpec.scala    From incubator-toree   with Apache License 2.0
package system

import org.apache.toree.kernel.protocol.v5.client.SparkKernelClient
import org.scalatest.concurrent.Eventually
import org.scalatest.time.{Seconds, Milliseconds, Span}
import org.scalatest.{BeforeAndAfterAll, FunSpec, Matchers}
import test.utils.root.{SparkKernelClientDeployer, SparkKernelDeployer}

// NOTE: the excerpt elides the class header and fixtures; minimally reconstructed here.
class StdinForSystemSpec extends FunSpec with Matchers with BeforeAndAfterAll with Eventually {
  // (the kernel client and the TestReplyString fixture are set up in the elided portion)
  describe("Stdin for System") {
    describe("when the kernel requests input") {
      ignore("should receive input based on the client's response function") {
        var response: String = ""
        client.setResponseFunction((_, _) => TestReplyString)

        // Read in a chunk of data (our reply string) and return it as a string
        // to be verified by the test
        client.execute(
          """
            |var result: Array[Byte] = Array()
            |val in = kernel.in
            |do {
            |    result = result :+ in.read().toByte
            |} while(in.available() > 0)
            |new String(result)
          """.stripMargin
        ).onResult { result =>
          response = result.data("text/plain")
        }.onError { _ =>
          fail("Client execution to trigger kernel input request failed!")
        }

        eventually {
          response should include (TestReplyString)
        }
      }
    }
  }

} 
Example 37
Source File: BrokerTransformerSpec.scala    From incubator-toree   with Apache License 2.0
package org.apache.toree.interpreter.broker

import org.apache.toree.interpreter.{ExecuteError, Results}
import org.scalatest.concurrent.Eventually
import scala.concurrent.Promise
import org.scalatest.{FunSpec, Matchers, OneInstancePerTest}

class BrokerTransformerSpec extends FunSpec with Matchers
  with OneInstancePerTest with Eventually
{
  private val brokerTransformer = new BrokerTransformer

  describe("BrokerTransformer") {
    describe("#transformToInterpreterResult") {
      it("should convert to success with result output if no failure") {
        val codeResultPromise = Promise[BrokerTypes.CodeResults]()

        val transformedFuture = brokerTransformer.transformToInterpreterResult(
          codeResultPromise.future
        )

        val successOutput = "some success"
        codeResultPromise.success(successOutput)

        eventually {
          val result = transformedFuture.value.get.get
          result should be((Results.Success, Left(Map("text/plain" -> successOutput))))
        }
      }

      it("should convert to error with broker exception if failure") {
        val codeResultPromise = Promise[BrokerTypes.CodeResults]()

        val transformedFuture = brokerTransformer.transformToInterpreterResult(
          codeResultPromise.future
        )

        val failureException = new BrokerException("some failure")
        codeResultPromise.failure(failureException)

        eventually {
          val result = transformedFuture.value.get.get
          result should be((Results.Error, Right(ExecuteError(
            name = failureException.getClass.getName,
            value = failureException.getLocalizedMessage,
            stackTrace = failureException.getStackTrace.map(_.toString).toList
          ))))
        }
      }
    }
  }
} 
Example 38
Source File: SynchronySpec.scala    From censorinus   with MIT License
package github.gphat.censorinus

import org.scalatest._
import org.scalatest.concurrent.Eventually
import org.scalatest.flatspec.AnyFlatSpec
import org.scalatest.matchers.should.Matchers

import github.gphat.censorinus.statsd.Encoder

class SynchronySpec extends AnyFlatSpec with Matchers with Eventually {

  "Client" should "deal with gauges" in {
    val s = new TestSender(1)
    val client = new Client(encoder = Encoder, sender = s)

    // Queue up a message in the sender to ensure we can't publish yet.
    s.buffer.offer("BOO!")
    s.buffer.size should be (1)

    client.enqueue(GaugeMetric(name = "foobar", value = 1.0))
    s.buffer.size should be (1) // New metric won't be there yet
    s.awaitMessage() should be ("BOO!")
    s.awaitMessage() should include ("foobar")
    client.shutdown
  }

  it should "be synchronous" in {
    val s = new TestSender()
    val client = new Client(encoder = Encoder, sender = s, asynchronous = false)

    client.enqueue(GaugeMetric(name = "foobar", value = 1.0))
    val m = s.buffer.poll()
    m should include ("foobar")
    client.shutdown
  }
} 
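SynchronySpec relies on the sender's blocking awaitMessage rather than on eventually, but the same buffer checks become racy as soon as the publisher runs on another thread. A short illustration of when the polling form earns its keep (plain queue, illustrative values):

import java.util.concurrent.ConcurrentLinkedQueue
import org.scalatest.concurrent.Eventually
import org.scalatest.flatspec.AnyFlatSpec
import org.scalatest.matchers.should.Matchers

class AsyncBufferSpec extends AnyFlatSpec with Matchers with Eventually {
  "a metric published from another thread" should "show up after a short wait" in {
    val buffer = new ConcurrentLinkedQueue[String]()
    new Thread(() => { Thread.sleep(50); buffer.offer("foobar:1|g") }).start()
    eventually { buffer.size shouldBe 1 } // polls every 15 ms (default) until the offer lands
  }
}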
Example 39
Source File: SessionSpec.scala    From incubator-livy   with Apache License 2.0
package org.apache.livy.repl

import java.util.Properties
import java.util.concurrent.{ConcurrentLinkedQueue, CountDownLatch, TimeUnit}

import org.apache.spark.SparkConf
import org.scalatest.{BeforeAndAfter, FunSpec}
import org.scalatest.Matchers._
import org.scalatest.concurrent.Eventually
import org.scalatest.time._

import org.apache.livy.LivyBaseUnitTestSuite
import org.apache.livy.repl.Interpreter.ExecuteResponse
import org.apache.livy.rsc.RSCConf
import org.apache.livy.sessions._

class SessionSpec extends FunSpec with Eventually with LivyBaseUnitTestSuite with BeforeAndAfter {
  override implicit val patienceConfig =
    PatienceConfig(timeout = scaled(Span(30, Seconds)), interval = scaled(Span(100, Millis)))

  private val rscConf = new RSCConf(new Properties()).set(RSCConf.Entry.SESSION_KIND, "spark")

  describe("Session") {
    var session: Session = null

    after {
      if (session != null) {
        session.close()
        session = null
      }
    }

    it("should call state changed callbacks in happy path") {
      val expectedStateTransitions =
        Array("not_started", "starting", "idle", "busy", "idle", "busy", "idle")
      val actualStateTransitions = new ConcurrentLinkedQueue[String]()

      session = new Session(rscConf, new SparkConf(), None,
        { s => actualStateTransitions.add(s.toString) })
      session.start()
      session.execute("")

      eventually {
        actualStateTransitions.toArray shouldBe expectedStateTransitions
      }
    }

    it("should not transit to idle if there're any pending statements.") {
      val expectedStateTransitions =
        Array("not_started", "starting", "idle", "busy", "busy", "busy", "idle", "busy", "idle")
      val actualStateTransitions = new ConcurrentLinkedQueue[String]()

      val blockFirstExecuteCall = new CountDownLatch(1)
      val interpreter = new SparkInterpreter(new SparkConf()) {
        override def execute(code: String): ExecuteResponse = {
          blockFirstExecuteCall.await(10, TimeUnit.SECONDS)
          super.execute(code)
        }
      }
      session = new Session(rscConf, new SparkConf(), Some(interpreter),
        { s => actualStateTransitions.add(s.toString) })
      session.start()

      for (_ <- 1 to 2) {
        session.execute("")
      }

      blockFirstExecuteCall.countDown()
      eventually {
        actualStateTransitions.toArray shouldBe expectedStateTransitions
      }
    }

    it("should remove old statements when reaching threshold") {
      rscConf.set(RSCConf.Entry.RETAINED_STATEMENTS, 2)
      session = new Session(rscConf, new SparkConf())
      session.start()

      session.statements.size should be (0)
      session.execute("")
      session.statements.size should be (1)
      session.statements.map(_._1).toSet should be (Set(0))
      session.execute("")
      session.statements.size should be (2)
      session.statements.map(_._1).toSet should be (Set(0, 1))
      session.execute("")
      eventually {
        session.statements.size should be (2)
        session.statements.map(_._1).toSet should be (Set(1, 2))
      }

      // Continue submitting statements, total statements in memory should be 2.
      session.execute("")
      eventually {
        session.statements.size should be (2)
        session.statements.map(_._1).toSet should be (Set(2, 3))
      }
    }
  }
} 
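The pattern above (mutate a concurrent collection from background threads, then snapshot it inside eventually) works because the block is re-evaluated from scratch on every retry, so each poll sees the latest contents. Compactly:

import java.util.concurrent.ConcurrentLinkedQueue
import org.scalatest.concurrent.Eventually
import org.scalatest.funspec.AnyFunSpec
import org.scalatest.matchers.should.Matchers

class StateTransitionSpec extends AnyFunSpec with Matchers with Eventually {
  describe("eventually") {
    it("sees transitions recorded by another thread") {
      val transitions = new ConcurrentLinkedQueue[String]()
      new Thread(() => Seq("starting", "idle", "busy", "idle").foreach(transitions.add)).start()
      eventually {
        // Re-read on every retry until the full sequence has arrived.
        transitions.toArray shouldBe Array[AnyRef]("starting", "idle", "busy", "idle")
      }
    }
  }
}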
Example 40
Source File: HelloWorldServiceSpec.scala    From lagom   with Apache License 2.0
package com.example.helloworld.impl

import com.lightbend.lagom.scaladsl.server.LocalServiceLocator
import com.lightbend.lagom.scaladsl.testkit.ServiceTest
import org.scalatest.BeforeAndAfterAll
import org.scalatest.matchers.should.Matchers
import org.scalatest.wordspec.AsyncWordSpec
import com.example.helloworld.api._
import org.scalatest.concurrent.Eventually

import scala.concurrent.Await
import scala.concurrent.duration._

class HelloWorldServiceSpec
    extends AsyncWordSpec
    with Matchers
    with BeforeAndAfterAll
    with Eventually {

  private val server = ServiceTest.startServer(
    ServiceTest.defaultSetup
      .withCassandra()
  ) { ctx =>
    new HelloWorldApplication(ctx) with LocalServiceLocator
  }

  val client: HelloWorldService =
    server.serviceClient.implement[HelloWorldService]

  override protected def afterAll(): Unit = server.stop()

  "Hello World service" should {

    "say hello" in {
      client.hello("Alice").invoke().map { answer =>
        answer should ===("""Hello, Alice!
            |Started reports: default-projected-message
            |Stopped reports: default-projected-message
            |""".stripMargin)
      }
    }

    "allow responding with a custom message" in {
      for {
        _ <- client.useGreeting("Bob", "Hi").invoke()
        answer <- client.hello("Bob").invoke()
      } yield {
        answer should ===("""Hi, Bob!
              |Started reports: default-projected-message
              |Stopped reports: default-projected-message
              |""".stripMargin)
      }

      implicit val patienceConfig: PatienceConfig = PatienceConfig(timeout = 25.seconds, interval = 300.millis)
      eventually{
        client.hello("Bob").invoke().map(_ should ===(
            """Hi, Bob!
              |Started reports: Hi
              |Stopped reports: default-projected-message
              |""".stripMargin
          )
        )
      }

    }
  }
} 
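One subtlety in the last test above: eventually does not await a Future produced by its block, so a retried block that merely maps over a future can "pass" while the mapped assertion is still pending. Forcing each retry to block on a concrete answer, via ScalaFutures, avoids that. A hedged sketch (the hello stand-in is hypothetical, not the Lagom client above):

import scala.concurrent.Future
import org.scalatest.concurrent.{Eventually, ScalaFutures}
import org.scalatest.matchers.should.Matchers
import org.scalatest.wordspec.AnyWordSpec

class AwaitedRetrySpec extends AnyWordSpec with Matchers with Eventually with ScalaFutures {
  private def hello(name: String): Future[String] = Future.successful(s"Hi, $name!")

  "a projected read side" should {
    "be polled with a blocking check on each retry" in {
      eventually {
        hello("Bob").futureValue should startWith("Hi") // retried until the awaited value matches
      }
    }
  }
}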
Example 41
Source File: ClusterDistributionSpec.scala    From lagom   with Apache License 2.0
package com.lightbend.lagom.internal.cluster

import akka.actor.Props
import com.lightbend.lagom.internal.cluster.ClusterDistribution.EnsureActive
import akka.pattern._
import org.scalatest.concurrent.Eventually
import org.scalatest.concurrent.ScalaFutures
import scala.concurrent.duration._

import akka.cluster.sharding.ShardRegion.CurrentShardRegionState
import akka.cluster.sharding.ShardRegion.GetShardRegionState

import com.typesafe.config.Config
import com.typesafe.config.ConfigFactory

class ClusterDistributionSpecMultiJvmNode1 extends ClusterDistributionSpec
class ClusterDistributionSpecMultiJvmNode2 extends ClusterDistributionSpec
class ClusterDistributionSpecMultiJvmNode3 extends ClusterDistributionSpec

object ClusterDistributionSpec extends ClusterMultiNodeConfig {
  protected override def systemConfig: Config =
    ConfigFactory.parseString("""
      akka.cluster.sharding.rebalance-interval = 1s
      """).withFallback(super.systemConfig)
}

class ClusterDistributionSpec
    extends ClusteredMultiNodeUtils(numOfNodes = 3, ClusterDistributionSpec)
    with ScalaFutures
    with Eventually {
  private val ensureActiveInterval: FiniteDuration = 1.second
  private val distributionSettings: ClusterDistributionSettings =
    ClusterDistributionSettings(system)
      .copy(ensureActiveInterval = ensureActiveInterval)

  "A ClusterDistribution" must {
    "distribute the entityIds across nodes (so all nodes get a response)" in {
      val numOfEntities        = 20
      val minimalShardsPerNode = numOfEntities / numOfNodes

      val typeName = "CDTest"
      // There'll be 3 nodes in this test and each node will host a TestProbe.
      // Cluster Distribution on each node will create a `FakeActor.props` pointing
      // back to its own TestProbe. We request the creation of 20 FakeActors from ClusterDistribution
      // with the expectation that there'll be 6 or 7 FakeActors for each TestProbe.
      val props: Props = FakeActor.props
      val entityIds    = (1 to numOfEntities).map(i => s"test-entity-id-$i").toSet

      // Load the extension and wait for other nodes to be ready before proceeding
      val cdExtension = ClusterDistribution(system)
      enterBarrier("cluster-distribution-extension-is-loaded")

      val shardRegion =
        cdExtension.start(
          typeName,
          props,
          entityIds,
          distributionSettings
        )

      // don't complete the test in a node until the other nodes have had the chance to complete their assertions too.
      completionBarrier("assertion-completed") {
        implicit val patienceConfig: PatienceConfig = PatienceConfig(timeout = 45.seconds, interval = 200.millis)
        eventually {
          val shardRegionState =
            shardRegion
              .ask(GetShardRegionState)(3.seconds)
              .mapTo[CurrentShardRegionState]
              .futureValue

          shardRegionState.shards.size should be >= minimalShardsPerNode
        }
      }
    }
  }

  private def completionBarrier[T](barrierName: String)(block: => T): T = {
    try {
      block
    } finally {
      enterBarrier(barrierName)
    }
  }
}

import akka.actor.Actor
import akka.actor.Props

object FakeActor {
  def props: Props = Props(new FakeActor)
}

// This actor keeps a reference to the test instance that created it (this is a multi-node test
// so there are multiple tests instances).
// Each node in the cluster may send messages to this actor but this actor will only report back
// to its creator.
class FakeActor extends Actor {
  override def receive = {
    case EnsureActive(_) =>
  }
} 
Example 42
Source File: ProducerStubSpec.scala    From lagom   with Apache License 2.0
package com.lightbend.lagom.scaladsl.testkit

import com.lightbend.lagom.scaladsl.api.broker.Topic
import com.lightbend.lagom.scaladsl.server.LagomApplicationContext
import com.lightbend.lagom.scaladsl.server.LocalServiceLocator
import com.lightbend.lagom.scaladsl.testkit.services._
import org.scalatest.concurrent.Eventually
import org.scalatest.time.Seconds
import org.scalatest.time.Span
import org.scalatest.BeforeAndAfterAll
import org.scalatest.matchers.should.Matchers
import org.scalatest.wordspec.AsyncWordSpec

class ProducerStubSpec extends AsyncWordSpec with Matchers with BeforeAndAfterAll with Eventually {
  var producerStub: ProducerStub[AlphaEvent] = _

  private val stubbedApplication: LagomApplicationContext => DownstreamApplication = { ctx =>
    new DownstreamApplication(ctx) with LocalServiceLocator with TestTopicComponents {
      val stubFactory = new ProducerStubFactory(actorSystem, materializer)
      producerStub = stubFactory.producer[AlphaEvent](AlphaService.TOPIC_ID)
      override lazy val alphaService = new AlphaServiceStub(producerStub)
    }
  }

  "The ProducerStub" should {
    "send message to consuming services" in ServiceTest.withServer(ServiceTest.defaultSetup.withCluster())(
      stubbedApplication
    ) { server =>
      implicit val exCtx = server.application.actorSystem.dispatcher
      producerStub.send(AlphaEvent(22))
      eventually(timeout(Span(5, Seconds))) {
        server.serviceClient
          .implement[CharlieService]
          .messages
          .invoke()
          .map { response =>
            response should ===(Seq(ReceivedMessage("A", 22)))
          }
          .recover {
            case t: Throwable => fail(t)
          }
      }
    }
  }
}

class AlphaServiceStub(stub: ProducerStub[AlphaEvent]) extends AlphaService {
  override def messages: Topic[AlphaEvent] = stub.topic
} 
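eventually(timeout(Span(5, Seconds))) { ... } above overrides only the timeout; the polling interval still comes from the implicit PatienceConfig. Isolated:

import org.scalatest.concurrent.Eventually
import org.scalatest.flatspec.AnyFlatSpec
import org.scalatest.matchers.should.Matchers
import org.scalatest.time.{Seconds, Span}

class TimeoutOnlySpec extends AnyFlatSpec with Matchers with Eventually {
  "a timeout-only override" should "keep the configured interval" in {
    val start = System.currentTimeMillis()
    // 5 s budget from the explicit timeout; retries every 15 ms (the default interval).
    eventually(timeout(Span(5, Seconds))) {
      (System.currentTimeMillis() - start) should be >= 200L
    }
  }
}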
Example 43
Source File: KafkaStreamSuite.scala    From sparkoscope   with Apache License 2.0
package org.apache.spark.streaming.kafka

import scala.collection.mutable
import scala.concurrent.duration._
import scala.language.postfixOps
import scala.util.Random

import kafka.serializer.StringDecoder
import org.scalatest.BeforeAndAfterAll
import org.scalatest.concurrent.Eventually

import org.apache.spark.{SparkConf, SparkFunSuite}
import org.apache.spark.storage.StorageLevel
import org.apache.spark.streaming.{Milliseconds, StreamingContext}

class KafkaStreamSuite extends SparkFunSuite with Eventually with BeforeAndAfterAll {
  private var ssc: StreamingContext = _
  private var kafkaTestUtils: KafkaTestUtils = _

  override def beforeAll(): Unit = {
    kafkaTestUtils = new KafkaTestUtils
    kafkaTestUtils.setup()
  }

  override def afterAll(): Unit = {
    if (ssc != null) {
      ssc.stop()
      ssc = null
    }

    if (kafkaTestUtils != null) {
      kafkaTestUtils.teardown()
      kafkaTestUtils = null
    }
  }

  test("Kafka input stream") {
    val sparkConf = new SparkConf().setMaster("local[4]").setAppName(this.getClass.getSimpleName)
    ssc = new StreamingContext(sparkConf, Milliseconds(500))
    val topic = "topic1"
    val sent = Map("a" -> 5, "b" -> 3, "c" -> 10)
    kafkaTestUtils.createTopic(topic)
    kafkaTestUtils.sendMessages(topic, sent)

    val kafkaParams = Map("zookeeper.connect" -> kafkaTestUtils.zkAddress,
      "group.id" -> s"test-consumer-${Random.nextInt(10000)}",
      "auto.offset.reset" -> "smallest")

    val stream = KafkaUtils.createStream[String, String, StringDecoder, StringDecoder](
      ssc, kafkaParams, Map(topic -> 1), StorageLevel.MEMORY_ONLY)
    val result = new mutable.HashMap[String, Long]()
    stream.map(_._2).countByValue().foreachRDD { r =>
      r.collect().foreach { kv =>
        result.synchronized {
          val count = result.getOrElseUpdate(kv._1, 0) + kv._2
          result.put(kv._1, count)
        }
      }
    }

    ssc.start()

    eventually(timeout(10000 milliseconds), interval(100 milliseconds)) {
      assert(result.synchronized { sent === result })
    }
  }
} 
Example 44
Source File: InitialSpec.scala    From embedded-kafka   with Apache License 2.0
package com.tuplejump.embedded.kafka

import java.util.concurrent.{TimeUnit, CountDownLatch}

import org.apache.kafka.common.serialization.StringDeserializer
import org.scalatest.concurrent.Eventually
import org.scalatest.concurrent.PatienceConfiguration.Timeout
import org.scalatest.time.{Millis, Span}

class InitialSpec extends AbstractSpec with Eventually with Logging {

  private val timeout = Timeout(Span(10000, Millis))

  "Initially, EmbeddedKafka" must {
    val kafka = new EmbeddedKafka()
    val topic = "test"
    val total = 1000
    val latch = new CountDownLatch(total)

    "start embedded zookeeper and embedded kafka" in {
      kafka.isRunning should be (false)
      kafka.start()
      eventually(timeout)(kafka.isRunning should be (true))
    }
    "create a topic" in {
      kafka.createTopic(topic, 1, 1)
    }
    "publish messages to the embedded kafka instance" in {
      val config = kafka.consumerConfig(
        group = "some.group",
        kafkaConnect = kafka.kafkaConfig.hostName + ":" + kafka.kafkaConfig.port,
        zkConnect = kafka.kafkaConfig.zkConnect,
        offsetPolicy = "largest",//latest with new consumer
        autoCommitEnabled = true,
        kDeserializer = classOf[StringDeserializer],
        vDeserializer = classOf[StringDeserializer])
      val consumer = new SimpleConsumer(latch, config, topic, "consumer.group", 1, 1)

      val batch1 = for (n <- 0 until total) yield s"message-test-$n"

      logger.info(s"Publishing ${batch1.size} messages...")

      kafka.sendMessages(topic, batch1)
      latch.await(10000, TimeUnit.MILLISECONDS)
      latch.getCount should be (0)

      consumer.shutdown()
    }
    "shut down relatively cleanly for now" in {
      kafka.shutdown()
      eventually(timeout)(kafka.isRunning should be (false))
    }
  }
} 
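Worth spelling out for the isRunning checks above: eventually retries only when its block throws. A bare boolean expression is simply returned as the block's value, true or false, with no retry and no failure, which is why those checks must assert. Demonstrated:

import org.scalatest.concurrent.Eventually
import org.scalatest.flatspec.AnyFlatSpec
import org.scalatest.matchers.should.Matchers

class BareBooleanSpec extends AnyFlatSpec with Matchers with Eventually {
  "eventually" should "return a non-throwing block's value without retrying" in {
    var polls = 0
    val result = eventually { polls += 1; false } // no exception, so no retry
    result shouldBe false
    polls shouldBe 1
  }
}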
Example 45
Source File: LeaseSpec.scala    From akka-management   with Apache License 2.0
package akka.coordination.lease.kubernetes

import akka.actor.ActorSystem
import akka.coordination.lease.TimeoutSettings
import akka.coordination.lease.kubernetes.internal.KubernetesApiImpl
import akka.coordination.lease.scaladsl.LeaseProvider
import org.scalatest.concurrent.{ Eventually, ScalaFutures }
import org.scalatest.time.{ Milliseconds, Seconds, Span }
import org.scalatest.{ BeforeAndAfterAll, Matchers, WordSpec }


abstract class LeaseSpec() extends WordSpec with ScalaFutures with BeforeAndAfterAll with Matchers with Eventually {

  def system: ActorSystem

  implicit val patience = PatienceConfig(Span(3, Seconds), Span(500, Milliseconds))

  lazy val underTest = LeaseProvider(system)
  // for cleanup
  val config = system.settings.config.getConfig(KubernetesLease.configPath)
  lazy val k8sApi = new KubernetesApiImpl(system, KubernetesSettings(config, TimeoutSettings(config)))
  val leaseName = "lease-1"
  val client1 = "client1"
  val client2 = "client2"

  // two leases instances for the same lease name
  lazy val lease1Client1 = underTest.getLease(leaseName, "akka.coordination.lease.kubernetes", client1)
  lazy val lease1Client2 = underTest.getLease(leaseName, "akka.coordination.lease.kubernetes", client2)

  "A lease" should {

    "be different instances" in {
      assert(lease1Client1 ne lease1Client2)
    }

    "work" in {
      lease1Client1.acquire().futureValue shouldEqual true
      lease1Client1.checkLease() shouldEqual true
    }

    "be reentrant" in {
      lease1Client1.acquire().futureValue shouldEqual true
      lease1Client1.checkLease() shouldEqual true
      lease1Client2.checkLease() shouldEqual false
    }

    "not allow another client to acquire the lease" in {
      lease1Client2.acquire().futureValue shouldEqual false
      lease1Client2.checkLease() shouldEqual false
    }

    "maintain the lease for a prolonged period" in {
      lease1Client1.acquire().futureValue shouldEqual true
      lease1Client1.checkLease() shouldEqual true
      Thread.sleep(200)
      lease1Client1.checkLease() shouldEqual true
      Thread.sleep(200)
      lease1Client1.checkLease() shouldEqual true
      Thread.sleep(200)
      lease1Client1.checkLease() shouldEqual true
    }

    "not allow another client to release the lease" in {
      lease1Client2.release().failed.futureValue.getMessage shouldEqual "Tried to release a lease that is not acquired"
    }

    "allow removing the lease" in {
      lease1Client1.release().futureValue shouldEqual true
      eventually {
        lease1Client1.checkLease() shouldEqual false
      }
    }

    "allow a new client to get the lease once released" in {
      lease1Client2.acquire().futureValue shouldEqual true
      lease1Client2.checkLease() shouldEqual true
      lease1Client1.checkLease() shouldEqual false
    }
  }

} 
Example 46
Source File: HttpContactPointRoutesSpec.scala    From akka-management   with Apache License 2.0
package akka.management.cluster.bootstrap.contactpoint

import akka.cluster.{ Cluster, ClusterEvent }
import akka.event.NoLogging
import akka.http.scaladsl.testkit.ScalatestRouteTest
import akka.management.cluster.bootstrap.ClusterBootstrapSettings
import akka.testkit.{ SocketUtil, TestProbe }
import org.scalatest.concurrent.Eventually
import org.scalatest.time.{ Millis, Seconds, Span }
import org.scalatest.{ Matchers, WordSpecLike }

class HttpContactPointRoutesSpec
    extends WordSpecLike
    with Matchers
    with ScalatestRouteTest
    with HttpBootstrapJsonProtocol
    with Eventually {

  implicit override val patienceConfig: PatienceConfig =
    PatienceConfig(timeout = scaled(Span(3, Seconds)), interval = scaled(Span(50, Millis)))

  override def testConfigSource =
    s"""
    akka {
      remote {
        netty.tcp {
          hostname = "127.0.0.1"
          port = ${SocketUtil.temporaryServerAddress("127.0.0.1").getPort}
        }
      }
    }
    """.stripMargin

  "Http Bootstrap routes" should {

    val settings = ClusterBootstrapSettings(system.settings.config, NoLogging)
    val httpBootstrap = new HttpClusterBootstrapRoutes(settings)

    "empty list if node is not part of a cluster" in {
      ClusterBootstrapRequests.bootstrapSeedNodes("") ~> httpBootstrap.routes ~> check {
        responseAs[String] should include(""""seedNodes":[]""")
      }
    }

    "include seed nodes when part of a cluster" in {
      val cluster = Cluster(system)
      cluster.join(cluster.selfAddress)

      val p = TestProbe()
      cluster.subscribe(p.ref, ClusterEvent.InitialStateAsEvents, classOf[ClusterEvent.MemberUp])
      val up = p.expectMsgType[ClusterEvent.MemberUp]
      up.member should ===(cluster.selfMember)

      eventually {
        ClusterBootstrapRequests.bootstrapSeedNodes("") ~> httpBootstrap.routes ~> check {
          val response = responseAs[HttpBootstrapJsonProtocol.SeedNodes]
          response.seedNodes should !==(Set.empty)
          response.seedNodes.map(_.node) should contain(cluster.selfAddress)
        }
      }
    }
  }

} 
Example 47
Source File: MultiDcSpec.scala    From akka-management   with Apache License 2.0
package akka.management.cluster

import akka.actor.ActorSystem
import akka.http.scaladsl.Http
import akka.http.scaladsl.model.{ HttpRequest, StatusCodes }
import akka.http.scaladsl.unmarshalling.Unmarshal
import akka.management.scaladsl.ManagementRouteProviderSettings
import akka.stream.ActorMaterializer
import akka.testkit.SocketUtil
import com.typesafe.config.ConfigFactory
import org.scalatest.{ Matchers, WordSpec }
import org.scalatest.concurrent.{ Eventually, ScalaFutures }
import org.scalatest.time.{ Millis, Seconds, Span }

class MultiDcSpec
    extends WordSpec
    with Matchers
    with ScalaFutures
    with ClusterHttpManagementJsonProtocol
    with Eventually {

  implicit val patience: PatienceConfig = PatienceConfig(timeout = Span(10, Seconds), interval = Span(50, Millis))

  val config = ConfigFactory.parseString(
    """
      |akka.actor.provider = "cluster"
      |akka.remote.log-remote-lifecycle-events = off
      |akka.remote.netty.tcp.hostname = "127.0.0.1"
      |#akka.loglevel = DEBUG
    """.stripMargin
  )

  "Http cluster management" must {
    "allow multiple DCs" in {
      val Vector(httpPortA, portA, portB) = SocketUtil.temporaryServerAddresses(3, "127.0.0.1").map(_.getPort)
      val dcA = ConfigFactory.parseString(
        s"""
           |akka.management.http.hostname = "127.0.0.1"
           |akka.management.http.port = $httpPortA
           |akka.cluster.seed-nodes = ["akka.tcp://MultiDcSystem@127.0.0.1:$portA"]
           |akka.cluster.multi-data-center.self-data-center = "DC-A"
           |akka.remote.netty.tcp.port = $portA
          """.stripMargin
      )
      val dcB = ConfigFactory.parseString(
        s"""
           |akka.cluster.seed-nodes = ["akka.tcp://MultiDcSystem@127.0.0.1:$portA"]
           |akka.cluster.multi-data-center.self-data-center = "DC-B"
           |akka.remote.netty.tcp.port = $portB
          """.stripMargin
      )

      implicit val dcASystem = ActorSystem("MultiDcSystem", config.withFallback(dcA))
      val dcBSystem = ActorSystem("MultiDcSystem", config.withFallback(dcB))
      implicit val materializer = ActorMaterializer()

      val routeSettings =
        ManagementRouteProviderSettings(selfBaseUri = s"http://127.0.0.1:$httpPortA", readOnly = false)

      try {
        Http()
          .bindAndHandle(ClusterHttpManagementRouteProvider(dcASystem).routes(routeSettings), "127.0.0.1", httpPortA)
          .futureValue

        eventually {
          val response =
            Http().singleRequest(HttpRequest(uri = s"http://127.0.0.1:$httpPortA/cluster/members")).futureValue
          response.status should equal(StatusCodes.OK)
          val members = Unmarshal(response.entity).to[ClusterMembers].futureValue
          members.members.size should equal(2)
          members.members.map(_.status) should equal(Set("Up"))
        }
      } finally {
        dcASystem.terminate()
        dcBSystem.terminate()
      }
    }
  }
} 
Example 48
Source File: DummyCpgProviderSpec.scala    From codepropertygraph   with Apache License 2.0
package io.shiftleft.cpgserver.cpg

import java.util.UUID

import scala.concurrent.ExecutionContext
import cats.data.OptionT
import cats.effect.{ContextShift, IO}
import org.scalatest.concurrent.Eventually

import io.shiftleft.codepropertygraph.Cpg
import io.shiftleft.cpgserver.BaseSpec
import io.shiftleft.cpgserver.query.CpgOperationResult

import scala.concurrent.duration._
import scala.language.postfixOps

class DummyCpgProviderSpec extends BaseSpec with Eventually {

  private implicit val cs: ContextShift[IO] = IO.contextShift(ExecutionContext.global)

  private def withNewCpgProvider[T](f: DummyCpgProvider => T): T = {
    f(new DummyCpgProvider)
  }

  "Creating a CPG" should {
    "return a UUID referencing the eventual CPG" in withNewCpgProvider { cpgProvider =>
      noException should be thrownBy cpgProvider.createCpg(Set.empty).unsafeRunSync()
    }
  }

  "Retrieving a CPG" should {
    "return a success if the CPG was created successfully" in withNewCpgProvider { cpgProvider =>
      val cpgId = cpgProvider.createCpg(Set.empty).unsafeRunSync()

      eventually(timeout(10 seconds), interval(1 seconds)) {
        cpgProvider.retrieveCpg(cpgId).value.unsafeRunSync() shouldBe defined
      }
    }

    "return an empty OptionT if the CPG does not exist" in withNewCpgProvider { cpgProvider =>
      cpgProvider.retrieveCpg(UUID.randomUUID) shouldBe OptionT.none[IO, CpgOperationResult[Cpg]]
    }
  }
} 
Example 49
Source File: CompressionSpec.scala    From chronicler   with Apache License 2.0
package com.github.fsanaulla.chronicler.ahc.io.it

import java.nio.file.Paths

import com.github.fsanaulla.chronicler.ahc.io.InfluxIO
import com.github.fsanaulla.chronicler.ahc.management.InfluxMng
import com.github.fsanaulla.chronicler.ahc.shared.Uri
import com.github.fsanaulla.chronicler.core.alias.Id
import com.github.fsanaulla.chronicler.core.api.DatabaseApi
import com.github.fsanaulla.chronicler.testing.it.DockerizedInfluxDB
import org.asynchttpclient.Response
import org.scalatest.concurrent.{Eventually, IntegrationPatience, ScalaFutures}
import org.scalatest.{FlatSpec, Matchers}

import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.Future

class CompressionSpec
  extends FlatSpec
  with Matchers
  with DockerizedInfluxDB
  with ScalaFutures
  with Eventually
  with IntegrationPatience {

  override def afterAll(): Unit = {
    mng.close()
    io.close()
    super.afterAll()
  }

  val testDB = "db"

  lazy val mng =
    InfluxMng(host, port, Some(creds), None)

  lazy val io =
    InfluxIO(host, port, Some(creds), compress = true)

  lazy val db: DatabaseApi[Future, Id, Response, Uri, String] =
    io.database(testDB)

  it should "ping database" in {
    eventually {
      io.ping.futureValue.right.get.version shouldEqual version
    }
  }

  it should "write data from file" in {
    mng.createDatabase(testDB).futureValue.right.get shouldEqual 200

    db.writeFromFile(Paths.get(getClass.getResource("/large_batch.txt").getPath))
      .futureValue
      .right
      .get shouldEqual 204

    db.readJson("SELECT * FROM test1").futureValue.right.get.length shouldEqual 10000
  }
} 
Example 50
Source File: CompressionSpec.scala    From chronicler   with Apache License 2.0
package com.github.fsanaulla.chronicler.urlhttp

import java.nio.file.Paths

import com.github.fsanaulla.chronicler.testing.it.DockerizedInfluxDB
import com.github.fsanaulla.chronicler.urlhttp.io.{InfluxIO, UrlIOClient}
import com.github.fsanaulla.chronicler.urlhttp.management.{InfluxMng, UrlManagementClient}
import org.scalatest.concurrent.{Eventually, IntegrationPatience}
import org.scalatest.{FlatSpec, Matchers}

class CompressionSpec
  extends FlatSpec
  with Matchers
  with DockerizedInfluxDB
  with Eventually
  with IntegrationPatience {

  override def afterAll(): Unit = {
    mng.close()
    io.close()
    super.afterAll()
  }

  val testDB = "db"

  lazy val mng: UrlManagementClient =
    InfluxMng(s"http://$host", port, Some(creds))

  lazy val io: UrlIOClient =
    InfluxIO(s"http://$host", port, Some(creds), compress = true)

  lazy val db: io.Database = io.database(testDB)

  it should "ping database" in {
    eventually {
      io.ping.get.right.get.version shouldEqual version
    }
  }

  it should "write data from file" in {
    mng.createDatabase(testDB).get.right.get shouldEqual 200

    db.writeFromFile(Paths.get(getClass.getResource("/large_batch.txt").getPath))
      .get
      .right
      .get shouldEqual 204

    db.readJson("SELECT * FROM test1").get.right.get.length shouldEqual 10000
  }
} 
Example 51
Source File: SystemSpec.scala    From ddd-leaven-akka-v2   with MIT License
package ecommerce.tests.e2e

import ecommerce.invoicing.ReceivePayment
import ecommerce.sales._
import ecommerce.shipping.ShippingSerializationHintsProvider
import ecommerce.tests.e2e.SystemSpec._
import org.json4s.Formats
import org.scalatest.concurrent.Eventually
import org.scalatest.time.{Seconds, Span}
import pl.newicom.dddd.utils.UUIDSupport.uuid7

class SystemSpec extends TestDriver with Eventually {

  implicit override val patienceConfig: PatienceConfig = PatienceConfig(
    timeout = scaled(Span(10, Seconds)),
    interval = scaled(Span(2, Seconds))
  )

  "Ecommerce system" should {
    val reservationId = new ReservationId(uuid7)
    val invoiceId     = reservationId
    val customerId    = uuid7

    using(sales_write) { implicit b =>
      "create reservation" in eventually {
        POST command {
          CreateReservation(reservationId, customerId)
        }
      }
    }

    using(sales_read) { implicit b =>
      "respond to reservation/{reservationId} query" in eventually {
        GET / s"reservation/$reservationId"
      }
    }

    using(sales_write) { implicit b =>
      "reserve product" in eventually {
        val product = Product(
          productId = uuid7,
          name = "DDDD For Dummies - 7th Edition",
          productType = ProductType.Standard,
          price = Money(10.0)
        )

        POST command {
          ReserveProduct(reservationId, product, quantity = 1)
        }
      }

      "confirm reservation" in eventually {
        POST command {
          ConfirmReservation(reservationId)
        }
      }
    }

    using(invoicing_write) { implicit b =>
      "pay" in eventually {
        POST command {
          ReceivePayment(invoiceId, reservationId.value, Money(10.0), paymentId = "230982342")
        }
      }
    }

    using(shipping_read) { implicit b =>
      "respond to /shipment/order/{orderId}" in eventually {
        GET / s"shipment/order/$reservationId"
      }
    }
  }
}

import pl.newicom.dddd.serialization.JsonSerHints._

object SystemSpec {

  val sales     = EndpointConfig(path = "ecommerce/sales")
  val invoicing = EndpointConfig(path = "ecommerce/invoicing")
  val shipping  = EndpointConfig(path = "ecommerce/shipping")

  val sales_write: EndpointConfig     = sales.copy(port = 9100)
  val invoicing_write: EndpointConfig = invoicing.copy(port = 9200)

  val sales_read: EndpointConfig    = sales.copy(port = 9110)
  val shipping_read: EndpointConfig = shipping.copy(port = 9310)

  implicit val formats: Formats =
    new SalesSerializationHintsProvider().hints() ++
      new ShippingSerializationHintsProvider().hints()

} 
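Writing "create reservation" in eventually { ... } wraps the entire test body, so the whole HTTP exchange is retried while the freshly deployed services warm up. The shape of that technique on its own (the warm-up stub is illustrative):

import org.scalatest.concurrent.Eventually
import org.scalatest.matchers.should.Matchers
import org.scalatest.time.{Seconds, Span}
import org.scalatest.wordspec.AnyWordSpec

class WarmUpSpec extends AnyWordSpec with Matchers with Eventually {
  implicit override val patienceConfig: PatienceConfig =
    PatienceConfig(timeout = scaled(Span(10, Seconds)), interval = scaled(Span(1, Seconds)))

  // Stand-in for an endpoint that refuses connections for the first second.
  private val readyAt = System.currentTimeMillis() + 1000
  private def createReservation(): Unit =
    if (System.currentTimeMillis() < readyAt) sys.error("connection refused")

  "the whole test body" should {
    "be retried as a unit until the service answers" in eventually {
      createReservation()
      succeed
    }
  }
}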
Example 52
Source File: AMQPServerStreamSuite.scala    From streaming-amqp   with Apache License 2.0
package io.radanalytics.streaming.amqp

import org.apache.spark.storage.StorageLevel
import org.apache.spark.streaming.amqp.AMQPUtils
import org.apache.spark.streaming.{Duration, Seconds, StreamingContext}
import org.apache.spark.{SparkConf, SparkFunSuite}
import org.scalatest.BeforeAndAfter
import org.scalatest.concurrent.Eventually

import scala.concurrent.duration._


class AMQPServerStreamSuite extends SparkFunSuite with Eventually with BeforeAndAfter {
  
  private val batchDuration: Duration = Seconds(1)
  private val master: String = "local[2]"
  private val appName: String = this.getClass().getSimpleName()
  private val address: String = "my_address"
  private val checkpointDir: String = "/tmp/spark-streaming-amqp-tests"
  
  private var conf: SparkConf = _
  private var ssc: StreamingContext = _
  private var amqpTestUtils: AMQPTestUtils = _

  before {
    
    conf = new SparkConf().setMaster(master).setAppName(appName)
    conf.set("spark.streaming.receiver.writeAheadLog.enable", "true")
    ssc = new StreamingContext(conf, batchDuration)
    ssc.checkpoint(checkpointDir)
    
    amqpTestUtils = new AMQPTestUtils()
    amqpTestUtils.setup()
  }
  
  after {

    if (ssc != null) {
      ssc.stop()
    }

    if (amqpTestUtils != null) {
      amqpTestUtils.teardown()
    }
  }

  test("AMQP receive server") {

    val sendMessage = "Spark Streaming & AMQP"
    val max = 10
    val delay = 100L

    amqpTestUtils.startAMQPServer(sendMessage, max, delay)

    val converter = new AMQPBodyFunction[String]

    val receiveStream =
      AMQPUtils.createStream(ssc, amqpTestUtils.host, amqpTestUtils.port,
        amqpTestUtils.username, amqpTestUtils.password, address, converter, StorageLevel.MEMORY_ONLY)

    var receivedMessage: List[String] = List()
    receiveStream.foreachRDD(rdd => {
      if (!rdd.isEmpty()) {
        receivedMessage = receivedMessage ::: rdd.collect().toList
      }
    })

    ssc.start()

    eventually(timeout(10000 milliseconds), interval(1000 milliseconds)) {

      assert(receivedMessage.length == max)
    }
    ssc.stop()

    amqpTestUtils.stopAMQPServer()
  }
} 
Example 53
Source File: ServiceSteps.scala    From sbt-docker-compose   with BSD 3-Clause "New" or "Revised" License
package example

import cucumber.api.scala.{EN, ScalaDsl}
import org.scalatest.Matchers
import org.scalatest.concurrent.{Eventually, ScalaFutures}

object ServiceSteps {
  lazy val defaultStartedService = {
    CalculatorServer.start(8080)
  }
}

class ServiceSteps extends ScalaDsl with EN with Matchers with ScalaFutures with Eventually {

  var lastResult = Int.MinValue
  var client: CalculatorClient = null

  
  Given("""^a calculator client against (.+)$""") { hostPort: String =>
    client = CalculatorClient(hostPort)

    // prove connectivity eagerly within this step
    client.add(0, 0) shouldBe 0
  }

  Given("""^a remote request to add (.+) and (.+)$""") { (lhs: Int, rhs: Int) =>
    lastResult = client.add(lhs, rhs)
  }
  Given("""^a remote request to subtract (.+) from (.+)$""") { (rhs: Int, lhs: Int) =>
    lastResult = client.subtract(lhs, rhs)
  }
  Then("""^The response should be ([-0-9]+)$""") { (expected: Int) =>
    lastResult shouldBe expected
  }
} 
Example 54
Source File: HepekSeleniumTest.scala    From hepek   with Apache License 2.0 5 votes vote down vote up
package ba.sake.hepek.selenium

import java.nio.file.Paths
import org.scalatest._
import org.scalatest.concurrent.Eventually
import org.scalatest.selenium.HtmlUnit
import ba.sake.hepek.core.Renderable

trait HepekSeleniumTest extends FlatSpec with Matchers with HtmlUnit with Eventually {
  java.util.logging.Logger
    .getLogger("com.gargoylesoftware.htmlunit")
    .setLevel(java.util.logging.Level.OFF) // disable annoying HtmlUnit warnings

  val basePath = "hepek-tests/target/web/public/main/"

  def filePath(renderable: Renderable): String = {
    val pagePath = basePath + renderable.relPath
    val path     = Paths.get(pagePath)
    path.toUri.toString
  }

  def getByCss(selector: String): Option[Element] =
    find(cssSelector(selector))
} 
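
getByCss above returns an Option, which pairs naturally with eventually: retry the lookup until the element is present. A self-contained sketch of that pattern, with a map standing in for the rendered DOM:

import org.scalatest.concurrent.Eventually
import org.scalatest.{FlatSpec, Matchers}

class LookupSpec extends FlatSpec with Matchers with Eventually {
  // stand-in for a page that finishes rendering asynchronously
  @volatile private var dom = Map.empty[String, String]
  new Thread(() => { Thread.sleep(200); dom = Map("h1" -> "Welcome") }).start()

  "a rendered page" should "eventually contain the heading" in eventually {
    dom.get("h1") shouldBe defined
  }
}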
Example 55
Source File: Page.scala    From renku   with Apache License 2.0 5 votes vote down vote up
package ch.renku.acceptancetests.pages

import ch.renku.acceptancetests.pages.Page._
import ch.renku.acceptancetests.pages.RenkuPage.RenkuBaseUrl
import ch.renku.acceptancetests.tooling._
import eu.timepit.refined.W
import eu.timepit.refined.api.Refined
import eu.timepit.refined.collection.NonEmpty
import eu.timepit.refined.string._
import org.openqa.selenium.{By, WebDriver, WebElement}
import org.scalatest.concurrent.Eventually
import org.scalatest.time.{Seconds, Span}
import org.scalatest.{Matchers => ScalatestMatchers}
import org.scalatestplus.selenium.WebBrowser

import scala.concurrent.duration._
import scala.language.{implicitConversions, postfixOps}

abstract class Page[Url <: BaseUrl] extends ScalatestMatchers with Eventually with AcceptanceSpecPatience {

  val path:  Path
  val title: Title
  def pageReadyElement(implicit webDriver: WebDriver): Option[WebElement]
  def url(implicit baseUrl:                Url): String = s"$baseUrl$path"

  protected implicit def toWebElement(element: WebBrowser.Element): WebElement =
    element.underlying
  protected implicit def toMaybeWebElement(maybeElement: Option[WebBrowser.Element]): Option[WebElement] =
    maybeElement.map(_.underlying)

  protected implicit class ElementOps(element: WebBrowser.Element) {

    def parent: WebElement = element.findElement(By.xpath("./.."))

    def enterValue(value: String): Unit = value foreach { char =>
      element.sendKeys(char.toString) sleep (100 millis)
    }
  }

  protected implicit class WebElementOps(element: WebElement) {

    def enterValue(value: String): Unit = value foreach { char =>
      element.sendKeys(char.toString) sleep (100 millis)
    }
  }

  object sleep {
    def apply(duration: Duration): Unit = Page.SleepThread(duration)
  }

  protected implicit class OperationOps(unit: Unit) {
    def sleep(duration: Duration): Unit = Page.SleepThread(duration)
  }

  protected def waitUpTo(duration: Duration): PatienceConfig =
    PatienceConfig(
      // wait up to WAIT_SCALE * duration for this operation
      timeout  = scaled(Span(AcceptanceSpecPatience.WAIT_SCALE * duration.toSeconds, Seconds)),
      interval = scaled(Span(2, Seconds))
    )
}

object Page {
  type Path  = String Refined StartsWith[W.`"/"`.T]
  type Title = String Refined NonEmpty

  // Use a unique name to avoid problems on case-insensitive and preserving file systems
  object SleepThread {
    def apply(duration: Duration): Unit = Thread sleep duration.toMillis
  }
}

abstract class RenkuPage extends Page[RenkuBaseUrl]

object RenkuPage {
  case class RenkuBaseUrl(value: String Refined Url) extends BaseUrl(value)
} 
Example 56
Source File: AkkaStreamSuite.scala    From bahir   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.streaming.akka

import java.util.concurrent.ConcurrentLinkedQueue

import scala.collection.JavaConverters._
import scala.concurrent.Await
import scala.concurrent.duration._

import akka.actor._
import com.typesafe.config.ConfigFactory
import org.scalatest.BeforeAndAfter
import org.scalatest.concurrent.Eventually

import org.apache.spark.{SparkConf, SparkFunSuite}
import org.apache.spark.streaming.{Milliseconds, StreamingContext}

class AkkaStreamSuite extends SparkFunSuite with Eventually with BeforeAndAfter {

  private var ssc: StreamingContext = _

  private var actorSystem: ActorSystem = _

  after {
    if (ssc != null) {
      ssc.stop()
      ssc = null
    }
    if (actorSystem != null) {
      Await.ready(actorSystem.terminate(), 30.seconds)
      actorSystem = null
    }
  }

  test("actor input stream") {
    val sparkConf = new SparkConf().setMaster("local[4]").setAppName(this.getClass.getSimpleName)
    ssc = new StreamingContext(sparkConf, Milliseconds(500))

    // we set the TCP port to "0" to have the port chosen automatically for the Feeder actor and
    // the Receiver actor will "pick it up" from the Feeder URI when it subscribes to the Feeder
    // actor (http://doc.akka.io/docs/akka/2.3.11/scala/remoting.html)
    val akkaConf = ConfigFactory.parseMap(
      Map(
        "akka.actor.provider" -> "akka.remote.RemoteActorRefProvider",
        "akka.remote.netty.tcp.transport-class" -> "akka.remote.transport.netty.NettyTransport",
        "akka.remote.netty.tcp.port" -> "0").
        asJava)
    actorSystem = ActorSystem("test", akkaConf)
    actorSystem.actorOf(Props(classOf[FeederActor]), "FeederActor")
    val feederUri =
      actorSystem.asInstanceOf[ExtendedActorSystem].provider.getDefaultAddress + "/user/FeederActor"

    val actorStream =
      AkkaUtils.createStream[String](ssc, Props(classOf[TestActorReceiver], feederUri),
        "TestActorReceiver")
    val result = new ConcurrentLinkedQueue[String]
    actorStream.foreachRDD { rdd =>
      rdd.collect().foreach(result.add)
    }
    ssc.start()

    eventually(timeout(10.seconds), interval(10.milliseconds)) {
      assert((1 to 10).map(_.toString) === result.asScala.toList)
    }
  }
}

case class SubscribeReceiver(receiverActor: ActorRef)

class FeederActor extends Actor {

  def receive: Receive = {
    case SubscribeReceiver(receiverActor: ActorRef) =>
      (1 to 10).foreach(i => receiverActor ! i.toString())
  }
}

class TestActorReceiver(uriOfPublisher: String) extends ActorReceiver {

  lazy private val remotePublisher = context.actorSelection(uriOfPublisher)

  override def preStart(): Unit = {
    remotePublisher ! SubscribeReceiver(self)
  }

  def receive: PartialFunction[Any, Unit] = {
    case msg: String => store(msg)
  }

} 
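
The suite above funnels results into a ConcurrentLinkedQueue and polls it from eventually. A minimal stand-alone sketch of that collect-then-poll pattern:

import java.util.concurrent.ConcurrentLinkedQueue

import scala.collection.JavaConverters._

import org.scalatest.concurrent.Eventually._
import org.scalatest.time.{Millis, Seconds, Span}

object QueuePollingSketch extends App {
  val results = new ConcurrentLinkedQueue[String]
  new Thread(() => (1 to 10).foreach(i => results.add(i.toString))).start()

  eventually(timeout(Span(10, Seconds)), interval(Span(10, Millis))) {
    assert((1 to 10).map(_.toString) == results.asScala.toList)
  }
}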
Example 57
Source File: TwitterStreamSuite.scala    From bahir   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.streaming.twitter

import java.util.UUID

import scala.collection.mutable

import org.scalatest.BeforeAndAfter
import org.scalatest.concurrent.Eventually
import org.scalatest.time
import org.scalatest.time.Span
import twitter4j.{FilterQuery, Status, TwitterFactory}
import twitter4j.auth.{Authorization, NullAuthorization}

import org.apache.spark.ConditionalSparkFunSuite
import org.apache.spark.internal.Logging
import org.apache.spark.storage.StorageLevel
import org.apache.spark.streaming.{Seconds, StreamingContext}
import org.apache.spark.streaming.dstream.ReceiverInputDStream

class TwitterStreamSuite extends ConditionalSparkFunSuite
    with Eventually with BeforeAndAfter with Logging {
  def shouldRunTest(): Boolean = sys.env.get("ENABLE_TWITTER_TESTS").contains("1")

  var ssc: StreamingContext = _

  before {
    ssc = new StreamingContext("local[2]", this.getClass.getSimpleName, Seconds(1))
  }

  after {
    if (ssc != null) {
      ssc.stop()
    }
  }

  test("twitter input stream") {
    val filters = Seq("filter1", "filter2")
    val query = new FilterQuery().language("fr,es")
    val authorization: Authorization = NullAuthorization.getInstance()

    // tests the API, does not actually test data receiving
    val test1: ReceiverInputDStream[Status] = TwitterUtils.createStream(ssc, None)
    val test2: ReceiverInputDStream[Status] =
      TwitterUtils.createStream(ssc, None, filters)
    val test3: ReceiverInputDStream[Status] =
      TwitterUtils.createStream(ssc, None, filters, StorageLevel.MEMORY_AND_DISK_SER_2)
    val test4: ReceiverInputDStream[Status] =
      TwitterUtils.createStream(ssc, Some(authorization))
    val test5: ReceiverInputDStream[Status] =
      TwitterUtils.createStream(ssc, Some(authorization), filters)
    val test6: ReceiverInputDStream[Status] = TwitterUtils.createStream(
      ssc, Some(authorization), filters, StorageLevel.MEMORY_AND_DISK_SER_2)
    val test7: ReceiverInputDStream[Status] = TwitterUtils.createFilteredStream(
      ssc, Some(authorization), Some(query), StorageLevel.MEMORY_AND_DISK_SER_2)
  }

  testIf("messages received", () => TwitterStreamSuite.this.shouldRunTest()) {
    val userId = TwitterFactory.getSingleton.updateStatus(
      UUID.randomUUID().toString
    ).getUser.getId

    val receiveStream = TwitterUtils.createFilteredStream(
      ssc, None, Some(new FilterQuery().follow(userId))
    )
    @volatile var receivedMessages: mutable.Set[Status] = mutable.Set()
    receiveStream.foreachRDD { rdd =>
      for (element <- rdd.collect()) {
        receivedMessages += element
      }
      receivedMessages
    }
    ssc.start()

    val nbOfMsg = 2
    var publishedMessages: List[String] = List()

    (1 to nbOfMsg).foreach(
      _ => {
        publishedMessages = UUID.randomUUID().toString :: publishedMessages
      }
    )

    eventually(timeout(Span(15, time.Seconds)), interval(Span(1000, time.Millis))) {
      // re-publish any message that has not shown up yet, then assert coverage
      publishedMessages.foreach { message =>
        if (!receivedMessages.map(_.getText).contains(message)) {
          TwitterFactory.getSingleton.updateStatus(message)
        }
      }
      assert(
        publishedMessages.toSet.subsetOf(receivedMessages.map(_.getText))
      )
    }
  }
} 
Example 58
Source File: ProjectionsSpec.scala    From nexus   with Apache License 2.0 5 votes vote down vote up
package ch.epfl.bluebrain.nexus.sourcing.projections

import akka.actor.ActorSystem
import akka.persistence.query.Offset
import akka.stream.scaladsl.Source
import akka.testkit.{TestKit, TestKitBase}
import cats.effect.{ContextShift, IO}
import ch.epfl.bluebrain.nexus.sourcing.projections.Fixture.memoize
import ch.epfl.bluebrain.nexus.sourcing.projections.ProjectionProgress._
import ch.epfl.bluebrain.nexus.sourcing.projections.ProjectionsSpec.SomeEvent
import io.circe.generic.auto._
import org.scalatest.concurrent.Eventually
import org.scalatest.matchers.should.Matchers
import org.scalatest.wordspec.AnyWordSpecLike
import org.scalatest.{BeforeAndAfterAll, DoNotDiscover}

import scala.concurrent.duration._

//noinspection TypeAnnotation
@DoNotDiscover
class ProjectionsSpec
    extends TestKitBase
    with AnyWordSpecLike
    with Matchers
    with TestHelpers
    with IOValues
    with Eventually
    with BeforeAndAfterAll {

  implicit override lazy val system: ActorSystem      = SystemBuilder.persistence("ProjectionsSpec")
  implicit private val contextShift: ContextShift[IO] = IO.contextShift(system.dispatcher)

  override protected def afterAll(): Unit = {
    TestKit.shutdownActorSystem(system)
    super.afterAll()
  }

  "A Projection" should {
    val id            = genString()
    val persistenceId = s"/some/${genString()}"
    val projections   = memoize(Projections[IO, SomeEvent]).unsafeRunSync()
    val progress      = OffsetProgress(Offset.sequence(42), 42, 42, 0)

    "store progress" in {
      projections.ioValue.recordProgress(id, progress).ioValue
    }

    "retrieve stored progress" in {
      projections.ioValue.progress(id).ioValue shouldEqual progress
    }

    "retrieve NoProgress for unknown projections" in {
      projections.ioValue.progress(genString()).ioValue shouldEqual NoProgress
    }

    val firstOffset: Offset  = Offset.sequence(42)
    val secondOffset: Offset = Offset.sequence(98)
    val firstEvent           = SomeEvent(1L, "description")
    val secondEvent          = SomeEvent(2L, "description2")

    "store an event" in {
      projections.ioValue.recordFailure(id, persistenceId, 1L, firstOffset, firstEvent).ioValue
    }

    "store another event" in {
      projections.ioValue.recordFailure(id, persistenceId, 2L, secondOffset, secondEvent).ioValue
    }

    "retrieve stored events" in {
      val expected = Seq((firstEvent, firstOffset), (secondEvent, secondOffset))
      eventually {
        logOf(projections.ioValue.failures(id)) should contain theSameElementsInOrderAs expected
      }
    }

    "retrieve empty list of events for unknown failure log" in {
      eventually {
        logOf(projections.ioValue.failures(genString())) shouldBe empty
      }
    }

  }

  private def logOf(source: Source[(SomeEvent, Offset), _]): Vector[(SomeEvent, Offset)] = {
    val f = source.runFold(Vector.empty[(SomeEvent, Offset)])(_ :+ _)
    IO.fromFuture(IO(f)).ioValue
  }

  implicit override def patienceConfig: PatienceConfig =
    PatienceConfig(30.seconds, 50.milliseconds)
}

object ProjectionsSpec {
  final case class SomeEvent(rev: Long, description: String)
} 
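
ProjectionsSpec overrides the implicit patienceConfig for the whole suite. The same override can be factored into a reusable trait; a minimal sketch (the durations convert to Span implicitly):

import org.scalatest.concurrent.Eventually

import scala.concurrent.duration._

trait LongPatience extends Eventually {
  // 30 s timeout with 50 ms polling, applied to every eventually block
  // in suites that mix this trait in
  implicit override def patienceConfig: PatienceConfig =
    PatienceConfig(30.seconds, 50.milliseconds)
}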
Example 59
Source File: ArchiveCacheSpec.scala    From nexus   with Apache License 2.0 5 votes vote down vote up
package ch.epfl.bluebrain.nexus.kg.archives

import java.time.{Clock, Instant, ZoneId}

import cats.effect.{IO, Timer}
import ch.epfl.bluebrain.nexus.admin.client.types.Project
import ch.epfl.bluebrain.nexus.commons.test.ActorSystemFixture
import ch.epfl.bluebrain.nexus.commons.test.io.IOOptionValues
import ch.epfl.bluebrain.nexus.iam.types.Identity.Anonymous
import ch.epfl.bluebrain.nexus.kg.TestHelper
import ch.epfl.bluebrain.nexus.kg.archives.Archive.{File, Resource, ResourceDescription}
import ch.epfl.bluebrain.nexus.kg.resources.Id
import ch.epfl.bluebrain.nexus.kg.resources.syntax._
import ch.epfl.bluebrain.nexus.service.config.Settings
import org.scalatest.concurrent.Eventually
import org.scalatest.matchers.should.Matchers
import org.scalatest.wordspec.AnyWordSpecLike

import scala.concurrent.duration._

class ArchiveCacheSpec
    extends ActorSystemFixture("ArchiveCacheSpec", true)
    with TestHelper
    with AnyWordSpecLike
    with Matchers
    with IOOptionValues
    with Eventually {

  implicit override def patienceConfig: PatienceConfig = PatienceConfig(10.second, 50.milliseconds)

  private val appConfig                 = Settings(system).serviceConfig
  implicit private val config           =
    appConfig.copy(kg =
      appConfig.kg.copy(archives = appConfig.kg.archives.copy(cacheInvalidateAfter = 500.millis, maxResources = 100))
    )
  implicit private val timer: Timer[IO] = IO.timer(system.dispatcher)
  implicit private val archivesCfg      = config.kg.archives

  private val cache: ArchiveCache[IO] = ArchiveCache[IO].unsafeToFuture().futureValue
  implicit private val clock          = Clock.fixed(Instant.EPOCH, ZoneId.systemDefault())
  private val instant                 = clock.instant()

  def randomProject() = {
    val instant = Instant.EPOCH
    // format: off
    Project(genIri, genString(), genString(), None, genIri, genIri, Map.empty, genUUID, genUUID, 1L, false, instant, genIri, instant, genIri)
    // format: on
  }

  "An archive cache" should {

    "write and read an Archive" in {
      val resId     = Id(randomProject().ref, genIri)
      val resource1 = Resource(genIri, randomProject(), None, None, originalSource = true, None)
      val file1     = File(genIri, randomProject(), None, None, None)
      val archive   = Archive(resId, instant, Anonymous, Set(resource1, file1))
      val _         = cache.put(archive).value.some
      cache.get(archive.resId).value.some shouldEqual archive
    }

    "read a non existing resource" in {
      val resId = Id(randomProject().ref, genIri)
      cache.get(resId).value.ioValue shouldEqual None
    }

    "read after timeout" in {
      val resId   = Id(randomProject().ref, genIri)
      val set     = Set[ResourceDescription](Resource(genIri, randomProject(), None, None, originalSource = true, None))
      val archive = Archive(resId, instant, Anonymous, set)
      val _       = cache.put(archive).value.some
      val time    = System.currentTimeMillis()
      cache.get(resId).value.some shouldEqual archive
      eventually {
        cache.get(resId).value.ioValue shouldEqual None
      }
      val diff    = System.currentTimeMillis() - time
      diff should be > config.kg.archives.cacheInvalidateAfter.toMillis
      diff should be < config.kg.archives.cacheInvalidateAfter.toMillis + 300
    }
  }
} 
Example 60
Source File: TestSpec.scala    From study-category-theory   with Apache License 2.0 5 votes vote down vote up
package com.github.dnvriend

import akka.actor.ActorSystem
import akka.stream.Materializer
import akka.util.Timeout
import org.scalatest._
import org.scalatest.concurrent.{ Eventually, ScalaFutures }
import org.scalatestplus.play.guice.GuiceOneServerPerSuite
import play.api.inject.BindingKey
import play.api.libs.json.{ JsValue, Json, Writes }
import play.api.test.WsTestClient

import scala.concurrent.duration._
import scala.concurrent.{ ExecutionContext, Future }
import scala.reflect.ClassTag
import scala.util.Try

object Person {
  implicit val format = Json.format[Person]
  implicit class ValueObjectOps(val self: Person) {
    def toJson: JsValue = Json.toJson(self)
  }
  implicit class IterableOps(val self: Iterable[Person]) {
    def toJson: JsValue = Json.toJson(self)
  }
}
final case class Person(firstName: String, age: Int)

class TestSpec extends FlatSpec
    with Matchers
    with GivenWhenThen
    with OptionValues
    with TryValues
    with ScalaFutures
    with WsTestClient
    with BeforeAndAfterAll
    with BeforeAndAfterEach
    with Eventually
    with GuiceOneServerPerSuite {

  def getComponent[A: ClassTag] = app.injector.instanceOf[A]
  def getNamedComponent[A](name: String)(implicit ct: ClassTag[A]): A =
    app.injector.instanceOf[A](BindingKey(ct.runtimeClass.asInstanceOf[Class[A]]).qualifiedWith(name))

  // set the port number of the HTTP server
  override lazy val port: Int = getNamedComponent[Int]("test.port")
  implicit val timeout: Timeout = getComponent[Timeout]
  implicit val pc: PatienceConfig = PatienceConfig(timeout = 30.seconds, interval = 300.millis)
  implicit val system: ActorSystem = getComponent[ActorSystem]
  implicit val ec: ExecutionContext = getComponent[ExecutionContext]
  implicit val mat: Materializer = getComponent[Materializer]

  // ================================== Supporting Operations ====================================
  def id: String = java.util.UUID.randomUUID().toString

  implicit class PimpedFuture[T](self: Future[T]) {
    def toTry: Try[T] = Try(self.futureValue)
  }

  
  final val FirstName: String = "John"
  final val LastName: String = "Doe"

  override protected def beforeEach(): Unit = {
  }
} 
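
The PimpedFuture helper above converts a Future into a Try by blocking through ScalaFutures' futureValue. A stand-alone sketch of the same helper:

import org.scalatest.concurrent.ScalaFutures._

import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.Future
import scala.util.Try

object PimpedFutureSketch extends App {
  implicit class PimpedFuture[T](val self: Future[T]) {
    // futureValue blocks until completion (within the implicit PatienceConfig)
    // and throws on failure, which Try then captures as a Failure
    def toTry: Try[T] = Try(self.futureValue)
  }

  println(Future(21 * 2).toTry) // Success(42)
}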
Example 61
Source File: ITTestDynamoDB.scala    From aws-spi-akka-http   with Apache License 2.0 5 votes vote down vote up
package com.github.matsluni.akkahttpspi.dynamodb

import com.github.matsluni.akkahttpspi.{AkkaHttpAsyncHttpService, TestBase}
import org.scalatest.concurrent.{Eventually, Futures, IntegrationPatience}
import org.scalatest.wordspec.AnyWordSpec
import org.scalatest.matchers.should.Matchers
import software.amazon.awssdk.services.dynamodb.DynamoDbAsyncClient
import software.amazon.awssdk.services.dynamodb.model._
import org.scalatest.concurrent.ScalaFutures._

import scala.compat.java8.FutureConverters._

class ITTestDynamoDB extends AnyWordSpec with Matchers with Futures with Eventually with IntegrationPatience with TestBase {

  def withClient(testCode: DynamoDbAsyncClient => Any): Any = {

    val akkaClient = new AkkaHttpAsyncHttpService().createAsyncHttpClientFactory().build()

    val client = DynamoDbAsyncClient
      .builder()
      .credentialsProvider(credentialProviderChain)
      .region(defaultRegion)
      .httpClient(akkaClient)
      .build()

    try {
      testCode(client)
    }
    finally { // clean up
      akkaClient.close()
      client.close()
    }
  }

  "DynamoDB" should {
    "create a table" in withClient { implicit client =>
      val tableName = s"Movies-${randomIdentifier(5)}"
      val attributes = AttributeDefinition.builder.attributeName("film_id").attributeType(ScalarAttributeType.S).build()
      val keySchema = KeySchemaElement.builder.attributeName("film_id").keyType(KeyType.HASH).build()

      val result = client.createTable(
        CreateTableRequest.builder()
          .tableName(tableName)
          .attributeDefinitions(attributes)
          .keySchema(keySchema)
          .provisionedThroughput(ProvisionedThroughput
                                  .builder
                                  .readCapacityUnits(1L)
                                  .writeCapacityUnits(1L)
                                  .build())
          .build()).join

      val desc = result.tableDescription()
      desc.tableName() should be (tableName)

      eventually {
        val response = client.describeTable(DescribeTableRequest.builder().tableName(tableName).build()).toScala
        response.futureValue.table().tableStatus() should be (TableStatus.ACTIVE)
      }
      client.deleteTable(DeleteTableRequest.builder().tableName(tableName).build()).toScala

    }
  }

} 
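
The table-creation test combines eventually with futureValue: every retry issues a fresh describe call and blocks on its result. A minimal sketch, with a hypothetical describeStatus standing in for the SDK call:

import org.scalatest.concurrent.{Eventually, ScalaFutures}
import org.scalatest.matchers.should.Matchers

import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.Future

object PollUntilActiveSketch extends App with Eventually with ScalaFutures with Matchers {
  // hypothetical stand-in for client.describeTable(...).toScala
  def describeStatus(): Future[String] = Future("ACTIVE")

  // each retry issues a fresh call and blocks on its future
  eventually {
    describeStatus().futureValue should be ("ACTIVE")
  }
}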
Example 62
Source File: BaseAwsClientTest.scala    From aws-spi-akka-http   with Apache License 2.0 5 votes vote down vote up
package com.github.matsluni.akkahttpspi

import java.net.URI

import com.dimafeng.testcontainers.{ForAllTestContainer, GenericContainer}
import com.github.matsluni.akkahttpspi.testcontainers.LocalStackReadyLogWaitStrategy
import org.scalatest.concurrent.{Eventually, Futures, IntegrationPatience}
import org.scalatest.BeforeAndAfter
import software.amazon.awssdk.core.SdkClient
import software.amazon.awssdk.regions.Region

import scala.util.Random
import org.scalatest.matchers.should.Matchers
import org.scalatest.wordspec.AnyWordSpec

trait BaseAwsClientTest[C <: SdkClient]
  extends AnyWordSpec
    with Matchers
    with Futures
    with Eventually
    with BeforeAndAfter
    with IntegrationPatience
    with ForAllTestContainer {

  lazy val defaultRegion: Region = Region.EU_WEST_1

  def client: C
  def exposedServicePort: Int
  val container: GenericContainer

  def endpoint = new URI(s"http://localhost:${container.mappedPort(exposedServicePort)}")
  def randomIdentifier(length: Int): String = Random.alphanumeric.take(length).mkString
}

trait LocalstackBaseAwsClientTest[C <: SdkClient] extends BaseAwsClientTest[C] {
  def service: String

  lazy val exposedServicePort: Int = LocalstackServicePorts.services(service)

  override lazy val container: GenericContainer =
    new GenericContainer(
      dockerImage = "localstack/localstack",
      exposedPorts = Seq(exposedServicePort),
      env = Map("SERVICES" -> service),
      waitStrategy = Some(LocalStackReadyLogWaitStrategy)
    )
}

object LocalstackServicePorts {
  //services and ports based on https://github.com/localstack/localstack
  val services: Map[String, Int] = Map(
    "s3" -> 4572,
    "sqs" -> 4576,
    "sns" -> 4575,
    "dynamodb" -> 4569
  )
} 
Example 63
Source File: BulkIndexerActorTest.scala    From elasticsearch-client   with Apache License 2.0 5 votes vote down vote up
package com.sumologic.elasticsearch.akkahelpers

import java.util.concurrent.TimeUnit

import akka.actor.{ActorSystem, Terminated}
import akka.testkit.{ImplicitSender, TestActorRef, TestKit}
import com.sumologic.elasticsearch.akkahelpers.BulkIndexerActor.{BulkSession, CreateRequest, DocumentIndexed, ForceFlush}
import com.sumologic.elasticsearch.restlastic.{RestlasticSearchClient, RestlasticSearchClient6}
import com.sumologic.elasticsearch.restlastic.RestlasticSearchClient.ReturnTypes.BulkItem
import com.sumologic.elasticsearch.restlastic.dsl.Dsl._
import org.junit.runner.RunWith
import org.mockito.ArgumentMatchers._
import org.mockito.Mockito._
import org.scalatest._
import org.scalatest.concurrent.Eventually
import org.scalatest.mock.MockitoSugar
import org.scalatestplus.junit.JUnitRunner

import scala.concurrent.duration._
import scala.concurrent.{Await, Future}

@RunWith(classOf[JUnitRunner])
class BulkIndexerActorTest extends TestKit(ActorSystem("TestSystem")) with WordSpecLike with Matchers
with BeforeAndAfterAll with BeforeAndAfterEach with MockitoSugar with ImplicitSender with Eventually {

  val executionContext = scala.concurrent.ExecutionContext.Implicits.global
  var indexerActor: TestActorRef[BulkIndexerActor] = _
  var mockEs = mock[RestlasticSearchClient]
  var flushTimeoutMs = 100000L
  var maxMessages = 100000

  override def beforeEach(): Unit = {
    mockEs = mock[RestlasticSearchClient]
    when(mockEs.indexExecutionCtx).thenReturn(executionContext)
    def timeout() = Duration(flushTimeoutMs, TimeUnit.MILLISECONDS)
    def max() = maxMessages
    val config = BulkConfig(timeout, max)
    indexerActor = TestActorRef[BulkIndexerActor](BulkIndexerActor.props(mockEs, config))

  }

  override def afterAll(): Unit = {
    val terminationFuture: Future[Terminated] = system.terminate()
    Await.result(terminationFuture, 5.seconds)
  }

  "BulkIndexerActor" should {
    "flush every message when set to 1" in {
      maxMessages = 1
      when(mockEs.bulkIndex(any())).thenReturn(Future.successful(Seq(BulkItem("index","type", "_id", 201, None))))
      val sess = BulkSession.create()
      indexerActor ! CreateRequest(sess, Index("i"), Type("tpe"), Document("id", Map("k" -> "v")))
      eventually {
        verify(mockEs).bulkIndex(any())
      }
      val msg = expectMsgType[DocumentIndexed]
      msg.sessionId should be(sess)
    }

    "not flush when set to 2" in {
      maxMessages = 2
      indexerActor ! CreateRequest(BulkSession.create(), Index("i"), Type("tpe"), Document("id", Map("k" -> "v")))
      verify(mockEs, times(0)).bulkIndex(any())
    }

    "not flush when there are no messages" in {
      indexerActor ! ForceFlush
      verify(mockEs, times(0)).bulkIndex(any())
    }
  }


} 
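
eventually pairs well with Mockito: verify throws until the expected interaction has been recorded, and eventually simply retries it. A self-contained sketch of that pattern:

import org.mockito.ArgumentMatchers.any
import org.mockito.Mockito._
import org.scalatest.concurrent.Eventually._

object VerifyEventuallySketch extends App {
  trait Indexer { def bulkIndex(docs: Seq[String]): Unit }

  val indexer = mock(classOf[Indexer])
  new Thread(() => { Thread.sleep(100); indexer.bulkIndex(Seq("doc")) }).start()

  // verify throws until the asynchronous call arrives; eventually retries it
  eventually {
    verify(indexer).bulkIndex(any())
  }
}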
Example 64
Source File: PrintTest.scala    From ncdbg   with BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
package com.programmaticallyspeaking.ncd.nashorn

import com.programmaticallyspeaking.ncd.host.{HitBreakpoint, PrintMessage, ScriptEvent, StackFrame}
import com.programmaticallyspeaking.ncd.messaging.Observer
import com.programmaticallyspeaking.ncd.testing.UnitTest
import org.scalatest.concurrent.Eventually

import scala.concurrent.{ExecutionContext, Promise}

trait PrintTestFixture extends NashornScriptHostTestFixture with Eventually with FairAmountOfPatience {
  override implicit val executionContext: ExecutionContext = ExecutionContext.global

  def runScriptAndCollectEvents(script: String)(handler: Seq[PrintMessage] => Unit) = {
    var events = Seq.empty[PrintMessage]
    val obs = Observer.from[ScriptEvent] {
      case ev: PrintMessage => events :+= ev
    }

    observeAndRunScriptSync(script, obs) { host =>
      eventually {
        assert(events.nonEmpty)
      }
      handler(events)
    }
  }

  def runScriptAndCollectEventsWhilePaused(code: String)(handler: Seq[PrintMessage] => Unit) = {
    var events = Seq.empty[PrintMessage]
    val stackframesPromise = Promise[Seq[StackFrame]]()
    val obs = Observer.from[ScriptEvent] {
      case ev: PrintMessage => events :+= ev
      case bp: HitBreakpoint => stackframesPromise.success(bp.stackFrames)
    }

    observeAndRunScriptAsync("debugger;", obs) { host =>
      stackframesPromise.future.map { sf =>

        host.evaluateOnStackFrame(sf.head.id, code)

        handler(events)
      }
    }
  }
}

class PrintTest extends UnitTest with PrintTestFixture {

  "Capture of Nashorn's print extension" - {

    "emits a PrintMessage event" in {
      expectMessage("print('hello world');", "hello world")
    }

    "puts space inbetween arguments" in {
      expectMessage("print('hello', 'world');", "hello world")
    }

    "uses JS stringification" in {
      expectMessage("print({});", "[object Object]")
    }

    "handles null" in {
      expectMessage("print(null, 'foo');", "null foo")
    }

    "emits a PrintMessage even if the no-newline version is used" in {
      useNashornArguments(Seq("print-no-newline"))
      expectMessage("print('hello world');", "hello world")
    }

    "is ignored when the debugger is paused to avoid deadlock" in {
      runScriptAndCollectEventsWhilePaused("print('ignored');") { events =>
        events should be ('empty)
      }
    }
  }

  private def expectMessage(script: String, message: String): Unit = {
    runScriptAndCollectEvents(script) { events =>
      expectMessage(events, message)
    }
  }

  private def expectMessage(events: Seq[PrintMessage], message: String): Unit = {
    val found = events.find(_.message == message)
    found should be ('defined)
  }

} 
Example 65
Source File: MultiThreadingTest.scala    From ncdbg   with BSD 3-Clause "New" or "Revised" License 5 votes vote down vote up
package com.programmaticallyspeaking.ncd.nashorn
import java.io.{BufferedReader, InputStreamReader}
import java.util.concurrent.TimeoutException
import java.util.concurrent.atomic.AtomicInteger

import com.programmaticallyspeaking.ncd.host._
import com.programmaticallyspeaking.ncd.messaging.Observer
import com.programmaticallyspeaking.ncd.testing.{SharedInstanceActorTesting, UnitTest}
import jdk.nashorn.api.scripting.NashornScriptEngineFactory
import org.scalatest.concurrent.{Eventually, ScalaFutures}
import org.scalatest.exceptions.TestFailedException
import org.slf4s.Logging

import scala.concurrent.duration._
import scala.concurrent.{Await, ExecutionContext, Future, Promise}

trait MultiThreadingTestFixture extends UnitTest with Logging with SharedInstanceActorTesting with VirtualMachineLauncher with ScalaFutures with FairAmountOfPatience with Eventually {
  override val scriptExecutor: ScriptExecutorBase = MultiThreadedScriptExecutor
  override implicit val executionContext: ExecutionContext = ExecutionContext.global
}

class MultiThreadingTest extends MultiThreadingTestFixture {
  def location(ln: Int) = ScriptLocation(ln, None)

  "Breakpoint requests from other threads should be ignore in a paused state" in {
    val scriptAddedPromise = Promise[Script]()
    val hitBreakpointPromise = Promise[String]()
    val breakpointCounter = new AtomicInteger()
    val host = getHost
    observeScriptEvents(new Observer[ScriptEvent] {

      override def onNext(item: ScriptEvent): Unit = item match {
        case ScriptAdded(script) =>
          scriptAddedPromise.success(script)
        case hb: HitBreakpoint =>
          breakpointCounter.incrementAndGet()
          hitBreakpointPromise.trySuccess("")
        case _ =>
      }

      override def onError(error: Throwable): Unit = {}

      override def onComplete(): Unit = {}
    })

    whenReady(scriptAddedPromise.future) { script =>
      val scriptLocation = eventually {
        host.getBreakpointLocations(ScriptIdentity.fromId(script.id), location(1), None).headOption.getOrElse(fail(s"No line numbers for script ${script.id}"))
      }
      host.setBreakpoint(ScriptIdentity.fromURL(script.url), scriptLocation, BreakpointOptions.empty)

      try {
        whenReady(hitBreakpointPromise.future) { _ =>
          // Ugly, but wait for a while to see if the counter increases over 1 (which it shouldn't).
          Thread.sleep(200)
          breakpointCounter.get() should be(1)
        }
      } catch {
        case t: TestFailedException if t.getMessage().contains("timeout") =>
          val progress = summarizeProgress()
          throw new TimeoutException("Timed out: " + progress)
      }
    }
  }
}

object MultiThreadedScriptExecutor extends App with ScriptExecutorBase {
  println("MultiThreadedScriptExecutor starting. Java version: " + System.getProperty("java.version"))
  val scriptEngine = new NashornScriptEngineFactory().getScriptEngine("--no-syntax-extensions")
  val reader = new BufferedReader(new InputStreamReader(System.in))
  println(Signals.ready)
  waitForSignal(Signals.go)

  // Used a compiled script here before, stopped working with JDK 10
  var src =
    """(function () {
      |  return Math.floor(5.5);
      |})();
    """.stripMargin

  implicit val ec = ExecutionContext.global

  val futures = (1 to 5).map { _ =>
    Future {
      while (true) {
        scriptEngine.eval(src)
      }
    }
  }

  Await.result(Future.sequence(futures), 30.seconds)
} 
Example 66
Source File: RebuildAllPersisetceIdsSpec.scala    From akka-persistence-cassandra   with Apache License 2.0 5 votes vote down vote up
package akka.persistence.cassandra.reconciler

import akka.persistence.cassandra.CassandraSpec
import akka.stream.scaladsl.Sink
import org.scalatest.concurrent.Eventually

class RebuildAllPersisetceIdsSpec extends CassandraSpec with Eventually {

  "RebuildAllPersisetceIds" should {

    val tag1 = "tag1"
    val pid1 = "pid1"
    val pid2 = "pid2"
    val pid3 = "pid3"
    val pid4 = "pid4"
    val pid5 = "pid5"

    "build from messages table" in {
      writeEventsFor(tag1, pid1, 2)
      writeEventsFor(tag1, pid2, 1)
      writeEventsFor(tag1, pid3, 5)

      val reconciliation = new Reconciliation(system)
      reconciliation.rebuildAllPersistenceIds().futureValue

      queries
        .currentPersistenceIds()
        .runWith(Sink.seq)
        .futureValue
        .toSet
        .filterNot(_.startsWith("persistenceInit")) should ===(Set(pid1, pid2, pid3))

      // add some more
      writeEventsFor(tag1, pid4, 2)
      writeEventsFor(tag1, pid5, 4)

      reconciliation.rebuildAllPersistenceIds().futureValue

      queries
        .currentPersistenceIds()
        .runWith(Sink.seq)
        .futureValue
        .toSet
        .filterNot(_.startsWith("persistenceInit")) should ===(Set(pid1, pid2, pid3, pid4, pid5))
    }
  }
} 
Example 67
Source File: TagQuerySpec.scala    From akka-persistence-cassandra   with Apache License 2.0 5 votes vote down vote up
package akka.persistence.cassandra.reconciler

import akka.persistence.cassandra.CassandraSpec
import akka.stream.scaladsl.Sink
import org.scalatest.concurrent.Eventually

class TagQuerySpec extends CassandraSpec with Eventually {

  private lazy val reconciliation = new Reconciliation(system)

  "Tag querying" should {
    "return distinct tags for all tags" in {
      val pid1 = "pid1"
      val pid2 = "pid2"
      val tag1 = "tag1"
      val tag2 = "tag2"
      val tag3 = "tag3"
      reconciliation.allTags().runWith(Sink.seq).futureValue shouldEqual Nil
      writeEventsFor(Set(tag1, tag2), pid1, 3)
      writeEventsFor(Set(tag2, tag3), pid2, 3)
      eventually {
        val allTags = reconciliation.allTags().runWith(Sink.seq).futureValue
        allTags.size shouldEqual 3
        allTags.toSet shouldEqual Set(tag1, tag2, tag3)
      }
    }

    "return tags only if that pid has used them" in {
      val pid1 = "p11"
      val pid2 = "p12"
      val tag1 = "tag11"
      val tag2 = "tag12"
      val tag3 = "tag13"
      writeEventsFor(tag1, pid1, 3)
      writeEventsFor(Set(tag2, tag3), pid2, 3)
      eventually {
        val tags = reconciliation.tagsForPersistenceId(pid2).futureValue
        tags.size shouldEqual 2
        tags.toSet shouldEqual Set(tag2, tag3)
      }
    }
  }

} 
Example 68
Source File: BuildTagViewForPersistenceIdSpec.scala    From akka-persistence-cassandra   with Apache License 2.0 5 votes vote down vote up
package akka.persistence.cassandra.reconciler

import akka.persistence.cassandra.CassandraSpec
import org.scalatest.concurrent.Eventually

class BuildTagViewForPersistenceIdSpec extends CassandraSpec with Eventually {

  "BuildTagViewForPersistenceId" should {

    val tag1 = "tag1"
    val pid1 = "pid1"
    val pid2 = "pid2"

    "build from scratch" in {
      writeEventsFor(tag1, pid1, 2)
      writeEventsFor(tag1, pid2, 1)
      eventually {
        expectEventsForTag(tag1, "pid1 event-1", "pid1 event-2", "pid2 event-1")
      }
      val reconciliation = new Reconciliation(system)
      reconciliation.truncateTagView().futureValue
      expectEventsForTag(tag1)
      reconciliation.rebuildTagViewForPersistenceIds(pid1).futureValue
      eventually {
        expectEventsForTag(tag1, "pid1 event-1", "pid1 event-2")
      }
      reconciliation.rebuildTagViewForPersistenceIds(pid2).futureValue
      eventually {
        expectEventsForTag(tag1, "pid1 event-1", "pid1 event-2", "pid2 event-1")
      }
    }
  }
} 
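
The reconciliation specs share one shape: trigger an asynchronous rebuild, then poll the projected view with eventually. A stand-alone sketch with an in-memory stand-in for the tag view:

import org.scalatest.concurrent.Eventually._

object RebuildSketch extends App {
  @volatile var tagView = Vector.empty[String] // stand-in for the tag_views table

  // stand-in for rebuildTagViewForPersistenceIds: completes asynchronously
  def rebuildFor(pid: String): Unit =
    new Thread(() => {
      Thread.sleep(100)
      tagView = tagView ++ Vector(s"$pid event-1", s"$pid event-2")
    }).start()

  rebuildFor("pid1")
  eventually {
    assert(tagView == Vector("pid1 event-1", "pid1 event-2"))
  }
}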
Example 69
Source File: TruncateAllSpec.scala    From akka-persistence-cassandra   with Apache License 2.0 5 votes vote down vote up
package akka.persistence.cassandra.reconciler

import akka.persistence.cassandra.CassandraSpec
import org.scalatest.concurrent.Eventually
import akka.persistence.cassandra.TestTaggingActor

class TruncateAllSpec extends CassandraSpec with Eventually {

  val pid1 = "pid1"
  val pid2 = "pid2"
  val tag1 = "tag1"
  val tag2 = "tag2"
  val tag3 = "tag3"

  "Truncate " should {
    "remove all tags from the table" in {
      writeEventsFor(tag1, pid1, 2)
      writeEventsFor(Set(tag1, tag2, tag3), pid2, 2)

      expectEventsForTag(tag1, "pid1 event-1", "pid1 event-2", "pid2 event-1", "pid2 event-2")
      expectEventsForTag(tag2, "pid2 event-1", "pid2 event-2")
      expectEventsForTag(tag3, "pid2 event-1", "pid2 event-2")

      val reconciliation = new Reconciliation(system)
      reconciliation.truncateTagView().futureValue

      eventually {
        expectEventsForTag(tag1)
        expectEventsForTag(tag2)
        expectEventsForTag(tag3)
      }
    }

    "recover if actors are started again" in {
      system.actorOf(TestTaggingActor.props(pid1))
      system.actorOf(TestTaggingActor.props(pid2))
      eventually {
        expectEventsForTag(tag1, "pid1 event-1", "pid1 event-2", "pid2 event-1", "pid2 event-2")
        expectEventsForTag(tag2, "pid2 event-1", "pid2 event-2")
        expectEventsForTag(tag3, "pid2 event-1", "pid2 event-2")
      }

    }
  }

} 
Example 70
Source File: TokenSpec.scala    From lucene4s   with MIT License 5 votes vote down vote up
package tests

import java.util.UUID

import org.scalatest.concurrent.Eventually
import org.scalatest.{Matchers, WordSpec}

import scala.util.Try
import com.outr.lucene4s._
import com.outr.lucene4s.field.{Field, FieldType}

class TokenSpec extends WordSpec with Matchers with Eventually {
  val lucene: Lucene = new DirectLucene(uniqueFields = List.empty, defaultFullTextSearchable = true, autoCommit = true)

  val sessionTokenField: Field[String] = lucene.create.field[String](name = "sessionTokenField", FieldType.Untokenized)
  val sessionEmailField: Field[String] = lucene.create.field[String](name = "sessionEmailField")
  val sessionExpireField: Field[Long] = lucene.create.field[Long](name = "sessionExpireField")

  "generate session tokens for emails, expire tokens and assert that expire time is set" in {
    val userEmails = (1 to 10) map (i => s"user$i@example.com")

    // Generate UUID -> e-mail tuples and index them
    val tokensAndEmails: Seq[(String, String)] = userEmails map { email =>
      val token = UUID.randomUUID().toString.replaceAllLiterally("-", "")
      lucene.doc().fields(sessionTokenField(token), sessionEmailField(email)).index()
      (token, email)
    }

    // Update all the tokens to be expired
    tokensAndEmails foreach {
      case (token, email) => lucene
        .update(sessionTokenField(token))
        .fields(
          sessionTokenField(token),
          sessionEmailField(email),
          sessionExpireField(System.currentTimeMillis())
        ).index()
    }
    lucene.commit()

    // Query each token and verify values are correct
    tokensAndEmails foreach {
      case (token, email) => {
//        val searchTerm = sessionTokenField(token.replaceAllLiterally("-", ""))
        val searchTerm = parse(sessionTokenField, token.replaceAllLiterally("-", ""))
        val results = lucene.query().filter(searchTerm).search().results
        results should have size 1
        val headResult = results.head

        headResult(sessionTokenField) shouldBe token
        headResult(sessionEmailField) shouldBe email
        Try(headResult(sessionExpireField)).toOption shouldBe defined
      }
    }
  }
} 
Example 71
Source File: TestAggregateSpec.scala    From akka-cqrs   with Apache License 2.0 5 votes vote down vote up
package com.productfoundry.akka.cluster

import akka.actor.Props
import com.productfoundry.akka.PassivationConfig
import com.productfoundry.akka.cqrs.{AggregateStatus, AggregateFactory, AggregateIdResolution, EntityIdResolution}
import org.scalatest.concurrent.Eventually
import org.scalatest.time.{Seconds, Millis, Span}
import test.support.ClusterSpec
import test.support.ClusterConfig._

class TestAggregateSpecMultiJvmNode1 extends TestAggregateSpec
class TestAggregateSpecMultiJvmNode2 extends TestAggregateSpec

object TestActorFactory extends AggregateFactory[TestAggregate] {
  override def props(config: PassivationConfig): Props = {
    Props(classOf[TestAggregate], config)
  }
}

class TestAggregateSpec  extends ClusterSpec with Eventually {

  implicit def entityIdResolution: EntityIdResolution[TestAggregate] = new AggregateIdResolution[TestAggregate]()

  implicit def aggregateFactory: AggregateFactory[TestAggregate] = TestActorFactory

  implicit override val patienceConfig = PatienceConfig(
    timeout = scaled(Span(5, Seconds)),
    interval = scaled(Span(100, Millis))
  )

  "Test aggregate" must {

    "given cluster joined" in {
      setupSharedJournal()
      joinCluster()
    }

    enterBarrier("when")

    val entityContext = new ClusterSingletonEntityContext(system)

    "send all commands to same aggregate" in {

      def test(): Unit = {
        val aggregate = entityContext.entitySupervisorFactory[TestAggregate].getOrCreate
        val id = TestId("1")

        aggregate ! Count(id)
        expectMsgType[AggregateStatus.Success]

        eventually {
          aggregate ! GetCount(id)
          expectMsgType[GetCountResult].count shouldBe 2
        }
      }

      on(node1)(test())

      on(node2)(test())
    }
  }
} 
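
The eventually block above re-sends the query on every retry, so each attempt observes fresh state. A self-contained sketch of polling actor state this way:

import akka.actor.{Actor, ActorSystem, Props}
import akka.testkit.{ImplicitSender, TestKit}
import org.scalatest.concurrent.Eventually
import org.scalatest.{Matchers, WordSpecLike}

class CounterActor extends Actor {
  private var n = 0
  def receive: Receive = {
    case "inc" => n += 1
    case "get" => sender() ! n
  }
}

class CounterSpec extends TestKit(ActorSystem("counter-spec")) with ImplicitSender
    with WordSpecLike with Matchers with Eventually {

  "a counter" must {
    "eventually reach the expected count" in {
      val counter = system.actorOf(Props[CounterActor])
      counter ! "inc"
      counter ! "inc"
      eventually {
        counter ! "get" // re-query on every retry
        expectMsgType[Int] shouldBe 2
      }
    }
  }
}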
Example 72
Source File: EntitySupport.scala    From akka-cqrs   with Apache License 2.0 5 votes vote down vote up
package com.productfoundry.akka.cqrs

import akka.actor.{ActorRef, ActorSystem, PoisonPill, Terminated}
import akka.testkit.{ImplicitSender, TestKit}
import akka.util.Timeout
import org.scalatest.concurrent.Eventually
import org.scalatest.time.{Millis, Second, Span}
import org.scalatest.{BeforeAndAfter, BeforeAndAfterAll, Matchers, WordSpecLike}

import scala.concurrent.duration._

abstract class EntitySupport(_system: ActorSystem)
  extends TestKit(_system)
  with ImplicitSender
  with WordSpecLike
  with Matchers
  with BeforeAndAfterAll
  with BeforeAndAfter
  with Eventually {

  
  override def afterAll(): Unit = {
    TestKit.shutdownActorSystem(system)
  }
} 
Example 73
Source File: PersistenceTestSupport.scala    From akka-cqrs   with Apache License 2.0 5 votes vote down vote up
package com.productfoundry.support

import java.util.UUID

import akka.testkit.{ImplicitSender, TestKit}
import org.scalatest.concurrent.Eventually
import org.scalatest.time.{Millis, Span}
import org.scalatest.{BeforeAndAfterAll, Matchers, WordSpecLike}

abstract class PersistenceTestSupport
  extends TestKit(TestConfig.testSystem)
  with ImplicitSender
  with WordSpecLike
  with Matchers
  with BeforeAndAfterAll
  with Eventually {

  def randomPersistenceId = UUID.randomUUID.toString

  implicit override val patienceConfig = PatienceConfig(
    timeout = scaled(Span(500, Millis)),
    interval = scaled(Span(10, Millis))
  )

  override protected def afterAll(): Unit = {
    TestKit.shutdownActorSystem(system)
  }
} 
Example 74
Source File: KafkaStreamSuite.scala    From multi-tenancy-spark   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.streaming.kafka

import scala.collection.mutable
import scala.concurrent.duration._
import scala.language.postfixOps
import scala.util.Random

import kafka.serializer.StringDecoder
import org.scalatest.BeforeAndAfterAll
import org.scalatest.concurrent.Eventually

import org.apache.spark.{SparkConf, SparkFunSuite}
import org.apache.spark.storage.StorageLevel
import org.apache.spark.streaming.{Milliseconds, StreamingContext}

class KafkaStreamSuite extends SparkFunSuite with Eventually with BeforeAndAfterAll {
  private var ssc: StreamingContext = _
  private var kafkaTestUtils: KafkaTestUtils = _

  override def beforeAll(): Unit = {
    kafkaTestUtils = new KafkaTestUtils
    kafkaTestUtils.setup()
  }

  override def afterAll(): Unit = {
    if (ssc != null) {
      ssc.stop()
      ssc = null
    }

    if (kafkaTestUtils != null) {
      kafkaTestUtils.teardown()
      kafkaTestUtils = null
    }
  }

  test("Kafka input stream") {
    val sparkConf = new SparkConf().setMaster("local[4]").setAppName(this.getClass.getSimpleName)
    ssc = new StreamingContext(sparkConf, Milliseconds(500))
    val topic = "topic1"
    val sent = Map("a" -> 5, "b" -> 3, "c" -> 10)
    kafkaTestUtils.createTopic(topic)
    kafkaTestUtils.sendMessages(topic, sent)

    val kafkaParams = Map("zookeeper.connect" -> kafkaTestUtils.zkAddress,
      "group.id" -> s"test-consumer-${Random.nextInt(10000)}",
      "auto.offset.reset" -> "smallest")

    val stream = KafkaUtils.createStream[String, String, StringDecoder, StringDecoder](
      ssc, kafkaParams, Map(topic -> 1), StorageLevel.MEMORY_ONLY)
    val result = new mutable.HashMap[String, Long]()
    stream.map(_._2).countByValue().foreachRDD { r =>
      r.collect().foreach { kv =>
        result.synchronized {
          val count = result.getOrElseUpdate(kv._1, 0) + kv._2
          result.put(kv._1, count)
        }
      }
    }

    ssc.start()

    eventually(timeout(10000 milliseconds), interval(100 milliseconds)) {
      assert(result.synchronized { sent === result })
    }
  }
} 
Example 75
Source File: SpecBase.scala    From kafka-lag-exporter   with Apache License 2.0 5 votes vote down vote up
package com.lightbend.kafkalagexporter.integration

import akka.actor.typed.ActorSystem
import akka.kafka.testkit.scaladsl.{EmbeddedKafkaLike, ScalatestKafkaSpec}
import com.lightbend.kafkalagexporter.MainApp
import com.lightbend.kafkalagexporter.KafkaClusterManager
import com.typesafe.config.{Config, ConfigFactory}
import net.manub.embeddedkafka.EmbeddedKafkaConfig
import org.scalatest.concurrent.{Eventually, ScalaFutures}
import org.scalatest.{BeforeAndAfterEach, Matchers, WordSpecLike}

import scala.concurrent.Await
import scala.concurrent.duration._

abstract class SpecBase(kafkaPort: Int, val exporterPort: Int)
  extends ScalatestKafkaSpec(kafkaPort)
    with WordSpecLike
    with BeforeAndAfterEach
    with EmbeddedKafkaLike
    with Matchers
    with ScalaFutures
    with Eventually
    with PrometheusUtils
    with LagSim {

  override def createKafkaConfig: EmbeddedKafkaConfig =
    EmbeddedKafkaConfig(kafkaPort,
      zooKeeperPort,
      Map(
        "offsets.topic.replication.factor" -> "1"
      ))

  var kafkaLagExporter: ActorSystem[KafkaClusterManager.Message] = _

  val clusterName = "default"

  val config: Config = ConfigFactory.parseString(s"""
                                            |kafka-lag-exporter {
                                            |  port: $exporterPort
                                            |  clusters = [
                                            |    {
                                            |      name: "$clusterName"
                                            |      bootstrap-brokers: "localhost:$kafkaPort"
                                            |    }
                                            |  ]
                                            |  poll-interval = 5 seconds
                                            |  lookup-table-size = 20
                                            |}""".stripMargin).withFallback(ConfigFactory.load())

  override def beforeEach(): Unit = {
    kafkaLagExporter = MainApp.start(config)
  }

  override def afterEach(): Unit = {
    kafkaLagExporter ! KafkaClusterManager.Stop
    Await.result(kafkaLagExporter.whenTerminated, 10 seconds)
  }
} 
Example 76
Source File: KafkaStreamSuite.scala    From iolap   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.streaming.kafka

import scala.collection.mutable
import scala.concurrent.duration._
import scala.language.postfixOps
import scala.util.Random

import kafka.serializer.StringDecoder
import org.scalatest.BeforeAndAfterAll
import org.scalatest.concurrent.Eventually

import org.apache.spark.{SparkConf, SparkFunSuite}
import org.apache.spark.storage.StorageLevel
import org.apache.spark.streaming.{Milliseconds, StreamingContext}

class KafkaStreamSuite extends SparkFunSuite with Eventually with BeforeAndAfterAll {
  private var ssc: StreamingContext = _
  private var kafkaTestUtils: KafkaTestUtils = _

  override def beforeAll(): Unit = {
    kafkaTestUtils = new KafkaTestUtils
    kafkaTestUtils.setup()
  }

  override def afterAll(): Unit = {
    if (ssc != null) {
      ssc.stop()
      ssc = null
    }

    if (kafkaTestUtils != null) {
      kafkaTestUtils.teardown()
      kafkaTestUtils = null
    }
  }

  test("Kafka input stream") {
    val sparkConf = new SparkConf().setMaster("local[4]").setAppName(this.getClass.getSimpleName)
    ssc = new StreamingContext(sparkConf, Milliseconds(500))
    val topic = "topic1"
    val sent = Map("a" -> 5, "b" -> 3, "c" -> 10)
    kafkaTestUtils.createTopic(topic)
    kafkaTestUtils.sendMessages(topic, sent)

    val kafkaParams = Map("zookeeper.connect" -> kafkaTestUtils.zkAddress,
      "group.id" -> s"test-consumer-${Random.nextInt(10000)}",
      "auto.offset.reset" -> "smallest")

    val stream = KafkaUtils.createStream[String, String, StringDecoder, StringDecoder](
      ssc, kafkaParams, Map(topic -> 1), StorageLevel.MEMORY_ONLY)
    val result = new mutable.HashMap[String, Long]() with mutable.SynchronizedMap[String, Long]
    stream.map(_._2).countByValue().foreachRDD { r =>
      val ret = r.collect()
      ret.toMap.foreach { kv =>
        val count = result.getOrElseUpdate(kv._1, 0) + kv._2
        result.put(kv._1, count)
      }
    }

    ssc.start()

    eventually(timeout(10000 milliseconds), interval(100 milliseconds)) {
      assert(sent === result)
    }
  }
} 
Example 77
Source File: MethodEntryListCommandIntegrationSpec.scala    From scala-debugger   with Apache License 2.0 5 votes vote down vote up
package org.scaladebugger.tool.commands

import org.scaladebugger.test.helpers.ParallelMockFunSpec
import org.scalatest.concurrent.Eventually
import test.{ToolConstants, ToolFixtures, ToolTestUtilities}

class MethodEntryListCommandIntegrationSpec extends ParallelMockFunSpec
  with ToolFixtures
  with ToolTestUtilities
  with Eventually
{
  implicit override val patienceConfig = PatienceConfig(
    timeout = scaled(ToolConstants.EventuallyTimeout),
    interval = scaled(ToolConstants.EventuallyInterval)
  )

  describe("MethodEntryListCommand") {
    it("should list pending and active method entry requests") {
      val testClass = "org.scaladebugger.test.methods.MethodEntry"
      val testClassName = "org.scaladebugger.test.methods.MethodEntryTestClass"
      val testMethodName = "testMethod"

      val testFakeClassName = "invalid.class"
      val testFakeMethodName = "fakeMethod"

      // Create method entry request before JVM starts
      val q = "\""
      val virtualTerminal = newVirtualTerminal()

      // Valid, so will be active
      virtualTerminal.newInputLine(s"mentry $q$testClassName$q $q$testMethodName$q")

      // Invalid, so will be inactive
      virtualTerminal.newInputLine(s"mentry $q$testFakeClassName$q $q$testFakeMethodName$q")

      withToolRunningUsingTerminal(
        className = testClass,
        virtualTerminal = virtualTerminal
      ) { (vt, sm, start) =>
        logTimeTaken({
          // Verify our method entry requests were set
          validateNextLine(vt, s"Set method entry for class $testClassName and method $testMethodName\n")
          validateNextLine(vt, s"Set method entry for class $testFakeClassName and method $testFakeMethodName\n")

          // Verify that we have attached to the JVM
          validateNextLine(vt, "Attached with id",
            success = (text, line) => line should startWith(text))

          // Assert that we hit the first methodEntry
          validateNextLine(vt, s"Method entry hit for $testClassName.$testMethodName\n")

          // List all available method entry requests
          vt.newInputLine("mentrylist")

          // First prints out JVM id
          validateNextLine(vt, "JVM",
            success = (text, line) => line should include(text))

          // Verify expected pending and active requests show up
          // by collecting the two available and checking their content
          val lines = Seq(nextLine(vt), nextLine(vt)).flatten
          lines should contain allOf(
            s"$testClassName.$testMethodName (Active)\n",
            s"$testFakeClassName.$testFakeMethodName (Pending)\n"
          )
        })
      }
    }
  }
} 
Example 78
Source File: BreakpointListCommandIntegrationSpec.scala    From scala-debugger   with Apache License 2.0 5 votes vote down vote up
package org.scaladebugger.tool.commands

import java.io.File

import org.scaladebugger.api.utils.JDITools
import org.scaladebugger.test.helpers.ParallelMockFunSpec
import org.scalatest.concurrent.Eventually
import test.{ToolConstants, ToolFixtures, ToolTestUtilities}

class BreakpointListCommandIntegrationSpec extends ParallelMockFunSpec
  with ToolFixtures
  with ToolTestUtilities
  with Eventually
{
  implicit override val patienceConfig = PatienceConfig(
    timeout = scaled(ToolConstants.EventuallyTimeout),
    interval = scaled(ToolConstants.EventuallyInterval)
  )

  describe("BreakpointListCommand") {
    it("should list pending and active breakpoints") {
      val testClass = "org.scaladebugger.test.breakpoints.DelayedInit"
      val testFile = JDITools.scalaClassStringToFileString(testClass)
      val testFileName = new File(testFile).getName

      // Create two breakpoints before connecting to the JVM that are valid
      // and one breakpoint that is not (so always pending)
      val q = "\""
      val virtualTerminal = newVirtualTerminal()
      virtualTerminal.newInputLine(s"bp $q$testFile$q 10")
      virtualTerminal.newInputLine(s"bp $q$testFile$q 11")
      virtualTerminal.newInputLine("bp \"some/file.scala\" 999")

      withToolRunningUsingTerminal(
        className = testClass,
        virtualTerminal = virtualTerminal
      ) { (vt, sm, start) =>
        logTimeTaken({
          // Verify our breakpoints were set
          validateNextLine(vt, s"Set breakpoint at $testFile:10\n")
          validateNextLine(vt, s"Set breakpoint at $testFile:11\n")
          validateNextLine(vt, "Set breakpoint at some/file.scala:999\n")

          // Verify that we have attached to the JVM
          validateNextLine(vt, "Attached with id",
            success = (text, line) => line should startWith(text))

          // Assert that we hit the first breakpoint
          validateNextLine(vt, s"Breakpoint hit at $testFileName:10\n")

          // List all available breakpoints
          vt.newInputLine("bplist")

          // First prints out JVM id
          validateNextLine(vt, "JVM",
            success = (text, line) => line should include(text))

          // Verify expected pending and active breakpoints show up
          // by collecting the three available and checking their content
          val lines = Seq(nextLine(vt), nextLine(vt), nextLine(vt)).flatten
          lines should contain allOf(
            s"$testFile:10 (Active)\n",
            s"$testFile:11 (Active)\n",
            "some/file.scala:999 (Pending)\n"
          )
        })
      }
    }
  }
} 
Example 79
Source File: AttachpCommandIntegrationSpec.scala    From scala-debugger   with Apache License 2.0 5 votes vote down vote up
package org.scaladebugger.tool.commands

import org.scaladebugger.api.utils.JDITools
import org.scaladebugger.test.helpers.ParallelMockFunSpec
import org.scaladebugger.tool.Repl
import org.scalatest.concurrent.Eventually
import test.{ToolConstants, ToolFixtures, ToolTestUtilities}

class AttachpCommandIntegrationSpec extends ParallelMockFunSpec
  with ToolFixtures
  with ToolTestUtilities
  with Eventually
{
  implicit override val patienceConfig = PatienceConfig(
    timeout = scaled(ToolConstants.EventuallyTimeout),
    interval = scaled(ToolConstants.EventuallyInterval)
  )

  describe("AttachpCommand") {
    it("should attach successfully using a pid") {
      val testClass = "org.scaladebugger.test.misc.AttachingMain"
      val testFile = JDITools.scalaClassStringToFileString(testClass)

      withProcessPid(testClass) { (pid) =>
        val terminal = newVirtualTerminal()

        val repl = Repl.newInstance(newTerminal = (_,_) => terminal)

        // Queue up attach action
        terminal.newInputLine(s"attachp $pid")

        // Start processing input
        // TODO: Add repl stop code regardless of test success
        repl.start()

        // Eventually, attach should complete
        logTimeTaken(eventually {
          repl.stateManager.state.activeDebugger should not be None
          repl.stateManager.state.scalaVirtualMachines should not be (empty)
        })

        // Finished
        repl.stop()
      }
    }
  }
} 
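
The TODO above notes that repl.stop() should run regardless of test success; as written, a failing eventually block skips it. A sketch of that cleanup, using only the calls already shown in the spec:

try {
  repl.start()
  logTimeTaken(eventually {
    repl.stateManager.state.activeDebugger should not be None
  })
} finally {
  repl.stop() // runs whether or not the assertions above succeeded
}
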
Example 80
Source File: WatchListCommandIntegrationSpec.scala    From scala-debugger   with Apache License 2.0
package org.scaladebugger.tool.commands

import org.scaladebugger.api.utils.JDITools
import org.scaladebugger.test.helpers.ParallelMockFunSpec
import org.scalatest.concurrent.Eventually
import test.{ToolConstants, ToolFixtures, ToolTestUtilities}

class WatchListCommandIntegrationSpec extends ParallelMockFunSpec
  with ToolFixtures
  with ToolTestUtilities
  with Eventually
{
  implicit override val patienceConfig = PatienceConfig(
    timeout = scaled(ToolConstants.EventuallyTimeout),
    interval = scaled(ToolConstants.EventuallyInterval)
  )

  describe("WatchListCommand") {
    it("should list pending and active watchpoint requests") {
      val testClass = "org.scaladebugger.test.watchpoints.AccessWatchpoint"
      val testFile = JDITools.scalaClassStringToFileString(testClass)

      val className = "org.scaladebugger.test.watchpoints.SomeAccessClass"
      val fieldName = "field"

      val fakeClassName = "invalid.class"
      val fakeFieldName = "fakeField"

      // Create watch request before connecting to the JVM
      val q = "\""
      val virtualTerminal = newVirtualTerminal()
      virtualTerminal.newInputLine(s"watch $q$className$q $q$fieldName$q")
      virtualTerminal.newInputLine(s"watcha $q$className$q $q$fieldName$q")
      virtualTerminal.newInputLine(s"watchm $q$className$q $q$fieldName$q")
      virtualTerminal.newInputLine(s"watch $q$fakeClassName$q $q$fakeFieldName$q")
      virtualTerminal.newInputLine(s"watcha $q$fakeClassName$q $q$fakeFieldName$q")
      virtualTerminal.newInputLine(s"watchm $q$fakeClassName$q $q$fakeFieldName$q")

      withToolRunningUsingTerminal(
        className = testClass,
        virtualTerminal = virtualTerminal
      ) { (vt, sm, start) =>
        logTimeTaken({
          // Verify our watch requests were made
          eventually {
            val svm = sm.state.scalaVirtualMachines.head

            val awrs = svm.accessWatchpointRequests.map(awr =>
              (awr.className, awr.fieldName, awr.isPending))
            awrs should contain theSameElementsAs Seq(
              (className, fieldName, false),
              (className, fieldName, false),
              (fakeClassName, fakeFieldName, true),
              (fakeClassName, fakeFieldName, true)
            )

            val mwrs = svm.modificationWatchpointRequests.map(awr =>
              (awr.className, awr.fieldName, awr.isPending))
            mwrs should contain theSameElementsAs Seq(
              (className, fieldName, false),
              (className, fieldName, false),
              (fakeClassName, fakeFieldName, true),
              (fakeClassName, fakeFieldName, true)
            )
          }

          // List all available watchpoints
          vt.newInputLine("watchlist")

          // First prints out JVM id
          eventually {
            validateNextLine(vt, "JVM",
              success = (text, line) => line should include(text))
          }

          // Verify expected pending and active requests show up
          // by collecting the four available and checking their content
          val lines = Seq(nextLine(vt), nextLine(vt), nextLine(vt), nextLine(vt)).flatten
          lines should contain allOf(
            s"{Class $className}\n",
            s"-> Field '$fieldName' [Access: Active] [Modification: Active]\n",
            s"{Class $fakeClassName}\n",
            s"-> Field '$fakeFieldName' [Access: Pending] [Modification: Pending]\n"
          )
        })
      }
    }
  }
} 
Example 81
Source File: ThreadCommandIntegrationSpec.scala    From scala-debugger   with Apache License 2.0
package org.scaladebugger.tool.commands

import java.io.File

import org.scaladebugger.api.profiles.traits.info.ThreadInfo
import org.scaladebugger.api.utils.JDITools
import org.scaladebugger.test.helpers.ParallelMockFunSpec
import org.scalatest.concurrent.Eventually
import test.{ToolConstants, ToolFixtures, ToolTestUtilities}

class ThreadCommandIntegrationSpec extends ParallelMockFunSpec
  with ToolFixtures
  with ToolTestUtilities
  with Eventually
{
  implicit override val patienceConfig = PatienceConfig(
    timeout = scaled(ToolConstants.EventuallyTimeout),
    interval = scaled(ToolConstants.EventuallyInterval)
  )

  describe("ThreadCommand") {
    it("should set the active thread if a name is provided") {
      val testClass = "org.scaladebugger.test.breakpoints.DelayedInit"
      val testFile = JDITools.scalaClassStringToFileString(testClass)
      val testFileName = new File(testFile).getName

      val threadName = "main"

      // Create a breakpoint before connecting to the JVM
      val q = "\""
      val virtualTerminal = newVirtualTerminal()
      virtualTerminal.newInputLine(s"bp $q$testFile$q 10")

      withToolRunningUsingTerminal(
        className = testClass,
        virtualTerminal = virtualTerminal
      ) { (vt, sm, start) =>
        logTimeTaken({
          // Assert that we hit the breakpoint
          eventually {
            validateNextLine(vt, s"Breakpoint hit at $testFileName:10\n")
          }

          // Set active thread
          vt.newInputLine(s"thread $q$threadName$q")

          eventually {
            sm.state.activeThread.get.name should be (threadName)
          }
        })
      }
    }

    it("should clear the active thread if a name is not provided") {
      val testClass = "org.scaladebugger.test.breakpoints.DelayedInit"
      val testFile = JDITools.scalaClassStringToFileString(testClass)
      val testFileName = new File(testFile).getName

      // Create a breakpoint before connecting to the JVM
      val q = "\""
      val virtualTerminal = newVirtualTerminal()
      virtualTerminal.newInputLine(s"bp $q$testFile$q 10")

      withToolRunningUsingTerminal(
        className = testClass,
        virtualTerminal = virtualTerminal
      ) { (vt, sm, start) =>
        logTimeTaken({
          // Assert that we hit the breakpoint
          eventually {
            validateNextLine(vt, s"Breakpoint hit at $testFileName:10\n")
          }

          // Set a fake active thread
          sm.updateActiveThread(mock[ThreadInfo])

          // Clear the active thread
          vt.newInputLine("thread")

          eventually {
            sm.state.activeThread should be (None)
          }
        })
      }
    }
  }
} 
Example 82
Source File: LocalsCommandIntegrationSpec.scala    From scala-debugger   with Apache License 2.0
package org.scaladebugger.tool.commands

import java.io.File

import org.scaladebugger.api.utils.JDITools
import org.scaladebugger.test.helpers.ParallelMockFunSpec
import org.scalatest.concurrent.Eventually
import test.{ToolConstants, ToolFixtures, ToolTestUtilities}

class LocalsCommandIntegrationSpec extends ParallelMockFunSpec
  with ToolFixtures
  with ToolTestUtilities
  with Eventually
{
  implicit override val patienceConfig = PatienceConfig(
    timeout = scaled(ToolConstants.EventuallyTimeout),
    interval = scaled(ToolConstants.EventuallyInterval)
  )

  describe("LocalsCommand") {
    it("should be able to list fields and local variables") {
      val testClass = "org.scaladebugger.test.info.Variables"
      val testFile = JDITools.scalaClassStringToFileString(testClass)
      val testFileName = new File(testFile).getName

      val threadName = "main"

      // Create one breakpoint before connecting to the JVM
      val q = "\""
      val virtualTerminal = newVirtualTerminal()
      virtualTerminal.newInputLine(s"bp $q$testFile$q 44")

      withToolRunningUsingTerminal(
        className = testClass,
        virtualTerminal = virtualTerminal
      ) { (vt, sm, start) =>
        logTimeTaken({
          // Assert that we hit the breakpoint
          eventually {
            validateNextLine(vt, s"Breakpoint hit at $testFileName:44\n")
          }

          // Set our active thread to examine values
          vt.newInputLine(s"thread $q$threadName$q")

          // Request list of local variables and fields
          vt.newInputLine("locals")

          // Gather all data (after delay to allow accumulation of all text)
          val prefix = "-> "
          val waitTime = ToolConstants.AccumulationTimeout.millisPart
          val lines = Stream.continually(vt.nextOutputLine(waitTime = waitTime))
            .takeWhile(_.nonEmpty).flatten.map(_.trim).map(_.stripPrefix(prefix))

          // NOTE: Not taking exhaustive list because
          //       a) we are not validating the logic, just the output
          //       b) some output is harder to test, such as objects
          //          with unique ids, without breaking the contain allOf
          lines should contain allOf(
            "[FIELDS]",
            "z1 = 1",
            "z2 = \"something\"",
            "z3 = null",
            "[LOCALS]",
            "a = true",
            "b = 'c'",
            "c = 3",
            "d = 4",
            "e = 5",
            "f = 1.0",
            "g = 2.0"
          )
        })
      }
    }
  }
} 
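
The gather step above polls the terminal until no output arrives within the wait time. The same drain logic, extracted as a hypothetical helper (the VirtualTerminal type name is assumed from newVirtualTerminal()):

// Poll until nextOutputLine times out, flattening the Option results;
// toVector forces the lazy Stream so callers get a strict collection
def drainOutput(vt: VirtualTerminal, waitTime: Long): Seq[String] =
  Stream.continually(vt.nextOutputLine(waitTime = waitTime))
    .takeWhile(_.nonEmpty)
    .flatten
    .map(_.trim)
    .toVector
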
Example 83
Source File: ThreadGroupListCommandIntegrationSpec.scala    From scala-debugger   with Apache License 2.0
package org.scaladebugger.tool.commands

import java.io.File

import org.scaladebugger.api.utils.JDITools
import org.scaladebugger.test.helpers.ParallelMockFunSpec
import org.scalatest.concurrent.Eventually
import test.{ToolConstants, ToolFixtures, ToolTestUtilities}

class ThreadGroupListCommandIntegrationSpec extends ParallelMockFunSpec
  with ToolFixtures
  with ToolTestUtilities
  with Eventually
{
  implicit override val patienceConfig = PatienceConfig(
    timeout = scaled(ToolConstants.EventuallyTimeout),
    interval = scaled(ToolConstants.EventuallyInterval)
  )

  describe("ThreadGroupListCommand") {
    it("should list all thread groups of the active JVMs") {
      val testClass = "org.scaladebugger.test.breakpoints.DelayedInit"
      val testFile = JDITools.scalaClassStringToFileString(testClass)
      val testFileName = new File(testFile).getName

      // Create a breakpoint before connecting to the JVM
      val q = "\""
      val virtualTerminal = newVirtualTerminal()
      virtualTerminal.newInputLine(s"bp $q$testFile$q 10")

      withToolRunningUsingTerminal(
        className = testClass,
        virtualTerminal = virtualTerminal
      ) { (vt, sm, start) =>
        logTimeTaken({
          // Assert that we hit the breakpoint
          eventually {
            validateNextLine(vt, s"Breakpoint hit at $testFileName:10\n")
          }

          // List all thread groups
          vt.newInputLine("threadgroups")

          // First prints out JVM id
          validateNextLine(vt, "JVM",
            success = (text, line) => line should include(text))

          // Verify that we have expected thread groups
          // of 'main' and 'system'
          // (class.name)0xID threadGroupName
          val lines = Seq(nextLine(vt), nextLine(vt))
            .flatten.map(_.split(" ").last.trim)
          lines should contain allOf("main", "system")
        })
      }
    }
  }
} 
Example 84
Source File: ThreadGroupCommandIntegrationSpec.scala    From scala-debugger   with Apache License 2.0
package org.scaladebugger.tool.commands

import java.io.File

import org.scaladebugger.api.profiles.traits.info.ThreadGroupInfo
import org.scaladebugger.api.utils.JDITools
import org.scaladebugger.test.helpers.ParallelMockFunSpec
import org.scalatest.concurrent.Eventually
import test.{ToolConstants, ToolFixtures, ToolTestUtilities}

class ThreadGroupCommandIntegrationSpec extends ParallelMockFunSpec
  with ToolFixtures
  with ToolTestUtilities
  with Eventually
{
  implicit override val patienceConfig = PatienceConfig(
    timeout = scaled(ToolConstants.EventuallyTimeout),
    interval = scaled(ToolConstants.EventuallyInterval)
  )

  describe("ThreadGroupCommand") {
    it("should clear the active thread group if no name provided") {
      val testClass = "org.scaladebugger.test.breakpoints.DelayedInit"
      val testFile = JDITools.scalaClassStringToFileString(testClass)
      val testFileName = new File(testFile).getName

      // Create a breakpoint before connecting to the JVM
      val q = "\""
      val virtualTerminal = newVirtualTerminal()
      virtualTerminal.newInputLine(s"bp $q$testFile$q 10")

      withToolRunningUsingTerminal(
        className = testClass,
        virtualTerminal = virtualTerminal
      ) { (vt, sm, start) =>
        logTimeTaken({
          // Assert that we hit the breakpoint
          eventually {
            validateNextLine(vt, s"Breakpoint hit at $testFileName:10\n")
          }

          // Set a fake active thread group
          sm.updateActiveThreadGroup(mock[ThreadGroupInfo])

          // Clear our thread group
          vt.newInputLine(s"threadgroup")

          // Verify that the active thread group is empty
          eventually {
            sm.state.activeThreadGroup should be (None)
          }
        })
      }
    }

    it("should set the active thread group by name if provided") {
      val testClass = "org.scaladebugger.test.breakpoints.DelayedInit"
      val testFile = JDITools.scalaClassStringToFileString(testClass)
      val testFileName = new File(testFile).getName

      val threadGroupName = "main"

      // Create a breakpoint before connecting to the JVM
      val q = "\""
      val virtualTerminal = newVirtualTerminal()
      virtualTerminal.newInputLine(s"bp $q$testFile$q 10")

      withToolRunningUsingTerminal(
        className = testClass,
        virtualTerminal = virtualTerminal
      ) { (vt, sm, start) =>
        logTimeTaken({
          // Assert that we hit the breakpoint
          eventually {
            validateNextLine(vt, s"Breakpoint hit at $testFileName:10\n")
          }

          // Set our active thread group
          vt.newInputLine(s"threadgroup $q$threadGroupName$q")

          // Verify that the active thread group is set
          eventually {
            sm.state.activeThreadGroup.get.name should be (threadGroupName)
          }
        })
      }
    }
  }
} 
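
Above, a bare mock[ThreadGroupInfo] suffices because the command only clears state. If a test needed the mock to answer calls, a ScalaMock expectation could be added first; a sketch assuming MockFactory is provided by ParallelMockFunSpec and that "fake-group" is an arbitrary name:

val tg = mock[ThreadGroupInfo]
// Allow any number of name lookups on the injected mock
(tg.name _).expects().returning("fake-group").anyNumberOfTimes()
sm.updateActiveThreadGroup(tg)
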
Example 85
Source File: LaunchCommandIntegrationSpec.scala    From scala-debugger   with Apache License 2.0
package org.scaladebugger.tool.commands

import org.scaladebugger.api.utils.JDITools
import org.scaladebugger.test.helpers.ParallelMockFunSpec
import org.scaladebugger.tool.Repl
import org.scalatest.concurrent.Eventually
import test.{ToolConstants, ToolFixtures, ToolTestUtilities}

class LaunchCommandIntegrationSpec extends ParallelMockFunSpec
  with ToolFixtures
  with ToolTestUtilities
  with Eventually
{
  implicit override val patienceConfig = PatienceConfig(
    timeout = scaled(ToolConstants.EventuallyTimeout),
    interval = scaled(ToolConstants.EventuallyInterval)
  )

  describe("LaunchCommand") {
    it("should launch and attach to the specified class") {
      val testClass = "org.scaladebugger.test.misc.LaunchingMain"
      val testFile = JDITools.scalaClassStringToFileString(testClass)

      val q = "\""
      val terminal = newVirtualTerminal()
      val repl = Repl.newInstance(newTerminal = (_,_) => terminal)

      // Queue up launch action
      terminal.newInputLine(s"launch $q$testClass$q")

      // Start processing input
      // TODO: Add repl stop code regardless of test success
      repl.start()

      // Eventually, launch should complete
      logTimeTaken(eventually {
        repl.stateManager.state.activeDebugger should not be (None)
        repl.stateManager.state.scalaVirtualMachines should not be (empty)
      })

      // Finished
      repl.stop()
    }
  }
} 
Example 86
Source File: SourcepathClearCommandIntegrationSpec.scala    From scala-debugger   with Apache License 2.0
package org.scaladebugger.tool.commands

import java.nio.file.Paths

import org.scaladebugger.test.helpers.ParallelMockFunSpec
import org.scaladebugger.tool.Repl
import org.scalatest.concurrent.Eventually
import test.{ToolConstants, ToolFixtures, ToolTestUtilities}

class SourcepathClearCommandIntegrationSpec extends ParallelMockFunSpec
  with ToolFixtures
  with ToolTestUtilities
  with Eventually
{
  implicit override val patienceConfig = PatienceConfig(
    timeout = scaled(ToolConstants.EventuallyTimeout),
    interval = scaled(ToolConstants.EventuallyInterval)
  )

  describe("SourcepathCommand") {
    it("should list all current source paths if no argument provided") {
      val vt = newVirtualTerminal()
      val repl = Repl.newInstance(newTerminal = (_,_) => vt)
      repl.start()

      // Set some paths to be displayed
      repl.stateManager.updateSourcePaths(Seq(
        Paths.get("a"),
        Paths.get("b"),
        Paths.get("c")
      ))

      // Clear the source paths
      vt.newInputLine("sourcepathclear")

      eventually {
        val state = repl.stateManager.state
        state.sourcePaths should be (empty)
      }
    }
  }
} 
Example 87
Source File: ListenCommandIntegrationSpec.scala    From scala-debugger   with Apache License 2.0
package org.scaladebugger.tool.commands

import org.scaladebugger.api.utils.JDITools
import org.scaladebugger.test.helpers.ParallelMockFunSpec
import org.scaladebugger.tool.Repl
import org.scalatest.concurrent.Eventually
import test.{ToolConstants, ToolFixtures, ToolTestUtilities}

class ListenCommandIntegrationSpec extends ParallelMockFunSpec
  with ToolFixtures
  with ToolTestUtilities
  with Eventually
{
  implicit override val patienceConfig = PatienceConfig(
    timeout = scaled(ToolConstants.EventuallyTimeout),
    interval = scaled(ToolConstants.EventuallyInterval)
  )

  describe("ListenCommand") {
    it("should receive a connection from a remote JVM on the desired port") {
      val testClass = "org.scaladebugger.test.misc.ListeningMain"
      val testFile = JDITools.scalaClassStringToFileString(testClass)

      JDITools.usingOpenPort(port => {
        val terminal = newVirtualTerminal()

        val repl = Repl.newInstance(newTerminal = (_,_) => terminal)

        // Listen on provided port
        terminal.newInputLine(s"listen $port")

        // Start processing input
        // TODO: Add repl stop code regardless of test success
        repl.start()

        // Wait for debugger to be running before spawning process
        eventually {
          val d = repl.stateManager.state.activeDebugger
          d should not be None
          d.get.isRunning should be (true)
        }

        // Create a process to attach to our listening debugger
        // TODO: Destroy process regardless of test success
        val p = JDITools.spawn(
          className = testClass,
          options = Seq("-classpath", JDITools.jvmClassPath),
          port = port,
          server = false
        )

        // Eventually, listen should complete
        logTimeTaken(eventually {
          repl.stateManager.state.activeDebugger should not be None
          repl.stateManager.state.scalaVirtualMachines should not be (empty)
        })

        // Finished
        p.destroy()
        repl.stop()
      })
    }
  }
} 
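
Both TODOs above point at the same gap: neither p.destroy() nor repl.stop() runs when an assertion fails first. A sketch of the intended cleanup, reusing the spec's own calls:

val p = JDITools.spawn(
  className = testClass,
  options = Seq("-classpath", JDITools.jvmClassPath),
  port = port,
  server = false
)
try {
  logTimeTaken(eventually {
    repl.stateManager.state.scalaVirtualMachines should not be (empty)
  })
} finally {
  p.destroy() // kill the spawned JVM even on failure
  repl.stop() // and shut the repl down
}
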
Example 88
Source File: StepOutOfLineCommandIntegrationSpec.scala    From scala-debugger   with Apache License 2.0
package org.scaladebugger.tool.commands

import java.io.File

import org.scaladebugger.api.utils.JDITools
import org.scaladebugger.test.helpers.ParallelMockFunSpec
import org.scalatest.concurrent.Eventually
import test.{ToolConstants, ToolFixtures, ToolTestUtilities}

class StepOutOfLineCommandIntegrationSpec extends ParallelMockFunSpec
  with ToolFixtures
  with ToolTestUtilities
  with Eventually
{
  implicit override val patienceConfig = PatienceConfig(
    timeout = scaled(ToolConstants.EventuallyTimeout),
    interval = scaled(ToolConstants.EventuallyInterval)
  )

  describe("StepOutOfLineCommand") {
    it("should step out of a line successfully") {
      val testClass = "org.scaladebugger.test.steps.MethodCalls"
      val testFile = JDITools.scalaClassStringToFileString(testClass)
      val testFileName = new File(testFile).getName

      val threadName = "main"
      val className = s"$testClass$$"
      val methodName = "main"
      val startingLine = 15
      val expectedLine = 35

      // Create a breakpoint to start us off
      val q = "\""
      val virtualTerminal = newVirtualTerminal()
      virtualTerminal.newInputLine(s"bp $q$testFile$q $startingLine")

      withToolRunningUsingTerminal(
        className = testClass,
        virtualTerminal = virtualTerminal
      ) { (vt, sm, start) =>
        logTimeTaken({
          // Assert that we hit the starting line via breakpoint
          eventually {
            validateNextLine(vt, s"Breakpoint hit at $testFileName:$startingLine\n")
          }

          // Set our active thread
          vt.newInputLine(s"thread $q$threadName$q")

          // Perform our step out
          vt.newInputLine("stepout")

          // Assert we end up where expected
          eventually {
            validateNextLine(
              vt,
              s"Step completed: 'thread=$threadName', $className.$methodName ($testFileName:$expectedLine)\n"
            )
          }
        })
      }
    }
  }
} 
Example 89
Source File: ExamineCommandIntegrationSpec.scala    From scala-debugger   with Apache License 2.0
package org.scaladebugger.tool.commands

import java.io.File

import org.scaladebugger.api.utils.JDITools
import org.scaladebugger.test.helpers.ParallelMockFunSpec
import org.scalatest.concurrent.Eventually
import test.{ToolConstants, ToolFixtures, ToolTestUtilities}

class ExamineCommandIntegrationSpec extends ParallelMockFunSpec
  with ToolFixtures
  with ToolTestUtilities
  with Eventually
{
  implicit override val patienceConfig = PatienceConfig(
    timeout = scaled(ToolConstants.EventuallyTimeout),
    interval = scaled(ToolConstants.EventuallyInterval)
  )

  describe("ExamineCommand") {
    it("should be able to inspect values (including nested)") {
      val testClass = "org.scaladebugger.test.info.NestedObjects"
      val testFile = JDITools.scalaClassStringToFileString(testClass)
      val testFileName = new File(testFile).getName

      val threadName = "main"
      val nestedName = "container.immutableData"

      // Create one breakpoint before connecting to the JVM
      val q = "\""
      val virtualTerminal = newVirtualTerminal()
      virtualTerminal.newInputLine(s"bp $q$testFile$q 22")

      withToolRunningUsingTerminal(
        className = testClass,
        virtualTerminal = virtualTerminal
      ) { (vt, sm, start) =>
        logTimeTaken({
          // Assert that we hit the breakpoint
          eventually {
            validateNextLine(vt, s"Breakpoint hit at $testFileName:22\n")
          }

          // Set our active thread to examine values
          vt.newInputLine(s"thread $q$threadName$q")

          // Request examining a nested value
          vt.newInputLine(s"examine $q$nestedName$q")

          // Gather all data (after delay to allow accumulation of all text)
          val prefix = "-> "
          val waitTime = ToolConstants.AccumulationTimeout.millisPart
          val dataLines = Stream.continually(vt.nextOutputLine(waitTime = waitTime))
            .takeWhile(_.nonEmpty).flatten.map(_.trim).map(_.stripPrefix(prefix))

          // Validate data is as expected
          dataLines.head should startWith ("immutableData = Instance of")
          dataLines.tail should contain allOf(
            "x = 3",
            "y = \"immutable\""
          )
        })
      }
    }
  }
} 
Example 90
Source File: StepOverLineCommandIntegrationSpec.scala    From scala-debugger   with Apache License 2.0
package org.scaladebugger.tool.commands

import java.io.File

import org.scaladebugger.api.utils.JDITools
import org.scaladebugger.test.helpers.ParallelMockFunSpec
import org.scalatest.concurrent.Eventually
import test.{ToolConstants, ToolFixtures, ToolTestUtilities}

class StepOverLineCommandIntegrationSpec extends ParallelMockFunSpec
  with ToolFixtures
  with ToolTestUtilities
  with Eventually
{
  implicit override val patienceConfig = PatienceConfig(
    timeout = scaled(ToolConstants.EventuallyTimeout),
    interval = scaled(ToolConstants.EventuallyInterval)
  )

  describe("StepOverLineCommand") {
    it("should step over a line successfully") {
      val testClass = "org.scaladebugger.test.steps.MethodCalls"
      val testFile = JDITools.scalaClassStringToFileString(testClass)
      val testFileName = new File(testFile).getName

      val threadName = "main"
      val className = s"$testClass$$"
      val methodName = "main"
      val startingLine = 31
      val expectedLine = 33

      // Create a breakpoint to start us off
      val q = "\""
      val virtualTerminal = newVirtualTerminal()
      virtualTerminal.newInputLine(s"bp $q$testFile$q $startingLine")

      withToolRunningUsingTerminal(
        className = testClass,
        virtualTerminal = virtualTerminal
      ) { (vt, sm, start) =>
        logTimeTaken({
          // Assert that we hit the starting line via breakpoint
          eventually {
            validateNextLine(vt, s"Breakpoint hit at $testFileName:$startingLine\n")
          }

          // Set our active thread
          vt.newInputLine(s"thread $q$threadName$q")

          // Perform our step over
          vt.newInputLine("stepover")

          // Assert we end up where expected
          eventually {
            validateNextLine(
              vt,
              s"Step completed: 'thread=$threadName', $className.$methodName ($testFileName:$expectedLine)\n"
            )
          }
        })
      }
    }
  }
} 
Example 91
Source File: MethodExitCommandIntegrationSpec.scala    From scala-debugger   with Apache License 2.0
package org.scaladebugger.tool.commands

import org.scaladebugger.test.helpers.ParallelMockFunSpec
import org.scalatest.concurrent.Eventually
import test.{ToolConstants, ToolFixtures, ToolTestUtilities}

class MethodExitCommandIntegrationSpec extends ParallelMockFunSpec
  with ToolFixtures
  with ToolTestUtilities
  with Eventually
{
  implicit override val patienceConfig = PatienceConfig(
    timeout = scaled(ToolConstants.EventuallyTimeout),
    interval = scaled(ToolConstants.EventuallyInterval)
  )

  describe("MethodExitCommand") {
    it("should create method exit requests successfully") {
      val testClass = "org.scaladebugger.test.methods.MethodExit"
      val testClassName = "org.scaladebugger.test.methods.MethodExitTestClass"
      val testMethodName = "testMethod"

      // Create method exit request before JVM starts
      val q = "\""
      val virtualTerminal = newVirtualTerminal()
      virtualTerminal.newInputLine(s"mexit $q$testClassName$q $q$testMethodName$q")

      withToolRunningUsingTerminal(
        className = testClass,
        virtualTerminal = virtualTerminal
      ) { (vt, sm, start) =>
        logTimeTaken({
          // Verify our method exit request was set
          eventually {
            val svm = sm.state.scalaVirtualMachines.head
            val mers = svm.methodExitRequests
              .map(mei => (mei.className, mei.methodName, mei.isPending))
            mers should contain theSameElementsAs Seq(
              (testClassName, testMethodName, false)
            )
          }

          // Assert that we hit the method
          eventually {
            validateNextLine(vt, s"Method exit hit for $testClassName.$testMethodName\n")
          }
        })
      }
    }
  }
} 
Example 92
Source File: MethodExitListCommandIntegrationSpec.scala    From scala-debugger   with Apache License 2.0
package org.scaladebugger.tool.commands

import org.scaladebugger.test.helpers.ParallelMockFunSpec
import org.scalatest.concurrent.Eventually
import test.{ToolConstants, ToolFixtures, ToolTestUtilities}

class MethodExitListCommandIntegrationSpec extends ParallelMockFunSpec
  with ToolFixtures
  with ToolTestUtilities
  with Eventually
{
  implicit override val patienceConfig = PatienceConfig(
    timeout = scaled(ToolConstants.EventuallyTimeout),
    interval = scaled(ToolConstants.EventuallyInterval)
  )

  describe("MethodExitListCommand") {
    it("should list pending and active method exit requests") {
      val testClass = "org.scaladebugger.test.methods.MethodExit"
      val testClassName = "org.scaladebugger.test.methods.MethodExitTestClass"
      val testMethodName = "testMethod"

      val testFakeClassName = "invalid.class"
      val testFakeMethodName = "fakeMethod"

      // Create method exit request before JVM starts
      val q = "\""
      val virtualTerminal = newVirtualTerminal()

      // Valid, so will be active
      virtualTerminal.newInputLine(s"mexit $q$testClassName$q $q$testMethodName$q")

      // Invalid, so will remain pending
      virtualTerminal.newInputLine(s"mexit $q$testFakeClassName$q $q$testFakeMethodName$q")

      withToolRunningUsingTerminal(
        className = testClass,
        virtualTerminal = virtualTerminal
      ) { (vt, sm, start) =>
        logTimeTaken({
          // Verify our method exit requests were set
          validateNextLine(vt, s"Set method exit for class $testClassName and method $testMethodName\n")
          validateNextLine(vt, s"Set method exit for class $testFakeClassName and method $testFakeMethodName\n")

          // Verify that we have attached to the JVM
          validateNextLine(vt, "Attached with id",
            success = (text, line) => line should startWith(text))

          // Assert that we hit the first methodExit
          validateNextLine(vt, s"Method exit hit for $testClassName.$testMethodName\n")

          // List all available method exit requests
          vt.newInputLine("mexitlist")

          // First prints out JVM id
          validateNextLine(vt, "JVM",
            success = (text, line) => line should include(text))

          // Verify expected pending and active requests show up
          // by collecting the two available and checking their content
          val lines = Seq(nextLine(vt), nextLine(vt)).flatten
          lines should contain allOf(
            s"$testClassName.$testMethodName (Active)\n",
            s"$testFakeClassName.$testFakeMethodName (Pending)\n"
          )
        })
      }
    }
  }
} 
Example 93
Source File: SourcepathCommandIntegrationSpec.scala    From scala-debugger   with Apache License 2.0
package org.scaladebugger.tool.commands

import java.nio.file.Paths

import org.scaladebugger.test.helpers.FixedParallelSuite
import org.scaladebugger.tool.Repl
import org.scalamock.scalatest.MockFactory
import org.scalatest.concurrent.Eventually
import org.scalatest.{FunSpec, Matchers, ParallelTestExecution}
import test.{ToolConstants, ToolTestUtilities, ToolFixtures}

class SourcepathCommandIntegrationSpec extends FunSpec with Matchers
  with ParallelTestExecution with ToolFixtures with MockFactory
  with ToolTestUtilities with Eventually with FixedParallelSuite
{
  implicit override val patienceConfig = PatienceConfig(
    timeout = scaled(ToolConstants.EventuallyTimeout),
    interval = scaled(ToolConstants.EventuallyInterval)
  )

  describe("SourcepathCommand") {
    it("should add the provided path to the current list and load sources") {
      val vt = newVirtualTerminal()
      val repl = Repl.newInstance(newTerminal = (_,_) => vt)
      repl.start()

      val q = "\""
      val s = java.io.File.separator

      // Set some paths to be displayed
      repl.stateManager.updateSourcePaths(Seq(
        Paths.get("a"),
        Paths.get("b"),
        Paths.get("c")
      ))

      // Add '.' as sourcepath
      vt.newInputLine("sourcepath \".\"")

      // Verify that we have finished loading our source files
      eventually {
        validateNextLine(vt, """Loaded \d+ source files""",
          success = (text, line) => line should startWith regex text)
      }

      eventually {
        val state = repl.stateManager.state
        state.sourcePaths.last.getFileName.toString should contain ('.')
      }
    }

    it("should list all current source paths if no argument provided") {
      val vt = newVirtualTerminal()
      val repl = Repl.newInstance(newTerminal = (_,_) => vt)
      repl.start()

      // Set some paths to be displayed
      repl.stateManager.updateSourcePaths(Seq(
        Paths.get("a"),
        Paths.get("b"),
        Paths.get("c")
      ))

      // Display the source paths
      vt.newInputLine("sourcepath")

      val line = vt.nextOutputLine(
        waitTime = ToolConstants.NextOutputLineTimeout.millisPart
      )
      val s = java.io.File.pathSeparator
      line.get should be (s"Source paths: a${s}b${s}c\n")
    }
  }
} 
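
The first test above matches output with ScalaTest's regex matchers because the file count varies between runs. The two matcher forms, as a standalone sketch (Matchers assumed in scope):

val line = "Loaded 42 source files"
line should startWith regex """Loaded \d+"""               // prefix match
line should fullyMatch regex """Loaded \d+ source files""" // whole-line match
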
Example 94
Source File: WatchModificationCommandIntegrationSpec.scala    From scala-debugger   with Apache License 2.0
package org.scaladebugger.tool.commands

import org.scaladebugger.api.utils.JDITools
import org.scaladebugger.test.helpers.ParallelMockFunSpec
import org.scalatest.concurrent.Eventually
import test.{ToolConstants, ToolFixtures, ToolTestUtilities}

class WatchModificationCommandIntegrationSpec extends ParallelMockFunSpec
  with ToolFixtures
  with ToolTestUtilities
  with Eventually
{
  implicit override val patienceConfig = PatienceConfig(
    timeout = scaled(ToolConstants.EventuallyTimeout),
    interval = scaled(ToolConstants.EventuallyInterval)
  )

  describe("WatchModificationCommand") {
    it("should watch the specified variable when modified") {
      val testClass = "org.scaladebugger.test.watchpoints.ModificationWatchpoint"
      val testFile = JDITools.scalaClassStringToFileString(testClass)

      val className = "org.scaladebugger.test.watchpoints.SomeModificationClass"
      val fieldName = "field"

      // Create watch request before connecting to the JVM
      val q = "\""
      val virtualTerminal = newVirtualTerminal()
      virtualTerminal.newInputLine(s"watchm $q$className$q $q$fieldName$q")

      withToolRunningUsingTerminal(
        className = testClass,
        virtualTerminal = virtualTerminal
      ) { (vt, sm, start) =>
        logTimeTaken({
          // Verify our watch request was made
          eventually {
            val svm = sm.state.scalaVirtualMachines.head
            val mwrs = svm.modificationWatchpointRequests.map(mwr =>
              (mwr.className, mwr.fieldName, mwr.isPending))
            mwrs should contain theSameElementsAs Seq(
              (className, fieldName, false)
            )
          }

          // Verify that we had a variable modified
          eventually {
            validateNextLine(
              vt, s"'$fieldName' of '$className' modified",
              success = (text, line) => line should startWith regex text
            )
          }

          // Main thread should be suspended
          eventually {
            val svm = sm.state.scalaVirtualMachines.head

            // NOTE: Using assert for better error message
            assert(svm.thread("main").status.isSuspended,
              "Main thread was not suspended!")
          }
        })
      }
    }
  }
} 
Example 95
Source File: BreakpointCommandIntegrationSpec.scala    From scala-debugger   with Apache License 2.0
package org.scaladebugger.tool.commands

import java.io.File

import org.scaladebugger.api.utils.JDITools
import org.scaladebugger.test.helpers.ParallelMockFunSpec
import org.scalatest.concurrent.Eventually
import test.{ToolConstants, ToolFixtures, ToolTestUtilities}

class BreakpointCommandIntegrationSpec extends ParallelMockFunSpec
  with ToolFixtures
  with ToolTestUtilities
  with Eventually
{
  implicit override val patienceConfig = PatienceConfig(
    timeout = scaled(ToolConstants.EventuallyTimeout),
    interval = scaled(ToolConstants.EventuallyInterval)
  )

  describe("BreakpointCommand") {
    it("should create breakpoints successfully") {
      val testClass = "org.scaladebugger.test.breakpoints.DelayedInit"
      val testFile = JDITools.scalaClassStringToFileString(testClass)
      val testFileName = new File(testFile).getName

      // Create two breakpoints before connecting to the JVM
      val q = "\""
      val virtualTerminal = newVirtualTerminal()
      virtualTerminal.newInputLine(s"bp $q$testFile$q 10")
      virtualTerminal.newInputLine(s"bp $q$testFile$q 11")

      withToolRunningUsingTerminal(
        className = testClass,
        virtualTerminal = virtualTerminal
      ) { (vt, sm, start) =>
        logTimeTaken({
          // Verify our breakpoints were set
          eventually {
            val svm = sm.state.scalaVirtualMachines.head
            val brs = svm.breakpointRequests
              .map(bri => (bri.fileName, bri.lineNumber, bri.isPending))
            brs should contain theSameElementsAs Seq(
              (testFile, 10, false),
              (testFile, 11, false)
            )
          }

          // Assert that we hit the first breakpoint
          eventually {
            validateNextLine(vt, s"Breakpoint hit at $testFileName:10\n")
          }

          // Continue on to the next breakpoint (resume main thread)
          vt.newInputLine("resume \"main\"")

          // Assert that we hit the second breakpoint
          eventually {
            validateNextLine(vt, s"Breakpoint hit at $testFileName:11\n")
          }
        })
      }
    }
  }
} 
Example 96
Source File: MethodEntryCommandIntegrationSpec.scala    From scala-debugger   with Apache License 2.0
package org.scaladebugger.tool.commands

import org.scaladebugger.test.helpers.ParallelMockFunSpec
import org.scalatest.concurrent.Eventually
import test.{ToolConstants, ToolFixtures, ToolTestUtilities}

class MethodEntryCommandIntegrationSpec extends ParallelMockFunSpec
  with ToolFixtures
  with ToolTestUtilities
  with Eventually
{
  implicit override val patienceConfig = PatienceConfig(
    timeout = scaled(ToolConstants.EventuallyTimeout),
    interval = scaled(ToolConstants.EventuallyInterval)
  )

  describe("MethodEntryCommand") {
    it("should create method entry requests successfully") {
      val testClass = "org.scaladebugger.test.methods.MethodEntry"
      val testClassName = "org.scaladebugger.test.methods.MethodEntryTestClass"
      val testMethodName = "testMethod"

      // Create method entry request before JVM starts
      val q = "\""
      val virtualTerminal = newVirtualTerminal()
      virtualTerminal.newInputLine(s"mentry $q$testClassName$q $q$testMethodName$q")

      withToolRunningUsingTerminal(
        className = testClass,
        virtualTerminal = virtualTerminal
      ) { (vt, sm, start) =>
        logTimeTaken({
          // Verify our method entry request was set
          eventually {
            val svm = sm.state.scalaVirtualMachines.head
            val mers = svm.methodEntryRequests
              .map(mei => (mei.className, mei.methodName, mei.isPending))
            mers should contain theSameElementsAs Seq(
              (testClassName, testMethodName, false)
            )
          }

          // Assert that we hit the method
          eventually {
            validateNextLine(vt, s"Method entry hit for $testClassName.$testMethodName\n")
          }
        })
      }
    }
  }
} 
Example 97
Source File: ThreadSuspendCommandIntegrationSpec.scala    From scala-debugger   with Apache License 2.0
package org.scaladebugger.tool.commands

import java.io.File

import org.scaladebugger.api.utils.JDITools
import org.scaladebugger.test.helpers.ParallelMockFunSpec
import org.scalatest.concurrent.Eventually
import test.{ToolConstants, ToolFixtures, ToolTestUtilities}

class ThreadSuspendCommandIntegrationSpec extends ParallelMockFunSpec
  with ToolFixtures
  with ToolTestUtilities
  with Eventually
{
  implicit override val patienceConfig = PatienceConfig(
    timeout = scaled(ToolConstants.EventuallyTimeout),
    interval = scaled(ToolConstants.EventuallyInterval)
  )

  describe("ThreadSuspendCommand") {
    it("should suspend the specific thread if given a name") {
      val testClass = "org.scaladebugger.test.misc.MainUsingApp"
      val testFile = JDITools.scalaClassStringToFileString(testClass)
      val testFileName = new File(testFile).getName

      // Create a breakpoint before connecting to the JVM
      val q = "\""
      val virtualTerminal = newVirtualTerminal()
      virtualTerminal.newInputLine(s"bp $q$testFile$q 11")

      withToolRunningUsingTerminal(
        className = testClass,
        virtualTerminal = virtualTerminal
      ) { (vt, sm, start) =>
        logTimeTaken({
          // Assert that we hit the breakpoint
          eventually {
            validateNextLine(vt, s"Breakpoint hit at $testFileName:11\n")
          }

          // Find a thread other than the currently-suspended main thread
          val svm = sm.state.scalaVirtualMachines.head
          val thread = svm.threads.find(_.name != "main").get
          val threadName = thread.name

          // Suspend the thread
          vt.newInputLine(s"suspend $q$threadName$q")

          // Verify the thread was suspended by us
          eventually {
            thread.status.isSuspended should be (true)
            thread.status.isAtBreakpoint should be (false)
          }
        })
      }
    }

    it("should suspend all threads if no thread name provided") {
      val testClass = "org.scaladebugger.test.misc.MainUsingApp"
      val testFile = JDITools.scalaClassStringToFileString(testClass)
      val testFileName = new File(testFile).getName

      // Create a breakpoint before connecting to the JVM
      val q = "\""
      val virtualTerminal = newVirtualTerminal()
      virtualTerminal.newInputLine(s"bp $q$testFile$q 11")

      withToolRunningUsingTerminal(
        className = testClass,
        virtualTerminal = virtualTerminal
      ) { (vt, sm, start) =>
        logTimeTaken({
          // Assert that we hit the breakpoint
          eventually {
            validateNextLine(vt, s"Breakpoint hit at $testFileName:11\n")
          }

          // Suspend the threads
          vt.newInputLine("suspend")

          // Verify all threads suspended
          eventually {
            val svm = sm.state.scalaVirtualMachines.head
            svm.threads.forall(_.status.isSuspended) should be (true)
          }
        })
      }
    }
  }
} 
Example 98
Source File: WatchAccessCommandIntegrationSpec.scala    From scala-debugger   with Apache License 2.0
package org.scaladebugger.tool.commands

import org.scaladebugger.api.utils.JDITools
import org.scaladebugger.test.helpers.ParallelMockFunSpec
import org.scalatest.concurrent.Eventually
import test.{ToolConstants, ToolFixtures, ToolTestUtilities}

class WatchAccessCommandIntegrationSpec extends ParallelMockFunSpec
  with ToolFixtures
  with ToolTestUtilities
  with Eventually
{
  implicit override val patienceConfig = PatienceConfig(
    timeout = scaled(ToolConstants.EventuallyTimeout),
    interval = scaled(ToolConstants.EventuallyInterval)
  )

  describe("WatchAccessCommand") {
    it("should watch the specified variable when accessed") {
      val testClass = "org.scaladebugger.test.watchpoints.AccessWatchpoint"
      val testFile = JDITools.scalaClassStringToFileString(testClass)

      val className = "org.scaladebugger.test.watchpoints.SomeAccessClass"
      val fieldName = "field"

      // Create watch request before connecting to the JVM
      val q = "\""
      val virtualTerminal = newVirtualTerminal()
      virtualTerminal.newInputLine(s"watcha $q$className$q $q$fieldName$q")

      withToolRunningUsingTerminal(
        className = testClass,
        virtualTerminal = virtualTerminal
      ) { (vt, sm, start) =>
        logTimeTaken({
          // Verify our watch request was made
          eventually {
            val svm = sm.state.scalaVirtualMachines.head
            val awrs = svm.accessWatchpointRequests.map(awr =>
              (awr.className, awr.fieldName, awr.isPending))
            awrs should contain theSameElementsAs Seq(
              (className, fieldName, false)
            )
          }

          // Verify that we had a variable accessed
          eventually {
            validateNextLine(
              vt, s"'$fieldName' of '$className' accessed",
              success = (text, line) => line should startWith (text)
            )
          }

          // Main thread should be suspended
          eventually {
            val svm = sm.state.scalaVirtualMachines.head

            // NOTE: Using assert for better error message
            assert(svm.thread("main").status.isSuspended,
              "Main thread was not suspended!")
          }
        })
      }
    }
  }
} 
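
The NOTE above prefers assert(cond, clue) for its error message; ScalaTest's withClue achieves the same with matcher syntax by prefixing the clue to the failure message:

withClue("Main thread was not suspended!") {
  svm.thread("main").status.isSuspended should be (true)
}
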
Example 99
Source File: AttachCommandIntegrationSpec.scala    From scala-debugger   with Apache License 2.0
package org.scaladebugger.tool.commands

import org.scaladebugger.api.utils.JDITools
import org.scaladebugger.test.helpers.ParallelMockFunSpec
import org.scaladebugger.tool.Repl
import org.scalatest.concurrent.Eventually
import test.{ToolConstants, ToolFixtures, ToolTestUtilities}

class AttachCommandIntegrationSpec extends ParallelMockFunSpec
  with ToolFixtures
  with ToolTestUtilities
  with Eventually
{
  implicit override val patienceConfig = PatienceConfig(
    timeout = scaled(ToolConstants.EventuallyTimeout),
    interval = scaled(ToolConstants.EventuallyInterval)
  )

  describe("AttachCommand") {
    it("should attach successfully using a port") {
      val testClass = "org.scaladebugger.test.misc.AttachingMain"
      val testFile = JDITools.scalaClassStringToFileString(testClass)

      withProcessPort(testClass) { (port) =>
        val terminal = newVirtualTerminal()

        val repl = Repl.newInstance(newTerminal = (_,_) => terminal)

        // Queue up attach action
        terminal.newInputLine(s"attach $port")

        // Start processing input
        // TODO: Add repl stop code regardless of test success
        repl.start()

        // Eventually, attach should complete
        logTimeTaken(eventually {
          repl.stateManager.state.activeDebugger should not be None
          repl.stateManager.state.scalaVirtualMachines should not be (empty)
        })

        // Finished
        repl.stop()
      }
    }
  }
} 
Example 100
Source File: StepIntoLineCommandIntegrationSpec.scala    From scala-debugger   with Apache License 2.0
package org.scaladebugger.tool.commands

import java.io.File

import org.scaladebugger.api.utils.JDITools
import org.scaladebugger.test.helpers.ParallelMockFunSpec
import org.scalatest.concurrent.Eventually
import test.{ToolConstants, ToolFixtures, ToolTestUtilities}

class StepIntoLineCommandIntegrationSpec extends ParallelMockFunSpec
  with ToolFixtures
  with ToolTestUtilities
  with Eventually
{
  implicit override val patienceConfig = PatienceConfig(
    timeout = scaled(ToolConstants.EventuallyTimeout),
    interval = scaled(ToolConstants.EventuallyInterval)
  )

  describe("StepIntoLineCommand") {
    it("should step into a line successfully") {
      val testClass = "org.scaladebugger.test.steps.MethodCalls"
      val testFile = JDITools.scalaClassStringToFileString(testClass)
      val testFileName = new File(testFile).getName

      val threadName = "main"
      val className = s"$testClass$$InnerClass"
      val methodName = "innerMethod"
      val startingLine = 42
      val expectedLine = 49

      // Create a breakpoint to start us off
      val q = "\""
      val virtualTerminal = newVirtualTerminal()
      virtualTerminal.newInputLine(s"bp $q$testFile$q $startingLine")

      withToolRunningUsingTerminal(
        className = testClass,
        virtualTerminal = virtualTerminal
      ) { (vt, sm, start) =>
        logTimeTaken({
          // Assert that we hit the starting line via breakpoint
          eventually {
            validateNextLine(vt, s"Breakpoint hit at $testFileName:$startingLine\n")
          }

          // Set our active thread
          vt.newInputLine(s"thread $q$threadName$q")

          // Perform our step into
          vt.newInputLine("stepin")

          // Assert we end up where expected
          eventually {
            validateNextLine(
              vt,
              s"Step completed: 'thread=$threadName', $className.$methodName ($testFileName:$expectedLine)\n"
            )
          }
        })
      }
    }
  }
} 
Example 101
Source File: ScalaVirtualMachine212IntegrationSpec.scala    From scala-debugger   with Apache License 2.0
package org.scaladebugger.api.virtualmachines

import org.scaladebugger.api.utils.JDITools
import org.scaladebugger.test.helpers.ParallelMockFunSpec
import org.scalatest.concurrent.Eventually
import test.{ApiTestUtilities, VirtualMachineFixtures}


class ScalaVirtualMachine212IntegrationSpec extends ParallelMockFunSpec
  with VirtualMachineFixtures
  with ApiTestUtilities
  with Eventually
{
  describe("ScalaVirtualMachine for 2.12") {
    it("should return the breakpointable line numbers for the file") {
      val testClass = "org.scaladebugger.test.misc.AvailableLines"

      withVirtualMachine(testClass) { (s) =>
        // NOTE: In Scala 2.12, there is a breakpoint available on the object
        //       itself (line 11), and there is one on the last line of the
        //       object (72)
        val expected = Seq(
          11, 12, 13, 14, 15, 16, 20, 21, 22, 26, 27, 28, 32, 34, 35, 37, 39,
          40, 41, 42, 45, 46, 47, 50, 52, 53, 57, 58, 59, 60, 63, 65, 72
        )

        val file = JDITools.scalaClassStringToFileString(testClass)

        // There is some delay while receiving the Java classes that make up
        // our file, so we must wait for enough responses to get all of our lines
        eventually {
          val actual = s.availableLinesForFile(file).get
          actual should contain theSameElementsInOrderAs expected
        }
      }
    }
  }
} 
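
This spec switches to theSameElementsInOrderAs because line order matters, while earlier specs used looser matchers. The three collection matchers seen in this listing, side by side:

Seq(1, 2, 3) should contain allOf (1, 3)                          // at least these, any order
Seq(1, 2, 3) should contain theSameElementsAs Seq(3, 2, 1)        // exactly these, any order
Seq(1, 2, 3) should contain theSameElementsInOrderAs Seq(1, 2, 3) // exactly these, this order
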
Example 102
Source File: JavaFieldInfoScala212IntegrationSpec.scala    From scala-debugger   with Apache License 2.0
package org.scaladebugger.api.profiles.java.info

import org.scaladebugger.api.lowlevel.events.misc.NoResume
import org.scaladebugger.api.profiles.java.JavaDebugProfile
import org.scaladebugger.api.profiles.traits.info.ThreadInfo
import org.scaladebugger.api.utils.JDITools
import org.scaladebugger.api.virtualmachines.DummyScalaVirtualMachine
import org.scaladebugger.test.helpers.ParallelMockFunSpec
import org.scalatest.concurrent.Eventually
import test.{ApiTestUtilities, VirtualMachineFixtures}

class JavaFieldInfoScala212IntegrationSpec extends ParallelMockFunSpec
  with VirtualMachineFixtures
  with ApiTestUtilities
  with Eventually
{
  describe("JavaFieldInfo for 2.12") {
    // $outer does not appear in this scenario for Scala 2.12
    it("should not expand $outer to its underlying fields (no $outer in 2.12)") {
      val testClass = "org.scaladebugger.test.info.OuterScope"
      val testFile = JDITools.scalaClassStringToFileString(testClass)

      @volatile var t: Option[ThreadInfo] = None
      val s = DummyScalaVirtualMachine.newInstance()

      // NOTE: Do not resume so we can check the variables at the stack frame
      s.withProfile(JavaDebugProfile.Name)
        .getOrCreateBreakpointRequest(testFile, 17, NoResume)
        .foreach(e => t = Some(e.thread))

      withVirtualMachine(testClass, pendingScalaVirtualMachines = Seq(s)) { (s) =>
        logTimeTaken(eventually {
          val variableNames = t
            .flatMap(_.tryTopFrame.toOption)
            .map(_.allVariables)
            .map(_.map(_.name))
            .get

          // In Scala 2.12 there is no $outer here; the outer fields appear directly
          variableNames should contain theSameElementsAs Seq(
            "MODULE$",
            "x",
            "executionStart",
            "scala$App$$_args", // Scala 2.12 (_Args is now _args)
            "scala$App$$initCode",
            "newValue"
          )
        })
      }
    }

    it("should not encounter Scala-specific field names like org$scaladebugger$test$bugs$BugFromGitter$$name") {
      val testClass = "org.scaladebugger.test.bugs.BugFromGitter"
      val testFile = JDITools.scalaClassStringToFileString(testClass)

      @volatile var t: Option[ThreadInfo] = None
      val s = DummyScalaVirtualMachine.newInstance()

      // NOTE: Do not resume so we can check the variables at the stack frame
      s.withProfile(JavaDebugProfile.Name)
        .getOrCreateBreakpointRequest(testFile, 20, NoResume)
        .foreach(e => t = Some(e.thread))

      withVirtualMachine(testClass, pendingScalaVirtualMachines = Seq(s)) { (s) =>
        logTimeTaken(eventually {
          val fieldNames = t.get.topFrame.allVariables.map(_.name)

          fieldNames should contain theSameElementsAs Seq(
            "actualTimes",
            "times",
            "name"
          )
        })
      }
    }
  }
} 
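
The @volatile capture in both tests above matters because the breakpoint callback fires on a JDI event thread while eventually polls from the test thread; volatile makes the write visible across threads. The pattern reduced to its essentials (names as in the spec):

@volatile var t: Option[ThreadInfo] = None
s.withProfile(JavaDebugProfile.Name)
  .getOrCreateBreakpointRequest(testFile, 17, NoResume)
  .foreach(e => t = Some(e.thread)) // runs on a JDI event thread

// Polled from the test thread; sees the write thanks to @volatile
eventually { t should not be None }
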
Example 103
Source File: JavaObjectInfoScala212IntegrationSpec.scala    From scala-debugger   with Apache License 2.0
package org.scaladebugger.api.profiles.java.info

import org.scaladebugger.api.lowlevel.events.misc.NoResume
import org.scaladebugger.api.profiles.java.JavaDebugProfile
import org.scaladebugger.api.profiles.traits.info.ThreadInfo
import org.scaladebugger.api.utils.JDITools
import org.scaladebugger.api.virtualmachines.DummyScalaVirtualMachine
import org.scaladebugger.test.helpers.ParallelMockFunSpec
import org.scalatest.concurrent.Eventually
import test.{ApiTestUtilities, VirtualMachineFixtures}

class JavaObjectInfoScala212IntegrationSpec extends ParallelMockFunSpec
  with VirtualMachineFixtures
  with ApiTestUtilities
  with Eventually
{
  describe("JavaObjectInfo for 2.12") {
    it("should be able to get a list of methods for the object") {
      val testClass = "org.scaladebugger.test.info.Methods"
      val testFile = JDITools.scalaClassStringToFileString(testClass)

      @volatile var t: Option[ThreadInfo] = None
      val s = DummyScalaVirtualMachine.newInstance()

      // NOTE: Do not resume so we can check the variables at the stack frame
      s.withProfile(JavaDebugProfile.Name)
        .getOrCreateBreakpointRequest(testFile, 22, NoResume)
        .foreach(e => t = Some(e.thread))

      withVirtualMachine(testClass, pendingScalaVirtualMachines = Seq(s)) { (s) =>
        logTimeTaken(eventually {
          val methodNames = t.get.topFrame.thisObject.methods.map(_.name)

          methodNames should contain theSameElementsAs Seq(
            // Defined methods
            "main",
            "innerMethod$1", // Nested method has different Java signature
            "publicMethod",
            "privateMethod",
            "protectedMethod",
            "zeroArgMethod",
            "functionMethod", // Scala provides a method for the function
                              // object since it would be treated as a field
            "$anonfun$functionMethod$1", // Scala 2.12
            "$deserializeLambda$", // Scala 2.12

            // Inherited methods
            "<clinit>",
            "<init>",
            "registerNatives",
            "getClass",
            "hashCode",
            "equals",
            "clone",
            "toString",
            "notify",
            "notifyAll",
            "wait", // Overloaded method
            "wait",
            "wait",
            "finalize"
          )
        })
      }
    }
  }
} 
Example 104
Source File: ScalaVirtualMachine210IntegrationSpec.scala    From scala-debugger   with Apache License 2.0 5 votes vote down vote up
package org.scaladebugger.api.virtualmachines

import org.scaladebugger.api.utils.JDITools
import org.scaladebugger.test.helpers.ParallelMockFunSpec
import org.scalatest.concurrent.Eventually
import test.{ApiTestUtilities, VirtualMachineFixtures}


class ScalaVirtualMachine210IntegrationSpec extends ParallelMockFunSpec
  with VirtualMachineFixtures
  with ApiTestUtilities
  with Eventually
{
  describe("ScalaVirtualMachine for 2.10") {
    it("should return the breakpointable line numbers for the file") {
      val testClass = "org.scaladebugger.test.misc.AvailableLines"

      withVirtualMachine(testClass) { (s) =>
        // NOTE: In Scala 2.10, there is a breakpoint available on the object
        //       itself (line 11), but there is not one on the last line of the
        //       object (72) - verified with IntelliJ
        val expected = Seq(
          11, 12, 13, 14, 15, 16, 20, 21, 22, 26, 27, 28, 32, 34, 35, 37, 39,
          40, 41, 42, 45, 46, 47, 50, 52, 53, 57, 58, 59, 60, 63, 65
        )

        val file = JDITools.scalaClassStringToFileString(testClass)

        // There is some delay while receiving the Java classes that make up
        // our file, so must wait for enough responses to get all of our lines
        eventually {
          val actual = s.availableLinesForFile(file).get
          actual should contain theSameElementsInOrderAs expected
        }
      }
    }
  }
} 
Example 105
Source File: JavaFieldInfoScala210IntegrationSpec.scala    From scala-debugger   with Apache License 2.0 5 votes vote down vote up
package org.scaladebugger.api.profiles.java.info

import org.scaladebugger.api.lowlevel.events.misc.NoResume
import org.scaladebugger.api.profiles.java.JavaDebugProfile
import org.scaladebugger.api.profiles.traits.info.ThreadInfo
import org.scaladebugger.api.utils.JDITools
import org.scaladebugger.api.virtualmachines.DummyScalaVirtualMachine
import org.scaladebugger.test.helpers.ParallelMockFunSpec
import org.scalatest.concurrent.Eventually
import test.{ApiTestUtilities, VirtualMachineFixtures}

class JavaFieldInfoScala210IntegrationSpec extends ParallelMockFunSpec
  with VirtualMachineFixtures
  with ApiTestUtilities
  with Eventually
{
  describe("JavaFieldInfo for 2.10") {
    it("should not expand $outer to its underlying fields") {
      val testClass = "org.scaladebugger.test.info.OuterScope"
      val testFile = JDITools.scalaClassStringToFileString(testClass)

      @volatile var t: Option[ThreadInfo] = None
      val s = DummyScalaVirtualMachine.newInstance()

      // NOTE: Do not resume so we can check the variables at the stack frame
      s.withProfile(JavaDebugProfile.Name)
        .getOrCreateBreakpointRequest(testFile, 17, NoResume)
        .foreach(e => t = Some(e.thread))

      withVirtualMachine(testClass, pendingScalaVirtualMachines = Seq(s)) { (s) =>
        logTimeTaken(eventually {
          val variableNames = t
            .flatMap(_.tryTopFrame.toOption)
            .map(_.allVariables)
            .map(_.map(_.name))
            .get

          // Should not expand the $outer field; it stays as a single reference
          variableNames should contain theSameElementsAs Seq(
            "$outer",
            "newValue"
          )
        })
      }
    }

    it("should not fix Scala-specific field names like org$scaladebugger$test$bugs$BugFromGitter$$name") {
      val testClass = "org.scaladebugger.test.bugs.BugFromGitter"
      val testFile = JDITools.scalaClassStringToFileString(testClass)

      @volatile var t: Option[ThreadInfo] = None
      val s = DummyScalaVirtualMachine.newInstance()

      // NOTE: Do not resume so we can check the variables at the stack frame
      s.withProfile(JavaDebugProfile.Name)
        .getOrCreateBreakpointRequest(testFile, 20, NoResume)
        .foreach(e => t = Some(e.thread))

      withVirtualMachine(testClass, pendingScalaVirtualMachines = Seq(s)) { (s) =>
        logTimeTaken(eventually {
          val fieldNames = t.get.topFrame.allVariables.map(_.name)

          fieldNames should contain theSameElementsAs Seq(
            "actualTimes",
            "times",
            "org$scaladebugger$test$bugs$BugFromGitter$$name"
          )
        })
      }
    }

  }
} 
Example 106
Source File: JavaObjectInfoScala210IntegrationSpec.scala    From scala-debugger   with Apache License 2.0 5 votes vote down vote up
package org.scaladebugger.api.profiles.java.info

import org.scaladebugger.api.lowlevel.events.misc.NoResume
import org.scaladebugger.api.profiles.java.JavaDebugProfile
import org.scaladebugger.api.profiles.traits.info.ThreadInfo
import org.scaladebugger.api.utils.JDITools
import org.scaladebugger.api.virtualmachines.DummyScalaVirtualMachine
import org.scaladebugger.test.helpers.ParallelMockFunSpec
import org.scalatest.concurrent.Eventually
import test.{ApiTestUtilities, VirtualMachineFixtures}

class JavaObjectInfoScala210IntegrationSpec extends ParallelMockFunSpec
  with VirtualMachineFixtures
  with ApiTestUtilities
  with Eventually
{
  describe("JavaObjectInfo for 2.10") {
    it("should be able to get a list of methods for the object") {
      val testClass = "org.scaladebugger.test.info.Methods"
      val testFile = JDITools.scalaClassStringToFileString(testClass)

      @volatile var t: Option[ThreadInfo] = None
      val s = DummyScalaVirtualMachine.newInstance()

      // NOTE: Do not resume so we can check the variables at the stack frame
      s.withProfile(JavaDebugProfile.Name)
        .getOrCreateBreakpointRequest(testFile, 22, NoResume)
        .foreach(e => t = Some(e.thread))

      withVirtualMachine(testClass, pendingScalaVirtualMachines = Seq(s)) { (s) =>
        logTimeTaken(eventually {
          val methodNames = t.get.topFrame.thisObject.methods.map(_.name)

          methodNames should contain theSameElementsAs Seq(
            // Defined methods
            "main",
            "innerMethod$1", // Nested method has different Java signature
            "publicMethod",
            "privateMethod",
            "protectedMethod",
            "zeroArgMethod",
            "functionMethod", // Scala provides a method for the function
                              // object since it would be treated as a field

            // Inherited methods
            "<clinit>",
            "<init>",
            "registerNatives",
            "getClass",
            "hashCode",
            "equals",
            "clone",
            "toString",
            "notify",
            "notifyAll",
            "wait", // Overloaded method
            "wait",
            "wait",
            "finalize"
          )
        })
      }
    }
  }
} 
Example 107
Source File: ScalaVirtualMachine211IntegrationSpec.scala    From scala-debugger   with Apache License 2.0 5 votes vote down vote up
package org.scaladebugger.api.virtualmachines

import org.scaladebugger.api.utils.JDITools
import org.scaladebugger.test.helpers.ParallelMockFunSpec
import org.scalatest.concurrent.Eventually
import test.{ApiTestUtilities, VirtualMachineFixtures}


class ScalaVirtualMachine211IntegrationSpec extends ParallelMockFunSpec
  with VirtualMachineFixtures
  with ApiTestUtilities
  with Eventually
{
  describe("ScalaVirtualMachine for 2.11") {
    it("should return the breakpointable line numbers for the file") {
      val testClass = "org.scaladebugger.test.misc.AvailableLines"

      withVirtualMachine(testClass) { (s) =>
        // NOTE: In Scala 2.11, there is no breakpoint available on the object
        //       itself (line 11), but there is one on the last line of the
        //       object (72) - verified with IntelliJ
        val expected = Seq(
          12, 13, 14, 15, 16, 20, 21, 22, 26, 27, 28, 32, 34, 35, 37, 39,
          40, 41, 42, 45, 46, 47, 50, 52, 53, 57, 58, 59, 60, 63, 65, 72
        )

        val file = JDITools.scalaClassStringToFileString(testClass)

        // There is some delay while receiving the Java classes that make up
        // our file, so must wait for enough responses to get all of our lines
        eventually {
          val actual = s.availableLinesForFile(file).get
          actual should contain theSameElementsInOrderAs expected
        }
      }
    }
  }
} 
Example 108
Source File: JavaFieldInfoScala211IntegrationSpec.scala    From scala-debugger   with Apache License 2.0 5 votes vote down vote up
package org.scaladebugger.api.profiles.java.info

import org.scaladebugger.api.lowlevel.events.misc.NoResume
import org.scaladebugger.api.profiles.java.JavaDebugProfile
import org.scaladebugger.api.profiles.traits.info.ThreadInfo
import org.scaladebugger.api.utils.JDITools
import org.scaladebugger.api.virtualmachines.DummyScalaVirtualMachine
import org.scaladebugger.test.helpers.ParallelMockFunSpec
import org.scalatest.concurrent.Eventually
import test.{ApiTestUtilities, VirtualMachineFixtures}

class JavaFieldInfoScala211IntegrationSpec extends ParallelMockFunSpec
  with VirtualMachineFixtures
  with ApiTestUtilities
  with Eventually
{
  describe("JavaFieldInfo for 2.11") {
    // $outer does not appear in this scenario for Scala 2.11
    ignore("should not expand $outer to its underlying fields") {
      val testClass = "org.scaladebugger.test.info.OuterScope"
      val testFile = JDITools.scalaClassStringToFileString(testClass)

      @volatile var t: Option[ThreadInfo] = None
      val s = DummyScalaVirtualMachine.newInstance()

      // NOTE: Do not resume so we can check the variables at the stack frame
      s.withProfile(JavaDebugProfile.Name)
        .getOrCreateBreakpointRequest(testFile, 17, NoResume)
        .foreach(e => t = Some(e.thread))

      withVirtualMachine(testClass, pendingScalaVirtualMachines = Seq(s)) { (s) =>
        logTimeTaken(eventually {
          val variableNames = t
            .flatMap(_.tryTopFrame.toOption)
            .map(_.allVariables)
            .map(_.map(_.name))
            .get

          // Expected: the outer object's fields plus the $outer reference itself
          variableNames should contain theSameElementsAs Seq(
            "MODULE$",
            "x",
            "executionStart",
            "scala$App$$_Args",
            "scala$App$$initCode",
            "$outer",
            "newValue"
          )
        })
      }
    }

    it("should not fix Scala-specific field names like org$scaladebugger$test$bugs$BugFromGitter$$name") {
      val testClass = "org.scaladebugger.test.bugs.BugFromGitter"
      val testFile = JDITools.scalaClassStringToFileString(testClass)

      @volatile var t: Option[ThreadInfo] = None
      val s = DummyScalaVirtualMachine.newInstance()

      // NOTE: Do not resume so we can check the variables at the stack frame
      s.withProfile(JavaDebugProfile.Name)
        .getOrCreateBreakpointRequest(testFile, 20, NoResume)
        .foreach(e => t = Some(e.thread))

      withVirtualMachine(testClass, pendingScalaVirtualMachines = Seq(s)) { (s) =>
        logTimeTaken(eventually {
          val fieldNames = t.get.topFrame.allVariables.map(_.name)

          fieldNames should contain theSameElementsAs Seq(
            "actualTimes",
            "times",
            "org$scaladebugger$test$bugs$BugFromGitter$$name"
          )
        })
      }
    }

  }
} 
Example 109
Source File: JavaObjectInfoScala211IntegrationSpec.scala    From scala-debugger   with Apache License 2.0 5 votes vote down vote up
package org.scaladebugger.api.profiles.java.info

import org.scaladebugger.api.lowlevel.events.misc.NoResume
import org.scaladebugger.api.profiles.java.JavaDebugProfile
import org.scaladebugger.api.profiles.traits.info.ThreadInfo
import org.scaladebugger.api.utils.JDITools
import org.scaladebugger.api.virtualmachines.DummyScalaVirtualMachine
import org.scaladebugger.test.helpers.ParallelMockFunSpec
import org.scalatest.concurrent.Eventually
import test.{ApiTestUtilities, VirtualMachineFixtures}

class JavaObjectInfoScala211IntegrationSpec extends ParallelMockFunSpec
  with VirtualMachineFixtures
  with ApiTestUtilities
  with Eventually
{
  describe("JavaObjectInfo for 2.11") {
    it("should be able to get a list of methods for the object") {
      val testClass = "org.scaladebugger.test.info.Methods"
      val testFile = JDITools.scalaClassStringToFileString(testClass)

      @volatile var t: Option[ThreadInfo] = None
      val s = DummyScalaVirtualMachine.newInstance()

      // NOTE: Do not resume so we can check the variables at the stack frame
      s.withProfile(JavaDebugProfile.Name)
        .getOrCreateBreakpointRequest(testFile, 22, NoResume)
        .foreach(e => t = Some(e.thread))

      withVirtualMachine(testClass, pendingScalaVirtualMachines = Seq(s)) { (s) =>
        logTimeTaken(eventually {
          val methodNames = t.get.topFrame.thisObject.methods.map(_.name)

          methodNames should contain theSameElementsAs Seq(
            // Defined methods
            "main",
            "innerMethod$1", // Nested method has different Java signature
            "publicMethod",
            "privateMethod",
            "protectedMethod",
            "zeroArgMethod",
            "functionMethod", // Scala provides a method for the function
                              // object since it would be treated as a field

            // Inherited methods
            "<clinit>",
            "<init>",
            "registerNatives",
            "getClass",
            "hashCode",
            "equals",
            "clone",
            "toString",
            "notify",
            "notifyAll",
            "wait", // Overloaded method
            "wait",
            "wait",
            "finalize"
          )
        })
      }
    }
  }
} 
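Note the three separate "wait" entries in the expected method lists above: contain theSameElementsAs compares the two collections as multisets, so ordering is ignored but duplicate counts must agree. A quick illustration:

import org.scalatest.Matchers._

object SameElementsSketch extends App {
  // passes: order differs, but every element occurs the same number of times
  Seq("wait", "wait", "wait", "notify") should contain theSameElementsAs Seq("notify", "wait", "wait", "wait")

  // would fail: "wait" occurs three times on the left and once on the right
  // Seq("wait", "wait", "wait") should contain theSameElementsAs Seq("wait")
}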
Example 110
Source File: HiveParquetWithPartitionTest.scala    From stream-reactor   with Apache License 2.0 5 votes vote down vote up
package com.landoop.streamreactor.hive.it

import java.util.concurrent.TimeUnit

import org.apache.hadoop.fs.Path
import org.scalatest.concurrent.Eventually
import org.scalatest.matchers.should.Matchers
import org.scalatest.time.{Millis, Span}
import org.scalatest.wordspec.AnyWordSpec

import scala.io.Source

class HiveParquetWithPartitionTest extends AnyWordSpec with Matchers with PersonTestData with Eventually with HiveTests {

  private implicit val patience: PatienceConfig = PatienceConfig(Span(60000, Millis), Span(5000, Millis))

  "Hive" should {
    "write partitioned records" in {

      val count = 100000L

      val topic = createTopic()
      val taskDef = Source.fromInputStream(getClass.getResourceAsStream("/hive_sink_task_with_partitions.json")).getLines().mkString("\n")
        .replace("{{TOPIC}}", topic)
        .replace("{{TABLE}}", topic)
        .replace("{{NAME}}", topic)
      postTask(taskDef)

      val producer = stringStringProducer()
      writeRecords(producer, topic, JacksonSupport.mapper.writeValueAsString(person), count)
      producer.close(30, TimeUnit.SECONDS)

      // wait for some data to have been flushed
      eventually {
        withConn { conn =>
          val stmt = conn.createStatement
          val rs = stmt.executeQuery(s"select count(*) FROM $topic")
          if (rs.next()) {
            val count = rs.getLong(1)
            println(s"Current count for $topic is $count")
            count should be > 100L
          } else {
            fail()
          }
        }
      }

      // we should see every partition created
      eventually {
        withConn { conn =>
          val stmt = conn.createStatement
          val rs = stmt.executeQuery(s"select distinct state from $topic")
          var count = 0
          while (rs.next()) {
            count = count + 1
          }
          println(s"State count is $count")
          count shouldBe states.length
        }
      }

      // check for the presence of each partition directory
      val table = metastore.getTable("default", topic)
      for (state <- states) {
        fs.exists(new Path(table.getSd.getLocation, s"state=$state")) shouldBe true
      }

      stopTask(topic)
    }
  }
} 
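The Hive integration tests widen Eventually's patience before doing anything else, and for good reason: the library default retries for only 150 milliseconds at a 15 millisecond interval, far too tight for a connector round-trip. Declaring a PatienceConfig as a private implicit val, as above, rescopes every eventually block in the suite. A minimal sketch of the same setup (class and test names are illustrative):

import org.scalatest.concurrent.Eventually
import org.scalatest.matchers.should.Matchers
import org.scalatest.time.{Millis, Span}
import org.scalatest.wordspec.AnyWordSpec

class SlowSystemSpec extends AnyWordSpec with Matchers with Eventually {

  // retry for up to 60 s, polling every 5 s, instead of the 150 ms / 15 ms defaults
  private implicit val patience: PatienceConfig =
    PatienceConfig(timeout = Span(60000, Millis), interval = Span(5000, Millis))

  "a slow system" should {
    "eventually settle" in {
      eventually { System.currentTimeMillis() should be > 0L }
    }
  }
}

Mixing in org.scalatest.concurrent.IntegrationPatience (15 seconds at a 150 millisecond interval) is the stock alternative when the defaults are only slightly too tight.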
Example 111
Source File: DeploymentSpec.scala    From skuber   with Apache License 2.0 5 votes vote down vote up
package skuber

import org.scalatest.Matchers
import org.scalatest.concurrent.{Eventually, ScalaFutures}
import skuber.LabelSelector.IsEqualRequirement
import skuber.apps.v1.Deployment

import scala.concurrent.duration._
import scala.concurrent.{Await, Future}
import scala.util.{Failure, Success}

class DeploymentSpec extends K8SFixture with Eventually with Matchers {
  val nginxDeploymentName: String = java.util.UUID.randomUUID().toString

  behavior of "Deployment"

  it should "create a deployment" in { k8s =>
    k8s.create(getNginxDeployment(nginxDeploymentName, "1.7.9")) map { d =>
      assert(d.name == nginxDeploymentName)
    }
  }

  it should "get the newly created deployment" in { k8s =>
    k8s.get[Deployment](nginxDeploymentName) map { d =>
      assert(d.name == nginxDeploymentName)
    }
  }

  it should "upgrade the newly created deployment" in { k8s =>
    k8s.get[Deployment](nginxDeploymentName).flatMap { d =>
      println(s"DEPLOYMENT TO UPDATE ==> $d")
      val updatedDeployment = d.updateContainer(getNginxContainer("1.9.1"))
      k8s.update(updatedDeployment).flatMap { _ =>
        eventually(timeout(200.seconds), interval(5.seconds)) {
          val retrieveDeployment = k8s.get[Deployment](nginxDeploymentName)
          ScalaFutures.whenReady(retrieveDeployment, timeout(2.seconds), interval(1.second)) { deployment =>
            deployment.status.get.updatedReplicas shouldBe 1
          }
        }
      }
    }
  }

  it should "delete a deployment" in { k8s =>
    k8s.deleteWithOptions[Deployment](nginxDeploymentName, DeleteOptions(propagationPolicy = Some(DeletePropagation.Foreground))).map { _ =>
      eventually(timeout(200.seconds), interval(3.seconds)) {
        val retrieveDeployment = k8s.get[Deployment](nginxDeploymentName)
        val deploymentRetrieved = Await.ready(retrieveDeployment, 2.seconds).value.get
        deploymentRetrieved match {
          case s: Success[_] => assert(false)
          case Failure(ex) => ex match {
            case ex: K8SException if ex.status.code.contains(404) => assert(true)
            case _ => assert(false)
          }
        }
      }
    }
  }

  def getNginxContainer(version: String): Container = Container(name = "nginx", image = "nginx:" + version).exposePort(80)

  def getNginxDeployment(name: String, version: String): Deployment = {
    import LabelSelector.dsl._
    val nginxContainer = getNginxContainer(version)
    val nginxTemplate = Pod.Template.Spec.named("nginx").addContainer(nginxContainer).addLabel("app" -> "nginx")
    Deployment(name).withTemplate(nginxTemplate).withLabelSelector("app" is "nginx")
  }
} 
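DeploymentSpec takes the other route: instead of overriding the suite-wide implicit, it passes timeout(...) and interval(...) straight to individual eventually calls. Both helpers come from the PatienceConfiguration trait that Eventually extends, and they override the implicit config for that one invocation only. A small sketch with illustrative values, relying on the same scala.concurrent.duration-to-Span conversion the spec above uses:

import org.scalatest.concurrent.Eventually._

import scala.concurrent.duration._

object PerCallPatience extends App {
  // up to 200 s, checking every 5 s, for this call only; other eventually
  // blocks still see the implicit PatienceConfig
  eventually(timeout(200.seconds), interval(5.seconds)) {
    assert(System.currentTimeMillis() > 0)
  }
}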
Example 112
Source File: OutputXMLMatchesInputXMLSpec.scala    From akka-xml-parser   with Apache License 2.0 5 votes vote down vote up
import akka.stream.scaladsl.{Keep, Source}
import akka.util.ByteString
import org.scalatest
import org.scalatest.concurrent.{Eventually, ScalaFutures}
import org.scalatest.mockito.MockitoSugar
import org.scalatest.{BeforeAndAfterEach, Matchers}
import uk.gov.hmrc.akka.xml._
import uk.gov.hmrc.play.test.UnitSpec

import scala.concurrent.ExecutionContext.Implicits.global

class OutputXMLMatchesInputXMLSpec extends UnitSpec with BeforeAndAfterEach with Matchers with ScalaFutures with MockitoSugar with Eventually with XMLParserFixtures {

  val inputXml                        = "<Address xmlns=\"http://www.govtalk.gov.uk/CM/address\"><Line>Line 1</Line><Line>Line 2</Line><PostCode>Tf3 4NT</PostCode></Address>"
  val inputXmlWithSelfClosingElement  = "<Address xmlns=\"http://www.govtalk.gov.uk/CM/address\"><Line>Line 1</Line><Line>Line 2</Line><Line/><PostCode>Tf3 4NT</PostCode></Address>"
  val inputXmlWithBlankElement        = "<Address xmlns=\"http://www.govtalk.gov.uk/CM/address\"><Line>Line 1</Line><Line>Line 2</Line><Line></Line><PostCode>Tf3 4NT</PostCode></Address>"

  val f = fixtures

  def xpathValue(xmlElements: Set[XMLElement], xPath: Seq[String]): Option[String] = xmlElements.collectFirst { case XMLElement(`xPath`, _, Some(xpathValue)) => xpathValue }

  def parseAndCompare(inputXml: String): scalatest.Assertion = {
    val inputXmlSource: Source[ByteString, _] = Source.single(ByteString(inputXml))

    await(
      for {
        parsedXmlElements <- inputXmlSource
          .via(CompleteChunkStage.parser())
          .via(ParsingStage.parser(Seq(XMLExtract(Seq("Address"), Map.empty, true))))
          .via(f.flowXMLElements)
          .toMat(f.collectXMLElements)(Keep.right)
          .run()(f.mat)

        parsedXml = xpathValue(parsedXmlElements, Seq("Address"))
      } yield {

        val outputXml = parsedXml.get

        println(s"INPUT  XML = $inputXml")
        println(s"OUTPUT XML = $outputXml")
        println()

        outputXml shouldBe inputXml
      }
    )
  }


  "The output XML" should {
    "match the input XML" when {
      "blank elements *** ARE *** present"            in parseAndCompare(inputXmlWithBlankElement)
      "self closing elements are *** NOT *** present" in parseAndCompare(inputXml)
      "self closing elements *** ARE *** present"     in parseAndCompare(inputXmlWithSelfClosingElement)
    }
  }


} 
Example 113
Source File: XMLParserXMLExtractNamespaceSpec.scala    From akka-xml-parser   with Apache License 2.0 5 votes vote down vote up
package uk.gov.hmrc.akka.xml

import akka.stream.scaladsl.{Keep, Source}
import akka.util.ByteString
import org.scalatest.{FlatSpec, Matchers}
import org.scalatest.concurrent.{Eventually, ScalaFutures}
import org.scalatest.mock.MockitoSugar
import org.scalatest.time.{Millis, Seconds, Span}

class XMLParserXMLExtractNamespaceSpec extends FlatSpec
  with Matchers
  with ScalaFutures
  with MockitoSugar
  with Eventually
  with XMLParserFixtures {

  val f = fixtures
  implicit override val patienceConfig =
    PatienceConfig(timeout = Span(5, Seconds), interval = Span(5, Millis))

  import f._

  behavior of "CompleteChunkStage#parser"


  it should "Parse and extract several non-default namespaces" in {

    val testXMLX =
      <ns5:GovTalkMessage
      xmlns:ns0="http://www.govtalk.gov.uk/taxation/PAYE/RTI/EmployerPaymentSummary/13-14/2"
      xmlns:ns2="http://www.govtalk.gov.uk/taxation/PAYE/RTI/EmployerPaymentSummary/15-16/1"
      xmlns:ns5="http://www.govtalk.gov.uk/CM/envelope"
      xmlns:ns1="http://www.govtalk.gov.uk/taxation/PAYE/RTI/EmployerPaymentSummary/14-15/1"
      xmlns:ns3="http://www.govtalk.gov.uk/taxation/PAYE/RTI/EmployerPaymentSummary/16-17/1"
      xmlns:ns4="http://www.govtalk.gov.uk/taxation/PAYE/RTI/EmployerPaymentSummary/17-18/1"
      xmlns="">
        <ns5:EnvelopeVersion>2.0</ns5:EnvelopeVersion>
        <ns5:Header></ns5:Header>
        <ns5:GovTalkDetails></ns5:GovTalkDetails>
      </ns5:GovTalkMessage>

    val source = Source(List(ByteString(testXMLX.toString())))


    val paths = Seq[XMLInstruction](
      XMLExtract(Seq("GovTalkMessage"), Map("xmlns:ns2" -> "http://www.govtalk.gov.uk/taxation/PAYE/RTI/EmployerPaymentSummary/15-16/1")),
      XMLExtract(Seq("GovTalkMessage"), Map("xmlns:BLABLA" -> "http://www.govtalk.gov.uk/taxation/PAYE/RTI/EmployerPaymentSummary/13-14/2")),
      XMLExtract(Seq("GovTalkMessage"), Map("xmlns" -> "http://www.govtalk.gov.uk/taxation/PAYE/RTI/EmployerPaymentSummary/17-18/1")),
      XMLExtract(Seq("GovTalkMessage"), Map("xmlns" -> "http://www.govtalk.gov.uk/CM/envelope"))
    )

    val expected = Set(
      XMLElement(List("GovTalkMessage"), Map("xmlns:ns2" -> "http://www.govtalk.gov.uk/taxation/PAYE/RTI/EmployerPaymentSummary/15-16/1"), Some("")),
      XMLElement(List("GovTalkMessage"), Map("xmlns:ns0" -> "http://www.govtalk.gov.uk/taxation/PAYE/RTI/EmployerPaymentSummary/13-14/2"), Some("")),
      XMLElement(List("GovTalkMessage"), Map("xmlns:ns4" -> "http://www.govtalk.gov.uk/taxation/PAYE/RTI/EmployerPaymentSummary/17-18/1"), Some("")),
      XMLElement(List("GovTalkMessage"), Map("xmlns:ns5" -> "http://www.govtalk.gov.uk/CM/envelope"), Some("")),
      XMLElement(List(), Map(CompleteChunkStage.STREAM_SIZE -> "681"), Some(CompleteChunkStage.STREAM_SIZE))
    )

    whenReady(source.runWith(parseToXMLElements(paths))) { r =>
      r shouldBe expected
    }

    whenReady(source.runWith(parseToByteString(paths))) { r =>
      whenReady(source.toMat(collectByteString)(Keep.right).run()) { t =>
        r shouldBe t
      }
    }
  }
} 
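XMLParserXMLExtractNamespaceSpec overrides a single patienceConfig even though its assertions run through whenReady rather than eventually. That works because Eventually and ScalaFutures both inherit PatienceConfig from AbstractPatienceConfiguration, so when the two traits are mixed into one suite a single implicit override governs both retry styles. A minimal sketch (suite and test names are illustrative):

import org.scalatest.{FlatSpec, Matchers}
import org.scalatest.concurrent.{Eventually, ScalaFutures}
import org.scalatest.time.{Millis, Seconds, Span}

import scala.concurrent.Future
import scala.concurrent.ExecutionContext.Implicits.global

class SharedPatienceSpec extends FlatSpec with Matchers with Eventually with ScalaFutures {

  // one override: applies to eventually { ... } and whenReady(...) alike
  implicit override val patienceConfig =
    PatienceConfig(timeout = Span(5, Seconds), interval = Span(5, Millis))

  "one patience config" should "apply to both styles" in {
    whenReady(Future(21 * 2)) { _ shouldBe 42 }
    eventually { (1 + 1) shouldBe 2 }
  }
}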
Example 114
Source File: XMLParsingStopSpec.scala    From akka-xml-parser   with Apache License 2.0 5 votes vote down vote up
package uk.gov.hmrc.akka.xml

import akka.stream.scaladsl.{Keep, Source}
import org.scalatest.{FlatSpec, Matchers}
import org.scalatest.concurrent.{Eventually, ScalaFutures}
import org.scalatest.mock.MockitoSugar
import org.scalatest.time.{Millis, Seconds, Span}

class XMLParsingStopSpec extends FlatSpec
  with Matchers
  with ScalaFutures
  with MockitoSugar
  with Eventually
  with XMLParserFixtures {

  val f = fixtures
  implicit override val patienceConfig =
    PatienceConfig(timeout = Span(5, Seconds), interval = Span(5, Millis))

  import f._

  it should "Stop parsing when the passed in xPath is encountered" in {

    val source = Source(ParserTestHelpers.getBrokenMessage(ParserTestHelpers.sa100.toString, 100))

    val paths = Seq[XMLInstruction](
      XMLExtract(Seq("GovTalkMessage", "Header", "MessageDetails", "Class")),
      XMLExtract(Seq("GovTalkMessage", "Header", "MessageDetails", "Qualifier")),
      XMLExtract(Seq("GovTalkMessage", "Header", "MessageDetails", "Function")),
      XMLExtract(Seq("GovTalkMessage", "Body", "IRenvelope", "MTR", "SA100", "YourPersonalDetails", "NationalInsuranceNumber")), //This is in the body, will not be parsed
      XMLStopParsing(Seq("GovTalkMessage", "Body"))
    )

    val expected = Set(
      XMLElement(List("GovTalkMessage", "Header", "MessageDetails", "Class"), Map(), Some("HMRC-SA-SA100")),
      XMLElement(List("GovTalkMessage", "Header", "MessageDetails", "Function"), Map(), Some("submit")),
      XMLElement(List("GovTalkMessage", "Header", "MessageDetails", "Qualifier"), Map(), Some("request"))
    )

    whenReady(source.runWith(parseToXMLElements(paths))) { r =>
      r.filterNot(a => a.value == Some(FastParsingStage.STREAM_SIZE)) shouldBe expected
    }

    whenReady(source.runWith(parseToByteString(paths))) { r =>
      whenReady(source.toMat(collectByteString)(Keep.right).run()) { t =>
        r shouldBe t
      }
    }
  }

  it should "Notify if the payload exceeded the maximum allowed size" in {
    val source = Source(ParserTestHelpers.getBrokenMessage(ParserTestHelpers.sa100.toString, 100))

    val paths = Seq[XMLInstruction](XMLExtract(Seq("GovTalkMessage", "Header", "MessageDetails", "Class")))
    val expected = Set(
      XMLElement(List("GovTalkMessage", "Header", "MessageDetails", "Class"), Map(), Some("HMRC-SA-SA100")),
      XMLElement(List(), Map(), Some("Stream max size"))
    )

    whenReady(source.runWith(parseToXMLElements(paths, Some(200)))) { r =>
      r.filterNot(a => a.value == Some(FastParsingStage.STREAM_SIZE)) shouldBe expected
    }

    whenReady(source.runWith(parseToByteString(paths))) { r =>
      whenReady(source.toMat(collectByteString)(Keep.right).run()) { t =>
        r shouldBe t
      }
    }
  }


} 
Example 115
Source File: StdinForSystemSpec.scala    From incubator-toree   with Apache License 2.0 5 votes vote down vote up
package system

import org.apache.toree.kernel.protocol.v5.client.SparkKernelClient
import org.scalatest.concurrent.Eventually
import org.scalatest.time.{Seconds, Milliseconds, Span}
import org.scalatest.{BeforeAndAfterAll, FunSpec, Matchers}
import test.utils.root.{SparkKernelClientDeployer, SparkKernelDeployer}



  describe("Stdin for System") {
    describe("when the kernel requests input") {
      ignore("should receive input based on the client's response function") {
        var response: String = ""
        client.setResponseFunction((_, _) => TestReplyString)

        // Read in a chunk of data (our reply string) and return it as a string
        // to be verified by the test
        client.execute(
          """
            |var result: Array[Byte] = Array()
            |val in = kernel.in
            |do {
            |    result = result :+ in.read().toByte
            |} while(in.available() > 0)
            |new String(result)
          """.stripMargin
        ).onResult { result =>
          response = result.data("text/plain")
        }.onError { _ =>
          fail("Client execution to trigger kernel input request failed!")
        }

        eventually {
          response should include (TestReplyString)
        }
      }
    }
  }

} 
Example 116
Source File: BrokerTransformerSpec.scala    From incubator-toree   with Apache License 2.0 5 votes vote down vote up
package org.apache.toree.interpreter.broker

import org.apache.toree.interpreter.{ExecuteError, Results}
import org.scalatest.concurrent.Eventually
import scala.concurrent.Promise
import org.scalatest.{FunSpec, Matchers, OneInstancePerTest}

class BrokerTransformerSpec extends FunSpec with Matchers
  with OneInstancePerTest with Eventually
{
  private val brokerTransformer = new BrokerTransformer

  describe("BrokerTransformer") {
    describe("#transformToInterpreterResult") {
      it("should convert to success with result output if no failure") {
        val codeResultPromise = Promise[BrokerTypes.CodeResults]()

        val transformedFuture = brokerTransformer.transformToInterpreterResult(
          codeResultPromise.future
        )

        val successOutput = "some success"
        codeResultPromise.success(successOutput)

        eventually {
          val result = transformedFuture.value.get.get
          result should be((Results.Success, Left(Map("text/plain" -> successOutput))))
        }
      }

      it("should convert to error with broker exception if failure") {
        val codeResultPromise = Promise[BrokerTypes.CodeResults]()

        val transformedFuture = brokerTransformer.transformToInterpreterResult(
          codeResultPromise.future
        )

        val failureException = new BrokerException("some failure")
        codeResultPromise.failure(failureException)

        eventually {
          val result = transformedFuture.value.get.get
          result should be((Results.Error, Right(ExecuteError(
            name = failureException.getClass.getName,
            value = failureException.getLocalizedMessage,
            stackTrace = failureException.getStackTrace.map(_.toString).toList
          ))))
        }
      }
    }
  }
} 
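BrokerTransformerSpec demonstrates a third way to wait on a Future, with no execution context and no Await: future.value is None until the future completes, so the chained transformedFuture.value.get.get throws inside eventually until the promise is fulfilled, then yields the completed result. A condensed sketch of the same trick:

import org.scalatest.concurrent.Eventually._

import scala.concurrent.Promise

object FutureValuePolling extends App {
  val p = Promise[String]()
  p.success("done")

  // p.future.value is an Option[Try[String]]: .get.get throws until the
  // future has completed, so eventually keeps retrying the block
  eventually {
    assert(p.future.value.get.get == "done")
  }
}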
Example 117
Source File: JMSSourceTaskTest.scala    From stream-reactor   with Apache License 2.0 5 votes vote down vote up
package com.datamountaineer.streamreactor.connect.source

import java.io.File
import java.util.UUID

import com.datamountaineer.streamreactor.connect.TestBase
import com.datamountaineer.streamreactor.connect.jms.source.JMSSourceTask
import com.datamountaineer.streamreactor.connect.jms.source.domain.JMSStructMessage
import javax.jms.Session
import org.apache.activemq.ActiveMQConnectionFactory
import org.apache.activemq.broker.BrokerService
import org.apache.activemq.broker.jmx.QueueViewMBean
import org.apache.kafka.connect.source.SourceTaskContext
import org.mockito.MockitoSugar
import org.scalatest.BeforeAndAfterAll
import org.scalatest.concurrent.Eventually

import scala.collection.JavaConverters._
import scala.reflect.io.Path


class JMSSourceTaskTest extends TestBase with BeforeAndAfterAll with Eventually with MockitoSugar {

  override def afterAll(): Unit = {
    Path(AVRO_FILE).delete()
  }


  "should start a JMSSourceTask, read records and ack messages" in {
    implicit val broker = new BrokerService()
    broker.setPersistent(false)
    broker.setUseJmx(true)
    broker.setDeleteAllMessagesOnStartup(true)
    val brokerUrl = "tcp://localhost:61640"
    broker.addConnector(brokerUrl)
    broker.setUseShutdownHook(false)
    val property = "java.io.tmpdir"
    val tempDir = System.getProperty(property)
    broker.setTmpDataDirectory(new File(tempDir))
    broker.start()

    val kafkaTopic = s"kafka-${UUID.randomUUID().toString}"
    val queueName = UUID.randomUUID().toString

    val kcql = getKCQL(kafkaTopic, queueName, "QUEUE")
    val props = getProps(kcql, brokerUrl)

    val context = mock[SourceTaskContext]
    when(context.configs()).thenReturn(props.asJava)

    val task = new JMSSourceTask()
    task.initialize(context)
    task.start(props.asJava)

    // send some records to the JMS queue (KCQL_SOURCE_QUEUE)

    val connectionFactory = new ActiveMQConnectionFactory()
    connectionFactory.setBrokerURL(brokerUrl)
    val conn = connectionFactory.createConnection()
    conn.start()
    val session = conn.createSession(false, Session.CLIENT_ACKNOWLEDGE)
    val queue = session.createQueue(queueName)
    val queueProducer = session.createProducer(queue)
    val messages = getTextMessages(10, session)
    messages.foreach(m => {
      queueProducer.send(m)
      m.acknowledge()
    })

    Thread.sleep(2000)

    val records = task.poll().asScala
    records.size shouldBe 10
    records.head.valueSchema().toString shouldBe JMSStructMessage.getSchema().toString
    messagesLeftToAckShouldBe(10)

    records.foreach(task.commitRecord)

    messagesLeftToAckShouldBe(0)

    task.stop()
    Path(AVRO_FILE).delete()
  }

  private def messagesLeftToAckShouldBe(numLeft: Int)(implicit broker: BrokerService) = eventually {
    val messagesLeft = broker.getManagementContext()
      .newProxyInstance(broker.getAdminView.getQueues()(0), classOf[QueueViewMBean], false)
      .asInstanceOf[QueueViewMBean]
      .getQueueSize
    messagesLeft shouldBe numLeft
  }
} 
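The private messagesLeftToAckShouldBe helper above shows how eventually composes into reusable assertions: the helper only needs the enclosing suite to mix in Eventually (plus, here, the implicit BrokerService) and every call site gets retry semantics for free. The same shape in miniature, with a hypothetical counter standing in for the JMX queue-size gauge:

import java.util.concurrent.atomic.AtomicInteger

import org.scalatest.Matchers._
import org.scalatest.concurrent.Eventually._

object HelperSketch extends App {
  // hypothetical stand-in for the broker's queue depth
  val pending = new AtomicInteger(10)

  def pendingShouldBe(expected: Int): Unit = eventually {
    pending.get shouldBe expected
  }

  new Thread(() => { Thread.sleep(20); pending.set(0) }).start()
  pendingShouldBe(0)
}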
Example 118
Source File: HiveSchemaTest.scala    From stream-reactor   with Apache License 2.0 5 votes vote down vote up
package com.landoop.streamreactor.hive.it

import java.util.concurrent.TimeUnit

import com.landoop.streamreactor.connect.hive.{DatabaseName, TableName}
import org.apache.kafka.connect.data.Schema
import org.scalatest.concurrent.Eventually
import org.scalatest.matchers.should.Matchers
import org.scalatest.time.{Millis, Span}
import org.scalatest.wordspec.AnyWordSpec

import scala.collection.JavaConverters._
import scala.io.Source
import scala.util.Random

class HiveSchemaTest extends AnyWordSpec with Matchers with PersonTestData with Eventually with HiveTests {

  private implicit val patience: PatienceConfig = PatienceConfig(Span(60000, Millis), Span(5000, Millis))

  case class Foo(s: String, l: Long, b: Boolean, d: Double)
  def foo = Foo("string", Random.nextLong, Random.nextBoolean, Random.nextDouble)

  "Hive" should {
    "create correct schema for table" in {

      val topic = createTopic()
      val taskDef = Source.fromInputStream(getClass.getResourceAsStream("/hive_sink_task_no_partitions.json")).getLines().mkString("\n")
        .replace("{{TOPIC}}", topic)
        .replace("{{TABLE}}", topic)
        .replace("{{NAME}}", topic)
      postTask(taskDef)

      val producer = stringStringProducer()
      writeRecords(producer, topic, JacksonSupport.mapper.writeValueAsString(foo), 2000)
      producer.close(30, TimeUnit.SECONDS)

      // wait for some data to have been flushed
      eventually {
        withConn { conn =>
          val stmt = conn.createStatement
          val rs = stmt.executeQuery(s"select count(*) FROM $topic")
          rs.next()
          rs.getLong(1) should be > 0L
        }
      }

      // check that the schema is correct
      val schema = com.landoop.streamreactor.connect.hive.schema(DatabaseName("default"), TableName(topic))
      schema.fields().asScala.map(_.name).toSet shouldBe Set("s", "b", "l", "d")
      schema.field("s").schema().`type`() shouldBe Schema.Type.STRING
      schema.field("l").schema().`type`() shouldBe Schema.Type.INT64
      schema.field("d").schema().`type`() shouldBe Schema.Type.FLOAT64
      schema.field("b").schema().`type`() shouldBe Schema.Type.BOOLEAN

      stopTask(topic)
    }
  }
} 
Example 120
Source File: HiveSourceTest.scala    From stream-reactor   with Apache License 2.0 5 votes vote down vote up
package com.landoop.streamreactor.hive.it

import java.util.Collections
import java.util.concurrent.TimeUnit

import org.scalatest.concurrent.Eventually
import org.scalatest.matchers.should.Matchers
import org.scalatest.time.{Millis, Span}
import org.scalatest.wordspec.AnyWordSpec

import scala.io.Source

class HiveSourceTest extends AnyWordSpec with Matchers with PersonTestData with Eventually with HiveTests {

  private implicit val patience: PatienceConfig = PatienceConfig(Span(60000, Millis), Span(5000, Millis))

  "Hive" should {
    "read non partitioned table" in {
      val count = 2000L

      val inputTopic = createTopic()
      val sinkTaskDef = Source.fromInputStream(getClass.getResourceAsStream("/hive_sink_task_no_partitions.json")).getLines().mkString("\n")
        .replace("{{TOPIC}}", inputTopic)
        .replace("{{TABLE}}", inputTopic)
        .replace("{{NAME}}", inputTopic)
      postTask(sinkTaskDef)

      val producer = stringStringProducer()
      writeRecords(producer, inputTopic, JacksonSupport.mapper.writeValueAsString(person), count)
      producer.close(30, TimeUnit.SECONDS)

      // we now should have 2000 records in hive which we can test via jdbc
      eventually {
        withConn { conn =>
          val stmt = conn.createStatement
          val rs = stmt.executeQuery(s"select count(*) from $inputTopic")
          rs.next()
          rs.getLong(1) shouldBe count
        }
      }

      stopTask(inputTopic)

      // now we can read them back in
      val outputTopic = createTopic()

      val sourceTaskDef = Source.fromInputStream(getClass.getResourceAsStream("/hive_source_task.json")).getLines().mkString("\n")
        .replace("{{TOPIC}}", outputTopic)
        .replace("{{TABLE}}", inputTopic)
        .replace("{{NAME}}", outputTopic)
      postTask(sourceTaskDef)

      // we should have 2000 records on the outputTopic
      var records = 0L
      val consumer = stringStringConsumer("earliest")
      consumer.subscribe(Collections.singleton(outputTopic))
      eventually {
        records = records + readRecords(consumer, outputTopic, 2, TimeUnit.SECONDS).size
        records shouldBe count
      }

      stopTask(outputTopic)
    }
  }
} 
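One subtlety in the test above: records = records + readRecords(...) mutates state inside the eventually block, and that state survives each retry, so the running total ratchets upward across attempts until it reaches count. That is deliberate here, but it also means any non-idempotent eventually block accumulates its side effects over retries. A tiny sketch of the ratcheting (values are illustrative):

import org.scalatest.concurrent.Eventually._

object RatchetSketch extends App {
  var total = 0

  // the mutation persists across retries, so the assertion
  // passes on the third attempt
  eventually {
    total += 1 // stand-in for "records += readRecords(...).size"
    assert(total >= 3)
  }
}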
Example 121
Source File: HiveParquetTest.scala    From stream-reactor   with Apache License 2.0 5 votes vote down vote up
package com.landoop.streamreactor.hive.it

import java.util.concurrent.TimeUnit

import org.scalatest.concurrent.Eventually
import org.scalatest.matchers.should.Matchers
import org.scalatest.time.{Millis, Span}
import org.scalatest.wordspec.AnyWordSpec

import scala.io.Source

class HiveParquetTest extends AnyWordSpec with Matchers with PersonTestData with Eventually with HiveTests {

  private implicit val patience: PatienceConfig = PatienceConfig(Span(30000, Millis), Span(2000, Millis))

  "Hive" should {
    "write records" in {

      val count = 10000L

      val topic = createTopic()
      val taskDef = Source.fromInputStream(getClass.getResourceAsStream("/hive_sink_task_no_partitions.json")).getLines().mkString("\n")
        .replace("{{TOPIC}}", topic)
        .replace("{{TABLE}}", topic)
        .replace("{{NAME}}", topic)
      postTask(taskDef)

      val producer = stringStringProducer()
      writeRecords(producer, topic, JacksonSupport.mapper.writeValueAsString(person), count)
      producer.close(30, TimeUnit.SECONDS)

      // we now should have 10000 records in hive which we can test via jdbc
      eventually {
        withConn { conn =>
          val stmt = conn.createStatement
          val rs = stmt.executeQuery(s"select count(*) from $topic")
          rs.next()
          rs.getLong(1) shouldBe count
        }
      }

      stopTask(topic)
    }
  }
} 
Example 122
Source File: HiveOrcTest.scala    From stream-reactor   with Apache License 2.0 5 votes vote down vote up
package com.landoop.streamreactor.hive.it

import java.util.concurrent.TimeUnit

import org.scalatest.concurrent.Eventually
import org.scalatest.matchers.should.Matchers
import org.scalatest.time.{Seconds, Span}
import org.scalatest.wordspec.AnyWordSpec

import scala.io.Source

class HiveOrcTest extends AnyWordSpec with Matchers with PersonTestData with Eventually with HiveTests {

  private implicit val patience: PatienceConfig = PatienceConfig(Span(120, Seconds), Span(10, Seconds))

  "Hive" should {
    "write non partitioned orc records" in {
      val count = 10000L

      val topic = createTopic()
      val taskDef = Source.fromInputStream(getClass.getResourceAsStream("/hive_sink_task_no_partitions-orc.json")).getLines().mkString("\n")
        .replace("{{TOPIC}}", topic)
        .replace("{{TABLE}}", topic)
        .replace("{{NAME}}", topic)
      postTask(taskDef)

      val producer = stringStringProducer()
      writeRecords(producer, topic, JacksonSupport.mapper.writeValueAsString(person), count)
      producer.close(30, TimeUnit.SECONDS)

      // we now should have 10000 records in hive which we can test via jdbc
      eventually {
        withConn { conn =>
          val stmt = conn.createStatement
          val rs = stmt.executeQuery(s"select count(*) from $topic")
          rs.next()
          rs.getLong(1) shouldBe count
        }
      }

      stopTask(topic)
    }
  }
} 
Example 124
Source File: HorizontalPodAutoscalerV2Beta1Spec.scala    From skuber   with Apache License 2.0 5 votes vote down vote up
package skuber

import org.scalatest.Matchers
import org.scalatest.concurrent.Eventually
import skuber.apps.v1.Deployment
import skuber.autoscaling.v2beta1.HorizontalPodAutoscaler
import skuber.autoscaling.v2beta1.HorizontalPodAutoscaler.ResourceMetricSource

class HorizontalPodAutoscalerV2Beta1Spec extends K8SFixture with Eventually with Matchers {
  behavior of "HorizontalPodAutoscalerV2Beta1"

  it should "create a HorizontalPodAutoscaler" in { k8s =>
    val name: String = java.util.UUID.randomUUID().toString
    println(name)
    k8s.create(getNginxDeployment(name, "1.7.9")) flatMap { d =>
      k8s.create(
        HorizontalPodAutoscaler(name).withSpec(
          HorizontalPodAutoscaler.Spec("v1", "Deployment", "nginx")
            .withMinReplicas(1)
            .withMaxReplicas(2)
            .addResourceMetric(ResourceMetricSource(Resource.cpu, Some(80), None))
        )
      ).map { result =>
        assert(result.name == name)
        assert(result.spec.contains(
          HorizontalPodAutoscaler.Spec("v1", "Deployment", "nginx")
            .withMinReplicas(1)
            .withMaxReplicas(2)
            .addResourceMetric(ResourceMetricSource(Resource.cpu, Some(80), None)))
        )
      }
    }
  }

  it should "update a HorizontalPodAutoscaler" in { k8s =>
    val name: String = java.util.UUID.randomUUID().toString
    k8s.create(getNginxDeployment(name, "1.7.9")) flatMap { d =>
      k8s.create(
        HorizontalPodAutoscaler(name).withSpec(
          HorizontalPodAutoscaler.Spec("v1", "Deployment", "nginx")
            .withMinReplicas(1)
            .withMaxReplicas(2)
            .addResourceMetric(ResourceMetricSource(Resource.cpu, Some(80), None))
        )
      ).flatMap(created =>
        eventually(
          k8s.get[HorizontalPodAutoscaler](created.name).flatMap { existing =>
            val updated = existing.withSpec(HorizontalPodAutoscaler.Spec("v1", "Deployment", "nginx")
              .withMinReplicas(1)
              .withMaxReplicas(3)
              .addResourceMetric(ResourceMetricSource(Resource.cpu, Some(80), None)))

            k8s.update(updated).map { result =>
              assert(result.name == name)
              assert(result.spec.contains(
                HorizontalPodAutoscaler.Spec("v1", "Deployment", "nginx")
                  .withMinReplicas(1)
                  .withMaxReplicas(3)
                  .addResourceMetric(ResourceMetricSource(Resource.cpu, Some(80), None))
              ))
            }
          }
        )
      )
    }
  }

  it should "delete a HorizontalPodAutoscaler" in { k8s =>
    val name: String = java.util.UUID.randomUUID().toString
    k8s.create(getNginxDeployment(name, "1.7.9")) flatMap { d =>
      k8s.create(
        HorizontalPodAutoscaler(name).withSpec(
          HorizontalPodAutoscaler.Spec("v1", "Deployment", "nginx")
            .withMinReplicas(1)
            .withMaxReplicas(2)
            .addResourceMetric(ResourceMetricSource(Resource.cpu, Some(80), None))
        )
      ).flatMap { created =>
        k8s.delete[HorizontalPodAutoscaler](created.name).flatMap { deleteResult =>
          k8s.get[HorizontalPodAutoscaler](created.name).map { x =>
            assert(false)
          } recoverWith {
            case ex: K8SException if ex.status.code.contains(404) => assert(true)
            case _ => assert(false)
          }
        }
      }
    }
  }

  def getNginxDeployment(name: String, version: String): Deployment = {
    import LabelSelector.dsl._
    val nginxContainer = getNginxContainer(version)
    val nginxTemplate = Pod.Template.Spec.named("nginx").addContainer(nginxContainer).addLabel("app" -> "nginx")
    Deployment(name).withTemplate(nginxTemplate).withLabelSelector("app" is "nginx")
  }

  def getNginxContainer(version: String): Container = {
    Container(name = "nginx", image = "nginx:" + version).exposePort(80)
  }
} 
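A caveat worth flagging in the update test above: eventually retries only when its block throws synchronously. A block that merely constructs and returns a Future hands that Future back immediately, so a future that later fails does not by itself trigger a retry; retries happen only if building the future (for example, k8s.get) throws. When retry-until-success over a future's result is wanted, the result has to be materialised inside the block, as DeploymentSpec does with whenReady and Await, and as this sketch shows (fetchReplicas is hypothetical):

import org.scalatest.concurrent.Eventually._

import scala.concurrent.duration._
import scala.concurrent.{Await, Future}
import scala.concurrent.ExecutionContext.Implicits.global

object AwaitInsideEventually extends App {
  def fetchReplicas(): Future[Int] = Future(1) // hypothetical async call

  eventually(timeout(30.seconds), interval(1.second)) {
    // materialise the result: a failed or slow future now throws inside
    // the block, which is what makes eventually retry it
    val replicas = Await.result(fetchReplicas(), 2.seconds)
    assert(replicas == 1)
  }
}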
Example 127
Source File: ArchiveCacheSpec.scala    From nexus-kg   with Apache License 2.0 5 votes vote down vote up
package ch.epfl.bluebrain.nexus.kg.archives

import java.time.{Clock, Instant, ZoneId}

import cats.effect.{IO, Timer}
import ch.epfl.bluebrain.nexus.admin.client.types.Project
import ch.epfl.bluebrain.nexus.commons.test.ActorSystemFixture
import ch.epfl.bluebrain.nexus.commons.test.io.IOOptionValues
import ch.epfl.bluebrain.nexus.iam.client.types.Identity.Anonymous
import ch.epfl.bluebrain.nexus.kg.TestHelper
import ch.epfl.bluebrain.nexus.kg.archives.Archive.{File, Resource, ResourceDescription}
import ch.epfl.bluebrain.nexus.kg.config.Settings
import ch.epfl.bluebrain.nexus.kg.resources.Id
import ch.epfl.bluebrain.nexus.kg.resources.syntax._
import org.scalatest.concurrent.Eventually
import org.scalatest.matchers.should.Matchers
import org.scalatest.wordspec.AnyWordSpecLike

import scala.concurrent.duration._

class ArchiveCacheSpec
    extends ActorSystemFixture("ArchiveCacheSpec", true)
    with TestHelper
    with AnyWordSpecLike
    with Matchers
    with IOOptionValues
    with Eventually {

  override implicit def patienceConfig: PatienceConfig = PatienceConfig(10.second, 50.milliseconds)

  private val appConfig = Settings(system).appConfig
  private implicit val config =
    appConfig.copy(archives = appConfig.archives.copy(cacheInvalidateAfter = 500.millis, maxResources = 100))
  private implicit val timer: Timer[IO] = IO.timer(system.dispatcher)

  private val cache: ArchiveCache[IO] = ArchiveCache[IO].unsafeToFuture().futureValue
  private implicit val clock          = Clock.fixed(Instant.EPOCH, ZoneId.systemDefault())
  private val instant                 = clock.instant()

  def randomProject() = {
    val instant = Instant.EPOCH
    // format: off
    Project(genIri, genString(), genString(), None, genIri, genIri, Map.empty, genUUID, genUUID, 1L, false, instant, genIri, instant, genIri)
    // format: on
  }

  "An archive cache" should {

    "write and read an Archive" in {
      val resId     = Id(randomProject().ref, genIri)
      val resource1 = Resource(genIri, randomProject(), None, None, originalSource = true, None)
      val file1     = File(genIri, randomProject(), None, None, None)
      val archive   = Archive(resId, instant, Anonymous, Set(resource1, file1))
      val _         = cache.put(archive).value.some
      cache.get(archive.resId).value.some shouldEqual archive
    }

    "read a non existing resource" in {
      val resId = Id(randomProject().ref, genIri)
      cache.get(resId).value.ioValue shouldEqual None
    }

    "read after timeout" in {
      val resId   = Id(randomProject().ref, genIri)
      val set     = Set[ResourceDescription](Resource(genIri, randomProject(), None, None, originalSource = true, None))
      val archive = Archive(resId, instant, Anonymous, set)
      val _       = cache.put(archive).value.some
      val time    = System.currentTimeMillis()
      cache.get(resId).value.some shouldEqual archive
      eventually {
        cache.get(resId).value.ioValue shouldEqual None
      }
      val diff = System.currentTimeMillis() - time
      diff should be > config.archives.cacheInvalidateAfter.toMillis
      diff should be < config.archives.cacheInvalidateAfter.toMillis + 300
    }
  }
} 
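Two details worth copying from this spec: patienceConfig is overridden as an implicit def built from scala.concurrent.duration values (ScalaTest converts FiniteDuration to Span implicitly), and the "read after timeout" test brackets an eventually block with wall-clock measurements to assert the expiry actually took effect within bounds. A condensed sketch of that timing pattern over a plain in-memory map (all names invented for illustration):

import scala.concurrent.duration._

import org.scalatest.concurrent.Eventually.{eventually, interval, timeout}
import org.scalatest.matchers.should.Matchers._

object ExpirySketch extends App {
  val expireAfter = 500.millis
  val cache = scala.collection.concurrent.TrieMap("key" -> "value")
  // a background task evicts the entry after the TTL
  new Thread(() => { Thread.sleep(expireAfter.toMillis); cache.remove("key") }).start()

  val start = System.currentTimeMillis()
  eventually(timeout(10.seconds), interval(50.millis)) {
    cache.get("key") shouldEqual None // retried until the eviction lands
  }
  val elapsed = System.currentTimeMillis() - start
  elapsed should be >= expireAfter.toMillis
}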
Example 128
Source File: MicroBlockInvSpecSpec.scala    From Waves   with MIT License 5 votes vote down vote up
package com.wavesplatform.network

import com.wavesplatform.common.state.ByteStr
import com.wavesplatform.crypto._
import com.wavesplatform.{EitherMatchers, TransactionGen}
import org.scalacheck.Gen
import org.scalatest.concurrent.Eventually
import org.scalatest.{FreeSpec, Matchers}
import org.scalatestplus.scalacheck.{ScalaCheckPropertyChecks => PropertyChecks}

class MicroBlockInvSpecSpec extends FreeSpec with Matchers with EitherMatchers with PropertyChecks with Eventually with TransactionGen {

  private val microBlockInvGen: Gen[MicroBlockInv] = for {
    acc          <- accountGen
    totalSig     <- byteArrayGen(SignatureLength)
    prevBlockSig <- byteArrayGen(SignatureLength)
  } yield MicroBlockInv(acc, ByteStr(totalSig), ByteStr(prevBlockSig))

  "MicroBlockInvMessageSpec" - {
    import MicroBlockInvSpec._

    "deserializeData(serializedData(data)) == data" in forAll(microBlockInvGen) { inv =>
      inv.signaturesValid() should beRight
      val restoredInv = deserializeData(serializeData(inv)).get
      restoredInv.signaturesValid() should beRight

      restoredInv shouldBe inv
    }
  }

} 
Example 129
Source File: AssetsRouteSpec.scala    From Waves   with MIT License 5 votes vote down vote up
package com.wavesplatform.http

import akka.http.scaladsl.model.StatusCodes
import akka.http.scaladsl.server.Route
import com.wavesplatform.account.Address
import com.wavesplatform.api.common.{CommonAccountsApi, CommonAssetsApi}
import com.wavesplatform.api.http.assets.AssetsApiRoute
import com.wavesplatform.api.http.requests.{TransferV1Request, TransferV2Request}
import com.wavesplatform.http.ApiMarshallers._
import com.wavesplatform.state.Blockchain
import com.wavesplatform.transaction.transfer._
import com.wavesplatform.wallet.Wallet
import com.wavesplatform.{RequestGen, TestTime}
import org.scalamock.scalatest.PathMockFactory
import org.scalatest.concurrent.Eventually
import play.api.libs.json.Writes

class AssetsRouteSpec extends RouteSpec("/assets") with RequestGen with PathMockFactory with Eventually with RestAPISettingsHelper {

  private val wallet = stub[Wallet]
  private val state  = stub[Blockchain]

  private val seed               = "seed".getBytes("UTF-8")
  private val senderPrivateKey   = Wallet.generateNewAccount(seed, 0)
  private val receiverPrivateKey = Wallet.generateNewAccount(seed, 1)

  (wallet.privateKeyAccount _).when(senderPrivateKey.toAddress).onCall((_: Address) => Right(senderPrivateKey)).anyNumberOfTimes()

  "/transfer" - {
    val route: Route = AssetsApiRoute(
      restAPISettings,
      wallet,
      DummyUtxPoolSynchronizer.accepting,
      state,
      new TestTime(),
      mock[CommonAccountsApi],
      mock[CommonAssetsApi]
    ).route

    def posting[A: Writes](v: A): RouteTestResult = Post(routePath("/transfer"), v).addHeader(ApiKeyHeader) ~> route

    "accepts TransferRequest" in {
      val req = TransferV1Request(
        assetId = None,
        feeAssetId = None,
        amount = 1 * Waves,
        fee = Waves / 3,
        sender = senderPrivateKey.toAddress.toString,
        attachment = Some("attachment"),
        recipient = receiverPrivateKey.toAddress.toString,
        timestamp = Some(System.currentTimeMillis())
      )

      posting(req) ~> check {
        status shouldBe StatusCodes.OK

        responseAs[TransferTransaction]
      }
    }

    "accepts VersionedTransferRequest" in {
      val req = TransferV2Request(
        assetId = None,
        amount = 1 * Waves,
        feeAssetId = None,
        fee = Waves / 3,
        sender = senderPrivateKey.toAddress.toString,
        attachment = None,
        recipient = receiverPrivateKey.toAddress.toString,
        timestamp = Some(System.currentTimeMillis())
      )

      posting(req) ~> check {
        status shouldBe StatusCodes.OK
        responseAs[TransferV2Request]
      }
    }

    "returns a error if it is not a transfer request" in {
      val req = issueReq.sample.get
      posting(req) ~> check {
        status shouldNot be(StatusCodes.OK)
      }
    }
  }

} 
Example 130
Source File: SharedSQLContext.scala    From sona   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.sql.test

import scala.concurrent.duration._

import org.scalatest.BeforeAndAfterEach
import org.scalatest.concurrent.Eventually

import org.apache.spark.{DebugFilesystem, SparkConf}
import org.apache.spark.sql.{SparkSession, SQLContext}


  // NOTE: this listing is truncated; the scraper dropped the enclosing
  // SharedSQLContext trait declaration and the shared
  // `protected var _spark: SparkSession` field used by the overrides below.
  protected override def afterAll(): Unit = {
    super.afterAll()
    if (_spark != null) {
      _spark.sessionState.catalog.reset()
      _spark.stop()
      _spark = null
    }
  }

  protected override def beforeEach(): Unit = {
    super.beforeEach()
    DebugFilesystem.clearOpenStreams()
  }

  protected override def afterEach(): Unit = {
    super.afterEach()
    // files can be closed from other threads, so wait a bit
    // normally this doesn't take more than 1s
    eventually(timeout(10.seconds)) {
      DebugFilesystem.assertNoOpenStreams()
    }
  }
} 
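Polling inside afterEach, as above, tolerates cleanup that happens on other threads without resorting to Thread.sleep. A trimmed, self-contained sketch of the same guard, with an AtomicInteger standing in for DebugFilesystem's open-stream tracking:

import java.util.concurrent.atomic.AtomicInteger

import scala.concurrent.duration._

import org.scalatest.BeforeAndAfterEach
import org.scalatest.concurrent.Eventually
import org.scalatest.funsuite.AnyFunSuite

class AsyncCleanupSketch extends AnyFunSuite with BeforeAndAfterEach with Eventually {

  private val openStreams = new AtomicInteger(0)

  override protected def afterEach(): Unit = {
    super.afterEach()
    // another thread may still be closing streams; poll instead of sleeping
    eventually(timeout(10.seconds)) {
      assert(openStreams.get == 0, s"${openStreams.get} streams still open")
    }
  }

  test("streams are closed asynchronously") {
    openStreams.incrementAndGet()
    new Thread(() => { Thread.sleep(100); openStreams.decrementAndGet() }).start()
  }
}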
Example 131
Source File: GlowBaseTest.scala    From glow   with Apache License 2.0 5 votes vote down vote up
package io.projectglow.sql

import htsjdk.samtools.util.Log
import org.apache.spark.sql.SparkSession
import org.apache.spark.{DebugFilesystem, SparkConf}
import org.scalatest.concurrent.{AbstractPatienceConfiguration, Eventually}
import org.scalatest.time.{Milliseconds, Seconds, Span}
import org.scalatest.{Args, FunSuite, Status, Tag}

import io.projectglow.Glow
import io.projectglow.SparkTestShim.SharedSparkSessionBase
import io.projectglow.common.{GlowLogging, TestUtils}
import io.projectglow.sql.util.BGZFCodec

abstract class GlowBaseTest
    extends FunSuite
    with SharedSparkSessionBase
    with GlowLogging
    with GlowTestData
    with TestUtils
    with JenkinsTestPatience {

  override protected def sparkConf: SparkConf = {
    super
      .sparkConf
      .set("spark.hadoop.io.compression.codecs", classOf[BGZFCodec].getCanonicalName)
  }

  override def initializeSession(): Unit = ()

  override protected implicit def spark: SparkSession = {
    val sess = SparkSession.builder().config(sparkConf).master("local[2]").getOrCreate()
    Glow.register(sess)
    SparkSession.setActiveSession(sess)
    Log.setGlobalLogLevel(Log.LogLevel.ERROR)
    sess
  }

  protected def gridTest[A](testNamePrefix: String, testTags: Tag*)(params: Seq[A])(
      testFun: A => Unit): Unit = {
    for (param <- params) {
      test(testNamePrefix + s" ($param)", testTags: _*)(testFun(param))
    }
  }

  override def afterEach(): Unit = {
    DebugFilesystem.assertNoOpenStreams()
    eventually {
      assert(spark.sparkContext.getPersistentRDDs.isEmpty)
      assert(spark.sharedState.cacheManager.isEmpty, "Cache not empty.")
    }
    super.afterEach()
  }

  override def runTest(testName: String, args: Args): Status = {
    logger.info(s"Running test '$testName'")
    val res = super.runTest(testName, args)
    if (res.succeeds()) {
      logger.info(s"Done running test '$testName'")
    } else {
      logger.info(s"Done running test '$testName' with a failure")
    }
    res
  }

  protected def withSparkConf[T](configs: Map[String, String])(f: => T): T = {
    val initialConfigValues = configs.keys.map(k => (k, spark.conf.getOption(k)))
    try {
      configs.foreach { case (k, v) => spark.conf.set(k, v) }
      f
    } finally {
      initialConfigValues.foreach {
        case (k, Some(v)) => spark.conf.set(k, v)
        case (k, None) => spark.conf.unset(k)
      }
    }
  }
}


// the trait declaration was dropped in this listing; reconstructed from the
// imports above and from GlowBaseTest mixing it in:
trait JenkinsTestPatience extends AbstractPatienceConfiguration with Eventually {

  final override implicit val patienceConfig: PatienceConfig =
    if (sys.env.get("JENKINS_HOST").nonEmpty) {
      // increase the timeout on jenkins where parallelizing causes things to be very slow
      PatienceConfig(Span(10, Seconds), Span(50, Milliseconds))
    } else {
      // use the default timeout on local machines so failures don't hang for a long time
      PatienceConfig(Span(5, Seconds), Span(15, Milliseconds))
    }
} 
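JenkinsTestPatience shows the standard way to package an environment-dependent PatienceConfig for reuse: extend AbstractPatienceConfiguration (which leaves patienceConfig abstract) and pick the spans at mix-in time. A sketch of the same shape keyed off a generic CI variable (the variable name and span values are assumptions):

import org.scalatest.concurrent.{AbstractPatienceConfiguration, Eventually}
import org.scalatest.time.{Millis, Seconds, Span}

trait CiAwarePatience extends AbstractPatienceConfiguration { this: Eventually =>
  final override implicit val patienceConfig: PatienceConfig =
    if (sys.env.contains("CI")) PatienceConfig(Span(10, Seconds), Span(50, Millis)) // slower CI boxes
    else PatienceConfig(Span(5, Seconds), Span(15, Millis))                         // snappy local runs
}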
Example 132
Source File: EmbeddedKafkaSpecSupport.scala    From embedded-kafka-schema-registry   with MIT License 5 votes vote down vote up
package net.manub.embeddedkafka.schemaregistry

import java.net.{InetAddress, Socket}

import net.manub.embeddedkafka.schemaregistry.EmbeddedKafkaSpecSupport.{
  Available,
  NotAvailable,
  ServerStatus
}
import org.scalatest.Assertion
import org.scalatest.concurrent.{Eventually, IntegrationPatience}
import org.scalatest.matchers.should.Matchers
import org.scalatest.time.{Milliseconds, Seconds, Span}
import org.scalatest.wordspec.AnyWordSpecLike

import scala.util.{Failure, Success, Try}

trait EmbeddedKafkaSpecSupport
    extends AnyWordSpecLike
    with Matchers
    with Eventually
    with IntegrationPatience {

  implicit val config: PatienceConfig =
    PatienceConfig(Span(1, Seconds), Span(100, Milliseconds))

  def expectedServerStatus(port: Int, expectedStatus: ServerStatus): Assertion =
    eventually {
      status(port) shouldBe expectedStatus
    }

  private def status(port: Int): ServerStatus = {
    Try(new Socket(InetAddress.getByName("localhost"), port)) match {
      case Failure(_) => NotAvailable
      case Success(_) => Available
    }
  }
}

object EmbeddedKafkaSpecSupport {
  sealed trait ServerStatus
  case object Available    extends ServerStatus
  case object NotAvailable extends ServerStatus
} 
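A hypothetical spec built on the trait above only needs to mix it in and assert on ports; the 1 s / 100 ms PatienceConfig drives the retries inside expectedServerStatus (the port numbers here are assumptions):

import net.manub.embeddedkafka.schemaregistry.EmbeddedKafkaSpecSupport
import net.manub.embeddedkafka.schemaregistry.EmbeddedKafkaSpecSupport.{Available, NotAvailable}

class RegistryStartupSpec extends EmbeddedKafkaSpecSupport {

  "an embedded schema registry" should {
    "start accepting connections on its port" in {
      expectedServerStatus(8081, Available) // retried for up to 1s, every 100ms
    }
    "report an unused port as unavailable" in {
      expectedServerStatus(65534, NotAvailable)
    }
  }
}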
Example 133
Source File: TestSpec.scala    From reactive-programming   with Apache License 2.0 5 votes vote down vote up
package com.test

import java.io.IOException
import java.util.UUID

import akka.actor.{ ActorRef, ActorSystem, PoisonPill }
import akka.event.{ Logging, LoggingAdapter }
import akka.testkit.TestProbe
import akka.util.Timeout
import org.scalatest.concurrent.{ Eventually, ScalaFutures }
import org.scalatest.exceptions.TestFailedException
import org.scalatest._
import rx.lang.scala._

import scala.concurrent.duration._
import scala.concurrent.{ ExecutionContextExecutor, Future }
import scala.util.{ Random ⇒ Rnd, Try }

object Random {
  def apply(): Rnd = new Rnd()
}

trait TestSpec extends FlatSpec with Matchers with ScalaFutures with TryValues with OptionValues with Eventually with BeforeAndAfterAll {
  implicit val system: ActorSystem = ActorSystem("test")
  implicit val ec: ExecutionContextExecutor = system.dispatcher
  val log: LoggingAdapter = Logging(system, this.getClass)
  implicit val pc: PatienceConfig = PatienceConfig(timeout = 50.seconds)
  implicit val timeout = Timeout(50.seconds)

  override protected def afterAll(): Unit = {
    system.terminate()
  }

  
  // a doc comment was stripped here by the scraper; `cleanup` below relies
  // on a watcher probe, reconstructed as:
  val probe = TestProbe()

  def cleanup(actors: ActorRef*): Unit = {
    actors.foreach { (actor: ActorRef) ⇒
      actor ! PoisonPill
      probe watch actor
    }
  }

  implicit class PimpedByteArray(self: Array[Byte]) {
    def getString: String = new String(self)
  }

  implicit class PimpedFuture[T](self: Future[T]) {
    def toTry: Try[T] = Try(self.futureValue)
  }

  implicit class PimpedObservable[T](self: Observable[T]) {
    def waitFor: Unit = {
      self.toBlocking.toIterable.last
    }
  }

  implicit class MustBeWord[T](self: T) {
    def mustBe(pf: PartialFunction[T, Unit]): Unit =
      if (!pf.isDefinedAt(self)) throw new TestFailedException("Unexpected: " + self, 0)
  }

  object Socket { def apply() = new Socket }
  class Socket {
    def readFromMemory: Future[Array[Byte]] = Future {
      Thread.sleep(100) // sleep 100 millis
      "fromMemory".getBytes
    }

    def send(payload: Array[Byte], from: String, failed: Boolean): Future[Array[Byte]] =
      if (failed) Future.failed(new IOException(s"Network error: $from"))
      else {
        Future {
          Thread.sleep(250) // sleep 250 millis, not real life time, but hey
          s"${payload.getString}->$from".getBytes
        }
      }

    def sendToEurope(payload: Array[Byte], failed: Boolean = false): Future[Array[Byte]] =
      send(payload, "fromEurope", failed)

    def sendToUsa(payload: Array[Byte], failed: Boolean = false): Future[Array[Byte]] =
      send(payload, "fromUsa", failed)
  }
} 
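A hypothetical spec exercising the fixture above; futureValue comes from ScalaFutures and honours the 50-second PatienceConfig, getString from PimpedByteArray, and toTry from PimpedFuture:

class SocketSpec extends TestSpec {

  "A Socket" should "round-trip a payload to Europe" in {
    val socket = Socket()
    val result = socket.readFromMemory.flatMap(socket.sendToEurope(_))
    result.futureValue.getString shouldBe "fromMemory->fromEurope"
  }

  it should "wrap a failed send in a Failure" in {
    val socket = Socket()
    socket.sendToUsa("payload".getBytes, failed = true).toTry.isFailure shouldBe true
  }
}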
Example 134
Source File: AllCodecTest.scala    From aws-lambda-scala   with MIT License 5 votes vote down vote up
package io.github.mkotsur.aws.codecs

import java.io.ByteArrayOutputStream

import com.amazonaws.services.lambda.runtime.Context
import io.circe.generic.auto._
import io.github.mkotsur.StringInputStream
import org.scalatest.EitherValues._
import org.scalatest.concurrent.Eventually
import org.mockito.MockitoSugar
import org.scalatest.funsuite.AnyFunSuite
import org.scalatest.matchers.should
import org.scalatest.{FunSuite, Matchers}

class AllCodecTest extends AnyFunSuite with should.Matchers with MockitoSugar with Eventually {

  test("should decode null") {
    new AllCodec {
      val is = new StringInputStream("""null""")

      val value = canDecodeAll[None.type].readStream(is)
      value.right.value shouldBe Option.empty[None.type]
    }
  }

  test("should decode empty string") {
    new AllCodec {
      val is = new StringInputStream("")

      val value = canDecodeAll[None.type].readStream(is)
      value.right.value shouldBe Option.empty[None.type]
    }
  }

  test("should encode null") {
    new AllCodec {
      val os = new ByteArrayOutputStream()

      val context: Context = mock[Context]

      canEncodeAll[None.type].writeStream(os, Right(None), context)
      os.toString shouldBe "null"
    }
  }

} 
Example 135
Source File: LithiumMultiNodeSpec.scala    From lithium   with Apache License 2.0 5 votes vote down vote up
package com.swissborg.lithium

import akka.actor.{ActorSystem, Address}
import akka.cluster.Cluster
import akka.cluster.MemberStatus._
import akka.remote.testconductor.RoleName
import akka.remote.testkit.{MultiNodeConfig, MultiNodeSpec, MultiNodeSpecCallbacks}
import akka.testkit.ImplicitSender
import org.scalatest.BeforeAndAfterAll
import org.scalatest.concurrent.{Eventually, IntegrationPatience}
import org.scalatest.matchers.should.Matchers
import org.scalatest.wordspec.AnyWordSpecLike

abstract class LithiumMultiNodeSpec(val config: MultiNodeConfig)
    extends MultiNodeSpec(config)
    with MultiNodeSpecCallbacks
    with AnyWordSpecLike
    with Matchers
    with BeforeAndAfterAll
    with ImplicitSender
    with Eventually
    with IntegrationPatience {
  override def beforeAll(): Unit = multiNodeSpecBeforeAll()
  override def afterAll(): Unit  = multiNodeSpecAfterAll()

  private val addresses: Map[RoleName, Address] = roles.map(r => r -> node(r).address).toMap

  protected def addressOf(roleName: RoleName): Address = addresses(roleName)

  protected def waitToBecomeUnreachable(roleNames: RoleName*): Unit =
    awaitCond(allUnreachable(roleNames: _*))

  protected def waitForSurvivors(roleNames: RoleName*): Unit =
    awaitCond(allSurvivors(roleNames: _*))

  protected def waitForUp(roleNames: RoleName*): Unit = awaitCond(allUp(roleNames: _*))

  protected def waitForSelfDowning(implicit system: ActorSystem): Unit = awaitCond(downedItself)

  protected def waitForAllLeaving(roleNames: RoleName*): Unit =
    awaitCond(allLeaving(roleNames: _*))

  protected def waitExistsAllDownOrGone(groups: Seq[Seq[RoleName]]): Unit =
    awaitCond(existsAllDownOrGone(groups))

  private def allUnreachable(roleNames: RoleName*): Boolean =
    roleNames.forall(
      role => Cluster(system).state.unreachable.exists(_.address === addressOf(role))
    )

  private def allSurvivors(roleNames: RoleName*): Boolean =
    roleNames.forall(role => Cluster(system).state.members.exists(_.address === addressOf(role)))

  private def allUp(roleNames: RoleName*): Boolean =
    roleNames.forall(
      role => Cluster(system).state.members.exists(m => m.address === addressOf(role) && m.status === Up)
    )

  private def existsAllDownOrGone(groups: Seq[Seq[RoleName]]): Boolean =
    groups.exists(group => allLeaving(group: _*))

  private def downedItself(implicit system: ActorSystem): Boolean = {
    val selfAddress = Cluster(system).selfAddress
    Cluster(system).state.members
      .exists(
        m => m.address === selfAddress && (m.status === Exiting || m.status === Down || m.status === Removed)
      )
  }

  private def allLeaving(roleNames: RoleName*): Boolean =
    roleNames.forall { role =>
      val members     = Cluster(system).state.members
      val unreachable = Cluster(system).state.unreachable

      val address = addressOf(role)

      unreachable.isEmpty &&                                                                        // no unreachable members
      (members.exists(m => m.address === address && (m.status === Down || m.status === Exiting)) || // member is down
      !members.exists(_.address === address))                                                       // member is not in the cluster
    }
} 
Example 136
Source File: BotPluginTestKit.scala    From sumobot   with Apache License 2.0 5 votes vote down vote up
package com.sumologic.sumobot.test.annotated

import akka.actor.ActorSystem
import akka.testkit.{TestKit, TestProbe}
import com.sumologic.sumobot.core.model.{IncomingMessage, InstantMessageChannel, OutgoingMessage, UserSender}
import org.junit.runner.RunWith
import org.scalatest.concurrent.Eventually
import org.scalatest.junit.JUnitRunner
import org.scalatest.{BeforeAndAfterAll, Matchers, WordSpecLike}
import slack.models.User

import scala.concurrent.duration.{FiniteDuration, _}

@RunWith(classOf[JUnitRunner])
abstract class BotPluginTestKit(actorSystem: ActorSystem)
  extends TestKit(actorSystem)
    with WordSpecLike with Eventually with Matchers
    with BeforeAndAfterAll {

  protected val outgoingMessageProbe = TestProbe()
  system.eventStream.subscribe(outgoingMessageProbe.ref, classOf[OutgoingMessage])

  protected def confirmOutgoingMessage(test: OutgoingMessage => Unit, timeout: FiniteDuration = 1.second): Unit = {
    outgoingMessageProbe.expectMsgClass(timeout, classOf[OutgoingMessage]) match {
      case msg: OutgoingMessage =>
        test(msg)
    }
  }

  protected def instantMessage(text: String, user: User = mockUser("123", "jshmoe")): IncomingMessage = {
    IncomingMessage(text, true, InstantMessageChannel("125", user), "1527239216000090", sentBy = UserSender(user))
  }

  protected def mockUser(id: String, name: String): User = {
    User(id, name, None, None, None, None, None, None, None, None, None, None, None, None, None, None)
  }

  protected def send(message: IncomingMessage): Unit = {
    system.eventStream.publish(message)
  }

  override protected def afterAll(): Unit = {
    TestKit.shutdownActorSystem(system)
  }
} 
Example 137
Source File: FutureAwaitWithFailFastFnTest.scala    From kafka-connect-common   with Apache License 2.0 5 votes vote down vote up
package com.datamountaineer.streamreactor.connect.concurrent

import java.util.concurrent.Executors

import com.datamountaineer.streamreactor.connect.concurrent.ExecutorExtension._
import org.scalactic.source.Position
import org.scalatest.concurrent.{Eventually, TimeLimits}
import org.scalatest.matchers.should.Matchers
import org.scalatest.time.{Millis, Span}
import org.scalatest.wordspec.AnyWordSpec

import scala.util.{Failure, Try}


class FutureAwaitWithFailFastFnTest extends AnyWordSpec with Matchers with Eventually with TimeLimits {


  "FutureAwaitWithFailFastFn" should {
    "return when all the futures have completed" in {
      val exec = Executors.newFixedThreadPool(10)
      val futures = (1 to 5).map(i => exec.submit {
        Thread.sleep(300)
        i
      })
      eventually {
        val result = FutureAwaitWithFailFastFn(exec, futures)
        exec.isTerminated shouldBe true
        result shouldBe Seq(1, 2, 3, 4, 5)
      }
    }

    "stop when the first futures times out" in {
      val exec = Executors.newFixedThreadPool(6)
      val futures = for (i <- 1 to 10) yield {
        exec.submit {
          if (i == 4) {
            Thread.sleep(1000)
            sys.error("this task failed.")
          } else {
            Thread.sleep(50000)
          }
        }
      }

      eventually {
        val t = Try(FutureAwaitWithFailFastFn(exec, futures))
        t.isFailure shouldBe true
        t.asInstanceOf[Failure[_]].exception.getMessage shouldBe "this task failed."
        exec.isTerminated shouldBe true
      }
    }
  }

} 
Example 138
Source File: KafkaStreamSuite.scala    From BigDatalog   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.streaming.kafka

import scala.collection.mutable
import scala.concurrent.duration._
import scala.language.postfixOps
import scala.util.Random

import kafka.serializer.StringDecoder
import org.scalatest.BeforeAndAfterAll
import org.scalatest.concurrent.Eventually

import org.apache.spark.{SparkConf, SparkFunSuite}
import org.apache.spark.storage.StorageLevel
import org.apache.spark.streaming.{Milliseconds, StreamingContext}

class KafkaStreamSuite extends SparkFunSuite with Eventually with BeforeAndAfterAll {
  private var ssc: StreamingContext = _
  private var kafkaTestUtils: KafkaTestUtils = _

  override def beforeAll(): Unit = {
    kafkaTestUtils = new KafkaTestUtils
    kafkaTestUtils.setup()
  }

  override def afterAll(): Unit = {
    if (ssc != null) {
      ssc.stop()
      ssc = null
    }

    if (kafkaTestUtils != null) {
      kafkaTestUtils.teardown()
      kafkaTestUtils = null
    }
  }

  test("Kafka input stream") {
    val sparkConf = new SparkConf().setMaster("local[4]").setAppName(this.getClass.getSimpleName)
    ssc = new StreamingContext(sparkConf, Milliseconds(500))
    val topic = "topic1"
    val sent = Map("a" -> 5, "b" -> 3, "c" -> 10)
    kafkaTestUtils.createTopic(topic)
    kafkaTestUtils.sendMessages(topic, sent)

    val kafkaParams = Map("zookeeper.connect" -> kafkaTestUtils.zkAddress,
      "group.id" -> s"test-consumer-${Random.nextInt(10000)}",
      "auto.offset.reset" -> "smallest")

    val stream = KafkaUtils.createStream[String, String, StringDecoder, StringDecoder](
      ssc, kafkaParams, Map(topic -> 1), StorageLevel.MEMORY_ONLY)
    val result = new mutable.HashMap[String, Long]() with mutable.SynchronizedMap[String, Long]
    stream.map(_._2).countByValue().foreachRDD { r =>
      val ret = r.collect()
      ret.toMap.foreach { kv =>
        val count = result.getOrElseUpdate(kv._1, 0) + kv._2
        result.put(kv._1, count)
      }
    }

    ssc.start()

    eventually(timeout(10000 milliseconds), interval(100 milliseconds)) {
      assert(sent === result)
    }
  }
} 
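The timeout(10000 milliseconds) form relies on scala.language.postfixOps; the same retry budget can be written with explicit Spans (or non-postfix durations) and dropped into the identical call site:

import org.scalatest.time.{Millis, Span}

// identical retry budget, no postfix-operator import required
eventually(timeout(Span(10000, Millis)), interval(Span(100, Millis))) {
  assert(sent === result)
}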
Example 139
Source File: MQTTStreamSuite.scala    From spark1.52   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.streaming.mqtt

import scala.concurrent.duration._
import scala.language.postfixOps

import org.scalatest.BeforeAndAfter
import org.scalatest.concurrent.Eventually

import org.apache.spark.{SparkConf, SparkFunSuite}
import org.apache.spark.storage.StorageLevel
import org.apache.spark.streaming.{Milliseconds, StreamingContext}

class MQTTStreamSuite extends SparkFunSuite with Eventually with BeforeAndAfter {

  private val batchDuration = Milliseconds(500)
  private val master = "local[2]"
  private val framework = this.getClass.getSimpleName
  private val topic = "def"

  private var ssc: StreamingContext = _
  private var mqttTestUtils: MQTTTestUtils = _

  before {
    ssc = new StreamingContext(master, framework, batchDuration)
    mqttTestUtils = new MQTTTestUtils
    mqttTestUtils.setup()
  }

  after {
    if (ssc != null) {
      ssc.stop()
      ssc = null
    }
    if (mqttTestUtils != null) {
      mqttTestUtils.teardown()
      mqttTestUtils = null
    }
  }

  test("mqtt input stream") {
    val sendMessage = "MQTT demo for spark streaming"
    val receiveStream = MQTTUtils.createStream(ssc, "tcp://" + mqttTestUtils.brokerUri, topic,
      StorageLevel.MEMORY_ONLY)

    @volatile var receiveMessage: List[String] = List()
    receiveStream.foreachRDD { rdd =>
      if (rdd.collect.length > 0) {
        receiveMessage = receiveMessage ::: List(rdd.first)
        receiveMessage
      }
    }

    ssc.start()

    // Retry it because we don't know when the receiver will start.
    eventually(timeout(10000 milliseconds), interval(100 milliseconds)) {
      mqttTestUtils.publishData(topic, sendMessage)
      assert(sendMessage.equals(receiveMessage(0)))
    }
    ssc.stop()
  }
} 
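One subtlety above: the whole eventually block re-runs on every attempt, so publishData is called repeatedly until the assertion passes. That is intentional here, since the receiver's start time is unknown, but when re-publishing is not acceptable the side effect should sit outside the loop:

// publish exactly once, poll only the assertion
mqttTestUtils.publishData(topic, sendMessage)
eventually(timeout(10.seconds), interval(100.milliseconds)) {
  assert(receiveMessage.headOption.contains(sendMessage))
}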
Example 140
Source File: KafkaStreamSuite.scala    From spark1.52   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.streaming.kafka

import scala.collection.mutable
import scala.concurrent.duration._
import scala.language.postfixOps
import scala.util.Random

import kafka.serializer.StringDecoder
import org.scalatest.BeforeAndAfterAll
import org.scalatest.concurrent.Eventually

import org.apache.spark.{SparkConf, SparkFunSuite}
import org.apache.spark.storage.StorageLevel
import org.apache.spark.streaming.{Milliseconds, StreamingContext}

class KafkaStreamSuite extends SparkFunSuite with Eventually with BeforeAndAfterAll {
  private var ssc: StreamingContext = _
  private var kafkaTestUtils: KafkaTestUtils = _

  override def beforeAll(): Unit = {
    kafkaTestUtils = new KafkaTestUtils
    kafkaTestUtils.setup()
  }

  override def afterAll(): Unit = {
    if (ssc != null) {
      ssc.stop()
      ssc = null
    }

    if (kafkaTestUtils != null) {
      kafkaTestUtils.teardown()
      kafkaTestUtils = null
    }
  }

  test("Kafka input stream") {//Kafka输入流
    val sparkConf = new SparkConf().setMaster("local[4]").setAppName(this.getClass.getSimpleName)
    ssc = new StreamingContext(sparkConf, Milliseconds(500))
    val topic = "topic1"
    val sent = Map("a" -> 5, "b" -> 3, "c" -> 10)
    kafkaTestUtils.createTopic(topic)
    kafkaTestUtils.sendMessages(topic, sent)

    val kafkaParams = Map("zookeeper.connect" -> kafkaTestUtils.zkAddress,
      "group.id" -> s"test-consumer-${Random.nextInt(10000)}",
      "auto.offset.reset" -> "smallest")

    val stream = KafkaUtils.createStream[String, String, StringDecoder, StringDecoder](
      ssc, kafkaParams, Map(topic -> 1), StorageLevel.MEMORY_ONLY)
    val result = new mutable.HashMap[String, Long]() with mutable.SynchronizedMap[String, Long]
    stream.map(_._2).countByValue().foreachRDD { r =>
      val ret = r.collect()
      ret.toMap.foreach { kv =>
        val count = result.getOrElseUpdate(kv._1, 0) + kv._2
        result.put(kv._1, count)
      }
    }

    ssc.start()

    eventually(timeout(10000 milliseconds), interval(100 milliseconds)) {
      assert(sent === result)
    }
  }
} 
Example 141
Source File: JobStatusFlusherSpec.scala    From mist   with Apache License 2.0 5 votes vote down vote up
package io.hydrosphere.mist.master.execution.status

import java.util.concurrent.atomic.AtomicReference

import akka.actor.ActorSystem
import akka.testkit.{TestActorRef, TestKit}
import io.hydrosphere.mist.master.JobDetails.Status
import io.hydrosphere.mist.master.Messages.StatusMessages._
import io.hydrosphere.mist.master.{ActorSpec, JobDetails, TestData}
import io.hydrosphere.mist.master.logging.JobLogger
import mist.api.data._
import org.scalatest.concurrent.Eventually
import org.scalatest.{FunSpecLike, Matchers}
import org.scalatest.prop.TableDrivenPropertyChecks._
import org.scalatest.time.{Seconds, Span}

import scala.concurrent.{Future, Promise}

class JobStatusFlusherSpec extends ActorSpec("job-status-flusher") with TestData with Eventually {

  it("should flush status correctly") {
    val initial = Promise[JobDetails]

    val updateResult = new AtomicReference[Option[JobDetails]](None)
    val props = JobStatusFlusher.props(
      id = "id",
      get = (_) => initial.future,
      update = (d: JobDetails) => {updateResult.set(Some(d));Future.successful(())},
      loggerF = _ => JobLogger.NOOP
    )
    val flusher = TestActorRef(props)

    flusher ! ReportedEvent.plain(QueuedEvent("id"))
    flusher ! ReportedEvent.plain(StartedEvent("id", System.currentTimeMillis()))
    flusher ! ReportedEvent.plain(FinishedEvent("id", System.currentTimeMillis(), JsNumber(42)))

    initial.success(mkDetails(JobDetails.Status.Initialized))

    eventually(timeout(Span(3, Seconds))) {
      val value = updateResult.get
      value.isDefined shouldBe true

      val d = value.get
      d.status shouldBe JobDetails.Status.Finished
    }
  }

  describe("event conversion") {

    val baseDetails = mkDetails(JobDetails.Status.Initialized)

    val expected = Table(
      ("event", "details"),
      (QueuedEvent("id"), baseDetails.copy(status = Status.Queued)),
      (StartedEvent("id", 1), baseDetails.copy(status = Status.Started, startTime = Some(1))),
      (CancellingEvent("id", 1), baseDetails.copy(status = Status.Cancelling)),
      (CancelledEvent("id", 1), baseDetails.copy(status = Status.Canceled, endTime = Some(1))),
      (FinishedEvent("id", 1, JsMap("1" -> JsNumber(2))),
        baseDetails.copy(
          status = Status.Finished,
          endTime = Some(1),
          jobResult =
            Some(
              Right(JsMap("1" -> JsNumber(2)))
            )
        )),
      (FailedEvent("id", 1, "error"),
        baseDetails.copy(status = Status.Failed, endTime = Some(1), jobResult = Some(Left("error")))),
      (WorkerAssigned("id", "workerId"), baseDetails.copy(workerId = Some("workerId")))
    )

    it("should correct update job details") {
      forAll(expected) { (e: UpdateStatusEvent, d: JobDetails) =>
        JobStatusFlusher.applyStatusEvent(baseDetails, e) shouldBe d
      }
    }
  }
} 
Example 142
Source File: StatusReporterSpec.scala    From mist   with Apache License 2.0 5 votes vote down vote up
package io.hydrosphere.mist.master.execution.status

import io.hydrosphere.mist.core.MockitoSugar
import io.hydrosphere.mist.master.Messages.StatusMessages.{QueuedEvent, UpdateStatusEvent}
import io.hydrosphere.mist.master.logging.{JobLogger, LogService}
import io.hydrosphere.mist.master.store.JobRepository
import io.hydrosphere.mist.master.{ActorSpec, EventsStreamer, JobDetails, TestData}
import org.mockito.Mockito._
import org.scalatest.concurrent.Eventually
import org.scalatest.time.{Seconds, Span}

class StatusReporterSpec extends ActorSpec("status-reporter") with TestData with Eventually with MockitoSugar {

  it("should flush and stream") {
    val repo = mock[JobRepository]
    when(repo.get(any[String])).thenSuccess(Some(mkDetails(JobDetails.Status.Initialized)))
    when(repo.update(any[JobDetails])).thenSuccess(())
    val logService = mock[LogService]
    when(logService.getJobLogger(any[String]))
      .thenReturn(JobLogger.NOOP)

    val streamer = mock[EventsStreamer]
    val reporter = StatusReporter.reporter(repo, streamer, logService)

    reporter.reportPlain(QueuedEvent("id"))

    verify(streamer).push(any[UpdateStatusEvent])

    eventually(timeout(Span(3, Seconds))) {
      verify(repo).get(any[String])
      verify(repo).update(any[JobDetails])
    }
  }

} 
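Wrapping Mockito's verify in eventually is what makes this test stable: verify throws until the interaction has occurred, and eventually keeps retrying the block within the 3-second budget. The same shape works for any throwing assertion, for example tolerating repeated calls with atLeastOnce():

eventually(timeout(Span(3, Seconds))) {
  // retried until the repository has been hit, or the 3s budget runs out
  verify(repo, atLeastOnce()).update(any[JobDetails])
}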
Example 143
Source File: StoreFlusherSpec.scala    From mist   with Apache License 2.0 5 votes vote down vote up
package io.hydrosphere.mist.master.execution.status

import java.util.concurrent.atomic.AtomicReference

import akka.testkit.TestActorRef
import io.hydrosphere.mist.master.Messages.StatusMessages._
import io.hydrosphere.mist.master.logging.JobLogger
import io.hydrosphere.mist.master.{ActorSpec, JobDetails, TestData}
import mist.api.data.JsNumber
import org.scalatest.concurrent.Eventually
import org.scalatest.time.{Seconds, Span}

import scala.concurrent.{Future, Promise}

class StoreFlusherSpec extends ActorSpec("store-flusher") with TestData with Eventually {

  it("should flush job statuses") {
    val initial1 = Promise[JobDetails]
    val initial2 = Promise[JobDetails]

    val updateResult1 = new AtomicReference[Option[JobDetails]](None)
    val updateResult2 = new AtomicReference[Option[JobDetails]](None)
    val props = StoreFlusher.props(
      get = (id: String) => id match {
        case "1" => initial1.future
        case "2" => initial2.future
      },
      update = (d: JobDetails) => {
        d.jobId match {
          case "1" => updateResult1.set(Some(d))
          case "2" => updateResult2.set(Some(d))
        }
        Future.successful(())
      },
      jobLoggerF = _ => JobLogger.NOOP
    )
    val flusher = TestActorRef(props)

    Seq("1", "2").foreach(id => {
      flusher ! ReportedEvent.plain(QueuedEvent(id))
      flusher ! ReportedEvent.plain(StartedEvent(id, System.currentTimeMillis()))
      flusher ! ReportedEvent.plain(FinishedEvent(id, System.currentTimeMillis(), JsNumber(42)))
    })
    initial1.success(mkDetails(JobDetails.Status.Initialized).copy(jobId = "1"))
    initial2.success(mkDetails(JobDetails.Status.Initialized).copy(jobId = "2"))

    def test(ref: AtomicReference[Option[JobDetails]]): Unit = {
      val value = ref.get
      value.isDefined shouldBe true

      val d = value.get
      d.status shouldBe JobDetails.Status.Finished
    }

    eventually(timeout(Span(3, Seconds))) {
      test(updateResult1)
      test(updateResult2)
    }
  }
} 
Example 144
Source File: WorkerHubSpec.scala    From mist   with Apache License 2.0 5 votes vote down vote up
package io.hydrosphere.mist.master.execution.workers

import akka.actor.ActorRef
import io.hydrosphere.mist.core.{CommonData, MockitoSugar}
import io.hydrosphere.mist.master.TestData
import io.hydrosphere.mist.master.models.ContextConfig
import org.scalatest.concurrent.Eventually
import org.scalatest.time.{Seconds, Span}
import org.scalatest.{FunSpec, Matchers}

import scala.concurrent.duration.Duration
import scala.concurrent.{Await, Future, Promise}

class WorkerHubSpec extends FunSpec with Matchers with TestData with Eventually with MockitoSugar {

  it("should mirror connections") {
    val termination = Promise[Unit]

    val runner = new WorkerRunner {
      override def apply(id: String, ctx: ContextConfig): Future[WorkerConnection] =
        Future.successful(WorkerConnection(id, null, workerLinkData.copy(name = id), termination.future))
    }
    val hub = new WorkerHub(runner, TestConnector.apply)

    val connector = hub.start("id", FooContext)

    Await.result(connector.askConnection(), Duration.Inf)

    eventually(timeout(Span(3, Seconds))) {
      hub.workerConnections().size shouldBe 1
    }
    termination.success(())
    eventually(timeout(Span(3, Seconds))) {
      hub.workerConnections().size shouldBe 0
    }
  }


  case class TestConnector(
    id: String,
    ctx: ContextConfig,
    runner: WorkerRunner) extends WorkerConnector {

    import scala.concurrent.ExecutionContext.Implicits.global

    def askConnection(): Future[PerJobConnection] = runner(id, ctx).map(conn => mock[PerJobConnection])

    def warmUp(): Unit = ()

    def shutdown(force: Boolean): Future[Unit] = ???

    def whenTerminated(): Future[Unit] = ???

    def releaseConnection(connectionId: String): Unit = ???
  }
} 
Example 145
Source File: WorkerRunnerSpec.scala    From mist   with Apache License 2.0 5 votes vote down vote up
package io.hydrosphere.mist.master.execution.workers

import java.util.concurrent.atomic.AtomicBoolean

import io.hydrosphere.mist.core.CommonData.WorkerInitInfo
import io.hydrosphere.mist.core.MockitoSugar
import io.hydrosphere.mist.master.execution.workers.starter.{WorkerProcess, WorkerStarter}
import io.hydrosphere.mist.master.execution.{SpawnSettings, workers}
import io.hydrosphere.mist.master.{ActorSpec, FilteredException, TestData}
import io.hydrosphere.mist.utils.akka.ActorRegHub
import org.mockito.Mockito._
import org.scalatest.concurrent.Eventually
import org.scalatest.time.{Seconds, Span}

import scala.concurrent.duration._
import scala.concurrent.{Await, Future, Promise}

class WorkerRunnerSpec extends ActorSpec("worker-runner") with TestData with MockitoSugar with Eventually {

  describe("default runner") {

    def mkSpawnSettings(starter: WorkerStarter): SpawnSettings = SpawnSettings(
      runnerCmd = starter,
      timeout = 10 seconds,
      readyTimeout = 10 seconds,
      akkaAddress = "akkaAddr",
      logAddress = "logAddr",
      httpAddress = "httpAddr",
      maxArtifactSize = 100L
    )

    it("should run worker") {
      val starter = new WorkerStarter {
        override def onStart(name: String, initInfo: WorkerInitInfo): WorkerProcess = WorkerProcess.NonLocal
        override def stopAction: StopAction = StopAction.Remote
      }
      val regHub = mock[ActorRegHub]
      when(regHub.waitRef(any[String], any[Duration])).thenSuccess(null)

      val termination = Promise[Unit]
      val runner = new workers.WorkerRunner.DefaultRunner(
        spawn = mkSpawnSettings(starter),
        regHub = regHub,
        connect = (_, _, _, _, _) => Future.successful(WorkerConnection("id", null, workerLinkData, termination.future))
      )

      Await.result(runner("id", FooContext), Duration.Inf)
    }

    it("should call onStop if connect was failed") {
      val check = new AtomicBoolean(false)

      val runnerCmd = new WorkerStarter {
        override def onStart(name: String, initInfo: WorkerInitInfo): WorkerProcess = WorkerProcess.NonLocal
        override def stopAction: StopAction = StopAction.CustomFn(_ => check.set(true))
      }

      val regHub = mock[ActorRegHub]
      when(regHub.waitRef(any[String], any[Duration])).thenFailure(FilteredException())

      val runner = new workers.WorkerRunner.DefaultRunner(
        spawn = mkSpawnSettings(runnerCmd),
        regHub = regHub,
        connect = (_, _, _, _, _) => Future.failed(FilteredException())
      )

      intercept[Throwable] {
        Await.result(runner("id", FooContext), Duration.Inf)
      }

      eventually(timeout(Span(3, Seconds))) {
        check.get shouldBe true
      }
    }

  }
} 
Example 146
Source File: PgSpec.scala    From mist   with Apache License 2.0 5 votes vote down vote up
package io.hydrosphere.mist

import com.zaxxer.hikari.HikariConfig
import io.hydrosphere.mist.core.CommonData.{Action, JobParams}
import io.hydrosphere.mist.master.{DbConfig, JobDetails}
import io.hydrosphere.mist.master.JobDetails.Source
import io.hydrosphere.mist.master.store.{HikariDataSourceTransactor, HikariJobRepository, JobRepository, PgJobRequestSql}
import org.scalatest.concurrent.Eventually
import org.scalatest.time.{Seconds, Span}
import org.scalatest.{BeforeAndAfterAll, FunSpec, Matchers}
import mist.api.data.JsMap

import scala.concurrent.{Await, Future}
import scala.concurrent.duration.Duration

class PgSpec extends FunSpec
  with BeforeAndAfterAll
  with Matchers
  with Eventually {
  
  implicit override val patienceConfig = PatienceConfig(timeout = scaled(Span(2, Seconds)), interval = scaled(Span(1, Seconds)))
  
  var pgContainer: TestContainer = _
  var repo: HikariJobRepository = _
  
  override def beforeAll = {
    pgContainer = TestContainer.run(DockerImage("postgres","latest"), Map(5433 -> 5432))
    
    val cfg = DbConfig.JDBCDbConfig(
      10,
      "org.postgresql.Driver",
      "jdbc:postgresql://localhost:5433/postgres",
      Some("postgres"),
      Some("postgres"),
      true
    )
    
    repo = JobRepository.create(cfg) match {
      case Left(e) => throw e
      case Right(r) => r
    }
  }
  override def afterAll = {
    pgContainer.close()
    repo.shutdown()
  }
  
  private def await[A](f: Future[A]): A = Await.result(f, Duration.Inf)
  
  
  it("remove") {
    val details = fixtureJobDetails("id")
    await(repo.remove(details.jobId))
    await(repo.get(details.jobId)) shouldBe None
  }
  
  it("update") {
    val details = fixtureJobDetails("id")
    await(repo.update(details))
    await(repo.get(details.jobId)) shouldBe Some(details)
  }
  
  it("clear") {
    (1 to 10).foreach(i => await(repo.update(fixtureJobDetails(s"jobId $i"))))
    await(repo.clear())
    await(repo.getAll(10, 0)).size shouldBe 0
  }
  
  it("filter by status") {
    (1 to 2).foreach(i => {
      val details = fixtureJobDetails(s"jobId $i", JobDetails.Status.Started)
      await(repo.update(details))
    })
    await(repo.update(fixtureJobDetails("ignore")))
    
    val runningJobs = repo.filteredByStatuses(List(JobDetails.Status.Started))
    await(runningJobs).size shouldBe 2
  }
  
  it("decode failure") {
    val details = fixtureJobDetails("failed").withFailure("Test Error")
    await(repo.update(details))
    await(repo.get("failed")) shouldBe Some(details)
  }
  
  
  // Helper functions
  private def fixtureJobDetails(
    jobId: String,
    status: JobDetails.Status = JobDetails.Status.Initialized): JobDetails = {
    JobDetails(
      params = JobParams("path", "className", JsMap.empty, Action.Execute),
      jobId = jobId,
      source = Source.Http,
      function = "function",
      context = "context",
      externalId = None,
      status = status
    )
  }
} 
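scaled (inherited through Eventually from ScaledTimeSpans) multiplies each Span by ScalaTest's global span-scale factor, so the 2-second timeout above can be stretched uniformly on slow machines. The factor defaults to 1.0; it can be raised per run via the Runner's -F option, or pinned in code inside the suite:

// doubles every scaled Span in this suite, making the PatienceConfig above
// effectively a 4s timeout with a 2s interval
override def spanScaleFactor: Double = 2.0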
Example 147
Source File: ClientStateSpec.scala    From akka-grpc   with Apache License 2.0 5 votes vote down vote up
package akka.grpc.internal

import scala.concurrent.duration._
import scala.concurrent.Promise

import io.grpc.ConnectivityState._

import akka.Done
import akka.actor.ActorSystem
import akka.stream.ActorMaterializer

import org.scalatest.concurrent.{ Eventually, ScalaFutures }
import org.scalatest.BeforeAndAfterAll
import org.scalatest.matchers.should.Matchers
import org.scalatest.wordspec.AnyWordSpec

class ClientStateSpec extends AnyWordSpec with Matchers with ScalaFutures with Eventually with BeforeAndAfterAll {
  implicit val sys = ActorSystem()
  implicit val mat = ActorMaterializer()
  implicit val ec = sys.dispatcher
  implicit val patience = PatienceConfig(timeout = 10.seconds, interval = 150.milliseconds)

  private def clientState(channelCompletion: Promise[Done] = Promise[Done]()) = {
    val channel =
      new InternalChannel(new ChannelUtilsSpec.FakeChannel(Stream(IDLE, CONNECTING, READY)), channelCompletion.future)
    new ClientState(channel)
  }

  "Client State" should {
    "successfully provide a channel" in {
      // given a state
      val state = clientState()
      // it provides a channel when needed
      state.internalChannel should not be null
    }
    "reuse a valid channel" in {
      // given a state
      val state = clientState()
      // it provides a channel when needed
      val c1 = state.internalChannel.managedChannel
      val c2 = state.internalChannel.managedChannel
      c1 should be(c2)
    }
  }

  override def afterAll(): Unit = {
    super.afterAll()
    sys.terminate()
  }
} 
Example 148
Source File: KafkaStreamSuite.scala    From Spark-2.3.1   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.streaming.kafka

import scala.collection.mutable
import scala.concurrent.duration._
import scala.language.postfixOps
import scala.util.Random

import kafka.serializer.StringDecoder
import org.scalatest.BeforeAndAfterAll
import org.scalatest.concurrent.Eventually

import org.apache.spark.{SparkConf, SparkFunSuite}
import org.apache.spark.storage.StorageLevel
import org.apache.spark.streaming.{Milliseconds, StreamingContext}

class KafkaStreamSuite extends SparkFunSuite with Eventually with BeforeAndAfterAll {
  private var ssc: StreamingContext = _
  private var kafkaTestUtils: KafkaTestUtils = _

  override def beforeAll(): Unit = {
    kafkaTestUtils = new KafkaTestUtils
    kafkaTestUtils.setup()
  }

  override def afterAll(): Unit = {
    if (ssc != null) {
      ssc.stop()
      ssc = null
    }

    if (kafkaTestUtils != null) {
      kafkaTestUtils.teardown()
      kafkaTestUtils = null
    }
  }

  test("Kafka input stream") {
    val sparkConf = new SparkConf().setMaster("local[4]").setAppName(this.getClass.getSimpleName)
    ssc = new StreamingContext(sparkConf, Milliseconds(500))
    val topic = "topic1"
    val sent = Map("a" -> 5, "b" -> 3, "c" -> 10)
    kafkaTestUtils.createTopic(topic)
    kafkaTestUtils.sendMessages(topic, sent)

    val kafkaParams = Map("zookeeper.connect" -> kafkaTestUtils.zkAddress,
      "group.id" -> s"test-consumer-${Random.nextInt(10000)}",
      "auto.offset.reset" -> "smallest")

    val stream = KafkaUtils.createStream[String, String, StringDecoder, StringDecoder](
      ssc, kafkaParams, Map(topic -> 1), StorageLevel.MEMORY_ONLY)
    val result = new mutable.HashMap[String, Long]()
    stream.map(_._2).countByValue().foreachRDD { r =>
      r.collect().foreach { kv =>
        result.synchronized {
          val count = result.getOrElseUpdate(kv._1, 0) + kv._2
          result.put(kv._1, count)
        }
      }
    }

    ssc.start()

    eventually(timeout(10000 milliseconds), interval(100 milliseconds)) {
      assert(result.synchronized { sent === result })
    }
    ssc.stop()
  }
} 
Example 149
Source File: SharedSparkSession.scala    From Spark-2.3.1   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.sql.test

import scala.concurrent.duration._

import org.scalatest.{BeforeAndAfterEach, Suite}
import org.scalatest.concurrent.Eventually

import org.apache.spark.{DebugFilesystem, SparkConf}
import org.apache.spark.sql.{SparkSession, SQLContext}
import org.apache.spark.sql.internal.SQLConf


  // NOTE: this listing is truncated; the enclosing SharedSparkSession trait
  // declaration and its `protected var _spark: SparkSession` field were
  // dropped by the scraper, leaving only the lifecycle overrides below.
  protected override def afterAll(): Unit = {
    try {
      super.afterAll()
    } finally {
      try {
        if (_spark != null) {
          try {
            _spark.sessionState.catalog.reset()
          } finally {
            _spark.stop()
            _spark = null
          }
        }
      } finally {
        SparkSession.clearActiveSession()
        SparkSession.clearDefaultSession()
      }
    }
  }

  protected override def beforeEach(): Unit = {
    super.beforeEach()
    DebugFilesystem.clearOpenStreams()
  }

  protected override def afterEach(): Unit = {
    super.afterEach()
    // Clear all persistent datasets after each test
    spark.sharedState.cacheManager.clearCache()
    // files can be closed from other threads, so wait a bit
    // normally this doesn't take more than 1s
    eventually(timeout(10.seconds), interval(2.seconds)) {
      DebugFilesystem.assertNoOpenStreams()
    }
  }
} 
Example 150
Source File: ElasticsearchTestServer.scala    From flink-elasticsearch-source-connector   with Apache License 2.0 5 votes vote down vote up
package com.mnubo.flink.streaming.connectors
package elasticsearch

import org.scalatest.concurrent.Eventually
import org.scalatest.time.SpanSugar

import scala.sys.process._
import scala.util.Try

class ElasticsearchTestServer(version: String, isClusterGreen: (String, Int) => Boolean) extends AutoCloseable with Eventually with SpanSugar {
  private val hasRecoveredIndicesStateRegex = """recovered \[\d+\] indices into cluster_state""".r
  val host = {
    val hostVar = System.getenv("DOCKER_HOST")
    if (hostVar != null)
      """\d+\.[0-9\.]+""".r
        .findFirstIn(hostVar)
        .getOrElse("127.0.0.1")
    else
      "127.0.0.1"
  }
  val containerId = s"docker run -d -P elasticsearch:$version --network.publish_host $host".!!.trim
  val httpPort = esPort(9200)
  val esTransportPort = esPort(9300)

  eventually(timeout(20.seconds), interval(500.millis)) {
    require(hasRecoveredIndicesState && isClusterGreen(host, esTransportPort), "ES still not started...")
  }

  override def close() = {
    Try(s"docker stop $containerId".!)
    Try(s"docker rm $containerId".!)
  }

  private def hasRecoveredIndicesState = {
    val logs = s"docker logs $containerId".!!
    hasRecoveredIndicesStateRegex.findFirstIn(logs).isDefined
  }

  private def esPort(exposedPort: Int) = Seq(
    "docker",
    "inspect",
    s"""--format='{{(index (index .NetworkSettings.Ports "$exposedPort/tcp") 0).HostPort}}'""",
    containerId
  ).!!.trim.toInt
} 
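As this helper shows, Eventually is not tied to a Suite: any class can mix it in (here together with SpanSugar for the duration literals) and poll in its constructor. For one-off use, the companion object works just as well. A sketch with a hypothetical portOpen probe:

import org.scalatest.concurrent.Eventually.{eventually, interval, timeout}
import org.scalatest.time.SpanSugar._

def awaitListening(portOpen: () => Boolean): Unit =
  eventually(timeout(20.seconds), interval(500.millis)) {
    require(portOpen(), "server still not listening")
  }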
Example 151
Source File: MQTTStreamSuite.scala    From BigDatalog   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.streaming.mqtt

import scala.concurrent.duration._
import scala.language.postfixOps

import org.scalatest.BeforeAndAfter
import org.scalatest.concurrent.Eventually

import org.apache.spark.{SparkConf, SparkFunSuite}
import org.apache.spark.storage.StorageLevel
import org.apache.spark.streaming.{Milliseconds, StreamingContext}

class MQTTStreamSuite extends SparkFunSuite with Eventually with BeforeAndAfter {

  private val batchDuration = Milliseconds(500)
  private val master = "local[2]"
  private val framework = this.getClass.getSimpleName
  private val topic = "def"

  private var ssc: StreamingContext = _
  private var mqttTestUtils: MQTTTestUtils = _

  before {
    ssc = new StreamingContext(master, framework, batchDuration)
    mqttTestUtils = new MQTTTestUtils
    mqttTestUtils.setup()
  }

  after {
    if (ssc != null) {
      ssc.stop()
      ssc = null
    }
    if (mqttTestUtils != null) {
      mqttTestUtils.teardown()
      mqttTestUtils = null
    }
  }

  test("mqtt input stream") {
    val sendMessage = "MQTT demo for spark streaming"
    val receiveStream = MQTTUtils.createStream(ssc, "tcp://" + mqttTestUtils.brokerUri, topic,
      StorageLevel.MEMORY_ONLY)

    @volatile var receiveMessage: List[String] = List()
    receiveStream.foreachRDD { rdd =>
      if (rdd.collect.length > 0) {
        receiveMessage = receiveMessage ::: List(rdd.first)
        receiveMessage
      }
    }

    ssc.start()

    // Retry it because we don't know when the receiver will start.
    eventually(timeout(10000 milliseconds), interval(100 milliseconds)) {
      mqttTestUtils.publishData(topic, sendMessage)
      assert(sendMessage.equals(receiveMessage(0)))
    }
    ssc.stop()
  }
} 
Example 152
Source File: RegionSpec.scala    From affinity   with Apache License 2.0 5 votes vote down vote up
package io.amient.affinity.core.actor

import akka.actor.{ActorPath, ActorSystem, PoisonPill, Props}
import akka.util.Timeout
import com.typesafe.config.ConfigFactory
import io.amient.affinity.AffinityActorSystem
import io.amient.affinity.core.cluster.Coordinator
import org.scalatest.concurrent.{Eventually, IntegrationPatience}
import org.scalatest.{Matchers, WordSpecLike}

import scala.concurrent.duration._
import scala.language.postfixOps


class RegionSpec extends WordSpecLike with Matchers with Eventually with IntegrationPatience {

  val system: ActorSystem = AffinityActorSystem.create(ConfigFactory.load("regionspec"))

  val testPartition = Props(new Partition {
    override def preStart(): Unit = {
      Thread.sleep(100)
      super.preStart()
    }

    override def handle: Receive = {
      case _: IllegalStateException => context.stop(self)
      case _ =>
    }
  })


  "A Region Actor" must {
    "must keep Coordinator Updated during partition failure & restart scenario" in {
      //      val zk = new EmbeddedZookeperServer {}
      try {
        val coordinator = Coordinator.create(system, "region")
        try {
          val d = 1 second
          implicit val timeout = Timeout(d)

          val region = system.actorOf(Props(new Container("region") {
            val partitions = List(0, 1, 2, 3)
            for (partition <- partitions) {
              context.actorOf(testPartition, name = partition.toString)
            }
          }), name = "region")
          eventually {
            coordinator.members.size should be(4)
          }

          //first stop Partition explicitly - it shouldn't be restarted
          import system.dispatcher
          system.actorSelection(ActorPath.fromString(coordinator.members.head._2)).resolveOne.foreach {
            case actorRef => system.stop(actorRef)
          }
          eventually {
            coordinator.members.size should be(3)
          }

          //now simulate error in one of the partitions
          val partitionToFail = coordinator.members.head._2
          system.actorSelection(ActorPath.fromString(partitionToFail)).resolveOne.foreach {
            case actorRef => actorRef ! new IllegalStateException("Exception expected by the Test")
          }
          eventually {
            coordinator.members.size should be(2)
          }
          eventually {
            coordinator.members should not contain (partitionToFail)
          }

          region ! PoisonPill

        } finally {
          coordinator.close
        }
      } finally {
        //        zk.close()
      }
    }
  }

}

class RegionSpecPartition extends Partition {
  override def preStart(): Unit = {
    Thread.sleep(100)
    super.preStart()
  }

  override def handle: Receive = {
    case _: IllegalStateException => context.stop(self)
    case _ =>
  }
} 
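IntegrationPatience, mixed in above, replaces the default patience (150 ms timeout, 15 ms interval) with one suited to integration tests; per ScalaTest's documentation it is equivalent to the override below, so the bare eventually { ... } blocks in this spec poll for up to 15 scaled seconds:

import org.scalatest.time.{Millis, Seconds, Span}

implicit override val patienceConfig: PatienceConfig =
  PatienceConfig(timeout = scaled(Span(15, Seconds)), interval = scaled(Span(150, Millis)))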
Example 153
Source File: BroadcastBufferCommitOrderSpec.scala    From squbs   with Apache License 2.0 5 votes vote down vote up
package org.squbs.pattern.stream

import akka.actor.ActorSystem
import akka.stream.{ClosedShape, ActorMaterializer}
import akka.stream.scaladsl.{GraphDSL, RunnableGraph}
import com.typesafe.config.ConfigFactory
import org.scalatest.concurrent.Eventually
import org.scalatest.{BeforeAndAfterAll, Matchers, FlatSpec}
import org.squbs.testkit.Timeouts._

import scala.concurrent.Await

class BroadcastBufferCommitOrderSpec extends FlatSpec with Matchers with BeforeAndAfterAll with Eventually {

  implicit val system = ActorSystem("BroadcastBufferCommitOrderSpec", PersistentBufferSpec.testConfig)
  implicit val mat = ActorMaterializer()
  implicit val serializer = QueueSerializer[Int]()
  import StreamSpecUtil._

  override def afterAll = {
    Await.ready(system.terminate(), awaitMax)
  }

  it should "fail when an out of order commit is attempted and commit-order-policy = strict" in {
    val util = new StreamSpecUtil[Int, Event[Int]](2)
    import util._
    val buffer = BroadcastBufferAtLeastOnce[Int](ConfigFactory.parseString("commit-order-policy = strict").withFallback(config))
    val streamGraph = RunnableGraph.fromGraph(GraphDSL.create(flowCounter) { implicit builder =>
      sink =>
        import GraphDSL.Implicits._
        val commit = buffer.commit[Int]
        val bcBuffer = builder.add(buffer.async)
        val mr = builder.add(merge)
        in ~> bcBuffer ~> filterARandomElement ~> commit ~> mr ~> sink
        bcBuffer ~> commit ~> mr
        ClosedShape
    })
    val sinkF = streamGraph.run()
    Await.result(sinkF.failed, awaitMax) shouldBe an[CommitOrderException]
    clean()
  }

  it should "not fail when an out of order commit is attempted and commit-order-policy = lenient" in {
    val util = new StreamSpecUtil[Int, Event[Int]](2)
    import util._
    val buffer = BroadcastBufferAtLeastOnce[Int](ConfigFactory.parseString("commit-order-policy = lenient").withFallback(config))
    val streamGraph = RunnableGraph.fromGraph(GraphDSL.create(flowCounter) { implicit builder =>
      sink =>
        import GraphDSL.Implicits._
        val commit = buffer.commit[Int]
        val bcBuffer = builder.add(buffer.async)
        val mr = builder.add(merge)
        in ~> bcBuffer ~> filterARandomElement ~> commit ~> mr ~> sink
        bcBuffer ~> commit ~> mr
        ClosedShape
    })

    val countFuture = streamGraph.run()
    val count = Await.result(countFuture, awaitMax)
    eventually { buffer.queue shouldBe 'closed }
    count shouldBe (elementCount * outputPorts - 1)
    println(s"Total records processed $count")

    clean()
  }
} 
Example 154
Source File: PersistentBufferCommitOrderSpec.scala    From squbs   with Apache License 2.0 5 votes vote down vote up
package org.squbs.pattern.stream

import akka.actor.ActorSystem
import akka.stream.scaladsl.{GraphDSL, RunnableGraph}
import akka.stream.{ActorMaterializer, ClosedShape}
import com.typesafe.config.ConfigFactory
import org.scalatest.concurrent.Eventually
import org.scalatest.{BeforeAndAfterAll, FlatSpec, Matchers}
import org.squbs.testkit.Timeouts._

import scala.concurrent.Await

class PersistentBufferCommitOrderSpec extends FlatSpec with Matchers with BeforeAndAfterAll with Eventually {

  implicit val system = ActorSystem("PersistentBufferCommitOrderSpec", PersistentBufferSpec.testConfig)
  implicit val mat = ActorMaterializer()
  implicit val serializer = QueueSerializer[Int]()
  import StreamSpecUtil._

  override def afterAll(): Unit = {
    Await.ready(system.terminate(), awaitMax)
  }

  it should "fail when an out of order commit is attempted and commit-order-policy = strict" in {
    val util = new StreamSpecUtil[Int, Event[Int]]
    import util._
    val buffer = PersistentBufferAtLeastOnce[Int](ConfigFactory.parseString("commit-order-policy = strict").withFallback(config))
    val commit = buffer.commit[Int]

    val streamGraph = RunnableGraph.fromGraph(GraphDSL.create(flowCounter) { implicit builder =>
      sink =>
        import GraphDSL.Implicits._
        in ~> buffer.async ~> filterARandomElement ~> commit ~> sink
        ClosedShape
    })
    val sinkF = streamGraph.run()
    Await.result(sinkF.failed, awaitMax) shouldBe a[CommitOrderException]
    clean()
  }

  it should "not fail when an out of order commit is attempted and commit-order-policy = lenient" in {
    val util = new StreamSpecUtil[Int, Event[Int]]
    import util._
    val buffer = PersistentBufferAtLeastOnce[Int](ConfigFactory.parseString("commit-order-policy = lenient").withFallback(config))
    val commit = buffer.commit[Int]

    val streamGraph = RunnableGraph.fromGraph(GraphDSL.create(flowCounter) { implicit builder =>
      sink =>
        import GraphDSL.Implicits._
        in ~> buffer.async ~> filterARandomElement ~> commit ~> sink
        ClosedShape
    })

    val countFuture = streamGraph.run()
    val count = Await.result(countFuture, awaitMax)
    count shouldBe elementCount - 1
    eventually { buffer.queue shouldBe 'closed }

    clean()
  }
} 
Example 155
Source File: CloudSuite.scala    From cloud-integration   with Apache License 2.0
package com.cloudera.spark.cloud.common

import java.io.{File, FileNotFoundException}

import com.cloudera.spark.cloud.s3.{S3ACommitterConstants, S3AConstants}
import org.apache.hadoop.conf.Configuration
import org.scalatest.concurrent.Eventually
import org.scalatest.{BeforeAndAfter, FunSuite}

import org.apache.spark.LocalSparkContext
import org.apache.spark.internal.Logging


  def loadConfiguration(): Configuration = {
    val config = new Configuration(true)
    getKnownSysprop(SYSPROP_CLOUD_TEST_CONFIGURATION_FILE).foreach { filename =>
      logDebug(s"Configuration property = `$filename`")
      val f = new File(filename)
      if (f.exists()) {
        // unsynced, but it's only a log statement
        if (!configLogged) {
          configLogged = true
          logInfo(s"Loading configuration from $f")
        }
        config.addResource(f.toURI.toURL)
      } else {
        throw new FileNotFoundException(s"No file '$filename'" +
          s" declared in property $SYSPROP_CLOUD_TEST_CONFIGURATION_FILE")
      }
    }
    overlayConfiguration(
      config,
      Seq(
        HIVE_TESTS_DISABLED,
        METADATASTORE_AUTHORITATIVE,
        REQUIRED_HADOOP_VERSION,
        SCALE_TEST_ENABLED,
        SCALE_TEST_SIZE_FACTOR,
        S3A_CLIENT_FACTORY_IMPL,
        S3A_COMMITTER_TEST_ENABLED,
        S3A_ENCRYPTION_KEY_1,
        S3A_ENCRYPTION_KEY_2,
        S3A_METADATA_STORE_IMPL,
        S3GUARD_IMPLEMENTATION,
        S3GUARD_TEST_ENABLED
      )
    )

    // setup the committer from any property passed in
    getKnownSysprop(S3A_COMMITTER_NAME).foreach(committer => {
      val binding = S3ACommitterConstants.COMMITTERS_BY_NAME(committer.toLowerCase())
      binding.bind(config)
      logInfo(s"Using committer binding $binding")
    })
    config
  }

} 
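The configLogged guard above is deliberately unsynchronised, so two threads could in principle both log the message. If exactly-once logging mattered, AtomicBoolean.compareAndSet makes the check race-free; a sketch, with logInfo standing in for the enclosing suite's logger:

import java.io.File
import java.util.concurrent.atomic.AtomicBoolean

private val configLogged = new AtomicBoolean(false)

def logConfigOnce(f: File): Unit = {
  // compareAndSet(false, true) succeeds for exactly one caller
  if (configLogged.compareAndSet(false, true)) {
    logInfo(s"Loading configuration from $f") // logInfo assumed from the enclosing suite
  }
}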
Example 156
Source File: IntegrationBeforeAndAfterAll.scala    From cosmos   with Apache License 2.0
package com.mesosphere.cosmos

import com.google.common.io.CharStreams
import com.mesosphere.cosmos.circe.Decoders.parse
import com.mesosphere.cosmos.http.CosmosRequests
import com.mesosphere.cosmos.test.CosmosIntegrationTestClient.CosmosClient
import com.mesosphere.cosmos.thirdparty.marathon.model.AppId
import io.lemonlabs.uri.dsl._
import com.twitter.finagle.http.Status
import io.circe.jawn.decode
import java.io.InputStreamReader
import org.scalatest.Assertion
import org.scalatest.BeforeAndAfterAll
import org.scalatest.Suite
import org.scalatest.concurrent.Eventually
import scala.concurrent.duration._

trait IntegrationBeforeAndAfterAll extends BeforeAndAfterAll with Eventually { this: Suite =>

  private[this] lazy val logger = org.slf4j.LoggerFactory.getLogger(getClass)

  private[this] val universeUri = "https://downloads.mesosphere.com/universe/02493e40f8564a39446d06c002f8dcc8e7f6d61f/repo-up-to-1.8.json"
  private[this] val universeConverterUri = "https://universe-converter.mesosphere.com/transform?url=" + universeUri

  override def beforeAll(): Unit = {
    Requests.deleteRepository(Some("Universe"))

    val customPkgMgrResource = s"/${ItObjects.customManagerAppName}.json"

    logger.info(s"Creating marathon app from $customPkgMgrResource")
    Requests
      .postMarathonApp(
        parse(
          Option(this.getClass.getResourceAsStream(customPkgMgrResource)) match {
            case Some(is) =>
              CharStreams.toString(new InputStreamReader(is))
            case _ =>
              throw new IllegalStateException(s"Unable to load classpath resource: $customPkgMgrResource")
          }
        ).toOption.get.asObject.get
      )
    Requests.waitForDeployments()

    Requests.addRepository(
      "Universe",
      universeConverterUri,
      Some(0)
    )

    Requests.addRepository(
      "V5Testpackage",
      ItObjects.V5TestPackage,
      Some(0)
    )

    Requests.addRepository(
      "V4TestUniverse",
      ItObjects.V4TestUniverseConverterURI,
      Some(0)
    )

    // This package is present only in V4TestUniverse and this method ensures that the
    // package collection cache is cleared before starting the integration tests
    val _ = waitUntilCacheReloads()
  }

  override def afterAll(): Unit = {
    Requests.deleteRepository(Some("V4TestUniverse"))
    Requests.deleteRepository(Some("V5Testpackage"))
    val customMgrAppId = AppId(ItObjects.customManagerAppName)
    Requests.deleteMarathonApp(customMgrAppId)
    Requests.waitForMarathonAppToDisappear(customMgrAppId)
    Requests.deleteRepository(None, Some(universeConverterUri))
    val _ = Requests.addRepository("Universe", "https://universe.mesosphere.com/repo")
  }

  private[this] def waitUntilCacheReloads(): Assertion = {
    val packageName = "helloworld-invalid"
    eventually(timeout(2.minutes), interval(10.seconds)) {
      val response = CosmosClient.submit(
        CosmosRequests.packageDescribeV3(rpc.v1.model.DescribeRequest(packageName, None))
      )
      assertResult(Status.Ok)(response.status)
      val Right(actualResponse) = decode[rpc.v3.model.DescribeResponse](response.contentString)
      assert(actualResponse.`package`.name == packageName)
    }
  }
} 
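Note that decode returns an Either[io.circe.Error, A], so the `val Right(actualResponse) = ...` extractor above throws a bare MatchError if the payload is malformed. A total match fails with a clearer message; a sketch against the same response model:

decode[rpc.v3.model.DescribeResponse](response.contentString) match {
  case Right(describeResponse) =>
    assert(describeResponse.`package`.name == packageName)
  case Left(error) =>
    fail(s"could not decode describe response: $error")
}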
Example 157
Source File: SharedSparkSession.scala    From gimel   with Apache License 2.0
package com.paypal.gimel.sql

import org.apache.spark.SparkConf
import org.apache.spark.sql.{SparkSession, SQLContext}
import org.apache.spark.sql.internal.SQLConf
import org.scalatest.{BeforeAndAfterAll, BeforeAndAfterEach, FunSpec, Suite}
import org.scalatest.concurrent.Eventually

trait SharedSparkSession
    extends FunSpec
    with BeforeAndAfterEach
    with BeforeAndAfterAll
    with Eventually { self: Suite =>

  
  protected override def afterEach(): Unit = {
    super.afterEach()
    // Clear all persistent datasets after each test
    spark.sharedState.cacheManager.clearCache()
  }
} 
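The body of this trait is elided in the excerpt, including the spark member that afterEach uses. A typical shared-session trait builds the session in beforeAll and stops it in afterAll; a rough sketch, with all names assumed:

import org.apache.spark.sql.SparkSession
import org.scalatest.{BeforeAndAfterAll, Suite}

trait LocalSparkSession extends BeforeAndAfterAll { self: Suite =>
  @transient protected var spark: SparkSession = _

  override def beforeAll(): Unit = {
    super.beforeAll()
    spark = SparkSession.builder()
      .master("local[2]") // small local executor pool for tests
      .appName(suiteName)
      .getOrCreate()
  }

  override def afterAll(): Unit = {
    try {
      if (spark != null) spark.stop()
      spark = null
    } finally {
      super.afterAll()
    }
  }
}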
Example 158
Source File: SharedSparkSession.scala    From gimel   with Apache License 2.0
package com.paypal.gimel.common.utilities.spark

import org.apache.spark.SparkConf
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{DataFrame, SparkSession, SQLContext}
import org.apache.spark.sql.internal.SQLConf
import org.scalatest.{BeforeAndAfterAll, BeforeAndAfterEach, Suite}
import org.scalatest.concurrent.Eventually

trait SharedSparkSession
    extends BeforeAndAfterEach
    with BeforeAndAfterAll
    with Eventually { self: Suite =>

  protected val additionalConfig: Map[String, String] = Map.empty

  
  protected override def afterEach(): Unit = {
    super.afterEach()
    // Clear all persistent datasets after each test
    spark.sharedState.cacheManager.clearCache()
  }

  // Mocks data for testing
  def mockDataInDataFrame(numberOfRows: Int): DataFrame = {
    def stringed(n: Int) = s"""{"id": "$n","name": "MAC-$n", "address": "MAC-${n + 1}", "age": "${n + 1}", "company": "MAC-$n", "designation": "MAC-$n", "salary": "${n * 10000}" }"""
    val texts: Seq[String] = (1 to numberOfRows).map { x => stringed(x) }
    val rdd: RDD[String] = spark.sparkContext.parallelize(texts)
    val dataFrame: DataFrame = spark.read.json(rdd)
    dataFrame
  }
} 
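A usage sketch for the mock-data helper inside a test body; the column names come from the JSON template above:

val df = mockDataInDataFrame(5)
assert(df.count() == 5)             // five JSON strings were parsed into rows
assert(df.columns.contains("name")) // schema is inferred from the template
df.show()                           // prints the generated rows for inspection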
Example 159
Source File: AkkaHttpWebsocketTest.scala    From sttp   with Apache License 2.0
package sttp.client.akkahttp

import java.util.concurrent.ConcurrentLinkedQueue

import akka.Done
import akka.http.scaladsl.model.ws.{Message, TextMessage}
import akka.stream.Materializer
import akka.stream.scaladsl._
import org.scalatest.BeforeAndAfterAll
import org.scalatest.concurrent.{Eventually, IntegrationPatience}
import sttp.client._

import scala.collection.JavaConverters._
import scala.concurrent.duration._
import scala.concurrent.{ExecutionContext, Future, Promise}
import scala.util.Success
import org.scalatest.flatspec.AsyncFlatSpec
import org.scalatest.matchers.should.Matchers
import sttp.client.testing.HttpTest.wsEndpoint

class AkkaHttpWebsocketTest
    extends AsyncFlatSpec
    with Matchers
    with BeforeAndAfterAll
    with Eventually
    with IntegrationPatience {
  implicit val ec: ExecutionContext = scala.concurrent.ExecutionContext.global
  implicit val backend: SttpBackend[Future, Nothing, Flow[Message, Message, *]] = AkkaHttpBackend()

  it should "send and receive ten messages" in {
    val received = new ConcurrentLinkedQueue[String]()

    val sink: Sink[Message, Future[Done]] = collectionSink(received)

    val n = 10
    val source: Source[Message, Promise[Option[Message]]] =
      Source((1 to n).map(i => TextMessage(s"test$i"))).concatMat(Source.maybe[Message])(Keep.right)

    val flow: Flow[Message, Message, (Future[Done], Promise[Option[Message]])] =
      Flow.fromSinkAndSourceMat(sink, source)(Keep.both)

    basicRequest.get(uri"$wsEndpoint/ws/echo").openWebsocket(flow).flatMap { r =>
      eventually {
        received.asScala.toList shouldBe (1 to n).map(i => s"echo: test$i").toList
      }

      r.result._2.complete(Success(None)) // the source should now complete
      r.result._1.map(_ => succeed) // the future should be completed once the stream completes (and the ws closes)
    }
  }

  it should "receive two messages" in {
    val received = new ConcurrentLinkedQueue[String]()
    val sink: Sink[Message, Future[Done]] = collectionSink(received)
    val source: Source[Message, Promise[Option[Message]]] = Source.maybe[Message]

    val flow: Flow[Message, Message, Promise[Option[Message]]] =
      Flow.fromSinkAndSourceMat(sink, source)(Keep.right)

    basicRequest.get(uri"$wsEndpoint/ws/send_and_wait").openWebsocket(flow).flatMap { r =>
      eventually {
        received.asScala.toList shouldBe List("test10", "test20")
      }
      r.result.success(None) // closing
      succeed
    }
  }

  it should "error if the endpoint is not a websocket" in {
    basicRequest.get(uri"$wsEndpoint/echo").openWebsocket(Flow.apply[Message]).failed.map { t =>
      t shouldBe a[NotAWebsocketException]
    }
  }

  def collectionSink(queue: ConcurrentLinkedQueue[String]): Sink[Message, Future[Done]] =
    Sink
      .setup[Message, Future[Done]] { (_materializer, _) =>
        Flow[Message]
          // mapping with parallelism 1 so that messages don't get reordered
          .mapAsync(1) {
            case m: TextMessage =>
              implicit val materializer: Materializer = _materializer
              m.toStrict(1.second).map(Some(_))
            case _ => Future.successful(None)
          }
          .collect {
            case Some(TextMessage.Strict(text)) => text
          }
          .toMat(Sink.foreach(queue.add))(Keep.right)
      }
      .mapMaterializedValue(_.flatMap(identity))

  override protected def afterAll(): Unit = {
    backend.close()
    super.afterAll()
  }
} 
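Both tests above keep the websocket open by ending the outgoing stream with Source.maybe, whose materialized Promise[Option[Message]] is later completed with None to close it. The same trick in isolation, as a sketch:

import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.{Keep, Sink, Source}

implicit val system = ActorSystem("maybe-sketch")
implicit val mat = ActorMaterializer()

// Source.maybe materializes a Promise[Option[T]]; the stream stays open until
// the promise is completed (Some(t) emits one final element, None just completes)
val (promise, done) = Source.maybe[Int]
  .toMat(Sink.foreach(println))(Keep.both)
  .run()

promise.success(None) // complete the source, letting the stream finish
done.onComplete(_ => system.terminate())(system.dispatcher)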
Example 160
Source File: LowLevelListenerWebSocketTest.scala    From sttp   with Apache License 2.0
package sttp.client.testing.websocket

import java.util.concurrent.ConcurrentLinkedQueue

import org.scalatest.concurrent.{Eventually, IntegrationPatience}
import org.scalatest.{Assertion, BeforeAndAfterAll}
import sttp.client._
import sttp.client.monad.MonadError
import sttp.client.testing.{ConvertToFuture, ToFutureWrapper}
import sttp.client.monad.syntax._

import scala.collection.JavaConverters._
import org.scalatest.SuiteMixin
import org.scalatest.flatspec.AsyncFlatSpecLike
import org.scalatest.matchers.should.Matchers
import sttp.client.testing.HttpTest.wsEndpoint

// TODO: change to `extends AsyncFlatSpec` when https://github.com/scalatest/scalatest/issues/1802 is fixed
trait LowLevelListenerWebSocketTest[F[_], WS, WS_HANDLER[_]]
    extends SuiteMixin
    with AsyncFlatSpecLike
    with Matchers
    with BeforeAndAfterAll
    with ToFutureWrapper
    with Eventually
    with IntegrationPatience {

  implicit def backend: SttpBackend[F, Nothing, WS_HANDLER]
  implicit def convertToFuture: ConvertToFuture[F]
  private implicit lazy val monad: MonadError[F] = backend.responseMonad
  def testErrorWhenEndpointIsNotWebsocket: Boolean = true
  def createHandler(onTextFrame: String => Unit): WS_HANDLER[WS]
  def sendText(ws: WS, t: String): Unit
  def sendCloseFrame(ws: WS): Unit

  it should "send and receive ten messages" in {
    val n = 10
    val received = new ConcurrentLinkedQueue[String]()
    basicRequest
      .get(uri"$wsEndpoint/ws/echo")
      .openWebsocket(createHandler(received.add))
      .map { response =>
        (1 to n).foreach { i =>
          val msg = s"test$i"
          info(s"Sending text message: $msg")
          sendText(response.result, msg)
        }
        eventually {
          received.asScala.toList shouldBe (1 to n).map(i => s"echo: test$i").toList
        }
        sendCloseFrame(response.result)
        succeed
      }
      .toFuture()
  }

  it should "receive two messages" in {
    val received = new ConcurrentLinkedQueue[String]()
    basicRequest
      .get(uri"$wsEndpoint/ws/send_and_wait")
      .openWebsocket(createHandler(received.add))
      .map { response =>
        eventually {
          received.asScala.toList shouldBe List("test10", "test20")
        }
        sendCloseFrame(response.result)
        succeed
      }
      .toFuture()
  }
  if (testErrorWhenEndpointIsNotWebsocket) {
    it should "error if the endpoint is not a websocket" in {
      monad
        .handleError(
          basicRequest
            .get(uri"$wsEndpoint/echo")
            .openWebsocket(createHandler(_ => ()))
            .map(_ => fail("An exception should be thrown"): Assertion)
        ) {
          case e => (e shouldBe a[SttpClientException.ReadException]).unit
        }
        .toFuture()
    }
  }

  override protected def afterAll(): Unit = {
    backend.close().toFuture()
    super.afterAll()
  }
} 
Example 161
Source File: AkkaKubernetesSpec.scala    From akka-kubernetes-tests   with Apache License 2.0
package akka.kubernetes.sample

import akka.actor.ActorSystem
import akka.http.scaladsl.Http
import akka.http.scaladsl.model.{HttpRequest, StatusCodes}
import akka.http.scaladsl.unmarshalling.Unmarshal
import akka.management.cluster.{ClusterHttpManagementJsonProtocol, ClusterMembers}
import akka.stream.ActorMaterializer
import org.scalatest.concurrent.{Eventually, ScalaFutures}
import org.scalatest.time.{Seconds, Span}
import org.scalatest.{BeforeAndAfterAll, Matchers, WordSpec}

class AkkaKubernetesSpec extends WordSpec with BeforeAndAfterAll with ScalaFutures with Matchers with ClusterHttpManagementJsonProtocol with Eventually {
  implicit val system = ActorSystem()
  implicit val materializer = ActorMaterializer()

  implicit override val patienceConfig = PatienceConfig(timeout = Span(60, Seconds), interval = Span(2, Seconds))

  val target = system.settings.config.getString("akka.k8s.target")
  val clusterSize = system.settings.config.getInt("akka.k8s.cluster-size")
  val deployedVersion = system.settings.config.getString("akka.k8s.deployment-version")

  val log = system.log

  log.info("Running with target {} clusterSize {} version {}", target, clusterSize, deployedVersion)

  "Version deployed" should {
    "should have been updated" in {
      eventually {
        val response = Http().singleRequest(HttpRequest(uri = s"$target/version")).futureValue
        val reportedVersion = Unmarshal(response.entity).to[String].futureValue
        log.info("Reported version is: {}", reportedVersion)
        reportedVersion shouldEqual deployedVersion
      }
    }
  }

  "Cluster formation" should {

    "work" in {
      eventually {
        val response = Http().singleRequest(HttpRequest(uri = s"$target/cluster/members")).futureValue
        response.status shouldEqual StatusCodes.OK

        val clusterMembers: ClusterMembers = Unmarshal(response).to[ClusterMembers].futureValue
        withClue("Latest response: " + clusterMembers) {
          clusterMembers.members.size shouldEqual clusterSize
          clusterMembers.unreachable shouldEqual Seq.empty
        }
        log.info("Current cluster members: {}", clusterMembers)
      }
    }
  }

  "Akka Boss (singleton)" should {

    "say hello" in {
      val response = Http().singleRequest(HttpRequest(uri = s"$target/boss")).futureValue
      response.status shouldEqual StatusCodes.OK
    }
  }

  "Akka members (sharding)" should {
    "do some work" in {
      val response = Http().singleRequest(HttpRequest(uri = s"$target/team-member/johan")).futureValue
      response.status shouldEqual StatusCodes.OK
    }
  }

  override protected def afterAll(): Unit = {
    system.terminate()
  }
} 
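The .futureValue calls above come from ScalaFutures and honour the same overridden patienceConfig. When several assertions need the result, whenReady is the callback-style equivalent; a sketch against the same endpoint, runnable inside this spec:

whenReady(Http().singleRequest(HttpRequest(uri = s"$target/version"))) { response =>
  response.status shouldEqual StatusCodes.OK
}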
Example 162
Source File: ServiceSpec.scala    From skuber   with Apache License 2.0
package skuber

import skuber.json.format.serviceFmt
import org.scalatest.Matchers
import org.scalatest.concurrent.Eventually

import scala.concurrent.Await
import scala.concurrent.duration._
import scala.util.{Failure, Random, Success}

class ServiceSpec extends K8SFixture with Eventually with Matchers {
  val nginxServiceName: String = Random.alphanumeric.filter(_.isLetter).take(20).mkString.toLowerCase

  behavior of "Service"

  it should "create a service" in { k8s =>
    k8s.create(getService(nginxServiceName)) map { p =>
      assert(p.name == nginxServiceName)
    }
  }

  it should "get the newly created service" in { k8s =>
    k8s.get[Service](nginxServiceName) map { d =>
      assert(d.name == nginxServiceName)
      // Default ServiceType is ClusterIP
      assert(d.spec.map(_._type) == Option(Service.Type.ClusterIP))
    }
  }

  it should "delete a service" in { k8s =>
    k8s.delete[Service](nginxServiceName).map { _ =>
      eventually(timeout(100.seconds), interval(3.seconds)) {
        val retrieveService = k8s.get[Service](nginxServiceName)
        val serviceRetrieved = Await.ready(retrieveService, 2.seconds).value.get
        serviceRetrieved match {
          case s: Success[_] => assert(false)
          case Failure(ex) => ex match {
            case ex: K8SException if ex.status.code.contains(404) => assert(true)
            case _ => assert(false)
          }
        }
      }
    }
  }

  def getService(name: String): Service = {
    val spec: Service.Spec = Service.Spec(ports = List(Service.Port(port = 80)), selector = Map("app" -> "nginx"))
    Service(name, spec)
  }
} 
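The Success/Failure ladder in the delete test can be flattened with a single Try and a guard; a sketch of the same poll-until-404 loop, runnable inside this suite:

import scala.util.{Failure, Try}

eventually(timeout(100.seconds), interval(3.seconds)) {
  Try(Await.result(k8s.get[Service](nginxServiceName), 2.seconds)) match {
    case Failure(ex: K8SException) if ex.status.code.contains(404) => succeed
    case other => fail(s"service still present or unexpected result: $other")
  }
}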
Example 163
Source File: PodDisruptionBudgetSpec.scala    From skuber   with Apache License 2.0
package skuber

import org.scalatest.Matchers
import org.scalatest.concurrent.Eventually
import skuber.apps.v1.Deployment
import skuber.policy.v1beta1.PodDisruptionBudget
import skuber.policy.v1beta1.PodDisruptionBudget._

class PodDisruptionBudgetSpec extends K8SFixture with Eventually with Matchers {
  behavior of "PodDisruptionBudget"

  it should "create a PodDisruptionBudget" in { k8s =>
    val name: String = java.util.UUID.randomUUID().toString
    k8s.create(getNginxDeployment(name, "1.7.9")) flatMap { d =>
      import LabelSelector.dsl._
      k8s.create(PodDisruptionBudget(name)
        .withMinAvailable(Left(1))
        .withLabelSelector("app" is "nginx")
      ).map { result =>
        assert(result.spec.contains(PodDisruptionBudget.Spec(None, Some(1), Some("app" is "nginx"))))
        assert(result.name == name)
      }
    }
  }

  it should "update a PodDisruptionBudget" in { k8s =>
    val name: String = java.util.UUID.randomUUID().toString
    k8s.create(getNginxDeployment(name, "1.7.9")) flatMap { d =>
      import LabelSelector.dsl._
      k8s.create(PodDisruptionBudget(name)
        .withMinAvailable(Left(1))
        .withLabelSelector("app" is "nginx")
      ).flatMap(pdb =>
        eventually(
          k8s.get[PodDisruptionBudget](pdb.name).flatMap { updatedPdb =>
            k8s.update(updatedPdb).map { result => // PodDisruptionBudgets are immutable at the moment.
              assert(result.spec.contains(PodDisruptionBudget.Spec(None, Some(1), Some("app" is "nginx"))))
              assert(result.name == name)
            }
          }
        )
      )
    }
  }

  it should "delete a PodDisruptionBudget" in { k8s =>
    val name: String = java.util.UUID.randomUUID().toString
    k8s.create(getNginxDeployment(name, "1.7.9")) flatMap { d =>
      import LabelSelector.dsl._
      k8s.create(PodDisruptionBudget(name)
        .withMinAvailable(Left(1))
        .withLabelSelector("app" is "nginx")
      ).flatMap { pdb =>
        k8s.delete[PodDisruptionBudget](pdb.name).flatMap { deleteResult =>
          k8s.get[PodDisruptionBudget](pdb.name).map { x =>
            assert(false)
          } recoverWith {
            case ex: K8SException if ex.status.code.contains(404) => assert(true)
            case _ => assert(false)
          }
        }
      }
    }
  }

  def getNginxDeployment(name: String, version: String): Deployment = {
    import LabelSelector.dsl._
    val nginxContainer = getNginxContainer(version)
    val nginxTemplate = Pod.Template.Spec.named("nginx").addContainer(nginxContainer).addLabel("app" -> "nginx")
    Deployment(name).withTemplate(nginxTemplate).withLabelSelector("app" is "nginx")
  }

  def getNginxContainer(version: String): Container = {
    Container(name = "nginx", image = "nginx:" + version).exposePort(80)
  }
} 
Example 164
Source File: PodSpec.scala    From skuber   with Apache License 2.0
package skuber

import org.scalatest.{BeforeAndAfterAll, Matchers}
import org.scalatest.concurrent.Eventually
import skuber.json.format._

import scala.concurrent.duration._
import scala.concurrent.Await
import scala.util.{Failure, Success}


class PodSpec extends K8SFixture with Eventually with Matchers with BeforeAndAfterAll {
  val nginxPodName: String = java.util.UUID.randomUUID().toString
  val defaultLabels = Map("app" -> this.suiteName)

  override def afterAll() = {
    val k8s = k8sInit
    val requirements = defaultLabels.toSeq.map { case (k, v) => LabelSelector.IsEqualRequirement(k, v) }
    val labelSelector = LabelSelector(requirements: _*)
    Await.result(k8s.deleteAllSelected[PodList](labelSelector), 5.seconds)
  }

  behavior of "Pod"

  it should "create a pod" in { k8s =>
    k8s.create(getNginxPod(nginxPodName, "1.7.9")) map { p =>
      assert(p.name == nginxPodName)
    }
  }

  it should "get the newly created pod" in { k8s =>
    k8s.get[Pod](nginxPodName) map { p =>
      assert(p.name == nginxPodName)
    }
  }

  it should "check for newly created pod and container to be ready" in { k8s =>
    eventually(timeout(100.seconds), interval(3.seconds)) {
      val retrievePod = k8s.get[Pod](nginxPodName)
      val podRetrieved = Await.ready(retrievePod, 2.seconds).value.get
      val podStatus = podRetrieved.get.status.get
      val nginxContainerStatus = podStatus.containerStatuses(0)
      podStatus.phase should contain(Pod.Phase.Running)
      nginxContainerStatus.name should be(nginxPodName)
      nginxContainerStatus.state.get shouldBe a[Container.Running]
      val isUnschedulable = podStatus.conditions.exists { c =>
        c._type == "PodScheduled" && c.status == "False" && c.reason == Some("Unschedulable")
      }
      val isScheduled = podStatus.conditions.exists { c =>
        c._type == "PodScheduled" && c.status == "True"
      }
      val isInitialised = podStatus.conditions.exists { c =>
        c._type == "Initialized" && c.status == "True"
      }
      val isReady = podStatus.conditions.exists { c =>
        c._type == "Ready" && c.status == "True"
      }
      assert(isScheduled)
      assert(isInitialised)
      assert(isReady)
    }
  }

  it should "delete a pod" in { k8s =>
    k8s.delete[Pod](nginxPodName).map { _ =>
      eventually(timeout(100.seconds), interval(3.seconds)) {
        val retrievePod = k8s.get[Pod](nginxPodName)
        val podRetrieved = Await.ready(retrievePod, 2.seconds).value.get
        podRetrieved match {
          case s: Success[_] => assert(false)
          case Failure(ex) => ex match {
            case ex: K8SException if ex.status.code.contains(404) => assert(true)
            case _ => assert(false)
          }
        }
      }
    }
  }

  it should "delete selected pods" in { k8s =>
    for {
      _ <- k8s.create(getNginxPod(nginxPodName + "-foo", "1.7.9", labels = Map("foo" -> "1")))
      _ <- k8s.create(getNginxPod(nginxPodName + "-bar", "1.7.9", labels = Map("bar" -> "2")))
      _ <- k8s.deleteAllSelected[PodList](LabelSelector(LabelSelector.ExistsRequirement("foo")))
    } yield eventually(timeout(100.seconds), interval(3.seconds)) {
      val retrievePods = k8s.list[PodList]()
      val podsRetrieved = Await.result(retrievePods, 2.seconds)
      val podNamesRetrieved = podsRetrieved.items.map(_.name)
      assert(!podNamesRetrieved.contains(nginxPodName + "-foo") && podNamesRetrieved.contains(nginxPodName + "-bar"))
    }
  }

  def getNginxContainer(name: String, version: String): Container = Container(name = name, image = "nginx:" + version).exposePort(80)

  def getNginxPod(name: String, version: String, labels: Map[String, String] = Map()): Pod = {
    val nginxContainer = getNginxContainer(name, version)
    val nginxPodSpec = Pod.Spec(containers = List(nginxContainer))
    val podMeta = ObjectMeta(name = name, labels = labels ++ defaultLabels)
    Pod(metadata = podMeta, spec = Some(nginxPodSpec))
  }
} 
Example 165
Source File: PodLogSpec.scala    From skuber   with Apache License 2.0
package skuber

import java.time.ZonedDateTime

import akka.stream.scaladsl.TcpIdleTimeoutException
import com.typesafe.config.ConfigFactory
import org.scalatest.{BeforeAndAfterAll, Matchers}
import org.scalatest.concurrent.Eventually
import skuber.Pod.LogQueryParams
import skuber.json.format._

import scala.concurrent.Await
import scala.concurrent.duration._

class PodLogSpec extends K8SFixture with Eventually with Matchers with BeforeAndAfterAll {
  val podName: String = java.util.UUID.randomUUID().toString

  behavior of "PodLog"

  val idleTimeout = 3.seconds
  override val config = ConfigFactory.parseString(s"skuber.pod-log.idle-timeout=${idleTimeout.toSeconds}s").withFallback(ConfigFactory.load())

  override def beforeAll(): Unit = {
    super.beforeAll()

    val k8s = k8sInit(config)
    Await.result(k8s.create(getNginxPod(podName, "1.7.9")), 3.second)
    // Give the pod a moment to start running
    Thread.sleep(3000)
    k8s.close
  }

  override def afterAll(): Unit = {
    val k8s = k8sInit(config)
    Await.result(k8s.delete[Pod](podName), 3.second)
    Thread.sleep(3000)
    k8s.close

    super.afterAll()
  }

  it should "get log of a pod" in { k8s =>
    k8s.getPodLogSource(podName, LogQueryParams(follow = Some(false))).flatMap { source =>
      source.map(_.utf8String).runReduce(_ + _).map { s =>
        assert(s == "foo\n")
      }
    }
  }

  it should "tail log of a pod and timeout after a while" in { k8s =>
    var log = ""
    val start = ZonedDateTime.now()
    k8s.getPodLogSource(podName, LogQueryParams(follow = Some(true))).flatMap { source =>
      source.map(_.utf8String).runForeach(log += _)
    }.failed.map { case e: TcpIdleTimeoutException =>
      val msgPattern = s"TCP idle-timeout encountered on connection to [^,]+, no bytes passed in the last ${idleTimeout}"
      assert(e.getMessage.matches(msgPattern), s"""["${e.getMessage}"] does not match ["${msgPattern}"]""")
      assert(log == "foo\n")
      assert(ZonedDateTime.now().isAfter(start.plusSeconds(idleTimeout.toSeconds)))
    }
  }

  def getNginxContainer(version: String): Container = Container(
    name = "ubuntu", image = "nginx:" + version,
    command = List("sh"),
    args = List("-c", s"""echo "foo"; trap exit TERM; sleep infinity & wait""")
  )

  def getNginxPod(name: String, version: String): Pod = {
    val container = getNginxContainer(version)
    val podSpec = Pod.Spec(containers = List(container))
    Pod.named(name).copy(spec = Some(podSpec))
  }
} 
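With the plusSeconds fix above, the last assertion checks that the stream lived at least as long as the idle timeout. The same check reads slightly more directly with java.time.Duration; a sketch:

import java.time.{Duration => JDuration, ZonedDateTime}

val start = ZonedDateTime.now()
// ... run the log-tailing call that is expected to hit the idle timeout ...
val elapsed = JDuration.between(start, ZonedDateTime.now())
assert(elapsed.getSeconds >= idleTimeout.toSeconds) // idleTimeout as defined in the suite above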
Example 166
Source File: KnownBossesObserverSpec.scala    From gbf-raidfinder   with MIT License
package walfie.gbf.raidfinder

import java.util.Date
import monix.execution.schedulers.TestScheduler
import monix.reactive.Observer
import monix.reactive.subjects._
import org.mockito.Mockito._
import org.scalatest._
import org.scalatest.concurrent.{Eventually, ScalaFutures}
import org.scalatest.Matchers._
import org.scalatest.mockito.MockitoSugar
import scala.concurrent.{ExecutionContext, Future}
import scala.util.Random
import walfie.gbf.raidfinder.domain._

class KnownBossesObserverSpec extends KnownBossesObserverSpecHelpers {
  "Start with initial value" in new ObserverFixture {
    val boss1 = mockRaidInfo("A").boss
    val boss2 = mockRaidInfo("B").boss
    override val initialBosses = Seq(boss1, boss2)

    observer.get shouldBe Map("A" -> boss1, "B" -> boss2)
    cancelable.cancel()
  }

  "Keep last known of each boss" in new ObserverFixture {
    val bosses1 = (1 to 5).map(_ => mockRaidInfo("A"))
    val bosses2 = (1 to 10).map(_ => mockRaidInfo("B"))

    bosses1.foreach(raidInfos.onNext)
    bosses2.foreach(raidInfos.onNext)

    eventually {
      scheduler.tick()
      observer.get shouldBe Map(
        "A" -> bosses1.last.boss,
        "B" -> bosses2.last.boss
      )
    }
    cancelable.cancel()
  }

  "purgeOldBosses" - {
    "remove old bosses" in new ObserverFixture {
      val bosses = (1 to 10).map { i =>
        RaidBoss(name = i.toString, level = i, image = None, lastSeen = new Date(i), language = Language.Japanese)
      }
      override val initialBosses = bosses

      scheduler.tick()
      observer.get shouldBe bosses.map(boss => boss.name -> boss).toMap

      val resultF = observer.purgeOldBosses(minDate = new Date(5), levelThreshold = Some(100))
      scheduler.tick()

      resultF.futureValue shouldBe
        bosses.drop(5).map(boss => boss.name -> boss).toMap
    }

    "keep bosses that are above a certain level" in new ObserverFixture {
      val bosses = Seq(10, 50, 100, 120, 150).map { i =>
        RaidBoss(name = i.toString, level = i, image = None, lastSeen = new Date(0), language = Language.English)
      }
      override val initialBosses = bosses

      scheduler.tick()
      observer.get.values.toSet shouldBe bosses.toSet

      val resultF = observer.purgeOldBosses(minDate = new Date(5), levelThreshold = Some(100))
      scheduler.tick()

      resultF.futureValue.values.toSet shouldBe
        bosses.filter(_.level >= 100).toSet
    }
  }

}

trait KnownBossesObserverSpecHelpers extends FreeSpec
  with MockitoSugar with Eventually with ScalaFutures {

  trait ObserverFixture {
    implicit val scheduler = TestScheduler()
    val initialBosses: Seq[RaidBoss] = Seq.empty
    val raidInfos = ConcurrentSubject.replay[RaidInfo]
    lazy val (observer, cancelable) = KnownBossesObserver
      .fromRaidInfoObservable(raidInfos, initialBosses)
  }

  def mockRaidInfo(bossName: String): RaidInfo = {
    val tweet = mock[RaidTweet]
    when(tweet.bossName) thenReturn bossName
    when(tweet.createdAt) thenReturn (new Date(Random.nextLong.abs * 1000))
    val boss = mock[RaidBoss]
    when(boss.name) thenReturn bossName
    RaidInfo(tweet, boss)
  }
}
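monix's TestScheduler runs on virtual time: scheduled tasks execute only when tick() advances the clock, which is why the assertions above call scheduler.tick() before inspecting the observer. A minimal sketch:

import java.util.concurrent.TimeUnit
import monix.execution.schedulers.TestScheduler
import scala.concurrent.duration._

val scheduler = TestScheduler()
var fired = false
scheduler.scheduleOnce(1, TimeUnit.SECONDS, new Runnable {
  def run(): Unit = fired = true
})

assert(!fired)           // nothing runs until virtual time moves
scheduler.tick(1.second) // advance the virtual clock by one second
assert(fired)            // the scheduled task has now executed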