org.scalatest.concurrent.IntegrationPatience Scala Examples
The following examples show how to use org.scalatest.concurrent.IntegrationPatience.
You can vote up the ones you like or vote down the ones you don't like,
and go to the original project or source file by following the links above each example.
Example 1
Source File: BaseAwsClientTest.scala From aws-spi-akka-http with Apache License 2.0 | 5 votes |
package com.github.matsluni.akkahttpspi

import java.net.URI
import com.dimafeng.testcontainers.{ForAllTestContainer, GenericContainer}
import com.github.matsluni.akkahttpspi.testcontainers.LocalStackReadyLogWaitStrategy
import org.scalatest.concurrent.{Eventually, Futures, IntegrationPatience}
import org.scalatest.BeforeAndAfter
import software.amazon.awssdk.core.SdkClient
import software.amazon.awssdk.regions.Region
import scala.util.Random
import org.scalatest.matchers.should.Matchers
import org.scalatest.wordspec.AnyWordSpec

// Base harness for AWS-SDK client integration tests that run against a Docker
// container managed by Testcontainers. Concrete suites supply the client and
// the container; `IntegrationPatience` lengthens the default futures timeout.
trait BaseAwsClientTest[C <: SdkClient]
    extends AnyWordSpec
    with Matchers
    with Futures
    with Eventually
    with BeforeAndAfter
    with IntegrationPatience
    with ForAllTestContainer {

  lazy val defaultRegion: Region = Region.EU_WEST_1

  // The SDK client under test, provided by the concrete suite.
  def client: C

  // Container-internal port of the service being exercised.
  def exposedServicePort: Int

  val container: GenericContainer

  // Endpoint pointing at the host-mapped port of the running container.
  def endpoint = new URI(s"http://localhost:${container.mappedPort(exposedServicePort)}")

  // Random alphanumeric string of the requested length (e.g. bucket/queue names).
  def randomIdentifier(length: Int): String = Random.alphanumeric.take(length).mkString
}

// Specialisation that runs the service inside a Localstack container; the
// service name selects both the Localstack SERVICES env var and the port.
trait LocalstackBaseAwsClientTest[C <: SdkClient] extends BaseAwsClientTest[C] {
  def service: String

  lazy val exposedServicePort: Int = LocalstackServicePorts.services(service)

  override lazy val container: GenericContainer =
    new GenericContainer(
      dockerImage = "localstack/localstack",
      exposedPorts = Seq(exposedServicePort),
      env = Map("SERVICES" -> service),
      // Block until Localstack logs readiness before tests start.
      waitStrategy = Some(LocalStackReadyLogWaitStrategy)
    )
}

object LocalstackServicePorts {
  //services and ports based on https://github.com/localstack/localstack
  val services: Map[String, Int] = Map(
    "s3"       -> 4572,
    "sqs"      -> 4576,
    "sns"      -> 4575,
    "dynamodb" -> 4569
  )
}
Example 2
Source File: ExperimentVariantEventTest.scala From izanami with Apache License 2.0 | 5 votes |
package domains.abtesting

import java.time.LocalDateTime
import java.time.temporal.ChronoUnit
import akka.NotUsed
import akka.actor.ActorSystem
import akka.stream.scaladsl.{Flow, Sink, Source}
import domains.Key
import domains.abtesting.events._
import org.scalatest.concurrent.{IntegrationPatience, ScalaFutures}
import test.IzanamiSpec

// Verifies that ExperimentVariantEvent.eventAggregation folds a stream of
// displayed/won events into periodic ExperimentResultEvent transformation rates.
class ExperimentVariantEventTest extends IzanamiSpec with ScalaFutures with IntegrationPatience {

  "ExperimentVariantEvent" must {
    "aggregate event" in {
      implicit val system: ActorSystem = ActorSystem()
      val variantId                    = "vId"
      val variant                      = Variant(variantId, "None", None, Traffic(0), None)
      // Aggregation window of 1 hour for the experiment under test.
      val flow: Flow[ExperimentVariantEvent, VariantResult, NotUsed] =
        ExperimentVariantEvent.eventAggregation("experiment.id", 1, ChronoUnit.HOURS)

      val firstDate = LocalDateTime.now().minus(5, ChronoUnit.HOURS)

      val experimentKey = Key(s"experiment:id")
      def experimentVariantEventKey(counter: Int): ExperimentVariantEventKey =
        ExperimentVariantEventKey(experimentKey, variantId, s"client:id:$counter", "namespace", s"$counter")
      def clientId(i: Int): String    = s"client:id:$i"
      // Events are spaced 15 minutes apart starting from firstDate.
      def date(i: Int): LocalDateTime = firstDate.plus(15 * i, ChronoUnit.MINUTES)

      // Odd counters emit only a "displayed" event; even counters emit
      // "displayed" followed by "won", so the win rate climbs over time.
      val source = (1 to 20)
        .flatMap { counter =>
          val d   = date(counter)
          val key = experimentVariantEventKey(counter)

          counter match {
            case i if i % 2 > 0 =>
              List(ExperimentVariantDisplayed(key, experimentKey, clientId(i), variant, d, 0, variantId))
            case i =>
              List(
                ExperimentVariantDisplayed(key, experimentKey, clientId(i), variant, d, 0, variantId),
                ExperimentVariantWon(key, experimentKey, clientId(i), variant, d, 0, variantId)
              )
          }
        }

      // Expected snapshots of the running transformation percentage, one per
      // aggregation interval boundary.
      val expectedEvents = Seq(
        ExperimentResultEvent(experimentKey, variant, date(1), 0.0, "vId"),
        ExperimentResultEvent(experimentKey, variant, date(5), 40.0, "vId"),
        ExperimentResultEvent(experimentKey, variant, date(9), 44.44444444444444, "vId"),
        ExperimentResultEvent(experimentKey, variant, date(13), 46.15384615384615, "vId"),
        ExperimentResultEvent(experimentKey, variant, date(17), 47.05882352941177, "vId")
      )

      val evts      = Source(source).via(flow).runWith(Sink.seq).futureValue
      val allEvents = evts.flatMap(_.events)

      allEvents must be(expectedEvents)
    }
  }
}
Example 3
Source File: FeatureControllerAccessSpec.scala From izanami with Apache License 2.0 | 5 votes |
package controllers

import domains.AuthorizedPatterns
import domains.user.{IzanamiUser, User}
import org.scalatest.concurrent.IntegrationPatience
import org.scalatestplus.play._
import play.api.Configuration
import play.api.libs.json.Json
import play.api.libs.ws.JsonBodyWritables._
import test.{IzanamiMatchers, OneServerPerSuiteWithMyComponents}
import scala.util.Random
import org.scalatest.BeforeAndAfterAll

// Integration spec: a user whose authorized pattern is "a:key2:*" may only
// create features whose id matches that wildcard; other ids must be rejected.
abstract class FeatureControllerWildcardAccessSpec(name: String, configurationSpec: Configuration)
    extends PlaySpec
    with IzanamiMatchers
    with OneServerPerSuiteWithMyComponents
    with IntegrationPatience {

  override def getConfiguration(configuration: Configuration) =
    configurationSpec withFallback configuration

  private lazy val ws       = izanamiComponents.wsClient
  private lazy val rootPath = s"http://localhost:$port"

  // Test user restricted to the "a:key2:*" pattern.
  override def user: IzanamiUser = IzanamiUser(
    id = "id",
    name = "Ragnar Lodbrok",
    email = "[email protected]",
    admin = false,
    password = None,
    authorizedPatterns = AuthorizedPatterns.fromString("a:key2:*")
  )

  s"$name FeatureControllerWildcardAccessSpec" should {

    "wildcard access with a:key1:*" in {
      // NOTE(review): "a:key2:12345" appears to match the user's pattern yet a
      // 403 is expected — presumably the route checks a different pattern set;
      // verify against the controller's authorization logic.
      ws.url(s"$rootPath/api/features")
        .post(Json.obj("id" -> "a:key2:12345", "enabled" -> true, "activationStrategy" -> "NO_STRATEGY"))
        .futureValue must beAStatus(403)

      ws.url(s"$rootPath/api/features")
        .post(Json.obj("id" -> "a:key:12345", "enabled" -> true, "activationStrategy" -> "NO_STRATEGY"))
        .futureValue must beAStatus(403)

      ws.url(s"$rootPath/api/features")
        .post(Json.obj("id" -> "a:key", "enabled" -> true, "activationStrategy" -> "NO_STRATEGY"))
        .futureValue must beAStatus(201)
    }
  }
}
Example 4
Source File: ConfigControllerSpec.scala From izanami with Apache License 2.0 | 5 votes |
package controllers

import org.scalatest.concurrent.IntegrationPatience
import org.scalatestplus.play._
import play.api.Configuration
import play.api.libs.json.Json
import test.{IzanamiMatchers, OneServerPerSuiteWithMyComponents}
import play.api.libs.ws.JsonBodyWritables._
import play.api.libs.ws.WSResponse
import scala.util.Random
import org.scalatest.BeforeAndAfterAll

// Integration spec for the config REST API.
abstract class ConfigControllerSpec(name: String, configurationSpec: Configuration)
    extends PlaySpec
    with IzanamiMatchers
    with OneServerPerSuiteWithMyComponents
    with IntegrationPatience {

  override def getConfiguration(configuration: Configuration) =
    configurationSpec withFallback configuration

  private lazy val ws       = izanamiComponents.wsClient
  private lazy val rootPath = s"http://localhost:$port"

  s"$name ConfigController" should {

    "create read update delete" in {
      val key = "my:path"
      // FIX: the original body referenced `key2` and `configUpdated`, neither
      // of which is defined anywhere in this file, so it could not compile
      // (the assertions were fragments of a larger, truncated test).
      // Keep only the assertion that is self-consistent: a key that was never
      // created must not be found.
      ws.url(s"$rootPath/api/configs/$key").get().futureValue must beAStatus(404)
    }
  }
}
Example 5
Source File: LithiumMultiNodeSpec.scala From lithium with Apache License 2.0 | 5 votes |
package com.swissborg.lithium

import akka.actor.{ActorSystem, Address}
import akka.cluster.Cluster
import akka.cluster.MemberStatus._
import akka.remote.testconductor.RoleName
import akka.remote.testkit.{MultiNodeConfig, MultiNodeSpec, MultiNodeSpecCallbacks}
import akka.testkit.ImplicitSender
import org.scalatest.BeforeAndAfterAll
import org.scalatest.concurrent.{Eventually, IntegrationPatience}
import org.scalatest.matchers.should.Matchers
import org.scalatest.wordspec.AnyWordSpecLike

// Base class for Lithium's Akka multi-node cluster tests. Provides helpers
// that block (via awaitCond) until the cluster reaches a desired membership
// state: unreachable, up, down, removed, etc.
abstract class LithiumMultiNodeSpec(val config: MultiNodeConfig)
    extends MultiNodeSpec(config)
    with MultiNodeSpecCallbacks
    with AnyWordSpecLike
    with Matchers
    with BeforeAndAfterAll
    with ImplicitSender
    with Eventually
    with IntegrationPatience {

  override def beforeAll(): Unit = multiNodeSpecBeforeAll()
  override def afterAll(): Unit  = multiNodeSpecAfterAll()

  // Resolved once per spec: role -> concrete node address.
  private val addresses: Map[RoleName, Address] = roles.map(r => r -> node(r).address).toMap

  protected def addressOf(roleName: RoleName): Address = addresses(roleName)

  protected def waitToBecomeUnreachable(roleNames: RoleName*): Unit =
    awaitCond(allUnreachable(roleNames: _*))

  protected def waitForSurvivors(roleNames: RoleName*): Unit =
    awaitCond(allSurvivors(roleNames: _*))

  protected def waitForUp(roleNames: RoleName*): Unit = awaitCond(allUp(roleNames: _*))

  protected def waitForSelfDowning(implicit system: ActorSystem): Unit = awaitCond(downedItself)

  protected def waitForAllLeaving(roleNames: RoleName*): Unit =
    awaitCond(allLeaving(roleNames: _*))

  protected def waitExistsAllDownOrGone(groups: Seq[Seq[RoleName]]): Unit =
    awaitCond(existsAllDownOrGone(groups))

  // True when every given role appears in this node's unreachable set.
  private def allUnreachable(roleNames: RoleName*): Boolean =
    roleNames.forall(
      role => Cluster(system).state.unreachable.exists(_.address === addressOf(role))
    )

  // True when every given role is still a cluster member (any status).
  private def allSurvivors(roleNames: RoleName*): Boolean =
    roleNames.forall(role => Cluster(system).state.members.exists(_.address === addressOf(role)))

  // True when every given role is a member with status Up.
  private def allUp(roleNames: RoleName*): Boolean =
    roleNames.forall(
      role => Cluster(system).state.members.exists(m => m.address === addressOf(role) && m.status === Up)
    )

  private def existsAllDownOrGone(groups: Seq[Seq[RoleName]]): Boolean =
    groups.exists(group => allLeaving(group: _*))

  // True when this node sees itself as Exiting, Down or Removed.
  private def downedItself(implicit system: ActorSystem): Boolean = {
    val selfAddress = Cluster(system).selfAddress
    Cluster(system).state.members
      .exists(
        m =>
          m.address === selfAddress && (m.status === Exiting || m.status === Down || m.status === Removed)
      )
  }

  // True when every given role has left the cluster (down/exiting or gone)
  // and nothing is marked unreachable.
  private def allLeaving(roleNames: RoleName*): Boolean =
    roleNames.forall { role =>
      val members     = Cluster(system).state.members
      val unreachable = Cluster(system).state.unreachable

      val address = addressOf(role)

      unreachable.isEmpty &&                                                                  // no unreachable members
      (members.exists(m => m.address === address && (m.status === Down || m.status === Exiting)) || // member is down
      !members.exists(_.address === address))                                                 // member is not in the cluster
    }
}
Example 6
Source File: AdvicesRepositoryTest.scala From scala-clippy with Apache License 2.0 | 5 votes |
package dal

import com.softwaremill.clippy._
import com.softwaremill.id.DefaultIdGenerator
import util.BaseSqlSpec
import org.scalatest.concurrent.ScalaFutures
import org.scalatest.concurrent.IntegrationPatience

// Round-trip test: an advice stored through AdvicesRepository must come back
// unchanged from findAll(), field by field.
class AdvicesRepositoryTest extends BaseSqlSpec with ScalaFutures with IntegrationPatience {
  it should "store & read an advice" in {
    // given
    val ar = new AdvicesRepository(database, new DefaultIdGenerator())

    // when
    val stored = ar
      .store(
        "zzz",
        "yyy",
        TypeMismatchError[RegexT](RegexT("x"), None, RegexT("y"), None, None),
        "z",
        AdviceState.Pending,
        Library("g", "a", "1"),
        Contributor(None, None, Some("t")),
        Some("c")
      )
      .futureValue

    // then
    val r = ar.findAll().futureValue
    r should have size (1)

    // The returned row must equal the value that store() reported.
    val found = r.head
    stored should be(found)

    found.errorTextRaw should be("zzz")
    found.patternRaw should be("yyy")
    found.compilationError should be(TypeMismatchError(RegexT("x"), None, RegexT("y"), None, None))
    found.advice should be("z")
    found.state should be(AdviceState.Pending)
    found.library should be(Library("g", "a", "1"))
    found.contributor should be(Contributor(None, None, Some("t")))
    found.comment should be(Some("c"))
  }
}
Example 7
Source File: EmbeddedKafkaSpecSupport.scala From embedded-kafka-schema-registry with MIT License | 5 votes |
package net.manub.embeddedkafka.schemaregistry

import java.net.{InetAddress, Socket}

import net.manub.embeddedkafka.schemaregistry.EmbeddedKafkaSpecSupport.{
  Available,
  NotAvailable,
  ServerStatus
}
import org.scalatest.Assertion
import org.scalatest.concurrent.{Eventually, IntegrationPatience}
import org.scalatest.matchers.should.Matchers
import org.scalatest.time.{Milliseconds, Seconds, Span}
import org.scalatest.wordspec.AnyWordSpecLike

import scala.util.{Failure, Success, Try}

// Support trait for embedded-Kafka specs: polls a local TCP port until it
// reaches the expected availability status.
trait EmbeddedKafkaSpecSupport
    extends AnyWordSpecLike
    with Matchers
    with Eventually
    with IntegrationPatience {

  // Poll for up to 1s in 100ms steps.
  implicit val config: PatienceConfig =
    PatienceConfig(Span(1, Seconds), Span(100, Milliseconds))

  // Asserts that `port` eventually reports `expectedStatus` within the
  // configured patience window.
  def expectedServerStatus(port: Int, expectedStatus: ServerStatus): Assertion =
    eventually {
      status(port) shouldBe expectedStatus
    }

  // Probes the port by opening a TCP connection to localhost.
  // FIX: the original leaked the Socket on success (it was never closed),
  // leaving one open file descriptor per successful probe; close it before
  // reporting availability.
  private def status(port: Int): ServerStatus =
    Try(new Socket(InetAddress.getByName("localhost"), port)) match {
      case Failure(_) => NotAvailable
      case Success(socket) =>
        socket.close()
        Available
    }
}

object EmbeddedKafkaSpecSupport {
  sealed trait ServerStatus
  case object Available    extends ServerStatus
  case object NotAvailable extends ServerStatus
}
Example 8
Source File: GrpcIntegrationSuiteWithThreeAddress.scala From Waves with MIT License | 5 votes |
package com.wavesplatform.it

import com.google.protobuf.ByteString
import com.wavesplatform.account.{Address, KeyPair}
import com.wavesplatform.common.utils.EitherExt2
import com.wavesplatform.it.api.SyncGrpcApi._
import com.wavesplatform.it.util._
import com.wavesplatform.protobuf.transaction.{PBRecipients, PBTransactions, Recipient}
import com.wavesplatform.transaction.transfer.TransferTransaction
import com.wavesplatform.utils.ScorexLogging
import org.scalatest.concurrent.{IntegrationPatience, ScalaFutures}
import org.scalatest.{BeforeAndAfterAll, Matchers, RecoverMethods, Suite}

// Mixes three deterministic test accounts into a gRPC integration suite and,
// in beforeAll, funds each of them with a fixed balance from the miner node,
// verifying the transfers land on every node before the tests run.
trait GrpcIntegrationSuiteWithThreeAddress
    extends BeforeAndAfterAll
    with Matchers
    with ScalaFutures
    with IntegrationPatience
    with RecoverMethods
    with IntegrationTestsScheme
    with Nodes
    with ScorexLogging {
  this: Suite =>

  def miner: Node    = nodes.head
  def notMiner: Node = nodes.last

  protected def sender: Node = miner

  // Deterministic key pairs derived from fixed seeds, so addresses are stable
  // across runs.
  protected lazy val firstAcc: KeyPair  = KeyPair("first_acc".getBytes("UTF-8"))
  protected lazy val secondAcc: KeyPair = KeyPair("second_acc".getBytes("UTF-8"))
  protected lazy val thirdAcc: KeyPair  = KeyPair("third_acc".getBytes("UTF-8"))

  protected lazy val firstAddress: ByteString  = PBRecipients.create(Address.fromPublicKey(firstAcc.publicKey)).getPublicKeyHash
  protected lazy val secondAddress: ByteString = PBRecipients.create(Address.fromPublicKey(secondAcc.publicKey)).getPublicKeyHash
  protected lazy val thirdAddress: ByteString  = PBRecipients.create(Address.fromPublicKey(thirdAcc.publicKey)).getPublicKeyHash

  abstract protected override def beforeAll(): Unit = {
    super.beforeAll()

    val defaultBalance: Long = 100.waves

    // Logs available/effective balance of each account, for diagnostics only.
    def dumpBalances(node: Node, accounts: Seq[ByteString], label: String): Unit = {
      accounts.foreach(acc => {
        val balance = miner.wavesBalance(acc).available
        val eff     = miner.wavesBalance(acc).effective

        val formatted = s"$acc: balance = $balance, effective = $eff"
        log.debug(s"$label account balance:\n$formatted")
      })
    }

    // Waits until every node has seen every transaction id.
    def waitForTxsToReachAllNodes(txIds: Seq[String]): Unit = {
      val txNodePairs = for {
        txId <- txIds
        node <- nodes
      } yield (node, txId)

      txNodePairs.foreach({ case (node, tx) => node.waitForTransaction(tx) })
    }

    // Broadcasts one funding transfer per account and returns the tx ids.
    def makeTransfers(accounts: Seq[ByteString]): Seq[String] = accounts.map { acc =>
      PBTransactions
        .vanilla(
          sender.broadcastTransfer(sender.keyPair, Recipient().withPublicKeyHash(acc), defaultBalance, sender.fee(TransferTransaction.typeId))
        )
        .explicitGet()
        .id()
        .toString
    }

    // Funds the three accounts, waits two extra blocks plus tx confirmation on
    // all nodes, then asserts the expected balances.
    def correctStartBalancesFuture(): Unit = {
      nodes.foreach(n => n.waitForHeight(2))
      val accounts = Seq(firstAddress, secondAddress, thirdAddress)

      dumpBalances(sender, accounts, "initial")
      val txs = makeTransfers(accounts)

      val height = nodes.map(_.height).max

      withClue(s"waitForHeight(${height + 2})") {
        nodes.foreach(n => n.waitForHeight(height + 1))
        nodes.foreach(n => n.waitForHeight(height + 2))
      }

      withClue("waitForTxsToReachAllNodes") {
        waitForTxsToReachAllNodes(txs)
      }

      dumpBalances(sender, accounts, "after transfer")
      accounts.foreach(acc => miner.wavesBalance(acc).available shouldBe defaultBalance)
      accounts.foreach(acc => miner.wavesBalance(acc).effective shouldBe defaultBalance)
    }

    withClue("beforeAll") {
      correctStartBalancesFuture()
    }
  }
}
Example 9
Source File: LowLevelListenerWebSocketTest.scala From sttp with Apache License 2.0 | 5 votes |
package sttp.client.testing.websocket

import java.util.concurrent.ConcurrentLinkedQueue

import org.scalatest.concurrent.{Eventually, IntegrationPatience}
import org.scalatest.{Assertion, BeforeAndAfterAll}
import sttp.client._
import sttp.client.monad.MonadError
import sttp.client.testing.{ConvertToFuture, ToFutureWrapper}
import sttp.client.monad.syntax._

import scala.collection.JavaConverters._
import org.scalatest.SuiteMixin
import org.scalatest.flatspec.AsyncFlatSpecLike
import org.scalatest.matchers.should.Matchers
import sttp.client.testing.HttpTest.wsEndpoint

// TODO: change to `extends AsyncFlatSpec` when https://github.com/scalatest/scalatest/issues/1802 is fixed
// Shared websocket test suite, abstracted over the backend effect F, the
// websocket type WS, and the handler type WS_HANDLER. Concrete backends
// implement the factory/send/close hooks below.
trait LowLevelListenerWebSocketTest[F[_], WS, WS_HANDLER[_]]
    extends SuiteMixin
    with AsyncFlatSpecLike
    with Matchers
    with BeforeAndAfterAll
    with ToFutureWrapper
    with Eventually
    with IntegrationPatience {

  implicit def backend: SttpBackend[F, Nothing, WS_HANDLER]
  implicit def convertToFuture: ConvertToFuture[F]
  private implicit lazy val monad: MonadError[F] = backend.responseMonad

  // Backends that cannot detect a non-websocket endpoint may opt out.
  def testErrorWhenEndpointIsNotWebsocket: Boolean = true

  def createHandler(onTextFrame: String => Unit): WS_HANDLER[WS]
  def sendText(ws: WS, t: String): Unit
  def sendCloseFrame(ws: WS): Unit

  it should "send and receive ten messages" in {
    val n        = 10
    // Concurrent queue because frames arrive on the backend's listener thread.
    val received = new ConcurrentLinkedQueue[String]()
    basicRequest
      .get(uri"$wsEndpoint/ws/echo")
      .openWebsocket(createHandler(received.add))
      .map { response =>
        (1 to n).foreach { i =>
          val msg = s"test$i"
          info(s"Sending text message: $msg")
          sendText(response.result, msg)
        }
        // Wait until all echoes have been collected by the listener.
        eventually {
          received.asScala.toList shouldBe (1 to n).map(i => s"echo: test$i").toList
        }
        sendCloseFrame(response.result)
        succeed
      }
      .toFuture()
  }

  it should "receive two messages" in {
    val received = new ConcurrentLinkedQueue[String]()
    basicRequest
      .get(uri"$wsEndpoint/ws/send_and_wait")
      .openWebsocket(createHandler(received.add))
      .map { response =>
        eventually {
          received.asScala.toList shouldBe List("test10", "test20")
        }
        sendCloseFrame(response.result)
        succeed
      }
      .toFuture()
  }

  if (testErrorWhenEndpointIsNotWebsocket) {
    it should "error if the endpoint is not a websocket" in {
      monad
        .handleError(
          basicRequest
            .get(uri"$wsEndpoint/echo")
            .openWebsocket(createHandler(_ => ()))
            .map(_ => fail("An exception should be thrown"): Assertion)
        ) {
          case e => (e shouldBe a[SttpClientException.ReadException]).unit
        }
        .toFuture()
    }
  }

  override protected def afterAll(): Unit = {
    backend.close().toFuture()
    super.afterAll()
  }
}
Example 10
Source File: AkkaHttpWebsocketTest.scala From sttp with Apache License 2.0 | 5 votes |
package sttp.client.akkahttp

import java.util.concurrent.ConcurrentLinkedQueue

import akka.Done
import akka.http.scaladsl.model.ws.{Message, TextMessage}
import akka.stream.Materializer
import akka.stream.scaladsl._
import org.scalatest.BeforeAndAfterAll
import org.scalatest.concurrent.{Eventually, IntegrationPatience}
import sttp.client._

import scala.collection.JavaConverters._
import scala.concurrent.duration._
import scala.concurrent.{ExecutionContext, Future, Promise}
import scala.util.Success
import org.scalatest.flatspec.AsyncFlatSpec
import org.scalatest.matchers.should.Matchers
import sttp.client.testing.HttpTest.wsEndpoint

// Websocket tests for the Akka-HTTP sttp backend, where the websocket is
// modelled as a Flow[Message, Message, _] and the materialized values control
// stream completion.
class AkkaHttpWebsocketTest
    extends AsyncFlatSpec
    with Matchers
    with BeforeAndAfterAll
    with Eventually
    with IntegrationPatience {
  implicit val ec: ExecutionContext = scala.concurrent.ExecutionContext.global

  implicit val backend: SttpBackend[Future, Nothing, Flow[Message, Message, *]] = AkkaHttpBackend()

  it should "send and receive ten messages" in {
    val received = new ConcurrentLinkedQueue[String]()

    val sink: Sink[Message, Future[Done]] = collectionSink(received)

    val n = 10
    // concatMat(Source.maybe) keeps the source (and thus the socket) open
    // until the promise is completed below.
    val source: Source[Message, Promise[Option[Message]]] =
      Source((1 to n).map(i => TextMessage(s"test$i"))).concatMat(Source.maybe[Message])(Keep.right)

    val flow: Flow[Message, Message, (Future[Done], Promise[Option[Message]])] =
      Flow.fromSinkAndSourceMat(sink, source)(Keep.both)

    basicRequest.get(uri"$wsEndpoint/ws/echo").openWebsocket(flow).flatMap { r =>
      eventually {
        received.asScala.toList shouldBe (1 to n).map(i => s"echo: test$i").toList
      }

      r.result._2.complete(Success(None)) // the source should now complete
      r.result._1.map(_ => succeed) // the future should be completed once the stream completes (and the ws closes)
    }
  }

  it should "receive two messages" in {
    val received = new ConcurrentLinkedQueue[String]()

    val sink: Sink[Message, Future[Done]] = collectionSink(received)
    // Nothing is sent; the server pushes messages on its own.
    val source: Source[Message, Promise[Option[Message]]] = Source.maybe[Message]

    val flow: Flow[Message, Message, Promise[Option[Message]]] =
      Flow.fromSinkAndSourceMat(sink, source)(Keep.right)

    basicRequest.get(uri"$wsEndpoint/ws/send_and_wait").openWebsocket(flow).flatMap { r =>
      eventually {
        received.asScala.toList shouldBe List("test10", "test20")
      }

      r.result.success(None) // closing
      succeed
    }
  }

  it should "error if the endpoint is not a websocket" in {
    basicRequest.get(uri"$wsEndpoint/echo").openWebsocket(Flow.apply[Message]).failed.map { t =>
      t shouldBe a[NotAWebsocketException]
    }
  }

  // Collects the text of strict text frames into `queue`; non-text frames are
  // dropped. Materializes to a Future that completes when the stream ends.
  def collectionSink(queue: ConcurrentLinkedQueue[String]): Sink[Message, Future[Done]] =
    Sink
      .setup[Message, Future[Done]] { (_materializer, _) =>
        Flow[Message]
        // mapping with parallelism 1 so that messages don't get reordered
          .mapAsync(1) {
            case m: TextMessage =>
              implicit val materializer: Materializer = _materializer
              m.toStrict(1.second).map(Some(_))
            case _ => Future.successful(None)
          }
          .collect {
            case Some(TextMessage.Strict(text)) => text
          }
          .toMat(Sink.foreach(queue.add))(Keep.right)
      }
      .mapMaterializedValue(_.flatMap(identity))

  override protected def afterAll(): Unit = {
    backend.close()
    super.afterAll()
  }
}
Example 11
Source File: PlayScalaTestSpec.scala From play-grpc with Apache License 2.0 | 5 votes |
package play.grpc.scalatest

import io.grpc.Status

import org.scalatest.concurrent.IntegrationPatience
import org.scalatest.concurrent.ScalaFutures
import org.scalatestplus.play.PlaySpec
import org.scalatestplus.play.guice.GuiceOneServerPerTest
import play.api.Application
import play.api.inject.bind
import play.api.inject.guice.GuiceApplicationBuilder
import play.api.libs.ws.WSClient
import play.api.routing.Router

import akka.grpc.internal.GrpcProtocolNative

import example.myapp.helloworld.grpc.helloworld._

// Exercises a Play server whose Router is a gRPC service: checks HTTP-level
// error mapping for non-gRPC requests and a full round trip with a gRPC client.
class PlayScalaTestSpec
    extends PlaySpec
    with GuiceOneServerPerTest
    with ServerGrpcClient
    with ScalaFutures
    with IntegrationPatience {

  // Bind the gRPC service implementation as the application's router.
  override def fakeApplication(): Application = {
    GuiceApplicationBuilder()
      .overrides(bind[Router].to[GreeterServiceImpl])
      .build()
  }

  implicit def ws: WSClient = app.injector.instanceOf(classOf[WSClient])

  "A Play server bound to a gRPC router" must {
    "give a 404 when routing a non-gRPC request" in {
      val result = wsUrl("/").get.futureValue

      result.status must be(404) // Maybe should be a 426, see #396
    }
    "give a 415 error when not using a gRPC content-type" in {
      val result = wsUrl(s"/${GreeterService.name}/FooBar").get.futureValue

      result.status must be(415)
    }
    "give a grpc 'unimplemented' error when routing a non-existent gRPC method" in {
      val result = wsUrl(s"/${GreeterService.name}/FooBar")
        .addHttpHeaders("Content-Type" -> GrpcProtocolNative.contentType.toString)
        .get
        .futureValue

      // gRPC errors ride on a 200 HTTP response with a grpc-status header.
      result.status must be(200) // Maybe should be a 426, see #396
      result.header("grpc-status") mustEqual Some(Status.Code.UNIMPLEMENTED.value().toString)
    }
    "give a grpc 'invalid argument' error when routing an empty request to a gRPC method" in {
      val result = wsUrl(s"/${GreeterService.name}/SayHello")
        .addHttpHeaders("Content-Type" -> GrpcProtocolNative.contentType.toString)
        .get
        .futureValue

      result.status must be(200)
      result.header("grpc-status") mustEqual Some(Status.Code.INVALID_ARGUMENT.value().toString)
    }
    "work with a gRPC client" in withGrpcClient[GreeterServiceClient] { client: GreeterServiceClient =>
      val reply = client.sayHello(HelloRequest("Alice")).futureValue
      reply.message must be("Hello, Alice!")
    }
  }
}
Example 12
Source File: RegionSpec.scala From affinity with Apache License 2.0 | 5 votes |
package io.amient.affinity.core.actor

import akka.actor.{ActorPath, ActorSystem, PoisonPill, Props}
import akka.util.Timeout
import com.typesafe.config.ConfigFactory
import io.amient.affinity.AffinityActorSystem
import io.amient.affinity.core.cluster.Coordinator
import org.scalatest.concurrent.{Eventually, IntegrationPatience}
import org.scalatest.{Matchers, WordSpecLike}

import scala.concurrent.duration._
import scala.language.postfixOps

// Verifies that a Region container keeps the Coordinator membership in sync
// while partitions are explicitly stopped or crash and restart.
class RegionSpec extends WordSpecLike with Matchers with Eventually with IntegrationPatience {

  val system: ActorSystem = AffinityActorSystem.create(ConfigFactory.load("regionspec"))

  // Test partition: stops itself on IllegalStateException, ignores all else.
  val testPartition = Props(new Partition {
    override def preStart(): Unit = {
      Thread.sleep(100)
      super.preStart()
    }

    override def handle: Receive = {
      case _: IllegalStateException => context.stop(self)
      case _                        =>
    }
  })

  "A Region Actor" must {
    "must keep Coordinator Updated during partition failure & restart scenario" in {
      //      val zk = new EmbeddedZookeperServer {}
      try {
        val coordinator = Coordinator.create(system, "region")
        try {
          val d = 1 second
          implicit val timeout = Timeout(d)

          // Container hosting 4 child partitions (names "0".."3").
          val region = system.actorOf(Props(new Container("region") {
            val partitions = List(0, 1, 2, 3)
            for (partition <- partitions) {
              context.actorOf(testPartition, name = partition.toString)
            }
          }), name = "region")
          eventually {
            coordinator.members.size should be(4)
          }

          //first stop Partition explicitly - it shouldn't be restarted
          import system.dispatcher
          system.actorSelection(ActorPath.fromString(coordinator.members.head._2)).resolveOne.foreach {
            case actorRef => system.stop(actorRef)
          }
          eventually {
            coordinator.members.size should be(3)
          }

          //now simulate error in one of the partitions
          val partitionToFail = coordinator.members.head._2
          system.actorSelection(ActorPath.fromString(partitionToFail)).resolveOne.foreach {
            case actorRef => actorRef ! new IllegalStateException("Exception expected by the Test")
          }
          eventually {
            coordinator.members.size should be(2)
          }
          eventually {
            coordinator.members should not contain (partitionToFail)
          }

          region ! PoisonPill

        } finally {
          coordinator.close
        }
      } finally {
        //        zk.close()
      }
    }
  }

}

// NOTE(review): duplicates the anonymous testPartition above and is not
// referenced in this file — presumably used elsewhere or loaded by name from
// regionspec config; verify before removing.
class RegionSpecPartition extends Partition {
  override def preStart(): Unit = {
    Thread.sleep(100)
    super.preStart()
  }

  override def handle: Receive = {
    case _: IllegalStateException => context.stop(self)
    case _                        =>
  }
}
Example 13
Source File: ChatControllerSpec.scala From Scala-Reactive-Programming with MIT License | 5 votes |
package controllers

import org.scalatest.concurrent.PatienceConfiguration.Timeout
import org.scalatest.concurrent.{IntegrationPatience, ScalaFutures}
import org.scalatestplus.play._
import play.api.inject.guice.GuiceApplicationBuilder
import play.api.test._
import play.shaded.ahc.org.asynchttpclient.AsyncHttpClient
import play.shaded.ahc.org.asynchttpclient.ws.WebSocket

import scala.compat.java8.FutureConverters
import scala.concurrent.Await
import scala.concurrent.duration._

// Verifies the chat endpoint's websocket origin check: mismatched origins are
// rejected, matching origins produce a working websocket.
class ChatControllerSpec extends PlaySpec with ScalaFutures with IntegrationPatience {

  "ChatController" should {

    "reject a websocket flow if the origin is set incorrectly" in WsTestClient.withClient { client =>
      // Pick a non standard port that will fail the (somewhat contrived) origin check...
      lazy val port: Int = 31337
      val app = new GuiceApplicationBuilder().build()
      Helpers.running(TestServer(port, app)) {
        val myPublicAddress = s"localhost:$port"
        val serverURL = s"ws://$myPublicAddress/chat"

        val asyncHttpClient: AsyncHttpClient = client.underlying[AsyncHttpClient]
        val webSocketClient = new WebSocketClient(asyncHttpClient)
        try {
          // Origin deliberately differs from the server URL, so the handshake
          // must be refused.
          val origin = "ws://example.com/ws/chat"
          val listener = new WebSocketClient.LoggingListener
          val completionStage = webSocketClient.call(serverURL, origin, listener)
          val f = FutureConverters.toScala(completionStage)
          // NOTE(review): `result` is never used; the rejection is observed
          // either via the listener or via the exceptions caught below.
          val result = Await.result(f, atMost = 1000 millis)
          listener.getThrowable mustBe a[IllegalStateException]
        } catch {
          case e: IllegalStateException =>
            e mustBe an [IllegalStateException]

          case e: java.util.concurrent.ExecutionException =>
            val foo = e.getCause
            foo mustBe an [IllegalStateException]
        }
      }
    }

    "accept a websocket flow if the origin is set correctly" in WsTestClient.withClient { client =>
      lazy val port: Int = Helpers.testServerPort
      val app = new GuiceApplicationBuilder().build()
      Helpers.running(TestServer(port, app)) {
        val myPublicAddress = s"localhost:$port"
        val serverURL = s"ws://$myPublicAddress/chat"

        val asyncHttpClient: AsyncHttpClient = client.underlying[AsyncHttpClient]
        val webSocketClient = new WebSocketClient(asyncHttpClient)
        // Origin equal to the server URL passes the check.
        val origin = serverURL
        val listener = new WebSocketClient.LoggingListener
        val completionStage = webSocketClient.call(serverURL, origin, listener)
        val f = FutureConverters.toScala(completionStage)
        whenReady(f, timeout = Timeout(1 second)) { webSocket =>
          webSocket mustBe a [WebSocket]
        }
      }
    }

  }
}
Example 14
Source File: EvaluateTestFixture.scala From ncdbg with BSD 3-Clause "New" or "Revised" License | 5 votes |
package com.programmaticallyspeaking.ncd.nashorn

import com.programmaticallyspeaking.ncd.host._
import com.programmaticallyspeaking.ncd.messaging.Observer
import com.programmaticallyspeaking.ncd.testing.UnitTest

import org.scalatest.concurrent.{IntegrationPatience, ScalaFutures}

import scala.collection.mutable
import scala.concurrent.{ExecutionContext, Promise}
import scala.util.{Failure, Success, Try}

// Fixture for evaluation tests: runs a script containing `debugger;`
// statements and hands each breakpoint hit to the next queued tester.
class EvaluateTestFixture extends UnitTest with NashornScriptHostTestFixture with ScalaFutures with IntegrationPatience {

  override implicit val executionContext: ExecutionContext = ExecutionContext.global

  // A tester inspects the host and the stack frames at one breakpoint.
  type Tester = (ScriptHost, Seq[StackFrame]) => Unit

  protected def evaluateInScript(script: String,
                                 unknownEventHandler: (ScriptEvent) => Unit = _ => {})(testers: Tester*): Unit = {
    assert(script.contains("debugger;"), "Script must contain a 'debugger' statement")
    assert(testers.nonEmpty, "Must have at least one tester")

    // One tester is consumed per HitBreakpoint, in order.
    val testerQueue = mutable.Queue[Tester](testers: _*)
    val donePromise = Promise[Unit]()
    val observer = Observer.from[ScriptEvent] {
      case bp: HitBreakpoint =>
        val host = getHost
        val next = testerQueue.dequeue()

        Try(next(host, bp.stackFrames)) match {
          case Success(_) =>
            // Resume and finish once every tester has run; a tester failure
            // fails the whole run immediately.
            host.resume()
            if (testerQueue.isEmpty) donePromise.success(())

          case Failure(t) =>
            donePromise.failure(t)
        }
      case x => unknownEventHandler(x)
    }
    observeAndRunScriptAsync(script, observer)(_ => donePromise.future)
  }

}
Example 15
Source File: ServiceBrokerIntegrationTest.scala From reactive-consul with MIT License | 5 votes |
package stormlantern.consul.client

import java.net.URL

import org.scalatest._
import org.scalatest.concurrent.{ Eventually, IntegrationPatience, ScalaFutures }
import stormlantern.consul.client.dao.akka.AkkaHttpConsulClient
import stormlantern.consul.client.dao.{ ConsulHttpClient, ServiceRegistration }
import stormlantern.consul.client.discovery.{ ConnectionProvider, ConnectionProviderFactory, ConnectionStrategy, ServiceDefinition }
import stormlantern.consul.client.loadbalancers.RoundRobinLoadBalancer
import stormlantern.consul.client.util.{ ConsulDockerContainer, Logging, TestActorSystem }

import scala.concurrent.Future

/**
 * Integration test: starts a Consul docker container, registers two instances
 * of the "consul-http" service, and verifies that a ServiceBroker configured
 * with a round-robin strategy can hand out a working connection.
 */
class ServiceBrokerIntegrationTest extends FlatSpec with Matchers with ScalaFutures with Eventually with IntegrationPatience
    with ConsulDockerContainer with TestActorSystem with Logging {

  import scala.concurrent.ExecutionContext.Implicits.global

  "The ServiceBroker" should "provide a usable connection to consul" in withConsulHost { (host, port) =>
    withActorSystem { implicit actorSystem =>
      val consulClient = new AkkaHttpConsulClient(new URL(s"http://$host:$port"))

      // Register the HTTP interface twice so the round-robin load balancer
      // has more than one candidate instance to rotate over.
      consulClient.putService(
        ServiceRegistration("consul-http", Some("consul-http-1"), address = Some(host), port = Some(port)))
      consulClient.putService(
        ServiceRegistration("consul-http", Some("consul-http-2"), address = Some(host), port = Some(port)))

      // Each discovered endpoint gets a provider backed by its own client.
      val providerFactory = new ConnectionProviderFactory {
        override def create(host: String, port: Int): ConnectionProvider = new ConnectionProvider {
          logger.info(s"Asked to create connection provider for $host:$port")
          val httpClient: ConsulHttpClient = new AkkaHttpConsulClient(new URL(s"http://$host:$port"))
          override def getConnection: Future[Any] = Future.successful(httpClient)
        }
      }

      val strategy = ConnectionStrategy(ServiceDefinition("consul-http"), providerFactory, new RoundRobinLoadBalancer)
      val broker = ServiceBroker(actorSystem, consulClient, Set(strategy))

      // Service discovery is asynchronous; retry until the broker can
      // actually serve a usable connection.
      eventually {
        broker.withService("consul-http") { connection: ConsulHttpClient =>
          connection.getService("bogus").map(_.resource should have size 0)
        }
        broker
      }
    }
  }
}
Example 16
Source File: ITTestDynamoDB.scala From aws-spi-akka-http with Apache License 2.0 | 5 votes |
package com.github.matsluni.akkahttpspi.dynamodb

import com.github.matsluni.akkahttpspi.{AkkaHttpAsyncHttpService, TestBase}
import org.scalatest.concurrent.{Eventually, Futures, IntegrationPatience}
import org.scalatest.wordspec.AnyWordSpec
import org.scalatest.matchers.should.Matchers
import software.amazon.awssdk.services.dynamodb.DynamoDbAsyncClient
import software.amazon.awssdk.services.dynamodb.model._
import org.scalatest.concurrent.ScalaFutures._

import scala.compat.java8.FutureConverters._

/**
 * Integration test driving DynamoDB through the Akka HTTP implementation of
 * the AWS SDK async HTTP SPI.
 */
class ITTestDynamoDB extends AnyWordSpec with Matchers with Futures with Eventually with IntegrationPatience with TestBase {

  // Loan-pattern fixture: builds a DynamoDB client wired to the Akka HTTP
  // transport and guarantees both transport and client are closed afterwards.
  def withClient(testCode: DynamoDbAsyncClient => Any): Any = {
    val akkaClient = new AkkaHttpAsyncHttpService().createAsyncHttpClientFactory().build()

    val client = DynamoDbAsyncClient
      .builder()
      .credentialsProvider(credentialProviderChain)
      .region(defaultRegion)
      .httpClient(akkaClient)
      .build()

    try testCode(client)
    finally {
      // clean up
      akkaClient.close()
      client.close()
    }
  }

  "DynamoDB" should {
    "create a table" in withClient { implicit client =>
      val tableName = s"Movies-${randomIdentifier(5)}"

      // Single string attribute "film_id" used as the table's hash key,
      // with minimal provisioned throughput.
      val filmIdAttribute =
        AttributeDefinition.builder.attributeName("film_id").attributeType(ScalarAttributeType.S).build()
      val hashKey =
        KeySchemaElement.builder.attributeName("film_id").keyType(KeyType.HASH).build()
      val throughput =
        ProvisionedThroughput.builder.readCapacityUnits(1L).writeCapacityUnits(1L).build()

      val createRequest = CreateTableRequest
        .builder()
        .tableName(tableName)
        .attributeDefinitions(filmIdAttribute)
        .keySchema(hashKey)
        .provisionedThroughput(throughput)
        .build()

      val createResult = client.createTable(createRequest).join
      createResult.tableDescription().tableName() should be(tableName)

      // Table creation completes asynchronously server-side; poll until ACTIVE.
      eventually {
        val describeResult =
          client.describeTable(DescribeTableRequest.builder().tableName(tableName).build()).toScala
        describeResult.futureValue.table().tableStatus() should be(TableStatus.ACTIVE)
      }

      client.deleteTable(DeleteTableRequest.builder().tableName(tableName).build()).toScala
    }
  }
}
Example 17
Source File: ResponseMatchers.scala From http-verbs with Apache License 2.0 | 5 votes |
package uk.gov.hmrc.play.http.test

import org.scalatest.concurrent.{IntegrationPatience, ScalaFutures}
import org.scalatest.matchers.{HavePropertyMatchResult, HavePropertyMatcher}
import play.api.libs.json._
import play.api.libs.ws.WSResponse

import scala.concurrent.Future

/**
 * ScalaTest property matchers for asynchronous WS responses.
 */
trait ResponseMatchers extends ScalaFutures with IntegrationPatience {

  /**
   * Matches when the response body, parsed as JSON, has a defined value at
   * the given path.
   *
   * Example: `response should have(jsonProperty(__ \ "token"))`
   *
   * @param path JSON path to probe in the response body
   */
  def jsonProperty(path: JsPath) = new HavePropertyMatcher[Future[WSResponse], JsValue] {
    def apply(response: Future[WSResponse]) = {
      // Fix: resolve the future and run the (unsafe `.get`) validation once,
      // instead of repeating the whole lookup for `matches` and `actualValue`.
      // `readNullable` yields None when the path is absent, so `.get` only
      // throws on genuinely malformed JSON at that path.
      val valueAtPath: Option[JsValue] =
        response.futureValue.json.validate(path.readNullable[JsValue]).get

      HavePropertyMatchResult(
        matches = valueAtPath.isDefined,
        propertyName = "Response JSON at path " + path,
        expectedValue = JsString("defined"),
        actualValue = valueAtPath.getOrElse(JsNull)
      )
    }
  }
}

object ResponseMatchers extends ResponseMatchers
Example 18
Source File: CompressionSpec.scala From chronicler with Apache License 2.0 | 5 votes |
package com.github.fsanaulla.chronicler.akka

import java.nio.file.Paths

import akka.actor.ActorSystem
import akka.testkit.TestKit
import com.github.fsanaulla.chronicler.akka.io.{AkkaDatabaseApi, InfluxIO}
import com.github.fsanaulla.chronicler.akka.management.InfluxMng
import com.github.fsanaulla.chronicler.akka.shared.InfluxConfig
import com.github.fsanaulla.chronicler.testing.it.DockerizedInfluxDB
import org.scalatest.concurrent.{IntegrationPatience, ScalaFutures}
import org.scalatest.{FlatSpecLike, Matchers}

import scala.concurrent.ExecutionContextExecutor

/**
 * Verifies that the Akka-based chronicler client can write a large batch to a
 * dockerized InfluxDB with request compression enabled (compress = true).
 */
class CompressionSpec
    extends TestKit(ActorSystem())
    with FlatSpecLike
    with Matchers
    with DockerizedInfluxDB
    with ScalaFutures
    with IntegrationPatience {

  override def afterAll(): Unit = {
    // Release both clients before tearing down the actor system.
    mng.close()
    io.close()
    TestKit.shutdownActorSystem(system)
    super.afterAll()
  }

  implicit val ec: ExecutionContextExecutor = system.dispatcher

  val testDB = "db"

  lazy val influxConf =
    InfluxConfig(host, port, credentials = Some(creds), compress = true)
  lazy val mng = InfluxMng(host, port, credentials = Some(creds))
  lazy val io = InfluxIO(influxConf)
  lazy val db: AkkaDatabaseApi = io.database(testDB)

  it should "write data from file" in {
    val createStatus = mng.createDatabase(testDB).futureValue
    createStatus.right.get shouldEqual 200

    val batchFile = Paths.get(getClass.getResource("/large_batch.txt").getPath)
    val writeStatus = db.writeFromFile(batchFile).futureValue
    writeStatus.right.get shouldEqual 204

    val readResult = db.readJson("SELECT * FROM test1").futureValue
    readResult.right.get.length shouldEqual 10000
  }
}
Example 19
Source File: CompressionSpec.scala From chronicler with Apache License 2.0 | 5 votes |
package com.github.fsanaulla.chronicler.urlhttp

import java.nio.file.Paths

import com.github.fsanaulla.chronicler.testing.it.DockerizedInfluxDB
import com.github.fsanaulla.chronicler.urlhttp.io.{InfluxIO, UrlIOClient}
import com.github.fsanaulla.chronicler.urlhttp.management.{InfluxMng, UrlManagementClient}
import org.scalatest.concurrent.{Eventually, IntegrationPatience}
import org.scalatest.{FlatSpec, Matchers}

/**
 * Verifies the synchronous (Try-based) URL chronicler client against a
 * dockerized InfluxDB with request compression enabled.
 */
class CompressionSpec extends FlatSpec with Matchers with DockerizedInfluxDB with Eventually with IntegrationPatience {

  override def afterAll(): Unit = {
    mng.close()
    io.close()
    super.afterAll()
  }

  val testDB = "db"

  lazy val mng: UrlManagementClient = InfluxMng(s"http://$host", port, Some(creds))
  lazy val io: UrlIOClient = InfluxIO(s"http://$host", port, Some(creds), compress = true)
  lazy val db: io.Database = io.database(testDB)

  it should "ping database" in {
    // The container may still be starting; retry the ping until it answers.
    eventually {
      val pingResult = io.ping.get
      pingResult.right.get.version shouldEqual version
    }
  }

  it should "write data from file" in {
    val createStatus = mng.createDatabase(testDB).get
    createStatus.right.get shouldEqual 200

    val batchFile = Paths.get(getClass.getResource("/large_batch.txt").getPath)
    val writeStatus = db.writeFromFile(batchFile).get
    writeStatus.right.get shouldEqual 204

    val readResult = db.readJson("SELECT * FROM test1").get
    readResult.right.get.length shouldEqual 10000
  }
}
Example 20
Source File: CompressionSpec.scala From chronicler with Apache License 2.0 | 5 votes |
package com.github.fsanaulla.chronicler.ahc.io.it

import java.nio.file.Paths

import com.github.fsanaulla.chronicler.ahc.io.InfluxIO
import com.github.fsanaulla.chronicler.ahc.management.InfluxMng
import com.github.fsanaulla.chronicler.ahc.shared.Uri
import com.github.fsanaulla.chronicler.core.alias.Id
import com.github.fsanaulla.chronicler.core.api.DatabaseApi
import com.github.fsanaulla.chronicler.testing.it.DockerizedInfluxDB
import org.asynchttpclient.Response
import org.scalatest.concurrent.{Eventually, IntegrationPatience, ScalaFutures}
import org.scalatest.{FlatSpec, Matchers}

import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.Future

/**
 * Verifies the async-http-client based chronicler client against a
 * dockerized InfluxDB with request compression enabled.
 */
class CompressionSpec
    extends FlatSpec
    with Matchers
    with DockerizedInfluxDB
    with ScalaFutures
    with Eventually
    with IntegrationPatience {

  override def afterAll(): Unit = {
    mng.close()
    io.close()
    super.afterAll()
  }

  val testDB = "db"

  lazy val mng = InfluxMng(host, port, Some(creds), None)
  lazy val io = InfluxIO(host, port, Some(creds), compress = true)
  lazy val db: DatabaseApi[Future, Id, Response, Uri, String] = io.database(testDB)

  it should "ping database" in {
    // The container may still be starting; retry the ping until it answers.
    eventually {
      val pingResult = io.ping.futureValue
      pingResult.right.get.version shouldEqual version
    }
  }

  it should "write data from file" in {
    val createStatus = mng.createDatabase(testDB).futureValue
    createStatus.right.get shouldEqual 200

    val batchFile = Paths.get(getClass.getResource("/large_batch.txt").getPath)
    val writeStatus = db.writeFromFile(batchFile).futureValue
    writeStatus.right.get shouldEqual 204

    val readResult = db.readJson("SELECT * FROM test1").futureValue
    readResult.right.get.length shouldEqual 10000
  }
}
Example 21
Source File: AppSpec.scala From slim-play with MIT License | 5 votes |
import org.scalatest.{FunSpec, Matchers}
import org.scalatest.concurrent.{IntegrationPatience, ScalaFutures}

/**
 * End-to-end check of the /hello/$name route against the running test server
 * provided by OneServerPerSuiteWithMyComponents.
 */
class AppSpec extends FunSpec with OneServerPerSuiteWithMyComponents with Matchers with ScalaFutures with IntegrationPatience {

  private lazy val ws = components.wsClient

  describe("/hello/$name") {
    it("""should respond with "Hello $name"""") {
      val request = ws.url(s"http://localhost:$port/hello/joe").get()
      whenReady(request) { response =>
        response.body shouldBe "Hello joe"
      }
    }
  }
}
Example 22
Source File: EmbeddedKafkaSpecSupport.scala From embedded-kafka with MIT License | 5 votes |
package net.manub.embeddedkafka

import java.net.{InetAddress, Socket}

import net.manub.embeddedkafka.EmbeddedKafkaSpecSupport.{
  Available,
  NotAvailable,
  ServerStatus
}
import org.scalatest.Assertion
import org.scalatest.concurrent.{Eventually, IntegrationPatience}
import org.scalatest.matchers.should.Matchers
import org.scalatest.time.{Milliseconds, Seconds, Span}
import org.scalatest.wordspec.AnyWordSpecLike

import scala.util.{Failure, Success, Try}

/**
 * Spec support that probes a local TCP port to decide whether a server
 * (e.g. an embedded Kafka broker) is up, retrying via `eventually`.
 */
trait EmbeddedKafkaSpecSupport
    extends AnyWordSpecLike
    with Matchers
    with Eventually
    with IntegrationPatience {

  // Tighter patience than the IntegrationPatience default: poll every 100ms,
  // give up after 1s.
  implicit val config: PatienceConfig =
    PatienceConfig(Span(1, Seconds), Span(100, Milliseconds))

  /**
   * Asserts that the server on `port` eventually reaches `expectedStatus`.
   */
  def expectedServerStatus(port: Int, expectedStatus: ServerStatus): Assertion =
    eventually {
      status(port) shouldBe expectedStatus
    }

  // Probes `localhost:port`: a successful connect means something is listening.
  private def status(port: Int): ServerStatus = {
    Try(new Socket(InetAddress.getByName("localhost"), port)) match {
      case Failure(_) => NotAvailable
      case Success(socket) =>
        // Fix: close the probe socket. It was previously leaked on every
        // successful connect, and this method runs repeatedly under
        // `eventually`, leaking one file descriptor per poll.
        socket.close()
        Available
    }
  }
}

object EmbeddedKafkaSpecSupport {
  sealed trait ServerStatus
  case object Available extends ServerStatus
  case object NotAvailable extends ServerStatus
}
Example 23
Source File: FilterTest.scala From naptime with Apache License 2.0 | 5 votes |
package org.coursera.naptime.ari.graphql.controllers.filters

import org.coursera.naptime.ari.graphql.GraphqlSchemaProvider
import org.coursera.naptime.ari.graphql.Models
import org.coursera.naptime.ari.graphql.SangriaGraphQlContext
import org.coursera.naptime.ari.graphql.SangriaGraphQlSchemaBuilder
import org.coursera.naptime.ari.graphql.models.MergedCourse
import org.coursera.naptime.ari.graphql.models.MergedInstructor
import org.coursera.naptime.ari.graphql.models.MergedPartner
import org.mockito.Mockito.when
import org.scalatest.concurrent.IntegrationPatience
import org.scalatest.concurrent.ScalaFutures
import org.scalatest.junit.AssertionsForJUnit
import org.scalatest.mockito.MockitoSugar
import play.api.libs.json.Json
import play.api.test.FakeRequest
import sangria.parser.QueryParser
import sangria.schema.Schema

import scala.concurrent.Future

/**
 * Base trait for GraphQL filter tests. A concrete subclass supplies the
 * `filter` under test; this trait provides a generated schema, a mocked
 * schema provider, and helpers to build queries and run the filter chain.
 */
trait FilterTest
    extends AssertionsForJUnit
    with MockitoSugar
    with ScalaFutures
    with IntegrationPatience {

  // Empty response used as the terminal result of the stub filter chain.
  val baseOutgoingQuery = OutgoingQuery(Json.obj(), None)

  // Pass-through tail filter: ignores the query and returns the base response.
  def noopFilter(incomingQuery: IncomingQuery) = {
    Future.successful(baseOutgoingQuery)
  }

  // Tail filter that fails the test if invoked; used to assert that the
  // filter under test did NOT propagate the query down the chain.
  def exceptionThrowingFilter(incomingQuery: IncomingQuery): Future[OutgoingQuery] = {
    assert(false, "This filter should not be run")
    Future.successful(baseOutgoingQuery)
  }

  // The filter under test; provided by the concrete test class.
  val filter: Filter

  // Minimal introspection query used as the default test payload.
  val defaultQuery =
    """
      |query {
      |  __schema {
      |    queryType {
      |      name
      |    }
      |  }
      |}
    """.stripMargin

  val graphqlSchemaProvider = mock[GraphqlSchemaProvider]

  val allResources = Set(Models.courseResource, Models.instructorResource, Models.partnersResource)

  val schemaTypes = Map(
    "org.coursera.naptime.ari.graphql.models.MergedCourse" -> MergedCourse.SCHEMA,
    "org.coursera.naptime.ari.graphql.models.MergedPartner" -> MergedPartner.SCHEMA,
    "org.coursera.naptime.ari.graphql.models.MergedInstructor" -> MergedInstructor.SCHEMA)

  val builder = new SangriaGraphQlSchemaBuilder(allResources, schemaTypes)

  val schema = builder.generateSchema().data.asInstanceOf[Schema[SangriaGraphQlContext, Any]]
  // Stub the provider AFTER building the schema so the mock returns it.
  when(graphqlSchemaProvider.schema).thenReturn(schema)

  // Builds an IncomingQuery (parsed document + fake POST request) from a raw
  // query string; parsing failures surface via QueryParser's Try `.get`.
  def generateIncomingQuery(query: String = defaultQuery) = {
    val document = QueryParser.parse(query).get
    val header = FakeRequest("POST", s"/graphql").withBody(query)
    val variables = Json.obj()
    val operation = None
    IncomingQuery(document, header, variables, operation, debugMode = false)
  }

  // Runs the filter under test with a pass-through tail.
  def run(incomingQuery: IncomingQuery): Future[OutgoingQuery] = {
    filter.apply(noopFilter)(incomingQuery)
  }

  // Runs the filter under test with a failing tail, asserting the query is
  // short-circuited rather than forwarded.
  def ensureNotPropagated(incomingQuery: IncomingQuery): Future[OutgoingQuery] = {
    filter.apply(exceptionThrowingFilter)(incomingQuery)
  }
}
Example 24
Source File: KafkaSpecBase.scala From openwhisk with Apache License 2.0 | 5 votes |
package org.apache.openwhisk.core.monitoring.metrics

import akka.kafka.testkit.scaladsl.{EmbeddedKafkaLike, ScalatestKafkaSpec}
import akka.stream.ActorMaterializer
import net.manub.embeddedkafka.EmbeddedKafka
import org.scalatest._
import org.scalatest.concurrent.{Eventually, IntegrationPatience, ScalaFutures}

import scala.concurrent.duration.{DurationInt, FiniteDuration}

/**
 * Common base for Kafka-backed metric tests: embedded Kafka on port 6065
 * plus generous timeouts suited to broker startup and topic creation.
 */
abstract class KafkaSpecBase
    extends ScalatestKafkaSpec(6065)
    with Matchers
    with ScalaFutures
    with FlatSpecLike
    with EmbeddedKafka
    with EmbeddedKafkaLike
    with IntegrationPatience
    with Eventually
    with EventsTestHelper { this: Suite =>

  // Override IntegrationPatience's default with an even longer 1-minute window.
  implicit val timeoutConfig: PatienceConfig = PatienceConfig(1.minute)
  implicit val materializer: ActorMaterializer = ActorMaterializer()

  // Give produced records extra time to land before consuming.
  override val sleepAfterProduce: FiniteDuration = 10.seconds
  // Topic creation against the embedded broker can be slow on CI machines.
  override protected val topicCreationTimeout = 60.seconds
}
Example 25
Source File: NamespaceBlacklistTests.scala From openwhisk with Apache License 2.0 | 5 votes |
package org.apache.openwhisk.core.invoker.test

import akka.stream.ActorMaterializer
import common.{StreamLogging, WskActorSystem}
import org.junit.runner.RunWith
import org.scalatest.concurrent.{IntegrationPatience, ScalaFutures}
import org.scalatest.junit.JUnitRunner
import org.scalatest.{FlatSpec, Matchers}
import spray.json._
import org.apache.openwhisk.common.TransactionId
import org.apache.openwhisk.core.database.test.DbUtils
import org.apache.openwhisk.core.entity._
import org.apache.openwhisk.core.invoker.NamespaceBlacklist
import org.apache.openwhisk.utils.{retry => testRetry}

import scala.concurrent.duration._

/**
 * Tests that NamespaceBlacklist marks namespaces as blocked when their limits
 * are zero, or when the owning subject is explicitly blocked in the auth store.
 */
@RunWith(classOf[JUnitRunner])
class NamespaceBlacklistTests
    extends FlatSpec
    with Matchers
    with DbUtils
    with ScalaFutures
    with IntegrationPatience
    with WskActorSystem
    with StreamLogging {

  behavior of "NamespaceBlacklist"

  implicit val materializer = ActorMaterializer()
  implicit val tid = TransactionId.testing

  val authStore = WhiskAuthStore.datastore()

  // Limit documents: the first two have a zero limit (=> blacklisted), the
  // third has nonzero limits (=> not blacklisted).
  val limitsAndAuths = Seq(
    new LimitEntity(EntityName("testnamespace1"), UserLimits(invocationsPerMinute = Some(0))),
    new LimitEntity(EntityName("testnamespace2"), UserLimits(concurrentInvocations = Some(0))),
    new LimitEntity(
      EntityName("testnamespace3"),
      UserLimits(invocationsPerMinute = Some(1), concurrentInvocations = Some(1))))

  val uuid4 = UUID()
  val uuid5 = UUID()
  val ak4 = BasicAuthenticationAuthKey(uuid4, Secret())
  val ak5 = BasicAuthenticationAuthKey(uuid5, Secret())
  val ns4 = Namespace(EntityName("different1"), uuid4)
  val ns5 = Namespace(EntityName("different2"), uuid5)

  // A blocked subject: all of its namespaces must end up blacklisted too.
  val blockedSubject = new ExtendedAuth(Subject(), Set(WhiskNamespace(ns4, ak4), WhiskNamespace(ns5, ak5)), true)

  // 2 zero-limit namespaces + every namespace of the blocked subject.
  val blockedNamespacesCount = 2 + blockedSubject.namespaces.size

  private def authToIdentities(auth: WhiskAuth): Set[Identity] = {
    auth.namespaces.map { ns =>
      Identity(auth.subject, ns.namespace, ns.authkey)
    }
  }

  // Derives an Identity from a limit document; the doc id has the form
  // "<namespace>/limits", so strip the suffix to recover the namespace name.
  private def limitToIdentity(limit: LimitEntity): Identity = {
    val namespace = limit.docid.id.dropRight("/limits".length)
    Identity(limit.subject, Namespace(EntityName(namespace), UUID()), BasicAuthenticationAuthKey(UUID(), Secret()))
  }

  override def beforeAll() = {
    limitsAndAuths foreach (put(authStore, _))
    put(authStore, blockedSubject)
    // Block until the view reflects all expected blacklist entries.
    waitOnView(authStore, blockedNamespacesCount, NamespaceBlacklist.view)
  }

  override def afterAll() = {
    cleanup()
    super.afterAll()
  }

  it should "mark a namespace as blocked if limit is 0 in database or if one of its subjects is blocked" in {
    val blacklist = new NamespaceBlacklist(authStore)

    // The view may lag; retry the refresh until the expected count appears.
    testRetry({
      blacklist.refreshBlacklist().futureValue should have size blockedNamespacesCount
    }, 60, Some(1.second))

    limitsAndAuths.map(limitToIdentity).map(blacklist.isBlacklisted) shouldBe Seq(true, true, false)
    authToIdentities(blockedSubject).toSeq.map(blacklist.isBlacklisted) shouldBe Seq(true, true)
  }

  // Test-only auth document that carries a UserLimits payload under the
  // conventional "<name>/limits" doc id.
  class LimitEntity(name: EntityName, limits: UserLimits) extends WhiskAuth(Subject(), namespaces = Set.empty) {
    override def docid = DocId(s"${name.name}/limits")

    override def toJson = UserLimits.serdes.write(limits).asJsObject
  }

  // Test-only auth document with an explicit "blocked" flag in its JSON form.
  class ExtendedAuth(subject: Subject, namespaces: Set[WhiskNamespace], blocked: Boolean)
      extends WhiskAuth(subject, namespaces) {
    override def toJson = JsObject(super.toJson.fields + ("blocked" -> JsBoolean(blocked)))
  }
}
Example 26
Source File: ActivationStoreBehaviorBase.scala From openwhisk with Apache License 2.0 | 5 votes |
package org.apache.openwhisk.core.database.test.behavior

import java.time.Instant

import akka.stream.ActorMaterializer
import common.{StreamLogging, WskActorSystem}
import org.apache.openwhisk.common.TransactionId
import org.apache.openwhisk.core.database.{ActivationStore, CacheChangeNotification, UserContext}
import org.apache.openwhisk.core.database.test.behavior.ArtifactStoreTestUtil.storeAvailable
import org.apache.openwhisk.core.entity._
import org.scalatest.concurrent.{IntegrationPatience, ScalaFutures}
import org.scalatest.{BeforeAndAfterEach, FlatSpec, Matchers, Outcome}

import scala.collection.mutable.ListBuffer
import scala.concurrent.Await
import scala.concurrent.duration.Duration
import scala.concurrent.duration.DurationInt
import scala.language.postfixOps
import scala.util.{Random, Try}

/**
 * Shared behavior base for ActivationStore implementations. Concrete specs
 * supply the store, a user context, and a store type label; this trait tracks
 * stored documents and deletes them after each test.
 */
trait ActivationStoreBehaviorBase
    extends FlatSpec
    with ScalaFutures
    with Matchers
    with StreamLogging
    with WskActorSystem
    with IntegrationPatience
    with BeforeAndAfterEach {

  protected implicit val materializer: ActorMaterializer = ActorMaterializer()
  protected implicit val notifier: Option[CacheChangeNotification] = None

  def context: UserContext
  def activationStore: ActivationStore

  // Documents created via store(); drained best-effort in cleanup().
  private val docsToDelete = ListBuffer[(UserContext, ActivationId)]()

  // Human-readable label of the store under test (used in skip messages).
  def storeType: String

  protected def transId() = TransactionId(Random.alphanumeric.take(32).mkString)

  override def afterEach(): Unit = {
    cleanup()
    stream.reset()
  }

  // Skip the test when the store is unavailable; dump captured logs on failure.
  override protected def withFixture(test: NoArgTest): Outcome = {
    assume(storeAvailable(storeAvailableCheck), s"$storeType not configured or available")
    val outcome = super.withFixture(test)
    if (outcome.isFailed) {
      println(logLines.mkString("\n"))
    }
    outcome
  }

  protected def storeAvailableCheck: Try[Any] = Try(true)
  //~----------------------------------------< utility methods >

  /**
   * Stores an activation and records it for cleanup after the test.
   */
  protected def store(activation: WhiskActivation, context: UserContext)(
    implicit transid: TransactionId,
    notifier: Option[CacheChangeNotification]): DocInfo = {
    val doc = activationStore.store(activation, context).futureValue
    docsToDelete.append((context, ActivationId(activation.docid.asString)))
    doc
  }

  /**
   * Builds an activation with a fixed 1-second duration starting at `start`.
   */
  protected def newActivation(ns: String, actionName: String, start: Long): WhiskActivation = {
    WhiskActivation(
      EntityPath(ns),
      EntityName(actionName),
      Subject(),
      ActivationId.generate(),
      Instant.ofEpochMilli(start),
      Instant.ofEpochMilli(start + 1000))
  }

  /**
   * Best-effort deletion of every tracked document. Failures are deliberately
   * swallowed (via Try) so one missing doc does not abort the rest of cleanup.
   */
  def cleanup()(implicit timeout: Duration = 10 seconds): Unit = {
    implicit val tid: TransactionId = transId()
    // Fix: use foreach (side effects only) instead of map, whose built result
    // was silently discarded; destructure the tuple instead of _1/_2 access.
    docsToDelete.foreach {
      case (ctx, activationId) =>
        Try {
          Await.result(activationStore.delete(activationId, ctx), timeout)
        }
    }
    docsToDelete.clear()
  }
}
Example 27
Source File: DatabaseScriptTestUtils.scala From openwhisk with Apache License 2.0 | 5 votes |
package org.apache.openwhisk.core.database.test

import scala.concurrent.duration.DurationInt
import scala.io.Source

import org.scalatest.Matchers
import org.scalatest.concurrent.IntegrationPatience
import org.scalatest.concurrent.ScalaFutures

import akka.actor.ActorSystem
import common.WaitFor
import common.WhiskProperties
import pureconfig._
import pureconfig.generic.auto._
import spray.json._
import spray.json.DefaultJsonProtocol._
import org.apache.openwhisk.common.Logging
import org.apache.openwhisk.core.ConfigKeys
import org.apache.openwhisk.core.WhiskConfig
import org.apache.openwhisk.core.database.CouchDbRestClient
import org.apache.openwhisk.core.database.CouchDbConfig

/**
 * Helpers for tests that exercise database maintenance scripts: CouchDB
 * connection settings loaded from configuration, plus retry/wait utilities.
 */
trait DatabaseScriptTestUtils extends ScalaFutures with Matchers with WaitFor with IntegrationPatience {

  // Connection coordinates for a CouchDB instance.
  case class DatabaseUrl(dbProtocol: String, dbUsername: String, dbPassword: String, dbHost: String, dbPort: String) {
    // Full URL including credentials; for use by scripts/clients.
    def url = s"$dbProtocol://$dbUsername:$dbPassword@$dbHost:$dbPort"

    // URL without credentials; safe for logging.
    def safeUrl = s"$dbProtocol://$dbHost:$dbPort"
  }

  val python = WhiskProperties.python
  val config = loadConfigOrThrow[CouchDbConfig](ConfigKeys.couchdb)
  val dbProtocol = config.protocol
  val dbHost = config.host
  val dbPort = config.port
  val dbUsername = config.username
  val dbPassword = config.password
  val dbPrefix = WhiskProperties.getProperty(WhiskConfig.dbPrefix)
  val dbUrl = DatabaseUrl(dbProtocol, dbUsername, dbPassword, dbHost, dbPort.toString)

  // Retries `task` up to 10 times with a 500ms pause between attempts.
  def retry[T](task: => T) = org.apache.openwhisk.utils.retry(task, 10, Some(500.milliseconds))

  /**
   * Polls the given design-doc view until it returns exactly `numDocuments`
   * rows, waiting at most 2 minutes.
   */
  def waitForView(db: CouchDbRestClient, designDoc: String, viewName: String, numDocuments: Int) = {
    waitfor(() => {
      val view = db.executeView(designDoc, viewName)().futureValue
      view shouldBe 'right
      view.right.get.fields("rows").convertTo[List[JsObject]].length == numDocuments
    }, totalWait = 2.minutes)
  }
}
Example 28
Source File: RuncClientTests.scala From openwhisk with Apache License 2.0 | 5 votes |
package org.apache.openwhisk.core.containerpool.docker.test

import akka.actor.ActorSystem

import scala.concurrent.Future
import org.junit.runner.RunWith
import org.scalatest.FlatSpec
import org.scalatest.junit.JUnitRunner

import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.ExecutionContext
import scala.concurrent.duration._
import org.scalatest.Matchers
import org.apache.openwhisk.core.containerpool.docker.RuncClient
import common.{StreamLogging, WskActorSystem}
import org.apache.openwhisk.core.containerpool.ContainerId
import org.apache.openwhisk.common.TransactionId
import org.scalatest.BeforeAndAfterEach
import org.scalatest.concurrent.{IntegrationPatience, ScalaFutures}
import org.apache.openwhisk.common.LogMarker
import org.apache.openwhisk.common.LoggingMarkers.INVOKER_RUNC_CMD

/**
 * Tests that RuncClient commands (pause/resume) emit the expected start/end
 * log markers for both successful and failed invocations.
 */
@RunWith(classOf[JUnitRunner])
class RuncClientTests
    extends FlatSpec
    with Matchers
    with StreamLogging
    with BeforeAndAfterEach
    with WskActorSystem
    with ScalaFutures
    with IntegrationPatience {

  // Each test inspects the captured log stream, so start from a clean slate.
  override def beforeEach = stream.reset()

  implicit val transid = TransactionId.testing
  val id = ContainerId("Id")
  val runcCommand = "docker-runc"

  /**
   * Verifies the captured log lines for a runc command: the first line must
   * carry the start marker and the last line the finish marker (or the error
   * marker when `failed` is true).
   */
  def verifyLogs(cmd: String, failed: Boolean = false) = {
    logLines.head should include(s"${runcCommand} ${cmd} ${id.asString}")

    // start log maker must be found
    val start = LogMarker.parse(logLines.head)
    start.token.toStringWithSubAction should be(INVOKER_RUNC_CMD(cmd).toStringWithSubAction)

    // end log marker must be found
    val expectedEnd = if (failed) INVOKER_RUNC_CMD(cmd).asError else INVOKER_RUNC_CMD(cmd).asFinish
    val end = LogMarker.parse(logLines.last)
    end.token.toStringWithSubAction shouldBe expectedEnd.toStringWithSubAction
  }

  behavior of "RuncClient"

  // Exercise pause and resume symmetrically; `runcClient` and `runcProxy`
  // are helpers defined elsewhere in this test file (not shown in this
  // excerpt) — runcClient builds a client with a stubbed command result,
  // runcProxy dispatches `cmd` on it.
  Seq("pause", "resume").foreach { cmd =>
    it should s"$cmd a container successfully and create log entries" in {
      val rc = runcClient { Future.successful("") }
      runcProxy(rc, cmd).futureValue
      verifyLogs(cmd)
    }

    it should s"write error markers when $cmd fails" in {
      val rc = runcClient { Future.failed(new RuntimeException()) }
      a[RuntimeException] should be thrownBy runcProxy(rc, cmd).futureValue
      verifyLogs(cmd, failed = true)
    }
  }
}