org.scalatest.Suite Scala Examples

The following examples show how to use org.scalatest.Suite. You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example.
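Most of the examples below follow the same idiom: a reusable helper trait declares a self-type of org.scalatest.Suite (written as this: Suite => or self: Suite =>) and overrides the BeforeAndAfterAll or BeforeAndAfterEach lifecycle hooks, so any concrete suite that mixes it in inherits the setup and teardown automatically. A minimal sketch of that pattern (the trait and resource names are invented for illustration, not taken from any of the projects below):

import org.scalatest.{BeforeAndAfterAll, Suite}

trait TempResourceHelper extends BeforeAndAfterAll { this: Suite =>
  // The self-type guarantees this trait can only be mixed into a ScalaTest Suite.
  protected var resource: String = _

  override def beforeAll(): Unit = {
    resource = "acquired" // acquire the shared resource once per suite
    super.beforeAll()
  }

  override def afterAll(): Unit = {
    try super.afterAll()
    finally resource = null // release it after all tests have run
  }
}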
Example 1
Source File: WireMockHelper.scala    From pertax-frontend   with Apache License 2.0 6 votes
package util

import com.github.tomakehurst.wiremock.WireMockServer
import com.github.tomakehurst.wiremock.core.WireMockConfiguration.wireMockConfig
import org.scalatest.{BeforeAndAfterAll, BeforeAndAfterEach, Suite}

trait WireMockHelper extends BeforeAndAfterAll with BeforeAndAfterEach {
  this: Suite =>

  protected val server: WireMockServer = new WireMockServer(wireMockConfig().dynamicPort())

  override def beforeAll(): Unit = {
    server.start()
    super.beforeAll()
  }

  override def beforeEach(): Unit = {
    server.resetAll()
    super.beforeEach()
  }

  override def afterAll(): Unit = {
    super.afterAll()
    server.stop()
  }
} 
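A typical consumer mixes the trait into a concrete spec and talks to the embedded server through its dynamically assigned port. The following sketch is illustrative only (the spec class, endpoint and ScalaTest flavour are assumptions, not part of pertax-frontend); the stubbing DSL comes from com.github.tomakehurst.wiremock.client.WireMock:

package util

import com.github.tomakehurst.wiremock.client.WireMock._
import org.scalatest.flatspec.AnyFlatSpec
import org.scalatest.matchers.should.Matchers

class PingConnectorSpec extends AnyFlatSpec with Matchers with WireMockHelper {

  "the stubbed endpoint" should "answer on the embedded server" in {
    // Register a stub on the server started in beforeAll(); stubs are reset before each test.
    server.stubFor(
      get(urlEqualTo("/ping")).willReturn(aResponse().withStatus(200).withBody("pong"))
    )

    // The connector under test would be pointed at s"http://localhost:${server.port()}".
    server.port() should be > 0
  }
}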
Example 2
Source File: KafkaSpec.scala    From kmq   with Apache License 2.0 6 votes
package com.softwaremill.kmq.redelivery.infrastructure

import net.manub.embeddedkafka.{EmbeddedKafka, EmbeddedKafkaConfig}
import org.apache.kafka.common.serialization.StringDeserializer
import org.scalatest.{BeforeAndAfterEach, Suite}

trait KafkaSpec extends BeforeAndAfterEach { self: Suite =>

  val testKafkaConfig = EmbeddedKafkaConfig(9092, 2182)
  private implicit val stringDeserializer = new StringDeserializer()

  def sendToKafka(topic: String, message: String): Unit = {
    EmbeddedKafka.publishStringMessageToKafka(topic, message)(testKafkaConfig)
  }

  def consumeFromKafka(topic: String): String = {
    EmbeddedKafka.consumeFirstStringMessageFrom(topic)(testKafkaConfig)
  }

  override def beforeEach(): Unit = {
    super.beforeEach()
    EmbeddedKafka.start()(testKafkaConfig)
  }

  override def afterEach(): Unit = {
    super.afterEach()
    EmbeddedKafka.stop()
  }
} 
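Concrete specs mix KafkaSpec in and rely on beforeEach/afterEach to start and stop the embedded broker around every test. A minimal hypothetical usage (the class, topic and ScalaTest flavour are invented, not part of kmq):

package com.softwaremill.kmq.redelivery.infrastructure

import org.scalatest.flatspec.AnyFlatSpec
import org.scalatest.matchers.should.Matchers

class KafkaRoundTripSpec extends AnyFlatSpec with Matchers with KafkaSpec {

  "embedded Kafka" should "round-trip a message" in {
    sendToKafka("test-topic", "hello")              // publish via the helper above
    consumeFromKafka("test-topic") shouldBe "hello" // read the first message back
  }
}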
Example 3
Source File: SandboxNextFixture.scala    From daml   with Apache License 2.0 5 votes
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.platform.sandboxnext

import com.daml.ledger.participant.state.v1.SeedService
import com.daml.ledger.api.testing.utils.{OwnedResource, Resource, SuiteResource}
import com.daml.platform.sandbox.AbstractSandboxFixture
import com.daml.platform.sandbox.config.SandboxConfig
import com.daml.platform.sandbox.services.GrpcClientResource
import com.daml.ports.Port
import com.daml.resources.ResourceOwner
import io.grpc.Channel
import org.scalatest.Suite

import scala.concurrent.ExecutionContext
import scala.concurrent.duration.DurationInt

trait SandboxNextFixture extends AbstractSandboxFixture with SuiteResource[(Port, Channel)] {
  self: Suite =>

  override protected def config: SandboxConfig =
    super.config.copy(
      seeding = Some(SeedService.Seeding.Weak),
    )

  override protected def serverPort: Port = suiteResource.value._1

  override protected def channel: Channel = suiteResource.value._2

  override protected lazy val suiteResource: Resource[(Port, Channel)] = {
    implicit val ec: ExecutionContext = system.dispatcher
    new OwnedResource[(Port, Channel)](
      for {
        jdbcUrl <- database
          .fold[ResourceOwner[Option[String]]](ResourceOwner.successful(None))(_.map(info =>
            Some(info.jdbcUrl)))
        port <- new Runner(config.copy(jdbcUrl = jdbcUrl))
        channel <- GrpcClientResource.owner(port)
      } yield (port, channel),
      acquisitionTimeout = 1.minute,
      releaseTimeout = 1.minute,
    )
  }
} 
Example 4
Source File: AbstractSandboxFixture.scala    From daml   with Apache License 2.0 5 votes
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.platform.sandbox

import java.io.File
import java.net.InetAddress

import akka.stream.Materializer
import com.daml.api.util.TimeProvider
import com.daml.bazeltools.BazelRunfiles._
import com.daml.grpc.adapter.ExecutionSequencerFactory
import com.daml.ledger.api.auth.client.LedgerCallCredentials
import com.daml.ledger.api.domain
import com.daml.ledger.api.domain.LedgerId
import com.daml.ledger.api.testing.utils.AkkaBeforeAndAfterAll
import com.daml.ledger.api.v1.ledger_identity_service.{
  GetLedgerIdentityRequest,
  LedgerIdentityServiceGrpc
}
import com.daml.ledger.api.v1.testing.time_service.TimeServiceGrpc
import com.daml.ledger.client.services.testing.time.StaticTime
import com.daml.ledger.participant.state.v1.SeedService.Seeding
import com.daml.platform.common.LedgerIdMode
import com.daml.platform.sandbox.config.SandboxConfig
import com.daml.platform.sandbox.services.DbInfo
import com.daml.platform.services.time.TimeProviderType
import com.daml.ports.Port
import com.daml.resources.ResourceOwner
import io.grpc.Channel
import org.scalatest.Suite
import scalaz.syntax.tag._

import scala.concurrent.Await
import scala.concurrent.duration._
import scala.util.Try

trait AbstractSandboxFixture extends AkkaBeforeAndAfterAll {
  self: Suite =>

  protected def darFile = new File(rlocation("ledger/test-common/model-tests.dar"))

  protected def ledgerId(token: Option[String] = None): domain.LedgerId =
    domain.LedgerId(
      LedgerIdentityServiceGrpc
        .blockingStub(channel)
        .withCallCredentials(token.map(new LedgerCallCredentials(_)).orNull)
        .getLedgerIdentity(GetLedgerIdentityRequest())
        .ledgerId)

  protected def getTimeProviderForClient(
      implicit mat: Materializer,
      esf: ExecutionSequencerFactory
  ): TimeProvider = {
    Try(TimeServiceGrpc.stub(channel))
      .map(StaticTime.updatedVia(_, ledgerId().unwrap)(mat, esf))
      .fold[TimeProvider](_ => TimeProvider.UTC, Await.result(_, 30.seconds))
  }

  protected def config: SandboxConfig =
    SandboxConfig.default.copy(
      port = Port.Dynamic,
      damlPackages = packageFiles,
      timeProviderType = Some(TimeProviderType.Static),
      scenario = scenario,
      ledgerIdMode = LedgerIdMode.Static(LedgerId("sandbox-server")),
      seeding = Some(Seeding.Weak),
    )

  protected def packageFiles: List[File] = List(darFile)

  protected def scenario: Option[String] = None

  protected def database: Option[ResourceOwner[DbInfo]] = None

  protected def serverHost: String = InetAddress.getLoopbackAddress.getHostName

  protected def serverPort: Port

  protected def channel: Channel
} 
Example 5
Source File: SandboxFixtureWithAuth.scala    From daml   with Apache License 2.0 5 votes
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.platform.sandbox.services

import java.time.{Duration, Instant}
import java.util.UUID

import com.daml.jwt.domain.DecodedJwt
import com.daml.jwt.{HMAC256Verifier, JwtSigner}
import com.daml.ledger.api.auth.{AuthServiceJWT, AuthServiceJWTCodec, AuthServiceJWTPayload}
import com.daml.platform.sandbox.config.SandboxConfig
import org.scalatest.Suite
import scalaz.syntax.tag.ToTagOps

trait SandboxFixtureWithAuth extends SandboxFixture { self: Suite =>

  val emptyToken = AuthServiceJWTPayload(
    ledgerId = None,
    participantId = None,
    applicationId = None,
    exp = None,
    admin = false,
    actAs = Nil,
    readAs = Nil
  )

  val adminToken: AuthServiceJWTPayload = emptyToken.copy(admin = true)

  def readOnlyToken(party: String): AuthServiceJWTPayload =
    emptyToken.copy(readAs = List(party))

  def readWriteToken(party: String): AuthServiceJWTPayload =
    emptyToken.copy(actAs = List(party))

  def expiringIn(t: Duration, p: AuthServiceJWTPayload): AuthServiceJWTPayload =
    p.copy(exp = Option(Instant.now().plusNanos(t.toNanos)))

  def forLedgerId(id: String, p: AuthServiceJWTPayload): AuthServiceJWTPayload =
    p.copy(ledgerId = Some(id))

  def forParticipantId(id: String, p: AuthServiceJWTPayload): AuthServiceJWTPayload =
    p.copy(participantId = Some(id))

  def forApplicationId(id: String, p: AuthServiceJWTPayload): AuthServiceJWTPayload =
    p.copy(applicationId = Some(id))

  override protected def config: SandboxConfig =
    super.config.copy(
      authService = Some(
        AuthServiceJWT(HMAC256Verifier(jwtSecret)
          .getOrElse(sys.error("Failed to create HMAC256 verifier")))))

  protected lazy val wrappedLedgerId = ledgerId(Some(toHeader(adminToken)))
  protected lazy val unwrappedLedgerId = wrappedLedgerId.unwrap

  private val jwtHeader = """{"alg": "HS256", "typ": "JWT"}"""
  private val jwtSecret = UUID.randomUUID.toString

  private def signed(payload: AuthServiceJWTPayload, secret: String): String =
    JwtSigner.HMAC256
      .sign(DecodedJwt(jwtHeader, AuthServiceJWTCodec.compactPrint(payload)), secret)
      .getOrElse(sys.error("Failed to generate token"))
      .value

  def toHeader(payload: AuthServiceJWTPayload, secret: String = jwtSecret): String =
    signed(payload, secret)
} 
Example 6
Source File: SandboxFixture.scala    From daml   with Apache License 2.0 5 votes
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.platform.sandbox.services

import com.daml.ledger.api.testing.utils.{OwnedResource, Resource, SuiteResource}
import com.daml.platform.sandbox.{AbstractSandboxFixture, SandboxServer}
import com.daml.ports.Port
import com.daml.resources.ResourceOwner
import io.grpc.Channel
import org.scalatest.Suite

import scala.concurrent.ExecutionContext
import scala.concurrent.duration.DurationInt

trait SandboxFixture extends AbstractSandboxFixture with SuiteResource[(SandboxServer, Channel)] {
  self: Suite =>

  protected def server: SandboxServer = suiteResource.value._1

  override protected def serverPort: Port = server.port

  override protected def channel: Channel = suiteResource.value._2

  override protected lazy val suiteResource: Resource[(SandboxServer, Channel)] = {
    implicit val ec: ExecutionContext = system.dispatcher
    new OwnedResource[(SandboxServer, Channel)](
      for {
        jdbcUrl <- database
          .fold[ResourceOwner[Option[String]]](ResourceOwner.successful(None))(_.map(info =>
            Some(info.jdbcUrl)))
        server <- SandboxServer.owner(config.copy(jdbcUrl = jdbcUrl))
        channel <- GrpcClientResource.owner(server.port)
      } yield (server, channel),
      acquisitionTimeout = 1.minute,
      releaseTimeout = 1.minute,
    )
  }
} 
Example 7
Source File: JdbcLedgerDaoBackend.scala    From daml   with Apache License 2.0 5 votes
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.platform.store.dao

import com.codahale.metrics.MetricRegistry
import com.daml.ledger.api.domain.LedgerId
import com.daml.ledger.api.testing.utils.AkkaBeforeAndAfterAll
import com.daml.logging.LoggingContext
import com.daml.logging.LoggingContext.newLoggingContext
import com.daml.metrics.Metrics
import com.daml.platform.configuration.ServerRole
import com.daml.platform.store.dao.events.LfValueTranslation
import com.daml.platform.store.{DbType, FlywayMigrations}
import com.daml.resources.{Resource, ResourceOwner}
import org.scalatest.Suite

import scala.concurrent.duration.DurationInt
import scala.concurrent.{Await, ExecutionContext}

private[dao] trait JdbcLedgerDaoBackend extends AkkaBeforeAndAfterAll { this: Suite =>

  protected def dbType: DbType
  protected def jdbcUrl: String

  protected def daoOwner(implicit logCtx: LoggingContext): ResourceOwner[LedgerDao] =
    JdbcLedgerDao
      .writeOwner(
        serverRole = ServerRole.Testing(getClass),
        jdbcUrl = jdbcUrl,
        eventsPageSize = 100,
        metrics = new Metrics(new MetricRegistry),
        lfValueTranslationCache = LfValueTranslation.Cache.none,
      )

  protected final var ledgerDao: LedgerDao = _

  // `dbDispatcher` and `ledgerDao` depend on the `postgresFixture`, which is in turn initialized in `beforeAll`
  private var resource: Resource[LedgerDao] = _

  override protected def beforeAll(): Unit = {
    super.beforeAll()
    implicit val executionContext: ExecutionContext = system.dispatcher
    resource = newLoggingContext { implicit logCtx =>
      for {
        _ <- Resource.fromFuture(new FlywayMigrations(jdbcUrl).migrate())
        dao <- daoOwner.acquire()
        _ <- Resource.fromFuture(dao.initializeLedger(LedgerId("test-ledger")))
      } yield dao
    }
    ledgerDao = Await.result(resource.asFuture, 10.seconds)
  }

  override protected def afterAll(): Unit = {
    Await.result(resource.release(), 10.seconds)
    super.afterAll()
  }

} 
Example 8
Source File: TemplateSubscriptionSpec.scala    From daml   with Apache License 2.0 5 votes
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.extractor

import java.io.File

import com.daml.bazeltools.BazelRunfiles._
import com.daml.lf.data.Ref.Party
import com.daml.extractor.config.{ExtractorConfig, TemplateConfig}
import com.daml.extractor.services.ExtractorFixtureAroundAll
import com.daml.ledger.api.testing.utils.SuiteResourceManagementAroundAll
import com.daml.testing.postgresql.PostgresAroundAll
import org.scalatest.{FlatSpec, Inside, Matchers, Suite}
import scalaz.OneAnd

class TemplateSubscriptionSpec
    extends FlatSpec
    with Suite
    with PostgresAroundAll
    with SuiteResourceManagementAroundAll
    with ExtractorFixtureAroundAll
    with Matchers
    with Inside {

  override protected def darFile = new File(rlocation("extractor/TransactionExample.dar"))

  override def scenario: Option[String] = Some("TransactionExample:templateFilterTest")

  override def configureExtractor(ec: ExtractorConfig): ExtractorConfig = {
    val ec2 = super.configureExtractor(ec)
    ec2.copy(
      parties = OneAnd(Party assertFromString "Bob", Nil),
      templateConfigs = Set(TemplateConfig("TransactionExample", "RightOfUseAgreement")))
  }

  "Transactions" should "be extracted" in {
    getTransactions should have length 1
  }

  "Exercises" should "be extracted" in {
    getExercises should have length 0
  }

  "Contracts" should "be extracted" in {
    inside(getContracts) {
      case List(contract) =>
        contract.template should ===("TransactionExample:RightOfUseAgreement")
    }
  }
} 
Example 9
Source File: MultiPartyTemplateSubscriptionSpec.scala    From daml   with Apache License 2.0 5 votes
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.extractor

import java.io.File

import com.daml.bazeltools.BazelRunfiles._
import com.daml.lf.data.Ref.Party
import com.daml.extractor.config.{ExtractorConfig, TemplateConfig}
import com.daml.extractor.services.ExtractorFixtureAroundAll
import com.daml.ledger.api.testing.utils.SuiteResourceManagementAroundAll
import com.daml.testing.postgresql.PostgresAroundAll
import org.scalatest.{FlatSpec, Inside, Matchers, Suite}
import scalaz.OneAnd

class MultiPartyTemplateSubscriptionSpec
    extends FlatSpec
    with Suite
    with PostgresAroundAll
    with SuiteResourceManagementAroundAll
    with ExtractorFixtureAroundAll
    with Matchers
    with Inside {

  override protected def darFile = new File(rlocation("extractor/TransactionExample.dar"))

  override def scenario: Option[String] = Some("TransactionExample:templateFilterTest")

  private final val alice = Party assertFromString "Alice"
  private final val bob = Party assertFromString "Bob"

  override def configureExtractor(ec: ExtractorConfig): ExtractorConfig = {
    val ec2 = super.configureExtractor(ec)
    ec2.copy(
      parties = OneAnd(alice, List(bob)),
      templateConfigs = Set(
        TemplateConfig("TransactionExample", "RightOfUseOffer"),
        TemplateConfig("TransactionExample", "RightOfUseAgreement"))
    )
  }

  "Transactions" should "be extracted" in {
    getTransactions should have length 2
  }

  "Exercises" should "be extracted" in {
    inside(getExercises) {
      case List(e) =>
        e.template should ===("TransactionExample:RightOfUseOffer")
        e.choice should ===("Accept")
    }
  }

  "Contracts" should "be extracted" in {
    inside(getContracts) {
      case List(a1, a2) =>
        a1.template should ===("TransactionExample:RightOfUseOffer")
        a2.template should ===("TransactionExample:RightOfUseAgreement")
    }
  }
} 
Example 10
Source File: EnumSpec.scala    From daml   with Apache License 2.0 5 votes
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.extractor

import java.io.File

import com.daml.bazeltools.BazelRunfiles.rlocation
import com.daml.extractor.services.{CustomMatchers, ExtractorFixtureAroundAll}
import com.daml.ledger.api.testing.utils.SuiteResourceManagementAroundAll
import com.daml.testing.postgresql.PostgresAroundAll
import io.circe.parser._
import org.scalatest.{FlatSpec, Inside, Matchers, Suite}
import scalaz.Scalaz._

class EnumSpec
    extends FlatSpec
    with Suite
    with PostgresAroundAll
    with SuiteResourceManagementAroundAll
    with ExtractorFixtureAroundAll
    with Inside
    with Matchers
    with CustomMatchers {

  override protected def darFile = new File(rlocation("daml-lf/encoder/test-1.8.dar"))

  override def scenario: Option[String] = Some("EnumMod:createContracts")

  "Enum" should "be extracted" in {
    getContracts should have length 3
  }

  it should "contain the correct JSON data" in {

    val contractsJson = getContracts.map(_.create_arguments)

    val expected = List(
      """{
      "x" : "Red",
      "party" : "Bob"
      }""",
      """{
      "x" : "Green",
      "party" : "Bob"
      }""",
      """{
      "x" : "Blue",
      "party" : "Bob"
      }"""
    ).traverseU(parse)

    expected should be('right) // This should only fail if the JSON above is ill-formed

    contractsJson should contain theSameElementsAs expected.right.get
  }

} 
Example 11
Source File: SuiteResourceManagement.scala    From daml   with Apache License 2.0 5 votes
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.ledger.api.testing.utils

import org.scalatest.{BeforeAndAfterAll, BeforeAndAfterEach, Suite}

trait SuiteResource[+T] {
  protected def suiteResource: Resource[T]
}


trait SuiteResourceManagement {}

trait SuiteResourceManagementAroundAll
    extends SuiteResource[Any]
    with SuiteResourceManagement
    with BeforeAndAfterAll {
  self: Suite =>

  override protected def beforeAll(): Unit = {
    super.beforeAll()
    suiteResource.setup()
  }

  override protected def afterAll(): Unit = {
    suiteResource.close()
    super.afterAll()
  }
}

trait SuiteResourceManagementAroundEach
    extends SuiteResource[Any]
    with SuiteResourceManagement
    with BeforeAndAfterEach {
  self: Suite =>

  override protected def beforeEach(): Unit = {
    super.beforeEach()
    suiteResource.setup()
  }

  override protected def afterEach(): Unit = {
    suiteResource.close()
    super.afterEach()
  }
} 
Example 12
Source File: AkkaBeforeAndAfterAll.scala    From daml   with Apache License 2.0 5 votes
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.ledger.api.testing.utils

import java.util.concurrent.Executors

import akka.actor.ActorSystem
import akka.stream.Materializer
import com.daml.grpc.adapter.{AkkaExecutionSequencerPool, ExecutionSequencerFactory}
import com.google.common.util.concurrent.ThreadFactoryBuilder
import org.scalatest.{BeforeAndAfterAll, Suite}
import org.slf4j.LoggerFactory

import scala.concurrent.duration.DurationInt
import scala.concurrent.{Await, ExecutionContext}

trait AkkaBeforeAndAfterAll extends BeforeAndAfterAll {
  self: Suite =>
  private val logger = LoggerFactory.getLogger(getClass)

  protected def actorSystemName: String = this.getClass.getSimpleName

  private implicit lazy val executionContext: ExecutionContext =
    ExecutionContext.fromExecutorService(
      Executors.newSingleThreadExecutor(
        new ThreadFactoryBuilder()
          .setDaemon(true)
          .setNameFormat(s"$actorSystemName-thread-pool-worker-%d")
          .setUncaughtExceptionHandler((thread, _) =>
            logger.error(s"got an uncaught exception on thread: ${thread.getName}"))
          .build()))

  protected implicit lazy val system: ActorSystem =
    ActorSystem(actorSystemName, defaultExecutionContext = Some(executionContext))

  protected implicit lazy val materializer: Materializer = Materializer(system)

  protected implicit lazy val executionSequencerFactory: ExecutionSequencerFactory =
    new AkkaExecutionSequencerPool(poolName = actorSystemName, actorCount = 1)

  override protected def afterAll(): Unit = {
    executionSequencerFactory.close()
    materializer.shutdown()
    Await.result(system.terminate(), 30.seconds)
    super.afterAll()
  }
} 
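Suites mixing in AkkaBeforeAndAfterAll get a lazily created ActorSystem, Materializer and ExecutionSequencerFactory, all torn down in afterAll. A hedged sketch of such a suite (the class below is not from the daml repository, and the AsyncFlatSpec style is an assumption):

package com.daml.ledger.api.testing.utils

import akka.stream.scaladsl.{Sink, Source}
import org.scalatest.flatspec.AsyncFlatSpec
import org.scalatest.matchers.should.Matchers

class StreamsSpec extends AsyncFlatSpec with Matchers with AkkaBeforeAndAfterAll {

  behavior of "a simple stream"

  it should "sum its elements using the suite's implicit Materializer" in {
    Source(1 to 3)
      .runWith(Sink.fold(0)(_ + _)) // runs on the ActorSystem created by the mixin
      .map(_ shouldBe 6)            // AsyncFlatSpec expects a Future[Assertion]
  }
}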
Example 13
Source File: AkkaTest.scala    From daml   with Apache License 2.0 5 votes
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.ledger.client.testing

import java.util
import java.util.concurrent.{Executors, ScheduledExecutorService}

import akka.NotUsed
import akka.actor.{ActorSystem, Scheduler}
import akka.stream.scaladsl.{Sink, Source}
import akka.stream.Materializer
import akka.util.ByteString
import com.daml.grpc.adapter.{ExecutionSequencerFactory, SingleThreadExecutionSequencerPool}
import com.typesafe.config.{Config, ConfigFactory, ConfigValueFactory}
import com.typesafe.scalalogging.LazyLogging
import org.scalatest.{BeforeAndAfterAll, Suite}

import scala.concurrent.duration._
import scala.concurrent.{Await, ExecutionContextExecutor, Future}
import scala.util.control.NonFatal

trait AkkaTest extends BeforeAndAfterAll with LazyLogging { self: Suite =>
  // TestEventListener is needed for log testing
  private val loggers =
    util.Arrays.asList("akka.event.slf4j.Slf4jLogger", "akka.testkit.TestEventListener")
  protected implicit val sysConfig: Config = ConfigFactory
    .load()
    .withValue("akka.loggers", ConfigValueFactory.fromIterable(loggers))
    .withValue("akka.logger-startup-timeout", ConfigValueFactory.fromAnyRef("30s"))
    .withValue("akka.stdout-loglevel", ConfigValueFactory.fromAnyRef("INFO"))
  protected implicit val system: ActorSystem = ActorSystem("test", sysConfig)
  protected implicit val ec: ExecutionContextExecutor =
    system.dispatchers.lookup("test-dispatcher")
  protected implicit val scheduler: Scheduler = system.scheduler
  protected implicit val schedulerService: ScheduledExecutorService =
    Executors.newSingleThreadScheduledExecutor()
  protected implicit val materializer: Materializer = Materializer(system)
  protected implicit val esf: ExecutionSequencerFactory =
    new SingleThreadExecutionSequencerPool("testSequencerPool")
  protected val timeout: FiniteDuration = 2.minutes
  protected val shortTimeout: FiniteDuration = 5.seconds

  protected def await[T](fun: => Future[T]): T = Await.result(fun, timeout)

  protected def awaitShort[T](fun: => Future[T]): T = Await.result(fun, shortTimeout)

  protected def drain(source: Source[ByteString, NotUsed]): ByteString = {
    val futureResult: Future[ByteString] = source.runFold(ByteString.empty) { (a, b) =>
      a.concat(b)
    }
    awaitShort(futureResult)
  }

  protected def drain[A, B](source: Source[A, B]): Seq[A] = {
    val futureResult: Future[Seq[A]] = source.runWith(Sink.seq)
    awaitShort(futureResult)
  }

  override protected def afterAll(): Unit = {
    try {
      val _ = await(system.terminate())
    } catch {
      case NonFatal(_) => ()
    }
    schedulerService.shutdownNow()
    super.afterAll()
  }
} 
Example 14
Source File: PostgresAroundSuite.scala    From daml   with Apache License 2.0 5 votes
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.testing.postgresql

import org.scalatest.Suite

trait PostgresAroundSuite extends PostgresAround {
  self: Suite =>

  @volatile
  private var database: Option[PostgresDatabase] = None

  protected def postgresDatabase: PostgresDatabase = database.get

  protected def createNewDatabase(): PostgresDatabase = {
    database = Some(createNewRandomDatabase())
    postgresDatabase
  }

  protected def dropDatabase(): Unit = {
    dropDatabase(postgresDatabase)
    database = None
  }
} 
Example 15
Source File: PostgresAroundEach.scala    From daml   with Apache License 2.0 5 votes
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.testing.postgresql

import org.scalatest.{BeforeAndAfterAll, BeforeAndAfterEach, Suite}

trait PostgresAroundEach
    extends PostgresAroundSuite
    with BeforeAndAfterAll
    with BeforeAndAfterEach {
  self: Suite =>

  override protected def beforeAll(): Unit = {
    // We start PostgreSQL before calling `super` because _generally_ the database needs to be up
    // before everything else.
    connectToPostgresqlServer()
    super.beforeAll()
  }

  override protected def afterAll(): Unit = {
    super.afterAll()
    disconnectFromPostgresqlServer()
  }

  override protected def beforeEach(): Unit = {
    // We create the database before calling `super` for the same reasons as above.
    createNewDatabase()
    super.beforeEach()
  }

  override protected def afterEach(): Unit = {
    super.afterEach()
    dropDatabase()
  }
} 
Example 16
Source File: PostgresAroundAll.scala    From daml   with Apache License 2.0 5 votes
// Copyright (c) 2020 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package com.daml.testing.postgresql

import org.scalatest.{BeforeAndAfterAll, Suite}

trait PostgresAroundAll extends PostgresAroundSuite with BeforeAndAfterAll {
  self: Suite =>

  override protected def beforeAll(): Unit = {
    // We start PostgreSQL before calling `super` because _generally_ the database needs to be up
    // before everything else.
    connectToPostgresqlServer()
    createNewDatabase()
    super.beforeAll()
  }

  override protected def afterAll(): Unit = {
    super.afterAll()
    disconnectFromPostgresqlServer()
  }
} 
Example 17
Source File: LocalClusterSparkContext.scala    From drizzle-spark   with Apache License 2.0 5 votes
package org.apache.spark.mllib.util

import org.scalatest.{BeforeAndAfterAll, Suite}

import org.apache.spark.{SparkConf, SparkContext}

trait LocalClusterSparkContext extends BeforeAndAfterAll { self: Suite =>
  @transient var sc: SparkContext = _

  override def beforeAll() {
    super.beforeAll()
    val conf = new SparkConf()
      .setMaster("local-cluster[2, 1, 1024]")
      .setAppName("test-cluster")
      .set("spark.rpc.message.maxSize", "1") // set to 1MB to detect direct serialization of data
    sc = new SparkContext(conf)
  }

  override def afterAll() {
    try {
      if (sc != null) {
        sc.stop()
      }
    } finally {
      super.afterAll()
    }
  }
} 
Example 18
Source File: MLlibTestSparkContext.scala    From drizzle-spark   with Apache License 2.0 5 votes
package org.apache.spark.mllib.util

import java.io.File

import org.scalatest.Suite

import org.apache.spark.SparkContext
import org.apache.spark.ml.util.TempDirectory
import org.apache.spark.sql.{SparkSession, SQLContext, SQLImplicits}
import org.apache.spark.util.Utils

trait MLlibTestSparkContext extends TempDirectory { self: Suite =>
  @transient var spark: SparkSession = _
  @transient var sc: SparkContext = _
  @transient var checkpointDir: String = _

  override def beforeAll() {
    super.beforeAll()
    spark = SparkSession.builder
      .master("local[2]")
      .appName("MLlibUnitTest")
      .getOrCreate()
    sc = spark.sparkContext

    checkpointDir = Utils.createDirectory(tempDir.getCanonicalPath, "checkpoints").toString
    sc.setCheckpointDir(checkpointDir)
  }

  override def afterAll() {
    try {
      Utils.deleteRecursively(new File(checkpointDir))
      SparkSession.clearActiveSession()
      if (spark != null) {
        spark.stop()
      }
      spark = null
    } finally {
      super.afterAll()
    }
  }

  
  protected object testImplicits extends SQLImplicits {
    protected override def _sqlContext: SQLContext = self.spark.sqlContext
  }
} 
Example 19
Source File: SharedSparkContext.scala    From drizzle-spark   with Apache License 2.0 5 votes
package org.apache.spark

import org.scalatest.{BeforeAndAfterAll, BeforeAndAfterEach}
import org.scalatest.Suite


trait SharedSparkContext extends BeforeAndAfterAll with BeforeAndAfterEach { self: Suite =>

  @transient private var _sc: SparkContext = _

  def sc: SparkContext = _sc

  var conf = new SparkConf(false)

  override def beforeAll() {
    super.beforeAll()
    _sc = new SparkContext(
      "local[4]", "test", conf.set("spark.hadoop.fs.file.impl", classOf[DebugFilesystem].getName))
  }

  override def afterAll() {
    try {
      LocalSparkContext.stop(_sc)
      _sc = null
    } finally {
      super.afterAll()
    }
  }

  protected override def beforeEach(): Unit = {
    super.beforeEach()
    DebugFilesystem.clearOpenStreams()
  }

  protected override def afterEach(): Unit = {
    super.afterEach()
    DebugFilesystem.assertNoOpenStreams()
  }
} 
Example 20
Source File: ResetSystemProperties.scala    From drizzle-spark   with Apache License 2.0 5 votes
package org.apache.spark.util

import java.util.Properties

import org.apache.commons.lang3.SerializationUtils
import org.scalatest.{BeforeAndAfterEach, Suite}


private[spark] trait ResetSystemProperties extends BeforeAndAfterEach { this: Suite =>
  var oldProperties: Properties = null

  override def beforeEach(): Unit = {
    // We need SerializationUtils.clone instead of `new Properties(System.getProperties())` because
    // the latter does not copy the properties: it creates a new Properties object with the given
    // properties as defaults, and such defaults are not seen by the standard Scala wrapper over
    // Java Properties.
    oldProperties = SerializationUtils.clone(System.getProperties)
    super.beforeEach()
  }

  override def afterEach(): Unit = {
    try {
      super.afterEach()
    } finally {
      System.setProperties(oldProperties)
      oldProperties = null
    }
  }
} 
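A consumer can mutate system properties freely inside a test and rely on afterEach to restore the snapshot taken in beforeEach. Illustrative sketch only (the suite name and property value are invented; FunSuite matches the ScalaTest generation used in this code base):

package org.apache.spark.util

import org.scalatest.FunSuite

class PropertyIsolationSuite extends FunSuite with ResetSystemProperties {

  test("a test may override spark.master without leaking into other tests") {
    System.setProperty("spark.master", "local[1]")
    assert(System.getProperty("spark.master") === "local[1]")
    // afterEach() restores the snapshot taken in beforeEach(), so the change stays local.
  }
}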
Example 21
Source File: MLlibTestSparkContext.scala    From spark-lp   with Apache License 2.0 5 votes
package org.apache.spark.mllib.util

import org.scalatest.{BeforeAndAfterAll, Suite}

import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.sql.SQLContext


trait MLlibTestSparkContext extends BeforeAndAfterAll { self: Suite =>
  @transient var sc: SparkContext = _
  @transient var sqlContext: SQLContext = _

  override def beforeAll() {
    super.beforeAll()
    val conf = new SparkConf()
      .setMaster("local[2]")
      .setAppName("MLlibUnitTest")
    sc = new SparkContext(conf)
    sc.setLogLevel("WARN")
    sqlContext = new SQLContext(sc)
  }

  override def afterAll() {
    sqlContext = null
    if (sc != null) {
      sc.stop()
    }
    sc = null
    super.afterAll()
  }
} 
Example 22
Source File: SparkFunSuite.scala    From spark-alchemy   with Apache License 2.0 5 votes
package org.apache.spark

// scalastyle:off
import java.io.File

import scala.annotation.tailrec
import org.apache.log4j.{Appender, Level, Logger}
import org.scalatest.{BeforeAndAfter, BeforeAndAfterAll, BeforeAndAfterEach, FunSuite, Outcome, Suite}
import org.apache.spark.internal.Logging
import org.apache.spark.internal.config.Tests.IS_TESTING
import org.apache.spark.util.{AccumulatorContext, Utils}

// NOTE: the enclosing abstract class declaration and the other members of SparkFunSuite
// are elided in this excerpt; only the withLogAppender helper is shown.
  protected def withLogAppender(
    appender: Appender,
    loggerName: Option[String] = None,
    level: Option[Level] = None)(
    f: => Unit): Unit = {
    val logger = loggerName.map(Logger.getLogger).getOrElse(Logger.getRootLogger)
    val restoreLevel = logger.getLevel
    logger.addAppender(appender)
    if (level.isDefined) {
      logger.setLevel(level.get)
    }
    try f finally {
      logger.removeAppender(appender)
      if (level.isDefined) {
        logger.setLevel(restoreLevel)
      }
    }
  }
} 
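The excerpt above only shows the withLogAppender helper; a test would pass in a log4j 1.x Appender, run the code under test, and then inspect what was captured. A hypothetical usage (the appender, logger and test name are assumptions, and SparkFunSuite refers to the elided class above):

package org.apache.spark

import org.apache.log4j.spi.LoggingEvent
import org.apache.log4j.{AppenderSkeleton, Logger}

import scala.collection.mutable

class LogCaptureSuite extends SparkFunSuite {

  test("warnings are visible to the injected appender") {
    val captured = mutable.Buffer.empty[String]
    val appender = new AppenderSkeleton {
      override def append(event: LoggingEvent): Unit = captured += event.getRenderedMessage
      override def close(): Unit = ()
      override def requiresLayout(): Boolean = false
    }
    withLogAppender(appender) {
      Logger.getRootLogger.warn("something noteworthy")
    }
    assert(captured.contains("something noteworthy"))
  }
}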
Example 23
Source File: SharedSparkSessionBase.scala    From spark-alchemy   with Apache License 2.0 5 votes
package org.apache.spark.sql.test

import org.apache.spark.sql.internal.StaticSQLConf
import org.apache.spark.sql.{SQLContext, SparkSession}
import org.apache.spark.{DebugFilesystem, SparkConf}
import org.scalatest.Suite
import org.scalatest.concurrent.Eventually

import scala.concurrent.duration._

// NOTE: the enclosing trait declaration and the SparkSession-creation members of
// SharedSparkSessionBase are elided in this excerpt; only the teardown hooks are shown.
  protected override def afterAll(): Unit = {
    try {
      super.afterAll()
    } finally {
      try {
        if (_spark != null) {
          try {
            _spark.sessionState.catalog.reset()
          } finally {
            try {
              waitForTasksToFinish()
            } finally {
              _spark.stop()
              _spark = null
            }
          }
        }
      } finally {
        SparkSession.clearActiveSession()
        SparkSession.clearDefaultSession()
      }
    }
  }

  protected override def beforeEach(): Unit = {
    super.beforeEach()
    DebugFilesystem.clearOpenStreams()
  }

  protected override def afterEach(): Unit = {
    super.afterEach()
    // Clear all persistent datasets after each test
    spark.sharedState.cacheManager.clearCache()
    // files can be closed from other threads, so wait a bit
    // normally this doesn't take more than 1s
    eventually(timeout(30.seconds), interval(2.seconds)) {
      DebugFilesystem.assertNoOpenStreams()
    }
  }
} 
Example 24
Source File: MatcherSpec.scala    From matcher   with MIT License 5 votes
package com.wavesplatform.dex.actors

import akka.actor.ActorSystem
import akka.testkit.TestKitBase
import com.typesafe.config.ConfigFactory
import com.wavesplatform.dex.domain.utils.ScorexLogging
import com.wavesplatform.dex.settings.loadConfig
import org.scalatest.matchers.should.Matchers
import org.scalatest.wordspec.AnyWordSpecLike
import org.scalatest.{BeforeAndAfterAll, BeforeAndAfterEach, Suite}

abstract class MatcherSpec(_actorSystemName: String) extends AnyWordSpecLike with MatcherSpecLike {
  protected def actorSystemName: String = _actorSystemName
}

trait MatcherSpecLike extends TestKitBase with Matchers with BeforeAndAfterAll with BeforeAndAfterEach with ScorexLogging {
  this: Suite =>

  protected def actorSystemName: String

  implicit override lazy val system: ActorSystem = ActorSystem(
    actorSystemName,
    loadConfig(ConfigFactory.empty())
  )

  override protected def afterAll(): Unit = {
    super.afterAll()
    shutdown(system)
  }
} 
Example 25
Source File: WithDB.scala    From matcher   with MIT License 5 votes
package com.wavesplatform.dex.db

import java.nio.file.Files

import com.wavesplatform.dex.db.leveldb.LevelDBFactory
import com.wavesplatform.dex.domain.account.Address
import com.wavesplatform.dex.domain.asset.Asset
import com.wavesplatform.dex.util.Implicits._
import com.wavesplatform.dex.util.TestHelpers
import monix.reactive.subjects.Subject
import org.iq80.leveldb.{DB, Options}
import org.scalatest.{BeforeAndAfterEach, Suite}

trait WithDB extends BeforeAndAfterEach { this: Suite =>

  private val path                  = Files.createTempDirectory("lvl").toAbsolutePath
  private var currentDBInstance: DB = _

  def db: DB = currentDBInstance

  protected val ignoreSpendableBalanceChanged: Subject[(Address, Asset), (Address, Asset)] = Subject.empty

  override def beforeEach(): Unit = {
    currentDBInstance = LevelDBFactory.factory.open(path.toFile, new Options().createIfMissing(true))
    super.beforeEach()
  }

  override def afterEach(): Unit =
    try {
      super.afterEach()
      db.close()
    } finally {
      TestHelpers.deleteRecursively(path)
    }

  protected def tempDb(f: DB => Any): Any = {
    val path = Files.createTempDirectory("lvl-temp").toAbsolutePath
    val db   = LevelDBFactory.factory.open(path.toFile, new Options().createIfMissing(true))
    try {
      f(db)
    } finally {
      db.close()
      TestHelpers.deleteRecursively(path)
    }
  }
} 
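The db handle is reopened for every test, while tempDb scopes a throwaway instance to a single block. A hypothetical spec (the class name and keys are invented; org.iq80.leveldb.DB exposes put/get on byte arrays):

package com.wavesplatform.dex.db

import org.scalatest.matchers.should.Matchers
import org.scalatest.wordspec.AnyWordSpec

class LevelDbRoundTripSpec extends AnyWordSpec with Matchers with WithDB {

  "WithDB" should {
    "give each test a fresh database" in {
      db.put(Array[Byte](1), Array[Byte](42))
      db.get(Array[Byte](1)).head shouldBe 42.toByte
    }

    "offer a scoped database via tempDb" in {
      tempDb { tmp =>
        tmp.put(Array[Byte](2), Array[Byte](7))
        tmp.get(Array[Byte](2)).head shouldBe 7.toByte
      }
    }
  }
}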
Example 26
Source File: HasWebSockets.scala    From matcher   with MIT License 5 votes
package com.wavesplatform.dex.it.api.websockets

import java.lang
import java.util.concurrent.ConcurrentHashMap

import akka.actor.ActorSystem
import akka.stream.Materializer
import com.wavesplatform.dex.api.ws.connection.{WsConnection, WsConnectionOps}
import com.wavesplatform.dex.api.ws.entities.{WsBalances, WsOrder}
import com.wavesplatform.dex.api.ws.protocol.{WsAddressSubscribe, WsInitial, WsOrderBookSubscribe}
import com.wavesplatform.dex.domain.account.KeyPair
import com.wavesplatform.dex.domain.asset.{Asset, AssetPair}
import com.wavesplatform.dex.error.ErrorFormatterContext
import com.wavesplatform.dex.it.config.PredefinedAssets
import com.wavesplatform.dex.it.docker.DexContainer
import com.wavesplatform.dex.test.matchers.DiffMatcherWithImplicits
import mouse.any._
import org.scalatest.concurrent.Eventually
import org.scalatest.matchers.should.Matchers
import org.scalatest.{BeforeAndAfterAll, Suite}

import scala.concurrent.duration._

trait HasWebSockets extends BeforeAndAfterAll with HasJwt with WsConnectionOps with WsMessageOps {
  _: Suite with Eventually with Matchers with DiffMatcherWithImplicits with PredefinedAssets =>

  implicit protected val system: ActorSystem        = ActorSystem()
  implicit protected val materializer: Materializer = Materializer.matFromSystem(system)
  implicit protected val efc: ErrorFormatterContext = assetDecimalsMap.apply

  protected def getWsStreamUri(dex: DexContainer): String = s"ws://127.0.0.1:${dex.restApiAddress.getPort}/ws/v0"

  protected val knownWsConnections: ConcurrentHashMap.KeySetView[WsConnection, lang.Boolean] =
    ConcurrentHashMap.newKeySet[WsConnection]()

  protected def addConnection(connection: WsConnection): Unit = knownWsConnections.add(connection)

  protected def mkWsAddressConnection(client: KeyPair,
                                      dex: DexContainer,
                                      keepAlive: Boolean = true,
                                      subscriptionLifetime: FiniteDuration = 1.hour): WsConnection = {
    val jwt        = mkJwt(client, lifetime = subscriptionLifetime)
    val connection = mkDexWsConnection(dex, keepAlive)
    connection.send(WsAddressSubscribe(client.toAddress, WsAddressSubscribe.defaultAuthType, jwt))
    connection
  }

  protected def mkWsOrderBookConnection(assetPair: AssetPair, dex: DexContainer, depth: Int = 1): WsConnection = {
    val connection = mkDexWsConnection(dex)
    connection.send(WsOrderBookSubscribe(assetPair, depth))
    connection
  }

  protected def mkWsInternalConnection(dex: DexContainer, keepAlive: Boolean = true): WsConnection =
    mkWsConnection(s"${getWsStreamUri(dex)}/internal", keepAlive)

  protected def mkDexWsConnection(dex: DexContainer, keepAlive: Boolean = true): WsConnection =
    mkWsConnection(getWsStreamUri(dex), keepAlive)

  protected def mkWsConnection(uri: String, keepAlive: Boolean = true): WsConnection = {
    new WsConnection(uri, keepAlive) unsafeTap { wsc =>
      addConnection(wsc)
      eventually { wsc.collectMessages[WsInitial] should have size 1 }
      wsc.clearMessages()
    }
  }

  protected def assertChanges(c: WsConnection, squash: Boolean = true)(expBs: Map[Asset, WsBalances]*)(expOs: WsOrder*): Unit = {
    eventually {
      if (squash) {
        c.balanceChanges.size should be <= expBs.size
        c.balanceChanges.squashed should matchTo { expBs.toList.squashed }
        c.orderChanges.size should be <= expOs.size
        c.orderChanges.squashed should matchTo { expOs.toList.squashed }
      } else {
        c.balanceChanges should matchTo(expBs)
        c.orderChanges should matchTo(expOs)
      }
    }

    c.clearMessages()
  }

  protected def cleanupWebSockets(): Unit = {
    if (!knownWsConnections.isEmpty) {
      knownWsConnections.forEach { _.close() }
      materializer.shutdown()
    }
  }

  override def afterAll(): Unit = {
    super.afterAll()
    cleanupWebSockets()
  }
} 
Example 27
Source File: InformativeTestStart.scala    From matcher   with MIT License 5 votes
package com.wavesplatform.dex.it.test

import java.time.{LocalDateTime, ZoneId}

import com.wavesplatform.dex.it.api.BaseContainersKit
import mouse.any._
import org.scalatest.{Args, Status, Suite}

import scala.util.{Failure, Success}

trait InformativeTestStart extends Suite { self: BaseContainersKit =>

  override protected def runTest(testName: String, args: Args): Status = {

    def print(text: String): Unit = writeGlobalLog(s"---------- [${LocalDateTime.now(ZoneId.of("UTC"))}] $text ----------")

    print(s"Test '$testName' started")

    super.runTest(testName, args) unsafeTap {
      _.whenCompleted {
        case Success(r) => print(s"Test '$testName' ${if (r) "succeeded" else "failed"}")
        case Failure(e) => print(s"Test '$testName' failed with exception '${e.getClass.getSimpleName}'")
      }
    }
  }

  protected def writeGlobalLog(x: String): Unit = {
    log.debug(x)
    knownContainers.get().foreach { _.printDebugMessage(x) }
  }
} 
Example 28
Source File: AffinityPropagationSuite.scala    From SparkAffinityPropagation   with MIT License 5 votes
package org.viirya.spark.ml

import scala.collection.mutable

import org.scalatest.{BeforeAndAfterAll, FunSuite, Suite}

import org.viirya.spark.ml.AffinityPropagation._

import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.graphx.{Edge, Graph}

class AffinityPropagationSuite extends FunSuite with BeforeAndAfterAll { self: Suite =>
  @transient var sc: SparkContext = _

  override def beforeAll() {
    super.beforeAll()
    val conf = new SparkConf()
      .setMaster("local[2]")
      .setAppName("AffinityPropagationUnitTest")
    sc = new SparkContext(conf)
  }

  override def afterAll() {
    try {
      if (sc != null) {
        sc.stop()
      }
      sc = null
    } finally {
      super.afterAll()
    }
  }  

  test("affinity propagation") {
    
    val similarities = Seq[(Long, Long, Double)](
      (0, 1, 1.0), (1, 0, 1.0), (0, 2, 1.0), (2, 0, 1.0), (0, 3, 1.0), (3, 0, 1.0),
      (1, 2, 1.0), (2, 1, 1.0), (2, 3, 1.0), (3, 2, 1.0))
    val expected = Array(
      Array(0.0,     1.0/3.0, 1.0/3.0, 1.0/3.0),
      Array(1.0/2.0,     0.0, 1.0/2.0,     0.0),
      Array(1.0/3.0, 1.0/3.0,     0.0, 1.0/3.0),
      Array(1.0/2.0,     0.0, 1.0/2.0,     0.0))
    val s = constructGraph(sc.parallelize(similarities, 2), true, false)
    s.edges.collect().foreach { case Edge(i, j, x) =>
      assert(math.abs(x.similarity - expected(i.toInt)(j.toInt)) < 1e-14)
    }
  }
} 
Example 29
Source File: MusicbrainzSchema.scala    From sddf   with GNU General Public License v3.0 5 votes
package de.unihamburg.vsis.sddf.test.util

import org.apache.spark.rdd.RDD
import org.scalatest.Suite

import de.unihamburg.vsis.sddf.SddfContext.pairToInt
import de.unihamburg.vsis.sddf.preprocessing.PipePreprocessorRemoveRegex
import de.unihamburg.vsis.sddf.preprocessing.PipePreprocessorTrim
import de.unihamburg.vsis.sddf.reading.FeatureIdNameMapping
import de.unihamburg.vsis.sddf.reading.FeatureIdNameMapping.Id
import de.unihamburg.vsis.sddf.reading.FeatureIdNameMapping.Ignore
import de.unihamburg.vsis.sddf.reading.corpus.PipeReaderTupleCsv

trait MusicbrainzSchema extends TestSddfPipeContext { self: Suite =>

  val Number = (0, "number")
  val Title = (1, "title")
  val Length = (2, "length")
  val Artist = (3, "artist")
  val Album = (4, "album")
  val Year = (5, "year")
  val Language = (6, "language")

  val featureIdNameMapping = Map(Number, Title, Length, Artist, Album, Year, Language)

  implicit val featureIdNameMapper = new FeatureIdNameMapping(featureIdNameMapping)

  def parseTuples(input: RDD[String]) = {
    // Parse Tuples
    val allFields: Seq[Int] = Seq(Number, Title, Length, Artist, Album, Year, Language)
    val allFieldsWithId: Seq[Int] = Ignore +: Id +: Ignore +: allFields

    val pipe = PipeReaderTupleCsv(allFieldsWithId)
      .append(PipePreprocessorTrim(allFields: _*))
      .append(PipePreprocessorRemoveRegex("[^0-9]", Number, Year, Length))

    pipe.run(input)

  }

} 
Example 30
Source File: LocalSparkContext.scala    From sddf   with GNU General Public License v3.0 5 votes
package de.unihamburg.vsis.sddf.test.util

import org.apache.spark.SparkConf
import org.apache.spark.SparkContext
import org.scalatest.BeforeAndAfterAll
import org.scalatest.Suite

trait LocalSparkContext extends BeforeAndAfterAll { self: Suite =>

  @transient var sc: SparkContext = _

  override def beforeAll() {
    super.beforeAll()
    val conf = new SparkConf()
      .setMaster("local[2]")
      .setAppName("SddF Test")
      .set("spark.ui.enabled", "false")
    sc = new SparkContext(conf)
  }

  override def afterAll() {
    if (sc != null) {
      sc.stop()
    }
    super.afterAll()
  }

} 
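Any suite mixing the trait in gets a fresh local SparkContext in beforeAll and a guaranteed stop in afterAll. A minimal made-up example (FunSuite matches the ScalaTest generation used here):

package de.unihamburg.vsis.sddf.test.util

import org.scalatest.FunSuite

class WordCountSuite extends FunSuite with LocalSparkContext {

  test("counts occurrences with the shared SparkContext") {
    val counts = sc.parallelize(Seq("a", "b", "a")).countByValue()
    assert(counts("a") === 2L)
  }
}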
Example 31
Source File: MLlibTestSparkContext.scala    From bisecting-kmeans   with Apache License 2.0 5 votes
package org.apache.spark.mllib.util

import org.apache.spark.sql.SQLContext
import org.apache.spark.{SparkConf, SparkContext}
import org.scalatest.{BeforeAndAfterAll, Suite}

trait MLlibTestSparkContext extends BeforeAndAfterAll { self: Suite =>
  @transient var sc: SparkContext = _
  @transient var sqlContext: SQLContext = _

  override def beforeAll() {
    super.beforeAll()
    val conf = new SparkConf()
      .setMaster("local[2]")
      .setAppName("MLlibUnitTest")
    sc = new SparkContext(conf)
    sqlContext = new SQLContext(sc)
  }

  override def afterAll() {
    sqlContext = null
    if (sc != null) {
      sc.stop()
    }
    sc = null
    super.afterAll()
  }
} 
Example 32
Source File: SonarLogTester.scala    From sonar-scala   with GNU Lesser General Public License v3.0 5 votes
package org.sonar.api.utils.log

import scala.jdk.CollectionConverters._

import org.scalatest.{BeforeAndAfter, Suite}

trait SonarLogTester extends BeforeAndAfter { this: Suite =>
  before {
    LogInterceptors.set(new ListInterceptor())
    Loggers.getFactory.setLevel(LoggerLevel.DEBUG)
  }

  after {
    LogInterceptors.set(NullInterceptor.NULL_INSTANCE)
    Loggers.getFactory.setLevel(LoggerLevel.DEBUG)
  }

  def logs: Seq[String] =
    LogInterceptors.get().asInstanceOf[ListInterceptor].logs.asScala.toSeq

  def getLogs: Seq[LogAndArguments] =
    LogInterceptors.get().asInstanceOf[ListInterceptor].getLogs().asScala.toSeq

  def logsFor(level: LoggerLevel): Seq[String] =
    LogInterceptors.get().asInstanceOf[ListInterceptor].logs(level).asScala.toSeq

  def getLogsFor(level: LoggerLevel): Seq[LogAndArguments] =
    LogInterceptors.get().asInstanceOf[ListInterceptor].getLogs(level).asScala.toSeq
} 
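Tests then log through the Sonar Loggers factory and assert on what the interceptor captured. Sketch only (the spec class and message are invented; Loggers and LoggerLevel live in the same org.sonar.api.utils.log package):

package org.sonar.api.utils.log

import org.scalatest.flatspec.AnyFlatSpec
import org.scalatest.matchers.should.Matchers

class WarningLoggingSpec extends AnyFlatSpec with Matchers with SonarLogTester {

  "a component" should "log a warning that the interceptor records" in {
    Loggers.get(classOf[WarningLoggingSpec]).warn("scala version not set")
    logsFor(LoggerLevel.WARN) should contain("scala version not set")
  }
}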
Example 33
Source File: MLlibTestSparkContext.scala    From spark-tfocs   with Apache License 2.0 5 votes
package org.apache.spark.mllib.util

import org.scalatest.{ BeforeAndAfterAll, Suite }

import org.apache.spark.{ SparkConf, SparkContext }
import org.apache.spark.sql.SQLContext


trait MLlibTestSparkContext extends BeforeAndAfterAll { self: Suite =>
  @transient var sc: SparkContext = _
  @transient var sqlContext: SQLContext = _

  override def beforeAll() {
    super.beforeAll()
    val conf = new SparkConf()
      .setMaster("local[2]")
      .setAppName("MLlibUnitTest")
    sc = new SparkContext(conf)
    sc.setLogLevel("WARN")
    sqlContext = new SQLContext(sc)
  }

  override def afterAll() {
    sqlContext = null
    if (sc != null) {
      sc.stop()
    }
    sc = null
    super.afterAll()
  }
} 
Example 34
Source File: CassandraServerSpecLike.scala    From Spark2Cassandra   with Apache License 2.0 5 votes
package com.github.jparkie.spark.cassandra

import java.net.{ InetAddress, InetSocketAddress }

import com.datastax.driver.core.Session
import com.datastax.spark.connector.cql.CassandraConnector
import org.cassandraunit.utils.EmbeddedCassandraServerHelper
import org.scalatest.{ BeforeAndAfterAll, Suite }

trait CassandraServerSpecLike extends BeforeAndAfterAll { this: Suite =>
  // Remove protected modifier because of SharedSparkContext.
  override def beforeAll(): Unit = {
    super.beforeAll()

    EmbeddedCassandraServerHelper.startEmbeddedCassandra()
  }

  // Remove protected modifier because of SharedSparkContext.
  override def afterAll(): Unit = {
    EmbeddedCassandraServerHelper.cleanEmbeddedCassandra()

    super.afterAll()
  }

  def getClusterName: String = {
    EmbeddedCassandraServerHelper.getClusterName
  }

  def getHosts: Set[InetAddress] = {
    val temporaryAddress =
      new InetSocketAddress(EmbeddedCassandraServerHelper.getHost, EmbeddedCassandraServerHelper.getNativeTransportPort)
        .getAddress

    Set(temporaryAddress)
  }

  def getNativeTransportPort: Int = {
    EmbeddedCassandraServerHelper.getNativeTransportPort
  }

  def getRpcPort: Int = {
    EmbeddedCassandraServerHelper.getRpcPort
  }

  def getCassandraConnector: CassandraConnector = {
    CassandraConnector(hosts = getHosts, port = getNativeTransportPort)
  }

  def createKeyspace(session: Session, keyspace: String): Unit = {
    session.execute(
      s"""CREATE KEYSPACE "$keyspace"
          |WITH REPLICATION = { 'class' : 'SimpleStrategy', 'replication_factor' : 1 };
       """.stripMargin
    )
  }
} 
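The connector returned by getCassandraConnector can open sessions against the embedded server, for example to create a keyspace before writing data. Illustrative only (the suite and keyspace names are assumptions; withSessionDo is part of the Spark Cassandra Connector's CassandraConnector):

package com.github.jparkie.spark.cassandra

import org.scalatest.{ FunSuite, Matchers }

class KeyspaceSetupSpec extends FunSuite with Matchers with CassandraServerSpecLike {

  test("a keyspace can be created on the embedded Cassandra server") {
    getCassandraConnector.withSessionDo { session =>
      createKeyspace(session, "spark2cassandra_test")
      session.getCluster.getMetadata.getKeyspace("spark2cassandra_test") should not be null
    }
  }
}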
Example 35
Source File: PointCloudTestEnvironment.scala    From geotrellis-pointcloud   with Apache License 2.0 5 votes
package geotrellis.pointcloud.spark

import geotrellis.spark.testkit._

import org.apache.hadoop.fs.Path
import org.scalatest.Suite

import java.io.File

trait PointCloudTestEnvironment extends TestEnvironment { self: Suite =>
  val testResources = new File("src/test/resources")
  val lasPath = new Path(s"file://${testResources.getAbsolutePath}/las")
  val multipleLasPath = new Path(s"file://${testResources.getAbsolutePath}/las/files")

  def setS3Credentials: Unit = {
    try {
      val conf = ssc.sparkContext.hadoopConfiguration

      conf.set("fs.s3.impl", classOf[org.apache.hadoop.fs.s3a.S3AFileSystem].getName)
      conf.set("fs.s3a.aws.credentials.provider", classOf[com.amazonaws.auth.DefaultAWSCredentialsProviderChain].getName)
      conf.set("fs.s3a.endpoint", "s3.eu-west-2.amazonaws.com")
    } catch {
      case e: Throwable => println(e.getMessage)
    }
  }
} 
Example 36
Source File: LocalSparkContext.scala    From flint   with Apache License 2.0 5 votes
package com.twosigma.flint

import org.apache.spark.SparkContext
import org.scalatest.{ BeforeAndAfterAll, BeforeAndAfterEach }
import org.scalatest.Suite


trait LocalSparkContext extends BeforeAndAfterEach with BeforeAndAfterAll {

  self: Suite =>

  @transient var sc: SparkContext = _

  override def beforeAll() {
    super.beforeAll()
  }

  override def afterEach() {
    resetSparkContext()
    super.afterEach()
  }

  def resetSparkContext(): Unit = {
    LocalSparkContext.stop(sc)
    sc = null
  }
}

object LocalSparkContext {
  def stop(sc: SparkContext) {
    if (sc != null) {
      sc.stop()
    }
    System.clearProperty("spark.driver.port")
  }
} 
Example 37
Source File: CouchbasePluginSpec.scala    From akka-persistence-couchbase   with Apache License 2.0 5 votes
package akka.persistence.couchbase.support

import akka.actor.ActorSystem
import akka.persistence.couchbase.{CouchbaseExtension, LoggingConfig}
import com.typesafe.config.ConfigFactory
import org.scalatest.{BeforeAndAfter, BeforeAndAfterAll, Suite}

import scala.concurrent.Await
import scala.concurrent.duration._

object CouchbasePluginSpec {

  val config = ConfigFactory.parseString(
    """
      |akka {
      |  persistence {
      |    journal {
      |      plugin = "couchbase-journal"
      |    }
      |
      |    snapshot-store {
      |      plugin =  "couchbase-snapshot-store"
      |    }
      |
      |    journal-plugin-fallback {
      |      replay-filter {
      |        mode = warn
      |      }
      |    }
      |  }
      |
      |  test.single-expect-default = 10s
      |  loglevel = WARNING
      |  log-dead-letters = 0
      |  log-dead-letters-during-shutdown = off
      |  test.single-expect-default = 10s
      |}
      |
      |couchbase-replay {
      |
      |  batchSize = "4"
      |}
    """.stripMargin)
}

trait CouchbasePluginSpec
  extends Suite
    with BeforeAndAfter
    with BeforeAndAfterAll {

  System.setProperty("java.util.logging.config.class", classOf[LoggingConfig].getName)

  def system: ActorSystem

  def couchbase = CouchbaseExtension(system)

  before {
    assert(couchbase.journalBucket.bucketManager.flush())
    assert(couchbase.snapshotStoreBucket.bucketManager.flush())
  }

  override protected def afterAll(): Unit = {
    Await.result(system.terminate(), 10.seconds)
    super.afterAll()
  }
} 
Example 38
Source File: SharedSparkContext.scala    From tispark   with Apache License 2.0 5 votes
package org.apache.spark

import org.apache.spark.SharedSparkContext._
import org.apache.spark.sql.internal.StaticSQLConf
import org.scalatest.{BeforeAndAfterAll, BeforeAndAfterEach, Suite}

trait SharedSparkContext extends BeforeAndAfterAll with BeforeAndAfterEach { self: Suite =>

  protected var _isHiveEnabled: Boolean = false
  protected var conf: SparkConf = new SparkConf(false)

  def sc: SparkContext = _sc

  override protected def beforeAll(): Unit = {
    super.beforeAll()
    if (_sc != null) {
      SharedSparkContext.stop()
    }
    initializeContext()
  }

  protected def initializeContext(): Unit =
    synchronized {
      if (null == _sc) {
        conf.set("spark.sql.test.key", "true")
        if (_isHiveEnabled) {
          conf.set(StaticSQLConf.CATALOG_IMPLEMENTATION, "hive")
        }
        _sc = new SparkContext("local[4]", "tispark-integration-test", conf)
      }
    }

  override protected def afterAll(): Unit = {
    try {
      SharedSparkContext.stop()
    } finally {
      super.afterAll()
    }
  }
}

object SharedSparkContext {

  @transient private var _sc: SparkContext = _

  def stop(): Unit =
    synchronized {
      if (_sc != null) {
        _sc.stop()
        _sc = null
      }
      // To avoid RPC rebinding to the same port, since it doesn't unbind immediately on shutdown
      System.clearProperty("spark.driver.port")
    }

} 
Example 39
Source File: VertxEnvironment.scala    From eventuate   with Apache License 2.0 5 votes
package com.rbmhtechnology.eventuate.adapter.vertx

import java.util.UUID

import akka.testkit.TestKit
import io.vertx.core.Vertx
import org.scalatest.{ BeforeAndAfterEach, Suite }

trait VertxEnvironment extends BeforeAndAfterEach {
  this: TestKit with Suite =>

  var vertx: Vertx = _

  override def beforeEach(): Unit = {
    super.beforeEach()
    vertx = Vertx.vertx()
  }

  def registerEventBusCodec(clazz: Class[_]): Unit = {
    vertx.eventBus().registerDefaultCodec(clazz.asInstanceOf[Class[AnyRef]], AkkaSerializationMessageCodec(clazz))
  }

  def endpointAddress(id: String): String =
    s"vertx-endpoint-$id-${UUID.randomUUID().toString}"
} 
Example 40
Source File: VertxEventBusProbes.scala    From eventuate   with Apache License 2.0 5 votes
package com.rbmhtechnology.eventuate.adapter.vertx

import akka.actor.ActorSystem
import akka.testkit.{ TestKit, TestProbe }
import com.rbmhtechnology.eventuate.adapter.vertx.utilities.EventBusMessage
import io.vertx.core.eventbus.Message
import org.scalatest.{ BeforeAndAfterEach, Suite }

trait VertxEventBusProbes extends BeforeAndAfterEach {
  this: TestKit with Suite with VertxEnvironment =>

  import VertxHandlerConverters._

  var endpoint1: EventBusEndpoint = _
  var endpoint2: EventBusEndpoint = _

  override def beforeEach(): Unit = {
    super.beforeEach()

    endpoint1 = EventBusEndpoint.withId("1")
    endpoint2 = EventBusEndpoint.withId("2")
  }

  def eventBusProbe(endpoint: String): TestProbe = {
    val probe = TestProbe()
    val handler = (m: Message[String]) => probe.ref ! EventBusMessage(m.body(), m, endpoint)
    vertx.eventBus().consumer[String](endpoint, handler.asVertxHandler)
    probe
  }

  object EventBusEndpoint {
    def apply(address: String): EventBusEndpoint =
      new EventBusEndpoint(address, eventBusProbe(address))

    def withId(id: String): EventBusEndpoint =
      apply(endpointAddress(id))
  }

  case class EventBusEndpoint(address: String, probe: TestProbe)
} 
Example 41
Source File: AkkaUnitTestLike.scala    From reactive-kinesis   with Apache License 2.0 5 votes vote down vote up
package com.weightwatchers.reactive.kinesis.common

import akka.actor.{ActorSystem, Scheduler}
import akka.stream.{ActorMaterializer, Materializer}
import akka.testkit.TestKitBase
import com.typesafe.config.{Config, ConfigFactory}
import org.scalatest.concurrent.ScalaFutures
import org.scalatest.{BeforeAndAfterAll, Suite}

import scala.concurrent.ExecutionContextExecutor


trait AkkaUnitTestLike extends TestKitBase with ScalaFutures with BeforeAndAfterAll {
  self: Suite =>

  implicit lazy val config: Config                = ConfigFactory.load("sample.conf")
  implicit lazy val system: ActorSystem           = ActorSystem(suiteName, config)
  implicit lazy val scheduler: Scheduler          = system.scheduler
  implicit lazy val mat: Materializer             = ActorMaterializer()
  implicit lazy val ctx: ExecutionContextExecutor = system.dispatcher

  abstract override def afterAll(): Unit = {
    super.afterAll()
    // intentionally shutdown the actor system last.
    system.terminate().futureValue
  }
} 
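A hypothetical usage sketch: a spec built on AkkaUnitTestLike. The implicit ActorSystem comes from the trait (named after the suite) and afterAll terminates it once the suite finishes; the spec name and message are illustrative.

package com.weightwatchers.reactive.kinesis.common

import akka.testkit.TestProbe
import org.scalatest.WordSpecLike

class ProbeRoundTripSpec extends WordSpecLike with AkkaUnitTestLike {

  "a TestProbe" should {
    "receive a message sent to its ref" in {
      val probe = TestProbe()
      probe.ref ! "ping"
      probe.expectMsg("ping")
    }
  }
}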
Example 42
Source File: TestSchemaClickhouseQuerySpec.scala    From clickhouse-scala-client   with GNU Lesser General Public License v3.0 5 votes vote down vote up
package com.crobox.clickhouse

import com.crobox.clickhouse.dsl.TestSchema
import com.crobox.clickhouse.dsl.execution.ClickhouseQueryExecutor
import com.crobox.clickhouse.dsl.schemabuilder.{CreateTable, Engine}
import com.crobox.clickhouse.testkit.ClickhouseSpec
import org.scalatest.concurrent.ScalaFutures
import org.scalatest.{BeforeAndAfterAll, Suite}

import scala.concurrent.ExecutionContext

trait TestSchemaClickhouseQuerySpec extends ClickhouseSpec with BeforeAndAfterAll with TestSchema with ScalaFutures {
  this: Suite =>
  val table1Entries: Seq[Table1Entry] = Seq()
  val table2Entries: Seq[Table2Entry] = Seq()

  implicit val ec: ExecutionContext

  implicit lazy val chExecutor: ClickhouseQueryExecutor = ClickhouseQueryExecutor.default(clickClient)

  override def beforeAll(): Unit = {
    super.beforeAll()
    val tables = for {
      _ <- clickClient.execute(
        CreateTable(OneTestTable, Engine.Memory, ifNotExists = true).query
      )
      _ <- clickClient.execute(
        CreateTable(
          TwoTestTable,
          Engine.Memory,
          ifNotExists = true
        ).query
      )
    } yield {}
    whenReady(tables) { _ =>
      val inserts = for {
        _ <- table1Entries.into(OneTestTable)
        _ <- table2Entries.into(TwoTestTable)
      } yield {}
      inserts.futureValue
    }
  }

  override def afterAll(): Unit = {
    super.afterAll()
  }
} 
Example 43
Source File: TestStreamingContext.scala    From gihyo-spark-book-example   with Apache License 2.0 5 votes vote down vote up
package jp.gihyo.spark

import org.scalatest.{BeforeAndAfterEach, Suite}

import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.streaming.{StreamingContext, Seconds}
import jp.gihyo.spark.ch06.UserDic

private[spark]
trait TestStreamingContext extends BeforeAndAfterEach { self: Suite =>
  @transient var ssc: StreamingContext = _
  @transient var sc: SparkContext = _
  val master = "local[2]"
  val appN = "StreamingUnitTest"
  val bd = Seconds(1)

  override def beforeEach() {
    super.beforeEach()
    val conf = new SparkConf().setMaster(master)
      .setAppName(appN)
      .set("spark.streaming.clock", "org.apache.spark.util.ManualClock")
      .registerKryoClasses(Array(classOf[UserDic]))

    ssc = new StreamingContext(conf, bd)
    sc = ssc.sparkContext
  }

  override def afterEach() {
    try {
      if (ssc != null) {
        // stop with sc
        ssc.stop(true)
      }
      ssc = null;
    } finally {
      super.afterEach()
    }
  }
} 
Example 44
Source File: TestSparkContext.scala    From gihyo-spark-book-example   with Apache License 2.0 5 votes vote down vote up
package jp.gihyo.spark

import org.scalatest.{BeforeAndAfterAll, Suite}

import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.sql.SQLContext

private[spark]
trait TestSparkContext extends BeforeAndAfterAll { self: Suite =>
  @transient var sc: SparkContext = _
  @transient var sqlContext: SQLContext = _

  override def beforeAll() {
    super.beforeAll()
    val conf = new SparkConf()
      .setMaster("local[2]")
      .setAppName("SparkUnitTest")
      .set("spark.sql.shuffle.partitions", "2")
    sc = new SparkContext(conf)
    SQLContext.clearActive()
    sqlContext = new SQLContext(sc)
    SQLContext.setActive(sqlContext)
  }

  override def afterAll() {
    try {
      sqlContext = null
      SQLContext.clearActive()
      if (sc != null) {
        sc.stop()
      }
      sc = null
    } finally {
      super.afterAll()
    }
  }
} 
Example 45
Source File: SqliteTestBase.scala    From smui   with Apache License 2.0 5 votes vote down vote up
package utils

import java.io.File

import org.scalatest.{BeforeAndAfterAll, Suite}
import play.api.db.evolutions.Evolutions
import play.api.db.{Database, Databases}

trait SqliteTestBase extends BeforeAndAfterAll { self: Suite =>

  private lazy val dbFile = File.createTempFile("sqlitetest", ".db")

  lazy val db: Database = {
    // Use a temp file for the database - in-memory DB cannot be used
    // since it would be a different DB for each connection in the connection pool
    // (see https://www.sqlite.org/inmemorydb.html)
    val d = Databases("org.sqlite.JDBC", s"jdbc:sqlite:${dbFile.getAbsolutePath}")
    Evolutions.applyEvolutions(d)
    d
  }

  override protected def afterAll(): Unit = {
    super.afterAll()
    db.shutdown()
    dbFile.delete()
  }

} 
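A hypothetical usage sketch: a suite using the temporary SQLite database provided by SqliteTestBase. The database (and its evolutions) is created lazily on first access to db; the spec name and assertion are illustrative.

package utils

import org.scalatest.FlatSpec

class SqliteConnectionSpec extends FlatSpec with SqliteTestBase {

  "the temporary SQLite database" should "hand out open connections" in {
    db.withConnection { connection =>
      assert(!connection.isClosed)
    }
  }
}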
Example 46
Source File: ApplicationTestBase.scala    From smui   with Apache License 2.0 5 votes vote down vote up
package models

import models.rules._
import org.scalatest.{BeforeAndAfterAll, Suite}
import play.api.{Application, Mode}
import play.api.db.{Database, Databases}
import play.api.inject.Injector
import play.api.inject.guice.GuiceApplicationBuilder


trait ApplicationTestBase extends BeforeAndAfterAll { self: Suite =>

  protected lazy val db: Database = Databases.inMemory()

  // Use logging settings from logback-test.xml for test application
  System.setProperty("logger.resource", "logback-test.xml")

  protected lazy val application: Application = new GuiceApplicationBuilder().
    in(Mode.Test).
    configure("db.default.url" -> db.url, "db.default.driver" -> "org.h2.Driver",
      "db.default.username" -> "", "db.default.password" -> "", "toggle.rule-deployment.log-rule-id" -> true).
    build()

  protected lazy val injector: Injector = application.injector

  protected lazy val repo: SearchManagementRepository = injector.instanceOf[SearchManagementRepository]

  protected val (core1Id, core2Id) = (SolrIndexId(), SolrIndexId())

  protected def createTestCores(): Unit = {
    repo.addNewSolrIndex(SolrIndex(core1Id, "core1", "First core"))
    repo.addNewSolrIndex(SolrIndex(core2Id, "core2", "Second core"))
  }

  protected def createTestRule(): Seq[SearchInputId] = {
    val synonymRules = List (SynonymRule(SynonymRuleId(), 0, "mercury", isActive = true))
    val upDownRules = List(
      UpDownRule(UpDownRuleId(), UpDownRule.TYPE_UP, 10, "notebook", isActive = true),
      UpDownRule(UpDownRuleId(), UpDownRule.TYPE_UP, 10, "lenovo", isActive = false),
      UpDownRule(UpDownRuleId(), UpDownRule.TYPE_DOWN, 10, "battery", isActive = true)
    )
    val deleteRules = List(DeleteRule(DeleteRuleId(), "freddy", isActive = true))
    val filterRules = List(FilterRule(FilterRuleId(), "zz top", isActive = true))

    val id = repo.addNewSearchInput(core1Id, "aerosmith", Seq.empty)
    val searchInput = SearchInputWithRules(id, "aerosmith", synonymRules, upDownRules, filterRules, isActive = true, comment = "")
    repo.updateSearchInput(searchInput)

    val shippingId = repo.addNewSearchInput(core1Id, "shipping", Seq.empty)
    val redirectRule = RedirectRule(RedirectRuleId(), "http://xyz.com/shipping", isActive = true)
    val searchInputForRedirect = SearchInputWithRules(shippingId, "shipping", redirectRules = List(redirectRule), isActive = true, comment = "")
    repo.updateSearchInput(searchInputForRedirect)

    Seq(id, shippingId)
  }

  override protected def afterAll(): Unit = {
    application.stop()
    db.shutdown()
  }

} 
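A hypothetical usage sketch (assumes the SMUI repository classes above are on the test classpath): the expected count simply mirrors the two search inputs created by createTestRule.

package models

import org.scalatest.FlatSpec

class SearchInputSetupSpec extends FlatSpec with ApplicationTestBase {

  "createTestRule" should "create the aerosmith and shipping inputs" in {
    createTestCores()
    val ids = createTestRule()
    assert(ids.size == 2)
  }
}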
Example 47
Source File: TestUtils.scala    From odsc-east-realish-predictions   with Apache License 2.0 5 votes vote down vote up
package com.twilio.open.odsc.realish

import com.holdenkarau.spark.testing.{LocalSparkContext, SparkContextProvider}
import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.sql.SparkSession
import org.scalatest.{BeforeAndAfterAll, Suite}

object TestUtils {

}

@SerialVersionUID(1L)
case class UserPersonality(uuid: String, name: String, tags: Array[String])
  extends Serializable

@SerialVersionUID(1L)
case class Author(uuid: String, name: String, age: Int) extends Serializable

@SerialVersionUID(1L)
case class LibraryBook(uuid: String, name: String, author: Author) extends Serializable

case class MockKafkaDataFrame(key: Array[Byte], value: Array[Byte])

trait SharedSparkSql extends BeforeAndAfterAll with SparkContextProvider {
  self: Suite =>

  @transient var _sparkSql: SparkSession = _
  @transient private var _sc: SparkContext = _

  override def sc: SparkContext = _sc

  def conf: SparkConf

  def sparkSql: SparkSession = _sparkSql

  override def beforeAll() {
    _sparkSql = SparkSession.builder().config(conf).getOrCreate()

    _sc = _sparkSql.sparkContext
    setup(_sc)
    super.beforeAll()
  }

  override def afterAll() {
    try {
      _sparkSql.close()
      _sparkSql = null
      LocalSparkContext.stop(_sc)
      _sc = null
    } finally {
      super.afterAll()
    }
  }

} 
Example 49
Source File: TestZooKeeper.scala    From mango   with Apache License 2.0 5 votes vote down vote up
package com.kakao.mango.zk

import java.io.{File, IOException}
import java.net.{ServerSocket, Socket}
import java.util.concurrent.TimeUnit

import com.kakao.mango.concurrent.NamedExecutors
import com.kakao.mango.logging.{LogLevelOverrider, Logging}
import com.kakao.shaded.guava.io.Files
import org.apache.zookeeper.server.persistence.FileTxnSnapLog
import org.apache.zookeeper.server.{ServerCnxnFactory, ServerConfig, ZooKeeperServer}
import org.scalatest.{BeforeAndAfterAll, Suite}

trait TestZooKeeper extends BeforeAndAfterAll with Logging { this: Suite =>

  
  val zkServerPort = 2181
  val zkServerExecutor = NamedExecutors.single("zookeeper-server")
  var zk: ZooKeeperConnection = _

  override protected def beforeAll(): Unit = {
    logger.info("Launching a standalone ZooKeeper server for testing...")

    try {
      val socket = new ServerSocket(zkServerPort)
      socket.close()
    } catch {
      case e: IOException =>
        throw new RuntimeException(s"TCP port $zkServerPort is required for tests but not available")
    }

    zkServerExecutor.submit {
      LogLevelOverrider.error("org.apache.zookeeper")

      val datadir = Files.createTempDir().getAbsolutePath
      val config = new ServerConfig
      config.parse(Array(zkServerPort.toString, datadir))

      val zkServer = new ZooKeeperServer
      zkServer.setTxnLogFactory(new FileTxnSnapLog(new File(datadir), new File(datadir)))
      zkServer.setTickTime(6000)
      zkServer.setMinSessionTimeout(6000)
      zkServer.setMaxSessionTimeout(6000)

      val cnxnFactory = ServerCnxnFactory.createFactory

      try {
        cnxnFactory.configure(config.getClientPortAddress, 60)
        cnxnFactory.startup(zkServer)
        cnxnFactory.join()
      } catch {
        case _: InterruptedException =>
          logger.info("ZooKeeper server interrupted; shutting down...")
          cnxnFactory.shutdown()
          cnxnFactory.join()
          if (zkServer.isRunning) {
            zkServer.shutdown()
          }
          logger.info("ZooKeeper server stopped")
      }
    }

    var connected = false
    while (!connected) {
      logger.info("Waiting for ZooKeeper server to launch...")
      try {
        val socket = new Socket("localhost", zkServerPort)
        logger.info("ZooKeeper server is available")
        socket.close()

        zk = ZooKeeperConnection(s"localhost:$zkServerPort")
        connected = true
      } catch {
        case _: IOException => Thread.sleep(1000) // retry
      }
    }

    super.beforeAll()
  }

  override protected def afterAll(): Unit = {
    try super.afterAll()
    finally {
      zk.close()
      logger.info("Interrupting ZooKeeper server...")
      zkServerExecutor.shutdownNow()
      while (!zkServerExecutor.awaitTermination(1, TimeUnit.SECONDS)) {
        logger.info("awaiting ZooKeeper server termination...")
      }
      logger.info("ZooKeeper server terminated")
    }
  }
} 
Example 50
Source File: SharedSparkSession.scala    From XSQL   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.sql.test

import scala.concurrent.duration._

import org.scalatest.{BeforeAndAfterEach, Suite}
import org.scalatest.concurrent.Eventually

import org.apache.spark.{DebugFilesystem, SparkConf}
import org.apache.spark.sql.{SparkSession, SQLContext}
import org.apache.spark.sql.catalyst.optimizer.ConvertToLocalRelation
import org.apache.spark.sql.internal.SQLConf
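
// Note: the SharedSparkSession trait declaration and its session-creation members
// (including the _spark/spark fields referenced below) were elided in this excerpt;
// only the lifecycle overrides are shown.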


  protected override def afterAll(): Unit = {
    try {
      super.afterAll()
    } finally {
      try {
        if (_spark != null) {
          try {
            _spark.sessionState.catalog.reset()
          } finally {
            _spark.stop()
            _spark = null
          }
        }
      } finally {
        SparkSession.clearActiveSession()
        SparkSession.clearDefaultSession()
      }
    }
  }

  protected override def beforeEach(): Unit = {
    super.beforeEach()
    DebugFilesystem.clearOpenStreams()
  }

  protected override def afterEach(): Unit = {
    super.afterEach()
    // Clear all persistent datasets after each test
    spark.sharedState.cacheManager.clearCache()
    // files can be closed from other threads, so wait a bit
    // normally this doesn't take more than 1s
    eventually(timeout(10.seconds), interval(2.seconds)) {
      DebugFilesystem.assertNoOpenStreams()
    }
  }
} 
Example 51
Source File: MockitoSparkContext.scala    From HANAVora-Extensions   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark

import com.sap.spark.WithSparkContext
import org.mockito.Mockito._
import org.scalatest.Suite
import org.scalatest.mock.MockitoSugar


trait MockitoSparkContext extends WithSparkContext with MockitoSugar {
  self: Suite =>

  private var _sparkConf: SparkConf = _
  private var _sc: SparkContext = _

  override def sc: SparkContext = _sc

  protected def mockSparkConf: SparkConf = _sparkConf

  override protected def setUpSparkContext(): Unit = {
    _sparkConf = sparkConf
    _sc = mock[SparkContext](withSettings().stubOnly())
    when(_sc.conf).thenReturn(_sparkConf)
    when(_sc.getConf).thenReturn(_sparkConf)
    when(_sc.ui).thenReturn(None)
  }

  override protected def tearDownSparkContext(): Unit = {
    _sc.stop()
    _sc = null
  }

} 
Example 52
Source File: GlobalSapSQLContext.scala    From HANAVora-Extensions   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.sql

import java.io.File

import com.sap.spark.util.TestUtils
import com.sap.spark.{GlobalSparkContext, WithSQLContext}
import org.apache.spark.SparkContext
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.expressions.{BoundReference, Cast}
import org.apache.spark.unsafe.types._
import org.apache.spark.sql.types._
import org.scalatest.Suite

import scala.io.Source

trait GlobalSapSQLContext extends GlobalSparkContext with WithSQLContext {
  self: Suite =>

  override implicit def sqlContext: SQLContext = GlobalSapSQLContext._sqlc

  override protected def setUpSQLContext(): Unit =
    GlobalSapSQLContext.init(sc)

  override protected def tearDownSQLContext(): Unit =
    GlobalSapSQLContext.reset()

  def getDataFrameFromSourceFile(sparkSchema: StructType, path: File): DataFrame = {
    val conversions = sparkSchema.toSeq.zipWithIndex.map({
      case (field, index) =>
        Cast(BoundReference(index, StringType, nullable = true), field.dataType)
    })
    val data = Source.fromFile(path)
      .getLines()
      .map({ line =>
      val stringRow = InternalRow.fromSeq(line.split(",", -1).map(UTF8String.fromString))
      Row.fromSeq(conversions.map({ c => c.eval(stringRow) }))
    })
    val rdd = sc.parallelize(data.toSeq, numberOfSparkWorkers)
    sqlContext.createDataFrame(rdd, sparkSchema)
  }
}

object GlobalSapSQLContext {

  private var _sqlc: SQLContext = _

  private def init(sc: SparkContext): Unit =
    if (_sqlc == null) {
      _sqlc = TestUtils.newSQLContext(sc)
    }

  private def reset(): Unit = {
    if (_sqlc != null) {
      _sqlc.catalog.unregisterAllTables()
    }
  }

} 
Example 53
Source File: WithSparkContext.scala    From HANAVora-Extensions   with Apache License 2.0 5 votes vote down vote up
package com.sap.spark

import com.sap.spark.util.TestUtils._
import org.apache.spark.{SparkConf, SparkContext}
import org.scalatest.{BeforeAndAfterAll, Suite}

trait WithSparkContext extends BeforeAndAfterAll {
  self: Suite =>

  override def beforeAll(): Unit = {
    try {
      super.beforeAll()
      setUpSparkContext()
    } catch {
      case ex: Throwable =>
        tearDownSparkContext()
        throw ex
    }
  }

  override def afterAll(): Unit = {
    try {
      super.afterAll()
    } finally {
      tearDownSparkContext()
    }
  }

  
    conf.set("spark.sql.autoBroadcastJoinThreshold", "-1")
    conf.set("spark.broadcast.factory", "org.apache.spark.broadcast.HttpBroadcastFactory")
    conf.set("spark.shuffle.spill", "false")
    conf.set("spark.shuffle.compress", "false")
    conf.set("spark.ui.enabled", "false")
    conf.set("spark.ui.showConsoleProgress", "false")
  }

  def sc: SparkContext

  protected def setUpSparkContext(): Unit

  protected def tearDownSparkContext(): Unit

} 
Example 54
Source File: GlobalSparkContext.scala    From HANAVora-Extensions   with Apache License 2.0 5 votes vote down vote up
package com.sap.spark

import org.apache.spark.{SparkConf, SparkContext}
import org.scalatest.{BeforeAndAfterAll, Suite}


// Note: the GlobalSparkContext trait definition was elided in this excerpt; its
// companion object below manages the shared SparkContext via init/reset/close.

object GlobalSparkContext {
  @transient private var _sc: SparkContext = _

  def init(sparkMaster: String, sparkConf: SparkConf): Unit = {
    if (_sc == null) {
      this.synchronized {
        if (_sc == null) {
          _sc = new SparkContext(sparkMaster, "test", sparkConf)
        }
      }
    }
  }

  def reset(): Unit = {
    if (_sc != null) {
      _sc.cancelAllJobs()
    }
  }

  def close(): Unit = {
    if (_sc != null) {
      _sc.stop()
      _sc = null
    }
  }

} 
Example 55
Source File: WithSQLContext.scala    From HANAVora-Extensions   with Apache License 2.0 5 votes vote down vote up
package com.sap.spark

import java.util.Locale

import org.apache.spark.sql.SQLContext
import org.apache.spark.sql.hive.HiveContext
import org.scalatest.{BeforeAndAfterEach, Suite}

trait WithSQLContext extends BeforeAndAfterEach {
  self: Suite with WithSparkContext =>

  override def beforeEach(): Unit = {
    try {
      super.beforeEach()
      setUpSQLContext()
    } catch {
      case ex: Throwable =>
        tearDownSQLContext()
        throw ex
    }
  }

  override def afterEach(): Unit = {
    try {
      super.afterEach()
    } finally {
      tearDownSQLContext()
    }
  }

  implicit def sqlContext: SQLContext = _sqlContext
  def sqlc: SQLContext = sqlContext

  var _sqlContext: SQLContext = _

  protected def setUpSQLContext(): Unit =
    _sqlContext = SQLContext.getOrCreate(sc).newSession()


  protected def tearDownSQLContext(): Unit =
    _sqlContext = null

  protected def tableName(name: String): String =
    sqlc match {
      
      case _: HiveContext => name.toLowerCase(Locale.ENGLISH)
      case _ => name
    }

} 
Example 56
Source File: MultiNodeClusterSpec.scala    From JustinDB   with Apache License 2.0 5 votes vote down vote up
package justin.db

import akka.cluster.Cluster
import akka.remote.testkit.MultiNodeSpec
import com.typesafe.config.{Config, ConfigFactory}
import org.scalatest.Suite

object MultiNodeClusterSpec {

  val commonBaseConfig: Config = ConfigFactory.parseString(
    s"""
       |akka.loglevel = INFO
       |akka.log-config-on-start = false
       |akka.log-dead-letters = off
       |akka.log-dead-letters-during-shutdown = off
       |akka.remote.log-remote-lifecycle-events = off
    """.stripMargin
  )
}

trait MultiNodeClusterSpec extends Suite with STMultiNodeSpec { self: MultiNodeSpec ⇒

  
  def cluster: Cluster = Cluster(system)

  def initialParticipants: Int = roles.size
} 
Example 57
Source File: ScalaTestBridge.scala    From play-json-ops   with MIT License 5 votes vote down vote up
package scala.testing.scalatest

import org.scalatest.{Suite, TestRegistration}

import scala.testing.TestSuiteBridge


trait ScalaTestBridge extends TestSuiteBridge with Suite with TestRegistration {

  override protected def assertEqual[T](left: T, right: T): Unit = assert(left == right)

  override def fail(): Nothing                                  = super[TestSuiteBridge].fail()
  override def fail(message: String): Nothing                   = super[TestSuiteBridge].fail(message)
  override def fail(message: String, cause: Throwable): Nothing = super[TestSuiteBridge].fail(message, cause)
  override def fail(cause: Throwable): Nothing                  = super[TestSuiteBridge].fail(cause)

  override protected def doFail(optReason: Option[String], optCause: Option[Throwable]): Nothing = {
    (optReason, optCause) match {
      case (Some(reason), Some(cause)) => super[Suite].fail(reason, cause)
      case (Some(reason), None)        => super[Suite].fail(reason)
      case (None, Some(cause))         => super[Suite].fail(cause)
      case (None, None)                => super[Suite].fail()
    }
  }

  override protected[testing] def registerTests(tests: Map[String, () => Unit]): Unit = {
    for ((name, test) <- tests) {
      registerTest(name)(test())
    }
  }

  // register the tests after the Suite has been initialized
  registerTests()
} 
Example 59
Source File: ScalaTestBridge.scala    From play-json-ops   with MIT License 5 votes vote down vote up
package scala.testing.scalatest

import org.scalatest.{Suite, Tag}

import scala.testing.TestSuiteBridge


trait ScalaTestBridge extends TestSuiteBridge with Suite {

  override protected def assertEqual[T](left: T, right: T): Unit = assert(left == right)

  override def fail(): Nothing                                  = super[TestSuiteBridge].fail()
  override def fail(message: String): Nothing                   = super[TestSuiteBridge].fail(message)
  override def fail(message: String, cause: Throwable): Nothing = super[TestSuiteBridge].fail(message, cause)
  override def fail(cause: Throwable): Nothing                  = super[TestSuiteBridge].fail(cause)

  override protected def doFail(optReason: Option[String], optCause: Option[Throwable]): Nothing = {
    (optReason, optCause) match {
      case (Some(reason), Some(cause)) => super[Suite].fail(reason, cause)
      case (Some(reason), None)        => super[Suite].fail(reason)
      case (None, Some(cause))         => super[Suite].fail(cause)
      case (None, None)                => super[Suite].fail()
    }
  }

  def registerTest(testText: String, testTags: Tag*)(testFun: => Unit): Unit

  override protected[testing] def registerTests(tests: Map[String, () => Unit]): Unit = {
    for ((name, test) <- tests) {
      registerTest(name)(test())
    }
  }

  // register the tests after the Suite has been initialized
  registerTests()
} 
Example 60
Source File: DockerContainers.scala    From reactive-consul   with MIT License 5 votes vote down vote up
package stormlantern.dockertestkit

import com.spotify.docker.client.messages.{ ContainerConfig, HostConfig }
import org.scalatest.{ BeforeAndAfterAll, Suite }
import stormlantern.dockertestkit.client.Container

trait DockerContainers extends BeforeAndAfterAll { this: Suite =>

  def containerConfigs: Set[ContainerConfig]
  val hostConfig = HostConfig.builder()
    .publishAllPorts(true)
    .networkMode("bridge")
    .build()
  val containers = containerConfigs.map(new Container(_))

  def withDockerHosts[T](ports: Set[String])(f: Map[String, (String, Int)] => T): T = {
    // Find the mapped available ports in the network settings
    f(ports.zip(ports.flatMap(p => containers.map(c => c.mappedPort(p).headOption))).map {
      case (port, Some(binding)) => port -> (DockerClientProvider.hostname, binding.hostPort().toInt)
      case (port, None) => throw new IndexOutOfBoundsException(s"Cannot find mapped port $port")
    }.toMap)
  }

  override def beforeAll(): Unit = containers.foreach(_.start())

  override def afterAll(): Unit = containers.foreach { container =>
    container.stop()
    container.remove()
  }
} 
Example 61
Source File: ConsulRegistratorDockerContainer.scala    From reactive-consul   with MIT License 5 votes vote down vote up
package stormlantern.consul.client.util

import com.spotify.docker.client.messages.ContainerConfig
import org.scalatest.Suite
import stormlantern.dockertestkit.{ DockerClientProvider, DockerContainers }

import scala.collection.JavaConversions._

trait ConsulRegistratorDockerContainer extends DockerContainers { this: Suite ⇒

  def consulContainerConfig = {
    val image: String = "progrium/consul"
    val command: Seq[String] = Seq("-server", "-bootstrap", "-advertise", DockerClientProvider.hostname)
    ContainerConfig.builder().image(image).cmd(command).build()
  }

  def registratorContainerConfig = {
    val hostname = DockerClientProvider.hostname
    val image: String = "progrium/registrator"
    val command: String = s"consul://$hostname:8500"
    val volume: String = "/var/run/docker.sock:/tmp/docker.sock"
    ContainerConfig.builder().image(image).cmd(command).hostname(hostname).volumes(volume).build()
  }

  override def containerConfigs = Set(consulContainerConfig, registratorContainerConfig)

  def withConsulHost[T](f: (String, Int) ⇒ T): T = super.withDockerHosts(Set("8500/tcp")) { pb ⇒
    val (h, p) = pb("8500/tcp")
    f(h, p)
  }
} 
Example 62
Source File: TempDirectory.scala    From spark-tda   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.ml.feature

import java.io.File
import org.scalatest.{BeforeAndAfterAll, Suite}
import com.holdenkarau.spark.testing.Utils


// Note: the trait declaration was elided in this excerpt; the header below is a
// reconstruction so that the members parse.
trait TempDirectory extends BeforeAndAfterAll { self: Suite =>

  private var _tempDir: File = _

  protected def tempDir: File = _tempDir

  override def beforeAll(): Unit = {
    super.beforeAll()
    _tempDir = Utils.createTempDir()
  }

  override def afterAll(): Unit = {
    try {
      Utils.deleteRecursively(_tempDir)
    } finally {
      super.afterAll()
    }
  }
} 
Example 63
Source File: MockVatReturnsOrchestrator.scala    From vat-api   with Apache License 2.0 5 votes vote down vote up
package uk.gov.hmrc.vatapi.mocks.orchestrators

import org.scalatest.Suite
import uk.gov.hmrc.domain.Vrn
import uk.gov.hmrc.vatapi.mocks.Mock
import uk.gov.hmrc.vatapi.models.VatReturnDeclaration
import uk.gov.hmrc.vatapi.orchestrators.VatReturnsOrchestrator

trait MockVatReturnsOrchestrator extends Mock { _: Suite =>

  val mockVatReturnsOrchestrator = mock[VatReturnsOrchestrator]

  object MockVatReturnsOrchestrator {
    def submitVatReturn(vrn: Vrn, vatReturn: VatReturnDeclaration) = {
      when(mockVatReturnsOrchestrator.submitVatReturn(eqTo(vrn), eqTo(vatReturn), any())(any(), any()))
    }
  }

  override protected def beforeEach(): Unit = {
    super.beforeEach()
    reset(mockVatReturnsOrchestrator)
  }
} 
Example 64
Source File: MockHttp.scala    From vat-api   with Apache License 2.0 5 votes vote down vote up
package uk.gov.hmrc.vatapi.mocks

import org.mockito.ArgumentCaptor
import org.mockito.stubbing.OngoingStubbing
import org.scalatest.Suite
import play.api.libs.json.Writes
import uk.gov.hmrc.http.{HeaderCarrier, HttpReads, HttpResponse}
import uk.gov.hmrc.play.bootstrap.http.DefaultHttpClient

import scala.concurrent.{ExecutionContext, Future}


trait MockHttp extends Mock { _: Suite =>

  val mockHttp: DefaultHttpClient = mock[DefaultHttpClient]

  override protected def beforeEach(): Unit = {
    super.beforeEach()
    reset(mockHttp)
  }

  private val headerCarrierCaptor = ArgumentCaptor.forClass(classOf[HeaderCarrier])

  object MockHttp {

    def GET[T](url: String): OngoingStubbing[Future[T]] = {
      when(mockHttp.GET[T](eqTo(url))(any(), headerCarrierCaptor.capture(), any()))
    }

    def fetchHeaderCarrier: HeaderCarrier = headerCarrierCaptor.getValue
  }

  def setupMockHttpGet(url: String)(response: HttpResponse): OngoingStubbing[Future[HttpResponse]] =
    when(mockHttp.GET[HttpResponse](eqTo(url))
      (any(), any(), any())).thenReturn(Future.successful(response))


  def setupMockFailedHttpGet(url: String)(response: HttpResponse): OngoingStubbing[Future[HttpResponse]] =
    when(mockHttp.GET[HttpResponse](eqTo(url))
      (any(), any(), any())).thenReturn(Future.failed(new Exception))

  def setupMockHttpPost[T, R](url: String, elem: T)(response: R): OngoingStubbing[Future[R]] ={
    when(
      mockHttp.POST[T, R](eqTo(url), eqTo[T](elem), any[Seq[(String, String)]]())
      (any[Writes[T]](), any[HttpReads[R]](), any[HeaderCarrier](), any[ExecutionContext]())
    ).thenReturn(
      Future.successful(response))
  }

  def setupMockHttpPostString[R](url: String, elem: String)(response: R): OngoingStubbing[Future[R]] ={
    when(
      mockHttp.POSTString[R](eqTo(url), eqTo[String](elem), any[Seq[(String, String)]]())
        (any[HttpReads[R]](), headerCarrierCaptor.capture(), any[ExecutionContext]())
    ).thenReturn(
      Future.successful(response))
  }
} 
Example 65
Source File: MockAuthorisationService.scala    From vat-api   with Apache License 2.0 5 votes vote down vote up
package uk.gov.hmrc.vatapi.mocks.auth

import org.scalatest.Suite
import uk.gov.hmrc.domain.Vrn
import uk.gov.hmrc.vatapi.mocks.Mock
import uk.gov.hmrc.vatapi.services.AuthorisationService

trait MockAuthorisationService extends Mock {
  _: Suite =>

  val mockAuthorisationService: AuthorisationService = mock[AuthorisationService]

  object MockAuthorisationService {
    def authCheck(vrn: Vrn) =
      when(mockAuthorisationService.authCheck(eqTo(vrn))(any(),any()))

    def authCheckWithNrsRequirement(vrn: Vrn) =
      when(mockAuthorisationService.authCheckWithNrsRequirement(eqTo(vrn))(any(),any()))
  }

  override protected def beforeEach(): Unit = {
    super.beforeEach()
    reset(mockAuthorisationService)
  }
} 
Example 66
Source File: MockAuditService.scala    From vat-api   with Apache License 2.0 5 votes vote down vote up
package uk.gov.hmrc.vatapi.mocks.services

import org.mockito.stubbing.OngoingStubbing
import org.scalatest.Suite
import uk.gov.hmrc.vatapi.mocks.Mock
import uk.gov.hmrc.vatapi.resources.BusinessResult
import uk.gov.hmrc.vatapi.services.AuditService

trait MockAuditService extends Mock { _: Suite =>

  val mockAuditService: AuditService = mock[AuditService]

  object MockAuditService {
    def audit(): OngoingStubbing[BusinessResult[Unit]] = {
      when(mockAuditService.audit(any())(any(), any(), any(), any()))
    }
  }

  override def beforeEach(): Unit = {
    super.beforeEach()
    reset(mockAuditService)
  }
} 
Example 67
Source File: Mock.scala    From vat-api   with Apache License 2.0 5 votes vote down vote up
package uk.gov.hmrc.vatapi.mocks

import org.mockito.{ArgumentMatchers => Matchers}
import org.mockito.Mockito
import org.mockito.stubbing.OngoingStubbing
import org.mockito.verification.VerificationMode
import org.scalatest.{BeforeAndAfterEach, Suite}
import org.scalatestplus.mockito.MockitoSugar

trait Mock extends MockitoSugar with BeforeAndAfterEach { _: Suite =>

  def any[T]() = Matchers.any[T]()
  def eqTo[T](t: T) = Matchers.eq[T](t)
  def when[T](t: T) = Mockito.when(t)
  def reset[T](t: T) = Mockito.reset(t)

  def verify[T](t: T): T = Mockito.verify(t)
  def verify[T](t: T, mode: VerificationMode): T = Mockito.verify(t, mode)
  def times(n: Int): VerificationMode = Mockito.times(n)
  def never: VerificationMode = Mockito.never()
  def once: VerificationMode = Mockito.times(1)

  implicit class stubbingOps[T](stubbing: OngoingStubbing[T]){
    def returns(t: T) = stubbing.thenReturn(t)
  }
} 
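A hypothetical usage sketch illustrating the returns shorthand added by stubbingOps together with the any matcher alias; the Greeter trait and spec are invented purely for illustration.

package uk.gov.hmrc.vatapi.mocks

import org.scalatest.WordSpec

// Hypothetical collaborator used only for this illustration.
trait Greeter {
  def greet(name: String): String
}

class GreeterMockSpec extends WordSpec with Mock {

  private val greeter: Greeter = mock[Greeter]

  "a mocked Greeter" should {
    "return the stubbed greeting" in {
      when(greeter.greet(any[String]())) returns "hello"
      assert(greeter.greet("world") == "hello")
    }
  }
}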
Example 68
Source File: MockAppContext.scala    From vat-api   with Apache License 2.0 5 votes vote down vote up
package uk.gov.hmrc.vatapi.mocks.config

import org.mockito.stubbing.OngoingStubbing
import org.scalatest.Suite
import uk.gov.hmrc.vatapi.config.AppContext
import uk.gov.hmrc.vatapi.mocks.Mock

trait MockAppContext extends Mock { _: Suite =>

  val mockAppContext: AppContext = mock[AppContext]

  object MockAppContext {
    def desUrl: OngoingStubbing[String] = when(mockAppContext.desUrl)
    def desToken: OngoingStubbing[String] = when(mockAppContext.desToken)
    def desEnv: OngoingStubbing[String] = when(mockAppContext.desEnv)
    def nrsMaxTimeoutMilliseconds: OngoingStubbing[Int] = when(mockAppContext.nrsMaxTimeoutMillis)
    def nrsServiceUrl : OngoingStubbing[String] = when(mockAppContext.nrsServiceUrl)
  }

  override protected def beforeEach(): Unit = {
    super.beforeEach()
    reset(mockAppContext)
  }
} 
Example 69
Source File: MockVatReturnsConnector.scala    From vat-api   with Apache License 2.0 5 votes vote down vote up
package uk.gov.hmrc.vatapi.mocks.connectors

import org.scalatest.Suite
import uk.gov.hmrc.domain.Vrn
import uk.gov.hmrc.http.HeaderCarrier
import uk.gov.hmrc.vatapi.connectors.VatReturnsConnector
import uk.gov.hmrc.vatapi.mocks.Mock
import uk.gov.hmrc.vatapi.models.des.VatReturnDeclaration
import uk.gov.hmrc.vatapi.resources.wrappers.VatReturnResponse

import scala.concurrent.{ExecutionContext, Future}

trait MockVatReturnsConnector extends Mock {
  _: Suite =>

  val mockVatReturnsConnector: VatReturnsConnector = mock[VatReturnsConnector]

  override protected def beforeEach(): Unit = {
    super.beforeEach()
    reset(mockVatReturnsConnector)
  }

  def setupVatReturnSubmission(vrn: Vrn, submission: VatReturnDeclaration)(response: VatReturnResponse): Unit =
    when(mockVatReturnsConnector
      .post(
        eqTo(vrn),
        any[VatReturnDeclaration]())(any[HeaderCarrier](), any[ExecutionContext]()))
      .thenReturn(Future.successful(response))

  def retrieveVatReturn(vrn: Vrn, periodKey: String)(response: VatReturnResponse) =
    when(mockVatReturnsConnector
      .query(
        eqTo(vrn),
        any[String]())(any[HeaderCarrier](), any[ExecutionContext]()))
      .thenReturn(Future.successful(response))

  def retrieveVatReturnFailed(vrn: Vrn, periodKey: String) =
    when(mockVatReturnsConnector
      .query(
        eqTo(vrn),
        any[String]())(any[HeaderCarrier](), any[ExecutionContext]()))
      .thenReturn(Future.failed(new Exception("DES FAILED")))

} 
Example 70
Source File: MockFinancialDataConnector.scala    From vat-api   with Apache License 2.0 5 votes vote down vote up
package uk.gov.hmrc.vatapi.mocks.connectors

import org.mockito.stubbing.OngoingStubbing
import org.scalatest.Suite
import uk.gov.hmrc.domain.Vrn
import uk.gov.hmrc.vatapi.connectors.FinancialDataConnector
import uk.gov.hmrc.vatapi.mocks.Mock
import uk.gov.hmrc.vatapi.models.FinancialDataQueryParams
import uk.gov.hmrc.vatapi.resources.wrappers.FinancialDataResponse

import scala.concurrent.Future

trait MockFinancialDataConnector extends Mock { _: Suite =>

  val mockFinancialDataConnector = mock[FinancialDataConnector]

  object MockFinancialDataConnector {
    def get(vrn: Vrn, queryParams: FinancialDataQueryParams): OngoingStubbing[Future[FinancialDataResponse]] = {
      when(mockFinancialDataConnector.getFinancialData(eqTo(vrn), eqTo(queryParams))(any(), any()))
    }
  }

  override protected def beforeEach(): Unit = {
    super.beforeEach()
    reset(mockFinancialDataConnector)
  }
} 
Example 71
Source File: MockObligationsConnector.scala    From vat-api   with Apache License 2.0 5 votes vote down vote up
package uk.gov.hmrc.vatapi.mocks.connectors

import org.mockito.stubbing.OngoingStubbing
import org.scalatest.Suite
import uk.gov.hmrc.domain.Vrn
import uk.gov.hmrc.vatapi.connectors.ObligationsConnector
import uk.gov.hmrc.vatapi.mocks.Mock
import uk.gov.hmrc.vatapi.models.ObligationsQueryParams
import uk.gov.hmrc.vatapi.resources.wrappers.ObligationsResponse

import scala.concurrent.Future

trait MockObligationsConnector extends Mock { _: Suite =>

  val mockObligationsConnector: ObligationsConnector = mock[ObligationsConnector]

  object MockObligationsConnector {
    def get(vrn: Vrn, queryParams: ObligationsQueryParams): OngoingStubbing[Future[ObligationsResponse]] = {
      when(mockObligationsConnector.get(eqTo(vrn), eqTo(queryParams))(any()))
    }
  }

  override protected def beforeEach(): Unit = {
    super.beforeEach()
    reset(mockObligationsConnector)
  }
} 
Example 72
Source File: MockAuditService.scala    From vat-api   with Apache License 2.0 5 votes vote down vote up
package uk.gov.hmrc.vatapi.mocks

import org.mockito.stubbing.OngoingStubbing
import org.scalatest.Suite
import uk.gov.hmrc.vatapi.models.audit.AuditEvent
import uk.gov.hmrc.vatapi.resources.BusinessResult
import uk.gov.hmrc.vatapi.services.AuditService

trait MockAuditService extends Mock { _: Suite =>

  val mockAuditService: AuditService = mock[AuditService]

  object MockAuditService {
    def audit[T](): OngoingStubbing[BusinessResult[Unit]] = {
      when(mockAuditService.audit[T](any())(any(), any(), any(), any()))
    }
    def audit[T](event: AuditEvent[T]): OngoingStubbing[BusinessResult[Unit]] = {
      when(mockAuditService.audit[T](eqTo(event))(any(), any(), any(), any()))
    }

    def verifyAudit[T](event: AuditEvent[T]) = {
      verify(mockAuditService).audit(eqTo(event))(any(), any(), any(), any())
    }
  }

  override protected def beforeEach(): Unit = {
    super.beforeEach()
    reset(mockAuditService)
  }
} 
Example 73
Source File: UsesHttpServer.scala    From udash-core   with Apache License 2.0 5 votes vote down vote up
package io.udash
package rest

import org.eclipse.jetty.server.Server
import org.scalatest.{BeforeAndAfterAll, Suite}

trait UsesHttpServer extends BeforeAndAfterAll { this: Suite =>
  def port: Int
  val server: Server = new Server(port)
  def baseUrl = s"http://localhost:$port"

  protected def setupServer(server: Server): Unit

  override protected def beforeAll(): Unit = {
    super.beforeAll()
    setupServer(server)
    server.start()
  }

  override protected def afterAll(): Unit = {
    server.stop()
    super.afterAll()
  }
} 
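A hypothetical usage sketch: a suite wiring a trivial Jetty handler into the server managed by UsesHttpServer; the port, handler body and spec name are illustrative.

package io.udash.rest

import javax.servlet.http.{HttpServletRequest, HttpServletResponse}

import org.eclipse.jetty.server.handler.AbstractHandler
import org.eclipse.jetty.server.{Request, Server}
import org.scalatest.FlatSpec

class PingServerSpec extends FlatSpec with UsesHttpServer {

  def port: Int = 8081

  protected def setupServer(server: Server): Unit =
    server.setHandler(new AbstractHandler {
      override def handle(target: String,
                          baseRequest: Request,
                          request: HttpServletRequest,
                          response: HttpServletResponse): Unit = {
        response.setStatus(200)
        response.getWriter.print("pong")
        baseRequest.setHandled(true)
      }
    })

  "the embedded server" should "expose the expected base URL" in {
    assert(baseUrl == s"http://localhost:$port")
  }
}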
Example 74
Source File: TemporaryFolder.scala    From random-projections-at-berlinbuzzwords   with Apache License 2.0 5 votes vote down vote up
package com.stefansavev

import org.junit.rules.TemporaryFolder
import org.junit.runner.Description
import org.junit.runners.model.Statement
import org.scalatest.{SuiteMixin, Outcome, Suite}

//Reference: http://stackoverflow.com/questions/32160549/using-junit-rule-with-scalatest-e-g-temporaryfolder
trait TemporaryFolderFixture extends SuiteMixin {
  this: Suite =>
  val temporaryFolder = new TemporaryFolder

  abstract override def withFixture(test: NoArgTest) = {
    var outcome: Outcome = null
    val statementBody = () => outcome = super.withFixture(test)
    temporaryFolder(
      new Statement() {
        override def evaluate(): Unit = statementBody()
      },
      Description.createSuiteDescription("JUnit rule wrapper")
    ).evaluate()
    outcome
  }
} 
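A hypothetical usage sketch: a suite using the JUnit TemporaryFolder surfaced by the fixture; the folder exists for the duration of each test and is deleted afterwards. File and spec names are illustrative.

package com.stefansavev

import org.scalatest.FunSuite

class TemporaryFolderUsageSpec extends FunSuite with TemporaryFolderFixture {

  test("files can be created inside the managed temporary folder") {
    val file = temporaryFolder.newFile("data.txt")
    assert(file.exists())
  }
}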
Example 75
Source File: DockerTestKit.scala    From docker-it-scala   with MIT License 5 votes vote down vote up
package com.whisk.docker.scalatest

import com.whisk.docker.DockerKit
import org.scalatest.concurrent.ScalaFutures
import org.scalatest.time._
import org.scalatest.{BeforeAndAfterAll, Suite}
import org.slf4j.LoggerFactory

trait DockerTestKit extends BeforeAndAfterAll with ScalaFutures with DockerKit { self: Suite =>

  private lazy val log = LoggerFactory.getLogger(this.getClass)

  def dockerInitPatienceInterval =
    PatienceConfig(scaled(Span(20, Seconds)), scaled(Span(10, Millis)))

  def dockerPullImagesPatienceInterval =
    PatienceConfig(scaled(Span(1200, Seconds)), scaled(Span(250, Millis)))

  override def beforeAll(): Unit = {
    super.beforeAll()
    startAllOrFail()
  }

  override def afterAll(): Unit = {
    stopAllQuietly()
    super.afterAll()

  }
} 
Example 76
Source File: KubeClientSupport.scala    From openwhisk   with Apache License 2.0 5 votes vote down vote up
package org.apache.openwhisk.core.containerpool.kubernetes.test

import common.StreamLogging
import io.fabric8.kubernetes.client.server.mock.KubernetesMockServer
import io.fabric8.kubernetes.client.utils.HttpClientUtils.createHttpClientForMockServer
import io.fabric8.kubernetes.client.{ConfigBuilder, DefaultKubernetesClient}
import okhttp3.TlsVersion.TLS_1_0
import org.scalatest.{BeforeAndAfterAll, Suite, TestSuite}

import scala.concurrent.duration._

trait KubeClientSupport extends TestSuite with BeforeAndAfterAll with StreamLogging {
  self: Suite =>

  protected def useMockServer = true

  val server = new KubernetesMockServer(false)

  protected lazy val (kubeClient, closeable) = {
    if (useMockServer) {
      server.init()
      def defaultClient = {
        val config = new ConfigBuilder()
          .withMasterUrl(server.url("/"))
          .withTrustCerts(true)
          .withTlsVersions(TLS_1_0)
          .withNamespace("test")
          .build
        new DefaultKubernetesClient(createHttpClientForMockServer(config), config)
      }
      (defaultClient, () => server.destroy())
    } else {
      val client = new DefaultKubernetesClient(
        new ConfigBuilder()
          .withConnectionTimeout(1.minute.toMillis.toInt)
          .withRequestTimeout(1.minute.toMillis.toInt)
          .build())
      (client, () => client.close())
    }
  }

  override def beforeAll(): Unit = {
    if (!useMockServer) {
      val kubeconfig = sys.env.get("KUBECONFIG")
      assume(kubeconfig.isDefined, "KUBECONFIG env must be defined")
      println(s"Using kubeconfig from ${kubeconfig.get}")
    }
    super.beforeAll()
  }

  override def afterAll(): Unit = {
    super.afterAll()
    closeable.apply()
  }
} 
Example 77
Source File: WskActorSystem.scala    From openwhisk   with Apache License 2.0 5 votes vote down vote up
package common

import scala.concurrent.Await
import scala.concurrent.ExecutionContext
import scala.concurrent.duration.DurationInt

import akka.actor.ActorSystem
import akka.http.scaladsl.Http

import org.scalatest.BeforeAndAfterAll
import org.scalatest.Suite


trait WskActorSystem extends BeforeAndAfterAll {
  self: Suite =>

  implicit val actorSystem: ActorSystem = ActorSystem()

  implicit def executionContext: ExecutionContext = actorSystem.dispatcher

  override def afterAll() = {
    try {
      Await.result(Http().shutdownAllConnectionPools(), 30.seconds)
    } finally {
      actorSystem.terminate()
      Await.result(actorSystem.whenTerminated, 30.seconds)
    }
    super.afterAll()
  }
} 
Example 78
Source File: BaseSparkSpec.scala    From gemini   with GNU General Public License v3.0 5 votes vote down vote up
package tech.sourced.gemini

import org.apache.spark.SparkConf
import org.apache.spark.sql.{DataFrame, SparkSession}
import org.apache.spark.sql.functions.col
import org.scalatest.{BeforeAndAfterAll, Suite}
import org.slf4j.{Logger => Slf4jLogger}

trait BaseSparkSpec extends BeforeAndAfterAll {
  this: Suite =>

  @transient var sparkSession: SparkSession = _
  private var _conf: SparkConf = _

  def useSparkConf(conf: SparkConf): SparkConf = {
    _conf = conf
    _conf
  }

  def useDefaultSparkConf(): SparkConf = {
    val defaultConf: SparkConf = new SparkConf(true)
      .setAppName(this.getClass.getSimpleName)
      .set("spark.cassandra.connection.host", Gemini.defaultCassandraHost)
      .set("spark.cassandra.connection.port", Gemini.defaultCassandraPort.toString)
      .set("spark.cassandra.connection.keep_alive_ms", "5000")
      .set("spark.cassandra.connection.timeout_ms", "30000")
      .set("spark.tech.sourced.bblfsh.grpc.host", "127.0.0.1")
      .set("spark.ui.showConsoleProgress", "false")
      .set("spark.ui.enabled", "false")
      .set("spark.cleaner.ttl", "3600")

    useSparkConf(defaultConf)
  }

  override protected def beforeAll(): Unit = {
    super.beforeAll()
    if (_conf == null) {
        useDefaultSparkConf()
    }
    sparkSession = SparkSession.builder()
      .master("local[*]")
      .config(_conf)
      .config("spark.driver.host", "localhost")
      .getOrCreate()
  }

  override protected def afterAll(): Unit = {
    // commented due to "Cannot call methods on a stopped SparkContext"
    // but for tests we don't really need to stop spark
    // it will be stopped automatically when tests exit
    // resetSparkContext()

    // make sure different suites don't use the same cache
    cleanSparkCache()
    super.afterAll()
  }

  def resetSparkContext(): Unit = {
    if (sparkSession != null) {
      sparkSession.stop()
    }
    sparkSession = null
  }

  def cleanSparkCache(): Unit = {
    if (sparkSession != null) {
      sparkSession.sqlContext.clearCache()
    }
  }

  // don't process all content of repos to speedup tests
  class LimitedHash(s: SparkSession,
                    log: Slf4jLogger,
                    mode: String,
                    filePaths: Seq[String]) extends Hash(s, log, mode) {
    override def filesForRepos(repos: DataFrame): DataFrame =
      super.filesForRepos(repos).filter(col("path").isin(filePaths: _*))
  }
  object LimitedHash {
    def apply(s: SparkSession, log: Slf4jLogger, mode: String, paths: Seq[String]): LimitedHash =
      new LimitedHash(s, log, mode, paths)
  }
} 
Example 79
Source File: BaseDBSpec.scala    From gemini   with GNU General Public License v3.0 5 votes vote down vote up
package tech.sourced.gemini

import com.datastax.driver.core.{Cluster, Session}
import org.scalatest.{BeforeAndAfterAll, Suite}
import tech.sourced.gemini.util.Logger
import scala.collection.JavaConverters._

case class HashtableItem(hashtable: Int, v: String, sha1: String)

trait BaseDBSpec extends BeforeAndAfterAll {
  this: Suite =>

  private val _logger = Logger("gemini")
  var keyspace : String = _
  var cassandra: Session = _

  override def beforeAll(): Unit = {
    super.beforeAll()

    val cluster = Cluster.builder()
      .addContactPoint(Gemini.defaultCassandraHost)
      .withPort(Gemini.defaultCassandraPort)
      .build()

    cassandra = cluster.connect()

    val gemini = Gemini(null, _logger, keyspace)
    gemini.dropSchema(cassandra)
    gemini.applySchema(cassandra)
  }

  def insertMeta(items: Iterable[RepoFile]): Unit = {
    val cols = Gemini.tables.metaCols
    items.foreach { case RepoFile(repo, commit, path, sha) =>
      val cql = s"""INSERT INTO $keyspace.${Gemini.tables.meta}
        (${cols.repo}, ${cols.commit}, ${cols.path}, ${cols.sha})
        VALUES ('$repo', '$commit', '$path', '$sha')"""
      cassandra.execute(cql)
    }
  }

  def insertHashtables(items: Iterable[HashtableItem], mode: String): Unit = {
    val cols = Gemini.tables.hashtablesCols
    items.foreach { case HashtableItem(ht, v, sha1) =>
      val cql = s"""INSERT INTO $keyspace.${Gemini.tables.hashtables(mode)}
        (${cols.hashtable}, ${cols.value}, ${cols.sha})
        VALUES ($ht, $v, '$sha1')"""
      cassandra.execute(cql)
    }
  }

  def insertDocFreq(docFreq: OrderedDocFreq, mode: String): Unit = {
    val docsCols = Gemini.tables.featuresDocsCols
    cassandra.execute(
      s"INSERT INTO $keyspace.${Gemini.tables.featuresDocs} (${docsCols.id}, ${docsCols.docs}) VALUES (?, ?)",
      mode, int2Integer(docFreq.docs)
    )

    val freqCols = Gemini.tables.featuresFreqCols
    docFreq.df.foreach { case(feature, weight) =>
      cassandra.execute(
        s"INSERT INTO $keyspace.${Gemini.tables.featuresFreq}" +
          s"(${freqCols.id}, ${freqCols.feature}, ${freqCols.weight}) VALUES (?, ?, ?)",
        mode, feature, int2Integer(weight)
      )
    }
  }

  override def afterAll(): Unit = {
    Gemini(null, _logger, keyspace).dropSchema(cassandra)
    cassandra.close()
    super.afterAll()
  }
} 
Example 80
Source File: TestFolder.scala    From schedoscope   with Apache License 2.0 5 votes vote down vote up
package org.schedoscope.scheduler.driver

import java.io.File

import org.apache.commons.io.FileUtils
import org.scalatest.Suite

trait TestFolder extends Suite {
  self: Suite =>
  var testFolder: File = _
  var inputFolder: File = _
  var outputFolder: File = _

  def in = inputFolder.getAbsolutePath()

  def out = outputFolder.getAbsolutePath()

  private def deleteFile(file: File) {
    if (!file.exists) return
    if (file.isFile) {
      file.delete()
    } else {
      file.listFiles().foreach(deleteFile)
      file.delete()
    }
  }

  def /() = File.separator

  def createInputFile(path: String) {
    FileUtils.touch(new File(s"${inputFolder}${File.separator}${path}"))
  }

  def outputFile(path: String) = new File(outputPath(path))

  def inputFile(path: String) = new File(inputPath(path))

  def inputPath(path: String) = s"${in}${File.separator}${path}"

  def outputPath(path: String) = s"${out}${File.separator}${path}"

  abstract override def withFixture(test: NoArgTest) = {
    val tempFolder = System.getProperty("java.io.tmpdir")
    var folder: File = null

    do {
      folder = new File(tempFolder, "scalatest-" + System.nanoTime)
    } while (!folder.mkdir())

    testFolder = folder

    inputFolder = new File(testFolder, "in");
    inputFolder.mkdir()
    outputFolder = new File(testFolder, "out")
    outputFolder.mkdir()

    try {
      super.withFixture(test)
    } finally {
      deleteFile(testFolder)
    }
  }
} 
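A hypothetical usage sketch: a test using the per-test input/output folders created by TestFolder; the file name is arbitrary.

package org.schedoscope.scheduler.driver

import org.scalatest.FlatSpec

class TestFolderUsageSpec extends FlatSpec with TestFolder {

  "createInputFile" should "create the file below the input folder" in {
    createInputFile("part-00000")
    assert(inputFile("part-00000").exists())
  }
}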
Example 81
Source File: TestSparkContext.scala    From magellan   with Apache License 2.0 5 votes vote down vote up
package magellan

import org.apache.spark.sql.{SQLContext, SparkSession}
import org.apache.spark.{SparkConf, SparkContext}
import org.scalatest.{BeforeAndAfterAll, Suite}

trait TestSparkContext extends BeforeAndAfterAll { self: Suite =>
  @transient var sc: SparkContext = _
  @transient var spark: SparkSession = _
  @transient var sqlContext: SQLContext = _

  override def beforeAll() {
    super.beforeAll()
    val conf = new SparkConf()
      .setMaster("local[2]")
      .setAppName("MagellanUnitTest")
      .set("spark.sql.crossJoin.enabled", "true")

    spark = SparkSession.builder()
      .config(conf)
      .config("spark.sql.crossJoin.enabled", "true")
      .getOrCreate()
    sqlContext = spark.sqlContext
    sc = spark.sparkContext
  }

  override def afterAll() {

    if (spark != null) {
      spark.stop()
    }
    spark = null
    sqlContext = null
    sc = null
    super.afterAll()
  }
} 
Example 82
Source File: GraphFrameTestSparkContext.scala    From graphframes   with Apache License 2.0 5 votes vote down vote up
package org.graphframes

import java.io.File
import java.nio.file.Files

import org.apache.commons.io.FileUtils
import org.scalatest.{BeforeAndAfterAll, Suite}
import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.sql.{SparkSession, SQLContext, SQLImplicits}

trait GraphFrameTestSparkContext extends BeforeAndAfterAll { self: Suite =>
  @transient var spark: SparkSession = _
  @transient var sc: SparkContext = _
  @transient var sqlContext: SQLContext = _
  @transient var sparkMajorVersion: Int = _
  @transient var sparkMinorVersion: Int = _

  
  def isLaterVersion(minVersion: String): Boolean = {
    val (minMajorVersion, minMinorVersion) = TestUtils.majorMinorVersion(minVersion)
    if (sparkMajorVersion != minMajorVersion) {
      return sparkMajorVersion > minMajorVersion
    } else {
      return sparkMinorVersion >= minMinorVersion
    }
  }

  override def beforeAll() {
    super.beforeAll()

    spark = SparkSession.builder()
      .master("local[2]")
      .appName("GraphFramesUnitTest")
      .config("spark.sql.shuffle.partitions", 4)
      .getOrCreate()

    val checkpointDir = Files.createTempDirectory(this.getClass.getName).toString
    spark.sparkContext.setCheckpointDir(checkpointDir)
    sc = spark.sparkContext
    sqlContext = spark.sqlContext

    val (verMajor, verMinor) = TestUtils.majorMinorVersion(sc.version)
    sparkMajorVersion = verMajor
    sparkMinorVersion = verMinor
  }

  override def afterAll() {
    val checkpointDir = sc.getCheckpointDir
    if (spark != null) {
      spark.stop()
    }
    spark = null
    sqlContext = null
    sc = null

    checkpointDir.foreach { dir =>
      FileUtils.deleteQuietly(new File(dir))
    }
    super.afterAll()
  }
} 
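A minimal usage sketch (the suite name is illustrative; isLaterVersion gates tests on the running Spark's major.minor version):

import org.scalatest.FunSuite

class CheckpointGateSuite extends FunSuite with GraphFrameTestSparkContext {
  test("checkpointing works on supported Spark versions") {
    assume(isLaterVersion("2.0"))    // skip rather than fail on older Spark
    val rdd = sc.parallelize(1 to 5)
    rdd.checkpoint()                 // the checkpoint dir was set in beforeAll
    assert(rdd.count() === 5L)
  }
}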
Example 83
Source File: OapSharedSQLContext.scala    From OAP   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.sql.test

import scala.concurrent.duration._

import org.scalatest.{BeforeAndAfterEach, Suite}
import org.scalatest.concurrent.Eventually

import org.apache.spark.{DebugFilesystem, SparkConf}
import org.apache.spark.sql.{SparkSession, SQLContext}
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.oap.OapRuntime


trait OapSharedSQLContext extends SQLTestUtils with OapSharedSparkSession

trait OapSharedSparkSession extends SQLTestUtilsBase with BeforeAndAfterEach with Eventually {
  self: Suite =>

  // Shared SparkSession used by the lifecycle overrides below; its construction in
  // beforeAll is elided from this excerpt.
  @transient private var _spark: SparkSession = _

  protected implicit def spark: SparkSession = _spark

  protected override def afterAll(): Unit = {
    try {
      super.afterAll()
    } finally {
      try {
        if (_spark != null) {
          try {
            _spark.sessionState.catalog.reset()
          } finally {
            OapRuntime.stop()
            _spark.stop()
            _spark = null
          }
        }
      } finally {
        SparkSession.clearActiveSession()
        SparkSession.clearDefaultSession()
      }
    }
  }

  protected override def beforeEach(): Unit = {
    super.beforeEach()
    DebugFilesystem.clearOpenStreams()
  }

  protected override def afterEach(): Unit = {
    super.afterEach()
    // Clear all persistent datasets after each test
    spark.sharedState.cacheManager.clearCache()
    // files can be closed from other threads, so wait a bit
    // normally this doesn't take more than 1s
    eventually(timeout(10.seconds), interval(2.seconds)) {
      DebugFilesystem.assertNoOpenStreams()
    }
  }
} 
Example 84
Source File: TestContext.scala    From freestyle   with Apache License 2.0 5 votes vote down vote up
package freestyle.free.cache.redis

import _root_.redis.embedded.RedisServer
import _root_.redis.RedisClient
import akka.actor.ActorSystem
import org.scalatest.{BeforeAndAfterAll, BeforeAndAfterEach, Suite}

trait RedisTestContext extends BeforeAndAfterAll with BeforeAndAfterEach { self: Suite =>

  private[this] val server: RedisServer = new RedisServer()

  private[this] implicit val actorSystem: ActorSystem =
    ActorSystem.create("testing")
  val client: RedisClient =
    RedisClient(host = "localhost", port = server.getPort)

  override def beforeAll = {
    server.start()
    ()
  }
  override def afterAll = {
    server.stop()
    actorSystem.terminate()
    ()
  }
  override def beforeEach = {
    client.flushdb
    ()
  }
} 
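A minimal usage sketch (names are illustrative; RedisTestContext starts the embedded server once and flushes the database before each test, and the rediscala call signatures assumed here may vary slightly between versions):

import org.scalatest.AsyncFunSuite

class RedisRoundTripSpec extends AsyncFunSuite with RedisTestContext {
  test("a value set in one step can be read back") {
    for {
      _   <- client.set("greeting", "hello")
      got <- client.get[String]("greeting")
    } yield assert(got.contains("hello"))
  }
}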
Example 85
Source File: WithRemoteHiveMetastoreServiceSupport.scala    From spark-atlas-connector   with Apache License 2.0 5 votes vote down vote up
package com.hortonworks.spark.atlas

import java.io.File
import java.nio.file.Files

import com.hortonworks.spark.atlas.utils.SparkUtils
import com.hotels.beeju.ThriftHiveMetaStoreTestUtil
import org.apache.commons.io.FileUtils
import org.apache.spark.sql.SparkSession
import org.scalatest.{BeforeAndAfterAll, Suite}

trait WithRemoteHiveMetastoreServiceSupport extends BeforeAndAfterAll { self: Suite =>
  protected val dbName = "sac_hive_metastore"

  protected var sparkSession: SparkSession = _

  private var warehouseDir: String = _

  private val hive = new ThriftHiveMetaStoreTestUtil(dbName)

  private def cleanupAnyExistingSession(): Unit = {
    val session = SparkSession.getActiveSession.orElse(SparkSession.getDefaultSession)
    if (session.isDefined) {
      session.get.sessionState.catalog.reset()
      session.get.stop()
      SparkSession.clearActiveSession()
      SparkSession.clearDefaultSession()
    }
  }

  override protected def beforeAll(): Unit = {
    super.beforeAll()

    cleanupAnyExistingSession()

    hive.before()

    warehouseDir = Files.createTempDirectory("sac-warehouse-").toString
    sparkSession = SparkSession.builder()
      .master("local")
      .appName(this.getClass.getCanonicalName)
      .enableHiveSupport()
      .config("spark.ui.enabled", "false")
      .config("spark.sql.warehouse.dir", warehouseDir)
      .config("spark.hadoop.hive.metastore.uris", hive.getThriftConnectionUri)
      .getOrCreate()

    // reset hiveConf to make sure the configuration change takes effect
    SparkUtils.resetHiveConf
  }

  override protected def afterAll(): Unit = {
    try {
      hive.after()
      sparkSession.sessionState.catalog.reset()
      sparkSession.stop()
      SparkSession.clearActiveSession()
      SparkSession.clearDefaultSession()
    } finally {
      // reset hiveConf again to prevent affecting other tests
      SparkUtils.resetHiveConf

      sparkSession = null
      FileUtils.deleteDirectory(new File(warehouseDir))
    }
    System.clearProperty("spark.driver.port")

    super.afterAll()
  }
} 
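A minimal usage sketch (the suite name is illustrative; sparkSession talks to the embedded Thrift metastore and dbName is created by the beeju utility):

import org.scalatest.FunSuite

class RemoteMetastoreTableSuite extends FunSuite with WithRemoteHiveMetastoreServiceSupport {
  test("tables created in the test database are visible through the remote metastore") {
    sparkSession.sql(s"CREATE TABLE $dbName.src (id INT)")
    assert(sparkSession.sql(s"SHOW TABLES IN $dbName").count() === 1L)
  }
}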
Example 86
Source File: WithHiveSupport.scala    From spark-atlas-connector   with Apache License 2.0 5 votes vote down vote up
package com.hortonworks.spark.atlas

import java.io.File
import java.nio.file.Files

import org.apache.commons.io.FileUtils
import org.apache.spark.sql.SparkSession
import org.scalatest.{BeforeAndAfterAll, Suite}

trait WithHiveSupport extends BeforeAndAfterAll { self: Suite =>

  protected var sparkSession: SparkSession = _

  private var metastoreDir: String = _
  private var warehouseDir: String = _

  private def cleanupAnyExistingSession(): Unit = {
    val session = SparkSession.getActiveSession.orElse(SparkSession.getDefaultSession)
    if (session.isDefined) {
      session.get.sessionState.catalog.reset()
      session.get.stop()
      SparkSession.clearActiveSession()
      SparkSession.clearDefaultSession()
    }
  }

  override protected def beforeAll(): Unit = {
    super.beforeAll()

    cleanupAnyExistingSession()

    metastoreDir = Files.createTempDirectory("sac-metastore-").toString
    warehouseDir = Files.createTempDirectory("sac-warehouse-").toString
    System.setProperty("derby.system.home", metastoreDir)
    sparkSession = SparkSession.builder()
      .master("local")
      .appName(this.getClass.getCanonicalName)
      .enableHiveSupport()
      .config("spark.ui.enabled", "false")
      .config("spark.sql.warehouse.dir", warehouseDir)
      .getOrCreate()
  }

  override protected def afterAll(): Unit = {
    try {
      sparkSession.sessionState.catalog.reset()
      sparkSession.stop()
      SparkSession.clearActiveSession()
      SparkSession.clearDefaultSession()
    } finally {
      sparkSession = null
      FileUtils.deleteDirectory(new File(warehouseDir))
    }
    System.clearProperty("spark.driver.port")

    super.afterAll()
  }
} 
Example 87
Source File: WithHDFSSupport.scala    From spark-atlas-connector   with Apache License 2.0 5 votes vote down vote up
package com.hortonworks.spark.atlas

import java.io.File

import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.FileUtil
import org.apache.hadoop.hdfs.MiniDFSCluster
import org.apache.spark.sql.SparkSession
import org.scalatest.{BeforeAndAfterAll, Suite}

trait WithHDFSSupport extends BeforeAndAfterAll { self: Suite =>

  protected var sparkSession: SparkSession = _

  private var hdfsCluster: MiniDFSCluster = _
  protected var hdfsURI: String = _

  private def cleanupAnyExistingSession(): Unit = {
    val session = SparkSession.getActiveSession.orElse(SparkSession.getDefaultSession)
    if (session.isDefined) {
      session.get.sessionState.catalog.reset()
      session.get.stop()
      SparkSession.clearActiveSession()
      SparkSession.clearDefaultSession()
    }
  }

  override protected def beforeAll(): Unit = {
    super.beforeAll()

    cleanupAnyExistingSession()

    val baseDir = new File("./target/hdfs/").getAbsoluteFile()
    FileUtil.fullyDelete(baseDir)

    val conf = new Configuration()
    conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, baseDir.getAbsolutePath())
    val builder = new MiniDFSCluster.Builder(conf)

    hdfsCluster = builder.build()
    hdfsURI = s"hdfs://localhost:${hdfsCluster.getNameNodePort()}/"

    sparkSession = SparkSession.builder()
      .master("local")
      .appName(this.getClass.getCanonicalName)
      .enableHiveSupport()
      .config("spark.hadoop.fs.defaultFS", hdfsURI)
      .config("spark.ui.enabled", "false")
      .getOrCreate()
  }

  override protected def afterAll(): Unit = {
    try {
      sparkSession.sessionState.catalog.reset()
      sparkSession.stop()
      SparkSession.clearActiveSession()
      SparkSession.clearDefaultSession()
    } finally {
      sparkSession = null
    }
    System.clearProperty("spark.driver.port")

    hdfsCluster.shutdown(true)

    super.afterAll()
  }
} 
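A minimal usage sketch (the suite name is illustrative; hdfsURI points at the MiniDFSCluster started in beforeAll):

import org.scalatest.FunSuite

class HdfsRoundTripSuite extends FunSuite with WithHDFSSupport {
  test("data written to the mini cluster can be read back") {
    val path = s"${hdfsURI}tmp/numbers"
    sparkSession.range(0, 100).write.parquet(path)
    assert(sparkSession.read.parquet(path).count() === 100L)
  }
}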
Example 88
Source File: SharedSparkContext.scala    From arangodb-spark-connector   with Apache License 2.0 5 votes vote down vote up
package com.arangodb.spark

import org.scalatest.{ BeforeAndAfterAll, BeforeAndAfterEach }
import org.scalatest.Suite
import org.apache.spark.SparkContext
import org.apache.spark.SparkConf


trait SharedSparkContext extends BeforeAndAfterAll { self: Suite =>

  @transient private var _sc: SparkContext = _
  def sc: SparkContext = _sc
  val conf = new SparkConf(false)
    .setMaster("local")
    .setAppName("test")

  override def beforeAll() {
    super.beforeAll()
    _sc = new SparkContext(conf)
  }

  override def afterAll() {
    try {
      _sc.stop()
      _sc = null
    } finally {
      super.afterAll()
    }
  }

} 
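A minimal usage sketch (the suite name is illustrative; the trait exposes the shared local SparkContext as sc):

import org.scalatest.FunSuite

class WordCountSpec extends FunSuite with SharedSparkContext {
  test("the shared SparkContext runs a simple job") {
    val counts = sc.parallelize(Seq("a", "b", "a")).countByValue()
    assert(counts("a") === 2L)
  }
}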
Example 89
Source File: SharedSparkContextSSL.scala    From arangodb-spark-connector   with Apache License 2.0 5 votes vote down vote up
package com.arangodb.spark

import org.scalatest.{ BeforeAndAfterAll, BeforeAndAfterEach }
import org.scalatest.Suite
import org.apache.spark.SparkContext
import org.apache.spark.SparkConf


trait SharedSparkContextSSL extends BeforeAndAfterAll { self: Suite =>

  @transient private var _sc: SparkContext = _
  def sc: SparkContext = _sc
  val conf = new SparkConf(false)
    .setMaster("local")
    .setAppName("test")
    .set("arangodb.user", "root")
    .set("arangodb.password", "")
    .set("arangodb.hosts", "127.0.0.1:8530")
    .set("arangodb.useSsl", true.toString)
    .set("arangodb.ssl.keyStoreFile", this.getClass().getResource("/example.truststore").getFile())
    .set("arangodb.ssl.passPhrase", "12345678")

  override def beforeAll() {
    super.beforeAll()
    _sc = new SparkContext(conf)
  }

  override def afterAll() {
    try {
      _sc.stop()
      _sc = null
    } finally {
      super.afterAll()
    }
  }

} 
Example 90
Source File: BaseDockerSuite.scala    From spark-exasol-connector   with Apache License 2.0 5 votes vote down vote up
package com.exasol.spark

import com.exasol.spark.util.ExasolConfiguration
import com.exasol.spark.util.ExasolConnectionManager
import com.exasol.spark.util.Types._

import com.dimafeng.testcontainers.ExasolDockerContainer
import com.dimafeng.testcontainers.ForAllTestContainer
import org.scalatest.Suite


trait BaseDockerSuite extends ForAllTestContainer { self: Suite =>

  override val container = ExasolDockerContainer()

  lazy val exaConfiguration = ExasolConfiguration(container.configs)

  lazy val exaManager = ExasolConnectionManager(exaConfiguration)

  val EXA_SCHEMA = "TEST_SCHEMA"
  val EXA_TABLE = "TEST_TABLE"
  val EXA_ALL_TYPES_TABLE = "TEST_ALL_TYPES_TABLE"
  val EXA_TYPES_NOT_COVERED_TABLE = "TEST_TYPES_NOT_COVERED_TABLE"

  // scalastyle:off nonascii
  def createDummyTable(): Unit = {
    val queries = Seq(
      s"DROP SCHEMA IF EXISTS $EXA_SCHEMA CASCADE",
      s"CREATE SCHEMA $EXA_SCHEMA",
      s"""|CREATE OR REPLACE TABLE $EXA_SCHEMA.$EXA_TABLE (
          |   ID INTEGER IDENTITY NOT NULL,
          |   NAME VARCHAR(100) UTF8,
          |   CITY VARCHAR(2000) UTF8,
          |   DATE_INFO DATE,
          |   UNICODE_COL VARCHAR(100) UTF8,
          |   UPDATED_AT TIMESTAMP DEFAULT CURRENT_TIMESTAMP NOT NULL
          |)""".stripMargin,
      s"""|INSERT INTO $EXA_SCHEMA.$EXA_TABLE (name, city, date_info, unicode_col)
          | VALUES ('Germany', 'Berlin', '2017-12-31', 'öäüß')
          |""".stripMargin,
      s"""|INSERT INTO $EXA_SCHEMA.$EXA_TABLE (name, city, date_info, unicode_col)
          | VALUES ('France', 'Paris', '2018-01-01','\u00d6')
          |""".stripMargin,
      s"""|INSERT INTO $EXA_SCHEMA.$EXA_TABLE (name, city, date_info, unicode_col)
          | VALUES ('Portugal', 'Lisbon', '2018-10-01','\u00d9')
          |""".stripMargin,
      "commit"
    )
    exaManager.withExecute(queries)
  }
  // scalastyle:on nonascii

  def createAllTypesTable(): Unit = {
    val maxDecimal = " DECIMAL(" + getMaxPrecisionExasol() + "," + getMaxScaleExasol() + ")"
    val queries = Seq(
      s"DROP SCHEMA IF EXISTS $EXA_SCHEMA CASCADE",
      s"CREATE SCHEMA $EXA_SCHEMA",
      s"""|CREATE OR REPLACE TABLE $EXA_SCHEMA.$EXA_ALL_TYPES_TABLE (
          |   MYID INTEGER,
          |   MYTINYINT DECIMAL(3,0),
          |   MYSMALLINT DECIMAL(9,0),
          |   MYBIGINT DECIMAL(36,0),
          |   MYDECIMALSystemDefault DECIMAL,
          |   MYDECIMALMAX $maxDecimal,
          |   MYNUMERIC DECIMAL( 5,2 ),
          |   MYDOUBLE DOUBLE PRECISION,
          |   MYCHAR CHAR,
          |   MYNCHAR CHAR(2000),
          |   MYLONGVARCHAR VARCHAR( 2000000),
          |   MYBOOLEAN BOOLEAN,
          |   MYDATE DATE,
          |   MYTIMESTAMP TIMESTAMP,
          |   MYGEOMETRY Geometry,
          |   MYINTERVAL INTERVAL YEAR TO MONTH
          |)""".stripMargin,
      "commit"
    )
    exaManager.withExecute(queries)
  }

} 
Example 91
Source File: EmbeddedPg.scala    From bitcoin-s   with MIT License 5 votes vote down vote up
package org.bitcoins.testkit

import com.opentable.db.postgres.embedded.EmbeddedPostgres
import org.bitcoins.testkit.BitcoinSTestAppConfig.ProjectType
import org.scalatest.{BeforeAndAfterAll, Suite}

import scala.util.Try

trait EmbeddedPg extends BeforeAndAfterAll { this: Suite =>

  val pgEnabled: Boolean = sys.env.contains("PG_ENABLED")

  val pg: Option[EmbeddedPostgres] =
    if (pgEnabled) Some(EmbeddedPostgres.start()) else None

  def pgUrl(dbname: String): Option[String] =
    pg.map(_.getJdbcUrl("postgres", dbname))

  def pgUrl(project: ProjectType): Option[String] =
    project match {
      case ProjectType.Wallet => pgUrl("walletdb")
      case ProjectType.Node   => pgUrl("nodedb")
      case ProjectType.Chain  => pgUrl("chaindb")
    }

  override def beforeAll(): Unit = {
    super.beforeAll()
    executePgSql(s"CREATE DATABASE chaindb")
    executePgSql(s"CREATE DATABASE walletdb")
    executePgSql(s"CREATE DATABASE nodedb")
  }

  override def afterAll(): Unit = {
    super.afterAll()
    Try(executePgSql(s"DROP DATABASE nodedb"))
    Try(executePgSql(s"DROP DATABASE walletdb"))
    Try(executePgSql(s"DROP DATABASE chaindb"))
    Try(pg.foreach(_.close()))
    ()
  }

  def executePgSql(sql: String): Unit =
    pg.foreach { pg =>
      val conn = pg.getPostgresDatabase.getConnection
      try {
        val st = conn.createStatement()
        try {
          st.execute(sql)
        } finally st.close()

      } finally conn.close()
    }

} 
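A minimal usage sketch (the suite name is illustrative; pgUrl yields a JDBC url only when the PG_ENABLED environment variable is set):

import org.bitcoins.testkit.BitcoinSTestAppConfig.ProjectType
import org.scalatest.FunSuite

class WalletDbUrlSpec extends FunSuite with EmbeddedPg {
  test("a JDBC url is available for the wallet database when Postgres is enabled") {
    assume(pgEnabled)
    assert(pgUrl(ProjectType.Wallet).exists(_.contains("walletdb")))
  }
}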
Example 92
Source File: LivyBaseUnitTestSuite.scala    From incubator-livy   with Apache License 2.0 5 votes vote down vote up
package org.apache.livy

import org.scalatest.{Outcome, Suite}

trait LivyBaseUnitTestSuite extends Suite with Logging {

  protected override def withFixture(test: NoArgTest): Outcome = {
    val testName = test.name
    val suiteName = this.getClass.getName
    try {
      info(s"\n\n==== TEST OUTPUT FOR $suiteName: '$testName' ====\n")
      test()
    } finally {
      info(s"\n\n==== FINISHED $suiteName: '$testName' ====\n")
    }
  }
} 
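A minimal usage sketch (the suite name is illustrative; every test runs inside the start/finish log banner produced by withFixture):

import org.scalatest.FunSuite

class SessionUtilsSpec extends FunSuite with LivyBaseUnitTestSuite {
  test("assertions run inside the logged fixture") {
    assert(Seq(1, 2, 3).sum === 6)
  }
}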
Example 93
Source File: SharedSparkContext.scala    From spark-testing-base   with Apache License 2.0 5 votes vote down vote up
package com.holdenkarau.spark.testing

import java.util.Date

import org.apache.spark._
import org.scalatest.{BeforeAndAfterAll, Suite}


trait SharedSparkContext extends BeforeAndAfterAll with SparkContextProvider {
  self: Suite =>

  @transient private var _sc: SparkContext = _

  override def sc: SparkContext = _sc

  override def beforeAll() {
    _sc = new SparkContext(conf)
    setup(_sc)
    super.beforeAll()
  }

  override def afterAll() {
    try {
      LocalSparkContext.stop(_sc)
      _sc = null
    } finally {
      super.afterAll()
    }
  }
} 
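A minimal usage sketch of the canonical spark-testing-base pattern (the suite name is illustrative):

import com.holdenkarau.spark.testing.SharedSparkContext
import org.scalatest.FunSuite

class RddSumSpec extends FunSuite with SharedSparkContext {
  test("the provided sc runs a simple local job") {
    assert(sc.parallelize(1 to 4).sum() === 10.0)
  }
}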
Example 94
Source File: SharedMiniCluster.scala    From spark-testing-base   with Apache License 2.0 5 votes vote down vote up
package com.holdenkarau.spark.testing

import org.apache.spark.{SparkConf, SparkContext}
import org.scalatest.{BeforeAndAfterAll, Suite}


trait SharedMiniCluster extends BeforeAndAfterAll
    with HDFSClusterLike
    with YARNClusterLike
    with SparkContextProvider{
  self: Suite =>
  @transient private var _sc: SparkContext = _

  def sc: SparkContext = _sc

  val master = "yarn-client"

  override def beforeAll() {
    // Try to do setup; if it fails, shut down the clusters
    try {
      super.startHDFS()
      super.startYARN()

      // Create the new context
      val sparkConf = new SparkConf().setMaster(master).setAppName("test")
      _sc = new SparkContext(sparkConf)
      setup(_sc)
    } catch {
      case e: Throwable =>
        super.shutdownYARN()
        super.shutdownHDFS()
        throw e
    }
    super.beforeAll()
  }

  override def afterAll() {
    Option(sc).foreach(_.stop())
    _sc = null

    super.shutdownYARN()
    super.shutdownHDFS()

    super.afterAll()
  }
} 
Example 95
Source File: PerTestSparkContext.scala    From spark-testing-base   with Apache License 2.0 5 votes vote down vote up
package com.holdenkarau.spark.testing

import org.apache.spark._

import org.scalatest.BeforeAndAfterAll
import org.scalatest.BeforeAndAfterEach
import org.scalatest.Suite


trait PerTestSparkContext extends LocalSparkContext with BeforeAndAfterEach
    with SparkContextProvider { self: Suite =>

  override def beforeEach() {
    sc = new SparkContext(conf)
    setup(sc)
    super.beforeEach()
  }

  override def afterEach() {
    super.afterEach()
  }
} 
Example 96
Source File: SharedSparkContext.scala    From spark-testing-base   with Apache License 2.0 5 votes vote down vote up
package com.holdenkarau.spark.testing

import java.util.Date

import org.apache.spark._
import org.scalatest.{BeforeAndAfterAll, Suite}


trait SharedSparkContext extends BeforeAndAfterAll with SparkContextProvider {
  self: Suite =>

  @transient private var _sc: SparkContext = _

  override def sc: SparkContext = _sc

  protected implicit def reuseContextIfPossible: Boolean = false

  override def beforeAll() {
    // This is kind of a hack, but if we've got an existing Spark Context
    // hanging around we need to kill it.
    if (!reuseContextIfPossible) {
      EvilSparkContext.stopActiveSparkContext()
    }
    _sc = SparkContext.getOrCreate(conf)
    setup(_sc)
    super.beforeAll()
  }

  override def afterAll() {
    try {
      if (!reuseContextIfPossible) {
        LocalSparkContext.stop(_sc)
        _sc = null
      }
    } finally {
      super.afterAll()
    }
  }
} 
Example 97
Source File: SharedMiniCluster.scala    From spark-testing-base   with Apache License 2.0 5 votes vote down vote up
package com.holdenkarau.spark.testing

import org.apache.spark.{SparkConf, SparkContext, EvilSparkContext}
import org.scalatest.{BeforeAndAfterAll, Suite}


trait SharedMiniCluster extends BeforeAndAfterAll
    with HDFSClusterLike
    with YARNClusterLike
    with SparkContextProvider{
  self: Suite =>
  @transient private var _sc: SparkContext = _

  def sc: SparkContext = _sc

  val master = "yarn-client"

  override def beforeAll() {
    // Try to do setup; if it fails, shut down the clusters
    try {
      super.startHDFS()
      super.startYARN()

      // Stop the spark context if already running
      EvilSparkContext.stopActiveSparkContext()
      // Create the new context
      val sparkConf = new SparkConf().setMaster(master).setAppName("test")
      _sc = new SparkContext(sparkConf)
      setup(_sc)
    } catch {
      case e: Throwable =>
        super.shutdownYARN()
        super.shutdownHDFS()
        throw e
    }
    super.beforeAll()
  }

  override def afterAll() {
    Option(sc).foreach(_.stop())
    _sc = null

    super.shutdownYARN()
    super.shutdownHDFS()

    super.afterAll()
  }
} 
Example 98
Source File: StructuredStreamingBase.scala    From spark-testing-base   with Apache License 2.0 5 votes vote down vote up
package com.holdenkarau.spark.testing

import org.apache.spark.sql._
import org.apache.spark.sql.execution.streaming._

import org.scalatest.Suite

import scala.reflect.ClassTag


  private[holdenkarau] def runSimpleStreamEndState[T: Encoder, R: Encoder](
    spark: SparkSession,
    input: Seq[Seq[T]],
    mode: String,
    queryFunction: Dataset[T] => Dataset[R]) = {
    import spark.implicits._
    implicit val sqlContext = spark.sqlContext
    val inputStream = MemoryStream[T]
    val transformed = queryFunction(inputStream.toDS())
    val queryName = s"${this.getClass.getSimpleName}TestSimpleStreamEndState${count}"
    count = count + 1
    val query = transformed.writeStream.
      format("memory").
      outputMode(mode).
      queryName(queryName).
      start()
    input.foreach(batch => inputStream.addData(batch))
    // Block until all processed
    query.processAllAvailable()
    val table = spark.table(queryName).as[R]
    val resultRows = table.collect()
    resultRows.toSeq
  }
} 
Example 99
Source File: StreamingActionBase.scala    From spark-testing-base   with Apache License 2.0 5 votes vote down vote up
package com.holdenkarau.spark.testing

import org.apache.spark.streaming.TestStreamingContext
import org.apache.spark.streaming.dstream.DStream
import org.apache.spark.streaming.scheduler.{
  StreamingListenerBatchCompleted, StreamingListener}
import org.apache.spark.streaming.util.TestManualClock
import org.scalatest.Suite

import scala.reflect.ClassTag


  def runAction[U: ClassTag](input: Seq[Seq[U]], operation: DStream[U] => Unit) {

    val numBatches_ = input.size
    withStreamingContext(setupStream[U](input, operation)) { ssc =>
      runActionStream(ssc, numBatches_)
    }
  }

  private def withStreamingContext(outputStreamSSC: TestStreamingContext)
      (block: TestStreamingContext => Unit): Unit = {
    try {
      block(outputStreamSSC)
    } finally {
      try {
        outputStreamSSC.stop(stopSparkContext = false)
      } catch {
        case e: Throwable =>
          logError("Error stopping StreamingContext", e)
      }
    }
  }

  private def setupStream[U: ClassTag](input: Seq[Seq[U]],
      operation: DStream[U] => Any): TestStreamingContext = {

    // Create TestStreamingContext
    val ssc = new TestStreamingContext(sc, batchDuration)
    ssc.addStreamingListener(batchCountListener)
    if (checkpointDir != null) {
      ssc.checkpoint(checkpointDir)
    }

    // Setup the stream computation
    val inputStream = createTestInputStream(sc, ssc, input)
    operation(inputStream)
    ssc
  }

  private def runActionStream(ssc: TestStreamingContext, numBatches: Int) {
    assert(numBatches > 0, "Number of batches to run stream computation is zero")
    batchCountListener.batchCount = 0

    // Start computation
    ssc.start()

    // Advance manual clock
    val clock = ssc.getScheduler().clock.asInstanceOf[TestManualClock]
    logInfo("Manual clock before advancing = " + clock.currentTime())
    if (actuallyWait) {
      for (i <- 1 to numBatches) {
        logInfo("Actually waiting for " + batchDuration)
        clock.addToTime(batchDuration.milliseconds)
        Thread.sleep(batchDuration.milliseconds)
      }
    } else {
      clock.addToTime(numBatches * batchDuration.milliseconds)
    }
    logInfo("Manual clock after advancing = " + clock.currentTime())

    // wait for expected number of batches to execute
    val startTime = System.currentTimeMillis()
    while (batchCountListener.batchCount < numBatches &&
      System.currentTimeMillis() - startTime < maxWaitTimeMillis) {
      logInfo(s"batches: run = ${batchCountListener.batchCount} " +
        s"target = ${numBatches}")
      ssc.awaitTerminationOrTimeout(50)
    }
    val timeTaken = System.currentTimeMillis() - startTime
    logInfo("Output generated in " + timeTaken + " milliseconds")

    Thread.sleep(100) // Give the forgetting of old RDDs some time to complete
  }

}

class BatchCountListener extends StreamingListener {
  var batchCount = 0

  override def onBatchCompleted(
    batchCompleted: StreamingListenerBatchCompleted): Unit = {
    batchCount = batchCount + 1
  }
} 
Example 100
Source File: TestHelper.scala    From spark-summit-2018   with GNU General Public License v3.0 5 votes vote down vote up
package com.twilio.open.streaming.trend.discovery

import java.io.{ByteArrayInputStream, InputStream}

import com.fasterxml.jackson.databind.ObjectMapper
import com.fasterxml.jackson.module.scala.DefaultScalaModule
import com.google.protobuf.Message
import com.googlecode.protobuf.format.JsonFormat
import com.holdenkarau.spark.testing.{LocalSparkContext, SparkContextProvider}
import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.sql.SparkSession
import org.scalatest.{BeforeAndAfterAll, FlatSpec, Matchers, Suite}
import org.slf4j.{Logger, LoggerFactory}

import scala.collection.Seq
import scala.io.Source
import scala.reflect.ClassTag
import scala.reflect.classTag

object TestHelper {
  val log: Logger = LoggerFactory.getLogger("com.twilio.open.streaming.trend.discovery.TestHelper")
  val mapper: ObjectMapper = {
    val m = new ObjectMapper()
    m.registerModule(DefaultScalaModule)
  }

  val jsonFormat: JsonFormat = new JsonFormat

  def loadScenario[T<: Message : ClassTag](file: String): Seq[T] = {
    val fileString = Source.fromFile(file).mkString
    val parsed = mapper.readValue(fileString, classOf[Sceanario])
    parsed.input.map { data =>
      val json = mapper.writeValueAsString(data)
      convert[T](json)
    }
  }

  def convert[T<: Message : ClassTag](json: String): T = {
    val clazz = classTag[T].runtimeClass
    val builder = clazz.getMethod("newBuilder").invoke(clazz).asInstanceOf[Message.Builder]
    try {
      val input: InputStream = new ByteArrayInputStream(json.getBytes())
      jsonFormat.merge(input, builder)
      builder.build().asInstanceOf[T]
    } catch {
      case e: Exception =>
        throw e
    }
  }

}

@SerialVersionUID(1L)
case class KafkaDataFrame(key: Array[Byte], topic: Array[Byte], value: Array[Byte]) extends Serializable

case class Sceanario(input: Seq[Any], expected: Option[Any] = None)

trait SparkSqlTest extends BeforeAndAfterAll with SparkContextProvider {
  self: Suite =>

  @transient var _sparkSql: SparkSession = _
  @transient private var _sc: SparkContext = _

  override def sc: SparkContext = _sc

  def conf: SparkConf

  def sparkSql: SparkSession = _sparkSql

  override def beforeAll() {
    _sparkSql = SparkSession.builder().config(conf).getOrCreate()

    _sc = _sparkSql.sparkContext
    setup(_sc)
    super.beforeAll()
  }

  override def afterAll() {
    try {
      _sparkSql.close()
      _sparkSql = null
      LocalSparkContext.stop(_sc)
      _sc = null
    } finally {
      super.afterAll()
    }
  }

} 
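A minimal usage sketch (the spec name is illustrative; SparkSqlTest leaves conf abstract, so the suite supplies it):

import org.apache.spark.SparkConf
import org.scalatest.{FlatSpec, Matchers}

class TrendDiscoverySpec extends FlatSpec with Matchers with SparkSqlTest {
  override def conf: SparkConf =
    new SparkConf().setMaster("local[2]").setAppName("trend-discovery-test")

  "sparkSql" should "be initialised by beforeAll" in {
    sparkSql.range(3).count() shouldBe 3
  }
}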
Example 101
Source File: WireMockSupport.scala    From self-assessment-api   with Apache License 2.0 5 votes vote down vote up
package support.wiremock

import com.github.tomakehurst.wiremock.WireMockServer
import com.github.tomakehurst.wiremock.client.WireMock
import com.github.tomakehurst.wiremock.core.WireMockConfiguration.wireMockConfig
import org.scalatest.{BeforeAndAfterAll, BeforeAndAfterEach, Suite}

trait WireMockConfig {
  val wiremockHost = "localhost"
  val wiremockPort = 11111
  val wiremockUrl = s"http://$wiremockHost:$wiremockPort"
}

trait WireMockSupport extends WireMockConfig
  with BeforeAndAfterAll
  with BeforeAndAfterEach { _: Suite =>

  private val wireMockServer = new WireMockServer(wireMockConfig().port(wiremockPort))

  override protected def beforeAll(): Unit = {
    super.beforeAll()
    wireMockServer.start()
    WireMock.configureFor(wiremockHost, wiremockPort)
  }

  override protected def afterAll(): Unit = {
    wireMockServer.stop()
    super.afterAll()
  }

  override protected def beforeEach(): Unit = {
    super.beforeEach()
    WireMock.reset()
  }
} 
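A minimal usage sketch (the suite name is illustrative; the server listens on wiremockUrl, i.e. http://localhost:11111, and stubs are reset before each test):

import com.github.tomakehurst.wiremock.client.WireMock._
import java.net.{HttpURLConnection, URL}
import org.scalatest.FunSuite

class HealthCheckStubSpec extends FunSuite with WireMockSupport {
  test("a stubbed endpoint answers on the WireMock port") {
    stubFor(get(urlEqualTo("/ping")).willReturn(aResponse().withStatus(200)))
    val conn = new URL(s"$wiremockUrl/ping").openConnection().asInstanceOf[HttpURLConnection]
    assert(conn.getResponseCode === 200)
  }
}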
Example 102
Source File: MockSelfAssessmentHttpParser.scala    From self-assessment-api   with Apache License 2.0 5 votes vote down vote up
package mocks.httpParser

import mocks.Mock
import org.scalatest.Suite
import router.httpParsers.SelfAssessmentHttpParser
import uk.gov.hmrc.http.HttpResponse

trait MockSelfAssessmentHttpParser extends Mock { _: Suite =>

  val mockSelfAssessmentHttpParser = mock[SelfAssessmentHttpParser]

  object MockSelfAssessmentHttpParser {
    def read = {
      when(mockSelfAssessmentHttpParser.read(any[String](), any[String](), any[HttpResponse]()))
    }
  }

  override protected def beforeEach(): Unit = {
    super.beforeEach()
    reset(mockSelfAssessmentHttpParser)
  }
} 
Example 103
Source File: MockHttp.scala    From self-assessment-api   with Apache License 2.0 5 votes vote down vote up
package mocks

import org.mockito.stubbing.OngoingStubbing
import org.scalatest.Suite
import play.api.libs.json.Writes
import uk.gov.hmrc.http.{HeaderCarrier, HttpReads}
import uk.gov.hmrc.play.bootstrap.http.HttpClient

import scala.concurrent.{ExecutionContext, Future}


trait MockHttp extends Mock {
  _: Suite =>

  val mockHttp: HttpClient = mock[HttpClient]

  override protected def beforeEach(): Unit = {
    super.beforeEach()
    reset(mockHttp)
  }

  object MockHttp {
    def GET[T](url: String): OngoingStubbing[Future[T]] = {
      when(mockHttp.GET[T](eqTo(url))(any[HttpReads[T]](), any[HeaderCarrier](), any[ExecutionContext]()))
    }

    def POST[I, O](url: String, body: I, headers: (String, String)*): OngoingStubbing[Future[O]] = {
      when(mockHttp.POST[I, O](eqTo(url), eqTo(body), eqTo(headers))(any[Writes[I]](),
        any[HttpReads[O]](), any[HeaderCarrier](), any[ExecutionContext]()))
    }

    def POSTEmpty[O](url: String, headers: (String, String)*): OngoingStubbing[Future[O]] = {
      when(mockHttp.POSTEmpty[O](eqTo(url), any[Seq[(String,String)]])(any[HttpReads[O]](), any[HeaderCarrier](), any[ExecutionContext]()))
    }

    def POST[I, O](url: String, body: I): OngoingStubbing[Future[O]] = {
      when(mockHttp.POST[I, O](eqTo(url), eqTo(body), any[Seq[(String,String)]]())(any[Writes[I]](),
        any[HttpReads[O]](), any[HeaderCarrier](), any[ExecutionContext]()))
    }

    def PUT[I, O](url: String, body: I): OngoingStubbing[Future[O]] = {
      when(mockHttp.PUT[I, O](eqTo(url), eqTo(body), any[Seq[(String,String)]])(any[Writes[I]](),
        any[HttpReads[O]](), any[HeaderCarrier](), any[ExecutionContext]()))
    }
  }

} 
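A minimal usage sketch (the spec name is illustrative; the stubbingOps helper from the Mock trait lets stubs read as when(...) returns (...)):

import org.scalatest.FunSuite
import scala.concurrent.Future
import uk.gov.hmrc.http.HttpResponse

class ConnectorSpec extends FunSuite with MockHttp {
  test("GET returns the canned response wired onto the mocked HttpClient") {
    val response = mock[HttpResponse]
    MockHttp.GET[HttpResponse]("http://localhost/test") returns Future.successful(response)
    // a connector built around mockHttp would now receive `response` from its GET call
  }
}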
Example 104
Source File: MockSelfAssessmentApiDefinition.scala    From self-assessment-api   with Apache License 2.0 5 votes vote down vote up
package mocks.definition

import mocks.Mock
import org.scalatest.Suite
import router.definition.SelfAssessmentApiDefinition

trait MockSelfAssessmentApiDefinition extends Mock { _: Suite =>

  val mockSelfAssessmentApiDefinition = mock[SelfAssessmentApiDefinition]

  object MockSelfAssessmentApiDefinition {
    def definition = {
      when(mockSelfAssessmentApiDefinition.definition)
    }
  }

  override protected def beforeEach(): Unit = {
    super.beforeEach()
    reset(mockSelfAssessmentApiDefinition)
  }
} 
Example 105
Source File: MockAuthConnector.scala    From self-assessment-api   with Apache License 2.0 5 votes vote down vote up
package mocks.auth

import mocks.Mock
import org.mockito.stubbing.OngoingStubbing
import org.scalatest.Suite
import uk.gov.hmrc.auth.core.AuthConnector
import uk.gov.hmrc.auth.core.authorise.{EmptyPredicate, Predicate}
import uk.gov.hmrc.auth.core.retrieve.{EmptyRetrieval, Retrieval}
import uk.gov.hmrc.http.HeaderCarrier

import scala.concurrent.{ExecutionContext, Future}

trait MockAuthConnector extends Mock { _: Suite =>

  val mockAuthConnector = mock[AuthConnector]

  object MockAuthConnector {
    def authorise[T](predicate: Predicate, retrieval: Retrieval[T]): OngoingStubbing[Future[T]] = {
      when(mockAuthConnector.authorise[T](eqTo(predicate), eqTo(retrieval))(any[HeaderCarrier](), any[ExecutionContext]()))
    }

    def authorise(): OngoingStubbing[Future[Unit]] = authorise(EmptyPredicate, EmptyRetrieval)
  }

  override protected def beforeEach(): Unit = {
    super.beforeEach()
    reset(mockAuthConnector)
  }
} 
Example 106
Source File: MockSelfAssessmentService.scala    From self-assessment-api   with Apache License 2.0 5 votes vote down vote up
package mocks.services

import mocks.Mock
import org.mockito.stubbing.OngoingStubbing
import org.scalatest.Suite
import play.api.libs.json.JsValue
import play.api.mvc.Request
import router.httpParsers.SelfAssessmentHttpParser.SelfAssessmentOutcome
import router.services.SelfAssessmentService
import uk.gov.hmrc.http.HeaderCarrier

import scala.concurrent.Future

trait MockSelfAssessmentService extends Mock { _: Suite =>

  val mockSelfAssessmentService = mock[SelfAssessmentService]

  object MockSelfAssessmentService {
    def get(): OngoingStubbing[Future[SelfAssessmentOutcome]] = {
      when(mockSelfAssessmentService.get()(any[HeaderCarrier](), any[Request[_]]()))
    }

    def post(body: JsValue): OngoingStubbing[Future[SelfAssessmentOutcome]] = {
      when(mockSelfAssessmentService.post(eqTo(body))(any[HeaderCarrier](), any[Request[_]]()))
    }

    def put(body: JsValue): OngoingStubbing[Future[SelfAssessmentOutcome]] = {
      when(mockSelfAssessmentService.put(eqTo(body))(any[HeaderCarrier](), any[Request[_]]()))
    }
  }

  override protected def beforeEach(): Unit = {
    super.beforeEach()
    reset(mockSelfAssessmentService)
  }
} 
Example 107
Source File: MockCrystallisationService.scala    From self-assessment-api   with Apache License 2.0 5 votes vote down vote up
package mocks.services

import mocks.Mock
import org.mockito.stubbing.OngoingStubbing
import org.scalatest.Suite
import play.api.libs.json.JsValue
import play.api.mvc.Request
import router.httpParsers.SelfAssessmentHttpParser.SelfAssessmentOutcome
import router.services.CrystallisationService
import uk.gov.hmrc.http.HeaderCarrier

import scala.concurrent.Future

trait MockCrystallisationService extends Mock { _: Suite =>
  val mockCrystallisationService = mock[CrystallisationService]

  object MockCrystallisationService {

    def post(body: JsValue): OngoingStubbing[Future[SelfAssessmentOutcome]] = {
      when(mockCrystallisationService.post(eqTo(body))(any[HeaderCarrier](), any[Request[_]]()))
    }

    def postEmpty: OngoingStubbing[Future[SelfAssessmentOutcome]] = {
      when(mockCrystallisationService.postEmpty(any[HeaderCarrier](), any[Request[_]]()))
    }

    def get(): OngoingStubbing[Future[SelfAssessmentOutcome]] = {
      when(mockCrystallisationService.get()(any[HeaderCarrier](), any[Request[_]]()))
    }
  }

  override protected def beforeEach(): Unit = {
    super.beforeEach()
    reset(mockCrystallisationService)
  }

} 
Example 108
Source File: MockPropertyEopsObligationsService.scala    From self-assessment-api   with Apache License 2.0 5 votes vote down vote up
package mocks.services

import mocks.Mock
import org.mockito.stubbing.OngoingStubbing
import org.scalatest.Suite
import play.api.mvc.Request
import router.httpParsers.SelfAssessmentHttpParser.SelfAssessmentOutcome
import router.services.PropertyEopsObligationsService
import uk.gov.hmrc.http.HeaderCarrier

import scala.concurrent.Future

trait MockPropertyEopsObligationsService extends Mock { _: Suite =>

  val mockPropertyEopsObligationsService = mock[PropertyEopsObligationsService]

  object MockPropertyEopsObligationsService {
    def get(): OngoingStubbing[Future[SelfAssessmentOutcome]] = {
      when(mockPropertyEopsObligationsService.get()(any[HeaderCarrier](), any[Request[_]]()))
    }
  }

  override protected def beforeEach(): Unit = {
    super.beforeEach()
    reset(mockPropertyEopsObligationsService)
  }
} 
Example 109
Source File: MockTaxCalcService.scala    From self-assessment-api   with Apache License 2.0 5 votes vote down vote up
package mocks.services

import mocks.Mock
import org.mockito.stubbing.OngoingStubbing
import org.scalatest.Suite
import play.api.mvc.Request
import router.httpParsers.SelfAssessmentHttpParser.SelfAssessmentOutcome
import router.services.TaxCalcService
import uk.gov.hmrc.http.HeaderCarrier

import scala.concurrent.Future

trait MockTaxCalcService extends Mock { _: Suite =>

  val mockTaxCalcService = mock[TaxCalcService]

  object MockTaxCalcService {
    def get(): OngoingStubbing[Future[SelfAssessmentOutcome]] = {
      when(mockTaxCalcService.get()(any[HeaderCarrier](), any[Request[_]]()))
    }
  }

  override protected def beforeEach(): Unit = {
    super.beforeEach()
    reset(mockTaxCalcService)
  }
} 
Example 110
Source File: MockSavingsAccountService.scala    From self-assessment-api   with Apache License 2.0 5 votes vote down vote up
package mocks.services

import mocks.Mock
import org.mockito.stubbing.OngoingStubbing
import org.scalatest.Suite
import play.api.libs.json.JsValue
import play.api.mvc.Request
import router.httpParsers.SelfAssessmentHttpParser.SelfAssessmentOutcome
import router.services.SavingsAccountService
import uk.gov.hmrc.http.HeaderCarrier

import scala.concurrent.Future

trait MockSavingsAccountService extends Mock { _: Suite =>

  lazy val mockSavingsAccountService: SavingsAccountService = mock[SavingsAccountService]

  object MockSavingsAccountService {

    def post(body: JsValue): OngoingStubbing[Future[SelfAssessmentOutcome]] = {
      when(mockSavingsAccountService.post(eqTo(body))(any[HeaderCarrier](), any[Request[_]]()))
    }

    def get(): OngoingStubbing[Future[SelfAssessmentOutcome]] = {
      when(mockSavingsAccountService.get()(any[HeaderCarrier](), any[Request[_]]()))
    }

    def put(body: JsValue) = {
      when(mockSavingsAccountService.put(eqTo(body))(any[HeaderCarrier](), any[Request[_]]()))
    }

  }

  override protected def beforeEach(): Unit = {
    super.beforeEach()
    reset(mockSavingsAccountService)
  }
} 
Example 111
Source File: MockPropertyEopsDeclarationService.scala    From self-assessment-api   with Apache License 2.0 5 votes vote down vote up
package mocks.services

import mocks.Mock
import org.mockito.stubbing.OngoingStubbing
import org.scalatest.Suite
import play.api.libs.json.JsValue
import play.api.mvc.Request
import router.httpParsers.SelfAssessmentHttpParser.SelfAssessmentOutcome
import router.services.PropertyEopsDeclarationService
import uk.gov.hmrc.http.HeaderCarrier

import scala.concurrent.Future

trait MockPropertyEopsDeclarationService extends Mock { _: Suite =>

  val mockPropertyEopsDeclarationService = mock[PropertyEopsDeclarationService]

  object MockPropertyEopsDeclarationService {
    def post(): OngoingStubbing[Future[SelfAssessmentOutcome]] = {
      when(mockPropertyEopsDeclarationService.post(any[JsValue]())(any[HeaderCarrier](), any[Request[_]]()))
    }
  }

  override protected def beforeEach(): Unit = {
    super.beforeEach()
    reset(mockPropertyEopsDeclarationService)
  }
} 
Example 112
Source File: MockReleaseTwoService.scala    From self-assessment-api   with Apache License 2.0 5 votes vote down vote up
package mocks.services

import mocks.Mock
import org.mockito.stubbing.OngoingStubbing
import org.scalatest.Suite
import play.api.libs.json.JsValue
import play.api.mvc.Request
import router.httpParsers.SelfAssessmentHttpParser.SelfAssessmentOutcome
import router.services.ReleaseTwoService
import uk.gov.hmrc.http.HeaderCarrier

import scala.concurrent.Future

trait MockReleaseTwoService extends Mock { _: Suite =>

  val mockReleaseTwoService = mock[ReleaseTwoService]

  object MockReleaseTwoService {
    def create(body: JsValue): OngoingStubbing[Future[SelfAssessmentOutcome]] = {
      when(mockReleaseTwoService.create(eqTo(body))(any[HeaderCarrier](), any[Request[_]]()))
    }

    def get(): OngoingStubbing[Future[SelfAssessmentOutcome]] = {
      when(mockReleaseTwoService.get()(any[HeaderCarrier](), any[Request[_]]()))
    }

    def amend(body: JsValue): OngoingStubbing[Future[SelfAssessmentOutcome]] = {
      when(mockReleaseTwoService.amend(eqTo(body))(any[HeaderCarrier](), any[Request[_]]()))
    }
  }

  override protected def beforeEach(): Unit = {
    super.beforeEach()
    reset(mockReleaseTwoService)
  }
} 
Example 113
Source File: MockCharitableGivingService.scala    From self-assessment-api   with Apache License 2.0 5 votes vote down vote up
package mocks.services

import mocks.Mock
import org.mockito.stubbing.OngoingStubbing
import org.scalatest.Suite
import play.api.libs.json.JsValue
import play.api.mvc.Request
import router.httpParsers.SelfAssessmentHttpParser.SelfAssessmentOutcome
import router.services.CharitableGivingService
import uk.gov.hmrc.http.HeaderCarrier

import scala.concurrent.Future

trait MockCharitableGivingService extends Mock { _: Suite =>

  val mockCharitableGivingService = mock[CharitableGivingService]

  object MockCharitableGivingService {

    def put(body: JsValue): OngoingStubbing[Future[SelfAssessmentOutcome]] = {
      when(mockCharitableGivingService.put(eqTo(body))(any[HeaderCarrier](), any[Request[_]]()))
    }

    def get(): OngoingStubbing[Future[SelfAssessmentOutcome]] = {
      when(mockCharitableGivingService.get()(any[HeaderCarrier](), any[Request[_]]()))
    }
  }

  override protected def beforeEach(): Unit = {
    super.beforeEach()
    reset(mockCharitableGivingService)
  }
} 
Example 114
Source File: MockSelfEmploymentEopsDeclarationService.scala    From self-assessment-api   with Apache License 2.0 5 votes vote down vote up
package mocks.services

import mocks.Mock
import org.mockito.stubbing.OngoingStubbing
import org.scalatest.Suite
import play.api.libs.json.JsValue
import play.api.mvc.Request
import router.httpParsers.SelfAssessmentHttpParser.SelfAssessmentOutcome
import router.services.SelfEmploymentEopsDeclarationService
import uk.gov.hmrc.http.HeaderCarrier

import scala.concurrent.Future

trait MockSelfEmploymentEopsDeclarationService extends Mock { _: Suite =>

  val mockSelfEmploymentEopsDeclarationService = mock[SelfEmploymentEopsDeclarationService]

  object MockSelfEmploymentEopsDeclarationService {
    def post(): OngoingStubbing[Future[SelfAssessmentOutcome]] = {
      when(mockSelfEmploymentEopsDeclarationService.post(any[JsValue]())(any[HeaderCarrier](), any[Request[_]]()))
    }
  }

  override protected def beforeEach(): Unit = {
    super.beforeEach()
    reset(mockSelfEmploymentEopsDeclarationService)
  }
} 
Example 115
Source File: MockDividendsService.scala    From self-assessment-api   with Apache License 2.0 5 votes vote down vote up
package mocks.services

import mocks.Mock
import org.mockito.stubbing.OngoingStubbing
import org.scalatest.Suite
import play.api.libs.json.JsValue
import play.api.mvc.Request
import router.httpParsers.SelfAssessmentHttpParser.SelfAssessmentOutcome
import router.services.DividendsService
import uk.gov.hmrc.http.HeaderCarrier

import scala.concurrent.Future

trait MockDividendsService extends Mock { _: Suite =>

  val mockDividendsService = mock[DividendsService]

  object MockDividendsService {

    def put(body: JsValue): OngoingStubbing[Future[SelfAssessmentOutcome]] = {
      when(mockDividendsService.put(eqTo(body))(any[HeaderCarrier](), any[Request[_]]()))
    }

    def get(): OngoingStubbing[Future[SelfAssessmentOutcome]] = {
      when(mockDividendsService.get()(any[HeaderCarrier](), any[Request[_]]()))
    }
  }

  override protected def beforeEach(): Unit = {
    super.beforeEach()
    reset(mockDividendsService)
  }
} 
Example 116
Source File: Mock.scala    From self-assessment-api   with Apache License 2.0 5 votes vote down vote up
package mocks

import org.mockito.{ArgumentMatchers => Matchers}
import org.mockito.Mockito
import org.mockito.stubbing.OngoingStubbing
import org.mockito.verification.VerificationMode
import org.scalatest.{BeforeAndAfterEach, Suite}
import org.scalatestplus.mockito.MockitoSugar

trait Mock extends MockitoSugar with BeforeAndAfterEach { _: Suite =>

  // predefined mocking functions to avoid importing
  def any[T]() = Matchers.any[T]()
  def eqTo[T](t: T) = Matchers.eq[T](t)
  def when[T](t: T) = Mockito.when(t)
  def reset[T](t: T) = Mockito.reset(t)
  def verify[T](mock: T, mode: VerificationMode) = Mockito.verify(mock, mode)
  def times(num: Int) = Mockito.times(num)

  implicit class stubbingOps[T](stubbing: OngoingStubbing[T]){
    def returns(t: T) = stubbing.thenReturn(t)
  }
} 
Example 117
Source File: MockAppConfig.scala    From self-assessment-api   with Apache License 2.0 5 votes vote down vote up
package mocks.config

import config.AppConfig
import mocks.Mock
import org.mockito.stubbing.OngoingStubbing
import org.scalatest.Suite
import play.api.Configuration

trait MockAppConfig extends Mock { _: Suite =>

  val mockAppConfig = mock[AppConfig]

  object MockAppConfig {
    def appName: OngoingStubbing[String] = when(mockAppConfig.appName)
    def appUrl: OngoingStubbing[String] = when(mockAppConfig.appUrl)
    def apiStatus(version: String): OngoingStubbing[String] = when(mockAppConfig.apiStatus(any[String]()))
    def featureSwitch: OngoingStubbing[Option[Configuration]] = when(mockAppConfig.featureSwitch)
    def registrationEnabled: OngoingStubbing[Boolean] = when(mockAppConfig.registrationEnabled)

    def saApiUrl: OngoingStubbing[String] = when(mockAppConfig.saApiUrl)
    def cgApiUrl: OngoingStubbing[String] = when(mockAppConfig.cgApiUrl)
    def taxCalcUrl: OngoingStubbing[String] = when(mockAppConfig.taxCalcUrl)
    def propertyUrl: OngoingStubbing[String] =  when(mockAppConfig.propertyUrl)
    def selfEmploymentUrl: OngoingStubbing[String] =  when(mockAppConfig.selfEmploymentUrl)
    def dividendsApiUrl: OngoingStubbing[String] = when(mockAppConfig.dividendsApiUrl)
    def savingsAccountsApiUrl: OngoingStubbing[String] = when(mockAppConfig.savingsAccountApiUrl)
    def crystallisationApiUrl: OngoingStubbing[String] = when(mockAppConfig.crystallisationApiUrl)
  }

  override protected def beforeEach(): Unit = {
    super.beforeEach()
    reset(mockAppConfig)
  }
} 
Example 118
Source File: MockCharitableGivingConnector.scala    From self-assessment-api   with Apache License 2.0 5 votes vote down vote up
package mocks.connectors

import mocks.Mock
import org.mockito.stubbing.OngoingStubbing
import org.scalatest.Suite
import play.api.libs.json.JsValue
import router.connectors.CharitableGivingConnector
import router.httpParsers.SelfAssessmentHttpParser.SelfAssessmentOutcome
import uk.gov.hmrc.http.HeaderCarrier

import scala.concurrent.Future

trait MockCharitableGivingConnector extends Mock { _: Suite =>

  val mockCharitableGivingConnector = mock[CharitableGivingConnector]

  object MockCharitableGivingConnector {
    def put(uri: String, body: JsValue): OngoingStubbing[Future[SelfAssessmentOutcome]] = {
      when(mockCharitableGivingConnector.put(eqTo(uri), eqTo(body))(any[HeaderCarrier]()))
    }

    def get(uri: String): OngoingStubbing[Future[SelfAssessmentOutcome]] = {
      when(mockCharitableGivingConnector.get(eqTo(uri))(any[HeaderCarrier]()))
    }
  }

  override protected def beforeEach(): Unit = {
    super.beforeEach()
    reset(mockCharitableGivingConnector)
  }
} 
Example 119
Source File: MockDividendsConnector.scala    From self-assessment-api   with Apache License 2.0 5 votes vote down vote up
package mocks.connectors

import mocks.Mock
import org.mockito.stubbing.OngoingStubbing
import org.scalatest.Suite
import play.api.libs.json.JsValue
import router.connectors.DividendsConnector
import router.httpParsers.SelfAssessmentHttpParser.SelfAssessmentOutcome
import uk.gov.hmrc.http.HeaderCarrier

import scala.concurrent.Future

trait MockDividendsConnector extends Mock { _: Suite =>

  val mockDividendsConnector = mock[DividendsConnector]

  object MockDividendsConnector {
    def put(uri: String, body: JsValue): OngoingStubbing[Future[SelfAssessmentOutcome]] = {
      when(mockDividendsConnector.put(eqTo(uri), eqTo(body))(any[HeaderCarrier]()))
    }

    def get(uri: String): OngoingStubbing[Future[SelfAssessmentOutcome]] = {
      when(mockDividendsConnector.get(eqTo(uri))(any[HeaderCarrier]()))
    }
  }

  override protected def beforeEach(): Unit = {
    super.beforeEach()
    reset(mockDividendsConnector)
  }
} 
Example 120
Source File: MockSavingsAccountConnector.scala    From self-assessment-api   with Apache License 2.0 5 votes vote down vote up
package mocks.connectors

import mocks.Mock
import org.mockito.stubbing.OngoingStubbing
import org.scalatest.Suite
import play.api.libs.json.JsValue
import router.connectors.SavingsAccountConnector
import router.httpParsers.SelfAssessmentHttpParser.SelfAssessmentOutcome
import uk.gov.hmrc.http.HeaderCarrier

import scala.concurrent.Future

trait MockSavingsAccountConnector extends Mock { _: Suite =>

  val mockSavingsAccountConnector = mock[SavingsAccountConnector]

  object MockSavingsAccountConnector {
    def post(uri: String, body: JsValue): OngoingStubbing[Future[SelfAssessmentOutcome]] = {
      when(mockSavingsAccountConnector.post(eqTo(uri), eqTo(body))(any[HeaderCarrier]()))
    }

    def get(uri: String): OngoingStubbing[Future[SelfAssessmentOutcome]] = {
      when(mockSavingsAccountConnector.get(eqTo(uri))(any[HeaderCarrier]()))
    }

    def put(uri: String, body: JsValue): OngoingStubbing[Future[SelfAssessmentOutcome]] = {
      when(mockSavingsAccountConnector.put(eqTo(uri), eqTo(body))(any[HeaderCarrier]()))
    }
  }

  override protected def beforeEach(): Unit = {
    super.beforeEach()
    reset(mockSavingsAccountConnector)
  }
} 
Example 121
Source File: MockSelfAssessmentConnector.scala    From self-assessment-api   with Apache License 2.0 5 votes vote down vote up
package mocks.connectors

import mocks.Mock
import org.mockito.stubbing.OngoingStubbing
import org.scalatest.Suite
import play.api.libs.json.JsValue
import router.connectors.SelfAssessmentConnector
import router.httpParsers.SelfAssessmentHttpParser.SelfAssessmentOutcome
import uk.gov.hmrc.http.HeaderCarrier

import scala.concurrent.Future

trait MockSelfAssessmentConnector extends Mock { _: Suite =>

  val mockSelfAssessmentConnector = mock[SelfAssessmentConnector]

  object MockSelfAssessmentConnector {
    def get(uri: String): OngoingStubbing[Future[SelfAssessmentOutcome]] = {
      when(mockSelfAssessmentConnector.get(eqTo(uri))(any[HeaderCarrier]()))
    }

    def post(uri: String, body: JsValue): OngoingStubbing[Future[SelfAssessmentOutcome]] = {
      when(mockSelfAssessmentConnector.post(eqTo(uri), eqTo(body))(any[HeaderCarrier]()))
    }

    def postEmpty(uri: String): OngoingStubbing[Future[SelfAssessmentOutcome]] = {
      when(mockSelfAssessmentConnector.postEmpty(eqTo(uri))(any[HeaderCarrier]()))
    }

    def put(uri: String, body: JsValue): OngoingStubbing[Future[SelfAssessmentOutcome]] = {
      when(mockSelfAssessmentConnector.put(eqTo(uri), eqTo(body))(any[HeaderCarrier]()))
    }
  }

  override protected def beforeEach(): Unit = {
    super.beforeEach()
    reset(mockSelfAssessmentConnector)
  }
} 
Example 122
Source File: MockSelfEmploymentConnector.scala    From self-assessment-api   with Apache License 2.0 5 votes vote down vote up
package mocks.connectors

import mocks.Mock
import org.mockito.stubbing.OngoingStubbing
import org.scalatest.Suite
import play.api.libs.json.JsValue
import router.connectors.SelfEmploymentConnector
import router.httpParsers.SelfAssessmentHttpParser.SelfAssessmentOutcome
import uk.gov.hmrc.http.HeaderCarrier

import scala.concurrent.Future

trait MockSelfEmploymentConnector extends Mock { _: Suite =>

  val mockSelfEmploymentConnector = mock[SelfEmploymentConnector]

  object MockSelfEmploymentConnector {
    def get(uri: String): OngoingStubbing[Future[SelfAssessmentOutcome]] = {
      when(mockSelfEmploymentConnector.get(eqTo(uri))(any[HeaderCarrier]()))
    }
    def post(uri: String, body: JsValue): OngoingStubbing[Future[SelfAssessmentOutcome]] = {
      when(mockSelfEmploymentConnector.post(eqTo(uri), eqTo(body))(any[HeaderCarrier]()))
    }
  }

  override protected def beforeEach(): Unit = {
    super.beforeEach()
    reset(mockSelfEmploymentConnector)
  }
} 
Example 123
Source File: MockPropertyConnector.scala    From self-assessment-api   with Apache License 2.0 5 votes vote down vote up
package mocks.connectors

import mocks.Mock
import org.mockito.stubbing.OngoingStubbing
import org.scalatest.Suite
import play.api.libs.json.JsValue
import router.connectors.PropertyConnector
import router.httpParsers.SelfAssessmentHttpParser.SelfAssessmentOutcome
import uk.gov.hmrc.http.HeaderCarrier

import scala.concurrent.Future

trait MockPropertyConnector extends Mock { _: Suite =>

  val mockPropertyConnector = mock[PropertyConnector]

  object MockPropertyConnector {
    def get(uri: String): OngoingStubbing[Future[SelfAssessmentOutcome]] = {
      when(mockPropertyConnector.get(eqTo(uri))(any[HeaderCarrier]()))
    }
    def post(uri: String, body: JsValue): OngoingStubbing[Future[SelfAssessmentOutcome]] = {
      when(mockPropertyConnector.post(eqTo(uri), eqTo(body))(any[HeaderCarrier]()))
    }
  }

  override protected def beforeEach(): Unit = {
    super.beforeEach()
    reset(mockPropertyConnector)
  }
} 
Example 124
Source File: MockCrystallisationConnector.scala    From self-assessment-api   with Apache License 2.0 5 votes vote down vote up
package mocks.connectors

import mocks.Mock
import org.mockito.stubbing.OngoingStubbing
import org.scalatest.Suite
import play.api.libs.json.JsValue
import router.connectors.CrystallisationConnector
import router.httpParsers.SelfAssessmentHttpParser.SelfAssessmentOutcome
import uk.gov.hmrc.http.HeaderCarrier

import scala.concurrent.Future

trait MockCrystallisationConnector extends Mock {_: Suite =>

  val mockCrystallisationConnector = mock[CrystallisationConnector]

  object MockCrystallisationConnector {
    def post(uri: String, body: JsValue): OngoingStubbing[Future[SelfAssessmentOutcome]] = {
      when(mockCrystallisationConnector.post(eqTo(uri), eqTo(body))(any[HeaderCarrier]()))
    }

    def postEmpty(uri: String): OngoingStubbing[Future[SelfAssessmentOutcome]] = {
      when(mockCrystallisationConnector.postEmpty(eqTo(uri))(any[HeaderCarrier]()))
    }

    def get(uri: String): OngoingStubbing[Future[SelfAssessmentOutcome]] = {
      when(mockCrystallisationConnector.get(eqTo(uri))(any[HeaderCarrier]()))
    }
  }

  override protected def beforeEach(): Unit = {
    super.beforeEach()
    reset(mockCrystallisationConnector)
  }

} 
Example 125
Source File: MockTaxCalcConnector.scala    From self-assessment-api   with Apache License 2.0 5 votes vote down vote up
package mocks.connectors

import mocks.Mock
import org.mockito.stubbing.OngoingStubbing
import org.scalatest.Suite
import router.connectors.TaxCalcConnector
import router.httpParsers.SelfAssessmentHttpParser.SelfAssessmentOutcome
import uk.gov.hmrc.http.HeaderCarrier

import scala.concurrent.Future

trait MockTaxCalcConnector extends Mock { _: Suite =>

  val mockTaxCalcConnector = mock[TaxCalcConnector]

  object MockTaxCalcConnector {
    def get(uri: String): OngoingStubbing[Future[SelfAssessmentOutcome]] = {
      when(mockTaxCalcConnector.get(eqTo(uri))(any[HeaderCarrier]()))
    }
  }

  override protected def beforeEach(): Unit = {
    super.beforeEach()
    reset(mockTaxCalcConnector)
  }
} 
Example 126
Source File: LocalClusterSparkContext.scala    From sparkoscope   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.mllib.util

import org.scalatest.{BeforeAndAfterAll, Suite}

import org.apache.spark.{SparkConf, SparkContext}

trait LocalClusterSparkContext extends BeforeAndAfterAll { self: Suite =>
  @transient var sc: SparkContext = _

  override def beforeAll() {
    super.beforeAll()
    val conf = new SparkConf()
      .setMaster("local-cluster[2, 1, 1024]")
      .setAppName("test-cluster")
      .set("spark.rpc.message.maxSize", "1") // set to 1MB to detect direct serialization of data
    sc = new SparkContext(conf)
  }

  override def afterAll() {
    try {
      if (sc != null) {
        sc.stop()
      }
    } finally {
      super.afterAll()
    }
  }
} 
Example 127
Source File: MLlibTestSparkContext.scala    From sparkoscope   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.mllib.util

import java.io.File

import org.scalatest.Suite

import org.apache.spark.SparkContext
import org.apache.spark.ml.util.TempDirectory
import org.apache.spark.sql.{SparkSession, SQLContext, SQLImplicits}
import org.apache.spark.util.Utils

trait MLlibTestSparkContext extends TempDirectory { self: Suite =>
  @transient var spark: SparkSession = _
  @transient var sc: SparkContext = _
  @transient var checkpointDir: String = _

  override def beforeAll() {
    super.beforeAll()
    spark = SparkSession.builder
      .master("local[2]")
      .appName("MLlibUnitTest")
      .getOrCreate()
    sc = spark.sparkContext

    checkpointDir = Utils.createDirectory(tempDir.getCanonicalPath, "checkpoints").toString
    sc.setCheckpointDir(checkpointDir)
  }

  override def afterAll() {
    try {
      Utils.deleteRecursively(new File(checkpointDir))
      SparkSession.clearActiveSession()
      if (spark != null) {
        spark.stop()
      }
      spark = null
    } finally {
      super.afterAll()
    }
  }

  
  protected object testImplicits extends SQLImplicits {
    protected override def _sqlContext: SQLContext = self.spark.sqlContext
  }
} 
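A minimal sketch of a suite relying on this trait, including the protected testImplicits object for Seq-to-DataFrame conversion (suite name and data are illustrative):

import org.apache.spark.mllib.util.MLlibTestSparkContext
import org.scalatest.FunSuite

class ToDFUsageSuite extends FunSuite with MLlibTestSparkContext {
  test("converts a local Seq to a DataFrame") {
    import testImplicits._
    // spark, sc and checkpointDir are all initialized by MLlibTestSparkContext.beforeAll()
    val df = Seq((1, "a"), (2, "b")).toDF("id", "label")
    assert(df.count() == 2)
  }
}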
Example 128
Source File: SharedSparkContext.scala    From sparkoscope   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark

import org.scalatest.{BeforeAndAfterAll, BeforeAndAfterEach}
import org.scalatest.Suite


trait SharedSparkContext extends BeforeAndAfterAll with BeforeAndAfterEach { self: Suite =>

  @transient private var _sc: SparkContext = _

  def sc: SparkContext = _sc

  var conf = new SparkConf(false)

  override def beforeAll() {
    super.beforeAll()
    _sc = new SparkContext(
      "local[4]", "test", conf.set("spark.hadoop.fs.file.impl", classOf[DebugFilesystem].getName))
  }

  override def afterAll() {
    try {
      LocalSparkContext.stop(_sc)
      _sc = null
    } finally {
      super.afterAll()
    }
  }

  protected override def beforeEach(): Unit = {
    super.beforeEach()
    DebugFilesystem.clearOpenStreams()
  }

  protected override def afterEach(): Unit = {
    super.afterEach()
    DebugFilesystem.assertNoOpenStreams()
  }
} 
Example 129
Source File: ResetSystemProperties.scala    From sparkoscope   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.util

import java.util.Properties

import org.apache.commons.lang3.SerializationUtils
import org.scalatest.{BeforeAndAfterEach, Suite}


private[spark] trait ResetSystemProperties extends BeforeAndAfterEach { this: Suite =>
  var oldProperties: Properties = null

  override def beforeEach(): Unit = {
    // we need SerializationUtils.clone instead of `new Properties(System.getProperties())` because
    // the latter way of creating a copy does not copy the properties but it initializes a new
    // Properties object with the given properties as defaults. They are not recognized at all
    // by standard Scala wrapper over Java Properties then.
    oldProperties = SerializationUtils.clone(System.getProperties)
    super.beforeEach()
  }

  override def afterEach(): Unit = {
    try {
      super.afterEach()
    } finally {
      System.setProperties(oldProperties)
      oldProperties = null
    }
  }
} 
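Because the trait is private[spark], a consuming suite has to live under an org.apache.spark package. A minimal sketch (package, suite name and property key are illustrative):

package org.apache.spark.util

import org.scalatest.FunSuite

class PropertyMutatingSuite extends FunSuite with ResetSystemProperties {
  test("a test may mutate system properties freely") {
    System.setProperty("spark.test.flag", "true")
    assert(System.getProperty("spark.test.flag") == "true")
    // afterEach() restores the snapshot taken in beforeEach(), so the change does not leak
  }
}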
Example 130
Source File: BeforeAndAfterWithContext.scala    From uberdata   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.rpc.netty

import eleflow.uberdata.core.IUberdataContext
import eleflow.uberdata.core.util.ClusterSettings
import org.apache.log4j.{Level, Logger}
import org.apache.spark.{SparkConf, SparkEnv}
import org.scalatest.{BeforeAndAfterEach, Suite}

object TestSparkConf {
  def conf = {
    val sconf = new SparkConf()
    sconf.set("spark.app.name", "teste")
    sconf
  }

  val separator = ","

}


trait BeforeAndAfterWithContext extends BeforeAndAfterEach { this: Suite =>

  val defaultFilePath = "src/test/resources/"
  import TestSparkConf._
  ClusterSettings.master = Some("local[*]")
  conf.set("spark.driver.allowMultipleContexts", "true")
  @transient val context = IUberdataContext.getUC(conf)

  override def beforeEach() = {
    setLogLevels(Level.INFO, Seq("spark", "org.eclipse.jetty", "akka"))
  }

  def setLogLevels(level: org.apache.log4j.Level, loggers: TraversableOnce[String]) = {
    loggers.map { loggerName =>
      val logger = Logger.getLogger(loggerName)
      val prevLevel = logger.getLevel
      logger.setLevel(level)
      loggerName -> prevLevel
    }.toMap
  }

  override def afterEach() = {
    val get = SparkEnv.get
    val rpcEnv =
      if (get != null) {
        Some(get.rpcEnv)
      } else None
    context.clearContext()
    //rpcEnv.foreach(
    //  _.fileServer.asInstanceOf[org.apache.spark.rpc.netty.HttpBasedFileServer].shutdown())


    System.clearProperty("spark.master.port")
  }
} 
Example 131
Source File: TestFuncUberdataContext.scala    From uberdata   with Apache License 2.0 5 votes vote down vote up
package eleflow.uberdata.core

import eleflow.uberdata.core.data.Dataset
import org.apache.spark.rpc.netty.{BeforeAndAfterWithContext, TestSparkConf}
import org.scalatest.{FlatSpec, Matchers, Suite}


class TestFuncUberdataContext extends FlatSpec with Matchers with BeforeAndAfterWithContext {
  this: Suite =>

  class LocalContextI extends IUberdataContext(TestSparkConf.conf) {

    def extractFileName(file: String) = {
      val name = file.split("/").last
      val index = name.indexOf(".csv") + name.indexOf(".txt")
      name.splitAt(index + 1).productIterator.toList.filter(!_.toString.isEmpty).head.toString
    }
  }

  override val defaultFilePath = "sparknotebook/src/test/resources/"

  "FuncUberdataContext " should "extract .csv from the file name" in {

    val context = new LocalContextI

    assert(context.extractFileName("arquivo.csv") == "arquivo")
    assert(context.extractFileName("/home/arquivo.csv") == "arquivo")
    assert(context.extractFileName("/home/subfolder/arquivo.csv") == "arquivo")
    assert(context.extractFileName("//home/arquivo.csv") == "arquivo")
    assert(context.extractFileName("//home/subfolder/arquivo.csv") == "arquivo")
    assert(context.extractFileName("///home/arquivo.csv") == "arquivo")
    assert(context.extractFileName("///home/subfolder/arquivo.csv") == "arquivo")
    assert(context.extractFileName("s3://home/arquivo.csv") == "arquivo")
    assert(context.extractFileName("s3://home/subfolder/arquivo.csv") == "arquivo")
    assert(context.extractFileName("hdfs://home/arquivo.csv") == "arquivo")
    assert(context.extractFileName("hdfs://home/subfolder/arquivo.csv") == "arquivo")
    assert(context.extractFileName("s3:///home/arquivo.csv") == "arquivo")
    assert(context.extractFileName("s3:///home/subfolder/arquivo.csv") == "arquivo")
    assert(context.extractFileName("hdfs:///home/arquivo.csv") == "arquivo")
    assert(context.extractFileName("hdfs:///home/subfolder/arquivo.csv") == "arquivo")
  }

  it should "extract .txt from the file name" in {

    val context = new LocalContextI

    assert(context.extractFileName("arquivo.txt") == "arquivo")
    assert(context.extractFileName("/home/arquivo.txt") == "arquivo")
    assert(context.extractFileName("/home/subfolder/arquivo.txt") == "arquivo")
    assert(context.extractFileName("//home/arquivo.txt") == "arquivo")
    assert(context.extractFileName("//home/subfolder/arquivo.txt") == "arquivo")
    assert(context.extractFileName("///home/arquivo.txt") == "arquivo")
    assert(context.extractFileName("///home/subfolder/arquivo.txt") == "arquivo")
    assert(context.extractFileName("s3://home/arquivo.txt") == "arquivo")
    assert(context.extractFileName("s3://home/subfolder/arquivo.txt") == "arquivo")
    assert(context.extractFileName("hdfs://home/arquivo.txt") == "arquivo")
    assert(context.extractFileName("hdfs://home/subfolder/arquivo.txt") == "arquivo")
    assert(context.extractFileName("s3:///home/arquivo.txt") == "arquivo")
    assert(context.extractFileName("s3:///home/subfolder/arquivo.txt") == "arquivo")
    assert(context.extractFileName("hdfs:///home/arquivo.txt") == "arquivo")
    assert(context.extractFileName("hdfs:///home/subfolder/arquivo.txt") == "arquivo")
  }
} 
Example 132
Source File: TempDirectory.scala    From spark-FeatureSelection   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.ml.feature.selection.test_util

import java.io.File

import org.apache.spark.util.Utils
import org.scalatest.{BeforeAndAfterAll, Suite}


trait TempDirectory extends BeforeAndAfterAll { self: Suite =>

  private var _tempDir: File = _

  protected def tempDir: File = _tempDir

  override def beforeAll(): Unit = {
    super.beforeAll()
    _tempDir = Utils.createTempDir(namePrefix = this.getClass.getName)
  }

  override def afterAll(): Unit = {
    try {
      Utils.deleteRecursively(_tempDir)
    } finally {
      super.afterAll()
    }
  }
} 
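A minimal sketch of a suite using the trait (suite name and file name are illustrative):

import java.io.File

import org.apache.spark.ml.feature.selection.test_util.TempDirectory
import org.scalatest.FunSuite

class TempDirectoryUsageSuite extends FunSuite with TempDirectory {
  test("creates files under the managed temporary directory") {
    // tempDir is created in beforeAll() and deleted recursively in afterAll()
    val scratch = new File(tempDir, "scratch.txt")
    assert(scratch.createNewFile())
  }
}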
Example 133
Source File: TestDBSettings.scala    From scuruto   with MIT License 5 votes vote down vote up
package model

import org.flywaydb.core.Flyway
import org.scalatest.{ BeforeAndAfterEach, Suite }
import scalikejdbc.ConnectionPool
import skinny._
import skinny.exception.DBSettingsException

trait TestDBSettings extends BeforeAndAfterEach with DBSettings { this: Suite =>

  override protected def beforeEach(): Unit = {
    clean()
    dbmigration.DBMigration.migrate()
  }

  private def clean(env: String = SkinnyEnv.Test, poolName: String = ConnectionPool.DEFAULT_NAME.name): Unit = {
    val skinnyEnv = SkinnyEnv.get()
    try {
      System.setProperty(SkinnyEnv.PropertyKey, env)
      DBSettings.initialize()
      try {
        val pool = ConnectionPool.get(Symbol(poolName))
        val flyway = new Flyway
        flyway.setDataSource(pool.dataSource)
        flyway.clean()
      } catch {
        case e: IllegalStateException =>
          throw new DBSettingsException(s"ConnectionPool named $poolName is not found.")
      }
    } finally {
      skinnyEnv.foreach { env => System.setProperty(SkinnyEnv.PropertyKey, env) }
      DBSettings.initialize()
    }
  }

} 
Example 134
Source File: ResetSystemProperties.scala    From SparkCore   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.util

import java.util.Properties

import org.apache.commons.lang3.SerializationUtils
import org.scalatest.{BeforeAndAfterEach, Suite}


private[spark] trait ResetSystemProperties extends BeforeAndAfterEach { this: Suite =>
  var oldProperties: Properties = null

  override def beforeEach(): Unit = {
    // we need SerializationUtils.clone instead of `new Properties(System.getProperties())` because
    // the latter way of creating a copy does not copy the properties but it initializes a new
    // Properties object with the given properties as defaults. They are not recognized at all
    // by standard Scala wrapper over Java Properties then.
    oldProperties = SerializationUtils.clone(System.getProperties)
    super.beforeEach()
  }

  override def afterEach(): Unit = {
    try {
      super.afterEach()
    } finally {
      System.setProperties(oldProperties)
      oldProperties = null
    }
  }
} 
Example 135
Source File: TestSparkContext.scala    From spark-neighbors   with MIT License 5 votes vote down vote up
package com.github.karlhigley.spark.neighbors

import org.scalatest.{ BeforeAndAfterAll, Suite }

import org.apache.spark.{ SparkConf, SparkContext }

trait TestSparkContext extends BeforeAndAfterAll { self: Suite =>
  @transient var sc: SparkContext = _

  override def beforeAll() {
    super.beforeAll()
    val conf = new SparkConf()
      .setMaster("local[2]")
      .setAppName("LshUnitTest")
    sc = new SparkContext(conf)
  }

  override def afterAll() {
    if (sc != null) {
      sc.stop()
    }
    sc = null
    super.afterAll()
  }
} 
Example 136
Source File: DockerizedInfluxDB.scala    From chronicler   with Apache License 2.0 5 votes vote down vote up
package com.github.fsanaulla.chronicler.testing.it

import com.github.fsanaulla.chronicler.core.model.InfluxCredentials
import org.scalatest.{BeforeAndAfterAll, Suite}
import org.testcontainers.containers.InfluxDBContainer
import org.testcontainers.containers.output.ToStringConsumer

trait DockerizedInfluxDB extends BeforeAndAfterAll { self: Suite =>

  protected def version: String = sys.env.getOrElse("INFLUXDB_VERSION", "1.7.3")
  protected val _influx: InfluxDBContainer[Nothing] = new InfluxDBContainer(version)
    .withLogConsumer(new ToStringConsumer)

  
  override def beforeAll(): Unit = {
    // start the InfluxDB test container before the tests run (counterpart of stop() in afterAll)
    super.beforeAll()
    _influx.start()
  }

  override def afterAll(): Unit = {
    super.afterAll()
    _influx.stop()
  }
} 
Example 137
Source File: TestWithSpark.scala    From ZparkIO   with MIT License 5 votes vote down vote up
package com.leobenkel.zparkiotest

import com.holdenkarau.spark.testing.DataFrameSuiteBase
import org.apache.spark.SparkConf
import org.scalatest.Suite

trait TestWithSpark extends DataFrameSuiteBase { self: Suite =>
  override protected val reuseContextIfPossible: Boolean = true
  override protected val enableHiveSupport:      Boolean = false

  
  def enableSparkUI: Boolean = {
    false
  }

  final override def conf: SparkConf = {
    if (enableSparkUI) {
      super.conf
        .set("spark.ui.enabled", "true")
        .set("spark.ui.port", "4050")
    } else {
      super.conf
    }
  }
} 
Example 138
Source File: CouchbaseBucketSetup.scala    From akka-persistence-couchbase   with Apache License 2.0 5 votes vote down vote up
package akka.persistence.couchbase

import akka.stream.alpakka.couchbase.scaladsl.CouchbaseSession
import org.scalatest.{BeforeAndAfterAll, Suite}

// FIXME this is currently almost test-kit used across all modules, make it testkit or duplicate instead of a test-test dependency?
trait CouchbaseBucketSetup extends BeforeAndAfterAll { self: Suite =>

  private var clusterConnection: CouchbaseClusterConnection = _
  protected def couchbaseSession: CouchbaseSession = clusterConnection.couchbaseSession

  override protected def beforeAll(): Unit = {
    clusterConnection = CouchbaseClusterConnection.connect().cleanUp()
    super.beforeAll()
  }

  override protected def afterAll(): Unit = {
    clusterConnection.close()
    super.afterAll()
  }
} 
Example 139
Source File: KafkaSpec.scala    From akka-stream-eventsourcing   with Apache License 2.0 5 votes vote down vote up
package com.github.krasserm.ases.log

import akka.testkit.TestKit
import org.scalatest.{BeforeAndAfterAll, Suite}

trait KafkaSpec extends BeforeAndAfterAll { this: TestKit with Suite =>
  override protected def beforeAll(): Unit = {
    super.beforeAll()
    KafkaServer.start()
  }

  override def afterAll(): Unit = {
    KafkaServer.stop()
    super.afterAll()
  }

  def host: String =
    "localhost"

  def port: Int =
    KafkaServer.kafkaPort
} 
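The self-type requires the mixing suite to also be a TestKit, so a consuming spec extends TestKit with its own ActorSystem. A minimal sketch (system name and assertions are illustrative; it starts a real embedded Kafka broker via KafkaServer):

import akka.actor.ActorSystem
import akka.testkit.TestKit
import com.github.krasserm.ases.log.KafkaSpec
import org.scalatest.WordSpecLike

class KafkaConnectivitySpec extends TestKit(ActorSystem("kafka-spec")) with WordSpecLike with KafkaSpec {
  "the embedded Kafka server" must {
    "expose its host and port to the test" in {
      assert(host == "localhost")
      assert(port > 0)
    }
  }
}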
Example 140
Source File: StreamSpec.scala    From akka-stream-eventsourcing   with Apache License 2.0 5 votes vote down vote up
package com.github.krasserm.ases

import akka.stream.ActorMaterializer
import akka.stream.scaladsl.{Flow, Keep}
import akka.stream.testkit.scaladsl.{TestSink, TestSource}
import akka.stream.testkit.{TestPublisher, TestSubscriber}
import akka.testkit.TestKit
import org.scalatest.{BeforeAndAfterAll, Suite}

import scala.collection.immutable.Seq

trait StreamSpec extends BeforeAndAfterAll { this: TestKit with Suite =>
  implicit val materializer = ActorMaterializer()

  val emitterId = "emitter"

  override def afterAll(): Unit = {
    materializer.shutdown()
    TestKit.shutdownActorSystem(system)
    super.afterAll()
  }

  def probes[I, O, M](flow: Flow[I, O, M]): (TestPublisher.Probe[I], TestSubscriber.Probe[O]) =
    TestSource.probe[I].viaMat(flow)(Keep.left).toMat(TestSink.probe[O])(Keep.both).run()

  def durables[A](emitted: Seq[Emitted[A]], offset: Int = 0): Seq[Durable[A]] =
    emitted.zipWithIndex.map { case (e, i) => e.durable(i + offset) }
} 
Example 141
Source File: ViewTestSupport.scala    From ddd-leaven-akka-v2   with MIT License 5 votes vote down vote up
package ecommerce.sales.view

import com.typesafe.config.Config
import org.scalatest.concurrent.ScalaFutures
import org.scalatest.time.{Millis, Seconds, Span}
import org.scalatest.{BeforeAndAfterAll, Suite}
import org.slf4j.Logger
import org.slf4j.LoggerFactory.getLogger
import pl.newicom.dddd.view.sql.SqlViewStore
import slick.dbio._

import scala.concurrent.ExecutionContext
import slick.jdbc.H2Profile

trait ViewTestSupport extends BeforeAndAfterAll with ScalaFutures {
  this: Suite =>

  def config: Config
  lazy val viewStore = new SqlViewStore(config)
  val log: Logger = getLogger(getClass)

  implicit val profile = H2Profile

  implicit class ViewStoreAction[A](a: DBIO[A])(implicit ex: ExecutionContext) {
    private val future = viewStore.run(a)

    def run(): Unit = future.map(_ => ()).futureValue
    def result: A = future.futureValue
  }

  def ensureSchemaDropped: DBIO[Unit]
  def ensureSchemaCreated: DBIO[Unit]

  implicit override val patienceConfig = PatienceConfig(
    timeout = scaled(Span(10, Seconds)),
    interval = scaled(Span(200, Millis))
  )

  override def beforeAll() {
    viewStore.run {
      ensureSchemaDropped >> ensureSchemaCreated
    }.futureValue

  }

} 
Example 142
Source File: ViewTestSupport.scala    From ddd-leaven-akka-v2   with MIT License 5 votes vote down vote up
package ecommerce.sales.view

import com.typesafe.config.Config
import org.scalatest.concurrent.ScalaFutures
import org.scalatest.time.{Millis, Seconds, Span}
import org.scalatest.{BeforeAndAfterAll, Suite}
import org.slf4j.Logger
import org.slf4j.LoggerFactory.getLogger
import pl.newicom.dddd.view.sql.SqlViewStore
import slick.dbio._
import slick.jdbc.H2Profile

import scala.concurrent.ExecutionContext

trait ViewTestSupport extends BeforeAndAfterAll with ScalaFutures {
  this: Suite =>

  def config: Config
  lazy val viewStore = new SqlViewStore(config)
  val log: Logger = getLogger(getClass)

  implicit val profile = H2Profile

  implicit override val patienceConfig = PatienceConfig(
    timeout = scaled(Span(5, Seconds)),
    interval = scaled(Span(200, Millis))
  )

  implicit class ViewStoreAction[A](a: DBIO[A])(implicit ex: ExecutionContext) {
    private val future = viewStore.run(a)

    def run(): Unit = future.map(_ => ()).futureValue
    def result: A = future.futureValue
  }

  def ensureSchemaDropped: DBIO[Unit]
  def ensureSchemaCreated: DBIO[Unit]

  override def beforeAll() {
    val setup = viewStore.run {
      ensureSchemaDropped >> ensureSchemaCreated
    }
    assert(setup.isReadyWithin(Span(5, Seconds)))

  }

} 
Example 143
Source File: S3Sandbox.scala    From redshift-fake-driver   with Apache License 2.0 5 votes vote down vote up
package jp.ne.opt.redshiftfake

import java.net.URI

import com.amazonaws.auth.{AWSCredentials, BasicAWSCredentials}
import com.amazonaws.regions.RegionUtils
import com.amazonaws.services.s3.AmazonS3Client
import org.gaul.s3proxy.{AuthenticationType, S3Proxy}
import org.jclouds.ContextBuilder
import org.jclouds.blobstore.BlobStoreContext
import org.scalatest.{BeforeAndAfterAll, Suite}

trait S3Sandbox extends BeforeAndAfterAll {this: Suite =>

  val dummyCredentials:  Credentials.WithKey
  val s3Endpoint: String

  var s3Proxy: S3Proxy = _

  override def beforeAll(): Unit = {
    val blobContext: BlobStoreContext = ContextBuilder
      .newBuilder("transient")
      .build(classOf[BlobStoreContext])

    s3Proxy = S3Proxy.builder
      .blobStore(blobContext.getBlobStore)
      .awsAuthentication(AuthenticationType.AWS_V4, dummyCredentials.accessKeyId, dummyCredentials.secretAccessKey)
      .endpoint(URI.create(s3Endpoint))
      .build
    s3Proxy.start()
  }

  override def afterAll(): Unit = {
    s3Proxy.stop()
  }

  def createS3Client(s3Region: String): AmazonS3Client = {
    val credentials: AWSCredentials = new BasicAWSCredentials(dummyCredentials.accessKeyId, dummyCredentials.secretAccessKey)
    val client = new AmazonS3Client(credentials)
    client.setRegion(RegionUtils.getRegion(s3Region))
    client.setEndpoint(s3Endpoint)

    client
  }
} 
Example 144
Source File: TestSupport.scala    From cedi-dtrace   with Apache License 2.0 5 votes vote down vote up
package com.ccadllc.cedi.dtrace
package logging

import cats.effect.{ IO, Sync }

import io.circe._
import io.circe.syntax._

import org.scalacheck.Arbitrary

import org.scalatest.Suite
import org.scalatest.matchers.should.Matchers
import org.scalatest.wordspec.AnyWordSpecLike

import org.scalatestplus.scalacheck.ScalaCheckPropertyChecks

import shapeless.Lazy

trait TestSupport extends AnyWordSpecLike with Matchers with ScalaCheckPropertyChecks with TraceGenerators with TestData {
  self: Suite =>

  override def testEmitter[F[_]: Sync]: F[TraceSystem.Emitter[F]] = Sync[F].pure(LogEmitter.apply)

  val salesManagementSystem = TraceSystem(testSystemData, testEmitter[IO].unsafeRunSync, TraceSystem.realTimeTimer[IO])
  val calculateQuarterlySalesTraceContext = TraceContext(quarterlySalesCalculationSpan, true, salesManagementSystem)

  def encodeGeneratedJson[A: Arbitrary](implicit encoder: Lazy[Encoder[A]]): Unit = {
    implicit val e = encoder.value
    "encode arbitrary instances to JSON" in {
      forAll { (msg: A) => msg.asJson.noSpaces should not be (None) }
    }
  }
  def encodeSpecificJson[A](a: A, json: Json)(implicit encoder: Lazy[Encoder[A]]): Unit = {
    implicit val e = encoder.value
    "encode specific instance to JSON and ensure it matches expected" in { a.asJson shouldBe json }
  }
} 
Example 145
Source File: BaseAlgorithmTest.scala    From m3d-engine   with Apache License 2.0 5 votes vote down vote up
package com.adidas.utils

import java.util.UUID

import com.adidas.analytics.util.{DFSWrapper, LoadMode}
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.Path
import org.apache.spark.sql.types.StructType
import org.scalatest.{BeforeAndAfterAll, BeforeAndAfterEach, Suite}
import org.slf4j.{Logger, LoggerFactory}

import scala.io.Source

trait BaseAlgorithmTest extends Suite with BeforeAndAfterAll with BeforeAndAfterEach with HDFSSupport with SparkSupport {

  override val logger: Logger = LoggerFactory.getLogger(getClass)
  override val testAppId: String = UUID.randomUUID().toString
  override val localTestDir: String = "target"
  override val sparkHadoopConf: Option[Configuration] = Some(fs.getConf)

  val hdfsRootTestPath: Path = new Path("hdfs:///tmp/tests")
  val dfs: DFSWrapper = DFSWrapper(spark.sparkContext.hadoopConfiguration)

  override def afterAll(): Unit = {
    spark.stop()
    cluster.shutdown(true)
  }

  override def beforeEach(): Unit = {
    fs.delete(hdfsRootTestPath, true)
    fs.mkdirs(hdfsRootTestPath)
  }

  override def afterEach(): Unit = {
    spark.sqlContext.clearCache()
    spark.sparkContext.getPersistentRDDs.foreach {
      case (_, rdd) => rdd.unpersist(true)
    }
  }

  def resolveResource(fileName: String, withProtocol: Boolean = false): String = {
    val resource = s"${getClass.getSimpleName}/$fileName"
    logger.info(s"Resolving resource $resource")
    val location = getClass.getClassLoader.getResource(resource).getPath
    if (withProtocol) {
      s"file://$location"
    } else {
      location
    }
  }

  def getResourceAsText(fileName: String): String = {
    val resource = s"${getClass.getSimpleName}/$fileName"
    logger.info(s"Reading resource $resource")
    val stream = getClass.getClassLoader.getResourceAsStream(resource)
    Source.fromInputStream(stream).mkString
  }

  def copyResourceFileToHdfs(resource: String, targetPath: Path): Unit = {
    val localResourceRoot = resolveResource("", withProtocol = true)
    val sourcePath = new Path(localResourceRoot, resource)
    logger.info(s"Copying local resource to HDFS $sourcePath -> $targetPath")
    fs.copyFromLocalFile(sourcePath, targetPath)
  }

  
  def createAndLoadParquetTable(database: String, tableName: String, partitionColumns: Option[Seq[String]] = None, schema: StructType, filePath: String, reader: FileReader): Table = {
    val table = createParquetTable(database, tableName, partitionColumns, schema)
    val inputTableDataURI = resolveResource(filePath, withProtocol = true)
    table.write(Seq(inputTableDataURI), reader, LoadMode.OverwritePartitions)
    table
  }
} 
Example 146
Source File: DockerTmpDB.scala    From akka-stream-extensions   with Apache License 2.0 5 votes vote down vote up
package com.mfglabs.stream
package extensions.postgres

import java.sql.{DriverManager, Connection}
import org.postgresql.util.PSQLException
import org.scalatest.{Suite, BeforeAndAfter}
import scala.sys.process._
import scala.util.{Failure, Success, Try}
import com.typesafe.config.ConfigFactory

trait DockerTmpDB extends BeforeAndAfter { self: Suite =>

  import Debug._

  val version: PostgresVersion = PostgresVersion(ConfigFactory.load().getString("postgres.version"))

  Class.forName("org.postgresql.Driver")
  implicit var conn : Connection = _

  val dockerInstances = collection.mutable.Buffer.empty[String]

  def newPGDB(): Int = {
    val port: Int = 5432 + (math.random * (10000 - 5432)).toInt
    Try {
      s"docker pull postgres:${version.value}".pp.!!.trim
      val containerId =
        s"""docker run -p $port:5432 -e POSTGRES_PASSWORD=pwd -d postgres:${version.value}""".pp.!!.trim
      dockerInstances += containerId.pp("New docker instance with id")
      port
    } match {
      case Success(p) => p
      case Failure(err) =>
        throw  new IllegalStateException(s"Error while trying to run docker container", err)
    }
  }

  lazy val dockerIp: String =
    Try("docker-machine ip default".!!.trim).toOption
      .orElse {
        val conf = ConfigFactory.load()
        if (conf.hasPath("docker.ip")) Some(conf.getString("docker.ip")) else None
      }
      .getOrElse("127.0.0.1") // platform dependent

  //ugly solution to wait for the connection to be ready
  def waitsForConnection(port : Int) : Connection = {
    try {
      DriverManager.getConnection(s"jdbc:postgresql://$dockerIp:$port/postgres", "postgres", "pwd")
    } catch {
      case _: PSQLException =>
        println("Retrying DB connection...")
        Thread.sleep(1000)
        waitsForConnection(port)
    }
  }

  before {
    val port = newPGDB()
    println(s"New postgres ${version.value} instance at port $port")
    Thread.sleep(5000)
    conn = waitsForConnection(port)
  }

  after {
    conn.close()
    dockerInstances.toSeq.foreach { dockerId =>
      s"docker stop $dockerId".pp.!!
      s"docker rm $dockerId".pp.!!
    }
  }

}

object Debug {

  implicit class RichString(s:String){
    def pp :String = pp(None)
    def pp(p:String) :String = pp(Some(p))

    private def pp(p:Option[String]) = {
      println(p.map(_ + " ").getOrElse("") + s)
      s
    }
  }
} 
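The trait exposes the JDBC connection as conn once the container is up, so a consuming suite can issue SQL directly. A minimal sketch (suite name and query are illustrative; like the trait itself, it assumes a local Docker daemon):

import com.mfglabs.stream.extensions.postgres.DockerTmpDB
import org.scalatest.{FlatSpec, Matchers}

class PostgresSmokeSpec extends FlatSpec with Matchers with DockerTmpDB {
  "the temporary Postgres instance" should "answer a trivial query" in {
    val statement = conn.createStatement()
    val resultSet = statement.executeQuery("SELECT 1")
    resultSet.next() shouldBe true
    resultSet.getInt(1) shouldBe 1
  }
}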
Example 147
Source File: LocalSparkContext.scala    From practical-data-science-with-hadoop-and-spark   with Apache License 2.0 5 votes vote down vote up
package dsbook.sentimentanalysis
import org.scalatest.Suite
import org.scalatest.BeforeAndAfterAll

import org.apache.spark.SparkContext

trait LocalSparkContext extends BeforeAndAfterAll { self: Suite =>
  @transient var sc: SparkContext = _

  override def beforeAll() {
    sc = new SparkContext("local", "test")
    super.beforeAll()
  }

  override def afterAll() {
    if (sc != null) {
      sc.stop()
    }
    System.clearProperty("spark.driver.port")
    super.afterAll()
  }
} 
Example 148
Source File: ElectrumxService.scala    From eclair   with Apache License 2.0 5 votes vote down vote up
package fr.acinq.eclair.blockchain.electrum

import com.spotify.docker.client.{DefaultDockerClient, DockerClient}
import com.whisk.docker.impl.spotify.SpotifyDockerFactory
import com.whisk.docker.scalatest.DockerTestKit
import com.whisk.docker.{DockerContainer, DockerFactory}
import fr.acinq.eclair.TestUtils
import fr.acinq.eclair.blockchain.bitcoind.BitcoindService
import org.scalatest.Suite

trait ElectrumxService extends DockerTestKit {
  self: Suite with BitcoindService =>

  val electrumPort = TestUtils.availablePort

  val electrumxContainer = if (System.getProperty("os.name").startsWith("Linux")) {
    // "host" mode will let the container access the host network on linux
    // we use our own docker image because other images on Docker lag behind and don't yet support 1.4
    DockerContainer("acinq/electrumx")
      .withNetworkMode("host")
      .withEnv(s"DAEMON_URL=http://foo:bar@localhost:$bitcoindRpcPort", "COIN=BitcoinSegwit", "NET=regtest", s"TCP_PORT=$electrumPort")
      //.withLogLineReceiver(LogLineReceiver(true, println))
  } else {
    // on windows or oxs, host mode is not available, but from docker 18.03 on host.docker.internal can be used instead
    // host.docker.internal is not (yet ?) available on linux though
    DockerContainer("acinq/electrumx")
      .withPorts(electrumPort -> Some(electrumPort))
      .withEnv(s"DAEMON_URL=http://foo:[email protected]:$bitcoindRpcPort", "COIN=BitcoinSegwit", "NET=regtest", s"TCP_PORT=$electrumPort")
      //.withLogLineReceiver(LogLineReceiver(true, println))
  }

  override def dockerContainers: List[DockerContainer] = electrumxContainer :: super.dockerContainers

  private val client: DockerClient = DefaultDockerClient.fromEnv().build()

  override implicit val dockerFactory: DockerFactory = new SpotifyDockerFactory(client)
} 
Example 149
Source File: MLlibTestSparkContext.scala    From dbscan-on-spark   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.mllib.util

import org.scalatest.Suite
import org.scalatest.BeforeAndAfterAll

import org.apache.spark.{SparkConf, SparkContext}

trait MLlibTestSparkContext extends BeforeAndAfterAll { self: Suite =>
  @transient var sc: SparkContext = _

  override def beforeAll() {
    super.beforeAll()
    val conf = new SparkConf()
      .setMaster("local[2]")
      .setAppName("MLlibUnitTest")
    sc = new SparkContext(conf)
  }

  override def afterAll() {
    if (sc != null) {
      sc.stop()
    }
    super.afterAll()
  }
} 
Example 150
Source File: UsesRedisServer.scala    From scala-commons   with MIT License 5 votes vote down vote up
package com.avsystem.commons
package redis

import org.scalatest.{BeforeAndAfterAll, Suite}

import scala.concurrent.Await
import scala.concurrent.duration._


trait UsesRedisServer extends BeforeAndAfterAll with RedisProcessUtils { this: Suite =>
  def port: Int = 7000
  def tlsPort: Int = 8000

  def address: NodeAddress = NodeAddress(port = port)
  def tlsAddress: NodeAddress = NodeAddress(port = tlsPort)

  var redisProcess: RedisProcess = _

  override protected def beforeAll(): Unit = {
    super.beforeAll()

    redisProcess = Await.result(
      launchRedis(
        "--daemonize", "no",
        "--port", port.toString,
        "--tls-port", tlsPort.toString,
        "--tls-cert-file", "./tls/redis.crt",
        "--tls-key-file", "./tls/redis.key",
        "--tls-ca-cert-file", "./tls/ca.crt"
      ),
      10.seconds
    )
  }

  override protected def afterAll(): Unit = {
    Await.result(shutdownRedis(redisProcess), 10.seconds)
    super.afterAll()
  }
} 
Example 151
Source File: RedisProcessUtils.scala    From scala-commons   with MIT License 5 votes vote down vote up
package com.avsystem.commons
package redis

import com.avsystem.commons.redis.commands.NodeId
import org.scalatest.Suite

import scala.sys.process._


trait RedisProcessUtils extends UsesActorSystem { this: Suite =>
  def redisHome: Opt[String] = sys.env.getOpt("REDIS_HOME")
  def inRedisHome(cmd: String): String = redisHome.fold(cmd)(_ + "/" + cmd)
  def password: Opt[String] = Opt.empty[String]
  def runCommand: List[String] =
    if (System.getProperty("os.name") == "Windows 10") List("ubuntu", "run") else Nil

  private val ReadyRegex = ".*Ready to accept connections.*".r
  private val NodeLogRegex = ".*Node configuration loaded, I'm ([0-9a-f]+)$".r
  private val SentinelIdRegex = ".*Sentinel ID is ([0-9a-f]+)$".r
  private val PidRegex = ".*pid=([0-9]+).*".r

  case class RedisProcess(process: Process, pid: Int, nodeId: Opt[NodeId])

  def launchRedis(arguments: String*): Future[RedisProcess] =
    launchRedis("redis-server", arguments)

  def launchSentinel(arguments: String*): Future[RedisProcess] =
    launchRedis("redis-sentinel", arguments)

  def launchRedis(executable: String, arguments: Seq[String]): Future[RedisProcess] = {
    val promise = Promise[Unit]()
    var pid = -1
    var nodeId: Opt[NodeId] = Opt.Empty
    val passArgs = password.fold(Seq.empty[String])(p => Seq("--requirepass", p))
    val process = (runCommand ++: inRedisHome(executable) +: arguments ++: passArgs).run(
      ProcessLogger { line =>
        actorSystem.log.debug(line)
        line match {
          case PidRegex(pidStr) =>
            pid = pidStr.toInt
          case NodeLogRegex(rawNodeId) =>
            nodeId = NodeId(rawNodeId).opt
          case SentinelIdRegex(rawSentinelId) =>
            nodeId = NodeId(rawSentinelId).opt
            promise.success(())
          case ReadyRegex() =>
            promise.success(())
          case _ =>
        }
      })
    promise.future.mapNow { _ =>
      if (pid < 0) {
        throw new IllegalStateException("Could not determine Redis process PID")
      }
      RedisProcess(process, pid, nodeId)
    }
  }

  def shutdownRedis(process: RedisProcess): Future[Unit] =
    SeparateThreadExecutionContext.submit {
      actorSystem.log.info(s"Killing Redis process ${process.pid}")
      (runCommand ++ List("kill", "-SIGKILL", process.pid.toString))
        .run(ProcessLogger(actorSystem.log.debug(_), actorSystem.log.error(_)))
      process.process.exitValue()
      ()
    }
} 
Example 152
Source File: UsesRedisConnectionClient.scala    From scala-commons   with MIT License 5 votes vote down vote up
package com.avsystem.commons
package redis

import com.avsystem.commons.redis.config.ConnectionConfig
import org.scalatest.Suite


trait UsesRedisConnectionClient extends UsesRedisServer with UsesActorSystem with UsesSslContext { this: Suite =>
  def useTls: Boolean = false

  def connectionConfig: ConnectionConfig =
    ConnectionConfig(sslEngineCreator = if (useTls) OptArg(() => sslContext.createSSLEngine()) else OptArg.Empty)

  var redisClient: RedisConnectionClient = _

  override protected def beforeAll(): Unit = {
    super.beforeAll()
    redisClient = new RedisConnectionClient(if (useTls) tlsAddress else address, connectionConfig)
  }

  override protected def afterAll(): Unit = {
    redisClient.close()
    super.afterAll()
  }
} 
Example 153
Source File: UsesRedisNodeClient.scala    From scala-commons   with MIT License 5 votes vote down vote up
package com.avsystem.commons
package redis

import com.avsystem.commons.redis.config.{ConnectionConfig, NodeConfig}
import org.scalatest.Suite


trait UsesRedisNodeClient extends UsesRedisServer with UsesActorSystem with UsesSslContext { this: Suite =>
  def useTls: Boolean = false

  def connectionConfig: ConnectionConfig =
    ConnectionConfig(sslEngineCreator = if (useTls) OptArg(() => sslContext.createSSLEngine()) else OptArg.Empty)

  def nodeConfig: NodeConfig = NodeConfig(
    connectionConfigs = _ => connectionConfig,
    blockingConnectionConfigs = _ => connectionConfig
  )

  var redisClient: RedisNodeClient = _

  override protected def beforeAll(): Unit = {
    super.beforeAll()
    redisClient = new RedisNodeClient(if (useTls) tlsAddress else address, nodeConfig)
  }

  override protected def afterAll(): Unit = {
    redisClient.close()
    super.afterAll()
  }
} 
Example 154
Source File: UsesMasterSlaveServers.scala    From scala-commons   with MIT License 5 votes vote down vote up
package com.avsystem.commons
package redis

import java.io.File

import org.apache.commons.io.FileUtils
import org.scalatest.{BeforeAndAfterAll, Suite}

import scala.concurrent.Await
import scala.concurrent.duration._


trait UsesMasterSlaveServers extends BeforeAndAfterAll with RedisProcessUtils { this: Suite =>

  val masterSlavePath: String = "masterSlave/" + System.currentTimeMillis()
  val masterSlaveDir: File = new File(masterSlavePath.replaceAllLiterally("/", File.separator))

  def masterName: String
  def ports: Seq[Int]
  def sentinelPorts: Seq[Int]

  lazy val addresses: Seq[NodeAddress] = ports.map(port => NodeAddress(port = port))
  lazy val sentinelAddresses: Seq[NodeAddress] = sentinelPorts.map(port => NodeAddress(port = port))

  var redisProcesses: Seq[RedisProcess] = _
  var sentinelProcesses: Seq[RedisProcess] = _

  protected def prepareDirectory(): Unit

  override protected def beforeAll(): Unit = {
    super.beforeAll()
    prepareDirectory()
    val processesFut = Future.traverse(ports)(port => launchRedis(
      "--port", port.toString,
      "--daemonize", "no",
      "--pidfile", "redis.pid",
      "--dbfilename", "dump.rdb",
      "--dir", s"$masterSlavePath/$port"
    ))
    val sentinelsFut = Future.traverse(sentinelPorts)(port => launchSentinel(
      s"$masterSlavePath/$port/sentinel.conf",
      "--port", port.toString,
      "--daemonize", "no",
      "--pidfile", "redis.pid",
      "--dir", s"$masterSlavePath/$port"
    ))
    redisProcesses = Await.result(processesFut, 10.seconds)
    sentinelProcesses = Await.result(sentinelsFut, 10.seconds)
  }

  override protected def afterAll(): Unit = {
    Await.result(Future.traverse(redisProcesses ++ sentinelProcesses)(shutdownRedis), 10.seconds)
    FileUtils.deleteDirectory(masterSlaveDir)
    super.afterAll()
  }
} 
Example 155
Source File: UsesFreshCluster.scala    From scala-commons   with MIT License 5 votes vote down vote up
package com.avsystem.commons
package redis

import java.io.File

import org.scalatest.Suite

import scala.concurrent.Await
import scala.concurrent.duration._


trait UsesFreshCluster extends UsesActorSystem with UsesClusterServers { this: Suite =>

  def ports: Range = 8000 to 8005
  def masterOf(idx: Int): Int = idx - (idx % 2)

  lazy val masterIndices: IIndexedSeq[Int] = ports.indices.filter(i => masterOf(i) == i)
  lazy val slotRanges: IIndexedSeq[Range] = ports.indices.map { i =>
    val masterNo = masterIndices.indexOf(masterOf(i))
    firstSlot(masterNo) until firstSlot(masterNo + 1)
  }

  protected def prepareDirectory(): Unit = {
    ports.foreach { port =>
      new File(clusterDir, port.toString).mkdirs()
    }
  }

  def firstSlot(masterNo: Int): Int = masterNo * Hash.TotalSlots / masterIndices.size

  override protected def beforeAll(): Unit = {
    super.beforeAll()

    val clients = addresses.map(addr => new RedisConnectionClient(addr))
    val commands = clients.map(client => RedisApi.Connection.Async.BinaryTyped(client))

    val initFuture = for {
      _ <- Future.traverse(commands.tail)(_.clusterMeet(addresses.head))
      _ <- Future.traverse(commands)(c => waitUntil(c.clusterInfo.map(_.knownNodes >= addresses.size), 500.millis))
      nodeIds <- commands.head.clusterNodes.map(_.sortBy(_.address.port).map(_.id))
      _ <- Future.traverse(commands.zipWithIndex) {
        case (c, i) => masterOf(i) match {
          case `i` => c.clusterAddslots(slotRanges(i))
          case mi => c.clusterReplicate(nodeIds(mi))
        }
      }
      _ <- Future.traverse(commands)(c => waitUntil(c.clusterInfo.map(_.stateOk), 500.millis))
    } yield ()

    Await.result(initFuture, 30.seconds)

    clients.foreach(_.close())
  }
} 
Example 156
Source File: UsesClusterServers.scala    From scala-commons   with MIT License 5 votes vote down vote up
package com.avsystem.commons
package redis

import java.io.File

import org.apache.commons.io.FileUtils
import org.scalatest.{BeforeAndAfterAll, Suite}

import scala.concurrent.Await
import scala.concurrent.duration._


trait UsesClusterServers extends BeforeAndAfterAll with RedisProcessUtils { this: Suite =>

  val clusterPath: String = "cluster/" + System.currentTimeMillis()
  val clusterDir: File = new File(clusterPath.replaceAllLiterally("/", File.separator))

  def ports: Seq[Int]

  lazy val addresses: Seq[NodeAddress] = ports.map(port => NodeAddress(port = port))
  var redisProcesses: Seq[RedisProcess] = _

  protected def prepareDirectory(): Unit

  protected def slotKey(slot: Int): String = ClusterUtils.SlotKeys(slot)

  override protected def beforeAll(): Unit = {
    super.beforeAll()
    prepareDirectory()
    redisProcesses = Await.result(Future.traverse(ports)(port => launchRedis(
      "--port", port.toString,
      "--daemonize", "no",
      "--pidfile", "redis.pid",
      "--dbfilename", "dump.rdb",
      "--dir", s"$clusterPath/$port",
      "--appendonly", "yes",
      "--appendfilename", "appendonly.aof",
      "--cluster-enabled", "yes",
      "--cluster-config-file", "nodes.conf"
    )), 10.seconds)
  }

  override protected def afterAll(): Unit = {
    Await.result(Future.traverse(redisProcesses)(shutdownRedis), 10.seconds)
    FileUtils.deleteDirectory(clusterDir)
    super.afterAll()
  }
} 
Example 157
Source File: CommunicationLogging.scala    From scala-commons   with MIT License 5 votes vote down vote up
package com.avsystem.commons
package redis

import org.scalatest.{BeforeAndAfterEach, Suite}


trait CommunicationLogging extends BeforeAndAfterEach { this: Suite =>
  protected val listener = new TestDebugListener

  protected def assertCommunication(comm: String): Unit = {
    assert(listener.result().trim == comm.trim)
  }

  override protected def beforeEach() = {
    super.beforeEach()
    listener.clear()
  }
} 
Example 158
Source File: UsesRedisMasterSlaveClient.scala    From scala-commons   with MIT License 5 votes vote down vote up
package com.avsystem.commons
package redis

import com.avsystem.commons.redis.config.MasterSlaveConfig
import org.scalatest.Suite


trait UsesRedisMasterSlaveClient extends UsesMasterSlaveServers with UsesActorSystem { this: Suite =>
  def masterSlaveConfig: MasterSlaveConfig = MasterSlaveConfig()
  def seedSentinels: Seq[NodeAddress] = sentinelAddresses.take(1)

  var redisClient: RedisMasterSlaveClient = _

  def switchMaster(): Future[Unit] = {
    val client = new RedisConnectionClient(seedSentinels.head)
    val api = RedisApi.Connection.Async.StringTyped(client)
    val result = api.sentinelFailover(masterName)
    result.onCompleteNow(_ => client.close())
    result
  }

  override protected def beforeAll(): Unit = {
    super.beforeAll()
    redisClient = new RedisMasterSlaveClient(masterName, seedSentinels, masterSlaveConfig)
  }

  override protected def afterAll(): Unit = {
    redisClient.close()
    super.afterAll()
  }
} 
Example 159
Source File: UsesRedisClusterClient.scala    From scala-commons   with MIT License 5 votes vote down vote up
package com.avsystem.commons
package redis

import com.avsystem.commons.redis.commands.SetslotCmd.{Importing, Migrating, Node}
import com.avsystem.commons.redis.config.ClusterConfig
import org.scalatest.Suite


trait UsesRedisClusterClient extends UsesClusterServers with UsesActorSystem { this: Suite =>
  def clusterConfig: ClusterConfig = ClusterConfig()
  def seedNodes: Seq[NodeAddress] = addresses.take(1)

  var redisClient: RedisClusterClient = _

  protected def migrateSlot(slot: Int, targetNodeSlot: Int, incomplete: Boolean = false, withoutData: Boolean = false): Future[Unit] =
    redisClient.initialized.flatMapNow { client =>
      val state = client.currentState
      val sourceClient = state.clientForSlot(slot)
      val targetClient = state.clientForSlot(targetNodeSlot)
      if (sourceClient != targetClient) {
        val sourceApi = RedisApi.Node.Async.BinaryTyped(sourceClient)
        val targetApi = RedisApi.Node.Async.BinaryTyped(targetClient)
        val sourceIdFut = sourceApi.clusterMyid
        val targetIdFut = targetApi.clusterMyid
        val keysToMigrateFut =
          if (withoutData) Future.successful(Seq.empty)
          else for {
            keyCount <- sourceApi.clusterCountkeysinslot(slot)
            keys <- sourceApi.clusterGetkeysinslot(slot, keyCount.toInt)
          } yield keys
        for {
          sourceId <- sourceIdFut
          targetId <- targetIdFut
          _ <- targetApi.clusterSetslot(slot, Importing(sourceId))
          _ <- sourceApi.clusterSetslot(slot, Migrating(targetId))
          keys <- keysToMigrateFut
          _ <- if (keys.nonEmpty) sourceApi.migrate(keys, targetClient.address, 0, Int.MaxValue) else Future.successful(())
          _ <- {
            if (incomplete) Future.successful(())
            else Future.traverse(List(sourceApi, targetApi))(_.clusterSetslot(slot, Node(targetId)))
          }
        } yield ()
      } else Future.successful(())
    }

  override protected def beforeAll(): Unit = {
    super.beforeAll()
    redisClient = new RedisClusterClient(seedNodes, clusterConfig)
  }

  override protected def afterAll(): Unit = {
    redisClient.close()
    super.afterAll()
  }
} 
Example 160
Source File: UsesActorSystem.scala    From scala-commons   with MIT License 5 votes vote down vote up
package com.avsystem.commons
package redis

import akka.actor.ActorSystem
import org.scalatest.concurrent.PatienceConfiguration
import org.scalatest.time.{Milliseconds, Seconds, Span}
import org.scalatest.{BeforeAndAfterAll, Suite}

import scala.concurrent.Await
import scala.concurrent.duration._


trait UsesActorSystem extends BeforeAndAfterAll with PatienceConfiguration { this: Suite =>
  implicit lazy val actorSystem: ActorSystem = ActorSystem()
  implicit def executionContext: ExecutionContext = actorSystem.dispatcher

  override implicit def patienceConfig: PatienceConfig =
    PatienceConfig(scaled(Span(10, Seconds)), scaled(Span(10, Milliseconds)))

  override protected def afterAll(): Unit = {
    Await.ready(actorSystem.terminate(), Duration.Inf)
    super.afterAll()
  }

  def wait(duration: FiniteDuration): Future[Unit] =
    if (duration == Duration.Zero) Future.successful(()) else {
      val promise = Promise[Unit]()
      actorSystem.scheduler.scheduleOnce(duration)(promise.success(()))
      promise.future
    }

  def waitUntil(predicate: => Future[Boolean], retryInterval: FiniteDuration): Future[Unit] =
    predicate.flatMap { r =>
      if (r) Future.successful(())
      else wait(retryInterval).flatMap(_ => waitUntil(predicate, retryInterval))
    }

  def waitFor[T](future: => Future[T])(condition: T => Boolean, retryInterval: FiniteDuration): Future[T] =
    future.flatMap { value =>
      if (condition(value)) Future.successful(value)
      else wait(retryInterval).flatMap(_ => waitFor(future)(condition, retryInterval))
    }
} 
Example 161
Source File: FlinkTestBase.scala    From flink-tensorflow   with Apache License 2.0 5 votes vote down vote up
package org.apache.flink.contrib.tensorflow.util

import org.apache.flink.runtime.minicluster.LocalFlinkMiniCluster
import org.apache.flink.streaming.util.TestStreamEnvironment
import org.apache.flink.test.util.TestBaseUtils
import org.junit.rules.TemporaryFolder
import org.scalatest.{BeforeAndAfter, Suite}

// Copied from Apache Flink.


trait FlinkTestBase extends BeforeAndAfter {
  that: Suite =>

  var cluster: Option[LocalFlinkMiniCluster] = None
  val parallelism = 4

  protected val tempFolder = new TemporaryFolder()

  before {
    tempFolder.create()
    val cl = TestBaseUtils.startCluster(
      1,
      parallelism,
      false,
      false,
      true)

    TestStreamEnvironment.setAsContext(cl, parallelism)

    cluster = Some(cl)
  }

  after {
    TestStreamEnvironment.unsetAsContext()
    cluster.foreach(c => TestBaseUtils.stopCluster(c, TestBaseUtils.DEFAULT_TIMEOUT))
    tempFolder.delete()
  }

} 
Example 162
Source File: SkinnySpecSupport.scala    From scala-ddd-base   with MIT License 5 votes vote down vote up
package com.github.j5ik2o.dddbase.example.repository.util

import org.scalatest.{ BeforeAndAfter, BeforeAndAfterAll, Suite }
import scalikejdbc.config.DBs
import scalikejdbc.{ ConnectionPool, GlobalSettings, LoggingSQLAndTimeSettings }

trait SkinnySpecSupport extends BeforeAndAfter with BeforeAndAfterAll with JdbcSpecSupport {
  self: Suite with FlywayWithMySQLSpecSupport =>

  override protected def beforeAll(): Unit = {
    super.beforeAll()
    Class.forName("com.mysql.jdbc.Driver")
    ConnectionPool.singleton(s"jdbc:mysql://localhost:${jdbcPort}/dddbase?useSSL=false", "dddbase", "dddbase")
    GlobalSettings.loggingSQLAndTime = LoggingSQLAndTimeSettings(
      enabled = true,
      logLevel = 'DEBUG,
      warningEnabled = true,
      warningThresholdMillis = 1000L,
      warningLogLevel = 'WARN
    )
  }

  override protected def afterAll(): Unit = {
    DBs.closeAll()
    super.afterAll()
  }

} 
Example 163
Source File: Slick3SpecSupport.scala    From scala-ddd-base   with MIT License 5 votes vote down vote up
package com.github.j5ik2o.dddbase.example.repository.util

import com.typesafe.config.ConfigFactory
import org.scalatest.concurrent.ScalaFutures
import org.scalatest.{ BeforeAndAfter, BeforeAndAfterAll, Suite }
import slick.basic.DatabaseConfig
import slick.jdbc.SetParameter.SetUnit
import slick.jdbc.{ JdbcProfile, SQLActionBuilder }

import scala.concurrent.Future

trait Slick3SpecSupport extends BeforeAndAfter with BeforeAndAfterAll with ScalaFutures with JdbcSpecSupport {
  self: Suite with FlywayWithMySQLSpecSupport =>

  private var _dbConfig: DatabaseConfig[JdbcProfile] = _

  private var _profile: JdbcProfile = _

  protected def dbConfig = _dbConfig

  protected def profile = _profile

  after {
    implicit val ec = dbConfig.db.executor.executionContext
    val futures = tables.map { table =>
      val q = SQLActionBuilder(List(s"TRUNCATE TABLE $table"), SetUnit).asUpdate
      dbConfig.db.run(q)
    }
    Future.sequence(futures).futureValue
  }

  override protected def beforeAll(): Unit = {
    super.beforeAll()
    val config = ConfigFactory.parseString(s"""
         |dddbase {
         |  profile = "slick.jdbc.MySQLProfile$$"
         |  db {
         |    connectionPool = disabled
         |    driver = "com.mysql.jdbc.Driver"
         |    url = "jdbc:mysql://localhost:$jdbcPort/dddbase?useSSL=false"
         |    user = "dddbase"
         |    password = "dddbase"
         |  }
         |}
      """.stripMargin)
    _dbConfig = DatabaseConfig.forConfig[JdbcProfile]("dddbase", config)
    _profile = dbConfig.profile
  }

  override protected def afterAll(): Unit = {
    dbConfig.db.shutdown
    super.afterAll()
  }

} 
Example 164
Source File: ElasticsearchIntegrationTest.scala    From elasticsearch-client   with Apache License 2.0 5 votes vote down vote up
package com.sumologic.elasticsearch.restlastic

import com.sumologic.elasticsearch.restlastic.RestlasticSearchClient.ReturnTypes
import com.sumologic.elasticsearch.restlastic.dsl.Dsl._
import org.junit.runner.RunWith
import org.scalatest.concurrent.ScalaFutures
import org.scalatest.time.{Millis, Span}
import org.scalatest.{BeforeAndAfterAll, Suite}
import org.scalatestplus.junit.JUnitRunner

import scala.util.{Random, Try}



@RunWith(classOf[JUnitRunner])
trait ElasticsearchIntegrationTest extends BeforeAndAfterAll with ScalaFutures {
  this: Suite =>
  private val indexPrefix = "test-index"

  def restClient: RestlasticSearchClient

  val IndexName = s"$indexPrefix-${math.abs(Random.nextLong())}"

  protected def createIndices(cnt: Int = 1): IndexedSeq[Index] = {
    (1 to cnt).map(idx => {
      val index = dsl.Dsl.Index(s"${IndexName}-${idx}")
      val analyzerName = Name("keyword_lowercase")
      val lowercaseAnalyzer = Analyzer(analyzerName, Keyword, Lowercase)
      val notAnalyzed = Analyzer(Name("not_analyzed"), Keyword)
      val analyzers = Analyzers(
        AnalyzerArray(lowercaseAnalyzer, notAnalyzed),
        FilterArray(),
        NormalizerArray(Normalizer(Name("lowercase"), Lowercase)))
      val indexSetting = IndexSetting(12, 1, analyzers, 30)
      val indexFut = restClient.createIndex(index, Some(indexSetting))
      indexFut.futureValue
      index
    })
  }

  override def beforeAll(): Unit = {
    super.beforeAll()
    Try(delete(Index(s"$indexPrefix*")))
  }

  override def afterAll(): Unit = {
    Try(delete(Index(s"$indexPrefix*")))
    super.afterAll()
  }

  private def delete(index: Index): ReturnTypes.RawJsonResponse = {
    implicit val patienceConfig = PatienceConfig(scaled(Span(1500, Millis)), scaled(Span(15, Millis)))
    restClient.deleteIndex(index).futureValue
  }
} 
Example 165
Source File: DirectWriting.scala    From akka-persistence-cassandra   with Apache License 2.0 5 votes vote down vote up
package akka.persistence.cassandra.query

import java.nio.ByteBuffer

import akka.actor.ActorSystem
import akka.persistence.PersistentRepr
import akka.persistence.cassandra.Hour
import akka.persistence.cassandra.PluginSettings
import akka.persistence.cassandra.journal.CassandraJournalStatements
import akka.persistence.cassandra.journal.TimeBucket
import akka.serialization.SerializationExtension
import akka.serialization.Serializers
import com.datastax.oss.driver.api.core.CqlSession
import com.datastax.oss.driver.api.core.uuid.Uuids
import org.scalatest.BeforeAndAfterAll
import org.scalatest.Suite

trait DirectWriting extends BeforeAndAfterAll {
  self: Suite =>

  def system: ActorSystem
  private lazy val serialization = SerializationExtension(system)
  private lazy val settings = PluginSettings(system)

  def cluster: CqlSession

  private lazy val writeStatements: CassandraJournalStatements = new CassandraJournalStatements(settings)

  private lazy val preparedWriteMessage = cluster.prepare(writeStatements.writeMessage(withMeta = false))

  private lazy val preparedDeleteMessage = cluster.prepare(writeStatements.deleteMessage)

  protected def writeTestEvent(persistent: PersistentRepr, partitionNr: Long = 1L): Unit = {
    val event = persistent.payload.asInstanceOf[AnyRef]
    val serializer = serialization.findSerializerFor(event)
    val serialized = ByteBuffer.wrap(serialization.serialize(event).get)
    val nowUuid = Uuids.timeBased()
    val now = Uuids.unixTimestamp(nowUuid)
    val serManifest = Serializers.manifestFor(serializer, persistent)

    val bs = preparedWriteMessage
      .bind()
      .setString("persistence_id", persistent.persistenceId)
      .setLong("partition_nr", partitionNr)
      .setLong("sequence_nr", persistent.sequenceNr)
      .setUuid("timestamp", nowUuid)
      .setString("timebucket", TimeBucket(now, Hour).key.toString)
      .setInt("ser_id", serializer.identifier)
      .setString("ser_manifest", serManifest)
      .setString("event_manifest", persistent.manifest)
      .setByteBuffer("event", serialized)
    cluster.execute(bs)
    system.log.debug("Directly wrote payload [{}] for entity [{}]", persistent.payload, persistent.persistenceId)
  }

  protected def deleteTestEvent(persistent: PersistentRepr, partitionNr: Long = 1L): Unit = {

    val bs = preparedDeleteMessage
      .bind()
      .setString("persistence_id", persistent.persistenceId)
      .setLong("partition_nr", partitionNr)
      .setLong("sequence_nr", persistent.sequenceNr)
    cluster.execute(bs)
    system.log.debug("Directly deleted payload [{}] for entity [{}]", persistent.payload, persistent.persistenceId)
  }

} 
Example 166
Source File: ReconnectSpec.scala    From akka-persistence-cassandra   with Apache License 2.0 5 votes vote down vote up
package akka.persistence.cassandra

import java.io.File

import akka.actor.{ ActorSystem, Props }
import akka.persistence.cassandra.CassandraLifecycle.AwaitPersistenceInit
import akka.persistence.cassandra.testkit.CassandraLauncher
import akka.testkit.{ ImplicitSender, SocketUtil, TestKit }
import com.typesafe.config.ConfigFactory
import org.scalatest.Suite
import org.scalatest.concurrent.ScalaFutures
import org.scalatest.matchers.should.Matchers
import org.scalatest.wordspec.AnyWordSpecLike

object ReconnectSpec {
  val freePort = SocketUtil.temporaryLocalPort()
  val config = ConfigFactory.parseString(s"""
      datastax-java-driver {
        basic.load-balancing-policy.local-datacenter = "datacenter1"
        // Will fail without this setting 
        advanced.reconnect-on-init = true      
        basic.contact-points = ["127.0.0.1:$freePort"]
      }
      """).withFallback(CassandraLifecycle.config)
}

// not using Cassandra Spec
class ReconnectSpec
    extends TestKit(ActorSystem("ReconnectSpec", ReconnectSpec.config))
    with Suite
    with ImplicitSender
    with AnyWordSpecLike
    with Matchers
    with ScalaFutures {

  "Reconnecting" must {
    "start with system off" in {
      val pa = system.actorOf(Props(new AwaitPersistenceInit("pid", "", "")))
      pa ! "hello"
      expectNoMessage()

      CassandraLauncher.start(
        new File("target/ReconnectSpec"),
        configResource = CassandraLauncher.DefaultTestConfigResource,
        clean = true,
        port = ReconnectSpec.freePort,
        CassandraLauncher.classpathForResources("logback-test.xml"))

      try {
        CassandraLifecycle.awaitPersistenceInit(system)
      } finally {
        CassandraLauncher.stop()
      }

    }
  }

} 
Example 167
Source File: EmbeddedKafkaServer.scala    From KafkaPlayground   with GNU General Public License v3.0 5 votes vote down vote up
package com.github.pedrovgs.kafkaplayground.utils

import cakesolutions.kafka.KafkaProducerRecord
import cakesolutions.kafka.testkit.KafkaServer
import org.apache.kafka.common.serialization.{StringDeserializer, StringSerializer}
import org.scalatest.{BeforeAndAfter, Suite}

import scala.concurrent.duration._

trait EmbeddedKafkaServer extends BeforeAndAfter {
  this: Suite =>

  private var kafkaServer: KafkaServer = _

  before {
    kafkaServer = new KafkaServer
    startKafkaServer()
  }

  after {
    stopKafkaServer()
  }

  def startKafkaServer(): Unit = kafkaServer.startup()

  def stopKafkaServer(): Unit = kafkaServer.close()

  def kafkaServerAddress(): String = s"localhost:${kafkaServer.kafkaPort}"

  def zookeeperServerAddress(): String = s"localhost:${kafkaServer.zookeeperPort}"

  def recordsForTopic(topic: String, expectedNumberOfRecords: Int = 1): Iterable[String] =
    kafkaServer
      .consume[String, String](
        topic = topic,
        keyDeserializer = new StringDeserializer,
        valueDeserializer = new StringDeserializer,
        expectedNumOfRecords = expectedNumberOfRecords,
        timeout = 10.seconds.toMillis
      )
      .map(_._2)

  def produceMessage(topic: String, content: String): Unit =
    kafkaServer.produce(
      topic = topic,
      records = Seq(KafkaProducerRecord[String, String](topic = topic, value = content)),
      keySerializer = new StringSerializer(),
      valueSerializer = new StringSerializer()
    )

} 
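A minimal sketch of how such a trait is typically mixed into a suite; the spec name, topic, and the ScalaTest 3.1-style imports are illustrative assumptions rather than code from the project above.

import com.github.pedrovgs.kafkaplayground.utils.EmbeddedKafkaServer
import org.scalatest.flatspec.AnyFlatSpec
import org.scalatest.matchers.should.Matchers

class KafkaRoundTripSpec extends AnyFlatSpec with Matchers with EmbeddedKafkaServer {

  "the embedded broker" should "round-trip a message" in {
    // The broker is started by the `before` hook of EmbeddedKafkaServer.
    produceMessage("greetings", "hello")
    recordsForTopic("greetings", expectedNumberOfRecords = 1) should contain("hello")
  }
}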
Example 168
Source File: SeleniumTestContainerSuite.scala    From testcontainers-scala   with MIT License 5 votes vote down vote up
package com.dimafeng.testcontainers

import java.io.File
import java.net.URL
import java.util.Optional

import com.dimafeng.testcontainers.lifecycle.TestLifecycleAware
import org.openqa.selenium.WebDriver
import org.openqa.selenium.remote.{DesiredCapabilities, RemoteWebDriver}
import org.scalatest.Suite
import org.testcontainers.containers.BrowserWebDriverContainer
import org.testcontainers.lifecycle.TestDescription

trait SeleniumTestContainerSuite extends ForEachTestContainer {
  self: Suite =>

  def desiredCapabilities: DesiredCapabilities

  def recordingMode: (BrowserWebDriverContainer.VncRecordingMode, File) = null

  val container = SeleniumContainer(desiredCapabilities, recordingMode)

  implicit def webDriver: WebDriver = container.webDriver
}

class SeleniumContainer(desiredCapabilities: Option[DesiredCapabilities] = None,
                        recordingMode: Option[(BrowserWebDriverContainer.VncRecordingMode, File)] = None)
  extends SingleContainer[BrowserWebDriverContainer[_]] with TestLifecycleAware {
  require(desiredCapabilities.isDefined, "'desiredCapabilities' is required parameter")

  override val container: BrowserWebDriverContainer[_] = new BrowserWebDriverContainer()
  desiredCapabilities.foreach(container.withDesiredCapabilities)
  recordingMode.foreach(Function.tupled(container.withRecordingMode))

  def password: String = container.getPassword

  def port: Int = container.getPort

  def seleniumAddress: URL = container.getSeleniumAddress

  def vncAddress: String = container.getVncAddress

  def webDriver: RemoteWebDriver = container.getWebDriver

  override def afterTest(description: TestDescription, throwable: Option[Throwable]): Unit = {
    val javaThrowable: Optional[Throwable] = throwable match {
      case Some(error) => Optional.of(error)
      case None => Optional.empty()
    }
    container.afterTest(description, javaThrowable)
  }
}

object SeleniumContainer {
  def apply(desiredCapabilities: DesiredCapabilities = null, recordingMode: (BrowserWebDriverContainer.VncRecordingMode, File) = null): SeleniumContainer =
    new SeleniumContainer(Option(desiredCapabilities), Option(recordingMode))
} 
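A possible concrete suite built on this trait; the capabilities choice and the page under test are assumptions for illustration.

import com.dimafeng.testcontainers.SeleniumTestContainerSuite
import org.openqa.selenium.remote.DesiredCapabilities
import org.scalatest.flatspec.AnyFlatSpec

class ExamplePageSpec extends AnyFlatSpec with SeleniumTestContainerSuite {

  // Required by the trait; chrome() is just one possible choice of browser.
  override def desiredCapabilities: DesiredCapabilities = DesiredCapabilities.chrome()

  "the containerised browser" should "load a page" in {
    webDriver.get("https://example.org")
    assert(webDriver.getTitle.nonEmpty)
  }
}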
Example 169
Source File: TestContainersSuite.scala    From testcontainers-scala   with MIT License 5 votes vote down vote up
package com.dimafeng.testcontainers.scalatest

import com.dimafeng.testcontainers.TestContainers
import com.dimafeng.testcontainers.lifecycle.{Andable, TestLifecycleAware}
import org.scalatest.{Suite, SuiteMixin}


trait TestContainersSuite extends SuiteMixin { self: Suite =>

  // Concrete suites define which containers to run and how to start them
  // (see TestContainersForAll and TestContainersForEach below).
  type Containers <: Andable

  def startContainers(): Containers

  // Gives tests access to the running containers; fails fast when called before they are started.
  def withContainers[A](runTest: Containers => A): A =
    runTest(startedContainers.getOrElse(throw IllegalWithContainersCall()))

  def afterContainersStart(containers: Containers): Unit = {}

  def beforeContainersStop(containers: Containers): Unit = {}

  @volatile private[testcontainers] var startedContainers: Option[Containers] = None

  private val suiteDescription = createDescription(self)

  private[testcontainers] def beforeTest(containers: Containers): Unit = {
    containers.foreach {
      case container: TestLifecycleAware => container.beforeTest(suiteDescription)
      case _ => // do nothing
    }
  }

  private[testcontainers] def afterTest(containers: Containers, throwable: Option[Throwable]): Unit = {
    containers.foreach {
      case container: TestLifecycleAware => container.afterTest(suiteDescription, throwable)
      case _ => // do nothing
    }
  }
}

case class IllegalWithContainersCall() extends IllegalStateException(
  "'withContainers' method can't be used before all containers are started. " +
    "'withContainers' method should be used only in test cases to prevent this."
) 
Example 170
Source File: TestContainersForAll.scala    From testcontainers-scala   with MIT License 5 votes vote down vote up
package com.dimafeng.testcontainers.scalatest

import org.scalatest.{Args, CompositeStatus, Status, Suite}


trait TestContainersForAll extends TestContainersSuite { self: Suite =>

  abstract override def run(testName: Option[String], args: Args): Status = {
    if (expectedTestCount(args.filter) == 0) {
      new CompositeStatus(Set.empty)
    } else {
      val containers = startContainers()
      startedContainers = Some(containers)
      try {
        afterContainersStart(containers)
        super.run(testName, args)
      } finally {
        try {
          beforeContainersStop(containers)
        }
        finally {
          try {
            startedContainers.foreach(_.stop())
          }
          finally {
            startedContainers = None
          }
        }
      }
    }
  }

  abstract protected override def runTest(testName: String, args: Args): Status = {
    @volatile var testCalled = false
    @volatile var afterTestCalled = false

    try {
      startedContainers.foreach(beforeTest)

      testCalled = true
      val status = super.runTest(testName, args)

      afterTestCalled = true
      if (!status.succeeds()) {
        val err = new RuntimeException("Test failed")
        startedContainers.foreach(afterTest(_, Some(err)))
      } else {
        startedContainers.foreach(afterTest(_, None))
      }

      status
    }
    catch {
      case e: Throwable =>
        if (testCalled && !afterTestCalled) {
          afterTestCalled = true
          startedContainers.foreach(afterTest(_, Some(e)))
        }

        throw e
    }
  }
} 
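A sketch of a suite using TestContainersForAll; it assumes the library's standard MySQLContainer/PostgreSQLContainer definitions and the `and` combinator, which are not part of the listing above.

import com.dimafeng.testcontainers.{MySQLContainer, PostgreSQLContainer}
import com.dimafeng.testcontainers.lifecycle.and
import com.dimafeng.testcontainers.scalatest.TestContainersForAll
import org.scalatest.flatspec.AnyFlatSpec

class TwoDatabasesSpec extends AnyFlatSpec with TestContainersForAll {

  // Both containers are started once, before the first test, and stopped after the last one.
  override type Containers = MySQLContainer and PostgreSQLContainer

  override def startContainers(): Containers = {
    val mysql    = MySQLContainer.Def().start()
    val postgres = PostgreSQLContainer.Def().start()
    mysql and postgres
  }

  "both containers" should "expose a JDBC url" in withContainers { case mysql and postgres =>
    assert(mysql.jdbcUrl.nonEmpty)
    assert(postgres.jdbcUrl.nonEmpty)
  }
}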
Example 171
Source File: TestContainersForEach.scala    From testcontainers-scala   with MIT License 5 votes vote down vote up
package com.dimafeng.testcontainers.scalatest

import org.scalatest.{Args, Status, Suite}


trait TestContainersForEach extends TestContainersSuite { self: Suite =>

  abstract protected override def runTest(testName: String, args: Args): Status = {
    val containers = startContainers()
    startedContainers = Some(containers)

    @volatile var testCalled = false
    @volatile var afterTestCalled = false

    try {
      afterContainersStart(containers)
      beforeTest(containers)

      testCalled = true
      val status = super.runTest(testName, args)

      afterTestCalled = true
      if (!status.succeeds()) {
        val err = new RuntimeException("Test failed")
        startedContainers.foreach(afterTest(_, Some(err)))
      } else {
        startedContainers.foreach(afterTest(_, None))
      }

      status
    }
    catch {
      case e: Throwable =>
        if (testCalled && !afterTestCalled) {
          afterTestCalled = true
          afterTest(containers, Some(e))
        }

        throw e
    }
    finally {
      try {
        beforeContainersStop(containers)
      }
      finally {
        try {
          startedContainers.foreach(_.stop())
        }
        finally {
          startedContainers = None
        }
      }
    }
  }
} 
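The per-test variant is used the same way; the sketch below assumes the library's PostgreSQLContainer definition and only differs from the for-all case in when containers are created.

import com.dimafeng.testcontainers.PostgreSQLContainer
import com.dimafeng.testcontainers.scalatest.TestContainersForEach
import org.scalatest.flatspec.AnyFlatSpec

class FreshDatabasePerTestSpec extends AnyFlatSpec with TestContainersForEach {

  override type Containers = PostgreSQLContainer

  // A new container is started before each test and stopped right after it.
  override def startContainers(): Containers = PostgreSQLContainer.Def().start()

  "every test" should "get its own database" in withContainers { postgres =>
    assert(postgres.jdbcUrl.nonEmpty)
  }
}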
Example 172
Source File: EsCasKafkaZookeeperDockerSuite.scala    From CM-Well   with Apache License 2.0 5 votes vote down vote up
package cmwell.util.testSuitHelpers.test

import com.dimafeng.testcontainers.ForAllTestContainer
import org.scalatest.Suite

trait EsCasKafkaZookeeperDockerSuite extends ForAllTestContainer { this:Suite =>
  def cassandraVersion: String
  def zookeeperVersion: String
  def kafkaVersion: String
  def elasticsearchVersion: String

  val elasticsearchContainer = ContainerHelpers.elasticsearch(elasticsearchVersion)
  val cassandraContainer = ContainerHelpers.cassandra(cassandraVersion)
  val KafkaZookeeperContainers(kafkaContainer, zookeeperContainer, kafkaZooCombined) = ContainerHelpers.kafkaAndZookeeper(kafkaVersion, zookeeperVersion)

  override val container = MultipleContainersParallelExecution(cassandraContainer, elasticsearchContainer, kafkaZooCombined)

  override def afterStart(): Unit = {
    super.afterStart()
    // scalastyle:off
    kafkaContainer.configure{ container =>
      val result = container.execInContainer("bash", "-c", "${KAFKA_HOME}/bin/kafka-configs.sh " +
        "--bootstrap-server localhost:19092 --entity-type brokers --entity-name 1 --alter --add-config " +
        s"advertised.listeners=[EXTERNAL://${kafkaContainer.containerIpAddress}:${kafkaContainer.mappedPort(9092)},INTERNAL://kafkaBroker-1:19092]")
      val stdOut = result.getStdout.trim
      if (stdOut != "Completed updating config for broker: 1.") {
        val stdErr = result.getStderr.trim
        throw new Exception(s"Couldn't change Kafka's advertised listeners config for broker 1. stdout: [$stdOut]. stderr: [$stdErr]")
      }
    }
    // scalastyle:on
  }
} 
Example 173
Source File: KafkaZookeeperDockerSuite.scala    From CM-Well   with Apache License 2.0 5 votes vote down vote up
package cmwell.util.testSuitHelpers.test

import com.dimafeng.testcontainers.ForAllTestContainer
import org.scalatest.Suite

trait KafkaZookeeperDockerSuite extends ForAllTestContainer { this: Suite =>
  def zookeeperVersion: String
  def kafkaVersion: String
  val KafkaZookeeperContainers(kafkaContainer, zookeeperContainer, combined) = ContainerHelpers.kafkaAndZookeeper(kafkaVersion, zookeeperVersion)
  override val container = combined

  override def afterStart(): Unit = {
    super.afterStart()
    // scalastyle:off
    kafkaContainer.configure{ container =>
      val result = container.execInContainer("bash", "-c", "${KAFKA_HOME}/bin/kafka-configs.sh " +
        "--bootstrap-server localhost:19092 --entity-type brokers --entity-name 1 --alter --add-config " +
        s"advertised.listeners=[EXTERNAL://${kafkaContainer.containerIpAddress}:${kafkaContainer.mappedPort(9092)},INTERNAL://kafkaBroker-1:19092]")
      val stdOut = result.getStdout.trim
      if (stdOut != "Completed updating config for broker: 1.") {
        val stdErr = result.getStderr.trim
        throw new Exception(s"Couldn't change Kafka's advertised listeners config for broker 1. stdout: [$stdOut]. stderr: [$stdErr]")
      }
    }
    // scalastyle:on
  }
} 
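A concrete suite only has to pin the image versions; the versions and spec name below are illustrative assumptions.

import cmwell.util.testSuitHelpers.test.KafkaZookeeperDockerSuite
import org.scalatest.flatspec.AnyFlatSpec

class KafkaIngestSpec extends AnyFlatSpec with KafkaZookeeperDockerSuite {

  // Hypothetical image tags; any versions supported by ContainerHelpers would do.
  override def kafkaVersion: String = "2.3.0"
  override def zookeeperVersion: String = "3.5.5"

  "the dockerised broker" should "publish a mapped external port" in {
    assert(kafkaContainer.mappedPort(9092) > 0)
  }
}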
Example 174
Source File: KinesisContainerSpecSupport.scala    From reactive-aws-clients   with MIT License 5 votes vote down vote up
package com.github.j5ik2o.reactive.aws.kinesis.test

import com.spotify.docker.client.{ DefaultDockerClient, DockerClient }
import com.whisk.docker.impl.spotify.SpotifyDockerFactory
import com.whisk.docker.scalatest.DockerTestKit
import com.whisk.docker.{ DockerContainer, DockerFactory, DockerReadyChecker }
import org.scalatest.Suite

import scala.concurrent.duration._

trait KinesisContainerSpecSupport extends DockerTestKit with RandomPortSupport {
  this: Suite =>

  protected val connectTimeout: FiniteDuration = 3 seconds
  protected val readTimeout: FiniteDuration    = 3 seconds

  protected lazy val accessKeyId      = "000000000000"
  protected lazy val secretAccessKey  = "000000000000"
  protected lazy val endpoint         = s"http://127.0.0.1:$kinesisPort"
  protected lazy val kinesisPort: Int = temporaryServerPort()

  // Note: despite its name, this container runs the local Kinesis image ("vsouza/kinesis-local"), not DynamoDB.
  protected lazy val dynamoDBContainer: DockerContainer =
    DockerContainer("vsouza/kinesis-local:latest")
      .withCommand("--port 4567 --shardLimit 100 --createStreamMs 0 --deleteStreamMs 0 --updateStreamMs 0")
      .withPorts(4567 -> Some(kinesisPort))
      .withReadyChecker(DockerReadyChecker.LogLineContains("Listening at http://"))

  protected val dockerClient: DockerClient =
    DefaultDockerClient
      .fromEnv()
      .connectTimeoutMillis(connectTimeout.toMillis)
      .readTimeoutMillis(readTimeout.toMillis).build()

  abstract override def dockerContainers: List[DockerContainer] =
    dynamoDBContainer :: super.dockerContainers

  override implicit def dockerFactory: DockerFactory =
    new SpotifyDockerFactory(dockerClient)

} 
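A suite mixing this in would point an AWS SDK v2 client at the local endpoint, much like the S3 support trait in the next example does; the client wiring below is an assumption based on that pattern.

import java.net.URI

import com.github.j5ik2o.reactive.aws.kinesis.test.KinesisContainerSpecSupport
import org.scalatest.freespec.AnyFreeSpec
import software.amazon.awssdk.auth.credentials.{ AwsBasicCredentials, StaticCredentialsProvider }
import software.amazon.awssdk.regions.Region
import software.amazon.awssdk.services.kinesis.KinesisAsyncClient
import software.amazon.awssdk.services.kinesis.model.ListStreamsRequest

class KinesisClientSpec extends AnyFreeSpec with KinesisContainerSpecSupport {

  // Client wired against the container endpoint; the region is arbitrary for kinesis-local.
  private lazy val kinesisClient: KinesisAsyncClient =
    KinesisAsyncClient
      .builder()
      .credentialsProvider(StaticCredentialsProvider.create(AwsBasicCredentials.create(accessKeyId, secretAccessKey)))
      .endpointOverride(URI.create(endpoint))
      .region(Region.US_EAST_1)
      .build()

  "the local Kinesis container" - {
    "answers ListStreams calls" in {
      val response = kinesisClient.listStreams(ListStreamsRequest.builder().build()).get()
      assert(response.streamNames() != null)
    }
  }
}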
Example 175
Source File: S3ContainerSpecSupport.scala    From reactive-aws-clients   with MIT License 5 votes vote down vote up
package com.github.j5ik2o.reactive.aws.s3

import java.net.URI

import com.github.j5ik2o.reactive.aws.test.RandomPortSupport
import com.spotify.docker.client.{ DefaultDockerClient, DockerClient }
import com.whisk.docker.impl.spotify.SpotifyDockerFactory
import com.whisk.docker.scalatest.DockerTestKit
import com.whisk.docker.{
  DockerCommandExecutor,
  DockerContainer,
  DockerContainerState,
  DockerFactory,
  DockerReadyChecker,
  LogLineReceiver
}
import org.scalatest.Suite
import software.amazon.awssdk.auth.credentials.{ AwsBasicCredentials, StaticCredentialsProvider }
import software.amazon.awssdk.services.s3.S3AsyncClient

import scala.concurrent.duration._
import scala.concurrent.{ ExecutionContext, Future }
import scala.compat.java8.FutureConverters._

trait S3ContainerSpecSupport extends DockerTestKit with RandomPortSupport {
  this: Suite =>

  // override val StartContainersTimeout: FiniteDuration = 30 seconds
  protected val connectTimeout: FiniteDuration = 3 seconds
  protected val readTimeout: FiniteDuration    = 3 seconds

  protected val dockerClient: DockerClient =
    DefaultDockerClient
      .fromEnv()
      .connectTimeoutMillis(connectTimeout.toMillis)
      .readTimeoutMillis(readTimeout.toMillis).build()

  protected lazy val accessKeyId     = "AKIAIOSFODNN7EXAMPLE"
  protected lazy val secretAccessKey = "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY"
  protected lazy val endpoint        = s"http://127.0.0.1:$s3Port"

  protected lazy val javaS3Client: S3AsyncClient =
    S3AsyncClient
      .builder()
      .credentialsProvider(StaticCredentialsProvider.create(AwsBasicCredentials.create(accessKeyId, secretAccessKey)))
      .endpointOverride(URI.create(endpoint))
      .build()

  class S3DockerReadyChecker(s3client: S3AsyncClient) extends DockerReadyChecker {
    override def apply(container: DockerContainerState)(
        implicit docker: DockerCommandExecutor,
        ec: ExecutionContext
    ): Future[Boolean] =
      s3client
        .listBuckets().toScala.map { _ =>
          true
        }(ec).recover {
          case _ =>
            false
        }(ec)
  }

  override implicit def dockerFactory: DockerFactory =
    new SpotifyDockerFactory(dockerClient)

  protected lazy val s3Port: Int = temporaryServerPort()

  protected lazy val s3Container: DockerContainer =
    DockerContainer("minio/minio")
      .withPorts(9000 -> Some(s3Port))
      .withEnv(s"MINIO_ACCESS_KEY=$accessKeyId", s"MINIO_SECRET_KEY=$secretAccessKey")
      .withCommand("server", "/data")
      .withLogLineReceiver(LogLineReceiver(true, { message =>
        println(message)
      }))
      .withReadyChecker(new S3DockerReadyChecker(javaS3Client))

  abstract override def dockerContainers: List[DockerContainer] =
    s3Container :: super.dockerContainers
} 
Example 176
Source File: ZIOSpec.scala    From polynote   with Apache License 2.0 5 votes vote down vote up
package polynote.testing

import polynote.config.PolynoteConfig
import polynote.env.ops.Enrich
import polynote.kernel.Kernel.Factory
import polynote.kernel.{BaseEnv, CellEnv, GlobalEnv, Kernel, ResultValue, interpreter}
import polynote.kernel.environment.{Config, Env, NotebookUpdates}
import interpreter.Interpreter
import org.scalatest.{BeforeAndAfterAll, Suite}
import polynote.kernel.logging.Logging
import zio.blocking.Blocking
import zio.clock.Clock
import zio.console.Console
import zio.internal.Platform
import zio.random.Random
import zio.system.System
import zio.{Has, RIO, Runtime, Tagged, ZIO, ZLayer}

abstract class TestRuntime
object TestRuntime {
  val runtime: Runtime.Managed[zio.ZEnv with Logging] = ZIOSpecBase.runtime
  def fiberDump(): List[zio.Fiber.Dump] = runtime.unsafeRun(zio.Fiber.dumpAll).toList
}

trait ZIOSpecBase[Env <: Has[_]] {
  import ZIOSpecBase.BaseEnv
  type Environment = Env
  val baseLayer: ZLayer[Any, Nothing, BaseEnv] = ZIOSpecBase.baseLayer
  def envLayer: ZLayer[zio.ZEnv with Logging, Nothing, Env]
  val runtime: Runtime.Managed[BaseEnv] = ZIOSpecBase.runtime

  // TODO: should test platform behave differently? Isolate per suite?
  implicit class IORunOps[A](val self: ZIO[BaseEnv, Throwable, A]) {
    def runIO(): A = ZIOSpecBase.this.runIO(self)
  }

  implicit class IORunWithOps[R <: Has[_], A](val self: ZIO[R, Throwable, A]) {
    def runWith[R1](env: R1)(implicit ev: Env with Has[R1] <:< R, ev1: Tagged[R1], ev2: Tagged[Has[R1]], ev3: Tagged[Env]): A =
      ZIOSpecBase.this.runIO(self.provideSomeLayer[Env](ZLayer.succeed(env)).provideSomeLayer[BaseEnv](envLayer))
  }

  def runIO[A](io: ZIO[BaseEnv, Throwable, A]): A = runtime.unsafeRunSync(io).getOrElse {
    c => throw c.squash
  }


}

object ZIOSpecBase {

  type BaseEnv = zio.ZEnv with Logging
  val baseLayer: ZLayer[Any, Nothing, zio.ZEnv with Logging] = Clock.live ++ Console.live ++ System.live ++ Random.live ++ Blocking.live ++ (Blocking.live >>> Logging.live)
  val platform: Platform = Platform.default
    .withReportFailure(_ => ()) // suppress printing error stack traces by default
  val runtime: Runtime.Managed[zio.ZEnv with Logging] = Runtime.unsafeFromLayer(baseLayer, platform)
}

trait ZIOSpec extends ZIOSpecBase[Clock with Console with System with Random with Blocking with Logging] {
  override lazy val envLayer: ZLayer[zio.ZEnv, Nothing, Environment] = baseLayer
  implicit class ConfigIORunOps[A](val self: ZIO[Environment with Config, Throwable, A]) {
    def runWithConfig(config: PolynoteConfig): A = ZIOSpec.this.runIO(self.provideSomeLayer[Environment](ZLayer.succeed(config)))
  }
}

trait ConfiguredZIOSpec extends ZIOSpecBase[BaseEnv with Config] { this: Suite =>
  def config: PolynoteConfig = PolynoteConfig()
  override lazy val envLayer: ZLayer[zio.ZEnv, Nothing, BaseEnv with Config] =
    baseLayer ++ ZLayer.succeed(config)
}

trait ExtConfiguredZIOSpec[Env <: Has[_]] extends ZIOSpecBase[BaseEnv with Config with Env] {
  def tagged: Tagged[Env]
  def configuredEnvLayer: ZLayer[zio.ZEnv with Config, Nothing, Env]

  private implicit def _tagged: Tagged[Env] = tagged

  def config: PolynoteConfig = PolynoteConfig()
  lazy val configLayer: ZLayer[Any, Nothing, Config] = ZLayer.succeed(config)
  override final lazy val envLayer: ZLayer[zio.ZEnv, Nothing, BaseEnv with Config with Env] = baseLayer ++ Logging.live ++ ((baseLayer ++ configLayer) >>> configuredEnvLayer) ++ configLayer
}

object ValueMap {
  def unapply(values: List[ResultValue]): Option[Map[String, Any]] = Some(apply(values))
  def apply(values: List[ResultValue]): Map[String, Any] = values.map(v => v.name -> v.value).toMap
} 
Example 177
Source File: BaseTransactionSuite.scala    From Waves   with MIT License 5 votes vote down vote up
package com.wavesplatform.it.transactions

import java.io.File

import com.typesafe.config.{Config, ConfigFactory}
import com.wavesplatform.it._
import monix.eval.Coeval
import org.scalatest.{BeforeAndAfterAll, FunSuite, Suite}

import scala.jdk.CollectionConverters._
import scala.concurrent.ExecutionContext

trait BaseTransactionSuiteLike extends WaitForHeight2 with IntegrationSuiteWithThreeAddresses with BeforeAndAfterAll with NodesFromDocker {
  this: Suite =>

  protected implicit val ec: ExecutionContext = ExecutionContext.Implicits.global

  protected def nodeConfigs: Seq[Config] =
    NodeConfigs.newBuilder
      .overrideBase(_.quorum(0))
      .withDefault(1)
      .withSpecial(_.nonMiner)
      .buildNonConflicting()

  override def miner: Node = nodes.head

  // protected because https://github.com/sbt/zinc/issues/292
  protected val theNodes: Coeval[Seq[Node]] = Coeval.evalOnce {
    Option(System.getProperty("waves.it.config.file")) match {
      case None => dockerNodes()
      case Some(filePath) =>
        val defaultConfig = ConfigFactory.load()
        ConfigFactory
          .parseFile(new File(filePath))
          .getConfigList("nodes")
          .asScala
          .toSeq
          .map(cfg => new ExternalNode(cfg.withFallback(defaultConfig).resolve()))
    }
  }

  override protected def nodes: Seq[Node] = theNodes()

  protected override def beforeAll(): Unit = {
    theNodes.run
    super.beforeAll()
  }
}

abstract class BaseTransactionSuite extends FunSuite with BaseTransactionSuiteLike 
Example 178
Source File: GrpcWaitForHeight.scala    From Waves   with MIT License 5 votes vote down vote up
package com.wavesplatform.it

import com.wavesplatform.utils.ScorexLogging
import com.wavesplatform.it.api.AsyncGrpcApi._
import org.scalatest.{BeforeAndAfterAll, Suite}

import scala.concurrent.{Await, Future}
import scala.concurrent.Future.traverse
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.duration._

trait GrpcWaitForHeight extends BeforeAndAfterAll with ScorexLogging with ReportingTestName with Nodes {
  this: Suite =>

  abstract protected override def beforeAll(): Unit = {
    super.beforeAll()
    Await.result(traverse(nodes)(_.waitForHeight(2)), 2.minute)
  }

  def waitForTxsToReachAllNodes(nodes: Seq[Node] = nodes, txIds: Seq[String]): Future[_] = {
    val txNodePairs = for {
      txId <- txIds
      node <- nodes
    } yield (node, txId)
    traverse(txNodePairs) { case (node, tx) => node.waitForTransaction(tx) }
  }

} 
Example 179
Source File: ReportingTestName.scala    From Waves   with MIT License 5 votes vote down vote up
package com.wavesplatform.it

import com.wavesplatform.http.DebugMessage
import com.wavesplatform.it.api.AsyncHttpApi._
import com.wavesplatform.utils.ScorexLogging
import org.scalatest.{Args, Status, Suite, SuiteMixin}

import scala.concurrent.duration._
import scala.concurrent.{Await, Future}

trait ReportingTestName extends SuiteMixin with ScorexLogging {
  th: Suite with Nodes =>

  abstract override protected def runTest(testName: String, args: Args): Status = {
    print(s"Test '$testName' started")
    val r = super.runTest(testName, args)
    print(s"Test '$testName' ${if (r.succeeds()) "SUCCEEDED" else "FAILED"}")
    r
  }

  private def print(text: String): Unit = {
    import scala.concurrent.ExecutionContext.Implicits.global
    val formatted = s"---------- $text ----------"
    log.debug(formatted)
    try {
      Await.result(Future.traverse(nodes)(_.printDebugMessage(DebugMessage(formatted))), 10.seconds)
    } catch {
      case _: Throwable => ()
    }
  }
} 
Example 180
Source File: GrpcIntegrationSuiteWithThreeAddress.scala    From Waves   with MIT License 5 votes vote down vote up
package com.wavesplatform.it

import com.google.protobuf.ByteString
import com.wavesplatform.account.{Address, KeyPair}
import com.wavesplatform.common.utils.EitherExt2
import com.wavesplatform.it.api.SyncGrpcApi._
import com.wavesplatform.it.util._
import com.wavesplatform.protobuf.transaction.{PBRecipients, PBTransactions, Recipient}
import com.wavesplatform.transaction.transfer.TransferTransaction
import com.wavesplatform.utils.ScorexLogging
import org.scalatest.concurrent.{IntegrationPatience, ScalaFutures}
import org.scalatest.{BeforeAndAfterAll, Matchers, RecoverMethods, Suite}

trait GrpcIntegrationSuiteWithThreeAddress
    extends BeforeAndAfterAll
    with Matchers
    with ScalaFutures
    with IntegrationPatience
    with RecoverMethods
    with IntegrationTestsScheme
    with Nodes
    with ScorexLogging {
  this: Suite =>

  def miner: Node    = nodes.head
  def notMiner: Node = nodes.last

  protected def sender: Node = miner

  protected lazy val firstAcc: KeyPair  = KeyPair("first_acc".getBytes("UTF-8"))
  protected lazy val secondAcc: KeyPair = KeyPair("second_acc".getBytes("UTF-8"))
  protected lazy val thirdAcc: KeyPair  = KeyPair("third_acc".getBytes("UTF-8"))

  protected lazy val firstAddress: ByteString  = PBRecipients.create(Address.fromPublicKey(firstAcc.publicKey)).getPublicKeyHash
  protected lazy val secondAddress: ByteString = PBRecipients.create(Address.fromPublicKey(secondAcc.publicKey)).getPublicKeyHash
  protected lazy val thirdAddress: ByteString  = PBRecipients.create(Address.fromPublicKey(thirdAcc.publicKey)).getPublicKeyHash

  abstract protected override def beforeAll(): Unit = {
    super.beforeAll()

    val defaultBalance: Long = 100.waves

    def dumpBalances(node: Node, accounts: Seq[ByteString], label: String): Unit = {
      accounts.foreach(acc => {
        val balance = miner.wavesBalance(acc).available
        val eff     = miner.wavesBalance(acc).effective

        val formatted = s"$acc: balance = $balance, effective = $eff"
        log.debug(s"$label account balance:\n$formatted")
      })
    }

    def waitForTxsToReachAllNodes(txIds: Seq[String]): Unit = {
      val txNodePairs = for {
        txId <- txIds
        node <- nodes
      } yield (node, txId)

      txNodePairs.foreach({ case (node, tx) => node.waitForTransaction(tx) })
    }

    def makeTransfers(accounts: Seq[ByteString]): Seq[String] = accounts.map { acc =>
      PBTransactions
        .vanilla(
          sender.broadcastTransfer(sender.keyPair, Recipient().withPublicKeyHash(acc), defaultBalance, sender.fee(TransferTransaction.typeId))
        )
        .explicitGet()
        .id()
        .toString
    }

    def correctStartBalancesFuture(): Unit = {
      nodes.foreach(n => n.waitForHeight(2))
      val accounts = Seq(firstAddress, secondAddress, thirdAddress)

      dumpBalances(sender, accounts, "initial")
      val txs = makeTransfers(accounts)

      val height = nodes.map(_.height).max

      withClue(s"waitForHeight(${height + 2})") {
        nodes.foreach(n => n.waitForHeight(height + 1))
        nodes.foreach(n => n.waitForHeight(height + 2))
      }

      withClue("waitForTxsToReachAllNodes") {
        waitForTxsToReachAllNodes(txs)
      }

      dumpBalances(sender, accounts, "after transfer")
      accounts.foreach(acc => miner.wavesBalance(acc).available shouldBe defaultBalance)
      accounts.foreach(acc => miner.wavesBalance(acc).effective shouldBe defaultBalance)
    }

    withClue("beforeAll") {
      correctStartBalancesFuture()
    }
  }
} 
Example 181
Source File: AkkaSuite.scala    From streamee   with Apache License 2.0 5 votes vote down vote up
package io.moia.streamee

import akka.actor.{ ActorSystem, Scheduler }
import org.scalatest.{ BeforeAndAfterAll, Suite }
import scala.concurrent.Await
import scala.concurrent.duration.DurationInt

trait AkkaSuite extends Suite with BeforeAndAfterAll {

  protected implicit val system: ActorSystem =
    ActorSystem()

  protected implicit val scheduler: Scheduler =
    system.scheduler

  override protected def afterAll(): Unit = {
    Await.ready(system.terminate(), 42.seconds)
    super.afterAll()
  }
} 
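Because AkkaSuite already extends Suite, a spec only needs to mix it in to get a shared ActorSystem and Scheduler; the sketch below is illustrative.

import io.moia.streamee.AkkaSuite
import org.scalatest.wordspec.AnyWordSpec

import scala.concurrent.{ Await, Promise }
import scala.concurrent.duration.DurationInt

class SchedulerSuite extends AnyWordSpec with AkkaSuite {

  "the shared ActorSystem" should {
    "run one-off tasks on its scheduler" in {
      val fired = Promise[Unit]()
      scheduler.scheduleOnce(10.millis)(fired.trySuccess(()))(system.dispatcher)
      Await.ready(fired.future, 2.seconds)
      assert(fired.future.isCompleted)
    }
  }
}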
Example 182
Source File: MockPingPongServer.scala    From Dsl.scala   with Apache License 2.0 5 votes vote down vote up
package com.thoughtworks.dsl
import com.thoughtworks.enableMembersIf
import org.scalatest.{AsyncTestSuite, BeforeAndAfterAll, Suite}

import scala.concurrent.ExecutionContext
import scala.concurrent.duration.Duration


@enableMembersIf(scala.util.Properties.versionNumberString.matches("""^2\.1(1|2)\..*$"""))
trait MockPingPongServer extends BeforeAndAfterAll { this: Suite =>

  implicit def executionContext: ExecutionContext

  protected implicit val system = akka.actor.ActorSystem()

  protected implicit val materializer = akka.stream.ActorMaterializer()

  protected val mockServer = {
    import akka.http.scaladsl.server.Directives._
    val route =
      get {
        path("ping") {
          complete("PING!")
        } ~ path("pong") {
          complete("PONG!")
        }
      }
    concurrent.Await.result(akka.http.scaladsl.Http().bindAndHandle(route, "localhost", 8085), Duration.Inf)
  }

  override protected def afterAll(): Unit = {
    mockServer
      .unbind()
      .onComplete(_ => system.terminate())
  }

} 
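An async suite can then talk to the two routes over HTTP; the spec below assumes ScalaTest's async style (which supplies the required implicit ExecutionContext) and a Scala version accepted by the @enableMembersIf guard.

import akka.http.scaladsl.Http
import akka.http.scaladsl.model.HttpRequest
import akka.http.scaladsl.unmarshalling.Unmarshal
import com.thoughtworks.dsl.MockPingPongServer
import org.scalatest.freespec.AsyncFreeSpec
import org.scalatest.matchers.should.Matchers

class PingPongSpec extends AsyncFreeSpec with Matchers with MockPingPongServer {

  "the mock server" - {
    "answers PING! on /ping" in {
      Http()
        .singleRequest(HttpRequest(uri = "http://localhost:8085/ping"))
        .flatMap(response => Unmarshal(response.entity).to[String])
        .map(_ shouldBe "PING!")
    }
  }
}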
Example 183
Source File: SparkTestContext.scala    From scalable-deeplearning   with Apache License 2.0 5 votes vote down vote up
package scaladl.util

import org.apache.log4j.{Level, Logger}
import org.apache.spark.sql.SparkSession
import org.apache.spark.SparkContext
import org.scalatest.{BeforeAndAfterAll, Suite}

trait SparkTestContext extends BeforeAndAfterAll { self: Suite =>
  @transient var spark: SparkSession = _
  @transient var sc: SparkContext = _
  @transient var checkpointDir: String = _

  override def beforeAll() {
    super.beforeAll()
    spark = SparkSession.builder
      .master("local[2]")
      .appName("MLlibUnitTest")
      .config("spark.sql.warehouse.dir", "warehouse-temp")
      .getOrCreate()
    sc = spark.sparkContext
    Logger.getLogger("org").setLevel(Level.WARN)
  }

  override def afterAll() {
    try {
      SparkSession.clearActiveSession()
      if (spark != null) {
        spark.stop()
      }
      spark = null
    } finally {
      super.afterAll()
    }
  }
} 
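A minimal consumer of this context; the suite name and the ScalaTest 3.1-style AnyFunSuite import are assumptions.

import org.scalatest.funsuite.AnyFunSuite
import scaladl.util.SparkTestContext

class WordCountSuite extends AnyFunSuite with SparkTestContext {

  test("counts words with the shared SparkContext") {
    val counts = sc.parallelize(Seq("a", "b", "a")).countByValue()
    assert(counts("a") == 2L)
  }
}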
Example 184
Source File: PerTestSparkSession.scala    From Spark-RSVD   with Apache License 2.0 5 votes vote down vote up
package com.criteo.rsvd

import java.io.File
import java.nio.file.{Files, Path}
import java.util.concurrent.locks.ReentrantLock

import org.apache.commons.io.FileUtils
import org.apache.spark.SparkContext
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{SQLContext, SparkSession}
import org.scalatest.{BeforeAndAfterEach, Suite}

import scala.reflect.ClassTag
import scala.util.control.NonFatal

object LocalSparkSession {
  private[this] val lock = new ReentrantLock()

  def acquire(): Unit = lock.lock()

  def release(): Unit = lock.unlock()

  def builder: SparkSession.Builder = {
    SparkSession
      .builder()
      .master("local[*]")
      .appName("test")
      .config("spark.ui.enabled", false)
  }
}


trait PerTestSparkSession extends BeforeAndAfterEach { self: Suite =>

  // A fresh session and checkpoint directory are created per test and torn down in afterEach.
  private var currentSession: Option[SparkSession] = None
  private var checkpointDir: Option[Path] = None

  def sc: SparkContext = getOrCreateSession.sparkContext

  def sqlContext: SQLContext = getOrCreateSession.sqlContext

  def sparkConf: Map[String, Any] = Map()

  def toRDD[T: ClassTag](input: Seq[T]): RDD[T] = sc.parallelize(input)

  def toArray[T](input: RDD[T]): Array[T] = input.collect()

  protected def closeSession() = {
    currentSession.foreach(_.stop())
    currentSession = None
    try {
      checkpointDir.foreach(path =>
        FileUtils.deleteDirectory(new File(path.toString)))
    } catch {
      case NonFatal(_) =>
    }
    checkpointDir = None
    LocalSparkSession.release()
  }

  private def getOrCreateSession = synchronized {
    if (currentSession.isEmpty) {
      val builder = LocalSparkSession.builder
      for ((key, value) <- sparkConf) {
        builder.config(key, value.toString)
      }
      currentSession = Some(builder.getOrCreate())
      checkpointDir =
        Some(Files.createTempDirectory("spark-unit-test-checkpoint-"))
      currentSession.get.sparkContext
        .setCheckpointDir(checkpointDir.get.toString)
        currentSession.get.sparkContext.setLogLevel("WARN")
    }
    currentSession.get
  }

  override def beforeEach(): Unit = {
    LocalSparkSession.acquire()
    super.beforeEach()
  }

  override def afterEach(): Unit = {
    try {
      super.afterEach()
    } finally {
      closeSession()
    }
  }
} 
Example 185
Source File: LocalSparkContext.scala    From lsh-spark   with Apache License 2.0 5 votes vote down vote up
package com.lendap.spark.lsh


import org.scalatest.Suite
import org.scalatest.BeforeAndAfterAll

import org.apache.spark.{SparkConf, SparkContext}

trait LocalSparkContext extends BeforeAndAfterAll { self: Suite =>
  @transient var sc: SparkContext = _

  override def beforeAll() {
    val conf = new SparkConf()
      .setMaster("local")
      .setAppName("test")
    sc = new SparkContext(conf)
    super.beforeAll()
  }

  override def afterAll() {
    if (sc != null) {
      sc.stop()
    }
    super.afterAll()
  }
} 
Example 186
Source File: InterpreterSpec.scala    From polynote   with Apache License 2.0 5 votes vote down vote up
package polynote.testing

import java.io.File

import cats.data.StateT
import cats.syntax.traverse._
import cats.instances.list._
import org.scalatest.Suite
import polynote.config.PolynoteConfig
import polynote.kernel.environment.Config
import polynote.kernel.{Output, Result, ScalaCompiler}
import polynote.kernel.interpreter.{Interpreter, State}
import polynote.kernel.logging.Logging
import polynote.testing.kernel.MockEnv
import zio.{RIO, ZIO}
import zio.blocking.Blocking
import zio.clock.Clock
import zio.console.Console
import zio.random.Random
import zio.system.System
import zio.interop.catz._

import scala.reflect.internal.util.AbstractFileClassLoader
import scala.reflect.io.VirtualDirectory
import scala.tools.nsc.Settings
import scala.tools.nsc.io.AbstractFile

trait InterpreterSpec extends ZIOSpec {
  import runtime.{unsafeRun, unsafeRunSync}
  val classpath: List[File] = sys.props("java.class.path").split(File.pathSeparator).toList.map(new File(_))
  val settings: Settings = ScalaCompiler.defaultSettings(new Settings(), classpath)

  def outDir: AbstractFile = new VirtualDirectory("(memory)", None)
  settings.outputDirs.setSingleOutput(outDir)

  val classLoader: AbstractFileClassLoader = unsafeRun(ScalaCompiler.makeClassLoader(settings, Nil).provide(Config.of(PolynoteConfig())))
  val compiler: ScalaCompiler = ScalaCompiler(settings, classLoader).runIO()

  def interpreter: Interpreter

  lazy val initialState: State = unsafeRun(interpreter.init(State.Root).provideSomeLayer[Environment](MockEnv.layer(State.Root.id + 1)))
  def cellState: State = State.id(1, initialState)

  def assertOutput(code: String)(assertion: (Map[String, Any], Seq[Result]) => Unit): Unit =
    assertOutput(List(code))(assertion)

  def assertOutput(code: Seq[String])(assertion: (Map[String, Any], Seq[Result]) => Unit): Unit= {
    val (finalState, interpResults) = code.toList.map(interp).sequence.run(cellState).runIO()
    val terminalResults = interpResults.foldLeft((Map.empty[String, Any], List.empty[Result])) {
      case ((vars, results), next) =>
        val nextVars = vars ++ next.state.values.map(v => v.name -> v.value).toMap
        val nextOutputs = results ++ next.env.publishResult.toList.runIO()
        (nextVars, nextOutputs)
    }
    assertion.tupled(terminalResults)
  }

  case class InterpResult(state: State, env: MockEnv)

  type ITask[A] = RIO[Clock with Console with System with Random with Blocking with Logging, A]

  def interp(code: String): StateT[ITask, State, InterpResult] = StateT[ITask, State, InterpResult] {
    state => MockEnv(state.id).flatMap {
      env => interpreter.run(code, state).map {
        newState => State.id(newState.id + 1, newState) -> InterpResult(newState, env)
      }.provideSomeLayer[Environment](env.toCellEnv(classLoader))
    }
  }

  def interp1(code: String): InterpResult = unsafeRun {
    MockEnv(cellState.id).flatMap {
      env =>
        interpreter.run(code, cellState).provideSomeLayer(env.toCellEnv(getClass.getClassLoader)).map {
          state => InterpResult(state, env)
        }
    }
  }

  def stdOut(results: Seq[Result]): String = results.foldLeft("") {
    case (accum, Output("text/plain; rel=stdout", next)) => accum + next.mkString
    case (accum, _) => accum
  }

} 
Example 187
Source File: DockerBased.scala    From Waves   with MIT License 5 votes vote down vote up
package com.wavesplatform.it

import com.wavesplatform.it.Docker.DockerNode
import monix.eval.Coeval
import org.scalatest.{Args, BeforeAndAfterAll, Status, Suite}

trait DockerBased extends BeforeAndAfterAll {
  this: Suite with Nodes =>

  protected val dockerSingleton: Coeval[Docker] = Coeval.evalOnce(createDocker)
  final def docker: Docker                      = dockerSingleton()

  abstract override protected def runTest(testName: String, args: Args): Status = {
    def printThreadDump(): Unit = nodes.collect {
      case node: DockerNode =>
        docker.printThreadDump(node)
    }
    val r = super.runTest(testName, args)
    if (!r.succeeds()) printThreadDump()
    r
  }

  protected def createDocker: Docker = Docker(getClass)
  override protected def afterAll(): Unit = {
    super.afterAll()
    docker.close()
  }
} 
Example 188
Source File: MLlibTestSparkContext.scala    From spark-ranking-algorithms   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.mllib.util

import org.scalatest.{BeforeAndAfterAll, Suite}

import org.apache.spark.sql.SQLContext
import org.apache.spark.{SparkConf, SparkContext}

trait MLlibTestSparkContext extends BeforeAndAfterAll { self: Suite =>
  @transient var sc: SparkContext = _
  @transient var sqlContext: SQLContext = _

  override def beforeAll() {
    super.beforeAll()
    val conf = new SparkConf()
      .setMaster("local[2]")
      .setAppName("MLlibUnitTest")
    sc = new SparkContext(conf)
    SQLContext.clearActive()
    sqlContext = new SQLContext(sc)
    SQLContext.setActive(sqlContext)
  }

  override def afterAll() {
    try {
      sqlContext = null
      SQLContext.clearActive()
      if (sc != null) {
        sc.stop()
      }
      sc = null
    } finally {
      super.afterAll()
    }
  }
} 
Example 189
Source File: LogMatchers.scala    From stryker4s   with Apache License 2.0 5 votes vote down vote up
package stryker4s.scalatest

import org.apache.logging.log4j.Level
import org.apache.logging.log4j.core.LogEvent
import org.scalatest.BeforeAndAfterEach
import org.scalatest.matchers.{BeMatcher, MatchResult}
import stryker4s.testutil.{TestAppender}
import org.scalatest.Suite

trait LogMatchers extends BeforeAndAfterEach {
  // Will cause a compile error if LogMatchers is used outside of a ScalaTest Suite
  this: Suite =>

  def loggedAsDebug = new LogMatcherWithLevel(Level.DEBUG)
  def loggedAsInfo = new LogMatcherWithLevel(Level.INFO)
  def loggedAsWarning = new LogMatcherWithLevel(Level.WARN)
  def loggedAsError = new LogMatcherWithLevel(Level.ERROR)

  override def afterEach(): Unit = TestAppender.reset

  
  implicit private val threadName: String = Thread.currentThread().getName

  protected class LogMatcherWithLevel(expectedLogLevel: Level)(implicit threadName: String) extends BeMatcher[String] {
    def apply(expectedLogMessage: String): MatchResult = {
      getLoggingEventWithLogMessage(expectedLogMessage) match {
        case None =>
          MatchResult(
            matches = false,
            s"Log message '$expectedLogMessage' wasn't logged at any level.",
            s"Log message '$expectedLogMessage' was logged as $expectedLogLevel."
          )
        case Some(loggingEvent) =>
          val result = validateLogLevel(loggingEvent.getLevel, expectedLogLevel)

          MatchResult(
            result,
            s"Log message '$expectedLogMessage' was logged but not on correct log level, " +
              s"expected [$expectedLogLevel] actual [${loggingEvent.getLevel}].",
            s"Log message '$expectedLogMessage' was logged as $expectedLogLevel."
          )
      }
    }

    private def validateLogLevel(actualLogLevel: Level, expectedLogLevel: Level): Boolean = {
      expectedLogLevel.equals(actualLogLevel)
    }

    private def getLoggingEventWithLogMessage(expectedLogMessage: String): Option[LogEvent] = {
      TestAppender
        .events(threadName)
        .find(_.getMessage.getFormattedMessage.contains(expectedLogMessage))
    }
  }
} 
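In a suite the matchers read as plain ScalaTest assertions; the message and level below are illustrative, and the assertion only passes if the code under test actually logged that message through log4j2.

import org.scalatest.funsuite.AnyFunSuite
import org.scalatest.matchers.should.Matchers
import stryker4s.scalatest.LogMatchers

class RunLoggingTest extends AnyFunSuite with Matchers with LogMatchers {

  test("reports the start of a run at INFO level") {
    // ... exercise code here that logs "Starting mutation run" via log4j2 ...
    "Starting mutation run" should be(loggedAsInfo)
  }
}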
Example 190
Source File: ToCurlConverterTestExtension.scala    From sttp   with Apache License 2.0 5 votes vote down vote up
package sttp.client

import java.io.File

import org.scalatest.Suite
import sttp.client.internal.SttpFile
import org.scalatest.flatspec.AnyFlatSpec
import org.scalatest.matchers.should.Matchers

trait ToCurlConverterTestExtension { suit: Suite with AnyFlatSpec with Matchers =>
  it should "render multipart form data if content is a file" in {
    basicRequest
      .multipartBody(multipartSttpFile("upload", SttpFile.fromPath(new File("myDataSet").toPath)))
      .post(uri"http://localhost")
      .toCurl should include(
      """--form 'upload=@myDataSet'"""
    )
  }
} 
Example 191
Source File: SharedSparkSession.scala    From gimel   with Apache License 2.0 5 votes vote down vote up
package com.paypal.gimel.common.utilities.spark

import org.apache.spark.SparkConf
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{DataFrame, SparkSession, SQLContext}
import org.apache.spark.sql.internal.SQLConf
import org.scalatest.{BeforeAndAfterAll, BeforeAndAfterEach, Suite}
import org.scalatest.concurrent.Eventually

trait SharedSparkSession
    extends BeforeAndAfterEach
    with BeforeAndAfterAll
    with Eventually { self: Suite =>

  protected val additionalConfig: Map[String, String] = Map.empty

  
  protected override def afterEach(): Unit = {
    super.afterEach()
    // Clear all persistent datasets after each test
    spark.sharedState.cacheManager.clearCache()
  }

  // Mocks data for testing
  def mockDataInDataFrame(numberOfRows: Int): DataFrame = {
    def stringed(n: Int) = s"""{"id": "$n","name": "MAC-$n", "address": "MAC-${n + 1}", "age": "${n + 1}", "company": "MAC-$n", "designation": "MAC-$n", "salary": "${n * 10000}" }"""
    val texts: Seq[String] = (1 to numberOfRows).map { x => stringed(x) }
    val rdd: RDD[String] = spark.sparkContext.parallelize(texts)
    val dataFrame: DataFrame = spark.read.json(rdd)
    dataFrame
  }
} 
Example 192
Source File: SharedSparkSession.scala    From gimel   with Apache License 2.0 5 votes vote down vote up
package com.paypal.gimel.sql

import org.apache.spark.SparkConf
import org.apache.spark.sql.{SparkSession, SQLContext}
import org.apache.spark.sql.internal.SQLConf
import org.scalatest.{BeforeAndAfterAll, BeforeAndAfterEach, FunSpec, Suite}
import org.scalatest.concurrent.Eventually

trait SharedSparkSession
    extends FunSpec
    with BeforeAndAfterEach
    with BeforeAndAfterAll
    with Eventually { self: Suite =>

  
  protected override def afterEach(): Unit = {
    super.afterEach()
    // Clear all persistent datasets after each test
    spark.sharedState.cacheManager.clearCache()
  }
} 
Example 193
Source File: MLlibTestSparkContext.scala    From yggdrasil   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.mllib.util

import org.scalatest.{BeforeAndAfterAll, Suite}

import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.sql.SQLContext

trait MLlibTestSparkContext extends BeforeAndAfterAll { self: Suite =>
  @transient var sc: SparkContext = _
  @transient var sqlContext: SQLContext = _

  override def beforeAll() {
    super.beforeAll()
    val conf = new SparkConf()
      .setMaster("local[2]")
      .setAppName("MLlibUnitTest")
    sc = new SparkContext(conf)
    SQLContext.clearActive()
    sqlContext = new SQLContext(sc)
    SQLContext.setActive(sqlContext)
  }

  override def afterAll() {
    sqlContext = null
    SQLContext.clearActive()
    if (sc != null) {
      sc.stop()
    }
    sc = null
    super.afterAll()
  }
} 
Example 194
Source File: IntegrationBeforeAndAfterAll.scala    From cosmos   with Apache License 2.0 5 votes vote down vote up
package com.mesosphere.cosmos

import com.google.common.io.CharStreams
import com.mesosphere.cosmos.circe.Decoders.parse
import com.mesosphere.cosmos.http.CosmosRequests
import com.mesosphere.cosmos.test.CosmosIntegrationTestClient.CosmosClient
import com.mesosphere.cosmos.thirdparty.marathon.model.AppId
import io.lemonlabs.uri.dsl._
import com.twitter.finagle.http.Status
import io.circe.jawn.decode
import java.io.InputStreamReader
import org.scalatest.Assertion
import org.scalatest.BeforeAndAfterAll
import org.scalatest.Suite
import org.scalatest.concurrent.Eventually
import scala.concurrent.duration._

trait IntegrationBeforeAndAfterAll extends BeforeAndAfterAll with Eventually { this: Suite =>

  private[this] lazy val logger = org.slf4j.LoggerFactory.getLogger(getClass)

  private[this] val universeUri = "https://downloads.mesosphere.com/universe/02493e40f8564a39446d06c002f8dcc8e7f6d61f/repo-up-to-1.8.json"
  private[this] val universeConverterUri = "https://universe-converter.mesosphere.com/transform?url=" + universeUri

  override def beforeAll(): Unit = {
    Requests.deleteRepository(Some("Universe"))

    val customPkgMgrResource = s"/${ItObjects.customManagerAppName}.json"

    logger.info(s"Creating marathon app from $customPkgMgrResource")
    Requests
      .postMarathonApp(
        parse(
          Option(this.getClass.getResourceAsStream(customPkgMgrResource)) match {
            case Some(is) =>
              CharStreams.toString(new InputStreamReader(is))
            case _ =>
              throw new IllegalStateException(s"Unable to load classpath resource: $customPkgMgrResource")
          }
        ).toOption.get.asObject.get
      )
    Requests.waitForDeployments()

    Requests.addRepository(
      "Universe",
      universeConverterUri,
      Some(0)
    )

    Requests.addRepository(
      "V5Testpackage",
      ItObjects.V5TestPackage,
      Some(0)
    )

    Requests.addRepository(
      "V4TestUniverse",
      ItObjects.V4TestUniverseConverterURI,
      Some(0)
    )

    // This package is present only in V4TestUniverse and this method ensures that the
    // package collection cache is cleared before starting the integration tests
    val _ = waitUntilCacheReloads()
  }

  override def afterAll(): Unit = {
    Requests.deleteRepository(Some("V4TestUniverse"))
    Requests.deleteRepository(Some("V5Testpackage"))
    val customMgrAppId = AppId(ItObjects.customManagerAppName)
    Requests.deleteMarathonApp(customMgrAppId)
    Requests.waitForMarathonAppToDisappear(customMgrAppId)
    Requests.deleteRepository(None, Some(universeConverterUri))
    val _ = Requests.addRepository("Universe", "https://universe.mesosphere.com/repo")
  }

  private[this] def waitUntilCacheReloads(): Assertion = {
    val packageName = "helloworld-invalid"
    eventually(timeout(2.minutes), interval(10.seconds)) {
      val response = CosmosClient.submit(
        CosmosRequests.packageDescribeV3(rpc.v1.model.DescribeRequest(packageName, None))
      )
      assertResult(Status.Ok)(response.status)
      val Right(actualResponse) = decode[rpc.v3.model.DescribeResponse](response.contentString)
      assert(actualResponse.`package`.name == packageName)
    }
  }
} 
Example 195
Source File: DatasetComparer.scala    From lighthouse   with Apache License 2.0 5 votes vote down vote up
package be.dataminded.lighthouse.testing

import be.dataminded.lighthouse.testing.DataFramePrettyPrinter.prettyPrintDataFrame
import org.apache.spark.sql._
import org.apache.spark.sql.functions._
import org.scalatest.Suite

trait DatasetComparer extends DatasetComparerLike {
  self: Suite =>
}

sealed trait DatasetComparerLike {

  def assertDatasetEquality[T](actual: Dataset[T], expected: Dataset[T], orderedComparison: Boolean = true): Unit = {

    def simplifySchema(dataset: Dataset[T]) = dataset.schema.map(field => (field.name, field.dataType))

    assert(simplifySchema(actual) == simplifySchema(expected), schemaMismatchMessage(actual, expected))

    def defaultSortedDataset(ds: Dataset[T]): Dataset[T] = {
      ds.sort(ds.columns.sorted.map(col): _*)
    }

    if (orderedComparison) {
      assert(actual.collect().sameElements(expected.collect()), contentMismatchMessage(actual, expected))
    } else {
      val sortedActual   = defaultSortedDataset(actual)
      val sortedExpected = defaultSortedDataset(expected)
      assert(
        sortedActual.collect().sameElements(sortedExpected.collect()),
        contentMismatchMessage(sortedActual, sortedExpected)
      )
    }
  }

  private def schemaMismatchMessage[T](actual: Dataset[T], expected: Dataset[T]): String =
    s"""
       |Actual schema:
       |${actual.schema}
       |Expected schema:
       |${expected.schema}
     """.stripMargin

  private def contentMismatchMessage[T](actual: Dataset[T], expected: Dataset[T]): String =
    s"""
       |Actual content:
       |${prettyPrintDataFrame(actual.toDF(), 5)}
       |Expected DataFrame Content:
       |${prettyPrintDataFrame(expected.toDF(), 5)}
     """.stripMargin
} 
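A sketch of the trait in use; the local SparkSession setup is an assumption, since the project's own Spark test support is not shown here.

import be.dataminded.lighthouse.testing.DatasetComparer
import org.apache.spark.sql.SparkSession
import org.scalatest.funsuite.AnyFunSuite

class DatasetComparerTest extends AnyFunSuite with DatasetComparer {

  private lazy val spark: SparkSession =
    SparkSession.builder().master("local[2]").appName("DatasetComparerTest").getOrCreate()

  test("identical content in a different row order passes the unordered comparison") {
    import spark.implicits._
    val actual   = Seq((1, "a"), (2, "b")).toDF("id", "value")
    val expected = Seq((2, "b"), (1, "a")).toDF("id", "value")
    assertDatasetEquality(actual, expected, orderedComparison = false)
  }
}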
Example 196
Source File: UsersCache.scala    From infinispan-spark   with Apache License 2.0 5 votes vote down vote up
package org.infinispan.spark.test

import org.infinispan.spark.domain.User
import org.scalatest.{BeforeAndAfterAll, Suite}


trait UsersCache extends BeforeAndAfterAll {
   this: Suite with RemoteTest =>

   protected def getNumEntries: Int

   override protected def beforeAll(): Unit = {
      val MinAge = 15
      val MaxAge = 60
      (1 to getNumEntries).par.foreach { i =>
         val name = "User " + i
         val age = Integer.valueOf(i * (MaxAge - MinAge) / getNumEntries + MinAge)
         val user = new User(name, age)
         user.setName(name)
         user.setAge(age)
         getRemoteCache.put(i, user)
      }
      super.beforeAll()
   }

   override protected def afterAll(): Unit = {
      super.afterAll()
   }
} 
Example 197
Source File: WordCache.scala    From infinispan-spark   with Apache License 2.0 5 votes vote down vote up
package org.infinispan.spark.test

import org.scalatest.{BeforeAndAfterAll, Suite}

import scala.util.Random


trait WordCache extends BeforeAndAfterAll {
   this: Suite with RemoteTest =>

   // https://github.com/bmarcot/haiku/blob/master/haiku.scala
   val adjs = List("autumn", "hidden", "bitter", "misty", "silent",
      "empty", "dry", "dark", "summer", "icy", "delicate", "quiet", "white", "cool",
      "spring", "winter", "patient", "twilight", "dawn", "crimson", "wispy",
      "weathered", "blue", "billowing", "broken", "cold", "damp", "falling",
      "frosty", "green", "long", "late", "lingering", "bold", "little", "morning",
      "muddy", "old", "red", "rough", "still", "small", "sparkling", "throbbing",
      "shy", "wandering", "withered", "wild", "black", "holy", "solitary",
      "fragrant", "aged", "snowy", "proud", "floral", "restless", "divine",
      "polished", "purple", "lively", "nameless", "puffy", "fluffy",
      "calm", "young", "golden", "avenging", "ancestral", "ancient", "argent",
      "reckless", "daunting", "short", "rising", "strong", "timber", "tumbling",
      "silver", "dusty", "celestial", "cosmic", "crescent", "double", "far", "half",
      "inner", "milky", "northern", "southern", "eastern", "western", "outer",
      "terrestrial", "huge", "deep", "epic", "titanic", "mighty", "powerful")

   val nouns = List("waterfall", "river", "breeze", "moon", "rain",
      "wind", "sea", "morning", "snow", "lake", "sunset", "pine", "shadow", "leaf",
      "dawn", "glitter", "forest", "hill", "cloud", "meadow", "glade",
      "bird", "brook", "butterfly", "bush", "dew", "dust", "field",
      "flower", "firefly", "feather", "grass", "haze", "mountain", "night", "pond",
      "darkness", "snowflake", "silence", "sound", "sky", "shape", "surf",
      "thunder", "violet", "wildflower", "wave", "water", "resonance",
      "sun", "wood", "dream", "cherry", "tree", "fog", "frost", "voice", "paper",
      "frog", "smoke", "star", "sierra", "castle", "fortress", "tiger", "day",
      "sequoia", "cedar", "wrath", "blessing", "spirit", "nova", "storm", "burst",
      "protector", "drake", "dragon", "knight", "fire", "king", "jungle", "queen",
      "giant", "elemental", "throne", "game", "weed", "stone", "apogee", "bang",
      "cluster", "corona", "cosmos", "equinox", "horizon", "light", "nebula",
      "solstice", "spectrum", "universe", "magnitude", "parallax")

   protected def getNumEntries: Int

   private val random = new Random(System.currentTimeMillis())

   private def randomWordFrom(l: List[String]) = l(random.nextInt(l.size))

   private def pickNouns = (for (_ <- 0 to random.nextInt(3)) yield randomWordFrom(nouns)).mkString(" ")

   lazy val wordsCache = getRemoteCache[Int,String]

   override protected def beforeAll(): Unit = {
      (1 to getNumEntries).par.foreach { i =>
         val contents = Seq(randomWordFrom(adjs), pickNouns).mkString(" ")
         wordsCache.put(i, contents)
      }
      super.beforeAll()
   }

   override protected def afterAll(): Unit = {
      super.afterAll()
   }

} 
Example 198
Source File: MLlibTestSparkContext.scala    From sona   with Apache License 2.0 5 votes vote down vote up
package com.tencent.angel.sona.ml.util

import java.io.File

import org.apache.spark.SparkContext
import org.apache.spark.sql.types.UDTRegistration
import org.apache.spark.sql.{SQLContext, SQLImplicits, SparkSession}
import org.apache.spark.util.{SparkUtil, Utils}
import org.scalatest.Suite

trait MLlibTestSparkContext extends TempDirectory { self: Suite =>
  @transient var spark: SparkSession = _
  @transient var sc: SparkContext = _
  @transient var checkpointDir: String = _

  override def beforeAll() {
    super.beforeAll()

    SparkUtil.UDTRegister("org.apache.spark.linalg.Vector", "org.apache.spark.linalg.VectorUDT")
    SparkUtil.UDTRegister("org.apache.spark.linalg.DenseVector", "org.apache.spark.linalg.VectorUDT")
    SparkUtil.UDTRegister("org.apache.spark.linalg.SparseVector", "org.apache.spark.linalg.VectorUDT")
    SparkUtil.UDTRegister("org.apache.spark.linalg.Matrix", "org.apache.spark.linalg.MatrixUDT")
    SparkUtil.UDTRegister("org.apache.spark.linalg.DenseMatrix", "org.apache.spark.linalg.MatrixUDT")
    SparkUtil.UDTRegister("org.apache.spark.linalg.SparseMatrix", "org.apache.spark.linalg.MatrixUDT")

    spark = SparkSession.builder
      .master("local[2]")
      .appName("MLlibUnitTest")
      .getOrCreate()
    sc = spark.sparkContext

    checkpointDir = SparkUtil.createDirectory(tempDir.getCanonicalPath, "checkpoints").toString
    sc.setCheckpointDir(checkpointDir)
  }

  override def afterAll() {
    try {
      SparkUtil.deleteRecursively(new File(checkpointDir))
      SparkSession.clearActiveSession()
      if (spark != null) {
        spark.stop()
      }
      spark = null
    } finally {
      super.afterAll()
    }
  }

  /**
   * A helper object for importing SQL implicits.
   *
   * Note that the alternative of importing `spark.implicits._` is not possible here.
   * This is because we create the `SQLContext` immediately before the first test is run,
   * but the implicits import is needed in the constructor.
   */
  protected object testImplicits extends SQLImplicits {
    protected override def _sqlContext: SQLContext = self.spark.sqlContext
  }
} 
Example 199
Source File: SparkLocalContext.scala    From cosine-lsh-join-spark   with MIT License 5 votes vote down vote up
package com.soundcloud.lsh

import java.util.Properties

import org.scalatest.{BeforeAndAfterAll, Suite}
import org.apache.log4j.PropertyConfigurator
import org.apache.spark.{SparkConf, SparkContext}

trait SparkLocalContext extends BeforeAndAfterAll {
  self: Suite =>
  var sc: SparkContext = _

  override def beforeAll() {
    loadTestLog4jConfig()

    val conf = new SparkConf().
      setAppName("test").
      setMaster("local")
    sc = new SparkContext(conf)

    super.beforeAll()
  }

  override def afterAll() {
    if (sc != null) sc.stop()
    super.afterAll()
  }

  private def loadTestLog4jConfig(): Unit = {
    val props = new Properties
    props.load(getClass.getResourceAsStream("/log4j.properties"))
    PropertyConfigurator.configure(props)
  }
} 
Example 200
Source File: TransformerSerialization.scala    From seahorse-workflow-executor   with Apache License 2.0 5 votes vote down vote up
package io.deepsense.deeplang.doperables.spark.wrappers.transformers

import java.nio.file.{Files, Path}

import org.apache.commons.io.FileUtils
import org.scalatest.{BeforeAndAfter, Suite}

import io.deepsense.deeplang.doperables.Transformer
import io.deepsense.deeplang.doperables.dataframe.DataFrame
import io.deepsense.deeplang.{DeeplangIntegTestSupport, ExecutionContext}

trait TransformerSerialization extends Suite with BeforeAndAfter {

  var tempDir: Path = _

  before {
    tempDir = Files.createTempDirectory("writeReadTransformer")
  }

  after {
    FileUtils.deleteDirectory(tempDir.toFile)
  }
}

object TransformerSerialization {

  implicit class TransformerSerializationOps(private val transformer: Transformer) {

    def applyTransformationAndSerialization(
        path: Path,
        df: DataFrame)(implicit executionContext: ExecutionContext): DataFrame = {
      val result = transformer._transform(executionContext, df)
      val deserialized = loadSerializedTransformer(path)
      val resultFromSerializedTransformer = deserialized._transform(executionContext, df)
      DeeplangIntegTestSupport.assertDataFramesEqual(result, resultFromSerializedTransformer)
      result
    }

    def loadSerializedTransformer(
        path: Path)(
        implicit executionContext: ExecutionContext): Transformer = {
      val outputPath: Path = path.resolve(this.getClass.getName)
      transformer.save(executionContext, outputPath.toString)
      Transformer.load(executionContext, outputPath.toString)
    }
  }
}