org.scalatest.concurrent.TimeLimits Scala Examples
The following examples show how to use org.scalatest.concurrent.TimeLimits, the ScalaTest trait whose failAfter and cancelAfter methods fail or cancel a test when a block of code exceeds a specified time limit. Each example comes from an open-source project; the source file and license are listed above the code.
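As a quick orientation before the examples, here is a minimal sketch of the pattern most of them follow: mix TimeLimits into a suite, declare a Signaler, and wrap slow code in failAfter. The suite name and the timed operation are placeholders, and the imports assume the ScalaTest 3.1+ style used in Example 9 below.

import org.scalatest.concurrent.{Signaler, ThreadSignaler, TimeLimits}
import org.scalatest.funsuite.AnyFunSuite
import org.scalatest.time.SpanSugar._

// Hypothetical suite, for illustration only.
class TimeLimitsSketchSuite extends AnyFunSuite with TimeLimits {

  // ScalaTest 3.x does not interrupt a timed-out thread by default;
  // ThreadSignaler restores the interrupting behaviour of ScalaTest 2.x.
  implicit val signaler: Signaler = ThreadSignaler

  test("a slow operation fails once the time limit is exceeded") {
    failAfter(2.seconds) {
      // Stand-in for the operation under test; if it ran longer than
      // two seconds, failAfter would throw TestFailedDueToTimeoutException.
      Thread.sleep(100)
    }
  }
}

The SpanSugar import supplies the 2.seconds notation; Example 5 below writes the same idiom as failAfter(60 seconds), and Example 3 builds the limit explicitly with Span(60, Seconds).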
Example 1
Source File: WholeStageCodegenSparkSubmitSuite.scala From XSQL with Apache License 2.0
package org.apache.spark.sql.execution

import org.scalatest.{Assertions, BeforeAndAfterEach, Matchers}
import org.scalatest.concurrent.TimeLimits

import org.apache.spark.{SparkFunSuite, TestUtils}
import org.apache.spark.deploy.SparkSubmitSuite
import org.apache.spark.internal.Logging
import org.apache.spark.sql.{LocalSparkSession, QueryTest, Row, SparkSession}
import org.apache.spark.sql.functions.{array, col, count, lit}
import org.apache.spark.sql.types.IntegerType
import org.apache.spark.unsafe.Platform
import org.apache.spark.util.ResetSystemProperties

// Due to the need to set driver's extraJavaOptions, this test needs to use actual SparkSubmit.
class WholeStageCodegenSparkSubmitSuite extends SparkFunSuite
  with Matchers
  with BeforeAndAfterEach
  with ResetSystemProperties {

  test("Generated code on driver should not embed platform-specific constant") {
    val unusedJar = TestUtils.createJarWithClasses(Seq.empty)

    // HotSpot JVM specific: Set up a local cluster with the driver/executor using mismatched
    // settings of UseCompressedOops JVM option.
    val argsForSparkSubmit = Seq(
      "--class", WholeStageCodegenSparkSubmitSuite.getClass.getName.stripSuffix("$"),
      "--master", "local-cluster[1,1,1024]",
      "--driver-memory", "1g",
      "--conf", "spark.ui.enabled=false",
      "--conf", "spark.master.rest.enabled=false",
      "--conf", "spark.driver.extraJavaOptions=-XX:-UseCompressedOops",
      "--conf", "spark.executor.extraJavaOptions=-XX:+UseCompressedOops",
      unusedJar.toString)
    SparkSubmitSuite.runSparkSubmit(argsForSparkSubmit, "../..")
  }
}

object WholeStageCodegenSparkSubmitSuite extends Assertions with Logging {

  var spark: SparkSession = _

  def main(args: Array[String]): Unit = {
    TestUtils.configTestLog4j("INFO")

    spark = SparkSession.builder().getOrCreate()

    // Make sure the test is run where the driver and the executors uses different object layouts
    val driverArrayHeaderSize = Platform.BYTE_ARRAY_OFFSET
    val executorArrayHeaderSize =
      spark.sparkContext.range(0, 1).map(_ => Platform.BYTE_ARRAY_OFFSET).collect.head.toInt
    assert(driverArrayHeaderSize > executorArrayHeaderSize)

    val df = spark.range(71773).select((col("id") % lit(10)).cast(IntegerType) as "v")
      .groupBy(array(col("v"))).agg(count(col("*")))
    val plan = df.queryExecution.executedPlan
    assert(plan.find(_.isInstanceOf[WholeStageCodegenExec]).isDefined)

    val expectedAnswer =
      Row(Array(0), 7178) :: Row(Array(1), 7178) :: Row(Array(2), 7178) ::
        Row(Array(3), 7177) :: Row(Array(4), 7177) :: Row(Array(5), 7177) ::
        Row(Array(6), 7177) :: Row(Array(7), 7177) :: Row(Array(8), 7177) ::
        Row(Array(9), 7177) :: Nil
    val result = df.collect
    QueryTest.sameRows(result.toSeq, expectedAnswer) match {
      case Some(errMsg) => fail(errMsg)
      case _ =>
    }
  }
}
Example 2
Source File: SparkRBackendSpec.scala From seahorse with Apache License 2.0
package ai.deepsense.workflowexecutor

import org.apache.spark.api.r._
import org.scalatest.concurrent.TimeLimits
import org.scalatest.mockito.MockitoSugar
import org.scalatest.{Matchers, PrivateMethodTester, WordSpec}

import ai.deepsense.workflowexecutor.customcode.CustomCodeEntryPoint

class SparkRBackendSpec
  extends WordSpec
  with MockitoSugar
  with Matchers
  with TimeLimits
  with PrivateMethodTester {

  "Spark R Backend" should {
    "return 0 for Entry Point Id" in {
      val sparkRBackend = new SparkRBackend()
      val customCodeEntryPoint = mock[CustomCodeEntryPoint]
      sparkRBackend.start(customCodeEntryPoint)
      sparkRBackend.entryPointId shouldBe "0"
      sparkRBackend.close()
    }
  }
}
Example 3
Source File: OutputCommitCoordinatorIntegrationSuite.scala From Spark-2.3.1 with Apache License 2.0
package org.apache.spark.scheduler

import org.apache.hadoop.mapred.{FileOutputCommitter, TaskAttemptContext}
import org.scalatest.concurrent.{Signaler, ThreadSignaler, TimeLimits}
import org.scalatest.time.{Seconds, Span}

import org.apache.spark.{LocalSparkContext, SparkConf, SparkContext, SparkFunSuite, TaskContext}
import org.apache.spark.util.Utils

class OutputCommitCoordinatorIntegrationSuite
  extends SparkFunSuite
  with LocalSparkContext
  with TimeLimits {

  // Necessary to make ScalaTest 3.x interrupt a thread on the JVM like ScalaTest 2.2.x
  implicit val defaultSignaler: Signaler = ThreadSignaler

  override def beforeAll(): Unit = {
    super.beforeAll()
    val conf = new SparkConf()
      .set("spark.hadoop.outputCommitCoordination.enabled", "true")
      .set("spark.hadoop.mapred.output.committer.class",
        classOf[ThrowExceptionOnFirstAttemptOutputCommitter].getCanonicalName)
    sc = new SparkContext("local[2, 4]", "test", conf)
  }

  test("exception thrown in OutputCommitter.commitTask()") {
    // Regression test for SPARK-10381
    failAfter(Span(60, Seconds)) {
      val tempDir = Utils.createTempDir()
      try {
        sc.parallelize(1 to 4, 2).map(_.toString).saveAsTextFile(tempDir.getAbsolutePath + "/out")
      } finally {
        Utils.deleteRecursively(tempDir)
      }
    }
  }
}

private class ThrowExceptionOnFirstAttemptOutputCommitter extends FileOutputCommitter {
  override def commitTask(context: TaskAttemptContext): Unit = {
    val ctx = TaskContext.get()
    if (ctx.attemptNumber < 1) {
      throw new java.io.FileNotFoundException("Intentional exception")
    }
    super.commitTask(context)
  }
}
Example 4
Source File: UnpersistSuite.scala From Spark-2.3.1 with Apache License 2.0
package org.apache.spark

import org.scalatest.concurrent.{Signaler, ThreadSignaler, TimeLimits}
import org.scalatest.time.{Millis, Span}

class UnpersistSuite extends SparkFunSuite with LocalSparkContext with TimeLimits {

  // Necessary to make ScalaTest 3.x interrupt a thread on the JVM like ScalaTest 2.2.x
  implicit val defaultSignaler: Signaler = ThreadSignaler

  test("unpersist RDD") {
    sc = new SparkContext("local", "test")
    val rdd = sc.makeRDD(Array(1, 2, 3, 4), 2).cache()
    rdd.count
    assert(sc.persistentRdds.isEmpty === false)
    rdd.unpersist()
    assert(sc.persistentRdds.isEmpty === true)

    failAfter(Span(3000, Millis)) {
      try {
        while (!sc.getRDDStorageInfo.isEmpty) {
          Thread.sleep(200)
        }
      } catch {
        case _: Throwable => Thread.sleep(10)
          // Do nothing. We might see exceptions because block manager
          // is racing this thread to remove entries from the driver.
      }
    }
    assert(sc.getRDDStorageInfo.isEmpty === true)
  }
}
Example 5
Source File: DriverSuite.scala From Spark-2.3.1 with Apache License 2.0
package org.apache.spark

import java.io.File

import org.scalatest.concurrent.{Signaler, ThreadSignaler, TimeLimits}
import org.scalatest.prop.TableDrivenPropertyChecks._
import org.scalatest.time.SpanSugar._

import org.apache.spark.util.Utils

class DriverSuite extends SparkFunSuite with TimeLimits {

  // Necessary to make ScalaTest 3.x interrupt a thread on the JVM like ScalaTest 2.2.x
  implicit val defaultSignaler: Signaler = ThreadSignaler

  ignore("driver should exit after finishing without cleanup (SPARK-530)") {
    val sparkHome = sys.props.getOrElse("spark.test.home", fail("spark.test.home is not set!"))

    val masters = Table("master", "local", "local-cluster[2,1,1024]")
    forAll(masters) { (master: String) =>
      val process = Utils.executeCommand(
        Seq(s"$sparkHome/bin/spark-class", "org.apache.spark.DriverWithoutCleanup", master),
        new File(sparkHome),
        Map("SPARK_TESTING" -> "1", "SPARK_HOME" -> sparkHome))
      failAfter(60 seconds) { process.waitFor() }
      // Ensure we still kill the process in case it timed out
      process.destroy()
    }
  }
}

object DriverWithoutCleanup {
  def main(args: Array[String]) {
    Utils.configTestLog4j("INFO")
    val conf = new SparkConf
    val sc = new SparkContext(args(0), "DriverWithoutCleanup", conf)
    sc.parallelize(1 to 100, 4).count()
  }
}
Example 6
Source File: SizingTest.scala From ingraph with Eclipse Public License 1.0
package ingraph.ire.nodes

import akka.actor.{ActorRef, Props, actorRef2Scala}
import ingraph.ire.datatypes.{JoinCache, Tuple}
import ingraph.ire.engine.RelationalEngine
import ingraph.ire.inputs.InputTransactionFactory
import ingraph.ire.messages.{ChangeSet, Primary, Secondary, Terminator}
import ingraph.ire.nodes.binary.JoinNode
import ingraph.ire.nodes.unary.{ProductionNode, SelectionNode}
import ingraph.ire.util.SizeCounter
import ingraph.ire.util.TestUtil.{mask, tuple}
import org.scalatest.WordSpec
import org.scalatest.concurrent.TimeLimits

import scala.collection.mutable

class SizingTest extends WordSpec with TimeLimits {

  import ingraph.ire.util.Utils.conversions._

  class TestQuery1 extends RelationalEngine {
    override val production: ActorRef = system.actorOf(Props(new ProductionNode("TestQuery")))
    override val inputLookup: Map[String, (ChangeSet) => Unit] = Map(
      "testval" -> ((cs: ChangeSet) => { joiner ! Primary(cs); joiner ! Secondary(cs) })
    )
    override val terminator: Terminator = Terminator(Vector(forwarder ! _), production)
    val forwarder = newLocal(Props(new SelectionNode(production, a => true)))
    val joiner = newLocal(Props(new JoinNode(forwarder, 2, 2, mask(0), mask(0))), "joiner")
  }

  "SizeCounter" should {
    "count" in {
      val data = mutable.HashMap[Tuple, Int]()
      data(tuple(5, 6, 7)) = 8
      data(tuple(5, 6, 9)) = 10
      assert(SizeCounter.count(data.keys) == 6)
    }

    "count deeper" in {
      val data = new JoinCache
      data.addBinding(tuple(2, 3), tuple(3, 4))
      data.addBinding(tuple(2, 3), tuple(3, 5))
      data.addBinding(tuple(2, 3), tuple(3, 6))
      data.addBinding(tuple(3, 2), tuple(2, 5))
      assert(SizeCounter.countDeeper(data.values) == 8)
    }

    "measure size" in {
      val input = new InputTransactionFactory
      val query = new TestQuery1
      input.subscribe(query.inputLookup)
      val inputTransaction = input.newInputTransaction
      inputTransaction.add("testval", tuple(5, 5))
      inputTransaction.add("testval", tuple(5, 6))
      inputTransaction.add("testval", tuple(5, 7))
      inputTransaction.sendAll()
      assert(query.getCounts == 12)
    }
  }
}
Example 7
Source File: StreamingTestHelper.scala From spark-acid with Apache License 2.0
package com.qubole.spark.hiveacid.streaming

import java.io.{File, IOException}
import java.util.UUID

import com.qubole.spark.hiveacid.TestHelper
import org.apache.spark.network.util.JavaUtils
import org.apache.spark.sql.execution.streaming.MemoryStream
import org.apache.spark.sql.streaming.{OutputMode, StreamingQuery}
import org.scalatest.concurrent.TimeLimits
import org.scalatest.time.SpanSugar

class StreamingTestHelper extends TestHelper with TimeLimits {

  import StreamingTestHelper._

  def runStreaming(tableName: String,
                   outputMode: OutputMode,
                   cols: Seq[String],
                   inputRange: Range,
                   options: List[(String, String)] = List.empty): Unit = {

    val inputData = MemoryStream[Int]
    val ds = inputData.toDS()

    val checkpointDir = createCheckpointDir(namePrefix = "stream.checkpoint").getCanonicalPath

    var query: StreamingQuery = null

    try {
      // Starting streaming query
      val writerDf = ds.map(i => (i * 100, i * 10, i))
        .toDF(cols: _*)
        .writeStream
        .format("HiveAcid")
        .option("table", tableName)
        .outputMode(outputMode)
        .option("checkpointLocation", checkpointDir)
        //.start()

      query = options.map { option =>
        writerDf.option(option._1, option._2)
      }.lastOption.getOrElse(writerDf).start()

      // Adding data for streaming query
      inputData.addData(inputRange)
      failAfter(STREAMING_TIMEOUT) {
        query.processAllAvailable()
      }
    } finally {
      if (query != null) {
        // Terminating streaming query
        query.stop()
        deleteCheckpointDir(checkpointDir)
      }
    }
  }

  def deleteCheckpointDir(fileStr: String): Unit = {
    val file = new File(fileStr)
    if (file != null) {
      JavaUtils.deleteRecursively(file)
    }
  }

  def createCheckpointDir(root: String = System.getProperty("java.io.tmpdir"),
                          namePrefix: String = "spark"): File = {

    var attempts = 0
    val maxAttempts = MAX_DIR_CREATION_ATTEMPTS
    var dir: File = null
    while (dir == null) {
      attempts += 1
      if (attempts > maxAttempts) {
        throw new IOException("Failed to create a temp directory (under " + root + ") after " +
          maxAttempts + " attempts!")
      }
      try {
        dir = new File(root, namePrefix + "-" + UUID.randomUUID.toString)
        if (dir.exists() || !dir.mkdirs()) {
          dir = null
        }
      } catch {
        case e: SecurityException => dir = null
      }
    }
    dir.getCanonicalFile
  }
}

object StreamingTestHelper extends TestHelper with SpanSugar {

  val MAX_DIR_CREATION_ATTEMPTS = 10
  val STREAMING_TIMEOUT = 60.seconds
}
Example 8
Source File: SparkRBackendSpec.scala From seahorse-workflow-executor with Apache License 2.0
package io.deepsense.workflowexecutor

import org.apache.spark.api.r._
import org.scalatest.concurrent.TimeLimits
import org.scalatest.mockito.MockitoSugar
import org.scalatest.{Matchers, PrivateMethodTester, WordSpec}

import io.deepsense.workflowexecutor.customcode.CustomCodeEntryPoint

class SparkRBackendSpec
  extends WordSpec
  with MockitoSugar
  with Matchers
  with TimeLimits
  with PrivateMethodTester {

  "Spark R Backend" should {
    "return 0 for Entry Point Id" in {
      val sparkRBackend = new SparkRBackend()
      val customCodeEntryPoint = mock[CustomCodeEntryPoint]
      sparkRBackend.start(customCodeEntryPoint)
      sparkRBackend.entryPointId shouldBe "0"
      sparkRBackend.close()
    }
  }
}
Example 9
Source File: FutureAwaitWithFailFastFnTest.scala From kafka-connect-common with Apache License 2.0
package com.datamountaineer.streamreactor.connect.concurrent

import java.util.concurrent.Executors

import com.datamountaineer.streamreactor.connect.concurrent.ExecutorExtension._
import org.scalactic.source.Position
import org.scalatest.concurrent.{Eventually, TimeLimits}
import org.scalatest.matchers.should.Matchers
import org.scalatest.time.{Millis, Span}
import org.scalatest.wordspec.AnyWordSpec

import scala.util.{Failure, Try}

class FutureAwaitWithFailFastFnTest extends AnyWordSpec with Matchers with Eventually with TimeLimits {

  "FutureAwaitWithFailFastFn" should {
    "return when all the futures have completed" in {
      val exec = Executors.newFixedThreadPool(10)
      val futures = (1 to 5).map(i =>
        exec.submit {
          Thread.sleep(300)
          i
        })
      eventually {
        val result = FutureAwaitWithFailFastFn(exec, futures)
        exec.isTerminated shouldBe true
        result shouldBe Seq(1, 2, 3, 4, 5)
      }
    }

    "stop when the first futures times out" in {
      val exec = Executors.newFixedThreadPool(6)
      val futures = for (i <- 1 to 10) yield {
        exec.submit {
          if (i == 4) {
            Thread.sleep(1000)
            sys.error("this task failed.")
          } else {
            Thread.sleep(50000)
          }
        }
      }
      eventually {
        val t = Try(FutureAwaitWithFailFastFn(exec, futures))
        t.isFailure shouldBe true
        t.asInstanceOf[Failure[_]].exception.getMessage shouldBe "this task failed."
        exec.isTerminated shouldBe true
      }
    }
  }
}
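A detail that recurs in the Spark suites above (Examples 3, 4, and 5) is the explicit Signaler: as their comments note, ScalaTest 3.x does not interrupt the thread running a timed-out block unless a ThreadSignaler is in implicit scope. The sketch below is illustrative only (class and test names are made up) and shows that pattern next to cancelAfter, the companion to failAfter that marks a timed-out test as canceled rather than failed.

import org.scalatest.concurrent.{Signaler, ThreadSignaler, TimeLimits}
import org.scalatest.time.{Seconds, Span}
import org.scalatest.wordspec.AnyWordSpec

// Hypothetical spec contrasting the two time-limiting methods offered by TimeLimits.
class TimeLimitStylesSpec extends AnyWordSpec with TimeLimits {

  // As in the Spark suites above: make a timeout actively interrupt the running thread.
  implicit val signaler: Signaler = ThreadSignaler

  "a time-limited block" should {
    "fail the test when it overruns" in {
      failAfter(Span(5, Seconds)) {
        // Work that must finish within 5 seconds, otherwise the test fails.
        Thread.sleep(100)
      }
    }

    "cancel the test instead of failing it when it overruns" in {
      cancelAfter(Span(5, Seconds)) {
        // Same limit, but a timeout marks the test as canceled rather than failed.
        Thread.sleep(100)
      }
    }
  }
}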