scala.collection.immutable.IndexedSeq Scala Examples
The following examples show how to use scala.collection.immutable.IndexedSeq.
Each example is drawn from an open-source project; the source file and license are noted above the code.
Example 1
Source File: ClosestPlusPlusClustererSpec.scala From fotm-info with MIT License
import info.fotm.clustering._
import info.fotm.clustering.implementations.ClosestPlusPlusClusterer
import info.fotm.util.MathVector
import org.scalactic.Equality
import org.scalatest._

import scala.collection.immutable.{TreeMap, IndexedSeq}

class ClosestPlusPlusClustererSpec extends FlatSpec with Matchers with ClustererSpecBase {
  val clusterer = new ClosestPlusPlusClusterer

  val input = Seq(
    MathVector(10, 0, 0),   // 0
    MathVector(11, 0, 0),   // 1
    MathVector(12, 0, 0),   // 2
    MathVector(0, 10, 0),   // 3
    MathVector(0, 11, 0),   // 4
    MathVector(0, 12, 0),   // 5
    MathVector(0, 0, 10),   // 6
    MathVector(0, 0, 11),   // 7
    MathVector(0, 0, 12)    // 8
  )

  "init" should "correctly init clusters" in {
    val expected = Seq(
      Seq(input(0)),
      Seq(input(5)),
      Seq(input(8))
    )
    val (clusters, _) = clusterer.init(input, 3)
    clusters should contain theSameElementsAs expected
  }

  "init" should "correctly remove cluster points" in {
    val expected = input diff List(0, 5, 8).map(input)
    val (_, points) = clusterer.init(input, 3)
    points should contain theSameElementsAs expected
  }

  "clusterize" should "correctly group simple vectors" in {
    val expected = Set(
      Seq(0, 1, 2).map(input),
      Seq(3, 4, 5).map(input),
      Seq(6, 7, 8).map(input)
    )
    val clusters: Set[Seq[MathVector]] = clusterer.clusterize(input, 3)
    expected.foreach(clusters should contain (_))
  }
}
Example 2
Source File: ClusterInternalsPublisherSpec.scala From lithium with Apache License 2.0
package akka.cluster.swissborg

import akka.actor.ActorSystem
import akka.cluster.ClusterEvent.{ReachabilityChanged, SeenChanged}
import akka.cluster.Reachability
import akka.testkit.{ImplicitSender, TestKit, TestProbe}
import com.swissborg.lithium.internals.{LithiumReachabilityChanged, LithiumSeenChanged}
import org.scalatest.BeforeAndAfterAll
import org.scalatest.matchers.should.Matchers
import org.scalatest.wordspec.AnyWordSpecLike

import scala.collection.immutable.IndexedSeq

class ClusterInternalsPublisherSpec
    extends TestKit(ActorSystem("lithium"))
    with ImplicitSender
    with AnyWordSpecLike
    with Matchers
    with BeforeAndAfterAll {

  override def afterAll(): Unit = TestKit.shutdownActorSystem(system)

  "ClusterInternalsPublisher" must {
    "convert and publish ReachabilityChanged events" in {
      system.actorOf(ClusterInternalsPublisher.props)

      val probe = TestProbe()
      system.eventStream.subscribe(probe.ref, classOf[LithiumReachabilityChanged])
      system.eventStream.publish(ReachabilityChanged(Reachability(IndexedSeq.empty[Reachability.Record], Map.empty)))

      probe.expectMsgType[LithiumReachabilityChanged]
    }

    "convert and publish SeenChanged events" in {
      system.actorOf(ClusterInternalsPublisher.props)

      val probe = TestProbe()
      system.eventStream.subscribe(probe.ref, classOf[LithiumSeenChanged])
      system.eventStream.publish(SeenChanged(false, Set.empty))

      probe.expectMsg(LithiumSeenChanged(false, Set.empty))
    }
  }
}
Example 3
Source File: CovariateProducer.scala From aloha with MIT License
package com.eharmony.aloha.dataset

import com.eharmony.aloha.dataset.density.{Dense, Sparse}
import com.eharmony.aloha.dataset.json.CovariateJson
import com.eharmony.aloha.reflect.RefInfo
import com.eharmony.aloha.semantics.compiled.CompiledSemantics
import com.eharmony.aloha.semantics.func.GenAggFunc

import scala.collection.immutable.IndexedSeq
import scala.concurrent.ExecutionContext.Implicits.global
import scala.util.{Failure, Success, Try}

sealed trait CovariateProducer[@specialized(Double) Density] { self: CompilerFailureMessages =>

  protected[this] def featExtFuncProd[A](successes: IndexedSeq[(String, GenAggFunc[A, Density])]): FeatureExtractorFunction[A, Density]

  protected[this] def refInfoB(): RefInfo[Density]

  protected[this] def getCovariates[A](
      semantics: CompiledSemantics[A],
      cj: CovariateJson[Density],
      defDefault: Option[Density] = None): Try[FeatureExtractorFunction[A, Density]] = {

    // Get a new semantics with the imports changed to reflect the imports from the Json Spec
    // Import of ExecutionContext.Implicits.global is necessary.
    val semanticsWithImports = semantics.copy[A](imports = cj.imports)

    def compile(it: Iterator[json.Spec[Density]], successes: List[(String, GenAggFunc[A, Density])]): Try[FeatureExtractorFunction[A, Density]] = {
      if (!it.hasNext)
        Success { featExtFuncProd(successes.reverse.toIndexedSeq) }
      else {
        val spec = it.next()
        val f = semanticsWithImports.createFunction[Density](spec.spec, spec.defVal orElse defDefault)(refInfoB())
        f match {
          case Left(msgs)     => Failure { failure(spec.name, msgs) }
          case Right(success) => compile(it, (spec.name, success) :: successes)
        }
      }
    }

    compile(cj.features.iterator, Nil)
  }
}

trait SparseCovariateProducer extends CovariateProducer[Iterable[(String, Double)]] { self: CompilerFailureMessages =>
  protected[this] final def featExtFuncProd[A](successes: IndexedSeq[(String, GenAggFunc[A, Sparse])]) =
    SparseFeatureExtractorFunction(successes)
  protected[this] final def refInfoB() = RefInfo[Sparse]
}

trait DenseCovariateProducer extends CovariateProducer[Double] { self: CompilerFailureMessages =>
  protected[this] final def featExtFuncProd[A](successes: IndexedSeq[(String, GenAggFunc[A, Dense])]) =
    DenseFeatureExtractorFunction(successes)
  protected[this] final def refInfoB() = RefInfo[Dense]
}
Example 4
Source File: RepositoryTemplate.scala From akka-http-circe-json-template with Apache License 2.0
package com.vitorsvieira.http.repository

import com.vitorsvieira.http.model.ModelTemplate

import scala.collection.immutable.IndexedSeq
import scala.concurrent.Future
import scala.concurrent.ExecutionContext.Implicits.global

object RepositoryTemplate {

  def getModels(amount: Int): Future[IndexedSeq[ModelTemplate]] = Future {
    require(amount > 0, "The amount must be greater than zero.")
    (1 to amount).map { i ⇒
      ModelTemplate(s"Model$i", i, i.toLong, i.toFloat, i.toDouble, Seq(1, 2, 3), List(1, 2, 3))
    }
  }

  def getModelsByName(name: String): Future[ModelTemplate] = Future {
    require(name.nonEmpty, "The name must be present.")
    ModelTemplate(s"Model-$name", 1, 1.toLong, 1.toFloat, 1.toDouble, Seq(1, 2, 3), List(1, 2, 3))
  }
}
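A minimal caller sketch for the repository above (the Await is only to keep the demo synchronous; real code would map over the Future instead):

import com.vitorsvieira.http.repository.RepositoryTemplate

import scala.concurrent.Await
import scala.concurrent.duration._

// Fetch three generated models and block briefly for the result (demo only).
val models = Await.result(RepositoryTemplate.getModels(3), 2.seconds)
models.foreach(println)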
Example 5
Source File: Distribution.scala From BigDatalog with Apache License 2.0 (the same file appears near-verbatim in the Spark-2.3.1, spark1.52, iolap, multi-tenancy-spark, drizzle-spark, SparkCore, and sparkoscope projects)
package org.apache.spark.util

import java.io.PrintStream

import scala.collection.immutable.IndexedSeq

// NOTE: this excerpt starts mid-class; the enclosing
// `private[spark] class Distribution` declaration (with its `statCounter`
// and instance `showQuantiles(out)` members) is elided by the excerpt.
  def summary(out: PrintStream = System.out) {
    // scalastyle:off println
    out.println(statCounter)
    showQuantiles(out)
    // scalastyle:on println
  }
}

private[spark] object Distribution {

  def apply(data: Traversable[Double]): Option[Distribution] = {
    if (data.size > 0) {
      Some(new Distribution(data))
    } else {
      None
    }
  }

  def showQuantiles(out: PrintStream = System.out, quantiles: Traversable[Double]) {
    // scalastyle:off println
    out.println("min\t25%\t50%\t75%\tmax")
    quantiles.foreach { q => out.print(q + "\t") }
    out.println
    // scalastyle:on println
  }
}
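Because Distribution.apply returns an Option, construction and printing chain naturally; a minimal sketch (note the class is private[spark], so the caller must live under the org.apache.spark package):

import org.apache.spark.util.Distribution

// apply returns None for empty input, so foreach is a safe way to consume it;
// summary prints the StatCounter line followed by the quantile row.
Distribution(Seq(1.0, 2.0, 3.0, 4.0)).foreach(_.summary())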
Example 6
Source File: Multiplexer.scala From fotm-info with MIT License
package info.fotm.clustering.enhancers

import info.fotm.clustering.{RealClusterer, Clusterer}
import info.fotm.clustering.Clusterer.Cluster
import info.fotm.util.MathVector

import scala.collection.immutable.{TreeMap, IndexedSeq}
import scala.util.Random

trait Multiplexer extends RealClusterer {
  protected lazy val multiplexRng = new Random
  protected lazy val multiplexTurns = 20
  protected lazy val multiplexThreshold = 3

  abstract override def clusterize[T](input: Map[T, MathVector], groupSize: Int): Set[Seq[T]] = {
    class RandomOrder extends Ordering[T] {
      lazy val shuffled: List[(T, MathVector)] = multiplexRng.shuffle(input.toList)
      lazy val lookup: Map[T, Int] = shuffled.zipWithIndex.map { kvi =>
        val ((k, v), i) = kvi
        (k, i)
      }.toMap
      def compare(t1: T, t2: T): Int = lookup(t1).compareTo(lookup(t2))
    }

    val allClusters: IndexedSeq[Seq[T]] =
      for {
        i <- 0 to multiplexTurns
        randomInput = TreeMap.empty[T, MathVector](new RandomOrder) ++ input
        cluster <- super.clusterize(randomInput, groupSize)
      } yield cluster

    val groups = allClusters.groupBy(identity)
    groups.filter(_._2.size >= multiplexThreshold).keySet // take those found at least threshold times
  }
}

class SimpleMultiplexer(underlying: Clusterer, turns: Int, threshold: Int) extends Clusterer {
  protected lazy val multiplexRng = new Random
  protected lazy val multiplexTurns = turns
  protected lazy val multiplexThreshold = threshold

  override def clusterize(input: Cluster, groupSize: Int): Set[Cluster] = {
    val vectors: Map[MathVector, Int] = input.zipWithIndex.toMap

    val allClusters: IndexedSeq[Cluster] =
      for {
        i <- 1 to multiplexTurns
        randomInput = multiplexRng.shuffle(input)
        cluster <- underlying.clusterize(randomInput, groupSize)
      } yield cluster

    val groups = allClusters.groupBy(_.map(vectors).sorted)

    groups
      .filter(kv => kv._2.size >= multiplexThreshold) // take those found at least threshold times
      .map(_._2.head)
      .filter(_.size == groupSize)
      .toSet
  }
}
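A sketch of wiring SimpleMultiplexer over the ClosestPlusPlusClusterer from Example 1 (the turn and threshold values are illustrative, and input is a Seq[MathVector] as in that spec):

import info.fotm.clustering.implementations.ClosestPlusPlusClusterer

// Cluster 20 shuffled copies of the input and keep only the groupings
// that recur in at least 3 runs.
val stable   = new SimpleMultiplexer(new ClosestPlusPlusClusterer, turns = 20, threshold = 3)
val clusters = stable.clusterize(input, groupSize = 3)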
Example 7
Source File: MultiArray2.scala From hail with MIT License
package is.hail.utils

import java.io.Serializable

import scala.collection.immutable.IndexedSeq
import scala.reflect.ClassTag

class MultiArray2[@specialized(Int, Long, Float, Double, Boolean) T](val n1: Int,
                                                                     val n2: Int,
                                                                     val a: Array[T]) extends Serializable with Iterable[T] {

  require(n1 >= 0 && n2 >= 0)
  require(a.length == n1 * n2)

  class Row(val i: Int) extends IndexedSeq[T] {
    require(i >= 0 && i < n1)
    def apply(j: Int): T = {
      if (j < 0 || j >= length) throw new ArrayIndexOutOfBoundsException
      a(i * n2 + j)
    }
    def length: Int = n2
  }

  class Column(val j: Int) extends IndexedSeq[T] {
    require(j >= 0 && j < n2)
    def apply(i: Int): T = {
      if (i < 0 || i >= length) throw new ArrayIndexOutOfBoundsException
      a(i * n2 + j)
    }
    def length: Int = n1
  }

  def row(i: Int) = new Row(i)
  def column(j: Int) = new Column(j)

  def rows: Iterable[Row] = for (i <- rowIndices) yield row(i)
  def columns: Iterable[Column] = for (j <- columnIndices) yield column(j)

  def indices: Iterable[(Int, Int)] = for (i <- 0 until n1; j <- 0 until n2) yield (i, j)

  def rowIndices: Iterable[Int] = 0 until n1
  def columnIndices: Iterable[Int] = 0 until n2

  def apply(i: Int, j: Int): T = {
    require(i >= 0 && i < n1 && j >= 0 && j < n2)
    a(i * n2 + j)
  }

  def update(i: Int, j: Int, x: T): Unit = {
    require(i >= 0 && i < n1 && j >= 0 && j < n2)
    a.update(i * n2 + j, x)
  }

  def update(t: (Int, Int), x: T): Unit = {
    require(t._1 >= 0 && t._1 < n1 && t._2 >= 0 && t._2 < n2)
    update(t._1, t._2, x)
  }

  def array: Array[T] = a

  def zip[S](other: MultiArray2[S]): MultiArray2[(T, S)] = {
    require(n1 == other.n1 && n2 == other.n2)
    new MultiArray2(n1, n2, a.zip(other.a))
  }

  def iterator: Iterator[T] = a.iterator
}

object MultiArray2 {
  def fill[T](n1: Int, n2: Int)(elem: => T)(implicit tct: ClassTag[T]): MultiArray2[T] =
    new MultiArray2[T](n1, n2, Array.fill[T](n1 * n2)(elem))

  def empty[T](implicit tct: ClassTag[T]): MultiArray2[T] =
    new MultiArray2[T](0, 0, Array.empty[T](tct))
}
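A quick usage sketch of the structure above, using only the members it defines:

import is.hail.utils.MultiArray2

// 2x3 matrix of zeros backed by one flat array; update one cell, then read
// a row and a column back. Row and Column extend immutable.IndexedSeq, so
// element-wise Seq equality applies.
val m = MultiArray2.fill(2, 3)(0)
m(0, 1) = 5
assert(m.row(0) == IndexedSeq(0, 5, 0))
assert(m.column(1) == IndexedSeq(5, 0))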
Example 8
Source File: SplitBam.scala From fgbio with MIT License
package com.fulcrumgenomics.bam

import java.io.Closeable
import java.nio.file.Files

import com.fulcrumgenomics.FgBioDef.FgBioEnum
import com.fulcrumgenomics.bam.api.{SamSource, SamWriter}
import com.fulcrumgenomics.cmdline.{ClpGroups, FgBioTool}
import com.fulcrumgenomics.commons.CommonsDef.{PathPrefix, PathToBam, SafelyClosable, javaIterableToIterator, javaIteratorAsScalaIterator}
import com.fulcrumgenomics.commons.io.{Io, PathUtil}
import com.fulcrumgenomics.commons.util.{LazyLogging, SimpleCounter}
import com.fulcrumgenomics.sopt.{arg, clp}
import com.fulcrumgenomics.util.ProgressLogger
import enumeratum.EnumEntry
import htsjdk.samtools._

import scala.collection.immutable.IndexedSeq

sealed trait SplitType extends EnumEntry

object SplitType extends FgBioEnum[SplitType] {
  def values: IndexedSeq[SplitType] = findValues
  case object Library extends SplitType
  case object ReadGroup extends SplitType
}

// Reconstructed: the scraped snippet uses WriterInfo but its definition was
// elided; the fields and Closeable behavior are inferred from the usage below.
private case class WriterInfo(name: String, bam: PathToBam, writer: SamWriter) extends Closeable {
  override def close(): Unit = writer.close()
}

@clp(group = ClpGroups.SamOrBam, description=
  """
    |Splits a BAM into multiple BAMs, one per-read group (or library).
    |
    |The resulting BAMs will be named `<output-prefix>.<read-group-id>.bam`, or `<output-prefix>.<library-name>.bam`
    |when splitting by the library. All reads without a read group, or without a library when splitting by library,
    |will be written to `<output-prefix>.unknown.bam`. If no such reads exist, then no such file will exist.
  """)
class SplitBam
( @arg(flag='i', doc="Input SAM or BAM file.") val input: PathToBam,
  @arg(flag='o', doc="Output prefix for all SAM or BAM files (ex. output/sample-name).") val output: PathPrefix,
  @arg(flag='s', doc="Split by library instead of read group") val splitBy: SplitType = SplitType.ReadGroup,
  @arg(flag='u', doc="The name to use for the unknown file") val unknown: String = "unknown"
) extends FgBioTool with LazyLogging {

  Io.assertReadable(input)
  Io.assertCanWriteFile(output)

  override def execute(): Unit = {
    val in       = SamSource(input)
    val progress = ProgressLogger(logger)

    val unknownBamAndWriter = {
      val unknownBam: PathToBam    = toOutput(unknown)
      val unknownWriter: SamWriter = SamWriter(toOutput(unknown), in.header)
      //new SAMFileWriterFactory().makeWriter(in.h, true, toOutput(unknown).toFile, null)
      WriterInfo(name=unknown, bam=unknownBam, writer=unknownWriter)
    }
    val writers = createWriters(header=in.header, splitBy=splitBy).withDefaultValue(unknownBamAndWriter)
    val counter = new SimpleCounter[WriterInfo]()

    in.foreach { rec =>
      val info = writers(rec.readGroup)
      info.writer += rec
      counter.count(info)
      progress.record(rec)
    }

    writers.values.foreach(_.close())
    unknownBamAndWriter.close()
    in.safelyClose()

    counter.toSeq sortBy { _._1.name } foreach { case (info, count) =>
      logger.info(s"Wrote $count records to ${info.bam.getFileName}")
    }

    if (counter.countOf(unknownBamAndWriter) == 0) {
      Files.delete(unknownBamAndWriter.bam)
    }
  }

  private def createWriters(header: SAMFileHeader, splitBy: SplitType): Map[SAMReadGroupRecord, WriterInfo] = {
    splitBy match {
      case SplitType.Library =>
        header.getReadGroups.toSeq.groupBy { rg => rg.getLibrary }
          .flatMap { case (library, readGroups) =>
            val bam    = toOutput(library)
            val writer = SamWriter(bam, header)
            readGroups.map { rg => rg -> WriterInfo(name=library, bam=bam, writer=writer) }
          }
      case SplitType.ReadGroup =>
        header.getReadGroups.map { rg =>
          val bam    = toOutput(rg.getId)
          val writer = SamWriter(bam, header)
          rg -> WriterInfo(name=rg.getId, bam=bam, writer=writer)
        }.toMap
    }
  }

  private[bam] def toOutput(name: String): PathToBam = {
    val outputDir = output.getParent
    val prefix    = output.getFileName
    outputDir.resolve(PathUtil.sanitizeFileName(s"$prefix.$name.bam"))
  }
}
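For illustration, the tool could be driven in-process as below (hypothetical paths; the constructor's Io assertions require input.bam to exist, and PathToBam/PathPrefix are Path aliases in fgbio's commons):

import java.nio.file.Paths

// Hypothetical invocation: split input.bam by read group, writing
// out/sample.<read-group-id>.bam files.
new SplitBam(input = Paths.get("input.bam"), output = Paths.get("out/sample")).execute()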
Example 9
Source File: GeoJSON.scala From jsoniter-scala with MIT License
package com.github.plokhotnyuk.jsoniter_scala.benchmark

import com.avsystem.commons.serialization.{flatten, transientDefault}
import com.fasterxml.jackson.annotation.JsonSubTypes.Type
import com.fasterxml.jackson.annotation.{JsonSubTypes, JsonTypeInfo}
import com.rallyhealth.weepickle.v1.implicits.{discriminator, dropDefault, key}

import scala.collection.immutable.IndexedSeq

object GeoJSON {
  @discriminator("type")
  @flatten("type")
  @JsonTypeInfo(use = JsonTypeInfo.Id.NAME, property = "type")
  @JsonSubTypes(Array(
    new Type(value = classOf[Point], name = "Point"),
    new Type(value = classOf[MultiPoint], name = "MultiPoint"),
    new Type(value = classOf[LineString], name = "LineString"),
    new Type(value = classOf[MultiLineString], name = "MultiLineString"),
    new Type(value = classOf[Polygon], name = "Polygon"),
    new Type(value = classOf[MultiPolygon], name = "MultiPolygon"),
    new Type(value = classOf[GeometryCollection], name = "GeometryCollection")))
  sealed trait Geometry extends Product with Serializable

  @discriminator("type")
  sealed trait SimpleGeometry extends Geometry

  @key("Point")
  case class Point(coordinates: (Double, Double)) extends SimpleGeometry

  @key("MultiPoint")
  case class MultiPoint(coordinates: IndexedSeq[(Double, Double)]) extends SimpleGeometry

  @key("LineString")
  case class LineString(coordinates: IndexedSeq[(Double, Double)]) extends SimpleGeometry

  @key("MultiLineString")
  case class MultiLineString(coordinates: IndexedSeq[IndexedSeq[(Double, Double)]]) extends SimpleGeometry

  @key("Polygon")
  case class Polygon(coordinates: IndexedSeq[IndexedSeq[(Double, Double)]]) extends SimpleGeometry

  @key("MultiPolygon")
  case class MultiPolygon(coordinates: IndexedSeq[IndexedSeq[IndexedSeq[(Double, Double)]]]) extends SimpleGeometry

  @key("GeometryCollection")
  case class GeometryCollection(geometries: IndexedSeq[SimpleGeometry]) extends Geometry

  @discriminator("type")
  @flatten("type")
  @JsonTypeInfo(use = JsonTypeInfo.Id.NAME, property = "type")
  @JsonSubTypes(Array(
    new Type(value = classOf[Feature], name = "Feature"),
    new Type(value = classOf[FeatureCollection], name = "FeatureCollection")))
  sealed trait GeoJSON extends Product with Serializable

  @discriminator("type")
  sealed trait SimpleGeoJSON extends GeoJSON

  @key("Feature")
  case class Feature(
    @transientDefault @dropDefault properties: Map[String, String] = Map.empty,
    geometry: Geometry,
    @transientDefault @dropDefault bbox: Option[(Double, Double, Double, Double)] = None) extends SimpleGeoJSON

  @key("FeatureCollection")
  case class FeatureCollection(
    features: IndexedSeq[SimpleGeoJSON],
    @transientDefault @dropDefault bbox: Option[(Double, Double, Double, Double)] = None) extends GeoJSON
}
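A small construction sketch using the ADT above (the coordinate values are illustrative):

import scala.collection.immutable.IndexedSeq
import GeoJSON._

// A single point feature wrapped in a collection; tuples model coordinate
// pairs, and bbox/properties fall back to their declared defaults.
val feature    = Feature(properties = Map("name" -> "office"), geometry = Point((13.38, 52.52)))
val collection = FeatureCollection(features = IndexedSeq(feature))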
Example 10
Source File: GoogleMapsAPI.scala From jsoniter-scala with MIT License
package com.github.plokhotnyuk.jsoniter_scala.benchmark

import scala.collection.immutable.IndexedSeq

object GoogleMapsAPI {
  case class Value(
    text: String,
    value: Int)

  case class Elements(
    distance: Value,
    duration: Value,
    status: String)

  case class DistanceMatrix(
    destination_addresses: IndexedSeq[String],
    origin_addresses: IndexedSeq[String],
    rows: IndexedSeq[Rows],
    status: String)

  case class Rows(elements: IndexedSeq[Elements])
}
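Since these are plain case classes, a jsoniter-scala codec can be derived for the top-level type; a sketch assuming a recent jsoniter-scala macros module is on the classpath:

import com.github.plokhotnyuk.jsoniter_scala.core._
import com.github.plokhotnyuk.jsoniter_scala.macros._

// Derive a codec for the whole response shape, then parse a UTF-8 payload.
implicit val codec: JsonValueCodec[GoogleMapsAPI.DistanceMatrix] = JsonCodecMaker.make
val json   = """{"destination_addresses":[],"origin_addresses":[],"rows":[],"status":"OK"}"""
val matrix = readFromArray[GoogleMapsAPI.DistanceMatrix](json.getBytes("UTF-8"))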
Example 11
Source File: SizeSpec.scala From nyaya with GNU Lesser General Public License v2.1
package nyaya.gen

import scala.annotation.switch
import scala.collection.immutable.IndexedSeq

sealed trait SizeSpec {
  def gen : Gen[Int]
  def gen1: Gen[Int]
}

object SizeSpec {

  case object Default extends SizeSpec {
    override val gen  = Gen.chooseSize
    override val gen1 = Gen.chooseSizeMin1
    private[gen] val both = (gen, gen1)
  }

  implicit def default: SizeSpec = Default

  object DisableDefault {
    implicit def _disableDefaultSizeSpec1: SizeSpec = ???
    implicit def _disableDefaultSizeSpec2: SizeSpec = ???
  }

  // ===================================================================================================================

  case class Exactly(value: Int) extends SizeSpec {
    override val gen  = Gen(_ => value)
    override val gen1 = Gen(_ => value)
  }

  implicit def autoFromInt(i: Int): SizeSpec = Exactly(i)

  // ===================================================================================================================

  case class OneOf(possibilities: IndexedSeq[Int]) extends SizeSpec {
    override val (gen, gen1) =
      (possibilities.length: @switch) match {
        case 1 =>
          val e = Exactly(possibilities.head)
          (e.gen, e.gen1)
        case 0 =>
          Default.both
        case _ =>
          val g = Gen.chooseIndexed_!(possibilities)
          val a = g flatMap (n => Gen(_ fixGenSize n))
          val b = g flatMap (n => Gen(_ fixGenSize1 n))
          (a, b)
      }
  }

  implicit def autoFromSeq(possibilities: IndexedSeq[Int]): SizeSpec = OneOf(possibilities)

  // ===================================================================================================================

  implicit def autoFromOption[A](o: Option[A])(implicit ev: A => SizeSpec): SizeSpec =
    o.fold[SizeSpec](Default)(ev)
}
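A sketch of how the implicit conversions above are picked up at call sites:

import scala.collection.immutable.IndexedSeq
import nyaya.gen.SizeSpec

// Each value resolves through one of the implicits defined in SizeSpec.
val exact:    SizeSpec = 10                    // autoFromInt    -> Exactly(10)
val oneOf:    SizeSpec = IndexedSeq(4, 8, 16)  // autoFromSeq    -> OneOf(...)
val fallback: SizeSpec = Option.empty[Int]     // autoFromOption -> Default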