scala.collection.immutable.NumericRange Scala Examples
The following examples show how to use scala.collection.immutable.NumericRange.
Each example is taken verbatim from the named open-source project and source file.
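As a quick orientation before the project examples, here is a minimal sketch (not taken from any of the projects below) of the usual ways to obtain a NumericRange from the standard library:

import scala.collection.immutable.NumericRange

// `until`/`to` combined with `by` on Long operands yield a NumericRange[Long]
val evens: NumericRange[Long] = 0L until 20L by 2L

// The companion object builds ranges explicitly, given an implicit Integral[Long]
val odds: NumericRange.Inclusive[Long] = NumericRange.inclusive(1L, 9L, 2L)

// A NumericRange behaves like any other immutable IndexedSeq
assert(evens.length == 10 && odds.sum == 25L)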
Example 1
Source File: FloatPointRange.scala From spatial with MIT License
package emul

import scala.collection.immutable.NumericRange

case class FloatPointRange(
  override val start: FloatPoint,
  override val end: FloatPoint,
  override val step: FloatPoint,
  override val isInclusive: Boolean
) extends NumericRange[FloatPoint](start, end, step, isInclusive)(FloatPoint.FloatPointIsIntegral) {

  override def copy(start: FloatPoint, end: FloatPoint, step: FloatPoint): FloatPointRange = {
    FloatPointRange(start, end, step, isInclusive)
  }
}
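The pattern above — passing an Integral evidence to the NumericRange constructor and overriding the abstract copy method — is what any NumericRange subclass needs. A minimal sketch of the same pattern with a standard type, so it compiles without spatial's emul package (LongStepRange is a hypothetical name, not part of the project):

import scala.collection.immutable.NumericRange
import scala.math.Numeric.LongIsIntegral

// Hypothetical subclass mirroring FloatPointRange, using the built-in Integral[Long]
case class LongStepRange(
  override val start: Long,
  override val end: Long,
  override val step: Long,
  override val isInclusive: Boolean
) extends NumericRange[Long](start, end, step, isInclusive)(LongIsIntegral) {
  // NumericRange declares copy as abstract, so every subclass must provide it
  override def copy(start: Long, end: Long, step: Long): LongStepRange =
    LongStepRange(start, end, step, isInclusive)
}

// LongStepRange(0L, 10L, 3L, isInclusive = false).toList == List(0L, 3L, 6L, 9L)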
Example 2
Source File: FixedPointRange.scala From spatial with MIT License
package emul

import scala.collection.immutable.NumericRange

case class FixedPointRange(
  override val start: FixedPoint,
  override val end: FixedPoint,
  override val step: FixedPoint,
  override val isInclusive: Boolean
) extends NumericRange[FixedPoint](start, end, step, isInclusive)(FixedPoint.FixedPointIsIntegral) {

  override def copy(start: FixedPoint, end: FixedPoint, step: FixedPoint) = {
    FixedPointRange(start, end, step, isInclusive)
  }
}
Example 3
Source File: SparkBlockedVector.scala From DynaML with Apache License 2.0
package io.github.mandar2812.dynaml.algebra

import breeze.linalg.{DenseVector, NumericOps}
import org.apache.spark.rdd.RDD
import org.apache.spark.storage.StorageLevel

import scala.collection.immutable.NumericRange

  def vertcat(vectors: SparkBlockedVector*): SparkBlockedVector = {
    //sanity check
    assert(vectors.map(_.colBlocks).distinct.length == 1,
      "In case of vertical concatenation of matrices their columns sizes must be equal")

    val sizes = vectors.map(_.rowBlocks)

    new SparkBlockedVector(vectors.zipWithIndex.map(couple => {
      val offset = sizes.slice(0, couple._2).sum
      couple._1._data.map(c => (c._1 + offset, c._2))
    }).reduceLeft((a, b) => a.union(b)))
  }
}
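This snippet is excerpted from inside an enclosing object (its opening line is omitted by the listing; the final brace closes it). The core of vertcat is re-keying each operand's blocks by a running row offset and then unioning the underlying RDDs. A stripped-down sketch of that step with plain Spark RDDs, independent of DynaML's SparkBlockedVector internals (the names below are illustrative only):

import breeze.linalg.DenseVector
import org.apache.spark.rdd.RDD

object BlockStackSketch {
  // Blocks keyed by their row-block index (illustrative type, not DynaML's)
  type Blocks = RDD[(Long, DenseVector[Double])]

  // Shift the second operand's block indices past the first operand's blocks,
  // then union the two RDDs: the same offset-and-union step used by vertcat above
  def stack(a: Blocks, aRowBlocks: Long, b: Blocks): Blocks =
    a.union(b.map { case (idx, block) => (idx + aRowBlocks, block) })
}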
Example 4
Source File: ParallelizedWithLocalityRDD.scala From cloud-integration with Apache License 2.0
package org.apache.spark.cloudera

import scala.collection.immutable.NumericRange
import scala.collection.mutable.ArrayBuffer
import scala.reflect.ClassTag

import org.apache.spark._
import org.apache.spark.rdd.{ParallelCollectionPartition, RDD}

  def slice[T: ClassTag](seq: Seq[T], numSlices: Int): Seq[Seq[T]] = {
    if (numSlices < 1) {
      throw new IllegalArgumentException("Positive number of partitions required")
    }
    // Sequences need to be sliced at the same set of index positions for operations
    // like RDD.zip() to behave as expected
    def positions(length: Long, numSlices: Int): Iterator[(Int, Int)] = {
      (0 until numSlices).iterator.map { i =>
        val start = ((i * length) / numSlices).toInt
        val end = (((i + 1) * length) / numSlices).toInt
        (start, end)
      }
    }
    seq match {
      case r: Range =>
        positions(r.length, numSlices).zipWithIndex.map { case ((start, end), index) =>
          // If the range is inclusive, use inclusive range for the last slice
          if (r.isInclusive && index == numSlices - 1) {
            new Range.Inclusive(r.start + start * r.step, r.end, r.step)
          } else {
            new Range(r.start + start * r.step, r.start + end * r.step, r.step)
          }
        }.toSeq.asInstanceOf[Seq[Seq[T]]]
      case nr: NumericRange[T] =>
        // For ranges of Long, Double, BigInteger, etc
        val slices = new ArrayBuffer[Seq[T]](numSlices)
        var r = nr
        for ((start, end) <- positions(nr.length, numSlices)) {
          val sliceSize = end - start
          slices += r.take(sliceSize).asInstanceOf[Seq[T]]
          r = r.drop(sliceSize)
        }
        slices
      case _ =>
        val array = seq.toArray // To prevent O(n^2) operations for List etc
        positions(array.length, numSlices).map { case (start, end) =>
          array.slice(start, end).toSeq
        }.toSeq
    }
  }
}
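As in the previous example, the method is excerpted from an enclosing object whose opening line is omitted; the final brace closes it. The NumericRange branch exists because take and drop on a NumericRange return NumericRanges, so each partition keeps a compact range representation instead of a materialized sequence. A small sketch of that slicing idea outside Spark, assuming a NumericRange[Long] input (sliceLongRange is a hypothetical helper mirroring the positions/take/drop logic above):

import scala.collection.immutable.NumericRange

// Split a NumericRange[Long] into n contiguous slices with take/drop,
// so every slice is itself a NumericRange rather than a materialized Seq
def sliceLongRange(nr: NumericRange[Long], n: Int): Seq[NumericRange[Long]] = {
  require(n >= 1, "Positive number of slices required")
  var rest = nr
  (0 until n).map { i =>
    val start = ((i.toLong * nr.length) / n).toInt
    val end = (((i + 1).toLong * nr.length) / n).toInt
    val slice = rest.take(end - start)
    rest = rest.drop(end - start)
    slice
  }
}

// sliceLongRange(1L to 10L, 3).map(_.toList)
// => List(List(1, 2, 3), List(4, 5, 6), List(7, 8, 9, 10))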