java.util.Arrays Scala Examples

The following examples show how to use java.util.Arrays in Scala. You can vote up the examples you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example.
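Before the project-specific examples below, here is a minimal, self-contained sketch of the java.util.Arrays calls that recur throughout them (asList, copyOf, copyOfRange, equals, hashCode, toString). The object name ArraysQuickStart is only illustrative and does not come from any of the listed projects.

import java.util.Arrays

object ArraysQuickStart {
  def main(args: Array[String]): Unit = {
    // Fixed-size java.util.List backed by the given elements
    val cmd = Arrays.asList("R", "--version")

    val bytes = Array[Byte](1, 2, 3, 4)
    val copy = Arrays.copyOf(bytes, bytes.length)  // defensive copy of the whole array
    val tail = Arrays.copyOfRange(bytes, 2, 4)     // elements at indices 2 and 3

    // Content-based comparison and hashing (Scala arrays use reference equality)
    println(Arrays.equals(bytes, copy))            // true
    println(Arrays.hashCode(bytes))
    println(s"$cmd ${Arrays.toString(tail)}")      // [R, --version] [3, 4]
  }
}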
Example 1
Source File: RUtils.scala    From drizzle-spark   with Apache License 2.0 6 votes
package org.apache.spark.api.r

import java.io.File
import java.util.Arrays

import org.apache.spark.{SparkEnv, SparkException}

private[spark] object RUtils {
  // Local path where R binary packages built from R source code contained in the spark
  // packages specified with "--packages" or "--jars" command line option reside.
  var rPackages: Option[String] = None

  
  def isRInstalled: Boolean = {
    try {
      val builder = new ProcessBuilder(Arrays.asList("R", "--version"))
      builder.start().waitFor() == 0
    } catch {
      case e: Exception => false
    }
  }
} 
Example 2
Source File: VectorSlicerExample.scala    From spark1.52   with Apache License 2.0 5 votes
// scalastyle:off println
package org.apache.spark.examples.ml

// $example on$
import java.util.Arrays

import org.apache.spark.ml.attribute.{Attribute, AttributeGroup, NumericAttribute}
import org.apache.spark.ml.feature.VectorSlicer

import org.apache.spark.sql.Row
import org.apache.spark.sql.types.StructType
// $example off$
import org.apache.spark.mllib.linalg.Vectors
import org.apache.spark.SparkConf
import org.apache.spark.SparkContext
import org.apache.spark.sql.types.StringType
import org.apache.spark.sql.{SQLContext, DataFrame}

object VectorSlicerExample {
  def main(args: Array[String]): Unit = {
    val conf = new SparkConf().setAppName("VectorSlicerExample")
    val sc = new SparkContext(conf)
    val sqlContext = new SQLContext(sc)

    // $example on$
    val data = Arrays.asList(
      Row(Vectors.sparse(3, Seq((0, -2.0), (1, 2.3)))),
      Row(Vectors.dense(-2.0, 2.3, 0.0))
    )

    val defaultAttr = NumericAttribute.defaultAttr
    val attrs = Array("f1", "f2", "f3").map(defaultAttr.withName)
    val attrGroup = new AttributeGroup("userFeatures", attrs.asInstanceOf[Array[Attribute]])

    val dataset = sqlContext.createDataFrame(data, StructType(Array(attrGroup.toStructField())))

    val slicer = new VectorSlicer().setInputCol("userFeatures").setOutputCol("features")

    slicer.setIndices(Array(1)).setNames(Array("f3"))
    // or slicer.setIndices(Array(1, 2)), or slicer.setNames(Array("f2", "f3"))

    val output = slicer.transform(dataset)
    output.show()
    println(output.select("userFeatures", "features").first())
    // $example off$

    sc.stop()
  }
}
// scalastyle:on println 
Example 3
Source File: PrintProtosTest.scala    From aloha   with MIT License 5 votes
package com.eharmony.aloha.cli.dataset

import java.io.{ByteArrayOutputStream, IOException}
import java.util.Arrays

import com.eharmony.aloha.test.proto.Testing.{PhotoProto, UserProto}
import com.eharmony.aloha.test.proto.Testing.GenderProto.{FEMALE, MALE}
import com.google.protobuf.GeneratedMessage
import org.apache.commons.codec.binary.Base64
import org.junit.runner.RunWith
import org.junit.runners.BlockJUnit4ClassRunner
import org.junit.{Ignore, Test}


@RunWith(classOf[BlockJUnit4ClassRunner])
@Ignore
class PrintProtosTest {
    @Test def testPrintProtos(): Unit = {
        System.out.println(alan)
        System.out.println(kate)
    }

    @throws(classOf[IOException])
    def alan: String = {
        val t = UserProto.newBuilder.
            setId(1).
            setName("Alan").
            setGender(MALE).
            setBmi(23).
            addAllPhotos(Arrays.asList(
                PhotoProto.newBuilder.
                    setId(1).
                    setAspectRatio(1).
                    setHeight(1).
                    build,
                PhotoProto.newBuilder.
                    setId(2).
                    setAspectRatio(2).
                    setHeight(2).build
            )).build
        b64(t)
    }

    def kate: String = {
        val t = UserProto.newBuilder.
            setId(1).
            setName("Kate").
            setGender(FEMALE).
            addAllPhotos(Arrays.asList(
                PhotoProto.newBuilder.
                    setId(3).
                    setAspectRatio(3).
                    setHeight(3).
                    build
            )).build
        b64(t)
    }

    def b64[M <: GeneratedMessage](p: M): String = {
        val baos: ByteArrayOutputStream = new ByteArrayOutputStream
        p.writeTo(baos)
        new String(Base64.encodeBase64(baos.toByteArray))
    }
} 
Example 4
Source File: DynamicExecution.scala    From hazelcast-scala   with Apache License 2.0 5 votes
package com.hazelcast.Scala.serialization

import java.util.{ Arrays, Comparator }
import java.util.concurrent.Callable

import scala.reflect.ClassTag

import com.hazelcast.Scala.{ Aggregator, Pipe }
import com.hazelcast.core.IFunction
import com.hazelcast.map.{ EntryBackupProcessor, EntryProcessor }
import com.hazelcast.nio.{ ObjectDataInput, ObjectDataOutput }
import com.hazelcast.query.Predicate


object DynamicExecution extends DynamicExecution {
  protected def serializeBytecodeFor(cls: Class[_]) = true
}

abstract class DynamicExecution extends SerializerEnum(Defaults) {
  protected def serializeBytecodeFor(cls: Class[_]): Boolean
  private[this] val loaderByClass = new ClassValue[Option[ByteArrayClassLoader]] {
    private[this] val excludePackages = Set("com.hazelcast.", "scala.", "java.", "javax.")
    private def include(cls: Class[_]): Boolean = !excludePackages.exists(cls.getName.startsWith) && serializeBytecodeFor(cls)
    def computeValue(cls: Class[_]): Option[ByteArrayClassLoader] =
      if (include(cls)) {
        try {
          Some(ByteArrayClassLoader(cls))
        } catch {
          case ncdf: NoClassDefFoundError =>
            classByName.get(cls.getName) match {
              case Some((bytes, classForBytes)) if cls == classForBytes => Some(new ByteArrayClassLoader(cls.getName, bytes))
              case _ => throw ncdf
            }
        }
      } else None
  }
  private[this] val classByName = new collection.concurrent.TrieMap[String, (Array[Byte], Class[_])]

  private class ClassBytesSerializer[T: ClassTag] extends StreamSerializer[T] {
    def write(out: ObjectDataOutput, any: T): Unit = {
      out.writeUTF(any.getClass.getName)
      loaderByClass.get(any.getClass) match {
        case Some(cl) => out.writeByteArray(cl.bytes)
        case _ => out.writeByteArray(Array.emptyByteArray)
      }
      UnsafeSerializer.write(out, any)
    }
    def read(inp: ObjectDataInput): T = {
      val className = inp.readUTF()
      val classBytes = inp.readByteArray()
      val cls =
        if (classBytes.length == 0) {
          Class.forName(className)
        } else {
          classByName.get(className) match {
            case Some((bytes, cls)) if Arrays.equals(classBytes, bytes) => cls
            case _ =>
              val cl = new ByteArrayClassLoader(className, classBytes)
              val cls = Class.forName(className, true, cl)
              classByName.put(className, classBytes -> cls)
              cls
          }
        }
      UnsafeSerializer.read(inp, cls).asInstanceOf[T]
    }
  }

  type S[T] = StreamSerializer[T]

  val Function0Ser: S[Function0[_]] = new ClassBytesSerializer
  val Function1Ser: S[Function1[_, _]] = new ClassBytesSerializer
  val Function2Ser: S[Function2[_, _, _]] = new ClassBytesSerializer
  val Function3Ser: S[Function3[_, _, _, _]] = new ClassBytesSerializer
  val PartialFunctionSer: S[PartialFunction[_, _]] = new ClassBytesSerializer
  val EntryProcessorSer: S[EntryProcessor[_, _]] = new ClassBytesSerializer
  val EntryBackupProcessorSer: S[EntryBackupProcessor[_, _]] = new ClassBytesSerializer
  val CallableSer: S[Callable[_]] = new ClassBytesSerializer
  val RunnableSer: S[Runnable] = new ClassBytesSerializer
  val PredicateSer: S[Predicate[_, _]] = new ClassBytesSerializer
  val PipeSer: S[Pipe[_]] = new ClassBytesSerializer
  val AggregatorSer: S[Aggregator[_, _]] = new ClassBytesSerializer
  val ComparatorSer: S[Comparator[_]] = new ClassBytesSerializer
  val IFunctionSer: S[IFunction[_, _]] = new ClassBytesSerializer

} 
Example 5
Source File: ByteArrayCompression.scala    From hazelcast-scala   with Apache License 2.0 5 votes
package com.hazelcast.Scala.serialization.lz4

import java.util.Arrays

import com.hazelcast.nio.serialization.ByteArraySerializer

import net.jpountz.lz4.LZ4Compressor
import net.jpountz.lz4.LZ4FastDecompressor

trait ByteArrayCompression[T] extends ByteArraySerializer[T] {
  protected def comp: (LZ4Compressor, LZ4FastDecompressor)

  abstract override def write(obj: T): Array[Byte] = {
    val serialized = super.write(obj)
    Compression.compress(comp._1)(serialized, serialized.length) {
      case (compressed, len) =>
        Arrays.copyOf(compressed, len)
    }
  }

  abstract override def read(compressed: Array[Byte]): T = {
    Compression.decompress(comp._2)(compressed) {
      case (decompressed, offset, len) =>
        super.read(Arrays.copyOfRange(decompressed, offset, offset + len))
    }
  }
}
trait HighByteArrayCompression[T] extends ByteArrayCompression[T] {
  protected def comp: (LZ4Compressor, LZ4FastDecompressor) = Compression.high
}
trait FastByteArrayCompression[T] extends ByteArrayCompression[T] {
  protected def comp: (LZ4Compressor, LZ4FastDecompressor) = Compression.fast
} 
Example 6
Source File: ByteArrayOutputStream.scala    From hazelcast-scala   with Apache License 2.0 5 votes
package com.hazelcast.Scala.serialization

import java.io.OutputStream
import java.util.Arrays

private[serialization] object ByteArrayOutputStream {
  private final val MAX_ARRAY_SIZE = Integer.MAX_VALUE - 8;
  private def hugeCapacity(minCapacity: Int): Int = {
    if (minCapacity < 0) // overflow
      throw new OutOfMemoryError();
    if (minCapacity > MAX_ARRAY_SIZE) Integer.MAX_VALUE
    else MAX_ARRAY_SIZE
  }
  private[this] val tlOut = new SoftThreadLocal(new ByteArrayOutputStream)
  def borrow[R](thunk: ByteArrayOutputStream => R): R = {
    tlOut.use { out =>
      out.reset()
      out -> thunk(out)
    }
  }
}
private[serialization] class ByteArrayOutputStream
    extends OutputStream {

  import ByteArrayOutputStream._

  private[this] var buf = new Array[Byte](256)
  private[this] var count: Int = 0

  def withArray[T](thunk: (Array[Byte], Int) => T): T = thunk(buf, count)
  def copyArray: Array[Byte] = Arrays.copyOf(buf, count)
  private def ensureCapacity(minCapacity: Int): Unit = {
    if (minCapacity - buf.length > 0) grow(minCapacity)
  }

  private def grow(minCapacity: Int): Unit = {
    val oldCapacity = buf.length
    var newCapacity = oldCapacity << 1
    if (newCapacity - minCapacity < 0)
      newCapacity = minCapacity
    if (newCapacity - MAX_ARRAY_SIZE > 0)
      newCapacity = hugeCapacity(minCapacity)
    buf = Arrays.copyOf(buf, newCapacity)
  }
  def write(b: Int): Unit = {
    ensureCapacity(count + 1)
    buf(count) = b.asInstanceOf[Byte]
    count += 1
  }

  override def write(b: Array[Byte], off: Int, len: Int): Unit = {
    ensureCapacity(count + len)
    System.arraycopy(b, off, buf, count, len)
    count += len
  }

  def reset(): Unit = {
    count = 0
  }

} 
Example 7
Source File: VectorSlicerExample.scala    From Spark-2.3.1   with Apache License 2.0 5 votes
// scalastyle:off println
package org.apache.spark.examples.ml

// $example on$
import java.util.Arrays

import org.apache.spark.ml.attribute.{Attribute, AttributeGroup, NumericAttribute}
import org.apache.spark.ml.feature.VectorSlicer
import org.apache.spark.ml.linalg.Vectors
import org.apache.spark.sql.{Row, SparkSession}
import org.apache.spark.sql.types.StructType
// $example off$

object VectorSlicerExample {
  def main(args: Array[String]): Unit = {
    val spark = SparkSession
      .builder
      .appName("VectorSlicerExample")
      .getOrCreate()

    // $example on$
    val data = Arrays.asList(
      Row(Vectors.sparse(3, Seq((0, -2.0), (1, 2.3)))),
      Row(Vectors.dense(-2.0, 2.3, 0.0))
    )

    val defaultAttr = NumericAttribute.defaultAttr
    val attrs = Array("f1", "f2", "f3").map(defaultAttr.withName)
    val attrGroup = new AttributeGroup("userFeatures", attrs.asInstanceOf[Array[Attribute]])

    val dataset = spark.createDataFrame(data, StructType(Array(attrGroup.toStructField())))

    val slicer = new VectorSlicer().setInputCol("userFeatures").setOutputCol("features")

    slicer.setIndices(Array(1)).setNames(Array("f3"))
    // or slicer.setIndices(Array(1, 2)), or slicer.setNames(Array("f2", "f3"))

    val output = slicer.transform(dataset)
    output.show(false)
    // $example off$

    spark.stop()
  }
}
// scalastyle:on println 
Example 8
Source File: ImageSchemaSuite.scala    From Spark-2.3.1   with Apache License 2.0 5 votes
package org.apache.spark.ml.image

import java.nio.file.Paths
import java.util.Arrays

import org.apache.spark.SparkFunSuite
import org.apache.spark.ml.image.ImageSchema._
import org.apache.spark.mllib.util.MLlibTestSparkContext
import org.apache.spark.sql.Row
import org.apache.spark.sql.types._

class ImageSchemaSuite extends SparkFunSuite with MLlibTestSparkContext {
  // Single column of images named "image"
  private lazy val imagePath = "../data/mllib/images"

  test("Smoke test: create basic ImageSchema dataframe") {
    val origin = "path"
    val width = 1
    val height = 1
    val nChannels = 3
    val data = Array[Byte](0, 0, 0)
    val mode = ocvTypes("CV_8UC3")

    // Internal Row corresponds to image StructType
    val rows = Seq(Row(Row(origin, height, width, nChannels, mode, data)),
      Row(Row(null, height, width, nChannels, mode, data)))
    val rdd = sc.makeRDD(rows)
    val df = spark.createDataFrame(rdd, ImageSchema.imageSchema)

    assert(df.count === 2, "incorrect image count")
    assert(df.schema("image").dataType == columnSchema, "data do not fit ImageSchema")
  }

  test("readImages count test") {
    var df = readImages(imagePath)
    assert(df.count === 1)

    df = readImages(imagePath, null, true, -1, false, 1.0, 0)
    assert(df.count === 10)

    df = readImages(imagePath, null, true, -1, true, 1.0, 0)
    val countTotal = df.count
    assert(countTotal === 8)

    df = readImages(imagePath, null, true, -1, true, 0.5, 0)
    // Random number about half of the size of the original dataset
    val count50 = df.count
    assert(count50 > 0 && count50 < countTotal)
  }

  test("readImages partition test") {
    val df = readImages(imagePath, null, true, 3, true, 1.0, 0)
    assert(df.rdd.getNumPartitions === 3)
  }

  // Images with the different number of channels
  test("readImages pixel values test") {

    val images = readImages(imagePath + "/multi-channel/").collect

    images.foreach { rrow =>
      val row = rrow.getAs[Row](0)
      val filename = Paths.get(getOrigin(row)).getFileName().toString()
      if (firstBytes20.contains(filename)) {
        val mode = getMode(row)
        val bytes20 = getData(row).slice(0, 20)

        val (expectedMode, expectedBytes) = firstBytes20(filename)
        assert(ocvTypes(expectedMode) === mode, "mode of the image is not read correctly")
        assert(Arrays.equals(expectedBytes, bytes20), "incorrect numeric value for flattened image")
      }
    }
  }

  // number of channels and first 20 bytes of OpenCV representation
  // - default representation for 3-channel RGB images is BGR row-wise:
  //   (B00, G00, R00,      B10, G10, R10,      ...)
  // - default representation for 4-channel RGB images is BGRA row-wise:
  //   (B00, G00, R00, A00, B10, G10, R10, A10, ...)
  private val firstBytes20 = Map(
    "grayscale.jpg" ->
      (("CV_8UC1", Array[Byte](-2, -33, -61, -60, -59, -59, -64, -59, -66, -67, -73, -73, -62,
        -57, -60, -63, -53, -49, -55, -69))),
    "chr30.4.184.jpg" -> (("CV_8UC3",
      Array[Byte](-9, -3, -1, -43, -32, -28, -75, -60, -57, -78, -59, -56, -74, -59, -57,
        -71, -58, -56, -73, -64))),
    "BGRA.png" -> (("CV_8UC4",
      Array[Byte](-128, -128, -8, -1, -128, -128, -8, -1, -128,
        -128, -8, -1, 127, 127, -9, -1, 127, 127, -9, -1))),
    "BGRA_alpha_60.png" -> (("CV_8UC4",
      Array[Byte](-128, -128, -8, 60, -128, -128, -8, 60, -128,
        -128, -8, 60, 127, 127, -9, 60, 127, 127, -9, 60)))
  )
} 
Example 9
Source File: RUtils.scala    From Spark-2.3.1   with Apache License 2.0 5 votes
package org.apache.spark.api.r

import java.io.File
import java.util.Arrays

import org.apache.spark.{SparkEnv, SparkException}

private[spark] object RUtils {
  // Local path where R binary packages built from R source code contained in the spark
  // packages specified with "--packages" or "--jars" command line option reside.
  var rPackages: Option[String] = None

  
  def isRInstalled: Boolean = {
    try {
      val builder = new ProcessBuilder(Arrays.asList("R", "--version"))
      builder.start().waitFor() == 0
    } catch {
      case e: Exception => false
    }
  }
} 
Example 10
Source File: SparkStatusTracker.scala    From Spark-2.3.1   with Apache License 2.0 5 votes
package org.apache.spark

import java.util.Arrays

import org.apache.spark.status.AppStatusStore
import org.apache.spark.status.api.v1.StageStatus

class SparkStatusTracker private[spark] (sc: SparkContext, store: AppStatusStore) {
  def getExecutorInfos: Array[SparkExecutorInfo] = {
    store.executorList(true).map { exec =>
      val (host, port) = exec.hostPort.split(":", 2) match {
        case Array(h, p) => (h, p.toInt)
        case Array(h) => (h, -1)
      }
      val cachedMem = exec.memoryMetrics.map { mem =>
        mem.usedOnHeapStorageMemory + mem.usedOffHeapStorageMemory
      }.getOrElse(0L)

      new SparkExecutorInfoImpl(
        host,
        port,
        cachedMem,
        exec.activeTasks)
    }.toArray
  }
} 
Example 11
Source File: CustomShuffledRDD.scala    From Spark-2.3.1   with Apache License 2.0 5 votes
package org.apache.spark.scheduler

import java.util.Arrays
import java.util.Objects

import org.apache.spark._
import org.apache.spark.rdd.RDD


class CustomShuffledRDD[K, V, C](
    var dependency: ShuffleDependency[K, V, C],
    partitionStartIndices: Array[Int])
  extends RDD[(K, C)](dependency.rdd.context, Seq(dependency)) {

  def this(dep: ShuffleDependency[K, V, C]) = {
    this(dep, (0 until dep.partitioner.numPartitions).toArray)
  }

  override def getDependencies: Seq[Dependency[_]] = List(dependency)

  override val partitioner = {
    Some(new CoalescedPartitioner(dependency.partitioner, partitionStartIndices))
  }

  override def getPartitions: Array[Partition] = {
    val n = dependency.partitioner.numPartitions
    Array.tabulate[Partition](partitionStartIndices.length) { i =>
      val startIndex = partitionStartIndices(i)
      val endIndex = if (i < partitionStartIndices.length - 1) partitionStartIndices(i + 1) else n
      new CustomShuffledRDDPartition(i, startIndex, endIndex)
    }
  }

  override def compute(p: Partition, context: TaskContext): Iterator[(K, C)] = {
    val part = p.asInstanceOf[CustomShuffledRDDPartition]
    SparkEnv.get.shuffleManager.getReader(
      dependency.shuffleHandle, part.startIndexInParent, part.endIndexInParent, context)
      .read()
      .asInstanceOf[Iterator[(K, C)]]
  }

  override def clearDependencies() {
    super.clearDependencies()
    dependency = null
  }
} 
Example 12
Source File: ConjunctionSpans.scala    From odinson   with Apache License 2.0 5 votes
package ai.lum.odinson.lucene.search.spans

import java.util.Arrays
import org.apache.lucene.search._
import org.apache.lucene.search.spans._


trait ConjunctionSpans extends OdinsonSpans {

  import DocIdSetIterator._

  protected var matchStart: Int = -1
  protected var matchEnd: Int = -1

  def startPosition(): Int = if (atFirstInCurrentDoc) -1 else matchStart
  def endPosition(): Int = if (atFirstInCurrentDoc) -1 else matchEnd

  // a subclass needs to implement these two methods
  def subSpans: Array[OdinsonSpans]
  def twoPhaseCurrentDocMatches(): Boolean

  // a first start position is available in current doc for nextStartPosition
  protected var atFirstInCurrentDoc: Boolean = true

  // one subspans exhausted in current doc
  protected var oneExhaustedInCurrentDoc: Boolean = false

  // use to move to next doc with all clauses
  val conjunction: DocIdSetIterator = subSpans match {
    case Array(disi) => disi
    case subSpans => ConjunctionDISI.intersectSpans(Arrays.asList(subSpans:_*))
  }

  def cost(): Long = conjunction.cost()

  def docID(): Int = conjunction.docID()

  def nextDoc(): Int = {
    if (conjunction.nextDoc() == NO_MORE_DOCS) {
      NO_MORE_DOCS
    } else {
      toMatchDoc()
    }
  }

  def advance(target: Int): Int = {
    if (conjunction.advance(target) == NO_MORE_DOCS) {
      NO_MORE_DOCS
    } else {
      toMatchDoc()
    }
  }

  def toMatchDoc(): Int = {
    @annotation.tailrec
    def getDoc(): Int = {
      if (twoPhaseCurrentDocMatches()) {
        docID()
      } else if (conjunction.nextDoc() == NO_MORE_DOCS) {
        NO_MORE_DOCS
      } else {
        getDoc()
      }
    }
    oneExhaustedInCurrentDoc = false
    getDoc()
  }

  def collect(collector: SpanCollector): Unit = {
    for (spans <- subSpans) spans.collect(collector)
  }

  override def asTwoPhaseIterator(): TwoPhaseIterator = {
    var totalMatchCost = 0f
    // Compute the matchCost as the total matchCost/positionsCost of the subSpans.
    for (s <- subSpans) {
      val tpi = s.asTwoPhaseIterator()
      if (tpi != null) {
        totalMatchCost += tpi.matchCost()
      } else {
        totalMatchCost += s.positionsCost()
      }
    }
    new TwoPhaseIterator(conjunction) {
      def matches(): Boolean = twoPhaseCurrentDocMatches()
      def matchCost(): Float = totalMatchCost
    }
  }

  def positionsCost(): Float = {
    // asTwoPhaseIterator never returns null (see above)
    throw new UnsupportedOperationException
  }

} 
Example 13
Source File: RUtils.scala    From BigDatalog   with Apache License 2.0 5 votes
package org.apache.spark.api.r

import java.io.File
import java.util.Arrays

import org.apache.spark.{SparkEnv, SparkException}

private[spark] object RUtils {
  // Local path where R binary packages built from R source code contained in the spark
  // packages specified with "--packages" or "--jars" command line option reside.
  var rPackages: Option[String] = None

  
  def isRInstalled: Boolean = {
    try {
      val builder = new ProcessBuilder(Arrays.asList("R", "--version"))
      builder.start().waitFor() == 0
    } catch {
      case e: Exception => false
    }
  }
} 
Example 14
Source File: CustomShuffledRDD.scala    From BigDatalog   with Apache License 2.0 5 votes
package org.apache.spark.scheduler

import java.util.Arrays

import org.apache.spark._
import org.apache.spark.rdd.RDD


class CustomShuffledRDD[K, V, C](
    var dependency: ShuffleDependency[K, V, C],
    partitionStartIndices: Array[Int])
  extends RDD[(K, C)](dependency.rdd.context, Seq(dependency)) {

  def this(dep: ShuffleDependency[K, V, C]) = {
    this(dep, (0 until dep.partitioner.numPartitions).toArray)
  }

  override def getDependencies: Seq[Dependency[_]] = List(dependency)

  override val partitioner = {
    Some(new CoalescedPartitioner(dependency.partitioner, partitionStartIndices))
  }

  override def getPartitions: Array[Partition] = {
    val n = dependency.partitioner.numPartitions
    Array.tabulate[Partition](partitionStartIndices.length) { i =>
      val startIndex = partitionStartIndices(i)
      val endIndex = if (i < partitionStartIndices.length - 1) partitionStartIndices(i + 1) else n
      new CustomShuffledRDDPartition(i, startIndex, endIndex)
    }
  }

  override def compute(p: Partition, context: TaskContext): Iterator[(K, C)] = {
    val part = p.asInstanceOf[CustomShuffledRDDPartition]
    SparkEnv.get.shuffleManager.getReader(
      dependency.shuffleHandle, part.startIndexInParent, part.endIndexInParent, context)
      .read()
      .asInstanceOf[Iterator[(K, C)]]
  }

  override def clearDependencies() {
    super.clearDependencies()
    dependency = null
  }
} 
Example 15
Source File: AvlTreeData.scala    From sigmastate-interpreter   with MIT License 5 votes
package sigmastate

import java.util
import java.util.{Arrays, Objects}

import scorex.crypto.authds.ADDigest
import sigmastate.interpreter.CryptoConstants
import sigmastate.serialization.SigmaSerializer
import sigmastate.utils.{SigmaByteReader, SigmaByteWriter}


case class AvlTreeFlags(insertAllowed: Boolean, updateAllowed: Boolean, removeAllowed: Boolean) {
  def serializeToByte: Byte = AvlTreeFlags.serializeFlags(this)
}

object AvlTreeFlags {

  lazy val ReadOnly = AvlTreeFlags(insertAllowed = false, updateAllowed = false, removeAllowed = false)

  lazy val AllOperationsAllowed = AvlTreeFlags(insertAllowed = true, updateAllowed = true, removeAllowed = true)

  lazy val InsertOnly = AvlTreeFlags(insertAllowed = true, updateAllowed = false, removeAllowed = false)

  lazy val RemoveOnly = AvlTreeFlags(insertAllowed = false, updateAllowed = false, removeAllowed = true)

  def apply(serializedFlags: Byte): AvlTreeFlags = {
    val insertAllowed = (serializedFlags & 0x01) != 0
    val updateAllowed = (serializedFlags & 0x02) != 0
    val removeAllowed = (serializedFlags & 0x04) != 0
    AvlTreeFlags(insertAllowed, updateAllowed, removeAllowed)
  }

  def serializeFlags(avlTreeFlags: AvlTreeFlags): Byte = {
    val readOnly = 0
    val i = if(avlTreeFlags.insertAllowed) readOnly | 0x01 else readOnly
    val u = if(avlTreeFlags.updateAllowed) i | 0x02 else i
    val r = if(avlTreeFlags.removeAllowed) u | 0x04 else u
    r.toByte
  }
}



case class AvlTreeData(digest: ADDigest,
                       treeFlags: AvlTreeFlags,
                       keyLength: Int,
                       valueLengthOpt: Option[Int] = None) {
  override def equals(arg: Any): Boolean = arg match {
    case x: AvlTreeData =>
      Arrays.equals(digest, x.digest) &&
      keyLength == x.keyLength &&
      valueLengthOpt == x.valueLengthOpt &&
      treeFlags == x.treeFlags
    case _ => false
  }

  override def hashCode(): Int =
    (util.Arrays.hashCode(digest) * 31 +
        keyLength.hashCode()) * 31 + Objects.hash(valueLengthOpt, treeFlags)
}

object AvlTreeData {
  val DigestSize: Int = CryptoConstants.hashLength + 1 //please read class comments above for details
  val TreeDataSize = DigestSize + 3 + 4 + 4

  val dummy = new AvlTreeData(
    ADDigest @@ Array.fill(DigestSize)(0:Byte),
    AvlTreeFlags.AllOperationsAllowed,
    keyLength = 32)

  object serializer extends SigmaSerializer[AvlTreeData, AvlTreeData] {

    override def serialize(data: AvlTreeData, w: SigmaByteWriter): Unit = {
      val tf = AvlTreeFlags.serializeFlags(data.treeFlags)
      w.putBytes(data.digest)
        .putUByte(tf)
        .putUInt(data.keyLength)
        .putOption(data.valueLengthOpt)(_.putUInt(_))
    }

    override def parse(r: SigmaByteReader): AvlTreeData = {
      val digest = r.getBytes(DigestSize)
      val tf = AvlTreeFlags(r.getByte())
      val keyLength = r.getUInt().toInt
      val valueLengthOpt = r.getOption(r.getUInt().toInt)
      AvlTreeData(ADDigest @@ digest, tf, keyLength, valueLengthOpt)
    }
  }
} 
Example 16
Source File: CosmosAclProvider.scala    From cosmos   with Apache License 2.0 5 votes
package com.mesosphere.cosmos.zookeeper

import java.util.Arrays
import java.util.{List => JList}

import org.apache.curator.framework.api.ACLProvider
import org.apache.zookeeper.ZooDefs
import org.apache.zookeeper.data.ACL
import org.apache.zookeeper.data.Id


final class CosmosAclProvider private (acls: JList[ACL]) extends ACLProvider {
  def getAclForPath(path: String): JList[ACL] = acls

  def getDefaultAcl(): JList[ACL] = acls
}

object CosmosAclProvider {
  def apply(user: String, secret: String): CosmosAclProvider = {
    val userAcl = new ACL(ZooDefs.Perms.ALL, new Id("auth", s"$user:$secret"))
    val worldAcl = new ACL(ZooDefs.Perms.READ, new Id("world", "anyone"))

    new CosmosAclProvider(Arrays.asList(userAcl, worldAcl))
  }
} 
Example 17
Source File: Public.scala    From recogito2   with Apache License 2.0 5 votes
package services.generated


import java.util.ArrayList
import java.util.Arrays
import java.util.List

import javax.annotation.Generated

import org.jooq.Sequence
import org.jooq.Table
import org.jooq.impl.SchemaImpl

import services.generated.tables.AuthorityFile
import services.generated.tables.Document
import services.generated.tables.DocumentFilepart
import services.generated.tables.DocumentPreferences
import services.generated.tables.FeatureToggle
import services.generated.tables.Folder
import services.generated.tables.FolderAssociation
import services.generated.tables.ServiceAnnouncement
import services.generated.tables.SharingPolicy
import services.generated.tables.Similarity
import services.generated.tables.Task
import services.generated.tables.Upload
import services.generated.tables.UploadFilepart
import services.generated.tables.User
import services.generated.tables.UserRole


object Public {

	val PUBLIC = new Public

}

@Generated(
	value = Array(
		"http://www.jooq.org",
		"jOOQ version:3.7.2"
	),
	comments = "This class is generated by jOOQ"
)
class Public extends SchemaImpl("public") {

	override def getSequences : List[Sequence[_]] = {
		val result = new ArrayList[Sequence[_]]
		result.addAll(getSequences0)
		result
	}

	private def getSequences0() : List[Sequence[_]] = {
		return Arrays.asList[Sequence[_]](
			Sequences.FEATURE_TOGGLE_ID_SEQ,
			Sequences.SHARING_POLICY_ID_SEQ,
			Sequences.UPLOAD_ID_SEQ,
			Sequences.USER_ROLE_ID_SEQ)
	}

	override def getTables : List[Table[_]] = {
		val result = new ArrayList[Table[_]]
		result.addAll(getTables0)
		result
	}

	private def getTables0() : List[Table[_]] = {
		return Arrays.asList[Table[_]](
			AuthorityFile.AUTHORITY_FILE,
			Document.DOCUMENT,
			DocumentFilepart.DOCUMENT_FILEPART,
			DocumentPreferences.DOCUMENT_PREFERENCES,
			FeatureToggle.FEATURE_TOGGLE,
			Folder.FOLDER,
			FolderAssociation.FOLDER_ASSOCIATION,
			ServiceAnnouncement.SERVICE_ANNOUNCEMENT,
			SharingPolicy.SHARING_POLICY,
			Similarity.SIMILARITY,
			Task.TASK,
			Upload.UPLOAD,
			UploadFilepart.UPLOAD_FILEPART,
			User.USER,
			UserRole.USER_ROLE)
	}
} 
Example 18
Source File: HasEncryption.scala    From recogito2   with Apache License 2.0 5 votes
package services.user

import javax.crypto.Cipher
import javax.crypto.spec.SecretKeySpec
import org.apache.commons.codec.binary.Base64
import java.security.MessageDigest
import java.util.Arrays
import controllers.HasConfig

trait HasEncryption { self: HasConfig =>

  private val CIPHER = "AES/ECB/PKCS5Padding"

  private lazy val keySpec = self.config.getOptional[String]("recogito.email.key").flatMap { key =>
    if (key.isEmpty) {
      None
    } else {
      val md = MessageDigest.getInstance("SHA-1")
      val keyDigest = md.digest(key.getBytes)
      Some(new SecretKeySpec(Arrays.copyOf(keyDigest, 16), "AES"))
    }
  }

  def encrypt(plaintext: String) = keySpec match {
    case Some(spec) => {
      val cipher = Cipher.getInstance(CIPHER)
      cipher.init(Cipher.ENCRYPT_MODE, spec)
      Base64.encodeBase64String(cipher.doFinal(plaintext.getBytes("UTF-8")))
    }

    case None => plaintext
  }

  def decrypt(encrypted: String) = keySpec match {
    case Some(spec) => {
      val cipher = Cipher.getInstance(CIPHER)
      cipher.init(Cipher.DECRYPT_MODE, spec)
      new String(cipher.doFinal(Base64.decodeBase64(encrypted)))
    }

    case None => encrypted
  }

} 
Example 19
Source File: BinaryCodec.scala    From msgpack4z-core   with MIT License 5 votes
package msgpack4z

import java.util.Arrays
import scalaz.\/-

final class Binary(val value: Array[Byte]) {
  override def toString: String = hexString("Binary(size = " + value.length + " value = ", " ", ")", 4)

  def hexString(start: String, sep: String, end: String, n: Int): String = {
    value.sliding(n, n).map(_.map(x => "%02x".format(x & 0xff)).mkString).mkString(start, sep, end)
  }

  def ===(that: Binary): Boolean = {
    if (this eq that)
      true
    else
      Arrays.equals(this.value, that.value)
  }

  override def equals(other: Any): Boolean =
    other match {
      case that: Binary =>
        this.===(that)
      case _ =>
        false
    }

  override def hashCode: Int = Arrays.hashCode(value)
}

trait BinaryCodec {

  
  implicit def binaryCodec: MsgpackCodec[Binary]
}

private[msgpack4z] trait BinaryCodecImpl extends BinaryCodec {
  override final val binaryCodec: MsgpackCodec[Binary] = MsgpackCodec.tryConstE(
    (packer, binary) => {
      packer.packBinary(binary.value)
    },
    unpacker => \/-(new Binary(unpacker.unpackBinary()))
  )
} 
Example 20
Source File: ObjectId.scala    From unicorn   with Apache License 2.0 5 votes
package unicorn.oid

import java.util.{Arrays, Date, UUID}
import java.nio.ByteBuffer
import unicorn.util._

class ObjectId(val id: Array[Byte]) {
  def string: String = {
    new String(id, utf8)
  }
}

object ObjectId {
  def apply(id: Array[Byte]) = new ObjectId(id)
  def apply(id: String) = new ObjectId(id.getBytes("UTF-8"))

  def apply(id: Int) = {
    val array = new Array[Byte](4)
    val buffer = ByteBuffer.wrap(array)
    buffer.putInt(id)
    new ObjectId(array)
  }

  def apply(id: Long) = {
    val array = new Array[Byte](8)
    val buffer = ByteBuffer.wrap(array)
    buffer.putLong(id)
    new ObjectId(array)
  }

  def apply(id: Date) = {
    val array = new Array[Byte](8)
    val buffer = ByteBuffer.wrap(array)
    buffer.putLong(id.getTime)
    new ObjectId(array)
  }

  def apply(id: UUID) = {
    val array = new Array[Byte](16)
    val buffer = ByteBuffer.wrap(array)
    buffer.putLong(id.getMostSignificantBits)
    buffer.putLong(id.getLeastSignificantBits)
    new ObjectId(array)
  }
} 
Example 21
Source File: StdBase58.scala    From Waves   with MIT License 5 votes
package com.wavesplatform.common.utils

import java.util.Arrays

object StdBase58 extends BaseXXEncDec {
  import java.nio.charset.StandardCharsets.US_ASCII

  private val Alphabet: Array[Byte] = Base58Alphabet.getBytes(US_ASCII)

  private val DecodeTable: Array[Byte] = Array(
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
    -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8, -1, -1, -1, -1, -1, -1, -1, 9, 10, 11, 12, 13, 14, 15, 16, -1, 17,
    18, 19, 20, 21, -1, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, -1, -1, -1, -1, -1, -1, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, -1, 44, 45,
    46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57
  )

  private def toBase58(c: Char): Byte = if (c < DecodeTable.length) DecodeTable(c) else -1

  override def defaultDecodeLimit: Int = 192 

  def encode(bytes: Array[Byte]): String = {
    val input     = Arrays.copyOf(bytes, bytes.length)
    val zeroCount = input.takeWhile(_ == 0).length

    var in                  = zeroCount
    var out                 = input.length * 2
    val output: Array[Byte] = new Array[Byte](out)
    while (in < input.length) {
      val mod = convert(input, in, 256, 58)
      if (input(in) == 0) in += 1
      out -= 1
      output(out) = Alphabet(mod)
    }

    while (out < output.length && output(out) == Alphabet(0)) out += 1
    for (i <- 0 until zeroCount) {
      out -= 1
      output(out) = Alphabet(0)
    }

    new String(output, out, output.length - out, US_ASCII)
  }

  def decode(string: String): Array[Byte] = {
    val input: Array[Byte] = new Array[Byte](string.length)

    for (i <- 0 until string.length) {
      input(i) = toBase58(string(i))
      require(input(i) != -1, s"Wrong char '${string(i)}' in Base58 string '$string'")
    }

    val zeroCount = input.takeWhile(_ == 0).length

    var in     = zeroCount
    var out    = input.length
    val output = new Array[Byte](out)
    while (in < input.length) {
      val mod = convert(input, in, 58, 256)
      if (input(in) == 0) in += 1
      out -= 1
      output(out) = mod
    }

    while (out < output.length && output(out) == 0) out += 1
    Arrays.copyOfRange(output, out - zeroCount, output.length)
  }

  private def convert(number: Array[Byte], offset: Int, from: Int, to: Int): Byte = {
    var rem = 0
    var i   = offset
    while (i < number.length) {
      val digit = number(i) & 0xff
      val tmp   = rem * from + digit
      number(i) = (tmp / to).toByte
      rem = tmp % to
      i += 1
    }
    rem.toByte
  }
} 
Example 22
Source File: FileMaterializers.scala    From scala-js-env-selenium   with BSD 3-Clause "New" or "Revised" License 5 votes
package org.scalajs.jsenv.selenium

import java.io._
import java.nio.file._
import java.net._
import java.util.Arrays

private[selenium] sealed abstract class FileMaterializer {
  private val tmpSuffixRE = """[a-zA-Z0-9-_.]*$""".r

  private[this] var tmpFiles: List[Path] = Nil

  def materialize(path: Path): URL = {
    val tmp = newTmp(path.toString)
    Files.copy(path, tmp, StandardCopyOption.REPLACE_EXISTING)
    toURL(tmp)
  }

  final def materialize(name: String, content: String): URL = {
    val tmp = newTmp(name)
    Files.write(tmp, Arrays.asList(content))
    toURL(tmp)
  }

  final def close(): Unit = {
    tmpFiles.foreach(Files.delete)
    tmpFiles = Nil
  }

  private def newTmp(path: String): Path = {
    val suffix = tmpSuffixRE.findFirstIn(path).orNull
    val p = createTmp(suffix)
    tmpFiles ::= p
    p
  }

  protected def createTmp(suffix: String): Path
  protected def toURL(file: Path): URL
}

object FileMaterializer {
  import SeleniumJSEnv.Config.Materialization
  def apply(m: Materialization): FileMaterializer = m match {
    case Materialization.Temp =>
      new TempDirFileMaterializer

    case Materialization.Server(contentDir, webRoot) =>
      new ServerDirFileMaterializer(contentDir, webRoot)
  }
}


private class TempDirFileMaterializer extends FileMaterializer {
  override def materialize(path: Path): URL = {
    try {
      path.toFile.toURI.toURL
    } catch {
      case _: UnsupportedOperationException =>
        super.materialize(path)
    }
  }

  protected def createTmp(suffix: String) = Files.createTempFile(null, suffix)
  protected def toURL(file: Path): URL = file.toUri.toURL
}

private class ServerDirFileMaterializer(contentDir: Path, webRoot: URL)
    extends FileMaterializer {
  Files.createDirectories(contentDir)

  protected def createTmp(suffix: String) =
    Files.createTempFile(contentDir, null, suffix)

  protected def toURL(file: Path): URL = {
    val rel = contentDir.relativize(file)
    assert(!rel.isAbsolute)
    val nameURI = new URI(null, null, rel.toString, null)
    webRoot.toURI.resolve(nameURI).toURL
  }
} 
Example 23
Source File: SeleniumJSEnvSuite.scala    From scala-js-env-selenium   with BSD 3-Clause "New" or "Revised" License 5 votes
package org.scalajs.jsenv.selenium

import java.util.Arrays

import org.scalajs.jsenv.test._

import org.junit.runner.RunWith
import org.junit.runner.Runner
import org.junit.runners.Suite
import org.junit.runner.manipulation.Filter
import org.junit.runner.Description

@RunWith(classOf[SeleniumJSSuiteRunner])
class SeleniumJSSuite extends JSEnvSuite(
  JSEnvSuiteConfig(new SeleniumJSEnv(TestCapabilities.fromEnv))
)

class SeleniumJSSuiteRunner private (
    root: Class[_], base: JSEnvSuiteRunner)
    extends Suite(root, Arrays.asList[Runner](base)) {

  
  def this(suite: Class[_ <: SeleniumJSSuite]) =
    this(suite, new JSEnvSuiteRunner(suite))

  // Ignore `largeMessageTest` for chrome.
  if (TestCapabilities.nameFromEnv == "chrome") {
    base.filter(new Filter {
      def describe(): String = "Ignore largeMessageTest"

      def shouldRun(description: Description): Boolean = {
        description.getMethodName == null ||
        !description.getMethodName.startsWith("largeMessageTest")
      }
    })
  }
} 
Example 24
Source File: VectorSlicerExample.scala    From drizzle-spark   with Apache License 2.0 5 votes
// scalastyle:off println
package org.apache.spark.examples.ml

// $example on$
import java.util.Arrays

import org.apache.spark.ml.attribute.{Attribute, AttributeGroup, NumericAttribute}
import org.apache.spark.ml.feature.VectorSlicer
import org.apache.spark.ml.linalg.Vectors
import org.apache.spark.sql.Row
import org.apache.spark.sql.types.StructType
// $example off$
import org.apache.spark.sql.SparkSession

object VectorSlicerExample {
  def main(args: Array[String]): Unit = {
    val spark = SparkSession
      .builder
      .appName("VectorSlicerExample")
      .getOrCreate()

    // $example on$
    val data = Arrays.asList(
      Row(Vectors.sparse(3, Seq((0, -2.0), (1, 2.3)))),
      Row(Vectors.dense(-2.0, 2.3, 0.0))
    )

    val defaultAttr = NumericAttribute.defaultAttr
    val attrs = Array("f1", "f2", "f3").map(defaultAttr.withName)
    val attrGroup = new AttributeGroup("userFeatures", attrs.asInstanceOf[Array[Attribute]])

    val dataset = spark.createDataFrame(data, StructType(Array(attrGroup.toStructField())))

    val slicer = new VectorSlicer().setInputCol("userFeatures").setOutputCol("features")

    slicer.setIndices(Array(1)).setNames(Array("f3"))
    // or slicer.setIndices(Array(1, 2)), or slicer.setNames(Array("f2", "f3"))

    val output = slicer.transform(dataset)
    output.show(false)
    // $example off$

    spark.stop()
  }
}
// scalastyle:on println 
Example 25
Source File: RUtils.scala    From sparkoscope   with Apache License 2.0 5 votes
package org.apache.spark.api.r

import java.io.File
import java.util.Arrays

import org.apache.spark.{SparkEnv, SparkException}

private[spark] object RUtils {
  // Local path where R binary packages built from R source code contained in the spark
  // packages specified with "--packages" or "--jars" command line option reside.
  var rPackages: Option[String] = None

  
  def isRInstalled: Boolean = {
    try {
      val builder = new ProcessBuilder(Arrays.asList("R", "--version"))
      builder.start().waitFor() == 0
    } catch {
      case e: Exception => false
    }
  }
} 
Example 26
Source File: CustomShuffledRDD.scala    From drizzle-spark   with Apache License 2.0 5 votes
package org.apache.spark.scheduler

import java.util.Arrays
import java.util.Objects

import org.apache.spark._
import org.apache.spark.rdd.RDD


class CustomShuffledRDD[K, V, C](
    var dependency: ShuffleDependency[K, V, C],
    partitionStartIndices: Array[Int])
  extends RDD[(K, C)](dependency.rdd.context, Seq(dependency)) {

  def this(dep: ShuffleDependency[K, V, C]) = {
    this(dep, (0 until dep.partitioner.numPartitions).toArray)
  }

  override def getDependencies: Seq[Dependency[_]] = List(dependency)

  override val partitioner = {
    Some(new CoalescedPartitioner(dependency.partitioner, partitionStartIndices))
  }

  override def getPartitions: Array[Partition] = {
    val n = dependency.partitioner.numPartitions
    Array.tabulate[Partition](partitionStartIndices.length) { i =>
      val startIndex = partitionStartIndices(i)
      val endIndex = if (i < partitionStartIndices.length - 1) partitionStartIndices(i + 1) else n
      new CustomShuffledRDDPartition(i, startIndex, endIndex)
    }
  }

  override def compute(p: Partition, context: TaskContext): Iterator[(K, C)] = {
    val part = p.asInstanceOf[CustomShuffledRDDPartition]
    SparkEnv.get.shuffleManager.getReader(
      dependency.shuffleHandle, part.startIndexInParent, part.endIndexInParent, context)
      .read()
      .asInstanceOf[Iterator[(K, C)]]
  }

  override def clearDependencies() {
    super.clearDependencies()
    dependency = null
  }
} 
Example 27
Source File: DiskStoreSuite.scala    From drizzle-spark   with Apache License 2.0 5 votes
package org.apache.spark.storage

import java.nio.{ByteBuffer, MappedByteBuffer}
import java.util.Arrays

import org.apache.spark.{SparkConf, SparkFunSuite}
import org.apache.spark.util.io.ChunkedByteBuffer
import org.apache.spark.util.Utils

class DiskStoreSuite extends SparkFunSuite {

  test("reads of memory-mapped and non memory-mapped files are equivalent") {
    // It will cause an error when we try to re-open the file store and the
    // memory-mapped byte buffer to the file has not been GC'd on Windows.
    assume(!Utils.isWindows)
    val confKey = "spark.storage.memoryMapThreshold"

    // Create a non-trivial (not all zeros) byte array
    val bytes = Array.tabulate[Byte](1000)(_.toByte)
    val byteBuffer = new ChunkedByteBuffer(ByteBuffer.wrap(bytes))

    val blockId = BlockId("rdd_1_2")
    val diskBlockManager = new DiskBlockManager(new SparkConf(), deleteFilesOnStop = true)

    val diskStoreMapped = new DiskStore(new SparkConf().set(confKey, "0"), diskBlockManager)
    diskStoreMapped.putBytes(blockId, byteBuffer)
    val mapped = diskStoreMapped.getBytes(blockId)
    assert(diskStoreMapped.remove(blockId))

    val diskStoreNotMapped = new DiskStore(new SparkConf().set(confKey, "1m"), diskBlockManager)
    diskStoreNotMapped.putBytes(blockId, byteBuffer)
    val notMapped = diskStoreNotMapped.getBytes(blockId)

    // Not possible to do isInstanceOf due to visibility of HeapByteBuffer
    assert(notMapped.getChunks().forall(_.getClass.getName.endsWith("HeapByteBuffer")),
      "Expected HeapByteBuffer for un-mapped read")
    assert(mapped.getChunks().forall(_.isInstanceOf[MappedByteBuffer]),
      "Expected MappedByteBuffer for mapped read")

    def arrayFromByteBuffer(in: ByteBuffer): Array[Byte] = {
      val array = new Array[Byte](in.remaining())
      in.get(array)
      array
    }

    assert(Arrays.equals(mapped.toArray, bytes))
    assert(Arrays.equals(notMapped.toArray, bytes))
  }
} 
Example 28
Source File: GeneralAggregateWithShardingTest.scala    From akka-tools   with MIT License 5 votes
package no.nextgentel.oss.akkatools.aggregate

import java.util.{Arrays, UUID}

import akka.actor.ActorSystem
import akka.actor.Status.Failure
import akka.testkit.{TestKit, TestProbe}
import com.typesafe.config.ConfigFactory
import no.nextgentel.oss.akkatools.aggregate.testAggregate.StateName._
import no.nextgentel.oss.akkatools.aggregate.testAggregate.{StateName, _}
import no.nextgentel.oss.akkatools.testing.AggregateTesting
import org.scalatest._
import org.slf4j.LoggerFactory

import scala.util.Random

object GeneralAggregateWithShardingTest {
  val port = 20000 + Random.nextInt(20000)
}


class GeneralAggregateWithShardingTest(_system:ActorSystem) extends TestKit(_system) with FunSuiteLike with Matchers with BeforeAndAfterAll with BeforeAndAfter {

  def this() = this(ActorSystem("test-actor-system", ConfigFactory.parseString(
    s"""akka.actor.provider = "akka.cluster.ClusterActorRefProvider"
        |akka.remote.enabled-transports = ["akka.remote.netty.tcp"]
        |akka.remote.netty.tcp.hostname="localhost"
        |akka.remote.netty.tcp.port=${GeneralAggregateWithShardingTest.port}
        |akka.cluster.seed-nodes = ["akka.tcp://test-actor-system@localhost:${GeneralAggregateWithShardingTest.port}"]
    """.stripMargin
  ).withFallback(ConfigFactory.load("application-test.conf"))))

  override def afterAll {
    TestKit.shutdownActorSystem(system)
  }

  val log = LoggerFactory.getLogger(getClass)
  private def generateId() = UUID.randomUUID().toString

  val seatIds = List("s1","id-used-in-Failed-in-onAfterValidationSuccess", "s2", "s3-This-id-is-going-to-be-discarded", "s4")

  trait TestEnv extends AggregateTesting[BookingState] {
    val id = generateId()
    val printShop = TestProbe()
    val cinema = TestProbe()
    val onSuccessDmForwardReceiver = TestProbe()

    val starter = new AggregateStarterSimple("booking", system).withAggregatePropsCreator {
      dmSelf =>
        BookingAggregate.props(dmSelf, dmForwardAndConfirm(printShop.ref).path, dmForwardAndConfirm(cinema.ref).path, seatIds, dmForwardAndConfirm(onSuccessDmForwardReceiver.ref).path)
    }

    val main = starter.dispatcher
    starter.start()

    def assertState(correctState:BookingState): Unit = {
      assert(getState(id) == correctState)
    }

  }




  test("normal flow") {

    new TestEnv {

      // Make sure we start with empty state
      assertState(BookingState.empty())

      val maxSeats = 2
      val sender = TestProbe()
      // Open the booking
      println("1")
      sendDMBlocking(main, OpenBookingCmd(id, maxSeats), sender.ref)
      println("2")
      assertState(BookingState(OPEN, maxSeats, Set()))

    }
  }
} 
Example 29
Source File: HexUtils.scala    From ledger-manager-chrome   with MIT License 5 votes
package co.ledger.wallet.core.utils

import java.util.{Arrays}

trait HexUtils {

  val LowerCaseHexDigits = "0123456789abcdef".toCharArray

  private val hexArray = "0123456789ABCDEF".toCharArray
  private val AID_PREFIX = "A00000061700"
  private val AID_SUFFIX = "0101"
  private val SELECT_HEADER = "00A40400"

  def encodeHex(bytes: Array[Byte]): String = {
    val hexChars = Array.ofDim[Char](bytes.length * 2)
    for (i <- 0 until bytes.length) {
      val v = bytes(i) & 0xFF
      hexChars(i * 2) = hexArray(v >>> 4)
      hexChars(i * 2 + 1) = hexArray(v & 0x0F)
    }
    new String(hexChars)
  }

  def decodeHex(hexString: String): Array[Byte] = {
    if ((hexString.length & 0x01) != 0) {
      throw new IllegalArgumentException("Odd number of characters.")
    }
    val hexChars = hexString.toUpperCase().toCharArray()
    val result = Array.ofDim[Byte](hexChars.length / 2)
    var i = 0
    while (i < hexChars.length) {
      result(i / 2) = (Arrays.binarySearch(hexArray, hexChars(i)) * 16 + Arrays.binarySearch(hexArray,
        hexChars(i + 1))).toByte
      i += 2
    }
    result
  }

  def stringToHex(s: String): Int ={
    Integer.decode(s)
  }

  def bytesToHex(bytes: Array[Byte]): String = {
    val hexChars = Array.ofDim[Char](bytes.length * 2)
    for (j <- 0 until bytes.length) {
      val v = bytes(j) & 0xFF
      hexChars(j * 2) = hexArray(v >>> 4)
      hexChars(j * 2 + 1) = hexArray(v & 0x0F)
    }
    new String(hexChars)
  }

  implicit class HexString(val str: String) {

    def decodeHex(): Array[Byte] = HexUtils.decodeHex(str)

  }

}

object HexUtils extends HexUtils 
Example 30
Source File: L6-18Cassandra.scala    From prosparkstreaming   with Apache License 2.0 5 votes
package org.apress.prospark

import java.nio.charset.StandardCharsets
import org.apache.spark.SparkConf
import org.apache.spark.SparkContext
import org.apache.spark.streaming.Seconds
import org.apache.spark.streaming.StreamingContext
import org.json4s.DefaultFormats
import org.json4s.JField
import org.json4s.JsonAST.JObject
import org.json4s.jvalue2extractable
import org.json4s.jvalue2monadic
import org.json4s.native.JsonMethods.parse
import org.json4s.string2JsonInput
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.io.Text
import java.nio.ByteBuffer
import org.apache.cassandra.hadoop.ColumnFamilyOutputFormat
import org.apache.cassandra.hadoop.ConfigHelper
import org.apache.cassandra.thrift.ColumnOrSuperColumn
import org.apache.cassandra.thrift.Column
import org.apache.cassandra.utils.ByteBufferUtil
import org.apache.cassandra.thrift.Mutation
import java.util.Arrays

object CassandraSinkApp {

  def main(args: Array[String]) {
    if (args.length != 6) {
      System.err.println(
        "Usage: CassandraSinkApp <appname> <cassandraHost> <cassandraPort> <keyspace> <columnFamilyName> <columnName>")
      System.exit(1)
    }

    val Seq(appName, cassandraHost, cassandraPort, keyspace, columnFamilyName, columnName) = args.toSeq

    val conf = new SparkConf()
      .setAppName(appName)
      .setJars(SparkContext.jarOfClass(this.getClass).toSeq)

    val batchInterval = 10
    val windowSize = 20
    val slideInterval = 10

    val ssc = new StreamingContext(conf, Seconds(batchInterval))

    HttpUtils.createStream(ssc, url = "https://query.yahooapis.com/v1/public/yql?q=select%20*%20from%20yahoo.finance.quotes%20where%20symbol%20in%20(%22IBM,GOOG,MSFT,AAPL,FB,ORCL,YHOO,TWTR,LNKD,INTC%22)%0A%09%09&format=json&diagnostics=true&env=http%3A%2F%2Fdatatables.org%2Falltables.env",
      interval = batchInterval)
      .flatMap(rec => {
        implicit val formats = DefaultFormats
        val query = parse(rec) \ "query"
        ((query \ "results" \ "quote").children)
          .map(rec => ((rec \ "symbol").extract[String], (rec \ "LastTradePriceOnly").extract[String].toFloat))
      })
      .reduceByKeyAndWindow((x: Float, y: Float) => (x + y), Seconds(windowSize), Seconds(slideInterval))
      .foreachRDD(rdd => {
        val jobConf = new Configuration()
        ConfigHelper.setOutputRpcPort(jobConf, cassandraPort)
        ConfigHelper.setOutputInitialAddress(jobConf, cassandraHost)
        ConfigHelper.setOutputColumnFamily(jobConf, keyspace, columnFamilyName)
        ConfigHelper.setOutputPartitioner(jobConf, "Murmur3Partitioner")
        rdd.map(rec => {
          val c = new Column()
          c.setName(ByteBufferUtil.bytes(columnName))
          c.setValue(ByteBufferUtil.bytes(rec._2 / (windowSize / batchInterval)))
          c.setTimestamp(System.currentTimeMillis)
          val m = new Mutation()
          m.setColumn_or_supercolumn(new ColumnOrSuperColumn())
          m.column_or_supercolumn.setColumn(c)
          (ByteBufferUtil.bytes(rec._1), Arrays.asList(m))
        }).saveAsNewAPIHadoopFile(keyspace, classOf[ByteBuffer], classOf[List[Mutation]], classOf[ColumnFamilyOutputFormat], jobConf)
      })

    ssc.start()
    ssc.awaitTermination()
  }
} 
Example 31
Source File: Reprocessor.scala    From nn_coref   with GNU General Public License v3.0 5 votes
package edu.berkeley.nlp.coref.preprocess

import edu.berkeley.nlp.PCFGLA.CoarseToFineMaxRuleParser
import edu.berkeley.nlp.coref.ConllDoc
import scala.collection.JavaConverters._
import scala.collection.mutable.ArrayBuffer
import java.io.PrintWriter
import edu.berkeley.nlp.coref.ConllDocReader
import edu.berkeley.nlp.syntax.Tree
import edu.berkeley.nlp.futile.util.Logger
import java.util.Arrays
import edu.berkeley.nlp.futile.fig.basic.IOUtils
import edu.berkeley.nlp.coref.Chunk
import edu.berkeley.nlp.coref.ConllDocWriter

object Reprocessor {

  def redoConllDocument(parser: CoarseToFineMaxRuleParser, backoffParser: CoarseToFineMaxRuleParser, nerSystem: NerSystem, docReader: ConllDocReader, inputPath: String, outputPath: String) {
    val writer = IOUtils.openOutHard(outputPath);
    val docs = docReader.readConllDocs(inputPath);
    for (doc <- docs) {
      Logger.logss("Reprocessing: " + doc.docID + " part " + doc.docPartNo);
      val newPos = new ArrayBuffer[Seq[String]]();
      val newParses = new ArrayBuffer[edu.berkeley.nlp.futile.syntax.Tree[String]]();
      val newNerChunks = new ArrayBuffer[Seq[Chunk[String]]]();
      for (sentIdx <- 0 until doc.words.size) {
        if (sentIdx % 10 == 0) {
          Logger.logss("Sentence " + sentIdx);
        }
        val sent = doc.words(sentIdx);
        var parse = PreprocessingDriver.parse(parser, backoffParser, sent.asJava);
        parse = if (parse.getYield().size() != sent.length) {
          Logger.logss("Couldn't parse sentence: " + sent.toSeq);
          Logger.logss("Using default parse");
          convertFromFutileTree(doc.trees(sentIdx).constTree);
        } else {
          parse;
        }
        val posTags = parse.getPreTerminalYield().asScala.toArray;
        newPos += posTags;
        newParses += convertToFutileTree(parse);
        val nerBioLabels = nerSystem.runNerSystem(sent.toArray, posTags);
        newNerChunks += convertBioToChunks(nerBioLabels);
      }
      ConllDocWriter.writeIncompleteConllDoc(writer, doc.docID, doc.docPartNo, doc.words, newPos, newParses, doc.speakers, newNerChunks, doc.corefChunks);
    }
    writer.close();
  }
  
  def convertBioToChunks(nerBioLabels: Seq[String]): Seq[Chunk[String]] = {
    var lastNerStart = -1;
    val chunks = new ArrayBuffer[Chunk[String]]();
    for (i <- 0 until nerBioLabels.size) {
      if (nerBioLabels(i).startsWith("B")) {
        if (lastNerStart != -1) {
          chunks += new Chunk[String](lastNerStart, i, "MISC");
        }
        lastNerStart = i;
      } else if (nerBioLabels(i).startsWith("O")) {
        if (lastNerStart != -1) {
          chunks += new Chunk[String](lastNerStart, i, "MISC");
          lastNerStart = -1;
        }
      }
    }
    chunks;
  }
  
  def convertToFutileTree(slavTree: edu.berkeley.nlp.syntax.Tree[String]): edu.berkeley.nlp.futile.syntax.Tree[String] = {
    new edu.berkeley.nlp.futile.syntax.Tree[String](slavTree.getLabel(), slavTree.getChildren().asScala.map(convertToFutileTree(_)).asJava);
  }
  
  def convertFromFutileTree(myTree: edu.berkeley.nlp.futile.syntax.Tree[String]): edu.berkeley.nlp.syntax.Tree[String] = {
    new edu.berkeley.nlp.syntax.Tree[String](myTree.getLabel(), myTree.getChildren().asScala.map(convertFromFutileTree(_)).asJava);
  }
} 
Example 32
Source File: LListFormatSpec.scala    From sjson-new   with Apache License 2.0 5 votes
package sjsonnew
package support.spray

import org.specs2.mutable._
import java.util.Arrays
import spray.json.{ JsArray, JsNumber, JsString, JsObject }

class LListFormatsSpec extends Specification with BasicJsonProtocol {

  "The llistFormat" should {
    val empty = LNil
    val emptyObject = JsObject()
    val list = ("Z", 2) :*: ("a", 1) :*: LNil
    val obj = JsObject("$fields" -> JsArray(JsString("Z"), JsString("a")), "Z" -> JsNumber(2), "a" -> JsNumber(1))
    val nested = ("b", list) :*: LNil
    val nestedObj = JsObject("$fields" -> JsArray(JsString("b")), "b" -> obj)
    "convert an empty list to JObject" in {
      Converter.toJsonUnsafe(empty) mustEqual emptyObject
    }
    "convert a list to JObject" in {
      Converter.toJsonUnsafe(list) mustEqual obj
    }
    "convert a nested list to JObject" in {
      Converter.toJsonUnsafe(nested) mustEqual nestedObj
    }
    "convert a JObject to list" in {
      Converter.fromJsonUnsafe[Int :*: Int :*: LNil](obj) mustEqual list
    }
    "convert a nested JObject to list" in {
      Converter.fromJsonUnsafe[(Int :*: Int :*: LNil) :*: LNil](nestedObj) mustEqual nested
    }

    val obj2 = JsObject("$fields" -> JsArray(JsString("f")), "f" -> JsString("foo"))
    val nested2Obj = JsObject("$fields" -> JsArray(JsString("b"), JsString("c")), "b" -> obj, "c" -> obj2)

    val list2 = ("f", "foo") :*: LNil
    val nested2 = ("b", list) :*: ("c", list2) :*: LNil

    "convert a 2 nested JObjects to list" in {
      Converter.fromJsonUnsafe[(Int :*: Int :*: LNil) :*: (String :*: LNil) :*: LNil](nested2Obj) mustEqual nested2
    }
  }
} 
Example 33
Source File: BuilderSpec.scala    From sjson-new   with Apache License 2.0 5 votes vote down vote up
package sjsonnew
package support.spray

import org.specs2.mutable._
import java.util.Arrays
import spray.json.{ JsArray, JsNumber, JsString, JsObject }
import LList._

class BuilderSpec extends Specification with BasicJsonProtocol {
  case class Person(name: String, value: Int)
  implicit object PersonFormat extends JsonFormat[Person] {
    def write[J](x: Person, builder: Builder[J]): Unit = {
      builder.beginObject()
      builder.addField("name", x.name)
      builder.addField("value", x.value)
      builder.endObject()
    }
    def read[J](jsOpt: Option[J], unbuilder: Unbuilder[J]): Person =
      jsOpt match {
        case Some(js) =>
          unbuilder.beginObject(js)
          val name = unbuilder.readField[String]("name")
          val value = unbuilder.readField[Int]("value")
          unbuilder.endObject()
          Person(name, value)
        case None =>
          deserializationError("Expected JsObject but found None")
      }
  }

  "Custom format using builder" should {
    val p1 = Person("Alice", 1)
    val personJs = JsObject("name" -> JsString("Alice"), "value" -> JsNumber(1))
    "convert from value to JObject" in {
      Converter.toJsonUnsafe(p1) mustEqual personJs
    }
    "convert from JObject to the same value" in {
      Converter.fromJsonUnsafe[Person](personJs) mustEqual p1
    }
  }
} 
Example 34
Source File: UnionFormatSpec.scala    From sjson-new   with Apache License 2.0 5 votes vote down vote up
package sjsonnew
package support.spray

import org.specs2.mutable._
import java.util.Arrays
import spray.json.{ JsArray, JsNumber, JsString, JsObject }
import LList._

class UnionFormatsSpec extends Specification with BasicJsonProtocol {
  sealed trait Fruit
  case class Apple() extends Fruit
  sealed trait Citrus extends Fruit
  case class Orange() extends Citrus
  implicit object AppleJsonFormat extends JsonFormat[Apple] {
    def write[J](x: Apple, builder: Builder[J]): Unit =
      {
        builder.beginObject()
        builder.addField("x", 0)
        builder.endObject()
      }
    def read[J](jsOpt: Option[J], unbuilder: Unbuilder[J]): Apple =
      jsOpt match {
        case Some(js) =>
          val result = unbuilder.beginObject(js) match {
            case 1 =>
              val x = unbuilder.readField[Int]("x")
              if (x == 0) Apple()
              else deserializationError(s"Unexpected value: $x")
            case x => deserializationError(s"Unexpected number of fields: $x")
          }
          unbuilder.endObject()
          result
        case None => deserializationError("Expected JsNumber but found None")
      }
  }
  implicit object OrangeJsonFormat extends JsonFormat[Orange] {
    def write[J](x: Orange, builder: Builder[J]): Unit =
      {
        builder.beginObject()
        builder.addField("x", 1)
        builder.endObject()
      }
    def read[J](jsOpt: Option[J], unbuilder: Unbuilder[J]): Orange =
      jsOpt match {
        case Some(js) =>
          val result = unbuilder.beginObject(js) match {
            case 1 =>
              val x = unbuilder.readField[Int]("x")
              if (x == 1) Orange()
              else deserializationError(s"Unexpected value: $x")
            case x => deserializationError(s"Unexpected number of fields: $x")
          }
          unbuilder.endObject()
          result
        case None => deserializationError("Expected JsNumber but found None")
      }
  }
  val fruit: Fruit = Apple()
  "The unionFormat" should {
    implicit val FruitFormat: JsonFormat[Fruit] = unionFormat2[Fruit, Apple, Orange]
    val fruitJson = JsObject("value" ->  JsObject("x" -> JsNumber(0)), "type" -> JsString("Apple"))
    "convert a value of ADT to JObject" in {
      Converter.toJsonUnsafe(fruit) mustEqual fruitJson
    }
    "convert JObject back to ADT" in {
      Converter.fromJsonUnsafe[Fruit](fruitJson) mustEqual fruit
    }
  }

  "The flatUnionFormat" should {
    implicit val FruitFormat: JsonFormat[Fruit] = flatUnionFormat2[Fruit, Apple, Orange]("type")
    val fruitJson2 = JsObject("type" -> JsString("Apple"), "x" -> JsNumber(0))
    "convert a value of ADT to JObject" in {
      Converter.toJsonUnsafe(fruit) mustEqual fruitJson2
    }
    "convert JObject back to ADT" in {
      // println(Converter.fromJsonUnsafe[Fruit](fruitJson2))
      Converter.fromJsonUnsafe[Fruit](fruitJson2) mustEqual fruit
    }
  }
} 
Example 35
Source File: VectorSlicerExample.scala    From sparkoscope   with Apache License 2.0 5 votes vote down vote up
// scalastyle:off println
package org.apache.spark.examples.ml

// $example on$
import java.util.Arrays

import org.apache.spark.ml.attribute.{Attribute, AttributeGroup, NumericAttribute}
import org.apache.spark.ml.feature.VectorSlicer
import org.apache.spark.ml.linalg.Vectors
import org.apache.spark.sql.Row
import org.apache.spark.sql.types.StructType
// $example off$
import org.apache.spark.sql.SparkSession

object VectorSlicerExample {
  def main(args: Array[String]): Unit = {
    val spark = SparkSession
      .builder
      .appName("VectorSlicerExample")
      .getOrCreate()

    // $example on$
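    // Arrays.asList builds the java.util.List[Row] accepted by createDataFrame below.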
    val data = Arrays.asList(
      Row(Vectors.sparse(3, Seq((0, -2.0), (1, 2.3)))),
      Row(Vectors.dense(-2.0, 2.3, 0.0))
    )

    val defaultAttr = NumericAttribute.defaultAttr
    val attrs = Array("f1", "f2", "f3").map(defaultAttr.withName)
    val attrGroup = new AttributeGroup("userFeatures", attrs.asInstanceOf[Array[Attribute]])

    val dataset = spark.createDataFrame(data, StructType(Array(attrGroup.toStructField())))

    val slicer = new VectorSlicer().setInputCol("userFeatures").setOutputCol("features")

    slicer.setIndices(Array(1)).setNames(Array("f3"))
    // or slicer.setIndices(Array(1, 2)), or slicer.setNames(Array("f2", "f3"))

    val output = slicer.transform(dataset)
    output.show(false)
    // $example off$

    spark.stop()
  }
}
// scalastyle:on println 
Example 36
Source File: HexUtils.scala    From OUTDATED_ledger-wallet-android   with MIT License 5 votes vote down vote up
package co.ledger.wallet.core.utils

import java.util.{Arrays, Locale}

trait HexUtils {

  private val hexArray = "0123456789ABCDEF".toCharArray
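  // The hex digits above are in ascending character order, so decodeHex can look them up
  // with Arrays.binarySearch.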
  private val AID_PREFIX = "A00000061700"
  private val AID_SUFFIX = "0101"
  private val SELECT_HEADER = "00A40400"

  def encodeHex(bytes: Array[Byte]): String = {
    val hexChars = Array.ofDim[Char](bytes.length * 2)
    for (i <- 0 until bytes.length) {
      val v = bytes(i) & 0xFF
      hexChars(i * 2) = hexArray(v >>> 4)
      hexChars(i * 2 + 1) = hexArray(v & 0x0F)
    }
    new String(hexChars)
  }

  def decodeHex(hexString: String): Array[Byte] = {
    if ((hexString.length & 0x01) != 0) {
      throw new IllegalArgumentException("Odd number of characters.")
    }
    val hexChars = hexString.toUpperCase(Locale.ROOT).toCharArray()
    val result = Array.ofDim[Byte](hexChars.length / 2)
    var i = 0
    while (i < hexChars.length) {
      result(i / 2) = (Arrays.binarySearch(hexArray, hexChars(i)) * 16 + Arrays.binarySearch(hexArray,
        hexChars(i + 1))).toByte
      i += 2
    }
    result
  }

  def stringToHex(s: String): Int = {
    Integer.decode(s)
  }

  def bytesToHex(bytes: Array[Byte]): String = {
    val hexChars = Array.ofDim[Char](bytes.length * 2)
    for (j <- 0 until bytes.length) {
      val v = bytes(j) & 0xFF
      hexChars(j * 2) = hexArray(v >>> 4)
      hexChars(j * 2 + 1) = hexArray(v & 0x0F)
    }
    new String(hexChars)
  }

  implicit class HexString(val str: String) {

    def decodeHex(): Array[Byte] = HexUtils.decodeHex(str)

  }

}

object HexUtils extends HexUtils 
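A minimal usage sketch for the HexUtils trait above (not part of the original project; it assumes the co.ledger.wallet.core.utils.HexUtils object is on the classpath): encode a byte array, decode it back, and verify the round trip with Arrays.equals.

import java.util.Arrays

import co.ledger.wallet.core.utils.HexUtils

object HexRoundTrip {
  def main(args: Array[String]): Unit = {
    val original = Array[Byte](0x00, 0x1F, 0x7F, -1) // -1 is 0xFF as a signed byte
    val hex = HexUtils.encodeHex(original)           // "001F7FFF"
    val decoded = HexUtils.decodeHex(hex)
    println(hex)
    println(Arrays.equals(original, decoded))        // true
  }
}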
Example 37
Source File: CustomShuffledRDD.scala    From sparkoscope   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.scheduler

import java.util.Arrays
import java.util.Objects

import org.apache.spark._
import org.apache.spark.rdd.RDD


class CustomShuffledRDD[K, V, C](
    var dependency: ShuffleDependency[K, V, C],
    partitionStartIndices: Array[Int])
  extends RDD[(K, C)](dependency.rdd.context, Seq(dependency)) {
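  // CoalescedPartitioner and CustomShuffledRDDPartition are defined alongside this class
  // in the original source file; this excerpt omits those declarations.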

  def this(dep: ShuffleDependency[K, V, C]) = {
    this(dep, (0 until dep.partitioner.numPartitions).toArray)
  }

  override def getDependencies: Seq[Dependency[_]] = List(dependency)

  override val partitioner = {
    Some(new CoalescedPartitioner(dependency.partitioner, partitionStartIndices))
  }

  override def getPartitions: Array[Partition] = {
    val n = dependency.partitioner.numPartitions
    Array.tabulate[Partition](partitionStartIndices.length) { i =>
      val startIndex = partitionStartIndices(i)
      val endIndex = if (i < partitionStartIndices.length - 1) partitionStartIndices(i + 1) else n
      new CustomShuffledRDDPartition(i, startIndex, endIndex)
    }
  }

  override def compute(p: Partition, context: TaskContext): Iterator[(K, C)] = {
    val part = p.asInstanceOf[CustomShuffledRDDPartition]
    SparkEnv.get.shuffleManager.getReader(
      dependency.shuffleHandle, part.startIndexInParent, part.endIndexInParent, context)
      .read()
      .asInstanceOf[Iterator[(K, C)]]
  }

  override def clearDependencies() {
    super.clearDependencies()
    dependency = null
  }
} 
Example 38
Source File: DiskStoreSuite.scala    From sparkoscope   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.storage

import java.nio.{ByteBuffer, MappedByteBuffer}
import java.util.Arrays

import org.apache.spark.{SparkConf, SparkFunSuite}
import org.apache.spark.util.io.ChunkedByteBuffer
import org.apache.spark.util.Utils

class DiskStoreSuite extends SparkFunSuite {

  test("reads of memory-mapped and non memory-mapped files are equivalent") {
    // Re-opening the file store fails on Windows when the memory-mapped byte buffer
    // to the file has not yet been garbage-collected.
    assume(!Utils.isWindows)
    val confKey = "spark.storage.memoryMapThreshold"

    // Create a non-trivial (not all zeros) byte array
    val bytes = Array.tabulate[Byte](1000)(_.toByte)
    val byteBuffer = new ChunkedByteBuffer(ByteBuffer.wrap(bytes))

    val blockId = BlockId("rdd_1_2")
    val diskBlockManager = new DiskBlockManager(new SparkConf(), deleteFilesOnStop = true)

    val diskStoreMapped = new DiskStore(new SparkConf().set(confKey, "0"), diskBlockManager)
    diskStoreMapped.putBytes(blockId, byteBuffer)
    val mapped = diskStoreMapped.getBytes(blockId)
    assert(diskStoreMapped.remove(blockId))

    val diskStoreNotMapped = new DiskStore(new SparkConf().set(confKey, "1m"), diskBlockManager)
    diskStoreNotMapped.putBytes(blockId, byteBuffer)
    val notMapped = diskStoreNotMapped.getBytes(blockId)

    // Not possible to do isInstanceOf due to visibility of HeapByteBuffer
    assert(notMapped.getChunks().forall(_.getClass.getName.endsWith("HeapByteBuffer")),
      "Expected HeapByteBuffer for un-mapped read")
    assert(mapped.getChunks().forall(_.isInstanceOf[MappedByteBuffer]),
      "Expected MappedByteBuffer for mapped read")

    def arrayFromByteBuffer(in: ByteBuffer): Array[Byte] = {
      val array = new Array[Byte](in.remaining())
      in.get(array)
      array
    }

    assert(Arrays.equals(mapped.toArray, bytes))
    assert(Arrays.equals(notMapped.toArray, bytes))
  }
} 
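The assertions above use Arrays.equals because JVM arrays compare by reference, not by content. A small self-contained sketch, independent of the Spark suite, showing the difference:

import java.util.Arrays

object ArrayEqualityDemo {
  def main(args: Array[String]): Unit = {
    val a = Array[Byte](1, 2, 3)
    val b = Array[Byte](1, 2, 3)
    println(a == b)              // false: arrays compare by reference
    println(Arrays.equals(a, b)) // true: element-wise comparison
    println(a.sameElements(b))   // true: the Scala-collection alternative
  }
}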
Example 39
Source File: Runner.scala    From avrohugger   with Apache License 2.0 5 votes vote down vote up
package avrohugger
package tool

import format.abstractions.SourceFormat
import format.{Scavro, SpecificRecord, Standard}
import java.util.Arrays
import java.util.Map
import java.util.TreeMap
import java.io.{InputStream, PrintStream}

import org.apache.avro.tool.Tool

import scala.util.{Failure, Success, Try}
import scala.collection.JavaConverters._



  def run(args: Array[String]): Int = {
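    // toolsMap, in, out, err and maxLen are members of the enclosing Runner declaration,
    // which this excerpt omits.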
    if (args.length != 0) {
      val tool: Tool = toolsMap.get(args(0))
      if (tool != null) {
        val result = Try {
          tool.run(
            in, out, err, Arrays.asList(args: _*).subList(1, args.length))
        }
        result match {
          case Success(0) => 0
          case Success(exitCode) =>
            err.println("Tool " + args(0) + " failed with exit code " + exitCode)
            exitCode
          case Failure(e) =>
            err.println("Tool " + args(0) + " failed: " + e.toString)
            1
        }
      } else {
        err.println("Unknown tool: " + args(0))
        1
      }
    } else {
      err.println("----------------")

      err.println("Available tools:")
      for (k <- toolsMap.asScala.values) {
        err.printf("%" + maxLen + "s  %s\n", k.getName(), k.getShortDescription())
      }

      1
    }
  }
} 
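A standalone sketch, separate from the avro tool wiring above, of the Arrays.asList(...).subList(...) idiom used to drop the leading tool name from the argument array (the argument values here are placeholders):

import java.util.Arrays

object SubListDemo {
  def main(args: Array[String]): Unit = {
    val argv = Array("sometool", "-v", "input.avsc")
    // Arrays.asList wraps the array in a fixed-size java.util.List;
    // subList(1, n) is a view that skips the first element.
    val rest = Arrays.asList(argv: _*).subList(1, argv.length)
    println(rest) // [-v, input.avsc]
  }
}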
Example 40
Source File: PrimitivesSpec.scala    From play-soap   with Apache License 2.0 5 votes vote down vote up
package play.soap.sbtplugin.tester

import java.util.Arrays

import play.soap.testservice.PrimitivesImpl
import play.soap.testservice.client._

import scala.collection.JavaConverters._
import scala.reflect.ClassTag



class PrimitivesSpec extends ServiceSpec {

  sequential
  "Primitives" should {

    "handle boolean ops" in withClient { client =>
      await(client.booleanOp(true)) must_== true
    }

    "handle boolean sequences" in withClient { client =>
      await(client.booleanSequence(Arrays.asList(true, true))).asScala must_== List(true, true, true)
    }
    "handle byte ops" in withClient { client =>
      await(client.byteOp(1.toByte)) must_== 1.toByte
    }

    "handle byte sequences" in withClient { client =>
      await(client.byteSequence(Arrays.asList(1.toByte, 1.toByte))).asScala must_== List(1.toByte, 1.toByte, 1.toByte)
    }
    "handle double ops" in withClient { client =>
      await(client.doubleOp(1.0d)) must_== 1.0d
    }

    "handle double sequences" in withClient { client =>
      await(client.doubleSequence(Arrays.asList(1.0d, 1.0d))).asScala must_== List(1.0d, 1.0d, 1.0d)
    }
    "handle float ops" in withClient { client =>
      await(client.floatOp(1.0f)) must_== 1.0f
    }

    "handle float sequences" in withClient { client =>
      await(client.floatSequence(Arrays.asList(1.0f, 1.0f))).asScala must_== List(1.0f, 1.0f, 1.0f)
    }
    "handle int ops" in withClient { client =>
      await(client.intOp(1)) must_== 1
    }

    "handle int sequences" in withClient { client =>
      await(client.intSequence(Arrays.asList(1, 1))).asScala must_== List(1, 1, 1)
    }
    "handle long ops" in withClient { client =>
      await(client.longOp(1L)) must_== 1L
    }

    "handle long sequences" in withClient { client =>
      await(client.longSequence(Arrays.asList(1L, 1L))).asScala must_== List(1L, 1L, 1L)
    }
    "handle short ops" in withClient { client =>
      await(client.shortOp(1.toShort)) must_== 1.toShort
    }

    "handle short sequences" in withClient { client =>
      await(client.shortSequence(Arrays.asList(1.toShort, 1.toShort))).asScala must_== List(1.toShort, 1.toShort, 1.toShort)
    }
  }

  override type ServiceClient = PrimitivesService

  override type Service = Primitives

  override implicit val serviceClientClass: ClassTag[PrimitivesService] = ClassTag(classOf[PrimitivesService])

  override def getServiceFromClient(c: ServiceClient): Service = c.primitives

  override def createServiceImpl(): Any = new PrimitivesImpl

  val servicePath: String = "primitives"

} 
Example 41
Source File: SemanticParserState.scala    From pnp   with Apache License 2.0 5 votes vote down vote up
package org.allenai.pnp.semparse

import java.util.Arrays

import com.google.common.base.Preconditions
import com.jayantkrish.jklol.ccg.lambda.Type
import com.jayantkrish.jklol.ccg.lambda2.Expression2
import edu.cmu.dynet.Expression
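// Note: the SemanticParserState case class and the opening of its companion object are
// omitted from this excerpt; start() below belongs to that companion object.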


  def start(): SemanticParserState = {
    SemanticParserState(Map.empty, List(), 1, 0, null, List(), List())
  }
}

case class ExpressionPart(val expr: Expression2,
    val holes: Array[Int], val holeIds: Array[Int]) {
  Preconditions.checkArgument(holes.length == holeIds.length)
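  // Arrays.toString renders holes and holeIds element-by-element; an Array's default
  // toString would only print the reference.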
  
  override def toString: String = {
    "ExpressionPart(" + expr + ", " + Arrays.toString(holes) + ", " + Arrays.toString(holeIds) 
  }
}

case class Hole(id: Int, t: Type, scope: Scope, repeated: Boolean) 
Example 42
Source File: VectorSlicerExample.scala    From multi-tenancy-spark   with Apache License 2.0 5 votes vote down vote up
// scalastyle:off println
package org.apache.spark.examples.ml

// $example on$
import java.util.Arrays

import org.apache.spark.ml.attribute.{Attribute, AttributeGroup, NumericAttribute}
import org.apache.spark.ml.feature.VectorSlicer
import org.apache.spark.ml.linalg.Vectors
import org.apache.spark.sql.Row
import org.apache.spark.sql.types.StructType
// $example off$
import org.apache.spark.sql.SparkSession

object VectorSlicerExample {
  def main(args: Array[String]): Unit = {
    val spark = SparkSession
      .builder
      .appName("VectorSlicerExample")
      .getOrCreate()

    // $example on$
    val data = Arrays.asList(
      Row(Vectors.sparse(3, Seq((0, -2.0), (1, 2.3)))),
      Row(Vectors.dense(-2.0, 2.3, 0.0))
    )

    val defaultAttr = NumericAttribute.defaultAttr
    val attrs = Array("f1", "f2", "f3").map(defaultAttr.withName)
    val attrGroup = new AttributeGroup("userFeatures", attrs.asInstanceOf[Array[Attribute]])

    val dataset = spark.createDataFrame(data, StructType(Array(attrGroup.toStructField())))

    val slicer = new VectorSlicer().setInputCol("userFeatures").setOutputCol("features")

    slicer.setIndices(Array(1)).setNames(Array("f3"))
    // or slicer.setIndices(Array(1, 2)), or slicer.setNames(Array("f2", "f3"))

    val output = slicer.transform(dataset)
    output.show(false)
    // $example off$

    spark.stop()
  }
}
// scalastyle:on println 
Example 43
Source File: RUtils.scala    From multi-tenancy-spark   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.api.r

import java.io.File
import java.util.Arrays

import org.apache.hadoop.security.UserGroupInformation

import org.apache.spark.{SparkEnv, SparkException}

private[spark] object RUtils {
  // Local path where R binary packages built from R source code contained in the spark
  // packages specified with "--packages" or "--jars" command line option reside.
  var rPackages: Option[String] = None

  
  def isRInstalled: Boolean = {
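    // Shells out to "R --version"; ProcessBuilder accepts the java.util.List built by Arrays.asList.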
    try {
      val builder = new ProcessBuilder(Arrays.asList("R", "--version"))
      builder.start().waitFor() == 0
    } catch {
      case e: Exception => false
    }
  }
} 
Example 44
Source File: CustomShuffledRDD.scala    From multi-tenancy-spark   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.scheduler

import java.util.Arrays
import java.util.Objects

import org.apache.spark._
import org.apache.spark.rdd.RDD
import org.apache.spark.util.Utils


class CustomShuffledRDD[K, V, C](
    var dependency: ShuffleDependency[K, V, C],
    partitionStartIndices: Array[Int])
  extends RDD[(K, C)](dependency.rdd.context, Seq(dependency)) {

  def this(dep: ShuffleDependency[K, V, C]) = {
    this(dep, (0 until dep.partitioner.numPartitions).toArray)
  }

  override def getDependencies: Seq[Dependency[_]] = List(dependency)

  override val partitioner = {
    Some(new CoalescedPartitioner(dependency.partitioner, partitionStartIndices))
  }

  override def getPartitions: Array[Partition] = {
    val n = dependency.partitioner.numPartitions
    Array.tabulate[Partition](partitionStartIndices.length) { i =>
      val startIndex = partitionStartIndices(i)
      val endIndex = if (i < partitionStartIndices.length - 1) partitionStartIndices(i + 1) else n
      new CustomShuffledRDDPartition(i, startIndex, endIndex)
    }
  }

  override def compute(p: Partition, context: TaskContext): Iterator[(K, C)] = {
    val part = p.asInstanceOf[CustomShuffledRDDPartition]
    val user = Utils.getCurrentUserName()
    SparkEnv.get(user).shuffleManager.getReader(
      dependency.shuffleHandle, part.startIndexInParent, part.endIndexInParent, context)
      .read()
      .asInstanceOf[Iterator[(K, C)]]
  }

  override def clearDependencies() {
    super.clearDependencies()
    dependency = null
  }
} 
Example 45
Source File: DiskStoreSuite.scala    From multi-tenancy-spark   with Apache License 2.0 5 votes vote down vote up
package org.apache.spark.storage

import java.nio.{ByteBuffer, MappedByteBuffer}
import java.util.Arrays

import org.apache.spark.{SparkConf, SparkFunSuite}
import org.apache.spark.util.io.ChunkedByteBuffer
import org.apache.spark.util.Utils

class DiskStoreSuite extends SparkFunSuite {

  test("reads of memory-mapped and non memory-mapped files are equivalent") {
    // Re-opening the file store fails on Windows when the memory-mapped byte buffer
    // to the file has not yet been garbage-collected.
    assume(!Utils.isWindows)
    val confKey = "spark.storage.memoryMapThreshold"

    // Create a non-trivial (not all zeros) byte array
    val bytes = Array.tabulate[Byte](1000)(_.toByte)
    val byteBuffer = new ChunkedByteBuffer(ByteBuffer.wrap(bytes))

    val blockId = BlockId("rdd_1_2")
    val diskBlockManager = new DiskBlockManager(new SparkConf(), deleteFilesOnStop = true)

    val diskStoreMapped = new DiskStore(new SparkConf().set(confKey, "0"), diskBlockManager)
    diskStoreMapped.putBytes(blockId, byteBuffer)
    val mapped = diskStoreMapped.getBytes(blockId)
    assert(diskStoreMapped.remove(blockId))

    val diskStoreNotMapped = new DiskStore(new SparkConf().set(confKey, "1m"), diskBlockManager)
    diskStoreNotMapped.putBytes(blockId, byteBuffer)
    val notMapped = diskStoreNotMapped.getBytes(blockId)

    // Not possible to do isInstanceOf due to visibility of HeapByteBuffer
    assert(notMapped.getChunks().forall(_.getClass.getName.endsWith("HeapByteBuffer")),
      "Expected HeapByteBuffer for un-mapped read")
    assert(mapped.getChunks().forall(_.isInstanceOf[MappedByteBuffer]),
      "Expected MappedByteBuffer for mapped read")

    def arrayFromByteBuffer(in: ByteBuffer): Array[Byte] = {
      val array = new Array[Byte](in.remaining())
      in.get(array)
      array
    }

    assert(Arrays.equals(mapped.toArray, bytes))
    assert(Arrays.equals(notMapped.toArray, bytes))
  }
} 
Example 46
Source File: Utils.scala    From OUTDATED_ledger-wallet-android   with MIT License 5 votes vote down vote up
package co.ledger.wallet.nfc

import java.util.Arrays
import java.util.Locale

object Utils {
  private val hexArray = "0123456789ABCDEF".toCharArray()
  private val AID_PREFIX = "A00000061700"
  private val AID_SUFFIX = "0101"
  private val SELECT_HEADER = "00A40400"

  def encodeHex(bytes: Array[Byte]): String = {
    val hexChars = Array.ofDim[Char](bytes.length * 2)
    for (i <- 0 until bytes.length) {
      val v = bytes(i) & 0xFF
      hexChars(i * 2) = hexArray(v >>> 4)
      hexChars(i * 2 + 1) = hexArray(v & 0x0F)
    }
    new String(hexChars)
  }

  def decodeHex(hexString: String): Array[Byte] = {
    if ((hexString.length & 0x01) != 0) {
      throw new IllegalArgumentException("Odd number of characters.")
    }
    val hexChars = hexString.toUpperCase(Locale.ROOT).toCharArray()
    val result = Array.ofDim[Byte](hexChars.length / 2)
    var i = 0
    while (i < hexChars.length) {
      result(i / 2) = (Arrays.binarySearch(hexArray, hexChars(i)) * 16 + Arrays.binarySearch(hexArray,
        hexChars(i + 1))).toByte
      i += 2
    }
    result
  }

  def stringToHex(s: String): Int = {
    Integer.decode(s)
  }

  def bytesToHex(bytes: Array[Byte]): String = {
    val hexChars = Array.ofDim[Char](bytes.length * 2)
    for (j <- 0 until bytes.length) {
      val v = bytes(j) & 0xFF
      hexChars(j * 2) = hexArray(v >>> 4)
      hexChars(j * 2 + 1) = hexArray(v & 0x0F)
    }
    new String(hexChars)
  }

  def statusBytes(response: Array[Byte]): Array[Byte] = {
    Array(response(response.length - 2), response(response.length - 1))
  }

  def responseData(response: Array[Byte]): Array[Byte] = {
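    // Copies everything except the trailing two status bytes (the pair returned by statusBytes).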
    Arrays.copyOfRange(response, 0, response.length - 2)
  }
}