java.util.Collection Scala Examples
The following examples show how to use java.util.Collection.
Each example notes its original project, source file, and license.
Example 1
Source File: JavaStreamApp.scala From incubator-retired-gearpump with Apache License 2.0
package org.apache.gearpump.streaming.dsl.javaapi

import java.util.Collection

import org.apache.gearpump.cluster.UserConfig
import org.apache.gearpump.cluster.client.{ClientContext, RunningApplication}
import org.apache.gearpump.streaming.dsl.scalaapi.{CollectionDataSource, StreamApp}
import org.apache.gearpump.streaming.source.DataSource

import scala.collection.JavaConverters._

class JavaStreamApp(name: String, context: ClientContext, userConfig: UserConfig) {

  private val streamApp = StreamApp(name, context, userConfig)

  def source[T](collection: Collection[T], parallelism: Int,
      conf: UserConfig, description: String): JavaStream[T] = {
    val dataSource = new CollectionDataSource(collection.asScala.toSeq)
    source(dataSource, parallelism, conf, description)
  }

  def source[T](dataSource: DataSource, parallelism: Int,
      conf: UserConfig, description: String): JavaStream[T] = {
    new JavaStream[T](streamApp.source(dataSource, parallelism, conf, description))
  }

  def submit(): RunningApplication = {
    context.submit(streamApp)
  }
}
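The core of this example is the collection.asScala.toSeq call, which turns the incoming java.util.Collection into a Scala Seq before it is wrapped in a CollectionDataSource. Below is a minimal sketch of just that conversion using only the standard library; the gearpump-specific plumbing (ClientContext, UserConfig) is omitted.

// Sketch of the Collection-to-Seq conversion used by JavaStreamApp.source, in isolation.
import java.util.{ Arrays, Collection => JCollection }
import scala.collection.JavaConverters._

val javaInput: JCollection[String] = Arrays.asList("a", "b", "c")
val scalaSeq: Seq[String] = javaInput.asScala.toSeq  // same conversion as in source(...)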
Example 2
Source File: ServerReflection.scala From akka-grpc with Apache License 2.0
package akka.grpc.javadsl

import java.util.Collection
import java.util.concurrent.CompletionStage

import akka.actor.ClassicActorSystemProvider
import akka.annotation.ApiMayChange
import akka.grpc.ServiceDescription
import akka.grpc.internal.ServerReflectionImpl
import akka.http.javadsl.model.{ HttpRequest, HttpResponse }
import grpc.reflection.v1alpha.reflection.ServerReflectionHandler

@ApiMayChange(issue = "https://github.com/akka/akka-grpc/issues/850")
object ServerReflection {
  @ApiMayChange(issue = "https://github.com/akka/akka-grpc/issues/850")
  def create(
      objects: Collection[ServiceDescription],
      sys: ClassicActorSystemProvider): akka.japi.Function[HttpRequest, CompletionStage[HttpResponse]] = {
    import scala.collection.JavaConverters._
    val delegate = ServerReflectionHandler.apply(
      ServerReflectionImpl(objects.asScala.map(_.descriptor).toSeq, objects.asScala.map(_.name).toList))(sys)
    import scala.compat.java8.FutureConverters._
    implicit val ec = sys.classicSystem.dispatcher
    request =>
      delegate
        .apply(request.asInstanceOf[akka.http.scaladsl.model.HttpRequest])
        .map(_.asInstanceOf[HttpResponse])
        .toJava
  }
}
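Here the Collection parameter is traversed twice through asScala, once to collect the protobuf descriptors and once to collect the service names. The sketch below shows the same pattern in isolation with a generic element type, so nothing akka-grpc-specific is assumed; the split helper is purely illustrative.

// Sketch of the double asScala traversal above, kept generic on purpose.
import java.util.{ Collection => JCollection }
import scala.collection.JavaConverters._

def split[A, B, C](objects: JCollection[A])(f: A => B, g: A => C): (Seq[B], List[C]) =
  (objects.asScala.map(f).toSeq, objects.asScala.map(g).toList)

// ServerReflection.create follows this shape with f = _.descriptor and g = _.name.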
Example 3
Source File: Utils.scala From ingraph with Eclipse Public License 1.0
package ingraph.ire.util

import java.util.Collection
import java.util.concurrent.atomic.AtomicInteger

import akka.actor.ActorRef
import ingraph.ire.datatypes.Tuple
import ingraph.ire.messages.{ChangeSet, Primary, ReteMessage, Secondary}

import scala.collection.mutable

object Utils {
  object conversions {
    implicit def toSendingFunction(base: ActorRef): ReteMessage => Unit = base ! _

    implicit class ReteNode(base: ActorRef) extends Serializable {
      def primary(reteMessage: ReteMessage) = {
        base ! Primary(reteMessage)
      }

      def secondary(reteMessage: ReteMessage) = {
        base ! Secondary(reteMessage)
      }
    }
  }

  def time[R](block: => R): Long = {
    val t0 = System.nanoTime()
    val result = block // call-by-name
    val t1 = System.nanoTime()
    val elapsed = t1 - t0
    println("Elapsed time: " + elapsed + "ns")
    elapsed
  }
}

class AtomicUniqueCounter {
  private val counter: AtomicInteger = new AtomicInteger(0)

  def getNext = counter.getAndIncrement()
}

trait IterableMultiMap[A, B] extends mutable.MultiMap[A, B] {
  def multiUnzip: (Iterable[A], Iterable[B]) = {
    val b1 = genericBuilder[A]
    val b2 = genericBuilder[B]
    this.foreach(keyValueSet => {
      keyValueSet._2.foreach(value => {
        b1 += keyValueSet._1
        b2 += value
      })
    })
    (b1.result(), b2.result())
  }
}

object SizeCounter {
  def countDeeper(containers: Iterable[Iterable[Tuple]]*): Long =
    containers.map(hashmap => hashmap.foldLeft(0)((sum, set) => sum + set.foldLeft(0)(_ + _.size))).sum

  def count(containers: Iterable[Iterable[Any]]*): Long = {
    containers.map(tuples => tuples.foldLeft(0)(_ + _.size)).sum
  }

  def count(containers: Collection[Tuple]): Long = {
    containers.size
  }
}
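Besides the Collection-based SizeCounter.count overload, the file defines a small call-by-name timing helper. A usage sketch follows; the timed block is arbitrary.

// Sketch: timing an arbitrary block with the helper defined above.
// Utils.time prints the elapsed time and returns it in nanoseconds.
import ingraph.ire.util.Utils

val elapsedNanos: Long = Utils.time {
  (1 to 1000000).sum  // any computation to be measured
}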
Example 4
Source File: OdinsonIndexSearcher.scala From odinson with Apache License 2.0
package ai.lum.odinson.lucene.search

import java.util.Collection
import java.util.concurrent.ExecutorService

import scala.collection.JavaConverters._
import scala.concurrent.ExecutionContext

import org.apache.lucene.index._
import org.apache.lucene.search._

import ai.lum.odinson.lucene._
import ai.lum.odinson.utils.ExecutionContextExecutorServiceBridge

class OdinsonIndexSearcher(
    context: IndexReaderContext,
    executor: ExecutorService,
    computeTotalHits: Boolean,
) extends IndexSearcher(context, executor) {

  def this(r: IndexReader, e: ExecutorService, computeTotalHits: Boolean) = {
    this(r.getContext(), e, computeTotalHits)
  }

  def this(r: IndexReader, e: ExecutionContext, computeTotalHits: Boolean) = {
    this(r.getContext(), ExecutionContextExecutorServiceBridge(e), computeTotalHits)
  }

  def this(r: IndexReader, computeTotalHits: Boolean) = {
    this(r.getContext(), null, computeTotalHits)
  }

  def odinSearch(query: OdinsonQuery): OdinResults = {
    val n = readerContext.reader().maxDoc()
    odinSearch(query, n)
  }

  def odinSearch(query: OdinsonQuery, n: Int): OdinResults = {
    odinSearch(null, query, n)
  }

  def odinSearch(after: OdinsonScoreDoc, query: OdinsonQuery, numHits: Int): OdinResults = {
    odinSearch(after, query, numHits, false)
  }

  def odinSearch(after: OdinsonScoreDoc, query: OdinsonQuery, numHits: Int, disableMatchSelector: Boolean): OdinResults = {
    val limit = math.max(1, readerContext.reader().maxDoc())
    require(
      after == null || after.doc < limit,
      s"after.doc exceeds the number of documents in the reader: after.doc=${after.doc} limit=${limit}"
    )
    val cappedNumHits = math.min(numHits, limit)
    val manager = new CollectorManager[OdinsonCollector, OdinResults] {
      def newCollector() = new OdinsonCollector(cappedNumHits, after, computeTotalHits, disableMatchSelector)
      def reduce(collectors: Collection[OdinsonCollector]): OdinResults = {
        val results = collectors.iterator.asScala.map(_.odinResults).toArray
        OdinResults.merge(0, cappedNumHits, results, true)
      }
    }
    search(query, manager)
  }
}
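The java.util.Collection appears in CollectorManager.reduce, where the per-thread collectors are merged after an asScala conversion. Below is a sketch of constructing the searcher from a Lucene index on disk; the index path is a placeholder, and building an OdinsonQuery (via the Odinson query compiler) is out of scope here.

// Sketch: opening a Lucene index and creating the searcher with the
// (IndexReader, Boolean) auxiliary constructor. The path is a placeholder.
import java.nio.file.Paths
import org.apache.lucene.index.DirectoryReader
import org.apache.lucene.store.FSDirectory

val reader = DirectoryReader.open(FSDirectory.open(Paths.get("/path/to/odinson/index")))
val searcher = new OdinsonIndexSearcher(reader, true)  // computeTotalHits = true
// With a compiled query in hand (not shown):
// val results = searcher.odinSearch(query, 10)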
Example 5
Source File: SparkDataUtils.scala From elasticsearch-prediction-spark with Apache License 2.0
package com.sdhu.elasticsearchprediction.spark

import com.mahisoft.elasticsearchprediction._
import plugin.domain.{ IndexValue, IndexAttributeDefinition }
import domain.DataType

import org.apache.spark._
import rdd.RDD
import mllib.linalg.{ Vectors, Vector ⇒ spV }
import mllib.regression.LabeledPoint

import java.util.Collection

import scala.util.control.Exception._
import scala.collection.JavaConversions._
import scala.util.Random

object CsvUtil extends Serializable {
  implicit class RichArrayString(val a: Array[String]) extends Serializable {
    def toDoubleOpt(i: Int): Option[Double] =
      catching(classOf[NumberFormatException]).opt(a(i).toDouble)

    def toDoubleEither(i: Int): Either[Double, String] = {
      this.toDoubleOpt(i) match {
        case Some(d) ⇒ Left(d)
        case None ⇒ Right(a(i))
      }
    }

    def toDoubleArray(cm: Map[String, Double]): Array[Double] = {
      a.zipWithIndex.map { case (v, i) ⇒
        this.toDoubleOpt(i) match {
          case Some(d) ⇒ d
          case None ⇒ cm.getOrElse(v, 0.0)
        }
      }
    }
  }
}

object ReadUtil extends Serializable {
  import CsvUtil._

  def cIndVal2Vector(v: Collection[IndexValue], cm: Map[String, Double]): spV = {
    val a = v.map(x ⇒ x.getDefinition.getType match {
      case DataType.DOUBLE ⇒ x.getValue.asInstanceOf[Double].toString
      case _ ⇒ x.getValue.asInstanceOf[String]
    }).toArray.toDoubleArray(cm)

    println(s"array ${a.mkString(",")}")
    Vectors.dense(a)
  }

  // not using IndexAttributeDefinition ... just set it to double
  def arr2CIndVal(v: Array[String]): Collection[IndexValue] = {
    val ret = v.map(s ⇒ new IndexValue(
      new IndexAttributeDefinition("notUsed", DataType.STRING), s))

    asJavaCollection[IndexValue](ret)
  }
}
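arr2CIndVal goes in the Scala-to-Java direction with asJavaCollection from the (now deprecated) JavaConversions import. A sketch of the same step using the explicit JavaConverters decorators, which produce an equivalent java.util.Collection view:

// Sketch: Scala Array -> java.util.Collection, as in arr2CIndVal, but with
// the explicit JavaConverters style instead of the implicit JavaConversions.
import java.util.{ Collection => JCollection }
import scala.collection.JavaConverters._

val fields = Array("50", "Self-emp-not-inc", "Male")
val asJava: JCollection[String] = fields.toSeq.asJavaCollection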
Example 6
Source File: SparkPredictorEngine.scala From elasticsearch-prediction-spark with Apache License 2.0
package com.sdhu.elasticsearchprediction.spark

import com.mahisoft.elasticsearchprediction.plugin.engine.PredictorEngine
import com.mahisoft.elasticsearchprediction.plugin.domain.IndexValue
import com.mahisoft.elasticsearchprediction.plugin.exception.PredictionException

import org.apache.spark.mllib.linalg.Vectors
import org.apache.spark.mllib.regression.GeneralizedLinearModel

import java.util.Collection

class SparkPredictorEngine[M <: GeneralizedLinearModel](
    val readPath: String,
    val spHelp: SparkModelHelpers[M]) extends PredictorEngine {

  private var _model: ModelData[M] = ModelData[M]()

  override def getPrediction(values: Collection[IndexValue]): Double = {
    if (_model.clf.nonEmpty) {
      val v = ReadUtil.cIndVal2Vector(
        values,
        _model.categoriesMap.getOrElse(Map[String, Double]()))

      _model.clf.get.predict(v)
    } else {
      throw new PredictionException("Empty model");
    }
  }

  def readModel(): ModelData[M] = {
    _model = spHelp.readSparkModel(readPath)
    _model
  }

  def getModel: ModelData[M] = _model
}
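A usage sketch that feeds the engine a java.util.Collection[IndexValue] built with the ReadUtil helper from Example 5. SVM_Helper and the model path are taken from the test in Example 7 and are assumed to point at a previously saved model.

// Sketch only: readModel must succeed before getPrediction can return a value;
// otherwise the engine throws PredictionException("Empty model").
val engine = new SparkPredictorEngine("/path/to/spark-clf-test.model", SVM_Helper)
engine.readModel()

val values = ReadUtil.arr2CIndVal(Array("50", "Self-emp-not-inc", "Male", "0", "0", "United-States"))
val prediction: Double = engine.getPrediction(values)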
Example 7
Source File: SarkPredictorEngineSpec.scala From elasticsearch-prediction-spark with Apache License 2.0
package com.sdhu.elasticsearchprediction.spark
package test

import com.mahisoft.elasticsearchprediction._
import utils.DataProperties
import plugin.domain.IndexValue
import plugin.exception.PredictionException
import plugin.engine.PredictorEngine

import org.apache.spark._
import rdd.RDD
import mllib.regression._
import mllib.classification._

import org.scalatest._
import com.holdenkarau.spark.testing._

import java.io.File
import java.util.Collection

import scala.collection.JavaConversions._

class SparkPredictorEngineSpec extends FlatSpec with MustMatchers {
  val pconf = getClass.getResource("/prop1.conf").getPath
  val dataP = getClass.getResource("/mini.csv").toURI.toString
  val dp = new DataProperties(pconf)
  val modelP = getClass.getResource("/spark-clf-test.model").getPath
  val clf_type = "spark.logistic-regression"

  "Predictor Engine" should "throw empty model exception" in {
    val eng = new SparkPredictorEngine(modelP, SVM_Helper)
    evaluating { eng.getPrediction(List[IndexValue]()) } must produce [PredictionException]
  }

  // "Spark_PredictorEngine" should "return sparkPredictorEngine of svm type" in {
  //   val speng = new Spark_PredictorEngine(modelP, "spark.svm")
  //   speng.getSparkPredictorEngine mustBe a [SparkPredictorEngine[_]]
  //
  // }

  it should "return a generic PredictorEngine" in {
    val speng = new Spark_PredictorEngine(modelP, "spark.svm")
    speng.getPredictorEngine mustBe a [PredictorEngine]
  }

  it should "load the classifier" in {
    val speng = new Spark_PredictorEngine(modelP, clf_type)
    val eng = speng.getSparkPredictorEngine
    val m = eng.getModel
    val cm = m.categoriesMap.getOrElse(Map[String, Double]())

    m.clf must not be empty
    //m.numClasses must be(Some(2))
    //m.binThreshold must be(Some(0.5))
    cm.keys must contain allOf("Female", "Male", "United-States", "China")
  }

  it should "evaluate values" in {
    val speng = new Spark_PredictorEngine(modelP, clf_type)
    val eng = speng.getSparkPredictorEngine

    val p0 = Array("50", "Self-emp-not-inc", "Male", "0", "0", "United-States")
    val cindv = ReadUtil.arr2CIndVal(p0)
    val check = eng.getPrediction(cindv)

    check must equal(0.0)
    check mustBe a [java.lang.Double]
  }

  it should "evaluate values using generic Predictor engine" in {
    val speng = new Spark_PredictorEngine(modelP, clf_type)
    val eng = speng.getPredictorEngine

    val p0 = Array("50", "Self-emp-not-inc", "Male", "0", "0", "United-States")
    val cindv = ReadUtil.arr2CIndVal(p0)
    val check = eng.getPrediction(cindv)

    check must equal(0.0)
    check mustBe a [java.lang.Double]
  }
}
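Note that eng.getPrediction(List[IndexValue]()) only compiles because the wildcard scala.collection.JavaConversions._ import converts the Scala List to a java.util.Collection implicitly. With the explicit-converter style used elsewhere on this page, the same call would be written roughly as follows (a sketch, not part of the original test):

import scala.collection.JavaConverters._

val empty: java.util.Collection[IndexValue] = List.empty[IndexValue].asJavaCollection
// eng.getPrediction(empty)  // same call as the first test, with an explicit conversion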