scala.collection.JavaConverters Scala Examples
The following examples show how to use scala.collection.JavaConverters.
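Most of the examples on this page use one of two styles: the explicit converter methods defined on the scala.collection.JavaConverters object (for example seqAsJavaListConverter(xs).asJava), or the asScala/asJava decorators brought into scope by importing JavaConverters._. The following minimal sketch (hypothetical values, assuming Scala 2.12; in Scala 2.13 the equivalent decorators live in scala.jdk.CollectionConverters) shows both styles side by side:

import scala.collection.JavaConverters
import scala.collection.JavaConverters._

val names: Seq[String] = Seq("a", "b", "c")

// Explicit converter style, as used in several examples below.
val asJavaList: java.util.List[String] = JavaConverters.seqAsJavaListConverter(names).asJava

// Decorator style: asJava / asScala extension methods from the wildcard import.
val asJavaList2: java.util.List[String] = names.asJava
val backToScala: Seq[String] = asJavaList2.asScala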
Example 1
Source File: GaussianMixtureModelWrapper.scala From drizzle-spark with Apache License 2.0
package org.apache.spark.mllib.api.python

import scala.collection.JavaConverters

import org.apache.spark.SparkContext
import org.apache.spark.mllib.clustering.GaussianMixtureModel
import org.apache.spark.mllib.linalg.{Vector, Vectors}

// Wrapper around GaussianMixtureModel to provide helper methods in Python.
private[python] class GaussianMixtureModelWrapper(model: GaussianMixtureModel) {

  val gaussians: Array[Byte] = {
    val modelGaussians = model.gaussians.map { gaussian =>
      Array[Any](gaussian.mu, gaussian.sigma)
    }
    SerDe.dumps(JavaConverters.seqAsJavaListConverter(modelGaussians).asJava)
  }

  def predictSoft(point: Vector): Vector = {
    Vectors.dense(model.predictSoft(point))
  }

  def save(sc: SparkContext, path: String): Unit = model.save(sc, path)
}
Example 2
Source File: RTreeIndex.scala From watr-works with Apache License 2.0
package edu.umass.cs.iesl.watr
package rtrees

import scala.collection.JavaConverters
import utils.DoOrDieHandlers._

import com.github.davidmoten.rtree.{geometry => RG, _}
import com.github.davidmoten.rtree
import rx.functions.Func1

import geometry._
import geometry.syntax._

class RTreeIndex[A <: GeometricFigure, W, Shape <: LabeledShape.Aux[A, W]](
  var spatialIndex: RTree[Shape, RG.Geometry]
) extends RTreeSearch[A, W, Shape] {
  import RGeometryConversions._

  override def rtreeIndex: RTree[Shape, RG.Geometry] = spatialIndex

  def clearAll(): Unit = {
    spatialIndex = RTree.create[Shape, RG.Geometry]()
  }

  def remove(item: Shape): Unit = {
    spatialIndex = spatialIndex.delete(
      item,
      toRGRectangle(item.bounds)
    )
  }

  def add(item: Shape): Unit = {
    spatialIndex = spatialIndex.add(
      item,
      toRGRectangle(item.bounds)
    )
  }

  def getItems(): Seq[Shape] = {
    toScalaSeq(spatialIndex.entries())
  }
}

object RTreeIndex {
  import RGeometryConversions._

  def empty[A <: GeometricFigure, W, Shape <: LabeledShape.Aux[A, W]](): RTreeIndex[A, W, Shape] = {
    val init = RTree.create[Shape, RG.Geometry]()
    new RTreeIndex[A, W, Shape](init)
  }

  import _root_.io.circe
  import circe._
  import circe.syntax._
  import circe.literal._

  implicit def RTreeEncoder[
    A <: GeometricFigure, W, Shape <: LabeledShape.Aux[A, W] : Encoder
  ]: Encoder[RTreeIndex[A, W, Shape]] =
    Encoder.instance[RTreeIndex[A, W, Shape]] { shapeIndex =>
      val shapes = shapeIndex.getItems
      Json.obj(
        "shapes" := shapes.sortBy(_.id.unwrap)
      )
    }

  implicit def RTreeDecoder[
    A <: GeometricFigure, W, Shape <: LabeledShape.Aux[A, W] : Decoder
  ]: Decoder[RTreeIndex[A, W, Shape]] =
    Decoder.instance[RTreeIndex[A, W, Shape]] { c =>
      val rtreeIndex = RTreeIndex.empty[A, W, Shape]()
      val shapeJson = c.downField("shapes").focus.orDie("no shapes field found")
      val shapes = shapeJson.decodeOrDie[List[Shape]]("Invalid shape list")
      shapes.foreach { shape => rtreeIndex.add(shape) }
      Right(rtreeIndex)
    }
}
Example 3
Source File: GaussianMixtureModelWrapper.scala From sparkoscope with Apache License 2.0
package org.apache.spark.mllib.api.python

import scala.collection.JavaConverters

import org.apache.spark.SparkContext
import org.apache.spark.mllib.clustering.GaussianMixtureModel
import org.apache.spark.mllib.linalg.{Vector, Vectors}

// Wrapper around GaussianMixtureModel to provide helper methods in Python.
private[python] class GaussianMixtureModelWrapper(model: GaussianMixtureModel) {

  val gaussians: Array[Byte] = {
    val modelGaussians = model.gaussians.map { gaussian =>
      Array[Any](gaussian.mu, gaussian.sigma)
    }
    SerDe.dumps(JavaConverters.seqAsJavaListConverter(modelGaussians).asJava)
  }

  def predictSoft(point: Vector): Vector = {
    Vectors.dense(model.predictSoft(point))
  }

  def save(sc: SparkContext, path: String): Unit = model.save(sc, path)
}
Example 4
Source File: LanguageFactory.scala From vm with GNU Affero General Public License v3.0
package org.mmadt.language

import java.util.ServiceLoader

import org.mmadt.language.mmlang.mmlangPrinter
import org.mmadt.language.obj._
import org.mmadt.language.obj.`type`.Type
import org.mmadt.language.obj.value.Value
import org.mmadt.language.obj.value.strm.Strm

import scala.collection.JavaConverters

trait LanguageFactory {
  def printValue(value: Value[Obj]): String
  def printType(atype: Type[Obj]): String
  def printInst(inst: Inst[_, _]): String
  def printStrm(strm: Strm[Obj]): String
}

object LanguageFactory {
  ///////PROVIDERS///////
  private lazy val providers: ServiceLoader[LanguageProvider] = ServiceLoader.load(classOf[LanguageProvider])

  def getLanguage(name: String): LanguageProvider =
    JavaConverters.asScalaIterator(providers.iterator()).filter(x => x.name().equals(name)).next()
  ///////////////////////

  def printValue(value: Value[Obj])(implicit f: LanguageFactory): String = f.printValue(value)
  def printType(atype: Type[Obj])(implicit f: LanguageFactory): String = f.printType(atype)
  def printInst(inst: Inst[_, _])(implicit f: LanguageFactory): String = f.printInst(inst)
  def printStrm(strm: Strm[Obj])(implicit f: LanguageFactory): String = f.printStrm(strm)

  implicit val mmlangFactory: LanguageFactory = new LanguageFactory {
    override def printValue(value: Value[Obj]): String = mmlangPrinter.valueString(value)
    override def printType(atype: Type[Obj]): String = mmlangPrinter.typeString(atype)
    override def printInst(inst: Inst[_, _]): String = mmlangPrinter.instString(inst)
    override def printStrm(strm: Strm[Obj]): String = mmlangPrinter.strmString(strm)
  }
}
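The getLanguage helper above wraps the java.util.Iterator returned by ServiceLoader so that Scala's filter and next can be used on it. A minimal sketch of the same pattern (hypothetical names, assuming Scala 2.12, where JavaConverters exposes asScalaIterator directly):

import java.util.ServiceLoader
import scala.collection.JavaConverters

// Wrap the Java iterator once, then use ordinary Scala combinators on it.
def findProvider[T](loader: ServiceLoader[T])(matches: T => Boolean): Option[T] =
  JavaConverters.asScalaIterator(loader.iterator()).find(matches)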
Example 5
Source File: TraversalMonoid.scala From vm with GNU Affero General Public License v3.0
package org.mmadt.language.gremlin

import org.mmadt.language.LanguageException
import org.mmadt.language.obj.`type`.__
import org.mmadt.language.obj.op.filter.IsOp
import org.mmadt.language.obj.op.map.GetOp
import org.mmadt.language.obj.{Inst, Obj}
import org.mmadt.storage.StorageFactory._

import scala.collection.JavaConverters

object TraversalMonoid {
  def resolve(op: String, args: List[Obj]): List[Inst[Obj, Obj]] = {
    (op match {
      case "out" => GetOp(str("outE")) +: args.map(x => IsOp(__.get(str("label")).eqs(x))) :+ GetOp(str("inV"))
      case "outE" => GetOp(str("outE")) +: args.map(x => IsOp(__.get(str("label")).eqs(x)))
      case "inV" => List(GetOp(str("inV")))
      case "outV" => List(GetOp(str("outV")))
      case "V" => GetOp(str("V")) +: args.map(x => IsOp(__.get(str("id")).eqs(x)))
      case _ => throw LanguageException.unknownInstruction(op, JavaConverters.seqAsJavaList(args))
    }).asInstanceOf[List[Inst[Obj, Obj]]]
  }
}
Example 6
Source File: ScriptEngineBlockProcessor.scala From vm with GNU Affero General Public License v3.0
package org.mmadt.language.mmlang

import java.io.File

import org.asciidoctor.ast.{ContentModel, StructuralNode}
import org.asciidoctor.extension.{BlockProcessor, Contexts, Name, Reader}
import org.asciidoctor.jruby.{AsciiDocDirectoryWalker, DirectoryWalker}
import org.asciidoctor.{Asciidoctor, OptionsBuilder, SafeMode}
import org.mmadt.language.jsr223.mmADTScriptEngine
import org.mmadt.language.obj.Obj
import org.mmadt.language.{LanguageException, LanguageFactory, Tokens}

import scala.collection.JavaConverters
import scala.util.{Failure, Success, Try}

@Name("exec")
@Contexts(Array(Contexts.LISTING))
@ContentModel(ContentModel.RAW)
class ScriptEngineBlockProcessor(astring: String, config: java.util.Map[String, Object]) extends BlockProcessor {
  lazy val engine: mmADTScriptEngine = LanguageFactory.getLanguage("mmlang").getEngine.get()
  val style = "source"
  val language = "python"
  val prompt = "mmlang> "

  override def process(parent: StructuralNode, reader: Reader, attributes: java.util.Map[String, Object]): Object = {
    val builder: StringBuilder = new StringBuilder
    JavaConverters.collectionAsScalaIterable(reader.readLines()).foreach(w => {
      builder.append(prompt).append(w).append("\n")
      Try[Obj] {
        engine.eval(w)
      } match {
        case Failure(exception) if exception.isInstanceOf[LanguageException] =>
          builder.append("language error: ").append(exception.getLocalizedMessage).append("\n")
        case Failure(exception) => throw exception
        case Success(value) =>
          val results = value.toStrm.values.toList
          if (results.isEmpty)
            builder.append(prompt).append("\n")
          else
            results.foreach(a => {
              builder.append(Tokens.RRDARROW).append(a).append("\n")
            })
      }
    })
    println(builder)
    this.createBlock(parent, "listing", builder.toString(), JavaConverters.mapAsJavaMap(Map[String, Object]("style" -> style, "language" -> language)))
  }
}

object ScriptEngineBlockProcessor {
  val source: String = "machine/src/asciidoctor/"
  val target: String = "machine/target/asciidoctor/"

  def main(args: Array[String]): Unit = {
    val asciidoctor = Asciidoctor.Factory.create()
    asciidoctor.requireLibrary("asciidoctor-diagram")
    val directoryWalker: DirectoryWalker = new AsciiDocDirectoryWalker(source)
    val asciidocFiles = directoryWalker.scan()
    JavaConverters.collectionAsScalaIterable[File](asciidocFiles).map(z => {
      println("Current file: " + z)
      z
    }).filter(z => Set("index.adoc").contains(z.getName)).foreach(z => {
      println("Processing file: " + z)
      asciidoctor.javaExtensionRegistry.block(classOf[ScriptEngineBlockProcessor])
      asciidoctor.convertFile(z, OptionsBuilder.options().toDir(new File(target)).safe(SafeMode.UNSAFE).mkDirs(true).toFile(true))
    })
  }
}
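process() above relies on two conversions: collectionAsScalaIterable to iterate the java.util.List returned by reader.readLines(), and mapAsJavaMap to hand block attributes back to the Java Asciidoctor API. A minimal standalone sketch of those two calls (hypothetical values, assuming Scala 2.12):

import java.util
import scala.collection.JavaConverters

val lines: util.List[String] = util.Arrays.asList("1 => 1", "2 + 2")
// java.util.Collection -> scala.collection.Iterable, so foreach/map/filter work.
JavaConverters.collectionAsScalaIterable(lines).foreach(println)

// Scala Map -> java.util.Map, for Java APIs that expect one.
val attrs: util.Map[String, Object] = JavaConverters.mapAsJavaMap(Map[String, Object]("style" -> "source"))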
Example 7
Source File: Streams.scala From haystack-trends with Apache License 2.0
package com.expedia.www.haystack.trends.kstream

import java.util.function.Supplier

import com.expedia.metrics.MetricData
import com.expedia.www.haystack.commons.kstreams.serde.metricdata.{MetricDataSerde, MetricTankSerde}
import com.expedia.www.haystack.trends.aggregation.TrendMetric
import com.expedia.www.haystack.trends.config.AppConfiguration
import com.expedia.www.haystack.trends.kstream.processor.{AdditionalTagsProcessorSupplier, ExternalKafkaProcessorSupplier, MetricAggProcessorSupplier}
import com.expedia.www.haystack.trends.kstream.store.HaystackStoreBuilder
import org.apache.kafka.common.serialization.{Serde, StringDeserializer, StringSerializer}
import org.apache.kafka.streams.Topology
import org.apache.kafka.streams.state.{KeyValueStore, StoreBuilder}
import org.slf4j.LoggerFactory

import scala.collection.JavaConverters

class Streams(appConfiguration: AppConfiguration) extends Supplier[Topology] {

  private val LOGGER = LoggerFactory.getLogger(classOf[Streams])
  private val TOPOLOGY_SOURCE_NAME = "metricpoint-source"
  private val TOPOLOGY_EXTERNAL_SINK_NAME = "metricpoint-aggegated-sink-external"
  private val TOPOLOGY_INTERNAL_SINK_NAME = "metric-data-aggegated-sink-internal"
  private val TOPOLOGY_AGGREGATOR_PROCESSOR_NAME = "metricpoint-aggregator-process"
  private val TOPOLOGY_ADDITIONAL_TAGS_PROCESSOR_NAME = "additional-tags-process"
  private val TOPOLOGY_AGGREGATOR_TREND_METRIC_STORE_NAME = "trend-metric-store"
  private val kafkaConfig = appConfiguration.kafkaConfig

  private def initialize(topology: Topology): Topology = {

    // add source - topic where the raw metricpoints are pushed by the span-timeseries-transformer
    topology.addSource(
      kafkaConfig.autoOffsetReset,
      TOPOLOGY_SOURCE_NAME,
      kafkaConfig.timestampExtractor,
      new StringDeserializer,
      new MetricTankSerde().deserializer(),
      kafkaConfig.consumeTopic)

    // the processor which performs aggregations on the metrics
    topology.addProcessor(
      TOPOLOGY_AGGREGATOR_PROCESSOR_NAME,
      new MetricAggProcessorSupplier(TOPOLOGY_AGGREGATOR_TREND_METRIC_STORE_NAME, appConfiguration.encoder),
      TOPOLOGY_SOURCE_NAME)

    // key-value state store associated with each kstreams task (partition)
    // which keeps the trend-metrics currently being computed in memory
    topology.addStateStore(createTrendMetricStateStore(), TOPOLOGY_AGGREGATOR_PROCESSOR_NAME)

    // processor to add additional tags, if any
    topology.addProcessor(TOPOLOGY_ADDITIONAL_TAGS_PROCESSOR_NAME, new AdditionalTagsProcessorSupplier(appConfiguration.additionalTags), TOPOLOGY_AGGREGATOR_PROCESSOR_NAME)

    if (appConfiguration.kafkaConfig.producerConfig.enableExternalKafka) {
      topology.addProcessor(
        TOPOLOGY_EXTERNAL_SINK_NAME,
        new ExternalKafkaProcessorSupplier(appConfiguration.kafkaConfig.producerConfig),
        TOPOLOGY_ADDITIONAL_TAGS_PROCESSOR_NAME
      )
    }

    // adding sinks
    appConfiguration.kafkaConfig.producerConfig.kafkaSinkTopics.foreach(sinkTopic => {
      if (sinkTopic.enabled) {
        val serde = Class.forName(sinkTopic.serdeClassName).newInstance().asInstanceOf[Serde[MetricData]]
        topology.addSink(
          s"${TOPOLOGY_INTERNAL_SINK_NAME}-${sinkTopic.topic}",
          sinkTopic.topic,
          new StringSerializer,
          serde.serializer(),
          TOPOLOGY_ADDITIONAL_TAGS_PROCESSOR_NAME)
      }
    })

    topology
  }

  private def createTrendMetricStateStore(): StoreBuilder[KeyValueStore[String, TrendMetric]] = {
    val stateStoreConfiguration = appConfiguration.stateStoreConfig
    val storeBuilder = new HaystackStoreBuilder(TOPOLOGY_AGGREGATOR_TREND_METRIC_STORE_NAME, stateStoreConfiguration.stateStoreCacheSize)

    if (stateStoreConfiguration.enableChangeLogging) {
      storeBuilder
        .withLoggingEnabled(JavaConverters.mapAsJavaMap(stateStoreConfiguration.changeLogTopicConfiguration))
    } else {
      storeBuilder
        .withLoggingDisabled()
    }
  }

  override def get(): Topology = {
    val topology = new Topology
    initialize(topology)
  }
}
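createTrendMetricStateStore() passes the changelog topic settings to Kafka Streams' withLoggingEnabled, which expects a java.util.Map. A minimal sketch of that conversion in isolation (hypothetical config values, assuming Scala 2.12):

import scala.collection.JavaConverters

val changeLogConfig: Map[String, String] = Map("cleanup.policy" -> "compact", "retention.ms" -> "86400000")
// Scala immutable Map -> java.util.Map, suitable for the Java StoreBuilder API.
val javaConfig: java.util.Map[String, String] = JavaConverters.mapAsJavaMap(changeLogConfig)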
Example 8
Source File: GaussianMixtureModelWrapper.scala From multi-tenancy-spark with Apache License 2.0
package org.apache.spark.mllib.api.python

import scala.collection.JavaConverters

import org.apache.spark.SparkContext
import org.apache.spark.mllib.clustering.GaussianMixtureModel
import org.apache.spark.mllib.linalg.{Vector, Vectors}

// Wrapper around GaussianMixtureModel to provide helper methods in Python.
private[python] class GaussianMixtureModelWrapper(model: GaussianMixtureModel) {

  val gaussians: Array[Byte] = {
    val modelGaussians = model.gaussians.map { gaussian =>
      Array[Any](gaussian.mu, gaussian.sigma)
    }
    SerDe.dumps(JavaConverters.seqAsJavaListConverter(modelGaussians).asJava)
  }

  def predictSoft(point: Vector): Vector = {
    Vectors.dense(model.predictSoft(point))
  }

  def save(sc: SparkContext, path: String): Unit = model.save(sc, path)
}
Example 9
Source File: Watcher.scala From seed with Apache License 2.0
package seed.cli.util

import java.nio.file.{Files, Path, StandardWatchEventKinds, WatchEvent}

import org.slf4j.LoggerFactory
import zio._
import zio.stream._
import io.methvin.watcher.DirectoryChangeEvent
import io.methvin.watcher.DirectoryChangeEvent.EventType
import io.methvin.watcher.DirectoryChangeListener
import io.methvin.watcher.DirectoryWatcher
import io.methvin.watcher.hashing.FileHasher
import org.apache.commons.io.FilenameUtils
import org.slf4j.Logger

import scala.collection.JavaConverters
import scala.concurrent.ExecutionContext

object Watcher {
  val Extensions = Array("scala", "java")

  // System.setProperty(org.slf4j.impl.SimpleLogger.DEFAULT_LOG_LEVEL_KEY, "TRACE")

  def watchPaths(
    paths: List[Path],
    onStarted: () => Unit = () => ()
  ): Stream[Throwable, Unit] =
    Stream.effectAsyncM[Throwable, Unit] { e =>
      val logger = LoggerFactory.getLogger("watcher")
      val (p, f) = paths.partition(Files.isDirectory(_))
      val watcher = new CustomRecursiveFileMonitor(p, f, logger = logger) {
        override def onCreate(file: Path, count: Int): Unit =
          if (Extensions.contains(FilenameUtils.getExtension(file.toString)))
            e(Task.succeed(()))
        override def onModify(file: Path, count: Int): Unit =
          if (Extensions.contains(FilenameUtils.getExtension(file.toString)))
            e(Task.succeed(()))
        override def onDelete(file: Path, count: Int): Unit = {}
      }

      Task.descriptorWith { d =>
        val ec = d.executor.asEC
        UIO {
          watcher.start()(ec)
          onStarted()
        }.onTermination(_ => UIO(watcher.close()))
      }
    }
}

abstract class CustomRecursiveFileMonitor(
  val paths: List[Path],
  val files: List[Path],
  val fileHasher: Option[FileHasher] = Some(FileHasher.DEFAULT_FILE_HASHER),
  val logger: Logger
) {
  protected[this] val watcher: DirectoryWatcher = DirectoryWatcher.builder
    .paths(JavaConverters.seqAsJavaListConverter(paths).asJava)
    .files(JavaConverters.seqAsJavaListConverter(files).asJava)
    .listener(new DirectoryChangeListener {
      override def onEvent(event: DirectoryChangeEvent): Unit =
        event.eventType match {
          case EventType.OVERFLOW =>
          case et =>
            CustomRecursiveFileMonitor.this.onEvent(
              et.getWatchEventKind.asInstanceOf[WatchEvent.Kind[Path]],
              event.path,
              event.count
            )
        }
      override def onException(e: Exception): Unit = e.printStackTrace()
    })
    .fileHasher(fileHasher.orNull)
    .logger(logger)
    .build()

  def onEvent(eventType: WatchEvent.Kind[Path], file: Path, count: Int): Unit =
    eventType match {
      case StandardWatchEventKinds.ENTRY_CREATE => onCreate(file, count)
      case StandardWatchEventKinds.ENTRY_MODIFY => onModify(file, count)
      case StandardWatchEventKinds.ENTRY_DELETE => onDelete(file, count)
    }

  def start()(implicit executionContext: ExecutionContext): Unit =
    executionContext.execute(() => watcher.watch())

  def close(): Unit = watcher.close()

  def onCreate(file: Path, count: Int): Unit
  def onModify(file: Path, count: Int): Unit
  def onDelete(file: Path, count: Int): Unit
}
Example 10
Source File: LanguageDetectorUtils.scala From pravda-ml with Apache License 2.0
package org.apache.spark.ml.odkl.texts

import java.util

import com.optimaize.langdetect.LanguageDetector
import com.optimaize.langdetect.i18n.LdLocale
import com.optimaize.langdetect.ngram.NgramExtractors
import com.optimaize.langdetect.profiles.{LanguageProfile, LanguageProfileReader}

import scala.collection.JavaConverters
import scala.collection.JavaConverters._

object LanguageDetectorUtils {

  val additionalLanguages: util.List[LdLocale] = Seq(
    LdLocale.fromString("az"),
    LdLocale.fromString("hy"),
    LdLocale.fromString("ka"),
    LdLocale.fromString("kk"),
    LdLocale.fromString("ky"),
    LdLocale.fromString("tg"),
    LdLocale.fromString("tk"),
    LdLocale.fromString("uz")
  ).asJava

  def readListLangsBuiltIn(): util.List[LanguageProfile] = {
    val reader = new LanguageProfileReader()
    val builtIn = reader.readAllBuiltIn()
    builtIn.addAll(reader.readBuiltIn(additionalLanguages))
    builtIn
  }

  def buildLanguageDetector(listLangs: util.List[LanguageProfile],
                            minimalConfidence: java.lang.Double,
                            languagePriors: java.util.Map[String, java.lang.Double]): LanguageDetector = {
    buildLanguageDetector(listLangs, minimalConfidence.doubleValue(), languagePriors.asScala.mapValues(_.doubleValue()).toMap)
  }

  def buildLanguageDetector(listLangs: util.List[LanguageProfile],
                            minimalConfidence: Double,
                            languagePriors: Map[String, Double]): LanguageDetector = {
    val priorsMap: Map[LdLocale, Double] = JavaConverters.asScalaBufferConverter(listLangs).asScala
      .map(x => x.getLocale -> languagePriors.getOrElse(x.getLocale.getLanguage, 0.01))
      .toMap

    com.optimaize.langdetect.LanguageDetectorBuilder.create(NgramExtractors.standard())
      .withProfiles(listLangs)
      .languagePriorities(JavaConverters.mapAsJavaMapConverter(priorsMap.mapValues(_.asInstanceOf[java.lang.Double])).asJava)
      .minimalConfidence(minimalConfidence)
      .build()
  }
}
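buildLanguageDetector above mixes both flavours: asScalaBufferConverter to walk a java.util.List of language profiles, and mapAsJavaMapConverter to pass the priors map back to the Java builder. A minimal round-trip sketch of those converter objects (hypothetical values, assuming Scala 2.12):

import java.util
import scala.collection.JavaConverters

val javaLangs: util.List[String] = util.Arrays.asList("az", "hy", "ka")
// java.util.List -> scala.collection.mutable.Buffer via the explicit converter object.
val scalaLangs: Seq[String] = JavaConverters.asScalaBufferConverter(javaLangs).asScala
// Scala Map -> java.util.Map the same way, boxing values for the Java-facing type.
val priors: util.Map[String, java.lang.Double] = JavaConverters.mapAsJavaMapConverter(Map("az" -> Double.box(0.1))).asJava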
Example 11
Source File: GaussianMixtureModelWrapper.scala From Spark-2.3.1 with Apache License 2.0
package org.apache.spark.mllib.api.python

import scala.collection.JavaConverters

import org.apache.spark.SparkContext
import org.apache.spark.mllib.clustering.GaussianMixtureModel
import org.apache.spark.mllib.linalg.{Vector, Vectors}

// Wrapper around GaussianMixtureModel to provide helper methods in Python.
private[python] class GaussianMixtureModelWrapper(model: GaussianMixtureModel) {

  val gaussians: Array[Byte] = {
    val modelGaussians = model.gaussians.map { gaussian =>
      Array[Any](gaussian.mu, gaussian.sigma)
    }
    SerDe.dumps(JavaConverters.seqAsJavaListConverter(modelGaussians).asJava)
  }

  def predictSoft(point: Vector): Vector = {
    Vectors.dense(model.predictSoft(point))
  }

  def save(sc: SparkContext, path: String): Unit = model.save(sc, path)
}