org.apache.spark.ml.feature.IndexToString Scala Examples
The following examples show how to use org.apache.spark.ml.feature.IndexToString.
Each example comes from an open-source project; the source file, originating project, and license are noted above each listing.
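At its core, IndexToString maps a column of label indices, typically produced by StringIndexer, back to the original strings, reading the labels either from metadata attached to the input column or from an explicitly supplied array. Before the project-specific listings, here is the round trip in its smallest form, a sketch in which the app name, master setting, and column names are illustrative rather than taken from any one project:

import org.apache.spark.ml.feature.{IndexToString, StringIndexer}
import org.apache.spark.sql.SparkSession

object RoundTripSketch {
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder.appName("RoundTripSketch").master("local[*]").getOrCreate()
    val df = spark.createDataFrame(Seq((0, "a"), (1, "b"), (2, "c"))).toDF("id", "category")

    // Fit string -> index; the fitted model writes its label list into column metadata.
    val indexed = new StringIndexer()
      .setInputCol("category")
      .setOutputCol("categoryIndex")
      .fit(df)
      .transform(df)

    // Index -> string; with no labels set, IndexToString reads them from that metadata.
    new IndexToString()
      .setInputCol("categoryIndex")
      .setOutputCol("originalCategory")
      .transform(indexed)
      .show()

    spark.stop()
  }
}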
Example 1
Source File: IndexToStringExample.scala, from drizzle-spark (Apache License 2.0)
// scalastyle:off println
package org.apache.spark.examples.ml

// $example on$
import org.apache.spark.ml.attribute.Attribute
import org.apache.spark.ml.feature.{IndexToString, StringIndexer}
// $example off$
import org.apache.spark.sql.SparkSession

object IndexToStringExample {
  def main(args: Array[String]) {
    val spark = SparkSession
      .builder
      .appName("IndexToStringExample")
      .getOrCreate()

    // $example on$
    val df = spark.createDataFrame(Seq(
      (0, "a"),
      (1, "b"),
      (2, "c"),
      (3, "a"),
      (4, "a"),
      (5, "c")
    )).toDF("id", "category")

    val indexer = new StringIndexer()
      .setInputCol("category")
      .setOutputCol("categoryIndex")
      .fit(df)
    val indexed = indexer.transform(df)

    println(s"Transformed string column '${indexer.getInputCol}' " +
      s"to indexed column '${indexer.getOutputCol}'")
    indexed.show()

    val inputColSchema = indexed.schema(indexer.getOutputCol)
    println(s"StringIndexer will store labels in output column metadata: " +
      s"${Attribute.fromStructField(inputColSchema).toString}\n")

    val converter = new IndexToString()
      .setInputCol("categoryIndex")
      .setOutputCol("originalCategory")

    val converted = converter.transform(indexed)

    println(s"Transformed indexed column '${converter.getInputCol}' back to original string " +
      s"column '${converter.getOutputCol}' using labels in metadata")
    converted.select("id", "categoryIndex", "originalCategory").show()
    // $example off$

    spark.stop()
  }
}
// scalastyle:on println
Example 2
Source File: LocalIndexToString.scala, from spark-ml-serving (Apache License 2.0)
package io.hydrosphere.spark_ml_serving.preprocessors

import io.hydrosphere.spark_ml_serving.TypedTransformerConverter
import io.hydrosphere.spark_ml_serving.common._
import org.apache.spark.SparkException
import org.apache.spark.ml.feature.IndexToString

class LocalIndexToString(override val sparkTransformer: IndexToString)
  extends LocalTransformer[IndexToString] {

  override def transform(localData: LocalData): LocalData = {
    localData.column(sparkTransformer.getInputCol) match {
      case Some(column) =>
        val labels = sparkTransformer.getLabels
        val indexer = (index: Double) => {
          val idx = index.toInt
          if (0 <= idx && idx < labels.length) {
            labels(idx)
          } else {
            throw new SparkException(s"Unseen index: $index ??")
          }
        }
        val newColumn = LocalDataColumn(
          sparkTransformer.getOutputCol,
          column.data map {
            case i: Int    => indexer(i.toDouble)
            case d: Double => indexer(d)
            case d         => throw new IllegalArgumentException(s"Unknown data to index: $d")
          }
        )
        localData.withColumn(newColumn)
      case None => localData
    }
  }
}

object LocalIndexToString
  extends SimpleModelLoader[IndexToString]
  with TypedTransformerConverter[IndexToString] {

  override def build(metadata: Metadata, data: LocalData): IndexToString = {
    val ctor = classOf[IndexToString].getDeclaredConstructor(classOf[String])
    ctor.setAccessible(true)
    ctor
      .newInstance(metadata.uid)
      .setLabels(metadata.paramMap("labels").asInstanceOf[Seq[String]].toArray)
      .setInputCol(metadata.paramMap("inputCol").asInstanceOf[String])
      .setOutputCol(metadata.paramMap("outputCol").asInstanceOf[String])
  }

  override implicit def toLocal(transformer: IndexToString) =
    new LocalIndexToString(transformer)
}
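Worth noting in this example: once taken out of a SparkSession, IndexToString reduces to a bounds-checked array lookup, labels(index.toInt). The standalone fragment below isolates that logic; LookupSketch and deindex are hypothetical names, not part of the spark-ml-serving API, and a plain require stands in for the SparkException the real code throws:

object LookupSketch extends App {
  val labels = Array("a", "b", "c")

  // The same per-cell lookup LocalIndexToString performs.
  def deindex(index: Double): String = {
    val idx = index.toInt
    require(0 <= idx && idx < labels.length, s"Unseen index: $index")
    labels(idx)
  }

  println(Seq(0.0, 2.0, 1.0).map(deindex)) // List(a, c, b)
}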
Example 3
Source File: ReverseStringIndexerOp.scala, from mleap (Apache License 2.0)
package org.apache.spark.ml.bundle.ops.feature

import ml.combust.bundle.BundleContext
import ml.combust.bundle.op.OpModel
import ml.combust.bundle.dsl._
import ml.combust.mleap.core.types.{DataShape, ScalarShape}
import org.apache.spark.ml.attribute.{Attribute, BinaryAttribute, NominalAttribute, NumericAttribute}
import org.apache.spark.ml.bundle._
import org.apache.spark.ml.feature.IndexToString
import org.apache.spark.sql.types.StructField
import ml.combust.mleap.runtime.types.BundleTypeConverters._

import scala.util.{Failure, Try}

object ReverseStringIndexerOp {
  def labelsForField(field: StructField): Array[String] = {
    val attr = Attribute.fromStructField(field)

    (attr match {
      case nominal: NominalAttribute =>
        if (nominal.values.isDefined) {
          Try(nominal.values.get)
        } else {
          Failure(new RuntimeException(s"invalid nominal value for field ${field.name}"))
        }
      case _: BinaryAttribute =>
        Failure(new RuntimeException(s"invalid binary attribute for field ${field.name}"))
      case _: NumericAttribute =>
        Failure(new RuntimeException(s"invalid numeric attribute for field ${field.name}"))
      case _ =>
        // reject attribute types we cannot recover labels from
        Failure(new RuntimeException(s"unsupported attribute for field ${field.name}"))
    }).get
  }
}

class ReverseStringIndexerOp extends SimpleSparkOp[IndexToString] {
  override val Model: OpModel[SparkBundleContext, IndexToString] =
    new OpModel[SparkBundleContext, IndexToString] {
      override val klazz: Class[IndexToString] = classOf[IndexToString]

      override def opName: String = Bundle.BuiltinOps.feature.reverse_string_indexer

      override def store(model: Model, obj: IndexToString)
                        (implicit context: BundleContext[SparkBundleContext]): Model = {
        val labels = obj.get(obj.labels).getOrElse {
          assert(context.context.dataset.isDefined, BundleHelper.sampleDataframeMessage(klazz))
          val df = context.context.dataset.get
          ReverseStringIndexerOp.labelsForField(df.schema(obj.getInputCol))
        }

        model.withValue("labels", Value.stringList(labels)).
          withValue("input_shape", Value.dataShape(ScalarShape(false)))
      }

      override def load(model: Model)
                       (implicit context: BundleContext[SparkBundleContext]): IndexToString = {
        model.getValue("input_shape").map(_.getDataShape: DataShape).foreach { shape =>
          require(shape.isScalar, "cannot deserialize non-scalar input to Spark IndexToString model")
        }
        new IndexToString(uid = "").setLabels(model.value("labels").getStringList.toArray)
      }
    }

  override def sparkLoad(uid: String, shape: NodeShape, model: IndexToString): IndexToString = {
    new IndexToString(uid = uid).setLabels(model.getLabels)
  }

  override def sparkInputs(obj: IndexToString): Seq[ParamSpec] = {
    Seq("input" -> obj.inputCol)
  }

  override def sparkOutputs(obj: IndexToString): Seq[SimpleParamSpec] = {
    Seq("output" -> obj.outputCol)
  }
}
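The labelsForField helper works because StringIndexer attaches a NominalAttribute carrying the label list to the metadata of its output column. The sketch below writes and then recovers such metadata using only Spark MLlib; the field name and label values are illustrative:

import org.apache.spark.ml.attribute.{Attribute, NominalAttribute}
import org.apache.spark.sql.types.{DoubleType, StructField}

object NominalMetadataSketch extends App {
  // Attach a label list to a column's metadata, as StringIndexer does.
  val meta = NominalAttribute.defaultAttr
    .withName("categoryIndex")
    .withValues("a", "b", "c")
    .toMetadata()
  val field = StructField("categoryIndex", DoubleType, nullable = false, metadata = meta)

  // Recover the labels, as labelsForField does for the nominal case.
  val labels = Attribute.fromStructField(field).asInstanceOf[NominalAttribute].values.get
  println(labels.mkString(", ")) // a, b, c
}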
Example 4
Source File: ReverseStringIndexerParitySpec.scala, from mleap (Apache License 2.0)
package org.apache.spark.ml.parity.feature

import org.apache.spark.ml.feature.{IndexToString, StringIndexer}
import org.apache.spark.ml.parity.SparkParityBase
import org.apache.spark.ml.{Pipeline, Transformer}
import org.apache.spark.sql.DataFrame

class ReverseStringIndexerParitySpec extends SparkParityBase {
  override val dataset: DataFrame = baseDataset.select("state")
  override val sparkTransformer: Transformer = {
    val stringIndexer = new StringIndexer().
      setInputCol("state").
      setOutputCol("state_index").
      fit(dataset)
    val reverseStringIndexer = new IndexToString().
      setInputCol("state_index").
      setOutputCol("state_reverse").
      setLabels(stringIndexer.labels)
    new Pipeline().setStages(Array(stringIndexer, reverseStringIndexer)).fit(dataset)
  }

  override val unserializedParams = Set("stringOrderType")
}
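Unlike Examples 1-3, which recover labels from column metadata, this spec hands the label array to IndexToString directly via setLabels(stringIndexer.labels), so the transformer can be serialized and replayed without the indexer's metadata. A minimal sketch of the explicit-label construction, with illustrative column names and labels:

import org.apache.spark.ml.feature.IndexToString

object ExplicitLabelsSketch {
  // Explicitly set labels take precedence over any metadata on the input column.
  val converter: IndexToString = new IndexToString()
    .setInputCol("state_index")
    .setOutputCol("state_reverse")
    .setLabels(Array("CA", "NY", "TX"))
}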
Example 5
Source File: IndexToStringExample.scala, from sparkoscope (Apache License 2.0)
The code is identical to the IndexToStringExample.scala listing in Example 1.
Example 6
Source File: MultilayerPerceptronClassifierWrapper.scala, from sparkoscope (Apache License 2.0)
package org.apache.spark.ml.r

import org.apache.hadoop.fs.Path
import org.json4s._
import org.json4s.JsonDSL._
import org.json4s.jackson.JsonMethods._

import org.apache.spark.ml.{Pipeline, PipelineModel}
import org.apache.spark.ml.classification.{MultilayerPerceptronClassificationModel, MultilayerPerceptronClassifier}
import org.apache.spark.ml.feature.{IndexToString, RFormula}
import org.apache.spark.ml.linalg.Vectors
import org.apache.spark.ml.r.RWrapperUtils._
import org.apache.spark.ml.util.{MLReadable, MLReader, MLWritable, MLWriter}
import org.apache.spark.sql.{DataFrame, Dataset}

private[r] class MultilayerPerceptronClassifierWrapper private (
    val pipeline: PipelineModel
  ) extends MLWritable {

  import MultilayerPerceptronClassifierWrapper._

  val mlpModel: MultilayerPerceptronClassificationModel =
    pipeline.stages(1).asInstanceOf[MultilayerPerceptronClassificationModel]

  val weights: Array[Double] = mlpModel.weights.toArray
  val layers: Array[Int] = mlpModel.layers

  def transform(dataset: Dataset[_]): DataFrame = {
    pipeline.transform(dataset)
      .drop(mlpModel.getFeaturesCol)
      .drop(mlpModel.getLabelCol)
      .drop(PREDICTED_LABEL_INDEX_COL)
  }

  // Restored per the upstream Spark source: MLWritable requires a write method.
  override def write: MLWriter = new MultilayerPerceptronClassifierWrapperWriter(this)
}

// In the upstream Spark source, read/load and the reader/writer classes live in this
// companion object; the extracted snippet had misplaced them inside the class.
private[r] object MultilayerPerceptronClassifierWrapper
  extends MLReadable[MultilayerPerceptronClassifierWrapper] {

  // Column produced by the (elided) fit() pipeline and dropped by transform() above.
  val PREDICTED_LABEL_INDEX_COL = "pred_label_idx"

  // The fit(...) factory method of the full source file, which assembles the
  // RFormula / MultilayerPerceptronClassifier / IndexToString pipeline, is elided here.

  override def read: MLReader[MultilayerPerceptronClassifierWrapper] =
    new MultilayerPerceptronClassifierWrapperReader

  override def load(path: String): MultilayerPerceptronClassifierWrapper = super.load(path)

  class MultilayerPerceptronClassifierWrapperReader
    extends MLReader[MultilayerPerceptronClassifierWrapper] {

    override def load(path: String): MultilayerPerceptronClassifierWrapper = {
      implicit val format = DefaultFormats
      val pipelinePath = new Path(path, "pipeline").toString

      val pipeline = PipelineModel.load(pipelinePath)
      new MultilayerPerceptronClassifierWrapper(pipeline)
    }
  }

  class MultilayerPerceptronClassifierWrapperWriter(instance: MultilayerPerceptronClassifierWrapper)
    extends MLWriter {

    override protected def saveImpl(path: String): Unit = {
      val rMetadataPath = new Path(path, "rMetadata").toString
      val pipelinePath = new Path(path, "pipeline").toString

      val rMetadata = "class" -> instance.getClass.getName
      val rMetadataJson: String = compact(render(rMetadata))
      sc.parallelize(Seq(rMetadataJson), 1).saveAsTextFile(rMetadataPath)

      instance.pipeline.save(pipelinePath)
    }
  }
}
Example 7
Source File: IndexToStringExample.scala, from multi-tenancy-spark (Apache License 2.0)
The code is identical to the IndexToStringExample.scala listing in Example 1.
Example 8
Source File: MultilayerPerceptronClassifierWrapper.scala, from multi-tenancy-spark (Apache License 2.0)
The code is identical to the MultilayerPerceptronClassifierWrapper.scala listing in Example 6.
Example 9
Source File: TypedIndexToString.scala, from frameless (Apache License 2.0)
package frameless
package ml
package feature

import frameless.ml.internals.UnaryInputsChecker
import org.apache.spark.ml.feature.IndexToString

final class TypedIndexToString[Inputs] private[ml] (indexToString: IndexToString, inputCol: String)
  extends AppendTransformer[Inputs, TypedIndexToString.Outputs, IndexToString] {

  val transformer: IndexToString =
    indexToString
      .setInputCol(inputCol)
      .setOutputCol(AppendTransformer.tempColumnName)
}

object TypedIndexToString {
  case class Outputs(originalOutput: String)

  def apply[Inputs](labels: Array[String])
    (implicit inputsChecker: UnaryInputsChecker[Inputs, Double]): TypedIndexToString[Inputs] = {
    new TypedIndexToString[Inputs](new IndexToString().setLabels(labels), inputsChecker.inputCol)
  }
}
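Usage follows the usual frameless pattern: the wrapper is constructed against a case class whose single Double field is verified at compile time by the implicit UnaryInputsChecker. A sketch under that assumption; the case class and label array are illustrative and assume the imports of the listing above:

// Illustrative input schema: exactly one Double column, as UnaryInputsChecker requires.
case class Indexed(categoryIndex: Double)

val deindexer = TypedIndexToString[Indexed](Array("a", "b", "c"))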
Example 10
Source File: OpIndexToStringTest.scala, from TransmogrifAI (BSD 3-Clause "New" or "Revised" License)
package com.salesforce.op.stages.impl.feature

import com.salesforce.op.features.types._
import com.salesforce.op.test.{SwTransformerSpec, TestFeatureBuilder}
import com.salesforce.op.utils.spark.RichDataset._
import org.apache.spark.ml.feature.IndexToString
import org.junit.runner.RunWith
import org.scalatest.junit.JUnitRunner

@RunWith(classOf[JUnitRunner])
class OpIndexToStringTest extends SwTransformerSpec[Text, IndexToString, OpIndexToString] {
  val (inputData, indF) = TestFeatureBuilder(Seq(0.0, 2.0, 1.0, 0.0, 0.0, 1.0).map(_.toRealNN))
  val labels = Array("a", "c", "b")
  val transformer = new OpIndexToString().setInput(indF).setLabels(labels)

  val expectedResult: Seq[Text] = Array("a", "b", "c", "a", "a", "c").map(_.toText)

  it should "correctly deindex a numeric column" in {
    val strs = transformer.transform(inputData).collect(transformer.getOutput())
    strs shouldBe expectedResult
  }

  it should "correctly deindex a numeric column (shortcut)" in {
    val str = indF.deindexed(labels, handleInvalid = IndexToStringHandleInvalid.Error)
    val strs = str.originStage.asInstanceOf[OpIndexToString].transform(inputData).collect(str)
    strs shouldBe expectedResult
  }

  it should "get labels" in {
    transformer.getLabels shouldBe labels
  }
}
Example 11
Source File: IndexToStringExample.scala, from Spark-2.3.1 (Apache License 2.0)
The code is identical to the IndexToStringExample.scala listing in Example 1.
Example 12
Source File: MultilayerPerceptronClassifierWrapper.scala, from Spark-2.3.1 (Apache License 2.0)
This variant differs from Example 6 only in the visibility and laziness of the model fields; the same companion-object structure applies.

package org.apache.spark.ml.r

import org.apache.hadoop.fs.Path
import org.json4s._
import org.json4s.JsonDSL._
import org.json4s.jackson.JsonMethods._

import org.apache.spark.ml.{Pipeline, PipelineModel}
import org.apache.spark.ml.classification.{MultilayerPerceptronClassificationModel, MultilayerPerceptronClassifier}
import org.apache.spark.ml.feature.{IndexToString, RFormula}
import org.apache.spark.ml.linalg.Vectors
import org.apache.spark.ml.r.RWrapperUtils._
import org.apache.spark.ml.util.{MLReadable, MLReader, MLWritable, MLWriter}
import org.apache.spark.sql.{DataFrame, Dataset}

private[r] class MultilayerPerceptronClassifierWrapper private (
    val pipeline: PipelineModel
  ) extends MLWritable {

  import MultilayerPerceptronClassifierWrapper._

  private val mlpModel: MultilayerPerceptronClassificationModel =
    pipeline.stages(1).asInstanceOf[MultilayerPerceptronClassificationModel]

  lazy val weights: Array[Double] = mlpModel.weights.toArray
  lazy val layers: Array[Int] = mlpModel.layers

  def transform(dataset: Dataset[_]): DataFrame = {
    pipeline.transform(dataset)
      .drop(mlpModel.getFeaturesCol)
      .drop(mlpModel.getLabelCol)
      .drop(PREDICTED_LABEL_INDEX_COL)
  }

  // Restored per the upstream Spark source: MLWritable requires a write method.
  override def write: MLWriter = new MultilayerPerceptronClassifierWrapperWriter(this)
}

// Companion object restored as in Example 6; the extracted snippet had inlined
// these members into the class.
private[r] object MultilayerPerceptronClassifierWrapper
  extends MLReadable[MultilayerPerceptronClassifierWrapper] {

  // Column produced by the (elided) fit() pipeline and dropped by transform() above.
  val PREDICTED_LABEL_INDEX_COL = "pred_label_idx"

  override def read: MLReader[MultilayerPerceptronClassifierWrapper] =
    new MultilayerPerceptronClassifierWrapperReader

  override def load(path: String): MultilayerPerceptronClassifierWrapper = super.load(path)

  class MultilayerPerceptronClassifierWrapperReader
    extends MLReader[MultilayerPerceptronClassifierWrapper] {

    override def load(path: String): MultilayerPerceptronClassifierWrapper = {
      implicit val format = DefaultFormats
      val pipelinePath = new Path(path, "pipeline").toString

      val pipeline = PipelineModel.load(pipelinePath)
      new MultilayerPerceptronClassifierWrapper(pipeline)
    }
  }

  class MultilayerPerceptronClassifierWrapperWriter(instance: MultilayerPerceptronClassifierWrapper)
    extends MLWriter {

    override protected def saveImpl(path: String): Unit = {
      val rMetadataPath = new Path(path, "rMetadata").toString
      val pipelinePath = new Path(path, "pipeline").toString

      val rMetadata = "class" -> instance.getClass.getName
      val rMetadataJson: String = compact(render(rMetadata))
      sc.parallelize(Seq(rMetadataJson), 1).saveAsTextFile(rMetadataPath)

      instance.pipeline.save(pipelinePath)
    }
  }
}
Example 13
Source File: IndexToStringExample.scala, from BigDatalog (Apache License 2.0)
// scalastyle:off println
package org.apache.spark.examples.ml

import org.apache.spark.sql.SQLContext
import org.apache.spark.{SparkConf, SparkContext}
// $example on$
import org.apache.spark.ml.feature.{StringIndexer, IndexToString}
// $example off$

object IndexToStringExample {
  def main(args: Array[String]) {
    val conf = new SparkConf().setAppName("IndexToStringExample")
    val sc = new SparkContext(conf)
    val sqlContext = SQLContext.getOrCreate(sc)

    // $example on$
    val df = sqlContext.createDataFrame(Seq(
      (0, "a"),
      (1, "b"),
      (2, "c"),
      (3, "a"),
      (4, "a"),
      (5, "c")
    )).toDF("id", "category")

    val indexer = new StringIndexer()
      .setInputCol("category")
      .setOutputCol("categoryIndex")
      .fit(df)
    val indexed = indexer.transform(df)

    val converter = new IndexToString()
      .setInputCol("categoryIndex")
      .setOutputCol("originalCategory")

    val converted = converter.transform(indexed)
    converted.select("id", "originalCategory").show()
    // $example off$

    sc.stop()
  }
}
// scalastyle:on println
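This listing predates Spark 2.0, so it wires up SparkConf, SparkContext, and SQLContext by hand. On Spark 2.x the same program starts from a SparkSession, as Examples 1, 5, 7, and 11 do; a minimal sketch of the modern entry point (the app name is illustrative):

import org.apache.spark.sql.SparkSession

object ModernEntryPoint {
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder.appName("IndexToStringExample").getOrCreate()
    // ...same StringIndexer / IndexToString code as in Example 1...
    spark.stop()
  }
}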
Example 14
Source File: BaseTransformerConverter.scala, from mleap (Apache License 2.0)
package org.apache.spark.ml.mleap.converter.runtime

import com.truecar.mleap.runtime.transformer
import org.apache.spark.ml.PipelineModel
import org.apache.spark.ml.classification.RandomForestClassificationModel
import org.apache.spark.ml.feature.{IndexToString, StandardScalerModel, StringIndexerModel, VectorAssembler}
import org.apache.spark.ml.mleap.classification.SVMModel
import org.apache.spark.ml.mleap.converter.runtime.classification.{RandomForestClassificationModelToMleap, SupportVectorMachineModelToMleap}
import org.apache.spark.ml.mleap.converter.runtime.feature.{IndexToStringToMleap, StandardScalerModelToMleap, StringIndexerModelToMleap, VectorAssemblerModelToMleap}
import org.apache.spark.ml.mleap.converter.runtime.regression.{LinearRegressionModelToMleap, RandomForestRegressionModelToMleap}
import org.apache.spark.ml.regression.{LinearRegressionModel, RandomForestRegressionModel}

trait BaseTransformerConverter extends SparkTransformerConverter {
  // regression
  implicit val mleapLinearRegressionModelToMleap: TransformerToMleap[LinearRegressionModel, transformer.LinearRegressionModel] =
    addConverter(LinearRegressionModelToMleap)
  implicit val mleapRandomForestRegressionModelToMleap: TransformerToMleap[RandomForestRegressionModel, transformer.RandomForestRegressionModel] =
    addConverter(RandomForestRegressionModelToMleap)

  // classification
  implicit val mleapRandomForestClassificationModelToMleap: TransformerToMleap[RandomForestClassificationModel, transformer.RandomForestClassificationModel] =
    addConverter(RandomForestClassificationModelToMleap)
  implicit val mleapSupportVectorMachineModelToMleap: TransformerToMleap[SVMModel, transformer.SupportVectorMachineModel] =
    addConverter(SupportVectorMachineModelToMleap)

  // feature
  implicit val mleapIndexToStringToMleap: TransformerToMleap[IndexToString, transformer.ReverseStringIndexerModel] =
    addConverter(IndexToStringToMleap)
  implicit val mleapStandardScalerModelToMleap: TransformerToMleap[StandardScalerModel, transformer.StandardScalerModel] =
    addConverter(StandardScalerModelToMleap)
  implicit val mleapStringIndexerModelToMleap: TransformerToMleap[StringIndexerModel, transformer.StringIndexerModel] =
    addConverter(StringIndexerModelToMleap)
  implicit val mleapVectorAssemblerToMleap: TransformerToMleap[VectorAssembler, transformer.VectorAssemblerModel] =
    addConverter(VectorAssemblerModelToMleap)

  // other
  implicit val mleapPipelineModelToMleap: TransformerToMleap[PipelineModel, transformer.PipelineModel] =
    addConverter(PipelineModelToMleap(this))
}

object BaseTransformerConverter extends BaseTransformerConverter