org.apache.spark.ml.Pipeline Java Examples
The following examples show how to use org.apache.spark.ml.Pipeline.
Each example is taken from an open-source project; the source file and license are noted above it.
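Before the project-specific examples, here is a minimal, self-contained sketch of the pattern they all follow: estimator and transformer stages are chained in a Pipeline, fit() produces a PipelineModel, and transform() applies all stages to new data. The input path and column names below are illustrative assumptions, not taken from any of the projects.

import org.apache.spark.ml.Pipeline;
import org.apache.spark.ml.PipelineModel;
import org.apache.spark.ml.PipelineStage;
import org.apache.spark.ml.classification.LogisticRegression;
import org.apache.spark.ml.feature.HashingTF;
import org.apache.spark.ml.feature.Tokenizer;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.SparkSession;

public class MinimalPipelineSketch {
    public static void main(String[] args) {
        SparkSession spark = SparkSession.builder().appName("MinimalPipelineSketch").getOrCreate();

        // Hypothetical input: a DataFrame with "text" and "label" columns.
        Dataset<Row> training = spark.read().json("data/training.json");

        // Feature stages feed the classifier; each stage reads the previous stage's output column.
        Tokenizer tokenizer = new Tokenizer().setInputCol("text").setOutputCol("words");
        HashingTF hashingTF = new HashingTF().setInputCol("words").setOutputCol("features");
        LogisticRegression lr = new LogisticRegression().setMaxIter(10);

        // A Pipeline is itself an Estimator: fit() runs the stages in order and returns a PipelineModel.
        Pipeline pipeline = new Pipeline().setStages(new PipelineStage[]{tokenizer, hashingTF, lr});
        PipelineModel model = pipeline.fit(training);

        // The fitted PipelineModel is a Transformer that applies every stage to new data.
        model.transform(training).select("label", "prediction").show(5);

        spark.stop();
    }
}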
Example #1
Source File: CMM.java From vn.vitk with GNU General Public License v3.0 | 9 votes |
/**
 * Creates a processing pipeline.
 * @return a pipeline
 */
private Pipeline createPipeline() {
    Tokenizer tokenizer = new Tokenizer()
            .setInputCol("featureStrings")
            .setOutputCol("tokens");
    CountVectorizer countVectorizer = new CountVectorizer()
            .setInputCol("tokens")
            .setOutputCol("features")
            .setMinDF((Double)params.getOrDefault(params.getMinFF()))
            .setVocabSize((Integer)params.getOrDefault(params.getNumFeatures()));
    StringIndexer tagIndexer = new StringIndexer()
            .setInputCol("tag")
            .setOutputCol("label");
    Pipeline pipeline = new Pipeline().setStages(new PipelineStage[]{tokenizer, countVectorizer, tagIndexer});
    return pipeline;
}
Example #2
Source File: JavaRandomForestClassifierExample.java From SparkDemo with MIT License | 6 votes |
public static void main(String[] args) { SparkSession spark = SparkSession .builder() .appName("JavaRandomForestClassifierExample") .getOrCreate(); // $example on$ // Load and parse the data file, converting it to a DataFrame. Dataset<Row> data = spark.read().format("libsvm").load("data/mllib/sample_libsvm_data.txt"); // Index labels, adding metadata to the label column. // Fit on whole dataset to include all labels in index. StringIndexerModel labelIndexer = new StringIndexer() .setInputCol("label") .setOutputCol("indexedLabel") .fit(data); // Automatically identify categorical features, and index them. // Set maxCategories so features with > 4 distinct values are treated as continuous. VectorIndexerModel featureIndexer = new VectorIndexer() .setInputCol("features") .setOutputCol("indexedFeatures") .setMaxCategories(4) .fit(data); // Split the data into training and test sets (30% held out for testing) Dataset<Row>[] splits = data.randomSplit(new double[] {0.7, 0.3}); Dataset<Row> trainingData = splits[0]; Dataset<Row> testData = splits[1]; // Train a RandomForest model. RandomForestClassifier rf = new RandomForestClassifier() .setLabelCol("indexedLabel") .setFeaturesCol("indexedFeatures"); // Convert indexed labels back to original labels. IndexToString labelConverter = new IndexToString() .setInputCol("prediction") .setOutputCol("predictedLabel") .setLabels(labelIndexer.labels()); // Chain indexers and forest in a Pipeline Pipeline pipeline = new Pipeline() .setStages(new PipelineStage[] {labelIndexer, featureIndexer, rf, labelConverter}); // Train model. This also runs the indexers. PipelineModel model = pipeline.fit(trainingData); // Make predictions. Dataset<Row> predictions = model.transform(testData); // Select example rows to display. predictions.select("predictedLabel", "label", "features").show(5); // Select (prediction, true label) and compute test error MulticlassClassificationEvaluator evaluator = new MulticlassClassificationEvaluator() .setLabelCol("indexedLabel") .setPredictionCol("prediction") .setMetricName("accuracy"); double accuracy = evaluator.evaluate(predictions); System.out.println("Test Error = " + (1.0 - accuracy)); RandomForestClassificationModel rfModel = (RandomForestClassificationModel)(model.stages()[2]); System.out.println("Learned classification forest model:\n" + rfModel.toDebugString()); // $example off$ spark.stop(); }
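A small follow-up to Example #2: a fitted random forest also exposes per-feature importances and its individual trees. This is a hedged sketch that assumes the rfModel variable obtained at the end of the example (Spark 2.x ML API).

// Assumes `rfModel` from Example #2 (a fitted RandomForestClassificationModel).
org.apache.spark.ml.linalg.Vector importances = rfModel.featureImportances();
System.out.println("Number of trees: " + rfModel.trees().length);
System.out.println("Feature importances: " + importances);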
Example #3
Source File: TransitionClassifier.java From vn.vitk with GNU General Public License v3.0 | 6 votes |
/**
 * Creates a processing pipeline.
 * @return a pipeline
 */
protected Pipeline createPipeline() {
    Tokenizer tokenizer = new Tokenizer()
            .setInputCol("text")
            .setOutputCol("tokens");
    CountVectorizer countVectorizer = new CountVectorizer()
            .setInputCol("tokens")
            .setOutputCol("features")
            .setMinDF((Double)params.getOrDefault(params.getMinFF()))
            .setVocabSize((Integer)params.getOrDefault(params.getNumFeatures()));
    StringIndexer transitionIndexer = new StringIndexer()
            .setInputCol("transition")
            .setOutputCol("label");
    Pipeline pipeline = new Pipeline().setStages(new PipelineStage[]{tokenizer, countVectorizer, transitionIndexer});
    return pipeline;
}
Example #4
Source File: RandomForestRegressionModelInfoAdapterBridgeTest.java From spark-transformers with Apache License 2.0 | 5 votes |
@Test
public void testRandomForestRegressionWithPipeline() {
    // Load the data stored in LIBSVM format as a DataFrame.
    DataFrame data = sqlContext.read().format("libsvm").load("src/test/resources/regression_test.libsvm");

    // Split the data into training and test sets (30% held out for testing)
    DataFrame[] splits = data.randomSplit(new double[]{0.7, 0.3});
    DataFrame trainingData = splits[0];
    DataFrame testData = splits[1];

    // Train a RandomForest model.
    RandomForestRegressionModel regressionModel = new RandomForestRegressor()
            .setFeaturesCol("features").fit(trainingData);

    Pipeline pipeline = new Pipeline()
            .setStages(new PipelineStage[]{regressionModel});

    // Fit the pipeline; its only stage is the already-trained regression model.
    PipelineModel sparkPipeline = pipeline.fit(trainingData);

    // Export this model
    byte[] exportedModel = ModelExporter.export(sparkPipeline, null);

    // Import and get Transformer
    Transformer transformer = ModelImporter.importAndGetTransformer(exportedModel);

    Row[] sparkOutput = sparkPipeline.transform(testData).select("features", "prediction").collect();

    // compare predictions
    for (Row row : sparkOutput) {
        Vector v = (Vector) row.get(0);
        double actual = row.getDouble(1);

        Map<String, Object> inputData = new HashMap<String, Object>();
        inputData.put(transformer.getInputKeys().iterator().next(), v.toArray());
        transformer.transform(inputData);
        double predicted = (double) inputData.get(transformer.getOutputKeys().iterator().next());

        assertEquals(actual, predicted, EPSILON);
    }
}
Example #5
Source File: SparkMLHouses.java From -Data-Stream-Development-with-Apache-Spark-Kafka-and-Spring-Boot with MIT License | 5 votes |
public static void main(String[] args) throws InterruptedException, StreamingQueryException { System.setProperty("hadoop.home.dir", HADOOP_HOME_DIR_VALUE); // * the schema can be written on disk, and read from disk // * the schema is not mandatory to be complete, it can contain only the needed fields StructType HOUSES_SCHEMA = new StructType() .add("House", LongType, true) .add("Taxes", LongType, true) .add("Bedrooms", LongType, true) .add("Baths", FloatType, true) .add("Quadrant", LongType, true) .add("NW", StringType, true) .add("Price($)", LongType, false) .add("Size(sqft)", LongType, false) .add("lot", LongType, true); final SparkConf conf = new SparkConf() .setMaster(RUN_LOCAL_WITH_AVAILABLE_CORES) .setAppName(APPLICATION_NAME) .set("spark.sql.caseSensitive", CASE_SENSITIVE); SparkSession sparkSession = SparkSession.builder() .config(conf) .getOrCreate(); Dataset<Row> housesDF = sparkSession.read() .schema(HOUSES_SCHEMA) .json(HOUSES_FILE_PATH); // Gathering Data Dataset<Row> gatheredDF = housesDF.select(col("Taxes"), col("Bedrooms"), col("Baths"), col("Size(sqft)"), col("Price($)")); // Data Preparation Dataset<Row> labelDF = gatheredDF.withColumnRenamed("Price($)", "label"); Imputer imputer = new Imputer() // .setMissingValue(1.0d) .setInputCols(new String[] { "Baths" }) .setOutputCols(new String[] { "~Baths~" }); VectorAssembler assembler = new VectorAssembler() .setInputCols(new String[] { "Taxes", "Bedrooms", "~Baths~", "Size(sqft)" }) .setOutputCol("features"); // Choosing a Model LinearRegression linearRegression = new LinearRegression(); linearRegression.setMaxIter(1000); Pipeline pipeline = new Pipeline() .setStages(new PipelineStage[] { imputer, assembler, linearRegression }); // Training The Data Dataset<Row>[] splitDF = labelDF.randomSplit(new double[] { 0.8, 0.2 }); Dataset<Row> trainDF = splitDF[0]; Dataset<Row> evaluationDF = splitDF[1]; PipelineModel pipelineModel = pipeline.fit(trainDF); // Evaluation Dataset<Row> predictionsDF = pipelineModel.transform(evaluationDF); predictionsDF.show(false); Dataset<Row> forEvaluationDF = predictionsDF.select(col("label"), col("prediction")); RegressionEvaluator evaluteR2 = new RegressionEvaluator().setMetricName("r2"); RegressionEvaluator evaluteRMSE = new RegressionEvaluator().setMetricName("rmse"); double r2 = evaluteR2.evaluate(forEvaluationDF); double rmse = evaluteRMSE.evaluate(forEvaluationDF); logger.info("---------------------------"); logger.info("R2 =" + r2); logger.info("RMSE =" + rmse); logger.info("---------------------------"); }
Example #6
Source File: TestSparkMLDeriver.java From envelope with Apache License 2.0 | 5 votes |
private void generateAndSaveModel(String savePath) throws IOException { // Sourced from the Spark ML documentation and examples StructType trainingSchema = DataTypes.createStructType(Lists.newArrayList( DataTypes.createStructField("id", DataTypes.LongType, false), DataTypes.createStructField("text", DataTypes.StringType, false), DataTypes.createStructField("label", DataTypes.DoubleType, false) )); Dataset<Row> training = Contexts.getSparkSession().createDataFrame(Lists.newArrayList( RowFactory.create(0L, "a b c d e spark", 1.0), RowFactory.create(1L, "b d", 0.0), RowFactory.create(2L, "spark f g h", 1.0), RowFactory.create(3L, "hadoop mapreduce", 0.0) ), trainingSchema); Tokenizer tokenizer = new Tokenizer() .setInputCol("text") .setOutputCol("words"); HashingTF hashingTF = new HashingTF() .setNumFeatures(1000) .setInputCol(tokenizer.getOutputCol()) .setOutputCol("features"); LogisticRegression lr = new LogisticRegression() .setMaxIter(10) .setRegParam(0.001); Pipeline pipeline = new Pipeline() .setStages(new PipelineStage[] {tokenizer, hashingTF, lr}); PipelineModel model = pipeline.fit(training); model.write().overwrite().save(savePath); }
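For completeness, a pipeline model persisted as in the example above can later be reloaded and applied without refitting. A minimal sketch, assuming the same savePath and a hypothetical newDocuments DataFrame that contains a "text" column:

// Reload the PipelineModel written by generateAndSaveModel(savePath) and apply it to new data.
PipelineModel reloaded = PipelineModel.load(savePath);
Dataset<Row> scored = reloaded.transform(newDocuments);
scored.select("text", "prediction").show();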
Example #7
Source File: WhitespaceClassifier.java From vn.vitk with GNU General Public License v3.0 | 4 votes |
/** * Trains a whitespace classifier model and save the resulting pipeline model * to an external file. * @param sentences a list of tokenized sentences. * @param pipelineModelFileName * @param numFeatures */ public void train(List<String> sentences, String pipelineModelFileName, int numFeatures) { List<WhitespaceContext> contexts = new ArrayList<WhitespaceContext>(sentences.size()); int id = 0; for (String sentence : sentences) { sentence = sentence.trim(); for (int j = 0; j < sentence.length(); j++) { char c = sentence.charAt(j); if (c == ' ' || c == '_') { WhitespaceContext context = new WhitespaceContext(); context.setId(id++); context.setContext(extractContext(sentence, j)); context.setLabel(c == ' ' ? 0d : 1d); contexts.add(context); } } } JavaRDD<WhitespaceContext> jrdd = jsc.parallelize(contexts); DataFrame df = sqlContext.createDataFrame(jrdd, WhitespaceContext.class); df.show(false); System.out.println("N = " + df.count()); df.groupBy("label").count().show(); org.apache.spark.ml.feature.Tokenizer tokenizer = new Tokenizer() .setInputCol("context").setOutputCol("words"); HashingTF hashingTF = new HashingTF().setNumFeatures(numFeatures) .setInputCol(tokenizer.getOutputCol()).setOutputCol("features"); LogisticRegression lr = new LogisticRegression().setMaxIter(100) .setRegParam(0.01); Pipeline pipeline = new Pipeline().setStages(new PipelineStage[] { tokenizer, hashingTF, lr }); model = pipeline.fit(df); try { model.write().overwrite().save(pipelineModelFileName); } catch (IOException e) { e.printStackTrace(); } DataFrame predictions = model.transform(df); predictions.show(); MulticlassClassificationEvaluator evaluator = new MulticlassClassificationEvaluator().setMetricName("precision"); double accuracy = evaluator.evaluate(predictions); System.out.println("training accuracy = " + accuracy); LogisticRegressionModel lrModel = (LogisticRegressionModel) model.stages()[2]; LogisticRegressionTrainingSummary trainingSummary = lrModel.summary(); double[] objectiveHistory = trainingSummary.objectiveHistory(); System.out.println("#(iterations) = " + objectiveHistory.length); for (double lossPerIteration : objectiveHistory) { System.out.println(lossPerIteration); } }
Example #8
Source File: FillNAValuesTransformerBridgeTest.java From spark-transformers with Apache License 2.0 | 4 votes |
@Test public void shouldWorkCorrectlyWithPipeline() { //Prepare test data DataFrame df = getDataFrame(); Row[] originalData = df.orderBy("id").select("id", "a", "b", "c", "d").collect(); //prepare transformation pipeline FillNAValuesTransformer fillNAValuesTransformer = new FillNAValuesTransformer(); fillNAValuesTransformer.setNAValueMap( getFillNAMap() ); Pipeline pipeline = new Pipeline(); pipeline.setStages(new PipelineStage[]{fillNAValuesTransformer}); PipelineModel model = pipeline.fit(df); //predict Row[] sparkOutput = model.transform(df).orderBy("id").select("id", "a", "b", "c", "d").collect(); //export byte[] exportedModel = ModelExporter.export(model, df); Transformer transformer = ModelImporter.importAndGetTransformer(exportedModel); //verify correctness assertTrue(transformer.getInputKeys().size() == 4); assertTrue(transformer.getInputKeys().containsAll(Arrays.asList("a", "b", "c", "d"))); assertTrue(transformer.getOutputKeys().size() == 4); assertTrue(transformer.getOutputKeys().containsAll(Arrays.asList("a", "b", "c", "d"))); for( int i=0; i < originalData.length; i++) { Map<String, Object> input = new HashMap<String, Object>(); input.put("a", originalData[i].get(1)); input.put("b", originalData[i].get(2)); input.put("c", originalData[i].get(3)); input.put("d", originalData[i].get(4)); transformer.transform(input); assertEquals(sparkOutput[i].get(1), input.get("a")); assertEquals(sparkOutput[i].get(2), input.get("b")); assertEquals(sparkOutput[i].get(3), input.get("c")); assertEquals(sparkOutput[i].get(4), input.get("d")); } }
Example #9
Source File: DecisionTreeClassificationModelBridgeTest.java From spark-transformers with Apache License 2.0 | 4 votes |
@Test public void testDecisionTreeClassificationWithPipeline() { // Load the data stored in LIBSVM format as a DataFrame. DataFrame data = sqlContext.read().format("libsvm").load("src/test/resources/classification_test.libsvm"); // Split the data into training and test sets (30% held out for testing) DataFrame[] splits = data.randomSplit(new double[]{0.7, 0.3}); DataFrame trainingData = splits[0]; DataFrame testData = splits[1]; StringIndexer indexer = new StringIndexer() .setInputCol("label") .setOutputCol("labelIndex"); // Train a DecisionTree model. DecisionTreeClassifier classificationModel = new DecisionTreeClassifier() .setLabelCol("labelIndex") .setFeaturesCol("features"); Pipeline pipeline = new Pipeline() .setStages(new PipelineStage[]{indexer, classificationModel}); // Train model. This also runs the indexer. PipelineModel sparkPipeline = pipeline.fit(trainingData); //Export this model byte[] exportedModel = ModelExporter.export(sparkPipeline, null); //Import and get Transformer Transformer transformer = ModelImporter.importAndGetTransformer(exportedModel); Row[] sparkOutput = sparkPipeline.transform(testData).select("label", "features", "prediction").collect(); //compare predictions for (Row row : sparkOutput) { Vector v = (Vector) row.get(1); double actual = row.getDouble(2); Map<String, Object> inputData = new HashMap<String, Object>(); inputData.put("features", v.toArray()); inputData.put("label", row.get(0).toString()); transformer.transform(inputData); double predicted = (double) inputData.get("prediction"); assertEquals(actual, predicted, EPSILON); } }
Example #10
Source File: RandomForestClassificationModelInfoAdapterBridgeTest.java From spark-transformers with Apache License 2.0 | 4 votes |
@Test
public void testRandomForestClassificationWithPipeline() {
    // Load the data stored in LIBSVM format as a DataFrame.
    DataFrame data = sqlContext.read().format("libsvm").load("src/test/resources/classification_test.libsvm");

    // Split the data into training and test sets (30% held out for testing)
    DataFrame[] splits = data.randomSplit(new double[]{0.7, 0.3});
    DataFrame trainingData = splits[0];
    DataFrame testData = splits[1];

    StringIndexer indexer = new StringIndexer()
            .setInputCol("label")
            .setOutputCol("labelIndex");

    // Train a RandomForest model.
    RandomForestClassifier classifier = new RandomForestClassifier()
            .setLabelCol("labelIndex")
            .setFeaturesCol("features")
            .setPredictionCol("prediction")
            .setRawPredictionCol("rawPrediction")
            .setProbabilityCol("probability");

    Pipeline pipeline = new Pipeline()
            .setStages(new PipelineStage[]{indexer, classifier});

    // Train model. This also runs the indexer.
    PipelineModel sparkPipeline = pipeline.fit(trainingData);

    // Export this model
    byte[] exportedModel = ModelExporter.export(sparkPipeline, null);

    // Import and get Transformer
    Transformer transformer = ModelImporter.importAndGetTransformer(exportedModel);

    Row[] sparkOutput = sparkPipeline.transform(testData)
            .select("label", "features", "prediction", "rawPrediction", "probability").collect();

    // compare predictions
    for (Row row : sparkOutput) {
        Vector v = (Vector) row.get(1);
        double actual = row.getDouble(2);
        double[] actualProbability = ((Vector) row.get(4)).toArray();
        double[] actualRaw = ((Vector) row.get(3)).toArray();

        Map<String, Object> inputData = new HashMap<String, Object>();
        inputData.put("features", v.toArray());
        inputData.put("label", row.get(0).toString());
        transformer.transform(inputData);
        double predicted = (double) inputData.get("prediction");
        double[] probability = (double[]) inputData.get("probability");
        double[] rawPrediction = (double[]) inputData.get("rawPrediction");

        assertEquals(actual, predicted, EPSILON);
        assertArrayEquals(actualProbability, probability, EPSILON);
        assertArrayEquals(actualRaw, rawPrediction, EPSILON);
    }
}
Example #11
Source File: PipelineBridgeTest.java From spark-transformers with Apache License 2.0 | 4 votes |
@Test public void testPipeline() { // Prepare training documents, which are labeled. StructType schema = createStructType(new StructField[]{ createStructField("id", LongType, false), createStructField("text", StringType, false), createStructField("label", DoubleType, false) }); DataFrame trainingData = sqlContext.createDataFrame(Arrays.asList( cr(0L, "a b c d e spark", 1.0), cr(1L, "b d", 0.0), cr(2L, "spark f g h", 1.0), cr(3L, "hadoop mapreduce", 0.0) ), schema); // Configure an ML pipeline, which consists of three stages: tokenizer, hashingTF, and LogisticRegression. RegexTokenizer tokenizer = new RegexTokenizer() .setInputCol("text") .setOutputCol("words") .setPattern("\\s") .setGaps(true) .setToLowercase(false); HashingTF hashingTF = new HashingTF() .setNumFeatures(1000) .setInputCol(tokenizer.getOutputCol()) .setOutputCol("features"); LogisticRegression lr = new LogisticRegression() .setMaxIter(10) .setRegParam(0.01); Pipeline pipeline = new Pipeline() .setStages(new PipelineStage[]{tokenizer, hashingTF, lr}); // Fit the pipeline to training documents. PipelineModel sparkPipelineModel = pipeline.fit(trainingData); //Export this model byte[] exportedModel = ModelExporter.export(sparkPipelineModel, trainingData); System.out.println(new String(exportedModel)); //Import and get Transformer Transformer transformer = ModelImporter.importAndGetTransformer(exportedModel); //prepare test data StructType testSchema = createStructType(new StructField[]{ createStructField("id", LongType, false), createStructField("text", StringType, false), }); DataFrame testData = sqlContext.createDataFrame(Arrays.asList( cr(4L, "spark i j k"), cr(5L, "l m n"), cr(6L, "mapreduce spark"), cr(7L, "apache hadoop") ), testSchema); //verify that predictions for spark pipeline and exported pipeline are the same Row[] predictions = sparkPipelineModel.transform(testData).select("id", "text", "probability", "prediction").collect(); for (Row r : predictions) { System.out.println(r); double sparkPipelineOp = r.getDouble(3); Map<String, Object> data = new HashMap<String, Object>(); data.put("text", r.getString(1)); transformer.transform(data); double exportedPipelineOp = (double) data.get("prediction"); double exportedPipelineProb = (double) data.get("probability"); assertEquals(sparkPipelineOp, exportedPipelineOp, EPSILON); } }
Example #12
Source File: DecisionTreeRegressionModelBridgeTest.java From spark-transformers with Apache License 2.0 | 4 votes |
@Test
public void testDecisionTreeRegressionWithPipeline() {
    // Load the data stored in LIBSVM format as a DataFrame.
    DataFrame data = sqlContext.read().format("libsvm").load("src/test/resources/regression_test.libsvm");

    // Split the data into training and test sets (30% held out for testing)
    DataFrame[] splits = data.randomSplit(new double[]{0.7, 0.3});
    DataFrame trainingData = splits[0];
    DataFrame testData = splits[1];

    // Train a DecisionTree model.
    DecisionTreeRegressor dt = new DecisionTreeRegressor()
            .setFeaturesCol("features");

    Pipeline pipeline = new Pipeline()
            .setStages(new PipelineStage[]{dt});

    // Fit the pipeline; its only stage is the decision tree regressor.
    PipelineModel sparkPipeline = pipeline.fit(trainingData);

    // Export this model
    byte[] exportedModel = ModelExporter.export(sparkPipeline, null);

    // Import and get Transformer
    Transformer transformer = ModelImporter.importAndGetTransformer(exportedModel);

    Row[] sparkOutput = sparkPipeline.transform(testData).select("features", "prediction").collect();

    // compare predictions
    for (Row row : sparkOutput) {
        Vector v = (Vector) row.get(0);
        double actual = row.getDouble(1);

        Map<String, Object> inputData = new HashMap<String, Object>();
        inputData.put(transformer.getInputKeys().iterator().next(), v.toArray());
        transformer.transform(inputData);
        double predicted = (double) inputData.get(transformer.getOutputKeys().iterator().next());

        assertEquals(actual, predicted, EPSILON);
    }
}
Example #13
Source File: PipelineBridgeTest.java From spark-transformers with Apache License 2.0 | 4 votes |
@Test public void testPipeline() { // Prepare training documents, which are labeled. StructType schema = createStructType(new StructField[]{ createStructField("id", LongType, false), createStructField("text", StringType, false), createStructField("label", DoubleType, false) }); Dataset<Row> trainingData = spark.createDataFrame(Arrays.asList( cr(0L, "a b c d e spark", 1.0), cr(1L, "b d", 0.0), cr(2L, "spark f g h", 1.0), cr(3L, "hadoop mapreduce", 0.0) ), schema); // Configure an ML pipeline, which consists of three stages: tokenizer, hashingTF, and LogisticRegression. RegexTokenizer tokenizer = new RegexTokenizer() .setInputCol("text") .setOutputCol("words") .setPattern("\\s") .setGaps(true) .setToLowercase(false); HashingTF hashingTF = new HashingTF() .setNumFeatures(1000) .setInputCol(tokenizer.getOutputCol()) .setOutputCol("features"); LogisticRegression lr = new LogisticRegression() .setMaxIter(10) .setRegParam(0.01); Pipeline pipeline = new Pipeline() .setStages(new PipelineStage[]{tokenizer, hashingTF, lr}); // Fit the pipeline to training documents. PipelineModel sparkPipelineModel = pipeline.fit(trainingData); //Export this model byte[] exportedModel = ModelExporter.export(sparkPipelineModel); System.out.println(new String(exportedModel)); //Import and get Transformer Transformer transformer = ModelImporter.importAndGetTransformer(exportedModel); //prepare test data StructType testSchema = createStructType(new StructField[]{ createStructField("id", LongType, false), createStructField("text", StringType, false), }); Dataset<Row> testData = spark.createDataFrame(Arrays.asList( cr(4L, "spark i j k"), cr(5L, "l m n"), cr(6L, "mapreduce spark"), cr(7L, "apache hadoop") ), testSchema); //verify that predictions for spark pipeline and exported pipeline are the same List<Row> predictions = sparkPipelineModel.transform(testData).select("id", "text", "probability", "prediction").collectAsList(); for (Row r : predictions) { System.out.println(r); double sparkPipelineOp = r.getDouble(3); Map<String, Object> data = new HashMap<String, Object>(); data.put("text", r.getString(1)); transformer.transform(data); double exportedPipelineOp = (double) data.get("prediction"); double exportedPipelineProb = (double) data.get("probability"); assertEquals(sparkPipelineOp, exportedPipelineOp, 0.01); } }
Example #14
Source File: DecisionTreeClassificationModelBridgePipelineTest.java From spark-transformers with Apache License 2.0 | 4 votes |
@Test public void testDecisionTreeClassificationWithPipeline() { // Load the data stored in LIBSVM format as a DataFrame. String datapath = "src/test/resources/classification_test.libsvm"; Dataset<Row> data = spark.read().format("libsvm").load(datapath); // Split the data into training and test sets (30% held out for testing) Dataset<Row>[] splits = data.randomSplit(new double[]{0.7, 0.3}); Dataset<Row> trainingData = splits[0]; Dataset<Row> testData = splits[1]; StringIndexer indexer = new StringIndexer() .setInputCol("label") .setOutputCol("labelIndex"); // Train a DecisionTree model. DecisionTreeClassifier classificationModel = new DecisionTreeClassifier() .setLabelCol("labelIndex") .setFeaturesCol("features"); Pipeline pipeline = new Pipeline() .setStages(new PipelineStage[]{indexer, classificationModel}); // Train model. This also runs the indexer. PipelineModel sparkPipeline = pipeline.fit(trainingData); //Export this model byte[] exportedModel = ModelExporter.export(sparkPipeline); //Import and get Transformer Transformer transformer = ModelImporter.importAndGetTransformer(exportedModel); List<Row> output = sparkPipeline.transform(testData).select("features", "label","prediction","rawPrediction").collectAsList(); //compare predictions for (Row row : output) { Map<String, Object> data_ = new HashMap<>(); double [] actualRawPrediction = ((DenseVector) row.get(3)).toArray(); data_.put("features", ((SparseVector) row.get(0)).toArray()); data_.put("label", (row.get(1)).toString()); transformer.transform(data_); System.out.println(data_); System.out.println(data_.get("prediction")); assertEquals((double)data_.get("prediction"), (double)row.get(2), EPSILON); assertArrayEquals((double[]) data_.get("rawPrediction"), actualRawPrediction, EPSILON); } }
Example #15
Source File: GradientBoostClassificationModelPipelineTest.java From spark-transformers with Apache License 2.0 | 4 votes |
@Test
public void testGradientBoostClassification() {
    // Load the data stored in LIBSVM format as a DataFrame.
    String datapath = "src/test/resources/binary_classification_test.libsvm";
    Dataset<Row> data = spark.read().format("libsvm").load(datapath);

    StringIndexer indexer = new StringIndexer()
            .setInputCol("label")
            .setOutputCol("labelIndex");

    // Split the data into training and test sets (30% held out for testing)
    Dataset<Row>[] splits = data.randomSplit(new double[]{0.7, 0.3});
    Dataset<Row> trainingData = splits[0];
    Dataset<Row> testData = splits[1];

    // Train a gradient-boosted tree (GBT) model.
    GBTClassifier classificationModel = new GBTClassifier()
            .setLabelCol("labelIndex")
            .setFeaturesCol("features");

    Pipeline pipeline = new Pipeline()
            .setStages(new PipelineStage[]{indexer, classificationModel});

    PipelineModel sparkPipeline = pipeline.fit(trainingData);

    // Export this model
    byte[] exportedModel = ModelExporter.export(sparkPipeline);

    // Import and get Transformer
    Transformer transformer = ModelImporter.importAndGetTransformer(exportedModel);

    List<Row> sparkOutput = sparkPipeline.transform(testData).select("features", "prediction", "label").collectAsList();

    // compare predictions
    for (Row row : sparkOutput) {
        Map<String, Object> data_ = new HashMap<>();
        data_.put("features", ((SparseVector) row.get(0)).toArray());
        data_.put("label", (row.get(2)).toString());
        transformer.transform(data_);

        System.out.println(data_);
        System.out.println(data_.get("prediction") + " ," + row.get(1));
        assertEquals((double) data_.get("prediction"), (double) row.get(1), EPSILON);
    }
}
Example #16
Source File: DecisionTreeRegressionModelBridgePipelineTest.java From spark-transformers with Apache License 2.0 | 4 votes |
@Test public void testDecisionTreeRegressionPrediction() { // Load the data stored in LIBSVM format as a DataFrame. String datapath = "src/test/resources/regression_test.libsvm"; Dataset<Row> data = spark.read().format("libsvm").load(datapath); // Split the data into training and test sets (30% held out for testing) Dataset<Row>[] splits = data.randomSplit(new double[]{0.7, 0.3}); Dataset<Row> trainingData = splits[0]; Dataset<Row> testData = splits[1]; StringIndexer indexer = new StringIndexer() .setInputCol("label") .setOutputCol("labelIndex").setHandleInvalid("skip"); DecisionTreeRegressor regressionModel = new DecisionTreeRegressor().setLabelCol("labelIndex").setFeaturesCol("features"); Pipeline pipeline = new Pipeline() .setStages(new PipelineStage[]{indexer, regressionModel}); PipelineModel sparkPipeline = pipeline.fit(trainingData); byte[] exportedModel = ModelExporter.export(sparkPipeline); Transformer transformer = ModelImporter.importAndGetTransformer(exportedModel); List<Row> output = sparkPipeline.transform(testData).select("features", "prediction", "label").collectAsList(); //compare predictions for (Row row : output) { Map<String, Object> data_ = new HashMap<>(); data_.put("features", ((SparseVector) row.get(0)).toArray()); data_.put("label", (row.get(2)).toString()); transformer.transform(data_); System.out.println(data_); System.out.println(data_.get("prediction")); assertEquals((double)data_.get("prediction"), (double)row.get(1), EPSILON); } }
Example #17
Source File: TransitionClassifier.java From vn.vitk with GNU General Public License v3.0 | 4 votes |
/** * Trains a transition classifier on the data frame. * @param jsc * @param graphs * @param featureFrame * @param classifierFileName * @param numHiddenUnits * @return a transition classifier. */ public Transformer trainMLP(JavaSparkContext jsc, List<DependencyGraph> graphs, FeatureFrame featureFrame, String classifierFileName, int numHiddenUnits) { // create a SQLContext this.sqlContext = new SQLContext(jsc); // extract a data frame from these graphs DataFrame dataset = toDataFrame(jsc, graphs, featureFrame); // create a processing pipeline and fit it to the data frame Pipeline pipeline = createPipeline(); PipelineModel pipelineModel = pipeline.fit(dataset); DataFrame trainingData = pipelineModel.transform(dataset); // cache the training data for better performance trainingData.cache(); if (verbose) { trainingData.show(false); } // compute the number of different labels, which is the maximum element // in the 'label' column. trainingData.registerTempTable("dfTable"); Row row = sqlContext.sql("SELECT MAX(label) as maxValue from dfTable").first(); int numLabels = (int)row.getDouble(0); numLabels++; int vocabSize = ((CountVectorizerModel)(pipelineModel.stages()[1])).getVocabSize(); // default is a two-layer MLP int[] layers = {vocabSize, numLabels}; // if user specify a hidden layer, use a 3-layer MLP: if (numHiddenUnits > 0) { layers = new int[3]; layers[0] = vocabSize; layers[1] = numHiddenUnits; layers[2] = numLabels; } MultilayerPerceptronClassifier classifier = new MultilayerPerceptronClassifier() .setLayers(layers) .setBlockSize(128) .setSeed(1234L) .setTol((Double)params.getOrDefault(params.getTolerance())) .setMaxIter((Integer)params.getOrDefault(params.getMaxIter())); MultilayerPerceptronClassificationModel model = classifier.fit(trainingData); // compute precision on the training data // DataFrame result = model.transform(trainingData); DataFrame predictionAndLabel = result.select("prediction", "label"); MulticlassClassificationEvaluator evaluator = new MulticlassClassificationEvaluator().setMetricName("precision"); if (verbose) { System.out.println("N = " + trainingData.count()); System.out.println("D = " + vocabSize); System.out.println("K = " + numLabels); System.out.println("H = " + numHiddenUnits); System.out.println("training precision = " + evaluator.evaluate(predictionAndLabel)); } // save the trained MLP to a file // String classifierPath = new Path(classifierFileName, "data").toString(); jsc.parallelize(Arrays.asList(model), 1).saveAsObjectFile(classifierPath); // save the pipeline model to sub-directory "pipelineModel" // try { String pipelinePath = new Path(classifierFileName, "pipelineModel").toString(); pipelineModel.write().overwrite().save(pipelinePath); } catch (IOException e) { e.printStackTrace(); } return model; }
Example #18
Source File: JavaDecisionTreeRegressionExample.java From SparkDemo with MIT License | 4 votes |
public static void main(String[] args) { SparkSession spark = SparkSession .builder() .appName("JavaDecisionTreeRegressionExample") .getOrCreate(); // $example on$ // Load the data stored in LIBSVM format as a DataFrame. Dataset<Row> data = spark.read().format("libsvm") .load("data/mllib/sample_libsvm_data.txt"); // Automatically identify categorical features, and index them. // Set maxCategories so features with > 4 distinct values are treated as continuous. VectorIndexerModel featureIndexer = new VectorIndexer() .setInputCol("features") .setOutputCol("indexedFeatures") .setMaxCategories(4) .fit(data); // Split the data into training and test sets (30% held out for testing). Dataset<Row>[] splits = data.randomSplit(new double[]{0.7, 0.3}); Dataset<Row> trainingData = splits[0]; Dataset<Row> testData = splits[1]; // Train a DecisionTree model. DecisionTreeRegressor dt = new DecisionTreeRegressor() .setFeaturesCol("indexedFeatures"); // Chain indexer and tree in a Pipeline. Pipeline pipeline = new Pipeline() .setStages(new PipelineStage[]{featureIndexer, dt}); // Train model. This also runs the indexer. PipelineModel model = pipeline.fit(trainingData); // Make predictions. Dataset<Row> predictions = model.transform(testData); // Select example rows to display. predictions.select("label", "features").show(5); // Select (prediction, true label) and compute test error. RegressionEvaluator evaluator = new RegressionEvaluator() .setLabelCol("label") .setPredictionCol("prediction") .setMetricName("rmse"); double rmse = evaluator.evaluate(predictions); System.out.println("Root Mean Squared Error (RMSE) on test data = " + rmse); DecisionTreeRegressionModel treeModel = (DecisionTreeRegressionModel) (model.stages()[1]); System.out.println("Learned regression tree model:\n" + treeModel.toDebugString()); // $example off$ spark.stop(); }
Example #19
Source File: JavaDecisionTreeClassificationExample.java From SparkDemo with MIT License | 4 votes |
public static void main(String[] args) { SparkSession spark = SparkSession .builder() .appName("JavaDecisionTreeClassificationExample") .getOrCreate(); // $example on$ // Load the data stored in LIBSVM format as a DataFrame. Dataset<Row> data = spark .read() .format("libsvm") .load("data/mllib/sample_libsvm_data.txt"); // Index labels, adding metadata to the label column. // Fit on whole dataset to include all labels in index. StringIndexerModel labelIndexer = new StringIndexer() .setInputCol("label") .setOutputCol("indexedLabel") .fit(data); // Automatically identify categorical features, and index them. VectorIndexerModel featureIndexer = new VectorIndexer() .setInputCol("features") .setOutputCol("indexedFeatures") .setMaxCategories(4) // features with > 4 distinct values are treated as continuous. .fit(data); // Split the data into training and test sets (30% held out for testing). Dataset<Row>[] splits = data.randomSplit(new double[]{0.7, 0.3}); Dataset<Row> trainingData = splits[0]; Dataset<Row> testData = splits[1]; // Train a DecisionTree model. DecisionTreeClassifier dt = new DecisionTreeClassifier() .setLabelCol("indexedLabel") .setFeaturesCol("indexedFeatures"); // Convert indexed labels back to original labels. IndexToString labelConverter = new IndexToString() .setInputCol("prediction") .setOutputCol("predictedLabel") .setLabels(labelIndexer.labels()); // Chain indexers and tree in a Pipeline. Pipeline pipeline = new Pipeline() .setStages(new PipelineStage[]{labelIndexer, featureIndexer, dt, labelConverter}); // Train model. This also runs the indexers. PipelineModel model = pipeline.fit(trainingData); // Make predictions. Dataset<Row> predictions = model.transform(testData); // Select example rows to display. predictions.select("predictedLabel", "label", "features").show(5); // Select (prediction, true label) and compute test error. MulticlassClassificationEvaluator evaluator = new MulticlassClassificationEvaluator() .setLabelCol("indexedLabel") .setPredictionCol("prediction") .setMetricName("accuracy"); double accuracy = evaluator.evaluate(predictions); System.out.println("Test Error = " + (1.0 - accuracy)); DecisionTreeClassificationModel treeModel = (DecisionTreeClassificationModel) (model.stages()[2]); System.out.println("Learned classification tree model:\n" + treeModel.toDebugString()); // $example off$ spark.stop(); }
Example #20
Source File: JavaModelSelectionViaCrossValidationExample.java From SparkDemo with MIT License | 4 votes |
public static void main(String[] args) { SparkSession spark = SparkSession .builder() .appName("JavaModelSelectionViaCrossValidationExample") .getOrCreate(); // $example on$ // Prepare training documents, which are labeled. Dataset<Row> training = spark.createDataFrame(Arrays.asList( new JavaLabeledDocument(0L, "a b c d e spark", 1.0), new JavaLabeledDocument(1L, "b d", 0.0), new JavaLabeledDocument(2L,"spark f g h", 1.0), new JavaLabeledDocument(3L, "hadoop mapreduce", 0.0), new JavaLabeledDocument(4L, "b spark who", 1.0), new JavaLabeledDocument(5L, "g d a y", 0.0), new JavaLabeledDocument(6L, "spark fly", 1.0), new JavaLabeledDocument(7L, "was mapreduce", 0.0), new JavaLabeledDocument(8L, "e spark program", 1.0), new JavaLabeledDocument(9L, "a e c l", 0.0), new JavaLabeledDocument(10L, "spark compile", 1.0), new JavaLabeledDocument(11L, "hadoop software", 0.0) ), JavaLabeledDocument.class); // Configure an ML pipeline, which consists of three stages: tokenizer, hashingTF, and lr. Tokenizer tokenizer = new Tokenizer() .setInputCol("text") .setOutputCol("words"); HashingTF hashingTF = new HashingTF() .setNumFeatures(1000) .setInputCol(tokenizer.getOutputCol()) .setOutputCol("features"); LogisticRegression lr = new LogisticRegression() .setMaxIter(10) .setRegParam(0.01); Pipeline pipeline = new Pipeline() .setStages(new PipelineStage[] {tokenizer, hashingTF, lr}); // We use a ParamGridBuilder to construct a grid of parameters to search over. // With 3 values for hashingTF.numFeatures and 2 values for lr.regParam, // this grid will have 3 x 2 = 6 parameter settings for CrossValidator to choose from. ParamMap[] paramGrid = new ParamGridBuilder() .addGrid(hashingTF.numFeatures(), new int[] {10, 100, 1000}) .addGrid(lr.regParam(), new double[] {0.1, 0.01}) .build(); // We now treat the Pipeline as an Estimator, wrapping it in a CrossValidator instance. // This will allow us to jointly choose parameters for all Pipeline stages. // A CrossValidator requires an Estimator, a set of Estimator ParamMaps, and an Evaluator. // Note that the evaluator here is a BinaryClassificationEvaluator and its default metric // is areaUnderROC. CrossValidator cv = new CrossValidator() .setEstimator(pipeline) .setEvaluator(new BinaryClassificationEvaluator()) .setEstimatorParamMaps(paramGrid).setNumFolds(2); // Use 3+ in practice // Run cross-validation, and choose the best set of parameters. CrossValidatorModel cvModel = cv.fit(training); // Prepare test documents, which are unlabeled. Dataset<Row> test = spark.createDataFrame(Arrays.asList( new JavaDocument(4L, "spark i j k"), new JavaDocument(5L, "l m n"), new JavaDocument(6L, "mapreduce spark"), new JavaDocument(7L, "apache hadoop") ), JavaDocument.class); // Make predictions on test documents. cvModel uses the best model found (lrModel). Dataset<Row> predictions = cvModel.transform(test); for (Row r : predictions.select("id", "text", "probability", "prediction").collectAsList()) { System.out.println("(" + r.get(0) + ", " + r.get(1) + ") --> prob=" + r.get(2) + ", prediction=" + r.get(3)); } // $example off$ spark.stop(); }
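A possible follow-up to Example #20: the fitted CrossValidatorModel keeps the best pipeline it found, whose stages (and the winning hyperparameters) can be inspected. A hedged sketch, assuming the cvModel variable from the example above:

// The best model chosen by CrossValidator is the fitted Pipeline itself.
PipelineModel bestPipeline = (PipelineModel) cvModel.bestModel();
HashingTF bestHashingTF = (HashingTF) bestPipeline.stages()[1];
LogisticRegressionModel bestLr = (LogisticRegressionModel) bestPipeline.stages()[2];
System.out.println("Best numFeatures: " + bestHashingTF.getNumFeatures());
System.out.println("Best regParam: " + bestLr.getRegParam());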
Example #21
Source File: JavaGradientBoostedTreeClassifierExample.java From SparkDemo with MIT License | 4 votes |
public static void main(String[] args) { SparkSession spark = SparkSession .builder() .appName("JavaGradientBoostedTreeClassifierExample") .getOrCreate(); // $example on$ // Load and parse the data file, converting it to a DataFrame. Dataset<Row> data = spark .read() .format("libsvm") .load("data/mllib/sample_libsvm_data.txt"); // Index labels, adding metadata to the label column. // Fit on whole dataset to include all labels in index. StringIndexerModel labelIndexer = new StringIndexer() .setInputCol("label") .setOutputCol("indexedLabel") .fit(data); // Automatically identify categorical features, and index them. // Set maxCategories so features with > 4 distinct values are treated as continuous. VectorIndexerModel featureIndexer = new VectorIndexer() .setInputCol("features") .setOutputCol("indexedFeatures") .setMaxCategories(4) .fit(data); // Split the data into training and test sets (30% held out for testing) Dataset<Row>[] splits = data.randomSplit(new double[] {0.7, 0.3}); Dataset<Row> trainingData = splits[0]; Dataset<Row> testData = splits[1]; // Train a GBT model. GBTClassifier gbt = new GBTClassifier() .setLabelCol("indexedLabel") .setFeaturesCol("indexedFeatures") .setMaxIter(10); // Convert indexed labels back to original labels. IndexToString labelConverter = new IndexToString() .setInputCol("prediction") .setOutputCol("predictedLabel") .setLabels(labelIndexer.labels()); // Chain indexers and GBT in a Pipeline. Pipeline pipeline = new Pipeline() .setStages(new PipelineStage[] {labelIndexer, featureIndexer, gbt, labelConverter}); // Train model. This also runs the indexers. PipelineModel model = pipeline.fit(trainingData); // Make predictions. Dataset<Row> predictions = model.transform(testData); // Select example rows to display. predictions.select("predictedLabel", "label", "features").show(5); // Select (prediction, true label) and compute test error. MulticlassClassificationEvaluator evaluator = new MulticlassClassificationEvaluator() .setLabelCol("indexedLabel") .setPredictionCol("prediction") .setMetricName("accuracy"); double accuracy = evaluator.evaluate(predictions); System.out.println("Test Error = " + (1.0 - accuracy)); GBTClassificationModel gbtModel = (GBTClassificationModel)(model.stages()[2]); System.out.println("Learned classification GBT model:\n" + gbtModel.toDebugString()); // $example off$ spark.stop(); }
Example #22
Source File: JavaRandomForestRegressorExample.java From SparkDemo with MIT License | 4 votes |
public static void main(String[] args) { SparkSession spark = SparkSession .builder() .appName("JavaRandomForestRegressorExample") .getOrCreate(); // $example on$ // Load and parse the data file, converting it to a DataFrame. Dataset<Row> data = spark.read().format("libsvm").load("data/mllib/sample_libsvm_data.txt"); // Automatically identify categorical features, and index them. // Set maxCategories so features with > 4 distinct values are treated as continuous. VectorIndexerModel featureIndexer = new VectorIndexer() .setInputCol("features") .setOutputCol("indexedFeatures") .setMaxCategories(4) .fit(data); // Split the data into training and test sets (30% held out for testing) Dataset<Row>[] splits = data.randomSplit(new double[] {0.7, 0.3}); Dataset<Row> trainingData = splits[0]; Dataset<Row> testData = splits[1]; // Train a RandomForest model. RandomForestRegressor rf = new RandomForestRegressor() .setLabelCol("label") .setFeaturesCol("indexedFeatures"); // Chain indexer and forest in a Pipeline Pipeline pipeline = new Pipeline() .setStages(new PipelineStage[] {featureIndexer, rf}); // Train model. This also runs the indexer. PipelineModel model = pipeline.fit(trainingData); // Make predictions. Dataset<Row> predictions = model.transform(testData); // Select example rows to display. predictions.select("prediction", "label", "features").show(5); // Select (prediction, true label) and compute test error RegressionEvaluator evaluator = new RegressionEvaluator() .setLabelCol("label") .setPredictionCol("prediction") .setMetricName("rmse"); double rmse = evaluator.evaluate(predictions); System.out.println("Root Mean Squared Error (RMSE) on test data = " + rmse); RandomForestRegressionModel rfModel = (RandomForestRegressionModel)(model.stages()[1]); System.out.println("Learned regression forest model:\n" + rfModel.toDebugString()); // $example off$ spark.stop(); }
Example #23
Source File: JavaGradientBoostedTreeRegressorExample.java From SparkDemo with MIT License | 4 votes |
public static void main(String[] args) { SparkSession spark = SparkSession .builder() .appName("JavaGradientBoostedTreeRegressorExample") .getOrCreate(); // $example on$ // Load and parse the data file, converting it to a DataFrame. Dataset<Row> data = spark.read().format("libsvm").load("data/mllib/sample_libsvm_data.txt"); // Automatically identify categorical features, and index them. // Set maxCategories so features with > 4 distinct values are treated as continuous. VectorIndexerModel featureIndexer = new VectorIndexer() .setInputCol("features") .setOutputCol("indexedFeatures") .setMaxCategories(4) .fit(data); // Split the data into training and test sets (30% held out for testing). Dataset<Row>[] splits = data.randomSplit(new double[] {0.7, 0.3}); Dataset<Row> trainingData = splits[0]; Dataset<Row> testData = splits[1]; // Train a GBT model. GBTRegressor gbt = new GBTRegressor() .setLabelCol("label") .setFeaturesCol("indexedFeatures") .setMaxIter(10); // Chain indexer and GBT in a Pipeline. Pipeline pipeline = new Pipeline().setStages(new PipelineStage[] {featureIndexer, gbt}); // Train model. This also runs the indexer. PipelineModel model = pipeline.fit(trainingData); // Make predictions. Dataset<Row> predictions = model.transform(testData); // Select example rows to display. predictions.select("prediction", "label", "features").show(5); // Select (prediction, true label) and compute test error. RegressionEvaluator evaluator = new RegressionEvaluator() .setLabelCol("label") .setPredictionCol("prediction") .setMetricName("rmse"); double rmse = evaluator.evaluate(predictions); System.out.println("Root Mean Squared Error (RMSE) on test data = " + rmse); GBTRegressionModel gbtModel = (GBTRegressionModel)(model.stages()[1]); System.out.println("Learned regression GBT model:\n" + gbtModel.toDebugString()); // $example off$ spark.stop(); }
Example #24
Source File: JavaPipelineExample.java From SparkDemo with MIT License | 4 votes |
public static void main(String[] args) { SparkSession spark = SparkSession .builder() .appName("JavaPipelineExample") .getOrCreate(); // $example on$ // Prepare training documents, which are labeled. Dataset<Row> training = spark.createDataFrame(Arrays.asList( new JavaLabeledDocument(0L, "a b c d e spark", 1.0), new JavaLabeledDocument(1L, "b d", 0.0), new JavaLabeledDocument(2L, "spark f g h", 1.0), new JavaLabeledDocument(3L, "hadoop mapreduce", 0.0) ), JavaLabeledDocument.class); // Configure an ML pipeline, which consists of three stages: tokenizer, hashingTF, and lr. Tokenizer tokenizer = new Tokenizer() .setInputCol("text") .setOutputCol("words"); HashingTF hashingTF = new HashingTF() .setNumFeatures(1000) .setInputCol(tokenizer.getOutputCol()) .setOutputCol("features"); LogisticRegression lr = new LogisticRegression() .setMaxIter(10) .setRegParam(0.001); Pipeline pipeline = new Pipeline() .setStages(new PipelineStage[] {tokenizer, hashingTF, lr}); // Fit the pipeline to training documents. PipelineModel model = pipeline.fit(training); // Prepare test documents, which are unlabeled. Dataset<Row> test = spark.createDataFrame(Arrays.asList( new JavaDocument(4L, "spark i j k"), new JavaDocument(5L, "l m n"), new JavaDocument(6L, "spark hadoop spark"), new JavaDocument(7L, "apache hadoop") ), JavaDocument.class); // Make predictions on test documents. Dataset<Row> predictions = model.transform(test); for (Row r : predictions.select("id", "text", "probability", "prediction").collectAsList()) { System.out.println("(" + r.get(0) + ", " + r.get(1) + ") --> prob=" + r.get(2) + ", prediction=" + r.get(3)); } // $example off$ spark.stop(); }
Example #25
Source File: SparkMultiClassClassifier.java From mmtf-spark with Apache License 2.0 | 4 votes |
/** * Dataset must at least contain the following two columns: * label: the class labels * features: feature vector * @param data * @return map with metrics */ public Map<String,String> fit(Dataset<Row> data) { int classCount = (int)data.select(label).distinct().count(); StringIndexerModel labelIndexer = new StringIndexer() .setInputCol(label) .setOutputCol("indexedLabel") .fit(data); // Split the data into training and test sets (30% held out for testing) Dataset<Row>[] splits = data.randomSplit(new double[] {1.0-testFraction, testFraction}, seed); Dataset<Row> trainingData = splits[0]; Dataset<Row> testData = splits[1]; String[] labels = labelIndexer.labels(); System.out.println(); System.out.println("Class\tTrain\tTest"); for (String l: labels) { System.out.println(l + "\t" + trainingData.select(label).filter(label + " = '" + l + "'").count() + "\t" + testData.select(label).filter(label + " = '" + l + "'").count()); } // Set input columns predictor .setLabelCol("indexedLabel") .setFeaturesCol("features"); // Convert indexed labels back to original labels. IndexToString labelConverter = new IndexToString() .setInputCol("prediction") .setOutputCol("predictedLabel") .setLabels(labelIndexer.labels()); // Chain indexers and forest in a Pipeline Pipeline pipeline = new Pipeline() .setStages(new PipelineStage[] {labelIndexer, predictor, labelConverter}); // Train model. This also runs the indexers. PipelineModel model = pipeline.fit(trainingData); // Make predictions. Dataset<Row> predictions = model.transform(testData).cache(); // Display some sample predictions System.out.println(); System.out.println("Sample predictions: " + predictor.getClass().getSimpleName()); predictions.sample(false, 0.1, seed).show(25); predictions = predictions.withColumnRenamed(label, "stringLabel"); predictions = predictions.withColumnRenamed("indexedLabel", label); // collect metrics Dataset<Row> pred = predictions.select("prediction",label); Map<String,String> metrics = new LinkedHashMap<>(); metrics.put("Method", predictor.getClass().getSimpleName()); if (classCount == 2) { BinaryClassificationMetrics b = new BinaryClassificationMetrics(pred); metrics.put("AUC", Float.toString((float)b.areaUnderROC())); } MulticlassMetrics m = new MulticlassMetrics(pred); metrics.put("F", Float.toString((float)m.weightedFMeasure())); metrics.put("Accuracy", Float.toString((float)m.accuracy())); metrics.put("Precision", Float.toString((float)m.weightedPrecision())); metrics.put("Recall", Float.toString((float)m.weightedRecall())); metrics.put("False Positive Rate", Float.toString((float)m.weightedFalsePositiveRate())); metrics.put("True Positive Rate", Float.toString((float)m.weightedTruePositiveRate())); metrics.put("", "\nConfusion Matrix\n" + Arrays.toString(labels) +"\n" + m.confusionMatrix().toString()); return metrics; }
Example #26
Source File: SparkRegressor.java From mmtf-spark with Apache License 2.0 | 4 votes |
/**
 * Dataset must at least contain the following two columns:
 * label: the value to predict
 * features: feature vector
 * @param data
 * @return map with metrics
 */
public Map<String, String> fit(Dataset<Row> data) {
    // Split the data into training and test sets (testFraction held out for testing)
    Dataset<Row>[] splits = data.randomSplit(new double[]{1.0 - testFraction, testFraction}, seed);
    Dataset<Row> trainingData = splits[0];
    Dataset<Row> testData = splits[1];

    // Configure the regressor's label and feature columns.
    predictor
            .setLabelCol(label)
            .setFeaturesCol("features");

    // Wrap the predictor in a Pipeline
    Pipeline pipeline = new Pipeline()
            .setStages(new PipelineStage[]{predictor});

    // Train model.
    PipelineModel model = pipeline.fit(trainingData);

    // Make predictions.
    Dataset<Row> predictions = model.transform(testData);

    // Display some sample predictions
    System.out.println("Sample predictions: " + predictor.getClass().getSimpleName());
    String primaryKey = predictions.columns()[0];
    predictions.select(primaryKey, label, "prediction").sample(false, 0.1, seed).show(50);

    Map<String, String> metrics = new LinkedHashMap<>();
    metrics.put("Method", predictor.getClass().getSimpleName());

    // Select (prediction, true label) and compute test error
    RegressionEvaluator evaluator = new RegressionEvaluator()
            .setLabelCol(label)
            .setPredictionCol("prediction")
            .setMetricName("rmse");

    metrics.put("rmse", Double.toString(evaluator.evaluate(predictions)));

    return metrics;
}
Example #27
Source File: BikeRentalPrediction.java From Apache-Spark-2x-for-Java-Developers with MIT License | 4 votes |
public static void main(String[] args) {
    System.setProperty("hadoop.home.dir", "E:\\sumitK\\Hadoop");
    SparkSession sparkSession = SparkSession
            .builder()
            .master("local")
            .config("spark.sql.warehouse.dir", "file:///E:/sumitK/Hadoop/warehouse")
            .appName("BikeRentalPrediction").getOrCreate();
    Logger rootLogger = LogManager.getRootLogger();
    rootLogger.setLevel(Level.WARN);

    // We use the sparkSession.read method to read the data and set a few options:
    //  'format': specifies the Spark CSV data source
    //  'header': set to true to indicate that the first line of the CSV data file is a header
    // The file is called 'hour.csv'.
    Dataset<Row> ds = sparkSession.read()
            .format("org.apache.spark.sql.execution.datasources.csv.CSVFileFormat")
            .option("header", "true")
            .load("E:\\sumitK\\Hadoop\\Bike-Sharing-Dataset\\hour.csv");

    ds.cache();

    ds.select("season").show();
    ds.show();

    System.out.println("Our dataset has rows :: " + ds.count());

    Dataset<Row> df = ds.drop("instant").drop("dteday").drop("casual").drop("registered");
    df.printSchema();

    // col("...") is preferable to df.col("...")
    Dataset<Row> dformatted = df.select(
            col("season").cast(DataTypes.IntegerType),
            col("yr").cast(DataTypes.IntegerType),
            col("mnth").cast(DataTypes.IntegerType),
            col("hr").cast(DataTypes.IntegerType),
            col("holiday").cast(DataTypes.IntegerType),
            col("weekday").cast(DataTypes.IntegerType),
            col("workingday").cast(DataTypes.IntegerType),
            col("weathersit").cast(DataTypes.IntegerType),
            col("temp").cast(DataTypes.IntegerType),
            col("atemp").cast(DataTypes.IntegerType),
            col("hum").cast(DataTypes.IntegerType),
            col("windspeed").cast(DataTypes.IntegerType),
            col("cnt").cast(DataTypes.IntegerType));

    dformatted.printSchema();

    Dataset<Row>[] data = dformatted.randomSplit(new double[]{0.7, 0.3});
    System.out.println("We have training examples count :: " + data[0].count() + " and test examples count ::" + data[1].count());

    // Remove the 'cnt' column and then form the feature-name array
    String[] featuresCols = dformatted.drop("cnt").columns();
    for (String str : featuresCols) {
        System.out.println(str + " :: ");
    }

    // This concatenates all feature columns into a single feature vector in a new column "rawFeatures".
    VectorAssembler vectorAssembler = new VectorAssembler().setInputCols(featuresCols).setOutputCol("rawFeatures");

    // This identifies categorical features and indexes them.
    VectorIndexer vectorIndexer = new VectorIndexer().setInputCol("rawFeatures").setOutputCol("features").setMaxCategories(4);

    // Takes the "features" column and learns to predict "cnt"
    GBTRegressor gbt = new GBTRegressor().setLabelCol("cnt");

    // Define a grid of hyperparameters to test:
    //  - maxDepth: max depth of each decision tree in the GBT ensemble
    //  - maxIter: iterations, i.e., number of trees in each GBT ensemble
    // In this example notebook, we keep these values small. In practice, to get the highest accuracy,
    // you would likely want to try deeper trees (10 or higher) and more trees in the ensemble (>100).
    ParamMap[] paramGrid = new ParamGridBuilder()
            .addGrid(gbt.maxDepth(), new int[]{2, 5})
            .addGrid(gbt.maxIter(), new int[]{10, 100})
            .build();

    // We define an evaluation metric. This tells CrossValidator how well we are doing by comparing the true labels with predictions.
    RegressionEvaluator evaluator = new RegressionEvaluator()
            .setMetricName("rmse")
            .setLabelCol(gbt.getLabelCol())
            .setPredictionCol(gbt.getPredictionCol());

    // Declare the CrossValidator, which runs model tuning for us.
    CrossValidator cv = new CrossValidator().setEstimator(gbt).setEvaluator(evaluator).setEstimatorParamMaps(paramGrid);

    Pipeline pipeline = new Pipeline().setStages(new PipelineStage[]{vectorAssembler, vectorIndexer, cv});

    PipelineModel pipelineModel = pipeline.fit(data[0]);

    Dataset<Row> predictions = pipelineModel.transform(data[1]);

    predictions.show();
    //predictions.select("cnt", "prediction", *featuresCols);
}