org.apache.spark.ml.feature.VectorIndexer Java Examples
The following examples show how to use
org.apache.spark.ml.feature.VectorIndexer.
You can follow the links above each example to view the original project or source file.
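Before the individual examples, here is a minimal orientation sketch of the typical VectorIndexer flow. It is a compact illustration, not taken from any of the projects below: the class name, the maxCategories value, and the output column name are placeholders, and the bundled LIBSVM sample file is used only as a convenient stand-in for any DataFrame that already contains a Vector column.

import java.util.Map;

import org.apache.spark.ml.feature.VectorIndexer;
import org.apache.spark.ml.feature.VectorIndexerModel;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.SparkSession;

public class VectorIndexerSketch {  // placeholder class name
  public static void main(String[] args) {
    SparkSession spark = SparkSession.builder().appName("VectorIndexerSketch").getOrCreate();

    // Any DataFrame with a Vector column works; the sample LIBSVM data is just a stand-in.
    Dataset<Row> data = spark.read().format("libsvm").load("data/mllib/sample_libsvm_data.txt");

    // Features with at most 10 distinct values are treated as categorical and re-encoded as indices.
    VectorIndexerModel model = new VectorIndexer()
        .setInputCol("features")
        .setOutputCol("indexedFeatures")
        .setMaxCategories(10)
        .fit(data);

    // featureIndex -> (originalValue -> categoryIndex), for every feature judged categorical.
    Map<Integer, Map<Double, Integer>> categoryMaps = model.javaCategoryMaps();
    System.out.println("Categorical features detected: " + categoryMaps.keySet());

    model.transform(data).show(5);
    spark.stop();
  }
}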
Example #1
Source File: JavaVectorIndexerExample.java From SparkDemo with MIT License
public static void main(String[] args) {
  SparkSession spark = SparkSession
    .builder()
    .appName("JavaVectorIndexerExample")
    .getOrCreate();

  // $example on$
  Dataset<Row> data = spark.read().format("libsvm").load("data/mllib/sample_libsvm_data.txt");

  VectorIndexer indexer = new VectorIndexer()
    .setInputCol("features")
    .setOutputCol("indexed")
    .setMaxCategories(10);
  VectorIndexerModel indexerModel = indexer.fit(data);

  Map<Integer, Map<Double, Integer>> categoryMaps = indexerModel.javaCategoryMaps();
  System.out.print("Chose " + categoryMaps.size() + " categorical features:");

  for (Integer feature : categoryMaps.keySet()) {
    System.out.print(" " + feature);
  }
  System.out.println();

  // Create new column "indexed" with categorical values transformed to indices
  Dataset<Row> indexedData = indexerModel.transform(data);
  indexedData.show();
  // $example off$

  spark.stop();
}
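Not shown in the example above, but often useful in practice: a fitted VectorIndexerModel is MLWritable, so the learned category mappings can be persisted and reused on new data. A minimal follow-on sketch, continuing from the indexerModel and data variables above; the path is a placeholder and Spark 2.x is assumed.

// Persist the fitted indexer so the same category mappings are applied at serving time.
// "/tmp/vector-indexer-model" is a placeholder path.
indexerModel.write().overwrite().save("/tmp/vector-indexer-model");

// Later (or in another job), reload it and transform new data with identical indexing.
VectorIndexerModel reloaded = VectorIndexerModel.load("/tmp/vector-indexer-model");
Dataset<Row> reindexed = reloaded.transform(data);
reindexed.show(5);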
Example #2
Source File: Data2CoNLL.java From ambiverse-nlu with Apache License 2.0
@Override
protected int run() throws Exception {
    SparkConf sparkConf = new SparkConf()
        .setAppName("Data2CoNLL")
        .set("spark.hadoop.validateOutputSpecs", "false")
        .set("spark.yarn.executor.memoryOverhead", "3072")
        .set("spark.rdd.compress", "true")
        .set("spark.core.connection.ack.wait.timeout", "600")
        .set("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
        //.set("spark.kryo.registrationRequired", "true")
        .registerKryoClasses(new Class[] {SCAS.class, LabeledPoint.class, SparseVector.class, int[].class,
            double[].class, InternalRow[].class, GenericInternalRow.class, Object[].class, GenericArrayData.class,
            VectorIndexer.class})
        ;//.setMaster("local[4]"); //Remove this if you run it on the server.

    JavaSparkContext sc = new JavaSparkContext(sparkConf);
    int totalCores = Integer.parseInt(sc.getConf().get("spark.executor.instances"))
        * Integer.parseInt(sc.getConf().get("spark.executor.cores"));

    FileSystem fs = FileSystem.get(new Configuration());

    int partitionNumber = 3 * totalCores;
    if(partitions != null) {
        partitionNumber = partitions;
    }

    //Read training documents serialized as SCAS
    JavaRDD<SCAS> documents = sc.sequenceFile(input, Text.class, SCAS.class, partitionNumber).values();

    JavaRDD<String> docStrings = documents.map( s -> {
        JCas jCas = s.getJCas();
        NYTArticleMetaData metadata = JCasUtil.selectSingle(jCas, NYTArticleMetaData.class);

        StringJoiner docBuilder = new StringJoiner("\n");
        docBuilder.add("-DOCSTART- (" + metadata.getGuid() + ")");
        docBuilder.add("");

        Collection<Sentence> sentences = JCasUtil.select(jCas, Sentence.class);
        for(Sentence sentence: sentences) {
            List<Token> tokens = JCasUtil.selectCovered(jCas, Token.class, sentence);
            for(Token token: tokens) {
                CoreLabel taggedWord = CoreNlpUtils.tokenToWord(token);
                StringJoiner lineBuilder = new StringJoiner("\t");
                lineBuilder.add(taggedWord.word().toLowerCase());
                docBuilder.add(lineBuilder.toString());
            }
            docBuilder.add("");
        }
        return docBuilder.toString();
    });

    docStrings.saveAsTextFile(output);
    sc.stop();
    return 0;
}
Example #3
Source File: EntitySalienceTrainingSparkRunner.java From ambiverse-nlu with Apache License 2.0
@Override
protected int run() throws Exception {
    SparkConf sparkConf = new SparkConf()
        .setAppName("EntitySalienceTrainingSparkRunner")
        .set("spark.hadoop.validateOutputSpecs", "false")
        .set("spark.yarn.executor.memoryOverhead", "3072")
        .set("spark.rdd.compress", "true")
        .set("spark.core.connection.ack.wait.timeout", "600")
        .set("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
        //.set("spark.kryo.registrationRequired", "true")
        .registerKryoClasses(new Class[] {SCAS.class, LabeledPoint.class, SparseVector.class, int[].class,
            double[].class, InternalRow[].class, GenericInternalRow.class, Object[].class, GenericArrayData.class,
            VectorIndexer.class})
        ;//.setMaster("local[4]"); //Remove this if you run it on the server.

    TrainingSettings trainingSettings = new TrainingSettings();

    if(folds != null) {
        trainingSettings.setNumFolds(folds);
    }
    if(method != null) {
        trainingSettings.setClassificationMethod(TrainingSettings.ClassificationMethod.valueOf(method));
    }
    if(defaultConf != null) {
        trainingSettings.setAidaDefaultConf(defaultConf);
    }
    if(scalingFactor != null) {
        trainingSettings.setPositiveInstanceScalingFactor(scalingFactor);
    }

    JavaSparkContext sc = new JavaSparkContext(sparkConf);
    int totalCores = Integer.parseInt(sc.getConf().get("spark.executor.instances"))
        * Integer.parseInt(sc.getConf().get("spark.executor.cores"));
//    int totalCores = 4;
////    trainingSettings.setFeatureExtractor(TrainingSettings.FeatureExtractor.ANNOTATE_AND_ENTITY_SALIENCE);
////    trainingSettings.setAidaDefaultConf("db");
//    //trainingSettings.setClassificationMethod(TrainingSettings.ClassificationMethod.LOG_REG);
//    trainingSettings.setPositiveInstanceScalingFactor(1);

    //Add the cache files to each node only if annotation is required.
    //The input documents could already be annotated, and in this case no caches are needed.
    if(trainingSettings.getFeatureExtractor().equals(TrainingSettings.FeatureExtractor.ANNOTATE_AND_ENTITY_SALIENCE)) {
        sc.addFile(trainingSettings.getBigramCountCache());
        sc.addFile(trainingSettings.getKeywordCountCache());
        sc.addFile(trainingSettings.getWordContractionsCache());
        sc.addFile(trainingSettings.getWordExpansionsCache());
        if (trainingSettings.getAidaDefaultConf().equals("db")) {
            sc.addFile(trainingSettings.getDatabaseAida());
        } else {
            sc.addFile(trainingSettings.getCassandraConfig());
        }
    }

    SQLContext sqlContext = new SQLContext(sc);

    FileSystem fs = FileSystem.get(new Configuration());

    int partitionNumber = 3 * totalCores;
    if(partitions != null) {
        partitionNumber = partitions;
    }

    //Read training documents serialized as SCAS
    JavaRDD<SCAS> documents = sc.sequenceFile(input, Text.class, SCAS.class, partitionNumber).values();

    //Instantiate a training spark runner
    TrainingSparkRunner trainingSparkRunner = new TrainingSparkRunner();

    //Train a model
    CrossValidatorModel model = trainingSparkRunner.crossValidate(sc, sqlContext, documents, trainingSettings);

    //Create the model path
    String modelPath = output+"/"+sc.getConf().getAppId()+"/model_"+trainingSettings.getClassificationMethod();

    //Delete the old model if there is one
    fs.delete(new Path(modelPath), true);

    //Save the new model
    List<Model> models = new ArrayList<>();
    models.add(model.bestModel());
    sc.parallelize(models, 1).saveAsObjectFile(modelPath);

    //Save the model stats
    SparkClassificationModel.saveStats(model, trainingSettings, output+"/"+sc.getConf().getAppId()+"/");

    return 0;
}
Example #4
Source File: EntitySalienceTestingSparkRunner.java From ambiverse-nlu with Apache License 2.0
@Override
protected int run() throws Exception {
    SparkConf sparkConf = new SparkConf()
        .setAppName("EntitySalienceTrainingSparkRunner")
        .set("spark.hadoop.validateOutputSpecs", "false")
        //.set("spark.yarn.executor.memoryOverhead", "4096")
        .set("spark.rdd.compress", "true")
        .set("spark.core.connection.ack.wait.timeout", "600")
        .set("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
        //.set("spark.kryo.registrationRequired", "true")
        .registerKryoClasses(new Class[] {SCAS.class, LabeledPoint.class, SparseVector.class, int[].class,
            double[].class, InternalRow[].class, GenericInternalRow.class, Object[].class, GenericArrayData.class,
            VectorIndexer.class})
        ;//setMaster("local"); //Remove this if you run it on the server.

    TrainingSettings trainingSettings = new TrainingSettings();

    if(defaultConf != null) {
        trainingSettings.setAidaDefaultConf(defaultConf);
    }

    JavaSparkContext sc = new JavaSparkContext(sparkConf);
    int totalCores = Integer.parseInt(sc.getConf().get("spark.executor.instances"))
        * Integer.parseInt(sc.getConf().get("spark.executor.cores"));
//    int totalCores = 2;

    //trainingSettings.setClassificationMethod(TrainingSettings.ClassificationMethod.LOG_REG);
    trainingSettings.setPositiveInstanceScalingFactor(1);

    if(trainingSettings.getFeatureExtractor().equals(TrainingSettings.FeatureExtractor.ANNOTATE_AND_ENTITY_SALIENCE)) {
        sc.addFile(trainingSettings.getBigramCountCache());
        sc.addFile(trainingSettings.getKeywordCountCache());
        sc.addFile(trainingSettings.getWordContractionsCache());
        sc.addFile(trainingSettings.getWordExpansionsCache());
        if (trainingSettings.getAidaDefaultConf().equals("db")) {
            sc.addFile(trainingSettings.getDatabaseAida());
        } else {
            sc.addFile(trainingSettings.getCassandraConfig());
        }
    }

    SQLContext sqlContext = new SQLContext(sc);

    int partitionNumber = 3 * totalCores;

    //Read training documents serialized as SCAS
    JavaPairRDD<Text, SCAS> documents = sc.sequenceFile(input, Text.class, SCAS.class, partitionNumber);

    //Instantiate a training spark runner
    TrainingSparkRunner trainingSparkRunner = new TrainingSparkRunner();

    PipelineModel trainingModel = (PipelineModel) sc.objectFile(model).first();

    //Evaluate the model and write down the evaluation metrics.
    trainingSparkRunner.evaluate(sc, sqlContext, documents, trainingModel, trainingSettings,
        output+"/"+sc.getConf().getAppId()+"/");

    return 0;
}
Example #5
Source File: BikeRentalPrediction.java From Apache-Spark-2x-for-Java-Developers with MIT License
public static void main(String[] args) {
  System.setProperty("hadoop.home.dir", "E:\\sumitK\\Hadoop");

  SparkSession sparkSession = SparkSession
      .builder()
      .master("local")
      .config("spark.sql.warehouse.dir", "file:///E:/sumitK/Hadoop/warehouse")
      .appName("BikeRentalPrediction").getOrCreate();

  Logger rootLogger = LogManager.getRootLogger();
  rootLogger.setLevel(Level.WARN);

  // We use the read method to load the data and set a few options:
  //   'format': specifies the Spark CSV data source
  //   'header': set to true to indicate that the first line of the CSV data file is a header
  // The file is called 'hour.csv'.
  Dataset<Row> ds = sparkSession.read()
      .format("org.apache.spark.sql.execution.datasources.csv.CSVFileFormat")
      .option("header", "true")
      .load("E:\\sumitK\\Hadoop\\Bike-Sharing-Dataset\\hour.csv");

  ds.cache();

  ds.select("season").show();
  ds.show();

  System.out.println("Our dataset has rows :: " + ds.count());

  Dataset<Row> df = ds.drop("instant").drop("dteday").drop("casual").drop("registered");
  df.printSchema();

  // col("...") is preferable to df.col("...")
  Dataset<Row> dformatted = df.select(
      col("season").cast(DataTypes.IntegerType),
      col("yr").cast(DataTypes.IntegerType),
      col("mnth").cast(DataTypes.IntegerType),
      col("hr").cast(DataTypes.IntegerType),
      col("holiday").cast(DataTypes.IntegerType),
      col("weekday").cast(DataTypes.IntegerType),
      col("workingday").cast(DataTypes.IntegerType),
      col("weathersit").cast(DataTypes.IntegerType),
      col("temp").cast(DataTypes.IntegerType),
      col("atemp").cast(DataTypes.IntegerType),
      col("hum").cast(DataTypes.IntegerType),
      col("windspeed").cast(DataTypes.IntegerType),
      col("cnt").cast(DataTypes.IntegerType));

  dformatted.printSchema();

  Dataset<Row>[] data = dformatted.randomSplit(new double[]{0.7, 0.3});
  System.out.println("We have training examples count :: " + data[0].count()
      + " and test examples count ::" + data[1].count());

  // Remove the 'cnt' label column and form the array of feature column names.
  String[] featuresCols = dformatted.drop("cnt").columns();

  for(String str : featuresCols) {
    System.out.println(str + " :: ");
  }

  // This concatenates all feature columns into a single feature vector in a new column "rawFeatures".
  VectorAssembler vectorAssembler = new VectorAssembler().setInputCols(featuresCols).setOutputCol("rawFeatures");

  // This identifies categorical features and indexes them.
  VectorIndexer vectorIndexer = new VectorIndexer().setInputCol("rawFeatures").setOutputCol("features").setMaxCategories(4);

  // Takes the "features" column and learns to predict "cnt".
  GBTRegressor gbt = new GBTRegressor().setLabelCol("cnt");

  // Define a grid of hyperparameters to test:
  //   - maxDepth: max depth of each decision tree in the GBT ensemble
  //   - maxIter: iterations, i.e., number of trees in each GBT ensemble
  // In this example we keep these values small. In practice, to get the highest accuracy, you would
  // likely want to try deeper trees (10 or higher) and more trees in the ensemble (>100).
  ParamMap[] paramGrid = new ParamGridBuilder()
      .addGrid(gbt.maxDepth(), new int[]{2, 5})
      .addGrid(gbt.maxIter(), new int[]{10, 100})
      .build();

  // We define an evaluation metric. This tells CrossValidator how well we are doing
  // by comparing the true labels with predictions.
  RegressionEvaluator evaluator = new RegressionEvaluator()
      .setMetricName("rmse")
      .setLabelCol(gbt.getLabelCol())
      .setPredictionCol(gbt.getPredictionCol());

  // Declare the CrossValidator, which runs model tuning for us.
  CrossValidator cv = new CrossValidator().setEstimator(gbt).setEvaluator(evaluator).setEstimatorParamMaps(paramGrid);

  Pipeline pipeline = new Pipeline().setStages(new PipelineStage[]{vectorAssembler, vectorIndexer, cv});

  PipelineModel pipelineModel = pipeline.fit(data[0]);

  Dataset<Row> predictions = pipelineModel.transform(data[1]);

  predictions.show();
  //predictions.select("cnt", "prediction", *featuresCols);
}
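The example stops at predictions.show(). If you also want to see which hyperparameters won the cross-validation, the tuned model can be pulled out of the fitted pipeline. A short sketch continuing from the pipelineModel variable above; the stage index 2 assumes the three-stage pipeline (assembler, indexer, cross-validator) defined in the example, and the casts assume imports of org.apache.spark.ml.tuning.CrossValidatorModel and org.apache.spark.ml.regression.GBTRegressionModel.

// The third pipeline stage (index 2) is the fitted CrossValidatorModel wrapping the GBT.
CrossValidatorModel cvModel = (CrossValidatorModel) pipelineModel.stages()[2];
GBTRegressionModel bestGbt = (GBTRegressionModel) cvModel.bestModel();

System.out.println("Best maxDepth = " + bestGbt.getMaxDepth());
System.out.println("Best maxIter  = " + bestGbt.getMaxIter());
System.out.println("Feature importances: " + bestGbt.featureImportances());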
Example #6
Source File: JavaGradientBoostedTreeRegressorExample.java From SparkDemo with MIT License
public static void main(String[] args) {
  SparkSession spark = SparkSession
    .builder()
    .appName("JavaGradientBoostedTreeRegressorExample")
    .getOrCreate();

  // $example on$
  // Load and parse the data file, converting it to a DataFrame.
  Dataset<Row> data = spark.read().format("libsvm").load("data/mllib/sample_libsvm_data.txt");

  // Automatically identify categorical features, and index them.
  // Set maxCategories so features with > 4 distinct values are treated as continuous.
  VectorIndexerModel featureIndexer = new VectorIndexer()
    .setInputCol("features")
    .setOutputCol("indexedFeatures")
    .setMaxCategories(4)
    .fit(data);

  // Split the data into training and test sets (30% held out for testing).
  Dataset<Row>[] splits = data.randomSplit(new double[] {0.7, 0.3});
  Dataset<Row> trainingData = splits[0];
  Dataset<Row> testData = splits[1];

  // Train a GBT model.
  GBTRegressor gbt = new GBTRegressor()
    .setLabelCol("label")
    .setFeaturesCol("indexedFeatures")
    .setMaxIter(10);

  // Chain indexer and GBT in a Pipeline.
  Pipeline pipeline = new Pipeline().setStages(new PipelineStage[] {featureIndexer, gbt});

  // Train model. This also runs the indexer.
  PipelineModel model = pipeline.fit(trainingData);

  // Make predictions.
  Dataset<Row> predictions = model.transform(testData);

  // Select example rows to display.
  predictions.select("prediction", "label", "features").show(5);

  // Select (prediction, true label) and compute test error.
  RegressionEvaluator evaluator = new RegressionEvaluator()
    .setLabelCol("label")
    .setPredictionCol("prediction")
    .setMetricName("rmse");
  double rmse = evaluator.evaluate(predictions);
  System.out.println("Root Mean Squared Error (RMSE) on test data = " + rmse);

  GBTRegressionModel gbtModel = (GBTRegressionModel)(model.stages()[1]);
  System.out.println("Learned regression GBT model:\n" + gbtModel.toDebugString());
  // $example off$

  spark.stop();
}
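RMSE is only one of the metrics RegressionEvaluator supports. A small follow-on sketch, reusing the evaluator and predictions variables from the example above; "mae" and "r2" are standard metric names in Spark 2.x.

double mae = evaluator.setMetricName("mae").evaluate(predictions);
double r2  = evaluator.setMetricName("r2").evaluate(predictions);
System.out.println("MAE on test data = " + mae);
System.out.println("R^2 on test data = " + r2);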
Example #7
Source File: JavaRandomForestRegressorExample.java From SparkDemo with MIT License
public static void main(String[] args) {
  SparkSession spark = SparkSession
    .builder()
    .appName("JavaRandomForestRegressorExample")
    .getOrCreate();

  // $example on$
  // Load and parse the data file, converting it to a DataFrame.
  Dataset<Row> data = spark.read().format("libsvm").load("data/mllib/sample_libsvm_data.txt");

  // Automatically identify categorical features, and index them.
  // Set maxCategories so features with > 4 distinct values are treated as continuous.
  VectorIndexerModel featureIndexer = new VectorIndexer()
    .setInputCol("features")
    .setOutputCol("indexedFeatures")
    .setMaxCategories(4)
    .fit(data);

  // Split the data into training and test sets (30% held out for testing)
  Dataset<Row>[] splits = data.randomSplit(new double[] {0.7, 0.3});
  Dataset<Row> trainingData = splits[0];
  Dataset<Row> testData = splits[1];

  // Train a RandomForest model.
  RandomForestRegressor rf = new RandomForestRegressor()
    .setLabelCol("label")
    .setFeaturesCol("indexedFeatures");

  // Chain indexer and forest in a Pipeline
  Pipeline pipeline = new Pipeline()
    .setStages(new PipelineStage[] {featureIndexer, rf});

  // Train model. This also runs the indexer.
  PipelineModel model = pipeline.fit(trainingData);

  // Make predictions.
  Dataset<Row> predictions = model.transform(testData);

  // Select example rows to display.
  predictions.select("prediction", "label", "features").show(5);

  // Select (prediction, true label) and compute test error
  RegressionEvaluator evaluator = new RegressionEvaluator()
    .setLabelCol("label")
    .setPredictionCol("prediction")
    .setMetricName("rmse");
  double rmse = evaluator.evaluate(predictions);
  System.out.println("Root Mean Squared Error (RMSE) on test data = " + rmse);

  RandomForestRegressionModel rfModel = (RandomForestRegressionModel)(model.stages()[1]);
  System.out.println("Learned regression forest model:\n" + rfModel.toDebugString());
  // $example off$

  spark.stop();
}
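Beyond toDebugString(), the trained forest also exposes per-feature importance scores, which line up with the "indexedFeatures" vector produced by the VectorIndexer stage. A short sketch continuing from the rfModel variable above; it assumes org.apache.spark.ml.linalg.Vector is imported or referenced by its full name as shown.

// Importances form a Vector aligned with the columns of the "indexedFeatures" vector.
org.apache.spark.ml.linalg.Vector importances = rfModel.featureImportances();
System.out.println("Number of trees in the forest: " + rfModel.trees().length);
System.out.println("Feature importances: " + importances);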
Example #8
Source File: JavaDecisionTreeRegressionExample.java From SparkDemo with MIT License
public static void main(String[] args) {
  SparkSession spark = SparkSession
    .builder()
    .appName("JavaDecisionTreeRegressionExample")
    .getOrCreate();

  // $example on$
  // Load the data stored in LIBSVM format as a DataFrame.
  Dataset<Row> data = spark.read().format("libsvm")
    .load("data/mllib/sample_libsvm_data.txt");

  // Automatically identify categorical features, and index them.
  // Set maxCategories so features with > 4 distinct values are treated as continuous.
  VectorIndexerModel featureIndexer = new VectorIndexer()
    .setInputCol("features")
    .setOutputCol("indexedFeatures")
    .setMaxCategories(4)
    .fit(data);

  // Split the data into training and test sets (30% held out for testing).
  Dataset<Row>[] splits = data.randomSplit(new double[]{0.7, 0.3});
  Dataset<Row> trainingData = splits[0];
  Dataset<Row> testData = splits[1];

  // Train a DecisionTree model.
  DecisionTreeRegressor dt = new DecisionTreeRegressor()
    .setFeaturesCol("indexedFeatures");

  // Chain indexer and tree in a Pipeline.
  Pipeline pipeline = new Pipeline()
    .setStages(new PipelineStage[]{featureIndexer, dt});

  // Train model. This also runs the indexer.
  PipelineModel model = pipeline.fit(trainingData);

  // Make predictions.
  Dataset<Row> predictions = model.transform(testData);

  // Select example rows to display.
  predictions.select("label", "features").show(5);

  // Select (prediction, true label) and compute test error.
  RegressionEvaluator evaluator = new RegressionEvaluator()
    .setLabelCol("label")
    .setPredictionCol("prediction")
    .setMetricName("rmse");
  double rmse = evaluator.evaluate(predictions);
  System.out.println("Root Mean Squared Error (RMSE) on test data = " + rmse);

  DecisionTreeRegressionModel treeModel = (DecisionTreeRegressionModel) (model.stages()[1]);
  System.out.println("Learned regression tree model:\n" + treeModel.toDebugString());
  // $example off$

  spark.stop();
}