org.apache.spark.mllib.stat.Statistics Java Examples
The following examples show how to use
org.apache.spark.mllib.stat.Statistics.
You can vote up the examples you find useful or vote down those you don't,
and follow the links above each example to the original project or source file. You may also check out the related API usage in the sidebar.
Example #1
Source File: JavaHypothesisTestingKolmogorovSmirnovTestExample.java From SparkDemo with MIT License | 6 votes |
public static void main(String[] args) { SparkConf conf = new SparkConf().setAppName("JavaHypothesisTestingKolmogorovSmirnovTestExample"); JavaSparkContext jsc = new JavaSparkContext(conf); // $example on$ JavaDoubleRDD data = jsc.parallelizeDoubles(Arrays.asList(0.1, 0.15, 0.2, 0.3, 0.25)); KolmogorovSmirnovTestResult testResult = Statistics.kolmogorovSmirnovTest(data, "norm", 0.0, 1.0); // summary of the test including the p-value, test statistic, and null hypothesis // if our p-value indicates significance, we can reject the null hypothesis System.out.println(testResult); // $example off$ jsc.stop(); }
Example #2
Source File: JavaSummaryStatisticsExample.java From SparkDemo with MIT License | 6 votes |
public static void main(String[] args) { SparkConf conf = new SparkConf().setAppName("JavaSummaryStatisticsExample"); JavaSparkContext jsc = new JavaSparkContext(conf); // $example on$ JavaRDD<Vector> mat = jsc.parallelize( Arrays.asList( Vectors.dense(1.0, 10.0, 100.0), Vectors.dense(2.0, 20.0, 200.0), Vectors.dense(3.0, 30.0, 300.0) ) ); // an RDD of Vectors // Compute column summary statistics. MultivariateStatisticalSummary summary = Statistics.colStats(mat.rdd()); System.out.println(summary.mean()); // a dense vector containing the mean value for each column System.out.println(summary.variance()); // column-wise variance System.out.println(summary.numNonzeros()); // number of nonzeros in each column // $example off$ jsc.stop(); }
Example #3
Source File: JavaCorrelationsExample.java From SparkDemo with MIT License | 5 votes |
public static void main(String[] args) { SparkConf conf = new SparkConf().setAppName("JavaCorrelationsExample"); JavaSparkContext jsc = new JavaSparkContext(conf); // $example on$ JavaDoubleRDD seriesX = jsc.parallelizeDoubles( Arrays.asList(1.0, 2.0, 3.0, 3.0, 5.0)); // a series // must have the same number of partitions and cardinality as seriesX JavaDoubleRDD seriesY = jsc.parallelizeDoubles( Arrays.asList(11.0, 22.0, 33.0, 33.0, 555.0)); // compute the correlation using Pearson's method. Enter "spearman" for Spearman's method. // If a method is not specified, Pearson's method will be used by default. Double correlation = Statistics.corr(seriesX.srdd(), seriesY.srdd(), "pearson"); System.out.println("Correlation is: " + correlation); // note that each Vector is a row and not a column JavaRDD<Vector> data = jsc.parallelize( Arrays.asList( Vectors.dense(1.0, 10.0, 100.0), Vectors.dense(2.0, 20.0, 200.0), Vectors.dense(5.0, 33.0, 366.0) ) ); // calculate the correlation matrix using Pearson's method. // Use "spearman" for Spearman's method. // If a method is not specified, Pearson's method will be used by default. Matrix correlMatrix = Statistics.corr(data.rdd(), "pearson"); System.out.println(correlMatrix.toString()); // $example off$ jsc.stop(); }
Example #4
Source File: JavaHypothesisTestingExample.java From SparkDemo with MIT License | 4 votes |
public static void main(String[] args) { SparkConf conf = new SparkConf().setAppName("JavaHypothesisTestingExample"); JavaSparkContext jsc = new JavaSparkContext(conf); // $example on$ // a vector composed of the frequencies of events Vector vec = Vectors.dense(0.1, 0.15, 0.2, 0.3, 0.25); // compute the goodness of fit. If a second vector to test against is not supplied // as a parameter, the test runs against a uniform distribution. ChiSqTestResult goodnessOfFitTestResult = Statistics.chiSqTest(vec); // summary of the test including the p-value, degrees of freedom, test statistic, // the method used, and the null hypothesis. System.out.println(goodnessOfFitTestResult + "\n"); // Create a contingency matrix ((1.0, 2.0), (3.0, 4.0), (5.0, 6.0)) Matrix mat = Matrices.dense(3, 2, new double[]{1.0, 3.0, 5.0, 2.0, 4.0, 6.0}); // conduct Pearson's independence test on the input contingency matrix ChiSqTestResult independenceTestResult = Statistics.chiSqTest(mat); // summary of the test including the p-value, degrees of freedom... System.out.println(independenceTestResult + "\n"); // an RDD of labeled points JavaRDD<LabeledPoint> obs = jsc.parallelize( Arrays.asList( new LabeledPoint(1.0, Vectors.dense(1.0, 0.0, 3.0)), new LabeledPoint(1.0, Vectors.dense(1.0, 2.0, 0.0)), new LabeledPoint(-1.0, Vectors.dense(-1.0, 0.0, -0.5)) ) ); // The contingency table is constructed from the raw (feature, label) pairs and used to conduct // the independence test. Returns an array containing the ChiSquaredTestResult for every feature // against the label. ChiSqTestResult[] featureTestResults = Statistics.chiSqTest(obs.rdd()); int i = 1; for (ChiSqTestResult result : featureTestResults) { System.out.println("Column " + i + ":"); System.out.println(result + "\n"); // summary of the test i++; } // $example off$ jsc.stop(); }
Example #5
Source File: SparkStatistics.java From spliceengine with GNU Affero General Public License v3.0 | 4 votes |
/**
 * Computes column-wise summary statistics for the selected fields of an
 * {@code ExecRow} RDD.
 *
 * @param resultSetRDD    rows whose fields are summarized
 * @param fieldsToConvert indexes of the fields extracted into each vector
 * @return MLlib column summary statistics over the converted vector RDD
 * @throws StandardException if the row-to-vector conversion fails
 */
private static MultivariateStatisticalSummary getColumnStatisticsSummary(JavaRDD<ExecRow> resultSetRDD,
                                                                         int[] fieldsToConvert) throws StandardException {
    JavaRDD<Vector> vectors = SparkMLibUtils.locatedRowRDDToVectorRDD(resultSetRDD, fieldsToConvert);
    return Statistics.colStats(vectors.rdd());
}