Java Code Examples for com.tdunning.math.stats.TDigest#add()
The following examples show how to use com.tdunning.math.stats.TDigest#add().
Each example is taken from an open-source project; the source file, project, and license are noted above it.
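Before the project examples, a minimal self-contained sketch may help orient readers: it exercises both add() overloads, add(double) to record a single observation and add(TDigest) to merge another digest. The class name and the chosen values are illustrative only; nothing beyond the t-digest library itself is assumed.

import com.tdunning.math.stats.TDigest;

import java.util.Random;

public class TDigestAddSketch {
    public static void main(String[] args) {
        // one digest per stream; 100 is the compression used throughout the examples below
        TDigest first = TDigest.createMergingDigest(100);
        TDigest second = TDigest.createMergingDigest(100);

        Random random = new Random();
        for (int i = 0; i < 10000; i++) {
            first.add(random.nextDouble());   // add(double): record one value
            second.add(random.nextDouble());
        }

        // add(TDigest): fold the second digest's centroids into the first
        first.add(second);

        System.out.printf("count=%d, median=%.4f, p99=%.4f%n",
                first.size(), first.quantile(0.5), first.quantile(0.99));
    }
}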
Example 1
Source File: TDigestTest.java From kylin-on-parquet-v2 with Apache License 2.0
@Test
public void testBasic() {
    int times = 1;
    int compression = 100;
    for (int t = 0; t < times; t++) {
        TDigest tDigest = TDigest.createAvlTreeDigest(compression);
        Random random = new Random();
        int dataSize = 10000;
        List<Double> dataset = Lists.newArrayListWithCapacity(dataSize);
        for (int i = 0; i < dataSize; i++) {
            double d = random.nextDouble();
            tDigest.add(d);
            dataset.add(d);
        }
        Collections.sort(dataset);
        // the digest's median estimate should be close to the exact median of the data
        double actualResult = tDigest.quantile(0.5);
        double expectedResult = MathUtil.findMedianInSortedList(dataset);
        assertEquals(expectedResult, actualResult, 0.01);
    }
}
Example 2
Source File: PercentileCounterTest.java From kylin with Apache License 2.0
@Test
public void testTDigest() {
    double compression = 100;
    double quantile = 0.5;
    PercentileCounter counter = new PercentileCounter(compression, quantile);
    TDigest tDigest = TDigest.createAvlTreeDigest(compression);
    Random random = new Random();
    int dataSize = 10000;
    // note: dataset is never populated in this test, so the sort below is a no-op
    List<Double> dataset = Lists.newArrayListWithCapacity(dataSize);
    for (int i = 0; i < dataSize; i++) {
        double d = random.nextDouble();
        counter.add(d);
        tDigest.add(d);
    }
    double actualResult = counter.getResultEstimate();
    Collections.sort(dataset);
    // both digests saw the identical stream, so the estimates must match exactly
    double expectedResult = tDigest.quantile(quantile);
    assertEquals(expectedResult, actualResult, 0);
}
Example 3
Source File: TDigestTest.java From kylin with Apache License 2.0
@Test
public void testBasic() {
    int times = 1;
    int compression = 100;
    for (int t = 0; t < times; t++) {
        TDigest tDigest = TDigest.createAvlTreeDigest(compression);
        Random random = new Random();
        int dataSize = 10000;
        List<Double> dataset = Lists.newArrayListWithCapacity(dataSize);
        for (int i = 0; i < dataSize; i++) {
            double d = random.nextDouble();
            tDigest.add(d);
            dataset.add(d);
        }
        Collections.sort(dataset);
        double actualResult = tDigest.quantile(0.5);
        double expectedResult = MathUtil.findMedianInSortedList(dataset);
        assertEquals(expectedResult, actualResult, 0.01);
    }
}
Example 4
Source File: ComparisonTest.java From t-digest with Apache License 2.0
private void compareQD(PrintWriter out, AbstractContinousDistribution gen, String tag, long scale) {
    for (double compression : new double[]{10, 20, 50, 100, 200, 500, 1000, 2000}) {
        QDigest qd = new QDigest(compression);
        TDigest dist = new MergingDigest(compression);
        double[] data = new double[100000];
        for (int i = 0; i < 100000; i++) {
            double x = gen.nextDouble();
            dist.add(x);
            qd.offer((long) (x * scale));
            data[i] = x;
        }
        dist.compress();
        Arrays.sort(data);
        for (double q : new double[]{1e-5, 1e-4, 0.001, 0.01, 0.1, 0.5, 0.9, 0.99, 0.999, 0.9999, 0.99999}) {
            double x1 = dist.quantile(q);
            double x2 = (double) qd.getQuantile(q) / scale;
            // compare the achieved quantile error of t-digest vs QDigest at each q
            double e1 = Dist.cdf(x1, data) - q;
            double e2 = Dist.cdf(x2, data) - q;
            out.printf("%s,%.0f,%.8f,%.10g,%.10g,%d,%d\n",
                    tag, compression, q, e1, e2, dist.smallByteSize(), QDigest.serialize(qd).length);
        }
    }
}
Example 5
Source File: ObjectSerDeUtilsTest.java From incubator-pinot with Apache License 2.0
@Test
public void testTDigest() {
    for (int i = 0; i < NUM_ITERATIONS; i++) {
        TDigest expected = TDigest.createMergingDigest(PercentileTDigestAggregationFunction.DEFAULT_TDIGEST_COMPRESSION);
        int size = RANDOM.nextInt(100) + 1;
        for (int j = 0; j < size; j++) {
            expected.add(RANDOM.nextDouble());
        }
        byte[] bytes = ObjectSerDeUtils.serialize(expected);
        TDigest actual = ObjectSerDeUtils.deserialize(bytes, ObjectSerDeUtils.ObjectType.TDigest);
        // quantiles must survive the serialize/deserialize round trip
        for (int j = 0; j <= 100; j++) {
            assertEquals(actual.quantile(j / 100.0), expected.quantile(j / 100.0), 1e-5);
        }
    }
}
Example 6
Source File: PercentileTDigestAggregationFunction.java From incubator-pinot with Apache License 2.0
@Override
public void aggregateGroupBySV(int length, int[] groupKeyArray, GroupByResultHolder groupByResultHolder,
        Map<ExpressionContext, BlockValSet> blockValSetMap) {
    BlockValSet blockValSet = blockValSetMap.get(_expression);
    if (blockValSet.getValueType() != DataType.BYTES) {
        // raw double values: add each one to its group's digest
        double[] doubleValues = blockValSet.getDoubleValuesSV();
        for (int i = 0; i < length; i++) {
            getDefaultTDigest(groupByResultHolder, groupKeyArray[i]).add(doubleValues[i]);
        }
    } else {
        // Serialized TDigest: deserialize and merge into the group's digest
        byte[][] bytesValues = blockValSet.getBytesValuesSV();
        for (int i = 0; i < length; i++) {
            TDigest value = ObjectSerDeUtils.TDIGEST_SER_DE.deserialize(bytesValues[i]);
            int groupKey = groupKeyArray[i];
            TDigest tDigest = groupByResultHolder.getResult(groupKey);
            if (tDigest != null) {
                tDigest.add(value);
            } else {
                groupByResultHolder.setValueForKey(groupKey, value);
            }
        }
    }
}
Example 7
Source File: PercentileCounterTest.java From kylin-on-parquet-v2 with Apache License 2.0
@Test
public void testTDigest() {
    double compression = 100;
    double quantile = 0.5;
    PercentileCounter counter = new PercentileCounter(compression, quantile);
    TDigest tDigest = TDigest.createAvlTreeDigest(compression);
    Random random = new Random();
    int dataSize = 10000;
    // note: dataset is never populated in this test, so the sort below is a no-op
    List<Double> dataset = Lists.newArrayListWithCapacity(dataSize);
    for (int i = 0; i < dataSize; i++) {
        double d = random.nextDouble();
        counter.add(d);
        tDigest.add(d);
    }
    double actualResult = counter.getResultEstimate();
    Collections.sort(dataset);
    // both digests saw the identical stream, so the estimates must match exactly
    double expectedResult = tDigest.quantile(quantile);
    assertEquals(expectedResult, actualResult, 0);
}
Example 8
Source File: PercentileTDigestValueAggregator.java From incubator-pinot with Apache License 2.0
@Override
public TDigest applyRawValue(TDigest value, Object rawValue) {
    if (rawValue instanceof byte[]) {
        // merge a pre-aggregated, serialized digest
        value.add(deserializeAggregatedValue((byte[]) rawValue));
    } else {
        // add a single raw numeric value
        value.add(((Number) rawValue).doubleValue());
    }
    _maxByteSize = Math.max(_maxByteSize, value.byteSize());
    return value;
}
Example 9
Source File: ComparisonTest.java From t-digest with Apache License 2.0
private void compareSQ(PrintWriter out, AbstractContinousDistribution gen, String tag) {
    double[] quantiles = {0.001, 0.01, 0.1, 0.2, 0.3, 0.5, 0.7, 0.8, 0.9, 0.99, 0.999};
    for (double compression : new double[]{10, 20, 50, 100, 200, 500, 1000, 2000}) {
        QuantileEstimator sq = new QuantileEstimator(1001);
        TDigest dist = new MergingDigest(compression);
        double[] data = new double[100000];
        for (int i = 0; i < 100000; i++) {
            double x = gen.nextDouble();
            dist.add(x);
            sq.add(x);
            data[i] = x;
        }
        dist.compress();
        Arrays.sort(data);
        List<Double> qz = sq.getQuantiles();
        for (double q : quantiles) {
            double x1 = dist.quantile(q);
            double x2 = qz.get((int) (q * 1000 + 0.5));
            double e1 = Dist.cdf(x1, data) - q;
            double e2 = Dist.cdf(x2, data) - q;
            out.printf("%s,%.0f,%.8f,%.10g,%.10g,%d,%d\n",
                    tag, compression, q, e1, e2, dist.smallByteSize(), sq.serializedSize());
        }
    }
}
Example 10
Source File: SegmentGenerationWithBytesTypeTest.java From incubator-pinot with Apache License 2.0
/**
 * Build Avro file containing serialized TDigest bytes.
 *
 * @param schema Schema of data (one fixed and one variable column)
 * @param _fixedExpected Serialized bytes of fixed length column are populated here
 * @param _varExpected Serialized bytes of variable length column are populated here
 * @throws IOException
 */
private void buildAvro(Schema schema, List<byte[]> _fixedExpected, List<byte[]> _varExpected)
        throws IOException {
    org.apache.avro.Schema avroSchema = AvroUtils.getAvroSchemaFromPinotSchema(schema);
    try (DataFileWriter<GenericData.Record> recordWriter =
            new DataFileWriter<>(new GenericDatumWriter<>(avroSchema))) {
        if (!new File(AVRO_DIR_NAME).mkdir()) {
            throw new RuntimeException("Unable to create test directory: " + AVRO_DIR_NAME);
        }
        recordWriter.create(avroSchema, new File(AVRO_DIR_NAME, AVRO_NAME));
        for (int i = 0; i < NUM_ROWS; i++) {
            GenericData.Record record = new GenericData.Record(avroSchema);
            TDigest tDigest =
                    TDigest.createMergingDigest(PercentileTDigestAggregationFunction.DEFAULT_TDIGEST_COMPRESSION);
            tDigest.add(_random.nextDouble());
            ByteBuffer buffer = ByteBuffer.allocate(tDigest.byteSize());
            tDigest.asBytes(buffer);
            _fixedExpected.add(buffer.array());
            buffer.flip();
            record.put(FIXED_BYTES_UNSORTED_COLUMN, buffer);
            // every other row gets one extra value so the serialized size varies
            if (i % 2 == 0) {
                tDigest.add(_random.nextDouble());
            }
            buffer = ByteBuffer.allocate(tDigest.byteSize());
            tDigest.asBytes(buffer);
            _varExpected.add(buffer.array());
            buffer.flip();
            record.put(VARIABLE_BYTES_COLUMN, buffer);
            recordWriter.append(record);
        }
    }
}
Example 11
Source File: PreAggregatedPercentileTDigestStarTreeV2Test.java From incubator-pinot with Apache License 2.0
@Override
Object getRandomRawValue(Random random) {
    TDigest tDigest = TDigest.createMergingDigest(COMPRESSION);
    tDigest.add(random.nextLong());
    tDigest.add(random.nextLong());
    return ObjectSerDeUtils.TDIGEST_SER_DE.serialize(tDigest);
}
Example 12
Source File: PercentileTDigestValueAggregator.java From incubator-pinot with Apache License 2.0
@Override
public TDigest getInitialAggregatedValue(Object rawValue) {
    TDigest initialValue;
    if (rawValue instanceof byte[]) {
        // pre-aggregated: deserialize the stored digest
        byte[] bytes = (byte[]) rawValue;
        initialValue = deserializeAggregatedValue(bytes);
        _maxByteSize = Math.max(_maxByteSize, bytes.length);
    } else {
        // raw value: start a fresh digest seeded with the single value
        initialValue = TDigest.createMergingDigest(PercentileTDigestAggregationFunction.DEFAULT_TDIGEST_COMPRESSION);
        initialValue.add(((Number) rawValue).doubleValue());
        _maxByteSize = Math.max(_maxByteSize, initialValue.byteSize());
    }
    return initialValue;
}
Example 13
Source File: PercentileTDigestAggregationFunction.java From incubator-pinot with Apache License 2.0
@Override
public TDigest merge(TDigest intermediateResult1, TDigest intermediateResult2) {
    if (intermediateResult1.size() == 0L) {
        return intermediateResult2;
    }
    if (intermediateResult2.size() == 0L) {
        return intermediateResult1;
    }
    // add(TDigest) folds all centroids of the second digest into the first
    intermediateResult1.add(intermediateResult2);
    return intermediateResult1;
}
Example 14
Source File: PercentileTDigestMVAggregationFunction.java From incubator-pinot with Apache License 2.0
@Override
public void aggregateGroupBySV(int length, int[] groupKeyArray, GroupByResultHolder groupByResultHolder,
        Map<ExpressionContext, BlockValSet> blockValSetMap) {
    double[][] valuesArray = blockValSetMap.get(_expression).getDoubleValuesMV();
    for (int i = 0; i < length; i++) {
        TDigest tDigest = getDefaultTDigest(groupByResultHolder, groupKeyArray[i]);
        for (double value : valuesArray[i]) {
            tDigest.add(value);
        }
    }
}
Example 15
Source File: PercentileTDigestMVAggregationFunction.java From incubator-pinot with Apache License 2.0
@Override
public void aggregate(int length, AggregationResultHolder aggregationResultHolder,
        Map<ExpressionContext, BlockValSet> blockValSetMap) {
    double[][] valuesArray = blockValSetMap.get(_expression).getDoubleValuesMV();
    TDigest tDigest = getDefaultTDigest(aggregationResultHolder);
    for (int i = 0; i < length; i++) {
        for (double value : valuesArray[i]) {
            tDigest.add(value);
        }
    }
}
Example 16
Source File: PercentileTDigestValueAggregator.java From incubator-pinot with Apache License 2.0
@Override
public TDigest applyAggregatedValue(TDigest value, TDigest aggregatedValue) {
    value.add(aggregatedValue);
    _maxByteSize = Math.max(_maxByteSize, value.byteSize());
    return value;
}
Example 17
Source File: PercentileTDigestMVQueriesTest.java From incubator-pinot with Apache License 2.0
@Override
protected void buildSegment() throws Exception {
    List<GenericRow> rows = new ArrayList<>(NUM_ROWS);
    for (int i = 0; i < NUM_ROWS; i++) {
        HashMap<String, Object> valueMap = new HashMap<>();
        int numMultiValues = RANDOM.nextInt(MAX_NUM_MULTI_VALUES) + 1;
        Double[] values = new Double[numMultiValues];
        TDigest tDigest = TDigest.createMergingDigest(PercentileTDigestAggregationFunction.DEFAULT_TDIGEST_COMPRESSION);
        for (int j = 0; j < numMultiValues; j++) {
            double value = RANDOM.nextDouble() * VALUE_RANGE;
            values[j] = value;
            tDigest.add(value);
        }
        valueMap.put(DOUBLE_COLUMN, values);
        ByteBuffer byteBuffer = ByteBuffer.allocate(tDigest.byteSize());
        tDigest.asBytes(byteBuffer);
        valueMap.put(TDIGEST_COLUMN, byteBuffer.array());
        String group = GROUPS[RANDOM.nextInt(GROUPS.length)];
        valueMap.put(GROUP_BY_COLUMN, group);
        GenericRow genericRow = new GenericRow();
        genericRow.init(valueMap);
        rows.add(genericRow);
    }
    Schema schema = new Schema();
    schema.addField(new DimensionFieldSpec(DOUBLE_COLUMN, FieldSpec.DataType.DOUBLE, false));
    schema.addField(new MetricFieldSpec(TDIGEST_COLUMN, FieldSpec.DataType.BYTES));
    schema.addField(new DimensionFieldSpec(GROUP_BY_COLUMN, FieldSpec.DataType.STRING, true));
    TableConfig tableConfig = new TableConfigBuilder(TableType.OFFLINE).setTableName(TABLE_NAME).build();
    SegmentGeneratorConfig config = new SegmentGeneratorConfig(tableConfig, schema);
    config.setOutDir(INDEX_DIR.getPath());
    config.setTableName(TABLE_NAME);
    config.setSegmentName(SEGMENT_NAME);
    config.setRawIndexCreationColumns(Collections.singletonList(TDIGEST_COLUMN));
    SegmentIndexCreationDriverImpl driver = new SegmentIndexCreationDriverImpl();
    try (RecordReader recordReader = new GenericRowRecordReader(rows)) {
        driver.init(config, recordReader);
        driver.build();
    }
}
Example 18
Source File: PercentileTDigestQueriesTest.java From incubator-pinot with Apache License 2.0
protected void buildSegment() throws Exception {
    List<GenericRow> rows = new ArrayList<>(NUM_ROWS);
    for (int i = 0; i < NUM_ROWS; i++) {
        HashMap<String, Object> valueMap = new HashMap<>();
        double value = RANDOM.nextDouble() * VALUE_RANGE;
        valueMap.put(DOUBLE_COLUMN, value);
        TDigest tDigest = TDigest.createMergingDigest(PercentileTDigestAggregationFunction.DEFAULT_TDIGEST_COMPRESSION);
        tDigest.add(value);
        ByteBuffer byteBuffer = ByteBuffer.allocate(tDigest.byteSize());
        tDigest.asBytes(byteBuffer);
        valueMap.put(TDIGEST_COLUMN, byteBuffer.array());
        String group = GROUPS[RANDOM.nextInt(GROUPS.length)];
        valueMap.put(GROUP_BY_COLUMN, group);
        GenericRow genericRow = new GenericRow();
        genericRow.init(valueMap);
        rows.add(genericRow);
    }
    Schema schema = new Schema();
    schema.addField(new MetricFieldSpec(DOUBLE_COLUMN, FieldSpec.DataType.DOUBLE));
    schema.addField(new MetricFieldSpec(TDIGEST_COLUMN, FieldSpec.DataType.BYTES));
    schema.addField(new DimensionFieldSpec(GROUP_BY_COLUMN, FieldSpec.DataType.STRING, true));
    TableConfig tableConfig = new TableConfigBuilder(TableType.OFFLINE).setTableName(TABLE_NAME).build();
    SegmentGeneratorConfig config = new SegmentGeneratorConfig(tableConfig, schema);
    config.setOutDir(INDEX_DIR.getPath());
    config.setTableName(TABLE_NAME);
    config.setSegmentName(SEGMENT_NAME);
    config.setRawIndexCreationColumns(Collections.singletonList(TDIGEST_COLUMN));
    SegmentIndexCreationDriverImpl driver = new SegmentIndexCreationDriverImpl();
    try (RecordReader recordReader = new GenericRowRecordReader(rows)) {
        driver.init(config, recordReader);
        driver.build();
    }
}
Example 19
Source File: RandomWalkSamplerTest.java From log-synth with Apache License 2.0
@Test
public void testBasics() throws IOException {
    // this sampler has four variables
    // g1 is gamma distributed with alpha = 0.2, beta = 0.2
    // v1 is unit normal
    // v2 is normal with mean = 0, sd = 2
    // v3 is gamma-normal with dof=2, mean = 0.
    SchemaSampler s = new SchemaSampler(
            Resources.asCharSource(Resources.getResource("schema015.json"), Charsets.UTF_8).read());
    TDigest tdG1 = new AVLTreeDigest(500);
    TDigest tdG2 = new AVLTreeDigest(500);
    TDigest td1 = new AVLTreeDigest(500);
    TDigest td2 = new AVLTreeDigest(500);
    TDigest td3 = new AVLTreeDigest(500);
    double x1 = 0;
    double x2 = 0;
    double x3 = 0;
    for (int i = 0; i < 1000000; i++) {
        JsonNode r = s.sample();
        tdG1.add(r.get("g1").asDouble());
        tdG2.add(r.get("g2").asDouble());
        double step1 = r.get("v1").get("step").asDouble();
        td1.add(step1);
        x1 += step1;
        assertEquals(x1, r.get("v1").get("value").asDouble(), 0);
        assertEquals(x1, r.get("v1-bare").asDouble(), 0);
        double step2 = r.get("v2").get("step").asDouble();
        td2.add(step2);
        x2 += step2;
        assertEquals(x2, r.get("v2").get("value").asDouble(), 0);
        double step3 = r.get("v3").get("step").asDouble();
        td3.add(step3);
        x3 += step3;
        assertEquals(x3, r.get("v3").get("value").asDouble(), 0);
    }
    // now compare against reference distributions to test accuracy of the observed step distributions
    NormalDistribution normalDistribution = new NormalDistribution();
    GammaDistribution gd1 = new GammaDistribution(0.2, 5);
    GammaDistribution gd2 = new GammaDistribution(1, 1);
    TDistribution tDistribution = new TDistribution(2);
    for (double q : new double[]{0.001, 0.01, 0.1, 0.2, 0.5, 0.8, 0.9, 0.99, 0.99}) {
        double uG1 = gd1.cumulativeProbability(tdG1.quantile(q));
        assertEquals(q, uG1, (1 - q) * q * 10e-2);
        double uG2 = gd2.cumulativeProbability(tdG2.quantile(q));
        assertEquals(q, uG2, (1 - q) * q * 10e-2);
        double u1 = normalDistribution.cumulativeProbability(td1.quantile(q));
        assertEquals(q, u1, (1 - q) * q * 10e-2);
        double u2 = normalDistribution.cumulativeProbability(td2.quantile(q) / 2);
        assertEquals(q, u2, (1 - q) * q * 10e-2);
        double u3 = tDistribution.cumulativeProbability(td3.quantile(q));
        assertEquals(q, u3, (1 - q) * q * 10e-2);
    }
}
Example 20
Source File: BinFill.java From t-digest with Apache License 2.0
public static void main(String[] args) throws FileNotFoundException {
    try (PrintWriter out = new PrintWriter("bin-fill.csv")) {
        out.printf("iteration,dist,algo,scale,q,x,k0,k1,dk,q0,q1,count,max0,max1\n");
        // for all scale functions except the non-normalized ones
        for (ScaleFunction f : ScaleFunction.values()) {
            if (f.toString().contains("NO_NORM")) {
                continue;
            }
            System.out.printf("%s\n", f);
            // for all kinds of t-digests
            for (Util.Factory factory : Util.Factory.values()) {
                // for different distributions of values
                for (Util.Distribution distribution : Util.Distribution.values()) {
                    AbstractDistribution gen = distribution.create(new Random());
                    // do multiple passes
                    for (int i = 0; i < 10; i++) {
                        TDigest dist = factory.create();
                        if (dist instanceof MergingDigest) {
                            // can only set scale function on merging digest right now ...
                            // ability for TreeDigest coming soon
                            dist.setScaleFunction(f);
                        }
                        for (int j = 0; j < N; j++) {
                            dist.add(gen.nextDouble());
                        }
                        // now dump stats for the centroids
                        double q0 = 0;
                        double k0 = 0;
                        for (Centroid c : dist.centroids()) {
                            double q1 = q0 + (double) c.count() / N;
                            double k1 = f.k(q1, dist.compression(), dist.size());
                            out.printf("%d,%s,%s,%s,%.7f,%.7f,%.7f,%.7f,%.7f,%.7f,%.7f,%d,%.1f,%.1f\n",
                                    i, distribution, factory, f, (q0 + q1) / 2, c.mean(), k0, k1, k1 - k0, q0, q1,
                                    c.count(), dist.size() * f.max(q0, dist.compression(), dist.size()),
                                    dist.size() * f.max(q1, dist.compression(), dist.size()));
                            q0 = q1;
                            k0 = k1;
                        }
                    }
                }
            }
        }
    }
}
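Several of the Pinot examples above write a digest out with byteSize() and asBytes(ByteBuffer) but leave the read side to ObjectSerDeUtils. For completeness, here is a minimal sketch of that round trip using only the t-digest library; it assumes the static MergingDigest.fromBytes(ByteBuffer) factory, the counterpart of asBytes for merging digests, and the class name is illustrative only.

import com.tdunning.math.stats.MergingDigest;
import com.tdunning.math.stats.TDigest;

import java.nio.ByteBuffer;
import java.util.Random;

public class TDigestRoundTripSketch {
    public static void main(String[] args) {
        TDigest original = TDigest.createMergingDigest(100);
        Random random = new Random();
        for (int i = 0; i < 1000; i++) {
            original.add(random.nextDouble());
        }

        // size the buffer with byteSize(), write with asBytes(), then rewind for reading
        ByteBuffer buffer = ByteBuffer.allocate(original.byteSize());
        original.asBytes(buffer);
        buffer.flip();

        // restore the digest; its quantile estimates should match the original
        TDigest restored = MergingDigest.fromBytes(buffer);
        System.out.printf("median: %.6f vs %.6f%n",
                original.quantile(0.5), restored.quantile(0.5));
    }
}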