org.apache.cassandra.utils.EstimatedHistogram Java Examples
The following examples show how to use
org.apache.cassandra.utils.EstimatedHistogram.
Each example is drawn from an open-source project; the source file, project, and license are noted with each example.
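Before the project-specific examples, a minimal sketch of the core API may help. This is an illustration, not code from any of the projects below; it assumes only the no-argument constructor and the EstimatedHistogram methods that appear in the examples that follow (add, count, min, max, percentile):

import org.apache.cassandra.utils.EstimatedHistogram;

public class EstimatedHistogramSketch {
    public static void main(String[] args) {
        // Values land in exponentially sized buckets, so the results
        // below are estimates, not exact order statistics.
        EstimatedHistogram histogram = new EstimatedHistogram();

        // Record some sample values, e.g. latencies in microseconds.
        for (long value : new long[] { 10, 42, 42, 300, 1500, 9000 })
            histogram.add(value);

        System.out.println("count  = " + histogram.count());
        System.out.println("min    = " + histogram.min());
        System.out.println("max    = " + histogram.max());
        System.out.println("median = " + histogram.percentile(0.5));
        System.out.println("p99    = " + histogram.percentile(0.99));
    }
}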
Example #1
Source File: CollectorFunctions.java, from cassandra-exporter (Apache License 2.0)
/**
 * Collect a {@link JmxGaugeMBean} with a Cassandra {@link EstimatedHistogram} value
 * as a Prometheus summary.
 */
public static CollectorFunction<JmxGaugeMBean> histogramGaugeAsSummary(final FloatFloatFunction bucketScaleFunction) {
    return group -> {
        final Stream<SummaryMetricFamily.Summary> summaryStream = group.labeledObjects().entrySet().stream()
                .map(e -> new Object() {
                    final Labels labels = e.getKey();
                    final JmxGaugeMBean gauge = e.getValue();
                })
                .map(e -> {
                    final long[] bucketData = (long[]) e.gauge.getValue();

                    if (bucketData.length == 0) {
                        return new SummaryMetricFamily.Summary(e.labels, Float.NaN, Float.NaN,
                                Interval.asIntervals(Interval.Quantile.STANDARD_PERCENTILES, q -> Float.NaN));
                    }

                    final EstimatedHistogram histogram = new EstimatedHistogram(bucketData);

                    final Iterable<Interval> quantiles = Interval.asIntervals(Interval.Quantile.STANDARD_PERCENTILES,
                            q -> bucketScaleFunction.apply((float) histogram.percentile(q.value)));

                    return new SummaryMetricFamily.Summary(e.labels, Float.NaN, histogram.count(), quantiles);
                });

        return Stream.of(new SummaryMetricFamily(group.name(), group.help(), summaryStream));
    };
}
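Two details of this exporter are worth noting. When the gauge returns an empty bucket array, the summary is still emitted, with NaN placeholders, rather than being dropped. And even for a populated histogram, the summary's sum is Float.NaN: an EstimatedHistogram stores only per-bucket counts, so an exact sum of the observed values cannot be recovered; only the count and the estimated quantiles (rescaled through bucketScaleFunction) are exported.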
Example #2
Source File: TerminalUtils.java, from sstable-tools (Apache License 2.0)
public TermHistogram(EstimatedHistogram histogram,
                     String title,
                     Function<Long, String> offsetName,
                     Function<Long, String> countName) {
    this(new TreeMap<Number, long[]>() {
        {
            long[] counts = histogram.getBuckets(false);
            long[] offsets = histogram.getBucketOffsets();
            for (int i = 0; i < counts.length; i++) {
                long e = counts[i];
                if (e > 0) {
                    put(offsets[i], new long[] { e });
                }
            }
        }
    }, title, offsetName, countName);
}
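A hypothetical construction of the TermHistogram above, for rendering row sizes in a terminal; the histogram contents, title, and the two formatting functions are illustrative only:

EstimatedHistogram rowSizeHistogram = new EstimatedHistogram();
rowSizeHistogram.add(512);    // assumed sample values, in bytes
rowSizeHistogram.add(4096);

TermHistogram term = new TermHistogram(rowSizeHistogram,
                                       "Row size (bytes)",
                                       offset -> Long.toString(offset),  // label bucket offsets
                                       count -> Long.toString(count));   // label bucket counts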
Example #3
Source File: NodeProbe.java, from stratio-cassandra (Apache License 2.0)
public double[] metricPercentilesAsArray(long[] counts) {
    double[] result = new double[7];

    if (isEmpty(counts)) {
        Arrays.fill(result, Double.NaN);
        return result;
    }

    double[] offsetPercentiles = new double[] { 0.5, 0.75, 0.95, 0.98, 0.99 };
    long[] offsets = new EstimatedHistogram(counts.length).getBucketOffsets();
    EstimatedHistogram metric = new EstimatedHistogram(offsets, counts);

    if (metric.isOverflowed()) {
        System.err.println(String.format("EstimatedHistogram overflowed larger than %s, unable to calculate percentiles",
                                         offsets[offsets.length - 1]));
        for (int i = 0; i < result.length; i++)
            result[i] = Double.NaN;
    } else {
        for (int i = 0; i < offsetPercentiles.length; i++)
            result[i] = metric.percentile(offsetPercentiles[i]);
    }
    result[5] = metric.min();
    result[6] = metric.max();
    return result;
}
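A hypothetical helper showing how a caller might label the returned array. The index layout follows the method above (five percentiles, then min and max); the helper name is illustrative, not part of NodeProbe:

// Format the 7-element array produced by metricPercentilesAsArray.
// Indices 0-4 hold the 50th/75th/95th/98th/99th percentiles;
// indices 5 and 6 hold min and max.
static String formatPercentiles(double[] p) {
    return String.format("p50=%.0f p75=%.0f p95=%.0f p98=%.0f p99=%.0f min=%.0f max=%.0f",
                         p[0], p[1], p[2], p[3], p[4], p[5], p[6]);
}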
Example #4
Source File: StatsMetadata.java, from stratio-cassandra (Apache License 2.0)
public StatsMetadata(EstimatedHistogram estimatedRowSize,
                     EstimatedHistogram estimatedColumnCount,
                     ReplayPosition replayPosition,
                     long minTimestamp,
                     long maxTimestamp,
                     int maxLocalDeletionTime,
                     double compressionRatio,
                     StreamingHistogram estimatedTombstoneDropTime,
                     int sstableLevel,
                     List<ByteBuffer> minColumnNames,
                     List<ByteBuffer> maxColumnNames,
                     boolean hasLegacyCounterShards,
                     long repairedAt) {
    this.estimatedRowSize = estimatedRowSize;
    this.estimatedColumnCount = estimatedColumnCount;
    this.replayPosition = replayPosition;
    this.minTimestamp = minTimestamp;
    this.maxTimestamp = maxTimestamp;
    this.maxLocalDeletionTime = maxLocalDeletionTime;
    this.compressionRatio = compressionRatio;
    this.estimatedTombstoneDropTime = estimatedTombstoneDropTime;
    this.sstableLevel = sstableLevel;
    this.minColumnNames = minColumnNames;
    this.maxColumnNames = maxColumnNames;
    this.hasLegacyCounterShards = hasLegacyCounterShards;
    this.repairedAt = repairedAt;
}
Example #5
Source File: SSTableReader.java, from stratio-cassandra (Apache License 2.0)
public EstimatedHistogram getEstimatedRowSize() {
    return sstableMetadata.estimatedRowSize;
}
Example #6
Source File: SSTableReader.java, from stratio-cassandra (Apache License 2.0)
public EstimatedHistogram getEstimatedColumnCount() {
    return sstableMetadata.estimatedColumnCount;
}
Example #7
Source File: LegacyMetadataSerializer.java, from stratio-cassandra (Apache License 2.0)
/**
 * Legacy serializer deserializes all components no matter what types are specified.
 */
@Override
public Map<MetadataType, MetadataComponent> deserialize(Descriptor descriptor, EnumSet<MetadataType> types) throws IOException {
    Map<MetadataType, MetadataComponent> components = Maps.newHashMap();
    File statsFile = new File(descriptor.filenameFor(Component.STATS));
    if (!statsFile.exists() && types.contains(MetadataType.STATS)) {
        components.put(MetadataType.STATS, MetadataCollector.defaultStatsMetadata());
    } else {
        try (DataInputStream in = new DataInputStream(new BufferedInputStream(new FileInputStream(statsFile)))) {
            EstimatedHistogram rowSizes = EstimatedHistogram.serializer.deserialize(in);
            EstimatedHistogram columnCounts = EstimatedHistogram.serializer.deserialize(in);
            ReplayPosition replayPosition = ReplayPosition.serializer.deserialize(in);
            long minTimestamp = in.readLong();
            long maxTimestamp = in.readLong();
            int maxLocalDeletionTime = in.readInt();
            double bloomFilterFPChance = in.readDouble();
            double compressionRatio = in.readDouble();
            String partitioner = in.readUTF();
            int nbAncestors = in.readInt();
            Set<Integer> ancestors = new HashSet<>(nbAncestors);
            for (int i = 0; i < nbAncestors; i++)
                ancestors.add(in.readInt());
            StreamingHistogram tombstoneHistogram = StreamingHistogram.serializer.deserialize(in);
            int sstableLevel = 0;
            if (in.available() > 0)
                sstableLevel = in.readInt();
            int colCount = in.readInt();
            List<ByteBuffer> minColumnNames = new ArrayList<>(colCount);
            for (int i = 0; i < colCount; i++)
                minColumnNames.add(ByteBufferUtil.readWithShortLength(in));
            colCount = in.readInt();
            List<ByteBuffer> maxColumnNames = new ArrayList<>(colCount);
            for (int i = 0; i < colCount; i++)
                maxColumnNames.add(ByteBufferUtil.readWithShortLength(in));

            if (types.contains(MetadataType.VALIDATION))
                components.put(MetadataType.VALIDATION, new ValidationMetadata(partitioner, bloomFilterFPChance));
            if (types.contains(MetadataType.STATS))
                components.put(MetadataType.STATS, new StatsMetadata(rowSizes,
                                                                     columnCounts,
                                                                     replayPosition,
                                                                     minTimestamp,
                                                                     maxTimestamp,
                                                                     maxLocalDeletionTime,
                                                                     compressionRatio,
                                                                     tombstoneHistogram,
                                                                     sstableLevel,
                                                                     minColumnNames,
                                                                     maxColumnNames,
                                                                     true,
                                                                     ActiveRepairService.UNREPAIRED_SSTABLE));
            if (types.contains(MetadataType.COMPACTION))
                components.put(MetadataType.COMPACTION, new CompactionMetadata(ancestors, null));
        }
    }
    return components;
}
Example #8
Source File: MetadataCollector.java, from stratio-cassandra (Apache License 2.0)
static EstimatedHistogram defaultColumnCountHistogram() {
    // EH of 114 can track a max value of 2395318855, i.e., > 2B columns
    return new EstimatedHistogram(114);
}
Example #9
Source File: MetadataCollector.java, from stratio-cassandra (Apache License 2.0)
static EstimatedHistogram defaultRowSizeHistogram() {
    // EH of 150 can track a max value of 1697806495183, i.e., ~1.5TB
    return new EstimatedHistogram(150);
}
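The bucket count passed to the constructor (114 and 150 above) fixes the largest value the histogram can record before it overflows, and the figures in the two comments can be checked with a small sketch. This is illustrative (the helper name is hypothetical) and assumes only getBucketOffsets(), which the earlier examples also use:

// The last bucket offset is the largest value an EstimatedHistogram of the
// given size can record before isOverflowed() becomes true.
static long largestTrackableValue(int bucketCount) {
    long[] offsets = new EstimatedHistogram(bucketCount).getBucketOffsets();
    return offsets[offsets.length - 1];
}

// largestTrackableValue(114) == 2395318855L, matching the comment in
// defaultColumnCountHistogram(); largestTrackableValue(150) == 1697806495183L.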
Example #10
Source File: StatsMetadata.java, from stratio-cassandra (Apache License 2.0)
public StatsMetadata deserialize(Descriptor.Version version, DataInput in) throws IOException {
    EstimatedHistogram rowSizes = EstimatedHistogram.serializer.deserialize(in);
    EstimatedHistogram columnCounts = EstimatedHistogram.serializer.deserialize(in);
    ReplayPosition replayPosition = ReplayPosition.serializer.deserialize(in);
    long minTimestamp = in.readLong();
    long maxTimestamp = in.readLong();
    int maxLocalDeletionTime = in.readInt();
    double compressionRatio = in.readDouble();
    StreamingHistogram tombstoneHistogram = StreamingHistogram.serializer.deserialize(in);
    int sstableLevel = in.readInt();
    long repairedAt = 0;
    if (version.hasRepairedAt)
        repairedAt = in.readLong();
    int colCount = in.readInt();
    List<ByteBuffer> minColumnNames = new ArrayList<>(colCount);
    for (int i = 0; i < colCount; i++)
        minColumnNames.add(ByteBufferUtil.readWithShortLength(in));
    colCount = in.readInt();
    List<ByteBuffer> maxColumnNames = new ArrayList<>(colCount);
    for (int i = 0; i < colCount; i++)
        maxColumnNames.add(ByteBufferUtil.readWithShortLength(in));
    boolean hasLegacyCounterShards = true;
    if (version.tracksLegacyCounterShards)
        hasLegacyCounterShards = in.readBoolean();
    return new StatsMetadata(rowSizes,
                             columnCounts,
                             replayPosition,
                             minTimestamp,
                             maxTimestamp,
                             maxLocalDeletionTime,
                             compressionRatio,
                             tombstoneHistogram,
                             sstableLevel,
                             minColumnNames,
                             maxColumnNames,
                             hasLegacyCounterShards,
                             repairedAt);
}
Example #11
Source File: MetadataSerializerTest.java, from stratio-cassandra (Apache License 2.0)
@Test
public void testSerialization() throws IOException {
    EstimatedHistogram rowSizes = new EstimatedHistogram(new long[] { 1L, 2L },
                                                         new long[] { 3L, 4L, 5L });
    EstimatedHistogram columnCounts = new EstimatedHistogram(new long[] { 6L, 7L },
                                                             new long[] { 8L, 9L, 10L });
    ReplayPosition rp = new ReplayPosition(11L, 12);
    long minTimestamp = 2162517136L;
    long maxTimestamp = 4162517136L;

    MetadataCollector collector = new MetadataCollector(new SimpleDenseCellNameType(BytesType.instance))
                                      .estimatedRowSize(rowSizes)
                                      .estimatedColumnCount(columnCounts)
                                      .replayPosition(rp);
    collector.updateMinTimestamp(minTimestamp);
    collector.updateMaxTimestamp(maxTimestamp);

    Set<Integer> ancestors = Sets.newHashSet(1, 2, 3, 4);
    for (int i : ancestors)
        collector.addAncestor(i);

    String partitioner = RandomPartitioner.class.getCanonicalName();
    double bfFpChance = 0.1;
    Map<MetadataType, MetadataComponent> originalMetadata = collector.finalizeMetadata(partitioner, bfFpChance, 0);

    MetadataSerializer serializer = new MetadataSerializer();

    // Serialize to tmp file
    File statsFile = File.createTempFile(Component.STATS.name, null);
    try (DataOutputStreamAndChannel out = new DataOutputStreamAndChannel(new FileOutputStream(statsFile))) {
        serializer.serialize(originalMetadata, out);
    }

    Descriptor desc = new Descriptor(Descriptor.Version.CURRENT, statsFile.getParentFile(), "", "", 0, Descriptor.Type.FINAL);
    try (RandomAccessReader in = RandomAccessReader.open(statsFile)) {
        Map<MetadataType, MetadataComponent> deserialized = serializer.deserialize(desc, in, EnumSet.allOf(MetadataType.class));
        for (MetadataType type : MetadataType.values()) {
            assertEquals(originalMetadata.get(type), deserialized.get(type));
        }
    }
}
Example #12
Source File: ColumnFamilyMetrics.java, from stratio-cassandra (Apache License 2.0)
public EstimatedHistogram getHistogram(SSTableReader reader);