org.rocksdb.Statistics Java Examples
The following examples show how to use
org.rocksdb.Statistics.
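Before diving into the project-specific examples, here is a minimal, self-contained sketch of the pattern most of them share: create a Statistics object, attach it to the options before opening the database, then read ticker counters from it. The class name, database path, and chosen tickers are illustrative assumptions, not taken from any project below.

import org.rocksdb.Options;
import org.rocksdb.RocksDB;
import org.rocksdb.RocksDBException;
import org.rocksdb.Statistics;
import org.rocksdb.StatsLevel;
import org.rocksdb.TickerType;

public class StatisticsQuickStart {
  public static void main(String[] args) throws RocksDBException {
    RocksDB.loadLibrary();
    // Statistics must be attached to the options before the database is opened.
    final Statistics statistics = new Statistics();
    statistics.setStatsLevel(StatsLevel.ALL); // collect everything; cheaper levels exist
    try (final Options options = new Options()
             .setCreateIfMissing(true)
             .setStatistics(statistics);
         final RocksDB db = RocksDB.open(options, "/tmp/rocksdb-statistics-demo")) { // path is an assumption
      db.put("key".getBytes(), "value".getBytes());
      db.get("key".getBytes());
      // Ticker counters accumulate over all operations on this database instance.
      System.out.println("keys written: " + statistics.getTickerCount(TickerType.NUMBER_KEYS_WRITTEN));
      System.out.println("keys read: " + statistics.getTickerCount(TickerType.NUMBER_KEYS_READ));
    }
  }
}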
Example #1
Source File: ByteStoreManager.java From dremio-oss with Apache License 2.0
private void registerMetrics(DBOptions dbOptions) {
  // calling DBOptions.statisticsPtr() will create a Statistics object that will collect various stats from RocksDB and
  // will introduce a 5-10% overhead
  if (!COLLECT_METRICS) {
    return;
  }

  final Statistics statistics = new Statistics();
  statistics.setStatsLevel(StatsLevel.ALL);
  dbOptions.setStatistics(statistics);

  // for now, let's add all ticker stats as gauge metrics
  for (TickerType tickerType : TickerType.values()) {
    if (tickerType == TickerType.TICKER_ENUM_MAX) {
      continue;
    }

    Metrics.newGauge(Metrics.join(METRICS_PREFIX, tickerType.name()),
        () -> statistics.getTickerCount(tickerType));
  }
  // Note that Statistics also contains various histogram metrics, but those cannot be easily tracked through our metrics
}
Example #2
Source File: TestTypedRDBTableStore.java From hadoop-ozone with Apache License 2.0
@Before
public void setUp() throws Exception {
  options = new DBOptions();
  options.setCreateIfMissing(true);
  options.setCreateMissingColumnFamilies(true);

  Statistics statistics = new Statistics();
  statistics.setStatsLevel(StatsLevel.ALL);
  options = options.setStatistics(statistics);

  Set<TableConfig> configSet = new HashSet<>();
  for (String name : families) {
    TableConfig newConfig = new TableConfig(name, new ColumnFamilyOptions());
    configSet.add(newConfig);
  }
  rdbStore = new RDBStore(folder.newFolder(), options, configSet);
  codecRegistry = new CodecRegistry();
}
Example #3
Source File: TestRDBStore.java From hadoop-ozone with Apache License 2.0
@Before
public void setUp() throws Exception {
  options = new DBOptions();
  options.setCreateIfMissing(true);
  options.setCreateMissingColumnFamilies(true);

  Statistics statistics = new Statistics();
  statistics.setStatsLevel(StatsLevel.ALL);
  options = options.setStatistics(statistics);

  configSet = new HashSet<>();
  for (String name : families) {
    TableConfig newConfig = new TableConfig(name, new ColumnFamilyOptions());
    configSet.add(newConfig);
  }
  rdbStore = new RDBStore(folder.newFolder(), options, configSet);
}
Example #4
Source File: TestRDBTableStore.java From hadoop-ozone with Apache License 2.0
@Before
public void setUp() throws Exception {
  options = new DBOptions();
  options.setCreateIfMissing(true);
  options.setCreateMissingColumnFamilies(true);

  Statistics statistics = new Statistics();
  statistics.setStatsLevel(StatsLevel.ALL);
  options = options.setStatistics(statistics);

  Set<TableConfig> configSet = new HashSet<>();
  for (String name : families) {
    TableConfig newConfig = new TableConfig(name, new ColumnFamilyOptions());
    configSet.add(newConfig);
  }
  rdbStore = new RDBStore(folder.newFolder(), options, configSet);
}
Example #5
Source File: RocksDBStats.java From besu with Apache License 2.0
public static void registerRocksDBMetrics(
    final Statistics stats,
    final PrometheusMetricsSystem metricsSystem,
    final MetricCategory category) {
  for (final TickerType ticker : TICKERS) {
    final String promCounterName = ticker.name().toLowerCase();
    metricsSystem.createLongGauge(
        category,
        promCounterName,
        "RocksDB reported statistics for " + ticker.name(),
        () -> stats.getTickerCount(ticker));
  }

  for (final HistogramType histogram : HISTOGRAMS) {
    metricsSystem.addCollector(category, histogramToCollector(stats, histogram));
  }
}
Example #6
Source File: RocksDBStats.java From besu with Apache License 2.0
private static Collector histogramToCollector(
    final Statistics stats, final HistogramType histogram) {
  return new Collector() {
    final String metricName =
        KVSTORE_ROCKSDB_STATS.getName() + "_" + histogram.name().toLowerCase();

    @Override
    public List<MetricFamilySamples> collect() {
      final HistogramData data = stats.getHistogramData(histogram);
      return Collections.singletonList(
          new MetricFamilySamples(
              metricName,
              Type.SUMMARY,
              "RocksDB histogram for " + metricName,
              Arrays.asList(
                  new MetricFamilySamples.Sample(metricName, LABELS, LABEL_50, data.getMedian()),
                  new MetricFamilySamples.Sample(
                      metricName, LABELS, LABEL_95, data.getPercentile95()),
                  new MetricFamilySamples.Sample(
                      metricName, LABELS, LABEL_99, data.getPercentile99()))));
    }
  };
}
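Both collectors in Examples #5 and #6 surface the same three quantiles from HistogramData. For reference, here is a hedged sketch of reading a histogram directly from a Statistics instance, without going through Prometheus; the helper class and the choice of HistogramType.DB_GET are assumptions.

import org.rocksdb.HistogramData;
import org.rocksdb.HistogramType;
import org.rocksdb.Statistics;

final class HistogramDump {
  // "statistics" is assumed to be the instance already registered via setStatistics(...).
  static void print(final Statistics statistics) {
    final HistogramData data = statistics.getHistogramData(HistogramType.DB_GET);
    System.out.println("db.get latency:"
        + " median=" + data.getMedian()
        + " p95=" + data.getPercentile95()
        + " p99=" + data.getPercentile99()
        + " avg=" + data.getAverage()
        + " stddev=" + data.getStandardDeviation());
  }
}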
Example #7
Source File: RocksDBKeyValueStorage.java From besu with Apache License 2.0
public RocksDBKeyValueStorage(
    final RocksDBConfiguration configuration,
    final MetricsSystem metricsSystem,
    final RocksDBMetricsFactory rocksDBMetricsFactory) {
  try {
    final Statistics stats = new Statistics();
    options =
        new Options()
            .setCreateIfMissing(true)
            .setMaxOpenFiles(configuration.getMaxOpenFiles())
            .setTableFormatConfig(createBlockBasedTableConfig(configuration))
            .setMaxBackgroundCompactions(configuration.getMaxBackgroundCompactions())
            .setStatistics(stats);
    options.getEnv().setBackgroundThreads(configuration.getBackgroundThreadCount());
    txOptions = new TransactionDBOptions();
    db = TransactionDB.open(options, txOptions, configuration.getDatabaseDir().toString());
    rocksDBMetrics = rocksDBMetricsFactory.create(metricsSystem, configuration, db, stats);
  } catch (final RocksDBException e) {
    throw new StorageException(e);
  }
}
Example #8
Source File: RocksDBStoreMBean.java From hadoop-ozone with Apache License 2.0
public static RocksDBStoreMBean create(Statistics statistics,
                                       String contextName) {
  RocksDBStoreMBean rocksDBStoreMBean = new RocksDBStoreMBean(
      statistics, contextName);
  MetricsSystem ms = DefaultMetricsSystem.instance();
  MetricsSource metricsSource = ms.getSource(rocksDBStoreMBean.contextName);
  if (metricsSource != null) {
    return (RocksDBStoreMBean) metricsSource;
  } else {
    return ms.register(rocksDBStoreMBean.contextName,
        "RocksDB Metrics",
        rocksDBStoreMBean);
  }
}
Example #9
Source File: RocksDbInstanceFactory.java From teku with Apache License 2.0
private static DBOptions createDBOptions(
    final RocksDbConfiguration configuration, final Statistics stats) {
  return new DBOptions()
      .setCreateIfMissing(true)
      .setBytesPerSync(1048576L)
      .setWalBytesPerSync(1048576L)
      .setMaxBackgroundFlushes(2)
      .setDbWriteBufferSize(configuration.getWriteBufferCapacity())
      .setMaxOpenFiles(configuration.getMaxOpenFiles())
      .setMaxBackgroundCompactions(configuration.getMaxBackgroundCompactions())
      .setCreateMissingColumnFamilies(true)
      .setEnv(Env.getDefault().setBackgroundThreads(configuration.getBackgroundThreadCount()))
      .setStatistics(stats);
}
Example #10
Source File: RocksDbStats.java From teku with Apache License 2.0
private Collector histogramToCollector(
    final MetricCategory metricCategory, final Statistics stats, final HistogramType histogram) {
  return new Collector() {
    final String metricName =
        metricCategory.getApplicationPrefix().orElse("")
            + metricCategory.getName()
            + "_"
            + histogram.name().toLowerCase();

    @Override
    public List<MetricFamilySamples> collect() {
      return ifOpen(
          () -> {
            final HistogramData data = stats.getHistogramData(histogram);
            return Collections.singletonList(
                new MetricFamilySamples(
                    metricName,
                    Type.SUMMARY,
                    "RocksDB histogram for " + metricName,
                    Arrays.asList(
                        new MetricFamilySamples.Sample(
                            metricName, LABELS, LABEL_50, data.getMedian()),
                        new MetricFamilySamples.Sample(
                            metricName, LABELS, LABEL_95, data.getPercentile95()),
                        new MetricFamilySamples.Sample(
                            metricName, LABELS, LABEL_99, data.getPercentile99())))));
          },
          Collections.emptyList());
    }
  };
}
Example #11
Source File: RocksStatistics.java From sofa-jraft with Apache License 2.0
/**
 * String representation of the statistic.
 */
public static String getStatisticsString(final RocksRawKVStore rocksRawKVStore) {
  final Statistics statistics = statistics(rocksRawKVStore);
  if (statistics == null) {
    return "";
  }
  return statistics.toString();
}
Example #12
Source File: RocksStatistics.java From sofa-jraft with Apache License 2.0
/**
 * Gets a string representation of a particular histogram.
 */
public String getHistogramString(final RocksRawKVStore rocksRawKVStore, final HistogramType histogramType) {
  final Statistics statistics = statistics(rocksRawKVStore);
  if (statistics == null) {
    return "";
  }
  return statistics.getHistogramString(histogramType);
}
Example #13
Source File: RocksStatistics.java From sofa-jraft with Apache License 2.0
/**
 * Gets the histogram data for a particular histogram.
 */
public static HistogramData getHistogramData(final RocksRawKVStore rocksRawKVStore,
                                             final HistogramType histogramType) {
  final Statistics statistics = statistics(rocksRawKVStore);
  if (statistics == null) {
    return null;
  }
  return statistics.getHistogramData(histogramType);
}
Example #14
Source File: RocksStatistics.java From sofa-jraft with Apache License 2.0
/**
 * Get the count for a ticker and reset the tickers count.
 */
public static long getAndResetTickerCount(final RocksRawKVStore rocksRawKVStore, final TickerType tickerType) {
  final Statistics statistics = statistics(rocksRawKVStore);
  if (statistics == null) {
    return -1L;
  }
  return statistics.getAndResetTickerCount(tickerType);
}
Example #15
Source File: RocksStatistics.java From sofa-jraft with Apache License 2.0
/**
 * Get the count for a ticker.
 */
public static long getTickerCount(final RocksRawKVStore rocksRawKVStore, final TickerType tickerType) {
  final Statistics statistics = statistics(rocksRawKVStore);
  if (statistics == null) {
    return -1L;
  }
  return statistics.getTickerCount(tickerType);
}
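Examples #11 through #15 are thin null-safe wrappers around a RocksRawKVStore's Statistics. Called directly on a Statistics instance, the two ticker accessors look like the sketch below; the reporter class and the cache tickers are assumptions used only for illustration.

import org.rocksdb.Statistics;
import org.rocksdb.TickerType;

final class TickerReporter {
  static void report(final Statistics statistics) {
    // getTickerCount reads the counter without side effects;
    // getAndResetTickerCount returns the current value and zeroes it,
    // which is handy for per-interval reporting.
    long hits = statistics.getTickerCount(TickerType.BLOCK_CACHE_HIT);
    long missesSinceLastReport = statistics.getAndResetTickerCount(TickerType.BLOCK_CACHE_MISS);
    System.out.println("block cache hits=" + hits + ", misses since last report=" + missesSinceLastReport);
  }
}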
Example #16
Source File: RocksDBStoreMBean.java From hadoop-ozone with Apache License 2.0
public RocksDBStoreMBean(Statistics statistics, String dbName) {
  this.contextName = ROCKSDB_CONTEXT_PREFIX + dbName;
  this.statistics = statistics;

  histogramAttributes.add("Average");
  histogramAttributes.add("Median");
  histogramAttributes.add("Percentile95");
  histogramAttributes.add("Percentile99");
  histogramAttributes.add("StandardDeviation");
}
Example #17
Source File: RocksDBColumnarKeyValueStorage.java From besu with Apache License 2.0
public RocksDBColumnarKeyValueStorage(
    final RocksDBConfiguration configuration,
    final List<SegmentIdentifier> segments,
    final MetricsSystem metricsSystem,
    final RocksDBMetricsFactory rocksDBMetricsFactory)
    throws StorageException {
  try {
    final List<ColumnFamilyDescriptor> columnDescriptors =
        segments.stream()
            .map(segment -> new ColumnFamilyDescriptor(segment.getId()))
            .collect(Collectors.toList());
    columnDescriptors.add(
        new ColumnFamilyDescriptor(
            DEFAULT_COLUMN.getBytes(StandardCharsets.UTF_8),
            new ColumnFamilyOptions()
                .setTableFormatConfig(createBlockBasedTableConfig(configuration))));

    final Statistics stats = new Statistics();
    options =
        new DBOptions()
            .setCreateIfMissing(true)
            .setMaxOpenFiles(configuration.getMaxOpenFiles())
            .setMaxBackgroundCompactions(configuration.getMaxBackgroundCompactions())
            .setStatistics(stats)
            .setCreateMissingColumnFamilies(true)
            .setEnv(
                Env.getDefault().setBackgroundThreads(configuration.getBackgroundThreadCount()));

    txOptions = new TransactionDBOptions();
    final List<ColumnFamilyHandle> columnHandles = new ArrayList<>(columnDescriptors.size());
    db =
        TransactionDB.open(
            options,
            txOptions,
            configuration.getDatabaseDir().toString(),
            columnDescriptors,
            columnHandles);
    metrics = rocksDBMetricsFactory.create(metricsSystem, configuration, db, stats);

    final Map<Bytes, String> segmentsById =
        segments.stream()
            .collect(
                Collectors.toMap(
                    segment -> Bytes.wrap(segment.getId()), SegmentIdentifier::getName));

    final ImmutableMap.Builder<String, ColumnFamilyHandle> builder = ImmutableMap.builder();
    for (ColumnFamilyHandle columnHandle : columnHandles) {
      final String segmentName =
          requireNonNullElse(
              segmentsById.get(Bytes.wrap(columnHandle.getName())), DEFAULT_COLUMN);
      builder.put(segmentName, columnHandle);
    }
    columnHandlesByName = builder.build();
  } catch (final RocksDBException e) {
    throw new StorageException(e);
  }
}
Example #18
Source File: RocksStatistics.java From sofa-jraft with Apache License 2.0
private static Statistics statistics(final RocksRawKVStore rocksRawKVStore) {
  return statisticsGetter.get(rocksRawKVStore);
}
Example #19
Source File: RocksRawKVStore.java From sofa-jraft with Apache License 2.0
public void addStatisticsCollectorCallback(final StatisticsCollectorCallback callback) {
  final RocksStatisticsCollector collector = Requires.requireNonNull(this.statisticsCollector,
      "statisticsCollector");
  final Statistics statistics = Requires.requireNonNull(this.statistics, "statistics");
  collector.addStatsCollectorInput(new StatsCollectorInput(statistics, callback));
}
Example #20
Source File: RocksDbStats.java From teku with Apache License 2.0
public RocksDbStats(final MetricsSystem metricsSystem, final MetricCategory category) {
  this.stats = new Statistics();
  this.metricsSystem = metricsSystem;
  this.category = category;
}
Example #21
Source File: RocksDbStats.java From teku with Apache License 2.0
public Statistics getStats() {
  return stats;
}
Example #22
Source File: MetadataStoreBuilder.java From hadoop-ozone with Apache License 2.0
public MetadataStore build() throws IOException {
  if (dbFile == null) {
    throw new IllegalArgumentException("Failed to build metadata store, "
        + "dbFile is required but not found");
  }

  // Build db store based on configuration
  final ConfigurationSource conf = optionalConf.orElse(DEFAULT_CONF);

  if (dbType == null) {
    LOG.debug("dbType is null, using ");
    dbType = conf.getTrimmed(OzoneConfigKeys.OZONE_METADATA_STORE_IMPL,
        OzoneConfigKeys.OZONE_METADATA_STORE_IMPL_DEFAULT);
    LOG.debug("dbType is null, using dbType {} from ozone configuration", dbType);
  } else {
    LOG.debug("Using dbType {} for metastore", dbType);
  }
  if (OZONE_METADATA_STORE_IMPL_LEVELDB.equals(dbType)) {
    Options options = new Options();
    options.createIfMissing(createIfMissing);
    if (cacheSize > 0) {
      options.cacheSize(cacheSize);
    }
    return new LevelDBStore(dbFile, options);
  } else if (OZONE_METADATA_STORE_IMPL_ROCKSDB.equals(dbType)) {
    org.rocksdb.Options opts;
    // Used cached options if config object passed down is the same
    if (CACHED_OPTS.containsKey(conf)) {
      opts = CACHED_OPTS.get(conf);
    } else {
      opts = new org.rocksdb.Options();
      if (cacheSize > 0) {
        BlockBasedTableConfig tableConfig = new BlockBasedTableConfig();
        tableConfig.setBlockCacheSize(cacheSize);
        opts.setTableFormatConfig(tableConfig);
      }

      String rocksDbStat = conf.getTrimmed(
          OZONE_METADATA_STORE_ROCKSDB_STATISTICS,
          OZONE_METADATA_STORE_ROCKSDB_STATISTICS_DEFAULT);

      if (!rocksDbStat.equals(OZONE_METADATA_STORE_ROCKSDB_STATISTICS_OFF)) {
        Statistics statistics = new Statistics();
        statistics.setStatsLevel(StatsLevel.valueOf(rocksDbStat));
        opts = opts.setStatistics(statistics);
      }
    }
    opts.setCreateIfMissing(createIfMissing);
    CACHED_OPTS.put(conf, opts);
    return new RocksDBStore(dbFile, opts);
  }

  throw new IllegalArgumentException("Invalid argument for "
      + OzoneConfigKeys.OZONE_METADATA_STORE_IMPL
      + ". Expecting " + OZONE_METADATA_STORE_IMPL_LEVELDB
      + " or " + OZONE_METADATA_STORE_IMPL_ROCKSDB
      + ", but met " + dbType);
}
Example #23
Source File: DBStoreBuilder.java From hadoop-ozone with Apache License 2.0
private DBOptions getDbProfile() {
  if (rocksDBOption != null) {
    return rocksDBOption;
  }

  DBOptions option = null;
  if (StringUtil.isNotBlank(dbname)) {
    List<ColumnFamilyDescriptor> columnFamilyDescriptors = new LinkedList<>();

    for (TableConfig tc : tables) {
      columnFamilyDescriptors.add(tc.getDescriptor());
    }

    if (columnFamilyDescriptors.size() > 0) {
      try {
        option = DBConfigFromFile.readFromFile(dbname, columnFamilyDescriptors);
        if (option != null) {
          LOG.info("Using Configs from {}.ini file", dbname);
        }
      } catch (IOException ex) {
        LOG.info("Unable to read RocksDB config from {}", dbname, ex);
      }
    }
  }

  if (option == null) {
    LOG.debug("Using default options: {}", dbProfile);
    option = dbProfile.getDBOptions();
  }

  if (rocksDBConfiguration.isRocksdbLoggingEnabled()) {
    org.rocksdb.Logger logger = new org.rocksdb.Logger(option) {
      @Override
      protected void log(InfoLogLevel infoLogLevel, String s) {
        ROCKS_DB_LOGGER.info(s);
      }
    };
    InfoLogLevel level = InfoLogLevel.valueOf(rocksDBConfiguration
        .getRocksdbLogLevel() + "_LEVEL");
    logger.setInfoLogLevel(level);
    option.setLogger(logger);
  }

  if (!rocksDbStat.equals(OZONE_METADATA_STORE_ROCKSDB_STATISTICS_OFF)) {
    Statistics statistics = new Statistics();
    statistics.setStatsLevel(StatsLevel.valueOf(rocksDbStat));
    option = option.setStatistics(statistics);
  }
  return option;
}
Example #24
Source File: RocksDbDataSourceImpl.java From gsc-core with GNU Lesser General Public License v3.0
public void initDB(RocksDbSettings settings) {
  resetDbLock.writeLock().lock();
  try {
    if (isAlive()) {
      return;
    }
    Preconditions.checkNotNull(dataBaseName, "no name set to the dbStore");

    try (Options options = new Options()) {
      // most of these options are suggested by https://github.com/facebook/rocksdb/wiki/Set-Up-Options

      // general options
      if (settings.isEnableStatistics()) {
        options.setStatistics(new Statistics());
        options.setStatsDumpPeriodSec(60);
      }
      options.setCreateIfMissing(true);
      options.setIncreaseParallelism(1);
      options.setLevelCompactionDynamicLevelBytes(true);
      options.setMaxOpenFiles(settings.getMaxOpenFiles());

      // general options supported user config
      options.setNumLevels(settings.getLevelNumber());
      options.setMaxBytesForLevelMultiplier(settings.getMaxBytesForLevelMultiplier());
      options.setMaxBytesForLevelBase(settings.getMaxBytesForLevelBase());
      options.setMaxBackgroundCompactions(settings.getCompactThreads());
      options.setLevel0FileNumCompactionTrigger(settings.getLevel0FileNumCompactionTrigger());
      options.setTargetFileSizeMultiplier(settings.getTargetFileSizeMultiplier());
      options.setTargetFileSizeBase(settings.getTargetFileSizeBase());

      // table options
      final BlockBasedTableConfig tableCfg;
      options.setTableFormatConfig(tableCfg = new BlockBasedTableConfig());
      tableCfg.setBlockSize(settings.getBlockSize());
      tableCfg.setBlockCacheSize(32 * 1024 * 1024);
      tableCfg.setCacheIndexAndFilterBlocks(true);
      tableCfg.setPinL0FilterAndIndexBlocksInCache(true);
      tableCfg.setFilter(new BloomFilter(10, false));

      // read options
      readOpts = new ReadOptions();
      readOpts = readOpts.setPrefixSameAsStart(true)
          .setVerifyChecksums(false);

      try {
        logger.debug("Opening database");
        final Path dbPath = getDbPath();

        if (!Files.isSymbolicLink(dbPath.getParent())) {
          Files.createDirectories(dbPath.getParent());
        }

        try {
          database = RocksDB.open(options, dbPath.toString());
        } catch (RocksDBException e) {
          logger.error(e.getMessage(), e);
          throw new RuntimeException("Failed to initialize database", e);
        }

        alive = true;
      } catch (IOException ioe) {
        logger.error(ioe.getMessage(), ioe);
        throw new RuntimeException("Failed to initialize database", ioe);
      }

      logger.debug("<~ RocksDbDataSource.initDB(): " + dataBaseName);
    }
  } finally {
    resetDbLock.writeLock().unlock();
  }
}
Example #25
Source File: RocksDBGenericOptionsToDbOptionsColumnFamilyOptionsAdapter.java From kcache with Apache License 2.0
@Override
public Options setStatistics(final Statistics statistics) {
  dbOptions.setStatistics(statistics);
  return this;
}
Example #26
Source File: RocksDBGenericOptionsToDbOptionsColumnFamilyOptionsAdapter.java From kcache with Apache License 2.0
@Override
public Statistics statistics() {
  return dbOptions.statistics();
}
Example #27
Source File: RocksDBDAO.java From hudi with Apache License 2.0
/**
 * Initialized Rocks DB instance.
 */
private void init() {
  try {
    LOG.info("DELETING RocksDB persisted at " + rocksDBBasePath);
    FileIOUtils.deleteDirectory(new File(rocksDBBasePath));

    managedHandlesMap = new ConcurrentHashMap<>();
    managedDescriptorMap = new ConcurrentHashMap<>();

    // If already present, loads the existing column-family handles
    final DBOptions dbOptions = new DBOptions().setCreateIfMissing(true).setCreateMissingColumnFamilies(true)
        .setWalDir(rocksDBBasePath).setStatsDumpPeriodSec(300).setStatistics(new Statistics());
    dbOptions.setLogger(new org.rocksdb.Logger(dbOptions) {
      @Override
      protected void log(InfoLogLevel infoLogLevel, String logMsg) {
        LOG.info("From Rocks DB : " + logMsg);
      }
    });
    final List<ColumnFamilyDescriptor> managedColumnFamilies = loadManagedColumnFamilies(dbOptions);
    final List<ColumnFamilyHandle> managedHandles = new ArrayList<>();

    FileIOUtils.mkdir(new File(rocksDBBasePath));
    rocksDB = RocksDB.open(dbOptions, rocksDBBasePath, managedColumnFamilies, managedHandles);

    ValidationUtils.checkArgument(managedHandles.size() == managedColumnFamilies.size(),
        "Unexpected number of handles are returned");
    for (int index = 0; index < managedHandles.size(); index++) {
      ColumnFamilyHandle handle = managedHandles.get(index);
      ColumnFamilyDescriptor descriptor = managedColumnFamilies.get(index);
      String familyNameFromHandle = new String(handle.getName());
      String familyNameFromDescriptor = new String(descriptor.getName());

      ValidationUtils.checkArgument(familyNameFromDescriptor.equals(familyNameFromHandle),
          "Family Handles not in order with descriptors");
      managedHandlesMap.put(familyNameFromHandle, handle);
      managedDescriptorMap.put(familyNameFromDescriptor, descriptor);
    }
  } catch (RocksDBException | IOException re) {
    LOG.error("Got exception opening Rocks DB instance ", re);
    throw new HoodieException(re);
  }
}