com.codahale.metrics.MetricRegistry Java Examples
The following examples show how to use
com.codahale.metrics.MetricRegistry.
Each example is taken from an open-source project; the project, source file, and license are noted above the code.
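Before the project-specific examples, here is a minimal, self-contained sketch of the core MetricRegistry API: creating a registry, obtaining a counter and a timer, registering a gauge, and attaching a console reporter. The class name, metric names, and the sleep-based workload are illustrative placeholders, not taken from any project below.

import com.codahale.metrics.ConsoleReporter;
import com.codahale.metrics.Counter;
import com.codahale.metrics.Gauge;
import com.codahale.metrics.MetricRegistry;
import com.codahale.metrics.Timer;
import java.util.concurrent.TimeUnit;

public class MetricRegistryBasics {

    private static final long START_MS = System.currentTimeMillis();

    public static void main(String[] args) throws InterruptedException {
        MetricRegistry registry = new MetricRegistry();

        // Counters and timers are created (or looked up) by name on the registry.
        Counter requests = registry.counter(MetricRegistry.name(MetricRegistryBasics.class, "requests"));
        Timer latency = registry.timer(MetricRegistry.name(MetricRegistryBasics.class, "latency"));

        // Gauges are registered explicitly and compute their value on demand.
        registry.register(MetricRegistry.name(MetricRegistryBasics.class, "uptime-ms"),
                (Gauge<Long>) () -> System.currentTimeMillis() - START_MS);

        // Report all metrics to stdout once per second.
        ConsoleReporter reporter = ConsoleReporter.forRegistry(registry)
                .convertRatesTo(TimeUnit.SECONDS)
                .convertDurationsTo(TimeUnit.MILLISECONDS)
                .build();
        reporter.start(1, TimeUnit.SECONDS);

        for (int i = 0; i < 100; i++) {
            requests.inc();
            // Timer.Context is Closeable, so try-with-resources records the elapsed time.
            try (Timer.Context ignored = latency.time()) {
                Thread.sleep(5); // simulated work
            }
        }

        reporter.report(); // final dump before exiting
        reporter.stop();
    }
}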
Example #1
Source File: TableLifeCycleTest.java, from emodb (Apache License 2.0)
@Test
public void testMoveCanceledAfterPromote() throws Exception {
    InMemoryDataStore backingStore = newBackingStore(new MetricRegistry());
    Date fct = new Date(0);
    AstyanaxTableDAO tableDAO = newTableDAO(backingStore, DC_US, mock(DataCopyDAO.class), mock(DataPurgeDAO.class), fct);
    tableDAO.create(TABLE, newOptions(PL_US), ImmutableMap.<String, Object>of(), newAudit());

    // Perform an initial move.
    tableDAO.move(TABLE, PL_GLOBAL, Optional.<Integer>absent(), newAudit(), MoveType.SINGLE_TABLE);

    TableJson table = tableDAO.readTableJson(TABLE, true);
    String srcUuid = checkNotNull(table.getUuidString());
    String destUuid = checkNotNull(table.getMasterStorage().getMoveTo().getUuidString());

    // Hack the table JSON to get to the state where promote has occurred.
    advanceActivatedToPromoted(destUuid, tableDAO, backingStore, fct);

    try {
        tableDAO.move(TABLE, PL_US, Optional.<Integer>absent(), newAudit(), MoveType.SINGLE_TABLE);
        fail();
    } catch (IllegalArgumentException e) {
        assertEquals(e.getMessage(),
                "This table name is currently undergoing maintenance and therefore cannot be modified: my:table");
    }
}
Example #2
Source File: ConnectionPool.java, from StubbornJava (MIT License)
public static HikariDataSource getDataSourceFromConfig(
        Config conf,
        MetricRegistry metricRegistry,
        HealthCheckRegistry healthCheckRegistry) {

    HikariConfig jdbcConfig = new HikariConfig();
    jdbcConfig.setPoolName(conf.getString("poolName"));
    jdbcConfig.setMaximumPoolSize(conf.getInt("maximumPoolSize"));
    jdbcConfig.setMinimumIdle(conf.getInt("minimumIdle"));
    jdbcConfig.setJdbcUrl(conf.getString("jdbcUrl"));
    jdbcConfig.setUsername(conf.getString("username"));
    jdbcConfig.setPassword(conf.getString("password"));

    jdbcConfig.addDataSourceProperty("cachePrepStmts", conf.getBoolean("cachePrepStmts"));
    jdbcConfig.addDataSourceProperty("prepStmtCacheSize", conf.getInt("prepStmtCacheSize"));
    jdbcConfig.addDataSourceProperty("prepStmtCacheSqlLimit", conf.getInt("prepStmtCacheSqlLimit"));
    jdbcConfig.addDataSourceProperty("useServerPrepStmts", conf.getBoolean("useServerPrepStmts"));

    // Add HealthCheck
    jdbcConfig.setHealthCheckRegistry(healthCheckRegistry);

    // Add Metrics
    jdbcConfig.setMetricRegistry(metricRegistry);

    return new HikariDataSource(jdbcConfig);
}
Example #3
Source File: KafkaRepositoryAT.java, from nakadi (MIT License)
private KafkaTopicRepository createKafkaTopicRepository() {
    final KafkaZookeeper kafkaZookeeper = Mockito.mock(KafkaZookeeper.class);
    Mockito.when(kafkaZookeeper.getZookeeperConnectionString()).thenReturn(ZOOKEEPER_URL);

    final Consumer<byte[], byte[]> consumer = Mockito.mock(Consumer.class);
    Mockito.when(consumer.partitionsFor(any())).thenReturn(new ArrayList<>());

    final KafkaFactory factory = Mockito.mock(KafkaFactory.class);
    Mockito.when(factory.getConsumer()).thenReturn(consumer);

    final KafkaLocationManager kafkaLocationManager = Mockito.mock(KafkaLocationManager.class);
    Mockito.doReturn(kafkaHelper.createProducer()).when(factory).takeProducer();

    return new KafkaTopicRepository.Builder()
            .setKafkaZookeeper(kafkaZookeeper)
            .setKafkaFactory(factory)
            .setNakadiSettings(nakadiSettings)
            .setKafkaSettings(kafkaSettings)
            .setZookeeperSettings(zookeeperSettings)
            .setKafkaTopicConfigFactory(kafkaTopicConfigFactory)
            .setKafkaLocationManager(kafkaLocationManager)
            .setMetricRegistry(new MetricRegistry())
            .build();
}
Example #4
Source File: UserTaskManager.java, from cruise-control (BSD 2-Clause "Simplified" License)
public UserTaskManager(KafkaCruiseControlConfig config,
                       MetricRegistry dropwizardMetricRegistry,
                       Map<EndPoint, Timer> successfulRequestExecutionTimer,
                       Purgatory purgatory) {
    _purgatory = purgatory;
    _sessionKeyToUserTaskIdMap = new HashMap<>();
    List<CruiseControlEndpointType> endpointTypes =
            Collections.unmodifiableList(Arrays.asList(CruiseControlEndpointType.values()));
    _uuidToCompletedUserTaskInfoMap = new HashMap<>(endpointTypes.size());
    _completedUserTaskRetentionTimeMs = new HashMap<>(endpointTypes.size());
    initCompletedUserTaskRetentionPolicy(config, endpointTypes);
    _sessionExpiryMs = config.getLong(WebServerConfig.WEBSERVER_SESSION_EXPIRY_MS_CONFIG);
    _maxActiveUserTasks = config.getInt(WebServerConfig.MAX_ACTIVE_USER_TASKS_CONFIG);
    _uuidToActiveUserTaskInfoMap = new LinkedHashMap<>(_maxActiveUserTasks);
    _time = Time.SYSTEM;
    _uuidGenerator = new UUIDGenerator();
    _userTaskScannerExecutor.scheduleAtFixedRate(new UserTaskScanner(),
                                                 USER_TASK_SCANNER_INITIAL_DELAY_SECONDS,
                                                 USER_TASK_SCANNER_PERIOD_SECONDS,
                                                 TimeUnit.SECONDS);
    dropwizardMetricRegistry.register(MetricRegistry.name("UserTaskManager", "num-active-sessions"),
                                      (Gauge<Integer>) _sessionKeyToUserTaskIdMap::size);
    dropwizardMetricRegistry.register(MetricRegistry.name("UserTaskManager", "num-active-user-tasks"),
                                      (Gauge<Integer>) _uuidToActiveUserTaskInfoMap::size);
    _successfulRequestExecutionTimer = successfulRequestExecutionTimer;
}
Example #5
Source File: PersistenceMigrationModuleTest.java, from hivemq-community-edition (Apache License 2.0)
@Test
public void test_startup_singleton() {
    final Injector injector = Guice.createInjector(
            new PersistenceMigrationModule(new MetricRegistry(), persistenceConfigurationService),
            new AbstractModule() {
                @Override
                protected void configure() {
                    bind(SystemInformation.class).toInstance(systemInformation);
                    bindScope(LazySingleton.class, LazySingletonScope.get());
                    bind(MqttConfigurationService.class).toInstance(mqttConfigurationService);
                }
            });

    final PersistenceStartup instance1 = injector.getInstance(PersistenceStartup.class);
    final PersistenceStartup instance2 = injector.getInstance(PersistenceStartup.class);

    assertSame(instance1, instance2);
}
Example #6
Source File: InstrHttpClientBuilderProvider.java, from knox (Apache License 2.0)
@Override
public String getNameFor(String name, HttpRequest request) {
    try {
        String context = "";
        Header header = request.getFirstHeader("X-Forwarded-Context");
        if (header != null) {
            context = header.getValue();
        }
        RequestLine requestLine = request.getRequestLine();
        URIBuilder uriBuilder = new URIBuilder(requestLine.getUri());
        String resourcePath = InstrUtils.getResourcePath(uriBuilder.removeQuery().build().toString());
        return MetricRegistry.name("service", name, context + resourcePath, methodNameString(request));
    } catch (URISyntaxException e) {
        throw new IllegalArgumentException(e);
    }
}
Example #7
Source File: TestJedisPoolFacade.java, from HttpSessionReplacer (MIT License)
@Test
public void testMetrics() {
    MetricRegistry metrics = mock(MetricRegistry.class);
    Client client = mock(Client.class);
    when(client.getHost()).thenReturn("myhost");
    when(jedis.getClient()).thenReturn(client);
    when(pool.getNumActive()).thenReturn(1);
    when(pool.getNumIdle()).thenReturn(2);
    when(pool.getNumWaiters()).thenReturn(3);
    rf.startMonitoring(metrics);
    @SuppressWarnings("rawtypes")
    ArgumentCaptor<Gauge> gauge = ArgumentCaptor.forClass(Gauge.class);
    verify(metrics).register(eq("com.amadeus.session.redis.myhost.active"), gauge.capture());
    verify(metrics).register(eq("com.amadeus.session.redis.myhost.idle"), gauge.capture());
    verify(metrics).register(eq("com.amadeus.session.redis.myhost.waiting"), gauge.capture());
    assertEquals(1, gauge.getAllValues().get(0).getValue());
    assertEquals(2, gauge.getAllValues().get(1).getValue());
    assertEquals(3, gauge.getAllValues().get(2).getValue());
}
Example #8
Source File: CloudWatchLogsInput.java, from graylog-plugin-aws (Apache License 2.0)
@Inject
public CloudWatchLogsInput(@Assisted Configuration configuration,
                           MetricRegistry metricRegistry,
                           KinesisTransport.Factory transport,
                           LocalMetricRegistry localRegistry,
                           CloudWatchRawLogCodec.Factory codec,
                           Config config,
                           Descriptor descriptor,
                           ServerStatus serverStatus) {
    super(metricRegistry,
          configuration,
          transport.create(configuration),
          localRegistry,
          codec.create(configuration),
          config,
          descriptor,
          serverStatus);
}
Example #9
Source File: MetricRuleEvaluatorHelper.java, from datacollector (Apache License 2.0)
public static Metric getMetric(MetricRegistry metrics, String metricId, MetricType metricType) {
    Metric metric;
    switch (metricType) {
        case HISTOGRAM:
            metric = MetricsConfigurator.getHistogram(metrics, metricId);
            break;
        case METER:
            metric = MetricsConfigurator.getMeter(metrics, metricId);
            break;
        case COUNTER:
            metric = MetricsConfigurator.getCounter(metrics, metricId);
            break;
        case TIMER:
            metric = MetricsConfigurator.getTimer(metrics, metricId);
            break;
        case GAUGE:
            metric = MetricsConfigurator.getGauge(metrics, metricId);
            break;
        default:
            throw new IllegalArgumentException(Utils.format("Unknown metric type '{}'", metricType));
    }
    return metric;
}
Example #10
Source File: LibratoReporterProviderTest.java, from graylog-plugin-metrics-reporter (GNU General Public License v3.0)
@Test
public void get() throws Exception {
    final MetricsLibratoReporterConfiguration configuration = new MetricsLibratoReporterConfiguration() {
        @Override
        public String getUsername() {
            return "username";
        }

        @Override
        public String getToken() {
            return "token";
        }
    };

    final LibratoReporterProvider provider = new LibratoReporterProvider(configuration, new MetricRegistry());
    final LibratoReporter reporter = provider.get();
    assertNotNull(reporter);
}
Example #11
Source File: CompactionControlMonitorManager.java, from emodb (Apache License 2.0)
@Inject
CompactionControlMonitorManager(LifeCycleRegistry lifeCycle,
                                @LocalCompactionControl CompactionControlSource compactionControlSource,
                                @GlobalFullConsistencyZooKeeper CuratorFramework curator,
                                @SelfHostAndPort HostAndPort self,
                                Clock clock,
                                LeaderServiceTask dropwizardTask,
                                final MetricRegistry metricRegistry) {
    LeaderService leaderService = new LeaderService(
            curator,
            "/leader/compaction-control-monitor",
            self.toString(),
            "Leader-CompactionControlMonitor",
            30, TimeUnit.MINUTES,
            () -> new CompactionControlMonitor(compactionControlSource, clock, metricRegistry));

    ServiceFailureListener.listenTo(leaderService, metricRegistry);
    dropwizardTask.register("stash-runtime-monitor", leaderService);
    lifeCycle.manage(new ManagedGuavaService(leaderService));
}
Example #12
Source File: CSVReporter.java, from jboot (Apache License 2.0)
@Override
public void report(MetricRegistry metricRegistry) {
    JbootMetricCVRReporterConfig cvrReporterConfig = Jboot.config(JbootMetricCVRReporterConfig.class);

    if (StrUtil.isBlank(cvrReporterConfig.getPath())) {
        throw new NullPointerException("csv reporter path must not be null, please config jboot.metrics.reporter.cvr.path in you properties.");
    }

    final CsvReporter reporter = CsvReporter.forRegistry(metricRegistry)
            .formatFor(Locale.US)
            .convertRatesTo(TimeUnit.SECONDS)
            .convertDurationsTo(TimeUnit.MILLISECONDS)
            .build(new File(cvrReporterConfig.getPath()));

    reporter.start(1, TimeUnit.SECONDS);
}
Example #13
Source File: DropwizardHelper.java, from okapi (Apache License 2.0)
/**
 * Configure Dropwizard helper.
 *
 * @param graphiteHost graphite server host
 * @param port graphite server port
 * @param tu time unit
 * @param period reporting period
 * @param vopt Vert.x options
 * @param hostName logical hostname for this node (reporting)
 */
public static void config(String graphiteHost, int port, TimeUnit tu,
                          int period, VertxOptions vopt, String hostName) {
    final String registryName = "okapi";
    MetricRegistry registry = SharedMetricRegistries.getOrCreate(registryName);

    DropwizardMetricsOptions metricsOpt = new DropwizardMetricsOptions();
    metricsOpt.setEnabled(true).setRegistryName(registryName);
    vopt.setMetricsOptions(metricsOpt);

    Graphite graphite = new Graphite(new InetSocketAddress(graphiteHost, port));
    final String prefix = "folio.okapi." + hostName;
    GraphiteReporter reporter = GraphiteReporter.forRegistry(registry)
            .prefixedWith(prefix)
            .build(graphite);
    reporter.start(period, tu);

    logger.info("Metrics remote {}:{} this {}", graphiteHost, port, prefix);
}
Example #14
Source File: JmxReportingTest.java, from metrics-sql (Apache License 2.0)
@Before
public void setUp() throws SQLException {
    mBeanServer = ManagementFactory.getPlatformMBeanServer();

    metricRegistry = new MetricRegistry();
    jmxReporter = JmxReporter.forRegistry(metricRegistry)
            .registerWith(mBeanServer)
            .createsObjectNamesWith(new SqlObjectNameFactory())
            .build();
    jmxReporter.start();

    proxyFactory = new JdbcProxyFactory(metricRegistry);

    rawDataSource = H2DbUtil.createDataSource();
    try (Connection connection = rawDataSource.getConnection()) {
        H2DbUtil.initTable(connection);
    }
    dataSource = proxyFactory.wrapDataSource(rawDataSource);
}
Example #15
Source File: JmxMetricsReporter.java, from kylin (Apache License 2.0)
public JmxMetricsReporter(MetricRegistry registry, KylinConfig conf) {
    this.registry = registry;
    this.conf = conf;
    jmxReporter = JmxReporter.forRegistry(registry)
            .convertRatesTo(TimeUnit.SECONDS)
            .createsObjectNamesWith(new KylinObjectNameFactory())
            .convertDurationsTo(TimeUnit.MILLISECONDS)
            .build();
}
Example #16
Source File: SubscriptionStreamerFactory.java, from nakadi (MIT License)
@Autowired
public SubscriptionStreamerFactory(
        final TimelineService timelineService,
        final CursorTokenService cursorTokenService,
        final ObjectMapper objectMapper,
        final CursorConverter cursorConverter,
        @Qualifier("streamMetricsRegistry") final MetricRegistry metricRegistry,
        final SubscriptionClientFactory zkClientFactory,
        final EventStreamWriter eventStreamWriter,
        final AuthorizationValidator authorizationValidator,
        final EventTypeChangeListener eventTypeChangeListener,
        final EventTypeCache eventTypeCache,
        final NakadiKpiPublisher nakadiKpiPublisher,
        final CursorOperationsService cursorOperationsService,
        final EventStreamChecks eventStreamChecks,
        @Value("${nakadi.kpi.event-types.nakadiDataStreamed}") final String kpiDataStreamedEventType,
        @Value("${nakadi.kpi.config.stream-data-collection-frequency-ms}") final long kpiCollectionFrequencyMs,
        @Value("${nakadi.subscription.maxStreamMemoryBytes}") final long streamMemoryLimitBytes) {
    this.timelineService = timelineService;
    this.cursorTokenService = cursorTokenService;
    this.objectMapper = objectMapper;
    this.cursorConverter = cursorConverter;
    this.metricRegistry = metricRegistry;
    this.zkClientFactory = zkClientFactory;
    this.eventStreamWriter = eventStreamWriter;
    this.authorizationValidator = authorizationValidator;
    this.eventTypeChangeListener = eventTypeChangeListener;
    this.eventTypeCache = eventTypeCache;
    this.nakadiKpiPublisher = nakadiKpiPublisher;
    this.cursorOperationsService = cursorOperationsService;
    this.eventStreamChecks = eventStreamChecks;
    this.kpiDataStreamedEventType = kpiDataStreamedEventType;
    this.kpiCollectionFrequencyMs = kpiCollectionFrequencyMs;
    this.streamMemoryLimitBytes = streamMemoryLimitBytes;
}
Example #17
Source File: ResilientMegabusRefResolver.java, from emodb (Apache License 2.0)
@Inject
public ResilientMegabusRefResolver(DataProvider dataProvider,
                                   @MegabusRefTopic Topic megabusRefTopic,
                                   @MegabusTopic Topic megabusResolvedTopic,
                                   @RetryRefTopic Topic retryRefTopic,
                                   @MissingRefTopic Topic missingRefTopic,
                                   KafkaCluster kafkaCluster,
                                   Clock clock,
                                   @SelfHostAndPort HostAndPort hostAndPort,
                                   @RefResolverConsumerGroup String refResolverConsumerGroup,
                                   MetricRegistry metricRegistry) {
    super(SERVICE_NAME,
          () -> new MegabusRefResolver(dataProvider, megabusRefTopic, megabusResolvedTopic,
                  retryRefTopic, missingRefTopic, kafkaCluster, clock, hostAndPort,
                  refResolverConsumerGroup, metricRegistry),
          RESTART_DELAY, false);
}
Example #18
Source File: MetricsRegistryTest.java, from spectator (Apache License 2.0)
@Test
public void timer() {
    MetricRegistry codaRegistry = new MetricRegistry();
    MetricsRegistry r = new MetricsRegistry(clock, codaRegistry);
    r.timer("foo").record(1, TimeUnit.MILLISECONDS);
    Assertions.assertEquals(1, codaRegistry.getTimers().get("foo").getCount());
}
Example #19
Source File: HadoopMetrics2ReporterTest.java, from kylin (Apache License 2.0)
@Before
public void setup() {
    mockRegistry = mock(MetricRegistry.class);
    mockMetricsSystem = mock(MetricsSystem.class);

    recordName = "myserver";
    metrics2Reporter = HadoopMetrics2Reporter.forRegistry(mockRegistry)
            .convertDurationsTo(TimeUnit.MILLISECONDS)
            .convertRatesTo(TimeUnit.SECONDS)
            .build(mockMetricsSystem, "MyServer", "My Cool Server", recordName);
}
Example #20
Source File: MonitoringModule.java, from curiostack (MIT License)
private static void configureJvmMetrics(MetricRegistry registry) {
    MBeanServer mBeanServer = ManagementFactory.getPlatformMBeanServer();
    registry.register("jvm.buffer-pool", new BufferPoolMetricSet(mBeanServer));
    registry.register("jvm.class-loading", new ClassLoadingGaugeSet());
    registry.register("jvm.file-descriptor-ratio", new FileDescriptorRatioGauge());
    registry.register("jvm.gc", new GarbageCollectorMetricSet());
    registry.register("jvm.memory", new MemoryUsageGaugeSet());
    registry.register("jvm.threads", new ThreadStatesGaugeSet());
}
Example #21
Source File: ConnectionPoolTest.java, from StubbornJava (MIT License)
@Test
public void test() throws SQLException {
    Config config = ConfigFactory.empty()
            .withValue("poolName", ConfigValueFactory.fromAnyRef("test pool"))
            .withValue("jdbcUrl", ConfigValueFactory.fromAnyRef("jdbc:hsqldb:mem:testdb"))
            .withValue("maximumPoolSize", ConfigValueFactory.fromAnyRef(10))
            .withValue("minimumIdle", ConfigValueFactory.fromAnyRef(2))
            .withValue("username", ConfigValueFactory.fromAnyRef("SA"))
            .withValue("password", ConfigValueFactory.fromAnyRef(""))
            .withValue("cachePrepStmts", ConfigValueFactory.fromAnyRef(true))
            .withValue("prepStmtCacheSize", ConfigValueFactory.fromAnyRef(256))
            .withValue("prepStmtCacheSqlLimit", ConfigValueFactory.fromAnyRef(2048))
            .withValue("useServerPrepStmts", ConfigValueFactory.fromAnyRef(true));

    MetricRegistry metricRegistry = new MetricRegistry();
    HealthCheckRegistry healthCheckRegistry = new HealthCheckRegistry();
    try (HikariDataSource ds = ConnectionPool.getDataSourceFromConfig(config, metricRegistry, healthCheckRegistry)) {
        assertTrue(ds.getPoolName().equals("test pool"));
        assertTrue(ds.getMaximumPoolSize() == 10);
        assertTrue(ds.getMinimumIdle() == 2);
        assertTrue(ds.getUsername().equals("SA"));
        assertTrue(ds.getPassword().equals(""));

        Properties dsp = ds.getDataSourceProperties();
        assertTrue(((boolean) dsp.get("cachePrepStmts")) == true);
        assertTrue(((int) dsp.get("prepStmtCacheSize")) == 256);
        assertTrue(((int) dsp.get("prepStmtCacheSqlLimit")) == 2048);
        assertTrue(((boolean) dsp.get("useServerPrepStmts")) == true);

        // Using identity equals on purpose
        assertTrue(ds.getHealthCheckRegistry() == healthCheckRegistry);
        assertTrue(ds.getMetricRegistry() == metricRegistry);

        try (Connection conn = ds.getConnection()) {
            assertTrue(conn.isValid(1000));
        }
    }
}
Example #22
Source File: SolrCloudAuthTestCase.java, from lucene-solr (Apache License 2.0)
/**
 * Common test method to be able to check security from any authentication plugin
 * @param cluster the MiniSolrCloudCluster to fetch metrics from
 * @param prefix the metrics key prefix, currently "SECURITY./authentication." for basic auth
 *               and "SECURITY./authentication/pki." for PKI
 * @param keys what keys to examine
 */
Map<String, Long> countSecurityMetrics(MiniSolrCloudCluster cluster, String prefix, List<String> keys) {
    List<Map<String, Metric>> metrics = new ArrayList<>();
    cluster.getJettySolrRunners().forEach(r -> {
        MetricRegistry registry = r.getCoreContainer().getMetricManager().registry("solr.node");
        assertNotNull(registry);
        metrics.add(registry.getMetrics());
    });

    Map<String, Long> counts = new HashMap<>();
    keys.forEach(k -> {
        counts.put(k, sumCount(prefix, k, metrics));
    });
    return counts;
}
Example #23
Source File: PartitionAwareServiceFactory.java, from emodb (Apache License 2.0)
public PartitionAwareServiceFactory(Class<S> serviceClass, MultiThreadedServiceFactory<S> delegate, S local,
                                    HostAndPort self, HealthCheckRegistry healthCheckRegistry,
                                    MetricRegistry metricRegistry) {
    _serviceClass = checkNotNull(serviceClass, "serviceClass");
    _delegate = checkNotNull(delegate, "delegate");
    _local = checkNotNull(local, "local");
    _localId = self.toString();
    _healthCheckRegistry = healthCheckRegistry;
    _errorMeter = metricRegistry.meter(MetricRegistry.name("bv.emodb.web.partition-forwarding",
            serviceClass.getSimpleName(), "errors"));
}
Example #24
Source File: LoggingPartitionedService.java, from emodb (Apache License 2.0)
public LoggingPartitionedService(CuratorFramework curator, String leaderPath, String instanceId,
                                 String serviceName, int numPartitions, long reacquireDelay,
                                 long repartitionDelay, TimeUnit delayUnit, MetricRegistry metricRegistry,
                                 @Nullable Clock clock) {
    super(curator, leaderPath, instanceId, serviceName, numPartitions, reacquireDelay, repartitionDelay,
            delayUnit, clock);
    setServiceFactory(this::createServiceForPartition);
    _serviceName = serviceName;
    _ownedPartitions = new AtomicInteger(0);
    metricRegistry.register(
            MetricRegistry.name("bv.emodb.LoggingPartitionedService", serviceName),
            (Gauge) _ownedPartitions::intValue);
}
Example #25
Source File: AzureBlobDataAccessorTest.java, from ambry (Apache License 2.0)
@Before
public void setup() throws Exception {
    BlobServiceClient mockServiceClient = mock(BlobServiceClient.class);
    mockBlockBlobClient = setupMockBlobClient(mockServiceClient);
    mockBatchClient = mock(BlobBatchClient.class);
    mockBlobExistence(false);
    blobId = AzureTestUtils.generateBlobId();
    AzureTestUtils.setConfigProperties(configProps);
    azureMetrics = new AzureMetrics(new MetricRegistry());
    dataAccessor = new AzureBlobDataAccessor(mockServiceClient, mockBatchClient, clusterName, azureMetrics);
}
Example #26
Source File: AzureStorageCompactorTest.java, from ambry (Apache License 2.0)
@Before
public void setup() throws Exception {
    mockServiceClient = mock(BlobServiceClient.class);
    mockBlockBlobClient = AzureBlobDataAccessorTest.setupMockBlobClient(mockServiceClient);
    mockBlobBatchClient = mock(BlobBatchClient.class);
    mockumentClient = mock(AsyncDocumentClient.class);
    azureMetrics = new AzureMetrics(new MetricRegistry());

    int lookbackDays = CloudConfig.DEFAULT_RETENTION_DAYS
            + numQueryBuckets * CloudConfig.DEFAULT_COMPACTION_QUERY_BUCKET_DAYS;
    configProps.setProperty(CloudConfig.CLOUD_COMPACTION_LOOKBACK_DAYS, String.valueOf(lookbackDays));
    buildCompactor(configProps);
}
Example #27
Source File: ElasticsearchReporter.java, from oneops (Apache License 2.0)
private Builder(MetricRegistry registry) {
    this.registry = registry;
    this.clock = Clock.defaultClock();
    this.prefix = null;
    this.rateUnit = TimeUnit.SECONDS;
    this.durationUnit = TimeUnit.MILLISECONDS;
    this.filter = MetricFilter.ALL;
}
Example #28
Source File: MegabusRefProducer.java, from emodb (Apache License 2.0)
public MegabusRefProducer(MegabusRefProducerConfiguration config, DatabusEventStore eventStore,
                          RateLimitedLogFactory logFactory, MetricRegistry metricRegistry,
                          Producer<String, JsonNode> producer, ObjectMapper objectMapper, Topic topic,
                          String subscriptionName, String partitionIdentifier) {
    this(config, eventStore, logFactory, metricRegistry, null, producer, objectMapper, topic,
            subscriptionName, partitionIdentifier, null);
}
Example #29
Source File: MetricsConfigurator.java, from datacollector (Apache License 2.0)
public static Gauge<Map<String, Object>> createStageGauge(MetricRegistry metrics, String nameSuffix,
        Comparator<String> comparator, final String pipelineName, final String pipelineRev) {
    String name = metricName(nameSuffix, GAUGE_SUFFIX);
    if (metrics.getGauges().containsKey(name)) {
        return metrics.getGauges().get(name);
    }
    return createGauge(metrics, nameSuffix, comparator, pipelineName, pipelineRev);
}
Example #30
Source File: ReadPerformance.java, from heftydb (Apache License 2.0)
public static void main(String[] args) throws Exception {
    Random random = new Random(System.nanoTime());

    Config config = new Config.Builder()
            .directory(TestFileHelper.TEMP_PATH)
            .compactionStrategy(CompactionStrategies.SIZE_TIERED_COMPACTION_STRATEGY)
            .tableCacheSize(512000000)
            .indexCacheSize(64000000)
            .maxWriteRate(Integer.MAX_VALUE)
            .build();

    MetricRegistry metrics = new MetricRegistry();
    ConsoleReporter reporter = PerformanceHelper.consoleReporter(metrics);
    Timer readTimer = metrics.register("reads", new Timer(new ExponentiallyDecayingReservoir()));

    DB db = HeftyDB.open(config);
    db.compact().get();

    // Read
    for (int i = 0; i < RECORD_COUNT * 10; i++) {
        String key = random.nextInt(RECORD_COUNT) + "";
        Timer.Context watch = readTimer.time();
        db.get(ByteBuffers.fromString(key));
        watch.stop();
    }

    reporter.report();
    db.logMetrics();
    db.close();
    System.exit(0);
}