Java Code Examples for com.codahale.metrics.Timer#Context
The following examples show how to use com.codahale.metrics.Timer#Context. Each example comes from a real open-source project; the source file, project, and license are noted above the code.
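All of the examples share one pattern: Timer#time() hands back a Timer.Context that starts the clock, and Timer.Context#stop() records the elapsed time. Because Context implements Closeable (close() simply delegates to stop()), it also works with try-with-resources. A minimal, self-contained sketch of both forms (the registry and timer names here are illustrative, not taken from any of the projects below):

import com.codahale.metrics.MetricRegistry;
import com.codahale.metrics.Timer;

public class TimerContextExample {
    private static final MetricRegistry registry = new MetricRegistry();
    private static final Timer requestTimer = registry.timer("requests");

    // Classic form: start the context, stop it in a finally block.
    static void timedWithFinally() {
        final Timer.Context ctx = requestTimer.time();
        try {
            doWork();
        } finally {
            ctx.stop(); // records the elapsed time in the timer
        }
    }

    // Equivalent form: Context implements Closeable, so close() stops the timer.
    static void timedWithResources() {
        try (Timer.Context ignored = requestTimer.time()) {
            doWork();
        }
    }

    private static void doWork() { /* work being measured */ }
}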
Example 1
Source File: DPreaggregatedMetricsRW.java From blueflood with Apache License 2.0
/**
 * Inserts a collection of rolled up metrics to the metrics_preaggregated_{granularity} column family.
 * Only our tests should call this method. Services should call either insertMetrics(Collection metrics)
 * or insertRollups()
 *
 * @param metrics
 * @throws IOException
 */
@VisibleForTesting
@Override
public void insertMetrics(Collection<IMetric> metrics, Granularity granularity) throws IOException {
    Timer.Context ctx = Instrumentation.getWriteTimerContext(
            CassandraModel.getPreaggregatedColumnFamilyName(granularity));
    try {
        Multimap<Locator, IMetric> map = asMultimap(metrics);
        if ( isBatchIngestEnabled ) {
            insertMetricsInBatch(map, granularity);
        } else {
            insertMetricsIndividually(map, granularity);
        }
    } finally {
        ctx.stop();
    }
}
Example 2
Source File: RollupBatchWriteRunnable.java From blueflood with Apache License 2.0
@Override
public void run() {
    Timer.Context ctx = batchWriteTimer.time();
    try {
        metricsRW.insertRollups(writeContexts);
    } catch (Exception e) {
        LOG.warn("not able to insert rollups", e);
        executionContext.markUnsuccessful(e);
    } finally {
        executionContext.decrementWriteCounter(writeContexts.size());
        rollupsPerBatch.update(writeContexts.size());
        rollupsWriteRate.mark(writeContexts.size());
        RollupService.lastRollupTime.set(System.currentTimeMillis());
        ctx.stop();
    }
}
Example 3
Source File: MetricFetcher.java From cruise-control with BSD 2-Clause "Simplified" License
/**
 * Execute one iteration of metric sampling for all the assigned partitions.
 */
protected void fetchMetricsForAssignedPartitions() throws MetricSamplingException {
    final Timer.Context ctx = _fetchTimer.time();
    try {
        MetricSampler.Samples samples = fetchSamples();
        if (_sampleStore != null) {
            _sampleStore.storeSamples(samples);
        }
        // TODO: evolve sample store interface to allow independent eviction time for different type of metric samples.
        // We are not calling sampleStore.evictSamplesBefore() because the broker metric samples and partition metric
        // samples may have different number of windows so they can not be evicted using the same timestamp.
    } catch (Exception e) {
        _fetchFailureRate.mark();
        throw e;
    } finally {
        ctx.stop();
    }
}
Example 4
Source File: DetectorManager.java From adaptive-alerting with Apache License 2.0
private Optional<DetectorResult> doDetection(DetectorContainer container, MetricData metricData) {
    try (Timer.Context autoClosable = detectTimer.apply(container.getName()).time()) {
        Optional<DetectorResult> optionalDetectorResult = Optional.empty();
        try {
            DetectorResult detectorResult = detectorExecutor.doDetection(container, metricData);
            optionalDetectorResult = Optional.of(detectorResult);
        } catch (Exception e) {
            log.error("Error during anomaly detection", e);
        } finally {
            markAnomalyLevelMeter(container.getDetector(), optionalDetectorResult);
        }
        return optionalDetectorResult;
    }
}
Example 5
Source File: BaseStateMachine.java From incubator-ratis with Apache License 2.0
protected CompletableFuture<Message> recordTime(Timer timer, Task task) {
    final Timer.Context timerContext = timer.time();
    try {
        return task.run();
    } finally {
        timerContext.stop();
    }
}
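Note that because the context is stopped in the finally block as soon as task.run() returns, this measures only the synchronous portion of an asynchronous task. If the goal were instead to time until the returned future actually completes, the context could be stopped in a completion callback; a hypothetical variant, not Ratis code:

protected CompletableFuture<Message> recordCompletionTime(Timer timer, Task task) {
    final Timer.Context timerContext = timer.time();
    // Stop the timer when the future completes (normally or exceptionally),
    // rather than when run() returns it.
    return task.run().whenComplete((message, throwable) -> timerContext.stop());
}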
Example 6
Source File: DumpIndexTool.java From ambry with Apache License 2.0
public List<IndexEntry> getAllEntriesFromIndexSegment(File segmentFile) throws IOException, StoreException {
    verifyPath(segmentFile, false);
    IndexSegment segment = new IndexSegment(segmentFile, false, storeKeyFactory, storeConfig, storeMetrics,
        new Journal(segmentFile.getParent(), 0, 0), time);
    List<IndexEntry> entries = new ArrayList<>();
    final Timer.Context context = metrics.findAllEntriesPerIndexTimeMs.time();
    try {
        segment.getIndexEntriesSince(null, new FindEntriesCondition(Long.MAX_VALUE), entries, new AtomicLong(0),
            false);
    } finally {
        context.stop();
    }
    return entries;
}
Example 7
Source File: AstyanaxWriter.java From blueflood with Apache License 2.0
public void insertRollups(List<SingleRollupWriteContext> writeContexts) throws ConnectionException {
    if (writeContexts.size() == 0) {
        return;
    }
    Timer.Context ctx = Instrumentation.getBatchWriteTimerContext(writeContexts.get(0).getDestinationCF().getName());
    MutationBatch mb = keyspace.prepareMutationBatch();
    for (SingleRollupWriteContext writeContext : writeContexts) {
        Rollup rollup = writeContext.getRollup();
        int ttl = (int)TTL_PROVIDER.getTTL(
                writeContext.getLocator().getTenantId(),
                writeContext.getGranularity(),
                writeContext.getRollup().getRollupType()).get().toSeconds();
        AbstractSerializer serializer = Serializers.serializerFor(rollup.getClass());
        try {
            mb.withRow(writeContext.getDestinationCF(), writeContext.getLocator())
                    .putColumn(writeContext.getTimestamp(), rollup, serializer, ttl);
        } catch (RuntimeException ex) {
            // let's not let stupidness prevent the rest of this put.
            log.warn(String.format("Cannot save %s", writeContext.getLocator().toString()), ex);
        }
    }
    try {
        mb.execute();
    } catch (ConnectionException e) {
        Instrumentation.markWriteError(e);
        log.error("Error writing rollup batch", e);
        throw e;
    } finally {
        ctx.stop();
    }
}
Example 8
Source File: IrisKafkaAppender.java From arcusplatform with Apache License 2.0
@Override
protected void append(E event) {
    final Timer.Context context = logTime.time();
    try {
        String msg = layout.doLayout(event);
        getKafka().send(new ProducerRecord<String,String>(topic,msg), this);
    } finally {
        context.stop();
    }
}
Example 9
Source File: RuleStore.java From notification with Apache License 2.0
/**
 * Asynchronously store a rule
 *
 * @param category Rule category
 * @param rule Rule to store
 * @throws NotificationStoreException if unable to store the rule
 */
public void store(final String category, final Rule rule) throws NotificationStoreException {
    Objects.requireNonNull(category, "category == null");
    Preconditions.checkArgument(!category.isEmpty(), "category cannot be empty");
    Objects.requireNonNull(rule, "rule == null");
    Preconditions.checkState(rule.isValid(), "rule is not valid");

    final Optional<Context> fetchContext = fetchContext();
    final MapUpdate op = new MapUpdate();
    op.update(category, getUpdate(rule, fetchContext));

    final UpdateMap.Builder builder = new UpdateMap.Builder(LOCATION, op).withTimeout(timeout);
    fetchContext.ifPresent(c -> builder.withContext(c));

    LOGGER.debug("Storing key (async): {}", LOCATION);
    try (Timer.Context context = storeTimer.time()) {
        final RiakFuture<UpdateMap.Response, Location> future = client.executeAsync(builder.build());
        future.await(requestTimeout.getQuantity(), requestTimeout.getUnit());
        if (future.isSuccess()) {
            LOGGER.debug("Successfully stored key: {}", LOCATION);
        }
    } catch (InterruptedException e) {
        LOGGER.warn("Store request was interrupted", e);
        Thread.currentThread().interrupt();
        throw new NotificationStoreException(e);
    }
    cache.invalidateAll();
}
Example 10
Source File: RocksRawKVStore.java From sofa-jraft with Apache License 2.0
@Override
public void getSequence(final byte[] seqKey, final int step, final KVStoreClosure closure) {
    final Timer.Context timeCtx = getTimeContext("GET_SEQUENCE");
    final Lock readLock = this.readWriteLock.readLock();
    readLock.lock();
    try {
        final byte[] prevBytesVal = this.db.get(this.sequenceHandle, seqKey);
        long startVal;
        if (prevBytesVal == null) {
            startVal = 0;
        } else {
            startVal = Bits.getLong(prevBytesVal, 0);
        }
        if (step < 0) {
            // never get here
            setFailure(closure, "Fail to [GET_SEQUENCE], step must >= 0");
            return;
        }
        if (step == 0) {
            setSuccess(closure, new Sequence(startVal, startVal));
            return;
        }
        final long endVal = getSafeEndValueForSequence(startVal, step);
        if (startVal != endVal) {
            final byte[] newBytesVal = new byte[8];
            Bits.putLong(newBytesVal, 0, endVal);
            this.db.put(this.sequenceHandle, this.writeOptions, seqKey, newBytesVal);
        }
        setSuccess(closure, new Sequence(startVal, endVal));
    } catch (final Exception e) {
        LOG.error("Fail to [GET_SEQUENCE], [key = {}, step = {}], {}.", BytesUtil.toHex(seqKey), step,
            StackTraceUtil.stackTrace(e));
        setCriticalError(closure, "Fail to [GET_SEQUENCE]", e);
    } finally {
        readLock.unlock();
        timeCtx.stop();
    }
}
Example 11
Source File: DAbstractMetricsRW.java From blueflood with Apache License 2.0
/**
 * Fetches a {@link com.rackspacecloud.blueflood.types.Points} object for a
 * particular locator and rollupType from the specified column family and
 * range
 *
 * @param locator
 * @param rollupType
 * @param range
 * @param columnFamilyName
 * @return
 */
@Override
public Points getDataToRollup(final Locator locator,
                              RollupType rollupType,
                              Range range,
                              String columnFamilyName) throws IOException {
    Timer.Context ctx = Instrumentation.getReadTimerContext(columnFamilyName);
    try {
        // read the rollup object from the proper IO class
        DAbstractMetricIO io = getIO( rollupType.name().toLowerCase(),
                CassandraModel.getGranularity( columnFamilyName ) );

        Table<Locator, Long, Object> locatorTimestampRollup = io.getRollupsForLocator( locator, columnFamilyName, range );

        Points points = new Points();
        for (Table.Cell<Locator, Long, Object> cell : locatorTimestampRollup.cellSet()) {
            points.add( createPoint( cell.getColumnKey(), cell.getValue()));
        }
        return points;
    } catch( Exception e ) {
        Instrumentation.markReadError();
        LOG.error( String.format( "Unable to read locator=%s rolluptype=%s columnFamilyName=%s for rollup",
                locator, rollupType.name(), columnFamilyName ), e );
        throw new IOException( e );
    } finally {
        ctx.stop();
    }
}
Example 12
Source File: GenericDistributedQueue.java From lucene-solr with Apache License 2.0
/**
 * Attempts to remove the head of the queue and return it. Returns null if the
 * queue is empty.
 *
 * @return Head of the queue or null.
 */
@Override
public byte[] poll() throws Exception {
    Timer.Context time = stats.time(dir + "_poll");
    try {
        return removeFirst();
    } finally {
        time.stop();
    }
}
Example 13
Source File: HomeGraphAPI.java From arcusplatform with Apache License 2.0
protected void sendReportState(UUID placeId, List<Model> devices, boolean hubOffline) {
    ReportStateRequest request = null;
    try (Timer.Context ctxt = GoogleMetrics.startReportStateTimer()) {
        // @formatter:off
        request = this.gRpcContext.getRequestBuilder()
            .withPlaceId(placeId)
            .withHubOffline(hubOffline)
            .withPayloadDevices(devices)
            .build();
        // @formatter:on
        request.send(); // throws on failure to post
        // Prod is set to debug level
        logger.trace("Successfully posted ReportState for {}: Request: {}", placeId, request);
        GoogleMetrics.incReportStateSuccesses();
    } catch (Exception e) {
        // sometimes we send google more information than they need. It's difficult to know which device didn't get
        // communicated with a SYNC call.
        if (e.getMessage().contains("Requested entity was not found")) {
            logger.trace("Sent data to Google for an unknown device in place [{}]: Request: {}", placeId, request, e);
        } else {
            logger.warn("Failed to post ReportState for {}: Request: {}", placeId, request, e);
        }
        GoogleMetrics.incReportStateFailures();
    }
}
Example 14
Source File: MetricHelper.java From metrics-sql with Apache License 2.0
public Timer.Context startConnectionGetTimer() {
    return startTimer(metricNamingStrategy.getConnectionGetTimer());
}
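Unlike most of the examples above, this helper hands the freshly started context back to its caller, which then becomes responsible for stopping it. A hypothetical call site (the dataSource and the surrounding method are assumptions for illustration, not metrics-sql code):

Connection getConnectionTimed(DataSource dataSource, MetricHelper metricHelper) throws SQLException {
    final Timer.Context ctx = metricHelper.startConnectionGetTimer();
    try {
        return dataSource.getConnection();
    } finally {
        ctx.stop(); // records how long obtaining the connection took
    }
}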
Example 15
Source File: DeviceDAOImpl.java From arcusplatform with Apache License 2.0
@Override
public void replaceDriverState(Device device, DeviceDriverStateHolder state) {
    try(Timer.Context ctx = replaceDriverStateTimer.time()) {
        executeStateUpdate(device, state, true);
    }
}
Example 16
Source File: HardDeleter.java From ambry with Apache License 2.0
private void persistCleanupToken() throws IOException, StoreException {
    /* The cleanup token format is as follows:
       --
       token_version

       Cleanup_Token_Version_V0
           startTokenForRecovery
           endTokenForRecovery
           numBlobsInRange
           --
           blob1_blobReadOptions {version, offset, sz, ttl, key}
           blob2_blobReadOptions
           ....
           blobN_blobReadOptions
           --
           length_of_blob1_messageStoreRecoveryInfo
           blob1_messageStoreRecoveryInfo {headerVersion, userMetadataVersion, userMetadataSize, blobRecordVersion,
            blobStreamSize}
           length_of_blob2_messageStoreRecoveryInfo
           blob2_messageStoreRecoveryInfo
           ....
           length_of_blobN_messageStoreRecoveryInfo
           blobN_messageStoreRecoveryInfo

       Cleanup_Token_Version_V1
           startTokenForRecovery
           endTokenForRecovery
           pause flag
           numBlobsInRange
           --
           blob1_blobReadOptions {version, offset, sz, ttl, key}
           blob2_blobReadOptions
           ....
           blobN_blobReadOptions
           --
           length_of_blob1_messageStoreRecoveryInfo
           blob1_messageStoreRecoveryInfo {headerVersion, userMetadataVersion, userMetadataSize, blobRecordVersion,
            blobStreamSize}
           length_of_blob2_messageStoreRecoveryInfo
           blob2_messageStoreRecoveryInfo
           ....
           length_of_blobN_messageStoreRecoveryInfo
           blobN_messageStoreRecoveryInfo

       --
       crc
       ---
     */
    if (endToken == null) {
        return;
    }
    final Timer.Context context = metrics.cleanupTokenFlushTime.time();
    File tempFile = new File(dataDir, Cleanup_Token_Filename + ".tmp");
    File actual = new File(dataDir, Cleanup_Token_Filename);
    FileOutputStream fileStream = new FileOutputStream(tempFile);
    CrcOutputStream crc = new CrcOutputStream(fileStream);
    DataOutputStream writer = new DataOutputStream(crc);
    try {
        // write the current version
        writer.writeShort(Cleanup_Token_Version_V1);
        writer.write(startTokenSafeToPersist.toBytes());
        writer.write(endToken.toBytes());
        writer.writeByte(isPaused() ? (byte) 1 : (byte) 0);
        writer.write(hardDeleteRecoveryRange.toBytes());
        long crcValue = crc.getValue();
        writer.writeLong(crcValue);
        fileStream.getChannel().force(true);
        tempFile.renameTo(actual);
        if (config.storeSetFilePermissionEnabled) {
            Files.setPosixFilePermissions(actual.toPath(), config.storeDataFilePermission);
        }
    } catch (IOException e) {
        StoreErrorCodes errorCode = StoreException.resolveErrorCode(e);
        throw new StoreException(
            errorCode.toString() + " while persisting cleanup tokens to disk " + tempFile.getAbsoluteFile(),
            errorCode);
    } finally {
        writer.close();
        context.stop();
    }
    logger.debug("Completed writing cleanup tokens to file {}", actual.getAbsolutePath());
}
Example 17
Source File: CassandraIndexer.java From newts with Apache License 2.0
@Override
public void update(Collection<Sample> samples) {
    Timer.Context ctx = m_updateTimer.time();

    Set<StatementGenerator> generators = Sets.newHashSet();
    Map<Context, Map<Resource, ResourceMetadata>> cacheQueue = Maps.newHashMap();

    for (Sample sample : samples) {
        maybeIndexResource(cacheQueue, generators, sample.getContext(), sample.getResource());
        maybeIndexResourceAttributes(cacheQueue, generators, sample.getContext(), sample.getResource());
        maybeAddMetricName(cacheQueue, generators, sample.getContext(), sample.getResource(), sample.getName());
    }

    try {
        if (!generators.isEmpty()) {
            synchronized(statementsInFlight) {
                generators.removeAll(statementsInFlight);
                statementsInFlight.addAll(generators);
            }
            m_inserts.mark(generators.size());

            // Asynchronously execute the statements
            List<ResultSetFuture> futures = Lists.newArrayList();
            for (Statement statementToExecute : toStatements(generators)) {
                futures.add(m_session.executeAsync(statementToExecute));
            }

            for (ResultSetFuture future : futures) {
                future.getUninterruptibly();
            }
        }

        // Order matters here; We want the cache updated only after a successful Cassandra write.
        for (Context context : cacheQueue.keySet()) {
            for (Map.Entry<Resource, ResourceMetadata> entry : cacheQueue.get(context).entrySet()) {
                m_cache.merge(context, entry.getKey(), entry.getValue());
            }
        }
    } finally {
        synchronized(statementsInFlight) {
            statementsInFlight.removeAll(generators);
        }
        ctx.stop();
    }
}
Example 18
Source File: EndpointMetrics.java From vertx-dropwizard-metrics with Apache License 2.0
public void dequeueRequest(Timer.Context taskMetric) {
    queueSize.dec();
    taskMetric.stop();
}
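Here the context is received as a parameter and closed out when the request leaves the queue; the matching enqueue side, not shown here, presumably increments the gauge and starts the timer. A hypothetical counterpart (the queueDelay field name is an assumption, not confirmed from the project):

public Timer.Context enqueueRequest() {
    queueSize.inc();          // one more request waiting in the queue
    return queueDelay.time(); // caller holds this context and passes it to dequeueRequest() later
}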
Example 19
Source File: VideoServiceDao.java From arcusplatform with Apache License 2.0
public void addTags(UUID placeId, UUID recordingId, Collection<String> tags) throws Exception {
    try (Timer.Context context = ADD_TAGS.time()) {
        videoDao.addTags(placeId, recordingId, ImmutableSet.copyOf(tags), ttlResolver.resolveTtlInSeconds(placeId));
    }
}
Example 20
Source File: Metrics.java From dcos-commons with Apache License 2.0
/**
 * Returns a timer context which may be used to measure the time spent processing offers. The returned timer must
 * be terminated by invoking {@link Timer.Context#stop()}.
 */
public static Timer.Context getProcessOffersDurationTimer() {
    return METRICS.timer(PROCESS_OFFERS).time();
}
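A minimal call site honoring that contract might look like the following (processOffers() and offers are placeholders for illustration, not dcos-commons code):

Timer.Context timer = Metrics.getProcessOffersDurationTimer();
try {
    processOffers(offers); // placeholder for the actual offer-handling logic
} finally {
    timer.stop(); // fulfill the contract stated in the javadoc above
}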