org.apache.hadoop.metrics.MetricsRecord Java Examples
The following examples show how to use org.apache.hadoop.metrics.MetricsRecord, part of Hadoop's original (pre-metrics2) metrics framework. A MetricsRecord is a named set of metric values and tags that application code fills in and a MetricsContext periodically emits. Each example notes the source file, project, and license it was taken from.
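Before the examples, a minimal sketch of the life cycle every snippet below revolves around: look up a MetricsContext, create a named MetricsRecord in it, set or increment values, and call update() to stage them for the context's next emission. The "myapp" context name, tag, and metric names here are placeholders for illustration, not taken from the projects below.

import org.apache.hadoop.metrics.MetricsContext;
import org.apache.hadoop.metrics.MetricsRecord;
import org.apache.hadoop.metrics.MetricsUtil;

public class MetricsRecordSketch {
  public static void main(String[] args) {
    // Look up (or lazily create) the context named "myapp".
    MetricsContext context = MetricsUtil.getContext("myapp");
    // Create a record in that context; the name groups related metrics.
    MetricsRecord record = MetricsUtil.createRecord(context, "requests");
    record.setTag("hostname", "worker-1");   // tags identify the source
    record.setMetric("queue_size", 42);      // absolute gauge value
    record.incrMetric("handled", 7);         // delta since the last update
    record.update();                         // stage values for emission
  }
}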
Example #1
Source File: MetricsTimeVaryingRate.java From RDFS with Apache License 2.0

/**
 * Push the delta metrics to the mr.
 * The delta is since the last push/interval.
 *
 * Note this does NOT push to JMX
 * (JMX gets the info via {@link #getPreviousIntervalAverageTime()} and
 * {@link #getPreviousIntervalNumOps()}).
 *
 * @param mr metrics record. If null, only the interval heartbeat is performed.
 */
public void pushMetric(final MetricsRecord mr) {
  lock.lock();
  try {
    intervalHeartBeat();
    try {
      if (mr != null) {
        mr.incrMetric(getName() + "_num_ops", getPreviousIntervalNumOps());
        mr.setMetric(getName() + "_avg_time", getPreviousIntervalAverageTime());
        if (printMinMax) {
          mr.setMetric(getName() + "_min", getMinTime());
          mr.setMetric(getName() + "_max", getMaxTime());
          resetMinMax();
        }
      }
    } catch (Exception e) {
      LOG.info("pushMetric failed for " + getName() + "\n" +
          StringUtils.stringifyException(e));
    }
  } finally {
    lock.unlock();
  }
}
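Helpers like the MetricsTimeVaryingRate above are usually driven from an Updater: application code calls inc(...) as operations complete, and the periodic doUpdates(...) callback pushes the interval's delta into the record. A sketch of that pattern, assuming a hypothetical "myapp" context and "rpc" record (the class and method names here are illustrative, not from RDFS):

import org.apache.hadoop.metrics.MetricsContext;
import org.apache.hadoop.metrics.MetricsRecord;
import org.apache.hadoop.metrics.MetricsUtil;
import org.apache.hadoop.metrics.Updater;
import org.apache.hadoop.metrics.util.MetricsRegistry;
import org.apache.hadoop.metrics.util.MetricsTimeVaryingRate;

// Sketch of the usual driver for pushMetric-style helpers: operations feed
// inc(), and the periodic doUpdates() callback pushes the interval's delta.
public class RpcTimerSketch implements Updater {
  private final MetricsRegistry registry = new MetricsRegistry();
  private final MetricsTimeVaryingRate rpcTime =
      new MetricsTimeVaryingRate("rpc", registry);
  private final MetricsRecord record;

  public RpcTimerSketch() {
    MetricsContext context = MetricsUtil.getContext("myapp");
    record = MetricsUtil.createRecord(context, "rpc");
    context.registerUpdater(this);  // doUpdates() now runs once per period
  }

  public void recordCall(long elapsedMillis) {
    rpcTime.inc(elapsedMillis);  // one operation took elapsedMillis ms
  }

  @Override
  public void doUpdates(MetricsContext unused) {
    rpcTime.pushMetric(record);  // emits rpc_num_ops and rpc_avg_time
    record.update();
  }
}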
Example #2
Source File: MetricsTimeVaryingInt.java From RDFS with Apache License 2.0

/**
 * Push the delta metrics to the mr.
 * The delta is since the last push/interval.
 *
 * Note this does NOT push to JMX
 * (JMX gets the info via {@link #previousIntervalValue}).
 *
 * @param mr metrics record
 */
public void pushMetric(final MetricsRecord mr) {
  lock.lock();
  try {
    intervalHeartBeat();
    try {
      mr.incrMetric(getName(), getPreviousIntervalValue());
    } catch (Exception e) {
      LOG.info("pushMetric failed for " + getName() + "\n" +
          StringUtils.stringifyException(e));
    }
  } finally {
    lock.unlock();
  }
}
Example #3
Source File: ProxyJobTracker.java From RDFS with Apache License 2.0

@Override
public void doUpdates(MetricsContext unused) {
  synchronized (aggregateJobStats) {
    // Update metrics with aggregate job stats and reset the aggregate.
    aggregateJobStats.incrementMetricsAndReset(metricsRecord);
    incrementMetricsAndReset(metricsRecord, aggregateCounters);
    for (Map.Entry<String, MetricsRecord> entry :
         poolToMetricsRecord.entrySet()) {
      String pool = entry.getKey();
      JobStats poolJobStats = poolToJobStats.get(pool);
      poolJobStats.incrementMetricsAndReset(entry.getValue());
      Counters poolCounters = poolToJobCounters.get(pool);
      incrementMetricsAndReset(entry.getValue(), poolCounters);
    }
  }
}
Example #4
Source File: ProxyJobTracker.java From RDFS with Apache License 2.0

private static void incrementMetricsAndReset(
    MetricsRecord record, Counters counters) {
  // Now update metrics with the counters and reset the aggregate.
  for (Counters.Group group : counters) {
    String groupName = group.getName();
    for (Counter counter : group) {
      String name = groupName + "_" + counter.getName();
      name = name.replaceAll("[^a-zA-Z_]", "_").toLowerCase();
      record.incrMetric(name, counter.getValue());
    }
  }
  // Reset the aggregate counters.
  for (Counters.Group g : counters) {
    for (Counter c : g) {
      c.setValue(0);
    }
  }
  record.update();
}
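The replaceAll/toLowerCase line above is what turns free-form counter display names into flat metric keys: anything that is not a letter or an underscore (spaces, digits, punctuation) becomes an underscore. A standalone illustration, with made-up group and counter names:

public class CounterNameDemo {
  public static void main(String[] args) {
    // Hypothetical group and counter names, joined the same way as above.
    String name = "Map-Reduce Framework" + "_" + "Spilled Records";
    name = name.replaceAll("[^a-zA-Z_]", "_").toLowerCase();
    System.out.println(name);  // map_reduce_framework_spilled_records
  }
}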
Example #5
Source File: ProxyJobTracker.java From RDFS with Apache License 2.0

@Override
public void reportJobStats(
    String jobId, String pool, JobStats stats, Counters counters) {
  synchronized (aggregateJobStats) {
    aggregateJobStats.accumulate(stats);
    JobStats poolJobStats = poolToJobStats.get(pool);
    if (poolJobStats == null) {
      poolJobStats = new JobStats();
      poolToJobStats.put(pool, poolJobStats);
    }
    poolJobStats.accumulate(stats);

    accumulateCounters(aggregateCounters, counters);
    Counters poolCounters = poolToJobCounters.get(pool);
    if (poolCounters == null) {
      poolCounters = new Counters();
      poolToJobCounters.put(pool, poolCounters);
    }
    accumulateCounters(poolCounters, counters);

    if (!poolToMetricsRecord.containsKey(pool)) {
      MetricsRecord poolRecord = context.createRecord("pool-" + pool);
      poolToMetricsRecord.put(pool, poolRecord);
    }
  }
}
Example #6
Source File: FairSchedulerMetricsInst.java From RDFS with Apache License 2.0

private void submitPoolMetrics(PoolInfo info) {
  MetricsRecord record = poolToMetricsRecord.get(info.poolName);
  if (record == null) {
    record = MetricsUtil.createRecord(context, "pool-" + info.poolName);
    FairScheduler.LOG.info("Create metrics record for pool:" + info.poolName);
    poolToMetricsRecord.put(info.poolName, record);
  }
  record.setMetric("min_map", info.minMaps);
  record.setMetric("min_reduce", info.minReduces);
  record.setMetric("max_map", info.maxMaps);
  record.setMetric("max_reduce", info.maxReduces);
  record.setMetric("running_map", info.runningMaps);
  record.setMetric("running_reduce", info.runningReduces);
  record.setMetric("runnable_map", info.runnableMaps);
  record.setMetric("runnable_reduce", info.runnableReduces);
  record.setMetric("inited_tasks", info.initedTasks);
  record.setMetric("max_inited_tasks", info.maxInitedTasks);
  int runningJobs = info.runningJobs;
  // Guard against division by zero when no jobs are running.
  record.setMetric("avg_first_map_wait_ms",
      (runningJobs == 0) ? 0 : info.totalFirstMapWaitTime / runningJobs);
  record.setMetric("avg_first_reduce_wait_ms",
      (runningJobs == 0) ? 0 : info.totalFirstReduceWaitTime / runningJobs);
}
Example #7
Source File: FairSchedulerMetricsInst.java From RDFS with Apache License 2.0

public FairSchedulerMetricsInst(FairScheduler scheduler, Configuration conf) {
  // Create a record for map-reduce metrics
  metricsRecord = MetricsUtil.createRecord(context, "fairscheduler");
  poolToMetricsRecord = new HashMap<String, MetricsRecord>();
  context.registerUpdater(this);
  updatePeriod = conf.getLong("mapred.fairscheduler.metric.update.period",
      5 * 1000);  // default period is 5 seconds
  jobInitializer = scheduler.getJobInitializer();
}
Example #8
Source File: FairSchedulerMetricsInst.java From RDFS with Apache License 2.0

@Override
public void doUpdates(MetricsContext context) {
  long now = JobTracker.getClock().getTime();
  if (now - lastUpdateTime > updatePeriod) {
    updateMetrics();
    lastUpdateTime = now;
  }
  updateCounters();
  metricsRecord.update();
  for (MetricsRecord mr : poolToMetricsRecord.values()) {
    mr.update();
  }
}
Example #9
Source File: JMXContext.java From RDFS with Apache License 2.0

@Override
protected MetricsRecord newRecord(String recordName) {
  MetricsRecord record = super.newRecord(recordName);
  if (records.isEmpty() || records.contains(recordName)) {
    // Create MBean to expose this record,
    // only if this record is to be exposed through JMX.
    getOrCreateMBean(recordName);
  }
  return record;
}
Example #10
Source File: CompositeContext.java From hadoop-gpu with Apache License 2.0

private static Method initMethod() {
  try {
    return MetricsRecord.class.getMethod("getRecordName", new Class[0]);
  } catch (Exception e) {
    throw new RuntimeException("Internal error", e);
  }
}
Example #11
Source File: PoolInfoMetrics.java From RDFS with Apache License 2.0

/**
 * Constructor for a pool info of a specific resource.
 *
 * @param poolInfo Pool info
 * @param type Resource type
 * @param record The metrics record for this object
 */
public PoolInfoMetrics(PoolInfo poolInfo, ResourceType type,
                       MetricsRecord record) {
  this.poolInfo = poolInfo;
  this.type = type;
  this.counters =
      Collections.synchronizedMap(new HashMap<MetricName, Long>());
  this.record = record;
}
Example #12
Source File: Scheduler.java From RDFS with Apache License 2.0

/**
 * Submit the metrics.
 *
 * @param metricsRecord Where the metrics will be submitted
 */
public void submitMetrics(MetricsRecord metricsRecord) {
  List<PoolMetadata> poolMetadatas = getPoolMetadataList();
  PoolFairnessCalculator.calculateFairness(poolMetadatas, metricsRecord);
  for (SchedulerForType scheduler : schedulersForTypes.values()) {
    scheduler.submitMetrics();
  }
}
Example #13
Source File: JobStats.java From RDFS with Apache License 2.0

public synchronized void incrementMetricsAndReset(
    MetricsRecord metricsRecord) {
  metricsRecord.incrMetric("maps_launched", getNumMapTasksLaunched());
  metricsRecord.incrMetric("maps_completed", getNumMapTasksCompleted());
  metricsRecord.incrMetric("maps_failed", getNumMapTasksFailed());
  metricsRecord.incrMetric("reduces_launched", getNumReduceTasksLaunched());
  metricsRecord.incrMetric("reduces_completed", getNumReduceTasksCompleted());
  metricsRecord.incrMetric("reduces_failed", getNumReduceTasksFailed());
  metricsRecord.incrMetric("num_speculative_maps", getNumSpeculativeMaps());
  metricsRecord.incrMetric("num_speculative_reduces",
      getNumSpeculativeReduces());
  metricsRecord.incrMetric("num_speculative_succeeded_maps",
      getNumSpeculativeSucceededMaps());
  metricsRecord.incrMetric("num_speculative_succeeded_reduces",
      getNumSpeculativeSucceededReduces());
  metricsRecord.incrMetric("num_speculative_wasted_maps",
      getNumSpeculativeWasteMaps());
  metricsRecord.incrMetric("num_speculative_wasted_reduces",
      getNumSpeculativeWasteReduces());
  metricsRecord.incrMetric("speculative_map_time_waste",
      getSpeculativeMapTimeWaste());
  // The original snippet passed getSpeculativeMapTimeWaste() here as well,
  // which looks like a copy-paste slip; the reduce-side getter is assumed.
  metricsRecord.incrMetric("speculative_reduce_time_waste",
      getSpeculativeReduceTimeWaste());
  metricsRecord.incrMetric("killed_tasks_map_time", getKilledMapTime());
  metricsRecord.incrMetric("killed_tasks_reduce_time", getKilledReduceTime());
  metricsRecord.incrMetric("failed_tasks_map_time", getFailedMapTime());
  metricsRecord.incrMetric("failed_tasks_reduce_time", getFailedReduceTime());
  metricsRecord.incrMetric("num_dataLocal_maps", getNumDataLocalMaps());
  metricsRecord.incrMetric("num_rackLocal_maps", getNumRackLocalMaps());
  metricsRecord.incrMetric("maps_killed", getNumMapTasksKilled());
  metricsRecord.incrMetric("reduces_killed", getNumReduceTasksKilled());
  metricsRecord.incrMetric("total_map_input_bytes", getTotalMapInputBytes());
  metricsRecord.incrMetric("local_map_input_bytes", getLocalMapInputBytes());
  metricsRecord.incrMetric("rack_map_input_bytes", getRackMapInputBytes());
  metricsRecord.incrMetric("maps_failed_by_fetch_failures",
      getNumMapTasksFailedByFetchFailures());
  metricsRecord.incrMetric("map_fetches_failed", getNumMapFetchFailures());
  reset();
}
Example #14
Source File: CompositeContext.java From hadoop-gpu with Apache License 2.0

MetricsRecordDelegator(String recordName, ArrayList<MetricsContext> ctxts) {
  this.recordName = recordName;
  this.subrecs = new ArrayList<MetricsRecord>(ctxts.size());
  for (MetricsContext ctxt : ctxts) {
    subrecs.add(ctxt.createRecord(recordName));
  }
}
Example #15
Source File: CompositeContext.java From RDFS with Apache License 2.0

@Override
public MetricsRecord newRecord(String recordName) {
  return (MetricsRecord) Proxy.newProxyInstance(
      MetricsRecord.class.getClassLoader(),
      new Class[] { MetricsRecord.class },
      new MetricsRecordDelegator(recordName, subctxt));
}
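The proxy above fans every MetricsRecord call out to one sub-record per wrapped context; Example #14 shows the delegator's constructor building that sub-record list. The delegator's invoke method is not reproduced on this page, so the following is an assumed sketch of how such a handler behaves, not the verbatim Hadoop source (Examples #10, #17, #20 and #30 cache the getRecordName Method for exactly this kind of comparison):

// Assumed sketch of the InvocationHandler behind the proxy.
// getRecordName is answered locally; every other MetricsRecord method
// (setMetric, incrMetric, update, ...) is applied to each sub-record.
@Override
public Object invoke(Object proxy, Method method, Object[] args)
    throws Throwable {
  if ("getRecordName".equals(method.getName())) {
    return recordName;
  }
  for (MetricsRecord rec : subrecs) {
    method.invoke(rec, args);
  }
  return null;
}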
Example #16
Source File: MetricsIntValue.java From hadoop-gpu with Apache License 2.0

/**
 * Push the metric to the mr.
 * The metric is pushed only if it was updated since the last push.
 *
 * Note this does NOT push to JMX
 * (JMX gets the info via {@link #get()}).
 *
 * @param mr metrics record
 */
public synchronized void pushMetric(final MetricsRecord mr) {
  if (changed) {
    try {
      mr.setMetric(getName(), value);
    } catch (Exception e) {
      LOG.info("pushMetric failed for " + getName() + "\n" +
          StringUtils.stringifyException(e));
    }
  }
  changed = false;
}
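MetricsIntValue is the gauge-style counterpart to the time-varying classes: set(...) stores a value and marks it changed, and pushMetric(...) writes it only if it changed since the last push, then clears the flag. A small assumed-usage sketch (the class and names here are illustrative):

import org.apache.hadoop.metrics.MetricsRecord;
import org.apache.hadoop.metrics.util.MetricsIntValue;
import org.apache.hadoop.metrics.util.MetricsRegistry;

public class IntValueSketch {
  private final MetricsRegistry registry = new MetricsRegistry();
  private final MetricsIntValue queueSize =
      new MetricsIntValue("queue_size", registry);

  public void sample(int depth) {
    queueSize.set(depth);  // records the value and marks it changed
  }

  // Called from an Updater's doUpdates(); writes only when changed.
  public void push(MetricsRecord record) {
    queueSize.pushMetric(record);
    record.update();
  }
}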
Example #17
Source File: CompositeContext.java From RDFS with Apache License 2.0

private static Method initMethod() {
  try {
    return MetricsRecord.class.getMethod("getRecordName", new Class[0]);
  } catch (Exception e) {
    throw new RuntimeException("Internal error", e);
  }
}
Example #18
Source File: MetricsIntValue.java From big-c with Apache License 2.0

/**
 * Push the metric to the mr.
 * The metric is pushed only if it was updated since the last push.
 *
 * Note this does NOT push to JMX
 * (JMX gets the info via {@link #get()}).
 *
 * @param mr metrics record
 */
public synchronized void pushMetric(final MetricsRecord mr) {
  if (changed) {
    try {
      mr.setMetric(getName(), value);
    } catch (Exception e) {
      LOG.info("pushMetric failed for " + getName() + "\n", e);
    }
  }
  changed = false;
}
Example #19
Source File: CompositeContext.java From hadoop with Apache License 2.0

@InterfaceAudience.Private
@Override
public MetricsRecord newRecord(String recordName) {
  return (MetricsRecord) Proxy.newProxyInstance(
      MetricsRecord.class.getClassLoader(),
      new Class[] { MetricsRecord.class },
      new MetricsRecordDelegator(recordName, subctxt));
}
Example #20
Source File: CompositeContext.java From hadoop with Apache License 2.0

private static Method initMethod() {
  try {
    return MetricsRecord.class.getMethod("getRecordName", new Class[0]);
  } catch (Exception e) {
    throw new RuntimeException("Internal error", e);
  }
}
Example #21
Source File: CompositeContext.java From hadoop with Apache License 2.0

MetricsRecordDelegator(String recordName, ArrayList<MetricsContext> ctxts) {
  this.recordName = recordName;
  this.subrecs = new ArrayList<MetricsRecord>(ctxts.size());
  for (MetricsContext ctxt : ctxts) {
    subrecs.add(ctxt.createRecord(recordName));
  }
}
Example #22
Source File: MetricsIntValue.java From hadoop with Apache License 2.0

/**
 * Push the metric to the mr.
 * The metric is pushed only if it was updated since the last push.
 *
 * Note this does NOT push to JMX
 * (JMX gets the info via {@link #get()}).
 *
 * @param mr metrics record
 */
public synchronized void pushMetric(final MetricsRecord mr) {
  if (changed) {
    try {
      mr.setMetric(getName(), value);
    } catch (Exception e) {
      LOG.info("pushMetric failed for " + getName() + "\n", e);
    }
  }
  changed = false;
}
Example #23
Source File: CompositeContext.java From hadoop-gpu with Apache License 2.0

@Override
public MetricsRecord newRecord(String recordName) {
  return (MetricsRecord) Proxy.newProxyInstance(
      MetricsRecord.class.getClassLoader(),
      new Class[] { MetricsRecord.class },
      new MetricsRecordDelegator(recordName, subctxt));
}
Example #24
Source File: MetricsTimeVaryingLong.java From RDFS with Apache License 2.0

/**
 * Push the delta metrics to the mr.
 * The delta is since the last push/interval.
 *
 * Note this does NOT push to JMX
 * (JMX gets the info via {@link #previousIntervalValue}).
 *
 * @param mr metrics record
 */
public void pushMetric(final MetricsRecord mr) {
  lock.lock();
  try {
    intervalHeartBeat();
    try {
      mr.incrMetric(getName(), getPreviousIntervalValue());
    } catch (Exception e) {
      LOG.info("pushMetric failed for " + getName() + "\n" +
          StringUtils.stringifyException(e));
    }
  } finally {
    lock.unlock();
  }
}
Example #25
Source File: MetricsTimeVaryingRate.java From hadoop-gpu with Apache License 2.0

/**
 * Push the delta metrics to the mr.
 * The delta is since the last push/interval.
 *
 * Note this does NOT push to JMX
 * (JMX gets the info via {@link #getPreviousIntervalAverageTime()} and
 * {@link #getPreviousIntervalNumOps()}).
 *
 * @param mr metrics record
 */
public synchronized void pushMetric(final MetricsRecord mr) {
  intervalHeartBeat();
  try {
    mr.incrMetric(getName() + "_num_ops", getPreviousIntervalNumOps());
    mr.setMetric(getName() + "_avg_time", getPreviousIntervalAverageTime());
  } catch (Exception e) {
    LOG.info("pushMetric failed for " + getName() + "\n" +
        StringUtils.stringifyException(e));
  }
}
Example #26
Source File: MetricsTimeVaryingLong.java From hadoop-gpu with Apache License 2.0

/**
 * Push the delta metrics to the mr.
 * The delta is since the last push/interval.
 *
 * Note this does NOT push to JMX
 * (JMX gets the info via {@link #previousIntervalValue}).
 *
 * @param mr metrics record
 */
public synchronized void pushMetric(final MetricsRecord mr) {
  intervalHeartBeat();
  try {
    mr.incrMetric(getName(), getPreviousIntervalValue());
  } catch (Exception e) {
    LOG.info("pushMetric failed for " + getName() + "\n" +
        StringUtils.stringifyException(e));
  }
}
Example #27
Source File: MetricsIntValue.java From RDFS with Apache License 2.0

/**
 * Push the metric to the mr.
 * The metric is pushed only if it was updated since the last push.
 *
 * Note this does NOT push to JMX
 * (JMX gets the info via {@link #get()}).
 *
 * @param mr metrics record
 */
public synchronized void pushMetric(final MetricsRecord mr) {
  if (changed) {
    try {
      mr.setMetric(getName(), value);
    } catch (Exception e) {
      LOG.info("pushMetric failed for " + getName() + "\n" +
          StringUtils.stringifyException(e));
    }
  }
  changed = false;
}
Example #28
Source File: MetricsTimeVaryingInt.java From hadoop-gpu with Apache License 2.0

/**
 * Push the delta metrics to the mr.
 * The delta is since the last push/interval.
 *
 * Note this does NOT push to JMX
 * (JMX gets the info via {@link #previousIntervalValue}).
 *
 * @param mr metrics record
 */
public synchronized void pushMetric(final MetricsRecord mr) {
  intervalHeartBeat();
  try {
    mr.incrMetric(getName(), getPreviousIntervalValue());
  } catch (Exception e) {
    LOG.info("pushMetric failed for " + getName() + "\n" +
        StringUtils.stringifyException(e));
  }
}
Example #29
Source File: CompositeContext.java From big-c with Apache License 2.0

@InterfaceAudience.Private
@Override
public MetricsRecord newRecord(String recordName) {
  return (MetricsRecord) Proxy.newProxyInstance(
      MetricsRecord.class.getClassLoader(),
      new Class[] { MetricsRecord.class },
      new MetricsRecordDelegator(recordName, subctxt));
}
Example #30
Source File: CompositeContext.java From big-c with Apache License 2.0

private static Method initMethod() {
  try {
    return MetricsRecord.class.getMethod("getRecordName", new Class[0]);
  } catch (Exception e) {
    throw new RuntimeException("Internal error", e);
  }
}