org.apache.hadoop.mapreduce.Counter Java Examples
The following examples show how to use org.apache.hadoop.mapreduce.Counter.
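Before the project examples, a minimal sketch of the typical pattern may help: a counter is obtained from the task context, either by a user-defined enum or by group/name strings, and incremented as records flow through. The class and counter names below (ExampleMapper, MyCounters, the "Example" group) are illustrative only, not from any of the projects that follow.

import java.io.IOException;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

public class ExampleMapper extends Mapper<LongWritable, Text, Text, IntWritable> {
  // Hypothetical user-defined counters; any enum works as a counter key.
  enum MyCounters { RECORDS_SEEN, EMPTY_LINES }

  @Override
  protected void map(LongWritable key, Text value, Context context)
      throws IOException, InterruptedException {
    // Enum form: the group and counter names derive from the enum class and constant.
    context.getCounter(MyCounters.RECORDS_SEEN).increment(1);
    if (value.getLength() == 0) {
      // String form: explicit group and counter names.
      context.getCounter("Example", "EmptyLines").increment(1);
      return;
    }
    context.write(value, new IntWritable(1));
  }
}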
Example #1
Source File: TableRecordReaderImpl.java From hbase with Apache License 2.0
protected static void updateCounters(ScanMetrics scanMetrics, long numScannerRestarts,
    Method getCounter, TaskAttemptContext context, long numStale) {
  // we can get access to counters only if hbase uses new mapreduce APIs
  if (getCounter == null) {
    return;
  }
  try {
    for (Map.Entry<String, Long> entry : scanMetrics.getMetricsMap().entrySet()) {
      Counter ct = (Counter) getCounter.invoke(context, HBASE_COUNTER_GROUP_NAME, entry.getKey());
      ct.increment(entry.getValue());
    }
    ((Counter) getCounter.invoke(context, HBASE_COUNTER_GROUP_NAME,
        "NUM_SCANNER_RESTARTS")).increment(numScannerRestarts);
    ((Counter) getCounter.invoke(context, HBASE_COUNTER_GROUP_NAME,
        "NUM_SCAN_RESULTS_STALE")).increment(numStale);
  } catch (Exception e) {
    LOG.debug("can't update counter." + StringUtils.stringifyException(e));
  }
}
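The reflection above exists so the same code can run against both the old and the new MapReduce APIs. When the new API can be assumed, the update collapses to a direct call; a minimal sketch of the first loop only, not how hbase itself does it:

// Sketch: direct-API equivalent of the reflective loop above, assuming a
// new-API (org.apache.hadoop.mapreduce) TaskAttemptContext.
for (Map.Entry<String, Long> entry : scanMetrics.getMetricsMap().entrySet()) {
  context.getCounter(HBASE_COUNTER_GROUP_NAME, entry.getKey())
      .increment(entry.getValue());
}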
Example #2
Source File: UpdateReducer.java From incubator-retired-blur with Apache License 2.0
@Override
protected void setup(final Context context) throws IOException, InterruptedException {
  BlurOutputFormat.setProgressable(context);
  BlurOutputFormat.setGetCounter(new GetCounter() {
    @Override
    public Counter getCounter(Enum<?> counterName) {
      return context.getCounter(counterName);
    }
  });
  _newRecordsUpdate = context.getCounter(BLUR_UPDATE, NEW_RECORDS + SEP + UPDATE);
  _newRecordsNoUpdate = context.getCounter(BLUR_UPDATE, NEW_RECORDS + SEP + NO_UPDATE);
  _existingRecordsUpdate = context.getCounter(BLUR_UPDATE, EXISTING_RECORDS + SEP + UPDATE);
  _existingRecordsNoUpdate = context.getCounter(BLUR_UPDATE, EXISTING_RECORDS + SEP + NO_UPDATE);
  _ignoredExistingRows = context.getCounter(BLUR_UPDATE, IGNORED_EXISTING_ROWS);
  _debugRecordsWithSameRecordId = context.getCounter(BLUR_UPDATE_DEBUG, MULTIPLE_RECORD_W_SAME_RECORD_ID);
  _debugMarkerRecordsNoUpdate = context.getCounter(BLUR_UPDATE_DEBUG, MARKER_RECORDS + SEP + NO_UPDATE);
  _debugMarkerRecordsUpdate = context.getCounter(BLUR_UPDATE_DEBUG, MARKER_RECORDS + SEP + UPDATE);
  _debugIndexValues = context.getCounter(BLUR_UPDATE_DEBUG, INDEX_VALUES);
  _debugNullBlurRecords = context.getCounter(BLUR_UPDATE_DEBUG, NULL_BLUR_RECORDS);
}
Example #3
Source File: AbstractCounters.java From big-c with Apache License 2.0
/**
 * Construct from another counters object.
 * @param <C1> type of the other counter
 * @param <G1> type of the other counter group
 * @param counters the counters object to copy
 * @param groupFactory the factory for new groups
 */
@InterfaceAudience.Private
public <C1 extends Counter, G1 extends CounterGroupBase<C1>>
    AbstractCounters(AbstractCounters<C1, G1> counters,
                     CounterGroupFactory<C, G> groupFactory) {
  this.groupFactory = groupFactory;
  for (G1 group : counters) {
    String name = group.getName();
    G newGroup = groupFactory.newGroup(name, group.getDisplayName(), limits);
    (isFrameworkGroup(name) ? fgroups : groups).put(name, newGroup);
    for (Counter counter : group) {
      newGroup.addCounter(counter.getName(), counter.getDisplayName(), counter.getValue());
    }
  }
}
Example #4
Source File: AbstractCounters.java From hadoop with Apache License 2.0
/**
 * Construct from another counters object.
 * @param <C1> type of the other counter
 * @param <G1> type of the other counter group
 * @param counters the counters object to copy
 * @param groupFactory the factory for new groups
 */
@InterfaceAudience.Private
public <C1 extends Counter, G1 extends CounterGroupBase<C1>>
    AbstractCounters(AbstractCounters<C1, G1> counters,
                     CounterGroupFactory<C, G> groupFactory) {
  this.groupFactory = groupFactory;
  for (G1 group : counters) {
    String name = group.getName();
    G newGroup = groupFactory.newGroup(name, group.getDisplayName(), limits);
    (isFrameworkGroup(name) ? fgroups : groups).put(name, newGroup);
    for (Counter counter : group) {
      newGroup.addCounter(counter.getName(), counter.getDisplayName(), counter.getValue());
    }
  }
}
Example #5
Source File: MRJobLauncher.java From incubator-gobblin with Apache License 2.0
/**
 * Create a {@link org.apache.gobblin.metrics.GobblinMetrics} instance for this job run
 * from the Hadoop counters.
 */
@VisibleForTesting
void countersToMetrics(GobblinMetrics metrics) throws IOException {
  Optional<Counters> counters = Optional.fromNullable(this.job.getCounters());
  if (counters.isPresent()) {
    // Write job-level counters
    CounterGroup jobCounterGroup = counters.get().getGroup(MetricGroup.JOB.name());
    for (Counter jobCounter : jobCounterGroup) {
      metrics.getCounter(jobCounter.getName()).inc(jobCounter.getValue());
    }
    // Write task-level counters
    CounterGroup taskCounterGroup = counters.get().getGroup(MetricGroup.TASK.name());
    for (Counter taskCounter : taskCounterGroup) {
      metrics.getCounter(taskCounter.getName()).inc(taskCounter.getValue());
    }
  }
}
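Since the mapreduce Counters object is iterable over its groups, and each CounterGroup over its counters, a variant that copies every group rather than the two fixed ones could look like the following sketch; metrics stands in for the same GobblinMetrics sink, and whether copying all groups is desirable here is an assumption, since the original deliberately reads only the JOB and TASK groups:

// Sketch: walk every counter group instead of the fixed JOB/TASK pair.
for (CounterGroup group : counters.get()) {
  for (Counter counter : group) {
    metrics.getCounter(group.getName() + "." + counter.getName()).inc(counter.getValue());
  }
}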
Example #6
Source File: JsonCountersIterator.java From datawave with Apache License 2.0
@Override
public JsonElement serialize(CounterGroup cg, Type t, JsonSerializationContext ctx) {
  JsonObject obj = new JsonObject();
  if (!cg.getName().equals(cg.getDisplayName()))
    obj.addProperty("displayName", cg.getDisplayName());
  JsonObject dns = new JsonObject();
  boolean anyNamesDiffer = false;
  for (Counter c : cg) {
    obj.addProperty(c.getName(), c.getValue());
    if (!c.getName().equals(c.getDisplayName()))
      anyNamesDiffer = true;
    dns.addProperty(c.getName(), c.getDisplayName());
  }
  if (anyNamesDiffer)
    obj.add("displayNames", dns);
  return obj;
}
Example #7
Source File: ContextUtil.java From parquet-mr with Apache License 2.0
private static Method findCounterMethod(TaskAttemptContext context) {
  if (context != null) {
    if (COUNTER_METHODS_BY_CLASS.containsKey(context.getClass())) {
      return COUNTER_METHODS_BY_CLASS.get(context.getClass());
    }
    try {
      Method method = context.getClass().getMethod("getCounter", String.class, String.class);
      if (method.getReturnType().isAssignableFrom(Counter.class)) {
        COUNTER_METHODS_BY_CLASS.put(context.getClass(), method);
        return method;
      }
    } catch (NoSuchMethodException e) {
      return null;
    }
  }
  return null;
}
Example #8
Source File: ContextWrappedStatusReporter.java From datawave with Apache License 2.0
@Override
public Counter getCounter(String group, String name) {
  try {
    return context.getCounter(group, name);
  } catch (NullPointerException npe) {
    return null;
  }
}
Example #9
Source File: TaskAttemptImpl.java From big-c with Apache License 2.0
private void updateProgressSplits() {
  double newProgress = reportedStatus.progress;
  newProgress = Math.max(Math.min(newProgress, 1.0D), 0.0D);
  Counters counters = reportedStatus.counters;
  if (counters == null)
    return;

  WrappedProgressSplitsBlock splitsBlock = getProgressSplitBlock();
  if (splitsBlock != null) {
    long now = clock.getTime();
    long start = getLaunchTime(); // TODO Ensure not 0

    if (start != 0 && now - start <= Integer.MAX_VALUE) {
      splitsBlock.getProgressWallclockTime().extend(newProgress,
          (int) (now - start));
    }

    Counter cpuCounter = counters.findCounter(TaskCounter.CPU_MILLISECONDS);
    if (cpuCounter != null && cpuCounter.getValue() <= Integer.MAX_VALUE) {
      splitsBlock.getProgressCPUTime().extend(newProgress,
          (int) cpuCounter.getValue()); // long to int? TODO: FIX. Same below
    }

    Counter virtualBytes = counters.findCounter(TaskCounter.VIRTUAL_MEMORY_BYTES);
    if (virtualBytes != null) {
      splitsBlock.getProgressVirtualMemoryKbytes().extend(newProgress,
          (int) (virtualBytes.getValue() / (MEMORY_SPLITS_RESOLUTION)));
    }

    Counter physicalBytes = counters.findCounter(TaskCounter.PHYSICAL_MEMORY_BYTES);
    if (physicalBytes != null) {
      splitsBlock.getProgressPhysicalMemoryKbytes().extend(newProgress,
          (int) (physicalBytes.getValue() / (MEMORY_SPLITS_RESOLUTION)));
    }
  }
}
Example #10
Source File: ReduceContextImpl.java From big-c with Apache License 2.0
public ReduceContextImpl(Configuration conf, TaskAttemptID taskid,
                         RawKeyValueIterator input,
                         Counter inputKeyCounter,
                         Counter inputValueCounter,
                         RecordWriter<KEYOUT, VALUEOUT> output,
                         OutputCommitter committer,
                         StatusReporter reporter,
                         RawComparator<KEYIN> comparator,
                         Class<KEYIN> keyClass,
                         Class<VALUEIN> valueClass
                        ) throws InterruptedException, IOException {
  super(conf, taskid, output, committer, reporter);
  this.input = input;
  this.inputKeyCounter = inputKeyCounter;
  this.inputValueCounter = inputValueCounter;
  this.comparator = comparator;
  this.serializationFactory = new SerializationFactory(conf);
  this.keyDeserializer = serializationFactory.getDeserializer(keyClass);
  this.keyDeserializer.open(buffer);
  this.valueDeserializer = serializationFactory.getDeserializer(valueClass);
  this.valueDeserializer.open(buffer);
  hasMore = input.next();
  this.keyClass = keyClass;
  this.valueClass = valueClass;
  this.conf = conf;
  this.taskid = taskid;
}
Example #11
Source File: FrameworkCounterGroup.java From hadoop with Apache License 2.0
/**
 * FrameworkGroup ::= #counter (key value)*
 */
@Override
@SuppressWarnings("unchecked")
public void write(DataOutput out) throws IOException {
  WritableUtils.writeVInt(out, size());
  for (int i = 0; i < counters.length; ++i) {
    Counter counter = (C) counters[i];
    if (counter != null) {
      WritableUtils.writeVInt(out, i);
      WritableUtils.writeVLong(out, counter.getValue());
    }
  }
}
Example #12
Source File: PerfProfiler.java From Cubert with Apache License 2.0
private void updateCounter() {
  long[] operatorTime = getOperatorTime();
  String profileCounterGroupName =
      PhaseContext.isMapper() ? mapperProfileCounterGroupName
                              : reducerProfileCounterGroupName;
  ArrayNode operatorsJson = multipassOperatorsJson.get(currentPassIndex);
  for (int i = 0; i < operatorTime.length; i++) {
    if (operatorTime[i] > 0) {
      JsonNode operatorJson = operatorsJson.get(i);
      OperatorType type = OperatorType.valueOf(operatorJson.get("operator").getTextValue());
      String outputName = operatorJson.get("output").getTextValue();
      String counterName = String.format("P%d-O%d-%s-%s", currentPassIndex, i, type, outputName);
      Counter profileCounter = PhaseContext.getCounter(profileCounterGroupName, counterName);
      profileCounter.increment(operatorTime[i]);
    }
  }
}
Example #13
Source File: CountersStrings.java From big-c with Apache License 2.0
/**
 * Make the pre 0.21 counter string (for e.g. old job history files)
 * [(actual-name)(display-name)(value)]
 * @param counter to stringify
 * @return the stringified result
 */
public static String toEscapedCompactString(Counter counter) {
  // First up, obtain the strings that need escaping. This will help us
  // determine the buffer length apriori.
  String escapedName, escapedDispName;
  long currentValue;
  synchronized (counter) {
    escapedName = escape(counter.getName());
    escapedDispName = escape(counter.getDisplayName());
    currentValue = counter.getValue();
  }
  int length = escapedName.length() + escapedDispName.length() + 4;
  length += 8; // For the following delimiting characters
  StringBuilder builder = new StringBuilder(length);
  builder.append(COUNTER_OPEN);

  // Add the counter name
  builder.append(UNIT_OPEN);
  builder.append(escapedName);
  builder.append(UNIT_CLOSE);

  // Add the display name
  builder.append(UNIT_OPEN);
  builder.append(escapedDispName);
  builder.append(UNIT_CLOSE);

  // Add the value
  builder.append(UNIT_OPEN);
  builder.append(currentValue);
  builder.append(UNIT_CLOSE);

  builder.append(COUNTER_CLOSE);

  return builder.toString();
}
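As the javadoc states, the output has the shape [(actual-name)(display-name)(value)]. For example, a counter named MAP_INPUT_RECORDS with display name "Map input records" and value 42 would serialize to [(MAP_INPUT_RECORDS)(Map input records)(42)], assuming neither string needs escaping.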
Example #14
Source File: CompactorOutputCommitter.java From incubator-gobblin with Apache License 2.0
private static long getRecordCountFromCounter(TaskAttemptContext context, Enum<?> counterName) {
  try {
    Method getCounterMethod = context.getClass().getMethod("getCounter", Enum.class);
    return ((Counter) getCounterMethod.invoke(context, counterName)).getValue();
  } catch (Exception e) {
    throw new RuntimeException("Error reading record count counter", e);
  }
}
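The reflective lookup keeps this helper compilable against Hadoop versions where TaskAttemptContext does not expose getCounter. On Hadoop 2 and later, where the method is part of the interface, the whole helper reduces to a direct call; a sketch under that assumption:

// Sketch: assumes a Hadoop 2+ TaskAttemptContext, which exposes
// getCounter(Enum<?>) directly, so no reflection is needed.
private static long getRecordCountFromCounter(TaskAttemptContext context, Enum<?> counterName) {
  return context.getCounter(counterName).getValue();
}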
Example #15
Source File: AbstractCounter.java From hadoop with Apache License 2.0
@Override
public synchronized boolean equals(Object genericRight) {
  if (genericRight instanceof Counter) {
    synchronized (genericRight) {
      Counter right = (Counter) genericRight;
      return getName().equals(right.getName()) &&
             getDisplayName().equals(right.getDisplayName()) &&
             getValue() == right.getValue();
    }
  }
  return false;
}
Example #16
Source File: OnlineFeatureDriver.java From laser with Apache License 2.0
public static long run(String collection, Path input, Path output, Configuration baseConf)
    throws IOException, ClassNotFoundException, InterruptedException {
  Configuration conf = new Configuration(baseConf);
  Job job = Job.getInstance(conf);
  job.setJarByClass(OnlineFeatureDriver.class);
  job.setJobName("GROUP each record's feature BY identifier");

  FileInputFormat.setInputPaths(job, input);
  FileOutputFormat.setOutputPath(job, output);
  job.setInputFormatClass(SequenceFileInputFormat.class);
  job.setOutputFormatClass(SequenceFileOutputFormat.class);
  job.setMapOutputKeyClass(Text.class);
  job.setMapOutputValueClass(OnlineVectorWritable.class);
  job.setOutputKeyClass(Text.class);
  job.setOutputValueClass(ListWritable.class);
  job.setMapperClass(OnlineFeatureMapper.class);
  job.setReducerClass(OnlineFeatureReducer.class);

  HadoopUtil.delete(conf, output);
  boolean succeeded = job.waitForCompletion(true);
  if (!succeeded) {
    throw new IllegalStateException("Job:Group feature, Failed!");
  }

  Counter counter = job.getCounters().findCounter(
      "org.apache.hadoop.mapred.Task$Counter", "REDUCE_OUTPUT_RECORDS");
  long reduceOutputRecords = counter.getValue();
  LOG.info("Job: GROUP each record's feature BY identifier, output records = {}",
      reduceOutputRecords);
  return reduceOutputRecords;
}
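The string pair "org.apache.hadoop.mapred.Task$Counter" / "REDUCE_OUTPUT_RECORDS" addresses the built-in counter by its pre-2.x class name. On newer Hadoop versions the same value is also reachable through the TaskCounter enum; a sketch, assuming Hadoop 2+:

// Sketch: enum-based lookup of the same built-in counter on Hadoop 2+.
long reduceOutputRecords = job.getCounters()
    .findCounter(org.apache.hadoop.mapreduce.TaskCounter.REDUCE_OUTPUT_RECORDS)
    .getValue();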
Example #17
Source File: JobTrackerMetricsInst.java From RDFS with Apache License 2.0
private void clearCounters() {
  for (Group g : countersToMetrics) {
    for (Counter c : g) {
      c.setValue(0);
    }
  }
}
Example #18
Source File: JobHistoryEventHandler.java From hadoop with Apache License 2.0
private void setSummarySlotSeconds(JobSummary summary, Counters allCounters) {
  Counter slotMillisMapCounter = allCounters.findCounter(JobCounter.SLOTS_MILLIS_MAPS);
  if (slotMillisMapCounter != null) {
    summary.setMapSlotSeconds(slotMillisMapCounter.getValue() / 1000);
  }
  Counter slotMillisReduceCounter = allCounters.findCounter(JobCounter.SLOTS_MILLIS_REDUCES);
  if (slotMillisReduceCounter != null) {
    summary.setReduceSlotSeconds(slotMillisReduceCounter.getValue() / 1000);
  }
}
Example #19
Source File: TaskAttemptImpl.java From hadoop with Apache License 2.0
private void updateProgressSplits() {
  double newProgress = reportedStatus.progress;
  newProgress = Math.max(Math.min(newProgress, 1.0D), 0.0D);
  Counters counters = reportedStatus.counters;
  if (counters == null)
    return;

  WrappedProgressSplitsBlock splitsBlock = getProgressSplitBlock();
  if (splitsBlock != null) {
    long now = clock.getTime();
    long start = getLaunchTime(); // TODO Ensure not 0

    if (start != 0 && now - start <= Integer.MAX_VALUE) {
      splitsBlock.getProgressWallclockTime().extend(newProgress,
          (int) (now - start));
    }

    Counter cpuCounter = counters.findCounter(TaskCounter.CPU_MILLISECONDS);
    if (cpuCounter != null && cpuCounter.getValue() <= Integer.MAX_VALUE) {
      splitsBlock.getProgressCPUTime().extend(newProgress,
          (int) cpuCounter.getValue()); // long to int? TODO: FIX. Same below
    }

    Counter virtualBytes = counters.findCounter(TaskCounter.VIRTUAL_MEMORY_BYTES);
    if (virtualBytes != null) {
      splitsBlock.getProgressVirtualMemoryKbytes().extend(newProgress,
          (int) (virtualBytes.getValue() / (MEMORY_SPLITS_RESOLUTION)));
    }

    Counter physicalBytes = counters.findCounter(TaskCounter.PHYSICAL_MEMORY_BYTES);
    if (physicalBytes != null) {
      splitsBlock.getProgressPhysicalMemoryKbytes().extend(newProgress,
          (int) (physicalBytes.getValue() / (MEMORY_SPLITS_RESOLUTION)));
    }
  }
}
Example #20
Source File: CounterGroupInfo.java From hadoop with Apache License 2.0
public CounterGroupInfo(String name, CounterGroup group, CounterGroup mg, CounterGroup rg) {
  this.counterGroupName = name;
  this.counter = new ArrayList<CounterInfo>();
  for (Counter c : group) {
    Counter mc = mg == null ? null : mg.findCounter(c.getName());
    Counter rc = rg == null ? null : rg.findCounter(c.getName());
    CounterInfo cinfo = new CounterInfo(c, mc, rc);
    this.counter.add(cinfo);
  }
}
Example #21
Source File: TaskCounterGroupInfo.java From big-c with Apache License 2.0
public TaskCounterGroupInfo(String name, CounterGroup group) {
  this.counterGroupName = name;
  this.counter = new ArrayList<TaskCounterInfo>();
  for (Counter c : group) {
    TaskCounterInfo cinfo = new TaskCounterInfo(c.getName(), c.getValue());
    this.counter.add(cinfo);
  }
}
Example #22
Source File: SimpleHfileToRmdbMapper.java From super-cloudops with Apache License 2.0
@Override
public void map(ImmutableBytesWritable key, Result result, Context context)
    throws IOException, InterruptedException {
  Counter c = context.getCounter(DEFUALT_COUNTER_GROUP, DEFUALT_COUNTER_TOTAL);
  c.increment(1);

  LinkedHashMap<String, String> rowdata = new LinkedHashMap<>();
  rowdata.put("row", Bytes.toString(key.get()));

  Iterator<Cell> it = result.listCells().iterator();
  while (it.hasNext()) {
    Cell cell = it.next();
    byte[] qualifier = extractFieldByteArray(cell.getQualifierArray(),
        cell.getQualifierOffset(), cell.getQualifierLength());
    byte[] value = extractFieldByteArray(cell.getValueArray(),
        cell.getValueOffset(), cell.getValueLength());
    String _qualifier = Bytes.toString(qualifier);
    if (!HbaseMigrateUtils.isIgnoreHbaseQualifier(_qualifier)) {
      rowdata.put(_qualifier, Bytes.toString(value));
    }
  }

  // Insert sql.
  try {
    String insertSql = SimpleHfileToRmdbExporter.currentRmdbManager.buildInsertSql(rowdata);
    if (SimpleHfileToRmdbExporter.verbose) {
      log.info(format("Inserting [%s]: %s", c.getValue(), insertSql));
    }
    SimpleHfileToRmdbExporter.currentRmdbManager.getRmdbRepository().saveRowdata(insertSql);
    context.getCounter(DEFUALT_COUNTER_GROUP, DEFUALT_COUNTER_PROCESSED).increment(1);
  } catch (Exception e) {
    log.error(e);
  }
}
Example #23
Source File: StatsDEnabledClassesTest.java From datawave with Apache License 2.0
@Test
public void testMapper() throws IOException, InterruptedException {
  Configuration conf = new Configuration();
  // basic config
  conf.set("statsd.host", "localhost");
  conf.set("statsd.port", "8125");
  conf.set("mapreduce.job.queuename", "queue1");
  conf.set("mapreduce.job.name", "job1");
  // some valid aspect configs
  conf.set("statsd.final.gauge.MyGroup1", "CounterGroup1");
  conf.set("statsd.final.counter.MyGroup2", "CounterGroup2/Counter1");
  conf.set("statsd.live.time.MyGroup3.MyCounter2", "CounterGroup3/Counter2");
  conf.set("statsd.live.counter.TestGroup", TestCounters.class.getName());
  CounterToStatsDConfiguration config = new CounterToStatsDConfiguration(conf);

  TestStatsDEnabledMapper mapper = new TestStatsDEnabledMapper();
  Mapper.Context context = mapper.createTestContext(conf);

  mapper.setup(context);
  Assert.assertNotNull(mapper.getHelper());

  TaskAttemptContext returnedContext = mapper.getContext(context);
  Assert.assertEquals(CounterStatsDClient.class.getName() + '$' + "StatsDTaskAttemptContext",
      returnedContext.getClass().getName());

  Counter testCounter = mapper.getCounter(context, TestCounters.COUNTER1);
  Assert.assertEquals(CounterStatsDClient.class.getName() + '$' + "StatsDCounter",
      testCounter.getClass().getName());

  testCounter = mapper.getCounter(context, "CounterGroup1", "Counter1");
  Assert.assertEquals(CounterStatsDClient.class.getName() + '$' + "StatsDCounter",
      testCounter.getClass().getName());

  Assert.assertFalse(
      ((CounterStatsDClientTest.TestCounterStatsDClient) (mapper.getHelper()).getClient()).stopped);
  mapper.cleanup(context);
  Assert.assertNull(mapper.getHelper().getClient());
}
Example #24
Source File: HadoopMapReduceCounterGroup.java From ignite with Apache License 2.0
/** {@inheritDoc} */
@Override
public Counter addCounter(String name, String displayName, long value) {
  final Counter counter = cntrs.findCounter(this.name, name);
  counter.setValue(value);
  return counter;
}
Example #25
Source File: FetchTaskContext.java From spork with Apache License 2.0
@Override
public boolean incrCounter(String group, String name, long delta) {
  if (context == null) {
    return false;
  }
  Counter counter = context.getCounter(group, name);
  counter.increment(delta);
  return true;
}
Example #26
Source File: FileSystemCounterGroup.java From hadoop with Apache License 2.0
@Override
@SuppressWarnings("unchecked")
public void incrAllCounters(CounterGroupBase<C> other) {
  if (checkNotNull(other.getUnderlyingGroup(), "other group")
      instanceof FileSystemCounterGroup<?>) {
    for (Counter counter : other) {
      FSCounter c = (FSCounter) ((Counter) counter).getUnderlyingCounter();
      findCounter(c.scheme, c.key).increment(counter.getValue());
    }
  }
}
Example #27
Source File: KeyDedupReducerTest.java From incubator-gobblin with Apache License 2.0
@Test
public void testAvroReduce() throws IOException, InterruptedException {
  Schema keySchema = new Schema.Parser().parse(AVRO_KEY_SCHEMA);
  GenericRecordBuilder keyRecordBuilder = new GenericRecordBuilder(keySchema.getField("key").schema());
  keyRecordBuilder.set("partitionKey", 1);
  keyRecordBuilder.set("environment", "test");
  keyRecordBuilder.set("subKey", "2");
  GenericRecord record = keyRecordBuilder.build();

  keyRecordBuilder = new GenericRecordBuilder(keySchema);
  keyRecordBuilder.set("key", record);
  GenericRecord keyRecord = keyRecordBuilder.build();

  // Test reducer with delta field "scn"
  Schema fullSchema = new Schema.Parser().parse(AVRO_FULL_SCHEMA);
  AvroValue<GenericRecord> fullRecord1 = new AvroValue<>();
  AvroValue<GenericRecord> fullRecord2 = new AvroValue<>();
  AvroValue<GenericRecord> fullRecord3 = new AvroValue<>();
  AvroValue<GenericRecord> fullRecord4 = new AvroValue<>();

  GenericRecordBuilder fullRecordBuilder1 = new GenericRecordBuilder(fullSchema);
  fullRecordBuilder1.set("key", record);
  fullRecordBuilder1.set("scn", 123);
  fullRecordBuilder1.set("scn2", 100);
  fullRecord1.datum(fullRecordBuilder1.build());
  fullRecordBuilder1.set("scn", 125);
  fullRecordBuilder1.set("scn2", 1);
  fullRecord2.datum(fullRecordBuilder1.build());
  fullRecordBuilder1.set("scn", 124);
  fullRecordBuilder1.set("scn2", 10);
  fullRecord3.datum(fullRecordBuilder1.build());
  fullRecordBuilder1.set("scn", 122);
  fullRecordBuilder1.set("scn2", 1000);
  fullRecord4.datum(fullRecordBuilder1.build());

  Configuration conf = mock(Configuration.class);
  when(conf.get(AvroKeyDedupReducer.DELTA_SCHEMA_PROVIDER))
      .thenReturn(FieldAttributeBasedDeltaFieldsProvider.class.getName());
  when(conf.get(FieldAttributeBasedDeltaFieldsProvider.ATTRIBUTE_FIELD)).thenReturn("attributes_json");
  when(conf.get(FieldAttributeBasedDeltaFieldsProvider.DELTA_PROP_NAME,
      FieldAttributeBasedDeltaFieldsProvider.DEFAULT_DELTA_PROP_NAME))
      .thenReturn(FieldAttributeBasedDeltaFieldsProvider.DEFAULT_DELTA_PROP_NAME);
  RecordKeyDedupReducerBase<AvroKey<GenericRecord>, AvroValue<GenericRecord>, AvroKey<GenericRecord>, NullWritable> reducer =
      new AvroKeyDedupReducer();

  WrappedReducer.Context reducerContext = mock(WrappedReducer.Context.class);
  when(reducerContext.getConfiguration()).thenReturn(conf);
  Counter moreThan1Counter = new GenericCounter();
  when(reducerContext.getCounter(RecordKeyDedupReducerBase.EVENT_COUNTER.MORE_THAN_1)).thenReturn(moreThan1Counter);
  Counter dedupedCounter = new GenericCounter();
  when(reducerContext.getCounter(RecordKeyDedupReducerBase.EVENT_COUNTER.DEDUPED)).thenReturn(dedupedCounter);
  Counter recordCounter = new GenericCounter();
  when(reducerContext.getCounter(RecordKeyDedupReducerBase.EVENT_COUNTER.RECORD_COUNT)).thenReturn(recordCounter);

  reducer.setup(reducerContext);
  doNothing().when(reducerContext).write(any(AvroKey.class), any(NullWritable.class));
  List<AvroValue<GenericRecord>> valueIterable =
      Lists.newArrayList(fullRecord1, fullRecord2, fullRecord3, fullRecord4);

  AvroKey<GenericRecord> key = new AvroKey<>();
  key.datum(keyRecord);
  reducer.reduce(key, valueIterable, reducerContext);
  Assert.assertEquals(reducer.getOutKey().datum(), fullRecord2.datum());

  // Test reducer without delta field
  Configuration conf2 = mock(Configuration.class);
  when(conf2.get(AvroKeyDedupReducer.DELTA_SCHEMA_PROVIDER)).thenReturn(null);
  when(reducerContext.getConfiguration()).thenReturn(conf2);
  RecordKeyDedupReducerBase<AvroKey<GenericRecord>, AvroValue<GenericRecord>, AvroKey<GenericRecord>, NullWritable> reducer2 =
      new AvroKeyDedupReducer();
  reducer2.setup(reducerContext);
  reducer2.reduce(key, valueIterable, reducerContext);
  Assert.assertEquals(reducer2.getOutKey().datum(), fullRecord1.datum());

  // Test reducer with compound delta key.
  Schema fullSchema2 = new Schema.Parser().parse(AVRO_FULL_SCHEMA_WITH_TWO_DELTA_FIELDS);
  GenericRecordBuilder fullRecordBuilder2 = new GenericRecordBuilder(fullSchema2);
  fullRecordBuilder2.set("key", record);
  fullRecordBuilder2.set("scn", 123);
  fullRecordBuilder2.set("scn2", 100);
  fullRecord1.datum(fullRecordBuilder2.build());
  fullRecordBuilder2.set("scn", 125);
  fullRecordBuilder2.set("scn2", 1000);
  fullRecord2.datum(fullRecordBuilder2.build());
  fullRecordBuilder2.set("scn", 126);
  fullRecordBuilder2.set("scn2", 1000);
  fullRecord3.datum(fullRecordBuilder2.build());
  fullRecordBuilder2.set("scn", 130);
  fullRecordBuilder2.set("scn2", 100);
  fullRecord4.datum(fullRecordBuilder2.build());
  List<AvroValue<GenericRecord>> valueIterable2 =
      Lists.newArrayList(fullRecord1, fullRecord2, fullRecord3, fullRecord4);
  reducer.reduce(key, valueIterable2, reducerContext);
  Assert.assertEquals(reducer.getOutKey().datum(), fullRecord3.datum());
}
Example #28
Source File: Parser.java From hadoop with Apache License 2.0
@Override
public Counter getCounter(String group, String name) {
  return context.getCounter(group, name);
}
Example #29
Source File: HadoopClientProtocolSelfTest.java From ignite with Apache License 2.0
/**
 * Tests job counters retrieval.
 *
 * @throws Exception If failed.
 */
@Test
public void testJobCounters() throws Exception {
  IgniteFileSystem igfs = grid(0).fileSystem(HadoopAbstractSelfTest.igfsName);

  igfs.mkdirs(new IgfsPath(PATH_INPUT));

  try (BufferedWriter bw = new BufferedWriter(new OutputStreamWriter(igfs.create(
      new IgfsPath(PATH_INPUT + "/test.file"), true)))) {
    bw.write(
        "alpha\n" + "beta\n" + "gamma\n" +
        "alpha\n" + "beta\n" + "gamma\n" +
        "alpha\n" + "beta\n" + "gamma\n"
    );
  }

  Configuration conf = config(HadoopAbstractSelfTest.REST_PORT);

  final Job job = Job.getInstance(conf);

  try {
    job.setOutputKeyClass(Text.class);
    job.setOutputValueClass(IntWritable.class);

    job.setMapperClass(TestCountingMapper.class);
    job.setReducerClass(TestCountingReducer.class);
    job.setCombinerClass(TestCountingCombiner.class);

    FileInputFormat.setInputPaths(job, new Path("igfs://" + igfsName + "@" + PATH_INPUT));
    FileOutputFormat.setOutputPath(job, new Path("igfs://" + igfsName + "@" + PATH_OUTPUT));

    job.submit();

    final Counter cntr = job.getCounters().findCounter(TestCounter.COUNTER1);

    assertEquals(0, cntr.getValue());

    cntr.increment(10);

    assertEquals(10, cntr.getValue());

    // Transferring to map phase.
    setupLockFile.delete();

    // Transferring to reduce phase.
    mapLockFile.delete();

    job.waitForCompletion(false);

    assertEquals("job must end successfully", JobStatus.State.SUCCEEDED, job.getStatus().getState());

    final Counters counters = job.getCounters();

    assertNotNull("counters cannot be null", counters);
    assertEquals("wrong counters count", 3, counters.countCounters());
    assertEquals("wrong counter value", 15, counters.findCounter(TestCounter.COUNTER1).getValue());
    assertEquals("wrong counter value", 3, counters.findCounter(TestCounter.COUNTER2).getValue());
    assertEquals("wrong counter value", 3, counters.findCounter(TestCounter.COUNTER3).getValue());
  } catch (Throwable t) {
    log.error("Unexpected exception", t);
  } finally {
    job.getCluster().close();
  }
}
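Stripped of the Ignite test scaffolding, the client-side idiom this test exercises is: submit the job, wait for completion, then read aggregated values from job.getCounters(). A minimal sketch; MyCounters is a hypothetical user enum, not part of Hadoop or Ignite:

// Sketch of the plain counter-retrieval idiom after job completion.
job.waitForCompletion(true);
Counters counters = job.getCounters();
long records = counters.findCounter(MyCounters.RECORDS).getValue();       // enum form
long perGroup = counters.findCounter("MyGroup", "MyCounter").getValue();  // group/name form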
Example #30
Source File: MockStatusReporter.java From datawave with Apache License 2.0
@Override
public Counter getCounter(String group, String name) {
  return counters.getGroup(group).findCounter(name);
}