Java Code Examples for org.apache.beam.sdk.metrics.Counter#inc()
The following examples show how to use org.apache.beam.sdk.metrics.Counter#inc().
Each example is taken from an open-source project; the source file and license are noted above it.
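Before the examples, here is a minimal sketch of the typical usage pattern: a Counter is normally obtained from Metrics.counter(namespace, name) and incremented inside a DoFn's @ProcessElement method. The namespace ("my.pipeline"), counter name ("elementsProcessed"), and DoFn class below are placeholders chosen for illustration, not taken from any of the projects listed here.

import org.apache.beam.sdk.metrics.Counter;
import org.apache.beam.sdk.metrics.Metrics;
import org.apache.beam.sdk.transforms.DoFn;

// Minimal sketch: count the elements processed by a DoFn.
// Namespace and counter name are illustrative placeholders.
public class CountingFn extends DoFn<String, String> {

  private final Counter elementsProcessed =
      Metrics.counter("my.pipeline", "elementsProcessed");

  @ProcessElement
  public void processElement(ProcessContext c) {
    elementsProcessed.inc(); // increment by 1 for every element seen
    c.output(c.element());
  }
}

After the pipeline runs, the accumulated value can be queried from the runner's metrics, for example via PipelineResult.metrics().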
Example 1
Source File: KafkaExactlyOnceSink.java From DataflowTemplates with Apache License 2.0
Future<RecordMetadata> sendRecord(TimestampedValue<KV<K, V>> record, Counter sendCounter) {
  try {
    Long timestampMillis =
        spec.getPublishTimestampFunction() != null
            ? spec.getPublishTimestampFunction()
                .getTimestamp(record.getValue(), record.getTimestamp())
                .getMillis()
            : null;

    Future<RecordMetadata> result =
        producer.send(
            new ProducerRecord<>(
                spec.getTopic(),
                null,
                timestampMillis,
                record.getValue().getKey(),
                record.getValue().getValue()));
    sendCounter.inc();
    return result;
  } catch (KafkaException e) {
    ProducerSpEL.abortTransaction(producer);
    throw e;
  }
}
Example 2
Source File: KafkaExactlyOnceSink.java From DataflowTemplates with Apache License 2.0
void commitTxn(long lastRecordId, Counter numTransactions) throws IOException {
  try {
    // Store id in consumer group metadata for the partition.
    // NOTE: Kafka keeps this metadata for 24 hours since the last update. This limits
    // how long the pipeline could be down before resuming it. It does not look like
    // this TTL can be adjusted (asked about it on Kafka users list).
    ProducerSpEL.sendOffsetsToTransaction(
        producer,
        ImmutableMap.of(
            new TopicPartition(spec.getTopic(), shard),
            new OffsetAndMetadata(
                0L,
                JSON_MAPPER.writeValueAsString(new ShardMetadata(lastRecordId, writerId)))),
        spec.getSinkGroupId());
    ProducerSpEL.commitTransaction(producer);

    numTransactions.inc();
    LOG.debug("{} : committed {} records", shard, lastRecordId - committedId);

    committedId = lastRecordId;
  } catch (KafkaException e) {
    ProducerSpEL.abortTransaction(producer);
    throw e;
  }
}
Example 3
Source File: KafkaExactlyOnceSink.java From beam with Apache License 2.0
Future<RecordMetadata> sendRecord(
    TimestampedValue<ProducerRecord<K, V>> record, Counter sendCounter) {
  try {
    Long timestampMillis =
        spec.getPublishTimestampFunction() != null
            ? spec.getPublishTimestampFunction()
                .getTimestamp(record.getValue(), record.getTimestamp())
                .getMillis()
            : null;

    Future<RecordMetadata> result =
        producer.send(
            new ProducerRecord<>(
                spec.getTopic(),
                null,
                timestampMillis,
                record.getValue().key(),
                record.getValue().value()));
    sendCounter.inc();
    return result;
  } catch (KafkaException e) {
    ProducerSpEL.abortTransaction(producer);
    throw e;
  }
}
Example 4
Source File: KafkaExactlyOnceSink.java From beam with Apache License 2.0
void commitTxn(long lastRecordId, Counter numTransactions) throws IOException {
  try {
    // Store id in consumer group metadata for the partition.
    // NOTE: Kafka keeps this metadata for 24 hours since the last update. This limits
    // how long the pipeline could be down before resuming it. It does not look like
    // this TTL can be adjusted (asked about it on Kafka users list).
    ProducerSpEL.sendOffsetsToTransaction(
        producer,
        ImmutableMap.of(
            new TopicPartition(spec.getTopic(), shard),
            new OffsetAndMetadata(
                0L,
                JSON_MAPPER.writeValueAsString(new ShardMetadata(lastRecordId, writerId)))),
        spec.getSinkGroupId());
    ProducerSpEL.commitTransaction(producer);

    numTransactions.inc();
    LOG.debug("{} : committed {} records", shard, lastRecordId - committedId);

    committedId = lastRecordId;
  } catch (KafkaException e) {
    ProducerSpEL.abortTransaction(producer);
    throw e;
  }
}
Example 5
Source File: LabeledMetricsTest.java From beam with Apache License 2.0
@Test
public void testOperationsUpdateCounterFromContainerWhenContainerIsPresent() {
  HashMap<String, String> labels = new HashMap<String, String>();
  String urn = MonitoringInfoConstants.Urns.ELEMENT_COUNT;
  MonitoringInfoMetricName name = MonitoringInfoMetricName.named(urn, labels);

  MetricsContainer mockContainer = Mockito.mock(MetricsContainer.class);
  Counter mockCounter = Mockito.mock(Counter.class);
  when(mockContainer.getCounter(name)).thenReturn(mockCounter);

  Counter counter = LabeledMetrics.counter(name);
  MetricsEnvironment.setCurrentContainer(mockContainer);
  counter.inc();
  verify(mockCounter).inc(1);

  counter.inc(47L);
  verify(mockCounter).inc(47);

  counter.dec(5L);
  verify(mockCounter).inc(-5);
}
Example 6
Source File: BatchModeExecutionContextTest.java From beam with Apache License 2.0
@Test
public void extractThrottleTimeCounters() {
  BatchModeExecutionContext executionContext =
      BatchModeExecutionContext.forTesting(PipelineOptionsFactory.create(), "testStage");
  DataflowOperationContext operationContext =
      executionContext.createOperationContext(NameContextsForTests.nameContextForTest());

  Counter counter =
      operationContext
          .metricsContainer()
          .getCounter(
              MetricName.named(
                  BatchModeExecutionContext.DATASTORE_THROTTLE_TIME_NAMESPACE,
                  "cumulativeThrottlingSeconds"));
  counter.inc(12);
  counter.inc(17);
  counter.inc(1);

  assertEquals(30L, (long) executionContext.extractThrottleTime());
}
Example 7
Source File: ParDoLoadTest.java From beam with Apache License 2.0
@ProcessElement
public void processElement(ProcessContext processContext) {
  for (int i = 0; i < numberOfOperations; i++) {
    for (Counter counter : counters) {
      counter.inc();
    }
  }
  processContext.output(processContext.element());
}
Example 8
Source File: LabeledMetricsTest.java From beam with Apache License 2.0
@Test
public void testCounterDoesNotFailOperationsWhenNoMetricsContainerPresent() {
  MetricsEnvironment.setCurrentContainer(null);
  assertNull(MetricsEnvironment.getCurrentContainer());
  HashMap<String, String> labels = new HashMap<String, String>();
  String urn = MonitoringInfoConstants.Urns.ELEMENT_COUNT;
  MonitoringInfoMetricName name = MonitoringInfoMetricName.named(urn, labels);

  Counter counter = LabeledMetrics.counter(name);
  counter.inc();
  counter.inc(5L);
  counter.dec();
  counter.dec(5L);
}
Example 9
Source File: BatchModeExecutionContextTest.java From beam with Apache License 2.0
@Test
public void extractMetricUpdatesCounter() {
  BatchModeExecutionContext executionContext =
      BatchModeExecutionContext.forTesting(PipelineOptionsFactory.create(), "testStage");
  DataflowOperationContext operationContext =
      executionContext.createOperationContext(NameContextsForTests.nameContextForTest());

  Counter counter =
      operationContext
          .metricsContainer()
          .getCounter(MetricName.named("namespace", "some-counter"));
  counter.inc(1);
  counter.inc(41);
  counter.inc(1);
  counter.inc(-1);

  final CounterUpdate expected =
      new CounterUpdate()
          .setStructuredNameAndMetadata(
              new CounterStructuredNameAndMetadata()
                  .setName(
                      new CounterStructuredName()
                          .setOrigin("USER")
                          .setOriginNamespace("namespace")
                          .setName("some-counter")
                          .setOriginalStepName("originalName"))
                  .setMetadata(new CounterMetadata().setKind(Kind.SUM.toString())))
          .setCumulative(true)
          .setInteger(longToSplitInt(42));

  assertThat(executionContext.extractMetricUpdates(false), containsInAnyOrder(expected));

  executionContext.commitMetricUpdates();
  Counter counterUncommitted =
      operationContext
          .metricsContainer()
          .getCounter(MetricName.named("namespace", "uncommitted-counter"));
  counterUncommitted.inc(64);

  final CounterUpdate expectedUncommitted =
      new CounterUpdate()
          .setStructuredNameAndMetadata(
              new CounterStructuredNameAndMetadata()
                  .setName(
                      new CounterStructuredName()
                          .setOrigin("USER")
                          .setOriginNamespace("namespace")
                          .setName("uncommitted-counter")
                          .setOriginalStepName("originalName"))
                  .setMetadata(new CounterMetadata().setKind(Kind.SUM.toString())))
          .setCumulative(true)
          .setInteger(longToSplitInt(64));

  // Expect to get only the uncommitted metric, unless final update.
  assertThat(
      executionContext.extractMetricUpdates(false), containsInAnyOrder(expectedUncommitted));
  assertThat(
      executionContext.extractMetricUpdates(true),
      containsInAnyOrder(expected, expectedUncommitted));

  executionContext.commitMetricUpdates();
  // All Metrics are committed, expect none unless final update.
  assertThat(executionContext.extractMetricUpdates(false), emptyIterable());
  assertThat(
      executionContext.extractMetricUpdates(true),
      containsInAnyOrder(expected, expectedUncommitted));
}