Java Code Examples for org.apache.kafka.clients.consumer.ConsumerRecord#timestampType()
The following examples show how to use org.apache.kafka.clients.consumer.ConsumerRecord#timestampType().
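Before the project examples, here is a minimal, self-contained sketch of the call itself. The broker address, group id, and topic name are placeholders, and the snippet assumes the StringDeserializer shipped with the Kafka clients library; it simply polls once and reports how each record's timestamp was assigned.

import java.time.Duration;
import java.util.Collections;
import java.util.Properties;

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.record.TimestampType;
import org.apache.kafka.common.serialization.StringDeserializer;

public class TimestampTypeExample {
  public static void main(String[] args) {
    Properties props = new Properties();
    props.put("bootstrap.servers", "localhost:9092");      // placeholder broker address
    props.put("group.id", "timestamp-type-demo");          // placeholder group id
    props.put("key.deserializer", StringDeserializer.class.getName());
    props.put("value.deserializer", StringDeserializer.class.getName());

    try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
      consumer.subscribe(Collections.singletonList("demo-topic")); // placeholder topic
      ConsumerRecords<String, String> records = consumer.poll(Duration.ofSeconds(1));
      for (ConsumerRecord<String, String> record : records) {
        // timestampType() tells you how timestamp() was assigned:
        // CREATE_TIME (set by the producer), LOG_APPEND_TIME (set by the broker),
        // or NO_TIMESTAMP_TYPE (no timestamp available, e.g. old message format).
        if (record.timestampType() == TimestampType.NO_TIMESTAMP_TYPE) {
          System.out.printf("offset %d has no usable timestamp%n", record.offset());
        } else {
          System.out.printf("offset %d: %s = %d%n",
              record.offset(), record.timestampType(), record.timestamp());
        }
      }
    }
  }
}

The examples below show the same check in real projects, typically to decide whether a record's timestamp can be trusted and whether it represents creation time or broker append time.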
Example 1
Source File: BaseKafkaConsumer11.java From datacollector with Apache License 2.0
@Override
MessageAndOffset getMessageAndOffset(ConsumerRecord message, boolean isEnabled) {
  MessageAndOffset messageAndOffset;
  if (message.timestampType() != TimestampType.NO_TIMESTAMP_TYPE && message.timestamp() > 0 && isEnabled) {
    messageAndOffset = new MessageAndOffsetWithTimestamp(
        message.key(),
        message.value(),
        message.offset(),
        message.partition(),
        message.timestamp(),
        message.timestampType().toString()
    );
  } else {
    messageAndOffset = new MessageAndOffset(message.key(), message.value(), message.offset(), message.partition());
  }
  return messageAndOffset;
}
Example 2
Source File: KafkaConsumer10.java From datacollector with Apache License 2.0
@Override
MessageAndOffset getMessageAndOffset(ConsumerRecord message, boolean isEnabled) {
  MessageAndOffset messageAndOffset;
  if (message.timestampType() != TimestampType.NO_TIMESTAMP_TYPE && message.timestamp() > 0 && isEnabled) {
    messageAndOffset = new MessageAndOffsetWithTimestamp(
        message.key(),
        message.value(),
        message.offset(),
        message.partition(),
        message.timestamp(),
        message.timestampType().toString()
    );
  } else {
    messageAndOffset = new MessageAndOffset(message.key(), message.value(), message.offset(), message.partition());
  }
  return messageAndOffset;
}
Example 3
Source File: ConsumerRecordsIteratorWrapper.java From apm-agent-java with Apache License 2.0
@Override
public ConsumerRecord next() {
  endCurrentTransaction();
  ConsumerRecord record = delegate.next();
  try {
    String topic = record.topic();
    if (!WildcardMatcher.isAnyMatch(messagingConfiguration.getIgnoreMessageQueues(), topic)) {
      Transaction transaction = tracer.startChildTransaction(record, KafkaRecordHeaderAccessor.instance(),
          ConsumerRecordsIteratorWrapper.class.getClassLoader());
      if (transaction != null) {
        transaction.withType("messaging").withName("Kafka record from " + topic).activate();
        transaction.setFrameworkName(FRAMEWORK_NAME);
        Message message = transaction.getContext().getMessage();
        message.withQueue(topic);
        if (record.timestampType() == TimestampType.CREATE_TIME) {
          message.withAge(System.currentTimeMillis() - record.timestamp());
        }
        if (transaction.isSampled() && coreConfiguration.isCaptureHeaders()) {
          for (Header header : record.headers()) {
            String key = header.key();
            if (!TraceContext.TRACE_PARENT_BINARY_HEADER_NAME.equals(key) &&
                WildcardMatcher.anyMatch(coreConfiguration.getSanitizeFieldNames(), key) == null) {
              message.addHeader(key, header.value());
            }
          }
        }
        if (transaction.isSampled() && coreConfiguration.getCaptureBody() != CoreConfiguration.EventType.OFF) {
          message.appendToBody("key=").appendToBody(String.valueOf(record.key())).appendToBody("; ")
              .appendToBody("value=").appendToBody(String.valueOf(record.value()));
        }
      }
    }
  } catch (Exception e) {
    logger.error("Error in transaction creation based on Kafka record", e);
  }
  return record;
}
Example 4
Source File: KafkaMirrorMakerConnectorTask.java From brooklin with BSD 2-Clause "Simplified" License
@Override
protected DatastreamProducerRecord translate(ConsumerRecord<?, ?> fromKafka, Instant readTime) {
  long eventsSourceTimestamp = fromKafka.timestampType() == TimestampType.LOG_APPEND_TIME
      ? fromKafka.timestamp() : readTime.toEpochMilli();
  HashMap<String, String> metadata = new HashMap<>();
  metadata.put(KAFKA_ORIGIN_CLUSTER, _mirrorMakerSource.getBrokerListString());
  String topic = fromKafka.topic();
  metadata.put(KAFKA_ORIGIN_TOPIC, topic);
  int partition = fromKafka.partition();
  String partitionStr = String.valueOf(partition);
  metadata.put(KAFKA_ORIGIN_PARTITION, partitionStr);
  long offset = fromKafka.offset();
  String offsetStr = String.valueOf(offset);
  metadata.put(KAFKA_ORIGIN_OFFSET, offsetStr);
  metadata.put(BrooklinEnvelopeMetadataConstants.EVENT_TIMESTAMP, String.valueOf(eventsSourceTimestamp));
  BrooklinEnvelope envelope = new BrooklinEnvelope(fromKafka.key(), fromKafka.value(), null, metadata);
  DatastreamProducerRecordBuilder builder = new DatastreamProducerRecordBuilder();
  builder.addEvent(envelope);
  builder.setEventsSourceTimestamp(eventsSourceTimestamp);
  builder.setSourceCheckpoint(new KafkaMirrorMakerCheckpoint(topic, partition, offset).toString());
  builder.setDestination(_datastreamTask.getDatastreamDestination()
      .getConnectionString()
      .replace(KafkaMirrorMakerConnector.MM_TOPIC_PLACEHOLDER,
          StringUtils.isBlank(_destinationTopicPrefix) ? topic : _destinationTopicPrefix + topic));
  if (_isIdentityMirroringEnabled) {
    builder.setPartition(partition);
  }
  return builder.build();
}
Example 5
Source File: KafkaConnectorTask.java From brooklin with BSD 2-Clause "Simplified" License
@Override
protected DatastreamProducerRecord translate(ConsumerRecord<?, ?> fromKafka, Instant readTime) {
  HashMap<String, String> metadata = new HashMap<>();
  metadata.put("kafka-origin", _srcConnString.toString());
  int partition = fromKafka.partition();
  String partitionStr = String.valueOf(partition);
  metadata.put("kafka-origin-partition", partitionStr);
  String offsetStr = String.valueOf(fromKafka.offset());
  metadata.put("kafka-origin-offset", offsetStr);
  long eventsSourceTimestamp = readTime.toEpochMilli();
  if (fromKafka.timestampType() == TimestampType.CREATE_TIME) {
    // If the Kafka record carries the create time, store the event creation time as the event timestamp.
    metadata.put(BrooklinEnvelopeMetadataConstants.EVENT_TIMESTAMP, String.valueOf(fromKafka.timestamp()));
  } else if (fromKafka.timestampType() == TimestampType.LOG_APPEND_TIME) {
    // If the Kafka record carries the log append time, use that as the event source timestamp,
    // which will be used to calculate the SLA.
    metadata.put(BrooklinEnvelopeMetadataConstants.SOURCE_TIMESTAMP, String.valueOf(fromKafka.timestamp()));
    metadata.put(BrooklinEnvelopeMetadataConstants.EVENT_TIMESTAMP, String.valueOf(readTime.toEpochMilli()));
    eventsSourceTimestamp = fromKafka.timestamp();
  }
  BrooklinEnvelope envelope = new BrooklinEnvelope(fromKafka.key(), fromKafka.value(), null, metadata);
  DatastreamProducerRecordBuilder builder = new DatastreamProducerRecordBuilder();
  builder.addEvent(envelope);
  builder.setEventsSourceTimestamp(eventsSourceTimestamp);
  builder.setPartition(partition); // assume source partition count is same as dest
  builder.setSourceCheckpoint(partitionStr + "-" + offsetStr);
  return builder.build();
}
Example 6
Source File: KafkaRowConverterTest.java From calcite with Apache License 2.0
/**
 * Parse and reformat Kafka message from consumer, to fit with row schema
 * defined as {@link #rowDataType(String)}.
 *
 * @param message the raw Kafka message record
 * @return fields in the row
 */
@Override
public Object[] toRow(final ConsumerRecord<String, String> message) {
  Object[] fields = new Object[3];
  fields[0] = message.topic();
  fields[1] = message.partition();
  fields[2] = message.timestampType().name;
  return fields;
}
Example 7
Source File: DBusConsumerRecord.java From DBus with Apache License 2.0
public DBusConsumerRecord(ConsumerRecord<K, V> record) {
  this(record.topic(), record.partition(), record.offset(), record.timestamp(), record.timestampType(),
      record.checksum(), record.serializedKeySize(), record.serializedValueSize(), record.key(), record.value());
}