Java Code Examples for kafka.message.MessageAndMetadata#key()
The following examples show how to use kafka.message.MessageAndMetadata#key(). Each example notes its source file, originating project, and license.
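kafka.message.MessageAndMetadata is the per-record type returned by the iterator of the old (pre-0.10, Scala-based) high-level consumer; key() returns the decoded message key, or null for unkeyed messages. As shared context for the examples below, here is a minimal sketch of how such records are typically obtained. The topic name, ZooKeeper address, and group id are illustrative placeholders, not values taken from any of the projects cited.

    // Minimal sketch of the old high-level consumer API (Kafka 0.8.x).
    // All connection values below are placeholders.
    Properties props = new Properties();
    props.put("zookeeper.connect", "localhost:2181");
    props.put("group.id", "example-group");
    props.put("consumer.timeout.ms", "6000"); // hasNext() throws ConsumerTimeoutException when idle

    ConsumerConnector connector =
        kafka.consumer.Consumer.createJavaConsumerConnector(new ConsumerConfig(props));
    Map<String, Integer> topicCountMap = new HashMap<String, Integer>();
    topicCountMap.put("example-topic", 1);
    Map<String, List<KafkaStream<byte[], byte[]>>> streams =
        connector.createMessageStreams(topicCountMap);
    ConsumerIterator<byte[], byte[]> it = streams.get("example-topic").get(0).iterator();

    try {
      while (it.hasNext()) {
        MessageAndMetadata<byte[], byte[]> mam = it.next();
        byte[] key = mam.key();       // null for unkeyed messages
        byte[] value = mam.message();
        // mam.topic(), mam.partition(), and mam.offset() carry the metadata
      }
    } catch (ConsumerTimeoutException e) {
      // no message within consumer.timeout.ms; several examples below handle this
    }
    connector.shutdown();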
Example 1
Source File: KafkaConsumer08.java From datacollector with Apache License 2.0
@Override
public MessageAndOffset read() throws StageException {
  try {
    // hasNext() blocks indefinitely if consumer.timeout.ms is set to -1.
    // But if consumer.timeout.ms is set to a value, like 6000, a
    // ConsumerTimeoutException is thrown if no message is written to the
    // Kafka topic in that time.
    if (consumerIterator.hasNext()) {
      MessageAndMetadata<byte[], byte[]> messageAndMetadata = consumerIterator.next();
      byte[] message = messageAndMetadata.message();
      long offset = messageAndMetadata.offset();
      int partition = messageAndMetadata.partition();
      return new MessageAndOffset(messageAndMetadata.key(), message, offset, partition);
    }
    return null;
  } catch (ConsumerTimeoutException e) {
    // For the high-level consumer, the fetching logic is handled by a
    // background fetcher thread and is hidden from the user. In either case,
    // 1) the broker is down or 2) no message is available, the fetcher thread
    // keeps retrying while the user thread waits on it to put data into the
    // buffer, until the timeout. In short, the high-level consumer is
    // designed so that users do not have to worry about connect/reconnect
    // issues.
    return null;
  }
}
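The behavior described in the comments is controlled by the old consumer's consumer.timeout.ms property. A short sketch of the two settings the comments refer to (the values are illustrative):

    // Block indefinitely in hasNext() until a message arrives (the default):
    props.put("consumer.timeout.ms", "-1");

    // Throw ConsumerTimeoutException if no message arrives within 6 seconds;
    // read() above catches it and returns null:
    props.put("consumer.timeout.ms", "6000");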
Example 2
Source File: LegacyKafkaMessageIterator.java From secor with Apache License 2.0
@Override
public Message next() {
  MessageAndMetadata<byte[], byte[]> kafkaMessage;
  try {
    kafkaMessage = mIterator.next();
  } catch (ConsumerTimeoutException e) {
    throw new LegacyConsumerTimeoutException(e);
  }

  long timestamp = 0L;
  if (mConfig.useKafkaTimestamp()) {
    timestamp = mKafkaMessageTimestampFactory.getKafkaMessageTimestamp().getTimestamp(kafkaMessage);
  }
  return new Message(kafkaMessage.topic(), kafkaMessage.partition(), kafkaMessage.offset(),
      kafkaMessage.key(), kafkaMessage.message(), timestamp, null);
}
Example 3
Source File: MessageResource.java From dropwizard-kafka-http with Apache License 2.0
public Message(MessageAndMetadata<byte[], byte[]> message) {
  this.topic = message.topic();
  this.key = message.key() != null ? new String(message.key(), Charset.forName("utf-8")) : null;
  this.message = new String(message.message(), Charset.forName("utf-8"));
  this.partition = message.partition();
  this.offset = message.offset();
}
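On Java 7 or later, the same conversion can use java.nio.charset.StandardCharsets and skip the charset-name lookup; a minimal equivalent sketch:

    // Equivalent to the constructor body above, using the StandardCharsets constant:
    this.key = message.key() != null ? new String(message.key(), StandardCharsets.UTF_8) : null;
    this.message = new String(message.message(), StandardCharsets.UTF_8);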
Example 4
Source File: KafkaSource.java From flume-ng-extends-source with MIT License
public Status process() throws EventDeliveryException {
  byte[] kafkaMessage;
  byte[] kafkaKey;
  Event event;
  Map<String, String> headers;
  long batchStartTime = System.currentTimeMillis();
  long batchEndTime = System.currentTimeMillis() + timeUpperLimit;
  try {
    boolean iterStatus = false;
    long startTime = System.nanoTime();
    while (eventList.size() < batchUpperLimit &&
           System.currentTimeMillis() < batchEndTime) {
      iterStatus = hasNext();
      if (iterStatus) {
        // get next message
        MessageAndMetadata<byte[], byte[]> messageAndMetadata = it.next();
        kafkaMessage = messageAndMetadata.message();
        kafkaKey = messageAndMetadata.key();

        // Add headers to event (topic, timestamp, and key)
        headers = new HashMap<String, String>();
        headers.put(KafkaSourceConstants.TIMESTAMP, String.valueOf(System.currentTimeMillis()));
        headers.put(KafkaSourceConstants.TOPIC, topic);
        if (kafkaKey != null) {
          headers.put(KafkaSourceConstants.KEY, new String(kafkaKey));
        }
        if (log.isDebugEnabled()) {
          log.debug("Message: {}", new String(kafkaMessage));
        }
        event = EventBuilder.withBody(kafkaMessage, headers);
        eventList.add(event);
      }
      if (log.isDebugEnabled()) {
        log.debug("Waited: {} ", System.currentTimeMillis() - batchStartTime);
        log.debug("Event #: {}", eventList.size());
      }
    }
    long endTime = System.nanoTime();
    counter.addToKafkaEventGetTimer((endTime - startTime) / (1000 * 1000));
    counter.addToEventReceivedCount(Long.valueOf(eventList.size()));

    // If we have events, send them to the channel, clear the event list,
    // and commit if Kafka doesn't auto-commit.
    if (eventList.size() > 0) {
      getChannelProcessor().processEventBatch(eventList);
      counter.addToEventAcceptedCount(eventList.size());
      eventList.clear();
      if (log.isDebugEnabled()) {
        log.debug("Wrote {} events to channel", eventList.size());
      }
      if (!kafkaAutoCommitEnabled) {
        // commit the read transactions to Kafka to avoid duplicates
        long commitStartTime = System.nanoTime();
        consumer.commitOffsets();
        long commitEndTime = System.nanoTime();
        counter.addToKafkaCommitTimer((commitEndTime - commitStartTime) / (1000 * 1000));
      }
    }
    if (!iterStatus) {
      if (log.isDebugEnabled()) {
        counter.incrementKafkaEmptyCount();
        log.debug("Returning with backoff. No more data to read");
      }
      return Status.BACKOFF;
    }
    return Status.READY;
  } catch (Exception e) {
    log.error("KafkaSource EXCEPTION, {}", e);
    return Status.BACKOFF;
  }
}
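Note that hasNext() here is not ConsumerIterator.hasNext() itself but a helper on the source class. A hypothetical sketch of what such a wrapper typically looks like, assuming it absorbs the consumer timeout (the actual implementation in flume-ng-extends-source may differ):

    // Hypothetical helper: treat a consumer timeout as "no data for now" so
    // the batch loop can end the batch and return BACKOFF instead of
    // propagating the exception.
    private boolean hasNext() {
      try {
        return it.hasNext();
      } catch (ConsumerTimeoutException e) {
        return false;
      }
    }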
Example 5
Source File: FastKafkaSource.java From fraud-detection-tutorial with Apache License 2.0
public Status process() throws EventDeliveryException {
  long batchStartTime = System.currentTimeMillis();
  long batchEndTime = System.currentTimeMillis() + (long) this.timeUpperLimit;
  try {
    boolean e = false;
    long startTime = System.nanoTime();
    while (this.eventList.size() < this.batchUpperLimit &&
           System.currentTimeMillis() < batchEndTime) {
      e = this.hasNext();
      if (e) {
        MessageAndMetadata endTime = this.it.next();
        byte[] kafkaMessage = (byte[]) endTime.message();
        byte[] kafkaKey = (byte[]) endTime.key();
        HashMap headers = new HashMap();
        headers.put("timestamp", String.valueOf(System.currentTimeMillis()));
        headers.put("topic", this.topic);
        if (kafkaKey != null) {
          headers.put("key", new String(kafkaKey));
        }
        if (log.isDebugEnabled()) {
          log.debug("Message: {}", new String(kafkaMessage));
        }
        Event event = EventBuilder.withBody(kafkaMessage, headers);
        this.eventList.add(event);
      }
      if (log.isDebugEnabled()) {
        log.debug("Waited: {} ", Long.valueOf(System.currentTimeMillis() - batchStartTime));
        log.debug("Event #: {}", Integer.valueOf(this.eventList.size()));
      }
    }
    long endTime1 = System.nanoTime();
    this.counter.addToKafkaEventGetTimer((endTime1 - startTime) / 1000000L);
    this.counter.addToEventReceivedCount(Long.valueOf((long) this.eventList.size()).longValue());
    if (this.eventList.size() > 0) {
      this.getChannelProcessor().processEventBatch(this.eventList);
      this.counter.addToEventAcceptedCount((long) this.eventList.size());
      this.eventList.clear();
      if (log.isDebugEnabled()) {
        log.debug("Wrote {} events to channel", Integer.valueOf(this.eventList.size()));
      }
      if (!this.kafkaAutoCommitEnabled) {
        long commitStartTime = System.nanoTime();
        this.consumer.commitOffsets();
        long commitEndTime = System.nanoTime();
        this.counter.addToKafkaCommitTimer((commitEndTime - commitStartTime) / 1000000L);
      }
    }
    if (!e) {
      if (log.isDebugEnabled()) {
        this.counter.incrementKafkaEmptyCount();
        log.debug("Returning with backoff. No more data to read");
      }
      //Thread.sleep(10);
      return Status.READY;
    } else {
      return Status.READY;
    }
  } catch (Exception var18) {
    log.error("KafkaSource EXCEPTION, {}", var18);
    return Status.BACKOFF;
  }
}
Example 6
Source File: KafkaAvroJobStatusMonitorTest.java From incubator-gobblin with Apache License 2.0
private DecodeableKafkaRecord convertMessageAndMetadataToDecodableKafkaRecord(MessageAndMetadata messageAndMetadata) {
  ConsumerRecord consumerRecord = new ConsumerRecord<>(TOPIC, messageAndMetadata.partition(),
      messageAndMetadata.offset(), messageAndMetadata.key(), messageAndMetadata.message());
  return new Kafka09ConsumerClient.Kafka09ConsumerRecord(consumerRecord);
}
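The five-argument ConsumerRecord constructor used here exists in kafka-clients 0.9+, and the resulting record exposes the same fields under the new-consumer accessor names. A small usage sketch with illustrative values (keyBytes and valueBytes are placeholders):

    // Sketch: topic, partition, offset, key, and value round-trip through
    // the new-consumer accessors.
    ConsumerRecord<byte[], byte[]> record =
        new ConsumerRecord<>("example-topic", 0, 42L, keyBytes, valueBytes);
    record.topic();      // "example-topic"
    record.partition();  // 0
    record.offset();     // 42L
    record.key();        // keyBytes, i.e. MessageAndMetadata.key()
    record.value();      // valueBytes, i.e. MessageAndMetadata.message()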
Example 7
Source File: Kafka09DataWriterTest.java From incubator-gobblin with Apache License 2.0
@Test
public void testKeyedAvroSerialization() throws IOException, InterruptedException, SchemaRegistryException {
  String topic = "testAvroSerialization09";
  _kafkaTestHelper.provisionTopic(topic);
  Properties props = new Properties();
  props.setProperty(KafkaWriterConfigurationKeys.KAFKA_TOPIC, topic);
  props.setProperty(KafkaWriterConfigurationKeys.KAFKA_PRODUCER_CONFIG_PREFIX + "bootstrap.servers",
      "127.0.0.1:" + _kafkaTestHelper.getKafkaServerPort());
  props.setProperty(KafkaWriterConfigurationKeys.KAFKA_PRODUCER_CONFIG_PREFIX + "value.serializer",
      LiAvroSerializer.class.getName());
  props.setProperty(KafkaWriterConfigurationKeys.WRITER_KAFKA_KEYED_CONFIG, "true");
  String keyField = "field1";
  props.setProperty(KafkaWriterConfigurationKeys.WRITER_KAFKA_KEYFIELD_CONFIG, keyField);

  // set up mock schema registry
  props.setProperty(KafkaWriterConfigurationKeys.KAFKA_PRODUCER_CONFIG_PREFIX
          + KafkaSchemaRegistryConfigurationKeys.KAFKA_SCHEMA_REGISTRY_CLASS,
      ConfigDrivenMd5SchemaRegistry.class.getCanonicalName());

  Kafka09DataWriter<String, GenericRecord> kafka09DataWriter = new Kafka09DataWriter<>(props);
  WriteCallback callback = mock(WriteCallback.class);
  GenericRecord record = TestUtils.generateRandomAvroRecord();
  try {
    kafka09DataWriter.write(record, callback);
  } finally {
    kafka09DataWriter.close();
  }

  verify(callback, times(1)).onSuccess(isA(WriteResponse.class));
  verify(callback, never()).onFailure(isA(Exception.class));

  MessageAndMetadata<byte[], byte[]> value = _kafkaTestHelper.getIteratorForTopic(topic).next();
  byte[] key = value.key();
  byte[] message = value.message();
  ConfigDrivenMd5SchemaRegistry schemaReg = new ConfigDrivenMd5SchemaRegistry(topic, record.getSchema());
  LiAvroDeserializer deser = new LiAvroDeserializer(schemaReg);
  GenericRecord receivedRecord = deser.deserialize(topic, message);
  Assert.assertEquals(record.toString(), receivedRecord.toString());
  Assert.assertEquals(new String(key), record.get(keyField));
}
Example 8
Source File: Kafka09DataWriterTest.java From incubator-gobblin with Apache License 2.0
@Test
public void testValueSerialization() throws IOException, InterruptedException, SchemaRegistryException {
  String topic = "testAvroSerialization09";
  _kafkaTestHelper.provisionTopic(topic);
  Properties props = new Properties();
  props.setProperty(KafkaWriterConfigurationKeys.KAFKA_TOPIC, topic);
  props.setProperty(KafkaWriterConfigurationKeys.KAFKA_PRODUCER_CONFIG_PREFIX + "bootstrap.servers",
      "127.0.0.1:" + _kafkaTestHelper.getKafkaServerPort());
  props.setProperty(KafkaWriterConfigurationKeys.KAFKA_PRODUCER_CONFIG_PREFIX + "value.serializer",
      "org.apache.kafka.common.serialization.StringSerializer");
  props.setProperty(KafkaWriterConfigurationKeys.WRITER_KAFKA_KEYED_CONFIG, "true");
  String keyField = "field1";
  props.setProperty(KafkaWriterConfigurationKeys.WRITER_KAFKA_KEYFIELD_CONFIG, keyField);
  props.setProperty(KafkaWriterConfigurationKeys.WRITER_KAFKA_VALUEFIELD_CONFIG, keyField);

  // set up mock schema registry
  props.setProperty(KafkaWriterConfigurationKeys.KAFKA_PRODUCER_CONFIG_PREFIX
          + KafkaSchemaRegistryConfigurationKeys.KAFKA_SCHEMA_REGISTRY_CLASS,
      ConfigDrivenMd5SchemaRegistry.class.getCanonicalName());

  Kafka09DataWriter<String, GenericRecord> kafka09DataWriter = new Kafka09DataWriter<>(props);
  WriteCallback callback = mock(WriteCallback.class);
  GenericRecord record = TestUtils.generateRandomAvroRecord();
  try {
    kafka09DataWriter.write(record, callback);
  } finally {
    kafka09DataWriter.close();
  }

  verify(callback, times(1)).onSuccess(isA(WriteResponse.class));
  verify(callback, never()).onFailure(isA(Exception.class));

  MessageAndMetadata<byte[], byte[]> value = _kafkaTestHelper.getIteratorForTopic(topic).next();
  byte[] key = value.key();
  byte[] message = value.message();
  Assert.assertEquals(new String(message), record.get(keyField));
  Assert.assertEquals(new String(key), record.get(keyField));
}
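The only material differences from Example 7 are the plain String value serializer and the value-field projection; both key and value are taken from the same record field, which is why both assertions compare against record.get(keyField):

    // The two settings that distinguish this test from Example 7:
    props.setProperty(KafkaWriterConfigurationKeys.KAFKA_PRODUCER_CONFIG_PREFIX + "value.serializer",
        "org.apache.kafka.common.serialization.StringSerializer");
    props.setProperty(KafkaWriterConfigurationKeys.WRITER_KAFKA_VALUEFIELD_CONFIG, keyField);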
Example 9
Source File: KafkaMessageAdapter.java From message-queue-client-framework with Apache License 2.0
/**
 * <p>Title: messageAdapter</p>
 * <p>Description: Message adapter method</p>
 *
 * @param messageAndMetadata the message and metadata
 * @throws MQException the mq exception
 */
public void messageAdapter(MessageAndMetadata<?, ?> messageAndMetadata) throws MQException {
  byte[] keyBytes = (byte[]) messageAndMetadata.key();
  byte[] valBytes = (byte[]) messageAndMetadata.message();
  K k = decoder.decodeKey(keyBytes);
  V v = decoder.decodeVal(valBytes);
  messageListener.onMessage(k, v);
}
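The decoder is supplied by the framework and its exact interface is not shown here, so the following is only a hypothetical sketch of a String decoder with the two methods the adapter calls:

    // Hypothetical decoder sketch; the real interface name and signatures in
    // message-queue-client-framework may differ.
    public class StringDecoder {

      public String decodeKey(byte[] keyBytes) throws MQException {
        return keyBytes == null ? null : new String(keyBytes, StandardCharsets.UTF_8);
      }

      public String decodeVal(byte[] valBytes) throws MQException {
        return valBytes == null ? null : new String(valBytes, StandardCharsets.UTF_8);
      }
    }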