Java Code Examples for org.apache.kafka.clients.producer.ProducerRecord#value()
The following examples show how to use org.apache.kafka.clients.producer.ProducerRecord#value(). Each example is drawn from an open-source project; the source file and project are noted above it.
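To ground the examples, here is a minimal, self-contained sketch of the accessor itself; the topic name, key, and value below are hypothetical:

import org.apache.kafka.clients.producer.ProducerRecord;

public class ProducerRecordValueDemo {
    public static void main(String[] args) {
        // Build a record for a hypothetical topic; only topic, key, and value are set.
        ProducerRecord<String, String> record =
                new ProducerRecord<>("demo-topic", "some-key", "some-value");

        // value() returns the payload exactly as it was passed to the constructor.
        System.out.println(record.value()); // prints "some-value"
    }
}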
Example 1
Source File: ProducerRecordCoder.java From beam with Apache License 2.0
@SuppressWarnings("unchecked") @Override public Object structuralValue(ProducerRecord<K, V> value) { if (consistentWithEquals()) { return value; } else { if (!ConsumerSpEL.hasHeaders()) { return new ProducerRecord<>( value.topic(), value.partition(), value.timestamp(), value.key(), value.value()); } else { return new ProducerRecord<>( value.topic(), value.partition(), value.timestamp(), value.key(), value.value(), value.headers()); } } }
Example 2
Source File: ProducerInterceptorPrefix.java From kafka_book_demo with Apache License 2.0
@Override
public ProducerRecord<String, String> onSend(
        ProducerRecord<String, String> record) {
    String modifiedValue = "prefix1-" + record.value();
    return new ProducerRecord<>(record.topic(), record.partition(),
            record.timestamp(), record.key(), modifiedValue,
            record.headers());
//        if (record.value().length() < 5) {
//            throw new RuntimeException();
//        }
//        return record;
}
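An interceptor like this only takes effect once it is registered with the producer. A minimal registration sketch, assuming the standard Kafka client configuration keys (the broker address is hypothetical):

import java.util.Properties;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.common.serialization.StringSerializer;

Properties props = new Properties();
props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
// Register the interceptor; onSend() runs for every record before serialization.
props.put(ProducerConfig.INTERCEPTOR_CLASSES_CONFIG, ProducerInterceptorPrefix.class.getName());
KafkaProducer<String, String> producer = new KafkaProducer<>(props);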
Example 3
Source File: DemoProducerInterceptor.java From BigData-In-Practice with Apache License 2.0
/**
 * Called before the message is serialized and its partition is computed.
 *
 * @param record the record to intercept
 * @return the (possibly modified) record to send
 */
@Override
public ProducerRecord<String, String> onSend(ProducerRecord<String, String> record) {
    // Interceptor: simply prepend a prefix to the value
    String modifiedValue = "prefix1-" + record.value();
    return new ProducerRecord<>(record.topic(), record.partition(),
            record.timestamp(), record.key(), modifiedValue, record.headers());
}
Example 4
Source File: TestContextDriver.java From simplesource with Apache License 2.0
<V> V verifyAndReturn(ProducerRecord<K, V> record, boolean isNull, K k, Consumer<V> verifier) {
    if (isNull) {
        assertThat(record).isNull();
        return null;
    }
    if (record == null) return null;
    if (k != null) {
        assertThat(record.key()).isEqualTo(k);
    }
    if (verifier != null) {
        verifier.accept(record.value());
    }
    return record.value();
}
Example 5
Source File: DefaultKafkaMessageConverterTest.java From extension-kafka with Apache License 2.0
private static ConsumerRecord<String, byte[]> toReceiverRecord(ProducerRecord<String, byte[]> message) {
    ConsumerRecord<String, byte[]> receiverRecord = new ConsumerRecord<>(
            SOME_TOPIC, SOME_PARTITION, SOME_OFFSET, message.key(), message.value()
    );
    message.headers().forEach(header -> receiverRecord.headers().add(header));
    return receiverRecord;
}
Example 6
Source File: ProducedKafkaRecord.java From kafka-junit with BSD 3-Clause "New" or "Revised" License
/**
 * Utility factory.
 * @param recordMetadata Metadata about the produced record.
 * @param producerRecord The original record that was produced.
 * @param <K> Type of key
 * @param <V> Type of message
 * @return A ProducedKafkaRecord that represents metadata about the original record,
 *         and the results of it being published.
 */
static <K, V> ProducedKafkaRecord<K, V> newInstance(
        final RecordMetadata recordMetadata,
        final ProducerRecord<K, V> producerRecord) {
    return new ProducedKafkaRecord<>(
        recordMetadata.topic(),
        recordMetadata.partition(),
        recordMetadata.offset(),
        producerRecord.key(),
        producerRecord.value()
    );
}
Example 7
Source File: Kafka_Streams_MachineLearning_H2O_GBM_ExampleTest.java From kafka-streams-machine-learning-examples with Apache License 2.0
private String getOutput() {
    ProducerRecord<String, String> output = testDriver.readOutput(
            Kafka_Streams_MachineLearning_H2O_GBM_Example.OUTPUT_TOPIC,
            stringDeserializer, stringDeserializer);
    assertThat(output).isNotNull();
    return output.value();
}
Example 8
Source File: Kafka_Streams_MachineLearning_H2O_DeepLearning_ExampleTest.java From kafka-streams-machine-learning-examples with Apache License 2.0
private String getOutput() {
    ProducerRecord<String, String> output = testDriver.readOutput(
            Kafka_Streams_MachineLearning_H2O_DeepLearning_Example.OUTPUT_TOPIC,
            stringDeserializer, stringDeserializer);
    assertThat(output).isNotNull();
    return output.value();
}
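For context, the getOutput() helpers in Examples 7 and 8 rely on the pre-2.4 kafka-streams-test-utils API, in which TopologyTestDriver.readOutput() returns a ProducerRecord. A minimal round-trip sketch under that API, assuming a topology and streams configuration built elsewhere (topic names are hypothetical):

import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.serialization.StringDeserializer;
import org.apache.kafka.common.serialization.StringSerializer;
import org.apache.kafka.streams.TopologyTestDriver;
import org.apache.kafka.streams.test.ConsumerRecordFactory;

// Assumes `topology` (Topology) and `streamsConfig` (Properties) are defined elsewhere.
TopologyTestDriver testDriver = new TopologyTestDriver(topology, streamsConfig);

// Pipe a record into the input topic...
ConsumerRecordFactory<String, String> factory =
        new ConsumerRecordFactory<>(new StringSerializer(), new StringSerializer());
testDriver.pipeInput(factory.create("input-topic", "key-1", "raw-value"));

// ...then read the processed record back; value() carries the transformed payload.
ProducerRecord<String, String> output = testDriver.readOutput(
        "output-topic", new StringDeserializer(), new StringDeserializer());
String result = output.value();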
Example 9
Source File: KafkaProducerHelper.java From zerocode with Apache License 2.0
public static ProducerRecord prepareRecordToSend(String topicName, ProducerRecord recordToSend) {
    return new ProducerRecord(topicName,
            recordToSend.partition(),
            recordToSend.timestamp(),
            recordToSend.key(),
            recordToSend.value(),
            recordToSend.headers());
}
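A possible call site for this helper, showing that the payload returned by value() is carried over unchanged when the record is re-targeted to another topic (topic names are hypothetical):

ProducerRecord original = new ProducerRecord("old-topic", "some-key", "some-value");
ProducerRecord redirected = KafkaProducerHelper.prepareRecordToSend("new-topic", original);
// redirected.value() still returns "some-value"; partition, timestamp, key,
// and headers are copied from the original record as well.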
Example 10
Source File: TestKafkaWriter.java From singer with Apache License 2.0
@Test
public void testWriteLogMessagesWithCrcPartitioning() throws Exception {
  KafkaMessagePartitioner partitioner = new Crc32ByteArrayPartitioner();
  KafkaProducerConfig config = new KafkaProducerConfig();
  SingerSettings.setSingerConfig(new SingerConfig());
  KafkaProducerManager.injectTestProducer(config, producer);
  // default value for skip noleader partition is false
  KafkaWriter writer = new KafkaWriter(config, partitioner, "topicx", false,
      Executors.newCachedThreadPool());

  List<PartitionInfo> partitions = ImmutableList.copyOf(Arrays.asList(
      new PartitionInfo("topicx", 1, new Node(2, "broker2", 9092, "us-east-1b"), null, null),
      new PartitionInfo("topicx", 0, new Node(1, "broker1", 9092, "us-east-1a"), null, null),
      new PartitionInfo("topicx", 2, new Node(3, "broker3", 9092, "us-east-1c"), null, null),
      new PartitionInfo("topicx", 6, new Node(2, "broker2", 9092, "us-east-1b"), null, null),
      new PartitionInfo("topicx", 3, new Node(4, "broker4", 9092, "us-east-1a"), null, null),
      new PartitionInfo("topicx", 5, new Node(1, "broker1", 9092, "us-east-1a"), null, null),
      new PartitionInfo("topicx", 7, new Node(3, "broker3", 9092, "us-east-1c"), null, null),
      new PartitionInfo("topicx", 4, new Node(5, "broker5", 9092, "us-east-1b"), null, null),
      new PartitionInfo("topicx", 8, new Node(4, "broker4", 9092, "us-east-1a"), null, null),
      new PartitionInfo("topicx", 9, new Node(5, "broker5", 9092, "us-east-1b"), null, null),
      new PartitionInfo("topicx", 10, new Node(1, "broker1", 9092, "us-east-1a"), null, null)));
  when(producer.partitionsFor("topicx")).thenReturn(partitions);

  // messages with the same key will be put together in the same bucket (same partition)
  List<String> keys = IntStream.range(0, NUM_KEYS).mapToObj(i -> "key" + i)
      .collect(Collectors.toList());
  Map<Integer, List<LogMessage>> msgPartitionMap = new HashMap<>();
  Map<Integer, List<ProducerRecord<byte[], byte[]>>> recordPartitionMap = new HashMap<>();
  Map<Integer, List<RecordMetadata>> metadataPartitionMap = new HashMap<>();
  HashFunction crc32 = Hashing.crc32();
  List<LogMessage> logMessages = new ArrayList<>();
  for (int i = 0; i < NUM_KEYS; i++) {
    for (int j = 0; j < NUM_EVENTS / NUM_KEYS; j++) {
      LogMessage logMessage = new LogMessage();
      logMessage.setKey(keys.get(i).getBytes());
      logMessage.setMessage(ByteBuffer.allocate(100).put(String.valueOf(i).getBytes()));
      logMessages.add(logMessage);
      int partitionId = Math.abs(crc32.hashBytes(logMessage.getKey()).asInt() % partitions.size());
      ProducerRecord<byte[], byte[]> record = new ProducerRecord<>(
          "topicx", partitionId, logMessage.getKey(), logMessage.getMessage());
      RecordMetadata recordMetadata = new RecordMetadata(
          new TopicPartition(record.topic(), record.partition()), 0, 0, 0, 0L,
          record.key().length, record.value().length);
      when(producer.send(record)).thenReturn(ConcurrentUtils.constantFuture(recordMetadata));
      if (msgPartitionMap.containsKey(partitionId)) {
        msgPartitionMap.get(partitionId).add(logMessage);
        recordPartitionMap.get(partitionId).add(record);
        metadataPartitionMap.get(partitionId).add(recordMetadata);
      } else {
        msgPartitionMap.put(partitionId, new ArrayList<>());
        recordPartitionMap.put(partitionId, new ArrayList<>());
        metadataPartitionMap.put(partitionId, new ArrayList<>());
        msgPartitionMap.get(partitionId).add(logMessage);
        recordPartitionMap.get(partitionId).add(record);
        metadataPartitionMap.get(partitionId).add(recordMetadata);
      }
    }
  }

  List<PartitionInfo> sortedPartitions = new ArrayList<>(partitions);
  Collections.sort(sortedPartitions, new PartitionComparator());

  Map<Integer, Map<Integer, LoggingAuditHeaders>> mapOfHeadersMap = new HashMap<>();
  Map<Integer, List<ProducerRecord<byte[], byte[]>>> messageCollation =
      writer.messageCollation(partitions, "topicx", logMessages, mapOfHeadersMap);

  for (int partitionId = 0; partitionId < messageCollation.keySet().size(); partitionId++) {
    if (messageCollation.get(partitionId).size() == 0) {
      continue;
    }
    List<ProducerRecord<byte[], byte[]>> writerOutput = messageCollation.get(partitionId);

    // verify the message order is what is expected by calling messageCollation()
    List<ProducerRecord<byte[], byte[]>> expectedRecords = recordPartitionMap.get(partitionId);
    assertEquals(expectedRecords.size(), writerOutput.size());
    for (int j = 0; j < writerOutput.size(); j++) {
      assertEquals(expectedRecords.get(j), writerOutput.get(j));
    }

    // verify the content of each LogMessage matches the content of its ProducerRecord
    List<LogMessage> originalData = msgPartitionMap.get(partitionId);
    assertEquals(originalData.size(), writerOutput.size());
    for (int j = 0; j < writerOutput.size(); j++) {
      assertTrue(Arrays.equals(originalData.get(j).getKey(), writerOutput.get(j).key()));
      assertTrue(Arrays.equals(originalData.get(j).getMessage(), writerOutput.get(j).value()));
    }

    // verify that the RecordMetadata for records sent to a given partition are collected
    // into one list, in the same order as the original messages
    List<RecordMetadata> expectedRecordMetadata = metadataPartitionMap.get(partitionId);
    KafkaWritingTaskResult kafkaWritingTaskResult = writer.getClusterThreadPool()
        .submit(new KafkaWritingTask(producer, writerOutput, 0, sortedPartitions)).get();
    assertEquals(expectedRecordMetadata.size(), kafkaWritingTaskResult.getRecordMetadataList().size());
    for (int j = 0; j < expectedRecordMetadata.size(); j++) {
      assertEquals(expectedRecordMetadata.get(j), kafkaWritingTaskResult.getRecordMetadataList().get(j));
    }
  }

  // validate that writing the messages does not throw any error
  writer.writeLogMessages(logMessages);
  writer.close();
}
Example 11
Source File: Kafka_Streams_TensorFlow_Image_Recognition_ExampleTest.java From kafka-streams-machine-learning-examples with Apache License 2.0
private String getOutput() {
    ProducerRecord<String, String> output = testDriver.readOutput(
            Kafka_Streams_TensorFlow_Image_Recognition_Example.imageOutputTopic,
            stringDeserializer, stringDeserializer);
    assertThat(output).isNotNull();
    return output.value();
}