org.springframework.kafka.support.KafkaHeaders Java Examples
The following examples show how to use org.springframework.kafka.support.KafkaHeaders. Each example is drawn from an open source project; the source file, originating project, and license are noted above it.
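Taken together, the examples below reduce to two recurring patterns: setting headers such as KafkaHeaders.TOPIC and KafkaHeaders.MESSAGE_KEY when building an outbound Message, and injecting received headers such as KafkaHeaders.RECEIVED_TOPIC into a listener parameter with @Header. The following sketch shows both side by side; the KafkaTemplate wiring, the demo-topic name, and the listener id are illustrative assumptions, not part of any example on this page.

// Minimal sketch of both directions; bean wiring, topic name, and listener id are assumed.
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.springframework.kafka.annotation.KafkaListener;
import org.springframework.kafka.core.KafkaTemplate;
import org.springframework.kafka.support.KafkaHeaders;
import org.springframework.messaging.Message;
import org.springframework.messaging.handler.annotation.Header;
import org.springframework.messaging.support.MessageBuilder;

public class KafkaHeadersSketch {

    private final KafkaTemplate<String, String> kafkaTemplate; // assumed to be configured elsewhere

    public KafkaHeadersSketch(KafkaTemplate<String, String> kafkaTemplate) {
        this.kafkaTemplate = kafkaTemplate;
    }

    // Outbound: the TOPIC header routes the record, MESSAGE_KEY becomes the record key.
    public void send(String payload) {
        Message<String> message = MessageBuilder.withPayload(payload)
            .setHeader(KafkaHeaders.TOPIC, "demo-topic")     // assumed topic name
            .setHeader(KafkaHeaders.MESSAGE_KEY, "demo-key") // assumed key
            .build();
        kafkaTemplate.send(message);
    }

    // Inbound: RECEIVED_* headers describe where the record came from.
    @KafkaListener(id = "demo-listener", topics = "demo-topic")
    public void listen(ConsumerRecord<String, String> record,
        @Header(KafkaHeaders.RECEIVED_TOPIC) String topic) {
        System.out.println("received '" + record.value() + "' from topic " + topic);
    }
}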
Example #1
Source File: AvroProducer.java From tutorials with MIT License
public void produceEmployeeDetails(int empId, String firstName, String lastName) {
    // creating employee details
    Employee employee = new Employee();
    employee.setId(empId);
    employee.setFirstName(firstName);
    employee.setLastName(lastName);
    employee.setDepartment("IT");
    employee.setDesignation("Engineer");

    // creating partition key for kafka topic
    EmployeeKey employeeKey = new EmployeeKey();
    employeeKey.setId(empId);
    employeeKey.setDepartmentName("IT");

    Message<Employee> message = MessageBuilder.withPayload(employee)
        .setHeader(KafkaHeaders.MESSAGE_KEY, employeeKey)
        .build();

    processor.output()
        .send(message);
}
Example #2
Source File: SpringKafkaIntegrationApplicationTest.java From spring-kafka with MIT License
@Test
public void testIntegration() throws Exception {
    MessageChannel producingChannel =
        applicationContext.getBean("producingChannel", MessageChannel.class);
    Map<String, Object> headers =
        Collections.singletonMap(KafkaHeaders.TOPIC, SPRING_INTEGRATION_KAFKA_TOPIC);

    LOGGER.info("sending 10 messages");
    for (int i = 0; i < 10; i++) {
        GenericMessage<String> message =
            new GenericMessage<>("Hello Spring Integration Kafka " + i + "!", headers);
        producingChannel.send(message);
        LOGGER.info("sent message='{}'", message);
    }

    countDownLatchHandler.getLatch().await(10000, TimeUnit.MILLISECONDS);
    assertThat(countDownLatchHandler.getLatch().getCount()).isEqualTo(0);
}
Example #3
Source File: ScheduledImageResizeRequestSubmitter.java From Spring-Boot-2.0-Projects with MIT License
public void scheduleTaskWithCronExpression() {
    Flux.just(new File(imagesDirectory).listFiles())
        .filter(File::isFile)
        .subscribe(f -> {
            Flux.just(new Dimension(800, 600), new Dimension(180, 180), new Dimension(1200, 630))
                .subscribe(d -> {
                    try {
                        ImageResizeRequest imageResizeRequest = new ImageResizeRequest(
                            (int) d.getWidth(), (int) d.getHeight(), f.getAbsolutePath());
                        ProducerRecord<String, String> record = new ProducerRecord<>(
                            "asyncRequests", objectMapper.writeValueAsString(imageResizeRequest));
                        // ask the replying template to route the reply to this topic
                        record.headers().add(
                            new RecordHeader(KafkaHeaders.REPLY_TOPIC, "asyncReplies".getBytes()));
                        RequestReplyFuture<String, String, String> replyFuture =
                            template.sendAndReceive(record);
                        ConsumerRecord<String, String> consumerRecord = replyFuture.get();
                    } catch (Exception e) {
                        LOGGER.error("Error while sending message", e);
                    }
                },
                e -> LOGGER.error("Error while running lambda", e),
                () -> f.renameTo(new File(f.getParent() + "/Done", f.getName())));
        });
}
Example #4
Source File: TorrentStoreServiceApplication.java From Dodder with MIT License
@StreamListener("index-message-in")
public void indexTorrent(Message<Torrent> message) {
    try {
        Acknowledgment acknowledgment =
            message.getHeaders().get(KafkaHeaders.ACKNOWLEDGMENT, Acknowledgment.class);
        Torrent torrent = message.getPayload();
        log.debug("Index torrent to elasticsearch, info hash is {}", torrent.getInfoHash());
        torrentService.index(torrent);
        // no error, execute acknowledge
        if (acknowledgment != null) {
            acknowledgment.acknowledge();
        }
    } catch (Exception e) {
        log.error("Index torrent error", e);
    }
}
Example #5
Source File: TracingChannelInterceptorTest.java From spring-cloud-sleuth with Apache License 2.0
@Test
public void should_store_kafka_as_remote_service_name_when_kafka_header_is_present() {
    ExecutorSubscribableChannel channel = new ExecutorSubscribableChannel();
    channel.addInterceptor(this.interceptor);
    List<Message<?>> messages = new ArrayList<>();
    channel.subscribe(messages::add);

    Map<String, Object> headers = new HashMap<>();
    headers.put(KafkaHeaders.MESSAGE_KEY, "hello");
    channel.send(MessageBuilder.createMessage("foo", new MessageHeaders(headers)));

    assertThat(this.spans).extracting(MutableSpan::remoteServiceName)
        .contains("kafka");
}
Example #6
Source File: Receiver.java From spring-kafka with MIT License
@KafkaListener(id = "batch-listener", topics = "${kafka.topic.batch}")
public void receive(List<String> data,
    @Header(KafkaHeaders.RECEIVED_PARTITION_ID) List<Integer> partitions,
    @Header(KafkaHeaders.OFFSET) List<Long> offsets) {

    LOGGER.info("start of batch receive");
    for (int i = 0; i < data.size(); i++) {
        LOGGER.info("received message='{}' with partition-offset='{}'", data.get(i),
            partitions.get(i) + "-" + offsets.get(i));
        // handle message
        latch.countDown();
    }
    LOGGER.info("end of batch receive");
}
Example #7
Source File: KafkaMessageBroker.java From piper with Apache License 2.0
@Override
public void send(String aRoutingKey, Object aMessage) {
    Assert.notNull(aRoutingKey, "routing key can't be null");
    if (aMessage instanceof Retryable) {
        Retryable r = (Retryable) aMessage;
        delay(r.getRetryDelayMillis());
    }
    kafkaTemplate.send(MessageBuilder
        .withPayload(aMessage)
        .setHeader(KafkaHeaders.TOPIC, aRoutingKey)
        .setHeader("_type", aMessage.getClass().getName())
        .build());
}
Example #8
Source File: KafkaBinderTests.java From spring-cloud-stream-binder-kafka with Apache License 2.0
@Test
@SuppressWarnings("unchecked")
public void testMessageKeyInPayload() throws Exception {
    Binding<?> producerBinding = null;
    try {
        String testPayload = "test";
        ExtendedProducerProperties<KafkaProducerProperties> producerProperties = createProducerProperties();
        producerProperties.getExtension()
            .setMessageKeyExpression(spelExpressionParser.parseExpression("payload.field.bytes"));
        DirectChannel moduleOutputChannel = createBindableChannel("output",
            createProducerBindingProperties(producerProperties));
        String testTopicName = "existing" + System.currentTimeMillis();
        KafkaTestBinder binder = getBinder();
        producerBinding = binder.bindProducer(testTopicName, moduleOutputChannel,
            producerProperties);
        moduleOutputChannel.addInterceptor(new ChannelInterceptor() {

            @Override
            public Message<?> preSend(Message<?> message, MessageChannel channel) {
                assertThat(message.getHeaders()
                    .get(KafkaExpressionEvaluatingInterceptor.MESSAGE_KEY_HEADER))
                        .isEqualTo("foo".getBytes());
                return message;
            }

        });
        moduleOutputChannel.send(new GenericMessage<>(new Pojo("foo"),
            Collections.singletonMap(KafkaHeaders.PARTITION_ID, 0)));
    }
    finally {
        if (producerBinding != null) {
            producerBinding.unbind();
        }
    }
}
Example #9
Source File: KafkaBinderTests.java From spring-cloud-stream-binder-kafka with Apache License 2.0
@SuppressWarnings({ "rawtypes", "unchecked" })
@Test
public void testTopicPatterns() throws Exception {
    try (AdminClient admin = AdminClient.create(
            Collections.singletonMap(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG,
                embeddedKafka.getEmbeddedKafka().getBrokersAsString()))) {
        admin.createTopics(Collections
            .singletonList(new NewTopic("topicPatterns.1", 1, (short) 1))).all()
            .get();
        Binder binder = getBinder();
        ExtendedConsumerProperties<KafkaConsumerProperties> consumerProperties = createConsumerProperties();
        consumerProperties.getExtension().setDestinationIsPattern(true);
        DirectChannel moduleInputChannel = createBindableChannel("input",
            createConsumerBindingProperties(consumerProperties));
        final CountDownLatch latch = new CountDownLatch(1);
        final AtomicReference<String> topic = new AtomicReference<>();
        moduleInputChannel.subscribe(m -> {
            topic.set(m.getHeaders().get(KafkaHeaders.RECEIVED_TOPIC, String.class));
            latch.countDown();
        });
        Binding<MessageChannel> consumerBinding = binder.bindConsumer(
            "topicPatterns\\..*", "testTopicPatterns", moduleInputChannel,
            consumerProperties);
        DefaultKafkaProducerFactory pf = new DefaultKafkaProducerFactory(
            KafkaTestUtils.producerProps(embeddedKafka.getEmbeddedKafka()));
        KafkaTemplate template = new KafkaTemplate(pf);
        template.send("topicPatterns.1", "foo");
        assertThat(latch.await(10, TimeUnit.SECONDS)).isTrue();
        assertThat(topic.get()).isEqualTo("topicPatterns.1");
        consumerBinding.unbind();
        pf.destroy();
    }
}
Example #10
Source File: KafkaBinderTests.java From spring-cloud-stream-binder-kafka with Apache License 2.0
@Test
@SuppressWarnings("unchecked")
public void testDynamicKeyExpression() throws Exception {
    Binder binder = getBinder(createConfigurationProperties());
    QueueChannel moduleInputChannel = new QueueChannel();
    ExtendedProducerProperties<KafkaProducerProperties> producerProperties = createProducerProperties();
    producerProperties.getExtension().getConfiguration().put("key.serializer",
        StringSerializer.class.getName());
    producerProperties.getExtension().setMessageKeyExpression(
        spelExpressionParser.parseExpression("headers.key"));
    ExtendedConsumerProperties<KafkaConsumerProperties> consumerProperties = createConsumerProperties();
    String uniqueBindingId = UUID.randomUUID().toString();
    DirectChannel moduleOutputChannel = createBindableChannel("output",
        createProducerBindingProperties(producerProperties));
    Binding<MessageChannel> producerBinding = binder.bindProducer(
        "foo" + uniqueBindingId + ".0", moduleOutputChannel, producerProperties);
    Binding<MessageChannel> consumerBinding = binder.bindConsumer(
        "foo" + uniqueBindingId + ".0", null, moduleInputChannel,
        consumerProperties);
    Thread.sleep(1000);
    Message<?> message = MessageBuilder.withPayload("somePayload")
        .setHeader("key", "myDynamicKey").build();
    // Let the consumer actually bind to the producer before sending a msg
    binderBindUnbindLatency();
    moduleOutputChannel.send(message);
    Message<?> inbound = receive(moduleInputChannel);
    assertThat(inbound).isNotNull();
    String receivedKey = new String(inbound.getHeaders()
        .get(KafkaHeaders.RECEIVED_MESSAGE_KEY, byte[].class));
    assertThat(receivedKey).isEqualTo("myDynamicKey");
    producerBinding.unbind();
    consumerBinding.unbind();
}
Example #11
Source File: KafkaBinderTests.java From spring-cloud-stream-binder-kafka with Apache License 2.0
@Test
@SuppressWarnings({ "unchecked", "rawtypes" })
public void testPartitionedNative() throws Exception {
    Binder binder = getBinder();
    ExtendedProducerProperties<KafkaProducerProperties> properties = createProducerProperties();
    properties.setPartitionCount(6);

    DirectChannel output = createBindableChannel("output",
        createProducerBindingProperties(properties));
    output.setBeanName("test.output");
    Binding<MessageChannel> outputBinding = binder.bindProducer("partNative.raw.0",
        output, properties);

    ExtendedConsumerProperties<KafkaConsumerProperties> consumerProperties = createConsumerProperties();
    QueueChannel input0 = new QueueChannel();
    input0.setBeanName("test.inputNative");
    Binding<MessageChannel> inputBinding = binder.bindConsumer("partNative.raw.0",
        "test", input0, consumerProperties);

    output.send(new GenericMessage<>("foo".getBytes(),
        Collections.singletonMap(KafkaHeaders.PARTITION_ID, 5)));

    Message<?> received = receive(input0);
    assertThat(received).isNotNull();
    assertThat(received.getPayload()).isEqualTo("foo".getBytes());
    assertThat(received.getHeaders().get(KafkaHeaders.RECEIVED_PARTITION_ID))
        .isEqualTo(5);

    inputBinding.unbind();
    outputBinding.unbind();
}
Example #12
Source File: KafkaMessageChannelBinder.java From spring-cloud-stream-binder-kafka with Apache License 2.0
ProducerConfigurationMessageHandler(KafkaTemplate<byte[], byte[]> kafkaTemplate,
        String topic,
        ExtendedProducerProperties<KafkaProducerProperties> producerProperties,
        ProducerFactory<byte[], byte[]> producerFactory) {

    super(kafkaTemplate);
    // Route to the topic named in the KafkaHeaders.TOPIC header when present,
    // falling back to the bound topic otherwise.
    if (producerProperties.getExtension().isUseTopicHeader()) {
        setTopicExpression(PARSER.parseExpression(
            "headers['" + KafkaHeaders.TOPIC + "'] ?: '" + topic + "'"));
    }
    else {
        setTopicExpression(new LiteralExpression(topic));
    }
    Expression messageKeyExpression = producerProperties.getExtension()
        .getMessageKeyExpression();
    if (expressionInterceptorNeeded(producerProperties)) {
        // The interceptor has already evaluated the key and stored it in a header.
        messageKeyExpression = PARSER.parseExpression("headers['"
            + KafkaExpressionEvaluatingInterceptor.MESSAGE_KEY_HEADER + "']");
    }
    setMessageKeyExpression(messageKeyExpression);
    setBeanFactory(KafkaMessageChannelBinder.this.getBeanFactory());
    if (producerProperties.isPartitioned()) {
        setPartitionIdExpression(PARSER.parseExpression(
            "headers['" + BinderHeaders.PARTITION_HEADER + "']"));
    }
    if (producerProperties.getExtension().isSync()) {
        setSync(true);
    }
    if (producerProperties.getExtension().getSendTimeoutExpression() != null) {
        setSendTimeoutExpression(
            producerProperties.getExtension().getSendTimeoutExpression());
    }
    this.producerFactory = producerFactory;
}
Example #13
Source File: NotificationEventHandler.java From stream-registry with Apache License 2.0
static <T, V> T sendEntityNotificationEvent(
        Function<V, ?> entityToKeyRecord,
        Function<V, ?> entityToValueRecord,
        Function<Message<?>, T> sendMessage,
        String topic,
        NotificationEvent<V> event
) {
    val key = entityToKeyRecord.apply(event.getEntity());
    val value = entityToValueRecord.apply(event.getEntity());

    val eventType = Optional.ofNullable(event.getEventType())
        .map(EventType::toString)
        .orElse(NotificationEventConstants.NOTIFICATION_TYPE_HEADER.defaultValue);

    val entity = Optional.ofNullable(event.getEntity())
        .map(Object::getClass)
        .map(Class::getSimpleName)
        .map(String::toUpperCase)
        .orElse(NotificationEventConstants.ENTITY_TYPE_HEADER.defaultValue);

    val message = MessageBuilder
        .withPayload(value)
        .setHeader(KafkaHeaders.MESSAGE_KEY, key)
        .setHeader(KafkaHeaders.TOPIC, topic)
        .setHeader(NotificationEventConstants.NOTIFICATION_TYPE_HEADER.name, eventType)
        .setHeader(NotificationEventConstants.ENTITY_TYPE_HEADER.name, entity)
        .build();

    return sendMessage.apply(message);
}
Example #14
Source File: TorrentStoreServiceApplication.java From Dodder with MIT License
@StreamListener("torrent-message-in")
public void handleTorrent(Message<Torrent> message) {
    try {
        Acknowledgment acknowledgment =
            message.getHeaders().get(KafkaHeaders.ACKNOWLEDGMENT, Acknowledgment.class);
        Torrent torrent = message.getPayload();
        log.debug("Save torrent to MongoDB, info hash is {}", torrent.getInfoHash());
        torrentService.upsert(torrent);
        // no error, execute acknowledge
        if (acknowledgment != null) {
            acknowledgment.acknowledge();
        }
    } catch (Exception e) {
        log.error("Insert or update torrent error", e);
    }
}
Example #15
Source File: BatchMessageConsumer.java From kafka-with-springboot with Apache License 2.0
@KafkaListener(topics = "${kafka.topic.batchConsumerTopic}",
    containerFactory = "kafkaListenerContainerFactoryForBatchConsumer",
    groupId = "batchConsumer")
public void receive(@Payload List<String> payloads,
    @Header(KafkaHeaders.RECEIVED_PARTITION_ID) List<Long> partitionIds,
    @Header(KafkaHeaders.OFFSET) List<Long> offsets) {

    LOGGER.info("Received group=batchConsumer with batch group data: ");
    for (int i = 0; i < payloads.size(); ++i) {
        LOGGER.info("---------------- payload='{}' from partitionId@offset='{}'",
            payloads.get(i), partitionIds.get(i) + "@" + offsets.get(i));
    }
}
Example #16
Source File: KafkaMessageChannelBinder.java From spring-cloud-stream-binder-kafka with Apache License 2.0
@Override
protected MessageHandler getPolledConsumerErrorMessageHandler(
        ConsumerDestination destination, String group,
        ExtendedConsumerProperties<KafkaConsumerProperties> properties) {
    // With a DLQ enabled, delegate to the standard error message handler.
    if (properties.getExtension().isEnableDlq()) {
        return getErrorMessageHandler(destination, group, properties);
    }
    final MessageHandler superHandler = super.getErrorMessageHandler(destination,
        group, properties);
    return (message) -> {
        ConsumerRecord<?, ?> record = (ConsumerRecord<?, ?>) message.getHeaders()
            .get(KafkaHeaders.RAW_DATA);
        if (!(message instanceof ErrorMessage)) {
            logger.error("Expected an ErrorMessage, not a "
                + message.getClass().toString() + " for: " + message);
        }
        else if (record == null) {
            if (superHandler != null) {
                superHandler.handleMessage(message);
            }
        }
        else if (message.getPayload() instanceof MessagingException) {
            // Reject (commit) or requeue the failed record, depending on the
            // autoCommitOnError setting.
            AcknowledgmentCallback ack = StaticMessageHeaderAccessor
                .getAcknowledgmentCallback(
                    ((MessagingException) message.getPayload())
                        .getFailedMessage());
            if (ack != null) {
                if (isAutoCommitOnError(properties)) {
                    ack.acknowledge(AcknowledgmentCallback.Status.REJECT);
                }
                else {
                    ack.acknowledge(AcknowledgmentCallback.Status.REQUEUE);
                }
            }
        }
    };
}
Example #17
Source File: KafkaConsumer.java From java-tutorial with MIT License
/**
 * Listens to the kafka.tut topic.
 *
 * @param record the consumer record
 * @param topic  the topic the record was received from
 */
@KafkaListener(id = "tut", topics = "kafka.tut")
public void listen(ConsumerRecord<?, ?> record,
    @Header(KafkaHeaders.RECEIVED_TOPIC) String topic) {
    // guard against a null record value
    Optional<?> kafkaMessage = Optional.ofNullable(record.value());
    if (kafkaMessage.isPresent()) {
        // extract the message payload
        Object message = kafkaMessage.get();
        logger.info("Receive: +++++++++++++++ Topic:" + topic);
        logger.info("Receive: +++++++++++++++ Record:" + record);
        logger.info("Receive: +++++++++++++++ Message:" + message);
    }
}
Example #18
Source File: KafkaMessageChannelBinder.java From spring-cloud-stream-binder-kafka with Apache License 2.0
@Override
protected void postProcessPollableSource(DefaultPollableMessageSource bindingTarget) {
    bindingTarget.setAttributesProvider((accessor, message) -> {
        Object rawMessage = message.getHeaders().get(KafkaHeaders.RAW_DATA);
        if (rawMessage != null) {
            accessor.setAttribute(KafkaHeaders.RAW_DATA, rawMessage);
        }
    });
}
Example #19
Source File: KafkaBinderTests.java From spring-cloud-stream-binder-kafka with Apache License 2.0
@Test
@SuppressWarnings("unchecked")
public void testManualAckIsNotPossibleWhenAutoCommitOffsetIsEnabledOnTheBinder()
        throws Exception {
    Binder binder = getBinder();
    DirectChannel moduleOutputChannel = createBindableChannel("output",
        createProducerBindingProperties(createProducerProperties()));
    QueueChannel moduleInputChannel = new QueueChannel();
    Binding<MessageChannel> producerBinding = binder.bindProducer(
        "testManualAckIsNotPossibleWhenAutoCommitOffsetIsEnabledOnTheBinder",
        moduleOutputChannel, createProducerProperties());
    ExtendedConsumerProperties<KafkaConsumerProperties> consumerProperties = createConsumerProperties();
    Binding<MessageChannel> consumerBinding = binder.bindConsumer(
        "testManualAckIsNotPossibleWhenAutoCommitOffsetIsEnabledOnTheBinder",
        "test", moduleInputChannel, consumerProperties);
    AbstractMessageListenerContainer<?, ?> container = TestUtils.getPropertyValue(
        consumerBinding, "lifecycle.messageListenerContainer",
        AbstractMessageListenerContainer.class);
    assertThat(container.getContainerProperties().getAckMode())
        .isEqualTo(ContainerProperties.AckMode.BATCH);
    String testPayload1 = "foo" + UUID.randomUUID().toString();
    Message<?> message1 = org.springframework.integration.support.MessageBuilder
        .withPayload(testPayload1.getBytes()).build();
    // Let the consumer actually bind to the producer before sending a msg
    binderBindUnbindLatency();
    moduleOutputChannel.send(message1);
    Message<?> receivedMessage = receive(moduleInputChannel);
    assertThat(receivedMessage).isNotNull();
    assertThat(receivedMessage.getHeaders().get(KafkaHeaders.ACKNOWLEDGMENT))
        .isNull();
    producerBinding.unbind();
    consumerBinding.unbind();
}
Example #20
Source File: KafkaService.java From eventapis with Apache License 2.0
@KafkaListener(topics = "test", containerFactory = "eventsKafkaListenerContainerFactory")
public void handleMessage(JsonNode event,
    @Header(KafkaHeaders.RECEIVED_TOPIC) String topic) {
    kafkaTemplate.send("top", "id", "data");
    log.warn(topic + " key: " + topic + " EventData: " + event.toString());
}
Example #21
Source File: KafkaBinderTests.java From spring-cloud-stream-binder-kafka with Apache License 2.0
@Test
@SuppressWarnings("unchecked")
public void testManualAckSucceedsWhenAutoCommitOffsetIsTurnedOff() throws Exception {
    Binder binder = getBinder();
    DirectChannel moduleOutputChannel = createBindableChannel("output",
        createProducerBindingProperties(createProducerProperties()));
    QueueChannel moduleInputChannel = new QueueChannel();
    Binding<MessageChannel> producerBinding = binder.bindProducer(
        "testManualAckSucceedsWhenAutoCommitOffsetIsTurnedOff",
        moduleOutputChannel, createProducerProperties());
    ExtendedConsumerProperties<KafkaConsumerProperties> consumerProperties = createConsumerProperties();
    consumerProperties.getExtension().setAutoCommitOffset(false);
    Binding<MessageChannel> consumerBinding = binder.bindConsumer(
        "testManualAckSucceedsWhenAutoCommitOffsetIsTurnedOff", "test",
        moduleInputChannel, consumerProperties);
    String testPayload1 = "foo" + UUID.randomUUID().toString();
    Message<?> message1 = org.springframework.integration.support.MessageBuilder
        .withPayload(testPayload1.getBytes()).build();
    // Let the consumer actually bind to the producer before sending a msg
    binderBindUnbindLatency();
    moduleOutputChannel.send(message1);
    Message<?> receivedMessage = receive(moduleInputChannel);
    assertThat(receivedMessage).isNotNull();
    assertThat(receivedMessage.getHeaders().get(KafkaHeaders.ACKNOWLEDGMENT))
        .isNotNull();
    Acknowledgment acknowledgment = receivedMessage.getHeaders()
        .get(KafkaHeaders.ACKNOWLEDGMENT, Acknowledgment.class);
    try {
        acknowledgment.acknowledge();
    }
    catch (Exception e) {
        fail("Acknowledge must not throw an exception");
    }
    finally {
        producerBinding.unbind();
        consumerBinding.unbind();
    }
}
Example #22
Source File: KafkaBinderTests.java From spring-cloud-stream-binder-kafka with Apache License 2.0
@Test
@SuppressWarnings("unchecked")
public void testResetOffsets() throws Exception {
    Binding<?> producerBinding = null;
    Binding<?> consumerBinding = null;
    try {
        String testPayload = "test";
        ExtendedProducerProperties<KafkaProducerProperties> producerProperties = createProducerProperties();
        DirectChannel moduleOutputChannel = createBindableChannel("output",
            createProducerBindingProperties(producerProperties));
        ExtendedConsumerProperties<KafkaConsumerProperties> consumerProperties = createConsumerProperties();
        consumerProperties.setConcurrency(2);
        consumerProperties.setInstanceCount(5); // 10 partitions across 2 threads
        consumerProperties.getExtension().setResetOffsets(true);
        DirectChannel moduleInputChannel = createBindableChannel("input",
            createConsumerBindingProperties(consumerProperties));
        String testTopicName = "existing" + System.currentTimeMillis();
        KafkaBinderConfigurationProperties configurationProperties = createConfigurationProperties();
        configurationProperties.setAutoAddPartitions(true);
        Binder binder = getBinder(configurationProperties);
        producerBinding = binder.bindProducer(testTopicName, moduleOutputChannel,
            producerProperties);
        consumerBinding = binder.bindConsumer(testTopicName, "testReset",
            moduleInputChannel, consumerProperties);
        // Let the consumer actually bind to the producer before sending a msg
        binderBindUnbindLatency();
        IntStream.range(0, 10).forEach(i -> moduleOutputChannel
            .send(MessageBuilder.withPayload(testPayload)
                .setHeader(MessageHeaders.CONTENT_TYPE, MimeTypeUtils.TEXT_PLAIN)
                .setHeader(KafkaHeaders.PARTITION_ID, i)
                .build()));
        CountDownLatch latch1 = new CountDownLatch(10);
        CountDownLatch latch2 = new CountDownLatch(20);
        AtomicReference<Message<byte[]>> inboundMessageRef = new AtomicReference<>();
        AtomicInteger received = new AtomicInteger();
        moduleInputChannel.subscribe(message1 -> {
            try {
                inboundMessageRef.set((Message<byte[]>) message1);
            }
            finally {
                received.incrementAndGet();
                latch1.countDown();
                latch2.countDown();
            }
        });
        assertThat(latch1.await(10, TimeUnit.SECONDS)).as("Failed to receive messages")
            .isTrue();
        consumerBinding.unbind();
        consumerBinding = binder.bindConsumer(testTopicName, "testReset",
            moduleInputChannel, consumerProperties);
        assertThat(latch2.await(10, TimeUnit.SECONDS)).as("Failed to receive message")
            .isTrue();
        binder.bindConsumer(testTopicName + "-x", "testReset", moduleInputChannel,
            consumerProperties).unbind(); // cause another rebalance
        assertThat(received.get()).as("Unexpected reset").isEqualTo(20);
        assertThat(inboundMessageRef.get()).isNotNull();
        assertThat(inboundMessageRef.get().getPayload()).isEqualTo("test".getBytes());
        assertThat(inboundMessageRef.get().getHeaders()).containsEntry("contentType",
            MimeTypeUtils.TEXT_PLAIN);
    }
    finally {
        if (producerBinding != null) {
            producerBinding.unbind();
        }
        if (consumerBinding != null) {
            consumerBinding.unbind();
        }
    }
}
Example #23
Source File: KafkaBinderTests.java From spring-cloud-stream-binder-kafka with Apache License 2.0
@Test
@Override
@SuppressWarnings("unchecked")
public void testSendAndReceiveMultipleTopics() throws Exception {
    Binder binder = getBinder();

    DirectChannel moduleOutputChannel1 = createBindableChannel("output1",
        createProducerBindingProperties(createProducerProperties()));
    DirectChannel moduleOutputChannel2 = createBindableChannel("output2",
        createProducerBindingProperties(createProducerProperties()));

    QueueChannel moduleInputChannel = new QueueChannel();

    ExtendedProducerProperties<KafkaProducerProperties> producer1Props = createProducerProperties();
    producer1Props.getExtension().setUseTopicHeader(true);

    Binding<MessageChannel> producerBinding1 = binder.bindProducer("foo.x",
        moduleOutputChannel1, producer1Props);
    Binding<MessageChannel> producerBinding2 = binder.bindProducer("foo.y",
        moduleOutputChannel2, createProducerProperties());

    ExtendedConsumerProperties<KafkaConsumerProperties> consumerProperties = createConsumerProperties();
    consumerProperties.getExtension().setAutoRebalanceEnabled(false);
    Binding<MessageChannel> consumerBinding1 = binder.bindConsumer("foo.x", "test1",
        moduleInputChannel, consumerProperties);
    Binding<MessageChannel> consumerBinding2 = binder.bindConsumer("foo.y", "test2",
        moduleInputChannel, consumerProperties);

    String testPayload1 = "foo1";
    Message<?> message1 = org.springframework.integration.support.MessageBuilder
        .withPayload(testPayload1.getBytes()).build();
    String testPayload2 = "foo2";
    Message<?> message2 = org.springframework.integration.support.MessageBuilder
        .withPayload(testPayload2.getBytes()).build();
    String testPayload3 = "foo3";
    Message<?> message3 = org.springframework.integration.support.MessageBuilder
        .withPayload(testPayload3.getBytes())
        .setHeader(KafkaHeaders.TOPIC, "foo.y")
        .build();

    // Let the consumer actually bind to the producer before sending a msg
    binderBindUnbindLatency();

    moduleOutputChannel1.send(message1);
    moduleOutputChannel2.send(message2);
    moduleOutputChannel1.send(message3);

    Message<?>[] messages = new Message[3];
    messages[0] = receive(moduleInputChannel);
    messages[1] = receive(moduleInputChannel);
    messages[2] = receive(moduleInputChannel);

    assertThat(messages[0]).isNotNull();
    assertThat(messages[1]).isNotNull();
    assertThat(messages[2]).isNotNull();
    assertThat(messages).extracting("payload").containsExactlyInAnyOrder(
        testPayload1.getBytes(), testPayload2.getBytes(), testPayload3.getBytes());

    Arrays.asList(messages).forEach(message -> {
        if (new String((byte[]) message.getPayload()).equals("foo1")) {
            assertThat(message.getHeaders().get(KafkaHeaders.RECEIVED_TOPIC))
                .isEqualTo("foo.x");
        }
        else {
            assertThat(message.getHeaders().get(KafkaHeaders.RECEIVED_TOPIC))
                .isEqualTo("foo.y");
        }
    });

    producerBinding1.unbind();
    producerBinding2.unbind();
    consumerBinding1.unbind();
    consumerBinding2.unbind();
}
Example #24
Source File: KafkaBinderTests.java From spring-cloud-stream-binder-kafka with Apache License 2.0
@Test
@SuppressWarnings({ "unchecked", "rawtypes" })
public void testSendAndReceiveBatch() throws Exception {
    Binder binder = getBinder();
    BindingProperties outputBindingProperties = createProducerBindingProperties(
        createProducerProperties());
    DirectChannel moduleOutputChannel = createBindableChannel("output",
        outputBindingProperties);
    ExtendedConsumerProperties<KafkaConsumerProperties> consumerProperties = createConsumerProperties();
    consumerProperties.setBatchMode(true);
    consumerProperties.getExtension().getConfiguration().put("fetch.min.bytes", "1000");
    consumerProperties.getExtension().getConfiguration().put("fetch.max.wait.ms", "5000");
    consumerProperties.getExtension().getConfiguration().put("max.poll.records", "2");
    DirectChannel moduleInputChannel = createBindableChannel("input",
        createConsumerBindingProperties(consumerProperties));
    Binding<MessageChannel> producerBinding = binder.bindProducer("c.batching",
        moduleOutputChannel, outputBindingProperties.getProducer());
    Binding<MessageChannel> consumerBinding = binder.bindConsumer("c.batching",
        "testSendAndReceiveBatch", moduleInputChannel, consumerProperties);
    Message<?> message = org.springframework.integration.support.MessageBuilder
        .withPayload("foo".getBytes(StandardCharsets.UTF_8))
        .setHeader(KafkaHeaders.PARTITION_ID, 0)
        .build();
    // Let the consumer actually bind to the producer before sending a msg
    binderBindUnbindLatency();
    moduleOutputChannel.send(message);
    message = MessageBuilder
        .withPayload("bar".getBytes(StandardCharsets.UTF_8))
        .setHeader(KafkaHeaders.PARTITION_ID, 0)
        .build();
    moduleOutputChannel.send(message);
    CountDownLatch latch = new CountDownLatch(1);
    AtomicReference<Message<List<byte[]>>> inboundMessageRef = new AtomicReference<>();
    moduleInputChannel.subscribe(message1 -> {
        try {
            inboundMessageRef.compareAndSet(null, (Message<List<byte[]>>) message1);
        }
        finally {
            latch.countDown();
        }
    });
    Assert.isTrue(latch.await(5, TimeUnit.SECONDS), "Failed to receive message");
    assertThat(inboundMessageRef.get()).isNotNull();
    List<byte[]> payload = inboundMessageRef.get().getPayload();
    assertThat(payload.get(0)).isEqualTo("foo".getBytes());
    if (payload.size() > 1) { // it's a race as to whether we'll get them both or just one
        assertThat(payload.get(1)).isEqualTo("bar".getBytes());
    }
    producerBinding.unbind();
    consumerBinding.unbind();
}
Example #25
Source File: S1pKafkaApplication.java From grussell-spring-kafka with Apache License 2.0
@KafkaListener(topics = "${kafka.topic}")
public void listen(@Payload String foo,
    @Header(KafkaHeaders.RECEIVED_PARTITION_ID) int partition) {
    System.out.println("Received: " + foo + " (partition: " + partition + ")");
    this.latch.countDown();
}
Example #26
Source File: PartitioningKafkaDemoApplication.java From spring-cloud-stream-samples with Apache License 2.0
@Bean
public Consumer<Message<String>> listen() {
    return message -> logger.info(message.getPayload() + " received from partition "
        + message.getHeaders().get(KafkaHeaders.RECEIVED_PARTITION_ID));
}
Example #27
Source File: Sender.java From spring-kafka with MIT License
public void send(String topic, Object payload) {
    LOGGER.info("sending payload='{}' to topic='{}'", payload.toString(), topic);
    kafkaTemplate.send(
        MessageBuilder.withPayload(payload).setHeader(KafkaHeaders.TOPIC, topic).build());
}
Example #28
Source File: MultiPartitionMessageConsumer.java From kafka-with-springboot with Apache License 2.0
@KafkaListener(topics = "${kafka.topic.multiPartitionTopic}",
    containerFactory = "kafkaListenerContainerFactoryWith6Consumer",
    groupId = "multiPartitionWithSingleConsumer6Thread")
public void receive2(@Payload String payload,
    @Header(KafkaHeaders.RECEIVED_PARTITION_ID) Long partitionId,
    @Header(KafkaHeaders.OFFSET) Long offset) {
    LOGGER.info("Received consumer=2 group=multiPartitionWithSingleConsumer6Thread payload='{}' from partitionId@offset='{}'",
        payload, partitionId + "@" + offset);
}
Example #29
Source File: MultiPartitionMessageConsumer.java From kafka-with-springboot with Apache License 2.0
@KafkaListener(topics = "${kafka.topic.multiPartitionTopic}", groupId = "multiPartitionWith2Consumer")
public void receiver1b(@Payload String payload,
    @Header(KafkaHeaders.RECEIVED_PARTITION_ID) Long partitionId,
    @Header(KafkaHeaders.OFFSET) Long offset) {
    LOGGER.info("Received consumer=1b group=multiPartitionWith2Consumer payload='{}' from partitionId@offset='{}'",
        payload, partitionId + "@" + offset);
}
Example #30
Source File: MultiPartitionMessageConsumer.java From kafka-with-springboot with Apache License 2.0
@KafkaListener(topics = "${kafka.topic.multiPartitionTopic}", groupId = "multiPartitionWith2Consumer")
public void receiver1a(@Payload String payload,
    @Header(KafkaHeaders.RECEIVED_PARTITION_ID) Long partitionId,
    @Header(KafkaHeaders.OFFSET) Long offset) {
    LOGGER.info("Received consumer=1a group=multiPartitionWith2Consumer payload='{}' from partitionId@offset='{}'",
        payload, partitionId + "@" + offset);
}