org.apache.kafka.common.header.internals.RecordHeader Java Examples

The following examples show how to use org.apache.kafka.common.header.internals.RecordHeader. Each example is taken from an open source project; the source file and license are noted above the code.
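Although it lives in an internals package, RecordHeader is the standard concrete implementation of the org.apache.kafka.common.header.Header interface, and it appears throughout the examples below. For orientation, here is a minimal, self-contained sketch of the two ways headers are typically attached, at ProducerRecord construction time or through the record's mutable Headers view; the topic, key, and header names are invented for illustration.

import java.nio.charset.StandardCharsets;
import java.util.Collections;

import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.header.Header;
import org.apache.kafka.common.header.internals.RecordHeader;

public class RecordHeaderSketch {
    public static void main(String[] args) {
        // A RecordHeader is a simple key/byte[] pair.
        Header traceId = new RecordHeader("trace_id", "abc-123".getBytes(StandardCharsets.UTF_8));

        // Headers can be passed when the record is created ...
        ProducerRecord<String, String> record = new ProducerRecord<>(
                "the_topic", null, "the_key", "the_value", Collections.singletonList(traceId));

        // ... or added afterwards, since headers() stays mutable until the record is sent.
        record.headers().add(new RecordHeader("content_type", "text/plain".getBytes(StandardCharsets.UTF_8)));

        // lastHeader returns the most recently added value for a key.
        Header read = record.headers().lastHeader("trace_id");
        System.out.println(read.key() + " = " + new String(read.value(), StandardCharsets.UTF_8));
    }
}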
Example #1
Source File: KafkaProducerRecordImpl.java    From vertx-kafka-client with Apache License 2.0
@Override
public ProducerRecord<K, V> record() {
  if (headers.isEmpty()) {
    return new ProducerRecord<>(topic, partition, timestamp, key, value);
  } else {
    return new ProducerRecord<>(
      topic,
      partition,
      timestamp,
      key,
      value,
      headers.stream()
        .map(header -> new RecordHeader(header.key(), header.value().getBytes()))
        .collect(Collectors.toList()));
  }
}
 
Example #2
Source File: MockKafkaTest.java    From jackdaw with BSD 3-Clause "New" or "Revised" License
@Test
public void testDefaultRecordMapping() {
  final MockKafka<Object, Object> mockKafka = new MockKafka<>();
  final RecordHeaders recordHeaders = new RecordHeaders(Collections.singleton(
                  new RecordHeader("headerKey", "headerValue".getBytes(StandardCharsets.UTF_8))));
  final RecordMetadata recordMetadata = new RecordMetadata(new TopicPartition("topic", 0),
          0, 0, 0, -1L, -1, -1);
  final ProducerRecord<Object, Object> producerRecord =
          new ProducerRecord<>("topic", 0, "key", "value", recordHeaders);

  final ConsumerRecord<Object, Object> consumerRecord = mockKafka.defaultRecordMapping(producerRecord, recordMetadata);

  assertEquals(producerRecord.topic(), consumerRecord.topic());
  assertEquals(producerRecord.partition().intValue(), consumerRecord.partition());
  assertEquals(producerRecord.key(), consumerRecord.key());
  assertEquals(producerRecord.value(), consumerRecord.value());
  assertEquals(producerRecord.headers(), consumerRecord.headers());
}
 
Example #3
Source File: ConsumerMockTestBase.java    From vertx-kafka-client with Apache License 2.0
@Test
public void testConsumeWithHeader(TestContext ctx) {
  MockConsumer<String, String> mock = new MockConsumer<>(OffsetResetStrategy.EARLIEST);
  KafkaReadStream<String, String> consumer = createConsumer(vertx, mock);
  Async doneLatch = ctx.async();
  consumer.handler(record -> {
    ctx.assertEquals("the_topic", record.topic());
    ctx.assertEquals(0, record.partition());
    ctx.assertEquals("abc", record.key());
    ctx.assertEquals("def", record.value());
    Header[] headers = record.headers().toArray();
    ctx.assertEquals(1, headers.length);
    Header header = headers[0];
    ctx.assertEquals("header_key", header.key());
    ctx.assertEquals("header_value", new String(header.value()));
    consumer.close(v -> doneLatch.complete());
  });
  consumer.subscribe(Collections.singleton("the_topic"), v -> {
    mock.schedulePollTask(() -> {
      mock.rebalance(Collections.singletonList(new TopicPartition("the_topic", 0)));
      mock.addRecord(new ConsumerRecord<>("the_topic", 0, 0L, 0L, TimestampType.NO_TIMESTAMP_TYPE, 0L, 0, 0, "abc", "def",
        new RecordHeaders(Collections.singletonList(new RecordHeader("header_key", "header_value".getBytes())))));
      mock.seek(new TopicPartition("the_topic", 0), 0L);
    });
  });
}
 
Example #4
Source File: KafkaMessageSenderTest.java    From synapse with Apache License 2.0
@Test
public void shouldSendEvent() {
    // given
    final Message<ExampleJsonObject> message = message("someKey", new ExampleJsonObject("banana"));

    try (final Consumer<String, String> consumer = getKafkaConsumer("someTestGroup")) {
        embeddedKafka.consumeFromAnEmbeddedTopic(consumer, KAFKA_TOPIC);

        // when
        messageSender.send(message).join();

        // then

        final ConsumerRecord<String, String> record = getSingleRecord(consumer, KAFKA_TOPIC, 250L);
        assertThat(record.key(), is("someKey"));
        assertThat(record.value(), is("{\"value\":\"banana\"}"));
        assertThat(record.headers(), containsInAnyOrder(
                new RecordHeader("_synapse_msg_partitionKey", "someKey".getBytes(UTF_8)),
                new RecordHeader("_synapse_msg_compactionKey", "someKey".getBytes(UTF_8))
        ));
        assertThat(record.topic(), is(KAFKA_TOPIC));
        assertThat(record.partition(), is(0));
    }
}
 
Example #5
Source File: KafkaEncoderTest.java    From synapse with Apache License 2.0
@Test
public void shouldPartitionMessage() {
    // given
    final KafkaEncoder encoder = new KafkaEncoder("test", 2);
    final TextMessage first = TextMessage.of(Key.of("0", "someKeyForPartition0"), null);
    final TextMessage second = TextMessage.of(Key.of("1", "someKeyForPartition1"), null);

    // when
    ProducerRecord<String, String> firstRecord = encoder.apply(first);
    ProducerRecord<String, String> secondRecord = encoder.apply(second);

    // then
    assertThat(firstRecord.key(), is("someKeyForPartition0"));
    assertThat(firstRecord.headers(), containsInAnyOrder(
            new RecordHeader("_synapse_msg_partitionKey", "0".getBytes(UTF_8)),
            new RecordHeader("_synapse_msg_compactionKey", "someKeyForPartition0".getBytes(UTF_8))
    ));
    assertThat(firstRecord.partition(), is(0));
    // and
    assertThat(secondRecord.key(), is("someKeyForPartition1"));
    assertThat(secondRecord.headers(), containsInAnyOrder(
            new RecordHeader("_synapse_msg_partitionKey", "1".getBytes(UTF_8)),
            new RecordHeader("_synapse_msg_compactionKey", "someKeyForPartition1".getBytes(UTF_8))
    ));
    assertThat(secondRecord.partition(), is(1));
}
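As the assertions suggest, the encoder derives the target partition from the partition key ("0" and "1" land on partitions 0 and 1 of the two-partition channel) and transports both the partition key and the compaction key as _synapse_msg_* record headers, which is what lets the decoder in Examples #8 and #9 rebuild compound keys on the consumer side.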
 
Example #6
Source File: KafkaEncoderTest.java    From synapse with Apache License 2.0
@Test
public void shouldEncodeMessageHeaders() {
    // given
    final KafkaEncoder encoder = new KafkaEncoder("test", 1);
    final TextMessage message = TextMessage.of(
            "someKey",
            Header.builder()
                    .withAttribute("foo", "bar")
                    .withAttribute("foobar", Instant.ofEpochMilli(42)).build(),
            null
    );

    // when
    final ProducerRecord<String, String> record = encoder.apply(message);

    // then
    assertThat(record.headers(), containsInAnyOrder(
            new RecordHeader("_synapse_msg_partitionKey", "someKey".getBytes(UTF_8)),
            new RecordHeader("_synapse_msg_compactionKey", "someKey".getBytes(UTF_8)),
            new RecordHeader("foo", "bar".getBytes(UTF_8)),
            new RecordHeader("foobar", "1970-01-01T00:00:00.042Z".getBytes(UTF_8))
    ));
}
 
Example #7
Source File: KafkaEncoderTest.java    From synapse with Apache License 2.0
@Test
public void shouldEncodeMessage() {
    // given
    final KafkaEncoder encoder = new KafkaEncoder("test", 1);
    final TextMessage message = TextMessage.of("someKey", "payload");

    // when
    ProducerRecord<String, String> record = encoder.apply(message);

    // then
    assertThat(record.key(), is("someKey"));
    assertThat(record.value(), is("payload"));
    assertThat(record.headers(), containsInAnyOrder(
            new RecordHeader("_synapse_msg_partitionKey", "someKey".getBytes(UTF_8)),
            new RecordHeader("_synapse_msg_compactionKey", "someKey".getBytes(UTF_8))
    ));
    assertThat(record.topic(), is("test"));
    assertThat(record.partition(), is(nullValue()));
}
 
Example #8
Source File: KafkaDecoderTest.java    From synapse with Apache License 2.0
@Test
public void shouldDecodeBrokenCompoundKeysAsMessageKey() {
    final KafkaDecoder decoder = new KafkaDecoder();
    final ConsumerRecord<String,String> record = new ConsumerRecord<>(
            "ch01",
            0,
            42L,
            1234L, TimestampType.CREATE_TIME,
            -1L, -1, -1,
            "record-key",
            null,
            new RecordHeaders(asList(
                    new RecordHeader("_synapse_msg_partitionKey", "1234".getBytes(UTF_8)),
                    new RecordHeader("_synapse_msg_compactionKey", "key-1234".getBytes(UTF_8))
            ))
    );

    // when
    final TextMessage decodedMessage = decoder.apply(record);

    // then
    assertThat(decodedMessage.getKey().isCompoundKey(), is(false));
    assertThat(decodedMessage.getKey().compactionKey(), is("record-key"));
}
 
Example #9
Source File: KafkaDecoderTest.java    From synapse with Apache License 2.0
@Test
public void shouldDecodeCompoundKeys() {
    final KafkaDecoder decoder = new KafkaDecoder();
    final ConsumerRecord<String,String> record = new ConsumerRecord<>(
            "ch01",
            0,
            42L,
            1234L, TimestampType.CREATE_TIME,
            -1L, -1, -1,
            "key-1234",
            null,
            new RecordHeaders(asList(
                    new RecordHeader("_synapse_msg_partitionKey", "1234".getBytes(UTF_8)),
                    new RecordHeader("_synapse_msg_compactionKey", "key-1234".getBytes(UTF_8))
            ))
    );

    // when
    final TextMessage decodedMessage = decoder.apply(record);

    // then
    assertThat(decodedMessage.getKey().isCompoundKey(), is(true));
    assertThat(decodedMessage.getKey().compactionKey(), is("key-1234"));
    assertThat(decodedMessage.getKey().partitionKey(), is("1234"));
}
 
Example #10
Source File: KafkaSourceTaskTest.java    From MirrorTool-for-Kafka-Connect with Apache License 2.0
private ConsumerRecords<byte[], byte[]> createTestRecordsWithHeaders() {
  RecordHeader header = new RecordHeader("testHeader", new byte[0]);
  RecordHeaders headers = new RecordHeaders();
  headers.add(header);
  TimestampType timestampType = TimestampType.NO_TIMESTAMP_TYPE;

  byte testByte = 0;
  byte[] testKey = { testByte };
  byte[] testValue = { testByte };

  ConnectHeaders destinationHeaders = new ConnectHeaders();
  destinationHeaders.add(header.key(), header.value(), Schema.OPTIONAL_BYTES_SCHEMA);
  ConsumerRecord<byte[], byte[]> testConsumerRecord = new ConsumerRecord<byte[], byte[]>(FIRST_TOPIC, FIRST_PARTITION,
      FIRST_OFFSET, System.currentTimeMillis(), timestampType, 0L, 0, 0, testKey, testValue, headers);

  TopicPartition topicPartition = new TopicPartition(FIRST_TOPIC, FIRST_PARTITION);
  List<ConsumerRecord<byte[], byte[]>> consumerRecords = new ArrayList<>();
  consumerRecords.add(testConsumerRecord);

  Map<TopicPartition, List<ConsumerRecord<byte[], byte[]>>> consumerRecordMap = new HashMap<>(1);
  consumerRecordMap.put(topicPartition, consumerRecords);
  ConsumerRecords<byte[], byte[]> testRecords = new ConsumerRecords<>(consumerRecordMap);
  return testRecords;
}
 
Example #11
Source File: ScheduledImageResizeRequestSubmitter.java    From Spring-Boot-2.0-Projects with MIT License
public void scheduleTaskWithCronExpression() {
    Flux.just(new File(imagesDirectory).listFiles()).filter(File::isFile).subscribe(
        f -> {
            Flux.just(new Dimension(800, 600), new Dimension(180, 180), new Dimension(1200, 630)).subscribe(d -> {
                try {
                    ImageResizeRequest imageResizeRequest = new ImageResizeRequest((int) d.getWidth(), (int) d.getHeight(), f.getAbsolutePath());
                    ProducerRecord<String, String> record = new ProducerRecord<>("asyncRequests", objectMapper.writeValueAsString(imageResizeRequest));
                    record.headers().add(new RecordHeader(KafkaHeaders.REPLY_TOPIC, "asyncReplies".getBytes()));
                    RequestReplyFuture<String, String, String> replyFuture = template.sendAndReceive(record);
                    ConsumerRecord<String, String> consumerRecord = replyFuture.get();
                } catch (Exception e) {
                    LOGGER.error("Error while sending message", e);
                }
            },
            e -> LOGGER.error("Error while running lambda"),
            () -> f.renameTo(new File(f.getParent() + "/Done", f.getName())));
        }
    );
}
 
Example #12
Source File: MessageRecordUtils.java    From kop with Apache License 2.0
private static Header[] getHeadersFromMetadata(List<KeyValue> properties) {
    Header[] headers = new Header[properties.size()];

    if (log.isDebugEnabled()) {
        log.debug("getHeadersFromMetadata. Header size: {}",
            properties.size());
    }

    int index = 0;
    for (KeyValue kv: properties) {
        headers[index] = new RecordHeader(kv.getKey(), kv.getValue().getBytes(UTF_8));

        if (log.isDebugEnabled()) {
            log.debug("index: {} kv.getKey: {}. kv.getValue: {}",
                index, kv.getKey(), kv.getValue());
        }
        index++;
    }

    return headers;
}
 
Example #13
Source File: KafkaEventReceiverTest.java    From stream-registry with Apache License 2.0
@Test
public void typical() throws Exception {
  when(config.getTopic()).thenReturn(topic);
  when(consumer.partitionsFor(topic)).thenReturn(List.of(partitionInfo));
  when(consumer.beginningOffsets(topicPartitions)).thenReturn(Map.of(topicPartition, 0L));
  when(consumer.endOffsets(topicPartitions)).thenReturn(Map.of(topicPartition, 0L));
  when(consumer.poll(Duration.ofMillis(100))).thenReturn(new ConsumerRecords<>(Map.of(topicPartition, List.of(record))));
  when(record.key()).thenReturn(avroKey);
  when(record.value()).thenReturn(avroValue);
  when(converter.toModel(avroKey, avroValue)).thenReturn(event);
  when(record.headers()).thenReturn(new RecordHeaders(List.of(new RecordHeader(CORRELATION_ID, "foo".getBytes(UTF_8)))));

  underTest.receive(listener);
  Thread.sleep(100L);
  underTest.close();

  var inOrder = Mockito.inOrder(consumer, listener, correlator);
  inOrder.verify(consumer).assign(topicPartitions);
  inOrder.verify(consumer).seekToBeginning(topicPartitions);
  inOrder.verify(listener).onEvent(LOAD_COMPLETE);
  inOrder.verify(listener).onEvent(event);
  inOrder.verify(correlator).received("foo");
}
 
Example #14
Source File: KafkaEventReceiverTest.java    From stream-registry with Apache License 2.0
@Test
public void listenerThrowsException() throws Exception {
  when(config.getTopic()).thenReturn(topic);
  when(consumer.partitionsFor(topic)).thenReturn(List.of(partitionInfo));
  when(consumer.beginningOffsets(topicPartitions)).thenReturn(Map.of(topicPartition, 0L));
  when(consumer.endOffsets(topicPartitions)).thenReturn(Map.of(topicPartition, 0L));
  when(consumer.poll(Duration.ofMillis(100))).thenReturn(new ConsumerRecords<>(Map.of(topicPartition, List.of(record))));
  when(record.key()).thenReturn(avroKey);
  when(record.value()).thenReturn(avroValue);
  when(converter.toModel(avroKey, avroValue)).thenReturn(event);
  when(record.headers()).thenReturn(new RecordHeaders(List.of(new RecordHeader(CORRELATION_ID, "foo".getBytes(UTF_8)))));
  doThrow(new RuntimeException("listener error")).when(listener).onEvent(event);

  underTest.receive(listener);
  Thread.sleep(100L);
  underTest.close();

  var inOrder = Mockito.inOrder(consumer, listener, correlator);
  inOrder.verify(consumer).assign(topicPartitions);
  inOrder.verify(consumer).seekToBeginning(topicPartitions);
  inOrder.verify(listener).onEvent(LOAD_COMPLETE);
  inOrder.verify(listener).onEvent(event);
  inOrder.verify(correlator).received("foo");
}
 
Example #15
Source File: IncomingKafkaRecordMetadata.java    From smallrye-reactive-messaging with Apache License 2.0
public IncomingKafkaRecordMetadata(KafkaConsumerRecord<K, T> record) {
    this.record = record;
    this.recordKey = record.key();
    this.topic = record.topic();
    this.partition = record.partition();
    this.timestamp = Instant.ofEpochMilli(record.timestamp());
    this.timestampType = record.timestampType();
    this.offset = record.offset();
    if (record.headers() == null) {
        this.headers = new RecordHeaders();
    } else {
        this.headers = new RecordHeaders(record.headers().stream()
                .map(kh -> new RecordHeader(kh.key(), kh.value().getBytes())).collect(
                        Collectors.toList()));
    }
}
 
Example #16
Source File: KafkaEasyTransMsgConsumerImpl.java    From EasyTransaction with Apache License 2.0
private void reconsumeLater(ConsumerRecord<String, byte[]> consumeRecord) throws InterruptedException, ExecutionException {

    // add all headers to headerList except RETRY_COUNT
    Headers headers = consumeRecord.headers();
    List<Header> headerList = new ArrayList<Header>(8);
    Iterator<Header> iterator = headers.iterator();
    Integer retryCount = -1;
    boolean hasOrignalHeader = false;
    while (iterator.hasNext()) {
        Header next = iterator.next();
        if (next.key().equals(RETRY_COUNT_KEY)) {
            retryCount = serializer.deserialize(next.value());
            continue;
        }

        if (next.key().equals(ORGINAL_TOPIC)) {
            hasOrignalHeader = true;
        }
        headerList.add(next);
    }

    // increment RETRY_COUNT and add it back as a header
    retryCount++;
    headerList.add(new RecordHeader(RETRY_COUNT_KEY, serializer.serialization(retryCount)));

    // record the original topic the first time the message is retried
    if (!hasOrignalHeader) {
        headerList.add(new RecordHeader(ORGINAL_TOPIC, serializer.serialization(consumeRecord.topic())));
    }

    // send the message to the retry queue corresponding to its retry count
    String retryTopic = calcRetryTopic(consumeRecord.topic(), retryCount);

    ProducerRecord<String, byte[]> record = new ProducerRecord<>(retryTopic,
            consumeRecord.partition() % retryQueuePartitionCount.get(retryTopic), null, consumeRecord.key(),
            consumeRecord.value(), headerList);
    Future<RecordMetadata> publishKafkaMessage = retryQueueMsgProducer.publishKafkaMessage(record);
    publishKafkaMessage.get();
}
 
Example #17
Source File: ConsumerTestBase.java    From vertx-kafka-client with Apache License 2.0
private Properties setupConsumeWithHeaders(TestContext ctx, int numMessages, String topicName) {
  Async batch = ctx.async();
  AtomicInteger index = new AtomicInteger();
  kafkaCluster.useTo().produceStrings(numMessages, batch::complete, () ->
    new ProducerRecord<>(topicName, 0, "key-" + index.get(), "value-" + index.get(),
      Collections.singletonList(new RecordHeader("header_key" + index.get(), ("header_value" + index.getAndIncrement()).getBytes()))));
  batch.awaitSuccess(20000);
  Properties config = kafkaCluster.useTo().getConsumerProperties(topicName, topicName, OffsetResetStrategy.EARLIEST);
  config.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
  config.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
  return config;
}
 
Example #18
Source File: BaseRecordWeigherTest.java    From kafka-workers with Apache License 2.0
private WorkerRecord<byte[], byte[]> emptyWorkerRecordWithHeaders(String[] headers) {
    RecordHeaders recordHeaders = new RecordHeaders();
    for (String headerStr: headers) {
        String[] split = headerStr.split(":");
        recordHeaders.add(new RecordHeader(split[0], split[1].getBytes(ISO_8859_1)));
    }
    ConsumerRecord<byte[], byte[]> consumerRecord = new ConsumerRecord<>(EMPTY_TOPIC, SOME_PARTITION, SOME_OFFSET,
            ConsumerRecord.NO_TIMESTAMP, TimestampType.NO_TIMESTAMP_TYPE, (long) ConsumerRecord.NULL_CHECKSUM,
            0, 0,
            new byte[0], new byte[0],
            recordHeaders);

    return new WorkerRecord<>(consumerRecord, SOME_SUBPARTITION);
}
 
Example #19
Source File: ProducingMessageWithHeaderBean.java    From smallrye-reactive-messaging with Apache License 2.0
@Incoming("data")
@Outgoing("output-2")
@Acknowledgment(Acknowledgment.Strategy.MANUAL)
public Message<Integer> process(Message<Integer> input) {
    List<RecordHeader> list = Arrays.asList(
            new RecordHeader("hello", "clement".getBytes()),
            new RecordHeader("count", Integer.toString(counter.incrementAndGet()).getBytes()));
    return Message.of(
            input.getPayload() + 1,
            Metadata.of(OutgoingKafkaRecordMetadata.builder().withKey(Integer.toString(input.getPayload()))
                    .withHeaders(list).build()),
            input::ack);
}
 
Example #20
Source File: KafkaDecoderTest.java    From synapse with Apache License 2.0
@Test
public void shouldDecodeMessageHeaders() {
    // given
    final KafkaDecoder decoder = new KafkaDecoder();
    final ConsumerRecord<String,String> record = new ConsumerRecord<>(
            "ch01",
            0,
            42L,
            1234L, TimestampType.CREATE_TIME,
            -1L, -1, -1,
            "key",
            null,
            new RecordHeaders(asList(
                    new RecordHeader("foo", "foovalue".getBytes(UTF_8)),
                    new RecordHeader("bar", "barvalue".getBytes(UTF_8))
            ))
    );

    // when
    final TextMessage decodedMessage = decoder.apply(record);

    // then
    final Map<String, String> expectedHeaders = ImmutableMap.of(
            "foo", "foovalue",
            "bar", "barvalue"
    );
    assertThat(decodedMessage.getHeader().getAll(), is(expectedHeaders));
}
 
Example #21
Source File: KafkaEncoder.java    From synapse with Apache License 2.0
private List<Header> headersOf(final Message<String> message) {
    final ImmutableList.Builder<org.apache.kafka.common.header.Header> messageAttributes = ImmutableList.builder();
    message.getHeader()
            .getAll()
            .forEach((key, value) -> messageAttributes.add(new RecordHeader(key, value.getBytes(UTF_8))));
    messageAttributes.add(
            new RecordHeader(PARTITION_KEY, message.getKey().partitionKey().getBytes(UTF_8)),
            new RecordHeader(COMPACTION_KEY, message.getKey().compactionKey().getBytes(UTF_8))
    );
    return messageAttributes.build();
}
 
Example #22
Source File: KafkaPublisherActor.java    From ditto with Eclipse Public License 2.0
private static Iterable<Header> mapExternalMessageHeaders(final ExternalMessage externalMessage) {
    return externalMessage.getHeaders()
            .entrySet()
            .stream()
            .map(header -> new RecordHeader(header.getKey(), header.getValue().getBytes(StandardCharsets.UTF_8)))
            .collect(Collectors.toList());
}
 
Example #23
Source File: KafkaEasyTransMsgPublisherImpl.java    From EasyTransaction with Apache License 2.0
@Override
public EasyTransMsgPublishResult publish(String topic, String tag, String key, Map<String,Object> header, byte[] msgByte) {
	String kafkaTopic = QueueKafkaHelper.getKafkaTopic(topic, tag);
	
	//calculate partition
	TransactionId trxId = (TransactionId) header.get(EasytransConstant.CallHeadKeys.PARENT_TRX_ID_KEY);
	int partition = calcMessagePartition(kafkaTopic, trxId);
	
	List<Header> kafkaHeaderList = new ArrayList<>(header.size());
	for(Entry<String, Object> entry:header.entrySet()){
		kafkaHeaderList.add(new RecordHeader(entry.getKey(),serializer.serialization(entry.getValue())));
	}
	
	ProducerRecord<String, byte[]> record = new ProducerRecord<>(kafkaTopic, partition, null, key, msgByte, kafkaHeaderList);
	Future<RecordMetadata> sendResultFuture = kafkaProducer.send(record);
	try {
		RecordMetadata recordMetadata = sendResultFuture.get();
		log.info("message sent:" + recordMetadata);
	} catch (InterruptedException | ExecutionException e) {
		throw new RuntimeException("message sent error",e);
	}
	
	EasyTransMsgPublishResult easyTransMsgPublishResult = new EasyTransMsgPublishResult();
	easyTransMsgPublishResult.setTopic(topic);
	easyTransMsgPublishResult.setMessageId(key);
	return easyTransMsgPublishResult;
}
 
Example #24
Source File: SubscriptionManagerTest.java    From kafka-pubsub-emulator with Apache License 2.0
@Test
public void pull_withHeader() {
  int partitions = 1;
  int recordsPerPartition = 3;
  List<Header> headers = new ArrayList<>();
  headers.add(new RecordHeader("key1", "value1".getBytes()));
  headers.add(new RecordHeader("key2", "value2".getBytes()));
  generateTestRecordsForConsumers(partitions, recordsPerPartition, headers);

  // Each response should pull from a different partition
  List<String> messageIds = new ArrayList<>();
  List<String> messages = new ArrayList<>();
  List<Map<String, String>> attributes = new ArrayList<>();
  List<PubsubMessage> response = subscriptionManager.pull(10, false);
  for (PubsubMessage message : response) {
    messageIds.add(message.getMessageId());
    messages.add(message.getData().toStringUtf8());
    attributes.add(message.getAttributesMap());
  }

  assertThat(messageIds, Matchers.contains("0-0", "0-1", "0-2"));
  assertThat(messages, Matchers.contains("message-0000", "message-0001", "message-0002"));
  ImmutableMap<String, String> expectedAttributes =
      new Builder<String, String>().put("key1", "value1").put("key2", "value2").build();
  assertThat(
      attributes,
      Matchers.equalTo(
          Arrays.asList(expectedAttributes, expectedAttributes, expectedAttributes)));

  assertThat(subscriptionManager.pull(10, false), Matchers.empty());
}
 
Example #25
Source File: HeaderUtilsTest.java    From extension-kafka with Apache License 2.0
@Test
public void testGeneratingHeadersWithCustomMapperShouldGeneratedCorrectHeaders() {
    String metaKey = "someHeaderKey";
    String expectedMetaDataValue = "evt:someValue";
    Headers header = toHeaders(
            asEventMessage("SomePayload").withMetaData(MetaData.with(metaKey, "someValue")),
            serializedObject(),
            (key, value) -> new RecordHeader(key, ("evt:" + value.toString()).getBytes())
    );

    assertThat(valueAsString(header, generateMetadataKey(metaKey))).isEqualTo(expectedMetaDataValue);
}
 
Example #26
Source File: HeaderUtilsTest.java    From extension-kafka with Apache License 2.0
@Test
public void testByteMapperNullValueShouldBeAbleToHandle() {
    BiFunction<String, Object, RecordHeader> fxn = byteMapper();
    RecordHeader header = fxn.apply("abc", null);

    assertThat(header.value()).isNull();
}
 
Example #27
Source File: PublisherServiceTest.java    From kafka-pubsub-emulator with Apache License 2.0
@Test
public void publish_withAttributes() {
  int messages = 3;
  PublishRequest request =
      PublishRequest.newBuilder()
          .setTopic("projects/project-1/topics/topic-2")
          .addAllMessages(generatePubsubMessagesWithHeader(messages))
          .build();

  MockProducer<String, ByteBuffer> producer = startPublishExecutor(messages);

  PublishResponse response = blockingStub.publish(request);
  assertThat(response.getMessageIdsList(), Matchers.contains("0-0", "0-1", "0-2"));

  List<Headers> headers =
      producer.history().stream().map(ProducerRecord::headers).collect(Collectors.toList());
  assertThat(
      headers,
      Matchers.contains(
          new RecordHeaders(
              Collections.singletonList(
                  new RecordHeader("some-key", "some-value".getBytes(UTF_8)))),
          new RecordHeaders(
              Collections.singletonList(
                  new RecordHeader("some-key", "some-value".getBytes(UTF_8)))),
          new RecordHeaders(
              Collections.singletonList(
                  new RecordHeader("some-key", "some-value".getBytes(UTF_8))))));

  verify(statisticsManager, times(3))
      .computePublish(
          eq("projects/project-1/topics/topic-2"),
          argThat(message -> message.toStringUtf8().matches(MESSAGE_CONTENT_REGEX)),
          anyLong());
  verify(statisticsManager, never()).computePublishError(anyString());
}
 
Example #28
Source File: KafkaUtils.java    From sdk-java with Apache License 2.0
static RecordHeaders kafkaHeaders(RecordHeader... headers) {
    RecordHeaders hs = new RecordHeaders();
    for (RecordHeader h : headers) {
        hs.add(h);
    }
    return hs;
}
 
Example #29
Source File: KafkaProducerMessageWriterTest.java    From sdk-java with Apache License 2.0
@ParameterizedTest
@MethodSource("io.cloudevents.core.test.Data#allEventsWithoutExtensions")
void testRequestWithStructured(CloudEvent event) {
    String expectedContentType = CSVFormat.INSTANCE.serializedContentType();
    byte[] expectedBuffer = CSVFormat.INSTANCE.serialize(event);

    String topic = "test";
    Integer partition = 10;
    Long timestamp = System.currentTimeMillis();
    String key = "aaa";

    ProducerRecord<String, byte[]> producerRecord = StructuredMessageReader
        .from(event, CSVFormat.INSTANCE)
        .read(KafkaMessageFactory.createWriter(topic, partition, timestamp, key));

    assertThat(producerRecord.topic())
        .isEqualTo(topic);
    assertThat(producerRecord.partition())
        .isEqualTo(partition);
    assertThat(producerRecord.timestamp())
        .isEqualTo(timestamp);
    assertThat(producerRecord.key())
        .isEqualTo(key);
    assertThat(producerRecord.headers())
        .containsExactly(new RecordHeader(KafkaHeaders.CONTENT_TYPE, expectedContentType.getBytes()));
    assertThat(producerRecord.value())
        .isEqualTo(expectedBuffer);
}
 
Example #30
Source File: PublisherService.java    From kafka-pubsub-emulator with Apache License 2.0
private Headers buildHeaders(Map<String, String> attributesMap) {
  if (attributesMap == null || attributesMap.isEmpty()) {
    return null;
  }
  return new RecordHeaders(
      attributesMap
          .entrySet()
          .parallelStream()
          .map(attribute -> new RecordHeader(attribute.getKey(), attribute.getValue().getBytes()))
          .collect(Collectors.toList()));
}