io.vertx.kafka.client.consumer.OffsetAndMetadata Java Examples

The following examples show how to use io.vertx.kafka.client.consumer.OffsetAndMetadata, the Vert.x Kafka client's representation of a partition offset paired with an optional metadata string. Each example is taken from an open source project; the source file, project, and license are noted above each snippet.
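
Before the project examples, here is a minimal, self-contained sketch of the most common use of the class: building a map from TopicPartition to OffsetAndMetadata and committing it through a Vert.x KafkaConsumer. It is not taken from any of the projects below; the bootstrap address, topic, group id, offset, and deserializer settings are placeholders chosen for illustration.

import java.util.Collections;
import java.util.HashMap;
import java.util.Map;

import io.vertx.core.Vertx;
import io.vertx.kafka.client.common.TopicPartition;
import io.vertx.kafka.client.consumer.KafkaConsumer;
import io.vertx.kafka.client.consumer.OffsetAndMetadata;

public class CommitOffsetsSketch {

  public static void main(String[] args) {
    Vertx vertx = Vertx.vertx();

    // Placeholder consumer configuration
    Map<String, String> config = new HashMap<>();
    config.put("bootstrap.servers", "localhost:9092");
    config.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
    config.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
    config.put("group.id", "my_group");
    config.put("enable.auto.commit", "false");

    KafkaConsumer<String, String> consumer = KafkaConsumer.create(vertx, config);

    // By Kafka convention the committed offset is the next offset to read, not the last one processed
    TopicPartition partition = new TopicPartition("my_topic", 0);
    OffsetAndMetadata offset = new OffsetAndMetadata(43L, "processed up to offset 42");

    consumer.commit(Collections.singletonMap(partition, offset), ar -> {
      if (ar.succeeded()) {
        System.out.println("Committed: " + ar.result());
      } else {
        ar.cause().printStackTrace();
      }
    });
  }
}
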
Example #1
Source File: KafkaAdminClientImpl.java    From vertx-kafka-client with Apache License 2.0
public Future<Map<TopicPartition, OffsetAndMetadata>> listConsumerGroupOffsets(String groupId, ListConsumerGroupOffsetsOptions options) {
  ContextInternal ctx = (ContextInternal) vertx.getOrCreateContext();
  Promise<Map<TopicPartition, OffsetAndMetadata>> promise = ctx.promise();

  ListConsumerGroupOffsetsResult listConsumerGroupOffsetsResult = this.adminClient.listConsumerGroupOffsets(groupId, Helper.to(options));
  listConsumerGroupOffsetsResult.partitionsToOffsetAndMetadata().whenComplete((cgo, ex) -> {

    if (ex == null) {
      Map<TopicPartition, OffsetAndMetadata> consumerGroupOffsets = new HashMap<>();

      for (Map.Entry<org.apache.kafka.common.TopicPartition, org.apache.kafka.clients.consumer.OffsetAndMetadata> cgoOffset : cgo.entrySet()) {
        consumerGroupOffsets.put(Helper.from(cgoOffset.getKey()), Helper.from(cgoOffset.getValue()));
      }
      promise.complete(consumerGroupOffsets);
    } else {
      promise.fail(ex);
    }
  });
  return promise.future();
}
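
As a hedged usage sketch of the method above, the following shows how an application might list a group's offsets through the public KafkaAdminClient interface using the Future-returning variant. The KafkaAdminClient.create factory, its package, and the single configuration key are assumptions based on the vertx-kafka-client API; the group id is a placeholder.

import java.util.HashMap;
import java.util.Map;

import io.vertx.core.Vertx;
import io.vertx.kafka.admin.KafkaAdminClient;

public class ListGroupOffsetsSketch {

  public static void main(String[] args) {
    Vertx vertx = Vertx.vertx();

    // Assumed minimal admin client configuration
    Map<String, String> config = new HashMap<>();
    config.put("bootstrap.servers", "localhost:9092");

    KafkaAdminClient adminClient = KafkaAdminClient.create(vertx, config);

    // Resolves to a Map<TopicPartition, OffsetAndMetadata> keyed by the Vert.x TopicPartition type
    adminClient.listConsumerGroupOffsets("my_group").onComplete(ar -> {
      if (ar.succeeded()) {
        ar.result().forEach((tp, offset) ->
            System.out.println(tp.getTopic() + "/" + tp.getPartition() + " -> " + offset.getOffset()));
      } else {
        ar.cause().printStackTrace();
      }
    });
  }
}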
 
Example #2
Source File: SnowdropKafkaConsumerTest.java    From vertx-spring-boot with Apache License 2.0
@Test
public void shouldGetCommitted() {
    given(mockAxleConsumer.committed(new TopicPartition("test-topic", 1)))
        .willReturn(completedFuture(new OffsetAndMetadata(2, "test-metadata")));

    StepVerifier.create(consumer.committed(Partition.create("test-topic", 1)))
        .expectNext(2L)
        .verifyComplete();
}
 
Example #3
Source File: SnowdropKafkaConsumer.java    From vertx-spring-boot with Apache License 2.0
@Override
public Mono<Long> committed(Partition partition) {
    Objects.requireNonNull(partition, "Partition cannot be null");

    return Mono.fromCompletionStage(() -> delegate.committed(toVertxTopicPartition(partition)))
        .map(OffsetAndMetadata::getOffset);
}
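
A short, hedged usage sketch of the reactive API exercised in Examples #2 and #3. The package names and generic consumer type are assumptions about the vertx-spring-boot (Snowdrop) module, and the consumer is assumed to be created or injected elsewhere, as it normally would be in a Spring application; the topic and partition are placeholders.

// Package names below are assumptions about the Snowdrop Kafka module
import dev.snowdrop.vertx.kafka.KafkaConsumer;
import dev.snowdrop.vertx.kafka.Partition;

public class CommittedOffsetSketch {

  // The consumer instance is assumed to be injected by Spring
  static void printCommittedOffset(KafkaConsumer<String, String> consumer) {
    // committed() returns a Mono<Long>, as shown by the test in Example #2
    consumer.committed(Partition.create("my-topic", 0))
        .subscribe(
            offset -> System.out.println("Committed offset: " + offset),
            Throwable::printStackTrace);
  }
}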
 
Example #4
Source File: Helper.java    From vertx-kafka-client with Apache License 2.0
public static OffsetAndMetadata from(org.apache.kafka.clients.consumer.OffsetAndMetadata offsetAndMetadata) {
  if (offsetAndMetadata != null) {
    return new OffsetAndMetadata(offsetAndMetadata.offset(), offsetAndMetadata.metadata());
  } else {
    return null;
  }
}
 
Example #5
Source File: KafkaConsumerImpl.java    From vertx-kafka-client with Apache License 2.0
@Override
public void commit(Map<TopicPartition, OffsetAndMetadata> offsets, Handler<AsyncResult<Map<TopicPartition, OffsetAndMetadata>>> completionHandler) {

  this.stream.commit(Helper.to(offsets), done -> {

    if (done.succeeded()) {

      completionHandler.handle(Future.succeededFuture(Helper.from(done.result())));
    } else {
      completionHandler.handle(Future.failedFuture(done.cause()));
    }

  });
}
 
Example #6
Source File: KafkaConsumerImpl.java    From vertx-kafka-client with Apache License 2.0
@Override
public void committed(TopicPartition topicPartition, Handler<AsyncResult<OffsetAndMetadata>> handler) {
  this.stream.committed(Helper.to(topicPartition), done -> {

    if (done.succeeded()) {
      handler.handle(Future.succeededFuture(Helper.from(done.result())));
    } else {
      handler.handle(Future.failedFuture(done.cause()));
    }
  });
}
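
A hedged sketch of calling the committed() method above from application code; the topic and partition are placeholders, and the consumer is assumed to have been created as in the sketch after the introduction. Note that the result may be null when the group has not yet committed an offset for the partition (see the Helper.from conversion in Example #4).

import io.vertx.kafka.client.common.TopicPartition;
import io.vertx.kafka.client.consumer.KafkaConsumer;
import io.vertx.kafka.client.consumer.OffsetAndMetadata;

public class ReadCommittedSketch {

  static void printCommitted(KafkaConsumer<String, String> consumer) {
    consumer.committed(new TopicPartition("my_topic", 0), ar -> {
      if (ar.succeeded()) {
        OffsetAndMetadata committed = ar.result();
        // null means the group has no committed offset for this partition yet
        System.out.println(committed == null
            ? "No committed offset"
            : "Committed offset " + committed.getOffset() + " (" + committed.getMetadata() + ")");
      } else {
        ar.cause().printStackTrace();
      }
    });
  }
}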
 
Example #7
Source File: AmqpSinkBridgeEndpointMockTest.java    From strimzi-kafka-bridge with Apache License 2.0
@Test
public <K, V> void normalFlow_AtLeastOnce() throws Exception {
    String topic = "my_topic";
    Vertx vertx = Vertx.vertx();
    MockRecordProducer recordProducer = new MockRecordProducer(topic, 0, 0L);
    AmqpSinkBridgeEndpoint<K, V> endpoint = (AmqpSinkBridgeEndpoint) new AmqpSinkBridgeEndpoint<>(vertx, BridgeConfig.fromMap(config),
            EmbeddedFormat.JSON, new StringDeserializer(), new ByteArrayDeserializer());
    endpoint.open();

    // Create a mock for the sender
    ProtonSender mockSender = mockSender(ProtonQoS.AT_LEAST_ONCE, topic + "/group.id/my_group");

    // Call handle()
    endpoint.handle(new AmqpEndpoint(mockSender));

    // Now the consumer is set we can add a spy for it
    // ( so we can inspect KafkaConsumer.commit() )
    KafkaConsumer<K, V> consumerSpy = installConsumerSpy(endpoint);

    // Simulate vertx-kafka-client delivering a batch
    Method batchHandler = endpoint.getClass().getSuperclass().getDeclaredMethod("handleKafkaBatch", KafkaConsumerRecords.class);
    batchHandler.setAccessible(true);
    KafkaConsumerRecords<String, byte[]> mockRecords = mockRecords();

    // Simulate vertx-kafka-client delivering a record
    Method handler = endpoint.getClass().getSuperclass().getDeclaredMethod("handleKafkaRecord", KafkaConsumerRecord.class);
    handler.setAccessible(true);

    // Kafka batch of 1
    batchHandler.invoke(endpoint, mockRecords);
    handler.invoke(endpoint, recordProducer.mockRecord(null, () -> "Hello, world".getBytes()));

    // verify sender.send() was called and grab the arguments
    ArgumentCaptor<byte[]> tagCap = ArgumentCaptor.forClass(byte[].class);
    ArgumentCaptor<Message> messageCap = ArgumentCaptor.forClass(Message.class);
    ArgumentCaptor<Handler<ProtonDelivery>> handlerCap = ArgumentCaptor.forClass(Handler.class);
    verify(mockSender).send(tagCap.capture(), messageCap.capture(), handlerCap.capture());
    Message message = messageCap.getValue();

    // Assert the transformed message was as expected
    assertThat(message.getAddress(), is(topic + "/group.id/my_group"));
    assertThat(((Data) message.getBody()).getValue().getArray(), is("Hello, world".getBytes()));
    MessageAnnotations messageAnnotations = message.getMessageAnnotations();
    assertThat(messageAnnotations.getValue().get(Symbol.valueOf(AmqpBridge.AMQP_TOPIC_ANNOTATION)), is(topic));
    assertThat(messageAnnotations.getValue().get(Symbol.valueOf(AmqpBridge.AMQP_PARTITION_ANNOTATION)), is(0));
    assertThat(messageAnnotations.getValue().get(Symbol.valueOf(AmqpBridge.AMQP_OFFSET_ANNOTATION)), is(0L));

    // Simulate Proton delivering settlement
    ProtonDelivery mockDelivery = mock(ProtonDelivery.class);
    when(mockDelivery.getTag()).thenReturn(tagCap.getValue());
    handlerCap.getValue().handle(mockDelivery);

    // We now have to deliver another batch
    // because the AMQP delivery callback for the first message
    // fires after commitOffsets() is called for the last message of the first batch

    // Kafka batch of 1
    batchHandler.invoke(endpoint, mockRecords);
    handler.invoke(endpoint, recordProducer.mockRecord(null, () -> "Hello, world".getBytes()));

    ArgumentCaptor<Map<TopicPartition, OffsetAndMetadata>> commitMapCap = ArgumentCaptor.forClass(Map.class);
    verify(consumerSpy).commit(commitMapCap.capture(), any(Handler.class));

    // TODO test closure (commit)
}
 
Example #8
Source File: Helper.java    From vertx-kafka-client with Apache License 2.0
public static org.apache.kafka.clients.consumer.OffsetAndMetadata to(OffsetAndMetadata offsetAndMetadata) {
  return new org.apache.kafka.clients.consumer.OffsetAndMetadata(offsetAndMetadata.getOffset(), offsetAndMetadata.getMetadata());
}
 
Example #9
Source File: Helper.java    From vertx-kafka-client with Apache License 2.0
public static Map<TopicPartition, OffsetAndMetadata> from(Map<org.apache.kafka.common.TopicPartition, org.apache.kafka.clients.consumer.OffsetAndMetadata> offsets) {
  return offsets.entrySet().stream().collect(Collectors.toMap(
    e -> new TopicPartition(e.getKey().topic(), e.getKey().partition()),
    e -> new OffsetAndMetadata(e.getValue().offset(), e.getValue().metadata()))
  );
}
 
Example #10
Source File: Helper.java    From vertx-kafka-client with Apache License 2.0
public static Map<org.apache.kafka.common.TopicPartition, org.apache.kafka.clients.consumer.OffsetAndMetadata> to(Map<TopicPartition, OffsetAndMetadata> offsets) {
  return offsets.entrySet().stream().collect(Collectors.toMap(
    e -> new org.apache.kafka.common.TopicPartition(e.getKey().getTopic(), e.getKey().getPartition()),
    e -> new org.apache.kafka.clients.consumer.OffsetAndMetadata(e.getValue().getOffset(), e.getValue().getMetadata()))
  );
}
 
Example #11
Source File: KafkaConsumerImpl.java    From vertx-kafka-client with Apache License 2.0
@Override
public Future<OffsetAndMetadata> committed(TopicPartition topicPartition) {
  Promise<OffsetAndMetadata> promise = Promise.promise();
  committed(topicPartition, promise);
  return promise.future();
}
 
Example #12
Source File: KafkaConsumerImpl.java    From vertx-kafka-client with Apache License 2.0
@Override
public Future<Map<TopicPartition, OffsetAndMetadata>> commit(Map<TopicPartition, OffsetAndMetadata> offsets) {
  Promise<Map<TopicPartition, OffsetAndMetadata>> promise = Promise.promise();
  commit(offsets, promise);
  return promise.future();
}
 
Example #13
Source File: KafkaAdminClientImpl.java    From vertx-kafka-client with Apache License 2.0
public void listConsumerGroupOffsets(String groupId, ListConsumerGroupOffsetsOptions options, Handler<AsyncResult<Map<TopicPartition, OffsetAndMetadata>>> completionHandler) {
  listConsumerGroupOffsets(groupId, options).onComplete(completionHandler);
}
 
Example #14
Source File: KafkaAdminClient.java    From vertx-kafka-client with Apache License 2.0
/**
 * Like {@link #listConsumerGroupOffsets(String, Handler)} but returns a {@code Future} of the asynchronous result
 */
@GenIgnore
default Future<Map<TopicPartition, OffsetAndMetadata>> listConsumerGroupOffsets(String groupId) {
  return listConsumerGroupOffsets(groupId, new ListConsumerGroupOffsetsOptions());
}
 
Example #15
Source File: KafkaAdminClient.java    From vertx-kafka-client with Apache License 2.0
/**
 * List the consumer group offsets available in the cluster.
 *
 * @param groupId The group id of the group whose offsets will be listed
 * @param completionHandler handler called when the operation completes, with the consumer group offsets
 */
@GenIgnore
default void listConsumerGroupOffsets(String groupId, Handler<AsyncResult<Map<TopicPartition, OffsetAndMetadata>>> completionHandler) {
  listConsumerGroupOffsets(groupId, new ListConsumerGroupOffsetsOptions(), completionHandler);
}
 
Example #16
Source File: KafkaAdminClient.java    From vertx-kafka-client with Apache License 2.0
/**
 * Like {@link #listConsumerGroupOffsets(String, ListConsumerGroupOffsetsOptions, Handler)} but returns a {@code Future} of the asynchronous result
 */
@GenIgnore
Future<Map<TopicPartition, OffsetAndMetadata>> listConsumerGroupOffsets(String groupId, ListConsumerGroupOffsetsOptions options);
 
Example #17
Source File: KafkaAdminClient.java    From vertx-kafka-client with Apache License 2.0
/**
 * List the consumer group offsets available in the cluster.
 *
 * @param groupId The group id of the group whose offsets will be listed
 * @param options The options to use when listing the consumer group offsets.
 * @param completionHandler handler called when the operation completes, with the consumer group offsets
 */
@GenIgnore
void listConsumerGroupOffsets(String groupId, ListConsumerGroupOffsetsOptions options, Handler<AsyncResult<Map<TopicPartition, OffsetAndMetadata>>> completionHandler);
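
To round off, a hedged sketch of calling the handler-based variant declared above. The group id is a placeholder, the admin client is assumed to have been created as in the sketch after Example #1, and the package of ListConsumerGroupOffsetsOptions is an assumption.

// The admin package names below are assumptions based on the vertx-kafka-client API
import io.vertx.kafka.admin.KafkaAdminClient;
import io.vertx.kafka.admin.ListConsumerGroupOffsetsOptions;

public class ListGroupOffsetsHandlerSketch {

  static void printGroupOffsets(KafkaAdminClient adminClient) {
    // Handler-based equivalent of the Future-returning call shown after Example #1
    adminClient.listConsumerGroupOffsets("my_group", new ListConsumerGroupOffsetsOptions(), ar -> {
      if (ar.succeeded()) {
        ar.result().forEach((tp, offset) ->
            System.out.println(tp.getTopic() + "/" + tp.getPartition() + " -> " + offset.getOffset()));
      } else {
        ar.cause().printStackTrace();
      }
    });
  }
}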