org.apache.kafka.clients.consumer.OffsetAndTimestamp Java Examples

The following examples show how to use org.apache.kafka.clients.consumer.OffsetAndTimestamp, drawn from a number of open-source projects. The source file, project, and license for each example are noted above it.
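
Before the project examples, here is a minimal, self-contained sketch of the pattern most of them share: build a map from each TopicPartition to a target timestamp, call Consumer#offsetsForTimes, and seek to the offsets carried by the returned OffsetAndTimestamp values. The broker address, group id, and topic name below are illustrative placeholders, and the end-of-partition fallback is one common choice, not the only one.

import java.time.Duration;
import java.time.Instant;
import java.util.HashMap;
import java.util.Map;
import java.util.Properties;

import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.consumer.OffsetAndTimestamp;
import org.apache.kafka.common.TopicPartition;

public class OffsetsForTimesSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put("bootstrap.servers", "localhost:9092"); // illustrative broker address
        props.put("group.id", "offsets-for-times-demo");  // illustrative group id
        props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");

        try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
            // Ask for the first offset at or after "one hour ago" on partition 0 of a hypothetical topic.
            TopicPartition tp = new TopicPartition("demo-topic", 0);
            Map<TopicPartition, Long> query = new HashMap<>();
            query.put(tp, Instant.now().minus(Duration.ofHours(1)).toEpochMilli());

            consumer.assign(query.keySet());
            Map<TopicPartition, OffsetAndTimestamp> result = consumer.offsetsForTimes(query);

            OffsetAndTimestamp oat = result.get(tp);
            if (oat != null) {
                // OffsetAndTimestamp carries the matched offset and the timestamp of that record.
                consumer.seek(tp, oat.offset());
            } else {
                // A null value means no record at or after the timestamp; fall back to the end of the partition.
                consumer.seekToEnd(query.keySet());
            }
        }
    }
}
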
Example #1
Source File: ConsumerProxy.java    From kbear with Apache License 2.0
@Override
public Map<TopicPartition, OffsetAndTimestamp> offsetsForTimes(Map<TopicPartition, Long> timestampsToSearch,
        Duration timeout) {
    ObjectExtension.requireNonNull(timestampsToSearch, "timestampsToSearch");
    Objects.requireNonNull(timeout, "timeout");
    if (timeout.toMillis() < 0)
        throw new IllegalArgumentException("timeout must not be negative");

    return runWithoutConcurrency(() -> {
        Map<String, Map<TopicPartition, Long>> byTopic = new HashMap<>();
        timestampsToSearch
                .forEach((tp, ts) -> byTopic.computeIfAbsent(tp.topic(), k -> new HashMap<>()).put(tp, ts));
        Map<TopicPartition, OffsetAndTimestamp> result = new HashMap<>();
        forEach(byTopic::containsKey,
                (t, c) -> result.putAll(c.getConsumer().offsetsForTimes(byTopic.get(t), timeout)));
        return Collections.unmodifiableMap(result);
    });
}
 
Example #2
Source File: DefaultWebKafkaConsumer.java    From kafka-webview with MIT License
/**
 * Seek the consumer to a specific timestamp.
 * @param timestamp Unix timestamp in milliseconds to seek to.
 */
@Override
public ConsumerState seek(final long timestamp) {
    // Find offsets for timestamp
    final Map<TopicPartition, Long> timestampMap = new HashMap<>();
    for (final TopicPartition topicPartition: getAllPartitions()) {
        timestampMap.put(topicPartition, timestamp);
    }
    final Map<TopicPartition, OffsetAndTimestamp> offsetMap = kafkaConsumer.offsetsForTimes(timestampMap);

    // Build map of partition => offset
    final Map<Integer, Long> partitionOffsetMap = new HashMap<>();
    for (Map.Entry<TopicPartition, OffsetAndTimestamp> entry: offsetMap.entrySet()) {
        partitionOffsetMap.put(entry.getKey().partition(), entry.getValue().offset());
    }

    // Now let's seek to those offsets
    return seek(partitionOffsetMap);
}
 
Example #3
Source File: SocketKafkaConsumer.java    From kafka-webview with MIT License
/**
 * Seek the consumer to a specific timestamp.
 * @param timestamp Unix timestamp in milliseconds to seek to.
 */
private void seekToTimestamp(final long timestamp) {
    // Find offsets for timestamp
    final Map<TopicPartition, Long> timestampMap = new HashMap<>();
    for (final TopicPartition topicPartition: getAllPartitions()) {
        timestampMap.put(topicPartition, timestamp);
    }
    final Map<TopicPartition, OffsetAndTimestamp> offsetMap = kafkaConsumer.offsetsForTimes(timestampMap);

    // Build map of topic partition => offset
    final Map<TopicPartition, Long> partitionOffsetMap = new HashMap<>();
    for (Map.Entry<TopicPartition, OffsetAndTimestamp> entry: offsetMap.entrySet()) {
        partitionOffsetMap.put(entry.getKey(), entry.getValue().offset());
    }

    // Now let's seek to those offsets
    seek(partitionOffsetMap);
}
 
Example #4
Source File: ConsumerTest.java    From kbear with Apache License 2.0
protected void offsetsForTimes(
        BiFunction<Consumer<String, String>, Map<TopicPartition, Long>, Map<TopicPartition, OffsetAndTimestamp>> offsetsForTimesFetcher)
        throws InterruptedException {
    produceMessages();

    try (Consumer<String, String> consumer = createConsumer()) {
        consumer.subscribe(_topics);
        pollDurationTimeout(consumer);

        Map<TopicPartition, Long> topicPartitionTimes = new HashMap<>();
        long time = System.currentTimeMillis() - _sendInterval * 5;
        _topicPartitions.forEach(tp -> {
            topicPartitionTimes.put(tp, time);
        });
        Map<TopicPartition, OffsetAndTimestamp> results = offsetsForTimesFetcher.apply(consumer,
                topicPartitionTimes);
        System.out.println("results: " + results);
        Assert.assertFalse(CollectionExtension.isEmpty(results));
    }
}
 
Example #5
Source File: ParallelWebKafkaConsumer.java    From kafka-webview with MIT License
@Override
public ConsumerState seek(final long timestamp) {

    // Find offsets for timestamp
    final Map<TopicPartition, Long> timestampMap = new HashMap<>();
    for (final TopicPartition topicPartition : getAllPartitions(getCoordinatorConsumer())) {
        timestampMap.put(topicPartition, timestamp);
    }
    final Map<TopicPartition, OffsetAndTimestamp> offsetMap = getCoordinatorConsumer().offsetsForTimes(timestampMap);

    // Build map of partition => offset
    final Map<Integer, Long> partitionOffsetMap = new HashMap<>();
    for (Map.Entry<TopicPartition, OffsetAndTimestamp> entry : offsetMap.entrySet()) {
        partitionOffsetMap.put(entry.getKey().partition(), entry.getValue().offset());
    }

    // Now let's seek to those offsets
    return seek(partitionOffsetMap);
}
 
Example #6
Source File: ConsumerSpEL.java    From beam with Apache License 2.0
/**
 * Look up the offset for the given partition by timestamp. Throws RuntimeException if there is
 * no message later than the timestamp or if this partition does not support timestamp-based offsets.
 */
@SuppressWarnings("unchecked")
public long offsetForTime(Consumer<?, ?> consumer, TopicPartition topicPartition, Instant time) {

  checkArgument(hasOffsetsForTimes, "This Kafka client must support Consumer.offsetsForTimes().");

  // 'value' in the map returned by offsetsForTimes() is null if there is no offset for the time.
  OffsetAndTimestamp offsetAndTimestamp =
      Iterables.getOnlyElement(
          consumer.offsetsForTimes(ImmutableMap.of(topicPartition, time.getMillis())).values());

  if (offsetAndTimestamp == null) {
    throw new RuntimeException(
        "There is no message with a timestamp greater than or equal to the target time, "
            + "or the message format version in this partition is before 0.10.0; "
            + "topicPartition is: " + topicPartition);
  } else {
    return offsetAndTimestamp.offset();
  }
}
 
Example #7
Source File: KafkaSystemAdmin.java    From samza with Apache License 2.0
@Override
public String visit(SystemStreamPartition systemStreamPartition, StartpointTimestamp startpointTimestamp) {
  Preconditions.checkNotNull(startpointTimestamp, "Startpoint cannot be null");
  Preconditions.checkNotNull(startpointTimestamp.getTimestampOffset(), "Timestamp field in startpoint cannot be null");
  TopicPartition topicPartition = toTopicPartition(systemStreamPartition);

  Map<TopicPartition, Long> topicPartitionToTimestamp = ImmutableMap.of(topicPartition, startpointTimestamp.getTimestampOffset());
  LOG.info("Finding offset for timestamp: {} in topic partition: {}.", startpointTimestamp.getTimestampOffset(), topicPartition);
  Map<TopicPartition, OffsetAndTimestamp> topicPartitionToOffsetTimestamps = threadSafeKafkaConsumer.execute(consumer -> consumer.offsetsForTimes(topicPartitionToTimestamp));

  OffsetAndTimestamp offsetAndTimestamp = topicPartitionToOffsetTimestamps.get(topicPartition);
  if (offsetAndTimestamp != null) {
    return String.valueOf(offsetAndTimestamp.offset());
  } else {
    LOG.info("Offset for timestamp: {} does not exist for partition: {}. Falling back to end offset.", startpointTimestamp.getTimestampOffset(), topicPartition);
    return getEndOffset(systemStreamPartition);
  }
}
 
Example #8
Source File: TestKafkaSystemAdminJava.java    From samza with Apache License 2.0
@Test
public void testStartpointTimestampVisitorShouldResolveToCorrectOffset() {
  // Define dummy variables for testing.
  final Long testTimeStamp = 10L;

  final KafkaConsumer consumer = Mockito.mock(KafkaConsumer.class);

  final KafkaStartpointToOffsetResolver kafkaStartpointToOffsetResolver = new KafkaStartpointToOffsetResolver(consumer);

  final StartpointTimestamp startpointTimestamp = new StartpointTimestamp(testTimeStamp);
  final Map<TopicPartition, OffsetAndTimestamp> offsetForTimesResult = ImmutableMap.of(
      TEST_TOPIC_PARTITION, new OffsetAndTimestamp(Long.valueOf(TEST_OFFSET), testTimeStamp));

  // Mock the consumer interactions.
  Mockito.when(consumer.offsetsForTimes(ImmutableMap.of(TEST_TOPIC_PARTITION, testTimeStamp))).thenReturn(offsetForTimesResult);
  Mockito.when(consumer.position(TEST_TOPIC_PARTITION)).thenReturn(Long.valueOf(TEST_OFFSET));

  String resolvedOffset = kafkaStartpointToOffsetResolver.visit(TEST_SYSTEM_STREAM_PARTITION, startpointTimestamp);
  Assert.assertEquals(TEST_OFFSET, resolvedOffset);
}
 
Example #9
Source File: TestKafkaSystemAdminJava.java    From samza with Apache License 2.0
@Test
public void testStartpointTimestampVisitorShouldResolveToCorrectOffsetWhenTimestampDoesNotExist() {
  final KafkaConsumer consumer = Mockito.mock(KafkaConsumer.class);
  final KafkaStartpointToOffsetResolver kafkaStartpointToOffsetResolver = new KafkaStartpointToOffsetResolver(consumer);

  final StartpointTimestamp startpointTimestamp = new StartpointTimestamp(0L);
  final Map<TopicPartition, OffsetAndTimestamp> offsetForTimesResult = new HashMap<>();
  offsetForTimesResult.put(TEST_TOPIC_PARTITION, null);

  // Mock the consumer interactions.
  Mockito.when(consumer.offsetsForTimes(ImmutableMap.of(TEST_TOPIC_PARTITION, 0L))).thenReturn(offsetForTimesResult);
  Mockito.when(consumer.endOffsets(ImmutableSet.of(TEST_TOPIC_PARTITION))).thenReturn(ImmutableMap.of(TEST_TOPIC_PARTITION, 10L));

  String resolvedOffset = kafkaStartpointToOffsetResolver.visit(TEST_SYSTEM_STREAM_PARTITION, startpointTimestamp);
  Assert.assertEquals(TEST_OFFSET, resolvedOffset);

  // Mock verifications.
  Mockito.verify(consumer).offsetsForTimes(ImmutableMap.of(TEST_TOPIC_PARTITION, 0L));
}
 
Example #10
Source File: SamplingUtils.java    From cruise-control with BSD 2-Clause "Simplified" License
/**
 * Check whether there are failures in fetching offsets during sampling.
 *
 * @param endOffsets End offsets retrieved by consumer.
 * @param offsetsForTimes Offsets for times retrieved by consumer.
 */
static void sanityCheckOffsetFetch(Map<TopicPartition, Long> endOffsets,
                                   Map<TopicPartition, OffsetAndTimestamp> offsetsForTimes)
    throws MetricSamplingException {
  Set<TopicPartition> failedToFetchOffsets = new HashSet<>();
  for (Map.Entry<TopicPartition, OffsetAndTimestamp> entry : offsetsForTimes.entrySet()) {
    if (entry.getValue() == null && endOffsets.get(entry.getKey()) == null) {
      failedToFetchOffsets.add(entry.getKey());
    }
  }

  if (!failedToFetchOffsets.isEmpty()) {
    throw new MetricSamplingException(String.format("Metric consumer failed to fetch offsets for %s. Consider "
                                                    + "decreasing reconnect.backoff.ms to mitigate consumption failures"
                                                    + " due to transient network issues.", failedToFetchOffsets));
  }
}
 
Example #11
Source File: ConsumerOffsetClient.java    From common-kafka with Apache License 2.0
/**
 * Returns the offsets for the provided topics at the specified {@code time}.  If no offset for a topic or partition
 * is available at the specified {@code time} then the {@link #getEndOffsets(Collection) latest} offsets
 * for that partition are returned.
 *
 * @param topics
 *      collection of Kafka topics
 * @param time the specific time at which to retrieve offsets
 * @return the offsets for the provided topics at the specified time.
 * @throws org.apache.kafka.common.KafkaException
 *      if there is an issue fetching the offsets
 * @throws IllegalArgumentException
 *      if topics is null
 */
public Map<TopicPartition, Long> getOffsetsForTimes(Collection<String> topics, long time) {
    if (topics == null)
        throw new IllegalArgumentException("topics cannot be null");

    Collection<TopicPartition> partitions = getPartitionsFor(topics);

    // Find all the offsets at the specified time, re-using the partition list fetched above.
    Map<TopicPartition, Long> topicTimes = partitions
            .stream().collect(Collectors.toMap(Function.identity(), s -> time));
    Map<TopicPartition, OffsetAndTimestamp> foundOffsets = consumer.offsetsForTimes(topicTimes);

    // Merge the found offsets into a single collection, skipping partitions with no offset at that time
    // (offsetsForTimes maps such partitions to null).
    Map<TopicPartition, Long> offsets = new HashMap<>();
    offsets.putAll(foundOffsets.entrySet().stream()
            .filter(e -> e.getValue() != null)
            .collect(Collectors.toMap(Map.Entry::getKey, e -> e.getValue().offset())));

    // If some partitions do not have offsets at the specified time, fall back to their end offsets.
    List<TopicPartition> missingPartitions = partitions.stream()
            .filter(t -> !offsets.containsKey(t)).collect(Collectors.toList());
    if (!missingPartitions.isEmpty()) {
        Map<TopicPartition, Long> missingOffsets = consumer.endOffsets(missingPartitions);
        offsets.putAll(missingOffsets);
    }

    return offsets;
}
 
Example #12
Source File: ConsumerRebalanceHandlerTest.java    From synapse with Apache License 2.0
@Test
public void shouldSeekToPositionAfterAssignmentStartingFromTimestamp() {
    // given
    final ApplicationEventPublisher eventPublisher = mock(ApplicationEventPublisher.class);
    final KafkaConsumer<String, String> consumer = mock(KafkaConsumer.class);
    when(consumer.offsetsForTimes(of(
            new TopicPartition("foo", 0), 42L)))
            .thenReturn(of(
                    new TopicPartition("foo", 0), new OffsetAndTimestamp(4711L, 42L))
            );

    final ConsumerRebalanceHandler handler = new ConsumerRebalanceHandler(
            "foo",
            channelPosition(fromTimestamp("0", ofEpochMilli(42))),
            eventPublisher,
            consumer);

    // when
    handler.onPartitionsAssigned(asList(
            new TopicPartition("foo", 0)
    ));

    // then
    verify(consumer).seek(new TopicPartition("foo", 0), 4711);
}
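
In this test, of(...) is a statically imported map factory (presumably Guava's ImmutableMap.of or java.util.Map.of), and channelPosition, fromTimestamp, and ofEpochMilli are likewise static imports from the project's test utilities and java.time.Instant.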
 
Example #13
Source File: ConsumerSpEL.java    From DataflowTemplates with Apache License 2.0
/**
 * Look up the offset for the given partition by timestamp. Throws RuntimeException if there is
 * no message later than the timestamp or if this partition does not support timestamp-based offsets.
 */
@SuppressWarnings("unchecked")
public long offsetForTime(Consumer<?, ?> consumer, TopicPartition topicPartition, Instant time) {

  checkArgument(hasOffsetsForTimes, "This Kafka client must support Consumer.offsetsForTimes().");

  // 'value' in the map returned by offsetsForTimes() is null if there is no offset for the time.
  OffsetAndTimestamp offsetAndTimestamp =
      Iterables.getOnlyElement(
          consumer.offsetsForTimes(ImmutableMap.of(topicPartition, time.getMillis())).values());

  if (offsetAndTimestamp == null) {
    throw new RuntimeException(
        "There is no message with a timestamp greater than or equal to the target time, "
            + "or the message format version in this partition is before 0.10.0; "
            + "topicPartition is: " + topicPartition);
  } else {
    return offsetAndTimestamp.offset();
  }
}
 
Example #14
Source File: ReplicaStatsUtil.java    From doctorkafka with Apache License 2.0
public static Map<TopicPartition, Long> getProcessingStartOffsets(
    KafkaConsumer<?, ?> kafkaConsumer,
    String brokerStatsTopic,
    long startTimestampInMillis) {
  List<TopicPartition> tpList = kafkaConsumer.partitionsFor(brokerStatsTopic).stream()
                .map(p->new TopicPartition(p.topic(), p.partition())).collect(Collectors.toList());
  Map<TopicPartition, Long> partitionMap = new HashMap<>();
  for (TopicPartition topicPartition : tpList) {
    partitionMap.put(topicPartition, startTimestampInMillis);
  }

  Map<TopicPartition, OffsetAndTimestamp> offsetsForTimes = kafkaConsumer
      .offsetsForTimes(partitionMap);
  for (Map.Entry<TopicPartition, OffsetAndTimestamp> entry : offsetsForTimes.entrySet()) {
    partitionMap.put(entry.getKey(), entry.getValue().offset());
  }
  return partitionMap;
}
 
Example #15
Source File: Kafka0_10ConsumerLoader.java    From datacollector with Apache License 2.0
private void setOffsetsByTimestamp(String topic, KafkaConsumer kafkaAuxiliaryConsumer) {
  // Build map of topics partitions and timestamp to use when searching offset for that partition (same timestamp
  // for all the partitions)
  List<PartitionInfo> partitionInfoList = kafkaAuxiliaryConsumer.partitionsFor(topic);

  if (partitionInfoList != null) {
    Map<TopicPartition, Long> partitionsAndTimestampMap = partitionInfoList.stream().map(e -> new TopicPartition(
        topic,
        e.partition()
    )).collect(Collectors.toMap(e -> e, (e) -> timestampToSearchOffsets));

    // Get Offsets by timestamp using previously built map and commit them to corresponding partition
    if (!partitionsAndTimestampMap.isEmpty()) {
      Map<TopicPartition, OffsetAndTimestamp> partitionsOffsets = kafkaAuxiliaryConsumer.offsetsForTimes(
          partitionsAndTimestampMap);
      if (partitionsOffsets != null && !partitionsOffsets.isEmpty()) {
        Map<TopicPartition, OffsetAndMetadata> offsetsToCommit = partitionsOffsets.entrySet().stream().filter(
            entry -> entry.getKey() != null && entry.getValue() != null).collect(
            Collectors.toMap(entry -> entry.getKey(), entry -> new OffsetAndMetadata(entry.getValue().offset())));

        if (!offsetsToCommit.isEmpty()) {
          kafkaAuxiliaryConsumer.commitSync(offsetsToCommit);
        }
      }
    }
  }
}
 
Example #16
Source File: KafkaConsumerFromTime.java    From post-kafka-rewind-consumer-offset with MIT License
public static void main(String[] args) {
    KafkaConsumer<String, String> consumer = createConsumer();
    consumer.subscribe(Arrays.asList(TOPIC));

    boolean flag = true;

    while (true) {
        ConsumerRecords<String, String> records = consumer.poll(100);

        if (flag) {
            Set<TopicPartition> assignments = consumer.assignment();
            Map<TopicPartition, Long> query = new HashMap<>();
            for (TopicPartition topicPartition : assignments) {
                query.put(
                        topicPartition,
                        Instant.now().minus(10, MINUTES).toEpochMilli());
            }

            Map<TopicPartition, OffsetAndTimestamp> result = consumer.offsetsForTimes(query);

            result.entrySet()
                    .stream()
                    .forEach(entry ->
                            consumer.seek(
                                    entry.getKey(),
                                    Optional.ofNullable(entry.getValue())
                                            .map(OffsetAndTimestamp::offset)
                                            .orElse(0L)));

            flag = false;
        }

        for (ConsumerRecord<String, String> record : records)
            System.out.printf("offset = %d, key = %s, value = %s%n", record.offset(), record.key(), record.value());
    }
}
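
A side note on the orElse(0L) fallback above: when a partition has no record from the last ten minutes, this example rewinds to offset 0, i.e. the beginning of the partition. Other examples on this page fall back to the end offsets instead; which is appropriate depends on whether replaying old data is acceptable.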
 
Example #17
Source File: BaseKafkaConsumer11.java    From datacollector with Apache License 2.0
private void setOffsetsByTimestamp() {
  try (KafkaConsumer kafkaAuxiliaryConsumer = new KafkaConsumer(auxiliaryKafkaConsumerProperties)) {
    // Build map of topics partitions and timestamp to use when searching offset for that partition (same timestamp
    // for all the partitions)
    List<PartitionInfo> partitionInfoList = kafkaAuxiliaryConsumer.partitionsFor(topic);

    if (partitionInfoList != null) {
      Map<TopicPartition, Long> partitionsAndTimestampMap = partitionInfoList.stream().map(e -> new TopicPartition(
          topic,
          e.partition()
      )).collect(Collectors.toMap(e -> e, (e) -> timestampToSearchOffsets));

      // Get Offsets by timestamp using previously built map and commit them to corresponding partition
      if (!partitionsAndTimestampMap.isEmpty()) {
        Map<TopicPartition, OffsetAndTimestamp> partitionsOffsets = kafkaAuxiliaryConsumer.offsetsForTimes(
            partitionsAndTimestampMap);
        if (partitionsOffsets != null && !partitionsOffsets.isEmpty()) {
          Map<TopicPartition, OffsetAndMetadata> offsetsToCommit = partitionsOffsets.entrySet().stream().filter(
              entry -> entry.getKey() != null && entry.getValue() != null).collect(
              Collectors.toMap(entry -> entry.getKey(), entry -> new OffsetAndMetadata(entry.getValue().offset())));

          if (!offsetsToCommit.isEmpty()) {
            kafkaAuxiliaryConsumer.commitSync(offsetsToCommit);
          }
        }
      }
    }
  }
}
 
Example #18
Source File: FlinkKafkaConsumer010.java    From Flink-CEPplus with Apache License 2.0
@Override
protected Map<KafkaTopicPartition, Long> fetchOffsetsWithTimestamp(
		Collection<KafkaTopicPartition> partitions,
		long timestamp) {

	Map<TopicPartition, Long> partitionOffsetsRequest = new HashMap<>(partitions.size());
	for (KafkaTopicPartition partition : partitions) {
		partitionOffsetsRequest.put(
			new TopicPartition(partition.getTopic(), partition.getPartition()),
			timestamp);
	}

	final Map<KafkaTopicPartition, Long> result = new HashMap<>(partitions.size());

	// use a short-lived consumer to fetch the offsets;
	// this is ok because this is a one-time operation that happens only on startup
	try (KafkaConsumer<?, ?> consumer = new KafkaConsumer(properties)) {
		for (Map.Entry<TopicPartition, OffsetAndTimestamp> partitionToOffset :
			consumer.offsetsForTimes(partitionOffsetsRequest).entrySet()) {

			result.put(
				new KafkaTopicPartition(partitionToOffset.getKey().topic(), partitionToOffset.getKey().partition()),
				(partitionToOffset.getValue() == null) ? null : partitionToOffset.getValue().offset());
		}
	}

	return result;
}
 
Example #19
Source File: KafkaEventSource.java    From mewbase with MIT License
@Override
public CompletableFuture<Subscription> subscribeFromInstant(String channelName, Instant startInstant, EventHandler eventHandler) {
    TopicPartition partition0 = new TopicPartition(channelName, partitionZeroOnly);
    KafkaConsumer<String, byte[]> kafkaConsumer = createAndAssignConsumer(partition0);
    Map<TopicPartition, Long> timeForPartition0 = new HashMap<>(1);
    timeForPartition0.put(partition0, startInstant.toEpochMilli());
    OffsetAndTimestamp offsetAndTimestamp = kafkaConsumer.offsetsForTimes(timeForPartition0).get(partition0);
    kafkaConsumer.seek(partition0, offsetAndTimestamp.offset());
    return CompletableFuture.completedFuture(createAndRegisterSubscription(kafkaConsumer,eventHandler));
}
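
The seek above assumes a matching record exists: offsetsForTimes maps a partition to null when the topic has no record at or after the given instant, so offsetAndTimestamp.offset() can throw a NullPointerException. A minimal, hypothetical guard for this pattern, falling back to the end of the partition, might look like:

OffsetAndTimestamp offsetAndTimestamp = kafkaConsumer.offsetsForTimes(timeForPartition0).get(partition0);
if (offsetAndTimestamp != null) {
    kafkaConsumer.seek(partition0, offsetAndTimestamp.offset());
} else {
    kafkaConsumer.seekToEnd(Collections.singleton(partition0)); // requires java.util.Collections
}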
 
Example #20
Source File: LiKafkaInstrumentedConsumerImpl.java    From li-apache-kafka-clients with BSD 2-Clause "Simplified" License
@Override
public Map<TopicPartition, OffsetAndTimestamp> offsetsForTimes(Map<TopicPartition, Long> timestampsToSearch) {
  try (
      @SuppressWarnings("unused") CloseableLock uLock = new CloseableLock(userLock);
      @SuppressWarnings("unused") CloseableLock srLock = new CloseableLock(delegateLock.readLock())
  ) {
    verifyOpen();
    return delegate.offsetsForTimes(timestampsToSearch);
  }
}
 
Example #21
Source File: LiKafkaInstrumentedConsumerImpl.java    From li-apache-kafka-clients with BSD 2-Clause "Simplified" License
@Override
public Map<TopicPartition, OffsetAndTimestamp> offsetsForTimes(Map<TopicPartition, Long> timestampsToSearch,
    Duration timeout) {
  try (
      @SuppressWarnings("unused") CloseableLock uLock = new CloseableLock(userLock);
      @SuppressWarnings("unused") CloseableLock srLock = new CloseableLock(delegateLock.readLock())
  ) {
    verifyOpen();
    return delegate.offsetsForTimes(timestampsToSearch, timeout);
  }
}
 
Example #22
Source File: KafkaSampleStore.java    From cruise-control with BSD 2-Clause "Simplified" License
/**
 * Configure the sample loading consumers to consume from the proper starting offsets. The sample store Kafka topic may
 * contain data that is too old for {@link com.linkedin.cruisecontrol.monitor.sampling.aggregator.MetricSampleAggregator}
 * to keep in memory; to avoid loading this stale data, manually seek each consumer's starting offset to the offset at
 * the proper timestamp.
 */
protected void prepareConsumerOffset() {
  Map<TopicPartition, Long> beginningTimestamp = new HashMap<>(_consumer.assignment().size());
  long currentTimeMs = System.currentTimeMillis();
  for (TopicPartition tp : _consumer.assignment()) {
    if (tp.topic().equals(_brokerMetricSampleStoreTopic)) {
      beginningTimestamp.put(tp, currentTimeMs - _sampleLoader.brokerMonitoringPeriodMs());
    } else {
      beginningTimestamp.put(tp, currentTimeMs - _sampleLoader.partitionMonitoringPeriodMs());
    }
  }

  Set<TopicPartition> partitionWithNoRecentMessage = new HashSet<>();
  Map<TopicPartition, OffsetAndTimestamp> beginningOffsetAndTimestamp = _consumer.offsetsForTimes(beginningTimestamp);
  for (Map.Entry<TopicPartition, OffsetAndTimestamp> entry: beginningOffsetAndTimestamp.entrySet()) {
    if (entry.getValue() == null) {
      // If this sample store topic partition does not have data available after beginning timestamp, then seek to the
      // beginning of this topic partition.
      partitionWithNoRecentMessage.add(entry.getKey());
    } else {
      _consumer.seek(entry.getKey(), entry.getValue().offset());
    }
  }
  if (partitionWithNoRecentMessage.size() > 0) {
    _consumer.seekToBeginning(partitionWithNoRecentMessage);
  }
}
 
Example #23
Source File: FlinkKafkaConsumer.java    From flink with Apache License 2.0
@Override
protected Map<KafkaTopicPartition, Long> fetchOffsetsWithTimestamp(
	Collection<KafkaTopicPartition> partitions,
	long timestamp) {

	Map<TopicPartition, Long> partitionOffsetsRequest = new HashMap<>(partitions.size());
	for (KafkaTopicPartition partition : partitions) {
		partitionOffsetsRequest.put(
			new TopicPartition(partition.getTopic(), partition.getPartition()),
			timestamp);
	}

	final Map<KafkaTopicPartition, Long> result = new HashMap<>(partitions.size());
	// use a short-lived consumer to fetch the offsets;
	// this is ok because this is a one-time operation that happens only on startup
	try (KafkaConsumer<?, ?> consumer = new KafkaConsumer(properties)) {
		for (Map.Entry<TopicPartition, OffsetAndTimestamp> partitionToOffset :
			consumer.offsetsForTimes(partitionOffsetsRequest).entrySet()) {

			result.put(
				new KafkaTopicPartition(partitionToOffset.getKey().topic(), partitionToOffset.getKey().partition()),
				(partitionToOffset.getValue() == null) ? null : partitionToOffset.getValue().offset());
		}

	}
	return result;
}
 
Example #24
Source File: FlinkKafkaConsumer010.java    From flink with Apache License 2.0
@Override
protected Map<KafkaTopicPartition, Long> fetchOffsetsWithTimestamp(
		Collection<KafkaTopicPartition> partitions,
		long timestamp) {

	Map<TopicPartition, Long> partitionOffsetsRequest = new HashMap<>(partitions.size());
	for (KafkaTopicPartition partition : partitions) {
		partitionOffsetsRequest.put(
			new TopicPartition(partition.getTopic(), partition.getPartition()),
			timestamp);
	}

	final Map<KafkaTopicPartition, Long> result = new HashMap<>(partitions.size());

	// use a short-lived consumer to fetch the offsets;
	// this is ok because this is a one-time operation that happens only on startup
	try (KafkaConsumer<?, ?> consumer = new KafkaConsumer(properties)) {
		for (Map.Entry<TopicPartition, OffsetAndTimestamp> partitionToOffset :
			consumer.offsetsForTimes(partitionOffsetsRequest).entrySet()) {

			result.put(
				new KafkaTopicPartition(partitionToOffset.getKey().topic(), partitionToOffset.getKey().partition()),
				(partitionToOffset.getValue() == null) ? null : partitionToOffset.getValue().offset());
		}
	}

	return result;
}
 
Example #25
Source File: ConsumerOffsetClientTest.java    From common-kafka with Apache License 2.0
@Test
public void getOffsetsForTimes() {
    Map<TopicPartition, OffsetAndTimestamp> offsets = new HashMap<>();
    offsets.put(new TopicPartition("topic1", 0), new OffsetAndTimestamp(123L, 10));
    offsets.put(new TopicPartition("topic1", 1), new OffsetAndTimestamp(234L, 10));
    offsets.put(new TopicPartition("topic2", 0), new OffsetAndTimestamp(0L, 10));
    offsets.put(new TopicPartition("topic2", 1), new OffsetAndTimestamp(0L, 10));

    Map<TopicPartition, Long> longOffsets = offsets.entrySet()
            .stream().collect(Collectors.toMap(Map.Entry::getKey, e -> e.getValue().offset()));

    when(consumer.partitionsFor("topic1")).thenReturn(Arrays.asList(
            new PartitionInfo("topic1", 0, null, null, null),
            new PartitionInfo("topic1", 1, null, null, null)));
    when(consumer.partitionsFor("topic2")).thenReturn(Arrays.asList(
            new PartitionInfo("topic2", 0, null, null, null),
            new PartitionInfo("topic2", 1, null, null, null)));

    when(consumer.offsetsForTimes(anyObject())).thenReturn(offsets);

    long time = 10L;
    assertThat(client.getOffsetsForTimes(Arrays.asList("topic1", "topic2"), time), is(longOffsets));

    // All offsets were found, so there is no need to look up any end offsets
    verify(consumer, never()).endOffsets(anyObject());

    verify(consumer).offsetsForTimes(offsetsRequests.capture());

    Map<TopicPartition, Long> requestValue = offsetsRequests.getValue();
    Set<TopicPartition> topicPartitions = requestValue.keySet();
    IntStream.range(0, 2).forEach( i -> {
            assertThat(topicPartitions, hasItem(new TopicPartition("topic1", i)));
            assertThat(topicPartitions, hasItem(new TopicPartition("topic2", i)));
        }
    );
    requestValue.values().forEach(i -> assertThat(i, is(time)));
}
 
Example #26
Source File: FlinkKafkaConsumer.java    From Flink-CEPplus with Apache License 2.0
@Override
protected Map<KafkaTopicPartition, Long> fetchOffsetsWithTimestamp(
	Collection<KafkaTopicPartition> partitions,
	long timestamp) {

	Map<TopicPartition, Long> partitionOffsetsRequest = new HashMap<>(partitions.size());
	for (KafkaTopicPartition partition : partitions) {
		partitionOffsetsRequest.put(
			new TopicPartition(partition.getTopic(), partition.getPartition()),
			timestamp);
	}

	final Map<KafkaTopicPartition, Long> result = new HashMap<>(partitions.size());
	// use a short-lived consumer to fetch the offsets;
	// this is ok because this is a one-time operation that happens only on startup
	try (KafkaConsumer<?, ?> consumer = new KafkaConsumer(properties)) {
		for (Map.Entry<TopicPartition, OffsetAndTimestamp> partitionToOffset :
			consumer.offsetsForTimes(partitionOffsetsRequest).entrySet()) {

			result.put(
				new KafkaTopicPartition(partitionToOffset.getKey().topic(), partitionToOffset.getKey().partition()),
				(partitionToOffset.getValue() == null) ? null : partitionToOffset.getValue().offset());
		}

	}
	return result;
}
 
Example #27
Source File: KafkaConfigUtil.java    From flink-learning with Apache License 2.0
private static Map<KafkaTopicPartition, Long> buildOffsetByTime(Properties props, ParameterTool parameterTool, Long time) {
    props.setProperty("group.id", "query_time_" + time);
    KafkaConsumer consumer = new KafkaConsumer(props);
    List<PartitionInfo> partitionsFor = consumer.partitionsFor(parameterTool.getRequired(PropertiesConstants.METRICS_TOPIC));
    Map<TopicPartition, Long> partitionInfoLongMap = new HashMap<>();
    for (PartitionInfo partitionInfo : partitionsFor) {
        partitionInfoLongMap.put(new TopicPartition(partitionInfo.topic(), partitionInfo.partition()), time);
    }
    Map<TopicPartition, OffsetAndTimestamp> offsetResult = consumer.offsetsForTimes(partitionInfoLongMap);
    Map<KafkaTopicPartition, Long> partitionOffset = new HashMap<>();
    offsetResult.forEach((key, value) -> partitionOffset.put(new KafkaTopicPartition(key.topic(), key.partition()), value.offset()));

    consumer.close();
    return partitionOffset;
}
 