Java Code Examples for org.apache.flink.streaming.connectors.kafka.internals.KafkaTopicPartitionState#setCommittedOffset()

The following examples show how to use org.apache.flink.streaming.connectors.kafka.internals.KafkaTopicPartitionState#setCommittedOffset().
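All of these examples come from the doCommitInternalOffsetsToKafka() implementations of Flink's Kafka fetchers, and they follow the same pattern: look up the last processed offset for every subscribed partition, add one (Kafka commits the offset of the next record to read, not of the last record processed), record that value in the partition state via setCommittedOffset(), and hand the resulting map to the consumer thread. The committed offset stored this way is informational, e.g. for exposing consumer-group progress; as the comments note, it does not affect Flink's own checkpointed state. The snippet below is a minimal, self-contained sketch of just that off-by-one commit convention. It uses only the Kafka client types, a plain map stands in for Flink's KafkaTopicPartitionState, and the class and method names are illustrative, not part of Flink.

import java.util.HashMap;
import java.util.Map;

import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.TopicPartition;

public class OffsetCommitConventionSketch {

	/**
	 * Turns "last processed offset" per partition into the map handed to the
	 * KafkaConsumer. Kafka expects the offset of the next record to read, so the
	 * committed value is lastProcessedOffset + 1 -- the same value each fetcher
	 * example below records via KafkaTopicPartitionState#setCommittedOffset().
	 */
	static Map<TopicPartition, OffsetAndMetadata> toOffsetsToCommit(
			Map<TopicPartition, Long> lastProcessedOffsets) {

		Map<TopicPartition, OffsetAndMetadata> offsetsToCommit = new HashMap<>(lastProcessedOffsets.size());

		for (Map.Entry<TopicPartition, Long> entry : lastProcessedOffsets.entrySet()) {
			long lastProcessed = entry.getValue();
			if (lastProcessed < 0) {
				throw new IllegalStateException("Illegal offset value to commit");
			}
			offsetsToCommit.put(entry.getKey(), new OffsetAndMetadata(lastProcessed + 1));
		}
		return offsetsToCommit;
	}

	public static void main(String[] args) {
		Map<TopicPartition, Long> lastProcessed = new HashMap<>();
		lastProcessed.put(new TopicPartition("events", 0), 41L);

		// Committing 42 means: the next record the consumer group reads from
		// partition events-0 is the one at offset 42.
		System.out.println(toOffsetsToCommit(lastProcessed));
	}
}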
Example 1
Source File: Kafka010Fetcher.java    From flink with Apache License 2.0
@Override
protected void doCommitInternalOffsetsToKafka(
		Map<KafkaTopicPartition, Long> offsets,
		@Nonnull KafkaCommitCallback commitCallback) throws Exception {

	List<KafkaTopicPartitionState<T, TopicPartition>> partitions = subscribedPartitionStates();

	Map<TopicPartition, OffsetAndMetadata> offsetsToCommit = new HashMap<>(partitions.size());

	for (KafkaTopicPartitionState<T, TopicPartition> partition : partitions) {
		Long lastProcessedOffset = offsets.get(partition.getKafkaTopicPartition());
		if (lastProcessedOffset != null) {
			checkState(lastProcessedOffset >= 0, "Illegal offset value to commit");

			// committed offsets through the KafkaConsumer need to be 1 more than the last processed offset.
			// This does not affect Flink's checkpoints/saved state.
			long offsetToCommit = lastProcessedOffset + 1;

			offsetsToCommit.put(partition.getKafkaPartitionHandle(), new OffsetAndMetadata(offsetToCommit));
			partition.setCommittedOffset(offsetToCommit);
		}
	}

	// record the work to be committed by the main consumer thread and make sure the consumer notices that
	consumerThread.setOffsetsToCommit(offsetsToCommit, commitCallback);
}
 
Example 2
Source File: Kafka09Fetcher.java    From Flink-CEPplus with Apache License 2.0
@Override
protected void doCommitInternalOffsetsToKafka(
		Map<KafkaTopicPartition, Long> offsets,
		@Nonnull KafkaCommitCallback commitCallback) throws Exception {

	@SuppressWarnings("unchecked")
	List<KafkaTopicPartitionState<TopicPartition>> partitions = subscribedPartitionStates();

	Map<TopicPartition, OffsetAndMetadata> offsetsToCommit = new HashMap<>(partitions.size());

	for (KafkaTopicPartitionState<TopicPartition> partition : partitions) {
		Long lastProcessedOffset = offsets.get(partition.getKafkaTopicPartition());
		if (lastProcessedOffset != null) {
			checkState(lastProcessedOffset >= 0, "Illegal offset value to commit");

			// committed offsets through the KafkaConsumer need to be 1 more than the last processed offset.
			// This does not affect Flink's checkpoints/saved state.
			long offsetToCommit = lastProcessedOffset + 1;

			offsetsToCommit.put(partition.getKafkaPartitionHandle(), new OffsetAndMetadata(offsetToCommit));
			partition.setCommittedOffset(offsetToCommit);
		}
	}

	// record the work to be committed by the main consumer thread and make sure the consumer notices that
	consumerThread.setOffsetsToCommit(offsetsToCommit, commitCallback);
}
 
Example 3
Source File: KafkaFetcher.java    From Flink-CEPplus with Apache License 2.0
@Override
protected void doCommitInternalOffsetsToKafka(
	Map<KafkaTopicPartition, Long> offsets,
	@Nonnull KafkaCommitCallback commitCallback) throws Exception {

	@SuppressWarnings("unchecked")
	List<KafkaTopicPartitionState<TopicPartition>> partitions = subscribedPartitionStates();

	Map<TopicPartition, OffsetAndMetadata> offsetsToCommit = new HashMap<>(partitions.size());

	for (KafkaTopicPartitionState<TopicPartition> partition : partitions) {
		Long lastProcessedOffset = offsets.get(partition.getKafkaTopicPartition());
		if (lastProcessedOffset != null) {
			checkState(lastProcessedOffset >= 0, "Illegal offset value to commit");

			// committed offsets through the KafkaConsumer need to be 1 more than the last processed offset.
			// This does not affect Flink's checkpoints/saved state.
			long offsetToCommit = lastProcessedOffset + 1;

			offsetsToCommit.put(partition.getKafkaPartitionHandle(), new OffsetAndMetadata(offsetToCommit));
			partition.setCommittedOffset(offsetToCommit);
		}
	}

	// record the work to be committed by the main consumer thread and make sure the consumer notices that
	consumerThread.setOffsetsToCommit(offsetsToCommit, commitCallback);
}
 
Example 4
Source File: Kafka09Fetcher.java    From flink with Apache License 2.0
@Override
protected void doCommitInternalOffsetsToKafka(
		Map<KafkaTopicPartition, Long> offsets,
		@Nonnull KafkaCommitCallback commitCallback) throws Exception {

	@SuppressWarnings("unchecked")
	List<KafkaTopicPartitionState<TopicPartition>> partitions = subscribedPartitionStates();

	Map<TopicPartition, OffsetAndMetadata> offsetsToCommit = new HashMap<>(partitions.size());

	for (KafkaTopicPartitionState<TopicPartition> partition : partitions) {
		Long lastProcessedOffset = offsets.get(partition.getKafkaTopicPartition());
		if (lastProcessedOffset != null) {
			checkState(lastProcessedOffset >= 0, "Illegal offset value to commit");

			// committed offsets through the KafkaConsumer need to be 1 more than the last processed offset.
			// This does not affect Flink's checkpoints/saved state.
			long offsetToCommit = lastProcessedOffset + 1;

			offsetsToCommit.put(partition.getKafkaPartitionHandle(), new OffsetAndMetadata(offsetToCommit));
			partition.setCommittedOffset(offsetToCommit);
		}
	}

	// record the work to be committed by the main consumer thread and make sure the consumer notices that
	consumerThread.setOffsetsToCommit(offsetsToCommit, commitCallback);
}
 
Example 5
Source File: KafkaFetcher.java    From flink with Apache License 2.0
@Override
protected void doCommitInternalOffsetsToKafka(
	Map<KafkaTopicPartition, Long> offsets,
	@Nonnull KafkaCommitCallback commitCallback) throws Exception {

	@SuppressWarnings("unchecked")
	List<KafkaTopicPartitionState<TopicPartition>> partitions = subscribedPartitionStates();

	Map<TopicPartition, OffsetAndMetadata> offsetsToCommit = new HashMap<>(partitions.size());

	for (KafkaTopicPartitionState<TopicPartition> partition : partitions) {
		Long lastProcessedOffset = offsets.get(partition.getKafkaTopicPartition());
		if (lastProcessedOffset != null) {
			checkState(lastProcessedOffset >= 0, "Illegal offset value to commit");

			// committed offsets through the KafkaConsumer need to be 1 more than the last processed offset.
			// This does not affect Flink's checkpoints/saved state.
			long offsetToCommit = lastProcessedOffset + 1;

			offsetsToCommit.put(partition.getKafkaPartitionHandle(), new OffsetAndMetadata(offsetToCommit));
			partition.setCommittedOffset(offsetToCommit);
		}
	}

	// record the work to be committed by the main consumer thread and make sure the consumer notices that
	consumerThread.setOffsetsToCommit(offsetsToCommit, commitCallback);
}
 
Example 6
Source File: KafkaFetcher.java    From flink with Apache License 2.0
@Override
protected void doCommitInternalOffsetsToKafka(
	Map<KafkaTopicPartition, Long> offsets,
	@Nonnull KafkaCommitCallback commitCallback) throws Exception {

	@SuppressWarnings("unchecked")
	List<KafkaTopicPartitionState<T, TopicPartition>> partitions = subscribedPartitionStates();

	Map<TopicPartition, OffsetAndMetadata> offsetsToCommit = new HashMap<>(partitions.size());

	for (KafkaTopicPartitionState<T, TopicPartition> partition : partitions) {
		Long lastProcessedOffset = offsets.get(partition.getKafkaTopicPartition());
		if (lastProcessedOffset != null) {
			checkState(lastProcessedOffset >= 0, "Illegal offset value to commit");

			// committed offsets through the KafkaConsumer need to be 1 more than the last processed offset.
			// This does not affect Flink's checkpoints/saved state.
			long offsetToCommit = lastProcessedOffset + 1;

			offsetsToCommit.put(partition.getKafkaPartitionHandle(), new OffsetAndMetadata(offsetToCommit));
			partition.setCommittedOffset(offsetToCommit);
		}
	}

	// record the work to be committed by the main consumer thread and make sure the consumer notices that
	consumerThread.setOffsetsToCommit(offsetsToCommit, commitCallback);
}
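 
The last line of each example, consumerThread.setOffsetsToCommit(offsetsToCommit, commitCallback), does not commit anything itself; it only records the work for the thread that owns the KafkaConsumer, because the consumer is not safe for concurrent use. The following is a simplified, hypothetical sketch of that hand-off pattern; the class and method names are illustrative, and Flink's actual KafkaConsumerThread adds consumer wake-ups, error handling, and commit-callback plumbing on top of this.

import java.util.Map;
import java.util.concurrent.atomic.AtomicReference;

import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.TopicPartition;

class ConsumerThreadHandOffSketch {

	// Holds at most one pending commit request; a newer checkpoint's offsets
	// simply replace an older request that has not been picked up yet.
	private final AtomicReference<Map<TopicPartition, OffsetAndMetadata>> nextOffsetsToCommit =
			new AtomicReference<>();

	/** Called from the checkpointing thread (compare setOffsetsToCommit in the examples above). */
	void setOffsetsToCommit(Map<TopicPartition, OffsetAndMetadata> offsets) {
		nextOffsetsToCommit.set(offsets);
		// The real implementation additionally wakes the consumer up so that a
		// blocked poll() returns and the pending commit is noticed promptly.
	}

	/** Called from the thread that owns the KafkaConsumer, once per poll iteration. */
	void maybeCommit(KafkaConsumer<?, ?> consumer) {
		Map<TopicPartition, OffsetAndMetadata> toCommit = nextOffsetsToCommit.getAndSet(null);
		if (toCommit != null) {
			consumer.commitAsync(toCommit, (offsets, exception) -> {
				// Here the real connector notifies the KafkaCommitCallback passed to
				// doCommitInternalOffsetsToKafka() of success or failure.
			});
		}
	}
}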