Java Code Examples for org.apache.kafka.clients.consumer.KafkaConsumer#seek()
The following examples show how to use org.apache.kafka.clients.consumer.KafkaConsumer#seek().
The project and source file for each example are noted in the header above it.
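Since seek(TopicPartition, long) can only be called on partitions the consumer currently owns, every example below first establishes an assignment, either directly via assign() or by polling after subscribe() until partitions have been assigned. As orientation before the examples, here is a minimal sketch of the basic pattern; the broker address, topic name, and offset are placeholders, not taken from any project below:

import java.time.Duration;
import java.util.Collections;
import java.util.Properties;

import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.TopicPartition;

public class SeekSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG,
                "org.apache.kafka.common.serialization.StringDeserializer");
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG,
                "org.apache.kafka.common.serialization.StringDeserializer");

        try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
            // assign() hands the consumer the partition directly (no group
            // rebalance), so seek() is valid immediately afterwards.
            TopicPartition tp = new TopicPartition("demo-topic", 0); // placeholder topic
            consumer.assign(Collections.singletonList(tp));
            consumer.seek(tp, 42L); // jump to an arbitrary offset
            ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(1000));
            for (ConsumerRecord<String, String> record : records) {
                System.out.println(record.offset() + ":" + record.value());
            }
        }
    }
}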
Example 1
Source File: SeekDemo.java From kafka_book_demo with Apache License 2.0
public static void main(String[] args) {
    Properties props = initConfig();
    KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props);
    consumer.subscribe(Arrays.asList(topic));
    // One poll so the coordinator assigns partitions; with a fixed timeout the
    // assignment may still be empty (Examples 5 and 6 show a safer loop).
    consumer.poll(Duration.ofMillis(2000));
    Set<TopicPartition> assignment = consumer.assignment();
    System.out.println(assignment);
    for (TopicPartition tp : assignment) {
        consumer.seek(tp, 10);
    }
    // consumer.seek(new TopicPartition(topic, 0), 10);
    while (true) {
        ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(1000));
        // consume the records
        for (ConsumerRecord<String, String> record : records) {
            System.out.println(record.offset() + ":" + record.value());
        }
    }
}
Example 2
Source File: KafkaUtils.java From kafka-spark-consumer with Apache License 2.0
public static ConsumerRecords<byte[], byte[]> fetchMessages(
        KafkaConfig config, KafkaConsumer<byte[], byte[]> consumer,
        Partition partition, long offset) {
    String topic = (String) config._stateConf.get(Config.KAFKA_TOPIC);
    int partitionId = partition.partition;
    TopicPartition topicAndPartition = new TopicPartition(topic, partitionId);
    consumer.seek(topicAndPartition, offset);
    ConsumerRecords<byte[], byte[]> records;
    try {
        records = consumer.poll(config._fillFreqMs / 2);
    } catch (InvalidOffsetException ex) {
        throw new OutOfRangeException(ex.getMessage());
    } catch (Exception e) {
        if (e instanceof KafkaException
                || e instanceof ConnectException
                || e instanceof SocketTimeoutException
                || e instanceof IOException
                || e instanceof UnresolvedAddressException) {
            LOG.warn("Network error when fetching messages:", e);
            throw new FailedFetchException(e);
        } else {
            throw new RuntimeException(e);
        }
    }
    return records;
}
Example 3
Source File: SeekToEnd.java From BigData-In-Practice with Apache License 2.0
public static void main(String[] args) {
    KafkaConsumer<String, String> consumer = new ConsumerFactory<String, String>().create();
    Set<TopicPartition> assignment = new HashSet<>();
    // Poll until partitions have actually been assigned.
    while (assignment.size() == 0) {
        consumer.poll(Duration.ofMillis(100));
        assignment = consumer.assignment();
    }
    Map<TopicPartition, Long> offsets = consumer.endOffsets(assignment);
    for (TopicPartition tp : assignment) {
        // consumer.seek(tp, offsets.get(tp));
        // Deliberately seeks one past the end offset; the resulting
        // out-of-range position is resolved by the auto.offset.reset policy.
        consumer.seek(tp, offsets.get(tp) + 1);
    }
    System.out.println(assignment);
    System.out.println(offsets);
    while (true) {
        ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(1000));
        // consume the records
        for (ConsumerRecord<String, String> record : records) {
            System.out.println(record.offset() + ":" + record.value());
        }
    }
}
Example 4
Source File: SeekDemo.java From BigData-In-Practice with Apache License 2.0
public static void main(String[] args) {
    KafkaConsumer<String, String> consumer = new ConsumerFactory<String, String>().create();
    consumer.poll(Duration.ofMillis(2000));
    Set<TopicPartition> assignment = consumer.assignment();
    System.out.println(assignment);
    for (TopicPartition tp : assignment) {
        consumer.seek(tp, 10);
    }
    // consumer.seek(new TopicPartition(ConsumerFactory.topic, 0), 10);
    while (true) {
        ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(1000));
        // consume the records
        for (ConsumerRecord<String, String> record : records) {
            System.out.println(record.offset() + ":" + record.value());
        }
    }
}
Example 5
Source File: SeekDemoAssignment.java From BigData-In-Practice with Apache License 2.0
public static void main(String[] args) {
    KafkaConsumer<String, String> consumer = new ConsumerFactory<String, String>().create();
    long start = System.currentTimeMillis();
    Set<TopicPartition> assignment = new HashSet<>();
    // Poll until partitions have actually been assigned, then time how long it took.
    while (assignment.size() == 0) {
        consumer.poll(Duration.ofMillis(100));
        assignment = consumer.assignment();
    }
    long end = System.currentTimeMillis();
    System.out.println(end - start);
    System.out.println(assignment);
    for (TopicPartition tp : assignment) {
        consumer.seek(tp, 10);
    }
    while (true) {
        ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(1000));
        // consume the records
        for (ConsumerRecord<String, String> record : records) {
            System.out.println(record.offset() + ":" + record.value());
        }
    }
}
Example 6
Source File: SeekDemoAssignment.java From kafka_book_demo with Apache License 2.0
public static void main(String[] args) {
    Properties props = initConfig();
    KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props);
    consumer.subscribe(Arrays.asList(topic));
    long start = System.currentTimeMillis();
    Set<TopicPartition> assignment = new HashSet<>();
    // Poll until partitions have actually been assigned, then time how long it took.
    while (assignment.size() == 0) {
        consumer.poll(Duration.ofMillis(100));
        assignment = consumer.assignment();
    }
    long end = System.currentTimeMillis();
    System.out.println(end - start);
    System.out.println(assignment);
    for (TopicPartition tp : assignment) {
        consumer.seek(tp, 10);
    }
    while (true) {
        ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(1000));
        // consume the records
        for (ConsumerRecord<String, String> record : records) {
            System.out.println(record.offset() + ":" + record.value());
        }
    }
}
Example 7
Source File: SeekToEnd.java From kafka_book_demo with Apache License 2.0
public static void main(String[] args) {
    Properties props = initConfig();
    KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props);
    consumer.subscribe(Arrays.asList(topic));
    Set<TopicPartition> assignment = new HashSet<>();
    while (assignment.size() == 0) {
        consumer.poll(Duration.ofMillis(100));
        assignment = consumer.assignment();
    }
    Map<TopicPartition, Long> offsets = consumer.endOffsets(assignment);
    for (TopicPartition tp : assignment) {
        // consumer.seek(tp, offsets.get(tp));
        consumer.seek(tp, offsets.get(tp) + 1);
    }
    System.out.println(assignment);
    System.out.println(offsets);
    while (true) {
        ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(1000));
        // consume the records
        for (ConsumerRecord<String, String> record : records) {
            System.out.println(record.offset() + ":" + record.value());
        }
    }
}
Example 8
Source File: SinkerKafkaSource.java From DBus with Apache License 2.0
public SinkerKafkaSource() throws IOException, PropertyException {
    Properties config = ConfUtils.getProps(CONFIG_PROPERTIES);
    topic = config.getProperty(Constants.SINKER_HEARTBEAT_TOPIC);
    if (topic == null) {
        // original message in Chinese: "the config value must not be empty"
        throw new PropertyException("[sinker] config value must not be empty! " + Constants.SINKER_HEARTBEAT_TOPIC);
    }
    topicPartition = new TopicPartition(topic, 0);

    Properties statProps = ConfUtils.getProps(CONSUMER_PROPERTIES);
    statProps.setProperty("enable.auto.commit", "true");
    statProps.setProperty("client.id", "heartbeat_consumer_sinker_client");
    List<TopicPartition> topics = Arrays.asList(topicPartition);
    // security
    if (KafkaUtil.checkSecurity()) {
        statProps.put(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, "SASL_PLAINTEXT");
    }
    LOG.info("[sinker] SinkerKafkaSource message: set max.poll.records=1000");
    statProps.setProperty("max.poll.records", "1000");

    consumer = new KafkaConsumer<>(statProps);
    consumer.assign(topics);

    long beforeOffset = consumer.position(topicPartition);
    String offset = config.getProperty("sinker.kafka.offset");
    if (StringUtils.isBlank(offset) || offset.equalsIgnoreCase("none")) {
        // do nothing: keep the committed position
    } else if (offset.equalsIgnoreCase("begin")) {
        consumer.seekToBeginning(Lists.newArrayList(topicPartition));
    } else if (offset.equalsIgnoreCase("end")) {
        consumer.seekToEnd(Lists.newArrayList(topicPartition));
    } else {
        long nOffset = Long.parseLong(offset);
        consumer.seek(topicPartition, nOffset);
    }
    long afterOffset = consumer.position(topicPartition);
    LOG.info("[sinker] SinkerKafkaSource init OK. beforeOffset={}, afterOffset={}", beforeOffset, afterOffset);
}
Example 9
Source File: DoctorKafkaActionsServlet.java From doctorkafka with Apache License 2.0
private List<ConsumerRecord<byte[], byte[]>> retrieveActionReportMessages() {
    DoctorKafkaConfig doctorKafkaConfig = DoctorKafkaMain.doctorKafka.getDoctorKafkaConfig();
    String zkUrl = doctorKafkaConfig.getBrokerstatsZkurl();
    String actionReportTopic = doctorKafkaConfig.getActionReportTopic();

    Properties properties = OperatorUtil.createKafkaConsumerProperties(
        zkUrl, OPERATOR_ACTIONS_CONSUMER_GROUP,
        doctorKafkaConfig.getActionReportProducerSecurityProtocol(),
        doctorKafkaConfig.getActionReportProducerSslConfigs());
    KafkaConsumer<byte[], byte[]> consumer = new KafkaConsumer<>(properties);

    TopicPartition operatorReportTopicPartition = new TopicPartition(actionReportTopic, 0);
    List<TopicPartition> tps = new ArrayList<>();
    tps.add(operatorReportTopicPartition);
    consumer.assign(tps);

    Map<TopicPartition, Long> beginOffsets = consumer.beginningOffsets(tps);
    Map<TopicPartition, Long> endOffsets = consumer.endOffsets(tps);
    for (TopicPartition tp : endOffsets.keySet()) {
        long numMessages = endOffsets.get(tp) - beginOffsets.get(tp);
        LOG.info("{} : offsets [{}, {}], num messages : {}",
            tp, beginOffsets.get(tp), endOffsets.get(tp), numMessages);
        // Rewind to at most NUM_MESSAGES before the end, but never before the beginning.
        consumer.seek(tp, Math.max(beginOffsets.get(tp), endOffsets.get(tp) - NUM_MESSAGES));
    }

    ConsumerRecords<byte[], byte[]> records = consumer.poll(CONSUMER_POLL_TIMEOUT_MS);
    List<ConsumerRecord<byte[], byte[]>> recordList = new ArrayList<>();
    while (!records.isEmpty()) {
        for (ConsumerRecord<byte[], byte[]> record : records) {
            recordList.add(record);
        }
        records = consumer.poll(CONSUMER_POLL_TIMEOUT_MS);
    }
    LOG.info("Read {} messages", recordList.size());
    return recordList;
}
Example 10
Source File: NewApiTopicConsumer.java From jeesuite-libs with Apache License 2.0
/**
 * Reset offsets to the latest positions recorded as processed.
 */
private void resetCorrectOffsets(ConsumerWorker worker) {
    KafkaConsumer<String, Serializable> consumer = worker.consumer;
    Map<String, List<PartitionInfo>> topicInfos = consumer.listTopics();
    Set<String> topics = topicInfos.keySet();

    List<String> expectTopics = new ArrayList<>(topicHandlers.keySet());
    List<PartitionInfo> partitions = null;
    consumer.poll(200);
    for (String topic : topics) {
        if (!expectTopics.contains(topic)) continue;
        partitions = topicInfos.get(topic);
        for (PartitionInfo partition : partitions) {
            try {
                // the expected offset: the latest one recorded as processed
                long expectOffsets = consumerContext.getLatestProcessedOffsets(topic, partition.partition());
                TopicPartition topicPartition = new TopicPartition(partition.topic(), partition.partition());
                OffsetAndMetadata metadata = consumer.committed(topicPartition);
                Set<TopicPartition> assignment = consumer.assignment();
                if (assignment.contains(topicPartition)) {
                    if (expectOffsets > 0 && expectOffsets < metadata.offset()) {
                        consumer.seek(topicPartition, expectOffsets);
                        // consumer.seekToBeginning(assignment);
                        logger.info(">>>>>>> seek Topic[{}] partition[{}] from {} to {}",
                            topic, partition.partition(), metadata.offset(), expectOffsets);
                    }
                }
            } catch (Exception e) {
                logger.warn("try seek topic[" + topic + "] partition[" + partition.partition() + "] offsets error");
            }
        }
    }
    consumer.resume(consumer.assignment());
}
Example 11
Source File: KafkaEventSource.java From mewbase with MIT License
@Override
public CompletableFuture<Subscription> subscribeFromMostRecent(String channelName, EventHandler eventHandler) {
    TopicPartition partition0 = new TopicPartition(channelName, partitionZeroOnly);
    KafkaConsumer<String, byte[]> kafkaConsumer = createAndAssignConsumer(partition0);
    // Find the end of the partition, then step back one so the most recent
    // event is redelivered.
    kafkaConsumer.seekToEnd(Arrays.asList(partition0));
    final long offset = kafkaConsumer.position(partition0);
    kafkaConsumer.seek(partition0, offset - 1);
    return CompletableFuture.completedFuture(createAndRegisterSubscription(kafkaConsumer, eventHandler));
}
Example 12
Source File: KafkaEventSource.java From mewbase with MIT License
@Override
public CompletableFuture<Subscription> subscribeFromEventNumber(String channelName, Long startInclusive, EventHandler eventHandler) {
    TopicPartition partition0 = new TopicPartition(channelName, partitionZeroOnly);
    KafkaConsumer<String, byte[]> kafkaConsumer = createAndAssignConsumer(partition0);
    kafkaConsumer.seek(partition0, startInclusive); // to include this jump back one
    return CompletableFuture.completedFuture(createAndRegisterSubscription(kafkaConsumer, eventHandler));
}
Example 13
Source File: KafkaEventSource.java From mewbase with MIT License
@Override
public CompletableFuture<Subscription> subscribeFromInstant(String channelName, Instant startInstant, EventHandler eventHandler) {
    TopicPartition partition0 = new TopicPartition(channelName, partitionZeroOnly);
    KafkaConsumer<String, byte[]> kafkaConsumer = createAndAssignConsumer(partition0);
    Map<TopicPartition, Long> timeForPartition0 = new HashMap<>(1);
    timeForPartition0.put(partition0, startInstant.toEpochMilli());
    OffsetAndTimestamp offsetAndTimestamp = kafkaConsumer.offsetsForTimes(timeForPartition0).get(partition0);
    kafkaConsumer.seek(partition0, offsetAndTimestamp.offset());
    return CompletableFuture.completedFuture(createAndRegisterSubscription(kafkaConsumer, eventHandler));
}
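One caveat with Example 13: offsetsForTimes() maps a partition to null when no record has a timestamp at or after the requested time, so the seek above would throw a NullPointerException in that case. A minimal defensive variant, written as a standalone helper; this is a sketch, not the mewbase code, and the fall-back to seekToEnd() is an assumption about the desired behavior:

// Hypothetical helper: seek to the first record at or after the given instant,
// falling back to the end of the partition when no such record exists.
static void seekToInstant(KafkaConsumer<?, ?> consumer, TopicPartition tp, Instant instant) {
    Map<TopicPartition, Long> query = Collections.singletonMap(tp, instant.toEpochMilli());
    OffsetAndTimestamp hit = consumer.offsetsForTimes(query).get(tp);
    if (hit != null) {
        consumer.seek(tp, hit.offset());                    // a matching record exists
    } else {
        consumer.seekToEnd(Collections.singletonList(tp));  // nothing at or after the instant
    }
}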
Example 14
Source File: SimulateResultService.java From SkaETL with Apache License 2.0
private void resetOffset(KafkaConsumer kafkaConsumer, TopicPartition topicPartition, long newPosition) {
    log.error("Resetting partition position on {} partition {} to {}",
        topicPartition.topic(), topicPartition.partition(), newPosition);
    kafkaConsumer.seek(topicPartition, newPosition);
}
Example 15
Source File: KafkaConsumerCommand.java From azeroth with Apache License 2.0
public void resetTopicOffsets(String groupId, String topic, int partition, long newOffsets) {
    KafkaConsumer<String, Serializable> kafkaConsumer = getConsumer(groupId);
    kafkaConsumer.seek(new TopicPartition(topic, partition), newOffsets);
}
Example 16
Source File: BrokerStatsFilter.java From doctorkafka with Apache License 2.0
public static List<BrokerStats> processOnePartition(String zkUrl, TopicPartition topicPartition,
        long startOffset, long endOffset, Set<String> brokerNames) {
    KafkaConsumer<byte[], byte[]> kafkaConsumer = null;
    List<BrokerStats> result = new ArrayList<>();
    try {
        String brokers = KafkaUtils.getBrokers(zkUrl, SecurityProtocol.PLAINTEXT);
        LOG.info("ZkUrl: {}, Brokers: {}", zkUrl, brokers);

        Properties props = new Properties();
        props.put(KafkaUtils.BOOTSTRAP_SERVERS, brokers);
        props.put(KafkaUtils.ENABLE_AUTO_COMMIT, "false");
        props.put(KafkaUtils.GROUP_ID, "kafka_operator" + topicPartition);
        props.put(KafkaUtils.KEY_DESERIALIZER, "org.apache.kafka.common.serialization.ByteArrayDeserializer");
        props.put(KafkaUtils.VALUE_DESERIALIZER, "org.apache.kafka.common.serialization.ByteArrayDeserializer");
        props.put(KafkaUtils.MAX_POLL_RECORDS, 2000);
        props.put("max.partition.fetch.bytes", 1048576 * 4);

        kafkaConsumer = new KafkaConsumer<>(props);
        Set<TopicPartition> topicPartitions = new HashSet<>();
        topicPartitions.add(topicPartition);
        kafkaConsumer.assign(topicPartitions);
        kafkaConsumer.seek(topicPartition, startOffset);

        ConsumerRecords<byte[], byte[]> records = null;
        while (kafkaConsumer.position(topicPartition) < endOffset) {
            records = kafkaConsumer.poll(100);
            for (ConsumerRecord<byte[], byte[]> record : records) {
                BrokerStats brokerStats = OperatorUtil.deserializeBrokerStats(record);
                if (brokerStats == null || brokerStats.getName() == null) {
                    continue;
                }
                if (brokerNames.contains(brokerStats.getName())) {
                    result.add(brokerStats);
                }
            }
        }
    } catch (Exception e) {
        LOG.error("Exception in processing brokerstats", e);
    } finally {
        if (kafkaConsumer != null) {
            kafkaConsumer.close();
        }
    }
    return result;
}
Example 17
Source File: PastReplicaStatsProcessor.java From doctorkafka with Apache License 2.0
public void run() {
    KafkaConsumer<byte[], byte[]> kafkaConsumer = null;
    try {
        String brokers = KafkaUtils.getBrokers(zkUrl, securityProtocol);
        LOG.info("ZkUrl: {}, Brokers: {}", zkUrl, brokers);

        Properties props = new Properties();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, brokers);
        props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false");
        props.put(ConsumerConfig.GROUP_ID_CONFIG, "doctorkafka_" + topicPartition);
        props.put(KafkaUtils.KEY_DESERIALIZER, "org.apache.kafka.common.serialization.ByteArrayDeserializer");
        props.put(KafkaUtils.VALUE_DESERIALIZER, "org.apache.kafka.common.serialization.ByteArrayDeserializer");
        props.put(KafkaUtils.MAX_POLL_RECORDS, 2000);
        props.put("max.partition.fetch.bytes", 1048576 * 4);

        kafkaConsumer = new KafkaConsumer<>(props);
        Set<TopicPartition> topicPartitions = new HashSet<>();
        topicPartitions.add(topicPartition);
        kafkaConsumer.assign(topicPartitions);
        kafkaConsumer.seek(topicPartition, startOffset);

        ConsumerRecords<byte[], byte[]> records = null;
        while (kafkaConsumer.position(topicPartition) < endOffset) {
            records = kafkaConsumer.poll(100);
            for (ConsumerRecord<byte[], byte[]> record : records) {
                BrokerStats brokerStats = OperatorUtil.deserializeBrokerStats(record);
                if (brokerStats == null || brokerStats.getName() == null) {
                    OpenTsdbMetricConverter.incr(DoctorKafkaMetrics.MESSAGE_DESERIALIZE_ERROR, 1);
                    continue;
                }
                replicaStatsManager.update(brokerStats);
            }
        }
    } catch (Exception e) {
        LOG.error("Exception in processing brokerstats", e);
    } finally {
        if (kafkaConsumer != null) {
            kafkaConsumer.close();
        }
    }
}
Example 18
Source File: ExactlyOnceStaticConsumer.java From javabase with Apache License 2.0
private static void readMessages() throws InterruptedException, IOException {
    KafkaConsumer<String, String> consumer = createConsumer();
    String topic = "normal-topic";
    int partition = 1;
    TopicPartition topicPartition = registerConsumerToSpecificPartition(consumer, topic, partition);
    // Read the offset for the topic and partition from external storage.
    long offset = offsetManager.readOffsetFromExternalStore(topic, partition);
    // Use seek and go to exact offset for that topic and partition.
    consumer.seek(topicPartition, offset);
    processRecords(consumer);
}
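Example 18 relies on helpers that are not shown on this page (registerConsumerToSpecificPartition, offsetManager, processRecords). Judging from the call site, the registration step amounts to a manual assign(); a minimal sketch under that assumption, not the actual javabase implementation:

// Hypothetical reconstruction of the helper used above: manually assign the
// consumer to a single partition so that seek() can be called on it.
private static TopicPartition registerConsumerToSpecificPartition(
        KafkaConsumer<String, String> consumer, String topic, int partition) {
    TopicPartition topicPartition = new TopicPartition(topic, partition);
    consumer.assign(Collections.singletonList(topicPartition));
    return topicPartition;
}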
Example 19
Source File: KafkaConsumerCommand.java From jeesuite-libs with Apache License 2.0
public void resetTopicOffsets(String groupId, String topic, int partition, long newOffsets) {
    KafkaConsumer<String, Serializable> kafkaConsumer = getConsumer(groupId);
    kafkaConsumer.seek(new TopicPartition(topic, partition), newOffsets);
}