kafka.api.OffsetRequest Java Examples
The following examples show how to use
kafka.api.OffsetRequest.
Each example links back to its original project and source file.
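For context: kafka.api.OffsetRequest is the Scala object behind Kafka 0.8's low-level SimpleConsumer offset API, and its two special timestamps, LatestTime() (-1L) and EarliestTime() (-2L), appear in almost every example below. The following minimal sketch is not taken from any of these projects; the class name, client id, broker address, and topic are illustrative placeholders. It shows the typical way those timestamps are sent to a broker through the Java-facing kafka.javaapi.OffsetRequest:

import java.util.Collections;
import java.util.Map;

import kafka.api.OffsetRequest;
import kafka.api.PartitionOffsetRequestInfo;
import kafka.common.TopicAndPartition;
import kafka.javaapi.OffsetResponse;
import kafka.javaapi.consumer.SimpleConsumer;

public class OffsetLookupSketch {

    // Returns the earliest or latest offset of a partition, depending on which
    // special timestamp is passed in as 'time'.
    static long lookupOffset(SimpleConsumer consumer, String topic, int partition, long time) {
        TopicAndPartition tp = new TopicAndPartition(topic, partition);
        // Request a single offset at the given (special) timestamp.
        Map<TopicAndPartition, PartitionOffsetRequestInfo> requestInfo =
                Collections.singletonMap(tp, new PartitionOffsetRequestInfo(time, 1));
        kafka.javaapi.OffsetRequest request = new kafka.javaapi.OffsetRequest(
                requestInfo, OffsetRequest.CurrentVersion(), "offset-lookup-sketch");
        OffsetResponse response = consumer.getOffsetsBefore(request);
        if (response.hasError()) {
            throw new RuntimeException("Offset lookup failed, error code: "
                    + response.errorCode(topic, partition));
        }
        return response.offsets(topic, partition)[0];
    }

    public static void main(String[] args) {
        // Broker address, topic, and client id are placeholders.
        SimpleConsumer consumer = new SimpleConsumer("localhost", 9092, 100000, 64 * 1024, "offset-lookup-sketch");
        try {
            long earliest = lookupOffset(consumer, "my-topic", 0, OffsetRequest.EarliestTime()); // -2L
            long latest = lookupOffset(consumer, "my-topic", 0, OffsetRequest.LatestTime());     // -1L
            System.out.println("earliest=" + earliest + ", latest=" + latest);
        } finally {
            consumer.close();
        }
    }
}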
Example #1
Source File: KafkaBoltTest.java From storm-kafka-0.8-plus with Apache License 2.0
private boolean verifyMessage(String key, String message) {
    long lastMessageOffset = KafkaUtils.getOffset(simpleConsumer, kafkaConfig.topic, 0, OffsetRequest.LatestTime()) - 1;
    ByteBufferMessageSet messageAndOffsets = KafkaUtils.fetchMessages(kafkaConfig, simpleConsumer,
            new Partition(Broker.fromString(broker.getBrokerConnectionString()), 0), lastMessageOffset);
    MessageAndOffset messageAndOffset = messageAndOffsets.iterator().next();
    Message kafkaMessage = messageAndOffset.message();
    ByteBuffer messageKeyBuffer = kafkaMessage.key();
    String keyString = null;
    String messageString = new String(Utils.toByteArray(kafkaMessage.payload()));
    if (messageKeyBuffer != null) {
        keyString = new String(Utils.toByteArray(messageKeyBuffer));
    }
    assertEquals(key, keyString);
    assertEquals(message, messageString);
    return true;
}
Example #2
Source File: KafkaSourceOp.java From PoseidonX with Apache License 2.0
private Properties initKafkaProperties(StreamingConfig conf) throws StreamingException {
    Properties kafkaProperties = new Properties();
    kafkaProperties.put(KafkaConfig.KAFKA_CON_ZK_CONNECT, conf.getStringValue(StreamingConfig.OPERATOR_KAFKA_ZOOKEEPERS));
    kafkaProperties.put(KafkaConfig.KAFKA_GROUP_ID, conf.getStringValue(StreamingConfig.OPERATOR_KAFKA_GROUPID));
    kafkaProperties.put(KafkaConfig.KAFKA_SERIAL_CLASS, conf.getStringValue(StreamingConfig.OPERATOR_KAFKA_MESSAGESERIALIZERCLASS));
    kafkaProperties.put(KafkaConfig.KAFKA_SESSION_TIME, conf.getStringValue(StreamingConfig.OPERATOR_KAFKA_ZKSESSIONTIMEOUT));
    kafkaProperties.put(KafkaConfig.KAFKA_SYNC_TIME, conf.getStringValue(StreamingConfig.OPERATOR_KAFKA_ZKSYNCTIME));
    if (conf.getBooleanValue(StreamingConfig.OPERATOR_KAFKA_READ_FROMBEGINNING)) {
        kafkaProperties.put(KafkaConfig.KAFKA_OFFSET_RESET, OffsetRequest.SmallestTimeString());
    } else {
        kafkaProperties.put(KafkaConfig.KAFKA_OFFSET_RESET, OffsetRequest.LargestTimeString());
    }
    return kafkaProperties;
}
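Note: OffsetRequest.SmallestTimeString() and OffsetRequest.LargestTimeString() return the strings "smallest" and "largest", the two legal values of the Kafka 0.8 auto.offset.reset consumer property, so this method maps the read-from-beginning flag onto that setting.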
Example #3
Source File: KafkaInputFormat.java From kangaroo with Apache License 2.0
@VisibleForTesting
List<Long> getOffsets(final SimpleConsumer consumer, final String topic, final int partitionNum,
        final long lastCommit, final long asOfTime, final int maxSplitsPerPartition) {
    // all offsets that exist for this partition (in descending order)
    final long[] allOffsets = consumer.getOffsetsBefore(topic, partitionNum, OffsetRequest.LatestTime(),
            Integer.MAX_VALUE);
    // this gets us an offset that is strictly before 'asOfTime', or zero if none exist before that time
    final long[] offsetsBeforeAsOf = consumer.getOffsetsBefore(topic, partitionNum, asOfTime, 1);
    final long includeAfter = offsetsBeforeAsOf.length == 1 ? offsetsBeforeAsOf[0] : 0;
    // note that the offsets are in descending order
    List<Long> result = Lists.newArrayList();
    for (final long offset : allOffsets) {
        if (offset > lastCommit && offset > includeAfter) {
            result.add(offset);
        } else {
            // we add "lastCommit" iff it is after "includeAfter"
            if (lastCommit > includeAfter) {
                result.add(lastCommit);
            }
            // we can break out of loop here bc offsets are in desc order, and we've hit the latest one to include
            break;
        }
    }
    // to get maxSplitsPerPartition number of splits, you need (maxSplitsPerPartition + 1) number of offsets.
    if (result.size() - 1 > maxSplitsPerPartition) {
        result = result.subList(result.size() - maxSplitsPerPartition - 1, result.size());
    }
    LOG.debug(String.format("Offsets for %s:%d:%d = %s", consumer.host(), consumer.port(), partitionNum, result));
    return result;
}
Example #4
Source File: KafkaUtilsTest.java From storm-kafka-0.8-plus with Apache License 2.0
@Test
public void getOffsetFromConfigAndFroceFromStart() {
    config.forceFromStart = true;
    config.startOffsetTime = OffsetRequest.EarliestTime();
    createTopicAndSendMessage();
    long earliestOffset = KafkaUtils.getOffset(simpleConsumer, config.topic, 0, OffsetRequest.EarliestTime());
    long offsetFromConfig = KafkaUtils.getOffset(simpleConsumer, config.topic, 0, config);
    assertThat(earliestOffset, is(equalTo(offsetFromConfig)));
}
Example #5
Source File: KafkaUtilsTest.java From storm-kafka-0.8-plus with Apache License 2.0
@Test
public void getOffsetFromConfigAndDontForceFromStart() {
    config.forceFromStart = false;
    config.startOffsetTime = OffsetRequest.EarliestTime();
    createTopicAndSendMessage();
    long latestOffset = KafkaUtils.getOffset(simpleConsumer, config.topic, 0, OffsetRequest.LatestTime());
    long offsetFromConfig = KafkaUtils.getOffset(simpleConsumer, config.topic, 0, config);
    assertThat(latestOffset, is(equalTo(offsetFromConfig)));
}
Example #6
Source File: KafkaUtilsTest.java From storm-kafka-0.8-plus with Apache License 2.0
@Test
public void fetchMessage() throws Exception {
    String value = "test";
    createTopicAndSendMessage(value);
    long offset = KafkaUtils.getOffset(simpleConsumer, config.topic, 0, OffsetRequest.LatestTime()) - 1;
    ByteBufferMessageSet messageAndOffsets = KafkaUtils.fetchMessages(config, simpleConsumer,
            new Partition(Broker.fromString(broker.getBrokerConnectionString()), 0), offset);
    String message = new String(Utils.toByteArray(messageAndOffsets.iterator().next().message().payload()));
    assertThat(message, is(equalTo(value)));
}
Example #7
Source File: KafkaUtilsTest.java From storm-kafka-0.8-plus with Apache License 2.0
@Test(expected = FailedFetchException.class)
public void brokerIsDown() throws Exception {
    int port = broker.getPort();
    broker.shutdown();
    SimpleConsumer simpleConsumer = new SimpleConsumer("localhost", port, 100, 1024, "testClient");
    try {
        KafkaUtils.fetchMessages(config, simpleConsumer,
                new Partition(Broker.fromString(broker.getBrokerConnectionString()), 0), OffsetRequest.LatestTime());
    } finally {
        simpleConsumer.close();
    }
}
Example #8
Source File: SimpleConsumerThread.java From Flink-CEPplus with Apache License 2.0
/**
 * For a set of partitions, if a partition is set with the special offsets {@link OffsetRequest#EarliestTime()}
 * or {@link OffsetRequest#LatestTime()}, replace them with actual offsets requested via a Kafka consumer.
 *
 * @param consumer The consumer connected to lead broker
 * @param partitions The list of partitions we need offsets for
 */
private static void requestAndSetEarliestOrLatestOffsetsFromKafka(
        SimpleConsumer consumer,
        List<KafkaTopicPartitionState<TopicAndPartition>> partitions) throws Exception {
    Map<TopicAndPartition, PartitionOffsetRequestInfo> requestInfo = new HashMap<>();
    for (KafkaTopicPartitionState<TopicAndPartition> part : partitions) {
        if (part.getOffset() == OffsetRequest.EarliestTime() || part.getOffset() == OffsetRequest.LatestTime()) {
            requestInfo.put(part.getKafkaPartitionHandle(), new PartitionOffsetRequestInfo(part.getOffset(), 1));
        }
    }
    requestAndSetOffsetsFromKafka(consumer, partitions, requestInfo);
}
Example #9
Source File: KafkaInfos.java From DCMonitor with MIT License
private long getTopicLogSize(String topic, int pid) {
    Option<Object> o = ZkUtils.getLeaderForPartition(zkClient, topic, pid);
    if (o.isEmpty() || o.get() == null) {
        log.error("No broker for partition %s - %s", topic, pid);
        return 0;
    }
    Integer leaderId = Int.unbox(o.get());
    SimpleConsumer consumer = consumerMap.get(leaderId);
    if (consumer == null) {
        consumer = createSimpleConsumer(leaderId);
    }
    // createSimpleConsumer may fail.
    if (consumer == null) {
        return 0;
    }
    consumerMap.put(leaderId, consumer);
    TopicAndPartition topicAndPartition = new TopicAndPartition(topic, pid);
    PartitionOffsetRequestInfo requestInfo = new PartitionOffsetRequestInfo(OffsetRequest.LatestTime(), 1);
    OffsetRequest request = new OffsetRequest(
        new Map1<TopicAndPartition, PartitionOffsetRequestInfo>(topicAndPartition, requestInfo),
        0,
        Request.OrdinaryConsumerId()
    );
    OffsetResponse response = consumer.getOffsetsBefore(request);
    PartitionOffsetsResponse offsetsResponse = response.partitionErrorAndOffsets().get(topicAndPartition).get();
    return scala.Long.unbox(offsetsResponse.offsets().head());
}
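Unlike most of the examples on this page, this one builds the request through Kafka's Scala API directly: kafka.api.OffsetRequest with a Scala Map1 and Request.OrdinaryConsumerId() as the replica id. Java code more commonly goes through the Java-facing kafka.javaapi.OffsetRequest, as in the sketch near the top of this page.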
Example #10
Source File: StatisticTopology.java From storm-statistic with Apache License 2.0
/**
 * BrokerHosts hosts: the list of brokers in the Kafka cluster
 * String topic: the topic to consume
 * String zkRoot: Kafka's directory in ZooKeeper (offsets of consumed Kafka messages are recorded under this node)
 * String id: an identifier for this consumer
 */
private static KafkaSpout createKafkaSpout() {
    String brokerZkStr = "uplooking01:2181,uplooking02:2181,uplooking03:2181";
    BrokerHosts hosts = new ZkHosts(brokerZkStr); // the Kafka brokers can be discovered via /brokers in ZooKeeper
    String topic = "f-k-s";
    String zkRoot = "/" + topic;
    String id = "consumer-id";
    SpoutConfig spoutConf = new SpoutConfig(hosts, topic, zkRoot, id);
    // In a local environment these settings also create the /f-k-s node in ZooKeeper;
    // in a cluster environment the /f-k-s node is created even without them.
    //spoutConf.zkServers = Arrays.asList(new String[]{"uplooking01", "uplooking02", "uplooking03"});
    //spoutConf.zkPort = 2181;
    // With this setting the spout does not replay earlier messages on startup; it reads from the latest offset.
    spoutConf.startOffsetTime = OffsetRequest.LatestTime();
    return new KafkaSpout(spoutConf);
}
Example #11
Source File: KafkaStreamReader.java From arcusplatform with Apache License 2.0
public KafkaStreamReader(
        KafkaConsumerConfig config,
        KafkaConsumer handler,
        Supplier<SimpleConsumer> consumerFactory
) {
    this.config = new KafkaConsumerConfig(config);
    this.handler = handler;
    this.consumerFactory = consumerFactory;
    this.executor = Executors.newCachedThreadPool();
    this.timestamp = config.getOffset().orElse(Instant.ofEpochMilli(kafka.api.OffsetRequest.LatestTime()));
    this.search = config.getScanSearch().orElse(null);
}
Example #12
Source File: Kafka08Fetcher.java From flink with Apache License 2.0
/**
 * Retrieve the behaviour of "auto.offset.reset" from the config properties.
 * A partition needs to fall back to "auto.offset.reset" as the default offset when
 * we can't find offsets in ZK to start from in {@link StartupMode#GROUP_OFFSETS} startup mode.
 *
 * @param config kafka consumer properties
 * @return either OffsetRequest.LatestTime() or OffsetRequest.EarliestTime()
 */
private static long getInvalidOffsetBehavior(Properties config) {
    final String val = config.getProperty(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "largest");
    if (val.equals("largest") || val.equals("latest")) {
        // "largest" is Kafka 0.8, "latest" is Kafka 0.9
        return OffsetRequest.LatestTime();
    } else {
        return OffsetRequest.EarliestTime();
    }
}
Example #13
Source File: SimpleConsumerThread.java From flink with Apache License 2.0
/**
 * For a set of partitions, if a partition is set with the special offsets {@link OffsetRequest#EarliestTime()}
 * or {@link OffsetRequest#LatestTime()}, replace them with actual offsets requested via a Kafka consumer.
 *
 * @param consumer The consumer connected to lead broker
 * @param partitions The list of partitions we need offsets for
 */
private static void requestAndSetEarliestOrLatestOffsetsFromKafka(
        SimpleConsumer consumer,
        List<KafkaTopicPartitionState<TopicAndPartition>> partitions) throws Exception {
    Map<TopicAndPartition, PartitionOffsetRequestInfo> requestInfo = new HashMap<>();
    for (KafkaTopicPartitionState<TopicAndPartition> part : partitions) {
        if (part.getOffset() == OffsetRequest.EarliestTime() || part.getOffset() == OffsetRequest.LatestTime()) {
            requestInfo.put(part.getKafkaPartitionHandle(), new PartitionOffsetRequestInfo(part.getOffset(), 1));
        }
    }
    requestAndSetOffsetsFromKafka(consumer, partitions, requestInfo);
}
Example #14
Source File: Kafka08Fetcher.java From Flink-CEPplus with Apache License 2.0
/**
 * Retrieve the behaviour of "auto.offset.reset" from the config properties.
 * A partition needs to fall back to "auto.offset.reset" as the default offset when
 * we can't find offsets in ZK to start from in {@link StartupMode#GROUP_OFFSETS} startup mode.
 *
 * @param config kafka consumer properties
 * @return either OffsetRequest.LatestTime() or OffsetRequest.EarliestTime()
 */
private static long getInvalidOffsetBehavior(Properties config) {
    final String val = config.getProperty(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "largest");
    if (val.equals("largest") || val.equals("latest")) {
        // "largest" is Kafka 0.8, "latest" is Kafka 0.9
        return OffsetRequest.LatestTime();
    } else {
        return OffsetRequest.EarliestTime();
    }
}
Example #15
Source File: KafkaStreamReader.java From arcusplatform with Apache License 2.0
public KafkaStreamReader scanToLatest() {
    this.timestamp = Instant.ofEpochMilli(kafka.api.OffsetRequest.LatestTime());
    return this;
}
Example #16
Source File: KafkaInputFormatTest.java From kangaroo with Apache License 2.0
@Test
public void testGetOffsets() throws Exception {
    final SimpleConsumer consumer = mock(SimpleConsumer.class);
    final long[] offsets = { 101, 91, 81, 71, 61, 51, 41, 31, 21, 11 };
    when(consumer.getOffsetsBefore("topic", 1, OffsetRequest.LatestTime(), Integer.MAX_VALUE)).thenReturn(offsets);
    when(consumer.getOffsetsBefore("topic", 1, 0, 1)).thenReturn(new long[] {});
    final KafkaInputFormat inputFormat = new KafkaInputFormat();

    // case 0: get everything (-1 last commit, 0 asOfTime, as many partitions as possible) -> all offsets
    long[] expected = offsets;
    List<Long> actual = inputFormat.getOffsets(consumer, "topic", 1, -1, 0, Integer.MAX_VALUE);
    compareArrayContents(offsets, actual);

    // case 1: lastCommit of 52 -> we should only get back the first 5 offsets + the lastCommit
    final int lastCommit = 52;
    expected = new long[6];
    System.arraycopy(offsets, 0, expected, 0, 6);
    expected[5] = lastCommit;
    actual = inputFormat.getOffsets(consumer, "topic", 1, lastCommit, 0, Integer.MAX_VALUE);
    compareArrayContents(expected, actual);

    // case 2: lastCommit of 52, asOfTime 51 -> still include last offsets
    final int asOfTime = 999;
    when(consumer.getOffsetsBefore("topic", 1, asOfTime, 1)).thenReturn(new long[] { 51 });
    actual = inputFormat.getOffsets(consumer, "topic", 1, lastCommit, asOfTime, Integer.MAX_VALUE);
    compareArrayContents(expected, actual);

    // case 3: lastCommit of 52, asOfTime 52 -> don't include last offsets
    when(consumer.getOffsetsBefore("topic", 1, asOfTime, 1)).thenReturn(new long[] { 52 });
    expected = Arrays.copyOfRange(offsets, 0, 5);
    actual = inputFormat.getOffsets(consumer, "topic", 1, lastCommit, asOfTime, Integer.MAX_VALUE);
    compareArrayContents(expected, actual);

    // case 4: maxSplitsPerPartition == number of commits (5) -> should include all 5 offsets
    actual = inputFormat.getOffsets(consumer, "topic", 1, lastCommit, asOfTime, 5);
    compareArrayContents(expected, actual);

    // case 5: maxSplitsPerPartition = number of commits - 1 (4) -> should STILL include all 5 offsets
    actual = inputFormat.getOffsets(consumer, "topic", 1, lastCommit, asOfTime, 4);
    compareArrayContents(expected, actual);

    // case 6: maxSplitsPerPartition = number of commits - 2 (3) -> should exclude the first (largest) offset
    actual = inputFormat.getOffsets(consumer, "topic", 1, lastCommit, asOfTime, 3);
    expected = Arrays.copyOfRange(offsets, 1, 5);
    compareArrayContents(expected, actual);

    // case 7: maxSplitsPerPartition = 1 -> should include just 2 commits
    actual = inputFormat.getOffsets(consumer, "topic", 1, lastCommit, asOfTime, 1);
    expected = Arrays.copyOfRange(offsets, 3, 5);
    compareArrayContents(expected, actual);
}
Example #17
Source File: KafkaClient.java From elasticsearch-river-kafka with Apache License 2.0
public long getNewestOffset(String topic, int partition) {
    return consumer.getOffsetsBefore(topic, partition, OffsetRequest.LatestTime(), 1)[0];
}
Example #18
Source File: KafkaClient.java From elasticsearch-river-kafka with Apache License 2.0
public long getOldestOffset(String topic, int partition) {
    return consumer.getOffsetsBefore(topic, partition, OffsetRequest.EarliestTime(), 1)[0];
}
Example #19
Source File: PartitionManager.java From storm-kafka-0.8-plus with Apache License 2.0
public long queryPartitionOffsetLatestTime() {
    return KafkaUtils.getOffset(_consumer, _spoutConfig.topic, _partition.partition, OffsetRequest.LatestTime());
}
Example #20
Source File: KafkaStreamReader.java From arcusplatform with Apache License 2.0
public KafkaStreamReader scanToEarliest() {
    this.timestamp = Instant.ofEpochMilli(kafka.api.OffsetRequest.EarliestTime());
    return this;
}
Example #21
Source File: KafkaUtilsTest.java From storm-kafka-0.8-plus with Apache License 2.0
private ByteBufferMessageSet getLastMessage() {
    long offsetOfLastMessage = KafkaUtils.getOffset(simpleConsumer, config.topic, 0, OffsetRequest.LatestTime()) - 1;
    return KafkaUtils.fetchMessages(config, simpleConsumer,
            new Partition(Broker.fromString(broker.getBrokerConnectionString()), 0), offsetOfLastMessage);
}