kafka.javaapi.FetchResponse Java Examples
The following examples show how to use kafka.javaapi.FetchResponse. Each example is taken from an open-source project; the source file, project, and license are noted above it.
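All of the examples follow the same low-level pattern: build a kafka.api.FetchRequest with FetchRequestBuilder, issue it through a kafka.javaapi.consumer.SimpleConsumer, check the resulting kafka.javaapi.FetchResponse for errors, and iterate the returned message set. Below is a minimal, self-contained sketch of that pattern; the broker address, topic name, partition, and client id are placeholders, not taken from any project in this listing.

import java.nio.ByteBuffer;

import kafka.api.FetchRequest;
import kafka.api.FetchRequestBuilder;
import kafka.javaapi.FetchResponse;
import kafka.javaapi.consumer.SimpleConsumer;
import kafka.message.MessageAndOffset;

public class FetchResponseSketch {
  public static void main(String[] args) throws Exception {
    // Placeholder broker/topic/partition; substitute your own.
    SimpleConsumer consumer = new SimpleConsumer("localhost", 9092, 100000, 64 * 1024, "example-client");
    try {
      FetchRequest request = new FetchRequestBuilder()
          .clientId("example-client")
          .addFetch("example-topic", 0, 0L, 100000) // topic, partition, start offset, fetch size
          .build();
      FetchResponse response = consumer.fetch(request);
      if (response.hasError()) {
        // errorCode() identifies the failure, e.g. offset out of range or a moved leader
        throw new RuntimeException("Fetch failed with error code "
            + response.errorCode("example-topic", 0));
      }
      for (MessageAndOffset messageAndOffset : response.messageSet("example-topic", 0)) {
        ByteBuffer payload = messageAndOffset.message().payload();
        byte[] bytes = new byte[payload.limit()];
        payload.get(bytes);
        System.out.println("Offset " + messageAndOffset.offset() + ": " + new String(bytes, "UTF-8"));
      }
    } finally {
      consumer.close();
    }
  }
}

The production examples below add what this sketch omits: retries on error codes such as OffsetOutOfRange, leader re-discovery, and offset bookkeeping via nextOffset().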
Example #1
Source File: KafkaLeaderReader.java from arcusplatform with Apache License 2.0

private int readNext() {
  FetchRequestBuilder requestBuilder = new FetchRequestBuilder().clientId(clientId);
  for (Map.Entry<TopicAndPartition, Long> offset : offsets.entrySet()) {
    if (offset.getValue() == null) {
      logger.warn("Invalid offset for topic: [{}] partition: [{}]",
          offset.getKey().topic(), offset.getKey().partition());
    } else {
      requestBuilder.addFetch(offset.getKey().topic(), offset.getKey().partition(), offset.getValue(), fetchSize);
    }
  }
  FetchRequest request = requestBuilder.build();
  FetchResponse response = getKafkaConsumer().fetch(request);
  // FIXME handle errors / leader rebalances here
  return dispatch(response);
}
Example #2
Source File: KafkaPartitionReader.java from Scribengin with GNU Affero General Public License v3.0

void nextMessageSet() throws Exception {
  FetchRequest req = new FetchRequestBuilder().
      clientId(name).
      addFetch(topic, partitionMetadata.partitionId(), currentOffset, fetchSize).
      minBytes(1).
      maxWait(1000).
      build();
  FetchResponse fetchResponse = consumer.fetch(req);
  if (fetchResponse.hasError()) {
    throw new Exception("TODO: handle the error, reset the consumer....");
  }
  currentMessageSet = fetchResponse.messageSet(topic, partitionMetadata.partitionId());
  currentMessageSetIterator = currentMessageSet.iterator();
}
Example #3
Source File: DemoLowLevelConsumer.java from KafkaExample with Apache License 2.0

public static void main(String[] args) throws Exception {
  final String topic = "topic1";
  String clientID = "DemoLowLevelConsumer1";
  SimpleConsumer simpleConsumer = new SimpleConsumer("kafka0", 9092, 100000, 64 * 1000000, clientID);
  FetchRequest req = new FetchRequestBuilder().clientId(clientID)
      .addFetch(topic, 0, 0L, 50)
      .addFetch(topic, 1, 0L, 5000)
      .addFetch(topic, 2, 0L, 1000000)
      .build();
  FetchResponse fetchResponse = simpleConsumer.fetch(req);
  ByteBufferMessageSet messageSet = (ByteBufferMessageSet) fetchResponse.messageSet(topic, 0);
  for (MessageAndOffset messageAndOffset : messageSet) {
    ByteBuffer payload = messageAndOffset.message().payload();
    long offset = messageAndOffset.offset();
    byte[] bytes = new byte[payload.limit()];
    payload.get(bytes);
    System.out.println("Offset:" + offset + ", Payload:" + new String(bytes, "UTF-8"));
  }
}
Example #4
Source File: Kafka08ConsumerClient.java from incubator-gobblin with Apache License 2.0

private Iterator<KafkaConsumerRecord> getIteratorFromFetchResponse(FetchResponse fetchResponse, KafkaPartition partition) {
  try {
    ByteBufferMessageSet messageBuffer = fetchResponse.messageSet(partition.getTopicName(), partition.getId());
    return Iterators.transform(messageBuffer.iterator(),
        new Function<kafka.message.MessageAndOffset, KafkaConsumerRecord>() {
          @Override
          public KafkaConsumerRecord apply(kafka.message.MessageAndOffset input) {
            return new Kafka08ConsumerRecord(input, partition.getTopicName(), partition.getId());
          }
        });
  } catch (Exception e) {
    log.warn(String.format("Failed to retrieve next message buffer for partition %s: %s. "
        + "The remainder of this partition will be skipped.", partition, e));
    return null;
  }
}
Example #5
Source File: KafkaComponent.java from metron with Apache License 2.0

public List<byte[]> readMessages(String topic) {
  SimpleConsumer consumer = new SimpleConsumer("localhost", 6667, 100000, 64 * 1024, "consumer");
  FetchRequest req = new FetchRequestBuilder()
      .clientId("consumer")
      .addFetch(topic, 0, 0, 100000)
      .build();
  FetchResponse fetchResponse = consumer.fetch(req);
  Iterator<MessageAndOffset> results = fetchResponse.messageSet(topic, 0).iterator();
  List<byte[]> messages = new ArrayList<>();
  while (results.hasNext()) {
    ByteBuffer payload = results.next().message().payload();
    byte[] bytes = new byte[payload.limit()];
    payload.get(bytes);
    messages.add(bytes);
  }
  consumer.close();
  return messages;
}
Example #6
Source File: Kafka08ConsumerClient.java from incubator-gobblin with Apache License 2.0

@Override
public Iterator<KafkaConsumerRecord> consume(KafkaPartition partition, long nextOffset, long maxOffset) {
  if (nextOffset > maxOffset) {
    return null;
  }
  FetchRequest fetchRequest = createFetchRequest(partition, nextOffset);
  try {
    FetchResponse fetchResponse = getFetchResponseForFetchRequest(fetchRequest, partition);
    return getIteratorFromFetchResponse(fetchResponse, partition);
  } catch (Exception e) {
    log.warn(String.format(
        "Fetch message buffer for partition %s has failed: %s. Will refresh topic metadata and retry",
        partition, e));
    return refreshTopicMetadataAndRetryFetch(partition, fetchRequest);
  }
}
Example #7
Source File: KafkaWrapper.java from incubator-gobblin with Apache License 2.0

@Override
protected Iterator<MessageAndOffset> fetchNextMessageBuffer(KafkaPartition partition, long nextOffset, long maxOffset) {
  if (nextOffset > maxOffset) {
    return null;
  }
  FetchRequest fetchRequest = createFetchRequest(partition, nextOffset);
  try {
    FetchResponse fetchResponse = getFetchResponseForFetchRequest(fetchRequest, partition);
    return getIteratorFromFetchResponse(fetchResponse, partition);
  } catch (Exception e) {
    LOG.warn(String.format(
        "Fetch message buffer for partition %s has failed: %s. Will refresh topic metadata and retry",
        partition, e));
    return refreshTopicMetadataAndRetryFetch(partition, fetchRequest);
  }
}
Example #8
Source File: KafkaWrapper.java from incubator-gobblin with Apache License 2.0

private Iterator<MessageAndOffset> refreshTopicMetadataAndRetryFetch(KafkaPartition partition, FetchRequest fetchRequest) {
  try {
    refreshTopicMetadata(partition);
    FetchResponse fetchResponse = getFetchResponseForFetchRequest(fetchRequest, partition);
    return getIteratorFromFetchResponse(fetchResponse, partition);
  } catch (Exception e) {
    LOG.warn(String.format("Fetch message buffer for partition %s has failed: %s. This partition will be skipped.",
        partition, e));
    return null;
  }
}
Example #9
Source File: KafkaUtils.java from storm-kafka-0.8-plus with Apache License 2.0

public static ByteBufferMessageSet fetchMessages(KafkaConfig config, SimpleConsumer consumer, Partition partition, long offset) {
  ByteBufferMessageSet msgs = null;
  String topic = config.topic;
  int partitionId = partition.partition;
  for (int errors = 0; errors < 2 && msgs == null; errors++) {
    FetchRequestBuilder builder = new FetchRequestBuilder();
    FetchRequest fetchRequest = builder.addFetch(topic, partitionId, offset, config.fetchSizeBytes)
        .clientId(config.clientId).build();
    FetchResponse fetchResponse;
    try {
      fetchResponse = consumer.fetch(fetchRequest);
    } catch (Exception e) {
      if (e instanceof ConnectException) {
        throw new FailedFetchException(e);
      } else {
        throw new RuntimeException(e);
      }
    }
    if (fetchResponse.hasError()) {
      KafkaError error = KafkaError.getError(fetchResponse.errorCode(topic, partitionId));
      if (error.equals(KafkaError.OFFSET_OUT_OF_RANGE) && config.useStartOffsetTimeIfOffsetOutOfRange && errors == 0) {
        long startOffset = getOffset(consumer, topic, partitionId, config.startOffsetTime);
        LOG.warn("Got fetch request with offset out of range: [" + offset + "]; "
            + "retrying with default start offset time from configuration. "
            + "configured start offset time: [" + config.startOffsetTime + "] offset: [" + startOffset + "]");
        offset = startOffset;
      } else {
        String message = "Error fetching data from [" + partition + "] for topic [" + topic + "]: [" + error + "]";
        LOG.error(message);
        throw new FailedFetchException(message);
      }
    } else {
      msgs = fetchResponse.messageSet(topic, partitionId);
    }
  }
  return msgs;
}
Example #10
Source File: KafkaPartitionReader.java from Scribengin with GNU Affero General Public License v3.0

public List<byte[]> execute() throws Exception {
  FetchRequest req = new FetchRequestBuilder().
      clientId(name).
      addFetch(topic, partitionMetadata.partitionId(), currentOffset, fetchSize).
      minBytes(1).
      maxWait(maxWait).
      build();
  FetchResponse fetchResponse = consumer.fetch(req);
  if (fetchResponse.hasError()) {
    short errorCode = fetchResponse.errorCode(topic, partitionMetadata.partitionId());
    String msg = "Kafka error code = " + errorCode + ", Partition " + partitionMetadata.partitionId();
    throw new Exception(msg);
  }
  List<byte[]> holder = new ArrayList<byte[]>();
  ByteBufferMessageSet messageSet = fetchResponse.messageSet(topic, partitionMetadata.partitionId());
  int count = 0;
  for (MessageAndOffset messageAndOffset : messageSet) {
    if (messageAndOffset.offset() < currentOffset) continue; // old offset, ignore
    ByteBuffer payload = messageAndOffset.message().payload();
    byte[] bytes = new byte[payload.limit()];
    payload.get(bytes);
    holder.add(bytes);
    currentOffset = messageAndOffset.nextOffset();
    count++;
    if (count == maxRead) break;
  }
  return holder;
}
Example #11
Source File: KafkaPartitionLevelConsumerTest.java from incubator-pinot with Apache License 2.0

@Override
public FetchResponse fetch(FetchRequest request) {
  scala.collection.Traversable<Tuple2<TopicAndPartition, PartitionFetchInfo>> requestInfo = request.requestInfo();
  java.util.Map<TopicAndPartition, Short> errorMap = new HashMap<>();

  while (requestInfo.headOption().isDefined()) {
    // jfim: IntelliJ erroneously thinks the following line is an incompatible type error, but it's only because
    // it doesn't understand scala covariance when called from Java (ie. it thinks head() is of type A even though
    // it's really of type Tuple2[TopicAndPartition, PartitionFetchInfo])
    Tuple2<TopicAndPartition, PartitionFetchInfo> t2 = requestInfo.head();
    TopicAndPartition topicAndPartition = t2._1();
    PartitionFetchInfo partitionFetchInfo = t2._2();
    if (!topicAndPartition.topic().equals(topicName)) {
      errorMap.put(topicAndPartition, Errors.UNKNOWN_TOPIC_OR_PARTITION.code());
    } else if (partitionLeaderIndices.length < topicAndPartition.partition()) {
      errorMap.put(topicAndPartition, Errors.UNKNOWN_TOPIC_OR_PARTITION.code());
    } else if (partitionLeaderIndices[topicAndPartition.partition()] != index) {
      errorMap.put(topicAndPartition, Errors.NOT_LEADER_FOR_PARTITION.code());
    } else {
      // Do nothing, we'll generate a fake message
    }
    requestInfo = requestInfo.tail();
  }

  return new MockFetchResponse(errorMap);
}
Example #12
Source File: KafkaPartitionLevelConsumer.java from incubator-pinot with Apache License 2.0

/**
 * Fetch messages and the per-partition high watermark from Kafka between the specified offsets.
 *
 * @param startOffset The offset of the first message desired, inclusive
 * @param endOffset The offset of the last message desired, exclusive, or {@link Long#MAX_VALUE} for no end offset.
 * @param timeoutMillis Timeout in milliseconds
 * @throws java.util.concurrent.TimeoutException If the operation could not be completed within {@code timeoutMillis}
 *         milliseconds
 * @return An iterable containing messages fetched from Kafka and their offsets, as well as the high watermark for
 *         this partition.
 */
public synchronized MessageBatch fetchMessages(long startOffset, long endOffset, int timeoutMillis)
    throws java.util.concurrent.TimeoutException {
  // TODO Improve error handling

  final long connectEndTime = System.currentTimeMillis() + _connectTimeoutMillis;
  while (_currentState.getStateValue() != KafkaConnectionHandler.ConsumerState.CONNECTED_TO_PARTITION_LEADER
      && System.currentTimeMillis() < connectEndTime) {
    _currentState.process();
  }
  if (_currentState.getStateValue() != KafkaConnectionHandler.ConsumerState.CONNECTED_TO_PARTITION_LEADER
      && connectEndTime <= System.currentTimeMillis()) {
    throw new java.util.concurrent.TimeoutException();
  }

  FetchResponse fetchResponse = _simpleConsumer.fetch(
      new FetchRequestBuilder().minBytes(_fetchRequestMinBytes).maxWait(timeoutMillis)
          .addFetch(_topic, _partition, startOffset, _fetchRequestSizeBytes).build());

  if (!fetchResponse.hasError()) {
    final Iterable<MessageAndOffset> messageAndOffsetIterable =
        buildOffsetFilteringIterable(fetchResponse.messageSet(_topic, _partition), startOffset, endOffset);

    // TODO: Instantiate with factory
    return new SimpleConsumerMessageBatch(messageAndOffsetIterable);
  } else {
    throw exceptionForKafkaErrorCode(fetchResponse.errorCode(_topic, _partition));
  }
}
Example #13
Source File: Kafka08ConsumerClient.java from incubator-gobblin with Apache License 2.0

private Iterator<KafkaConsumerRecord> refreshTopicMetadataAndRetryFetch(KafkaPartition partition, FetchRequest fetchRequest) {
  try {
    refreshTopicMetadata(partition);
    FetchResponse fetchResponse = getFetchResponseForFetchRequest(fetchRequest, partition);
    return getIteratorFromFetchResponse(fetchResponse, partition);
  } catch (Exception e) {
    log.warn(String.format("Fetch message buffer for partition %s has failed: %s. This partition will be skipped.",
        partition, e));
    return null;
  }
}
Example #14
Source File: Kafka08ConsumerClient.java from incubator-gobblin with Apache License 2.0

private synchronized FetchResponse getFetchResponseForFetchRequest(FetchRequest fetchRequest, KafkaPartition partition) {
  SimpleConsumer consumer = getSimpleConsumer(partition.getLeader().getHostAndPort());
  FetchResponse fetchResponse = consumer.fetch(fetchRequest);
  if (fetchResponse.hasError()) {
    throw new RuntimeException(
        String.format("error code %d", fetchResponse.errorCode(partition.getTopicName(), partition.getId())));
  }
  return fetchResponse;
}
Example #15
Source File: KafkaWrapper.java from incubator-gobblin with Apache License 2.0

private Iterator<MessageAndOffset> getIteratorFromFetchResponse(FetchResponse fetchResponse, KafkaPartition partition) {
  try {
    ByteBufferMessageSet messageBuffer = fetchResponse.messageSet(partition.getTopicName(), partition.getId());
    return messageBuffer.iterator();
  } catch (Exception e) {
    LOG.warn(String.format("Failed to retrieve next message buffer for partition %s: %s. "
        + "The remainder of this partition will be skipped.", partition, e));
    return null;
  }
}
Example #16
Source File: KafkaWrapper.java from incubator-gobblin with Apache License 2.0

private synchronized FetchResponse getFetchResponseForFetchRequest(FetchRequest fetchRequest, KafkaPartition partition) {
  SimpleConsumer consumer = getSimpleConsumer(partition.getLeader().getHostAndPort());
  FetchResponse fetchResponse = consumer.fetch(fetchRequest);
  if (fetchResponse.hasError()) {
    throw new RuntimeException(
        String.format("error code %d", fetchResponse.errorCode(partition.getTopicName(), partition.getId())));
  }
  return fetchResponse;
}
Example #17
Source File: AbstractExactlyOnceKafkaOutputOperator.java from attic-apex-malhar with Apache License 2.0

private void initializeLastProcessingOffset() {
  // read last received kafka message
  TopicMetadata tm = KafkaMetadataUtil.getTopicMetadata(
      Sets.newHashSet((String)getConfigProperties().get(KafkaMetadataUtil.PRODUCER_PROP_BROKERLIST)),
      this.getTopic());

  if (tm == null) {
    throw new RuntimeException("Failed to retrieve topic metadata");
  }

  partitionNum = tm.partitionsMetadata().size();
  lastMsgs = new HashMap<Integer, Pair<byte[], byte[]>>(partitionNum);

  for (PartitionMetadata pm : tm.partitionsMetadata()) {
    String leadBroker = pm.leader().host();
    int port = pm.leader().port();
    String clientName = this.getClass().getName().replace('$', '.') + "_Client_" + tm.topic() + "_" + pm.partitionId();
    SimpleConsumer consumer = new SimpleConsumer(leadBroker, port, 100000, 64 * 1024, clientName);

    long readOffset = KafkaMetadataUtil.getLastOffset(consumer, tm.topic(), pm.partitionId(),
        kafka.api.OffsetRequest.LatestTime(), clientName);

    FetchRequest req = new FetchRequestBuilder().clientId(clientName)
        .addFetch(tm.topic(), pm.partitionId(), readOffset - 1, 100000).build();
    FetchResponse fetchResponse = consumer.fetch(req);

    for (MessageAndOffset messageAndOffset : fetchResponse.messageSet(tm.topic(), pm.partitionId())) {
      Message m = messageAndOffset.message();
      ByteBuffer payload = m.payload();
      ByteBuffer key = m.key();
      byte[] valueBytes = new byte[payload.limit()];
      byte[] keyBytes = new byte[key.limit()];
      payload.get(valueBytes);
      key.get(keyBytes);
      lastMsgs.put(pm.partitionId(), new Pair<byte[], byte[]>(keyBytes, valueBytes));
    }
  }
}
Example #18
Source File: SimpleKafkaConsumer.java from twill with Apache License 2.0

/**
 * Makes a call to kafka to fetch messages.
 */
private FetchResponse fetchMessages(SimpleConsumer consumer, long offset) {
  FetchRequest request = new FetchRequestBuilder()
      .clientId(consumer.clientId())
      .addFetch(topicPart.getTopic(), topicPart.getPartition(), offset, FETCH_SIZE)
      .maxWait(MAX_WAIT)
      .build();
  return consumer.fetch(request);
}
Example #19
Source File: SimpleConsumerDemo.java from javabase with Apache License 2.0

public static void main(String[] args) throws Exception {
  generateData();

  SimpleConsumer simpleConsumer = new SimpleConsumer(KafkaProperties.KAFKA_SERVER_URL,
      KafkaProperties.KAFKA_SERVER_PORT,
      KafkaProperties.CONNECTION_TIMEOUT,
      KafkaProperties.KAFKA_PRODUCER_BUFFER_SIZE,
      KafkaProperties.CLIENT_ID);

  System.out.println("Testing single fetch");
  FetchRequest req = new FetchRequestBuilder()
      .clientId(KafkaProperties.CLIENT_ID)
      .addFetch(KafkaProperties.TOPIC2, 0, 0L, 100)
      .build();
  FetchResponse fetchResponse = simpleConsumer.fetch(req);
  printMessages(fetchResponse.messageSet(KafkaProperties.TOPIC2, 0));

  System.out.println("Testing single multi-fetch");
  Map<String, List<Integer>> topicMap = new HashMap<String, List<Integer>>();
  topicMap.put(KafkaProperties.TOPIC2, Collections.singletonList(0));
  topicMap.put(KafkaProperties.TOPIC3, Collections.singletonList(0));
  req = new FetchRequestBuilder()
      .clientId(KafkaProperties.CLIENT_ID)
      .addFetch(KafkaProperties.TOPIC2, 0, 0L, 100)
      .addFetch(KafkaProperties.TOPIC3, 0, 0L, 100)
      .build();
  fetchResponse = simpleConsumer.fetch(req);
  int fetchReq = 0;
  for (Map.Entry<String, List<Integer>> entry : topicMap.entrySet()) {
    String topic = entry.getKey();
    for (Integer partition : entry.getValue()) { // each map value is a list of partition ids
      System.out.println("Response from fetch request no: " + ++fetchReq);
      printMessages(fetchResponse.messageSet(topic, partition));
    }
  }
}
Example #20
Source File: KafkaLeaderReader.java from arcusplatform with Apache License 2.0

private int dispatch(FetchResponse response) {
  int numDispatched = 0;
  for (TopicAndPartition tap : new ArrayList<>(offsets.keySet())) {
    short errorCode = response.errorCode(tap.topic(), tap.partition());
    if (errorCode != 0) {
      logger.warn("Error reading from topic: [{}] partition: [{}]", tap.topic(), tap.partition(),
          ErrorMapping.exceptionFor(errorCode));
      continue;
    }

    ByteBufferMessageSet message = response.messageSet(tap.topic(), tap.partition());
    for (MessageAndOffset mao : message) {
      Long offset = offsets.get(tap);
      if (offset != null && offset > mao.offset()) {
        // skip older offsets
        continue;
      }
      KafkaConsumer handler = handlers.computeIfAbsent(tap, handlerFactory);
      if (handler == null) {
        logger.debug("No handler for topic: [{}] partition: [{}], this partition won't be processed",
            tap.topic(), tap.partition());
        offsets.remove(tap);
        handlers.remove(tap);
        break;
      }
      if (handler.apply(tap, mao.message())) {
        numDispatched++;
        offsets.put(tap, mao.nextOffset());
      } else {
        logger.debug("Done processing topic: [{}] partition: [{}]", tap.topic(), tap.partition());
        offsets.remove(tap);
        handlers.remove(tap);
        break;
      }
    }
  }
  return numDispatched;
}
Example #21
Source File: KafkaSimpleConsumer.java from Pistachio with Apache License 2.0

public Iterable<BytesMessageWithOffset> fetch(long offset, int timeoutMs) throws InterruptedException {
  List<BytesMessageWithOffset> newOffsetMsg = new ArrayList<BytesMessageWithOffset>();
  FetchResponse response = null;
  Broker previousLeader = leaderBroker;
  while (true) {
    ensureConsumer(previousLeader);

    if (offset == Long.MAX_VALUE) {
      offset = getOffset(false);
      logger.info("offset max long, fetch from latest in kafka {}", offset);
    }

    FetchRequest request = new FetchRequestBuilder()
        .clientId(clientId)
        .addFetch(topic, partitionId, offset, 100000000)
        .maxWait(timeoutMs)
        .minBytes(1)
        .build();

    //logger.debug("fetch offset {}", offset);

    try {
      response = consumer.fetch(request);
    } catch (Exception e) {
      // e could be an instance of ClosedByInterruptException as SimpleConsumer.fetch uses nio
      if (Thread.interrupted()) {
        logger.info("catch exception of {} with interrupted in fetch for {} - {} with offset {}",
            e.getClass().getName(), topic, partitionId, offset);
        throw new InterruptedException();
      }
      logger.warn("caught exception in fetch {} - {}", topic, partitionId, e);
      response = null;
    }

    if (response == null || response.hasError()) {
      short errorCode = response != null ? response.errorCode(topic, partitionId) : ErrorMapping.UnknownCode();
      logger.warn("fetch {} - {} with offset {} encounters error: {}", topic, partitionId, offset, errorCode);

      boolean needNewLeader = false;
      if (errorCode == ErrorMapping.RequestTimedOutCode()) {
        // TODO: leave it here
      } else if (errorCode == ErrorMapping.OffsetOutOfRangeCode()) {
        // TODO: fetch the earliest offset or latest offset?
        // seems no obvious correct way to handle it
        long earliestOffset = getOffset(true);
        logger.debug("get earliest offset {} for {} - {}", earliestOffset, topic, partitionId);
        if (earliestOffset < 0) {
          needNewLeader = true;
        } else {
          newOffsetMsg.add(new BytesMessageWithOffset(null, earliestOffset));
          offset = earliestOffset;
          continue;
        }
      } else {
        needNewLeader = true;
      }

      if (needNewLeader) {
        stopConsumer();
        previousLeader = leaderBroker;
        leaderBroker = null;
        continue;
      }
    } else {
      break;
    }
  }

  return response != null
      ? filterAndDecode(response.messageSet(topic, partitionId), offset)
      : (newOffsetMsg.size() > 0 ? newOffsetMsg : EMPTY_MSGS);
}
Example #22
Source File: KafkaReader.java from HiveKa with Apache License 2.0

/**
 * Creates and issues a fetch request.
 *
 * @return false if there are no more fetches
 * @throws IOException
 */
public boolean fetch() throws IOException {
  if (currentOffset >= lastOffset) {
    return false;
  }
  long tempTime = System.currentTimeMillis();
  TopicAndPartition topicAndPartition = new TopicAndPartition(
      kafkaRequest.getTopic(), kafkaRequest.getPartition());
  log.debug("\nAsking for offset : " + (currentOffset));
  PartitionFetchInfo partitionFetchInfo = new PartitionFetchInfo(currentOffset, fetchBufferSize);

  HashMap<TopicAndPartition, PartitionFetchInfo> fetchInfo =
      new HashMap<TopicAndPartition, PartitionFetchInfo>();
  fetchInfo.put(topicAndPartition, partitionFetchInfo);

  FetchRequest fetchRequest = new FetchRequest(
      1,                   // fetch request correlation id
      "hive_kafka_client", // client name
      1000,                // fetch request max wait
      1024,                // fetch request min bytes
      fetchInfo);

  FetchResponse fetchResponse = null;
  try {
    fetchResponse = simpleConsumer.fetch(fetchRequest);
    if (fetchResponse.hasError()) {
      log.info("Error encountered during a fetch request from Kafka");
      log.info("Error Code generated : "
          + fetchResponse.errorCode(kafkaRequest.getTopic(), kafkaRequest.getPartition()));
      return false;
    } else {
      ByteBufferMessageSet messageBuffer = fetchResponse.messageSet(
          kafkaRequest.getTopic(), kafkaRequest.getPartition());
      lastFetchTime = (System.currentTimeMillis() - tempTime);
      log.debug("Time taken to fetch : " + (lastFetchTime / 1000) + " seconds");
      log.debug("The size of the ByteBufferMessageSet returned is : " + messageBuffer.sizeInBytes());
      int skipped = 0;
      totalFetchTime += lastFetchTime;
      messageIter = messageBuffer.iterator();
      // count how many messages precede the current offset, using a second iterator
      Iterator<MessageAndOffset> messageIter2 = messageBuffer.iterator();
      MessageAndOffset message = null;
      while (messageIter2.hasNext()) {
        message = messageIter2.next();
        if (message.offset() < currentOffset) {
          skipped++;
        } else {
          log.debug("Skipped offsets till : " + message.offset());
          break;
        }
      }
      log.debug("Number of offsets to be skipped: " + skipped);
      while (skipped != 0) {
        MessageAndOffset skippedMessage = messageIter.next();
        log.debug("Skipping offset : " + skippedMessage.offset());
        skipped--;
      }

      if (!messageIter.hasNext()) {
        System.out.println("No more data left to process. Returning false");
        messageIter = null;
        return false;
      }
      return true;
    }
  } catch (Exception e) {
    log.info("Exception generated during fetch");
    e.printStackTrace();
    return false;
  }
}
Example #23
Source File: KafkaLowLevelConsumer09.java from datacollector with Apache License 2.0

@Override
public List<MessageAndOffset> read(long offset) throws StageException {

  FetchRequest req = buildFetchRequest(offset);
  FetchResponse fetchResponse;
  try {
    fetchResponse = consumer.fetch(req);
  } catch (Exception e) {
    if (e instanceof SocketTimeoutException) {
      //If the value of consumer.timeout.ms is set to a positive integer, a timeout exception is thrown to the
      //consumer if no message is available for consumption after the specified timeout value.
      //If this happens exit gracefully
      LOG.warn(KafkaErrors.KAFKA_28.getMessage());
      return Collections.emptyList();
    } else {
      throw new StageException(KafkaErrors.KAFKA_29, e.toString(), e);
    }
  }

  if (fetchResponse.hasError()) {
    short code = fetchResponse.errorCode(topic, partition);
    if (code == ErrorMapping.OffsetOutOfRangeCode()) {
      //invalid offset
      offset = getLastOffset(consumer, topic, partition, kafka.api.OffsetRequest.LatestTime(), clientName);
    } else {
      //try re-initializing connection with kafka
      consumer.close();
      consumer = null;
      leader = findNewLeader(leader, topic, partition);
    }

    //re-fetch
    req = buildFetchRequest(offset);
    fetchResponse = consumer.fetch(req);

    if (fetchResponse.hasError()) {
      //could not fetch the second time, give kafka some time
      LOG.error(KafkaErrors.KAFKA_26.getMessage(), topic, partition, offset);
    }
  }

  List<MessageAndOffset> partitionToPayloadMapArrayList = new ArrayList<>();
  for (kafka.message.MessageAndOffset messageAndOffset : fetchResponse.messageSet(topic, partition)) {
    long currentOffset = messageAndOffset.offset();
    if (currentOffset < offset) {
      LOG.warn(KafkaErrors.KAFKA_27.getMessage(), currentOffset, offset);
      continue;
    }
    ByteBuffer payload = messageAndOffset.message().payload();
    final Object key = messageAndOffset.message().key();
    byte[] bytes = new byte[payload.limit()];
    payload.get(bytes);
    MessageAndOffset partitionToPayloadMap = new MessageAndOffset(key, bytes, messageAndOffset.nextOffset(), partition);
    partitionToPayloadMapArrayList.add(partitionToPayloadMap);
  }
  return partitionToPayloadMapArrayList;
}
Example #24
Source File: KafkaLowLevelConsumer08.java from datacollector with Apache License 2.0

@Override
public List<MessageAndOffset> read(long offset) throws StageException {

  FetchRequest req = buildFetchRequest(offset);
  FetchResponse fetchResponse;
  try {
    fetchResponse = consumer.fetch(req);
  } catch (Exception e) {
    if (e instanceof SocketTimeoutException) {
      //If the value of consumer.timeout.ms is set to a positive integer, a timeout exception is thrown to the
      //consumer if no message is available for consumption after the specified timeout value.
      //If this happens exit gracefully
      LOG.warn(KafkaErrors.KAFKA_28.getMessage());
      return Collections.emptyList();
    } else {
      throw new StageException(KafkaErrors.KAFKA_29, e.toString(), e);
    }
  }

  if (fetchResponse.hasError()) {
    short code = fetchResponse.errorCode(topic, partition);
    if (code == ErrorMapping.OffsetOutOfRangeCode()) {
      //invalid offset
      offset = getLastOffset(consumer, topic, partition, kafka.api.OffsetRequest.LatestTime(), clientName);
    } else {
      //try re-initializing connection with kafka
      consumer.close();
      consumer = null;
      leader = findNewLeader(leader, topic, partition);
    }

    //re-fetch
    req = buildFetchRequest(offset);
    fetchResponse = consumer.fetch(req);

    if (fetchResponse.hasError()) {
      //could not fetch the second time, give kafka some time
      LOG.error(KafkaErrors.KAFKA_26.getMessage(), topic, partition, offset);
    }
  }

  List<MessageAndOffset> partitionToPayloadMapArrayList = new ArrayList<>();
  for (kafka.message.MessageAndOffset messageAndOffset : fetchResponse.messageSet(topic, partition)) {
    long currentOffset = messageAndOffset.offset();
    if (currentOffset < offset) {
      LOG.warn(KafkaErrors.KAFKA_27.getMessage(), currentOffset, offset);
      continue;
    }
    ByteBuffer payload = messageAndOffset.message().payload();
    final Object key = messageAndOffset.message().key();
    byte[] bytes = new byte[payload.limit()];
    payload.get(bytes);
    MessageAndOffset partitionToPayloadMap = new MessageAndOffset(key, bytes, messageAndOffset.nextOffset(), partition);
    partitionToPayloadMapArrayList.add(partitionToPayloadMap);
  }
  return partitionToPayloadMapArrayList;
}
Example #25
Source File: LegacyKafkaClient.java from secor with Apache License 2.0

private Message getMessage(TopicPartition topicPartition, long offset, SimpleConsumer consumer) {
  LOG.debug("fetching message topic {} partition {} offset {}",
      topicPartition.getTopic(), topicPartition.getPartition(), offset);
  final int MAX_MESSAGE_SIZE_BYTES = mConfig.getMaxMessageSizeBytes();
  final String clientName = getClientName(topicPartition);
  kafka.api.FetchRequest request = new FetchRequestBuilder().clientId(clientName)
      .addFetch(topicPartition.getTopic(), topicPartition.getPartition(), offset, MAX_MESSAGE_SIZE_BYTES)
      .build();
  FetchResponse response = consumer.fetch(request);
  if (response.hasError()) {
    consumer.close();
    int errorCode = response.errorCode(topicPartition.getTopic(), topicPartition.getPartition());
    if (errorCode == Errors.OFFSET_OUT_OF_RANGE.code()) {
      throw new MessageDoesNotExistException();
    } else {
      throw new RuntimeException("Error fetching offset data. Reason: " + errorCode);
    }
  }
  MessageAndOffset messageAndOffset =
      response.messageSet(topicPartition.getTopic(), topicPartition.getPartition()).iterator().next();
  byte[] keyBytes = null;
  if (messageAndOffset.message().hasKey()) {
    ByteBuffer key = messageAndOffset.message().key();
    keyBytes = new byte[key.limit()];
    key.get(keyBytes);
  }
  byte[] payloadBytes = null;
  if (!messageAndOffset.message().isNull()) {
    ByteBuffer payload = messageAndOffset.message().payload();
    payloadBytes = new byte[payload.limit()];
    payload.get(payloadBytes);
  }
  long timestamp = (mConfig.useKafkaTimestamp())
      ? mKafkaMessageTimestampFactory.getKafkaMessageTimestamp().getTimestamp(messageAndOffset)
      : 0L;
  return new Message(topicPartition.getTopic(), topicPartition.getPartition(),
      messageAndOffset.offset(), keyBytes, payloadBytes, timestamp, null);
}
Example #26
Source File: LowLevelConsumerExample.java from pulsar with Apache License 2.0

private static void consumeMessage(Arguments arguments) {

  Properties properties = new Properties();
  properties.put(SimpleConsumer.HTTP_SERVICE_URL, arguments.httpServiceUrl);
  SimpleConsumer consumer = new SimpleConsumer(arguments.serviceUrl, 0, 0, 0, "clientId", properties);

  long readOffset = kafka.api.OffsetRequest.EarliestTime();
  TestDecoder decoder = new TestDecoder();
  int count = 0;

  while (count < arguments.totalMessages || arguments.totalMessages == -1) {
    // re-issue the fetch on each pass (moved inside the loop so iterations past the
    // first batch actually make progress rather than re-reading one stale response)
    kafka.api.FetchRequest fReq = new FetchRequestBuilder().clientId("c1")
        .addFetch(arguments.topicName, arguments.partitionIndex, readOffset, 100000).build();
    FetchResponse fetchResponse = consumer.fetch(fReq);

    // 1. Read from topic without subscription/consumer-group name.
    for (MessageAndOffset messageAndOffset : fetchResponse.messageSet(arguments.topicName, arguments.partitionIndex)) {
      MessageId msgIdOffset = (messageAndOffset instanceof PulsarMsgAndOffset)
          ? ((PulsarMsgAndOffset) messageAndOffset).getFullOffset()
          : null;
      long currentOffset = messageAndOffset.offset();
      if (currentOffset < readOffset) {
        continue;
      }

      ByteBuffer payload = messageAndOffset.message().payload();

      byte[] bytes = new byte[payload.limit()];
      payload.get(bytes);
      Tweet tweet = decoder.fromBytes(bytes);
      log.info("Received tweet: {}-{}", tweet.userName, tweet.message);
      count++;
      readOffset = messageAndOffset.nextOffset(); // advance so the next fetch starts past this message

      TopicAndPartition topicPartition = new TopicAndPartition(arguments.topicName, arguments.partitionIndex);
      OffsetMetadataAndError offsetError = new OffsetMetadataAndError(msgIdOffset, null, (short) 0);
      Map<TopicAndPartition, OffsetMetadataAndError> requestInfo =
          Collections.singletonMap(topicPartition, offsetError);
      // 2. Commit offset for a given topic and subscription-name/consumer-name.
      OffsetCommitRequest offsetReq = new OffsetCommitRequest(arguments.groupName, requestInfo, (short) -1, 0, "c1");
      consumer.commitOffsets(offsetReq);
    }
  }

  consumer.close();
}
Example #27
Source File: KafkaPartitionLevelConsumerTest.java from incubator-pinot with Apache License 2.0

@Override
public FetchResponse fetch(kafka.javaapi.FetchRequest request) {
  throw new RuntimeException("Unimplemented");
}
Example #28
Source File: TestKafkaSink.java from suro with Apache License 2.0

@Test
public void testDefaultParameters() throws IOException {
  TopicCommand.createTopic(zk.getZkClient(),
      new TopicCommand.TopicCommandOptions(new String[]{
          "--zookeeper", "dummy", "--create", "--topic", TOPIC_NAME,
          "--replication-factor", "2", "--partitions", "1"}));
  String description = "{\n" +
      "    \"type\": \"kafka\",\n" +
      "    \"client.id\": \"kafkasink\",\n" +
      "    \"bootstrap.servers\": \"" + kafkaServer.getBrokerListStr() + "\",\n" +
      "    \"acks\": 1\n" +
      "}";

  KafkaSink sink = jsonMapper.readValue(description, new TypeReference<Sink>(){});
  sink.open();
  Iterator<Message> msgIterator = new MessageSetReader(createMessageSet(TOPIC_NAME, 2)).iterator();
  while (msgIterator.hasNext()) {
    sink.writeTo(new StringMessage(msgIterator.next()));
  }
  assertTrue(sink.getNumOfPendingMessages() > 0);
  sink.close();
  assertEquals(sink.getNumOfPendingMessages(), 0);
  System.out.println(sink.getStat());

  // get the leader
  Option<Object> leaderOpt = ZkUtils.getLeaderForPartition(zk.getZkClient(), TOPIC_NAME, 0);
  assertTrue("Leader for topic new-topic partition 0 should exist", leaderOpt.isDefined());
  int leader = (Integer) leaderOpt.get();

  KafkaConfig config;
  if (leader == kafkaServer.getServer(0).config().brokerId()) {
    config = kafkaServer.getServer(0).config();
  } else {
    config = kafkaServer.getServer(1).config();
  }
  SimpleConsumer consumer = new SimpleConsumer(config.hostName(), config.port(), 100000, 100000, "clientId");
  FetchResponse response = consumer.fetch(new FetchRequestBuilder().addFetch(TOPIC_NAME, 0, 0, 100000).build());

  List<MessageAndOffset> messageSet = Lists.newArrayList(response.messageSet(TOPIC_NAME, 0).iterator());
  assertEquals("Should have fetched 2 messages", 2, messageSet.size());

  assertEquals(new String(extractMessage(messageSet, 0)), "testMessage" + 0);
  assertEquals(new String(extractMessage(messageSet, 1)), "testMessage" + 1);
}
Example #29
Source File: TestKafkaSinkV2.java from suro with Apache License 2.0

@Test
public void testDefaultParameters() throws IOException {
  TopicCommand.createTopic(zk.getZkClient(),
      new TopicCommand.TopicCommandOptions(new String[]{
          "--zookeeper", "dummy", "--create", "--topic", TOPIC_NAME,
          "--replication-factor", "2", "--partitions", "1"}));
  String description = "{\n" +
      "    \"type\": \"kafka\",\n" +
      "    \"client.id\": \"kafkasink\",\n" +
      "    \"metadata.broker.list\": \"" + kafkaServer.getBrokerListStr() + "\",\n" +
      "    \"request.required.acks\": 1\n" +
      "}";

  ObjectMapper jsonMapper = new DefaultObjectMapper();
  jsonMapper.registerSubtypes(new NamedType(KafkaSinkV2.class, "kafka"));
  KafkaSinkV2 sink = jsonMapper.readValue(description, new TypeReference<Sink>(){});
  sink.open();

  // create and send test messages to Kafka
  Iterator<Message> msgIterator = new MessageSetReader(createMessageSet(TOPIC_NAME, 2)).iterator();
  HashSet<String> sentPayloads = new HashSet<String>(); // track sent messages for comparison later
  while (msgIterator.hasNext()) {
    StringMessage next = new StringMessage(msgIterator.next());
    sink.writeTo(next); // send
    sentPayloads.add(new String(next.getMessage().getPayload())); // record
  }
  sink.close();
  assertEquals(sink.getNumOfPendingMessages(), 0);
  System.out.println(sink.getStat());

  // get the leader
  Option<Object> leaderOpt = ZkUtils.getLeaderForPartition(zk.getZkClient(), TOPIC_NAME, 0);
  assertTrue("Leader for topic new-topic partition 0 should exist", leaderOpt.isDefined());
  int leader = (Integer) leaderOpt.get();

  KafkaConfig config;
  if (leader == kafkaServer.getServer(0).config().brokerId()) {
    config = kafkaServer.getServer(0).config();
  } else {
    config = kafkaServer.getServer(1).config();
  }

  // get data back from Kafka
  SimpleConsumer consumer = new SimpleConsumer(config.hostName(), config.port(), 100000, 100000, "clientId");
  FetchResponse response = consumer.fetch(new FetchRequestBuilder().addFetch(TOPIC_NAME, 0, 0, 100000).build());

  List<MessageAndOffset> messageSet = Lists.newArrayList(response.messageSet(TOPIC_NAME, 0).iterator());
  assertEquals("Should have fetched 2 messages", 2, messageSet.size());

  for (int i = 0; i < messageSet.size(); i++) {
    // ensure that each received message was one that was sent
    String receivedPayload = new String(extractMessage(messageSet, i));
    System.out.println("Got message: " + receivedPayload);
    assert(sentPayloads.remove(receivedPayload));
  }
  assertEquals(sentPayloads.size(), 0); // all sent messages should have been received
}
Example #30
Source File: KafkaConsumer.java from jstorm with Apache License 2.0

public ByteBufferMessageSet fetchMessages(int partition, long offset) throws IOException {

  String topic = config.topic;
  FetchRequest req = new FetchRequestBuilder().clientId(config.clientId)
      .addFetch(topic, partition, offset, config.fetchMaxBytes)
      .maxWait(config.fetchWaitMaxMs).build();
  FetchResponse fetchResponse = null;
  SimpleConsumer simpleConsumer = null;
  try {
    simpleConsumer = findLeaderConsumer(partition);
    if (simpleConsumer == null) {
      // LOG.error(message);
      return null;
    }
    fetchResponse = simpleConsumer.fetch(req);
  } catch (Exception e) {
    if (e instanceof ConnectException || e instanceof SocketTimeoutException || e instanceof IOException
        || e instanceof UnresolvedAddressException) {
      LOG.warn("Network error when fetching messages:", e);
      if (simpleConsumer != null) {
        String host = simpleConsumer.host();
        int port = simpleConsumer.port();
        simpleConsumer = null;
        throw new KafkaException("Network error when fetching messages: " + host + ":" + port + " , "
            + e.getMessage(), e);
      }
    } else {
      throw new RuntimeException(e);
    }
  }
  if (fetchResponse.hasError()) {
    short code = fetchResponse.errorCode(topic, partition);
    if (code == ErrorMapping.OffsetOutOfRangeCode() && config.resetOffsetIfOutOfRange) {
      long startOffset = getOffset(topic, partition, config.startOffsetTime);
      offset = startOffset;
    }
    if (leaderBroker != null) {
      LOG.error("fetch data from kafka topic[" + config.topic + "] host[" + leaderBroker.host() + ":"
          + leaderBroker.port() + "] partition[" + partition + "] error:" + code);
    }
    return null;
  } else {
    ByteBufferMessageSet msgs = fetchResponse.messageSet(topic, partition);
    return msgs;
  }
}
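Several examples above (#17, #23, #24, #30) call a getLastOffset or getOffset helper that is not shown in this listing. Below is a minimal sketch of such a helper, modeled on the standard SimpleConsumer offset-lookup idiom from the Kafka 0.8 API; the class and method names here are illustrative assumptions, not code from any of the projects above.

import java.util.Collections;
import java.util.Map;

import kafka.api.PartitionOffsetRequestInfo;
import kafka.common.TopicAndPartition;
import kafka.javaapi.OffsetResponse;
import kafka.javaapi.consumer.SimpleConsumer;

public class OffsetLookup {
  /**
   * Looks up a boundary offset for a partition. whichTime is typically
   * kafka.api.OffsetRequest.EarliestTime() or kafka.api.OffsetRequest.LatestTime().
   */
  public static long getLastOffset(SimpleConsumer consumer, String topic, int partition,
                                   long whichTime, String clientName) {
    TopicAndPartition topicAndPartition = new TopicAndPartition(topic, partition);
    Map<TopicAndPartition, PartitionOffsetRequestInfo> requestInfo =
        Collections.singletonMap(topicAndPartition, new PartitionOffsetRequestInfo(whichTime, 1));
    kafka.javaapi.OffsetRequest request = new kafka.javaapi.OffsetRequest(
        requestInfo, kafka.api.OffsetRequest.CurrentVersion(), clientName);
    OffsetResponse response = consumer.getOffsetsBefore(request);
    if (response.hasError()) {
      System.out.println("Error fetching offset data: " + response.errorCode(topic, partition));
      return -1;
    }
    long[] offsets = response.offsets(topic, partition);
    return offsets[0];
  }
}

A helper like this is what lets the fetch loops above recover from an OffsetOutOfRange error code: they re-seek to the earliest or latest valid offset and retry the FetchRequest.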