Java Code Examples for kafka.javaapi.FetchResponse#hasError()
The following examples show how to use kafka.javaapi.FetchResponse#hasError(). Each example is drawn from an open-source project; the source file, project, and license are noted above each listing.
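Before the project examples, here is a minimal, self-contained sketch of the pattern they all share: fetch with SimpleConsumer, check hasError(), then branch on the per-partition error code. The broker address, client id, topic, partition, and buffer sizes below are illustrative placeholders, not values taken from any of the projects.

import java.nio.ByteBuffer;

import kafka.api.FetchRequest;
import kafka.api.FetchRequestBuilder;
import kafka.common.ErrorMapping;
import kafka.javaapi.FetchResponse;
import kafka.javaapi.consumer.SimpleConsumer;
import kafka.message.MessageAndOffset;

public class HasErrorSketch {
  public static void main(String[] args) {
    // Placeholder broker, client id, topic, and partition; adjust for your cluster.
    SimpleConsumer consumer = new SimpleConsumer("localhost", 9092, 100000, 64 * 1024, "demo-client");
    String topic = "demo-topic";
    int partition = 0;
    long offset = 0L;

    FetchRequest request = new FetchRequestBuilder()
        .clientId("demo-client")
        .addFetch(topic, partition, offset, 64 * 1024)
        .maxWait(1000)
        .minBytes(1)
        .build();
    FetchResponse response = consumer.fetch(request);

    if (response.hasError()) {
      // hasError() only signals that some partition in the response failed;
      // errorCode(topic, partition) retrieves the specific per-partition code.
      short code = response.errorCode(topic, partition);
      if (code == ErrorMapping.OffsetOutOfRangeCode()) {
        // Typical reaction: re-resolve a valid offset and retry the fetch.
        System.err.println("Offset " + offset + " is out of range; reset it before retrying.");
      } else {
        throw new RuntimeException("Fetch failed with Kafka error code " + code);
      }
    } else {
      for (MessageAndOffset messageAndOffset : response.messageSet(topic, partition)) {
        ByteBuffer payload = messageAndOffset.message().payload();
        byte[] bytes = new byte[payload.limit()];
        payload.get(bytes);
        System.out.println("offset " + messageAndOffset.offset() + ": " + new String(bytes));
      }
    }
    consumer.close();
  }
}

Because hasError() is response-wide, every example below calls errorCode(topic, partition) to learn why a fetch failed before deciding whether to retry, reset the offset, or propagate an exception.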
Example 1
Source File: KafkaPartitionReader.java From Scribengin with GNU Affero General Public License v3.0
void nextMessageSet() throws Exception {
  FetchRequest req = new FetchRequestBuilder().
    clientId(name).
    addFetch(topic, partitionMetadata.partitionId(), currentOffset, fetchSize).
    minBytes(1).
    maxWait(1000).
    build();
  FetchResponse fetchResponse = consumer.fetch(req);
  if (fetchResponse.hasError()) {
    throw new Exception("TODO: handle the error, reset the consumer....");
  }
  currentMessageSet = fetchResponse.messageSet(topic, partitionMetadata.partitionId());
  currentMessageSetIterator = currentMessageSet.iterator();
}
Example 2
Source File: KafkaWrapper.java From incubator-gobblin with Apache License 2.0
private synchronized FetchResponse getFetchResponseForFetchRequest(FetchRequest fetchRequest,
    KafkaPartition partition) {
  SimpleConsumer consumer = getSimpleConsumer(partition.getLeader().getHostAndPort());
  FetchResponse fetchResponse = consumer.fetch(fetchRequest);
  if (fetchResponse.hasError()) {
    throw new RuntimeException(
        String.format("error code %d", fetchResponse.errorCode(partition.getTopicName(), partition.getId())));
  }
  return fetchResponse;
}
Example 3
Source File: Kafka08ConsumerClient.java From incubator-gobblin with Apache License 2.0
private synchronized FetchResponse getFetchResponseForFetchRequest(FetchRequest fetchRequest,
    KafkaPartition partition) {
  SimpleConsumer consumer = getSimpleConsumer(partition.getLeader().getHostAndPort());
  FetchResponse fetchResponse = consumer.fetch(fetchRequest);
  if (fetchResponse.hasError()) {
    throw new RuntimeException(
        String.format("error code %d", fetchResponse.errorCode(partition.getTopicName(), partition.getId())));
  }
  return fetchResponse;
}
Example 4
Source File: KafkaPartitionLevelConsumer.java From incubator-pinot with Apache License 2.0
/**
 * Fetch messages and the per-partition high watermark from Kafka between the specified offsets.
 *
 * @param startOffset The offset of the first message desired, inclusive
 * @param endOffset The offset of the last message desired, exclusive, or {@link Long#MAX_VALUE} for no end offset.
 * @param timeoutMillis Timeout in milliseconds
 * @throws java.util.concurrent.TimeoutException If the operation could not be completed within {@code timeoutMillis}
 *         milliseconds
 * @return An iterable containing messages fetched from Kafka and their offsets, as well as the high watermark for
 *         this partition.
 */
public synchronized MessageBatch fetchMessages(long startOffset, long endOffset, int timeoutMillis)
    throws java.util.concurrent.TimeoutException {
  // TODO Improve error handling
  final long connectEndTime = System.currentTimeMillis() + _connectTimeoutMillis;
  while (_currentState.getStateValue() != KafkaConnectionHandler.ConsumerState.CONNECTED_TO_PARTITION_LEADER
      && System.currentTimeMillis() < connectEndTime) {
    _currentState.process();
  }
  if (_currentState.getStateValue() != KafkaConnectionHandler.ConsumerState.CONNECTED_TO_PARTITION_LEADER
      && connectEndTime <= System.currentTimeMillis()) {
    throw new java.util.concurrent.TimeoutException();
  }

  FetchResponse fetchResponse = _simpleConsumer.fetch(
      new FetchRequestBuilder().minBytes(_fetchRequestMinBytes).maxWait(timeoutMillis)
          .addFetch(_topic, _partition, startOffset, _fetchRequestSizeBytes).build());

  if (!fetchResponse.hasError()) {
    final Iterable<MessageAndOffset> messageAndOffsetIterable =
        buildOffsetFilteringIterable(fetchResponse.messageSet(_topic, _partition), startOffset, endOffset);
    // TODO: Instantiate with factory
    return new SimpleConsumerMessageBatch(messageAndOffsetIterable);
  } else {
    throw exceptionForKafkaErrorCode(fetchResponse.errorCode(_topic, _partition));
  }
}
Example 5
Source File: KafkaPartitionReader.java From Scribengin with GNU Affero General Public License v3.0
public List<byte[]> execute() throws Exception {
  FetchRequest req = new FetchRequestBuilder().
    clientId(name).
    addFetch(topic, partitionMetadata.partitionId(), currentOffset, fetchSize).
    minBytes(1).
    maxWait(maxWait).
    build();
  FetchResponse fetchResponse = consumer.fetch(req);
  if (fetchResponse.hasError()) {
    short errorCode = fetchResponse.errorCode(topic, partitionMetadata.partitionId());
    String msg = "Kafka error code = " + errorCode + ", Partition " + partitionMetadata.partitionId();
    throw new Exception(msg);
  }
  List<byte[]> holder = new ArrayList<byte[]>();
  ByteBufferMessageSet messageSet = fetchResponse.messageSet(topic, partitionMetadata.partitionId());
  int count = 0;
  for (MessageAndOffset messageAndOffset : messageSet) {
    if (messageAndOffset.offset() < currentOffset) continue; // old offset, ignore
    ByteBuffer payload = messageAndOffset.message().payload();
    byte[] bytes = new byte[payload.limit()];
    payload.get(bytes);
    holder.add(bytes);
    currentOffset = messageAndOffset.nextOffset();
    count++;
    if (count == maxRead) break;
  }
  return holder;
}
Example 6
Source File: KafkaUtils.java From storm-kafka-0.8-plus with Apache License 2.0
public static ByteBufferMessageSet fetchMessages(KafkaConfig config, SimpleConsumer consumer,
    Partition partition, long offset) {
  ByteBufferMessageSet msgs = null;
  String topic = config.topic;
  int partitionId = partition.partition;
  for (int errors = 0; errors < 2 && msgs == null; errors++) {
    FetchRequestBuilder builder = new FetchRequestBuilder();
    FetchRequest fetchRequest = builder.addFetch(topic, partitionId, offset, config.fetchSizeBytes).
        clientId(config.clientId).build();
    FetchResponse fetchResponse;
    try {
      fetchResponse = consumer.fetch(fetchRequest);
    } catch (Exception e) {
      if (e instanceof ConnectException) {
        throw new FailedFetchException(e);
      } else {
        throw new RuntimeException(e);
      }
    }
    if (fetchResponse.hasError()) {
      KafkaError error = KafkaError.getError(fetchResponse.errorCode(topic, partitionId));
      if (error.equals(KafkaError.OFFSET_OUT_OF_RANGE) && config.useStartOffsetTimeIfOffsetOutOfRange
          && errors == 0) {
        long startOffset = getOffset(consumer, topic, partitionId, config.startOffsetTime);
        LOG.warn("Got fetch request with offset out of range: [" + offset + "]; " +
            "retrying with default start offset time from configuration. " +
            "configured start offset time: [" + config.startOffsetTime + "] offset: [" + startOffset + "]");
        offset = startOffset;
      } else {
        String message = "Error fetching data from [" + partition + "] for topic [" + topic + "]: [" + error + "]";
        LOG.error(message);
        throw new FailedFetchException(message);
      }
    } else {
      msgs = fetchResponse.messageSet(topic, partitionId);
    }
  }
  return msgs;
}
Example 7
Source File: KafkaLowLevelConsumer08.java From datacollector with Apache License 2.0
@Override
public List<MessageAndOffset> read(long offset) throws StageException {
  FetchRequest req = buildFetchRequest(offset);
  FetchResponse fetchResponse;
  try {
    fetchResponse = consumer.fetch(req);
  } catch (Exception e) {
    if (e instanceof SocketTimeoutException) {
      // If the value of consumer.timeout.ms is set to a positive integer, a timeout exception is thrown to the
      // consumer if no message is available for consumption after the specified timeout value.
      // If this happens, exit gracefully.
      LOG.warn(KafkaErrors.KAFKA_28.getMessage());
      return Collections.emptyList();
    } else {
      throw new StageException(KafkaErrors.KAFKA_29, e.toString(), e);
    }
  }
  if (fetchResponse.hasError()) {
    short code = fetchResponse.errorCode(topic, partition);
    if (code == ErrorMapping.OffsetOutOfRangeCode()) {
      // invalid offset
      offset = getLastOffset(consumer, topic, partition, kafka.api.OffsetRequest.LatestTime(), clientName);
    } else {
      // try re-initializing connection with kafka
      consumer.close();
      consumer = null;
      leader = findNewLeader(leader, topic, partition);
    }
    // re-fetch
    req = buildFetchRequest(offset);
    fetchResponse = consumer.fetch(req);
    if (fetchResponse.hasError()) {
      // could not fetch the second time, give kafka some time
      LOG.error(KafkaErrors.KAFKA_26.getMessage(), topic, partition, offset);
    }
  }
  List<MessageAndOffset> partitionToPayloadMapArrayList = new ArrayList<>();
  for (kafka.message.MessageAndOffset messageAndOffset : fetchResponse.messageSet(topic, partition)) {
    long currentOffset = messageAndOffset.offset();
    if (currentOffset < offset) {
      LOG.warn(KafkaErrors.KAFKA_27.getMessage(), currentOffset, offset);
      continue;
    }
    ByteBuffer payload = messageAndOffset.message().payload();
    final Object key = messageAndOffset.message().key();
    byte[] bytes = new byte[payload.limit()];
    payload.get(bytes);
    MessageAndOffset partitionToPayloadMap = new MessageAndOffset(key, bytes, messageAndOffset.nextOffset(), partition);
    partitionToPayloadMapArrayList.add(partitionToPayloadMap);
  }
  return partitionToPayloadMapArrayList;
}
Example 8
Source File: KafkaLowLevelConsumer09.java From datacollector with Apache License 2.0
@Override
public List<MessageAndOffset> read(long offset) throws StageException {
  FetchRequest req = buildFetchRequest(offset);
  FetchResponse fetchResponse;
  try {
    fetchResponse = consumer.fetch(req);
  } catch (Exception e) {
    if (e instanceof SocketTimeoutException) {
      // If the value of consumer.timeout.ms is set to a positive integer, a timeout exception is thrown to the
      // consumer if no message is available for consumption after the specified timeout value.
      // If this happens, exit gracefully.
      LOG.warn(KafkaErrors.KAFKA_28.getMessage());
      return Collections.emptyList();
    } else {
      throw new StageException(KafkaErrors.KAFKA_29, e.toString(), e);
    }
  }
  if (fetchResponse.hasError()) {
    short code = fetchResponse.errorCode(topic, partition);
    if (code == ErrorMapping.OffsetOutOfRangeCode()) {
      // invalid offset
      offset = getLastOffset(consumer, topic, partition, kafka.api.OffsetRequest.LatestTime(), clientName);
    } else {
      // try re-initializing connection with kafka
      consumer.close();
      consumer = null;
      leader = findNewLeader(leader, topic, partition);
    }
    // re-fetch
    req = buildFetchRequest(offset);
    fetchResponse = consumer.fetch(req);
    if (fetchResponse.hasError()) {
      // could not fetch the second time, give kafka some time
      LOG.error(KafkaErrors.KAFKA_26.getMessage(), topic, partition, offset);
    }
  }
  List<MessageAndOffset> partitionToPayloadMapArrayList = new ArrayList<>();
  for (kafka.message.MessageAndOffset messageAndOffset : fetchResponse.messageSet(topic, partition)) {
    long currentOffset = messageAndOffset.offset();
    if (currentOffset < offset) {
      LOG.warn(KafkaErrors.KAFKA_27.getMessage(), currentOffset, offset);
      continue;
    }
    ByteBuffer payload = messageAndOffset.message().payload();
    final Object key = messageAndOffset.message().key();
    byte[] bytes = new byte[payload.limit()];
    payload.get(bytes);
    MessageAndOffset partitionToPayloadMap = new MessageAndOffset(key, bytes, messageAndOffset.nextOffset(), partition);
    partitionToPayloadMapArrayList.add(partitionToPayloadMap);
  }
  return partitionToPayloadMapArrayList;
}
Example 9
Source File: KafkaReader.java From HiveKa with Apache License 2.0
/**
 * Creates a fetch request.
 *
 * @return false if there are no more fetches
 * @throws IOException
 */
public boolean fetch() throws IOException {
  if (currentOffset >= lastOffset) {
    return false;
  }
  long tempTime = System.currentTimeMillis();
  TopicAndPartition topicAndPartition = new TopicAndPartition(
      kafkaRequest.getTopic(), kafkaRequest.getPartition());
  log.debug("\nAsking for offset : " + (currentOffset));
  PartitionFetchInfo partitionFetchInfo = new PartitionFetchInfo(
      currentOffset, fetchBufferSize);

  HashMap<TopicAndPartition, PartitionFetchInfo> fetchInfo =
      new HashMap<TopicAndPartition, PartitionFetchInfo>();
  fetchInfo.put(topicAndPartition, partitionFetchInfo);

  FetchRequest fetchRequest = new FetchRequest(
      1, // fetch request correlation id
      "hive_kafka_client", // client name
      1000, // fetch request max wait
      1024, // fetch request min bytes
      fetchInfo);

  FetchResponse fetchResponse = null;
  try {
    fetchResponse = simpleConsumer.fetch(fetchRequest);
    if (fetchResponse.hasError()) {
      log.info("Error encountered during a fetch request from Kafka");
      log.info("Error Code generated : " +
          fetchResponse.errorCode(kafkaRequest.getTopic(), kafkaRequest.getPartition()));
      return false;
    } else {
      ByteBufferMessageSet messageBuffer = fetchResponse.messageSet(
          kafkaRequest.getTopic(), kafkaRequest.getPartition());
      lastFetchTime = (System.currentTimeMillis() - tempTime);
      log.debug("Time taken to fetch : " + (lastFetchTime / 1000) + " seconds");
      log.debug("The size of the ByteBufferMessageSet returned is : " + messageBuffer.sizeInBytes());
      int skipped = 0;
      totalFetchTime += lastFetchTime;
      messageIter = messageBuffer.iterator();
      // Count how many messages precede the requested offset.
      Iterator<MessageAndOffset> messageIter2 = messageBuffer.iterator();
      MessageAndOffset message = null;
      while (messageIter2.hasNext()) {
        message = messageIter2.next();
        if (message.offset() < currentOffset) {
          skipped++;
        } else {
          log.debug("Skipped offsets till : " + message.offset());
          break;
        }
      }
      log.debug("Number of offsets to be skipped: " + skipped);
      while (skipped != 0) {
        MessageAndOffset skippedMessage = messageIter.next();
        log.debug("Skipping offset : " + skippedMessage.offset());
        skipped--;
      }
      if (!messageIter.hasNext()) {
        System.out.println("No more data left to process. Returning false");
        messageIter = null;
        return false;
      }
      return true;
    }
  } catch (Exception e) {
    log.info("Exception generated during fetch");
    e.printStackTrace();
    return false;
  }
}
Example 10
Source File: KafkaSimpleConsumer.java From Pistachio with Apache License 2.0
public Iterable<BytesMessageWithOffset> fetch(long offset, int timeoutMs) throws InterruptedException {
  List<BytesMessageWithOffset> newOffsetMsg = new ArrayList<BytesMessageWithOffset>();
  FetchResponse response = null;
  Broker previousLeader = leaderBroker;
  while (true) {
    ensureConsumer(previousLeader);

    if (offset == Long.MAX_VALUE) {
      offset = getOffset(false);
      logger.info("offset max long, fetch from latest in kafka {}", offset);
    }

    FetchRequest request = new FetchRequestBuilder()
        .clientId(clientId)
        .addFetch(topic, partitionId, offset, 100000000)
        .maxWait(timeoutMs)
        .minBytes(1)
        .build();
    //logger.debug("fetch offset {}", offset);

    try {
      response = consumer.fetch(request);
    } catch (Exception e) {
      // e could be an instance of ClosedByInterruptException as SimpleConsumer.fetch uses nio
      if (Thread.interrupted()) {
        logger.info("catch exception of {} with interrupted in fetch for {} - {} with offset {}",
            e.getClass().getName(), topic, partitionId, offset);
        throw new InterruptedException();
      }
      logger.warn("caught exception in fetch {} - {}", topic, partitionId, e);
      response = null;
    }

    if (response == null || response.hasError()) {
      short errorCode = response != null
          ? response.errorCode(topic, partitionId)
          : ErrorMapping.UnknownCode();
      logger.warn("fetch {} - {} with offset {} encounters error: {}", topic, partitionId, offset, errorCode);

      boolean needNewLeader = false;
      if (errorCode == ErrorMapping.RequestTimedOutCode()) {
        // TODO: leave it here
      } else if (errorCode == ErrorMapping.OffsetOutOfRangeCode()) {
        // TODO: fetch the earliest offset or latest offset?
        // seems no obvious correct way to handle it
        long earliestOffset = getOffset(true);
        logger.debug("got earliest offset {} for {} - {}", earliestOffset, topic, partitionId);
        if (earliestOffset < 0) {
          needNewLeader = true;
        } else {
          newOffsetMsg.add(new BytesMessageWithOffset(null, earliestOffset));
          offset = earliestOffset;
          continue;
        }
      } else {
        needNewLeader = true;
      }
      if (needNewLeader) {
        stopConsumer();
        previousLeader = leaderBroker;
        leaderBroker = null;
        continue;
      }
    } else {
      break;
    }
  }

  return response != null
      ? filterAndDecode(response.messageSet(topic, partitionId), offset)
      : (newOffsetMsg.size() > 0 ? newOffsetMsg : EMPTY_MSGS);
}
Example 11
Source File: LegacyKafkaClient.java From secor with Apache License 2.0
private Message getMessage(TopicPartition topicPartition, long offset, SimpleConsumer consumer) {
  LOG.debug("fetching message topic {} partition {} offset {}", topicPartition.getTopic(),
      topicPartition.getPartition(), offset);
  final int MAX_MESSAGE_SIZE_BYTES = mConfig.getMaxMessageSizeBytes();
  final String clientName = getClientName(topicPartition);
  kafka.api.FetchRequest request = new FetchRequestBuilder().clientId(clientName)
      .addFetch(topicPartition.getTopic(), topicPartition.getPartition(), offset, MAX_MESSAGE_SIZE_BYTES)
      .build();
  FetchResponse response = consumer.fetch(request);
  if (response.hasError()) {
    consumer.close();
    int errorCode = response.errorCode(topicPartition.getTopic(), topicPartition.getPartition());
    if (errorCode == Errors.OFFSET_OUT_OF_RANGE.code()) {
      throw new MessageDoesNotExistException();
    } else {
      throw new RuntimeException("Error fetching offset data. Reason: " + errorCode);
    }
  }
  MessageAndOffset messageAndOffset = response.messageSet(
      topicPartition.getTopic(), topicPartition.getPartition()).iterator().next();
  byte[] keyBytes = null;
  if (messageAndOffset.message().hasKey()) {
    ByteBuffer key = messageAndOffset.message().key();
    keyBytes = new byte[key.limit()];
    key.get(keyBytes);
  }
  byte[] payloadBytes = null;
  if (!messageAndOffset.message().isNull()) {
    ByteBuffer payload = messageAndOffset.message().payload();
    payloadBytes = new byte[payload.limit()];
    payload.get(payloadBytes);
  }
  long timestamp = (mConfig.useKafkaTimestamp())
      ? mKafkaMessageTimestampFactory.getKafkaMessageTimestamp().getTimestamp(messageAndOffset)
      : 0L;
  return new Message(topicPartition.getTopic(), topicPartition.getPartition(), messageAndOffset.offset(),
      keyBytes, payloadBytes, timestamp, null);
}
Example 12
Source File: KafkaConsumer.java From jstorm with Apache License 2.0
public ByteBufferMessageSet fetchMessages(int partition, long offset) throws IOException {
  String topic = config.topic;
  FetchRequest req = new FetchRequestBuilder().clientId(config.clientId)
      .addFetch(topic, partition, offset, config.fetchMaxBytes)
      .maxWait(config.fetchWaitMaxMs).build();
  FetchResponse fetchResponse = null;
  SimpleConsumer simpleConsumer = null;
  try {
    simpleConsumer = findLeaderConsumer(partition);
    if (simpleConsumer == null) {
      // LOG.error(message);
      return null;
    }
    fetchResponse = simpleConsumer.fetch(req);
  } catch (Exception e) {
    if (e instanceof ConnectException || e instanceof SocketTimeoutException || e instanceof IOException
        || e instanceof UnresolvedAddressException) {
      LOG.warn("Network error when fetching messages:", e);
      if (simpleConsumer != null) {
        String host = simpleConsumer.host();
        int port = simpleConsumer.port();
        simpleConsumer = null;
        throw new KafkaException("Network error when fetching messages: " + host + ":" + port + " , "
            + e.getMessage(), e);
      }
    } else {
      throw new RuntimeException(e);
    }
  }
  if (fetchResponse == null) {
    // fetchResponse is still null if findLeaderConsumer() threw a network error that was only logged;
    // bail out rather than dereference null below.
    return null;
  }
  if (fetchResponse.hasError()) {
    short code = fetchResponse.errorCode(topic, partition);
    if (code == ErrorMapping.OffsetOutOfRangeCode() && config.resetOffsetIfOutOfRange) {
      long startOffset = getOffset(topic, partition, config.startOffsetTime);
      offset = startOffset;
    }
    if (leaderBroker != null) {
      LOG.error("fetch data from kafka topic[" + config.topic + "] host[" + leaderBroker.host() + ":"
          + leaderBroker.port() + "] partition[" + partition + "] error:" + code);
    }
    return null;
  } else {
    return fetchResponse.messageSet(topic, partition);
  }
}