kafka.api.FetchRequest Java Examples
The following examples show how to use
kafka.api.FetchRequest.
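Before the individual examples, here is a minimal sketch of the pattern most of them follow: build a kafka.api.FetchRequest with FetchRequestBuilder, issue it through a SimpleConsumer, check the response for errors, and iterate the returned message set. The broker address (localhost:9092), topic name ("test"), partition, and client id below are placeholder assumptions for illustration, not values taken from any of the projects listed here.

import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;

import kafka.api.FetchRequest;
import kafka.api.FetchRequestBuilder;
import kafka.javaapi.FetchResponse;
import kafka.javaapi.consumer.SimpleConsumer;
import kafka.message.MessageAndOffset;

public class FetchRequestSketch {

    public static void main(String[] args) {
        String topic = "test";            // hypothetical topic
        int partition = 0;                // hypothetical partition
        String clientId = "example-client";

        // soTimeout = 100000 ms, bufferSize = 64 KB, as in several of the examples below
        SimpleConsumer consumer = new SimpleConsumer("localhost", 9092, 100000, 64 * 1024, clientId);
        try {
            // ask for up to 100000 bytes from the given partition, starting at offset 0
            FetchRequest request = new FetchRequestBuilder()
                    .clientId(clientId)
                    .addFetch(topic, partition, 0L, 100000)
                    .maxWait(1000)        // block up to 1 s if no data is available
                    .minBytes(1)          // respond as soon as at least one byte is available
                    .build();

            FetchResponse response = consumer.fetch(request);
            if (response.hasError()) {
                // a real consumer would inspect the error code and possibly find a new leader
                System.err.println("Fetch failed with error code " + response.errorCode(topic, partition));
                return;
            }

            for (MessageAndOffset messageAndOffset : response.messageSet(topic, partition)) {
                ByteBuffer payload = messageAndOffset.message().payload();
                byte[] bytes = new byte[payload.limit()];
                payload.get(bytes);
                System.out.println("offset " + messageAndOffset.offset() + ": " + new String(bytes, StandardCharsets.UTF_8));
            }
        } finally {
            consumer.close();
        }
    }
}

Note that this is the legacy "low-level" SimpleConsumer API from the Kafka 0.8.x era; later Kafka releases replaced it with the KafkaConsumer client.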
Example #1
Source File: KafkaLeaderReader.java From arcusplatform with Apache License 2.0 | 7 votes |
private int readNext() {
    FetchRequestBuilder requestBuilder = new FetchRequestBuilder()
            .clientId(clientId);
    for (Map.Entry<TopicAndPartition, Long> offset : offsets.entrySet()) {
        if (offset.getValue() == null) {
            logger.warn("Invalid offset for topic: [{}] partition: [{}]", offset.getKey().topic(), offset.getKey().partition());
        } else {
            requestBuilder.addFetch(offset.getKey().topic(), offset.getKey().partition(), offset.getValue(), fetchSize);
        }
    }
    FetchRequest request = requestBuilder.build();
    FetchResponse response = getKafkaConsumer().fetch(request);
    // FIXME handle errors / leader rebalances here
    return dispatch(response);
}
Example #2
Source File: DemoLowLevelConsumer.java From KafkaExample with Apache License 2.0 | 6 votes |
public static void main(String[] args) throws Exception {
    final String topic = "topic1";
    String clientID = "DemoLowLevelConsumer1";
    SimpleConsumer simpleConsumer = new SimpleConsumer("kafka0", 9092, 100000, 64 * 1000000, clientID);
    FetchRequest req = new FetchRequestBuilder().clientId(clientID)
            .addFetch(topic, 0, 0L, 50).addFetch(topic, 1, 0L, 5000).addFetch(topic, 2, 0L, 1000000).build();
    FetchResponse fetchResponse = simpleConsumer.fetch(req);
    ByteBufferMessageSet messageSet = (ByteBufferMessageSet) fetchResponse.messageSet(topic, 0);
    for (MessageAndOffset messageAndOffset : messageSet) {
        ByteBuffer payload = messageAndOffset.message().payload();
        long offset = messageAndOffset.offset();
        byte[] bytes = new byte[payload.limit()];
        payload.get(bytes);
        System.out.println("Offset:" + offset + ", Payload:" + new String(bytes, "UTF-8"));
    }
}
Example #3
Source File: KafkaPartitionReader.java From Scribengin with GNU Affero General Public License v3.0 | 6 votes |
void nextMessageSet() throws Exception {
    FetchRequest req = new FetchRequestBuilder().
            clientId(name).
            addFetch(topic, partitionMetadata.partitionId(), currentOffset, fetchSize).
            minBytes(1).
            maxWait(1000).
            build();
    FetchResponse fetchResponse = consumer.fetch(req);
    if (fetchResponse.hasError()) {
        throw new Exception("TODO: handle the error, reset the consumer....");
    }
    currentMessageSet = fetchResponse.messageSet(topic, partitionMetadata.partitionId());
    currentMessageSetIterator = currentMessageSet.iterator();
}
Example #4
Source File: KafkaSimpleConsumer.java From attic-apex-malhar with Apache License 2.0 | 6 votes |
@Override
public void run() {
    long offset = 0;
    while (isAlive) {
        // create a fetch request for topic "topic1", partition 1, current offset, and fetch size of 1MB
        FetchRequest fetchRequest = new FetchRequestBuilder().clientId("default_client").addFetch("topic1", 1, offset, 1000000).build();
        // FetchRequest fetchRequest = new FetchRequest("topic1", 0, offset, 1000000);
        // get the message set from the consumer and print them out
        ByteBufferMessageSet messages = consumer.fetch(fetchRequest).messageSet("topic1", 1);
        Iterator<MessageAndOffset> itr = messages.iterator();
        while (itr.hasNext() && isAlive) {
            MessageAndOffset msg = itr.next();
            // advance the offset after consuming each message
            offset = msg.offset();
            logger.debug("consumed: {} offset: {}", byteBufferToString(msg.message().payload()).toString(), offset);
            receiveCount++;
        }
    }
}
Example #5
Source File: KafkaLowLevelConsumer08.java From datacollector with Apache License 2.0 | 6 votes |
private FetchRequest buildFetchRequest(long offset) {
    //1. maxWaitTime is the maximum amount of time in milliseconds to block waiting if insufficient data is
    //   available at the time the request is issued.
    //2. minFetchSize is the minimum number of bytes of messages that must be available to give a response. If the
    //   client sets this to 0 the server will always respond immediately, however if there is no new data since their
    //   last request they will just get back empty message sets. If this is set to 1, the server will respond as soon
    //   as at least one partition has at least 1 byte of data or the specified timeout occurs. By setting higher
    //   values in combination with the timeout the consumer can tune for throughput and trade a little additional
    //   latency for reading only large chunks of data (e.g. setting MaxWaitTime to 100 ms and setting MinBytes to 64k
    //   would allow the server to wait up to 100ms to try to accumulate 64k of data before responding).
    //3. maxFetchSize is the maximum bytes to include in the message set for this partition.
    //   This helps bound the size of the response.
    LOG.info("Building fetch request with clientId {}, minBytes {}, maxWait {}, topic {}, partition {}, offset {}, " +
        "max fetch size {}.", clientName, minFetchSize, maxWaitTime, topic, partition, offset, maxFetchSize);
    return new FetchRequestBuilder()
        .clientId(clientName)
        .minBytes(minFetchSize)
        .maxWait(maxWaitTime)
        .addFetch(topic, partition, offset, maxFetchSize)
        .build();
}
Example #6
Source File: KafkaLowLevelConsumer09.java From datacollector with Apache License 2.0 | 6 votes |
private FetchRequest buildFetchRequest(long offset) {
    //1. maxWaitTime is the maximum amount of time in milliseconds to block waiting if insufficient data is
    //   available at the time the request is issued.
    //2. minFetchSize is the minimum number of bytes of messages that must be available to give a response. If the
    //   client sets this to 0 the server will always respond immediately, however if there is no new data since their
    //   last request they will just get back empty message sets. If this is set to 1, the server will respond as soon
    //   as at least one partition has at least 1 byte of data or the specified timeout occurs. By setting higher
    //   values in combination with the timeout the consumer can tune for throughput and trade a little additional
    //   latency for reading only large chunks of data (e.g. setting MaxWaitTime to 100 ms and setting MinBytes to 64k
    //   would allow the server to wait up to 100ms to try to accumulate 64k of data before responding).
    //3. maxFetchSize is the maximum bytes to include in the message set for this partition.
    //   This helps bound the size of the response.
    LOG.info("Building fetch request with clientId {}, minBytes {}, maxWait {}, topic {}, partition {}, offset {}, " +
        "max fetch size {}.", clientName, minFetchSize, maxWaitTime, topic, partition, offset, maxFetchSize);
    return new FetchRequestBuilder()
        .clientId(clientName)
        .minBytes(minFetchSize)
        .maxWait(maxWaitTime)
        .addFetch(topic, partition, offset, maxFetchSize)
        .build();
}
Example #7
Source File: KafkaComponent.java From metron with Apache License 2.0 | 6 votes |
public List<byte[]> readMessages(String topic) {
    SimpleConsumer consumer = new SimpleConsumer("localhost", 6667, 100000, 64 * 1024, "consumer");
    FetchRequest req = new FetchRequestBuilder()
            .clientId("consumer")
            .addFetch(topic, 0, 0, 100000)
            .build();
    FetchResponse fetchResponse = consumer.fetch(req);
    Iterator<MessageAndOffset> results = fetchResponse.messageSet(topic, 0).iterator();
    List<byte[]> messages = new ArrayList<>();
    while (results.hasNext()) {
        ByteBuffer payload = results.next().message().payload();
        byte[] bytes = new byte[payload.limit()];
        payload.get(bytes);
        messages.add(bytes);
    }
    consumer.close();
    return messages;
}
Example #8
Source File: KafkaRecordReader.java From kangaroo with Apache License 2.0 | 6 votes |
/**
 * THIS METHOD HAS SIDE EFFECTS - it will update {@code currentMessageItr} (if necessary) and then return true iff
 * the iterator still has elements to be read. If you call {@link scala.collection.Iterator#next()} when this method
 * returns false, you risk a {@link NullPointerException} OR a no-more-elements exception.
 *
 * @return true if you can call {@link scala.collection.Iterator#next()} on {@code currentMessageItr}.
 */
@VisibleForTesting
boolean continueItr() {
    final long remaining = end - currentOffset;
    if (!canCallNext() && remaining > 0) {
        final int theFetchSize = (fetchSize > remaining) ? (int) remaining : fetchSize;
        LOG.debug(String.format("%s fetching %d bytes starting at offset %d", split.toString(), theFetchSize, currentOffset));
        final FetchRequest request = new FetchRequest(split.getPartition().getTopic(), split.getPartition().getPartId(), currentOffset, theFetchSize);
        final ByteBufferMessageSet msg = consumer.fetch(request);
        final int errorCode = msg.getErrorCode();
        if (errorCode == ErrorMapping.OffsetOutOfRangeCode()) {
            return false;
        }
        if (errorCode != ErrorMapping.NoError()) {
            ErrorMapping.maybeThrowException(errorCode);
        } // --> else we try to grab the next iterator
        currentMessageItr = msg.iterator();
        currentOffset += msg.validBytes();
    }
    return canCallNext();
}
Example #9
Source File: SimpleConsumerDemo.java From javabase with Apache License 2.0 | 5 votes |
public static void main(String[] args) throws Exception {
    generateData();

    SimpleConsumer simpleConsumer = new SimpleConsumer(KafkaProperties.KAFKA_SERVER_URL,
            KafkaProperties.KAFKA_SERVER_PORT,
            KafkaProperties.CONNECTION_TIMEOUT,
            KafkaProperties.KAFKA_PRODUCER_BUFFER_SIZE,
            KafkaProperties.CLIENT_ID);

    System.out.println("Testing single fetch");
    FetchRequest req = new FetchRequestBuilder()
            .clientId(KafkaProperties.CLIENT_ID)
            .addFetch(KafkaProperties.TOPIC2, 0, 0L, 100)
            .build();
    FetchResponse fetchResponse = simpleConsumer.fetch(req);
    printMessages(fetchResponse.messageSet(KafkaProperties.TOPIC2, 0));

    System.out.println("Testing single multi-fetch");
    Map<String, List<Integer>> topicMap = new HashMap<String, List<Integer>>();
    topicMap.put(KafkaProperties.TOPIC2, Collections.singletonList(0));
    topicMap.put(KafkaProperties.TOPIC3, Collections.singletonList(0));
    req = new FetchRequestBuilder()
            .clientId(KafkaProperties.CLIENT_ID)
            .addFetch(KafkaProperties.TOPIC2, 0, 0L, 100)
            .addFetch(KafkaProperties.TOPIC3, 0, 0L, 100)
            .build();
    fetchResponse = simpleConsumer.fetch(req);
    int fetchReq = 0;
    for (Map.Entry<String, List<Integer>> entry : topicMap.entrySet()) {
        String topic = entry.getKey();
        for (Integer partition : entry.getValue()) {
            System.out.println("Response from fetch request no: " + ++fetchReq);
            printMessages(fetchResponse.messageSet(topic, partition));
        }
    }
}
Example #10
Source File: KafkaUtils.java From storm-kafka-0.8-plus with Apache License 2.0 | 5 votes |
public static ByteBufferMessageSet fetchMessages(KafkaConfig config, SimpleConsumer consumer, Partition partition, long offset) {
    ByteBufferMessageSet msgs = null;
    String topic = config.topic;
    int partitionId = partition.partition;
    for (int errors = 0; errors < 2 && msgs == null; errors++) {
        FetchRequestBuilder builder = new FetchRequestBuilder();
        FetchRequest fetchRequest = builder.addFetch(topic, partitionId, offset, config.fetchSizeBytes).
                clientId(config.clientId).build();
        FetchResponse fetchResponse;
        try {
            fetchResponse = consumer.fetch(fetchRequest);
        } catch (Exception e) {
            if (e instanceof ConnectException) {
                throw new FailedFetchException(e);
            } else {
                throw new RuntimeException(e);
            }
        }
        if (fetchResponse.hasError()) {
            KafkaError error = KafkaError.getError(fetchResponse.errorCode(topic, partitionId));
            if (error.equals(KafkaError.OFFSET_OUT_OF_RANGE) && config.useStartOffsetTimeIfOffsetOutOfRange && errors == 0) {
                long startOffset = getOffset(consumer, topic, partitionId, config.startOffsetTime);
                LOG.warn("Got fetch request with offset out of range: [" + offset + "]; " +
                        "retrying with default start offset time from configuration. " +
                        "configured start offset time: [" + config.startOffsetTime + "] offset: [" + startOffset + "]");
                offset = startOffset;
            } else {
                String message = "Error fetching data from [" + partition + "] for topic [" + topic + "]: [" + error + "]";
                LOG.error(message);
                throw new FailedFetchException(message);
            }
        } else {
            msgs = fetchResponse.messageSet(topic, partitionId);
        }
    }
    return msgs;
}
Example #11
Source File: KafkaPartitionReader.java From Scribengin with GNU Affero General Public License v3.0 | 5 votes |
public List<byte[]> execute() throws Exception {
    FetchRequest req = new FetchRequestBuilder().
            clientId(name).
            addFetch(topic, partitionMetadata.partitionId(), currentOffset, fetchSize).
            minBytes(1).
            maxWait(maxWait).
            build();
    FetchResponse fetchResponse = consumer.fetch(req);
    if (fetchResponse.hasError()) {
        short errorCode = fetchResponse.errorCode(topic, partitionMetadata.partitionId());
        String msg = "Kafka error code = " + errorCode + ", Partition " + partitionMetadata.partitionId();
        throw new Exception(msg);
    }
    List<byte[]> holder = new ArrayList<byte[]>();
    ByteBufferMessageSet messageSet = fetchResponse.messageSet(topic, partitionMetadata.partitionId());
    int count = 0;
    for (MessageAndOffset messageAndOffset : messageSet) {
        if (messageAndOffset.offset() < currentOffset) continue; // old offset, ignore
        ByteBuffer payload = messageAndOffset.message().payload();
        byte[] bytes = new byte[payload.limit()];
        payload.get(bytes);
        holder.add(bytes);
        currentOffset = messageAndOffset.nextOffset();
        count++;
        if (count == maxRead) break;
    }
    return holder;
}
Example #12
Source File: KafkaClientTest.java From elasticsearch-river-kafka with Apache License 2.0 | 5 votes |
public void testFetch() {
    expect(mockConsumer.fetch(anyObject(FetchRequest.class))).andReturn(new ByteBufferMessageSet(Collections.EMPTY_LIST));
    replay(mockConsumer, mockCurator);

    client.fetch("my_topic", 0, 1717, 1024);

    verify(mockConsumer, mockCurator);
}
Example #13
Source File: KafkaRecordReaderTest.java From kangaroo with Apache License 2.0 | 5 votes |
@Test
@SuppressWarnings("unchecked")
public void testContinueItrMultipleIterations() throws Exception {
    // init split
    doReturn(mockConsumer).when(reader).getConsumer(split, conf);
    split.setEndOffset(4097);
    reader.initialize(split, context);

    // first iteration
    final Iterator<MessageAndOffset> mockIterator1 = mock(Iterator.class);
    when(mockConsumer.fetch(any(FetchRequest.class))).thenReturn(mockMessage);
    when(mockMessage.getErrorCode()).thenReturn(ErrorMapping.NoError());
    when(mockMessage.iterator()).thenReturn(mockIterator1);
    when(mockMessage.validBytes()).thenReturn(2048L);
    when(mockIterator1.hasNext()).thenReturn(true);
    assertTrue("Should be able to continue iterator!", reader.continueItr());

    // reset iterator for second iteration
    when(mockIterator1.hasNext()).thenReturn(false);
    final Iterator<MessageAndOffset> mockIterator2 = mock(Iterator.class);
    when(mockMessage.iterator()).thenReturn(mockIterator2);
    when(mockIterator2.hasNext()).thenReturn(true);
    assertTrue("Should be able to continue iterator!", reader.continueItr());

    // reset iterator for third iteration
    when(mockIterator2.hasNext()).thenReturn(false);
    final Iterator<MessageAndOffset> mockIterator3 = mock(Iterator.class);
    when(mockMessage.iterator()).thenReturn(mockIterator3);
    when(mockIterator3.hasNext()).thenReturn(true);
    when(mockMessage.validBytes()).thenReturn(1L);
    assertTrue("Should be able to continue iterator!", reader.continueItr());

    // out of bytes to read
    when(mockIterator3.hasNext()).thenReturn(false);
    assertFalse("Should be done with split!", reader.continueItr());
}
Example #14
Source File: KafkaRecordReaderTest.java From kangaroo with Apache License 2.0 | 5 votes |
@Test
public void testContinueItr() throws Exception {
    doReturn(mockConsumer).when(reader).getConsumer(split, conf);
    // unfortunately, FetchRequest does not implement equals, so we have to do any(), and validate with answer
    when(mockConsumer.fetch(any(FetchRequest.class))).thenAnswer(new Answer<ByteBufferMessageSet>() {
        @Override
        public ByteBufferMessageSet answer(final InvocationOnMock invocation) throws Throwable {
            final FetchRequest request = (FetchRequest) invocation.getArguments()[0];
            assertEquals("topic", request.topic());
            assertEquals(0, request.partition());
            assertEquals(0, request.offset());
            assertEquals(100, request.maxSize());
            return mockMessage;
        }
    });
    when(mockMessage.getErrorCode()).thenReturn(ErrorMapping.NoError());
    when(mockMessage.iterator()).thenReturn(mockIterator);
    when(mockMessage.validBytes()).thenReturn(100L);
    when(mockIterator.hasNext()).thenReturn(true);
    reader.initialize(split, context);
    assertTrue("Should be able to continue iterator!", reader.continueItr());
    assertEquals(mockIterator, reader.getCurrentMessageItr());
    assertEquals(100, reader.getCurrentOffset());

    when(mockIterator.hasNext()).thenReturn(false);
    assertFalse("Should be done with split!", reader.continueItr());
    // call it again just for giggles
    assertFalse("Should be done with split!", reader.continueItr());
}
Example #15
Source File: KafkaRecordReaderTest.java From kangaroo with Apache License 2.0 | 5 votes |
@Test
public void testContinueItrOffsetOutOfRange() throws Exception {
    doReturn(mockConsumer).when(reader).getConsumer(split, conf);
    reader.initialize(split, context);
    when(mockConsumer.fetch(any(FetchRequest.class))).thenReturn(mockMessage);
    when(mockMessage.getErrorCode()).thenReturn(ErrorMapping.OffsetOutOfRangeCode());
    assertFalse("Should be done with split!", reader.continueItr());
}
Example #16
Source File: KafkaRecordReaderTest.java From kangaroo with Apache License 2.0 | 5 votes |
@Test(expected = Exception.class)
public void testContinueItrException() throws Exception {
    doReturn(mockConsumer).when(reader).getConsumer(split, conf);
    reader.initialize(split, context);
    when(mockConsumer.fetch(any(FetchRequest.class))).thenReturn(mockMessage);
    when(mockMessage.getErrorCode()).thenReturn(ErrorMapping.InvalidFetchSizeCode());
    reader.continueItr();
    fail();
}
Example #17
Source File: KafkaPartitionLevelConsumerTest.java From incubator-pinot with Apache License 2.0 | 5 votes |
@Override
public FetchResponse fetch(FetchRequest request) {
    scala.collection.Traversable<Tuple2<TopicAndPartition, PartitionFetchInfo>> requestInfo = request.requestInfo();

    java.util.Map<TopicAndPartition, Short> errorMap = new HashMap<>();

    while (requestInfo.headOption().isDefined()) {
        // jfim: IntelliJ erroneously thinks the following line is an incompatible type error, but it's only because
        // it doesn't understand scala covariance when called from Java (ie. it thinks head() is of type A even though
        // it's really of type Tuple2[TopicAndPartition, PartitionFetchInfo])
        Tuple2<TopicAndPartition, PartitionFetchInfo> t2 = requestInfo.head();
        TopicAndPartition topicAndPartition = t2._1();
        PartitionFetchInfo partitionFetchInfo = t2._2();
        if (!topicAndPartition.topic().equals(topicName)) {
            errorMap.put(topicAndPartition, Errors.UNKNOWN_TOPIC_OR_PARTITION.code());
        } else if (partitionLeaderIndices.length < topicAndPartition.partition()) {
            errorMap.put(topicAndPartition, Errors.UNKNOWN_TOPIC_OR_PARTITION.code());
        } else if (partitionLeaderIndices[topicAndPartition.partition()] != index) {
            errorMap.put(topicAndPartition, Errors.NOT_LEADER_FOR_PARTITION.code());
        } else {
            // Do nothing, we'll generate a fake message
        }

        requestInfo = requestInfo.tail();
    }

    return new MockFetchResponse(errorMap);
}
Example #18
Source File: AbstractExactlyOnceKafkaOutputOperator.java From attic-apex-malhar with Apache License 2.0 | 5 votes |
private void initializeLastProcessingOffset() {
    // read last received kafka message
    TopicMetadata tm = KafkaMetadataUtil.getTopicMetadata(Sets.newHashSet((String)getConfigProperties().get(KafkaMetadataUtil.PRODUCER_PROP_BROKERLIST)), this.getTopic());

    if (tm == null) {
        throw new RuntimeException("Failed to retrieve topic metadata");
    }

    partitionNum = tm.partitionsMetadata().size();

    lastMsgs = new HashMap<Integer, Pair<byte[],byte[]>>(partitionNum);

    for (PartitionMetadata pm : tm.partitionsMetadata()) {

        String leadBroker = pm.leader().host();
        int port = pm.leader().port();
        String clientName = this.getClass().getName().replace('$', '.') + "_Client_" + tm.topic() + "_" + pm.partitionId();
        SimpleConsumer consumer = new SimpleConsumer(leadBroker, port, 100000, 64 * 1024, clientName);

        long readOffset = KafkaMetadataUtil.getLastOffset(consumer, tm.topic(), pm.partitionId(), kafka.api.OffsetRequest.LatestTime(), clientName);

        FetchRequest req = new FetchRequestBuilder().clientId(clientName).addFetch(tm.topic(), pm.partitionId(), readOffset - 1, 100000).build();

        FetchResponse fetchResponse = consumer.fetch(req);
        for (MessageAndOffset messageAndOffset : fetchResponse.messageSet(tm.topic(), pm.partitionId())) {
            Message m = messageAndOffset.message();
            ByteBuffer payload = m.payload();
            ByteBuffer key = m.key();
            byte[] valueBytes = new byte[payload.limit()];
            byte[] keyBytes = new byte[key.limit()];
            payload.get(valueBytes);
            key.get(keyBytes);
            lastMsgs.put(pm.partitionId(), new Pair<byte[], byte[]>(keyBytes, valueBytes));
        }
    }
}
Example #19
Source File: KafkaMessageReceiverImpl.java From message-queue-client-framework with Apache License 2.0 | 5 votes |
/**
 * Check the leader.
 *
 * @param a_topic       topic name
 * @param a_partition   partition number
 * @param a_beginOffset begin offset
 * @return boolean
 */
private boolean checkLeader(String a_topic, int a_partition, long a_beginOffset) {

    if (checkConsumer(a_topic, a_partition)) {

        FetchRequest req = new FetchRequestBuilder()
                .clientId(pool.getClientId())
                .addFetch(a_topic, a_partition, a_beginOffset, KafkaConstants.FETCH_SIZE).build();
        fetchResponse = consumer.get().fetch(req);
        String leadHost = metadata.leader().host();

        if (fetchResponse.hasError()) {

            // Something went wrong!
            short code = fetchResponse.errorCode(a_topic, a_partition);
            logger.error("Error fetching data from the Broker:" + leadHost + " Reason: " + code);

            if (code == ErrorMapping.OffsetOutOfRangeCode()) {

                // We asked for an invalid offset. For simple case ask for
                // the last element to reset
                a_beginOffset = getLatestOffset(a_topic, a_partition);
            }

            consumer.get().close();
            consumer.set(null);

            try {
                metadata = findNewLeader(leadHost, a_topic, a_partition);
            } catch (MQException e) {
                logger.error("Find new leader failed.", e);
            }

            return false;
        }

        return true;
    }

    return false;
}
Example #20
Source File: PulsarKafkaSimpleConsumer.java From pulsar with Apache License 2.0 | 5 votes |
private Map<String, Reader<byte[]>> createTopicReaders(FetchRequest request) {
    Map<String, Reader<byte[]>> topicReaderMap = Maps.newHashMap();
    scala.collection.immutable.Map<String, scala.collection.immutable.Map<TopicAndPartition, PartitionFetchInfo>> reqInfo = request
            .requestInfoGroupedByTopic();
    Map<String, scala.collection.immutable.Map<TopicAndPartition, PartitionFetchInfo>> topicPartitionMap = scala.collection.JavaConverters
            .mapAsJavaMapConverter(reqInfo).asJava();
    for (Entry<String, scala.collection.immutable.Map<TopicAndPartition, PartitionFetchInfo>> topicPartition : topicPartitionMap
            .entrySet()) {
        final String topicName = topicPartition.getKey();
        Map<TopicAndPartition, PartitionFetchInfo> topicOffsetMap = scala.collection.JavaConverters
                .mapAsJavaMapConverter(topicPartition.getValue()).asJava();
        if (topicOffsetMap != null && !topicOffsetMap.isEmpty()) {
            // pulsar-kafka adapter doesn't deal with partition so, assuming only 1 topic-metadata per topic name
            Entry<TopicAndPartition, PartitionFetchInfo> topicOffset = topicOffsetMap.entrySet().iterator().next();
            long offset = topicOffset.getValue().offset();
            String topic = getTopicName(topicOffset.getKey());
            MessageId msgId = getMessageId(offset);
            try {
                Reader<byte[]> reader = client.newReader().readerName(clientId).topic(topic).startMessageId(msgId)
                        .create();
                log.info("Successfully created reader for {} at msg-id {}", topic, msgId);
                topicReaderMap.put(topicName, reader);
            } catch (PulsarClientException e) {
                log.warn("Failed to create reader for topic {}", topic, e);
                throw new RuntimeException("Failed to create reader for " + topic, e);
            }
        }
    }
    return topicReaderMap;
}
Example #21
Source File: PulsarKafkaSimpleConsumer.java From pulsar with Apache License 2.0 | 5 votes |
@Override
public PulsarFetchResponse fetch(FetchRequest request) {
    try {
        Map<String, Reader<byte[]>> topicReaderMap = createTopicReaders(request);
        return new PulsarFetchResponse(topicReaderMap, false);
    } catch (Exception e) {
        log.warn("Failed to process fetch request{}, {}", request, e.getMessage());
        return new PulsarFetchResponse(null, true);
    }
}
Example #22
Source File: SimpleKafkaConsumer.java From twill with Apache License 2.0 | 5 votes |
/**
 * Makes a call to kafka to fetch messages.
 */
private FetchResponse fetchMessages(SimpleConsumer consumer, long offset) {
    FetchRequest request = new FetchRequestBuilder()
            .clientId(consumer.clientId())
            .addFetch(topicPart.getTopic(), topicPart.getPartition(), offset, FETCH_SIZE)
            .maxWait(MAX_WAIT)
            .build();
    return consumer.fetch(request);
}
Example #23
Source File: KafkaPartitionLevelConsumerTest.java From incubator-pinot with Apache License 2.0 | 4 votes |
@Override
public FetchResponse fetch(kafka.javaapi.FetchRequest request) {
    throw new RuntimeException("Unimplemented");
}
Example #24
Source File: KafkaSimpleConsumer.java From Pistachio with Apache License 2.0 | 4 votes |
public Iterable<BytesMessageWithOffset> fetch(long offset, int timeoutMs) throws InterruptedException {
    List<BytesMessageWithOffset> newOffsetMsg = new ArrayList<BytesMessageWithOffset>();
    FetchResponse response = null;
    Broker previousLeader = leaderBroker;
    while (true) {
        ensureConsumer(previousLeader);

        if (offset == Long.MAX_VALUE) {
            offset = getOffset(false);
            logger.info("offset max long, fetch from latest in kafka {}", offset);
        }

        FetchRequest request = new FetchRequestBuilder()
                .clientId(clientId)
                .addFetch(topic, partitionId, offset, 100000000)
                .maxWait(timeoutMs)
                .minBytes(1)
                .build();

        //logger.debug("fetch offset {}", offset);

        try {
            response = consumer.fetch(request);
        } catch (Exception e) {
            // e could be an instance of ClosedByInterruptException as SimpleConsumer.fetch uses nio
            if (Thread.interrupted()) {
                logger.info("catch exception of {} with interrupted in fetch for {} - {} with offset {}",
                        e.getClass().getName(), topic, partitionId, offset);
                throw new InterruptedException();
            }
            logger.warn("caught exception in fetch {} - {}", topic, partitionId, e);
            response = null;
        }

        if (response == null || response.hasError()) {
            short errorCode = response != null ? response.errorCode(topic, partitionId) : ErrorMapping.UnknownCode();
            logger.warn("fetch {} - {} with offset {} encounters error: {}", topic, partitionId, offset, errorCode);

            boolean needNewLeader = false;
            if (errorCode == ErrorMapping.RequestTimedOutCode()) {
                //TODO: leave it here
            } else if (errorCode == ErrorMapping.OffsetOutOfRangeCode()) {
                //TODO: fetch the earliest offset or latest offset ?
                // seems no obvious correct way to handle it
                long earliestOffset = getOffset(true);
                logger.debug("get earliest offset {} for {} - {}", earliestOffset, topic, partitionId);
                if (earliestOffset < 0) {
                    needNewLeader = true;
                } else {
                    newOffsetMsg.add(new BytesMessageWithOffset(null, earliestOffset));
                    offset = earliestOffset;
                    continue;
                }
            } else {
                needNewLeader = true;
            }

            if (needNewLeader) {
                stopConsumer();
                previousLeader = leaderBroker;
                leaderBroker = null;
                continue;
            }
        } else {
            break;
        }
    }

    return response != null
            ? filterAndDecode(response.messageSet(topic, partitionId), offset)
            : (newOffsetMsg.size() > 0 ? newOffsetMsg : EMPTY_MSGS);
}
Example #25
Source File: KafkaLowLevelConsumer09.java From datacollector with Apache License 2.0 | 4 votes |
@Override
public List<MessageAndOffset> read(long offset) throws StageException {

    FetchRequest req = buildFetchRequest(offset);
    FetchResponse fetchResponse;
    try {
        fetchResponse = consumer.fetch(req);
    } catch (Exception e) {
        if (e instanceof SocketTimeoutException) {
            //If the value of consumer.timeout.ms is set to a positive integer, a timeout exception is thrown to the
            //consumer if no message is available for consumption after the specified timeout value.
            //If this happens exit gracefully
            LOG.warn(KafkaErrors.KAFKA_28.getMessage());
            return Collections.emptyList();
        } else {
            throw new StageException(KafkaErrors.KAFKA_29, e.toString(), e);
        }
    }

    if (fetchResponse.hasError()) {
        short code = fetchResponse.errorCode(topic, partition);
        if (code == ErrorMapping.OffsetOutOfRangeCode()) {
            //invalid offset
            offset = getLastOffset(consumer, topic, partition, kafka.api.OffsetRequest.LatestTime(), clientName);
        } else {
            //try re-initializing connection with kafka
            consumer.close();
            consumer = null;
            leader = findNewLeader(leader, topic, partition);
        }

        //re-fetch
        req = buildFetchRequest(offset);
        fetchResponse = consumer.fetch(req);

        if (fetchResponse.hasError()) {
            //could not fetch the second time, give kafka some time
            LOG.error(KafkaErrors.KAFKA_26.getMessage(), topic, partition, offset);
        }
    }

    List<MessageAndOffset> partitionToPayloadMapArrayList = new ArrayList<>();
    for (kafka.message.MessageAndOffset messageAndOffset : fetchResponse.messageSet(topic, partition)) {
        long currentOffset = messageAndOffset.offset();
        if (currentOffset < offset) {
            LOG.warn(KafkaErrors.KAFKA_27.getMessage(), currentOffset, offset);
            continue;
        }
        ByteBuffer payload = messageAndOffset.message().payload();
        final Object key = messageAndOffset.message().key();
        byte[] bytes = new byte[payload.limit()];
        payload.get(bytes);
        MessageAndOffset partitionToPayloadMap = new MessageAndOffset(key, bytes, messageAndOffset.nextOffset(), partition);
        partitionToPayloadMapArrayList.add(partitionToPayloadMap);
    }
    return partitionToPayloadMapArrayList;
}
Example #26
Source File: KafkaLowLevelConsumer08.java From datacollector with Apache License 2.0 | 4 votes |
@Override
public List<MessageAndOffset> read(long offset) throws StageException {

    FetchRequest req = buildFetchRequest(offset);
    FetchResponse fetchResponse;
    try {
        fetchResponse = consumer.fetch(req);
    } catch (Exception e) {
        if (e instanceof SocketTimeoutException) {
            //If the value of consumer.timeout.ms is set to a positive integer, a timeout exception is thrown to the
            //consumer if no message is available for consumption after the specified timeout value.
            //If this happens exit gracefully
            LOG.warn(KafkaErrors.KAFKA_28.getMessage());
            return Collections.emptyList();
        } else {
            throw new StageException(KafkaErrors.KAFKA_29, e.toString(), e);
        }
    }

    if (fetchResponse.hasError()) {
        short code = fetchResponse.errorCode(topic, partition);
        if (code == ErrorMapping.OffsetOutOfRangeCode()) {
            //invalid offset
            offset = getLastOffset(consumer, topic, partition, kafka.api.OffsetRequest.LatestTime(), clientName);
        } else {
            //try re-initializing connection with kafka
            consumer.close();
            consumer = null;
            leader = findNewLeader(leader, topic, partition);
        }

        //re-fetch
        req = buildFetchRequest(offset);
        fetchResponse = consumer.fetch(req);

        if (fetchResponse.hasError()) {
            //could not fetch the second time, give kafka some time
            LOG.error(KafkaErrors.KAFKA_26.getMessage(), topic, partition, offset);
        }
    }

    List<MessageAndOffset> partitionToPayloadMapArrayList = new ArrayList<>();
    for (kafka.message.MessageAndOffset messageAndOffset : fetchResponse.messageSet(topic, partition)) {
        long currentOffset = messageAndOffset.offset();
        if (currentOffset < offset) {
            LOG.warn(KafkaErrors.KAFKA_27.getMessage(), currentOffset, offset);
            continue;
        }
        ByteBuffer payload = messageAndOffset.message().payload();
        final Object key = messageAndOffset.message().key();
        byte[] bytes = new byte[payload.limit()];
        payload.get(bytes);
        MessageAndOffset partitionToPayloadMap = new MessageAndOffset(key, bytes, messageAndOffset.nextOffset(), partition);
        partitionToPayloadMapArrayList.add(partitionToPayloadMap);
    }
    return partitionToPayloadMapArrayList;
}
Example #27
Source File: KafkaClient.java From elasticsearch-river-kafka with Apache License 2.0 | 4 votes |
ByteBufferMessageSet fetch(String topic, int partition, long offset, int maxSizeBytes) {
    return consumer.fetch(new FetchRequest(topic, partition, offset, maxSizeBytes));
}
Example #28
Source File: KafkaConsumer.java From jstorm with Apache License 2.0 | 4 votes |
public ByteBufferMessageSet fetchMessages(int partition, long offset) throws IOException {

    String topic = config.topic;
    FetchRequest req = new FetchRequestBuilder().clientId(config.clientId).addFetch(topic, partition, offset, config.fetchMaxBytes)
            .maxWait(config.fetchWaitMaxMs).build();
    FetchResponse fetchResponse = null;
    SimpleConsumer simpleConsumer = null;
    try {
        simpleConsumer = findLeaderConsumer(partition);
        if (simpleConsumer == null) {
            // LOG.error(message);
            return null;
        }
        fetchResponse = simpleConsumer.fetch(req);
    } catch (Exception e) {
        if (e instanceof ConnectException || e instanceof SocketTimeoutException || e instanceof IOException
                || e instanceof UnresolvedAddressException) {
            LOG.warn("Network error when fetching messages:", e);
            if (simpleConsumer != null) {
                String host = simpleConsumer.host();
                int port = simpleConsumer.port();
                simpleConsumer = null;
                throw new KafkaException("Network error when fetching messages: " + host + ":" + port + " , " + e.getMessage(), e);
            }
        } else {
            throw new RuntimeException(e);
        }
    }
    if (fetchResponse.hasError()) {
        short code = fetchResponse.errorCode(topic, partition);
        if (code == ErrorMapping.OffsetOutOfRangeCode() && config.resetOffsetIfOutOfRange) {
            long startOffset = getOffset(topic, partition, config.startOffsetTime);
            offset = startOffset;
        }
        if (leaderBroker != null) {
            LOG.error("fetch data from kafka topic[" + config.topic + "] host[" + leaderBroker.host() + ":" + leaderBroker.port()
                    + "] partition[" + partition + "] error:" + code);
        }
        return null;
    } else {
        ByteBufferMessageSet msgs = fetchResponse.messageSet(topic, partition);
        return msgs;
    }
}