kafka.message.MessageAndOffset Java Examples
The following examples show how to use kafka.message.MessageAndOffset, the message/offset pair you get when iterating over a ByteBufferMessageSet fetched through Kafka's low-level SimpleConsumer API. Each example comes from an open-source project; the source file and license are noted above each snippet.
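Most of the examples follow the same fetch-and-iterate pattern. As a quick orientation, here is a minimal, self-contained sketch of that pattern; the broker address (localhost:9092), topic name ("test"), and client id are illustrative assumptions, not values taken from the projects below.

import java.nio.ByteBuffer;

import kafka.api.FetchRequest;
import kafka.api.FetchRequestBuilder;
import kafka.javaapi.FetchResponse;
import kafka.javaapi.consumer.SimpleConsumer;
import kafka.message.MessageAndOffset;

public class MessageAndOffsetSketch {
    public static void main(String[] args) throws Exception {
        SimpleConsumer consumer = new SimpleConsumer("localhost", 9092, 100000, 64 * 1024, "demo-client");
        try {
            FetchRequest request = new FetchRequestBuilder()
                    .clientId("demo-client")
                    .addFetch("test", 0, 0L, 100000) // topic, partition, start offset, max bytes
                    .build();
            FetchResponse response = consumer.fetch(request);
            for (MessageAndOffset messageAndOffset : response.messageSet("test", 0)) {
                ByteBuffer payload = messageAndOffset.message().payload();
                byte[] bytes = new byte[payload.limit()];
                payload.get(bytes);
                System.out.println(messageAndOffset.offset() + ": " + new String(bytes, "UTF-8"));
                // offset() identifies this message; nextOffset() is where the next fetch should start.
                long nextFetchOffset = messageAndOffset.nextOffset();
            }
        } finally {
            consumer.close();
        }
    }
}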
Example #1
Source File: KafkaBoltTest.java, from storm-kafka-0.8-plus (Apache License 2.0)

private boolean verifyMessage(String key, String message) {
    long lastMessageOffset = KafkaUtils.getOffset(simpleConsumer, kafkaConfig.topic, 0, OffsetRequest.LatestTime()) - 1;
    ByteBufferMessageSet messageAndOffsets = KafkaUtils.fetchMessages(kafkaConfig, simpleConsumer,
            new Partition(Broker.fromString(broker.getBrokerConnectionString()), 0), lastMessageOffset);
    MessageAndOffset messageAndOffset = messageAndOffsets.iterator().next();
    Message kafkaMessage = messageAndOffset.message();
    ByteBuffer messageKeyBuffer = kafkaMessage.key();
    String keyString = null;
    String messageString = new String(Utils.toByteArray(kafkaMessage.payload()));
    if (messageKeyBuffer != null) {
        keyString = new String(Utils.toByteArray(messageKeyBuffer));
    }
    assertEquals(key, keyString);
    assertEquals(message, messageString);
    return true;
}
Example #2
Source File: Kafka08ConsumerClient.java, from incubator-gobblin (Apache License 2.0)

private Iterator<KafkaConsumerRecord> getIteratorFromFetchResponse(FetchResponse fetchResponse, KafkaPartition partition) {
    try {
        ByteBufferMessageSet messageBuffer = fetchResponse.messageSet(partition.getTopicName(), partition.getId());
        return Iterators.transform(messageBuffer.iterator(),
                new Function<kafka.message.MessageAndOffset, KafkaConsumerRecord>() {
                    @Override
                    public KafkaConsumerRecord apply(kafka.message.MessageAndOffset input) {
                        return new Kafka08ConsumerRecord(input, partition.getTopicName(), partition.getId());
                    }
                });
    } catch (Exception e) {
        log.warn(String.format("Failed to retrieve next message buffer for partition %s: %s. "
                + "The remainder of this partition will be skipped.", partition, e));
        return null;
    }
}
Example #3
Source File: KafkaPartitionLevelConsumer.java, from incubator-pinot (Apache License 2.0)

private Iterable<MessageAndOffset> buildOffsetFilteringIterable(final ByteBufferMessageSet messageAndOffsets,
        final long startOffset, final long endOffset) {
    return Iterables.filter(messageAndOffsets, input -> {
        // Filter out messages that are either null or have an offset outside of [startOffset, endOffset)
        if (input == null || input.offset() < startOffset || (endOffset <= input.offset() && endOffset != -1)) {
            return false;
        }
        // Check the message's checksum
        // TODO We might want to have better handling of this situation, maybe try to fetch the message again?
        if (!input.message().isValid()) {
            LOGGER.warn("Discarded message with invalid checksum in partition {} of topic {}", _partition, _topic);
            return false;
        }
        return true;
    });
}
Example #4
Source File: KafkaWrapper.java, from incubator-gobblin (Apache License 2.0)

@Override
protected Iterator<MessageAndOffset> fetchNextMessageBuffer(KafkaPartition partition, long nextOffset, long maxOffset) {
    if (nextOffset > maxOffset) {
        return null;
    }
    FetchRequest fetchRequest = createFetchRequest(partition, nextOffset);
    try {
        FetchResponse fetchResponse = getFetchResponseForFetchRequest(fetchRequest, partition);
        return getIteratorFromFetchResponse(fetchResponse, partition);
    } catch (Exception e) {
        LOG.warn(String.format(
                "Fetch message buffer for partition %s has failed: %s. Will refresh topic metadata and retry",
                partition, e));
        return refreshTopicMetadataAndRetryFetch(partition, fetchRequest);
    }
}
Example #5
Source File: KafkaComponent.java, from metron (Apache License 2.0)

public List<byte[]> readMessages(String topic) {
    SimpleConsumer consumer = new SimpleConsumer("localhost", 6667, 100000, 64 * 1024, "consumer");
    FetchRequest req = new FetchRequestBuilder()
            .clientId("consumer")
            .addFetch(topic, 0, 0, 100000)
            .build();
    FetchResponse fetchResponse = consumer.fetch(req);
    Iterator<MessageAndOffset> results = fetchResponse.messageSet(topic, 0).iterator();
    List<byte[]> messages = new ArrayList<>();
    while (results.hasNext()) {
        ByteBuffer payload = results.next().message().payload();
        byte[] bytes = new byte[payload.limit()];
        payload.get(bytes);
        messages.add(bytes);
    }
    consumer.close();
    return messages;
}
Example #6
Source File: KafkaSimpleConsumer.java, from attic-apex-malhar (Apache License 2.0)

@Override
public void run() {
    long offset = 0;
    while (isAlive) {
        // create a fetch request for topic "topic1", partition 1, current offset, and fetch size of 1MB
        FetchRequest fetchRequest = new FetchRequestBuilder().clientId("default_client")
                .addFetch("topic1", 1, offset, 1000000).build();
        // FetchRequest fetchRequest = new FetchRequest("topic1", 0, offset, 1000000);
        // get the message set from the consumer and print them out
        ByteBufferMessageSet messages = consumer.fetch(fetchRequest).messageSet("topic1", 1);
        Iterator<MessageAndOffset> itr = messages.iterator();
        while (itr.hasNext() && isAlive) {
            MessageAndOffset msg = itr.next();
            // advance the offset after consuming each message
            offset = msg.offset();
            logger.debug("consumed: {} offset: {}", byteBufferToString(msg.message().payload()), offset);
            receiveCount++;
        }
    }
}
Example #7
Source File: TridentKafkaEmitter.java, from storm-kafka-0.8-plus (Apache License 2.0)

/**
 * Re-emit the batch described by the metadata provided.
 *
 * @param attempt
 * @param collector
 * @param partition
 * @param meta
 */
private void reEmitPartitionBatch(TransactionAttempt attempt, TridentCollector collector, Partition partition, Map meta) {
    LOG.info("re-emitting batch, attempt " + attempt);
    String instanceId = (String) meta.get("instanceId");
    if (!_config.forceFromStart || instanceId.equals(_topologyInstanceId)) {
        SimpleConsumer consumer = _connections.register(partition);
        long offset = (Long) meta.get("offset");
        long nextOffset = (Long) meta.get("nextOffset");
        ByteBufferMessageSet msgs = fetchMessages(consumer, partition, offset);
        for (MessageAndOffset msg : msgs) {
            if (offset == nextOffset) {
                break;
            }
            if (offset > nextOffset) {
                throw new RuntimeException("Error when re-emitting batch. overshot the end offset");
            }
            emit(collector, msg.message());
            offset = msg.nextOffset();
        }
    }
}
Example #8
Source File: PartitionManager.java, from storm-kafka-0.8-plus (Apache License 2.0)

private void fill() {
    long start = System.nanoTime();
    ByteBufferMessageSet msgs = KafkaUtils.fetchMessages(_spoutConfig, _consumer, _partition, _emittedToOffset);
    long end = System.nanoTime();
    long millis = (end - start) / 1000000;
    _fetchAPILatencyMax.update(millis);
    _fetchAPILatencyMean.update(millis);
    _fetchAPICallCount.incr();
    int numMessages = countMessages(msgs);
    _fetchAPIMessageCount.incrBy(numMessages);
    if (numMessages > 0) {
        LOG.info("Fetched " + numMessages + " messages from: " + _partition);
    }
    for (MessageAndOffset msg : msgs) {
        _pending.add(_emittedToOffset);
        _waitingToEmit.add(new MessageAndRealOffset(msg.message(), _emittedToOffset));
        _emittedToOffset = msg.nextOffset();
    }
    if (numMessages > 0) {
        LOG.info("Added " + numMessages + " messages from: " + _partition + " to internal buffers");
    }
}
Example #9
Source File: DemoLowLevelConsumer.java, from KafkaExample (Apache License 2.0)

public static void main(String[] args) throws Exception {
    final String topic = "topic1";
    String clientID = "DemoLowLevelConsumer1";
    SimpleConsumer simpleConsumer = new SimpleConsumer("kafka0", 9092, 100000, 64 * 1000000, clientID);
    FetchRequest req = new FetchRequestBuilder().clientId(clientID)
            .addFetch(topic, 0, 0L, 50)
            .addFetch(topic, 1, 0L, 5000)
            .addFetch(topic, 2, 0L, 1000000)
            .build();
    FetchResponse fetchResponse = simpleConsumer.fetch(req);
    ByteBufferMessageSet messageSet = (ByteBufferMessageSet) fetchResponse.messageSet(topic, 0);
    for (MessageAndOffset messageAndOffset : messageSet) {
        ByteBuffer payload = messageAndOffset.message().payload();
        long offset = messageAndOffset.offset();
        byte[] bytes = new byte[payload.limit()];
        payload.get(bytes);
        System.out.println("Offset:" + offset + ", Payload:" + new String(bytes, "UTF-8"));
    }
}
Example #10
Source File: SimpleKafkaConsumer.java, from twill (Apache License 2.0)

/**
 * Creates an Iterator of FetchedMessage based on the given message set. The iterator also updates
 * the offset while iterating.
 */
private Iterator<FetchedMessage> createFetchedMessages(ByteBufferMessageSet messageSet, final AtomicLong offset) {
    final Iterator<MessageAndOffset> messages = messageSet.iterator();
    return new AbstractIterator<FetchedMessage>() {
        @Override
        protected FetchedMessage computeNext() {
            while (messages.hasNext()) {
                MessageAndOffset message = messages.next();
                long msgOffset = message.offset();
                if (msgOffset < offset.get()) {
                    LOG.trace("Received old offset {}, expecting {} on {}. Message Ignored.",
                            msgOffset, offset.get(), topicPart);
                    continue;
                }
                fetchedMessage.setPayload(message.message().payload());
                fetchedMessage.setOffset(message.offset());
                fetchedMessage.setNextOffset(message.nextOffset());
                return fetchedMessage;
            }
            return endOfData();
        }
    };
}
Example #11
Source File: PartitionManager.java, from storm-kafka-0.8-plus (Apache License 2.0)

private int countMessages(ByteBufferMessageSet messageSet) {
    int counter = 0;
    for (MessageAndOffset messageAndOffset : messageSet) {
        counter = counter + 1;
    }
    return counter;
}
Example #12
Source File: KafkaUtilsTest.java, from storm-kafka-0.8-plus (Apache License 2.0)

@Test
public void generateTuplesWithKeyAndKeyValueScheme() {
    config.scheme = new KeyValueSchemeAsMultiScheme(new StringKeyValueScheme());
    String value = "value";
    String key = "key";
    createTopicAndSendMessage(key, value);
    ByteBufferMessageSet messageAndOffsets = getLastMessage();
    for (MessageAndOffset msg : messageAndOffsets) {
        Iterable<List<Object>> lists = KafkaUtils.generateTuples(config, msg.message());
        assertEquals(ImmutableMap.of(key, value), lists.iterator().next().get(0));
    }
}
Example #13
Source File: TridentKafkaEmitter.java, from storm-kafka-0.8-plus (Apache License 2.0)

private Map doEmitNewPartitionBatch(SimpleConsumer consumer, Partition partition, TridentCollector collector, Map lastMeta) {
    long offset;
    if (lastMeta != null) {
        String lastInstanceId = null;
        Map lastTopoMeta = (Map) lastMeta.get("topology");
        if (lastTopoMeta != null) {
            lastInstanceId = (String) lastTopoMeta.get("id");
        }
        if (_config.forceFromStart && !_topologyInstanceId.equals(lastInstanceId)) {
            offset = KafkaUtils.getOffset(consumer, _config.topic, partition.partition, _config.startOffsetTime);
        } else {
            offset = (Long) lastMeta.get("nextOffset");
        }
    } else {
        offset = KafkaUtils.getOffset(consumer, _config.topic, partition.partition, _config);
    }
    ByteBufferMessageSet msgs = fetchMessages(consumer, partition, offset);
    long endoffset = offset;
    for (MessageAndOffset msg : msgs) {
        emit(collector, msg.message());
        endoffset = msg.nextOffset();
    }
    Map newMeta = new HashMap();
    newMeta.put("offset", offset);
    newMeta.put("nextOffset", endoffset);
    newMeta.put("instanceId", _topologyInstanceId);
    newMeta.put("partition", partition.partition);
    newMeta.put("broker", ImmutableMap.of("host", partition.host.host, "port", partition.host.port));
    newMeta.put("topic", _config.topic);
    newMeta.put("topology", ImmutableMap.of("name", _topologyName, "id", _topologyInstanceId));
    return newMeta;
}
Example #14
Source File: PartitionConsumer.java, from jstorm (Apache License 2.0)

private void fillMessages() {
    ByteBufferMessageSet msgs;
    try {
        long start = System.currentTimeMillis();
        msgs = consumer.fetchMessages(partition, emittingOffset + 1);
        if (msgs == null) {
            LOG.error("fetch null message from offset {}", emittingOffset);
            return;
        }
        int count = 0;
        for (MessageAndOffset msg : msgs) {
            count += 1;
            emittingMessages.add(msg);
            emittingOffset = msg.offset();
            pendingOffsets.add(emittingOffset);
            LOG.debug("fillmessage fetched a message:{}, offset:{}", msg.message().toString(), msg.offset());
        }
        long end = System.currentTimeMillis();
        LOG.info("fetch message from partition:" + partition + ", offset:" + emittingOffset
                + ", size:" + msgs.sizeInBytes() + ", count:" + count + ", time:" + (end - start));
    } catch (Exception e) {
        e.printStackTrace();
        LOG.error(e.getMessage(), e);
    }
}
Example #15
Source File: PartitionConsumer.java, from jstorm (Apache License 2.0)

public EmitState emit(SpoutOutputCollector collector) {
    if (emittingMessages.isEmpty()) {
        fillMessages();
    }
    int count = 0;
    while (true) {
        MessageAndOffset toEmitMsg = emittingMessages.pollFirst();
        if (toEmitMsg == null) {
            return EmitState.EMIT_END;
        }
        count++;
        Iterable<List<Object>> tups = generateTuples(toEmitMsg.message());
        if (tups != null) {
            for (List<Object> tuple : tups) {
                LOG.debug("emit message {}", new String(Utils.toByteArray(toEmitMsg.message().payload())));
                collector.emit(tuple, new KafkaMessageId(partition, toEmitMsg.offset()));
            }
            if (count >= config.batchSendCount) {
                break;
            }
        } else {
            ack(toEmitMsg.offset());
        }
    }
    if (emittingMessages.isEmpty()) {
        return EmitState.EMIT_END;
    } else {
        return EmitState.EMIT_MORE;
    }
}
Example #16
Source File: KafkaPartitionReader.java, from Scribengin (GNU Affero General Public License v3.0)

public List<byte[]> execute() throws Exception {
    FetchRequest req = new FetchRequestBuilder()
            .clientId(name)
            .addFetch(topic, partitionMetadata.partitionId(), currentOffset, fetchSize)
            .minBytes(1)
            .maxWait(maxWait)
            .build();
    FetchResponse fetchResponse = consumer.fetch(req);
    if (fetchResponse.hasError()) {
        short errorCode = fetchResponse.errorCode(topic, partitionMetadata.partitionId());
        String msg = "Kafka error code = " + errorCode + ", Partition " + partitionMetadata.partitionId();
        throw new Exception(msg);
    }
    List<byte[]> holder = new ArrayList<byte[]>();
    ByteBufferMessageSet messageSet = fetchResponse.messageSet(topic, partitionMetadata.partitionId());
    int count = 0;
    for (MessageAndOffset messageAndOffset : messageSet) {
        if (messageAndOffset.offset() < currentOffset) continue; // old offset, ignore
        ByteBuffer payload = messageAndOffset.message().payload();
        byte[] bytes = new byte[payload.limit()];
        payload.get(bytes);
        holder.add(bytes);
        currentOffset = messageAndOffset.nextOffset();
        count++;
        if (count == maxRead) break;
    }
    return holder;
}
Example #17
Source File: KafkaPartitionReader.java, from Scribengin (GNU Affero General Public License v3.0)

byte[] getCurrentMessagePayload() {
    while (currentMessageSetIterator.hasNext()) {
        MessageAndOffset messageAndOffset = currentMessageSetIterator.next();
        if (messageAndOffset.offset() < currentOffset) continue; // old offset, ignore
        Message message = messageAndOffset.message();
        ByteBuffer payload = message.payload();
        byte[] bytes = new byte[payload.limit()];
        payload.get(bytes);
        currentOffset = messageAndOffset.nextOffset();
        return bytes;
    }
    return null;
}
Example #18
Source File: KafkaUtilsTest.java, from storm-kafka-0.8-plus (Apache License 2.0)

@Test
public void generateTuplesWithValueSchemeAndKeyValueMessage() {
    config.scheme = new SchemeAsMultiScheme(new StringScheme());
    String value = "value";
    String key = "key";
    createTopicAndSendMessage(key, value);
    ByteBufferMessageSet messageAndOffsets = getLastMessage();
    for (MessageAndOffset msg : messageAndOffsets) {
        Iterable<List<Object>> lists = KafkaUtils.generateTuples(config, msg.message());
        assertEquals(value, lists.iterator().next().get(0));
    }
}
Example #19
Source File: KafkaRiver.java, from elasticsearch-river-kafka (Apache License 2.0)

void handleMessages(BulkRequestBuilder bulkRequestBuilder, ByteBufferMessageSet msgs) {
    long numMsg = 0;
    for (MessageAndOffset mo : msgs) {
        ++numMsg;
        ++stats.numMessages;
        try {
            msgHandler.handle(bulkRequestBuilder, mo.message());
        } catch (Exception e) {
            logger.warn("Failed handling message", e);
        }
    }
    logger.debug("handleMessages processed {} messages", numMsg);
}
Example #20
Source File: KafkaUtilsTest.java, from storm-kafka-0.8-plus (Apache License 2.0)

private void runGetValueOnlyTuplesTest() {
    String value = "value";
    createTopicAndSendMessage(null, value);
    ByteBufferMessageSet messageAndOffsets = getLastMessage();
    for (MessageAndOffset msg : messageAndOffsets) {
        Iterable<List<Object>> lists = KafkaUtils.generateTuples(config, msg.message());
        assertEquals(value, lists.iterator().next().get(0));
    }
}
Example #21
Source File: KafkaPartitionLevelConsumer.java, from incubator-pinot (Apache License 2.0)

/**
 * Fetch messages and the per-partition high watermark from Kafka between the specified offsets.
 *
 * @param startOffset The offset of the first message desired, inclusive
 * @param endOffset The offset of the last message desired, exclusive, or {@link Long#MAX_VALUE} for no end offset
 * @param timeoutMillis Timeout in milliseconds
 * @return An iterable containing messages fetched from Kafka and their offsets, as well as the high watermark for
 *         this partition
 * @throws java.util.concurrent.TimeoutException If the operation could not be completed within {@code timeoutMillis}
 *         milliseconds
 */
public synchronized MessageBatch fetchMessages(long startOffset, long endOffset, int timeoutMillis)
        throws java.util.concurrent.TimeoutException {
    // TODO Improve error handling
    final long connectEndTime = System.currentTimeMillis() + _connectTimeoutMillis;
    while (_currentState.getStateValue() != KafkaConnectionHandler.ConsumerState.CONNECTED_TO_PARTITION_LEADER
            && System.currentTimeMillis() < connectEndTime) {
        _currentState.process();
    }
    if (_currentState.getStateValue() != KafkaConnectionHandler.ConsumerState.CONNECTED_TO_PARTITION_LEADER
            && connectEndTime <= System.currentTimeMillis()) {
        throw new java.util.concurrent.TimeoutException();
    }
    FetchResponse fetchResponse = _simpleConsumer.fetch(
            new FetchRequestBuilder().minBytes(_fetchRequestMinBytes).maxWait(timeoutMillis)
                    .addFetch(_topic, _partition, startOffset, _fetchRequestSizeBytes).build());
    if (!fetchResponse.hasError()) {
        final Iterable<MessageAndOffset> messageAndOffsetIterable =
                buildOffsetFilteringIterable(fetchResponse.messageSet(_topic, _partition), startOffset, endOffset);
        // TODO: Instantiate with factory
        return new SimpleConsumerMessageBatch(messageAndOffsetIterable);
    } else {
        throw exceptionForKafkaErrorCode(fetchResponse.errorCode(_topic, _partition));
    }
}
Example #22
Source File: KafkaLeaderReader.java, from arcusplatform (Apache License 2.0)

public void update(ByteBufferMessageSet bbms) {
    boolean anyMatches = false;
    V value = null;
    for (MessageAndOffset mao : bbms) {
        anyMatches = true;
        value = factory.apply(mao.message());
        logger.trace("Scanning for [{}] in [{}] at [{}]@[{},{},{}]: ",
                target, tap.partition(), value, startOffset, mao.offset(), endOffset);
        int delta = target.compareTo(value);
        if (delta == 0) {
            logger.debug("Found exact offset for partition: [{}] value: [{}]", tap.partition(), value);
            this.offset = mao.offset();
            return;
        } else if (delta > 0) {
            // not far enough
            this.startOffset = mao.offset();
        } else if (delta < 0) {
            // too far
            this.endOffset = mao.offset();
            break; // don't process the next message or we'll think we're past the end
        }
    }
    if ((endOffset - startOffset) < 2) {
        logger.debug("Found offset for partition: [{}] value: [{}]", tap.partition(), value);
        this.offset = this.endOffset; // start with the next message after value
    } else if (!anyMatches) {
        logger.debug("Reached the end of partition [{}] using offset [{}]", tap.partition(), endOffset);
        this.offset = this.endOffset;
    }
}
Example #23
Source File: KafkaDeserializerExtractorTest.java, from incubator-gobblin (Apache License 2.0)

private ByteArrayBasedKafkaRecord getMockMessageAndOffset(ByteBuffer payload) {
    MessageAndOffset mockMessageAndOffset = mock(MessageAndOffset.class);
    Message mockMessage = mock(Message.class);
    when(mockMessage.payload()).thenReturn(payload);
    when(mockMessageAndOffset.message()).thenReturn(mockMessage);
    return new Kafka08ConsumerRecord(mockMessageAndOffset, "test", 0);
}
Example #24
Source File: KafkaLeaderReader.java, from arcusplatform (Apache License 2.0)

private int dispatch(FetchResponse response) {
    int numDispatched = 0;
    for (TopicAndPartition tap : new ArrayList<>(offsets.keySet())) {
        short errorCode = response.errorCode(tap.topic(), tap.partition());
        if (errorCode != 0) {
            logger.warn("Error reading from topic: [{}] partition: [{}]",
                    tap.topic(), tap.partition(), ErrorMapping.exceptionFor(errorCode));
            continue;
        }
        ByteBufferMessageSet message = response.messageSet(tap.topic(), tap.partition());
        for (MessageAndOffset mao : message) {
            Long offset = offsets.get(tap);
            if (offset != null && offset > mao.offset()) {
                // skip older offsets
                continue;
            }
            KafkaConsumer handler = handlers.computeIfAbsent(tap, handlerFactory);
            if (handler == null) {
                logger.debug("No handler for topic: [{}] partition: [{}], this partition won't be processed",
                        tap.topic(), tap.partition());
                offsets.remove(tap);
                handlers.remove(tap);
                break;
            }
            if (handler.apply(tap, mao.message())) {
                numDispatched++;
                offsets.put(tap, mao.nextOffset());
            } else {
                logger.debug("Done processing topic: [{}] partition: [{}]", tap.topic(), tap.partition());
                offsets.remove(tap);
                handlers.remove(tap);
                break;
            }
        }
    }
    return numDispatched;
}
Example #25
Source File: SimpleConsumerDemo.java, from javabase (Apache License 2.0)

private static void printMessages(ByteBufferMessageSet messageSet) throws UnsupportedEncodingException {
    for (MessageAndOffset messageAndOffset : messageSet) {
        ByteBuffer payload = messageAndOffset.message().payload();
        byte[] bytes = new byte[payload.limit()];
        payload.get(bytes);
        System.out.println(new String(bytes, "UTF-8"));
    }
}
Example #26
Source File: KafkaWrapper.java, from incubator-gobblin (Apache License 2.0)

private Iterator<MessageAndOffset> refreshTopicMetadataAndRetryFetch(KafkaPartition partition, FetchRequest fetchRequest) {
    try {
        refreshTopicMetadata(partition);
        FetchResponse fetchResponse = getFetchResponseForFetchRequest(fetchRequest, partition);
        return getIteratorFromFetchResponse(fetchResponse, partition);
    } catch (Exception e) {
        LOG.warn(String.format("Fetch message buffer for partition %s has failed: %s. This partition will be skipped.",
                partition, e));
        return null;
    }
}
Example #27
Source File: AbstractExactlyOnceKafkaOutputOperator.java, from attic-apex-malhar (Apache License 2.0)

private void initializeLastProcessingOffset() {
    // read last received kafka message
    TopicMetadata tm = KafkaMetadataUtil.getTopicMetadata(
            Sets.newHashSet((String) getConfigProperties().get(KafkaMetadataUtil.PRODUCER_PROP_BROKERLIST)),
            this.getTopic());
    if (tm == null) {
        throw new RuntimeException("Failed to retrieve topic metadata");
    }
    partitionNum = tm.partitionsMetadata().size();
    lastMsgs = new HashMap<Integer, Pair<byte[], byte[]>>(partitionNum);
    for (PartitionMetadata pm : tm.partitionsMetadata()) {
        String leadBroker = pm.leader().host();
        int port = pm.leader().port();
        String clientName = this.getClass().getName().replace('$', '.') + "_Client_" + tm.topic() + "_" + pm.partitionId();
        SimpleConsumer consumer = new SimpleConsumer(leadBroker, port, 100000, 64 * 1024, clientName);
        long readOffset = KafkaMetadataUtil.getLastOffset(consumer, tm.topic(), pm.partitionId(),
                kafka.api.OffsetRequest.LatestTime(), clientName);
        FetchRequest req = new FetchRequestBuilder().clientId(clientName)
                .addFetch(tm.topic(), pm.partitionId(), readOffset - 1, 100000).build();
        FetchResponse fetchResponse = consumer.fetch(req);
        for (MessageAndOffset messageAndOffset : fetchResponse.messageSet(tm.topic(), pm.partitionId())) {
            Message m = messageAndOffset.message();
            ByteBuffer payload = m.payload();
            ByteBuffer key = m.key();
            byte[] valueBytes = new byte[payload.limit()];
            byte[] keyBytes = new byte[key.limit()];
            payload.get(valueBytes);
            key.get(keyBytes);
            lastMsgs.put(pm.partitionId(), new Pair<byte[], byte[]>(keyBytes, valueBytes));
        }
    }
}
Example #28
Source File: KafkaWrapper.java, from incubator-gobblin (Apache License 2.0)

private Iterator<MessageAndOffset> getIteratorFromFetchResponse(FetchResponse fetchResponse, KafkaPartition partition) {
    try {
        ByteBufferMessageSet messageBuffer = fetchResponse.messageSet(partition.getTopicName(), partition.getId());
        return messageBuffer.iterator();
    } catch (Exception e) {
        LOG.warn(String.format("Failed to retrieve next message buffer for partition %s: %s. "
                + "The remainder of this partition will be skipped.", partition, e));
        return null;
    }
}
Example #29
Source File: KafkaReader.java, from HiveKa (Apache License 2.0)

/**
 * Fetches the next Kafka message and stuffs the results into the key and
 * value.
 *
 * @param key
 * @param payload
 * @param pKey
 * @return true if more events exist
 * @throws IOException
 */
public boolean getNext(KafkaKey key, BytesWritable payload, BytesWritable pKey) throws IOException {
    if (hasNext()) {
        MessageAndOffset msgAndOffset = messageIter.next();
        Message message = msgAndOffset.message();

        ByteBuffer buf = message.payload();
        int origSize = buf.remaining();
        byte[] bytes = new byte[origSize];
        buf.get(bytes, 0, origSize); // copy into the destination array starting at index 0
        payload.set(bytes, 0, origSize);

        buf = message.key();
        if (buf != null) {
            origSize = buf.remaining();
            bytes = new byte[origSize];
            buf.get(bytes, 0, origSize);
            pKey.set(bytes, 0, origSize);
        }

        key.clear();
        key.set(kafkaRequest.getTopic(), kafkaRequest.getLeaderId(), kafkaRequest.getPartition(),
                currentOffset, msgAndOffset.offset() + 1, message.checksum());
        key.setMessageSize(msgAndOffset.message().size());
        currentOffset = msgAndOffset.offset() + 1; // increase offset
        currentCount++; // increase count
        return true;
    } else {
        return false;
    }
}
Example #30
Source File: KafkaSimpleConsumer.java, from Pistachio (Apache License 2.0)

private Iterable<BytesMessageWithOffset> filterAndDecode(Iterable<MessageAndOffset> kafkaMessages, long offset) {
    List<BytesMessageWithOffset> ret = new LinkedList<>();
    for (MessageAndOffset msgAndOffset : kafkaMessages) {
        if (msgAndOffset.offset() >= offset) {
            byte[] payload = decoder.fromMessage(msgAndOffset.message());
            // add nextOffset here, thus next fetch will use nextOffset instead of current offset
            ret.add(new BytesMessageWithOffset(payload, msgAndOffset.nextOffset()));
        }
    }
    return ret;
}