Java Code Examples for org.apache.pulsar.client.api.Message#getMessageId()
The following examples show how to use org.apache.pulsar.client.api.Message#getMessageId().
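Before working through the examples, here is a minimal, self-contained sketch of the API in isolation: a consumer receives a message, reads its MessageId, and acknowledges by that id. The service URL, topic, and subscription names are placeholders, not values from any of the projects below.

import org.apache.pulsar.client.api.Consumer;
import org.apache.pulsar.client.api.Message;
import org.apache.pulsar.client.api.MessageId;
import org.apache.pulsar.client.api.PulsarClient;

public class GetMessageIdExample {
    public static void main(String[] args) throws Exception {
        // Placeholder service URL, topic, and subscription
        PulsarClient client = PulsarClient.builder()
                .serviceUrl("pulsar://localhost:6650")
                .build();
        Consumer<byte[]> consumer = client.newConsumer()
                .topic("my-topic")
                .subscriptionName("my-subscription")
                .subscribe();

        Message<byte[]> msg = consumer.receive();
        // getMessageId() returns the id the broker assigned to this message on publish
        MessageId id = msg.getMessageId();
        // A MessageId can be acknowledged directly, logged, or persisted for a later seek
        consumer.acknowledge(id);

        consumer.close();
        client.close();
    }
}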
Example 1
Source File: PulsarReaderBuilder.java From hazelcast-jet-contrib with Apache License 2.0
/**
 * Receive the messages as a batch.
 * In this method, emitted items are created by applying the projection function
 * to the messages received from Pulsar client. If there is an event time
 * associated with the message, it sets the event time as the timestamp of the
 * emitted item. Otherwise, it sets the publish time (which always exists)
 * of the message as the timestamp.
 */
private void fillBuffer(SourceBuilder.TimestampedSourceBuffer<T> sourceBuffer) throws PulsarClientException {
    if (reader == null) {
        createReader();
    }
    int count = 0;
    while (!queue.isEmpty() && count++ < MAX_FILL_MESSAGES) {
        Message<M> message = queue.poll();
        long timestamp;
        if (message.getEventTime() != 0) {
            timestamp = message.getEventTime();
        } else {
            timestamp = message.getPublishTime();
        }
        T item = projectionFn.apply(message);
        offset = message.getMessageId();
        if (item != null) {
            sourceBuffer.add(item, timestamp);
        }
    }
}
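The `offset` field above holds the connector's restartable position. As a rough sketch of the same idea (hypothetical method and topic names, not the connector's actual restore path), a saved MessageId can be handed back to a Reader through startMessageId() to resume after a checkpoint:

import org.apache.pulsar.client.api.MessageId;
import org.apache.pulsar.client.api.PulsarClient;
import org.apache.pulsar.client.api.PulsarClientException;
import org.apache.pulsar.client.api.Reader;

static Reader<byte[]> resumeReader(PulsarClient client, MessageId savedOffset)
        throws PulsarClientException {
    // "my-topic" is a placeholder; savedOffset is the MessageId captured above.
    // startMessageId() positions the reader; by default reading resumes *after*
    // the given id, so the checkpointed message is not emitted a second time.
    return client.newReader()
            .topic("my-topic")
            .startMessageId(savedOffset)
            .create();
}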
Example 2
Source File: MultiTopicsConsumerImpl.java From pulsar with Apache License 2.0
private void removeExpiredMessagesFromQueue(Set<MessageId> messageIds) {
    Message<T> peek = incomingMessages.peek();
    if (peek != null) {
        if (!messageIds.contains(peek.getMessageId())) {
            // first message is not expired, then no message is expired in queue.
            return;
        }

        // try not to remove elements that are added while we remove
        Message<T> message = incomingMessages.poll();
        checkState(message instanceof TopicMessageImpl);
        while (message != null) {
            INCOMING_MESSAGES_SIZE_UPDATER.addAndGet(this, -message.getData().length);
            MessageId messageId = message.getMessageId();
            if (!messageIds.contains(messageId)) {
                messageIds.add(messageId);
                break;
            }
            message = incomingMessages.poll();
        }
    }
}
Example 3
Source File: ConsumerImpl.java From pulsar with Apache License 2.0
/**
 * Record the event that one message has been processed by the application.
 *
 * Periodically, it sends a Flow command to notify the broker that it can push more messages.
 */
protected synchronized void messageProcessed(Message<?> msg) {
    ClientCnx currentCnx = cnx();
    ClientCnx msgCnx = ((MessageImpl<?>) msg).getCnx();
    lastDequeuedMessageId = msg.getMessageId();

    if (msgCnx != currentCnx) {
        // The processed message did belong to the old queue that was cleared after reconnection.
        return;
    }

    increaseAvailablePermits(currentCnx);
    stats.updateNumMsgsReceived(msg);

    trackMessage(msg);
    INCOMING_MESSAGES_SIZE_UPDATER.addAndGet(this, msg.getData() == null ? 0 : -msg.getData().length);
}
Example 4
Source File: FunctionAssignmentTailer.java From pulsar with Apache License 2.0
private Thread getTailerThread() {
    Thread t = new Thread(() -> {
        while (isRunning) {
            try {
                Message<byte[]> msg = reader.readNext(5, TimeUnit.SECONDS);
                if (msg == null) {
                    if (exitOnEndOfTopic && !reader.hasMessageAvailable()) {
                        break;
                    }
                } else {
                    functionRuntimeManager.processAssignmentMessage(msg);
                    // keep track of last message id
                    lastMessageId = msg.getMessageId();
                }
            } catch (Throwable th) {
                if (isRunning) {
                    log.error("Encountered error in assignment tailer", th);
                    // trigger fatal error
                    isRunning = false;
                    errorNotifier.triggerError(th);
                } else {
                    if (!(th instanceof InterruptedException || th.getCause() instanceof InterruptedException)) {
                        log.warn("Encountered error when assignment tailer is not running", th);
                    }
                }
            }
        }
        log.info("tailer thread exiting");
        exitFuture.complete(null);
    });
    t.setName("assignment-tailer-thread");
    return t;
}
Example 5
Source File: FunctionRuntimeManager.java From pulsar with Apache License 2.0
/**
 * Initializes the FunctionRuntimeManager. Does the following:
 * 1. Consume all existing assignments to establish existing/latest set of assignments
 * 2. After current assignments are read, assignments belonging to this worker will be processed
 *
 * @return the message id of the message processed during init phase
 */
public MessageId initialize() {
    try {
        Reader<byte[]> reader = WorkerUtils.createReader(
                workerService.getClient().newReader(),
                workerConfig.getWorkerId() + "-function-assignment-initialize",
                workerConfig.getFunctionAssignmentTopic(),
                MessageId.earliest);

        // start init phase
        this.isInitializePhase = true;
        // keep track of the last message read
        MessageId lastMessageRead = MessageId.earliest;
        // read all existing messages
        while (reader.hasMessageAvailable()) {
            Message<byte[]> message = reader.readNext();
            lastMessageRead = message.getMessageId();
            processAssignmentMessage(message);
        }
        // init phase is done
        this.isInitializePhase = false;
        // close reader
        reader.close();
        // realize existing assignments
        Map<String, Assignment> assignmentMap = workerIdToAssignments.get(this.workerConfig.getWorkerId());
        if (assignmentMap != null) {
            for (Assignment assignment : assignmentMap.values()) {
                if (needsStart(assignment)) {
                    startFunctionInstance(assignment);
                }
            }
        }
        // complete future to indicate initialization is complete
        isInitialized.complete(null);
        return lastMessageRead;
    } catch (Exception e) {
        log.error("Failed to initialize function runtime manager: {}", e.getMessage(), e);
        throw new RuntimeException(e);
    }
}
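initialize() returns the id of the last assignment it processed. If such an id needs to survive a process restart, MessageId supports a byte-array round trip; a minimal sketch (the storage layer itself is left abstract):

import java.io.IOException;
import org.apache.pulsar.client.api.MessageId;

static byte[] checkpoint(MessageId lastMessageRead) {
    // toByteArray() produces a compact encoding suitable for durable storage
    return lastMessageRead.toByteArray();
}

static MessageId restore(byte[] stored) throws IOException {
    // fromByteArray() rebuilds the id, including the batch index if present
    return MessageId.fromByteArray(stored);
}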
Example 6
Source File: PulsarSpout.java From pulsar with Apache License 2.0
@Override
public void fail(Object msgId) {
    if (msgId instanceof Message) {
        @SuppressWarnings("unchecked")
        Message<byte[]> msg = (Message<byte[]>) msgId;
        MessageId id = msg.getMessageId();
        LOG.warn("[{}] Error processing message {}", spoutId, id);

        // Since the message processing failed, we put it in the failed messages queue if there are more retries
        // remaining for the message
        MessageRetries messageRetries = pendingMessageRetries.computeIfAbsent(id, (k) -> new MessageRetries());
        if ((failedRetriesTimeoutNano < 0
                || (messageRetries.getTimeStamp() + failedRetriesTimeoutNano) > System.nanoTime())
                && (maxFailedRetries < 0 || messageRetries.numRetries < maxFailedRetries)) {
            // since we can retry again, we increment retry count and put it in the queue
            LOG.info("[{}] Putting message {} in the retry queue", spoutId, id);
            messageRetries.incrementAndGet();
            pendingMessageRetries.putIfAbsent(id, messageRetries);
            failedMessages.add(msg);
            --pendingAcks;
            messagesFailed++;
        } else {
            LOG.warn("[{}] Number of retries limit reached, dropping the message {}", spoutId, id);
            ack(msg);
        }
    }
}
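The spout keeps its own retry queue keyed by MessageId. On newer Pulsar clients, much of this bookkeeping can instead be delegated to the broker through negative acknowledgements. A hedged sketch of that alternative (topic, subscription, and the processing step are placeholders):

import java.util.concurrent.TimeUnit;
import org.apache.pulsar.client.api.Consumer;
import org.apache.pulsar.client.api.Message;
import org.apache.pulsar.client.api.PulsarClient;

static void consumeWithBrokerRetries(PulsarClient client) throws Exception {
    Consumer<byte[]> consumer = client.newConsumer()
            .topic("my-topic")                                // placeholder
            .subscriptionName("my-subscription")              // placeholder
            .negativeAckRedeliveryDelay(10, TimeUnit.SECONDS) // redelivery backoff
            .subscribe();

    Message<byte[]> msg = consumer.receive();
    try {
        // application processing stands in here
        consumer.acknowledge(msg);
    } catch (Exception e) {
        // the broker redelivers this message after the configured delay
        consumer.negativeAcknowledge(msg.getMessageId());
    }
}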
Example 7
Source File: TopicMessageImpl.java From pulsar with Apache License 2.0
TopicMessageImpl(String topicPartitionName,
                 String topicName,
                 Message<T> msg) {
    this.topicPartitionName = topicPartitionName;
    this.msg = msg;
    this.messageId = new TopicMessageIdImpl(topicPartitionName, topicName, msg.getMessageId());
}
Example 8
Source File: ConsumerImpl.java From pulsar with Apache License 2.0
private MessageIdImpl getMessageIdImpl(Message<?> msg) {
    MessageIdImpl messageId = (MessageIdImpl) msg.getMessageId();
    if (messageId instanceof BatchMessageIdImpl) {
        // messageIds contain MessageIdImpl, not BatchMessageIdImpl
        messageId = new MessageIdImpl(messageId.getLedgerId(), messageId.getEntryId(), getPartitionIndex());
    }
    return messageId;
}
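Examples 7 and 8 hinge on the distinction between MessageIdImpl (one id per ledger entry) and BatchMessageIdImpl (which adds a batch index within the entry). A small hypothetical helper illustrates the distinction using the same getters the example relies on:

import org.apache.pulsar.client.api.Message;
import org.apache.pulsar.client.api.MessageId;
import org.apache.pulsar.client.impl.BatchMessageIdImpl;
import org.apache.pulsar.client.impl.MessageIdImpl;

static void describeId(Message<?> msg) {
    MessageId id = msg.getMessageId();
    if (id instanceof BatchMessageIdImpl) {
        BatchMessageIdImpl batchId = (BatchMessageIdImpl) id;
        // Messages batched into one entry share ledgerId/entryId and differ
        // only in batchIndex.
        System.out.printf("ledger=%d entry=%d batchIndex=%d%n",
                batchId.getLedgerId(), batchId.getEntryId(), batchId.getBatchIndex());
    } else if (id instanceof MessageIdImpl) {
        MessageIdImpl plainId = (MessageIdImpl) id;
        System.out.printf("ledger=%d entry=%d%n",
                plainId.getLedgerId(), plainId.getEntryId());
    }
}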
Example 9
Source File: ReaderThread.java From pulsar-flink with Apache License 2.0
protected void skipFirstMessageIfNeeded() throws org.apache.pulsar.client.api.PulsarClientException {
    Message<?> currentMessage = null;
    MessageId currentId;
    boolean failOnDataLoss = this.failOnDataLoss;
    if (!startMessageId.equals(MessageId.earliest)
            && !startMessageId.equals(MessageId.latest)
            && ((MessageIdImpl) startMessageId).getEntryId() != -1) {

        MessageIdImpl lastMessageId =
                (MessageIdImpl) this.owner.getMetadataReader().getLastMessageId(reader.getTopic());
        if (!messageIdRoughEquals(startMessageId, lastMessageId) && !reader.hasMessageAvailable()) {
            MessageIdImpl startMsgIdImpl = (MessageIdImpl) startMessageId;
            long startMsgLedgerId = startMsgIdImpl.getLedgerId();
            long startMsgEntryId = startMsgIdImpl.getEntryId();

            // startMessageId is bigger than lastMessageId
            if (startMsgLedgerId > lastMessageId.getLedgerId()
                    || (startMsgLedgerId == lastMessageId.getLedgerId()
                            && startMsgEntryId > lastMessageId.getEntryId())) {
                log.error("the start message id is beyond the last commit message id, with topic:{}",
                        reader.getTopic());
                throw new RuntimeException("start message id beyond the last commit");
            } else if (!failOnDataLoss) {
                log.info("reset message to valid offset {}", startMessageId);
                this.owner.getMetadataReader().resetCursor(reader.getTopic(), startMessageId);
            }
        }

        while (currentMessage == null && running) {
            currentMessage = reader.readNext(pollTimeoutMs, TimeUnit.MILLISECONDS);
            if (failOnDataLoss) {
                break;
            }
        }
        if (currentMessage == null) {
            reportDataLoss(String.format("Cannot read data at offset %s from topic: %s",
                    startMessageId.toString(), topic));
        } else {
            currentId = currentMessage.getMessageId();
            if (!messageIdRoughEquals(currentId, startMessageId) && failOnDataLoss) {
                reportDataLoss(
                        String.format(
                                "Potential Data Loss in reading %s: intended to start at %s, actually we get %s",
                                topic, startMessageId.toString(), currentId.toString()));
            }

            if (startMessageId instanceof BatchMessageIdImpl && currentId instanceof BatchMessageIdImpl) {
                // we seek using a batch message id, we can read next directly later
            } else if (startMessageId instanceof MessageIdImpl && currentId instanceof BatchMessageIdImpl) {
                // we seek using a message id, this is supposed to be read by previous task since it's
                // inclusive for the checkpoint, so we skip this batch
                BatchMessageIdImpl cbmid = (BatchMessageIdImpl) currentId;
                MessageIdImpl newStart =
                        new MessageIdImpl(cbmid.getLedgerId(), cbmid.getEntryId() + 1, cbmid.getPartitionIndex());
                reader.seek(newStart);
            } else if (startMessageId instanceof MessageIdImpl && currentId instanceof MessageIdImpl) {
                // current entry is a non-batch entry, we can read next directly later
            }
        }
    }
}
Example 10
Source File: ReaderThread.java From pulsar-flink with Apache License 2.0
protected void emitRecord(Message<?> message) throws IOException {
    MessageId messageId = message.getMessageId();
    T record = deserializer.deserialize(message.getData());
    owner.emitRecord(record, state, messageId);
}
Example 11
Source File: RowReaderThread.java From pulsar-flink with Apache License 2.0
@Override
protected void emitRecord(Message<?> message) throws IOException {
    MessageId messageId = message.getMessageId();
    Row record = deserializer.deserialize(message);
    owner.emitRecord(record, state, messageId);
}
Example 12
Source File: ConsumerIterator.java From pulsar with Apache License 2.0
@SuppressWarnings("unchecked") @Override public PulsarMessageAndMetadata<K, V> next() { Message<byte[]> msg = receivedMessages.poll(); if (msg == null) { try { msg = consumer.receive(); } catch (PulsarClientException e) { log.warn("Failed to receive message for {}-{}, {}", consumer.getTopic(), consumer.getSubscription(), e.getMessage(), e); throw new RuntimeException( "failed to receive message from " + consumer.getTopic() + "-" + consumer.getSubscription()); } } int partition = TopicName.getPartitionIndex(consumer.getTopic()); long offset = MessageIdUtils.getOffset(msg.getMessageId()); String key = msg.getKey(); byte[] value = msg.getValue(); K desKey = null; V desValue = null; if (StringUtils.isNotBlank(key)) { if (keyDeSerializer.isPresent() && keyDeSerializer.get() instanceof StringDecoder) { desKey = (K) key; } else { byte[] decodedBytes = Base64.getDecoder().decode(key); desKey = keyDeSerializer.isPresent() ? keyDeSerializer.get().fromBytes(decodedBytes) : (K) DEFAULT_DECODER.fromBytes(decodedBytes); } } if (value != null) { desValue = valueDeSerializer.isPresent() ? valueDeSerializer.get().fromBytes(msg.getData()) : (V) DEFAULT_DECODER.fromBytes(msg.getData()); } PulsarMessageAndMetadata<K, V> msgAndMetadata = new PulsarMessageAndMetadata<>(consumer.getTopic(), partition, null, offset, keyDeSerializer.orElse(null), valueDeSerializer.orElse(null), desKey, desValue); if (isAutoCommit) { // Commit the offset of previously dequeued messages consumer.acknowledgeCumulativeAsync(msg); } lastConsumedMessageId = msg.getMessageId(); return msgAndMetadata; }
Example 13
Source File: PulsarKafkaConsumer.java From pulsar with Apache License 2.0
@SuppressWarnings("unchecked") @Override public ConsumerRecords<K, V> poll(long timeoutMillis) { try { QueueItem item = receivedMessages.poll(timeoutMillis, TimeUnit.MILLISECONDS); if (item == null) { return (ConsumerRecords<K, V>) ConsumerRecords.EMPTY; } Map<TopicPartition, List<ConsumerRecord<K, V>>> records = new HashMap<>(); int numberOfRecords = 0; while (item != null) { TopicName topicName = TopicName.get(item.consumer.getTopic()); String topic = topicName.getPartitionedTopicName(); int partition = topicName.isPartitioned() ? topicName.getPartitionIndex() : 0; Message<byte[]> msg = item.message; MessageIdImpl msgId = (MessageIdImpl) msg.getMessageId(); long offset = MessageIdUtils.getOffset(msgId); TopicPartition tp = new TopicPartition(topic, partition); if (lastReceivedOffset.get(tp) == null && !unpolledPartitions.contains(tp)) { log.info("When polling offsets, invalid offsets were detected. Resetting topic partition {}", tp); resetOffsets(tp); } K key = getKey(topic, msg); if (valueSchema instanceof PulsarKafkaSchema) { ((PulsarKafkaSchema<V>) valueSchema).setTopic(topic); } V value = valueSchema.decode(msg.getData()); TimestampType timestampType = TimestampType.LOG_APPEND_TIME; long timestamp = msg.getPublishTime(); if (msg.getEventTime() > 0) { // If we have Event time, use that in preference timestamp = msg.getEventTime(); timestampType = TimestampType.CREATE_TIME; } ConsumerRecord<K, V> consumerRecord = new ConsumerRecord<>(topic, partition, offset, timestamp, timestampType, -1, msg.hasKey() ? msg.getKey().length() : 0, msg.getData().length, key, value); records.computeIfAbsent(tp, k -> new ArrayList<>()).add(consumerRecord); // Update last offset seen by application lastReceivedOffset.put(tp, offset); unpolledPartitions.remove(tp); if (++numberOfRecords >= maxRecordsInSinglePoll) { break; } // Check if we have an item already available item = receivedMessages.poll(0, TimeUnit.MILLISECONDS); } if (isAutoCommit && !records.isEmpty()) { // Commit the offset of previously dequeued messages commitAsync(); } // If no interceptor is provided, interceptors list will an empty list, original ConsumerRecords will be return. return applyConsumerInterceptorsOnConsume(interceptors, new ConsumerRecords<>(records)); } catch (InterruptedException e) { throw new RuntimeException(e); } }
Example 14
Source File: PulsarKafkaConsumer.java From pulsar with Apache License 2.0
@Override
public ConsumerRecords<K, V> poll(long timeoutMillis) {
    try {
        QueueItem item = receivedMessages.poll(timeoutMillis, TimeUnit.MILLISECONDS);
        if (item == null) {
            return (ConsumerRecords<K, V>) ConsumerRecords.EMPTY;
        }

        Map<TopicPartition, List<ConsumerRecord<K, V>>> records = new HashMap<>();

        int numberOfRecords = 0;

        while (item != null) {
            TopicName topicName = TopicName.get(item.consumer.getTopic());
            String topic = topicName.getPartitionedTopicName();
            int partition = topicName.isPartitioned() ? topicName.getPartitionIndex() : 0;
            Message<byte[]> msg = item.message;
            MessageIdImpl msgId = (MessageIdImpl) msg.getMessageId();
            long offset = MessageIdUtils.getOffset(msgId);

            TopicPartition tp = new TopicPartition(topic, partition);
            if (lastReceivedOffset.get(tp) == null && !unpolledPartitions.contains(tp)) {
                log.info("When polling offsets, invalid offsets were detected. Resetting topic partition {}", tp);
                resetOffsets(tp);
            }

            K key = getKey(topic, msg);
            if (valueSchema instanceof PulsarKafkaSchema) {
                ((PulsarKafkaSchema<V>) valueSchema).setTopic(topic);
            }
            V value = valueSchema.decode(msg.getData());

            ConsumerRecord<K, V> consumerRecord = new ConsumerRecord<>(topic, partition, offset, key, value);

            records.computeIfAbsent(tp, k -> new ArrayList<>()).add(consumerRecord);

            // Update last offset seen by application
            lastReceivedOffset.put(tp, offset);
            unpolledPartitions.remove(tp);

            if (++numberOfRecords >= maxRecordsInSinglePoll) {
                break;
            }

            // Check if we have an item already available
            item = receivedMessages.poll(0, TimeUnit.MILLISECONDS);
        }

        if (isAutoCommit && !records.isEmpty()) {
            // Commit the offset of previously dequeued messages
            commitAsync();
        }

        return new ConsumerRecords<>(records);
    } catch (InterruptedException e) {
        throw new RuntimeException(e);
    }
}
Example 15
Source File: PersistentTopicE2ETest.java From pulsar with Apache License 2.0
/**
 * Verify:
 * 1. Broker should not replay already acknowledged messages
 * 2. Dispatcher should not get stuck while dispatching new messages due to a previous
 *    replay of invalid/already-acked messages
 *
 * @throws Exception
 */
@Test
public void testMessageReplay() throws Exception {
    final String topicName = "persistent://prop/ns-abc/topic2";
    final String subName = "sub2";

    Message<byte[]> msg;
    int totalMessages = 10;
    int replayIndex = totalMessages / 2;

    Consumer<byte[]> consumer = pulsarClient.newConsumer().topic(topicName).subscriptionName(subName)
            .subscriptionType(SubscriptionType.Shared).receiverQueueSize(1).subscribe();
    Producer<byte[]> producer = pulsarClient.newProducer()
            .topic(topicName)
            .enableBatching(false)
            .messageRoutingMode(MessageRoutingMode.SinglePartition)
            .create();

    PersistentTopic topicRef = (PersistentTopic) pulsar.getBrokerService().getTopicReference(topicName).get();
    assertNotNull(topicRef);
    PersistentSubscription subRef = topicRef.getSubscription(subName);
    PersistentDispatcherMultipleConsumers dispatcher = (PersistentDispatcherMultipleConsumers) subRef
            .getDispatcher();
    Field replayMap = PersistentDispatcherMultipleConsumers.class.getDeclaredField("messagesToRedeliver");
    replayMap.setAccessible(true);
    ConcurrentLongPairSet messagesToReplay = new ConcurrentLongPairSet(64, 1);

    assertNotNull(subRef);

    // (1) Produce messages
    for (int i = 0; i < totalMessages; i++) {
        String message = "my-message-" + i;
        producer.send(message.getBytes());
    }

    MessageIdImpl firstAckedMsg = null;
    // (2) Consume and ack messages except first message
    for (int i = 0; i < totalMessages; i++) {
        msg = consumer.receive();
        consumer.acknowledge(msg);
        MessageIdImpl msgId = (MessageIdImpl) msg.getMessageId();
        if (i == 0) {
            firstAckedMsg = msgId;
        }
        if (i < replayIndex) {
            // (3) accumulate acked messages for replay
            messagesToReplay.add(msgId.getLedgerId(), msgId.getEntryId());
        }
    }

    // (4) redelivery : should redeliver only unacked messages
    Thread.sleep(1000);
    replayMap.set(dispatcher, messagesToReplay);

    // (a) redelivery with all acked-message should clear messageReply bucket
    dispatcher.redeliverUnacknowledgedMessages(dispatcher.getConsumers().get(0));
    assertEquals(messagesToReplay.size(), 0);

    // (b) fill messageReplyBucket with already acked entry again: and try to publish new msg and read it
    messagesToReplay.add(firstAckedMsg.getLedgerId(), firstAckedMsg.getEntryId());
    replayMap.set(dispatcher, messagesToReplay);

    // send new message
    final String testMsg = "testMsg";
    producer.send(testMsg.getBytes());

    // consumer should be able to receive only the new message and not the already-acked replayed one
    dispatcher.consumerFlow(dispatcher.getConsumers().get(0), 1);
    msg = consumer.receive(1, TimeUnit.SECONDS);
    assertNotNull(msg);
    assertEquals(msg.getData(), testMsg.getBytes());

    consumer.close();
    producer.close();
}
Example 16
Source File: ZeroQueueConsumerImpl.java From pulsar with Apache License 2.0
private Message<T> fetchSingleMessageFromBroker() throws PulsarClientException {
    // Just being cautious
    if (incomingMessages.size() > 0) {
        log.error("The incoming message queue should never be greater than 0 when Queue size is 0");
        incomingMessages.clear();
    }

    Message<T> message;
    try {
        // if cnx is null or if the connection breaks the connectionOpened function will send the flow again
        waitingOnReceiveForZeroQueueSize = true;
        synchronized (this) {
            if (isConnected()) {
                sendFlowPermitsToBroker(cnx(), 1);
            }
        }
        do {
            message = incomingMessages.take();
            lastDequeuedMessageId = message.getMessageId();
            ClientCnx msgCnx = ((MessageImpl<?>) message).getCnx();
            // synchronized needed to prevent race between connectionOpened and the check "msgCnx == cnx()"
            synchronized (this) {
                // if message received due to an old flow - discard it and wait for the message from the
                // latest flow command
                if (msgCnx == cnx()) {
                    waitingOnReceiveForZeroQueueSize = false;
                    break;
                }
            }
        } while (true);

        stats.updateNumMsgsReceived(message);
        return message;
    } catch (InterruptedException e) {
        stats.incrementNumReceiveFailed();
        throw PulsarClientException.unwrap(e);
    } finally {
        // Finally block is invoked in case the block on incomingMessages is interrupted
        waitingOnReceiveForZeroQueueSize = false;
        // Clearing the queue in case there was a race with messageReceived
        incomingMessages.clear();
    }
}
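The zero-queue path above only runs for consumers built with a receiver queue size of 0, which trades prefetch throughput for the guarantee that at most one message is in flight per receive() call. A minimal sketch of constructing such a consumer (topic and subscription names are placeholders):

import org.apache.pulsar.client.api.Consumer;
import org.apache.pulsar.client.api.PulsarClient;
import org.apache.pulsar.client.api.PulsarClientException;

static Consumer<byte[]> zeroQueueConsumer(PulsarClient client) throws PulsarClientException {
    // receiverQueueSize(0) selects the zero-queue implementation: each receive()
    // grants exactly one flow permit to the broker, so no messages are prefetched.
    return client.newConsumer()
            .topic("my-topic")                   // placeholder
            .subscriptionName("my-subscription") // placeholder
            .receiverQueueSize(0)
            .subscribe();
}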