Java Code Examples for org.apache.samza.system.IncomingMessageEnvelope#getKey()
The following examples show how to use org.apache.samza.system.IncomingMessageEnvelope#getKey(). Each example notes the source file and project it comes from.
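Before the project examples, a minimal sketch of the call in context (the task class below is illustrative only, not taken from any of the projects listed here): getKey() returns the envelope's key as an Object, so callers cast it to whatever type the configured key serde produces, and the key may be null for unkeyed messages.

import org.apache.samza.system.IncomingMessageEnvelope;
import org.apache.samza.task.MessageCollector;
import org.apache.samza.task.StreamTask;
import org.apache.samza.task.TaskCoordinator;

public class KeyLoggingTask implements StreamTask {
  @Override
  public void process(IncomingMessageEnvelope envelope, MessageCollector collector, TaskCoordinator coordinator) {
    // getKey() and getMessage() both return Object; the runtime types depend on the
    // serdes configured for the input system, and the key may be null.
    Object key = envelope.getKey();
    Object message = envelope.getMessage();
    System.out.println("key=" + key + ", message=" + message);
  }
}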
Example 1
Source File: CoordinatorStreamStore.java From samza with Apache License 2.0
private void readMessagesFromCoordinatorStream() {
  synchronized (bootstrapLock) {
    while (iterator.hasNext()) {
      IncomingMessageEnvelope envelope = iterator.next();
      byte[] keyAsBytes = (byte[]) envelope.getKey();
      Serde<List<?>> serde = new JsonSerde<>();
      Object[] keyArray = serde.fromBytes(keyAsBytes).toArray();
      CoordinatorStreamMessage coordinatorStreamMessage = new CoordinatorStreamMessage(keyArray, new HashMap<>());
      String namespacedKey = serializeCoordinatorMessageKeyToJson(coordinatorStreamMessage.getType(),
          coordinatorStreamMessage.getKey());
      if (envelope.getMessage() != null) {
        messagesReadFromCoordinatorStream.put(namespacedKey, (byte[]) envelope.getMessage());
      } else {
        messagesReadFromCoordinatorStream.remove(namespacedKey);
      }
    }
  }
}
Example 2
Source File: KeyedScottyWindowOperator.java From scotty-window-processor with Apache License 2.0
@Override
public void process(IncomingMessageEnvelope envelope, MessageCollector collector, TaskCoordinator coordinator)
    throws Exception {
  Key currentKey = (Key) envelope.getKey();
  if (!slicingWindowOperatorMap.containsKey(currentKey)) {
    slicingWindowOperatorMap.put(currentKey, initWindowOperator());
  }
  SlicingWindowOperator<Value> slicingWindowOperator = slicingWindowOperatorMap.get(currentKey);
  slicingWindowOperator.processElement((Value) envelope.getMessage(), envelope.getEventTime());
  processWatermark(envelope.getEventTime(), collector);
}
Example 3
Source File: MultiFileHdfsReader.java From samza with Apache License 2.0
public IncomingMessageEnvelope readNext() {
  if (!hasNext()) {
    LOG.warn("Attempting to read more data when there aren't any. ssp=" + systemStreamPartition);
    return null;
  }
  // record the next offset before we read, so when the read fails and we reconnect,
  // we seek to the same offset that we try below
  curSingleFileOffset = curReader.nextOffset();
  IncomingMessageEnvelope messageEnvelope = curReader.readNext();
  // Copy everything except for the offset. Turn the single-file style offset into a multi-file one
  return new IncomingMessageEnvelope(messageEnvelope.getSystemStreamPartition(), getCurOffset(),
      messageEnvelope.getKey(), messageEnvelope.getMessage(), messageEnvelope.getSize(),
      messageEnvelope.getEventTime(), messageEnvelope.getArrivalTime());
}
Example 4
Source File: Joiner.java From samza with Apache License 2.0
@Override
public void process(IncomingMessageEnvelope envelope, MessageCollector collector, TaskCoordinator coordinator) {
  String key = (String) envelope.getKey();
  String value = (String) envelope.getMessage();
  String[] pieces = value.split("-");
  int epoch = Integer.parseInt(pieces[0]);
  int partition = Integer.parseInt(pieces[1].split(" ")[1]);
  Partitions partitions = loadPartitions(epoch, key);
  logger.info("Joiner got epoch = " + epoch + ", partition = " + partition + ", parts = " + partitions);
  if (partitions.epoch < epoch) {
    // we are in a new era
    if (partitions.partitions.size() != expected)
      throw new IllegalArgumentException("Should have " + expected + " partitions when new epoch starts.");
    logger.info("Reseting epoch to " + epoch);
    this.store.delete(key);
    partitions.epoch = epoch;
    partitions.partitions.clear();
    partitions.partitions.add(partition);
  } else if (partitions.epoch > epoch) {
    logger.info("Ignoring message for epoch " + epoch);
  } else {
    partitions.partitions.add(partition);
    if (partitions.partitions.size() == expected) {
      logger.info("Completed: " + key + " -> " + Integer.toString(epoch));
      collector.send(new OutgoingMessageEnvelope(new SystemStream("kafka", "completed-keys"), key, Integer.toString(epoch)));
    }
  }
  this.store.put(key, partitions.toString());
  logger.info("Join store in Task " + this.taskName + " " + key + " -> " + partitions.toString());
}
Example 5
Source File: Checker.java From samza with Apache License 2.0
@Override
public void process(IncomingMessageEnvelope envelope, MessageCollector collector, TaskCoordinator coordinator) {
  String key = (String) envelope.getKey();
  String epoch = (String) envelope.getMessage();
  logger.info("Got key=" + key + ", epoch = " + epoch + " in checker...");
  checkEpoch(epoch);
  this.store.put(key, epoch);
}
Example 6
Source File: TestZkStreamProcessorBase.java From samza with Apache License 2.0
@Override
public void process(IncomingMessageEnvelope incomingMessageEnvelope, MessageCollector messageCollector,
    TaskCoordinator taskCoordinator) throws Exception {
  Object message = incomingMessageEnvelope.getMessage();
  String key = new String((byte[]) incomingMessageEnvelope.getKey());
  Integer val = Integer.valueOf((String) message);
  LOG.info("Stream processor " + processorId + ";key=" + key + ";offset=" + incomingMessageEnvelope.getOffset()
      + "; totalRcvd=" + processedMessageCount + ";val=" + val + "; ssp="
      + incomingMessageEnvelope.getSystemStreamPartition());

  // inject a failure
  if (val >= BAD_MESSAGE_KEY && processorId.equals(processorIdToFail)) {
    LOG.info("process method failing for msg=" + message);
    throw new Exception("Processing in the processor " + processorId + " failed ");
  }

  messageCollector.send(new OutgoingMessageEnvelope(new SystemStream(outputSystem, outputTopic), message));
  processedMessageCount++;

  synchronized (endLatch) {
    if (Integer.valueOf(key) < BAD_MESSAGE_KEY) {
      endLatch.countDown();
    }
  }
}
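Note how the runtime type returned by getKey() depends on the key serde configured for the input system: in the test above the key arrives as a raw byte[] and is decoded manually. A short sketch of handling both a byte[] key and an already-deserialized String key (variable names here are illustrative, not from the example):

Object rawKey = envelope.getKey();
String key = (rawKey instanceof byte[])
    ? new String((byte[]) rawKey, java.nio.charset.StandardCharsets.UTF_8)
    : (String) rawKey;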
Example 7
Source File: TransactionalStateIntegrationTest.java From samza with Apache License 2.0
@Override
public void process(IncomingMessageEnvelope envelope, MessageCollector collector, TaskCoordinator coordinator) {
  String key = (String) envelope.getKey();
  LOG.info("Received key: {}", key);

  if (key.endsWith("crash_once")) { // endsWith allows :crash_once and crash_once
    if (!crashedOnce) {
      crashedOnce = true;
      coordinator.shutdown(TaskCoordinator.RequestScope.CURRENT_TASK);
    } else {
      return;
    }
  } else if (key.endsWith("shutdown")) {
    coordinator.shutdown(TaskCoordinator.RequestScope.CURRENT_TASK);
  } else if (key.startsWith("-")) {
    store.delete(key.substring(1));
  } else if (key.startsWith(":")) { // write the message and flush, but don't invoke commit later
    String msg = key.substring(1);
    store.put(msg, msg);
  } else {
    store.put(key, key);
  }

  store.flush();

  if (!key.startsWith(":")) {
    coordinator.commit(TaskCoordinator.RequestScope.CURRENT_TASK);
  }
}
Example 8
Source File: TransactionalStateMultiStoreIntegrationTest.java From samza with Apache License 2.0
@Override
public void process(IncomingMessageEnvelope envelope, MessageCollector collector, TaskCoordinator coordinator) {
  String key = (String) envelope.getKey();
  LOG.info("Received key: {}", key);

  if (key.endsWith("crash_once")) { // endsWith allows :crash_once and crash_once
    if (!crashedOnce) {
      crashedOnce = true;
      coordinator.shutdown(TaskCoordinator.RequestScope.CURRENT_TASK);
    } else {
      return;
    }
  } else if (key.endsWith("shutdown")) {
    coordinator.shutdown(TaskCoordinator.RequestScope.CURRENT_TASK);
  } else if (key.startsWith("-")) {
    store.delete(key.substring(1));
  } else if (key.startsWith(":")) { // write the message and flush, but don't invoke commit later
    String msg = key.substring(1);
    store.put(msg, msg);
  } else {
    store.put(key, key);
  }

  store.flush();

  if (!key.startsWith(":")) {
    coordinator.commit(TaskCoordinator.RequestScope.CURRENT_TASK);
  }
}
@Override public void process(IncomingMessageEnvelope envelope, MessageCollector collector, TaskCoordinator coordinator) { String key = (String) envelope.getKey(); LOG.info("Received key: {}", key); if (key.endsWith("crash_once")) { // endsWith allows :crash_once and crash_once if (!crashedOnce) { crashedOnce = true; coordinator.shutdown(TaskCoordinator.RequestScope.CURRENT_TASK); } else { return; } } else if (key.endsWith("shutdown")) { coordinator.shutdown(TaskCoordinator.RequestScope.CURRENT_TASK); } else if (key.startsWith("-")) { store.delete(key.substring(1)); } else if (key.startsWith(":")) { // write the message and flush, but don't invoke commit later String msg = key.substring(1); store.put(msg, msg); } else { store.put(key, key); } store.flush(); if (!key.startsWith(":")) { coordinator.commit(TaskCoordinator.RequestScope.CURRENT_TASK); } }