Java Code Examples for org.apache.samza.system.IncomingMessageEnvelope#getMessage()
The following examples show how to use org.apache.samza.system.IncomingMessageEnvelope#getMessage().
You can go to the original project or source file by following the links above each example.
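Before the examples from real projects, here is a minimal, self-contained sketch of the common pattern: getMessage() returns an Object, which the task casts to whatever type the configured deserializer produces. The class name, stream names, and String payload below are illustrative assumptions, not taken from any of the projects listed.

import org.apache.samza.system.IncomingMessageEnvelope;
import org.apache.samza.system.OutgoingMessageEnvelope;
import org.apache.samza.system.SystemStream;
import org.apache.samza.task.MessageCollector;
import org.apache.samza.task.StreamTask;
import org.apache.samza.task.TaskCoordinator;

public class UppercaseTask implements StreamTask {
  // Hypothetical output stream; replace with your own system and stream names.
  private static final SystemStream OUTPUT = new SystemStream("kafka", "uppercased");

  @Override
  public void process(IncomingMessageEnvelope envelope, MessageCollector collector,
      TaskCoordinator coordinator) {
    // getMessage() returns Object; the concrete type depends on the configured serde
    // (a String serde is assumed here).
    String text = (String) envelope.getMessage();
    collector.send(new OutgoingMessageEnvelope(OUTPUT, text.toUpperCase()));
  }
}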
Example 1
Source File: CoordinatorStreamSystemConsumer.java (from samza, Apache License 2.0)

/**
 * Returns all unread messages of a specific type, after an iterator on the stream.
 *
 * @param iterator the iterator pointing to an offset in the coordinator stream. All unread messages after this iterator are returned
 * @param type the type of the messages to be returned
 * @return a set of unread messages of a given type, after a given iterator
 */
public Set<CoordinatorStreamMessage> getUnreadMessages(SystemStreamPartitionIterator iterator, String type) {
  LinkedHashSet<CoordinatorStreamMessage> messages = new LinkedHashSet<CoordinatorStreamMessage>();
  while (iterator.hasNext()) {
    IncomingMessageEnvelope envelope = iterator.next();
    Object[] keyArray = keySerde.fromBytes((byte[]) envelope.getKey()).toArray();
    Map<String, Object> valueMap = null;
    if (envelope.getMessage() != null) {
      valueMap = messageSerde.fromBytes((byte[]) envelope.getMessage());
    }
    CoordinatorStreamMessage coordinatorStreamMessage = new CoordinatorStreamMessage(keyArray, valueMap);
    if (type == null || type.equals(coordinatorStreamMessage.getType())) {
      messages.add(coordinatorStreamMessage);
    }
  }
  return messages;
}
Example 2
Source File: CoordinatorStreamStore.java (from samza, Apache License 2.0)

private void readMessagesFromCoordinatorStream() {
  synchronized (bootstrapLock) {
    while (iterator.hasNext()) {
      IncomingMessageEnvelope envelope = iterator.next();
      byte[] keyAsBytes = (byte[]) envelope.getKey();
      Serde<List<?>> serde = new JsonSerde<>();
      Object[] keyArray = serde.fromBytes(keyAsBytes).toArray();
      CoordinatorStreamMessage coordinatorStreamMessage = new CoordinatorStreamMessage(keyArray, new HashMap<>());
      String namespacedKey = serializeCoordinatorMessageKeyToJson(coordinatorStreamMessage.getType(),
          coordinatorStreamMessage.getKey());
      if (envelope.getMessage() != null) {
        messagesReadFromCoordinatorStream.put(namespacedKey, (byte[]) envelope.getMessage());
      } else {
        messagesReadFromCoordinatorStream.remove(namespacedKey);
      }
    }
  }
}
Example 3
Source File: InputOperatorImpl.java (from samza, Apache License 2.0)

@Override
protected CompletionStage<Collection<Object>> handleMessageAsync(IncomingMessageEnvelope message,
    MessageCollector collector, TaskCoordinator coordinator) {
  Object result;
  InputTransformer transformer = inputOpSpec.getTransformer();
  if (transformer != null) {
    result = transformer.apply(message);
  } else {
    result = this.inputOpSpec.isKeyed() ? KV.of(message.getKey(), message.getMessage()) : message.getMessage();
  }

  Collection<Object> output = Optional.ofNullable(result)
      .map(Collections::singletonList)
      .orElse(Collections.emptyList());

  return CompletableFuture.completedFuture(output);
}
Example 4
Source File: WikipediaStatsStreamTask.java (from samza-hello-samza, Apache License 2.0)

@SuppressWarnings("unchecked")
@Override
public void process(IncomingMessageEnvelope envelope, MessageCollector collector, TaskCoordinator coordinator) {
  Map<String, Object> edit = (Map<String, Object>) envelope.getMessage();
  Map<String, Boolean> flags = (Map<String, Boolean>) edit.get("flags");

  Integer editsAllTime = store.get("count-edits-all-time");
  if (editsAllTime == null) editsAllTime = 0;
  store.put("count-edits-all-time", editsAllTime + 1);

  edits += 1;
  byteDiff += (Integer) edit.get("diff-bytes");
  boolean newTitle = titles.add((String) edit.get("title"));

  for (Map.Entry<String, Boolean> flag : flags.entrySet()) {
    if (Boolean.TRUE.equals(flag.getValue())) {
      counts.compute(flag.getKey(), (k, v) -> v == null ? 0 : v + 1);
    }
  }

  if (!newTitle) {
    repeatEdits.inc();
  }
}
Example 5
Source File: NegateNumberTask.java (from samza, Apache License 2.0)

@Override
public void process(IncomingMessageEnvelope envelope, MessageCollector collector, TaskCoordinator coordinator) {
  messagesProcessed += 1;
  String input = (String) envelope.getMessage();
  Integer number = Integer.valueOf(input);
  Integer output = number.intValue() * -1;
  collector.send(new OutgoingMessageEnvelope(outputSystemStream, output.toString()));
  if (messagesProcessed >= maxMessages) {
    coordinator.shutdown(RequestScope.ALL_TASKS_IN_CONTAINER);
  }
}
Example 6
Source File: SamzaEntranceProcessingItem.java (from samoa, Apache License 2.0)

@Override
public void process(IncomingMessageEnvelope envelope, MessageCollector collector, TaskCoordinator coordinator)
    throws Exception {
  SamzaStream output = (SamzaStream) this.getOutputStream();
  if (output == null) return; // if there is no output stream, do nothing
  output.setCollector(collector);
  ContentEvent event = (ContentEvent) envelope.getMessage();
  output.put(event);
}
Example 7
Source File: HomeTimelineTask.java (from newsfeed, MIT License)

@Override
@SuppressWarnings("unchecked")
public void process(IncomingMessageEnvelope envelope, MessageCollector collector, TaskCoordinator coordinator) {
  Map<String, Object> message = (Map<String, Object>) envelope.getMessage();
  if (!message.get("event").equals("postMessage")) {
    throw new IllegalStateException("Unexpected event type on deliveries stream: " + message.get("event"));
  }
  String recipient = (String) message.get("recipient");
  String time = (String) message.get("time");
  homeTimeline.put(recipient + ":" + time + ":" + numMessages, message);
  numMessages++;
}
Example 8
Source File: WikipediaParserStreamTask.java (from samza-hello-samza, Apache License 2.0)

@SuppressWarnings("unchecked")
@Override
public void process(IncomingMessageEnvelope envelope, MessageCollector collector, TaskCoordinator coordinator) {
  Map<String, Object> jsonObject = (Map<String, Object>) envelope.getMessage();
  WikipediaFeedEvent event = new WikipediaFeedEvent(jsonObject);
  Map<String, Object> parsedJsonObject = WikipediaParser.parseEvent(event);
  if (parsedJsonObject != null) {
    collector.send(new OutgoingMessageEnvelope(OUTPUT_STREAM, parsedJsonObject));
  }
}
Example 9
Source File: AbandonedCartStreamTask.java (from Unified-Log-Processing, Apache License 2.0)

@SuppressWarnings("unchecked")
@Override
public void process(IncomingMessageEnvelope envelope, MessageCollector collector, TaskCoordinator coordinator) {
  Map<String, Object> event = (Map<String, Object>) envelope.getMessage();
  String verb = (String) event.get("verb");
  String shopper = (String) ((Map<String, Object>) event.get("subject")).get("shopper");

  if (verb.equals("add")) {                                                       // a
    String timestamp = (String) ((Map<String, Object>) event.get("context")).get("timestamp");
    Map<String, Object> item = (Map<String, Object>) ((Map<String, Object>) event.get("directObject")).get("item");
    Cart cart = new Cart(store.get(asCartKey(shopper)));
    cart.addItem(item);
    store.put(asTimestampKey(shopper), timestamp);
    store.put(asCartKey(shopper), cart.asJson());
  } else if (verb.equals("place")) {                                              // b
    resetShopper(shopper);
  }
}
Example 10
Source File: SamzaEntranceProcessingItem.java (from incubator-samoa, Apache License 2.0)

@Override
public void process(IncomingMessageEnvelope envelope, MessageCollector collector, TaskCoordinator coordinator)
    throws Exception {
  SamzaStream output = (SamzaStream) this.getOutputStream();
  if (output == null) return; // if there is no output stream, do nothing
  output.setCollector(collector);
  ContentEvent event = (ContentEvent) envelope.getMessage();
  output.put(event);
}
Example 11
Source File: MyStreamTestTask.java (from samza, Apache License 2.0)

@Override
public void process(IncomingMessageEnvelope envelope, MessageCollector collector, TaskCoordinator coordinator)
    throws Exception {
  Integer obj = (Integer) envelope.getMessage();
  collector.send(new OutgoingMessageEnvelope(new SystemStream("test", "output"),
      envelope.getKey(), envelope.getKey(), obj * multiplier));
}
Example 12
Source File: TestLocalTableWithLowLevelApiEndToEnd.java (from samza, Apache License 2.0)

@Override
public void process(IncomingMessageEnvelope message, MessageCollector collector, TaskCoordinator coordinator) {
  TestTableData.PageView pv = (TestTableData.PageView) message.getMessage();
  pageViewTable.put(pv.getMemberId(), pv);
  TestTableData.PageView pv2 = pageViewTable.get(pv.getMemberId());
  Assert.assertEquals(pv.getMemberId(), pv2.getMemberId());
  Assert.assertEquals(pv.getPageKey(), pv2.getPageKey());
}
Example 13
Source File: TestZkStreamProcessorBase.java (from samza, Apache License 2.0)

@Override
public void process(IncomingMessageEnvelope incomingMessageEnvelope, MessageCollector messageCollector,
    TaskCoordinator taskCoordinator) throws Exception {
  Object message = incomingMessageEnvelope.getMessage();
  String key = new String((byte[]) incomingMessageEnvelope.getKey());
  Integer val = Integer.valueOf((String) message);
  LOG.info("Stream processor " + processorId + ";key=" + key + ";offset=" + incomingMessageEnvelope.getOffset()
      + "; totalRcvd=" + processedMessageCount + ";val=" + val + "; ssp="
      + incomingMessageEnvelope.getSystemStreamPartition());

  // inject a failure
  if (val >= BAD_MESSAGE_KEY && processorId.equals(processorIdToFail)) {
    LOG.info("process method failing for msg=" + message);
    throw new Exception("Processing in the processor " + processorId + " failed ");
  }

  messageCollector.send(new OutgoingMessageEnvelope(new SystemStream(outputSystem, outputTopic), message));
  processedMessageCount++;

  synchronized (endLatch) {
    if (Integer.valueOf(key) < BAD_MESSAGE_KEY) {
      endLatch.countDown();
    }
  }
}
Example 14
Source File: MultiFileHdfsReader.java (from samza, Apache License 2.0)

public IncomingMessageEnvelope readNext() {
  if (!hasNext()) {
    LOG.warn("Attempting to read more data when there aren't any. ssp=" + systemStreamPartition);
    return null;
  }
  // record the next offset before we read, so when the read fails and we reconnect,
  // we seek to the same offset that we try below
  curSingleFileOffset = curReader.nextOffset();
  IncomingMessageEnvelope messageEnvelope = curReader.readNext();
  // Copy everything except for the offset. Turn the single-file style offset into a multi-file one
  return new IncomingMessageEnvelope(messageEnvelope.getSystemStreamPartition(), getCurOffset(),
      messageEnvelope.getKey(), messageEnvelope.getMessage(), messageEnvelope.getSize(),
      messageEnvelope.getEventTime(), messageEnvelope.getArrivalTime());
}
Example 15
Source File: Checker.java (from samza, Apache License 2.0)

@Override
public void process(IncomingMessageEnvelope envelope, MessageCollector collector, TaskCoordinator coordinator) {
  String key = (String) envelope.getKey();
  String epoch = (String) envelope.getMessage();
  logger.info("Got key=" + key + ", epoch = " + epoch + " in checker...");
  checkEpoch(epoch);
  this.store.put(key, epoch);
}
Example 16
Source File: Joiner.java (from samza, Apache License 2.0)

@Override
public void process(IncomingMessageEnvelope envelope, MessageCollector collector, TaskCoordinator coordinator) {
  String key = (String) envelope.getKey();
  String value = (String) envelope.getMessage();
  String[] pieces = value.split("-");
  int epoch = Integer.parseInt(pieces[0]);
  int partition = Integer.parseInt(pieces[1].split(" ")[1]);
  Partitions partitions = loadPartitions(epoch, key);
  logger.info("Joiner got epoch = " + epoch + ", partition = " + partition + ", parts = " + partitions);
  if (partitions.epoch < epoch) {
    // we are in a new era
    if (partitions.partitions.size() != expected)
      throw new IllegalArgumentException("Should have " + expected + " partitions when new epoch starts.");
    logger.info("Reseting epoch to " + epoch);
    this.store.delete(key);
    partitions.epoch = epoch;
    partitions.partitions.clear();
    partitions.partitions.add(partition);
  } else if (partitions.epoch > epoch) {
    logger.info("Ignoring message for epoch " + epoch);
  } else {
    partitions.partitions.add(partition);
    if (partitions.partitions.size() == expected) {
      logger.info("Completed: " + key + " -> " + Integer.toString(epoch));
      collector.send(new OutgoingMessageEnvelope(new SystemStream("kafka", "completed-keys"), key,
          Integer.toString(epoch)));
    }
  }
  this.store.put(key, partitions.toString());
  logger.info("Join store in Task " + this.taskName + " " + key + " -> " + partitions.toString());
}
Example 17
Source File: TestAvroFileHdfsReader.java (from samza, Apache License 2.0)

@Test
public void testRandomRead() throws Exception {
  SystemStreamPartition ssp = new SystemStreamPartition("hdfs", "testStream", new Partition(0));
  SingleFileHdfsReader reader = new AvroFileHdfsReader(ssp);
  reader.open(AVRO_FILE, "0");
  for (int i = 0; i < NUM_EVENTS / 2; i++) {
    reader.readNext();
  }
  String offset = reader.nextOffset();
  IncomingMessageEnvelope envelope = reader.readNext();
  Assert.assertEquals(offset, envelope.getOffset());
  GenericRecord record1 = (GenericRecord) envelope.getMessage();
  for (int i = 0; i < 5; i++) reader.readNext();

  // seek to the offset within the same reader
  reader.seek(offset);
  Assert.assertEquals(offset, reader.nextOffset());
  envelope = reader.readNext();
  Assert.assertEquals(offset, envelope.getOffset());
  GenericRecord record2 = (GenericRecord) envelope.getMessage();
  Assert.assertEquals(record1, record2);
  reader.close();

  // open a new reader and initialize it with the offset
  reader = new AvroFileHdfsReader(ssp);
  reader.open(AVRO_FILE, offset);
  envelope = reader.readNext();
  Assert.assertEquals(offset, envelope.getOffset());
  GenericRecord record3 = (GenericRecord) envelope.getMessage();
  Assert.assertEquals(record1, record3);
  reader.close();
}
Example 18
Source File: StreamOperatorTask.java (from samza, Apache License 2.0)

/**
 * Passes the incoming message envelopes along to the {@link InputOperatorImpl} node
 * for the input {@link SystemStream}. It is non-blocking and dispatches the message to the container thread
 * pool. The thread pool size is configured through job.container.thread.pool.size. In the absence of the config,
 * the task executes the DAG on the run loop thread.
 * <p>
 * From then on, each {@link org.apache.samza.operators.impl.OperatorImpl} propagates its transformed output to
 * its chained {@link org.apache.samza.operators.impl.OperatorImpl}s itself.
 *
 * @param ime incoming message envelope to process
 * @param collector the collector to send messages with
 * @param coordinator the coordinator to request commits or shutdown
 * @param callback the task callback handle
 */
@Override
public final void processAsync(IncomingMessageEnvelope ime, MessageCollector collector,
    TaskCoordinator coordinator, TaskCallback callback) {
  Runnable processRunnable = () -> {
    try {
      SystemStream systemStream = ime.getSystemStreamPartition().getSystemStream();
      InputOperatorImpl inputOpImpl = operatorImplGraph.getInputOperator(systemStream);
      if (inputOpImpl != null) {
        CompletionStage<Void> processFuture;
        MessageType messageType = MessageType.of(ime.getMessage());
        switch (messageType) {
          case USER_MESSAGE:
            processFuture = inputOpImpl.onMessageAsync(ime, collector, coordinator);
            break;
          case END_OF_STREAM:
            EndOfStreamMessage eosMessage = (EndOfStreamMessage) ime.getMessage();
            processFuture = inputOpImpl.aggregateEndOfStream(eosMessage, ime.getSystemStreamPartition(),
                collector, coordinator);
            break;
          case WATERMARK:
            WatermarkMessage watermarkMessage = (WatermarkMessage) ime.getMessage();
            processFuture = inputOpImpl.aggregateWatermark(watermarkMessage, ime.getSystemStreamPartition(),
                collector, coordinator);
            break;
          default:
            processFuture = failedFuture(new SamzaException("Unknown message type " + messageType + " encountered."));
            break;
        }

        processFuture.whenComplete((val, ex) -> {
          if (ex != null) {
            callback.failure(ex);
          } else {
            callback.complete();
          }
        });
      }
    } catch (Exception e) {
      LOG.error("Failed to process the incoming message due to ", e);
      callback.failure(e);
    }
  };

  if (taskThreadPool != null) {
    LOG.debug("Processing message using thread pool.");
    taskThreadPool.submit(processRunnable);
  } else {
    LOG.debug("Processing message on the run loop thread.");
    processRunnable.run();
  }
}
Example 19
Source File: CoordinatorStreamSystemConsumer.java (from samza, Apache License 2.0)

/**
 * Read all messages from the earliest offset, all the way to the latest.
 * Currently, this method only pays attention to config messages.
 */
public void bootstrap() {
  synchronized (bootstrapLock) {
    // Make a copy so readers aren't affected while we modify the set.
    final LinkedHashSet<CoordinatorStreamMessage> bootstrappedMessages = new LinkedHashSet<>(bootstrappedStreamSet);

    log.info("Bootstrapping configuration from coordinator stream.");
    SystemStreamPartitionIterator iterator =
        new SystemStreamPartitionIterator(systemConsumer, coordinatorSystemStreamPartition);

    try {
      while (iterator.hasNext()) {
        IncomingMessageEnvelope envelope = iterator.next();
        Object[] keyArray = keySerde.fromBytes((byte[]) envelope.getKey()).toArray();
        Map<String, Object> valueMap = null;
        if (envelope.getMessage() != null) {
          valueMap = messageSerde.fromBytes((byte[]) envelope.getMessage());
        }
        CoordinatorStreamMessage coordinatorStreamMessage = new CoordinatorStreamMessage(keyArray, valueMap);
        log.debug("Received coordinator stream message: {}", coordinatorStreamMessage);
        // Remove any existing entry. Set.add() does not add if the element already exists.
        if (bootstrappedMessages.remove(coordinatorStreamMessage)) {
          log.debug("Removed duplicate message: {}", coordinatorStreamMessage);
        }
        bootstrappedMessages.add(coordinatorStreamMessage);

        if (SetConfig.TYPE.equals(coordinatorStreamMessage.getType())) {
          String configKey = coordinatorStreamMessage.getKey();
          if (coordinatorStreamMessage.isDelete()) {
            configMap.remove(configKey);
          } else {
            String configValue = new SetConfig(coordinatorStreamMessage).getConfigValue();
            configMap.put(configKey, configValue);
          }
        }
      }
      bootstrappedStreamSet = Collections.unmodifiableSet(bootstrappedMessages);
      log.debug("Bootstrapped configuration: {}", configMap);
      isBootstrapped = true;
    } catch (Exception e) {
      throw new SamzaException(e);
    }
  }
}