com.datatorrent.common.util.Pair Java Examples
The following examples show how to use com.datatorrent.common.util.Pair.
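For orientation before the examples: as the snippets below show, Pair is a simple two-member holder that is constructed with both members and exposes them both as public fields (first, second) and through accessors (getFirst(), getSecond()). A minimal sketch (the class name PairDemo is just for illustration):

import com.datatorrent.common.util.Pair;

public class PairDemo
{
  public static void main(String[] args)
  {
    // Construct a pair; the diamond form also appears in Example #9 below.
    Pair<String, Integer> p = new Pair<>("tuples", 42);

    // Both access styles appear throughout the examples:
    String name = p.getFirst(); // accessor style, as in Example #2
    int count = p.second;       // direct field style, as in Example #1

    System.out.println(name + " = " + count);
  }
}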
Example #1
Source File: FSWindowDataManagerTest.java From attic-apex-malhar with Apache License 2.0
@Test
public void testAbsoluteRecoveryPath() throws IOException
{
  Pair<Context.OperatorContext, FSWindowDataManager> pair = createManagerAndContextFor(1);
  pair.second.setStatePathRelativeToAppPath(false);
  long time = System.currentTimeMillis();
  pair.second.setStatePath("target/" + time);

  pair.second.setup(pair.first);
  Map<Integer, String> data = Maps.newHashMap();
  data.put(1, "one");
  data.put(2, "two");
  data.put(3, "three");
  pair.second.save(data, 1);

  File recoveryDir = new File("target/" + time);
  Assert.assertTrue("recover filePath exist", recoveryDir.isDirectory());
  pair.second.teardown();
}
Example #2
Source File: StreamingContainerManager.java From attic-apex-core with Apache License 2.0
private void purgeCheckpoints()
{
  for (Pair<PTOperator, Long> p : purgeCheckpoints) {
    final PTOperator operator = p.getFirst();
    if (!operator.isOperatorStateLess()) {
      final long windowId = p.getSecond();
      Runnable r = new Runnable()
      {
        @Override
        public void run()
        {
          try {
            operator.getOperatorMeta().getValue(OperatorContext.STORAGE_AGENT).delete(operator.getId(), windowId);
          } catch (IOException ex) {
            LOG.error("Failed to purge checkpoint for operator {} for windowId {}", operator, windowId, ex);
          }
        }
      };
      poolExecutor.submit(r);
    }
  }
  purgeCheckpoints.clear();
}
Example #3
Source File: FSWindowDataManagerTest.java From attic-apex-malhar with Apache License 2.0
@Test
public void testSave() throws IOException
{
  Pair<Context.OperatorContext, FSWindowDataManager> pair = createManagerAndContextFor(1);
  pair.second.setup(pair.first);
  Map<Integer, String> data = Maps.newHashMap();
  data.put(1, "one");
  data.put(2, "two");
  data.put(3, "three");
  pair.second.save(data, 1);

  pair.second.setup(pair.first);
  @SuppressWarnings("unchecked")
  Map<Integer, String> artifact = (Map<Integer, String>)pair.second.retrieve(1);
  Assert.assertEquals("dataOf1", data, artifact);
  pair.second.teardown();
}
Example #4
Source File: AbstractKinesisOutputOperator.java From attic-apex-malhar with Apache License 2.0
public void processTuple(T tuple)
{
  // Send out single data
  try {
    if (isBatchProcessing) {
      if (putRecordsRequestEntryList.size() == batchSize) {
        flushRecords();
        logger.debug("flushed {} records.", batchSize);
      }
      addRecord(tuple);
    } else {
      Pair<String, V> keyValue = tupleToKeyValue(tuple);
      PutRecordRequest requestRecord = new PutRecordRequest();
      requestRecord.setStreamName(streamName);
      requestRecord.setPartitionKey(keyValue.first);
      requestRecord.setData(ByteBuffer.wrap(getRecord(keyValue.second)));
      client.putRecord(requestRecord);
    }
    sendCount++;
  } catch (AmazonClientException e) {
    throw new RuntimeException(e);
  }
}
Example #5
Source File: LogicalCompare.java From attic-apex-malhar with Apache License 2.0
@Override
public void process(Pair<T, T> tuple)
{
  int i = tuple.first.compareTo(tuple.second);
  if (i > 0) {
    greaterThan.emit(tuple);
    greaterThanOrEqualTo.emit(tuple);
    notEqualTo.emit(tuple);
  } else if (i < 0) {
    lessThan.emit(tuple);
    lessThanOrEqualTo.emit(tuple);
    notEqualTo.emit(tuple);
  } else {
    equalTo.emit(tuple);
    lessThanOrEqualTo.emit(tuple);
    greaterThanOrEqualTo.emit(tuple);
  }
}
Example #6
Source File: AbstractExactlyOnceKafkaOutputOperator.java From attic-apex-malhar with Apache License 2.0
@Override
public void process(T tuple)
{
  Pair<K, V> keyValue = tupleToKeyValue(tuple);
  int pid = 0;
  if (partitioner != null) {
    pid = partitioner.partition(keyValue.first, partitionNum);
  }
  Pair<byte[], byte[]> lastMsg = lastMsgs.get(pid);
  if (lastMsg == null || compareToLastMsg(keyValue, lastMsg) > 0) {
    getProducer().send(new KeyedMessage<K, V>(getTopic(), keyValue.first, keyValue.second));
    sendCount++;
  } else {
    // ignore the tuple because Kafka already has it
    logger.debug("Ignore tuple " + tuple);
    return;
  }
}
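The guard above relies on two hooks that a concrete subclass supplies: tupleToKeyValue() and compareToLastMsg(). A minimal sketch of one possible subclass, assuming (hypothetically) that tuples are Long values doubling as monotonically increasing sequence numbers and that the value bytes of the last Kafka message encode that sequence as a big-endian long, so a positive comparison means "not yet sent"; the generic parameters and hook signatures are inferred from the process() method shown above:

import java.nio.ByteBuffer;
import com.datatorrent.common.util.Pair;

// Hypothetical subclass for illustration only.
public class SequencedKafkaOutputOperator extends AbstractExactlyOnceKafkaOutputOperator<Long, Long, Long>
{
  @Override
  protected Pair<Long, Long> tupleToKeyValue(Long tuple)
  {
    // Use the tuple itself as both partition key and value.
    return new Pair<>(tuple, tuple);
  }

  @Override
  protected int compareToLastMsg(Pair<Long, Long> keyValue, Pair<byte[], byte[]> lastMsg)
  {
    // Positive when this tuple's sequence is newer than the last recorded message.
    return Long.compare(keyValue.second, ByteBuffer.wrap(lastMsg.second).getLong());
  }
}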
Example #7
Source File: AbstractKinesisInputOperator.java From attic-apex-malhar with Apache License 2.0
/**
 * Implement InputOperator Interface.
 */
@Override
public void emitTuples()
{
  if (currentWindowId <= windowDataManager.getLargestCompletedWindow()) {
    return;
  }
  int count = consumer.getQueueSize();
  if (maxTuplesPerWindow > 0) {
    count = Math.min(count, maxTuplesPerWindow - emitCount);
  }
  for (int i = 0; i < count; i++) {
    Pair<String, Record> data = consumer.pollRecord();
    String shardId = data.getFirst();
    String recordId = data.getSecond().getSequenceNumber();
    emitTuple(data);
    MutablePair<String, Integer> shardOffsetAndCount = currentWindowRecoveryState.get(shardId);
    if (shardOffsetAndCount == null) {
      currentWindowRecoveryState.put(shardId, new MutablePair<String, Integer>(recordId, 1));
    } else {
      shardOffsetAndCount.setRight(shardOffsetAndCount.right + 1);
    }
    shardPosition.put(shardId, recordId);
  }
  emitCount += count;
}
Example #8
Source File: AbstractExactlyOnceKafkaOutputOperator.java From attic-apex-malhar with Apache License 2.0
private void initializeLastProcessingOffset()
{
  // read last received kafka message
  TopicMetadata tm = KafkaMetadataUtil.getTopicMetadata(Sets.newHashSet((String)getConfigProperties().get(KafkaMetadataUtil.PRODUCER_PROP_BROKERLIST)), this.getTopic());

  if (tm == null) {
    throw new RuntimeException("Failed to retrieve topic metadata");
  }

  partitionNum = tm.partitionsMetadata().size();
  lastMsgs = new HashMap<Integer, Pair<byte[], byte[]>>(partitionNum);

  for (PartitionMetadata pm : tm.partitionsMetadata()) {
    String leadBroker = pm.leader().host();
    int port = pm.leader().port();
    String clientName = this.getClass().getName().replace('$', '.') + "_Client_" + tm.topic() + "_" + pm.partitionId();
    SimpleConsumer consumer = new SimpleConsumer(leadBroker, port, 100000, 64 * 1024, clientName);

    long readOffset = KafkaMetadataUtil.getLastOffset(consumer, tm.topic(), pm.partitionId(), kafka.api.OffsetRequest.LatestTime(), clientName);

    FetchRequest req = new FetchRequestBuilder().clientId(clientName).addFetch(tm.topic(), pm.partitionId(), readOffset - 1, 100000).build();

    FetchResponse fetchResponse = consumer.fetch(req);
    for (MessageAndOffset messageAndOffset : fetchResponse.messageSet(tm.topic(), pm.partitionId())) {
      Message m = messageAndOffset.message();
      ByteBuffer payload = m.payload();
      ByteBuffer key = m.key();
      byte[] valueBytes = new byte[payload.limit()];
      byte[] keyBytes = new byte[key.limit()];
      payload.get(valueBytes);
      key.get(keyBytes);
      lastMsgs.put(pm.partitionId(), new Pair<byte[], byte[]>(keyBytes, valueBytes));
    }
  }
}
Example #9
Source File: FSWindowDataManagerTest.java From attic-apex-malhar with Apache License 2.0
private Pair<Context.OperatorContext, FSWindowDataManager> createManagerAndContextFor(int operatorId)
{
  FSWindowDataManager dataManager = new FSWindowDataManager();
  OperatorContext context = mockOperatorContext(operatorId, testMeta.attributes);
  return new Pair<>(context, dataManager);
}
Example #10
Source File: FSWindowDataManagerTest.java From attic-apex-malhar with Apache License 2.0
@Test
public void testDelete() throws IOException
{
  Pair<Context.OperatorContext, FSWindowDataManager> pair1 = createManagerAndContextFor(1);
  pair1.second.getWal().setMaxLength(2);
  pair1.second.setup(pair1.first);

  Map<Integer, String> dataOf1 = Maps.newHashMap();
  dataOf1.put(1, "one");
  dataOf1.put(2, "two");
  dataOf1.put(3, "three");

  for (int i = 1; i <= 9; ++i) {
    pair1.second.save(dataOf1, i);
  }
  pair1.second.committed(3);
  pair1.second.teardown();

  Pair<Context.OperatorContext, FSWindowDataManager> pair1AfterRecovery = createManagerAndContextFor(1);
  testMeta.attributes.put(Context.OperatorContext.ACTIVATION_WINDOW_ID, 1L);
  pair1AfterRecovery.second.setup(pair1AfterRecovery.first);

  Assert.assertEquals("window 1 deleted", null, pair1AfterRecovery.second.retrieve(1));
  Assert.assertEquals("window 3 deleted", null, pair1AfterRecovery.second.retrieve(3));
  Assert.assertEquals("window 4 exists", dataOf1, pair1AfterRecovery.second.retrieve(4));
  pair1.second.teardown();
}
Example #11
Source File: FSWindowDataManagerTest.java From attic-apex-malhar with Apache License 2.0
@Test
public void testRecovery() throws IOException
{
  Pair<Context.OperatorContext, FSWindowDataManager> pair1 = createManagerAndContextFor(1);
  Pair<Context.OperatorContext, FSWindowDataManager> pair2 = createManagerAndContextFor(2);
  pair1.second.setup(pair1.first);
  pair2.second.setup(pair2.first);

  Map<Integer, String> dataOf1 = Maps.newHashMap();
  dataOf1.put(1, "one");
  dataOf1.put(2, "two");
  dataOf1.put(3, "three");

  Map<Integer, String> dataOf2 = Maps.newHashMap();
  dataOf2.put(4, "four");
  dataOf2.put(5, "five");
  dataOf2.put(6, "six");

  pair1.second.save(dataOf1, 1);
  pair2.second.save(dataOf2, 2);

  pair1.second.setup(pair1.first);
  Assert.assertEquals("largest recovery window", 1, pair1.second.getLargestCompletedWindow());

  pair2.second.setup(pair2.first);
  Assert.assertEquals("largest recovery window", 2, pair2.second.getLargestCompletedWindow());
  pair1.second.teardown();
  pair2.second.teardown();

  WindowDataManager manager = pair1.second.partition(1, Sets.newHashSet(2)).get(0);
  manager.setup(pair1.first);
  Assert.assertEquals("largest recovery window", 1, manager.getLargestCompletedWindow());
  manager.teardown();
}
Example #12
Source File: FSWindowDataManagerTest.java From attic-apex-malhar with Apache License 2.0
@Test
public void testRetrieveAllPartitions() throws IOException
{
  Pair<Context.OperatorContext, FSWindowDataManager> pair1 = createManagerAndContextFor(1);
  Pair<Context.OperatorContext, FSWindowDataManager> pair2 = createManagerAndContextFor(2);
  pair1.second.setup(pair1.first);
  pair2.second.setup(pair2.first);

  Map<Integer, String> dataOf1 = Maps.newHashMap();
  dataOf1.put(1, "one");
  dataOf1.put(2, "two");
  dataOf1.put(3, "three");

  Map<Integer, String> dataOf2 = Maps.newHashMap();
  dataOf2.put(4, "four");
  dataOf2.put(5, "five");
  dataOf2.put(6, "six");

  pair1.second.save(dataOf1, 1);
  pair2.second.save(dataOf2, 1);
  pair1.second.teardown();
  pair2.second.teardown();

  List<WindowDataManager> managers = pair1.second.partition(3, null);
  managers.get(0).setup(pair1.first);
  Map<Integer, Object> artifacts = managers.get(0).retrieveAllPartitions(1);
  Assert.assertEquals("num artifacts", 2, artifacts.size());
  Assert.assertEquals("artifact 1", dataOf1, artifacts.get(1));
  Assert.assertEquals("artifact 2", dataOf2, artifacts.get(2));
  managers.get(0).teardown();
}
Example #13
Source File: FSWindowDataManagerTest.java From attic-apex-malhar with Apache License 2.0
@Test
public void testRetrieve() throws IOException
{
  Pair<Context.OperatorContext, FSWindowDataManager> pair1 = createManagerAndContextFor(1);
  Pair<Context.OperatorContext, FSWindowDataManager> pair2 = createManagerAndContextFor(2);
  pair1.second.setup(pair1.first);
  pair2.second.setup(pair2.first);

  Map<Integer, String> dataOf1 = Maps.newHashMap();
  dataOf1.put(1, "one");
  dataOf1.put(2, "two");
  dataOf1.put(3, "three");

  Map<Integer, String> dataOf2 = Maps.newHashMap();
  dataOf2.put(4, "four");
  dataOf2.put(5, "five");
  dataOf2.put(6, "six");

  pair1.second.save(dataOf1, 1);
  pair2.second.save(dataOf2, 1);

  pair1.second.setup(pair1.first);
  Object artifact1 = pair1.second.retrieve(1);
  Assert.assertEquals("data of 1", dataOf1, artifact1);

  pair2.second.setup(pair2.first);
  Object artifact2 = pair2.second.retrieve(1);
  Assert.assertEquals("data of 2", dataOf2, artifact2);

  pair1.second.teardown();
  pair2.second.teardown();
}
Example #14
Source File: FSWindowDataManagerTest.java From attic-apex-malhar with Apache License 2.0
@Test
public void testLargestRecoveryWindow()
{
  Pair<Context.OperatorContext, FSWindowDataManager> pair = createManagerAndContextFor(1);
  pair.second.setup(pair.first);
  Assert.assertEquals("largest recovery", Stateless.WINDOW_ID, pair.second.getLargestCompletedWindow());
  pair.second.teardown();
}
Example #15
Source File: Average.java From attic-apex-malhar with Apache License 2.0
/**
 * Emit average.
 */
@Override
public void endWindow()
{
  // May want to send out only if count != 0
  if (counts != 0) {
    Pair<V, Long> pair = new Pair<>(getAverage(), counts);
    average.emit(pair);
  }
  sums = 0;
  counts = 0;
}
Example #16
Source File: KinesisByteArrayOutputOperatorTest.java From attic-apex-malhar with Apache License 2.0
protected Pair<String, byte[]> getNextTuple(TupleGenerator<TestPOJO> generator)
{
  TestPOJO obj = generator.getNextTuple();
  if (fieldValueGenerator == null) {
    fieldValueGenerator = FieldValueSerializableGenerator.getFieldValueGenerator(TestPOJO.class, null);
  }
  return new Pair<String, byte[]>(obj.getRow(), fieldValueGenerator.serializeObject(obj));
}
Example #17
Source File: AppMetricsService.java From examples with Apache License 2.0
@Override
public Map<String, Object> computeAppLevelMetrics(Map<String, Map<String, Object>> completedMetrics)
{
  Long incoming = (Long)completedMetrics.get("csvParser").get("incomingTuplesCount");
  Long filtered = (Long)completedMetrics.get("filter").get("trueTuples");
  Long windowedRecordSize = (Long)completedMetrics.get("POJOGenerator").get("windowedRecordSize");
  Long recordCount = (Long)completedMetrics.get("POJOGenerator").get("emittedRecordCount");

  Map<String, Object> output = Maps.newHashMap();
  if (incoming != null && filtered != null) {
    if (incoming != 0) {
      double percentFiltered = (filtered * 100.0) / incoming;
      output.put("percentFiltered", percentFiltered);
    }
  }
  if ((windowedRecordSize != null) && (recordCount != null)) {
    if (recordCount != 0) {
      double averageRecordSize = new Double(windowedRecordSize) / new Double(recordCount);
      output.put("avgRecordSize", averageRecordSize);
    }
  }

  Collection<Collection<Pair<String, Object>>> ccp = Lists.newArrayList();
  for (Map.Entry<String, Map<String, Object>> e1 : completedMetrics.entrySet()) {
    for (Map.Entry<String, Object> e2 : e1.getValue().entrySet()) {
      Object metricValue = e2.getValue();
      Collection<Pair<String, Object>> row = Lists.newArrayList();
      row.add(new Pair<String, Object>("MetricName", e1.getKey() + "." + e2.getKey()));
      row.add(new Pair<>("MetricValue", metricValue));
      ccp.add(row);
    }
  }
  if (!ccp.isEmpty()) {
    output.put("AllMetrics", ccp);
  }
  return output;
}
Example #18
Source File: Node.java From attic-apex-core with Apache License 2.0
protected void reportStats(ContainerStats.OperatorStats stats, long windowId)
{
  stats.outputPorts = new ArrayList<>();
  for (Entry<String, Sink<Object>> e : outputs.entrySet()) {
    ContainerStats.OperatorStats.PortStats portStats = new ContainerStats.OperatorStats.PortStats(e.getKey());
    portStats.tupleCount = e.getValue().getCount(true) - controlTupleCount;
    portStats.endWindowTimestamp = endWindowEmitTime;
    stats.outputPorts.add(portStats);
  }
  controlTupleCount = 0;

  long currentCpuTime = tmb.getCurrentThreadCpuTime();
  stats.cpuTimeUsed = currentCpuTime - lastSampleCpuTime;
  lastSampleCpuTime = currentCpuTime;

  if (checkpoint != null) {
    stats.checkpoint = checkpoint;
    stats.checkpointStats = checkpointStats;
    checkpointStats = null;
    checkpoint = null;
  } else {
    Pair<FutureTask<Stats.CheckpointStats>, CheckpointWindowInfo> pair = taskQueue.peek();
    if (pair != null && pair.getFirst().isDone()) {
      taskQueue.poll();
      try {
        CheckpointWindowInfo checkpointWindowInfo = pair.getSecond();
        stats.checkpointStats = pair.getFirst().get();
        stats.checkpoint = new Checkpoint(checkpointWindowInfo.windowId, checkpointWindowInfo.applicationWindowCount, checkpointWindowInfo.checkpointWindowCount);
        if (operator instanceof Operator.CheckpointListener) {
          ((Operator.CheckpointListener)operator).checkpointed(checkpointWindowInfo.windowId);
        }
      } catch (Exception ex) {
        throw Throwables.propagate(ex);
      }
    }
  }
  context.report(stats, windowId);
}
Example #19
Source File: KinesisConsumer.java From attic-apex-malhar with Apache License 2.0
/**
 * This method is called in setup method of the operator
 */
public void create()
{
  holdingBuffer = new ArrayBlockingQueue<Pair<String, Record>>(bufferSize);
  boolean defaultSelect = (shardIds == null) || (shardIds.size() == 0);
  final List<Shard> pms = KinesisUtil.getInstance().getShardList(streamName);
  for (final Shard shId : pms) {
    if ((shardIds.contains(shId.getShardId()) || defaultSelect) && !closedShards.contains(shId)) {
      simpleConsumerThreads.add(shId);
    }
  }
}
Example #20
Source File: StreamingContainerManager.java From attic-apex-core with Apache License 2.0
@VisibleForTesting
protected Collection<Pair<Long, Map<String, Object>>> getLogicalMetrics(String operatorName)
{
  if (logicalMetrics.get(operatorName) != null) {
    return Collections.unmodifiableCollection(logicalMetrics.get(operatorName));
  }
  return null;
}
Example #21
Source File: AbstractKinesisOutputOperator.java From attic-apex-malhar with Apache License 2.0
private void addRecord(T tuple)
{
  try {
    Pair<String, V> keyValue = tupleToKeyValue(tuple);
    PutRecordsRequestEntry putRecordsEntry = new PutRecordsRequestEntry();
    putRecordsEntry.setData(ByteBuffer.wrap(getRecord(keyValue.second)));
    putRecordsEntry.setPartitionKey(keyValue.first);
    putRecordsRequestEntryList.add(putRecordsEntry);
  } catch (AmazonClientException e) {
    throw new RuntimeException(e);
  }
}
Example #22
Source File: Average.java From attic-apex-malhar with Apache License 2.0
@Override
public void process(Pair<V, Long> pair)
{
  sums += pair.getFirst().doubleValue() * pair.getSecond();
  counts += pair.getSecond();
}
Example #23
Source File: AbstractDAGExecutionPluginContext.java From attic-apex-core with Apache License 2.0
@Override
public Queue<Pair<Long, Map<String, Object>>> getWindowMetrics(String operatorName)
{
  return dnmgr.getWindowMetrics(operatorName);
}
Example #24
Source File: FSWindowDataManagerTest.java From attic-apex-malhar with Apache License 2.0
@Test
public void testDeleteDoesNotRemoveTmpFiles() throws IOException
{
  Pair<Context.OperatorContext, FSWindowDataManager> pair1 = createManagerAndContextFor(1);
  pair1.second.setup(pair1.first);

  Pair<Context.OperatorContext, FSWindowDataManager> pair2 = createManagerAndContextFor(2);
  pair2.second.setup(pair2.first);

  Pair<Context.OperatorContext, FSWindowDataManager> pair3 = createManagerAndContextFor(3);
  pair3.second.setup(pair3.first);

  Map<Integer, String> dataOf1 = Maps.newHashMap();
  dataOf1.put(1, "one");
  dataOf1.put(2, "two");
  dataOf1.put(3, "three");

  Map<Integer, String> dataOf2 = Maps.newHashMap();
  dataOf2.put(4, "four");
  dataOf2.put(5, "five");
  dataOf2.put(6, "six");

  Map<Integer, String> dataOf3 = Maps.newHashMap();
  dataOf3.put(7, "seven");
  dataOf3.put(8, "eight");
  dataOf3.put(9, "nine");

  for (int i = 1; i <= 9; ++i) {
    pair1.second.save(dataOf1, i);
  }
  for (int i = 1; i <= 6; ++i) {
    pair2.second.save(dataOf2, i);
  }
  for (int i = 1; i <= 3; ++i) {
    pair3.second.save(dataOf3, i);
  }

  pair1.second.teardown();
  pair2.second.teardown();
  pair3.second.teardown();

  FSWindowDataManager fsManager = (FSWindowDataManager)pair1.second.partition(1, Sets.newHashSet(2, 3)).get(0);
  fsManager.setup(pair1.first);
  Assert.assertEquals("recovery window", 3, fsManager.getLargestCompletedWindow());

  Map<Integer, Object> artifacts = fsManager.retrieveAllPartitions(1);
  Assert.assertEquals("num artifacts", 3, artifacts.size());

  fsManager.committed(3);
  fsManager.teardown();

  testMeta.attributes.put(Context.OperatorContext.ACTIVATION_WINDOW_ID, 3L);
  fsManager.setup(pair1.first);
  Assert.assertEquals("recovery window", Stateless.WINDOW_ID, fsManager.getLargestCompletedWindow());
  fsManager.teardown();
}
Example #25
Source File: Node.java From attic-apex-core with Apache License 2.0
void checkpoint(long windowId)
{
  if (!context.stateless) {
    if (operator instanceof Operator.CheckpointNotificationListener) {
      ((Operator.CheckpointNotificationListener)operator).beforeCheckpoint(windowId);
    }

    StorageAgent ba = context.getValue(OperatorContext.STORAGE_AGENT);
    if (ba != null) {
      try {
        checkpointStats = new Stats.CheckpointStats();
        checkpointStats.checkpointStartTime = System.currentTimeMillis();
        ba.save(operator, id, windowId);
        if (ba instanceof AsyncStorageAgent) {
          AsyncStorageAgent asyncStorageAgent = (AsyncStorageAgent)ba;
          if (!asyncStorageAgent.isSyncCheckpoint()) {
            if (PROCESSING_MODE != ProcessingMode.EXACTLY_ONCE) {
              // Asynchronous checkpoint: hand the flush off to a background task
              // and report its stats later from the task queue (see reportStats above).
              CheckpointWindowInfo checkpointWindowInfo = new CheckpointWindowInfo();
              checkpointWindowInfo.windowId = windowId;
              checkpointWindowInfo.applicationWindowCount = applicationWindowCount;
              checkpointWindowInfo.checkpointWindowCount = checkpointWindowCount;
              CheckpointHandler checkpointHandler = new CheckpointHandler();
              checkpointHandler.agent = asyncStorageAgent;
              checkpointHandler.operatorId = id;
              checkpointHandler.windowId = windowId;
              checkpointHandler.stats = checkpointStats;
              FutureTask<Stats.CheckpointStats> futureTask = new FutureTask<>(checkpointHandler);
              taskQueue.add(new Pair<>(futureTask, checkpointWindowInfo));
              executorService.submit(futureTask);
              checkpoint = null;
              checkpointStats = null;
              return;
            } else {
              asyncStorageAgent.flush(id, windowId);
            }
          }
        }
        checkpointStats.checkpointTime = System.currentTimeMillis() - checkpointStats.checkpointStartTime;
      } catch (IOException ie) {
        try {
          logger.warn("Rolling back checkpoint {} for Operator {} due to the exception {}", Codec.getStringWindowId(windowId), operator, ie);
          ba.delete(id, windowId);
        } catch (IOException ex) {
          logger.warn("Error while rolling back checkpoint", ex);
        }
        throw new RuntimeException(ie);
      }
    }
  }

  calculateNextCheckpointWindow();
  dagCheckpointOffsetCount = 0;
  checkpoint = new Checkpoint(windowId, applicationWindowCount, checkpointWindowCount);
  if (operator instanceof Operator.CheckpointListener) {
    ((Operator.CheckpointListener)operator).checkpointed(windowId);
  }
}
Example #26
Source File: StreamingContainerManager.java From attic-apex-core with Apache License 2.0
public Queue<Pair<Long, Map<String, Object>>> getWindowMetrics(String operatorName)
{
  return logicalMetrics.get(operatorName);
}
Example #27
Source File: LogicalCompareTest.java From attic-apex-malhar with Apache License 2.0
/**
 * Test operator logic emits correct results.
 */
@SuppressWarnings({"rawtypes", "unchecked"})
@Test
public void testNodeProcessing()
{
  LogicalCompare<Integer> oper = new LogicalCompare<Integer>()
  {
  };
  CollectorTestSink eSink = new CollectorTestSink();
  CollectorTestSink neSink = new CollectorTestSink();
  CollectorTestSink gtSink = new CollectorTestSink();
  CollectorTestSink gteSink = new CollectorTestSink();
  CollectorTestSink ltSink = new CollectorTestSink();
  CollectorTestSink lteSink = new CollectorTestSink();

  oper.equalTo.setSink(eSink);
  oper.notEqualTo.setSink(neSink);
  oper.greaterThan.setSink(gtSink);
  oper.greaterThanOrEqualTo.setSink(gteSink);
  oper.lessThan.setSink(ltSink);
  oper.lessThanOrEqualTo.setSink(lteSink);

  Pair<Integer, Integer> gtuple = new Pair<Integer, Integer>(2, 1);
  Pair<Integer, Integer> etuple = new Pair<Integer, Integer>(2, 2);
  Pair<Integer, Integer> ltuple = new Pair<Integer, Integer>(2, 3);

  oper.beginWindow(0);
  oper.input.process(gtuple);
  oper.input.process(etuple);
  oper.input.process(ltuple);
  oper.endWindow();

  Assert.assertEquals("number emitted tuples", 1, eSink.collectedTuples.size());
  Assert.assertEquals("tuples were", eSink.collectedTuples.get(0).equals(etuple), true);

  Assert.assertEquals("number emitted tuples", 2, neSink.collectedTuples.size());
  Assert.assertEquals("tuples were", neSink.collectedTuples.get(0).equals(gtuple), true);
  Assert.assertEquals("tuples were", neSink.collectedTuples.get(1).equals(ltuple), true);

  Assert.assertEquals("number emitted tuples", 1, gtSink.collectedTuples.size());
  Assert.assertEquals("tuples were", gtSink.collectedTuples.get(0).equals(gtuple), true);

  Assert.assertEquals("number emitted tuples", 2, gteSink.collectedTuples.size());
  Assert.assertEquals("tuples were", gteSink.collectedTuples.get(0).equals(gtuple), true);
  Assert.assertEquals("tuples were", gteSink.collectedTuples.get(1).equals(etuple), true);

  Assert.assertEquals("number emitted tuples", 1, ltSink.collectedTuples.size());
  Assert.assertEquals("tuples were", ltSink.collectedTuples.get(0).equals(ltuple), true);

  Assert.assertEquals("number emitted tuples", 2, lteSink.collectedTuples.size());
  Assert.assertEquals("tuples were", lteSink.collectedTuples.get(0).equals(etuple), true);
  Assert.assertEquals("tuples were", lteSink.collectedTuples.get(1).equals(ltuple), true);
}
Example #28
Source File: AverageTest.java From attic-apex-malhar with Apache License 2.0
@SuppressWarnings({ "unchecked", "rawtypes" }) public void testNodeSchemaProcessing(Average oper) { CollectorTestSink averageSink = new CollectorTestSink(); oper.average.setSink(averageSink); oper.beginWindow(0); // Double a = new Double(2.0); Double b = new Double(20.0); Double c = new Double(1000.0); oper.data.process(a); oper.data.process(b); oper.data.process(c); a = 1.0; oper.data.process(a); a = 10.0; oper.data.process(a); b = 5.0; oper.data.process(b); b = 12.0; oper.data.process(b); c = 22.0; oper.data.process(c); c = 14.0; oper.data.process(c); a = 46.0; oper.data.process(a); b = 2.0; oper.data.process(b); a = 23.0; oper.data.process(a); oper.endWindow(); // Assert.assertEquals("number emitted tuples", 1, averageSink.collectedTuples.size()); for (Object o : averageSink.collectedTuples) { // count is 12 Number val = ((Pair<? extends Number, Integer>)o).getFirst().intValue(); Assert.assertEquals("emitted average value was was ", new Integer(1157 / 12), val); } }
Example #29
Source File: AbstractKinesisInputOperator.java From attic-apex-malhar with Apache License 2.0
/**
 * Any concrete class derived from AbstractKinesisInputOperator may implement this method to emit tuples to an output port.
 */
public void emitTuple(Pair<String, Record> data)
{
  outputPort.emit(getTuple(data.getSecond()));
}
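Here getTuple(Record) is the conversion hook a concrete subclass provides. A minimal sketch, assuming the operator is parameterized on the tuple type and that the payload should be decoded as UTF-8 text; the class name is hypothetical:

import java.nio.charset.StandardCharsets;
import com.amazonaws.services.kinesis.model.Record;

// Hypothetical subclass for illustration only.
public class StringKinesisInputOperator extends AbstractKinesisInputOperator<String>
{
  @Override
  public String getTuple(Record record)
  {
    // Copy the payload out of the record's ByteBuffer and decode it as UTF-8.
    byte[] bytes = new byte[record.getData().remaining()];
    record.getData().get(bytes);
    return new String(bytes, StandardCharsets.UTF_8);
  }
}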
Example #30
Source File: KinesisByteArrayOutputOperator.java From attic-apex-malhar with Apache License 2.0
@Override
protected Pair<String, byte[]> tupleToKeyValue(Pair<String, byte[]> tuple)
{
  return tuple;
}