org.apache.flink.api.java.tuple.Tuple2 Java Examples
The following examples show how to use org.apache.flink.api.java.tuple.Tuple2. Each example is excerpted from the open-source project named in its Source File line.
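Before the project examples, here is a minimal standalone sketch of how a Tuple2 is created and read. This snippet is illustrative and not taken from any of the projects below; the class name Tuple2Sketch is invented:

import org.apache.flink.api.java.tuple.Tuple2;

public class Tuple2Sketch {
    public static void main(String[] args) {
        // Two equivalent ways to build a 2-tuple.
        Tuple2<String, Integer> a = new Tuple2<>("hello", 1);
        Tuple2<String, Integer> b = Tuple2.of("world", 2);

        // Fields are public and positional: f0 is the first field, f1 the second.
        System.out.println(a.f0 + " -> " + a.f1);  // prints: hello -> 1

        // Tuples are mutable, so instances can be reused instead of reallocated.
        b.f1 = 42;
        System.out.println(b);  // prints: (world,42)
    }
}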
Example #1
Source File: SkipListSerializerTest.java From flink with Apache License 2.0

private void testSkipListKeySerializer(int delta) throws IOException {
    String key = "key-abcdedg" + delta;
    String namespace = "namespace-dfsfdafd" + delta;

    byte[] skipListKey = skipListKeySerializer.serialize(key, namespace);
    int offset = 10;
    byte[] data = new byte[10 + skipListKey.length];
    System.arraycopy(skipListKey, 0, data, offset, skipListKey.length);
    MemorySegment skipListKeySegment = MemorySegmentFactory.wrap(data);
    assertEquals(key, skipListKeySerializer.deserializeKey(skipListKeySegment, offset, skipListKey.length));
    assertEquals(namespace, skipListKeySerializer.deserializeNamespace(skipListKeySegment, offset, skipListKey.length));

    Tuple2<byte[], byte[]> serializedKeyAndNamespace = skipListKeySerializer.getSerializedKeyAndNamespace(skipListKeySegment, offset);
    assertEquals(key, deserialize(keySerializer, serializedKeyAndNamespace.f0));
    assertEquals(namespace, deserialize(namespaceSerializer, serializedKeyAndNamespace.f1));

    byte[] serializedNamespace = skipListKeySerializer.serializeNamespace(namespace);
    assertEquals(namespace, deserialize(namespaceSerializer, serializedNamespace));
}
Example #2
Source File: ReusingSortMergeCoGroupIteratorITCase.java From flink with Apache License 2.0

private Map<Integer, Collection<String>> collectData(TupleGenerator iter, int num) throws Exception {
    Map<Integer, Collection<String>> map = new HashMap<>();
    Tuple2<Integer, String> pair = new Tuple2<>();

    for (int i = 0; i < num; i++) {
        iter.next(pair);
        int key = pair.f0;

        if (!map.containsKey(key)) {
            map.put(key, new ArrayList<String>());
        }
        Collection<String> values = map.get(key);
        values.add(pair.f1);
    }
    return map;
}
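As an aside, on Java 8 and later the containsKey/put/get sequence in the loop above can be collapsed into a single call; a small sketch of the equivalent loop body, not part of the original test:

iter.next(pair);
// creates the ArrayList on first sight of the key, then appends the value
map.computeIfAbsent(pair.f0, k -> new ArrayList<>()).add(pair.f1);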
Example #3
Source File: RestServerEndpointITCase.java From flink with Apache License 2.0

@Test
public void testEndpointsMustBeUnique() throws Exception {
    final RestServerEndpointConfiguration serverConfig = RestServerEndpointConfiguration.fromConfiguration(config);

    final List<Tuple2<RestHandlerSpecification, ChannelInboundHandler>> handlers = Arrays.asList(
        Tuple2.of(new TestHeaders(), testHandler),
        Tuple2.of(new TestHeaders(), testUploadHandler)
    );

    assertThrows("REST handler registration", FlinkRuntimeException.class, () -> {
        try (TestRestServerEndpoint restServerEndpoint = new TestRestServerEndpoint(serverConfig, handlers)) {
            restServerEndpoint.start();
            return null;
        }
    });
}
Example #4
Source File: WindowTranslationTest.java From flink with Apache License 2.0

@Test
@SuppressWarnings("rawtypes")
public void testReduceProcessingTime() throws Exception {
    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    env.setStreamTimeCharacteristic(TimeCharacteristic.ProcessingTime);

    DataStream<Tuple2<String, Integer>> source = env.fromElements(Tuple2.of("hello", 1), Tuple2.of("hello", 2));

    DataStream<Tuple2<String, Integer>> window1 = source
        .keyBy(new TupleKeySelector())
        .window(SlidingProcessingTimeWindows.of(Time.of(1, TimeUnit.SECONDS), Time.of(100, TimeUnit.MILLISECONDS)))
        .reduce(new DummyReducer());

    OneInputTransformation<Tuple2<String, Integer>, Tuple2<String, Integer>> transform =
        (OneInputTransformation<Tuple2<String, Integer>, Tuple2<String, Integer>>) window1.getTransformation();
    OneInputStreamOperator<Tuple2<String, Integer>, Tuple2<String, Integer>> operator = transform.getOperator();
    Assert.assertTrue(operator instanceof WindowOperator);
    WindowOperator<String, Tuple2<String, Integer>, ?, ?, ?> winOperator = (WindowOperator<String, Tuple2<String, Integer>, ?, ?, ?>) operator;
    Assert.assertTrue(winOperator.getTrigger() instanceof ProcessingTimeTrigger);
    Assert.assertTrue(winOperator.getWindowAssigner() instanceof SlidingProcessingTimeWindows);
    Assert.assertTrue(winOperator.getStateDescriptor() instanceof ReducingStateDescriptor);

    processElementAndEnsureOutput(winOperator, winOperator.getKeySelector(), BasicTypeInfo.STRING_TYPE_INFO, new Tuple2<>("hello", 1));
}
Example #5
Source File: SiddhiStream.java From flink-siddhi with Apache License 2.0

/**
 * Siddhi Continuous Query Language (CQL).
 *
 * @return ExecutionSiddhiStream context
 */
public ExecutionSiddhiStream cql(DataStream<ControlEvent> controlStream) {
    DataStream<Tuple2<StreamRoute, Object>> unionStream = controlStream
        .map(new NamedControlStream(ControlEvent.DEFAULT_INTERNAL_CONTROL_STREAM))
        .broadcast()
        .union(this.toDataStream())
        .transform("add route transform",
            SiddhiTypeFactory.getStreamTupleTypeInformation(TypeInformation.of(Object.class)),
            new AddRouteOperator(getCepEnvironment().getDataStreamSchemas()));

    DataStream<Tuple2<StreamRoute, Object>> partitionedStream = new DataStream<>(
        unionStream.getExecutionEnvironment(),
        new PartitionTransformation<>(unionStream.getTransformation(), new DynamicPartitioner()));

    return new ExecutionSiddhiStream(partitionedStream, null, getCepEnvironment());
}
Example #6
Source File: RocksFullSnapshotStrategy.java From flink with Apache License 2.0

private void writeSnapshotToOutputStream(
        @Nonnull CheckpointStreamWithResultProvider checkpointStreamWithResultProvider,
        @Nonnull KeyGroupRangeOffsets keyGroupRangeOffsets) throws IOException, InterruptedException {

    final List<Tuple2<RocksIteratorWrapper, Integer>> kvStateIterators = new ArrayList<>(metaData.size());
    final DataOutputView outputView =
        new DataOutputViewStreamWrapper(checkpointStreamWithResultProvider.getCheckpointOutputStream());
    final ReadOptions readOptions = new ReadOptions();
    try {
        readOptions.setSnapshot(snapshot);
        writeKVStateMetaData(kvStateIterators, readOptions, outputView);
        writeKVStateData(kvStateIterators, checkpointStreamWithResultProvider, keyGroupRangeOffsets);
    } finally {
        for (Tuple2<RocksIteratorWrapper, Integer> kvStateIterator : kvStateIterators) {
            IOUtils.closeQuietly(kvStateIterator.f0);
        }
        IOUtils.closeQuietly(readOptions);
    }
}
Example #7
Source File: CassandraTupleWriteAheadSinkExample.java From flink with Apache License 2.0

public static void main(String[] args) throws Exception {
    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    env.setParallelism(1);
    env.enableCheckpointing(1000);
    env.setRestartStrategy(RestartStrategies.fixedDelayRestart(1, 1000));
    env.setStateBackend(new FsStateBackend("file:///" + System.getProperty("java.io.tmpdir") + "/flink/backend"));

    CassandraSink<Tuple2<String, Integer>> sink = CassandraSink.addSink(env.addSource(new MySource()))
        .setQuery("INSERT INTO example.values (id, counter) values (?, ?);")
        .enableWriteAheadLog()
        .setClusterBuilder(new ClusterBuilder() {
            private static final long serialVersionUID = 2793938419775311824L;

            @Override
            public Cluster buildCluster(Cluster.Builder builder) {
                return builder.addContactPoint("127.0.0.1").build();
            }
        })
        .build();

    sink.name("Cassandra Sink").disableChaining().setParallelism(1).uid("hello");

    env.execute();
}
Example #8
Source File: GroupReduceITCase.java From Flink-CEPplus with Apache License 2.0

@Test
public void testIntBasedDefinitionOnGroupSortForPartialNestedTuple() throws Exception {
    /*
     * Test int-based definition on group sort, for (partial) nested Tuple ASC
     */
    final ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
    env.setParallelism(1);

    DataSet<Tuple2<Tuple2<Integer, Integer>, String>> ds = CollectionDataSets.getGroupSortedNestedTupleDataSet(env);

    // f0.f0 is first integer
    DataSet<String> reduceDs = ds.groupBy("f1")
        .sortGroup("f0.f0", Order.ASCENDING)
        .sortGroup("f0.f1", Order.ASCENDING)
        .reduceGroup(new NestedTupleReducer());
    List<String> result = reduceDs.collect();

    String expected = "a--(1,2)-(1,3)-(2,1)-\n"
        + "b--(2,2)-\n"
        + "c--(3,3)-(3,6)-(4,9)-\n";

    compareResultAsText(result, expected);
}
Example #9
Source File: WindowTranslationTest.java From flink with Apache License 2.0

@Test
@SuppressWarnings("rawtypes")
public void testFoldEventTime() throws Exception {
    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    env.setStreamTimeCharacteristic(TimeCharacteristic.IngestionTime);

    DataStream<Tuple2<String, Integer>> source = env.fromElements(Tuple2.of("hello", 1), Tuple2.of("hello", 2));

    DataStream<Tuple3<String, String, Integer>> window1 = source
        .keyBy(0)
        .window(SlidingEventTimeWindows.of(Time.of(1, TimeUnit.SECONDS), Time.of(100, TimeUnit.MILLISECONDS)))
        .fold(new Tuple3<>("", "", 1), new DummyFolder());

    OneInputTransformation<Tuple2<String, Integer>, Tuple3<String, String, Integer>> transform =
        (OneInputTransformation<Tuple2<String, Integer>, Tuple3<String, String, Integer>>) window1.getTransformation();
    OneInputStreamOperator<Tuple2<String, Integer>, Tuple3<String, String, Integer>> operator = transform.getOperator();
    Assert.assertTrue(operator instanceof WindowOperator);
    WindowOperator<String, Tuple2<String, Integer>, ?, ?, ?> winOperator = (WindowOperator<String, Tuple2<String, Integer>, ?, ?, ?>) operator;
    Assert.assertTrue(winOperator.getTrigger() instanceof EventTimeTrigger);
    Assert.assertTrue(winOperator.getWindowAssigner() instanceof SlidingEventTimeWindows);
    Assert.assertTrue(winOperator.getStateDescriptor() instanceof FoldingStateDescriptor);

    processElementAndEnsureOutput(winOperator, winOperator.getKeySelector(), BasicTypeInfo.STRING_TYPE_INFO, new Tuple2<>("hello", 1));
}
Example #10
Source File: AbstractOuterJoinTaskTest.java From flink with Apache License 2.0

private void testSortBothOuterJoinTask(int keyCnt1, int valCnt1, int keyCnt2, int valCnt2) throws Exception {
    setOutput(this.outList, this.serializer);
    addDriverComparator(this.comparator1);
    addDriverComparator(this.comparator2);
    getTaskConfig().setDriverPairComparator(new RuntimePairComparatorFactory());
    getTaskConfig().setDriverStrategy(this.getSortDriverStrategy());
    getTaskConfig().setRelativeMemoryDriver(this.bnljn_frac);
    setNumFileHandlesForSort(4);

    final AbstractOuterJoinDriver<Tuple2<Integer, Integer>, Tuple2<Integer, Integer>, Tuple2<Integer, Integer>> testTask = getOuterJoinDriver();

    addInputSorted(new UniformIntTupleGenerator(keyCnt1, valCnt1, false), this.serializer, this.comparator1.duplicate());
    addInputSorted(new UniformIntTupleGenerator(keyCnt2, valCnt2, false), this.serializer, this.comparator2.duplicate());
    testDriver(testTask, MockJoinStub.class);

    final int expCnt = calculateExpectedCount(keyCnt1, valCnt1, keyCnt2, valCnt2);
    Assert.assertTrue("Result set size was " + this.outList.size() + ". Expected was " + expCnt, this.outList.size() == expCnt);

    this.outList.clear();
}
Example #11
Source File: AggregateITCase.java From Flink-CEPplus with Apache License 2.0

@Test
public void testGroupedAggregateOfMutableValueTypes() throws Exception {
    /*
     * Grouped Aggregate of mutable value types
     */
    final ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();

    DataSet<Tuple3<IntValue, LongValue, StringValue>> ds = ValueCollectionDataSets.get3TupleDataSet(env);
    DataSet<Tuple2<IntValue, LongValue>> aggregateDs = ds.groupBy(1)
        .aggregate(Aggregations.SUM, 0)
        .project(1, 0);

    List<Tuple2<IntValue, LongValue>> result = aggregateDs.collect();

    String expected = "1,1\n" + "2,5\n" + "3,15\n" + "4,34\n" + "5,65\n" + "6,111\n";

    compareResultAsTuples(result, expected);
}
Example #12
Source File: ExplainingTable.java From flink-learning with Apache License 2.0

public static void main(String[] args) {
    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    StreamTableEnvironment tEnv = StreamTableEnvironment.create(env);

    DataStream<Tuple2<Integer, String>> stream1 = env.fromElements(new Tuple2<>(1, "hello"));
    DataStream<Tuple2<Integer, String>> stream2 = env.fromElements(new Tuple2<>(1, "hello"));

    Table table1 = tEnv.fromDataStream(stream1, "count, word");
    Table table2 = tEnv.fromDataStream(stream2, "count, word");

    Table table = table1
        .where("LIKE(word, 'F%')")
        .unionAll(table2);

    String explanation = tEnv.explain(table);
    System.out.println(explanation);
}
Example #13
Source File: CollectionDataSets.java From flink with Apache License 2.0

public static DataSet<Tuple3<Tuple2<Integer, Integer>, String, Integer>> getGroupSortedNestedTupleDataSet2(ExecutionEnvironment env) {
    List<Tuple3<Tuple2<Integer, Integer>, String, Integer>> data = new ArrayList<>();
    data.add(new Tuple3<>(new Tuple2<>(1, 3), "a", 2));
    data.add(new Tuple3<>(new Tuple2<>(1, 2), "a", 1));
    data.add(new Tuple3<>(new Tuple2<>(2, 1), "a", 3));
    data.add(new Tuple3<>(new Tuple2<>(2, 2), "b", 4));
    data.add(new Tuple3<>(new Tuple2<>(3, 3), "c", 5));
    data.add(new Tuple3<>(new Tuple2<>(3, 6), "c", 6));
    data.add(new Tuple3<>(new Tuple2<>(4, 9), "c", 7));

    TupleTypeInfo<Tuple3<Tuple2<Integer, Integer>, String, Integer>> type = new TupleTypeInfo<>(
        new TupleTypeInfo<Tuple2<Integer, Integer>>(BasicTypeInfo.INT_TYPE_INFO, BasicTypeInfo.INT_TYPE_INFO),
        BasicTypeInfo.STRING_TYPE_INFO,
        BasicTypeInfo.INT_TYPE_INFO
    );

    return env.fromCollection(data, type);
}
Example #14
Source File: ScatterGatherIteration.java From flink with Apache License 2.0

@Override
public void coGroup(Iterable<Edge<K, EV>> edges,
        Iterable<Vertex<K, Tuple3<VV, LongValue, LongValue>>> state,
        Collector<Tuple2<K, Message>> out) throws Exception {

    final Iterator<Vertex<K, Tuple3<VV, LongValue, LongValue>>> stateIter = state.iterator();

    if (stateIter.hasNext()) {
        Vertex<K, Tuple3<VV, LongValue, LongValue>> vertexWithDegrees = stateIter.next();

        nextVertex.f0 = vertexWithDegrees.f0;
        nextVertex.f1 = vertexWithDegrees.f1.f0;

        scatterFunction.setInDegree(vertexWithDegrees.f1.f1.getValue());
        scatterFunction.setOutDegree(vertexWithDegrees.f1.f2.getValue());

        scatterFunction.set(edges.iterator(), out, vertexWithDegrees.getId());
        scatterFunction.sendMessages(nextVertex);
    }
}
Example #15
Source File: EdgeTargetDegreesTest.java From flink with Apache License 2.0

@Test
public void testWithEmptyGraphWithVertices() throws Exception {
    DataSet<Edge<LongValue, Tuple2<NullValue, Degrees>>> targetDegrees = emptyGraphWithVertices
        .run(new EdgeTargetDegrees<>());

    assertEquals(0, targetDegrees.collect().size());
}
Example #16
Source File: AdditionalOperatorsTest.java From Flink-CEPplus with Apache License 2.0

@Test
public void testCrossWithSmall() {
    // construct the plan
    ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
    env.setParallelism(DEFAULT_PARALLELISM);
    DataSet<Long> set1 = env.generateSequence(0, 1);
    DataSet<Long> set2 = env.generateSequence(0, 1);

    set1.crossWithTiny(set2).name("Cross")
        .output(new DiscardingOutputFormat<Tuple2<Long, Long>>());

    try {
        Plan plan = env.createProgramPlan();
        OptimizedPlan oPlan = compileWithStats(plan);
        OptimizerPlanNodeResolver resolver = new OptimizerPlanNodeResolver(oPlan);

        DualInputPlanNode crossPlanNode = resolver.getNode("Cross");
        Channel in1 = crossPlanNode.getInput1();
        Channel in2 = crossPlanNode.getInput2();

        assertEquals(ShipStrategyType.FORWARD, in1.getShipStrategy());
        assertEquals(ShipStrategyType.BROADCAST, in2.getShipStrategy());
    } catch (CompilerException ce) {
        ce.printStackTrace();
        fail("The Flink optimizer is unable to compile this plan correctly.");
    }
}
Example #17
Source File: DistinctAndGroupingOptimizerTest.java From flink with Apache License 2.0

@Test
public void testDistinctDestroysPartitioningOfNonDistinctFields() {
    try {
        ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
        env.setParallelism(4);

        @SuppressWarnings("unchecked")
        DataSet<Tuple2<Long, Long>> data = env.fromElements(new Tuple2<Long, Long>(0L, 0L), new Tuple2<Long, Long>(1L, 1L))
            .map(new IdentityMapper<Tuple2<Long, Long>>()).setParallelism(4);

        data.distinct(1)
            .groupBy(0)
            .sum(1)
            .output(new DiscardingOutputFormat<Tuple2<Long, Long>>());

        Plan p = env.createProgramPlan();
        OptimizedPlan op = compileNoStats(p);

        SinkPlanNode sink = op.getDataSinks().iterator().next();
        SingleInputPlanNode reducer = (SingleInputPlanNode) sink.getInput().getSource();
        SingleInputPlanNode combiner = (SingleInputPlanNode) reducer.getInput().getSource();
        SingleInputPlanNode distinctReducer = (SingleInputPlanNode) combiner.getInput().getSource();

        assertEquals(ShipStrategyType.FORWARD, sink.getInput().getShipStrategy());

        // reducer must repartition, because it works on a different field
        assertEquals(ShipStrategyType.PARTITION_HASH, reducer.getInput().getShipStrategy());

        assertEquals(ShipStrategyType.FORWARD, combiner.getInput().getShipStrategy());

        // distinct reducer is partitioned
        assertEquals(ShipStrategyType.PARTITION_HASH, distinctReducer.getInput().getShipStrategy());
    } catch (Exception e) {
        e.printStackTrace();
        fail(e.getMessage());
    }
}
Example #18
Source File: TestUtils.java From Flink-CEPplus with Apache License 2.0

static OneInputStreamOperatorTestHarness<Tuple2<String, Integer>, Object> createRescalingTestSink(
        File outDir,
        int totalParallelism,
        int taskIdx,
        long inactivityInterval,
        long partMaxSize) throws Exception {

    final RollingPolicy<Tuple2<String, Integer>, String> rollingPolicy = DefaultRollingPolicy
        .create()
        .withMaxPartSize(partMaxSize)
        .withRolloverInterval(inactivityInterval)
        .withInactivityInterval(inactivityInterval)
        .build();

    final BucketAssigner<Tuple2<String, Integer>, String> bucketer = new TupleToStringBucketer();

    final Encoder<Tuple2<String, Integer>> encoder = (element, stream) -> {
        stream.write((element.f0 + '@' + element.f1).getBytes(StandardCharsets.UTF_8));
        stream.write('\n');
    };

    return createCustomRescalingTestSink(
        outDir,
        totalParallelism,
        taskIdx,
        10L,
        bucketer,
        encoder,
        rollingPolicy,
        new DefaultBucketFactoryImpl<>());
}
Example #19
Source File: ScatterGatherIteration.java From flink with Apache License 2.0

private <VVWithDegree> void configureUpdateFunction(CoGroupOperator<?, ?, Vertex<K, VVWithDegree>> updates) {
    // configure coGroup update function with name and broadcast variables
    updates = updates.name("Vertex State Updates");
    if (this.configuration != null) {
        for (Tuple2<String, DataSet<?>> e : this.configuration.getGatherBcastVars()) {
            updates = updates.withBroadcastSet(e.f1, e.f0);
        }
    }

    // let the operator know that we preserve the key field
    updates.withForwardedFieldsFirst("0").withForwardedFieldsSecond("0");
}
Example #20
Source File: SOSImpl.java From Alink with Apache License 2.0

DataSet<Tuple2<Integer, DenseVector>> computeBindingProbabilities(
        DataSet<Tuple2<Integer, DenseVector>> dissimilarityVectors,
        final double perplexity,
        final int maxIter,
        final double tol) {

    return dissimilarityVectors
        .map(new MapFunction<Tuple2<Integer, DenseVector>, Tuple2<Integer, DenseVector>>() {
            @Override
            public Tuple2<Integer, DenseVector> map(Tuple2<Integer, DenseVector> row) throws Exception {
                int id = row.f0;
                DenseVector dissmilarity = row.f1;

                // beta: 1 / (2 * sigma^2)
                // compute beta by solving a nonlinear equation
                double beta = solveForBeta(id, dissmilarity, 1.0, perplexity, maxIter, tol);

                // (1) the affinity that data point xi has with data point xj decays Gaussian-like
                //     with respect to the dissimilarity dij
                // (2) a data point has no affinity with itself, i.e., aii = 0
                DenseVector ret = DenseVector.zeros(row.f1.size());
                double s = 0.;

                // compute the affinity
                for (int i = 0; i < dissmilarity.size(); i++) {
                    if (i != id) {
                        double v = dissmilarity.get(i);
                        v = Math.exp(-v * beta);
                        s += v;
                        ret.set(i, v);
                    }
                }

                // compute the binding probability
                ret.scaleEqual(1.0 / s);
                return Tuple2.of(row.f0, ret);
            }
        })
        .withForwardedFields("f0")
        .name("computeBindingProbabilities");
}
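Spelling out what the map function above computes: with d_ij the dissimilarity between points i and j, and beta_i the bandwidth solved from the perplexity constraint, the affinity is a_ij = exp(-beta_i * d_ij) for j != i (and a_ii = 0), and the binding probability is the row-normalized affinity b_ij = a_ij / sum_k a_ik.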
Example #21
Source File: JobGraphGeneratorTest.java From flink with Apache License 2.0

@Test
public void testGeneratingJobGraphWithUnconsumedResultPartition() {
    ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();

    DataSet<Tuple2<Long, Long>> input = env.fromElements(new Tuple2<>(1L, 2L))
        .setParallelism(1);

    DataSet<Tuple2<Long, Long>> ds = input.map(new IdentityMapper<>())
        .setParallelism(3);

    AbstractID intermediateDataSetID = new AbstractID();

    // this output branch will be excluded.
    ds.output(BlockingShuffleOutputFormat.createOutputFormat(intermediateDataSetID))
        .setParallelism(1);

    // this is the normal output branch.
    ds.output(new DiscardingOutputFormat<>())
        .setParallelism(1);

    JobGraph jobGraph = compileJob(env);

    Assert.assertEquals(3, jobGraph.getVerticesSortedTopologicallyFromSources().size());

    JobVertex mapVertex = jobGraph.getVerticesSortedTopologicallyFromSources().get(1);
    Assert.assertThat(mapVertex, Matchers.instanceOf(JobVertex.class));

    // the map vertex produces two result data sets, one of which is ResultPartitionType.BLOCKING_PERSISTENT
    Assert.assertEquals(2, mapVertex.getProducedDataSets().size());
    Assert.assertTrue(mapVertex.getProducedDataSets().stream()
        .anyMatch(dataSet -> dataSet.getId().equals(new IntermediateDataSetID(intermediateDataSetID))
            && dataSet.getResultType() == ResultPartitionType.BLOCKING_PERSISTENT));
}
Example #22
Source File: LegacyStatefulJobSavepointMigrationITCase.java From flink with Apache License 2.0

@Override
public void flatMap(Tuple2<Long, Long> value, Collector<Tuple2<Long, Long>> out) throws Exception {
    out.collect(value);

    getRuntimeContext().getState(stateDescriptor).update(value.f1);
    assertEquals(value.f1, getRuntimeContext().getState(stateDescriptor).value());
}
Example #23
Source File: HashTableTest.java From flink with Apache License 2.0

public HashTableTest() {
    TypeSerializer<?>[] fieldSerializers = { LongSerializer.INSTANCE, BytePrimitiveArraySerializer.INSTANCE };
    @SuppressWarnings("unchecked")
    Class<Tuple2<Long, byte[]>> clazz = (Class<Tuple2<Long, byte[]>>) (Class<?>) Tuple2.class;
    this.buildSerializer = new TupleSerializer<Tuple2<Long, byte[]>>(clazz, fieldSerializers);

    this.probeSerializer = LongSerializer.INSTANCE;

    TypeComparator<?>[] comparators = { new LongComparator(true) };
    TypeSerializer<?>[] comparatorSerializers = { LongSerializer.INSTANCE };

    this.buildComparator = new TupleComparator<Tuple2<Long, byte[]>>(new int[] {0}, comparators, comparatorSerializers);

    this.probeComparator = new LongComparator(true);

    this.pairComparator = new TypePairComparator<Long, Tuple2<Long, byte[]>>() {

        private long ref;

        @Override
        public void setReference(Long reference) {
            ref = reference;
        }

        @Override
        public boolean equalToReference(Tuple2<Long, byte[]> candidate) {
            //noinspection UnnecessaryUnboxing
            return candidate.f0.longValue() == ref;
        }

        @Override
        public int compareToReference(Tuple2<Long, byte[]> candidate) {
            long x = ref;
            long y = candidate.f0;
            return (x < y) ? -1 : ((x == y) ? 0 : 1);
        }
    };
}
Example #24
Source File: WindowTranslationTest.java From Flink-CEPplus with Apache License 2.0

@Test
@SuppressWarnings("rawtypes")
public void testReduceWithEvictor() throws Exception {
    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    env.setStreamTimeCharacteristic(TimeCharacteristic.IngestionTime);

    DataStream<Tuple2<String, Integer>> source = env.fromElements(Tuple2.of("hello", 1), Tuple2.of("hello", 2));

    DummyReducer reducer = new DummyReducer();

    DataStream<Tuple2<String, Integer>> window1 = source
        .keyBy(0)
        .window(SlidingEventTimeWindows.of(Time.of(1, TimeUnit.SECONDS), Time.of(100, TimeUnit.MILLISECONDS)))
        .evictor(CountEvictor.of(100))
        .reduce(reducer);

    OneInputTransformation<Tuple2<String, Integer>, Tuple2<String, Integer>> transform =
        (OneInputTransformation<Tuple2<String, Integer>, Tuple2<String, Integer>>) window1.getTransformation();
    OneInputStreamOperator<Tuple2<String, Integer>, Tuple2<String, Integer>> operator = transform.getOperator();
    Assert.assertTrue(operator instanceof EvictingWindowOperator);
    EvictingWindowOperator<String, Tuple2<String, Integer>, ?, ?> winOperator = (EvictingWindowOperator<String, Tuple2<String, Integer>, ?, ?>) operator;
    Assert.assertTrue(winOperator.getTrigger() instanceof EventTimeTrigger);
    Assert.assertTrue(winOperator.getEvictor() instanceof CountEvictor);
    Assert.assertTrue(winOperator.getWindowAssigner() instanceof SlidingEventTimeWindows);
    Assert.assertTrue(winOperator.getStateDescriptor() instanceof ListStateDescriptor);

    processElementAndEnsureOutput(winOperator, winOperator.getKeySelector(), BasicTypeInfo.STRING_TYPE_INFO, new Tuple2<>("hello", 1));
}
Example #25
Source File: PartitionOperatorTest.java From flink with Apache License 2.0

@Test
public void testRangePartitionByComplexKeyWithOrders() throws Exception {
    final ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
    final DataSource<Tuple2<Tuple2<Integer, Integer>, Integer>> ds = env.fromElements(
        new Tuple2<>(new Tuple2<>(1, 1), 1),
        new Tuple2<>(new Tuple2<>(2, 2), 2),
        new Tuple2<>(new Tuple2<>(2, 2), 2)
    );
    ds.partitionByRange(0, 1).withOrders(Order.ASCENDING, Order.DESCENDING);
}
Example #26
Source File: ContinuousFileProcessingCheckpointITCase.java From flink with Apache License 2.0

public void run() {
    try {
        for (int i = 0; i < NO_OF_FILES; i++) {
            Tuple2<org.apache.hadoop.fs.Path, String> tmpFile;
            long modTime;
            do {
                // give it some time so that the files have
                // different modification timestamps.
                Thread.sleep(50);

                tmpFile = fillWithData(localFsURI, "file", i, "This is test line.");

                modTime = localFs.getFileStatus(tmpFile.f0).getModificationTime();
                if (modTime <= lastCreatedModTime) {
                    // delete the last created file to recreate it with a different timestamp
                    localFs.delete(tmpFile.f0, false);
                }
            } while (modTime <= lastCreatedModTime);
            lastCreatedModTime = modTime;

            // rename the file
            org.apache.hadoop.fs.Path file = new org.apache.hadoop.fs.Path(localFsURI + "/file" + i);
            localFs.rename(tmpFile.f0, file);
            Assert.assertTrue(localFs.exists(file));

            filesCreated.add(file);
            fileContents.put(i, tmpFile.f1);
        }
    } catch (IOException | InterruptedException e) {
        e.printStackTrace();
    }
}
Example #27
Source File: UdfAnalyzerTest.java From Flink-CEPplus with Apache License 2.0

public String map(Tuple2<String, String> value) throws Exception {
    if (value.f0.equals("whatever")) {
        return value.f0;
    } else {
        return value.f1;
    }
}
Example #28
Source File: HashTableTest.java From Flink-CEPplus with Apache License 2.0

@Override
public Tuple2<Long, byte[]> next() {
    if (count++ < numRecords) {
        return new Tuple2<>(42L, payload);
    } else {
        return null;
    }
}
Example #29
Source File: Tokenizer.java From flink with Apache License 2.0

@Override
public void flatMap(String value, Collector<Tuple2<String, Integer>> out) {
    String[] tokens = value.toLowerCase().split("\\W+");

    for (String token : tokens) {
        if (token.length() > 0) {
            out.collect(new Tuple2<>(token, 1));
        }
    }
}
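This Tokenizer is the map side of the classic WordCount. A minimal usage sketch of how it could be wired into a job follows; the input line and job name are illustrative, not from the original file:

StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

env.fromElements("to be or not to be")
    .flatMap(new Tokenizer())
    .keyBy(0)  // group by the word in field f0
    .sum(1)    // sum the counts in field f1
    .print();

env.execute("WordCount sketch");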