org.apache.flink.api.common.operators.Union Java Examples
The following examples show how to use
org.apache.flink.api.common.operators.Union.
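In user programs this operator is usually not constructed by hand: it is created when the DataSet API's union() call is translated into the common-operator plan (see the UnionOperator.translateToDataFlow examples further down). A minimal sketch of that path, assuming the Java batch DataSet API; the class name and element values are illustrative:

import org.apache.flink.api.common.Plan;
import org.apache.flink.api.java.DataSet;
import org.apache.flink.api.java.ExecutionEnvironment;
import org.apache.flink.api.java.io.DiscardingOutputFormat;

public class UnionPlanSketch {

    public static void main(String[] args) throws Exception {
        ExecutionEnvironment env = ExecutionEnvironment.createLocalEnvironment();

        DataSet<Long> first = env.generateSequence(1, 10);
        DataSet<Long> second = env.generateSequence(11, 20);

        // DataSet#union is translated into an
        // org.apache.flink.api.common.operators.Union node of the common-API plan.
        first.union(second).output(new DiscardingOutputFormat<>());

        // The Union node is now reachable from the sink of the generated plan.
        Plan plan = env.createProgramPlan();
        plan.getDataSinks().forEach(sink -> System.out.println(sink.getInput().getName()));
    }
}

Here the sink's input is the Union itself, and its name is the call-site string captured by the DataSet API (the unionLocationName seen in the UnionOperator examples at the end of this page).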
Example #1
Source File: KeyFunctions.java From Flink-CEPplus with Apache License 2.0

@SuppressWarnings("unchecked")
public static <T, K> org.apache.flink.api.common.operators.Operator<Tuple2<K, T>> appendKeyExtractor(
        org.apache.flink.api.common.operators.Operator<T> input,
        SelectorFunctionKeys<T, K> key) {

    if (input instanceof Union) {
        // if input is a union, we apply the key extractors recursively to all inputs
        org.apache.flink.api.common.operators.Operator<T> firstInput = ((Union) input).getFirstInput();
        org.apache.flink.api.common.operators.Operator<T> secondInput = ((Union) input).getSecondInput();

        org.apache.flink.api.common.operators.Operator<Tuple2<K, T>> firstInputWithKey =
                appendKeyExtractor(firstInput, key);
        org.apache.flink.api.common.operators.Operator<Tuple2<K, T>> secondInputWithKey =
                appendKeyExtractor(secondInput, key);

        return new Union(firstInputWithKey, secondInputWithKey, input.getName());
    }

    TypeInformation<T> inputType = key.getInputType();
    TypeInformation<Tuple2<K, T>> typeInfoWithKey = createTypeWithKey(key);
    KeyExtractingMapper<T, K> extractor = new KeyExtractingMapper(key.getKeyExtractor());

    MapOperatorBase<T, Tuple2<K, T>, MapFunction<T, Tuple2<K, T>>> mapper =
            new MapOperatorBase<T, Tuple2<K, T>, MapFunction<T, Tuple2<K, T>>>(
                    extractor,
                    new UnaryOperatorInformation(inputType, typeInfoWithKey),
                    "Key Extractor");

    mapper.setInput(input);
    mapper.setParallelism(input.getParallelism());

    return mapper;
}
Example #2
Source File: KeyFunctions.java From Flink-CEPplus with Apache License 2.0

@SuppressWarnings("unchecked")
public static <T, K1, K2> org.apache.flink.api.common.operators.Operator<Tuple3<K1, K2, T>> appendKeyExtractor(
        org.apache.flink.api.common.operators.Operator<T> input,
        SelectorFunctionKeys<T, K1> key1,
        SelectorFunctionKeys<T, K2> key2) {

    if (input instanceof Union) {
        // if input is a union, we apply the key extractors recursively to all inputs
        org.apache.flink.api.common.operators.Operator<T> firstInput = ((Union) input).getFirstInput();
        org.apache.flink.api.common.operators.Operator<T> secondInput = ((Union) input).getSecondInput();

        org.apache.flink.api.common.operators.Operator<Tuple3<K1, K2, T>> firstInputWithKey =
                appendKeyExtractor(firstInput, key1, key2);
        org.apache.flink.api.common.operators.Operator<Tuple3<K1, K2, T>> secondInputWithKey =
                appendKeyExtractor(secondInput, key1, key2);

        return new Union(firstInputWithKey, secondInputWithKey, input.getName());
    }

    TypeInformation<T> inputType = key1.getInputType();
    TypeInformation<Tuple3<K1, K2, T>> typeInfoWithKey = createTypeWithKey(key1, key2);
    TwoKeyExtractingMapper<T, K1, K2> extractor =
            new TwoKeyExtractingMapper<>(key1.getKeyExtractor(), key2.getKeyExtractor());

    MapOperatorBase<T, Tuple3<K1, K2, T>, MapFunction<T, Tuple3<K1, K2, T>>> mapper =
            new MapOperatorBase<T, Tuple3<K1, K2, T>, MapFunction<T, Tuple3<K1, K2, T>>>(
                    extractor,
                    new UnaryOperatorInformation<>(inputType, typeInfoWithKey),
                    "Key Extractor");

    mapper.setInput(input);
    mapper.setParallelism(input.getParallelism());

    return mapper;
}
Example #3
Source File: KeyFunctions.java From flink with Apache License 2.0

@SuppressWarnings("unchecked")
public static <T, K1, K2> org.apache.flink.api.common.operators.Operator<Tuple3<K1, K2, T>> appendKeyExtractor(
        org.apache.flink.api.common.operators.Operator<T> input,
        SelectorFunctionKeys<T, K1> key1,
        SelectorFunctionKeys<T, K2> key2) {

    if (input instanceof Union) {
        // if input is a union, we apply the key extractors recursively to all inputs
        org.apache.flink.api.common.operators.Operator<T> firstInput = ((Union) input).getFirstInput();
        org.apache.flink.api.common.operators.Operator<T> secondInput = ((Union) input).getSecondInput();

        org.apache.flink.api.common.operators.Operator<Tuple3<K1, K2, T>> firstInputWithKey =
                appendKeyExtractor(firstInput, key1, key2);
        org.apache.flink.api.common.operators.Operator<Tuple3<K1, K2, T>> secondInputWithKey =
                appendKeyExtractor(secondInput, key1, key2);

        return new Union(firstInputWithKey, secondInputWithKey, input.getName());
    }

    TypeInformation<T> inputType = key1.getInputType();
    TypeInformation<Tuple3<K1, K2, T>> typeInfoWithKey = createTypeWithKey(key1, key2);
    TwoKeyExtractingMapper<T, K1, K2> extractor =
            new TwoKeyExtractingMapper<>(key1.getKeyExtractor(), key2.getKeyExtractor());

    MapOperatorBase<T, Tuple3<K1, K2, T>, MapFunction<T, Tuple3<K1, K2, T>>> mapper =
            new MapOperatorBase<T, Tuple3<K1, K2, T>, MapFunction<T, Tuple3<K1, K2, T>>>(
                    extractor,
                    new UnaryOperatorInformation<>(inputType, typeInfoWithKey),
                    "Key Extractor");

    mapper.setInput(input);
    mapper.setParallelism(input.getParallelism());

    return mapper;
}
Example #4
Source File: KeyFunctions.java From flink with Apache License 2.0

@SuppressWarnings("unchecked")
public static <T, K> org.apache.flink.api.common.operators.Operator<Tuple2<K, T>> appendKeyExtractor(
        org.apache.flink.api.common.operators.Operator<T> input,
        SelectorFunctionKeys<T, K> key) {

    if (input instanceof Union) {
        // if input is a union, we apply the key extractors recursively to all inputs
        org.apache.flink.api.common.operators.Operator<T> firstInput = ((Union) input).getFirstInput();
        org.apache.flink.api.common.operators.Operator<T> secondInput = ((Union) input).getSecondInput();

        org.apache.flink.api.common.operators.Operator<Tuple2<K, T>> firstInputWithKey =
                appendKeyExtractor(firstInput, key);
        org.apache.flink.api.common.operators.Operator<Tuple2<K, T>> secondInputWithKey =
                appendKeyExtractor(secondInput, key);

        return new Union(firstInputWithKey, secondInputWithKey, input.getName());
    }

    TypeInformation<T> inputType = key.getInputType();
    TypeInformation<Tuple2<K, T>> typeInfoWithKey = createTypeWithKey(key);
    KeyExtractingMapper<T, K> extractor = new KeyExtractingMapper(key.getKeyExtractor());

    MapOperatorBase<T, Tuple2<K, T>, MapFunction<T, Tuple2<K, T>>> mapper =
            new MapOperatorBase<T, Tuple2<K, T>, MapFunction<T, Tuple2<K, T>>>(
                    extractor,
                    new UnaryOperatorInformation(inputType, typeInfoWithKey),
                    "Key Extractor");

    mapper.setInput(input);
    mapper.setParallelism(input.getParallelism());

    return mapper;
}
Example #5
Source File: BinaryUnionNode.java From flink with Apache License 2.0

public BinaryUnionNode(Union<?> union) {
    super(union);
}
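The constructor above is a one-liner; the interesting part is where it is called. In the flink-optimizer module, a BinaryUnionNode is created for every Union operator while the optimizer turns the common-API plan into its internal DAG. A rough sketch of triggering that compilation, assuming flink-optimizer is on the classpath (the constructor overload is the one used in Flink's own compiler tests and may differ between versions):

import org.apache.flink.api.common.Plan;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.optimizer.DataStatistics;
import org.apache.flink.optimizer.Optimizer;
import org.apache.flink.optimizer.costs.DefaultCostEstimator;
import org.apache.flink.optimizer.plan.OptimizedPlan;

public class CompileUnionPlanSketch {

    // Compiles a common-API plan. For a plan that contains a Union, the optimizer
    // wraps that operator in a BinaryUnionNode while building its internal graph.
    static OptimizedPlan compile(Plan plan) {
        Optimizer optimizer =
                new Optimizer(new DataStatistics(), new DefaultCostEstimator(), new Configuration());
        return optimizer.compile(plan);
    }
}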
Example #6
Source File: UnionTranslationTest.java From flink with Apache License 2.0

@Test
public void translateUnion3SortedGroup() {
    try {
        final int parallelism = 4;
        ExecutionEnvironment env = ExecutionEnvironment.createLocalEnvironment(parallelism);

        DataSet<Tuple3<Double, StringValue, LongValue>> dataset1 = getSourceDataSet(env, 2);
        DataSet<Tuple3<Double, StringValue, LongValue>> dataset2 = getSourceDataSet(env, 3);
        DataSet<Tuple3<Double, StringValue, LongValue>> dataset3 = getSourceDataSet(env, -1);

        dataset1.union(dataset2).union(dataset3)
                .groupBy((KeySelector<Tuple3<Double, StringValue, LongValue>, String>) value -> "")
                .sortGroup((KeySelector<Tuple3<Double, StringValue, LongValue>, String>) value -> "", Order.ASCENDING)
                .reduceGroup((GroupReduceFunction<Tuple3<Double, StringValue, LongValue>, String>) (values, out) -> {})
                .returns(String.class)
                .output(new DiscardingOutputFormat<>());

        Plan p = env.createProgramPlan();

        // The plan should look like the following one.
        //
        // DataSet1(2) - MapOperator(2)-+
        //                              |- Union(-1) -+
        // DataSet2(3) - MapOperator(3)-+             |- Union(-1) - SingleInputOperator - Sink
        //                                            |
        // DataSet3(-1) - MapOperator(-1)-------------+
        GenericDataSinkBase<?> sink = p.getDataSinks().iterator().next();

        Union secondUnionOperator = (Union) ((SingleInputOperator) sink.getInput()).getInput();

        // The first input of the second union should be the first union.
        Union firstUnionOperator = (Union) secondUnionOperator.getFirstInput();

        // The key mapper should be added to the second input stream of the second union.
        assertTrue(secondUnionOperator.getSecondInput() instanceof MapOperatorBase<?, ?, ?>);

        // The key mappers should be added to both of the two input streams for the first union.
        assertTrue(firstUnionOperator.getFirstInput() instanceof MapOperatorBase<?, ?, ?>);
        assertTrue(firstUnionOperator.getSecondInput() instanceof MapOperatorBase<?, ?, ?>);

        // The parallelisms of the key mappers should be equal to those of their inputs.
        assertEquals(firstUnionOperator.getFirstInput().getParallelism(), 2);
        assertEquals(firstUnionOperator.getSecondInput().getParallelism(), 3);
        assertEquals(secondUnionOperator.getSecondInput().getParallelism(), -1);

        // The union should always have the default parallelism.
        assertEquals(secondUnionOperator.getParallelism(), ExecutionConfig.PARALLELISM_DEFAULT);
        assertEquals(firstUnionOperator.getParallelism(), ExecutionConfig.PARALLELISM_DEFAULT);
    } catch (Exception e) {
        System.err.println(e.getMessage());
        e.printStackTrace();
        fail("Test caused an error: " + e.getMessage());
    }
}
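The getSourceDataSet(env, parallelism) helper used here and in the following UnionTranslationTest snippets is defined elsewhere in the test class and is not part of the extract; it just builds a one-element source with the requested parallelism. A sketch of such a helper, with placeholder element values:

// Inside the test class (sketch):
private static DataSet<Tuple3<Double, StringValue, LongValue>> getSourceDataSet(
        ExecutionEnvironment env, int parallelism) {
    // One dummy element is enough for plan-translation tests;
    // a parallelism of -1 stands for ExecutionConfig.PARALLELISM_DEFAULT.
    return env
            .fromElements(new Tuple3<>(0.0, new StringValue(""), new LongValue(1L)))
            .setParallelism(parallelism);
}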
Example #7
Source File: UnionTranslationTest.java From flink with Apache License 2.0

@Test
public void translateUnion2Group() {
    try {
        final int parallelism = 4;
        ExecutionEnvironment env = ExecutionEnvironment.createLocalEnvironment(parallelism);

        DataSet<Tuple3<Double, StringValue, LongValue>> dataset1 = getSourceDataSet(env, 3);
        DataSet<Tuple3<Double, StringValue, LongValue>> dataset2 = getSourceDataSet(env, 2);

        dataset1.union(dataset2)
                .groupBy((KeySelector<Tuple3<Double, StringValue, LongValue>, String>) value -> "")
                .reduceGroup((GroupReduceFunction<Tuple3<Double, StringValue, LongValue>, String>) (values, out) -> {})
                .returns(String.class)
                .output(new DiscardingOutputFormat<>());

        Plan p = env.createProgramPlan();

        // The plan should look like the following one.
        //
        // DataSet1(3) - MapOperator(3)-+
        //                              |- Union(-1) - SingleInputOperator - Sink
        // DataSet2(2) - MapOperator(2)-+
        GenericDataSinkBase<?> sink = p.getDataSinks().iterator().next();

        Union unionOperator = (Union) ((SingleInputOperator) sink.getInput()).getInput();

        // The key mappers should be added to both of the two input streams for union.
        assertTrue(unionOperator.getFirstInput() instanceof MapOperatorBase<?, ?, ?>);
        assertTrue(unionOperator.getSecondInput() instanceof MapOperatorBase<?, ?, ?>);

        // The parallelisms of the key mappers should be equal to those of their inputs.
        assertEquals(unionOperator.getFirstInput().getParallelism(), 3);
        assertEquals(unionOperator.getSecondInput().getParallelism(), 2);

        // The union should always have the default parallelism.
        assertEquals(unionOperator.getParallelism(), ExecutionConfig.PARALLELISM_DEFAULT);
    } catch (Exception e) {
        System.err.println(e.getMessage());
        e.printStackTrace();
        fail("Test caused an error: " + e.getMessage());
    }
}
Example #8
Source File: BinaryUnionNode.java From Flink-CEPplus with Apache License 2.0

public BinaryUnionNode(Union<?> union) {
    super(union);
}
Example #9
Source File: UnionTranslationTest.java From Flink-CEPplus with Apache License 2.0

@Test
public void translateUnion3SortedGroup() {
    try {
        final int parallelism = 4;
        ExecutionEnvironment env = ExecutionEnvironment.createLocalEnvironment(parallelism);

        DataSet<Tuple3<Double, StringValue, LongValue>> dataset1 = getSourceDataSet(env, 2);
        DataSet<Tuple3<Double, StringValue, LongValue>> dataset2 = getSourceDataSet(env, 3);
        DataSet<Tuple3<Double, StringValue, LongValue>> dataset3 = getSourceDataSet(env, -1);

        dataset1.union(dataset2).union(dataset3)
                .groupBy((KeySelector<Tuple3<Double, StringValue, LongValue>, String>) value -> "")
                .sortGroup((KeySelector<Tuple3<Double, StringValue, LongValue>, String>) value -> "", Order.ASCENDING)
                .reduceGroup((GroupReduceFunction<Tuple3<Double, StringValue, LongValue>, String>) (values, out) -> {})
                .returns(String.class)
                .output(new DiscardingOutputFormat<>());

        Plan p = env.createProgramPlan();

        // The plan should look like the following one.
        //
        // DataSet1(2) - MapOperator(2)-+
        //                              |- Union(-1) -+
        // DataSet2(3) - MapOperator(3)-+             |- Union(-1) - SingleInputOperator - Sink
        //                                            |
        // DataSet3(-1) - MapOperator(-1)-------------+
        GenericDataSinkBase<?> sink = p.getDataSinks().iterator().next();

        Union secondUnionOperator = (Union) ((SingleInputOperator) sink.getInput()).getInput();

        // The first input of the second union should be the first union.
        Union firstUnionOperator = (Union) secondUnionOperator.getFirstInput();

        // The key mapper should be added to the second input stream of the second union.
        assertTrue(secondUnionOperator.getSecondInput() instanceof MapOperatorBase<?, ?, ?>);

        // The key mappers should be added to both of the two input streams for the first union.
        assertTrue(firstUnionOperator.getFirstInput() instanceof MapOperatorBase<?, ?, ?>);
        assertTrue(firstUnionOperator.getSecondInput() instanceof MapOperatorBase<?, ?, ?>);

        // The parallelisms of the key mappers should be equal to those of their inputs.
        assertEquals(firstUnionOperator.getFirstInput().getParallelism(), 2);
        assertEquals(firstUnionOperator.getSecondInput().getParallelism(), 3);
        assertEquals(secondUnionOperator.getSecondInput().getParallelism(), -1);

        // The union should always have the default parallelism.
        assertEquals(secondUnionOperator.getParallelism(), ExecutionConfig.PARALLELISM_DEFAULT);
        assertEquals(firstUnionOperator.getParallelism(), ExecutionConfig.PARALLELISM_DEFAULT);
    } catch (Exception e) {
        System.err.println(e.getMessage());
        e.printStackTrace();
        fail("Test caused an error: " + e.getMessage());
    }
}
Example #10
Source File: UnionTranslationTest.java From Flink-CEPplus with Apache License 2.0

@Test
public void translateUnion2Group() {
    try {
        final int parallelism = 4;
        ExecutionEnvironment env = ExecutionEnvironment.createLocalEnvironment(parallelism);

        DataSet<Tuple3<Double, StringValue, LongValue>> dataset1 = getSourceDataSet(env, 3);
        DataSet<Tuple3<Double, StringValue, LongValue>> dataset2 = getSourceDataSet(env, 2);

        dataset1.union(dataset2)
                .groupBy((KeySelector<Tuple3<Double, StringValue, LongValue>, String>) value -> "")
                .reduceGroup((GroupReduceFunction<Tuple3<Double, StringValue, LongValue>, String>) (values, out) -> {})
                .returns(String.class)
                .output(new DiscardingOutputFormat<>());

        Plan p = env.createProgramPlan();

        // The plan should look like the following one.
        //
        // DataSet1(3) - MapOperator(3)-+
        //                              |- Union(-1) - SingleInputOperator - Sink
        // DataSet2(2) - MapOperator(2)-+
        GenericDataSinkBase<?> sink = p.getDataSinks().iterator().next();

        Union unionOperator = (Union) ((SingleInputOperator) sink.getInput()).getInput();

        // The key mappers should be added to both of the two input streams for union.
        assertTrue(unionOperator.getFirstInput() instanceof MapOperatorBase<?, ?, ?>);
        assertTrue(unionOperator.getSecondInput() instanceof MapOperatorBase<?, ?, ?>);

        // The parallelisms of the key mappers should be equal to those of their inputs.
        assertEquals(unionOperator.getFirstInput().getParallelism(), 3);
        assertEquals(unionOperator.getSecondInput().getParallelism(), 2);

        // The union should always have the default parallelism.
        assertEquals(unionOperator.getParallelism(), ExecutionConfig.PARALLELISM_DEFAULT);
    } catch (Exception e) {
        System.err.println(e.getMessage());
        e.printStackTrace();
        fail("Test caused an error: " + e.getMessage());
    }
}
Example #11
Source File: UnionOperator.java From flink with Apache License 2.0

/**
 * Returns the BinaryNodeTranslation of the Union.
 *
 * @param input1 The first input of the union, as a common API operator.
 * @param input2 The second input of the union, as a common API operator.
 * @return The common API union operator.
 */
@Override
protected Union<T> translateToDataFlow(Operator<T> input1, Operator<T> input2) {
    return new Union<T>(input1, input2, unionLocationName);
}
Example #12
Source File: UnionOperator.java From Flink-CEPplus with Apache License 2.0

/**
 * Returns the BinaryNodeTranslation of the Union.
 *
 * @param input1 The first input of the union, as a common API operator.
 * @param input2 The second input of the union, as a common API operator.
 * @return The common API union operator.
 */
@Override
protected Union<T> translateToDataFlow(Operator<T> input1, Operator<T> input2) {
    return new Union<T>(input1, input2, unionLocationName);
}
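For completeness: the unionLocationName passed to the Union constructor in the two translateToDataFlow examples above is the call-site string captured when DataSet#union is invoked. Roughly, as a sketch from memory rather than verbatim Flink source:

// Inside org.apache.flink.api.java.DataSet (sketch):
public UnionOperator<T> union(DataSet<T> other) {
    // Utils.getCallLocationName() records where union() was called,
    // e.g. "union(MyJob.java:42)"; that string becomes the Union operator's name.
    return new UnionOperator<>(this, other, Utils.getCallLocationName());
}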