org.apache.flink.optimizer.dataproperties.LocalProperties Java Examples
The following examples show how to use org.apache.flink.optimizer.dataproperties.LocalProperties.
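Before the examples, here is a minimal, self-contained sketch of how LocalProperties instances are typically created and inspected. It is not taken from any of the snippets below; the no-argument constructor, LocalProperties.forGrouping, and the getter methods appear in the examples on this page, while the FieldList(int) constructor is an assumption about the Flink optimizer API.

// Minimal sketch, assuming the FieldList(int) constructor is available in this Flink version.
import org.apache.flink.api.common.operators.util.FieldList;
import org.apache.flink.optimizer.dataproperties.LocalProperties;

public class LocalPropertiesSketch {

    public static void main(String[] args) {
        // trivial properties: no ordering, grouping, or uniqueness is known
        LocalProperties trivial = new LocalProperties();

        // properties describing data that is grouped on field 0
        LocalProperties grouped = LocalProperties.forGrouping(new FieldList(0));

        System.out.println(trivial.getOrdering());      // null: no ordering is known
        System.out.println(grouped.getGroupedFields());  // the grouped fields, i.e. field 0
    }
}

LocalProperties.forOrdering(Ordering) is used the same way when a sort order rather than a grouping is known, as Example #5 below shows.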
Example #1
Source File: CoGroupRawDescriptor.java From flink with Apache License 2.0 | 6 votes |
@Override
public boolean areCoFulfilled(RequestedLocalProperties requested1, RequestedLocalProperties requested2,
        LocalProperties produced1, LocalProperties produced2) {
    int numRelevantFields = this.keys1.size();

    Ordering prod1 = produced1.getOrdering();
    Ordering prod2 = produced2.getOrdering();

    if (prod1 == null || prod2 == null || prod1.getNumberOfFields() < numRelevantFields ||
            prod2.getNumberOfFields() < numRelevantFields) {
        throw new CompilerException("The given properties do not meet this operators requirements.");
    }

    for (int i = 0; i < numRelevantFields; i++) {
        if (prod1.getOrder(i) != prod2.getOrder(i)) {
            return false;
        }
    }
    return true;
}
Example #2
Source File: OperatorDescriptorDual.java From Flink-CEPplus with Apache License 2.0 | 6 votes |
protected boolean checkSameOrdering(LocalProperties produced1, LocalProperties produced2, int numRelevantFields) {
    Ordering prod1 = produced1.getOrdering();
    Ordering prod2 = produced2.getOrdering();

    if (prod1 == null || prod2 == null) {
        throw new CompilerException("The given properties do not meet this operators requirements.");
    }

    // check that order of fields is equivalent
    if (!checkEquivalentFieldPositionsInKeyFields(
            prod1.getInvolvedIndexes(), prod2.getInvolvedIndexes(), numRelevantFields)) {
        return false;
    }

    // check that both inputs have the same directions of order
    for (int i = 0; i < numRelevantFields; i++) {
        if (prod1.getOrder(i) != prod2.getOrder(i)) {
            return false;
        }
    }
    return true;
}
Example #3
Source File: Channel.java From Flink-CEPplus with Apache License 2.0 | 6 votes |
private void computeLocalPropertiesAfterShippingOnly() {
    switch (this.shipStrategy) {
        case BROADCAST:
        case PARTITION_HASH:
        case PARTITION_CUSTOM:
        case PARTITION_RANGE:
        case PARTITION_RANDOM:
        case PARTITION_FORCED_REBALANCE:
            this.localProps = new LocalProperties();
            break;
        case FORWARD:
            this.localProps = this.source.getLocalProperties();
            break;
        case NONE:
            throw new CompilerException("ShipStrategy has not yet been set.");
        default:
            throw new CompilerException("Unknown ShipStrategy.");
    }
}
Example #4
Source File: OperatorDescriptorDual.java From flink with Apache License 2.0 | 6 votes |
protected boolean checkSameOrdering(LocalProperties produced1, LocalProperties produced2, int numRelevantFields) {
    Ordering prod1 = produced1.getOrdering();
    Ordering prod2 = produced2.getOrdering();

    if (prod1 == null || prod2 == null) {
        throw new CompilerException("The given properties do not meet this operators requirements.");
    }

    // check that order of fields is equivalent
    if (!checkEquivalentFieldPositionsInKeyFields(
            prod1.getInvolvedIndexes(), prod2.getInvolvedIndexes(), numRelevantFields)) {
        return false;
    }

    // check that both inputs have the same directions of order
    for (int i = 0; i < numRelevantFields; i++) {
        if (prod1.getOrder(i) != prod2.getOrder(i)) {
            return false;
        }
    }
    return true;
}
Example #5
Source File: Channel.java From Flink-CEPplus with Apache License 2.0 | 6 votes |
public LocalProperties getLocalProperties() {
    if (this.localProps == null) {
        computeLocalPropertiesAfterShippingOnly();

        switch (this.localStrategy) {
            case NONE:
                break;
            case SORT:
            case COMBININGSORT:
                this.localProps = LocalProperties.forOrdering(Utils.createOrdering(this.localKeys, this.localSortOrder));
                break;
            default:
                throw new CompilerException("Unsupported local strategy for channel.");
        }
    }
    return this.localProps;
}
Example #6
Source File: CoGroupRawDescriptor.java From flink with Apache License 2.0 | 6 votes |
@Override
public boolean areCoFulfilled(RequestedLocalProperties requested1, RequestedLocalProperties requested2,
        LocalProperties produced1, LocalProperties produced2) {
    int numRelevantFields = this.keys1.size();

    Ordering prod1 = produced1.getOrdering();
    Ordering prod2 = produced2.getOrdering();

    if (prod1 == null || prod2 == null || prod1.getNumberOfFields() < numRelevantFields ||
            prod2.getNumberOfFields() < numRelevantFields) {
        throw new CompilerException("The given properties do not meet this operators requirements.");
    }

    for (int i = 0; i < numRelevantFields; i++) {
        if (prod1.getOrder(i) != prod2.getOrder(i)) {
            return false;
        }
    }
    return true;
}
Example #7
Source File: BulkPartialSolutionPlanNode.java From Flink-CEPplus with Apache License 2.0 | 6 votes |
public BulkPartialSolutionPlanNode(BulkPartialSolutionNode template, String nodeName,
        GlobalProperties gProps, LocalProperties lProps, Channel initialInput) {
    super(template, nodeName, DriverStrategy.NONE);

    this.globalProps = gProps;
    this.localProps = lProps;
    this.initialInput = initialInput;

    // the partial solution does not cost anything
    this.nodeCosts = NO_COSTS;
    this.cumulativeCosts = NO_COSTS;

    if (initialInput.getSource().branchPlan != null && initialInput.getSource().branchPlan.size() > 0) {
        if (this.branchPlan == null) {
            this.branchPlan = new HashMap<OptimizerNode, PlanNode>();
        }
        this.branchPlan.putAll(initialInput.getSource().branchPlan);
    }
}
Example #8
Source File: BulkPartialSolutionNode.java From flink with Apache License 2.0 | 5 votes |
public void setCandidateProperties(GlobalProperties gProps, LocalProperties lProps, Channel initialInput) {
    if (this.cachedPlans != null) {
        throw new IllegalStateException();
    } else {
        this.cachedPlans = Collections.<PlanNode>singletonList(new BulkPartialSolutionPlanNode(
                this, "PartialSolution (" + this.getOperator().getName() + ")", gProps, lProps, initialInput));
    }
}
Example #9
Source File: DataSourceNode.java From Flink-CEPplus with Apache License 2.0 | 5 votes |
/**
 * Creates a new DataSourceNode for the given contract.
 *
 * @param pactContract
 *        The data source contract object.
 */
public DataSourceNode(GenericDataSourceBase<?, ?> pactContract) {
    super(pactContract);

    if (pactContract.getUserCodeWrapper().getUserCodeClass() == null) {
        throw new IllegalArgumentException("Input format has not been set.");
    }

    if (NonParallelInput.class.isAssignableFrom(pactContract.getUserCodeWrapper().getUserCodeClass())) {
        setParallelism(1);
        this.sequentialInput = true;
    } else {
        this.sequentialInput = false;
    }

    this.replicatedInput = ReplicatingInputFormat.class.isAssignableFrom(
            pactContract.getUserCodeWrapper().getUserCodeClass());

    this.gprops = new GlobalProperties();
    this.lprops = new LocalProperties();

    SplitDataProperties<?> splitProps = pactContract.getSplitDataProperties();

    if (replicatedInput) {
        this.gprops.setFullyReplicated();
        this.lprops = new LocalProperties();
    } else if (splitProps != null) {
        // configure data properties of data source using split properties
        setDataPropertiesFromSplitProperties(splitProps);
    }
}
Example #10
Source File: PropertyDataSourceTest.java From Flink-CEPplus with Apache License 2.0 | 5 votes |
@Test
public void checkSinglePartitionedOrderedSource3() {

    ExecutionEnvironment env = ExecutionEnvironment.createLocalEnvironment();
    env.setParallelism(DEFAULT_PARALLELISM);

    DataSource<Tuple2<Long, String>> data = env.readCsvFile("/some/path").types(Long.class, String.class);

    data.getSplitDataProperties()
            .splitsPartitionedBy(0)
            .splitsOrderedBy(new int[]{1}, new Order[]{Order.ASCENDING});

    data.output(new DiscardingOutputFormat<Tuple2<Long, String>>());

    Plan plan = env.createProgramPlan();

    // submit the plan to the compiler
    OptimizedPlan oPlan = compileNoStats(plan);

    // check the optimized Plan
    SinkPlanNode sinkNode = oPlan.getDataSinks().iterator().next();
    SourcePlanNode sourceNode = (SourcePlanNode) sinkNode.getPredecessor();

    GlobalProperties gprops = sourceNode.getGlobalProperties();
    LocalProperties lprops = sourceNode.getLocalProperties();

    Assert.assertTrue((new FieldSet(gprops.getPartitioningFields().toArray())).equals(new FieldSet(0)));
    Assert.assertTrue(gprops.getPartitioning() == PartitioningProperty.ANY_PARTITIONING);
    Assert.assertTrue(lprops.getGroupedFields() == null);
    Assert.assertTrue(lprops.getOrdering() == null);
}
Example #11
Source File: PropertyDataSourceTest.java From flink with Apache License 2.0 | 5 votes |
@Test
public void checkSinglePartitionedGroupedSource3() {

    ExecutionEnvironment env = ExecutionEnvironment.createLocalEnvironment();
    env.setParallelism(DEFAULT_PARALLELISM);

    DataSource<Tuple2<Long, String>> data = env.readCsvFile("/some/path").types(Long.class, String.class);

    data.getSplitDataProperties()
            .splitsPartitionedBy(1)
            .splitsGroupedBy(0);

    data.output(new DiscardingOutputFormat<Tuple2<Long, String>>());

    Plan plan = env.createProgramPlan();

    // submit the plan to the compiler
    OptimizedPlan oPlan = compileNoStats(plan);

    // check the optimized Plan
    SinkPlanNode sinkNode = oPlan.getDataSinks().iterator().next();
    SourcePlanNode sourceNode = (SourcePlanNode) sinkNode.getPredecessor();

    GlobalProperties gprops = sourceNode.getGlobalProperties();
    LocalProperties lprops = sourceNode.getLocalProperties();

    Assert.assertTrue((new FieldSet(gprops.getPartitioningFields().toArray())).equals(new FieldSet(1)));
    Assert.assertTrue(gprops.getPartitioning() == PartitioningProperty.ANY_PARTITIONING);
    Assert.assertTrue(lprops.getGroupedFields() == null);
    Assert.assertTrue(lprops.getOrdering() == null);
}
Example #12
Source File: PropertyDataSourceTest.java From Flink-CEPplus with Apache License 2.0 | 5 votes |
@Test
public void checkSinglePartitionedGroupedSource3() {

    ExecutionEnvironment env = ExecutionEnvironment.createLocalEnvironment();
    env.setParallelism(DEFAULT_PARALLELISM);

    DataSource<Tuple2<Long, String>> data = env.readCsvFile("/some/path").types(Long.class, String.class);

    data.getSplitDataProperties()
            .splitsPartitionedBy(1)
            .splitsGroupedBy(0);

    data.output(new DiscardingOutputFormat<Tuple2<Long, String>>());

    Plan plan = env.createProgramPlan();

    // submit the plan to the compiler
    OptimizedPlan oPlan = compileNoStats(plan);

    // check the optimized Plan
    SinkPlanNode sinkNode = oPlan.getDataSinks().iterator().next();
    SourcePlanNode sourceNode = (SourcePlanNode) sinkNode.getPredecessor();

    GlobalProperties gprops = sourceNode.getGlobalProperties();
    LocalProperties lprops = sourceNode.getLocalProperties();

    Assert.assertTrue((new FieldSet(gprops.getPartitioningFields().toArray())).equals(new FieldSet(1)));
    Assert.assertTrue(gprops.getPartitioning() == PartitioningProperty.ANY_PARTITIONING);
    Assert.assertTrue(lprops.getGroupedFields() == null);
    Assert.assertTrue(lprops.getOrdering() == null);
}
Example #13
Source File: PropertyDataSourceTest.java From flink with Apache License 2.0 | 5 votes |
@Test
public void checkSinglePartitionedGroupedSource6() {

    ExecutionEnvironment env = ExecutionEnvironment.createLocalEnvironment();
    env.setParallelism(DEFAULT_PARALLELISM);

    DataSource<Tuple3<Long, SomePojo, String>> data = env.fromCollection(tuple3PojoData, tuple3PojoType);

    data.getSplitDataProperties()
            .splitsPartitionedBy("f1.intField")
            .splitsGroupedBy("f0; f1.intField");

    data.output(new DiscardingOutputFormat<Tuple3<Long, SomePojo, String>>());

    Plan plan = env.createProgramPlan();

    // submit the plan to the compiler
    OptimizedPlan oPlan = compileNoStats(plan);

    // check the optimized Plan
    SinkPlanNode sinkNode = oPlan.getDataSinks().iterator().next();
    SourcePlanNode sourceNode = (SourcePlanNode) sinkNode.getPredecessor();

    GlobalProperties gprops = sourceNode.getGlobalProperties();
    LocalProperties lprops = sourceNode.getLocalProperties();

    Assert.assertTrue((new FieldSet(gprops.getPartitioningFields().toArray())).equals(new FieldSet(2)));
    Assert.assertTrue(gprops.getPartitioning() == PartitioningProperty.ANY_PARTITIONING);
    Assert.assertTrue(new FieldSet(lprops.getGroupedFields().toArray()).equals(new FieldSet(0, 2)));
    Assert.assertTrue(lprops.getOrdering() == null);
}
Example #14
Source File: FeedbackPropertiesMatchTest.java From flink with Apache License 2.0 | 5 votes |
@Test
public void testNoPartialSolutionFoundTwoInputOperator() {
    try {
        SourcePlanNode target = new SourcePlanNode(getSourceNode(), "Partial Solution");

        SourcePlanNode source1 = new SourcePlanNode(getSourceNode(), "Source 1");
        SourcePlanNode source2 = new SourcePlanNode(getSourceNode(), "Source 2");

        Channel toMap1 = new Channel(source1);
        toMap1.setShipStrategy(ShipStrategyType.FORWARD, DataExchangeMode.PIPELINED);
        toMap1.setLocalStrategy(LocalStrategy.NONE);
        SingleInputPlanNode map1 = new SingleInputPlanNode(getMapNode(), "Mapper 1", toMap1, DriverStrategy.MAP);

        Channel toMap2 = new Channel(source2);
        toMap2.setShipStrategy(ShipStrategyType.FORWARD, DataExchangeMode.PIPELINED);
        toMap2.setLocalStrategy(LocalStrategy.NONE);
        SingleInputPlanNode map2 = new SingleInputPlanNode(getMapNode(), "Mapper 2", toMap2, DriverStrategy.MAP);

        Channel toJoin1 = new Channel(map1);
        Channel toJoin2 = new Channel(map2);

        toJoin1.setShipStrategy(ShipStrategyType.FORWARD, DataExchangeMode.PIPELINED);
        toJoin1.setLocalStrategy(LocalStrategy.NONE);

        toJoin2.setShipStrategy(ShipStrategyType.FORWARD, DataExchangeMode.PIPELINED);
        toJoin2.setLocalStrategy(LocalStrategy.NONE);

        DualInputPlanNode join = new DualInputPlanNode(getJoinNode(), "Join", toJoin1, toJoin2, DriverStrategy.HYBRIDHASH_BUILD_FIRST);

        FeedbackPropertiesMeetRequirementsReport report = join.checkPartialSolutionPropertiesMet(target, new GlobalProperties(), new LocalProperties());
        assertEquals(NO_PARTIAL_SOLUTION, report);
    } catch (Exception e) {
        e.printStackTrace();
        fail(e.getMessage());
    }
}
Example #15
Source File: PropertyDataSourceTest.java From Flink-CEPplus with Apache License 2.0 | 5 votes |
@Test
public void checkSinglePartitionedGroupedSource1() {

    ExecutionEnvironment env = ExecutionEnvironment.createLocalEnvironment();
    env.setParallelism(DEFAULT_PARALLELISM);

    DataSource<Tuple2<Long, String>> data = env.readCsvFile("/some/path").types(Long.class, String.class);

    data.getSplitDataProperties()
            .splitsPartitionedBy(0)
            .splitsGroupedBy(0);

    data.output(new DiscardingOutputFormat<Tuple2<Long, String>>());

    Plan plan = env.createProgramPlan();

    // submit the plan to the compiler
    OptimizedPlan oPlan = compileNoStats(plan);

    // check the optimized Plan
    SinkPlanNode sinkNode = oPlan.getDataSinks().iterator().next();
    SourcePlanNode sourceNode = (SourcePlanNode) sinkNode.getPredecessor();

    GlobalProperties gprops = sourceNode.getGlobalProperties();
    LocalProperties lprops = sourceNode.getLocalProperties();

    Assert.assertTrue((new FieldSet(gprops.getPartitioningFields().toArray())).equals(new FieldSet(0)));
    Assert.assertTrue(gprops.getPartitioning() == PartitioningProperty.ANY_PARTITIONING);
    Assert.assertTrue(new FieldSet(lprops.getGroupedFields().toArray()).equals(new FieldSet(0)));
    Assert.assertTrue(lprops.getOrdering() == null);
}
Example #16
Source File: CrossStreamOuterSecondDescriptor.java From Flink-CEPplus with Apache License 2.0 | 5 votes |
@Override
public LocalProperties computeLocalProperties(LocalProperties in1, LocalProperties in2) {
    // uniqueness becomes grouping with streamed nested loops
    if ((in2.getGroupedFields() == null || in2.getGroupedFields().size() == 0) &&
            in2.getUniqueFields() != null && in2.getUniqueFields().size() > 0) {
        return LocalProperties.forGrouping(in2.getUniqueFields().iterator().next().toFieldList());
    } else {
        return in2.clearUniqueFieldSets();
    }
}
Example #17
Source File: PropertyDataSourceTest.java From flink with Apache License 2.0 | 5 votes |
@Test
public void checkSinglePartitionedSource7() {

    ExecutionEnvironment env = ExecutionEnvironment.createLocalEnvironment();
    env.setParallelism(DEFAULT_PARALLELISM);

    DataSource<Tuple2<Long, String>> data = env.readCsvFile("/some/path").types(Long.class, String.class);

    data.getSplitDataProperties()
            .splitsPartitionedBy("byDate", 1, 0);

    data.output(new DiscardingOutputFormat<Tuple2<Long, String>>());

    Plan plan = env.createProgramPlan();

    // submit the plan to the compiler
    OptimizedPlan oPlan = compileNoStats(plan);

    // check the optimized Plan
    SinkPlanNode sinkNode = oPlan.getDataSinks().iterator().next();
    SourcePlanNode sourceNode = (SourcePlanNode) sinkNode.getPredecessor();

    GlobalProperties gprops = sourceNode.getGlobalProperties();
    LocalProperties lprops = sourceNode.getLocalProperties();

    Assert.assertTrue((new FieldSet(gprops.getPartitioningFields().toArray())).equals(new FieldSet(0, 1)));
    Assert.assertTrue(gprops.getPartitioning() == PartitioningProperty.CUSTOM_PARTITIONING);
    Assert.assertTrue(gprops.getCustomPartitioner() != null);
    Assert.assertTrue(lprops.getGroupedFields() == null);
    Assert.assertTrue(lprops.getOrdering() == null);
}
Example #18
Source File: PropertyDataSourceTest.java From flink with Apache License 2.0 | 5 votes |
@Test
public void checkSinglePartitionedGroupedSource7() {

    ExecutionEnvironment env = ExecutionEnvironment.createLocalEnvironment();
    env.setParallelism(DEFAULT_PARALLELISM);

    DataSource<Tuple3<Long, SomePojo, String>> data = env.fromCollection(tuple3PojoData, tuple3PojoType);

    data.getSplitDataProperties()
            .splitsPartitionedBy("f1.intField")
            .splitsGroupedBy("f1");

    data.output(new DiscardingOutputFormat<Tuple3<Long, SomePojo, String>>());

    Plan plan = env.createProgramPlan();

    // submit the plan to the compiler
    OptimizedPlan oPlan = compileNoStats(plan);

    // check the optimized Plan
    SinkPlanNode sinkNode = oPlan.getDataSinks().iterator().next();
    SourcePlanNode sourceNode = (SourcePlanNode) sinkNode.getPredecessor();

    GlobalProperties gprops = sourceNode.getGlobalProperties();
    LocalProperties lprops = sourceNode.getLocalProperties();

    Assert.assertTrue((new FieldSet(gprops.getPartitioningFields().toArray())).equals(new FieldSet(2)));
    Assert.assertTrue(gprops.getPartitioning() == PartitioningProperty.ANY_PARTITIONING);
    Assert.assertTrue(new FieldSet(lprops.getGroupedFields().toArray()).equals(new FieldSet(1, 2, 3)));
    Assert.assertTrue(lprops.getOrdering() == null);
}
Example #19
Source File: CrossStreamOuterSecondDescriptor.java From flink with Apache License 2.0 | 5 votes |
@Override
public LocalProperties computeLocalProperties(LocalProperties in1, LocalProperties in2) {
    // uniqueness becomes grouping with streamed nested loops
    if ((in2.getGroupedFields() == null || in2.getGroupedFields().size() == 0) &&
            in2.getUniqueFields() != null && in2.getUniqueFields().size() > 0) {
        return LocalProperties.forGrouping(in2.getUniqueFields().iterator().next().toFieldList());
    } else {
        return in2.clearUniqueFieldSets();
    }
}
Example #20
Source File: PropertyDataSourceTest.java From Flink-CEPplus with Apache License 2.0 | 5 votes |
@Test
public void checkSinglePartitionedGroupedSource8() {

    ExecutionEnvironment env = ExecutionEnvironment.createLocalEnvironment();
    env.setParallelism(DEFAULT_PARALLELISM);

    DataSource<Tuple3<Long, SomePojo, String>> data = env.fromCollection(tuple3PojoData, tuple3PojoType);

    data.getSplitDataProperties()
            .splitsPartitionedBy("f1")
            .splitsGroupedBy("f1.stringField");

    data.output(new DiscardingOutputFormat<Tuple3<Long, SomePojo, String>>());

    Plan plan = env.createProgramPlan();

    // submit the plan to the compiler
    OptimizedPlan oPlan = compileNoStats(plan);

    // check the optimized Plan
    SinkPlanNode sinkNode = oPlan.getDataSinks().iterator().next();
    SourcePlanNode sourceNode = (SourcePlanNode) sinkNode.getPredecessor();

    GlobalProperties gprops = sourceNode.getGlobalProperties();
    LocalProperties lprops = sourceNode.getLocalProperties();

    Assert.assertTrue((new FieldSet(gprops.getPartitioningFields().toArray())).equals(new FieldSet(1, 2, 3)));
    Assert.assertTrue(gprops.getPartitioning() == PartitioningProperty.ANY_PARTITIONING);
    Assert.assertTrue(lprops.getGroupedFields() == null);
    Assert.assertTrue(lprops.getOrdering() == null);
}
Example #21
Source File: CrossStreamOuterFirstDescriptor.java From flink with Apache License 2.0 | 5 votes |
@Override
public LocalProperties computeLocalProperties(LocalProperties in1, LocalProperties in2) {
    // uniqueness becomes grouping with streamed nested loops
    if ((in1.getGroupedFields() == null || in1.getGroupedFields().size() == 0) &&
            in1.getUniqueFields() != null && in1.getUniqueFields().size() > 0) {
        return LocalProperties.forGrouping(in1.getUniqueFields().iterator().next().toFieldList());
    } else {
        return in1.clearUniqueFieldSets();
    }
}
Example #22
Source File: PropertyDataSourceTest.java From flink with Apache License 2.0 | 5 votes |
@Test
public void checkSinglePartitionedOrderedSource5() {

    ExecutionEnvironment env = ExecutionEnvironment.createLocalEnvironment();
    env.setParallelism(DEFAULT_PARALLELISM);

    DataSource<Tuple3<Long, SomePojo, String>> data = env.fromCollection(tuple3PojoData, tuple3PojoType);

    data.getSplitDataProperties()
            .splitsPartitionedBy("f1.intField")
            .splitsOrderedBy("f0; f1.intField", new Order[]{Order.ASCENDING, Order.DESCENDING});

    data.output(new DiscardingOutputFormat<Tuple3<Long, SomePojo, String>>());

    Plan plan = env.createProgramPlan();

    // submit the plan to the compiler
    OptimizedPlan oPlan = compileNoStats(plan);

    // check the optimized Plan
    SinkPlanNode sinkNode = oPlan.getDataSinks().iterator().next();
    SourcePlanNode sourceNode = (SourcePlanNode) sinkNode.getPredecessor();

    GlobalProperties gprops = sourceNode.getGlobalProperties();
    LocalProperties lprops = sourceNode.getLocalProperties();

    Assert.assertTrue((new FieldSet(gprops.getPartitioningFields().toArray())).equals(new FieldSet(2)));
    Assert.assertTrue(gprops.getPartitioning() == PartitioningProperty.ANY_PARTITIONING);
    Assert.assertTrue(new FieldSet(lprops.getGroupedFields().toArray()).equals(new FieldSet(0, 2)));
    Assert.assertTrue(lprops.getOrdering() == null);
}
Example #23
Source File: NAryUnionPlanNode.java From flink with Apache License 2.0 | 5 votes |
/**
 * @param template
 */
public NAryUnionPlanNode(BinaryUnionNode template, List<Channel> inputs, GlobalProperties gProps,
        Costs cumulativeCosts) {
    super(template, "Union", DriverStrategy.NONE);

    this.inputs = inputs;
    this.globalProps = gProps;
    this.localProps = new LocalProperties();
    this.nodeCosts = new Costs();
    this.cumulativeCosts = cumulativeCosts;
}
Example #24
Source File: PropertyDataSourceTest.java From flink with Apache License 2.0 | 5 votes |
@Test
public void checkSinglePartitionedOrderedSource4() {

    ExecutionEnvironment env = ExecutionEnvironment.createLocalEnvironment();
    env.setParallelism(DEFAULT_PARALLELISM);

    DataSource<Tuple2<Long, String>> data = env.readCsvFile("/some/path").types(Long.class, String.class);

    data.getSplitDataProperties()
            .splitsPartitionedBy(0, 1)
            .splitsOrderedBy(new int[]{1}, new Order[]{Order.DESCENDING});

    data.output(new DiscardingOutputFormat<Tuple2<Long, String>>());

    Plan plan = env.createProgramPlan();

    // submit the plan to the compiler
    OptimizedPlan oPlan = compileNoStats(plan);

    // check the optimized Plan
    SinkPlanNode sinkNode = oPlan.getDataSinks().iterator().next();
    SourcePlanNode sourceNode = (SourcePlanNode) sinkNode.getPredecessor();

    GlobalProperties gprops = sourceNode.getGlobalProperties();
    LocalProperties lprops = sourceNode.getLocalProperties();

    Assert.assertTrue((new FieldSet(gprops.getPartitioningFields().toArray())).equals(new FieldSet(0, 1)));
    Assert.assertTrue(gprops.getPartitioning() == PartitioningProperty.ANY_PARTITIONING);
    Assert.assertTrue(lprops.getGroupedFields() == null);
    Assert.assertTrue(lprops.getOrdering() == null);
}
Example #25
Source File: PlanNode.java From Flink-CEPplus with Apache License 2.0 | 5 votes |
public void initProperties(GlobalProperties globals, LocalProperties locals) {
    if (this.globalProps != null || this.localProps != null) {
        throw new IllegalStateException();
    }
    this.globalProps = globals;
    this.localProps = locals;
}
Example #26
Source File: PropertyDataSourceTest.java From flink with Apache License 2.0 | 5 votes |
@Test
public void checkSinglePartitionedGroupedSource3() {

    ExecutionEnvironment env = ExecutionEnvironment.createLocalEnvironment();
    env.setParallelism(DEFAULT_PARALLELISM);

    DataSource<Tuple2<Long, String>> data = env.readCsvFile("/some/path").types(Long.class, String.class);

    data.getSplitDataProperties()
            .splitsPartitionedBy(1)
            .splitsGroupedBy(0);

    data.output(new DiscardingOutputFormat<Tuple2<Long, String>>());

    Plan plan = env.createProgramPlan();

    // submit the plan to the compiler
    OptimizedPlan oPlan = compileNoStats(plan);

    // check the optimized Plan
    SinkPlanNode sinkNode = oPlan.getDataSinks().iterator().next();
    SourcePlanNode sourceNode = (SourcePlanNode) sinkNode.getPredecessor();

    GlobalProperties gprops = sourceNode.getGlobalProperties();
    LocalProperties lprops = sourceNode.getLocalProperties();

    Assert.assertTrue((new FieldSet(gprops.getPartitioningFields().toArray())).equals(new FieldSet(1)));
    Assert.assertTrue(gprops.getPartitioning() == PartitioningProperty.ANY_PARTITIONING);
    Assert.assertTrue(lprops.getGroupedFields() == null);
    Assert.assertTrue(lprops.getOrdering() == null);
}
Example #27
Source File: FeedbackPropertiesMatchTest.java From flink with Apache License 2.0 | 5 votes |
@Test
public void testNoPartialSolutionFoundSingleInputOnly() {
    try {
        SourcePlanNode target = new SourcePlanNode(getSourceNode(), "Source");
        SourcePlanNode otherTarget = new SourcePlanNode(getSourceNode(), "Source");

        Channel toMap1 = new Channel(target);
        toMap1.setShipStrategy(ShipStrategyType.FORWARD, DataExchangeMode.PIPELINED);
        toMap1.setLocalStrategy(LocalStrategy.NONE);
        SingleInputPlanNode map1 = new SingleInputPlanNode(getMapNode(), "Mapper 1", toMap1, DriverStrategy.MAP);

        Channel toMap2 = new Channel(map1);
        toMap2.setShipStrategy(ShipStrategyType.FORWARD, DataExchangeMode.PIPELINED);
        toMap2.setLocalStrategy(LocalStrategy.NONE);
        SingleInputPlanNode map2 = new SingleInputPlanNode(getMapNode(), "Mapper 2", toMap2, DriverStrategy.MAP);

        {
            GlobalProperties gp = new GlobalProperties();
            LocalProperties lp = new LocalProperties();

            FeedbackPropertiesMeetRequirementsReport report = map2.checkPartialSolutionPropertiesMet(otherTarget, gp, lp);
            assertTrue(report == NO_PARTIAL_SOLUTION);
        }
    } catch (Exception e) {
        e.printStackTrace();
        fail(e.getMessage());
    }
}
Example #28
Source File: PropertyDataSourceTest.java From flink with Apache License 2.0 | 5 votes |
@Test
public void checkSinglePartitionedGroupedSource2() {

    ExecutionEnvironment env = ExecutionEnvironment.createLocalEnvironment();
    env.setParallelism(DEFAULT_PARALLELISM);

    DataSource<Tuple2<Long, String>> data = env.readCsvFile("/some/path").types(Long.class, String.class);

    data.getSplitDataProperties()
            .splitsPartitionedBy(0)
            .splitsGroupedBy(1, 0);

    data.output(new DiscardingOutputFormat<Tuple2<Long, String>>());

    Plan plan = env.createProgramPlan();

    // submit the plan to the compiler
    OptimizedPlan oPlan = compileNoStats(plan);

    // check the optimized Plan
    SinkPlanNode sinkNode = oPlan.getDataSinks().iterator().next();
    SourcePlanNode sourceNode = (SourcePlanNode) sinkNode.getPredecessor();

    GlobalProperties gprops = sourceNode.getGlobalProperties();
    LocalProperties lprops = sourceNode.getLocalProperties();

    Assert.assertTrue((new FieldSet(gprops.getPartitioningFields().toArray())).equals(new FieldSet(0)));
    Assert.assertTrue(gprops.getPartitioning() == PartitioningProperty.ANY_PARTITIONING);
    Assert.assertTrue(new FieldSet(lprops.getGroupedFields().toArray()).equals(new FieldSet(0, 1)));
    Assert.assertTrue(lprops.getOrdering() == null);
}
Example #29
Source File: WorksetNode.java From flink with Apache License 2.0 | 5 votes |
public void setCandidateProperties(GlobalProperties gProps, LocalProperties lProps, Channel initialInput) {
    if (this.cachedPlans != null) {
        throw new IllegalStateException();
    } else {
        WorksetPlanNode wspn = new WorksetPlanNode(
                this, "Workset (" + this.getOperator().getName() + ")", gProps, lProps, initialInput);
        this.cachedPlans = Collections.<PlanNode>singletonList(wspn);
    }
}
Example #30
Source File: PropertyDataSourceTest.java From Flink-CEPplus with Apache License 2.0 | 5 votes |
@Test
public void checkSinglePartitionedSource2() {

    ExecutionEnvironment env = ExecutionEnvironment.createLocalEnvironment();
    env.setParallelism(DEFAULT_PARALLELISM);

    DataSource<Tuple2<Long, String>> data = env.readCsvFile("/some/path").types(Long.class, String.class);

    data.getSplitDataProperties()
            .splitsPartitionedBy(1, 0);

    data.output(new DiscardingOutputFormat<Tuple2<Long, String>>());

    Plan plan = env.createProgramPlan();

    // submit the plan to the compiler
    OptimizedPlan oPlan = compileNoStats(plan);

    // check the optimized Plan
    SinkPlanNode sinkNode = oPlan.getDataSinks().iterator().next();
    SourcePlanNode sourceNode = (SourcePlanNode) sinkNode.getPredecessor();

    GlobalProperties gprops = sourceNode.getGlobalProperties();
    LocalProperties lprops = sourceNode.getLocalProperties();

    Assert.assertTrue((new FieldSet(gprops.getPartitioningFields().toArray())).equals(new FieldSet(0, 1)));
    Assert.assertTrue(gprops.getPartitioning() == PartitioningProperty.ANY_PARTITIONING);
    Assert.assertTrue(lprops.getGroupedFields() == null);
    Assert.assertTrue(lprops.getOrdering() == null);
}