org.apache.calcite.plan.ConventionTraitDef Java Examples
The following examples show how to use
org.apache.calcite.plan.ConventionTraitDef.
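All of the examples below follow the same basic pattern: ConventionTraitDef.INSTANCE is registered with the planner, either directly via RelOptPlanner.addRelTraitDef or through FrameworkConfig.traitDefs, so that the planner can convert relational expressions between calling conventions. The following is a minimal, self-contained sketch of that pattern; the class name ConventionTraitDefSketch and the empty root schema are illustrative assumptions, not code taken from any of the projects below.

import java.util.ArrayList;
import java.util.List;

import org.apache.calcite.plan.ConventionTraitDef;
import org.apache.calcite.plan.RelTraitDef;
import org.apache.calcite.rel.RelCollationTraitDef;
import org.apache.calcite.schema.SchemaPlus;
import org.apache.calcite.sql.parser.SqlParser;
import org.apache.calcite.tools.FrameworkConfig;
import org.apache.calcite.tools.Frameworks;
import org.apache.calcite.tools.Planner;
import org.apache.calcite.tools.Programs;

public class ConventionTraitDefSketch {
  public static Planner createPlanner() {
    // Register the convention trait (and, optionally, collation) so the
    // planner knows how to convert between calling conventions.
    final List<RelTraitDef> traitDefs = new ArrayList<>();
    traitDefs.add(ConventionTraitDef.INSTANCE);
    traitDefs.add(RelCollationTraitDef.INSTANCE);

    // Illustrative assumption: an empty root schema with no tables added.
    final SchemaPlus rootSchema = Frameworks.createRootSchema(true);
    final FrameworkConfig config = Frameworks.newConfigBuilder()
        .parserConfig(SqlParser.Config.DEFAULT)
        .defaultSchema(rootSchema)
        .traitDefs(traitDefs)
        .programs(Programs.ofRules(Programs.RULE_SET))
        .build();
    return Frameworks.getPlanner(config);
  }
}

A Planner obtained this way can then parse, validate, and transform SQL, as the test examples below demonstrate.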
Example #1
Source File: GremlinCompiler.java From sql-gremlin with Apache License 2.0

public GremlinCompiler(Graph graph, SchemaConfig schemaConfig) {
  this.graph = graph;
  this.schemaConfig = schemaConfig;

  final SchemaPlus rootSchema = Frameworks.createRootSchema(true);
  final List<RelTraitDef> traitDefs = new ArrayList<>();
  traitDefs.add(ConventionTraitDef.INSTANCE);
  traitDefs.add(RelCollationTraitDef.INSTANCE);
  final SqlParser.Config parserConfig =
      SqlParser.configBuilder().setLex(Lex.MYSQL).build();

  frameworkConfig = Frameworks.newConfigBuilder()
      .parserConfig(parserConfig)
      .defaultSchema(rootSchema.add("gremlin", new GremlinSchema(graph, schemaConfig)))
      .traitDefs(traitDefs)
      .programs(Programs.sequence(Programs.ofRules(Programs.RULE_SET), Programs.CALC_PROGRAM))
      .build();
}
Example #2
Source File: PlanningConfigurationBuilder.java From flink with Apache License 2.0

public PlanningConfigurationBuilder(
    TableConfig tableConfig,
    FunctionCatalog functionCatalog,
    CalciteSchema rootSchema,
    ExpressionBridge<PlannerExpression> expressionBridge) {
  this.tableConfig = tableConfig;
  this.functionCatalog = functionCatalog;

  // the converter is needed when calling temporal table functions from SQL, because
  // they reference a history table represented with a tree of table operations
  this.context = Contexts.of(expressionBridge);

  this.planner = new VolcanoPlanner(costFactory, context);
  planner.setExecutor(new ExpressionReducer(tableConfig));
  planner.addRelTraitDef(ConventionTraitDef.INSTANCE);

  this.expressionBridge = expressionBridge;
  this.rootSchema = rootSchema;
}
Example #3
Source File: TraitPropagationTest.java From calcite with Apache License 2.0

public void onMatch(RelOptRuleCall call) {
  LogicalProject rel = call.rel(0);
  RelNode rawInput = call.rel(1);
  RelNode input = convert(rawInput, PHYSICAL);

  if (subsetHack && input instanceof RelSubset) {
    RelSubset subset = (RelSubset) input;
    for (RelNode child : subset.getRels()) {
      // skip logical nodes
      if (child.getTraitSet().getTrait(ConventionTraitDef.INSTANCE)
          == Convention.NONE) {
        continue;
      } else {
        RelTraitSet outcome = child.getTraitSet().replace(PHYSICAL);
        call.transformTo(
            new PhysProj(rel.getCluster(), outcome, convert(child, outcome),
                rel.getProjects(), rel.getRowType()));
      }
    }
  } else {
    call.transformTo(
        PhysProj.create(input, rel.getProjects(), rel.getRowType()));
  }
}
Example #4
Source File: VolcanoPlannerTest.java From calcite with Apache License 2.0

/** Tests transformation of a single+leaf from NONE to PHYS. In the past,
 * this one didn't work due to the definition of ReformedSingleRule. */
@Disabled // broken, because ReformedSingleRule matches child traits strictly
@Test void testTransformSingleReformed() {
  VolcanoPlanner planner = new VolcanoPlanner();
  planner.addRelTraitDef(ConventionTraitDef.INSTANCE);
  planner.addRule(new PhysLeafRule());
  planner.addRule(new ReformedSingleRule());

  RelOptCluster cluster = newCluster(planner);
  NoneLeafRel leafRel = new NoneLeafRel(cluster, "a");
  NoneSingleRel singleRel = new NoneSingleRel(cluster, leafRel);
  RelNode convertedRel =
      planner.changeTraits(singleRel, cluster.traitSetOf(PHYS_CALLING_CONVENTION));
  planner.setRoot(convertedRel);
  RelNode result = planner.chooseDelegate().findBestExp();
  assertTrue(result instanceof PhysSingleRel);
}
Example #5
Source File: VolcanoPlannerTest.java From calcite with Apache License 2.0

@Test public void testPruneNode() {
  VolcanoPlanner planner = new VolcanoPlanner();
  planner.addRelTraitDef(ConventionTraitDef.INSTANCE);
  planner.addRule(new PhysLeafRule());

  RelOptCluster cluster = newCluster(planner);
  NoneLeafRel leafRel = new NoneLeafRel(cluster, "a");
  planner.setRoot(leafRel);

  // prune the node
  planner.prune(leafRel);

  // verify that the rule match cannot be popped,
  // as the related node has been pruned
  while (true) {
    VolcanoRuleMatch ruleMatch = planner.ruleQueue.popMatch(VolcanoPlannerPhase.OPTIMIZE);
    if (ruleMatch == null) {
      break;
    }
    assertFalse(ruleMatch.rels[0] == leafRel);
  }
}
Example #6
Source File: BatsOptimizerTest.java From Bats with Apache License 2.0

static void testVolcanoPlanner() throws Exception {
  VolcanoPlanner volcanoPlanner = createVolcanoPlanner();
  volcanoPlanner.addRelTraitDef(ConventionTraitDef.INSTANCE);
  // volcanoPlanner.addRelTraitDef(RelCollationTraitDef.INSTANCE);

  // addRules(volcanoPlanner);
  volcanoPlanner.addRule(ReduceExpressionsRule.PROJECT_INSTANCE);
  // volcanoPlanner.addRule(EnumerableRules.ENUMERABLE_PROJECT_RULE);

  RelNode relNode = testSqlToRelConverter(volcanoPlanner);
  volcanoPlanner.setRoot(relNode);
  relNode = volcanoPlanner.findBestExp(); // this step fails

  String plan = RelOptUtil.toString(relNode);
  System.out.println("Volcano Plan:");
  System.out.println("------------------------------------------------------------------");
  System.out.println(plan);
}
Example #7
Source File: BatsOptimizerTest.java From Bats with Apache License 2.0

static HepPlanner createHepPlanner() {
  HepProgramBuilder builder = new HepProgramBuilder();
  // builder.addRuleInstance(FilterJoinRule.FilterIntoJoinRule.FILTER_ON_JOIN);
  // builder.addRuleInstance(FilterJoinRule.JOIN);
  builder.addRuleCollection(Programs.CALC_RULES);
  // builder.addRuleCollection(Programs.RULE_SET);

  // builder.addRuleInstance(ReduceExpressionsRule.PROJECT_INSTANCE); // with this rule, 100+100 is folded to 200 (constant folding)
  // builder.addRuleInstance(ReduceExpressionsRule.FILTER_INSTANCE);
  // builder.addRuleInstance(FilterProjectTransposeRule.INSTANCE);

  // HepMatchOrder order = HepMatchOrder.TOP_DOWN;
  // builder.addMatchOrder(order);
  // builder.addConverters(true);

  HepPlanner hepPlanner = new HepPlanner(builder.build());
  hepPlanner.addRelTraitDef(ConventionTraitDef.INSTANCE);
  hepPlanner.addRelTraitDef(RelCollationTraitDef.INSTANCE);
  return hepPlanner;
}
Example #8
Source File: VolcanoPlannerTest.java From calcite with Apache License 2.0

/** Tests that VolcanoPlanner should fire rule match from subsets after a
 * RelSet merge. The rules matching for a RelSubset should be able to fire
 * on the subsets that are merged into the RelSets. */
@Test void testSetMergeMatchSubsetRule() {
  VolcanoPlanner planner = new VolcanoPlanner();
  planner.addRelTraitDef(ConventionTraitDef.INSTANCE);
  planner.addRelTraitDef(RelCollationTraitDef.INSTANCE);
  planner.addRule(new PhysLeafRule());
  planner.addRule(new GoodSingleRule());
  planner.addRule(new PhysSingleInputSetMergeRule());
  final List<String> buf = new ArrayList<>();
  planner.addRule(new PhysSingleSubsetRule(buf));

  RelOptCluster cluster = newCluster(planner);
  NoneLeafRel leafRel = new NoneLeafRel(cluster, "a");
  NoneSingleRel singleRel = new NoneSingleRel(cluster, leafRel);
  RelNode convertedRel = planner
      .changeTraits(singleRel, cluster.traitSetOf(PHYS_CALLING_CONVENTION));
  planner.setRoot(convertedRel);
  RelNode result = planner.chooseDelegate().findBestExp();
  assertTrue(result instanceof PhysSingleRel);
  assertThat(sort(buf),
      equalTo(sort("PhysSingleRel:RelSubset#0.PHYS.[]",
          "PhysSingleRel:RelSubset#0.PHYS_3.[]")));
}
Example #9
Source File: VolcanoPlannerTest.java From calcite with Apache License 2.0

@Test void testMultiInputsParentOpMatching() {
  VolcanoPlanner planner = new VolcanoPlanner();
  planner.addRelTraitDef(ConventionTraitDef.INSTANCE);
  RelOptCluster cluster = newCluster(planner);

  // The trigger rule that generates PhysLeafRel from NoneLeafRel
  planner.addRule(new PhysLeafRule());

  // The rule with third child op matching PhysLeafRel, which should not be
  // matched at all
  planner.addRule(new ThreeInputsUnionRule());

  // Construct a union with only two children
  NoneLeafRel leftRel = new NoneLeafRel(cluster, "b");
  RelNode leftPhy = planner
      .changeTraits(leftRel, cluster.traitSetOf(PHYS_CALLING_CONVENTION));
  PhysLeafRel rightPhy =
      new PhysLeafRel(cluster, PHYS_CALLING_CONVENTION, "b");

  planner.setRoot(
      new EnumerableUnion(cluster,
          cluster.traitSetOf(PHYS_CALLING_CONVENTION),
          Arrays.asList(leftPhy, rightPhy), false));

  planner.chooseDelegate().findBestExp();
}
Example #10
Source File: SqlWorker.java From quark with Apache License 2.0

private Planner buildPlanner(QueryContext context) {
  final List<RelTraitDef> traitDefs = new ArrayList<RelTraitDef>();
  traitDefs.add(ConventionTraitDef.INSTANCE);
  traitDefs.add(RelCollationTraitDef.INSTANCE);
  final ChainedSqlOperatorTable opTab =
      new ChainedSqlOperatorTable(
          ImmutableList.of(SqlStdOperatorTable.instance(),
              HiveSqlOperatorTable.instance(),
              catalogReader));
  FrameworkConfig config = Frameworks.newConfigBuilder()
      .parserConfig(SqlParser.configBuilder()
          .setQuotedCasing(Casing.UNCHANGED)
          .setUnquotedCasing(Casing.TO_UPPER)
          .setQuoting(Quoting.DOUBLE_QUOTE)
          .build())
      .defaultSchema(context.getDefaultSchema())
      .operatorTable(opTab)
      .traitDefs(traitDefs)
      .convertletTable(StandardConvertletTable.INSTANCE)
      .programs(getPrograms())
      .typeSystem(RelDataTypeSystem.DEFAULT)
      .build();
  return Frameworks.getPlanner(config);
}
Example #11
Source File: VolcanoPlannerTest.java From calcite with Apache License 2.0

/** Tests transformation of a single+leaf from NONE to PHYS. */
@Test void testTransformSingleGood() {
  VolcanoPlanner planner = new VolcanoPlanner();
  planner.addRelTraitDef(ConventionTraitDef.INSTANCE);
  planner.addRule(new PhysLeafRule());
  planner.addRule(new GoodSingleRule());

  RelOptCluster cluster = newCluster(planner);
  NoneLeafRel leafRel = new NoneLeafRel(cluster, "a");
  NoneSingleRel singleRel = new NoneSingleRel(cluster, leafRel);
  RelNode convertedRel =
      planner.changeTraits(singleRel, cluster.traitSetOf(PHYS_CALLING_CONVENTION));
  planner.setRoot(convertedRel);
  RelNode result = planner.chooseDelegate().findBestExp();
  assertTrue(result instanceof PhysSingleRel);
}
Example #12
Source File: VolcanoPlannerTest.java From calcite with Apache License 2.0

/** Tests transformation of a leaf from NONE to PHYS. */
@Test void testTransformLeaf() {
  VolcanoPlanner planner = new VolcanoPlanner();
  planner.addRelTraitDef(ConventionTraitDef.INSTANCE);
  planner.addRule(new PhysLeafRule());

  RelOptCluster cluster = newCluster(planner);
  NoneLeafRel leafRel = new NoneLeafRel(cluster, "a");
  RelNode convertedRel =
      planner.changeTraits(leafRel, cluster.traitSetOf(PHYS_CALLING_CONVENTION));
  planner.setRoot(convertedRel);
  RelNode result = planner.chooseDelegate().findBestExp();
  assertTrue(result instanceof PhysLeafRel);
}
Example #13
Source File: PlanningConfigurationBuilder.java From flink with Apache License 2.0

public PlanningConfigurationBuilder(
    TableConfig tableConfig,
    FunctionCatalog functionCatalog,
    CalciteSchema rootSchema,
    ExpressionBridge<PlannerExpression> expressionBridge) {
  this.tableConfig = tableConfig;
  this.functionCatalog = functionCatalog;

  // the converter is needed when calling temporal table functions from SQL, because
  // they reference a history table represented with a tree of table operations.
  this.context = Contexts.of(expressionBridge, tableConfig);

  this.planner = new VolcanoPlanner(costFactory, context);
  planner.setExecutor(new ExpressionReducer(tableConfig));
  planner.addRelTraitDef(ConventionTraitDef.INSTANCE);

  this.expressionBridge = expressionBridge;
  this.rootSchema = rootSchema;
}
Example #14
Source File: ComboRuleTest.java From calcite with Apache License 2.0

@Test void testCombo() {
  VolcanoPlanner planner = new VolcanoPlanner();
  planner.addRelTraitDef(ConventionTraitDef.INSTANCE);
  planner.addRule(new ComboRule());
  planner.addRule(new AddIntermediateNodeRule());
  planner.addRule(new GoodSingleRule());

  RelOptCluster cluster = newCluster(planner);
  NoneLeafRel leafRel = new NoneLeafRel(cluster, "a");
  NoneSingleRel singleRel = new NoneSingleRel(cluster, leafRel);
  NoneSingleRel singleRel2 = new NoneSingleRel(cluster, singleRel);
  RelNode convertedRel =
      planner.changeTraits(singleRel2, cluster.traitSetOf(PHYS_CALLING_CONVENTION));
  planner.setRoot(convertedRel);
  RelNode result = planner.chooseDelegate().findBestExp();
  assertTrue(result instanceof IntermediateNode);
}
Example #15
Source File: SqlHintsConverterTest.java From calcite with Apache License 2.0

@Test void testUseMergeJoin() {
  final String sql = "select /*+ use_merge_join(emp, dept) */\n"
      + "ename, job, sal, dept.name\n"
      + "from emp join dept on emp.deptno = dept.deptno";
  RelOptPlanner planner = new VolcanoPlanner();
  planner.addRelTraitDef(ConventionTraitDef.INSTANCE);
  planner.addRelTraitDef(RelCollationTraitDef.INSTANCE);
  Tester tester1 = tester.withDecorrelation(true)
      .withClusterFactory(
          relOptCluster -> RelOptCluster.create(planner, relOptCluster.getRexBuilder()));
  final RelNode rel = tester1.convertSqlToRel(sql).rel;
  RuleSet ruleSet = RuleSets.ofList(
      EnumerableRules.ENUMERABLE_MERGE_JOIN_RULE,
      EnumerableRules.ENUMERABLE_JOIN_RULE,
      EnumerableRules.ENUMERABLE_PROJECT_RULE,
      EnumerableRules.ENUMERABLE_TABLE_SCAN_RULE,
      EnumerableRules.ENUMERABLE_SORT_RULE,
      AbstractConverter.ExpandConversionRule.INSTANCE);
  Program program = Programs.of(ruleSet);
  RelTraitSet toTraits = rel
      .getCluster()
      .traitSet()
      .replace(EnumerableConvention.INSTANCE);
  RelNode relAfter = program.run(planner, rel, toTraits,
      Collections.emptyList(), Collections.emptyList());
  String planAfter = NL + RelOptUtil.toString(relAfter);
  getDiffRepos().assertEquals("planAfter", "${planAfter}", planAfter);
}
Example #16
Source File: FrameworksTest.java From calcite with Apache License 2.0

/** Test case for
 * <a href="https://issues.apache.org/jira/browse/CALCITE-2039">[CALCITE-2039]
 * AssertionError when pushing project to ProjectableFilterableTable</a>
 * using UPDATE via {@link Frameworks}. */
@Test void testUpdate() throws Exception {
  Table table = new TableImpl();
  final SchemaPlus rootSchema = Frameworks.createRootSchema(true);
  SchemaPlus schema = rootSchema.add("x", new AbstractSchema());
  schema.add("MYTABLE", table);
  List<RelTraitDef> traitDefs = new ArrayList<>();
  traitDefs.add(ConventionTraitDef.INSTANCE);
  traitDefs.add(RelDistributionTraitDef.INSTANCE);
  SqlParser.Config parserConfig =
      SqlParser.configBuilder(SqlParser.Config.DEFAULT)
          .setCaseSensitive(false)
          .build();

  final FrameworkConfig config = Frameworks.newConfigBuilder()
      .parserConfig(parserConfig)
      .defaultSchema(schema)
      .traitDefs(traitDefs)
      // define the rules you want to apply
      .ruleSets(
          RuleSets.ofList(AbstractConverter.ExpandConversionRule.INSTANCE))
      .programs(Programs.ofRules(Programs.RULE_SET))
      .build();
  executeQuery(config, " UPDATE MYTABLE set id=7 where id=1",
      CalciteSystemProperty.DEBUG.value());
}
Example #17
Source File: PlannerTest.java From calcite with Apache License 2.0

/** Unit test that parses, validates, converts and plans. Planner is
 * provided with a list of RelTraitDefs to register. */
@Test void testPlanWithExplicitTraitDefs() throws Exception {
  RuleSet ruleSet =
      RuleSets.ofList(
          FilterMergeRule.INSTANCE,
          EnumerableRules.ENUMERABLE_TABLE_SCAN_RULE,
          EnumerableRules.ENUMERABLE_FILTER_RULE,
          EnumerableRules.ENUMERABLE_PROJECT_RULE);
  final List<RelTraitDef> traitDefs = new ArrayList<>();
  traitDefs.add(ConventionTraitDef.INSTANCE);
  traitDefs.add(RelCollationTraitDef.INSTANCE);

  Planner planner = getPlanner(traitDefs, Programs.of(ruleSet));

  SqlNode parse = planner.parse("select * from \"emps\"");
  SqlNode validate = planner.validate(parse);
  RelNode convert = planner.rel(validate).project();
  RelTraitSet traitSet = convert.getTraitSet()
      .replace(EnumerableConvention.INSTANCE);
  RelNode transform = planner.transform(0, traitSet, convert);
  assertThat(toString(transform),
      equalTo("EnumerableProject(empid=[$0], deptno=[$1], name=[$2], salary=[$3], commission=[$4])\n"
          + " EnumerableTableScan(table=[[hr, emps]])\n"));
}
Example #18
Source File: SortRemoveRuleTest.java From calcite with Apache License 2.0

/**
 * The default schema that is used in these tests provides tables sorted on the primary key. Due
 * to this, scan operators always come with a {@link org.apache.calcite.rel.RelCollation} trait.
 */
private RelNode transform(String sql, RuleSet prepareRules) throws Exception {
  final SchemaPlus rootSchema = Frameworks.createRootSchema(true);
  final SchemaPlus defSchema = rootSchema.add("hr", new HrClusteredSchema());
  final FrameworkConfig config = Frameworks.newConfigBuilder()
      .parserConfig(SqlParser.Config.DEFAULT)
      .defaultSchema(defSchema)
      .traitDefs(ConventionTraitDef.INSTANCE, RelCollationTraitDef.INSTANCE)
      .programs(
          Programs.of(prepareRules),
          Programs.ofRules(SortRemoveRule.INSTANCE))
      .build();
  Planner planner = Frameworks.getPlanner(config);
  SqlNode parse = planner.parse(sql);
  SqlNode validate = planner.validate(parse);
  RelRoot planRoot = planner.rel(validate);
  RelNode planBefore = planRoot.rel;
  RelTraitSet desiredTraits = planBefore.getTraitSet()
      .replace(EnumerableConvention.INSTANCE);
  RelNode planAfter = planner.transform(0, desiredTraits, planBefore);
  return planner.transform(1, desiredTraits, planAfter);
}
Example #19
Source File: VolcanoPlannerTest.java From calcite with Apache License 2.0

/** Test case for
 * <a href="https://issues.apache.org/jira/browse/CALCITE-3118">[CALCITE-3118]
 * VolcanoRuleCall should look at RelSubset rather than RelSet
 * when checking child ordinal of a parent operand</a>. */
@Test void testMatchedOperandsDifferent() {
  VolcanoPlanner planner = new VolcanoPlanner();
  planner.addRelTraitDef(ConventionTraitDef.INSTANCE);
  RelOptCluster cluster = newCluster(planner);

  // The rule that triggers the assert rule
  planner.addRule(new PhysLeafRule());

  // The rule asserting that the matched operands are different
  planner.addRule(new AssertOperandsDifferentRule());

  // Construct two children in the same set and a parent RelNode
  NoneLeafRel leftRel = new NoneLeafRel(cluster, "a");
  RelNode leftPhy = planner
      .changeTraits(leftRel, cluster.traitSetOf(PHYS_CALLING_CONVENTION));
  PhysLeafRel rightPhy =
      new PhysLeafRel(cluster, PHYS_CALLING_CONVENTION_2, "b");

  PhysBiRel parent = new PhysBiRel(cluster,
      cluster.traitSetOf(PHYS_CALLING_CONVENTION), leftPhy, rightPhy);
  planner.setRoot(parent);

  // Make sure both RelNodes are in the same set, but different subset
  planner.ensureRegistered(leftPhy, rightPhy);
  planner.chooseDelegate().findBestExp();
}
Example #20
Source File: VolcanoPlannerTest.java From calcite with Apache License 2.0

/** This always worked (in contrast to testRemoveSingleReformed) because it
 * uses a completely-physical pattern (requiring GoodSingleRule to fire
 * first). */
@Test void testRemoveSingleGood() {
  VolcanoPlanner planner = new VolcanoPlanner();
  planner.addRelTraitDef(ConventionTraitDef.INSTANCE);
  planner.addRule(new PhysLeafRule());
  planner.addRule(new GoodSingleRule());
  planner.addRule(new GoodRemoveSingleRule());

  RelOptCluster cluster = newCluster(planner);
  NoneLeafRel leafRel = new NoneLeafRel(cluster, "a");
  NoneSingleRel singleRel = new NoneSingleRel(cluster, leafRel);
  RelNode convertedRel =
      planner.changeTraits(singleRel, cluster.traitSetOf(PHYS_CALLING_CONVENTION));
  planner.setRoot(convertedRel);
  RelNode result = planner.chooseDelegate().findBestExp();
  assertTrue(result instanceof PhysLeafRel);
  PhysLeafRel resultLeaf = (PhysLeafRel) result;
  assertEquals("c", resultLeaf.label);
}
Example #21
Source File: VolcanoPlannerTest.java From calcite with Apache License 2.0

@Disabled("CALCITE-2592 EnumerableMergeJoin is never taken")
@Test void testMergeJoin() {
  VolcanoPlanner planner = new VolcanoPlanner();
  planner.addRelTraitDef(ConventionTraitDef.INSTANCE);

  // Below two lines are important for the planner to use collation trait and generate merge join
  planner.addRelTraitDef(RelCollationTraitDef.INSTANCE);
  planner.registerAbstractRelationalRules();

  planner.addRule(EnumerableRules.ENUMERABLE_MERGE_JOIN_RULE);
  planner.addRule(EnumerableRules.ENUMERABLE_VALUES_RULE);
  planner.addRule(EnumerableRules.ENUMERABLE_SORT_RULE);

  RelOptCluster cluster = newCluster(planner);

  RelBuilder relBuilder = RelFactories.LOGICAL_BUILDER.create(cluster, null);
  RelNode logicalPlan = relBuilder
      .values(new String[]{"id", "name"}, "2", "a", "1", "b")
      .values(new String[]{"id", "name"}, "1", "x", "2", "y")
      .join(JoinRelType.INNER, "id")
      .build();

  RelTraitSet desiredTraits =
      cluster.traitSet().replace(EnumerableConvention.INSTANCE);
  final RelNode newRoot = planner.changeTraits(logicalPlan, desiredTraits);
  planner.setRoot(newRoot);

  RelNode bestExp = planner.findBestExp();

  final String plan = ""
      + "EnumerableMergeJoin(condition=[=($0, $2)], joinType=[inner])\n"
      + " EnumerableSort(sort0=[$0], dir0=[ASC])\n"
      + " EnumerableValues(tuples=[[{ '2', 'a' }, { '1', 'b' }]])\n"
      + " EnumerableValues(tuples=[[{ '1', 'x' }, { '2', 'y' }]])\n";

  assertThat("Merge join + sort is expected", plan,
      isLinux(RelOptUtil.toString(bestExp)));
}
Example #22
Source File: VolcanoPlannerTest.java From calcite with Apache License 2.0

/** Tests a rule that is fired once per subset (whereas most rules are fired
 * once per rel in a set or rel in a subset). */
@Test void testSubsetRule() {
  VolcanoPlanner planner = new VolcanoPlanner();
  planner.addRelTraitDef(ConventionTraitDef.INSTANCE);
  planner.addRelTraitDef(RelCollationTraitDef.INSTANCE);
  planner.addRule(new PhysLeafRule());
  planner.addRule(new GoodSingleRule());
  final List<String> buf = new ArrayList<>();
  planner.addRule(new SubsetRule(buf));

  RelOptCluster cluster = newCluster(planner);
  NoneLeafRel leafRel = new NoneLeafRel(cluster, "a");
  NoneSingleRel singleRel = new NoneSingleRel(cluster, leafRel);
  RelNode convertedRel =
      planner.changeTraits(singleRel, cluster.traitSetOf(PHYS_CALLING_CONVENTION));
  planner.changeTraits(leafRel,
      cluster.traitSetOf(PHYS_CALLING_CONVENTION)
          .plus(RelCollations.of(0)));
  planner.setRoot(convertedRel);
  RelNode result = planner.chooseDelegate().findBestExp();
  assertTrue(result instanceof PhysSingleRel);
  assertThat(sort(buf),
      equalTo(sort(
          "NoneSingleRel:RelSubset#0.NONE.[]",
          "PhysSingleRel:RelSubset#0.PHYS.[0]",
          "PhysSingleRel:RelSubset#0.PHYS.[]")));
}
Example #23
Source File: SqlHintsConverterTest.java From calcite with Apache License 2.0

@Test void testHintsPropagationInVolcanoPlannerRules() {
  final String sql = "select /*+ use_hash_join(r, s), use_hash_join(emp, dept) */\n"
      + "ename, job, sal, dept.name\n"
      + "from emp join dept on emp.deptno = dept.deptno";
  RelOptPlanner planner = new VolcanoPlanner();
  planner.addRelTraitDef(ConventionTraitDef.INSTANCE);
  Tester tester1 = tester.withDecorrelation(true)
      .withClusterFactory(
          relOptCluster -> RelOptCluster.create(planner, relOptCluster.getRexBuilder()));
  final RelNode rel = tester1.convertSqlToRel(sql).rel;
  final RelHint hint = RelHint.builder("USE_HASH_JOIN")
      .inheritPath(0)
      .hintOption("EMP")
      .hintOption("DEPT")
      .build();
  // Validate Volcano planner.
  RuleSet ruleSet = RuleSets.ofList(
      new MockEnumerableJoinRule(hint), // Rule to validate the hint.
      FilterProjectTransposeRule.INSTANCE,
      FilterMergeRule.INSTANCE,
      ProjectMergeRule.INSTANCE,
      EnumerableRules.ENUMERABLE_JOIN_RULE,
      EnumerableRules.ENUMERABLE_PROJECT_RULE,
      EnumerableRules.ENUMERABLE_FILTER_RULE,
      EnumerableRules.ENUMERABLE_SORT_RULE,
      EnumerableRules.ENUMERABLE_LIMIT_RULE,
      EnumerableRules.ENUMERABLE_TABLE_SCAN_RULE);
  Program program = Programs.of(ruleSet);
  RelTraitSet toTraits = rel
      .getCluster()
      .traitSet()
      .replace(EnumerableConvention.INSTANCE);
  program.run(planner, rel, toTraits,
      Collections.emptyList(), Collections.emptyList());
}
Example #24
Source File: TopDownOptTest.java From calcite with Apache License 2.0

private Query(String sql) {
  this.sql = sql;

  planner = new VolcanoPlanner();
  // Always use top-down optimization
  planner.setTopDownOpt(true);
  planner.addRelTraitDef(ConventionTraitDef.INSTANCE);
  planner.addRelTraitDef(RelCollationTraitDef.INSTANCE);

  RelOptUtil.registerDefaultRules(planner, false, false);

  // Remove these rules to keep the join order deterministic.
  planner.removeRule(JoinCommuteRule.INSTANCE);
  planner.removeRule(JoinPushThroughJoinRule.LEFT);
  planner.removeRule(JoinPushThroughJoinRule.RIGHT);

  // Always use sorted agg.
  planner.addRule(EnumerableRules.ENUMERABLE_SORTED_AGGREGATE_RULE);
  planner.removeRule(EnumerableRules.ENUMERABLE_AGGREGATE_RULE);

  // Pushing down sort should be handled by top-down optimization.
  planner.removeRule(SortProjectTransposeRule.INSTANCE);

  // Sort will only be pushed down by traits propagation.
  planner.removeRule(SortJoinTransposeRule.INSTANCE);
  planner.removeRule(SortJoinCopyRule.INSTANCE);
}
Example #25
Source File: VolcanoPlannerTraitTest.java From calcite with Apache License 2.0

@Test void testPlanWithNoneConvention() {
  VolcanoPlanner planner = new VolcanoPlanner();
  planner.addRelTraitDef(ConventionTraitDef.INSTANCE);
  RelOptCluster cluster = newCluster(planner);
  NoneTinyLeafRel leaf = new NoneTinyLeafRel(cluster, "noneLeafRel");
  planner.setRoot(leaf);
  RelOptCost cost = planner.getCost(leaf, cluster.getMetadataQuery());
  assertTrue(cost.isInfinite());
  planner.setNoneConventionHasInfiniteCost(false);
  cost = planner.getCost(leaf, cluster.getMetadataQuery());
  assertTrue(!cost.isInfinite());
}
Example #26
Source File: FrameworksTest.java From calcite with Apache License 2.0

/** Test case for
 * <a href="https://issues.apache.org/jira/browse/CALCITE-3228">[CALCITE-3228]
 * Error while applying rule ProjectScanRule:interpreter</a>
 *
 * <p>This bug appears under the following conditions:
 * 1) have an aggregate with group by and multi aggregate calls.
 * 2) the aggregate can be removed during optimization.
 * 3) all aggregate calls are simplified to the same reference.
 */
@Test void testPushProjectToScan() throws Exception {
  Table table = new TableImpl();
  final SchemaPlus rootSchema = Frameworks.createRootSchema(true);
  SchemaPlus schema = rootSchema.add("x", new AbstractSchema());
  schema.add("MYTABLE", table);
  List<RelTraitDef> traitDefs = new ArrayList<>();
  traitDefs.add(ConventionTraitDef.INSTANCE);
  traitDefs.add(RelDistributionTraitDef.INSTANCE);
  SqlParser.Config parserConfig =
      SqlParser.configBuilder(SqlParser.Config.DEFAULT)
          .setCaseSensitive(false)
          .build();

  final FrameworkConfig config = Frameworks.newConfigBuilder()
      .parserConfig(parserConfig)
      .defaultSchema(schema)
      .traitDefs(traitDefs)
      // define the rules you want to apply
      .ruleSets(
          RuleSets.ofList(AbstractConverter.ExpandConversionRule.INSTANCE,
              ProjectTableScanRule.INSTANCE))
      .programs(Programs.ofRules(Programs.RULE_SET))
      .build();
  executeQuery(config,
      "select min(id) as mi, max(id) as ma from mytable where id=1 group by id",
      CalciteSystemProperty.DEBUG.value());
}
Example #27
Source File: PlannerTest.java From calcite with Apache License 2.0

/** Test case for
 * <a href="https://issues.apache.org/jira/browse/CALCITE-569">[CALCITE-569]
 * ArrayIndexOutOfBoundsException when deducing collation</a>. */
@Test void testOrderByNonSelectColumn() throws Exception {
  final SchemaPlus schema = Frameworks.createRootSchema(true)
      .add("tpch", new ReflectiveSchema(new TpchSchema()));

  String query = "select t.psPartkey from\n"
      + "(select ps.psPartkey from `tpch`.`partsupp` ps\n"
      + "order by ps.psPartkey, ps.psSupplyCost) t\n"
      + "order by t.psPartkey";

  List<RelTraitDef> traitDefs = new ArrayList<>();
  traitDefs.add(ConventionTraitDef.INSTANCE);
  traitDefs.add(RelCollationTraitDef.INSTANCE);
  final SqlParser.Config parserConfig =
      SqlParser.configBuilder().setLex(Lex.MYSQL).build();
  FrameworkConfig config = Frameworks.newConfigBuilder()
      .parserConfig(parserConfig)
      .defaultSchema(schema)
      .traitDefs(traitDefs)
      .programs(Programs.ofRules(Programs.RULE_SET))
      .build();
  String plan;
  try (Planner p = Frameworks.getPlanner(config)) {
    SqlNode n = p.parse(query);
    n = p.validate(n);
    RelNode r = p.rel(n).project();
    plan = RelOptUtil.toString(r);
    plan = Util.toLinux(plan);
  }
  assertThat(plan,
      equalTo("LogicalSort(sort0=[$0], dir0=[ASC])\n"
          + " LogicalProject(psPartkey=[$0])\n"
          + " LogicalTableScan(table=[[tpch, partsupp]])\n"));
}
Example #28
Source File: VolcanoPlannerTraitTest.java From calcite with Apache License 2.0

PhysToIteratorConverter(
    RelOptCluster cluster,
    RelNode child) {
  super(
      cluster,
      ConventionTraitDef.INSTANCE,
      child.getTraitSet().replace(EnumerableConvention.INSTANCE),
      child);
}
Example #29
Source File: PlannerImpl.java From calcite with Apache License 2.0

private void ready() {
  switch (state) {
  case STATE_0_CLOSED:
    reset();
  }
  ensure(State.STATE_1_RESET);

  RelDataTypeSystem typeSystem =
      connectionConfig.typeSystem(RelDataTypeSystem.class, RelDataTypeSystem.DEFAULT);
  typeFactory = new JavaTypeFactoryImpl(typeSystem);
  planner = new VolcanoPlanner(costFactory, context);
  RelOptUtil.registerDefaultRules(planner,
      connectionConfig.materializationsEnabled(),
      Hook.ENABLE_BINDABLE.get(false));
  planner.setExecutor(executor);

  state = State.STATE_2_READY;

  // If the user specified their own traitDefs, register those instead of the
  // default trait defs.
  if (this.traitDefs == null) {
    planner.addRelTraitDef(ConventionTraitDef.INSTANCE);
    if (CalciteSystemProperty.ENABLE_COLLATION_TRAIT.value()) {
      planner.addRelTraitDef(RelCollationTraitDef.INSTANCE);
    }
  } else {
    for (RelTraitDef def : this.traitDefs) {
      planner.addRelTraitDef(def);
    }
  }
}
Example #30
Source File: TraitPropagationTest.java From calcite with Apache License 2.0

private static RelNode run(PropAction action, RuleSet rules)
    throws Exception {
  FrameworkConfig config = Frameworks.newConfigBuilder()
      .ruleSets(rules).build();

  final Properties info = new Properties();
  final Connection connection = DriverManager
      .getConnection("jdbc:calcite:", info);
  final CalciteServerStatement statement = connection
      .createStatement().unwrap(CalciteServerStatement.class);
  final CalcitePrepare.Context prepareContext =
      statement.createPrepareContext();
  final JavaTypeFactory typeFactory = prepareContext.getTypeFactory();
  CalciteCatalogReader catalogReader =
      new CalciteCatalogReader(prepareContext.getRootSchema(),
          prepareContext.getDefaultSchemaPath(),
          typeFactory,
          prepareContext.config());

  final RexBuilder rexBuilder = new RexBuilder(typeFactory);
  final RelOptPlanner planner =
      new VolcanoPlanner(config.getCostFactory(), config.getContext());

  // set up rules before we generate cluster
  planner.clearRelTraitDefs();
  planner.addRelTraitDef(RelCollationTraitDef.INSTANCE);
  planner.addRelTraitDef(ConventionTraitDef.INSTANCE);

  planner.clear();
  for (RelOptRule r : rules) {
    planner.addRule(r);
  }

  final RelOptCluster cluster = RelOptCluster.create(planner, rexBuilder);
  return action.apply(cluster, catalogReader,
      prepareContext.getRootSchema().plus());
}