Java Code Examples for org.apache.calcite.rel.RelNode#getInput()
The following examples show how to use org.apache.calcite.rel.RelNode#getInput().
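RelNode#getInput(int i) returns the i-th input (child) of a relational expression, so the recurring pattern in the examples below is walking down a plan tree one child at a time. As a quick orientation, here is a minimal illustrative sketch of that pattern; the class and method names are ours, not taken from any of the projects below. Note that getInput(i) effectively returns getInputs().get(i), so it should only be called on nodes that actually have an i-th input.

import org.apache.calcite.rel.RelNode;
import org.apache.calcite.rel.core.TableScan;

public final class RelInputWalker {
  private RelInputWalker() {
  }

  // Hypothetical helper: follows getInput(0) from the given root until it
  // reaches a TableScan or a node with no inputs, and returns that node.
  public static RelNode leftmostLeaf(RelNode root) {
    RelNode node = root;
    while (!(node instanceof TableScan) && !node.getInputs().isEmpty()) {
      node = node.getInput(0);
    }
    return node;
  }
}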
Example 1
Source File: MoreRelOptUtil.java From dremio-oss with Apache License 2.0 | 6 votes |
/**
 * Constructor.
 *
 * @param root The root of the plan.
 */
public OrderByInSubQueryRemover(RelNode root) {
  // Identify if there is either a sort at the plan root, or
  // a Project with a sort at the plan root. These Sorts should not
  // be skipped since ORDER BY is always legal there.
  if (root instanceof Sort) {
    this.topLevelSort = (Sort) root;
  } else if (root instanceof Project
      && root.getInput(0) instanceof Sort) {
    this.topLevelSort = (Sort) root.getInput(0);
  } else {
    this.topLevelSort = null;
  }
}
Example 2
Source File: JoinCommuteRule.java From calcite with Apache License 2.0 | 5 votes |
public void onMatch(final RelOptRuleCall call) {
  Join join = call.rel(0);

  final RelNode swapped = swap(join, this.swapOuter, call.builder());
  if (swapped == null) {
    return;
  }

  // The result is either a Project or, if the project is trivial, a
  // raw Join.
  final Join newJoin = swapped instanceof Join
      ? (Join) swapped
      : (Join) swapped.getInput(0);

  call.transformTo(swapped);

  // We have converted join='a join b' into swapped='select
  // a0,a1,a2,b0,b1 from b join a'. Now register that project='select
  // b0,b1,a0,a1,a2 from (select a0,a1,a2,b0,b1 from b join a)' is the
  // same as 'b join a'. If we didn't do this, the swap join rule
  // would fire on the new join, ad infinitum.
  final RelBuilder relBuilder = call.builder();
  final List<RexNode> exps =
      RelOptUtil.createSwappedJoinExprs(newJoin, join, false);
  relBuilder.push(swapped)
      .project(exps, newJoin.getRowType().getFieldNames());

  call.getPlanner().ensureRegistered(relBuilder.build(), newJoin);
}
Example 3
Source File: AbstractMaterializedViewRule.java From Bats with Apache License 2.0 | 5 votes |
@Override public Pair<RelNode, RelNode> pushFilterToOriginalViewPlan(RelBuilder builder,
    RelNode topViewProject, RelNode viewNode, RexNode cond) {
  // We add (and push) the filter to the view plan before triggering the rewriting.
  // This is useful in case some of the columns can be folded to same value after
  // filter is added.
  HepProgramBuilder pushFiltersProgram = new HepProgramBuilder();
  if (topViewProject != null) {
    pushFiltersProgram.addRuleInstance(filterProjectTransposeRule);
  }
  pushFiltersProgram.addRuleInstance(this.filterAggregateTransposeRule)
      .addRuleInstance(this.aggregateProjectPullUpConstantsRule)
      .addRuleInstance(this.projectMergeRule);
  final HepPlanner tmpPlanner = new HepPlanner(pushFiltersProgram.build());
  // Now that the planner is created, push the node
  RelNode topNode = builder.push(topViewProject != null ? topViewProject : viewNode)
      .filter(cond).build();
  tmpPlanner.setRoot(topNode);
  topNode = tmpPlanner.findBestExp();
  RelNode resultTopViewProject = null;
  RelNode resultViewNode = null;
  while (topNode != null) {
    if (topNode instanceof Project) {
      if (resultTopViewProject != null) {
        // Both projects could not be merged, we will bail out
        return Pair.of(topViewProject, viewNode);
      }
      resultTopViewProject = topNode;
      topNode = topNode.getInput(0);
    } else if (topNode instanceof Aggregate) {
      resultViewNode = topNode;
      topNode = null;
    } else {
      // We move to the child
      topNode = topNode.getInput(0);
    }
  }
  return Pair.of(resultTopViewProject, resultViewNode);
}
Example 4
Source File: JoinCommuteRule.java From Bats with Apache License 2.0 | 5 votes |
@Override public void onMatch(final RelOptRuleCall call) {
  Join join = call.rel(0);

  if (!join.getSystemFieldList().isEmpty()) {
    // FIXME Enable this rule for joins with system fields
    return;
  }

  final RelNode swapped = swap(join, this.swapOuter, call.builder());
  if (swapped == null) {
    return;
  }

  // The result is either a Project or, if the project is trivial, a
  // raw Join.
  final Join newJoin = swapped instanceof Join
      ? (Join) swapped
      : (Join) swapped.getInput(0);

  call.transformTo(swapped);

  // We have converted join='a join b' into swapped='select
  // a0,a1,a2,b0,b1 from b join a'. Now register that project='select
  // b0,b1,a0,a1,a2 from (select a0,a1,a2,b0,b1 from b join a)' is the
  // same as 'b join a'. If we didn't do this, the swap join rule
  // would fire on the new join, ad infinitum.
  final RelBuilder relBuilder = call.builder();
  final List<RexNode> exps =
      RelOptUtil.createSwappedJoinExprs(newJoin, join, false);
  relBuilder.push(swapped)
      .project(exps, newJoin.getRowType().getFieldNames());

  call.getPlanner().ensureRegistered(relBuilder.build(), newJoin);
}
Example 5
Source File: MaterializedViewAggregateRule.java From calcite with Apache License 2.0 | 5 votes |
@Override public Pair<RelNode, RelNode> pushFilterToOriginalViewPlan(RelBuilder builder,
    RelNode topViewProject, RelNode viewNode, RexNode cond) {
  // We add (and push) the filter to the view plan before triggering the rewriting.
  // This is useful in case some of the columns can be folded to same value after
  // filter is added.
  HepProgramBuilder pushFiltersProgram = new HepProgramBuilder();
  if (topViewProject != null) {
    pushFiltersProgram.addRuleInstance(filterProjectTransposeRule);
  }
  pushFiltersProgram
      .addRuleInstance(this.filterAggregateTransposeRule)
      .addRuleInstance(this.aggregateProjectPullUpConstantsRule)
      .addRuleInstance(this.projectMergeRule);
  final HepPlanner tmpPlanner = new HepPlanner(pushFiltersProgram.build());
  // Now that the planner is created, push the node
  RelNode topNode = builder
      .push(topViewProject != null ? topViewProject : viewNode)
      .filter(cond).build();
  tmpPlanner.setRoot(topNode);
  topNode = tmpPlanner.findBestExp();
  RelNode resultTopViewProject = null;
  RelNode resultViewNode = null;
  while (topNode != null) {
    if (topNode instanceof Project) {
      if (resultTopViewProject != null) {
        // Both projects could not be merged, we will bail out
        return Pair.of(topViewProject, viewNode);
      }
      resultTopViewProject = topNode;
      topNode = topNode.getInput(0);
    } else if (topNode instanceof Aggregate) {
      resultViewNode = topNode;
      topNode = null;
    } else {
      // We move to the child
      topNode = topNode.getInput(0);
    }
  }
  return Pair.of(resultTopViewProject, resultViewNode);
}
Example 6
Source File: TraitConversionTest.java From calcite with Apache License 2.0 | 5 votes |
@Test void testTraitConversion() {
  final VolcanoPlanner planner = new VolcanoPlanner();
  planner.addRelTraitDef(ConventionTraitDef.INSTANCE);
  planner.addRelTraitDef(NEW_TRAIT_DEF_INSTANCE);

  planner.addRule(new RandomSingleTraitRule());
  planner.addRule(new SingleLeafTraitRule());
  planner.addRule(ExpandConversionRule.INSTANCE);
  planner.setTopDownOpt(false);

  final RelOptCluster cluster = newCluster(planner);
  final NoneLeafRel leafRel = new NoneLeafRel(cluster, "a");
  final NoneSingleRel singleRel = new NoneSingleRel(cluster, leafRel);
  final RelNode convertedRel =
      planner.changeTraits(singleRel, cluster.traitSetOf(PHYS_CALLING_CONVENTION));
  planner.setRoot(convertedRel);
  final RelNode result = planner.chooseDelegate().findBestExp();

  assertTrue(result instanceof RandomSingleRel);
  assertTrue(result.getTraitSet().contains(PHYS_CALLING_CONVENTION));
  assertTrue(result.getTraitSet().contains(SIMPLE_DISTRIBUTION_RANDOM));

  final RelNode input = result.getInput(0);
  assertTrue(input instanceof BridgeRel);
  assertTrue(input.getTraitSet().contains(PHYS_CALLING_CONVENTION));
  assertTrue(input.getTraitSet().contains(SIMPLE_DISTRIBUTION_RANDOM));

  final RelNode input2 = input.getInput(0);
  assertTrue(input2 instanceof SingletonLeafRel);
  assertTrue(input2.getTraitSet().contains(PHYS_CALLING_CONVENTION));
  assertTrue(input2.getTraitSet().contains(SIMPLE_DISTRIBUTION_SINGLETON));
}
Example 7
Source File: TableUtil.java From sql-gremlin with Apache License 2.0 | 5 votes |
public static TableDef getTableDef(RelNode parent) {
  if (parent instanceof GremlinTableScan) {
    final GremlinTableScan scan = (GremlinTableScan) parent;
    return scan.getGremlinTable().getTableDef();
  } else {
    if (parent.getInput(0) != null) {
      return getTableDef(parent.getInput(0));
    } else {
      return null;
    }
  }
}
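One detail worth noting about the example above: on a leaf node that is not a GremlinTableScan, parent.getInput(0) itself throws IndexOutOfBoundsException before the null check can take effect, because the node has no inputs. A hedged alternative sketch (reusing the example's GremlinTableScan and TableDef types, with a method name of our own) checks getInputs() first:

public static TableDef findTableDef(RelNode node) {
  if (node instanceof GremlinTableScan) {
    return ((GremlinTableScan) node).getGremlinTable().getTableDef();
  }
  // Guard against leaf nodes: getInput(0) would fail if there are no inputs.
  if (node.getInputs().isEmpty()) {
    return null;
  }
  return findTableDef(node.getInput(0));
}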
Example 8
Source File: RexSubQueryUtils.java From dremio-oss with Apache License 2.0 | 5 votes |
@Override protected RelNode visitChild(RelNode parent, int i, RelNode child) {
  RelNode newParent = parent;
  if (parent instanceof JdbcRelImpl) {
    transformer.setTraitSet(parent.getTraitSet()
        .plus(DistributionTrait.ANY)
        .plus(RelCollations.EMPTY));
    newParent = parent.accept(transformer);
  }
  return super.visitChild(newParent, i, newParent.getInput(i));
}
Example 9
Source File: TestPushLimitToPruneableScan.java From dremio-oss with Apache License 2.0 | 5 votes |
public static Checker transformed(int newOffset, int newFetch, PartitionChunkMetadata... newChunks) {
  return new Checker() {
    @Override
    public boolean shouldTransform() {
      return true;
    }

    @Override
    public void check(LimitPrel originalLimit, TestScanPrel originalScan, RelNode newNode) {
      assertThat(newNode, is(instanceOf(LimitPrel.class)));
      assertThat(RexLiteral.intValue(((LimitPrel) newNode).getOffset()), is(newOffset));
      assertThat(RexLiteral.intValue(((LimitPrel) newNode).getFetch()), is(newFetch));
      assertThat(newNode.getRowType(), is(originalLimit.getRowType()));

      RelNode input = newNode.getInput(0);
      assertThat(input, is(instanceOf(TestScanPrel.class)));
      assertThat(input.getRowType(), is(originalLimit.getRowType()));
      assertThat(((TestScanPrel) input).getTableMetadata().getSplitCount(), is(newChunks.length));

      final List<PartitionChunkMetadata> chunks = new ArrayList<>();
      ((TestScanPrel) input).getTableMetadata().getSplits().forEachRemaining(chunks::add);
      assertThat(chunks, Matchers.contains(newChunks));
    }

    @Override
    public String toString() {
      return String.format("Transformation to limit(offset: %d, fetch: %d) + scan with %d chunk(s)",
          newOffset, newFetch, newChunks.length);
    }
  };
}
Example 10
Source File: CollationConversionTest.java From calcite with Apache License 2.0 | 5 votes |
@Test void testCollationConversion() {
  final VolcanoPlanner planner = new VolcanoPlanner();
  planner.addRelTraitDef(ConventionTraitDef.INSTANCE);
  planner.addRelTraitDef(COLLATION_TRAIT_DEF);

  planner.addRule(new SingleNodeRule());
  planner.addRule(new LeafTraitRule());
  planner.addRule(ExpandConversionRule.INSTANCE);
  planner.setTopDownOpt(false);

  final RelOptCluster cluster = newCluster(planner);
  final NoneLeafRel leafRel = new NoneLeafRel(cluster, "a");
  final NoneSingleRel singleRel = new NoneSingleRel(cluster, leafRel);
  final RelNode convertedRel = planner.changeTraits(singleRel,
      cluster.traitSetOf(PHYS_CALLING_CONVENTION).plus(ROOT_COLLATION));
  planner.setRoot(convertedRel);
  RelNode result = planner.chooseDelegate().findBestExp();

  assertTrue(result instanceof RootSingleRel);
  assertTrue(result.getTraitSet().contains(ROOT_COLLATION));
  assertTrue(result.getTraitSet().contains(PHYS_CALLING_CONVENTION));

  final RelNode input = result.getInput(0);
  assertTrue(input instanceof PhysicalSort);
  assertTrue(result.getTraitSet().contains(ROOT_COLLATION));
  assertTrue(input.getTraitSet().contains(PHYS_CALLING_CONVENTION));

  final RelNode input2 = input.getInput(0);
  assertTrue(input2 instanceof LeafRel);
  assertTrue(input2.getTraitSet().contains(LEAF_COLLATION));
  assertTrue(input.getTraitSet().contains(PHYS_CALLING_CONVENTION));
}
Example 11
Source File: RexSubQueryUtils.java From dremio-oss with Apache License 2.0 | 4 votes |
@Override protected RelNode visitChild(RelNode parent, int i, RelNode child) {
  RelNode newParent = parent.accept(flattener);
  return super.visitChild(newParent, i, newParent.getInput(i));
}
Example 12
Source File: MaterializedViewAggregateRule.java From calcite with Apache License 2.0 | 4 votes |
@Override protected RelNode rewriteQuery(
    RelBuilder relBuilder,
    RexBuilder rexBuilder,
    RexSimplify simplify,
    RelMetadataQuery mq,
    RexNode compensationColumnsEquiPred,
    RexNode otherCompensationPred,
    Project topProject,
    RelNode node,
    BiMap<RelTableRef, RelTableRef> queryToViewTableMapping,
    EquivalenceClasses viewEC, EquivalenceClasses queryEC) {
  Aggregate aggregate = (Aggregate) node;

  // Our target node is the node below the root, which should have the maximum
  // number of available expressions in the tree in order to maximize our
  // number of rewritings.
  // If the program is available, we execute it to maximize rewriting opportunities.
  // For instance, a program might pull up all the expressions that are below the
  // aggregate so we can introduce compensation filters easily. This is important
  // depending on the planner strategy.
  RelNode newAggregateInput = aggregate.getInput(0);
  RelNode target = aggregate.getInput(0);
  if (unionRewritingPullProgram != null) {
    final HepPlanner tmpPlanner = new HepPlanner(unionRewritingPullProgram);
    tmpPlanner.setRoot(newAggregateInput);
    newAggregateInput = tmpPlanner.findBestExp();
    target = newAggregateInput.getInput(0);
  }

  // We need to check that all columns required by compensating predicates
  // are contained in the query.
  List<RexNode> queryExprs = extractReferences(rexBuilder, target);
  if (!compensationColumnsEquiPred.isAlwaysTrue()) {
    compensationColumnsEquiPred = rewriteExpression(rexBuilder, mq,
        target, target, queryExprs, queryToViewTableMapping, queryEC, false,
        compensationColumnsEquiPred);
    if (compensationColumnsEquiPred == null) {
      // Skip it
      return null;
    }
  }
  // For the rest, we use the query equivalence classes
  if (!otherCompensationPred.isAlwaysTrue()) {
    otherCompensationPred = rewriteExpression(rexBuilder, mq,
        target, target, queryExprs, queryToViewTableMapping, viewEC, true,
        otherCompensationPred);
    if (otherCompensationPred == null) {
      // Skip it
      return null;
    }
  }
  final RexNode queryCompensationPred = RexUtil.not(
      RexUtil.composeConjunction(rexBuilder,
          ImmutableList.of(compensationColumnsEquiPred,
              otherCompensationPred)));

  // Generate query rewriting.
  RelNode rewrittenPlan = relBuilder
      .push(target)
      .filter(simplify.simplifyUnknownAsFalse(queryCompensationPred))
      .build();
  if (unionRewritingPullProgram != null) {
    return aggregate.copy(aggregate.getTraitSet(),
        ImmutableList.of(
            newAggregateInput.copy(newAggregateInput.getTraitSet(),
                ImmutableList.of(rewrittenPlan))));
  }
  return aggregate.copy(aggregate.getTraitSet(), ImmutableList.of(rewrittenPlan));
}
Example 13
Source File: MaterializedViewJoinRule.java From calcite with Apache License 2.0 | 4 votes |
@Override protected RelNode rewriteQuery(
    RelBuilder relBuilder,
    RexBuilder rexBuilder,
    RexSimplify simplify,
    RelMetadataQuery mq,
    RexNode compensationColumnsEquiPred,
    RexNode otherCompensationPred,
    Project topProject,
    RelNode node,
    BiMap<RelTableRef, RelTableRef> viewToQueryTableMapping,
    EquivalenceClasses viewEC, EquivalenceClasses queryEC) {
  // Our target node is the node below the root, which should have the maximum
  // number of available expressions in the tree in order to maximize our
  // number of rewritings.
  // We create a project on top. If the program is available, we execute
  // it to maximize rewriting opportunities. For instance, a program might
  // pull up all the expressions that are below the aggregate so we can
  // introduce compensation filters easily. This is important depending on
  // the planner strategy.
  RelNode newNode = node;
  RelNode target = node;
  if (unionRewritingPullProgram != null) {
    final HepPlanner tmpPlanner = new HepPlanner(unionRewritingPullProgram);
    tmpPlanner.setRoot(newNode);
    newNode = tmpPlanner.findBestExp();
    target = newNode.getInput(0);
  }

  // All columns required by compensating predicates must be contained
  // in the query.
  List<RexNode> queryExprs = extractReferences(rexBuilder, target);
  if (!compensationColumnsEquiPred.isAlwaysTrue()) {
    compensationColumnsEquiPred = rewriteExpression(rexBuilder, mq,
        target, target, queryExprs, viewToQueryTableMapping.inverse(), queryEC, false,
        compensationColumnsEquiPred);
    if (compensationColumnsEquiPred == null) {
      // Skip it
      return null;
    }
  }
  // For the rest, we use the query equivalence classes
  if (!otherCompensationPred.isAlwaysTrue()) {
    otherCompensationPred = rewriteExpression(rexBuilder, mq,
        target, target, queryExprs, viewToQueryTableMapping.inverse(), viewEC, true,
        otherCompensationPred);
    if (otherCompensationPred == null) {
      // Skip it
      return null;
    }
  }
  final RexNode queryCompensationPred = RexUtil.not(
      RexUtil.composeConjunction(rexBuilder,
          ImmutableList.of(compensationColumnsEquiPred,
              otherCompensationPred)));

  // Generate query rewriting.
  RelNode rewrittenPlan = relBuilder
      .push(target)
      .filter(simplify.simplifyUnknownAsFalse(queryCompensationPred))
      .build();
  if (unionRewritingPullProgram != null) {
    rewrittenPlan = newNode.copy(
        newNode.getTraitSet(), ImmutableList.of(rewrittenPlan));
  }
  if (topProject != null) {
    return topProject.copy(topProject.getTraitSet(), ImmutableList.of(rewrittenPlan));
  }
  return rewrittenPlan;
}
Example 14
Source File: AbstractMaterializedViewRule.java From Bats with Apache License 2.0 | 4 votes |
@Override protected RelNode rewriteQuery(RelBuilder relBuilder, RexBuilder rexBuilder,
    RexSimplify simplify, RelMetadataQuery mq, RexNode compensationColumnsEquiPred,
    RexNode otherCompensationPred, Project topProject, RelNode node,
    BiMap<RelTableRef, RelTableRef> viewToQueryTableMapping,
    EquivalenceClasses viewEC, EquivalenceClasses queryEC) {
  // Our target node is the node below the root, which should have the maximum
  // number of available expressions in the tree in order to maximize our
  // number of rewritings.
  // We create a project on top. If the program is available, we execute
  // it to maximize rewriting opportunities. For instance, a program might
  // pull up all the expressions that are below the aggregate so we can
  // introduce compensation filters easily. This is important depending on
  // the planner strategy.
  RelNode newNode = node;
  RelNode target = node;
  if (unionRewritingPullProgram != null) {
    final HepPlanner tmpPlanner = new HepPlanner(unionRewritingPullProgram);
    tmpPlanner.setRoot(newNode);
    newNode = tmpPlanner.findBestExp();
    target = newNode.getInput(0);
  }

  // All columns required by compensating predicates must be contained
  // in the query.
  List<RexNode> queryExprs = extractReferences(rexBuilder, target);
  if (!compensationColumnsEquiPred.isAlwaysTrue()) {
    compensationColumnsEquiPred = rewriteExpression(rexBuilder, mq, target, target,
        queryExprs, viewToQueryTableMapping.inverse(), queryEC, false,
        compensationColumnsEquiPred);
    if (compensationColumnsEquiPred == null) {
      // Skip it
      return null;
    }
  }
  // For the rest, we use the query equivalence classes
  if (!otherCompensationPred.isAlwaysTrue()) {
    otherCompensationPred = rewriteExpression(rexBuilder, mq, target, target,
        queryExprs, viewToQueryTableMapping.inverse(), viewEC, true,
        otherCompensationPred);
    if (otherCompensationPred == null) {
      // Skip it
      return null;
    }
  }
  final RexNode queryCompensationPred = RexUtil.not(RexUtil.composeConjunction(rexBuilder,
      ImmutableList.of(compensationColumnsEquiPred, otherCompensationPred)));

  // Generate query rewriting.
  RelNode rewrittenPlan = relBuilder.push(target)
      .filter(simplify.simplifyUnknownAsFalse(queryCompensationPred)).build();
  if (unionRewritingPullProgram != null) {
    rewrittenPlan = newNode.copy(newNode.getTraitSet(), ImmutableList.of(rewrittenPlan));
  }
  if (topProject != null) {
    return topProject.copy(topProject.getTraitSet(), ImmutableList.of(rewrittenPlan));
  }
  return rewrittenPlan;
}
Example 15
Source File: MycatCalcitePlanner.java From Mycat2 with GNU General Public License v3.0 | 4 votes |
/**
 * Tests two cases: sharded tables within a single database, and shards on different targets.
 *
 * @param relBuilder
 * @param cache
 * @param margeList
 * @param bestExp2
 * @return
 */
private RelNode simplyAggreate(MycatRelBuilder relBuilder,
    IdentityHashMap<RelNode, Boolean> cache,
    IdentityHashMap<RelNode, List<String>> margeList,
    RelNode bestExp2) {
  RelNode parent = bestExp2;
  RelNode child = bestExp2 instanceof Aggregate ? bestExp2.getInput(0) : null;
  RelNode bestExp3 = parent;
  if (parent instanceof Aggregate && child instanceof Union) {
    Aggregate aggregate = (Aggregate) parent;
    if (aggregate.getAggCallList() != null && !aggregate.getAggCallList().isEmpty()) { // for DISTINCT the aggregate call list may be empty
      List<AggregateCall> aggCallList = aggregate.getAggCallList();
      boolean allMatch = aggregate.getRowType().getFieldCount() == 1
          && aggCallList.stream().allMatch(new Predicate<AggregateCall>() {
            @Override
            public boolean test(AggregateCall aggregateCall) {
              return SUPPORTED_AGGREGATES.getOrDefault(aggregateCall.getAggregation().getKind(), false)
                  && aggregate.getRowType().getFieldList().stream()
                      .allMatch(i -> i.getType().getSqlTypeName().getFamily() == SqlTypeFamily.NUMERIC);
            }
          });
      if (allMatch) {
        List<RelNode> inputs = child.getInputs();
        List<RelNode> resList = new ArrayList<>(inputs.size());
        boolean allCanPush = true; // whether the aggregate node involves different shards
        String target = null;
        for (RelNode input : inputs) {
          RelNode res;
          if (cache.get(input)) {
            res = LogicalAggregate.create(input, aggregate.getGroupSet(),
                aggregate.getGroupSets(), aggregate.getAggCallList());
            cache.put(res, Boolean.TRUE);
            List<String> strings = margeList.getOrDefault(input, Collections.emptyList());
            Objects.requireNonNull(strings);
            if (target == null && strings.size() > 0) {
              target = strings.get(0);
            } else if (target != null && strings.size() > 0) {
              if (!target.equals(strings.get(0))) {
                allCanPush = false;
              }
            }
            margeList.put(res, strings);
          } else {
            res = input;
            allCanPush = false;
          }
          resList.add(res);
        }
        LogicalUnion logicalUnion = LogicalUnion.create(resList, ((Union) child).all);
        // construct the sum
        relBuilder.clear();
        relBuilder.push(logicalUnion);
        List<RexNode> fields = relBuilder.fields();
        if (fields == null) {
          fields = Collections.emptyList();
        }
        RelBuilder.GroupKey groupKey = relBuilder.groupKey();
        List<RelBuilder.AggCall> aggCalls =
            fields.stream().map(i -> relBuilder.sum(i)).collect(Collectors.toList());
        relBuilder.aggregate(groupKey, aggCalls);
        bestExp3 = relBuilder.build();
        cache.put(logicalUnion, allCanPush);
        cache.put(bestExp3, allCanPush);
        if (target != null) { // whether the aggregate node involves different shards
          List<String> targetSingelList = Collections.singletonList(target);
          margeList.put(logicalUnion, targetSingelList);
          margeList.put(bestExp3, targetSingelList);
        }
      }
    }
  }
  return bestExp3;
}
Example 16
Source File: RelDecorrelator.java From flink with Apache License 2.0 | 4 votes |
private RelNode getCorRel(CorRef corVar) {
  final RelNode r = cm.mapCorToCorRel.get(corVar.corr);
  return r.getInput(0);
}
Example 17
Source File: RelDecorrelator.java From flink with Apache License 2.0 | 4 votes |
private RelNode getCorRel(CorRef corVar) {
  final RelNode r = cm.mapCorToCorRel.get(corVar.corr);
  return r.getInput(0);
}
Example 18
Source File: RelMetadataTest.java From calcite with Apache License 2.0 | 4 votes |
@Test void testCustomProviderWithRelMetadataQuery() {
  final List<String> buf = new ArrayList<>();
  ColTypeImpl.THREAD_LIST.set(buf);

  final String sql = "select deptno, count(*) from emp where deptno > 10 "
      + "group by deptno having count(*) = 0";
  final RelRoot root = tester
      .withClusterFactory(cluster -> {
        // Create a custom provider that includes ColType.
        // Include the same provider twice just to be devious.
        final ImmutableList<RelMetadataProvider> list =
            ImmutableList.of(ColTypeImpl.SOURCE, ColTypeImpl.SOURCE,
                cluster.getMetadataProvider());
        cluster.setMetadataProvider(
            ChainedRelMetadataProvider.of(list));
        cluster.setMetadataQuerySupplier(MyRelMetadataQuery::new);
        return cluster;
      })
      .convertSqlToRel(sql);
  final RelNode rel = root.rel;

  // Top node is a filter. Its metadata uses getColType(RelNode, int).
  assertThat(rel, instanceOf(LogicalFilter.class));
  assertThat(rel.getCluster().getMetadataQuery(), instanceOf(MyRelMetadataQuery.class));
  final MyRelMetadataQuery mq = (MyRelMetadataQuery) rel.getCluster().getMetadataQuery();
  assertThat(colType(mq, rel, 0), equalTo("DEPTNO-rel"));
  assertThat(colType(mq, rel, 1), equalTo("EXPR$1-rel"));

  // Next node is an aggregate. Its metadata uses
  // getColType(LogicalAggregate, int).
  final RelNode input = rel.getInput(0);
  assertThat(input, instanceOf(LogicalAggregate.class));
  assertThat(colType(mq, input, 0), equalTo("DEPTNO-agg"));

  // The metadata query is caching, only the first request for each piece of metadata
  // generates a new call to the provider.
  assertThat(buf.toString(), equalTo("[DEPTNO-rel, EXPR$1-rel, DEPTNO-agg]"));
  assertThat(buf.size(), equalTo(3));
  assertThat(colType(mq, input, 0), equalTo("DEPTNO-agg"));
  assertThat(buf.size(), equalTo(3));
  assertThat(colType(mq, input, 0), equalTo("DEPTNO-agg"));
  assertThat(buf.size(), equalTo(3));
  assertThat(colType(mq, input, 1), equalTo("EXPR$1-agg"));
  assertThat(buf.size(), equalTo(4));
  assertThat(colType(mq, input, 1), equalTo("EXPR$1-agg"));
  assertThat(buf.size(), equalTo(4));
  assertThat(colType(mq, input, 0), equalTo("DEPTNO-agg"));
  assertThat(buf.size(), equalTo(4));

  // Invalidate the metadata query triggers clearing of all the metadata.
  rel.getCluster().invalidateMetadataQuery();
  assertThat(rel.getCluster().getMetadataQuery(), instanceOf(MyRelMetadataQuery.class));
  final MyRelMetadataQuery mq1 = (MyRelMetadataQuery) rel.getCluster().getMetadataQuery();
  assertThat(colType(mq1, input, 0), equalTo("DEPTNO-agg"));
  assertThat(buf.size(), equalTo(5));
  assertThat(colType(mq1, input, 0), equalTo("DEPTNO-agg"));
  assertThat(buf.size(), equalTo(5));
  // Resets the RelMetadataQuery to default.
  rel.getCluster().setMetadataQuerySupplier(RelMetadataQuery::instance);
}
Example 19
Source File: RelDecorrelator.java From calcite with Apache License 2.0 | 4 votes |
private RelNode getCorRel(CorRef corVar) {
  final RelNode r = cm.mapCorToCorRel.get(corVar.corr);
  return r.getInput(0);
}
Example 20
Source File: AbstractMaterializedViewRule.java From Bats with Apache License 2.0 | 4 votes |
@Override protected RelNode rewriteQuery(RelBuilder relBuilder, RexBuilder rexBuilder,
    RexSimplify simplify, RelMetadataQuery mq, RexNode compensationColumnsEquiPred,
    RexNode otherCompensationPred, Project topProject, RelNode node,
    BiMap<RelTableRef, RelTableRef> queryToViewTableMapping,
    EquivalenceClasses viewEC, EquivalenceClasses queryEC) {
  Aggregate aggregate = (Aggregate) node;

  // Our target node is the node below the root, which should have the maximum
  // number of available expressions in the tree in order to maximize our
  // number of rewritings.
  // If the program is available, we execute it to maximize rewriting opportunities.
  // For instance, a program might pull up all the expressions that are below the
  // aggregate so we can introduce compensation filters easily. This is important
  // depending on the planner strategy.
  RelNode newAggregateInput = aggregate.getInput(0);
  RelNode target = aggregate.getInput(0);
  if (unionRewritingPullProgram != null) {
    final HepPlanner tmpPlanner = new HepPlanner(unionRewritingPullProgram);
    tmpPlanner.setRoot(newAggregateInput);
    newAggregateInput = tmpPlanner.findBestExp();
    target = newAggregateInput.getInput(0);
  }

  // We need to check that all columns required by compensating predicates
  // are contained in the query.
  List<RexNode> queryExprs = extractReferences(rexBuilder, target);
  if (!compensationColumnsEquiPred.isAlwaysTrue()) {
    compensationColumnsEquiPred = rewriteExpression(rexBuilder, mq, target, target,
        queryExprs, queryToViewTableMapping, queryEC, false,
        compensationColumnsEquiPred);
    if (compensationColumnsEquiPred == null) {
      // Skip it
      return null;
    }
  }
  // For the rest, we use the query equivalence classes
  if (!otherCompensationPred.isAlwaysTrue()) {
    otherCompensationPred = rewriteExpression(rexBuilder, mq, target, target,
        queryExprs, queryToViewTableMapping, viewEC, true,
        otherCompensationPred);
    if (otherCompensationPred == null) {
      // Skip it
      return null;
    }
  }
  final RexNode queryCompensationPred = RexUtil.not(RexUtil.composeConjunction(rexBuilder,
      ImmutableList.of(compensationColumnsEquiPred, otherCompensationPred)));

  // Generate query rewriting.
  RelNode rewrittenPlan = relBuilder.push(target)
      .filter(simplify.simplifyUnknownAsFalse(queryCompensationPred)).build();
  if (unionRewritingPullProgram != null) {
    return aggregate.copy(aggregate.getTraitSet(), ImmutableList
        .of(newAggregateInput.copy(newAggregateInput.getTraitSet(),
            ImmutableList.of(rewrittenPlan))));
  }
  return aggregate.copy(aggregate.getTraitSet(), ImmutableList.of(rewrittenPlan));
}