org.apache.calcite.plan.volcano.VolcanoPlanner Java Examples
The following examples show how to use
org.apache.calcite.plan.volcano.VolcanoPlanner.
You can vote up the ones you like or vote down the ones you don't like,
and go to the original project or source file by following the links above each example. You may check out the related API usage on the sidebar.
Example #1
Source File: TestPushLimitToPruneableScan.java From dremio-oss with Apache License 2.0 | 6 votes |
/**
 * Builds the test fixtures: a mocked option manager returning default option
 * values, {@code PlannerSettings}, a Volcano-backed {@code RelOptCluster} and
 * table metadata over three pre-canned partition chunks.
 */
@Before
public void setUp() {
  MockitoAnnotations.initMocks(this);
  // Stub the options PlannerSettings reads so it sees their default values.
  when(optionManager.getOption(eq(ExecConstants.SLICE_TARGET)))
      .thenReturn(ExecConstants.SLICE_TARGET_OPTION.getDefault());
  when(optionManager.getOption(eq(PlannerSettings.ENABLE_LEAF_LIMITS.getOptionName())))
      .thenReturn(PlannerSettings.ENABLE_LEAF_LIMITS.getDefault());
  when(optionManager.getOption(eq(PlannerSettings.ENABLE_TRIVIAL_SINGULAR.getOptionName())))
      .thenReturn(PlannerSettings.ENABLE_TRIVIAL_SINGULAR.getDefault());
  when(optionManager.getOptionValidatorListing()).thenReturn(mock(OptionValidatorListing.class));
  // Pretend the cluster has exactly one executor node.
  ClusterResourceInformation info = mock(ClusterResourceInformation.class);
  when(info.getExecutorNodeCount()).thenReturn(1);
  plannerSettings =
      new PlannerSettings(DremioTest.DEFAULT_SABOT_CONFIG, optionManager, () -> info);
  cluster = RelOptCluster.create(new VolcanoPlanner(plannerSettings), REX_BUILDER);
  // Table metadata backed by three fixed partition chunks (split version 0).
  metadata = new TableMetadataImpl(pluginId, DATASET_CONFIG, "testuser",
      MaterializedSplitsPointer.of(0, Arrays.asList(
          TEST_PARTITION_CHUNK_METADATA_1,
          TEST_PARTITION_CHUNK_METADATA_2,
          TEST_PARTITION_CHUNK_METADATA_3
      ), 3));
}
Example #2
Source File: SqlHintsConverterTest.java From calcite with Apache License 2.0 | 6 votes |
/**
 * Verifies that the {@code use_merge_join} SQL hint is honored when the query
 * is optimized by a VolcanoPlanner with the Enumerable rule set.
 */
@Test void testUseMergeJoin() {
  final String sql = "select /*+ use_merge_join(emp, dept) */\n"
      + "ename, job, sal, dept.name\n"
      + "from emp join dept on emp.deptno = dept.deptno";
  RelOptPlanner planner = new VolcanoPlanner();
  planner.addRelTraitDef(ConventionTraitDef.INSTANCE);
  planner.addRelTraitDef(RelCollationTraitDef.INSTANCE);
  // Route the tester's cluster through our planner so the hint survives
  // SQL-to-Rel conversion and reaches the Volcano optimization below.
  Tester tester1 = tester.withDecorrelation(true)
      .withClusterFactory(
          relOptCluster -> RelOptCluster.create(planner, relOptCluster.getRexBuilder()));
  final RelNode rel = tester1.convertSqlToRel(sql).rel;
  RuleSet ruleSet = RuleSets.ofList(
      EnumerableRules.ENUMERABLE_MERGE_JOIN_RULE,
      EnumerableRules.ENUMERABLE_JOIN_RULE,
      EnumerableRules.ENUMERABLE_PROJECT_RULE,
      EnumerableRules.ENUMERABLE_TABLE_SCAN_RULE,
      EnumerableRules.ENUMERABLE_SORT_RULE,
      AbstractConverter.ExpandConversionRule.INSTANCE);
  Program program = Programs.of(ruleSet);
  RelTraitSet toTraits = rel
      .getCluster()
      .traitSet()
      .replace(EnumerableConvention.INSTANCE);
  RelNode relAfter = program.run(planner, rel, toTraits,
      Collections.emptyList(), Collections.emptyList());
  // Compare the optimized plan against the recorded reference plan.
  String planAfter = NL + RelOptUtil.toString(relAfter);
  getDiffRepos().assertEquals("planAfter", "${planAfter}", planAfter);
}
Example #3
Source File: PlanningConfigurationBuilder.java From flink with Apache License 2.0 | 6 votes |
/**
 * Creates the planning configuration: stores the catalogs/config, builds the
 * planner context, and configures a VolcanoPlanner with an expression
 * reducer and the convention trait.
 */
public PlanningConfigurationBuilder(
    TableConfig tableConfig,
    FunctionCatalog functionCatalog,
    CalciteSchema rootSchema,
    ExpressionBridge<PlannerExpression> expressionBridge) {
  this.tableConfig = tableConfig;
  this.functionCatalog = functionCatalog;

  // The converter is needed when calling temporal table functions from SQL,
  // because they reference a history table represented with a tree of table
  // operations. Note: context must be built before the planner, which uses it.
  this.context = Contexts.of(expressionBridge, tableConfig);

  this.planner = new VolcanoPlanner(costFactory, context);
  // Constant-fold expressions during planning.
  planner.setExecutor(new ExpressionReducer(tableConfig));
  planner.addRelTraitDef(ConventionTraitDef.INSTANCE);

  this.expressionBridge = expressionBridge;
  this.rootSchema = rootSchema;
}
Example #4
Source File: TestSimpleLimitExchangeRemover.java From dremio-oss with Apache License 2.0 | 6 votes |
/**
 * Builds the fixtures: an option list holding default values, a mocked
 * option manager serving them, {@code PlannerSettings} and a Volcano-backed
 * {@code RelOptCluster}.
 */
@Before
public void setup() {
  optionList = new OptionList();
  optionList.add(ExecConstants.SLICE_TARGET_OPTION.getDefault());
  optionList.add(PlannerSettings.ENABLE_LEAF_LIMITS.getDefault());
  optionList.add(PlannerSettings.ENABLE_TRIVIAL_SINGULAR.getDefault());
  optionManager = mock(OptionManager.class);
  when(optionManager.getOptionValidatorListing()).thenReturn(mock(OptionValidatorListing.class));
  when(optionManager.getNonDefaultOptions()).thenReturn(optionList);
  // Individual option lookups also return the defaults.
  when(optionManager.getOption(eq(ExecConstants.SLICE_TARGET)))
      .thenReturn(ExecConstants.SLICE_TARGET_OPTION.getDefault());
  when(optionManager.getOption(eq(PlannerSettings.ENABLE_LEAF_LIMITS.getOptionName())))
      .thenReturn(PlannerSettings.ENABLE_LEAF_LIMITS.getDefault());
  when(optionManager.getOption(eq(PlannerSettings.ENABLE_TRIVIAL_SINGULAR.getOptionName())))
      .thenReturn(PlannerSettings.ENABLE_TRIVIAL_SINGULAR.getDefault());
  // Single-executor-node cluster.
  ClusterResourceInformation info = mock(ClusterResourceInformation.class);
  when(info.getExecutorNodeCount()).thenReturn(1);
  plannerSettings =
      new PlannerSettings(DremioTest.DEFAULT_SABOT_CONFIG, optionManager, () -> info);
  cluster = RelOptCluster.create(new VolcanoPlanner(plannerSettings), rexBuilder);
}
Example #5
Source File: BatsOptimizerTest.java From Bats with Apache License 2.0 | 6 votes |
/**
 * Drives a VolcanoPlanner end-to-end: converts SQL to a RelNode, runs
 * cost-based optimization via {@code findBestExp()}, and prints the plan.
 */
static void testVolcanoPlanner() throws Exception {
  VolcanoPlanner volcanoPlanner = createVolcanoPlanner();
  volcanoPlanner.addRelTraitDef(ConventionTraitDef.INSTANCE);
  // volcanoPlanner.addRelTraitDef(RelCollationTraitDef.INSTANCE);

  // addRules(volcanoPlanner);
  volcanoPlanner.addRule(ReduceExpressionsRule.PROJECT_INSTANCE);
  // volcanoPlanner.addRule(EnumerableRules.ENUMERABLE_PROJECT_RULE);

  RelNode relNode = testSqlToRelConverter(volcanoPlanner);
  volcanoPlanner.setRoot(relNode);
  relNode = volcanoPlanner.findBestExp(); // an error occurs at this step

  String plan = RelOptUtil.toString(relNode);
  System.out.println("Volcano Plan:");
  System.out.println("------------------------------------------------------------------");
  System.out.println(plan);
}
Example #6
Source File: PlanCaptureAttemptObserver.java From dremio-oss with Apache License 2.0 | 6 votes |
private String getPlanDump(RelOptPlanner planner) { if (planner == null) { return null; } // Use VolcanoPlanner#dump to get more detailed information if (planner instanceof VolcanoPlanner) { StringWriter sw = new StringWriter(); PrintWriter pw = new PrintWriter(sw); ((VolcanoPlanner) planner).dump(pw); pw.flush(); return sw.toString(); } // Print the current tree otherwise RelNode root = planner.getRoot(); return RelOptUtil.toString(root); }
Example #7
Source File: PlannerContext.java From flink with Apache License 2.0 | 6 votes |
/**
 * Creates the planner context: stores configuration/catalogs, builds a
 * framework config, and wires a VolcanoPlanner (with executor and trait
 * definitions from that config) into a Flink RelOptCluster.
 */
public PlannerContext(
    TableConfig tableConfig,
    FunctionCatalog functionCatalog,
    CalciteSchema rootSchema,
    List<RelTraitDef> traitDefs) {
  this.tableConfig = tableConfig;
  this.functionCatalog = functionCatalog;
  this.context = new FlinkContextImpl(tableConfig, functionCatalog);
  this.rootSchema = rootSchema;
  this.traitDefs = traitDefs;
  // Make a framework config to initialize the RelOptCluster instance.
  // Caution: we can only use attributes that cannot be overwritten/configured
  // by the user.
  final FrameworkConfig frameworkConfig = createFrameworkConfig();

  RelOptPlanner planner = new VolcanoPlanner(frameworkConfig.getCostFactory(),
      frameworkConfig.getContext());
  planner.setExecutor(frameworkConfig.getExecutor());
  for (RelTraitDef traitDef : frameworkConfig.getTraitDefs()) {
    planner.addRelTraitDef(traitDef);
  }

  this.cluster = FlinkRelOptClusterFactory.create(planner, new RexBuilder(typeFactory));
}
Example #8
Source File: PlanningConfigurationBuilder.java From flink with Apache License 2.0 | 6 votes |
/**
 * Creates the planning configuration: stores the catalogs/config, builds the
 * planner context from the expression bridge, and configures a VolcanoPlanner
 * with an expression reducer and the convention trait.
 */
public PlanningConfigurationBuilder(
    TableConfig tableConfig,
    FunctionCatalog functionCatalog,
    CalciteSchema rootSchema,
    ExpressionBridge<PlannerExpression> expressionBridge) {
  this.tableConfig = tableConfig;
  this.functionCatalog = functionCatalog;

  // The converter is needed when calling temporal table functions from SQL,
  // because they reference a history table represented with a tree of table
  // operations. Note: context must be built before the planner, which uses it.
  this.context = Contexts.of(expressionBridge);

  this.planner = new VolcanoPlanner(costFactory, context);
  // Constant-fold expressions during planning.
  planner.setExecutor(new ExpressionReducer(tableConfig));
  planner.addRelTraitDef(ConventionTraitDef.INSTANCE);

  this.expressionBridge = expressionBridge;
  this.rootSchema = rootSchema;
}
Example #9
Source File: MaterializedViewFilterScanRule.java From quark with Apache License 2.0 | 5 votes |
/**
 * Attempts to rewrite a Filter-over-TableScan using registered materialized
 * views: for each materialization whose query row type matches the scan, the
 * materialization query is normalized with a HepPlanner and substitution
 * candidates are offered back to the rule call.
 */
protected void apply(RelOptRuleCall call, Filter filter, TableScan scan) {
  // Avoid optimizing an already-optimized scan.
  if (scan instanceof QuarkViewScan || scan instanceof QuarkTileScan) {
    return;
  }
  // Rebuild filter(scan) as the target expression to match against.
  RelNode root = filter.copy(filter.getTraitSet(),
      Collections.singletonList((RelNode) scan));
  RelOptPlanner planner = call.getPlanner();
  // Materializations are only tracked by the Volcano planner.
  if (planner instanceof VolcanoPlanner) {
    List<RelOptMaterialization> materializations =
        ((VolcanoPlanner) planner).getMaterializations();
    for (RelOptMaterialization materialization : materializations) {
      if (scan.getRowType().equals(materialization.queryRel.getRowType())) {
        RelNode target = materialization.queryRel;
        // Normalize the materialization query before substitution matching.
        final HepPlanner hepPlanner = new HepPlanner(program, planner.getContext());
        hepPlanner.setRoot(target);
        target = hepPlanner.findBestExp();
        List<RelNode> subs = new MaterializedViewSubstitutionVisitor(target, root)
            .go(materialization.tableRel);
        for (RelNode s : subs) {
          call.transformTo(s);
        }
      }
    }
  }
}
Example #10
Source File: BatsOptimizerTest.java From Bats with Apache License 2.0 | 5 votes |
/**
 * Registers the abstract relational rules with the planner. The commented
 * lines preserve alternative rule sets (bindable / enumerable / table-scan
 * rules) that were tried during experimentation.
 */
static void addRules(VolcanoPlanner volcanoPlanner) {
  volcanoPlanner.registerAbstractRelationalRules();
  RelOptUtil.registerAbstractRels(volcanoPlanner);
  // for (RelOptRule rule : Bindables.RULES) {
  //   volcanoPlanner.addRule(rule);
  // }
  // volcanoPlanner.addRule(Bindables.BINDABLE_TABLE_SCAN_RULE);
  // volcanoPlanner.addRule(ProjectTableScanRule.INSTANCE);
  // volcanoPlanner.addRule(ProjectTableScanRule.INTERPRETER);

  // for (RelOptRule rule : org.apache.calcite.prepare.CalcitePrepareImpl.ENUMERABLE_RULES)
  //   volcanoPlanner.addRule(rule);
  // volcanoPlanner.addRule(org.apache.calcite.interpreter.NoneToBindableConverterRule.INSTANCE);
}
Example #11
Source File: SqlHintsConverterTest.java From calcite with Apache License 2.0 | 5 votes |
/**
 * Verifies that join hints survive Volcano planner rule transformations:
 * a MockEnumerableJoinRule asserts that the expected {@code USE_HASH_JOIN}
 * hint is still attached when the join is physically implemented.
 */
@Test void testHintsPropagationInVolcanoPlannerRules() {
  final String sql = "select /*+ use_hash_join(r, s), use_hash_join(emp, dept) */\n"
      + "ename, job, sal, dept.name\n"
      + "from emp join dept on emp.deptno = dept.deptno";
  RelOptPlanner planner = new VolcanoPlanner();
  planner.addRelTraitDef(ConventionTraitDef.INSTANCE);
  // Route the tester's cluster through our planner instance.
  Tester tester1 = tester.withDecorrelation(true)
      .withClusterFactory(
          relOptCluster -> RelOptCluster.create(planner, relOptCluster.getRexBuilder()));
  final RelNode rel = tester1.convertSqlToRel(sql).rel;
  // The hint the mock rule expects to find on the join.
  final RelHint hint = RelHint.builder("USE_HASH_JOIN")
      .inheritPath(0)
      .hintOption("EMP")
      .hintOption("DEPT")
      .build();
  // Validate Volcano planner.
  RuleSet ruleSet = RuleSets.ofList(
      new MockEnumerableJoinRule(hint), // Rule to validate the hint.
      FilterProjectTransposeRule.INSTANCE,
      FilterMergeRule.INSTANCE,
      ProjectMergeRule.INSTANCE,
      EnumerableRules.ENUMERABLE_JOIN_RULE,
      EnumerableRules.ENUMERABLE_PROJECT_RULE,
      EnumerableRules.ENUMERABLE_FILTER_RULE,
      EnumerableRules.ENUMERABLE_SORT_RULE,
      EnumerableRules.ENUMERABLE_LIMIT_RULE,
      EnumerableRules.ENUMERABLE_TABLE_SCAN_RULE);
  Program program = Programs.of(ruleSet);
  RelTraitSet toTraits = rel
      .getCluster()
      .traitSet()
      .replace(EnumerableConvention.INSTANCE);
  // Running the program triggers the mock rule, which performs the check.
  program.run(planner, rel, toTraits,
      Collections.emptyList(), Collections.emptyList());
}
Example #12
Source File: TopDownOptTest.java From calcite with Apache License 2.0 | 5 votes |
/**
 * Creates a test query wrapper with a VolcanoPlanner configured for
 * top-down optimization and a deterministic rule set.
 */
private Query(String sql) {
  this.sql = sql;

  planner = new VolcanoPlanner();
  // Always use top-down optimization.
  planner.setTopDownOpt(true);
  planner.addRelTraitDef(ConventionTraitDef.INSTANCE);
  planner.addRelTraitDef(RelCollationTraitDef.INSTANCE);

  RelOptUtil.registerDefaultRules(planner, false, false);

  // Remove join-reordering rules to keep a deterministic join order.
  planner.removeRule(JoinCommuteRule.INSTANCE);
  planner.removeRule(JoinPushThroughJoinRule.LEFT);
  planner.removeRule(JoinPushThroughJoinRule.RIGHT);

  // Always use sorted aggregation.
  planner.addRule(EnumerableRules.ENUMERABLE_SORTED_AGGREGATE_RULE);
  planner.removeRule(EnumerableRules.ENUMERABLE_AGGREGATE_RULE);

  // Pushing down sort should be handled by top-down optimization.
  planner.removeRule(SortProjectTransposeRule.INSTANCE);

  // Sort will only be pushed down by trait propagation.
  planner.removeRule(SortJoinTransposeRule.INSTANCE);
  planner.removeRule(SortJoinCopyRule.INSTANCE);
}
Example #13
Source File: PlannerImpl.java From calcite with Apache License 2.0 | 5 votes |
/**
 * Moves the planner to the READY state: (re)creates the type factory and a
 * VolcanoPlanner with default rules, then registers trait definitions.
 */
private void ready() {
  switch (state) {
  case STATE_0_CLOSED:
    // A closed planner is implicitly reset first; falls through to RESET.
    reset();
  }
  ensure(State.STATE_1_RESET);

  RelDataTypeSystem typeSystem =
      connectionConfig.typeSystem(RelDataTypeSystem.class, RelDataTypeSystem.DEFAULT);
  typeFactory = new JavaTypeFactoryImpl(typeSystem);
  planner = new VolcanoPlanner(costFactory, context);
  RelOptUtil.registerDefaultRules(planner,
      connectionConfig.materializationsEnabled(),
      Hook.ENABLE_BINDABLE.get(false));
  planner.setExecutor(executor);

  state = State.STATE_2_READY;

  // If the user specified their own traitDefs, register those instead of the
  // default convention (and optional collation) traits.
  if (this.traitDefs == null) {
    planner.addRelTraitDef(ConventionTraitDef.INSTANCE);
    if (CalciteSystemProperty.ENABLE_COLLATION_TRAIT.value()) {
      planner.addRelTraitDef(RelCollationTraitDef.INSTANCE);
    }
  } else {
    for (RelTraitDef def : this.traitDefs) {
      planner.addRelTraitDef(def);
    }
  }
}
Example #14
Source File: CalcitePrepareImpl.java From calcite with Apache License 2.0 | 5 votes |
/**
 * Converts a validated SQL node to a relational tree using a VolcanoPlanner
 * (needed because it can handle traits), optionally running analysis.
 *
 * @return an {@code AnalyzeResult} when {@code analyze} is set, otherwise a
 *     {@code ConvertResult} carrying the relational root
 */
private ParseResult convert_(Context context, String sql, boolean analyze,
    boolean fail, CalciteCatalogReader catalogReader, SqlValidator validator,
    SqlNode sqlNode1) {
  final JavaTypeFactory typeFactory = context.getTypeFactory();
  final Convention resultConvention =
      enableBindable ? BindableConvention.INSTANCE : EnumerableConvention.INSTANCE;
  // Use the Volcano planner because it can handle the traits.
  final VolcanoPlanner planner = new VolcanoPlanner();
  planner.addRelTraitDef(ConventionTraitDef.INSTANCE);

  final SqlToRelConverter.ConfigBuilder configBuilder =
      SqlToRelConverter.configBuilder().withTrimUnusedFields(true);

  final CalcitePreparingStmt preparingStmt = new CalcitePreparingStmt(this,
      context, catalogReader, typeFactory, context.getRootSchema(), null,
      createCluster(planner, new RexBuilder(typeFactory)), resultConvention,
      createConvertletTable());
  final SqlToRelConverter converter =
      preparingStmt.getSqlToRelConverter(validator, catalogReader, configBuilder.build());

  final RelRoot root = converter.convertQuery(sqlNode1, false, true);
  if (analyze) {
    return analyze_(validator, sql, sqlNode1, root, fail);
  }
  return new ConvertResult(this, validator, sql, sqlNode1,
      validator.getValidatedNodeType(sqlNode1), root);
}
Example #15
Source File: PlannerContext.java From flink with Apache License 2.0 | 5 votes |
/**
 * Creates the planner context: builds the Flink context (with catalog manager
 * and SQL-expression-to-Rex converter), a framework config, and a
 * VolcanoPlanner wired into a Flink RelOptCluster.
 */
public PlannerContext(
    TableConfig tableConfig,
    FunctionCatalog functionCatalog,
    CatalogManager catalogManager,
    CalciteSchema rootSchema,
    List<RelTraitDef> traitDefs) {
  this.tableConfig = tableConfig;

  this.context = new FlinkContextImpl(
      tableConfig,
      functionCatalog,
      catalogManager,
      this::createSqlExprToRexConverter);

  this.rootSchema = rootSchema;
  this.traitDefs = traitDefs;
  // Make a framework config to initialize the RelOptCluster instance.
  // Caution: we can only use attributes that cannot be overwritten/configured
  // by the user.
  this.frameworkConfig = createFrameworkConfig();

  RelOptPlanner planner = new VolcanoPlanner(frameworkConfig.getCostFactory(),
      frameworkConfig.getContext());
  planner.setExecutor(frameworkConfig.getExecutor());
  for (RelTraitDef traitDef : frameworkConfig.getTraitDefs()) {
    planner.addRelTraitDef(traitDef);
  }

  this.cluster = FlinkRelOptClusterFactory.create(planner, new RexBuilder(typeFactory));
}
Example #16
Source File: BatsOptimizerTest.java From Bats with Apache License 2.0 | 5 votes |
static VolcanoPlanner createVolcanoPlanner() { RelOptCostFactory costFactory = RelOptCostImpl.FACTORY; Context externalContext = null; VolcanoPlanner volcanoPlanner = new VolcanoPlanner(costFactory, externalContext); // RexExecutor rexExecutor = null; return volcanoPlanner; }
Example #17
Source File: TestSplitCountChecker.java From dremio-oss with Apache License 2.0 | 5 votes |
/**
 * Builds the fixtures: an option list of defaults served through a
 * TestingOptionManager, {@code PlannerSettings} over a single-node cluster,
 * and a Volcano-backed {@code RelOptCluster}.
 */
@Before
public void setup() throws Exception {
  OptionList optionList = new OptionList();
  optionList.add(ExecConstants.SLICE_TARGET_OPTION.getDefault());
  optionList.add(PlannerSettings.ENABLE_LEAF_LIMITS.getDefault());
  optionList.add(PlannerSettings.ENABLE_TRIVIAL_SINGULAR.getDefault());
  final OptionManager optionManager =
      new TestingOptionManager(mock(OptionValidatorListing.class), optionList);

  // Single-executor-node cluster.
  ClusterResourceInformation info = mock(ClusterResourceInformation.class);
  when(info.getExecutorNodeCount()).thenReturn(1);

  final PlannerSettings plannerSettings =
      new PlannerSettings(DremioTest.DEFAULT_SABOT_CONFIG, optionManager, () -> info);
  cluster = RelOptCluster.create(new VolcanoPlanner(plannerSettings), rexBuilder);
}
Example #18
Source File: TestRelMdRowCount.java From dremio-oss with Apache License 2.0 | 5 votes |
/**
 * Builds the fixtures: a stubbed KV-store provider (every store is an empty
 * mock), a real SystemOptionManager layered over default options, planner
 * settings, and a Volcano-backed cluster with the default metadata provider.
 */
@Before
public void setup() throws Exception {
  // KV-store provider whose stores are mocks that always find nothing.
  final LegacyKVStoreProvider storeProvider = new LegacyKVStoreProvider() {
    @Override
    public <K, V, T extends LegacyKVStore<K, V>, U extends KVStore<K, V>> T getStore(
        Class<? extends LegacyStoreCreationFunction<K, V, T, U>> creator) {
      LegacyKVStore<?,?> store = mock(LegacyKVStore.class);
      when(store.find()).thenReturn(Collections.emptyList());
      return (T) store;
    }

    @Override
    public void start() throws Exception {
    }

    @Override
    public void close() throws Exception {
    }
  };
  final OptionValidatorListing optionValidatorListing =
      new OptionValidatorListingImpl(DremioTest.CLASSPATH_SCAN_RESULT);
  // Real system option manager over the mocked store provider.
  SystemOptionManager som = new SystemOptionManager(optionValidatorListing,
      new LogicalPlanPersistence(DremioTest.DEFAULT_SABOT_CONFIG, DremioTest.CLASSPATH_SCAN_RESULT),
      () -> storeProvider, false);
  // Defaults first, then system overrides.
  OptionManager optionManager = OptionManagerWrapper.Builder.newBuilder()
      .withOptionManager(new DefaultOptionManager(optionValidatorListing))
      .withOptionManager(som)
      .build();

  som.start();
  // Single-executor-node cluster.
  ClusterResourceInformation info = mock(ClusterResourceInformation.class);
  when(info.getExecutorNodeCount()).thenReturn(1);

  PlannerSettings plannerSettings =
      new PlannerSettings(DremioTest.DEFAULT_SABOT_CONFIG, optionManager, () -> info);
  cluster = RelOptCluster.create(new VolcanoPlanner(plannerSettings), rexBuilder);
  cluster.setMetadataProvider(DefaultRelMetadataProvider.INSTANCE);
}
Example #19
Source File: TestIndexBasedPruning.java From dremio-oss with Apache License 2.0 | 5 votes |
/**
 * Builds the fixtures for index-based pruning: mocked options and cluster
 * info, planner settings, a Volcano-backed cluster, a scan over two
 * partition chunks, a filter above it, and the prune-scan rule under test.
 */
@Before
public void setUp() {
  MockitoAnnotations.initMocks(this);
  when(optionManager.getOptionValidatorListing()).thenReturn(mock(OptionValidatorListing.class));
  // Selectivity-estimate options return their default values.
  when(optionManager.getOption(eq(PlannerSettings.FILTER_MAX_SELECTIVITY_ESTIMATE_FACTOR.getOptionName())))
      .thenReturn(PlannerSettings.FILTER_MAX_SELECTIVITY_ESTIMATE_FACTOR.getDefault());
  when(optionManager.getOption(eq(PlannerSettings.FILTER_MIN_SELECTIVITY_ESTIMATE_FACTOR.getOptionName())))
      .thenReturn(PlannerSettings.FILTER_MIN_SELECTIVITY_ESTIMATE_FACTOR.getDefault());
  OptionList optionList = new OptionList();
  optionList.add(PlannerSettings.FILTER_MAX_SELECTIVITY_ESTIMATE_FACTOR.getDefault());
  optionList.add(PlannerSettings.FILTER_MIN_SELECTIVITY_ESTIMATE_FACTOR.getDefault());
  when(optionManager.getNonDefaultOptions()).thenReturn(optionList);

  // Single-executor-node cluster.
  ClusterResourceInformation info = mock(ClusterResourceInformation.class);
  when(info.getExecutorNodeCount()).thenReturn(1);

  plannerSettings =
      new PlannerSettings(DremioTest.DEFAULT_SABOT_CONFIG, optionManager, () -> info);
  RelOptCluster cluster = RelOptCluster.create(new VolcanoPlanner(plannerSettings), REX_BUILDER);
  // Table metadata over two partition chunks (split version 0).
  SplitsPointer splitsPointer = new TestSplitsPointer(0,
      Arrays.asList(TEST_PARTITION_CHUNK_METADATA_1, TEST_PARTITION_CHUNK_METADATA_2), 2);
  TableMetadata indexPrunableMetadata =
      new TableMetadataImpl(pluginId, DATASET_CONFIG, "testuser", splitsPointer);
  // A source type the rule can be instantiated against.
  SourceType newType = mock(SourceType.class);
  when(newType.value()).thenReturn("TestSource");
  when(pluginId.getType()).thenReturn(newType);
  indexPrunableScan = new TestScanRel(cluster, TRAITS, table, pluginId,
      indexPrunableMetadata, PROJECTED_COLUMNS, 0, false);
  filter = new FilterRel(cluster, TRAITS, indexPrunableScan, rexNode);
  rule = new PruneScanRuleBase.PruneScanRuleFilterOnScan<>(pluginId.getType(),
      TestScanRel.class, mock(OptimizerRulesContext.class));
}
Example #20
Source File: PlannerImpl.java From Mycat2 with GNU General Public License v3.0 | 5 votes |
/**
 * Moves the planner to the READY state: creates a VolcanoPlanner with
 * default rules and registers trait definitions.
 */
private void ready() {
  switch (state) {
  case STATE_0_CLOSED:
    // A closed planner is implicitly reset first; falls through to RESET.
    reset();
  }
  ensure(State.STATE_1_RESET);
  planner = new VolcanoPlanner(costFactory, context);
  RelOptUtil.registerDefaultRules(planner,
      connectionConfig.materializationsEnabled(),
      Hook.ENABLE_BINDABLE.get(false));
  planner.setExecutor(executor);

  state = State.STATE_2_READY;

  // If the user specified their own traitDefs, register those instead of the
  // default convention (and optional collation) traits.
  if (this.traitDefs == null) {
    planner.addRelTraitDef(ConventionTraitDef.INSTANCE);
    if (CalciteSystemProperty.ENABLE_COLLATION_TRAIT.value()) {
      planner.addRelTraitDef(RelCollationTraitDef.INSTANCE);
    }
  } else {
    for (RelTraitDef def : this.traitDefs) {
      planner.addRelTraitDef(def);
    }
  }
}
Example #21
Source File: MycatCalcitePlanner.java From Mycat2 with GNU General Public License v3.0 | 4 votes |
/**
 * Builds a fresh RelOptCluster backed by a VolcanoPlanner that only knows
 * the calling-convention trait.
 */
public RelOptCluster newCluster() {
  final RelOptPlanner volcanoPlanner = new VolcanoPlanner();
  volcanoPlanner.addRelTraitDef(ConventionTraitDef.INSTANCE);
  // Reuse the shared RexBuilder from the Mycat Calcite support singleton.
  return RelOptCluster.create(volcanoPlanner, MycatCalciteSupport.INSTANCE.RexBuilder);
}
Example #22
Source File: CodeGenerationBenchmark.java From calcite with Apache License 2.0 | 4 votes |
/**
 * JMH trial setup: configures a VolcanoPlanner with calc/join/values rules,
 * then generates {@code queries} join-heavy plans, optimizes each, generates
 * its Java implementation code, and prepares (but does not run) a Janino
 * class-body evaluator for it.
 */
@Setup(Level.Trial)
public void setup() {
  planInfos = new PlanInfo[queries];
  VolcanoPlanner planner = new VolcanoPlanner();
  planner.addRelTraitDef(ConventionTraitDef.INSTANCE);
  planner.addRule(FilterToCalcRule.INSTANCE);
  planner.addRule(ProjectToCalcRule.INSTANCE);
  planner.addRule(EnumerableRules.ENUMERABLE_CALC_RULE);
  planner.addRule(EnumerableRules.ENUMERABLE_JOIN_RULE);
  planner.addRule(EnumerableRules.ENUMERABLE_VALUES_RULE);

  RelDataTypeFactory typeFactory =
      new JavaTypeFactoryImpl(org.apache.calcite.rel.type.RelDataTypeSystem.DEFAULT);
  RelOptCluster cluster = RelOptCluster.create(planner, new RexBuilder(typeFactory));
  RelTraitSet desiredTraits = cluster.traitSet().replace(EnumerableConvention.INSTANCE);
  RelBuilder relBuilder = RelFactories.LOGICAL_BUILDER.create(cluster, null);

  // Generates queries of the following form depending on the configuration parameters.
  // SELECT `t`.`name`
  // FROM (VALUES (1, 'Value0')) AS `t` (`id`, `name`)
  // INNER JOIN (VALUES (1, 'Value1')) AS `t` (`id`, `name`) AS `t0` ON `t`.`id` = `t0`.`id`
  // INNER JOIN (VALUES (2, 'Value2')) AS `t` (`id`, `name`) AS `t1` ON `t`.`id` = `t1`.`id`
  // INNER JOIN (VALUES (3, 'Value3')) AS `t` (`id`, `name`) AS `t2` ON `t`.`id` = `t2`.`id`
  // INNER JOIN ...
  // WHERE
  //  `t`.`name` = 'name0' OR
  //  `t`.`name` = 'name1' OR
  //  `t`.`name` = 'name2' OR
  //  ...
  //  OR `t`.`id` = 0
  // The last disjunction (i.e, t.id = $i) is what makes the queries different from one another
  // by assigning a different constant literal.
  for (int i = 0; i < queries; i++) {
    relBuilder.values(new String[]{"id", "name"}, 1, "Value" + 0);
    for (int j = 1; j <= joins; j++) {
      relBuilder
          .values(new String[]{"id", "name"}, j, "Value" + j)
          .join(JoinRelType.INNER, "id");
    }

    List<RexNode> disjunctions = new ArrayList<>();
    for (int j = 0; j < whereClauseDisjunctions; j++) {
      disjunctions.add(
          relBuilder.equals(
              relBuilder.field("name"),
              relBuilder.literal("name" + j)));
    }
    // The query-specific disjunct: makes each generated plan unique.
    disjunctions.add(
        relBuilder.equals(
            relBuilder.field("id"),
            relBuilder.literal(i)));

    RelNode query = relBuilder
        .filter(relBuilder.or(disjunctions))
        .project(relBuilder.field("name"))
        .build();

    // Optimize to the Enumerable convention and pick the best plan.
    RelNode query0 = planner.changeTraits(query, desiredTraits);
    planner.setRoot(query0);

    PlanInfo info = new PlanInfo();
    EnumerableRel plan = (EnumerableRel) planner.findBestExp();

    // Generate the Java source for the plan.
    EnumerableRelImplementor relImplementor =
        new EnumerableRelImplementor(plan.getCluster().getRexBuilder(), new HashMap<>());
    info.classExpr = relImplementor.implementRoot(plan, EnumerableRel.Prefer.ARRAY);
    info.javaCode =
        Expressions.toString(info.classExpr.memberDeclarations, "\n", false);

    // Prepare (but do not yet run) a Janino evaluator for the generated code.
    ICompilerFactory compilerFactory;
    try {
      compilerFactory = CompilerFactoryFactory.getDefaultCompilerFactory();
    } catch (Exception e) {
      throw new IllegalStateException(
          "Unable to instantiate java compiler", e);
    }
    IClassBodyEvaluator cbe = compilerFactory.newClassBodyEvaluator();
    cbe.setClassName(info.classExpr.name);
    cbe.setExtendedClass(Utilities.class);
    // Single-column results can be bound as Typed Bindables.
    cbe.setImplementedInterfaces(
        plan.getRowType().getFieldCount() == 1
            ? new Class[]{Bindable.class, Typed.class}
            : new Class[]{ArrayBindable.class});
    cbe.setParentClassLoader(EnumerableInterpretable.class.getClassLoader());
    info.cbe = cbe;
    planInfos[i] = info;
  }
}
Example #23
Source File: CheapestPlanWithReflectionVisitor.java From dremio-oss with Apache License 2.0 | 4 votes |
/**
 * Creates a visitor over the given planner's search space, starting from the
 * planner's current root.
 *
 * @param planner the Volcano planner whose best plans will be inspected
 */
public CheapestPlanWithReflectionVisitor(VolcanoPlanner planner) {
  this.planner = planner;
  this.root = planner.getRoot();
}
Example #24
Source File: RelOptTestBase.java From calcite with Apache License 2.0 | 4 votes |
/**
 * Checks the plan for a SQL statement before/after executing a given rule,
 * with a pre-program to prepare the tree.
 *
 * @param tester     Tester
 * @param preProgram Program to execute before comparing before state
 * @param planner    Planner
 * @param sql        SQL query
 * @param unchanged  Whether the rule is to have no effect
 */
private void checkPlanning(Tester tester, HepProgram preProgram,
    RelOptPlanner planner, String sql, boolean unchanged) {
  final DiffRepository diffRepos = getDiffRepos();
  String sql2 = diffRepos.expand("sql", sql);
  final RelRoot root = tester.convertSqlToRel(sql2);
  final RelNode relInitial = root.rel;

  assertNotNull(relInitial);

  // Chain the planner's metadata providers onto the default provider.
  List<RelMetadataProvider> list = new ArrayList<>();
  list.add(DefaultRelMetadataProvider.INSTANCE);
  planner.registerMetadataProviders(list);
  RelMetadataProvider plannerChain =
      ChainedRelMetadataProvider.of(list);
  final RelOptCluster cluster = relInitial.getCluster();
  cluster.setMetadataProvider(plannerChain);

  // Optionally run the pre-program to produce the "before" tree.
  RelNode relBefore;
  if (preProgram == null) {
    relBefore = relInitial;
  } else {
    HepPlanner prePlanner = new HepPlanner(preProgram);
    prePlanner.setRoot(relInitial);
    relBefore = prePlanner.findBestExp();
  }

  assertThat(relBefore, notNullValue());

  final String planBefore = NL + RelOptUtil.toString(relBefore);
  diffRepos.assertEquals("planBefore", "${planBefore}", planBefore);
  SqlToRelTestBase.assertValid(relBefore);

  if (planner instanceof VolcanoPlanner) {
    // Volcano needs a physical convention to optimize toward.
    relBefore = planner.changeTraits(relBefore,
        relBefore.getTraitSet().replace(EnumerableConvention.INSTANCE));
  }
  planner.setRoot(relBefore);
  RelNode r = planner.findBestExp();
  if (tester.isLateDecorrelate()) {
    // Record the plan before late decorrelation, then decorrelate.
    final String planMid = NL + RelOptUtil.toString(r);
    diffRepos.assertEquals("planMid", "${planMid}", planMid);
    SqlToRelTestBase.assertValid(r);
    final RelBuilder relBuilder =
        RelFactories.LOGICAL_BUILDER.create(cluster, null);
    r = RelDecorrelator.decorrelateQuery(r, relBuilder);
  }
  final String planAfter = NL + RelOptUtil.toString(r);
  if (unchanged) {
    assertThat(planAfter, is(planBefore));
  } else {
    diffRepos.assertEquals("planAfter", "${planAfter}", planAfter);
    // A test declared as changing the plan must actually change it.
    if (planBefore.equals(planAfter)) {
      throw new AssertionError("Expected plan before and after is the same.\n"
          + "You must use unchanged=true or call checkUnchanged");
    }
  }
  SqlToRelTestBase.assertValid(r);
}
Example #25
Source File: PlanCaptureAttemptObserver.java From dremio-oss with Apache License 2.0 | 4 votes |
@Override public void planRelTransform(final PlannerPhase phase, RelOptPlanner planner, final RelNode before, final RelNode after, final long millisTaken) { final boolean noTransform = before == after; final String planAsString = toStringOrEmpty(after, noTransform || phase.forceVerbose()); final long millisTakenFinalize = (phase.useMaterializations) ? millisTaken - (findMaterializationMillis + normalizationMillis + substitutionMillis) : millisTaken; if (phase.useMaterializations) { planPhases.add(PlanPhaseProfile.newBuilder() .setPhaseName("Substitution") .setDurationMillis(substitutionMillis) .setPlan("") .build()); } PlanPhaseProfile.Builder b = PlanPhaseProfile.newBuilder() .setPhaseName(phase.description) .setDurationMillis(millisTakenFinalize) .setPlan(planAsString); // dump state of volcano planner to troubleshoot costing issues (or long planning issues). if (verbose || noTransform) { final String dump = getPlanDump(planner); if (dump != null) { b.setPlannerDump(dump); } //System.out.println(Thread.currentThread().getName() + ":\n" + dump); } planPhases.add(b.build()); if (verbose && phase.useMaterializations && planner instanceof VolcanoPlanner && numSubstitutions > 0) { try { Map<String, RelNode> bestPlansWithReflections = new CheapestPlanWithReflectionVisitor((VolcanoPlanner) planner).getBestPlansWithReflections(); for (String reflection : bestPlansWithReflections.keySet()) { String plan = RelOptUtil.toString(bestPlansWithReflections.get(reflection), SqlExplainLevel.ALL_ATTRIBUTES); LayoutMaterializedViewProfile profile = mapIdToAccelerationProfile.get(reflection); if (profile != null) { mapIdToAccelerationProfile.put( reflection, LayoutMaterializedViewProfile.newBuilder(profile) .setOptimizedPlanBytes(ByteString.copyFrom(plan.getBytes())) .build() ); } } } catch (Exception e) { logger.debug("Failed to find best plans with reflections", e); } } }