org.apache.calcite.rel.RelRoot Java Examples
The following examples show how to use org.apache.calcite.rel.RelRoot. Each example notes the project, source file, and license it is taken from.
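Before the project-specific examples, here is a minimal, self-contained sketch of the most common way to obtain a RelRoot: parse, validate, and convert a SQL string through the Frameworks/Planner API, as several examples below do (see Examples #3, #17, and #22). The class name RelRootSketch and the query "VALUES 1 + 1" are illustrative choices only, not part of any project listed here.

import org.apache.calcite.plan.RelOptUtil;
import org.apache.calcite.rel.RelNode;
import org.apache.calcite.rel.RelRoot;
import org.apache.calcite.schema.SchemaPlus;
import org.apache.calcite.sql.SqlNode;
import org.apache.calcite.sql.parser.SqlParser;
import org.apache.calcite.tools.FrameworkConfig;
import org.apache.calcite.tools.Frameworks;
import org.apache.calcite.tools.Planner;

public class RelRootSketch {
  public static void main(String[] args) throws Exception {
    // An empty root schema is enough for a query that reads no tables.
    SchemaPlus rootSchema = Frameworks.createRootSchema(true);
    FrameworkConfig config = Frameworks.newConfigBuilder()
        .parserConfig(SqlParser.Config.DEFAULT)
        .defaultSchema(rootSchema)
        .build();

    Planner planner = Frameworks.getPlanner(config);

    // Parse -> validate -> convert; Planner.rel(...) returns a RelRoot.
    SqlNode parsed = planner.parse("VALUES 1 + 1");
    SqlNode validated = planner.validate(parsed);
    RelRoot root = planner.rel(validated);

    // RelRoot carries the relational expression (root.rel) together with the
    // validated row type, output field mapping and collation of the query.
    RelNode rel = root.rel;
    System.out.println(RelOptUtil.toString(rel));
  }
}

Most of the snippets below either produce a RelRoot this way (or via SqlToRelConverter.convertQuery) and then work with root.rel, or implement RelOptTable.ViewExpander#expandView, which returns a RelRoot for an expanded view definition.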
Example #1
Source File: CalcitePrepareImpl.java From calcite with Apache License 2.0
@Override public RelRoot expandView(RelDataType rowType, String queryString,
    List<String> schemaPath, List<String> viewPath) {
  expansionDepth++;

  SqlParser parser = prepare.createParser(queryString);
  SqlNode sqlNode;
  try {
    sqlNode = parser.parseQuery();
  } catch (SqlParseException e) {
    throw new RuntimeException("parse failed", e);
  }
  // View may have different schema path than current connection.
  final CatalogReader catalogReader =
      this.catalogReader.withSchemaPath(schemaPath);
  SqlValidator validator = createSqlValidator(catalogReader);
  final SqlToRelConverter.Config config = SqlToRelConverter.configBuilder()
      .withTrimUnusedFields(true).build();
  SqlToRelConverter sqlToRelConverter =
      getSqlToRelConverter(validator, catalogReader, config);
  RelRoot root =
      sqlToRelConverter.convertQuery(sqlNode, true, false);

  --expansionDepth;
  return root;
}
Example #2
Source File: ViewExpanders.java From Bats with Apache License 2.0
/** Converts a {@code ViewExpander} to a {@code ToRelContext}. */
public static RelOptTable.ToRelContext toRelContext(
    RelOptTable.ViewExpander viewExpander,
    RelOptCluster cluster) {
  if (viewExpander instanceof RelOptTable.ToRelContext) {
    return (RelOptTable.ToRelContext) viewExpander;
  }
  return new RelOptTable.ToRelContext() {
    public RelOptCluster getCluster() {
      return cluster;
    }

    public RelRoot expandView(RelDataType rowType, String queryString,
        List<String> schemaPath, List<String> viewPath) {
      return viewExpander.expandView(rowType, queryString, schemaPath,
          viewPath);
    }
  };
}
Example #3
Source File: SortRemoveRuleTest.java From calcite with Apache License 2.0
/**
 * The default schema that is used in these tests provides tables sorted on the primary key. Due
 * to this scan operators always come with a {@link org.apache.calcite.rel.RelCollation} trait.
 */
private RelNode transform(String sql, RuleSet prepareRules) throws Exception {
  final SchemaPlus rootSchema = Frameworks.createRootSchema(true);
  final SchemaPlus defSchema = rootSchema.add("hr", new HrClusteredSchema());
  final FrameworkConfig config = Frameworks.newConfigBuilder()
      .parserConfig(SqlParser.Config.DEFAULT)
      .defaultSchema(defSchema)
      .traitDefs(ConventionTraitDef.INSTANCE, RelCollationTraitDef.INSTANCE)
      .programs(
          Programs.of(prepareRules),
          Programs.ofRules(SortRemoveRule.INSTANCE))
      .build();
  Planner planner = Frameworks.getPlanner(config);
  SqlNode parse = planner.parse(sql);
  SqlNode validate = planner.validate(parse);
  RelRoot planRoot = planner.rel(validate);
  RelNode planBefore = planRoot.rel;
  RelTraitSet desiredTraits = planBefore.getTraitSet()
      .replace(EnumerableConvention.INSTANCE);
  RelNode planAfter = planner.transform(0, desiredTraits, planBefore);
  return planner.transform(1, desiredTraits, planAfter);
}
Example #4
Source File: ViewTable.java From Bats with Apache License 2.0
private RelRoot expandView(RelOptTable.ToRelContext context,
    RelDataType rowType, String queryString) {
  try {
    final RelRoot root =
        context.expandView(rowType, queryString, schemaPath, viewPath);
    final RelNode rel = RelOptUtil.createCastRel(root.rel, rowType, true);
    // Expand any views
    final RelNode rel2 = rel.accept(
        new RelShuttleImpl() {
          @Override public RelNode visit(TableScan scan) {
            final RelOptTable table = scan.getTable();
            final TranslatableTable translatableTable =
                table.unwrap(TranslatableTable.class);
            if (translatableTable != null) {
              return translatableTable.toRel(context, table);
            }
            return super.visit(scan);
          }
        });
    return root.withRel(rel2);
  } catch (Exception e) {
    throw new RuntimeException("Error while parsing view definition: "
        + queryString, e);
  }
}
Example #5
Source File: ViewExpanders.java From calcite with Apache License 2.0
/** Creates a simple {@code ToRelContext} that cannot expand views. */
public static RelOptTable.ToRelContext simpleContext(RelOptCluster cluster,
    List<RelHint> hints) {
  return new RelOptTable.ToRelContext() {
    public RelOptCluster getCluster() {
      return cluster;
    }

    public RelRoot expandView(RelDataType rowType, String queryString,
        List<String> schemaPath, List<String> viewPath) {
      throw new UnsupportedOperationException();
    }

    public List<RelHint> getTableHints() {
      return hints;
    }
  };
}
Example #6
Source File: TableEnv.java From marble with Apache License 2.0
protected RelRoot getSqlPlanRel(String sql) throws Throwable {
  try (Planner planner = Frameworks.getPlanner(frameworkConfig)) {
    RelRoot root;
    final SqlNode parsedSqlNode = planner.parse(sql);
    final Pair<SqlNode, RelDataType> validatedSqlNodeAndType = planner
        .validateAndGetType(parsedSqlNode);
    root = planner.rel(validatedSqlNodeAndType.getKey());
    final Program program = createProgram();
    // getDesiredTraits
    final RelTraitSet desiredTraits = root.rel.getTraitSet()
        .replace(EnumerableConvention.INSTANCE)
        .replace(root.collation)
        .simplify();
    RelNode logicalRelNode = root.rel;
    final RelNode optimizedRelNode = program.run(
        root.rel.getCluster().getPlanner(), logicalRelNode, desiredTraits,
        Collections.emptyList(), Collections.emptyList());
    root = root.withRel(optimizedRelNode);
    return root;
  }
}
Example #7
Source File: PlannerImpl.java From Mycat2 with GNU General Public License v3.0
public RelRoot rel(SqlNode sql) {
  ensure(State.STATE_4_VALIDATED);
  assert validatedSqlNode != null;
  final RexBuilder rexBuilder = createRexBuilder();
  final RelOptCluster cluster = RelOptCluster.create(planner, rexBuilder);
  final SqlToRelConverter.Config config = SqlToRelConverter.configBuilder()
      .withConfig(sqlToRelConverterConfig)
      .withTrimUnusedFields(false)
      .build();
  final SqlToRelConverter sqlToRelConverter =
      new SqlToRelConverter(this, validator, createCatalogReader(), cluster,
          convertletTable, config);
  root = sqlToRelConverter.convertQuery(validatedSqlNode, false, true);
  root = root.withRel(sqlToRelConverter.flattenTypes(root.rel, true));
  final RelBuilder relBuilder =
      config.getRelBuilderFactory().create(cluster, null);
  root = root.withRel(
      RelDecorrelator.decorrelateQuery(root.rel, relBuilder));
  state = State.STATE_5_CONVERTED;
  return root;
}
Example #8
Source File: ViewExpanders.java From calcite with Apache License 2.0
/** Converts a {@code ViewExpander} to a {@code ToRelContext}. */
public static RelOptTable.ToRelContext toRelContext(
    RelOptTable.ViewExpander viewExpander,
    RelOptCluster cluster,
    List<RelHint> hints) {
  return new RelOptTable.ToRelContext() {
    public RelOptCluster getCluster() {
      return cluster;
    }

    public List<RelHint> getTableHints() {
      return hints;
    }

    public RelRoot expandView(RelDataType rowType, String queryString,
        List<String> schemaPath, List<String> viewPath) {
      return viewExpander.expandView(rowType, queryString, schemaPath,
          viewPath);
    }
  };
}
Example #9
Source File: ViewTable.java From calcite with Apache License 2.0
private RelRoot expandView(RelOptTable.ToRelContext context,
    RelDataType rowType, String queryString) {
  try {
    final RelRoot root =
        context.expandView(rowType, queryString, schemaPath, viewPath);
    final RelNode rel = RelOptUtil.createCastRel(root.rel, rowType, true);
    // Expand any views
    final RelNode rel2 = rel.accept(
        new RelShuttleImpl() {
          @Override public RelNode visit(TableScan scan) {
            final RelOptTable table = scan.getTable();
            final TranslatableTable translatableTable =
                table.unwrap(TranslatableTable.class);
            if (translatableTable != null) {
              return translatableTable.toRel(context, table);
            }
            return super.visit(scan);
          }
        });
    return root.withRel(rel2);
  } catch (Exception e) {
    throw new RuntimeException("Error while parsing view definition: "
        + queryString, e);
  }
}
Example #10
Source File: DremioSqlToRelConverter.java From dremio-oss with Apache License 2.0
private static RelRoot expandViewHelper(NamespaceKey path, final String viewOwner,
    final String queryString, final List<String> context,
    final SqlConverter sqlConverter) {
  // RelDataType rowType = view.getRowType(cluster.getTypeFactory());
  final DremioCatalogReader catalog;
  if (viewOwner != null) {
    catalog = sqlConverter.getCatalogReader().withSchemaPathAndUser(viewOwner, context);
  } else {
    catalog = sqlConverter.getCatalogReader().withSchemaPath(context);
  }

  final SqlConverter newConverter = new SqlConverter(sqlConverter, catalog);
  final SqlNode parsedNode = newConverter.parse(queryString);
  final SqlNode validatedNode = newConverter.validate(parsedNode);
  final RelRootPlus root = newConverter.toConvertibleRelRoot(validatedNode, true);
  if (path == null) {
    return root;
  }

  // we need to make sure that if an inner expansion is context sensitive, we consider the current
  // expansion context sensitive even if it isn't locally.
  final boolean contextSensitive =
      root.isContextSensitive() || ExpansionNode.isContextSensitive(root.rel);
  return new RelRoot(
      ExpansionNode.wrap(path, root.rel, root.validatedRowType, contextSensitive),
      root.validatedRowType, root.kind, root.fields, root.collation);
}
Example #11
Source File: DremioSqlToRelConverter.java From dremio-oss with Apache License 2.0
public static RelRoot expandView(NamespaceKey path, final String viewOwner,
    final String queryString, final List<String> context,
    final SqlConverter sqlConverter) {
  ViewExpansionToken token = null;
  try {
    token = sqlConverter.getViewExpansionContext().reserveViewExpansionToken(viewOwner);
    return expandViewHelper(path, viewOwner, queryString, context, sqlConverter);
  } catch (RuntimeException e) {
    if (!(e.getCause() instanceof UserNotFoundException)) {
      throw e;
    }

    final String delegatedUser = sqlConverter.getViewExpansionContext().getQueryUser();
    return expandViewHelper(path, delegatedUser, queryString, context, sqlConverter);
  } finally {
    if (token != null) {
      token.release();
    }
  }
}
Example #12
Source File: SamzaSqlDslConverter.java From samza with Apache License 2.0
@Override
public Collection<RelRoot> convertDsl(String dsl) {
  // TODO: Introduce an API to parse a dsl string and return one or more sql statements
  List<String> sqlStmts = fetchSqlFromConfig(config);

  QueryPlanner planner = getQueryPlanner(getSqlConfig(sqlStmts, config));
  List<RelRoot> relRoots = new LinkedList<>();
  for (String sql : sqlStmts) {
    // we always pass only select query to the planner for samza sql. The reason is that samza sql supports
    // schema evolution where source and destination could up to an extent have independent schema evolution while
    // calcite expects strict conformance of the destination schema with that of the fields in the select query.
    SamzaSqlQueryParser.QueryInfo qinfo = SamzaSqlQueryParser.parseQuery(sql);
    RelRoot relRoot = planner.plan(qinfo.getSelectQuery());
    relRoots.add(relRoot);
  }

  return relRoots;
}
Example #13
Source File: QueryTranslator.java From samza with Apache License 2.0
/**
 * For unit testing only
 */
@VisibleForTesting
void translate(SamzaSqlQueryParser.QueryInfo queryInfo, StreamApplicationDescriptor appDesc,
    int queryId) {
  QueryPlanner planner =
      new QueryPlanner(sqlConfig.getRelSchemaProviders(),
          sqlConfig.getInputSystemStreamConfigBySource(), sqlConfig.getUdfMetadata());
  final RelRoot relRoot = planner.plan(queryInfo.getSelectQuery());
  SamzaSqlExecutionContext executionContext = new SamzaSqlExecutionContext(sqlConfig);
  TranslatorContext translatorContext =
      new TranslatorContext(appDesc, relRoot, executionContext);
  translate(relRoot, sqlConfig.getOutputSystemStreams().get(queryId), translatorContext, queryId);
  Map<Integer, TranslatorContext> translatorContexts = new HashMap<>();
  translatorContexts.put(queryId, translatorContext.clone());
  appDesc.withApplicationTaskContextFactory(
      new ApplicationTaskContextFactory<SamzaSqlApplicationContext>() {
        @Override
        public SamzaSqlApplicationContext create(ExternalContext externalContext,
            JobContext jobContext, ContainerContext containerContext, TaskContext taskContext,
            ApplicationContainerContext applicationContainerContext) {
          return new SamzaSqlApplicationContext(translatorContexts);
        }
      });
}
Example #14
Source File: PlannerImpl.java From calcite with Apache License 2.0
public RelRoot rel(SqlNode sql) {
  ensure(State.STATE_4_VALIDATED);
  assert validatedSqlNode != null;
  final RexBuilder rexBuilder = createRexBuilder();
  final RelOptCluster cluster = RelOptCluster.create(planner, rexBuilder);
  final SqlToRelConverter.Config config = SqlToRelConverter.configBuilder()
      .withConfig(sqlToRelConverterConfig)
      .withTrimUnusedFields(false)
      .build();
  final SqlToRelConverter sqlToRelConverter =
      new SqlToRelConverter(this, validator, createCatalogReader(), cluster,
          convertletTable, config);
  root = sqlToRelConverter.convertQuery(validatedSqlNode, false, true);
  root = root.withRel(sqlToRelConverter.flattenTypes(root.rel, true));
  final RelBuilder relBuilder =
      config.getRelBuilderFactory().create(cluster, null);
  root = root.withRel(
      RelDecorrelator.decorrelateQuery(root.rel, relBuilder));
  state = State.STATE_5_CONVERTED;
  return root;
}
Example #15
Source File: SamzaSqlValidator.java From samza with Apache License 2.0
/**
 * Validate a list of sql statements
 * @param sqlStmts list of sql statements
 * @throws SamzaSqlValidatorException exception for sql validation
 */
public void validate(List<String> sqlStmts) throws SamzaSqlValidatorException {
  SamzaSqlApplicationConfig sqlConfig = SamzaSqlDslConverter.getSqlConfig(sqlStmts, config);
  QueryPlanner planner = SamzaSqlDslConverter.getQueryPlanner(sqlConfig);

  for (String sql : sqlStmts) {
    // we always pass only select query to the planner for samza sql. The reason is that samza sql supports
    // schema evolution where source and destination could up to an extent have independent schema evolution while
    // calcite expects strict conformance of the destination schema with that of the fields in the select query.
    SamzaSqlQueryParser.QueryInfo qinfo = SamzaSqlQueryParser.parseQuery(sql);
    RelRoot relRoot;
    try {
      relRoot = planner.plan(qinfo.getSelectQuery());
    } catch (SamzaException e) {
      throw new SamzaSqlValidatorException(String.format("Validation failed for sql stmt:\n%s\n with the following"
          + " error: \n%s\n", sql, e), e);
    }

    // Now that we have logical plan, validate different aspects.
    String sink = qinfo.getSink();
    validate(relRoot, sink, sqlConfig.getRelSchemaProviders().get(sink),
        sqlConfig.getSamzaRelConverters().get(sink));
  }
}
Example #16
Source File: SamzaSqlValidator.java From samza with Apache License 2.0
private void validateOutput(RelRoot relRoot, RelSchemaProvider outputRelSchemaProvider)
    throws SamzaSqlValidatorException {
  LogicalProject project = (LogicalProject) relRoot.rel;

  RelRecordType projectRecord = (RelRecordType) project.getRowType();
  RelRecordType outputRecord = (RelRecordType) QueryPlanner
      .getSourceRelSchema(outputRelSchemaProvider, new RelSchemaConverter());

  // Handle any DELETE ops.
  if (projectRecord.getFieldList().stream()
      .anyMatch(f -> f.getName().equalsIgnoreCase(SamzaSqlRelMessage.OP_NAME))) {
    validateDeleteOp(relRoot);
    return;
  }

  // Get Samza Sql schema along with Calcite schema. The reason is that the Calcite schema does not have a way
  // to represent optional fields while Samza Sql schema can represent optional fields. This is the reason that
  // we use SqlSchema in validating output.
  SqlSchema outputSqlSchema = QueryPlanner.getSourceSqlSchema(outputRelSchemaProvider);

  validateOutputRecords(outputSqlSchema, outputRecord, projectRecord, outputRelSchemaProvider);
  LOG.info("Samza Sql Validation finished successfully.");
}
Example #17
Source File: ServerDdlExecutor.java From calcite with Apache License 2.0
/** Populates the table called {@code name} by executing {@code query}. */
static void populate(SqlIdentifier name, SqlNode query,
    CalcitePrepare.Context context) {
  // Generate, prepare and execute an "INSERT INTO table query" statement.
  // (It's a bit inefficient that we convert from SqlNode to SQL and back
  // again.)
  final FrameworkConfig config = Frameworks.newConfigBuilder()
      .defaultSchema(context.getRootSchema().plus())
      .build();
  final Planner planner = Frameworks.getPlanner(config);
  try {
    final StringBuilder buf = new StringBuilder();
    final SqlWriterConfig writerConfig =
        SqlPrettyWriter.config().withAlwaysUseParentheses(false);
    final SqlPrettyWriter w = new SqlPrettyWriter(writerConfig, buf);
    buf.append("INSERT INTO ");
    name.unparse(w, 0, 0);
    buf.append(' ');
    query.unparse(w, 0, 0);
    final String sql = buf.toString();
    final SqlNode query1 = planner.parse(sql);
    final SqlNode query2 = planner.validate(query1);
    final RelRoot r = planner.rel(query2);
    final PreparedStatement prepare = context.getRelRunner().prepare(r.rel);
    int rowCount = prepare.executeUpdate();
    Util.discard(rowCount);
    prepare.close();
  } catch (SqlParseException | ValidationException | RelConversionException
      | SQLException e) {
    throw new RuntimeException(e);
  }
}
Example #18
Source File: CalcitePrepare.java From calcite with Apache License 2.0
public AnalyzeViewResult(CalcitePrepareImpl prepare,
    SqlValidator validator, String sql, SqlNode sqlNode,
    RelDataType rowType, RelRoot root, Table table,
    ImmutableList<String> tablePath, RexNode constraint,
    ImmutableIntList columnMapping, boolean modifiable) {
  super(prepare, validator, sql, sqlNode, rowType, root);
  this.table = table;
  this.tablePath = tablePath;
  this.constraint = constraint;
  this.columnMapping = columnMapping;
  this.modifiable = modifiable;
  Preconditions.checkArgument(modifiable == (table != null));
}
Example #19
Source File: CalciteMaterializer.java From calcite with Apache License 2.0
/** Populates a materialization record, converting a table path
 * (essentially a list of strings, like ["hr", "sales"]) into a table object
 * that can be used in the planning process. */
void populate(Materialization materialization) {
  SqlParser parser = SqlParser.create(materialization.sql);
  SqlNode node;
  try {
    node = parser.parseStmt();
  } catch (SqlParseException e) {
    throw new RuntimeException("parse failed", e);
  }
  final SqlToRelConverter.Config config = SqlToRelConverter.configBuilder()
      .withTrimUnusedFields(true).build();
  SqlToRelConverter sqlToRelConverter2 =
      getSqlToRelConverter(getSqlValidator(), catalogReader, config);

  RelRoot root = sqlToRelConverter2.convertQuery(node, true, true);
  materialization.queryRel = trimUnusedFields(root).rel;

  // Identify and substitute a StarTable in queryRel.
  //
  // It is possible that no StarTables match. That is OK, but the
  // materialization patterns that are recognized will not be as rich.
  //
  // It is possible that more than one StarTable matches. TBD: should we
  // take the best (whatever that means), or all of them?
  useStar(schema, materialization);

  RelOptTable table =
      this.catalogReader.getTable(materialization.materializedTable.path());
  materialization.tableRel = sqlToRelConverter2.toRel(table, ImmutableList.of());
}
Example #20
Source File: RelMetadataTest.java From calcite with Apache License 2.0
@Test void testBrokenCustomProviderWithMetadataFactory() {
  final List<String> buf = new ArrayList<>();
  ColTypeImpl.THREAD_LIST.set(buf);

  final String sql = "select deptno, count(*) from emp where deptno > 10 "
      + "group by deptno having count(*) = 0";
  final RelRoot root = tester
      .withClusterFactory(cluster -> {
        cluster.setMetadataProvider(
            ChainedRelMetadataProvider.of(
                ImmutableList.of(BrokenColTypeImpl.SOURCE,
                    cluster.getMetadataProvider())));
        return cluster;
      })
      .convertSqlToRel(sql);
  final RelNode rel = root.rel;

  assertThat(rel, instanceOf(LogicalFilter.class));
  final MyRelMetadataQuery mq = new MyRelMetadataQuery();

  try {
    assertThat(colType(mq, rel, 0), equalTo("DEPTNO-rel"));
    fail("expected error");
  } catch (IllegalArgumentException e) {
    final String value = "No handler for method [public abstract java.lang.String "
        + "org.apache.calcite.test.RelMetadataTest$ColType.getColType(int)] "
        + "applied to argument of type [interface org.apache.calcite.rel.RelNode]; "
        + "we recommend you create a catch-all (RelNode) handler";
    assertThat(e.getMessage(), is(value));
  }
}
Example #21
Source File: SqlToRelTestBase.java From calcite with Apache License 2.0
@Override public RelRoot expandView(RelDataType rowType, String queryString,
    List<String> schemaPath, List<String> viewPath) {
  try {
    SqlNode parsedNode = SqlParser.create(queryString).parseStmt();
    SqlNode validatedNode = validator.validate(parsedNode);
    SqlToRelConverter converter = new SqlToRelConverter(
        this, validator, catalogReader, cluster,
        StandardConvertletTable.INSTANCE, config);
    return converter.convertQuery(validatedNode, false, true);
  } catch (SqlParseException e) {
    throw new RuntimeException("Error happened while expanding view.", e);
  }
}
Example #22
Source File: TpcdsLatticeSuggesterTest.java From calcite with Apache License 2.0
List<Lattice> addQuery(String q)
    throws SqlParseException, ValidationException, RelConversionException {
  final Planner planner = new PlannerImpl(config);
  final SqlNode node = planner.parse(q);
  final SqlNode node2 = planner.validate(node);
  final RelRoot root = planner.rel(node2);
  return suggester.addQuery(root.project());
}
Example #23
Source File: SamzaSqlValidator.java From samza with Apache License 2.0
private void validate(RelRoot relRoot, String sink, RelSchemaProvider outputSchemaProvider,
    SamzaRelConverter outputRelSchemaConverter) throws SamzaSqlValidatorException {
  if (!skipOutputValidation(relRoot, sink, outputSchemaProvider, outputRelSchemaConverter)) {
    // Validate select fields (including Udf return types) with output schema
    validateOutput(relRoot, outputSchemaProvider);
  }

  // TODO:
  // 1. SAMZA-2314: Validate Udf arguments.
  // 2. SAMZA-2315: Validate operators. These are the operators that are supported by Calcite but not by Samza Sql.
  //    Eg: LogicalAggregate with sum function is not supported by Samza Sql.
}
Example #24
Source File: CalcitePrepareImpl.java From calcite with Apache License 2.0
CalcitePreparedExplain(
    RelDataType resultType,
    RelDataType parameterRowType,
    RelRoot root,
    SqlExplainFormat format,
    SqlExplainLevel detailLevel) {
  super(resultType, parameterRowType, root, format, detailLevel);
}
Example #25
Source File: CalciteResult.java From Bats with Apache License 2.0
public AnalyzeViewResult(SqlValidator validator,
    String sql, SqlNode sqlNode,
    RelDataType rowType, RelRoot root, Table table,
    ImmutableList<String> tablePath, RexNode constraint,
    ImmutableIntList columnMapping, boolean modifiable) {
  super(validator, sql, sqlNode, rowType, root);
  this.table = table;
  this.tablePath = tablePath;
  this.constraint = constraint;
  this.columnMapping = columnMapping;
  this.modifiable = modifiable;
  Preconditions.checkArgument(modifiable == (table != null));
}
Example #26
Source File: BatsOptimizerTest.java From Bats with Apache License 2.0
static RelNode testSqlToRelConverter(RelOptPlanner planner) throws Exception {
  RexBuilder rexBuilder = createRexBuilder();
  RelOptCluster cluster = RelOptCluster.create(planner, rexBuilder);
  RelOptTable.ViewExpander viewExpander = ViewExpanders.simpleContext(cluster);

  Pair<SqlNode, SqlValidator> pair = testSqlValidator();
  SqlNode sqlNode = pair.left;
  SqlValidator validator = pair.right;
  CatalogReader catalogReader = createCalciteCatalogReader();

  SqlRexConvertletTable convertletTable = StandardConvertletTable.INSTANCE;
  SqlToRelConverter.Config config = SqlToRelConverter.Config.DEFAULT;
  // Produce LogicalTableScan instead of EnumerableTableScan
  config = SqlToRelConverter.configBuilder().withConvertTableAccess(false).build();

  SqlToRelConverter converter = new SqlToRelConverter(viewExpander, validator,
      catalogReader, cluster, convertletTable, config);

  boolean needsValidation = false;
  boolean top = false;
  RelRoot root = converter.convertQuery(sqlNode, needsValidation, top);
  RelNode relNode = root.rel;

  String plan = RelOptUtil.toString(relNode);
  System.out.println("Logical Plan:");
  System.out.println("------------------------------------------------------------------");
  System.out.println(plan);
  System.out.println();

  // testPrograms(root.rel);
  return relNode;
}
Example #27
Source File: ViewExpanders.java From Bats with Apache License 2.0
/** Creates a simple {@code ToRelContext} that cannot expand views. */
public static RelOptTable.ToRelContext simpleContext(RelOptCluster cluster) {
  return new RelOptTable.ToRelContext() {
    public RelOptCluster getCluster() {
      return cluster;
    }

    public RelRoot expandView(RelDataType rowType, String queryString,
        List<String> schemaPath, List<String> viewPath) {
      throw new UnsupportedOperationException();
    }
  };
}
Example #28
Source File: TranslatorContext.java From samza with Apache License 2.0
/**
 * Create the instance of TranslatorContext
 * @param streamAppDesc Samza's streamAppDesc that is populated during the translation.
 * @param relRoot Root of the relational graph from calcite.
 * @param executionContext the execution context
 */
public TranslatorContext(StreamApplicationDescriptor streamAppDesc, RelRoot relRoot,
    SamzaSqlExecutionContext executionContext) {
  this.streamAppDesc = streamAppDesc;
  this.compiler = createExpressionCompiler(relRoot);
  this.executionContext = executionContext;
  this.dataContext = new DataContextImpl();
  this.relSamzaConverters = executionContext.getSamzaSqlApplicationConfig().getSamzaRelConverters();
  this.relTableKeyConverters = executionContext.getSamzaSqlApplicationConfig().getSamzaRelTableKeyConverters();
  this.messageStreams = new HashMap<>();
  this.relNodes = new HashMap<>();
  this.systemDescriptors = new HashMap<>();
}
Example #29
Source File: SamzaExecutor.java From samza with Apache License 2.0
SqlSchema generateResultSchema(Config config) {
  SamzaSqlDslConverter converter =
      (SamzaSqlDslConverter) new SamzaSqlDslConverterFactory().create(config);
  RelRoot relRoot = converter.convertDsl("").iterator().next();

  List<String> colNames = new ArrayList<>();
  List<String> colTypeNames = new ArrayList<>();
  for (RelDataTypeField dataTypeField : relRoot.validatedRowType.getFieldList()) {
    colNames.add(dataTypeField.getName());
    colTypeNames.add(dataTypeField.getType().toString());
  }

  // TODO: Need to find a way to convert the relational to SQL Schema. After fixing this TODO, please resolve
  // the TODOs in QueryResult class and executeQuery().
  return new SqlSchema(colNames, Collections.emptyList());
}
Example #30
Source File: SamzaSqlApplicationConfig.java From samza with Apache License 2.0
public static Collection<RelRoot> populateSystemStreamsAndGetRelRoots(List<String> dslStmts,
    Config config, List<String> inputSystemStreams, List<String> outputSystemStreams) {
  // TODO: Get the converter factory based on the file type. Create abstraction around this.
  DslConverterFactory dslConverterFactory = new SamzaSqlDslConverterFactory();
  DslConverter dslConverter = dslConverterFactory.create(config);
  Collection<RelRoot> relRoots = dslConverter.convertDsl(String.join("\n", dslStmts));

  // RelRoot does not have a sink node for Samza SQL dsl, so we can not traverse the relRoot tree to get
  // "outputSystemStreams".
  // FIXME: the snippet below does not work for Samza SQL dsl but is required for other dsls. Future fix could be
  // for samza sql to build TableModify for sink and stick it to the relRoot, so we could get output stream out of it.
  // for (RelRoot relRoot : relRoots) {
  //   SamzaSqlApplicationConfig.populateSystemStreams(relRoot.project(), inputSystemStreams, outputSystemStreams);
  // }

  // The below code is specific to Samza SQL dsl and should be removed once Samza SQL includes sink as part of
  // relRoot and the above code is uncommented.
  List<String> sqlStmts = SamzaSqlDslConverter.fetchSqlFromConfig(config);
  List<SamzaSqlQueryParser.QueryInfo> queryInfo = SamzaSqlDslConverter.fetchQueryInfo(sqlStmts);
  inputSystemStreams.addAll(queryInfo.stream().map(SamzaSqlQueryParser.QueryInfo::getSources)
      .flatMap(Collection::stream).collect(Collectors.toList()));
  outputSystemStreams.addAll(queryInfo.stream().map(SamzaSqlQueryParser.QueryInfo::getSink)
      .collect(Collectors.toList()));

  return relRoots;
}