Java Code Examples for org.apache.calcite.rel.logical.LogicalTableFunctionScan#create()
The following examples show how to use
org.apache.calcite.rel.logical.LogicalTableFunctionScan#create().
You can vote up the examples you like or vote down the ones you don't like,
and you can go to the original project or source file by following the links above each example. You may also check out the related API usage on the sidebar.
Example 1
Source File: QueryOperationConverter.java From flink with Apache License 2.0 | 5 votes |
/**
 * Converts a {@link CalculatedQueryOperation} (a table-function call) into a
 * {@link LogicalTableFunctionScan}.
 *
 * <p>The output column names and positional indices are taken from the operation's
 * table schema, the Flink {@link TableFunction} is wrapped so Calcite can derive its
 * row type, and the resulting scan has no relational inputs — the call expression
 * itself carries the arguments.
 *
 * @param calculatedTable the table-function query operation to convert
 * @return the equivalent {@code LogicalTableFunctionScan}
 */
@Override
public <U> RelNode visit(CalculatedQueryOperation<U> calculatedTable) {
  // Output columns: names from the schema, indices are simply positional.
  final String[] columnNames = calculatedTable.getTableSchema().getFieldNames();
  final int[] columnIndices = IntStream.range(0, columnNames.length).toArray();
  final TypeInformation<U> outputType = calculatedTable.getResultType();

  // Adapter that lets Calcite reason about the Flink function's row type.
  final FlinkTableFunctionImpl function =
      new FlinkTableFunctionImpl<>(outputType, columnIndices, columnNames);

  final TableFunction<?> udtf = calculatedTable.getTableFunction();
  final FlinkTypeFactory typeFactory = relBuilder.getTypeFactory();
  final TableSqlFunction sqlFunction =
      new TableSqlFunction(
          udtf.functionIdentifier(),
          udtf.toString(),
          udtf,
          outputType,
          typeFactory,
          function);

  final List<RexNode> operands = convertToRexNodes(calculatedTable.getParameters());

  // No relational inputs: the table function call is the whole scan.
  return LogicalTableFunctionScan.create(
      relBuilder.peek().getCluster(),
      Collections.emptyList(),
      relBuilder.call(sqlFunction, operands),
      function.getElementType(null),
      function.getRowType(typeFactory, null),
      null);
}
Example 2
Source File: QueryOperationConverter.java From flink with Apache License 2.0 | 5 votes |
/**
 * Converts a {@link CalculatedQueryOperation} (a table-function call) into a
 * {@link LogicalTableFunctionScan}, translating the legacy {@code TypeInformation}
 * result type into a {@link DataType} first.
 *
 * @param calculatedTable the table-function query operation to convert
 * @return the equivalent {@code LogicalTableFunctionScan}
 */
@Override
public <U> RelNode visit(CalculatedQueryOperation<U> calculatedTable) {
  // Bridge the legacy TypeInformation-based result type to the DataType world.
  final DataType outputType = fromLegacyInfoToDataType(calculatedTable.getResultType());
  final TableFunction<?> udtf = calculatedTable.getTableFunction();
  final String[] columnNames = calculatedTable.getTableSchema().getFieldNames();

  // Adapter exposing the Flink function's row type to Calcite.
  final TypedFlinkTableFunction function =
      new TypedFlinkTableFunction(udtf, columnNames, outputType);

  final FlinkTypeFactory typeFactory = relBuilder.getTypeFactory();
  final TableSqlFunction sqlFunction =
      new TableSqlFunction(
          udtf.functionIdentifier(),
          udtf.toString(),
          udtf,
          outputType,
          typeFactory,
          function,
          scala.Option.empty());

  final List<RexNode> operands = convertToRexNodes(calculatedTable.getParameters());

  // No relational inputs: the table function call itself carries the arguments.
  return LogicalTableFunctionScan.create(
      relBuilder.peek().getCluster(),
      Collections.emptyList(),
      relBuilder.call(sqlFunction, operands),
      function.getElementType(null),
      function.getRowType(typeFactory, null, null),
      null);
}
Example 3
Source File: QueryOperationConverter.java From flink with Apache License 2.0 | 5 votes |
/**
 * Converts a {@link CalculatedQueryOperation} into a {@link LogicalTableFunctionScan}.
 *
 * <p>Only legacy {@link TableFunctionDefinition}-backed functions are supported here;
 * any other function definition is rejected with a {@link ValidationException}.
 *
 * @param calculatedTable the table-function query operation to convert
 * @return the equivalent {@code LogicalTableFunctionScan}
 * @throws ValidationException if the function definition is not a legacy
 *     {@code TableFunctionDefinition}
 */
@Override
public RelNode visit(CalculatedQueryOperation calculatedTable) {
  final FlinkTypeFactory typeFactory = relBuilder.getTypeFactory();

  // Guard: only the legacy function stack is handled by this planner.
  if (!(calculatedTable.getFunctionDefinition() instanceof TableFunctionDefinition)) {
    throw new ValidationException(
        "The new type inference for functions is only supported in the Blink planner.");
  }

  final TableFunctionDefinition functionDefinition =
      (TableFunctionDefinition) calculatedTable.getFunctionDefinition();

  // Output columns: names from the schema, indices are simply positional.
  final String[] columnNames = calculatedTable.getTableSchema().getFieldNames();
  final int[] columnIndices = IntStream.range(0, columnNames.length).toArray();

  final TableFunction<?> udtf = functionDefinition.getTableFunction();
  final TypeInformation<?> outputType = functionDefinition.getResultType();

  // Adapter that lets Calcite reason about the Flink function's row type.
  final FlinkTableFunctionImpl<?> function =
      new FlinkTableFunctionImpl<>(outputType, columnIndices, columnNames);

  final TableSqlFunction sqlFunction =
      new TableSqlFunction(
          udtf.functionIdentifier(),
          udtf.toString(),
          udtf,
          outputType,
          typeFactory,
          function);

  final List<RexNode> operands = convertToRexNodes(calculatedTable.getArguments());

  // No relational inputs: the table function call itself carries the arguments.
  return LogicalTableFunctionScan.create(
      relBuilder.peek().getCluster(),
      Collections.emptyList(),
      relBuilder.call(sqlFunction, operands),
      function.getElementType(null),
      function.getRowType(typeFactory, null),
      null);
}
Example 4
Source File: QueryOperationConverter.java From flink with Apache License 2.0 | 5 votes |
/**
 * Builds a {@link LogicalTableFunctionScan} for a legacy
 * {@link TableFunctionDefinition}-backed table function.
 *
 * @param calculatedTable the query operation being converted; supplies the schema
 *     and the (possibly absent) function identifier
 * @param functionDefinition the legacy definition carrying the {@code TableFunction}
 *     and its result type
 * @param parameters the already-converted call arguments
 * @param typeFactory factory used to derive the Calcite row type
 * @return the equivalent {@code LogicalTableFunctionScan}
 */
private RelNode convertLegacyTableFunction(
    CalculatedQueryOperation calculatedTable,
    TableFunctionDefinition functionDefinition,
    List<RexNode> parameters,
    FlinkTypeFactory typeFactory) {
  final String[] columnNames = calculatedTable.getTableSchema().getFieldNames();
  final TableFunction<?> udtf = functionDefinition.getTableFunction();

  // Bridge the legacy TypeInformation-based result type to the DataType world.
  final DataType outputType = fromLegacyInfoToDataType(functionDefinition.getResultType());

  // Adapter exposing the Flink function's row type to Calcite.
  final TypedFlinkTableFunction function =
      new TypedFlinkTableFunction(udtf, columnNames, outputType);

  final TableSqlFunction sqlFunction =
      new TableSqlFunction(
          calculatedTable.getFunctionIdentifier().orElse(null),
          udtf.toString(),
          udtf,
          outputType,
          typeFactory,
          function,
          scala.Option.empty());

  // No relational inputs: the table function call itself carries the arguments.
  return LogicalTableFunctionScan.create(
      relBuilder.peek().getCluster(),
      Collections.emptyList(),
      relBuilder
          .getRexBuilder()
          .makeCall(function.getRowType(typeFactory), sqlFunction, parameters),
      function.getElementType(null),
      function.getRowType(typeFactory),
      null);
}
Example 5
Source File: FilterTableFunctionTransposeRule.java From Bats with Apache License 2.0 | 4 votes |
public void onMatch(RelOptRuleCall call) { LogicalFilter filter = call.rel(0); LogicalTableFunctionScan funcRel = call.rel(1); Set<RelColumnMapping> columnMappings = funcRel.getColumnMappings(); if (columnMappings == null || columnMappings.isEmpty()) { // No column mapping information, so no push-down // possible. return; } List<RelNode> funcInputs = funcRel.getInputs(); if (funcInputs.size() != 1) { // TODO: support more than one relational input; requires // offsetting field indices, similar to join return; } // TODO: support mappings other than 1-to-1 if (funcRel.getRowType().getFieldCount() != funcInputs.get(0).getRowType().getFieldCount()) { return; } for (RelColumnMapping mapping : columnMappings) { if (mapping.iInputColumn != mapping.iOutputColumn) { return; } if (mapping.derived) { return; } } final List<RelNode> newFuncInputs = new ArrayList<>(); final RelOptCluster cluster = funcRel.getCluster(); final RexNode condition = filter.getCondition(); // create filters on top of each func input, modifying the filter // condition to reference the child instead RexBuilder rexBuilder = filter.getCluster().getRexBuilder(); List<RelDataTypeField> origFields = funcRel.getRowType().getFieldList(); // TODO: these need to be non-zero once we // support arbitrary mappings int[] adjustments = new int[origFields.size()]; for (RelNode funcInput : funcInputs) { RexNode newCondition = condition.accept( new RelOptUtil.RexInputConverter( rexBuilder, origFields, funcInput.getRowType().getFieldList(), adjustments)); newFuncInputs.add( LogicalFilter.create(funcInput, newCondition)); } // create a new UDX whose children are the filters created above LogicalTableFunctionScan newFuncRel = LogicalTableFunctionScan.create(cluster, newFuncInputs, funcRel.getCall(), funcRel.getElementType(), funcRel.getRowType(), columnMappings); call.transformTo(newFuncRel); }
Example 6
Source File: FilterTableFunctionTransposeRule.java From calcite with Apache License 2.0 | 4 votes |
public void onMatch(RelOptRuleCall call) { LogicalFilter filter = call.rel(0); LogicalTableFunctionScan funcRel = call.rel(1); Set<RelColumnMapping> columnMappings = funcRel.getColumnMappings(); if (columnMappings == null || columnMappings.isEmpty()) { // No column mapping information, so no push-down // possible. return; } List<RelNode> funcInputs = funcRel.getInputs(); if (funcInputs.size() != 1) { // TODO: support more than one relational input; requires // offsetting field indices, similar to join return; } // TODO: support mappings other than 1-to-1 if (funcRel.getRowType().getFieldCount() != funcInputs.get(0).getRowType().getFieldCount()) { return; } for (RelColumnMapping mapping : columnMappings) { if (mapping.iInputColumn != mapping.iOutputColumn) { return; } if (mapping.derived) { return; } } final List<RelNode> newFuncInputs = new ArrayList<>(); final RelOptCluster cluster = funcRel.getCluster(); final RexNode condition = filter.getCondition(); // create filters on top of each func input, modifying the filter // condition to reference the child instead RexBuilder rexBuilder = filter.getCluster().getRexBuilder(); List<RelDataTypeField> origFields = funcRel.getRowType().getFieldList(); // TODO: these need to be non-zero once we // support arbitrary mappings int[] adjustments = new int[origFields.size()]; for (RelNode funcInput : funcInputs) { RexNode newCondition = condition.accept( new RelOptUtil.RexInputConverter( rexBuilder, origFields, funcInput.getRowType().getFieldList(), adjustments)); newFuncInputs.add( LogicalFilter.create(funcInput, newCondition)); } // create a new UDX whose children are the filters created above LogicalTableFunctionScan newFuncRel = LogicalTableFunctionScan.create(cluster, newFuncInputs, funcRel.getCall(), funcRel.getElementType(), funcRel.getRowType(), columnMappings); call.transformTo(newFuncRel); }
Example 7
Source File: RelFactories.java From calcite with Apache License 2.0 | 4 votes |
/**
 * Creates a {@link LogicalTableFunctionScan}, deriving the scan's row type directly
 * from the type of the table-function call expression.
 *
 * @param cluster the cluster the new node belongs to
 * @param inputs relational inputs of the scan (often empty)
 * @param rexCall the table-function call expression
 * @param elementType element type of the function's result, or null
 * @param columnMappings mappings from input to output columns, or null if unknown
 * @return the new {@code LogicalTableFunctionScan}
 */
@Override
public RelNode createTableFunctionScan(
    RelOptCluster cluster,
    List<RelNode> inputs,
    RexNode rexCall,
    Type elementType,
    Set<RelColumnMapping> columnMappings) {
  // The call's own type doubles as the scan's row type.
  final RelDataType rowType = rexCall.getType();
  return LogicalTableFunctionScan.create(
      cluster, inputs, rexCall, elementType, rowType, columnMappings);
}