org.apache.calcite.rel.logical.LogicalTableFunctionScan Java Examples
The following examples show how to use
org.apache.calcite.rel.logical.LogicalTableFunctionScan.
You can vote up the examples you find helpful or vote down those you don't,
and you can visit the original project or source file by following the link above each example. Related API usage can be found on the sidebar.
Example #1
Source File: QueryOperationConverter.java From flink with Apache License 2.0 | 5 votes |
/**
 * Converts a legacy {@link TableFunctionDefinition}-based table function call into a
 * Calcite {@link LogicalTableFunctionScan}.
 *
 * @param calculatedTable    operation describing the table-function application; supplies
 *                           the output field names and an optional function identifier
 * @param functionDefinition legacy definition wrapping the {@link TableFunction} and its
 *                           result {@code TypeInformation}
 * @param parameters         already-converted call arguments as Rex expressions
 * @param typeFactory        factory used to derive the Calcite row type
 * @return a {@code LogicalTableFunctionScan} over the current relational input
 */
private RelNode convertLegacyTableFunction(
        CalculatedQueryOperation calculatedTable,
        TableFunctionDefinition functionDefinition,
        List<RexNode> parameters,
        FlinkTypeFactory typeFactory) {
    String[] fieldNames = calculatedTable.getTableSchema().getFieldNames();

    TableFunction<?> tableFunction = functionDefinition.getTableFunction();
    // Bridge the legacy TypeInformation result type into the new DataType world.
    DataType resultType = fromLegacyInfoToDataType(functionDefinition.getResultType());
    TypedFlinkTableFunction function = new TypedFlinkTableFunction(
            tableFunction,
            fieldNames,
            resultType
    );

    final TableSqlFunction sqlFunction = new TableSqlFunction(
            // identifier may be absent for inline (anonymous) functions
            calculatedTable.getFunctionIdentifier().orElse(null),
            tableFunction.toString(),
            tableFunction,
            resultType,
            typeFactory,
            function,
            scala.Option.empty());
    return LogicalTableFunctionScan.create(
            relBuilder.peek().getCluster(),
            // table functions here have no relational inputs; rows come from the call itself
            Collections.emptyList(),
            relBuilder.getRexBuilder()
                    .makeCall(function.getRowType(typeFactory), sqlFunction, parameters),
            function.getElementType(null),
            function.getRowType(typeFactory),
            // null column mappings: no input-to-output column correspondence is declared
            null);
}
Example #2
Source File: RelFieldTrimmer.java From calcite with Apache License 2.0 | 5 votes |
/**
 * Variant of {@link #trimFields(RelNode, ImmutableBitSet, Set)} for
 * {@link org.apache.calcite.rel.logical.LogicalTableFunctionScan}.
 *
 * <p>A table function scan cannot have columns trimmed from it, so all input
 * fields are marked used and the scan's own projection stays the identity.
 */
public TrimResult trimFields(
    LogicalTableFunctionScan tabFun,
    ImmutableBitSet fieldsUsed,
    Set<RelDataTypeField> extraFields) {
  final RelDataType rowType = tabFun.getRowType();
  final int fieldCount = rowType.getFieldCount();
  final List<RelNode> newInputs = new ArrayList<>();

  for (RelNode input : tabFun.getInputs()) {
    final int inputFieldCount = input.getRowType().getFieldCount();
    // Require every input field: the function call may reference any of them.
    ImmutableBitSet inputFieldsUsed = ImmutableBitSet.range(inputFieldCount);

    // Create input with trimmed columns.
    final Set<RelDataTypeField> inputExtraFields = Collections.emptySet();
    TrimResult trimResult =
        trimChildRestore(
            tabFun, input, inputFieldsUsed, inputExtraFields);
    // Since all fields were requested, the child's mapping must be the identity.
    assert trimResult.right.isIdentity();
    newInputs.add(trimResult.left);
  }

  // Only copy the node if some input actually changed.
  LogicalTableFunctionScan newTabFun = tabFun;
  if (!tabFun.getInputs().equals(newInputs)) {
    newTabFun = tabFun.copy(tabFun.getTraitSet(), newInputs,
        tabFun.getCall(), tabFun.getElementType(), tabFun.getRowType(),
        tabFun.getColumnMappings());
  }
  assert newTabFun.getClass() == tabFun.getClass();

  // Always project all fields.
  Mapping mapping = Mappings.createIdentity(fieldCount);
  return result(newTabFun, mapping);
}
Example #3
Source File: EnumerableTableFunctionScanRule.java From calcite with Apache License 2.0 | 5 votes |
/**
 * Converts a {@link LogicalTableFunctionScan} into its enumerable counterpart,
 * switching the node (and its inputs) to {@code EnumerableConvention}.
 *
 * @param rel the logical table function scan to convert
 * @return an {@link EnumerableTableFunctionScan} with the same call, row type,
 *     element type and column mappings
 */
@Override
public RelNode convert(RelNode rel) {
  final LogicalTableFunctionScan scan = (LogicalTableFunctionScan) rel;
  final RelTraitSet newTraits =
      scan.getTraitSet().replace(EnumerableConvention.INSTANCE);
  // Inputs must be converted to the same convention as the new node.
  final List<RelNode> newInputs =
      convertList(scan.getInputs(), newTraits.getTrait(0));
  return new EnumerableTableFunctionScan(
      scan.getCluster(),
      newTraits,
      newInputs,
      scan.getElementType(),
      scan.getRowType(),
      scan.getCall(),
      scan.getColumnMappings());
}
Example #4
Source File: FilterTableFunctionTransposeRule.java From calcite with Apache License 2.0 | 5 votes |
/**
 * Creates a FilterTableFunctionTransposeRule.
 *
 * <p>Matches a {@code LogicalFilter} whose single input is a
 * {@code LogicalTableFunctionScan}, so the filter can be pushed below the
 * table function when column mappings permit.
 *
 * @param relBuilderFactory builder used to create relational expressions
 */
public FilterTableFunctionTransposeRule(RelBuilderFactory relBuilderFactory) {
    super(
            operand(LogicalFilter.class, operand(LogicalTableFunctionScan.class, any())),
            relBuilderFactory, null);
}
Example #5
Source File: FilterTableFunctionTransposeRule.java From Bats with Apache License 2.0 | 5 votes |
/**
 * Creates a FilterTableFunctionTransposeRule.
 *
 * <p>The operand pattern requires a {@code LogicalFilter} directly on top of a
 * {@code LogicalTableFunctionScan}; {@code null} description uses the default
 * rule name.
 *
 * @param relBuilderFactory builder used to create relational expressions
 */
public FilterTableFunctionTransposeRule(RelBuilderFactory relBuilderFactory) {
    super(
            operand(LogicalFilter.class, operand(LogicalTableFunctionScan.class, any())),
            relBuilderFactory, null);
}
Example #6
Source File: QueryOperationConverter.java From flink with Apache License 2.0 | 5 votes |
/**
 * Converts a {@link CalculatedQueryOperation} into a {@link LogicalTableFunctionScan}.
 *
 * <p>Only legacy {@link TableFunctionDefinition}s are supported here; functions
 * using the new type inference are rejected with a {@link ValidationException}.
 *
 * @param calculatedTable operation describing the table-function application
 * @return the equivalent relational node
 * @throws ValidationException if the function does not use the legacy definition
 */
@Override
public RelNode visit(CalculatedQueryOperation calculatedTable) {
    FlinkTypeFactory typeFactory = relBuilder.getTypeFactory();
    if (calculatedTable.getFunctionDefinition() instanceof TableFunctionDefinition) {
        TableFunctionDefinition functionDefinition =
                (TableFunctionDefinition) calculatedTable.getFunctionDefinition();

        String[] fieldNames = calculatedTable.getTableSchema().getFieldNames();
        // Identity mapping: output fields are taken in declaration order.
        int[] fieldIndices = IntStream.range(0, fieldNames.length).toArray();

        TableFunction<?> tableFunction = functionDefinition.getTableFunction();
        TypeInformation<?> rowType = functionDefinition.getResultType();
        FlinkTableFunctionImpl<?> function = new FlinkTableFunctionImpl<>(
                rowType,
                fieldIndices,
                fieldNames
        );

        final TableSqlFunction sqlFunction = new TableSqlFunction(
                tableFunction.functionIdentifier(),
                tableFunction.toString(),
                tableFunction,
                rowType,
                typeFactory,
                function);

        List<RexNode> parameters = convertToRexNodes(calculatedTable.getArguments());

        return LogicalTableFunctionScan.create(
                relBuilder.peek().getCluster(),
                // no relational inputs: rows come from the function call itself
                Collections.emptyList(),
                relBuilder.call(sqlFunction, parameters),
                function.getElementType(null),
                function.getRowType(typeFactory, null),
                null);
    }

    throw new ValidationException(
            "The new type inference for functions is only supported in the Blink planner.");
}
Example #7
Source File: CopyWithCluster.java From dremio-oss with Apache License 2.0 | 5 votes |
/**
 * Visits a table function scan, producing a copy bound to the target cluster.
 *
 * <p>Only {@link LogicalTableFunctionScan} instances can be copied; any other
 * subtype is recorded as unsupported before delegating to the base visitor.
 *
 * @param scan the scan node to copy
 * @return a copy of the scan on the new cluster, or the base visitor's result
 */
@Override
public RelNode visit(TableFunctionScan scan) {
  if (!(scan instanceof LogicalTableFunctionScan)) {
    // Record that this node type is not handled, then fall back to the default walk.
    notSupported(scan);
    return super.visit(scan);
  }
  return copyOf((LogicalTableFunctionScan) scan);
}
Example #8
Source File: CopyWithCluster.java From dremio-oss with Apache License 2.0 | 5 votes |
/**
 * Creates a copy of the given {@link LogicalTableFunctionScan} on this visitor's
 * target cluster, deep-copying traits, inputs, the call expression and row type.
 *
 * @param rel the scan to copy
 * @return an equivalent scan bound to {@code cluster}
 */
private RelNode copyOf(LogicalTableFunctionScan rel) {
    return new LogicalTableFunctionScan(
            cluster,
            copyOf(rel.getTraitSet()),
            // inputs are copied recursively so the whole subtree moves clusters
            visitAll(rel.getInputs()),
            copyOf(rel.getCall()),
            rel.getElementType(),
            copyOf(rel.getRowType()),
            // column mappings are immutable metadata and can be shared
            rel.getColumnMappings()
    );
}
Example #9
Source File: QueryOperationConverter.java From flink with Apache License 2.0 | 5 votes |
/**
 * Converts a typed {@link CalculatedQueryOperation} into a
 * {@link LogicalTableFunctionScan} using the new {@code DataType}-based stack.
 *
 * @param <U> result element type of the table function
 * @param calculatedTable operation describing the table-function application
 * @return the equivalent relational node
 */
@Override
public <U> RelNode visit(CalculatedQueryOperation<U> calculatedTable) {
    // Bridge the legacy TypeInformation result type into a DataType.
    DataType resultType = fromLegacyInfoToDataType(calculatedTable.getResultType());
    TableFunction<?> tableFunction = calculatedTable.getTableFunction();
    String[] fieldNames = calculatedTable.getTableSchema().getFieldNames();
    TypedFlinkTableFunction function = new TypedFlinkTableFunction(
            tableFunction, fieldNames, resultType);

    FlinkTypeFactory typeFactory = relBuilder.getTypeFactory();
    TableSqlFunction sqlFunction = new TableSqlFunction(
            tableFunction.functionIdentifier(),
            tableFunction.toString(),
            tableFunction,
            resultType,
            typeFactory,
            function,
            scala.Option.empty());

    List<RexNode> parameters = convertToRexNodes(calculatedTable.getParameters());

    return LogicalTableFunctionScan.create(
            relBuilder.peek().getCluster(),
            // no relational inputs: rows are produced by the function call
            Collections.emptyList(),
            relBuilder.call(sqlFunction, parameters),
            function.getElementType(null),
            function.getRowType(typeFactory, null, null),
            null);
}
Example #10
Source File: QueryOperationConverter.java From flink with Apache License 2.0 | 5 votes |
/**
 * Converts a typed {@link CalculatedQueryOperation} into a
 * {@link LogicalTableFunctionScan} using the legacy
 * {@code TypeInformation}-based function stack.
 *
 * @param <U> result element type of the table function
 * @param calculatedTable operation describing the table-function application
 * @return the equivalent relational node
 */
@Override
public <U> RelNode visit(CalculatedQueryOperation<U> calculatedTable) {
    String[] fieldNames = calculatedTable.getTableSchema().getFieldNames();
    // Identity mapping: output fields are taken in declaration order.
    int[] fieldIndices = IntStream.range(0, fieldNames.length).toArray();
    TypeInformation<U> resultType = calculatedTable.getResultType();

    FlinkTableFunctionImpl function = new FlinkTableFunctionImpl<>(
            resultType,
            fieldIndices,
            fieldNames);
    TableFunction<?> tableFunction = calculatedTable.getTableFunction();

    FlinkTypeFactory typeFactory = relBuilder.getTypeFactory();
    TableSqlFunction sqlFunction = new TableSqlFunction(
            tableFunction.functionIdentifier(),
            tableFunction.toString(),
            tableFunction,
            resultType,
            typeFactory,
            function);

    List<RexNode> parameters = convertToRexNodes(calculatedTable.getParameters());

    return LogicalTableFunctionScan.create(
            relBuilder.peek().getCluster(),
            // no relational inputs: rows are produced by the function call
            Collections.emptyList(),
            relBuilder.call(sqlFunction, parameters),
            function.getElementType(null),
            function.getRowType(typeFactory, null),
            null);
}
Example #11
Source File: RelFieldTrimmer.java From Bats with Apache License 2.0 | 5 votes |
/**
 * Variant of {@link #trimFields(RelNode, ImmutableBitSet, Set)} for
 * {@link org.apache.calcite.rel.logical.LogicalTableFunctionScan}.
 *
 * <p>A table function scan cannot have columns trimmed from it, so all input
 * fields are marked used and the scan's own projection stays the identity.
 */
public TrimResult trimFields(LogicalTableFunctionScan tabFun, ImmutableBitSet fieldsUsed,
        Set<RelDataTypeField> extraFields) {
    final RelDataType rowType = tabFun.getRowType();
    final int fieldCount = rowType.getFieldCount();
    final List<RelNode> newInputs = new ArrayList<>();

    for (RelNode input : tabFun.getInputs()) {
        final int inputFieldCount = input.getRowType().getFieldCount();
        // Require every input field: the function call may reference any of them.
        ImmutableBitSet inputFieldsUsed = ImmutableBitSet.range(inputFieldCount);

        // Create input with trimmed columns.
        final Set<RelDataTypeField> inputExtraFields = Collections.emptySet();
        TrimResult trimResult = trimChildRestore(tabFun, input, inputFieldsUsed, inputExtraFields);
        // Since all fields were requested, the child's mapping must be the identity.
        assert trimResult.right.isIdentity();
        newInputs.add(trimResult.left);
    }

    // Only copy the node if some input actually changed.
    LogicalTableFunctionScan newTabFun = tabFun;
    if (!tabFun.getInputs().equals(newInputs)) {
        newTabFun = tabFun.copy(tabFun.getTraitSet(), newInputs, tabFun.getCall(),
                tabFun.getElementType(), tabFun.getRowType(), tabFun.getColumnMappings());
    }
    assert newTabFun.getClass() == tabFun.getClass();

    // Always project all fields.
    Mapping mapping = Mappings.createIdentity(fieldCount);
    return result(newTabFun, mapping);
}
Example #12
Source File: FilterTableFunctionTransposeRule.java From calcite with Apache License 2.0 | 4 votes |
/**
 * Pushes a {@code LogicalFilter} below a {@code LogicalTableFunctionScan} when the
 * function declares 1-to-1, non-derived column mappings from its single input.
 *
 * <p>Bails out (no transformation) when: no column mappings are available, the
 * function has more than one relational input, input/output field counts differ,
 * or any mapping is not an identity (iInput != iOutput) or is derived.
 *
 * @param call rule call carrying the matched filter (rel 0) and scan (rel 1)
 */
public void onMatch(RelOptRuleCall call) {
    LogicalFilter filter = call.rel(0);
    LogicalTableFunctionScan funcRel = call.rel(1);
    Set<RelColumnMapping> columnMappings = funcRel.getColumnMappings();
    if (columnMappings == null || columnMappings.isEmpty()) {
        // No column mapping information, so no push-down
        // possible.
        return;
    }

    List<RelNode> funcInputs = funcRel.getInputs();
    if (funcInputs.size() != 1) {
        // TODO: support more than one relational input; requires
        // offsetting field indices, similar to join
        return;
    }
    // TODO: support mappings other than 1-to-1
    if (funcRel.getRowType().getFieldCount()
            != funcInputs.get(0).getRowType().getFieldCount()) {
        return;
    }
    for (RelColumnMapping mapping : columnMappings) {
        if (mapping.iInputColumn != mapping.iOutputColumn) {
            return;
        }
        if (mapping.derived) {
            return;
        }
    }
    final List<RelNode> newFuncInputs = new ArrayList<>();
    final RelOptCluster cluster = funcRel.getCluster();
    final RexNode condition = filter.getCondition();

    // create filters on top of each func input, modifying the filter
    // condition to reference the child instead
    RexBuilder rexBuilder = filter.getCluster().getRexBuilder();
    List<RelDataTypeField> origFields = funcRel.getRowType().getFieldList();
    // TODO: these need to be non-zero once we
    // support arbitrary mappings
    int[] adjustments = new int[origFields.size()];
    for (RelNode funcInput : funcInputs) {
        RexNode newCondition =
                condition.accept(
                        new RelOptUtil.RexInputConverter(
                                rexBuilder,
                                origFields,
                                funcInput.getRowType().getFieldList(),
                                adjustments));
        newFuncInputs.add(
                LogicalFilter.create(funcInput, newCondition));
    }

    // create a new UDX whose children are the filters created above
    LogicalTableFunctionScan newFuncRel =
            LogicalTableFunctionScan.create(cluster, newFuncInputs,
                    funcRel.getCall(), funcRel.getElementType(), funcRel.getRowType(),
                    columnMappings);
    call.transformTo(newFuncRel);
}
Example #13
Source File: RelFactories.java From calcite with Apache License 2.0 | 4 votes |
/**
 * Factory method that builds a {@link LogicalTableFunctionScan}.
 *
 * <p>The scan's row type is taken directly from the type of the call
 * expression.
 *
 * @param cluster        cluster the new node belongs to
 * @param inputs         relational inputs of the table function
 * @param rexCall        the table function invocation expression
 * @param elementType    element type produced by the function, may be null
 * @param columnMappings input-to-output column mappings, may be null
 * @return the new table function scan
 */
@Override
public RelNode createTableFunctionScan(RelOptCluster cluster,
    List<RelNode> inputs, RexNode rexCall, Type elementType,
    Set<RelColumnMapping> columnMappings) {
  // The call expression's type doubles as the scan's row type.
  final RelDataType rowType = rexCall.getType();
  return LogicalTableFunctionScan.create(cluster, inputs, rexCall,
      elementType, rowType, columnMappings);
}
Example #14
Source File: EnumerableTableFunctionScanRule.java From calcite with Apache License 2.0 | 4 votes |
/**
 * Creates an EnumerableTableFunctionScanRule.
 *
 * <p>Converts any {@code LogicalTableFunctionScan} (the predicate always
 * matches) from {@code Convention.NONE} to {@code EnumerableConvention}.
 *
 * @param relBuilderFactory Builder for relational expressions
 */
public EnumerableTableFunctionScanRule(RelBuilderFactory relBuilderFactory) {
    super(LogicalTableFunctionScan.class, (Predicate<RelNode>) r -> true,
            Convention.NONE, EnumerableConvention.INSTANCE,
            relBuilderFactory, "EnumerableTableFunctionScanRule");
}
Example #15
Source File: RelStructuredTypeFlattener.java From Bats with Apache License 2.0 | 4 votes |
/**
 * Rewrites a {@link LogicalTableFunctionScan} during structured-type flattening.
 *
 * <p>Table function scans need no special handling; the generic rewrite
 * (which re-maps inputs and registers the node) is sufficient.
 *
 * @param rel the scan to rewrite
 */
public void rewriteRel(LogicalTableFunctionScan rel) {
    rewriteGeneric(rel);
}
Example #16
Source File: RelDecorrelator.java From calcite with Apache License 2.0 | 4 votes |
/**
 * Decorrelates a {@link LogicalTableFunctionScan}.
 *
 * <p>A scan whose call expression contains a correlated variable cannot be
 * decorrelated here, so {@code null} is returned to signal failure; otherwise
 * the generic {@link RelNode} handling applies.
 *
 * @param rel the table function scan
 * @return the decorrelated frame, or {@code null} if the call is correlated
 */
public Frame decorrelateRel(LogicalTableFunctionScan rel) {
  return RexUtil.containsCorrelation(rel.getCall())
      ? null
      : decorrelateRel((RelNode) rel);
}
Example #17
Source File: RelStructuredTypeFlattener.java From calcite with Apache License 2.0 | 4 votes |
/**
 * Rewrites a {@link LogicalTableFunctionScan} during structured-type flattening.
 *
 * <p>No scan-specific logic is required; the generic rewrite path handles
 * input re-mapping and registration.
 *
 * @param rel the scan to rewrite
 */
public void rewriteRel(LogicalTableFunctionScan rel) {
    rewriteGeneric(rel);
}
Example #18
Source File: FilterTableFunctionTransposeRule.java From Bats with Apache License 2.0 | 4 votes |
/**
 * Pushes a {@code LogicalFilter} below a {@code LogicalTableFunctionScan} when the
 * function declares 1-to-1, non-derived column mappings from its single input.
 *
 * <p>Returns without transforming when column mappings are missing, when there
 * is more than one relational input, when input and output field counts differ,
 * or when any mapping is non-identity or derived.
 *
 * @param call rule call carrying the matched filter (rel 0) and scan (rel 1)
 */
public void onMatch(RelOptRuleCall call) {
    LogicalFilter filter = call.rel(0);
    LogicalTableFunctionScan funcRel = call.rel(1);
    Set<RelColumnMapping> columnMappings = funcRel.getColumnMappings();
    if (columnMappings == null || columnMappings.isEmpty()) {
        // No column mapping information, so no push-down
        // possible.
        return;
    }

    List<RelNode> funcInputs = funcRel.getInputs();
    if (funcInputs.size() != 1) {
        // TODO: support more than one relational input; requires
        // offsetting field indices, similar to join
        return;
    }
    // TODO: support mappings other than 1-to-1
    if (funcRel.getRowType().getFieldCount()
            != funcInputs.get(0).getRowType().getFieldCount()) {
        return;
    }
    for (RelColumnMapping mapping : columnMappings) {
        if (mapping.iInputColumn != mapping.iOutputColumn) {
            return;
        }
        if (mapping.derived) {
            return;
        }
    }
    final List<RelNode> newFuncInputs = new ArrayList<>();
    final RelOptCluster cluster = funcRel.getCluster();
    final RexNode condition = filter.getCondition();

    // create filters on top of each func input, modifying the filter
    // condition to reference the child instead
    RexBuilder rexBuilder = filter.getCluster().getRexBuilder();
    List<RelDataTypeField> origFields = funcRel.getRowType().getFieldList();
    // TODO: these need to be non-zero once we
    // support arbitrary mappings
    int[] adjustments = new int[origFields.size()];
    for (RelNode funcInput : funcInputs) {
        RexNode newCondition =
                condition.accept(
                        new RelOptUtil.RexInputConverter(
                                rexBuilder,
                                origFields,
                                funcInput.getRowType().getFieldList(),
                                adjustments));
        newFuncInputs.add(
                LogicalFilter.create(funcInput, newCondition));
    }

    // create a new UDX whose children are the filters created above
    LogicalTableFunctionScan newFuncRel =
            LogicalTableFunctionScan.create(cluster, newFuncInputs,
                    funcRel.getCall(), funcRel.getElementType(), funcRel.getRowType(),
                    columnMappings);
    call.transformTo(newFuncRel);
}