Java Code Examples for org.apache.flink.table.api.TableSchema#Builder
The following examples show how to use org.apache.flink.table.api.TableSchema#Builder.
Each example notes the project and source file it was taken from.
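Before the examples, here is a minimal sketch of the typical builder flow. The field names, the constraint name, and the watermark expression are invented for illustration:

import org.apache.flink.table.api.DataTypes;
import org.apache.flink.table.api.TableSchema;

public class BuilderSketch {
    public static void main(String[] args) {
        // Declare physical columns, a watermark on the rowtime column, and a primary key.
        TableSchema schema = TableSchema.builder()
                .field("id", DataTypes.BIGINT().notNull())
                .field("name", DataTypes.STRING())
                .field("ts", DataTypes.TIMESTAMP(3))
                .watermark("ts", "ts - INTERVAL '5' SECOND", DataTypes.TIMESTAMP(3))
                .primaryKey("pk_id", new String[]{"id"})
                .build();
        System.out.println(schema);
    }
}

The examples below follow the same pattern: create a Builder, add fields (and optionally watermarks and constraints), then call build().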
Example 1
Source File: TableEnvironmentImpl.java From flink with Apache License 2.0
@Override
public TableResult executeInternal(List<ModifyOperation> operations) {
    List<Transformation<?>> transformations = translate(operations);
    List<String> sinkIdentifierNames = extractSinkIdentifierNames(operations);
    String jobName = "insert-into_" + String.join(",", sinkIdentifierNames);
    Pipeline pipeline = execEnv.createPipeline(transformations, tableConfig, jobName);
    try {
        JobClient jobClient = execEnv.executeAsync(pipeline);
        TableSchema.Builder builder = TableSchema.builder();
        Object[] affectedRowCounts = new Long[operations.size()];
        for (int i = 0; i < operations.size(); ++i) {
            // use sink identifier name as field name
            builder.field(sinkIdentifierNames.get(i), DataTypes.BIGINT());
            affectedRowCounts[i] = -1L;
        }
        return TableResultImpl.builder()
                .jobClient(jobClient)
                .resultKind(ResultKind.SUCCESS_WITH_CONTENT)
                .tableSchema(builder.build())
                .data(Collections.singletonList(Row.of(affectedRowCounts)))
                .build();
    } catch (Exception e) {
        throw new TableException("Failed to execute sql", e);
    }
}
Example 2
Source File: SelectTableSinkSchemaConverter.java From flink with Apache License 2.0
/**
 * Convert time attributes (proc time / event time) to normal timestamps,
 * and return a new {@link TableSchema}.
 */
static TableSchema convertTimeAttributeToRegularTimestamp(TableSchema tableSchema) {
    DataType[] oldTypes = tableSchema.getFieldDataTypes();
    String[] oldNames = tableSchema.getFieldNames();

    TableSchema.Builder builder = TableSchema.builder();
    for (int i = 0; i < tableSchema.getFieldCount(); i++) {
        DataType fieldType = oldTypes[i];
        String fieldName = oldNames[i];
        if (fieldType.getLogicalType() instanceof TimestampType) {
            TimestampType timestampType = (TimestampType) fieldType.getLogicalType();
            if (!timestampType.getKind().equals(TimestampKind.REGULAR)) {
                // converts `TIME ATTRIBUTE(ROWTIME)`/`TIME ATTRIBUTE(PROCTIME)` to `TIMESTAMP`
                builder.field(fieldName, Types.SQL_TIMESTAMP);
                continue;
            }
        }
        builder.field(fieldName, fieldType);
    }
    return builder.build();
}
Example 3
Source File: TableSchemaUtils.java From flink with Apache License 2.0
/**
 * Creates a new schema but drops the constraint with the given name.
 */
public static TableSchema dropConstraint(TableSchema oriSchema, String constraintName) {
    // Validate that the constraint name refers to the existing primary key.
    Optional<UniqueConstraint> uniqueConstraintOpt = oriSchema.getPrimaryKey();
    if (!uniqueConstraintOpt.isPresent()
            || !uniqueConstraintOpt.get().getName().equals(constraintName)) {
        throw new ValidationException(
                String.format("Constraint %s to drop does not exist", constraintName));
    }
    TableSchema.Builder builder = builderWithGivenColumns(oriSchema.getTableColumns());
    // Copy watermark specification.
    for (WatermarkSpec wms : oriSchema.getWatermarkSpecs()) {
        builder.watermark(
                wms.getRowtimeAttribute(),
                wms.getWatermarkExpr(),
                wms.getWatermarkExprOutputType());
    }
    return builder.build();
}
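To make the usage concrete, here is a hypothetical call site for dropConstraint. It is a minimal sketch: the schema and the constraint name "pk_id" are invented, and TableSchemaUtils is assumed to live in org.apache.flink.table.utils:

import org.apache.flink.table.api.DataTypes;
import org.apache.flink.table.api.TableSchema;
import org.apache.flink.table.utils.TableSchemaUtils;

public class DropConstraintSketch {
    public static void main(String[] args) {
        TableSchema withPk = TableSchema.builder()
                .field("id", DataTypes.BIGINT().notNull())
                .field("name", DataTypes.STRING())
                .primaryKey("pk_id", new String[]{"id"})
                .build();

        // Any name other than "pk_id" would trigger the ValidationException above.
        TableSchema withoutPk = TableSchemaUtils.dropConstraint(withPk, "pk_id");
        System.out.println(withoutPk.getPrimaryKey().isPresent()); // prints: false
    }
}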
Example 4
Source File: HiveCatalogHiveMetadataTest.java From flink with Apache License 2.0
@Test
public void testCreateTableWithConstraints() throws Exception {
    Assume.assumeTrue(HiveVersionTestUtil.HIVE_310_OR_LATER);
    HiveCatalog hiveCatalog = (HiveCatalog) catalog;
    hiveCatalog.createDatabase(db1, createDb(), false);
    TableSchema.Builder builder = TableSchema.builder();
    builder.fields(
            new String[]{"x", "y", "z"},
            new DataType[]{
                    DataTypes.INT().notNull(),
                    DataTypes.TIMESTAMP(9).notNull(),
                    DataTypes.BIGINT()});
    builder.primaryKey("pk_name", new String[]{"x"});
    hiveCatalog.createTable(
            path1,
            new CatalogTableImpl(builder.build(), getBatchTableProperties(), null),
            false);
    CatalogTable catalogTable = (CatalogTable) hiveCatalog.getTable(path1);
    assertTrue("PK not present", catalogTable.getSchema().getPrimaryKey().isPresent());
    UniqueConstraint pk = catalogTable.getSchema().getPrimaryKey().get();
    assertEquals("pk_name", pk.getName());
    assertEquals(Collections.singletonList("x"), pk.getColumns());
    assertFalse(catalogTable.getSchema().getFieldDataTypes()[0].getLogicalType().isNullable());
    assertFalse(catalogTable.getSchema().getFieldDataTypes()[1].getLogicalType().isNullable());
    assertTrue(catalogTable.getSchema().getFieldDataTypes()[2].getLogicalType().isNullable());

    hiveCatalog.dropDatabase(db1, false, true);
}
Example 5
Source File: HiveTableUtil.java From flink with Apache License 2.0
/**
 * Creates a Flink TableSchema from a Hive table's columns and partition keys.
 */
public static TableSchema createTableSchema(
        List<FieldSchema> cols,
        List<FieldSchema> partitionKeys,
        Set<String> notNullColumns,
        UniqueConstraint primaryKey) {
    List<FieldSchema> allCols = new ArrayList<>(cols);
    allCols.addAll(partitionKeys);

    String[] colNames = new String[allCols.size()];
    DataType[] colTypes = new DataType[allCols.size()];

    for (int i = 0; i < allCols.size(); i++) {
        FieldSchema fs = allCols.get(i);
        colNames[i] = fs.getName();
        colTypes[i] = HiveTypeUtil.toFlinkType(
                TypeInfoUtils.getTypeInfoFromTypeString(fs.getType()));
        if (notNullColumns.contains(colNames[i])) {
            colTypes[i] = colTypes[i].notNull();
        }
    }

    TableSchema.Builder builder = TableSchema.builder().fields(colNames, colTypes);
    if (primaryKey != null) {
        builder.primaryKey(primaryKey.getName(), primaryKey.getColumns().toArray(new String[0]));
    }
    return builder.build();
}
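A hypothetical call site for the method above. This sketch assumes the Hive metastore API (FieldSchema) and the flink-connector-hive classes are on the classpath; the column names and types are invented:

import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.Set;
import org.apache.flink.table.api.TableSchema;
import org.apache.flink.table.catalog.hive.util.HiveTableUtil; // package assumed from flink-connector-hive
import org.apache.hadoop.hive.metastore.api.FieldSchema;

public class HiveSchemaSketch {
    public static void main(String[] args) {
        // Two regular columns plus one partition column; "id" is NOT NULL.
        List<FieldSchema> cols = Arrays.asList(
                new FieldSchema("id", "bigint", null),
                new FieldSchema("name", "string", null));
        List<FieldSchema> partitionKeys = Collections.singletonList(
                new FieldSchema("dt", "string", null));
        Set<String> notNullColumns = Collections.singleton("id");

        // No primary key in this sketch, hence the null argument.
        TableSchema schema =
                HiveTableUtil.createTableSchema(cols, partitionKeys, notNullColumns, null);
        System.out.println(schema);
    }
}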
Example 6
Source File: SelectTableSinkSchemaConverter.java From flink with Apache License 2.0
/**
 * Convert time attributes (proc time / event time) to regular timestamp
 * and build a new {@link TableSchema}.
 */
public static TableSchema convertTimeAttributeToRegularTimestamp(TableSchema tableSchema) {
    DataType[] dataTypes = tableSchema.getFieldDataTypes();
    String[] oldNames = tableSchema.getFieldNames();

    TableSchema.Builder builder = TableSchema.builder();
    for (int i = 0; i < tableSchema.getFieldCount(); i++) {
        DataType fieldType = dataTypes[i];
        String fieldName = oldNames[i];
        if (fieldType.getLogicalType() instanceof TimestampType) {
            TimestampType timestampType = (TimestampType) fieldType.getLogicalType();
            if (!timestampType.getKind().equals(TimestampKind.REGULAR)) {
                // converts `TIME ATTRIBUTE(ROWTIME)`/`TIME ATTRIBUTE(PROCTIME)` to `TIMESTAMP(3)`
                builder.field(fieldName, DataTypes.TIMESTAMP(3));
                continue;
            }
        }
        builder.field(fieldName, fieldType);
    }
    return builder.build();
}
Example 7
Source File: SelectTableSinkSchemaConverter.java From flink with Apache License 2.0
/**
 * Change to default conversion class and build a new {@link TableSchema}.
 */
public static TableSchema changeDefaultConversionClass(TableSchema tableSchema) {
    DataType[] oldTypes = tableSchema.getFieldDataTypes();
    String[] fieldNames = tableSchema.getFieldNames();

    TableSchema.Builder builder = TableSchema.builder();
    for (int i = 0; i < tableSchema.getFieldCount(); i++) {
        DataType fieldType = LogicalTypeDataTypeConverter.fromLogicalTypeToDataType(
                LogicalTypeDataTypeConverter.fromDataTypeToLogicalType(oldTypes[i]));
        builder.field(fieldNames[i], fieldType);
    }
    return builder.build();
}
Example 8
Source File: LocalExecutor.java From flink with Apache License 2.0
private static TableSchema removeTimeAttributes(TableSchema schema) {
    final TableSchema.Builder builder = TableSchema.builder();
    for (int i = 0; i < schema.getFieldCount(); i++) {
        final DataType dataType = schema.getFieldDataTypes()[i];
        final DataType convertedType = DataTypeUtils.replaceLogicalType(
                dataType,
                LogicalTypeUtils.removeTimeAttributes(dataType.getLogicalType()));
        builder.field(schema.getFieldNames()[i], convertedType);
    }
    return builder.build();
}
Example 9
Source File: JdbcTypeUtil.java From flink with Apache License 2.0
/**
 * The original table schema may contain generated columns which shouldn't be produced/consumed
 * by TableSource/TableSink. The original TIMESTAMP/DATE/TIME types also use
 * LocalDateTime/LocalDate/LocalTime as their conversion classes, whereas the JDBC connector
 * uses the Timestamp/Date/Time classes, so we bridge them to the expected conversion classes.
 */
public static TableSchema normalizeTableSchema(TableSchema schema) {
    TableSchema.Builder physicalSchemaBuilder = TableSchema.builder();
    schema.getTableColumns()
            .forEach(c -> {
                if (!c.isGenerated()) {
                    final DataType type = DataTypeUtils.transform(
                            c.getType(), TypeTransformations.timeToSqlTypes());
                    physicalSchemaBuilder.field(c.getName(), type);
                }
            });
    return physicalSchemaBuilder.build();
}
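A hypothetical call site for normalizeTableSchema. This is a sketch only: the JdbcTypeUtil import path is assumed (it has moved between Flink versions), and the columns are invented:

import org.apache.flink.connector.jdbc.utils.JdbcTypeUtil; // package assumed; it varies across Flink versions
import org.apache.flink.table.api.DataTypes;
import org.apache.flink.table.api.TableSchema;

public class NormalizeSchemaSketch {
    public static void main(String[] args) {
        TableSchema schema = TableSchema.builder()
                .field("id", DataTypes.INT())
                .field("created_at", DataTypes.TIMESTAMP(3)) // converts via LocalDateTime by default
                .field("doubled", DataTypes.INT(), "id * 2") // generated column
                .build();

        // "doubled" is dropped; "created_at" is bridged to java.sql.Timestamp.
        TableSchema physical = JdbcTypeUtil.normalizeTableSchema(schema);
        System.out.println(physical);
    }
}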
Example 10
Source File: KuduTableUtils.java From bahir-flink with Apache License 2.0
public static TableSchema getSchemaWithSqlTimestamp(TableSchema schema) {
    TableSchema.Builder builder = new TableSchema.Builder();
    TableSchemaUtils.getPhysicalSchema(schema).getTableColumns().forEach(
            tableColumn -> {
                if (tableColumn.getType().getLogicalType() instanceof TimestampType) {
                    builder.field(
                            tableColumn.getName(),
                            tableColumn.getType().bridgedTo(Timestamp.class));
                } else {
                    builder.field(tableColumn.getName(), tableColumn.getType());
                }
            });
    return builder.build();
}
Example 11
Source File: KuduTableUtils.java From bahir-flink with Apache License 2.0
public static TableSchema kuduToFlinkSchema(Schema schema) {
    TableSchema.Builder builder = TableSchema.builder();
    for (ColumnSchema column : schema.getColumns()) {
        DataType flinkType = KuduTypeUtils
                .toFlinkType(column.getType(), column.getTypeAttributes())
                .nullable();
        builder.field(column.getName(), flinkType);
    }
    return builder.build();
}
Example 12
Source File: DescriptorProperties.java From flink with Apache License 2.0
/**
 * Returns a table schema under the given key if it exists.
 */
public Optional<TableSchema> getOptionalTableSchema(String key) {
    // filter for number of fields
    final int fieldCount = properties.keySet().stream()
            .filter((k) -> k.startsWith(key) && k.endsWith('.' + TABLE_SCHEMA_NAME))
            .mapToInt((k) -> 1)
            .sum();

    if (fieldCount == 0) {
        return Optional.empty();
    }

    // validate fields and build schema
    final TableSchema.Builder schemaBuilder = TableSchema.builder();
    for (int i = 0; i < fieldCount; i++) {
        final String nameKey = key + '.' + i + '.' + TABLE_SCHEMA_NAME;
        final String typeKey = key + '.' + i + '.' + TABLE_SCHEMA_TYPE;

        final String name = optionalGet(nameKey).orElseThrow(exceptionSupplier(nameKey));

        final TypeInformation<?> type = optionalGet(typeKey)
                .map(TypeStringUtils::readTypeInfo)
                .orElseThrow(exceptionSupplier(typeKey));

        schemaBuilder.field(name, type);
    }
    return Optional.of(schemaBuilder.build());
}
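As a rough illustration of the flat property layout this method expects, here is a sketch. It assumes TABLE_SCHEMA_NAME resolves to "name" and TABLE_SCHEMA_TYPE to "type", and uses legacy TypeInformation type strings:

import java.util.HashMap;
import java.util.Map;
import java.util.Optional;
import org.apache.flink.table.api.TableSchema;
import org.apache.flink.table.descriptors.DescriptorProperties;

public class DescriptorSchemaSketch {
    public static void main(String[] args) {
        Map<String, String> map = new HashMap<>();
        map.put("schema.0.name", "id");
        map.put("schema.0.type", "INT");
        map.put("schema.1.name", "user_name");
        map.put("schema.1.type", "VARCHAR");

        DescriptorProperties props = new DescriptorProperties();
        props.putProperties(map);

        // Rebuilds a two-field TableSchema from the flat key/value layout.
        Optional<TableSchema> schema = props.getOptionalTableSchema("schema");
        schema.ifPresent(System.out::println);
    }
}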
Example 13
Source File: OperationConverterUtils.java From flink with Apache License 2.0
private static void setWatermarkAndPK(TableSchema.Builder builder, TableSchema schema) {
    for (WatermarkSpec watermarkSpec : schema.getWatermarkSpecs()) {
        builder.watermark(watermarkSpec);
    }
    schema.getPrimaryKey().ifPresent(pk -> {
        builder.primaryKey(pk.getName(), pk.getColumns().toArray(new String[0]));
    });
}
Example 14
Source File: TableSchemaUtils.java From flink with Apache License 2.0
/** Returns a builder with the column info copied from the given table schema. */
private static TableSchema.Builder builderWithGivenColumns(List<TableColumn> oriColumns) {
    TableSchema.Builder builder = TableSchema.builder();
    for (TableColumn column : oriColumns) {
        if (column.isGenerated()) {
            builder.field(column.getName(), column.getType(), column.getExpr().get());
        } else {
            builder.field(column.getName(), column.getType());
        }
    }
    return builder;
}
Example 15
Source File: OperationConverterUtils.java From flink with Apache License 2.0
public static Operation convertChangeColumn(
        ObjectIdentifier tableIdentifier,
        SqlChangeColumn changeColumn,
        CatalogTable catalogTable,
        SqlValidator sqlValidator) {
    String oldName = changeColumn.getOldName().getSimple();
    if (catalogTable.getPartitionKeys().indexOf(oldName) >= 0) {
        // disallow changing partition columns
        throw new ValidationException("CHANGE COLUMN cannot be applied to partition columns");
    }
    TableSchema oldSchema = catalogTable.getSchema();
    int oldIndex = Arrays.asList(oldSchema.getFieldNames()).indexOf(oldName);
    if (oldIndex < 0) {
        throw new ValidationException(
                String.format("Old column %s not found for CHANGE COLUMN", oldName));
    }
    boolean first = changeColumn.isFirst();
    String after = changeColumn.getAfter() == null ? null : changeColumn.getAfter().getSimple();
    List<TableColumn> tableColumns = oldSchema.getTableColumns();
    TableColumn newTableColumn = toTableColumn(changeColumn.getNewColumn(), sqlValidator);
    if ((!first && after == null) || oldName.equals(after)) {
        tableColumns.set(oldIndex, newTableColumn);
    } else {
        // need to change column position
        tableColumns.remove(oldIndex);
        if (first) {
            tableColumns.add(0, newTableColumn);
        } else {
            int newIndex = tableColumns
                    .stream()
                    .map(TableColumn::getName)
                    .collect(Collectors.toList())
                    .indexOf(after);
            if (newIndex < 0) {
                throw new ValidationException(
                        String.format("After column %s not found for CHANGE COLUMN", after));
            }
            tableColumns.add(newIndex + 1, newTableColumn);
        }
    }
    TableSchema.Builder builder = TableSchema.builder();
    for (TableColumn column : tableColumns) {
        builder.add(column);
    }
    setWatermarkAndPK(builder, oldSchema);
    TableSchema newSchema = builder.build();
    Map<String, String> newProperties = new HashMap<>(catalogTable.getOptions());
    newProperties.putAll(extractProperties(changeColumn.getProperties()));
    return new AlterTableSchemaOperation(
            tableIdentifier,
            new CatalogTableImpl(
                    newSchema,
                    catalogTable.getPartitionKeys(),
                    newProperties,
                    catalogTable.getComment()));
    // TODO: handle watermark and constraints
}
Example 16
Source File: CatalogTableSchemaResolver.java From flink with Apache License 2.0
/**
 * Resolve the computed column's type for the given schema.
 *
 * @param tableSchema Table schema to derive table field names and data types
 * @return the resolved TableSchema
 */
public TableSchema resolve(TableSchema tableSchema) {
    final String rowtime;
    if (!tableSchema.getWatermarkSpecs().isEmpty()) {
        // TODO: [FLINK-14473] we only support top-level rowtime attribute right now
        rowtime = tableSchema.getWatermarkSpecs().get(0).getRowtimeAttribute();
        if (rowtime.contains(".")) {
            throw new ValidationException(
                    String.format("Nested field '%s' as rowtime attribute is not supported right now.", rowtime));
        }
    } else {
        rowtime = null;
    }

    String[] fieldNames = tableSchema.getFieldNames();
    DataType[] fieldTypes = tableSchema.getFieldDataTypes();

    TableSchema.Builder builder = TableSchema.builder();
    for (int i = 0; i < tableSchema.getFieldCount(); ++i) {
        TableColumn tableColumn = tableSchema.getTableColumns().get(i);
        DataType fieldType = fieldTypes[i];

        if (tableColumn.isGenerated()) {
            fieldType = resolveExpressionDataType(tableColumn.getExpr().get(), tableSchema);
            if (isProctime(fieldType)) {
                if (fieldNames[i].equals(rowtime)) {
                    throw new TableException(
                            "Watermark can not be defined for a processing time attribute column.");
                }
            }
        }

        if (isStreamingMode && fieldNames[i].equals(rowtime)) {
            TimestampType originalType = (TimestampType) fieldType.getLogicalType();
            LogicalType rowtimeType = new TimestampType(
                    originalType.isNullable(),
                    TimestampKind.ROWTIME,
                    originalType.getPrecision());
            fieldType = TypeConversions.fromLogicalToDataType(rowtimeType);
        }

        if (tableColumn.isGenerated()) {
            builder.field(fieldNames[i], fieldType, tableColumn.getExpr().get());
        } else {
            builder.field(fieldNames[i], fieldType);
        }
    }

    tableSchema.getWatermarkSpecs().forEach(builder::watermark);
    tableSchema.getPrimaryKey().ifPresent(
            pk -> builder.primaryKey(pk.getName(), pk.getColumns().toArray(new String[0])));
    return builder.build();
}
Example 17
Source File: HiveTableSinkITCase.java From flink with Apache License 2.0
@Test
public void testWriteComplexType() throws Exception {
    String dbName = "default";
    String tblName = "dest";
    ObjectPath tablePath = new ObjectPath(dbName, tblName);

    TableSchema.Builder builder = new TableSchema.Builder();
    builder.fields(
            new String[]{"a", "m", "s"},
            new DataType[]{
                    DataTypes.ARRAY(DataTypes.INT()),
                    DataTypes.MAP(DataTypes.INT(), DataTypes.STRING()),
                    DataTypes.ROW(
                            DataTypes.FIELD("f1", DataTypes.INT()),
                            DataTypes.FIELD("f2", DataTypes.STRING()))});

    RowTypeInfo rowTypeInfo = createHiveDestTable(dbName, tblName, builder.build(), 0);

    List<Row> toWrite = new ArrayList<>();
    Row row = new Row(rowTypeInfo.getArity());
    Object[] array = new Object[]{1, 2, 3};
    Map<Integer, String> map = new HashMap<Integer, String>() {{
        put(1, "a");
        put(2, "b");
    }};
    Row struct = new Row(2);
    struct.setField(0, 3);
    struct.setField(1, "c");

    row.setField(0, array);
    row.setField(1, map);
    row.setField(2, struct);
    toWrite.add(row);

    TableEnvironment tableEnv = HiveTestUtils.createTableEnvWithBlinkPlannerBatchMode();
    Table src = tableEnv.fromTableSource(new CollectionTableSource(toWrite, rowTypeInfo));
    tableEnv.registerTable("complexSrc", src);

    tableEnv.registerCatalog("hive", hiveCatalog);
    TableEnvUtil.execInsertTableAndWaitResult(
            tableEnv.sqlQuery("select * from complexSrc"), "hive.`default`.dest");

    List<String> result = hiveShell.executeQuery("select * from " + tblName);
    assertEquals(1, result.size());
    assertEquals("[1,2,3]\t{1:\"a\",2:\"b\"}\t{\"f1\":3,\"f2\":\"c\"}", result.get(0));

    hiveCatalog.dropTable(tablePath, false);
}
Example 18
Source File: HiveTableSinkTest.java From flink with Apache License 2.0
@Test
public void testWriteComplexType() throws Exception {
    String dbName = "default";
    String tblName = "dest";
    ObjectPath tablePath = new ObjectPath(dbName, tblName);

    TableSchema.Builder builder = new TableSchema.Builder();
    builder.fields(
            new String[]{"a", "m", "s"},
            new DataType[]{
                    DataTypes.ARRAY(DataTypes.INT()),
                    DataTypes.MAP(DataTypes.INT(), DataTypes.STRING()),
                    DataTypes.ROW(
                            DataTypes.FIELD("f1", DataTypes.INT()),
                            DataTypes.FIELD("f2", DataTypes.STRING()))});

    RowTypeInfo rowTypeInfo = createDestTable(dbName, tblName, builder.build(), 0);

    List<Row> toWrite = new ArrayList<>();
    Row row = new Row(rowTypeInfo.getArity());
    Object[] array = new Object[]{1, 2, 3};
    Map<Integer, String> map = new HashMap<Integer, String>() {{
        put(1, "a");
        put(2, "b");
    }};
    Row struct = new Row(2);
    struct.setField(0, 3);
    struct.setField(1, "c");

    row.setField(0, array);
    row.setField(1, map);
    row.setField(2, struct);
    toWrite.add(row);

    TableEnvironment tableEnv = HiveTestUtils.createTableEnv();
    Table src = tableEnv.fromTableSource(new CollectionTableSource(toWrite, rowTypeInfo));
    tableEnv.registerTable("complexSrc", src);

    tableEnv.registerCatalog("hive", hiveCatalog);
    tableEnv.sqlQuery("select * from complexSrc").insertInto("hive", "default", "dest");
    tableEnv.execute("mytest");

    List<String> result = hiveShell.executeQuery("select * from " + tblName);
    assertEquals(1, result.size());
    assertEquals("[1,2,3]\t{1:\"a\",2:\"b\"}\t{\"f1\":3,\"f2\":\"c\"}", result.get(0));

    hiveCatalog.dropTable(tablePath, false);
}