Java Code Examples for org.apache.flink.table.catalog.CatalogBaseTable#getSchema()
The following examples show how to use org.apache.flink.table.catalog.CatalogBaseTable#getSchema().
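As a quick orientation before the examples, here is a minimal, self-contained sketch of the basic pattern using Flink's in-memory catalog: register a table, look it up by ObjectPath, and read its schema back via CatalogBaseTable#getSchema(). The catalog, database, table, and class names are illustrative only, not taken from the examples below.

import java.util.Collections;
import org.apache.flink.table.api.DataTypes;
import org.apache.flink.table.api.TableSchema;
import org.apache.flink.table.catalog.Catalog;
import org.apache.flink.table.catalog.CatalogBaseTable;
import org.apache.flink.table.catalog.CatalogTableImpl;
import org.apache.flink.table.catalog.GenericInMemoryCatalog;
import org.apache.flink.table.catalog.ObjectPath;

public class GetSchemaSketch {
    public static void main(String[] args) throws Exception {
        // In-memory catalog with a default database; all names here are placeholders.
        Catalog catalog = new GenericInMemoryCatalog("my_catalog", "db1");

        TableSchema schema = TableSchema.builder()
                .field("id", DataTypes.BIGINT())
                .field("name", DataTypes.STRING())
                .build();
        CatalogBaseTable table =
                new CatalogTableImpl(schema, Collections.emptyMap(), "a demo table");
        catalog.createTable(new ObjectPath("db1", "t1"), table, false);

        // Look the table up again and read its schema back.
        CatalogBaseTable resolved = catalog.getTable(new ObjectPath("db1", "t1"));
        TableSchema readBack = resolved.getSchema();
        System.out.println(String.join(", ", readBack.getFieldNames())); // prints: id, name
    }
}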
Example 1
Source File: PulsarMetadataReader.java From pulsar-flink with Apache License 2.0
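This example reads the table's TableSchema, filters out Pulsar's reserved metadata columns, converts the remaining fields to a single Flink DataType (the lone field's type if only one remains, otherwise a ROW of the remaining fields), and uploads the resulting Pulsar schema to the topic via the admin client.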
public void putSchema(ObjectPath tablePath, CatalogBaseTable table) throws IncompatibleSchemaException {
    String topic = objectPath2TopicName(tablePath);
    TableSchema tableSchema = table.getSchema();
    List<String> fieldsRemaining = new ArrayList<>(tableSchema.getFieldCount());
    for (String fieldName : tableSchema.getFieldNames()) {
        if (!PulsarOptions.META_FIELD_NAMES.contains(fieldName)) {
            fieldsRemaining.add(fieldName);
        }
    }

    DataType dataType;

    if (fieldsRemaining.size() == 1) {
        dataType = tableSchema.getFieldDataType(fieldsRemaining.get(0)).get();
    } else {
        List<DataTypes.Field> fieldList = fieldsRemaining.stream()
                .map(f -> DataTypes.FIELD(f, tableSchema.getFieldDataType(f).get()))
                .collect(Collectors.toList());
        dataType = DataTypes.ROW(fieldList.toArray(new DataTypes.Field[0]));
    }

    SchemaInfo si = SchemaUtils.sqlType2PulsarSchema(dataType).getSchemaInfo();
    SchemaUtils.uploadPulsarSchema(admin, topic, si);
}
Example 2
Source File: TableEnvHiveConnectorITCase.java From flink with Apache License 2.0
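This test verifies that Hive NOT NULL constraints are carried into the TableSchema returned by getSchema(): plain columns stay nullable, NOT NULL ... RELY columns become non-nullable, and NOT NULL ... NORELY columns remain nullable.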
@Test
public void testNotNullConstraints() throws Exception {
    Assume.assumeTrue(HiveVersionTestUtil.HIVE_310_OR_LATER);
    TableEnvironment tableEnv = getTableEnvWithHiveCatalog();
    tableEnv.executeSql("create database db1");
    try {
        tableEnv.executeSql("create table db1.tbl (x int,y bigint not null enable rely,z string not null enable norely)");
        CatalogBaseTable catalogTable = hiveCatalog.getTable(new ObjectPath("db1", "tbl"));
        TableSchema tableSchema = catalogTable.getSchema();
        assertTrue("By default columns should be nullable",
                tableSchema.getFieldDataTypes()[0].getLogicalType().isNullable());
        assertFalse("NOT NULL columns should be reflected in table schema",
                tableSchema.getFieldDataTypes()[1].getLogicalType().isNullable());
        assertTrue("NOT NULL NORELY columns should be considered nullable",
                tableSchema.getFieldDataTypes()[2].getLogicalType().isNullable());
    } finally {
        tableEnv.executeSql("drop database db1 cascade");
    }
}
Example 3
Source File: KuduCatalog.java From bahir-flink with Apache License 2.0
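Here getSchema() is used during table creation: whether the schema declares a primary key determines which Kudu properties are required, the supplied properties are validated against the required and optional sets, and the schema is then passed on to build the KuduTableInfo.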
@Override
public void createTable(ObjectPath tablePath, CatalogBaseTable table, boolean ignoreIfExists) throws TableAlreadyExistException {
    Map<String, String> tableProperties = table.getProperties();
    TableSchema tableSchema = table.getSchema();

    Set<String> optionalProperties = new HashSet<>(Arrays.asList(KUDU_REPLICAS));
    Set<String> requiredProperties = new HashSet<>(Arrays.asList(KUDU_HASH_COLS));

    if (!tableSchema.getPrimaryKey().isPresent()) {
        requiredProperties.add(KUDU_PRIMARY_KEY_COLS);
    }

    if (!tableProperties.keySet().containsAll(requiredProperties)) {
        throw new CatalogException("Missing required property. The following properties must be provided: " +
                requiredProperties.toString());
    }

    Set<String> permittedProperties = Sets.union(requiredProperties, optionalProperties);
    if (!permittedProperties.containsAll(tableProperties.keySet())) {
        throw new CatalogException("Unpermitted properties were given. The following properties are allowed: " +
                permittedProperties.toString());
    }

    String tableName = tablePath.getObjectName();
    KuduTableInfo tableInfo = KuduTableUtils.createTableInfo(tableName, tableSchema, tableProperties);
    createTable(tableInfo, ignoreIfExists);
}
Example 4
Source File: TableEnvHiveConnectorITCase.java From flink with Apache License 2.0
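This test checks that Hive primary-key constraints surface through getSchema(): a RELY primary key appears as the schema's UniqueConstraint, while NORELY and absent primary keys leave TableSchema#getPrimaryKey() empty.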
@Test
public void testPKConstraint() throws Exception {
    // While PK constraints are supported since Hive 2.1.0, the constraints cannot be RELY in 2.x versions.
    // So let's only test for 3.x.
    Assume.assumeTrue(HiveVersionTestUtil.HIVE_310_OR_LATER);
    TableEnvironment tableEnv = getTableEnvWithHiveCatalog();
    tableEnv.executeSql("create database db1");
    try {
        // test rely PK constraints
        tableEnv.executeSql("create table db1.tbl1 (x tinyint,y smallint,z int, primary key (x,z) disable novalidate rely)");
        CatalogBaseTable catalogTable = hiveCatalog.getTable(new ObjectPath("db1", "tbl1"));
        TableSchema tableSchema = catalogTable.getSchema();
        assertTrue(tableSchema.getPrimaryKey().isPresent());
        UniqueConstraint pk = tableSchema.getPrimaryKey().get();
        assertEquals(2, pk.getColumns().size());
        assertTrue(pk.getColumns().containsAll(Arrays.asList("x", "z")));
        // test norely PK constraints
        tableEnv.executeSql("create table db1.tbl2 (x tinyint,y smallint, primary key (x) disable norely)");
        catalogTable = hiveCatalog.getTable(new ObjectPath("db1", "tbl2"));
        tableSchema = catalogTable.getSchema();
        assertFalse(tableSchema.getPrimaryKey().isPresent());
        // test table w/o PK
        tableEnv.executeSql("create table db1.tbl3 (x tinyint)");
        catalogTable = hiveCatalog.getTable(new ObjectPath("db1", "tbl3"));
        tableSchema = catalogTable.getSchema();
        assertFalse(tableSchema.getPrimaryKey().isPresent());
    } finally {
        tableEnv.executeSql("drop database db1 cascade");
    }
}
Example 5
Source File: HiveDB.java From Alink with Apache License 2.0
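A small helper that resolves a table by ObjectPath in the Hive catalog and returns its schema; as the method name indicates, the schema obtained this way includes the table's partition columns.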
private TableSchema getTableSchemaWithPartitionColumns(String tableName) throws Exception {
    ObjectPath op = ObjectPath.fromString(String.format("%s.%s", dbName, tableName));
    CatalogBaseTable tbl = getCatalog().getTable(op);
    TableSchema schema = tbl.getSchema();
    return schema;
}