org.apache.flink.table.catalog.CatalogTable Java Examples
The following examples show how to use org.apache.flink.table.catalog.CatalogTable. Each example lists its source file, originating project, and license.
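Before the individual examples, here is a minimal self-contained sketch of the pattern most of them share: build a TableSchema, collect connector properties in a Map, and wrap both in a CatalogTableImpl together with a comment. The class name, column names, and the "COLLECTION" connector value are illustrative placeholders, not taken from any single example; the imports assume the Flink 1.11-era package layout used by the examples below.

import java.util.HashMap;
import java.util.Map;

import org.apache.flink.table.api.DataTypes;
import org.apache.flink.table.api.TableSchema;
import org.apache.flink.table.catalog.CatalogTable;
import org.apache.flink.table.catalog.CatalogTableImpl;

public class CatalogTableSketch {

    // Builds a CatalogTable the same way the test fixtures below do:
    // schema + connector properties + comment.
    public static CatalogTable createExampleTable() {
        // Describe the columns of the table.
        TableSchema schema = TableSchema.builder()
            .field("id", DataTypes.BIGINT())
            .field("name", DataTypes.STRING())
            .build();

        // Connector-specific options; "COLLECTION" mirrors the test examples below.
        Map<String, String> properties = new HashMap<>();
        properties.put("connector", "COLLECTION");

        return new CatalogTableImpl(schema, properties, "example table");
    }
}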
Example #1
Source File: HiveTableSinkITCase.java (from flink, Apache License 2.0)
private CatalogTable createHiveCatalogTable(TableSchema tableSchema, int numPartCols) {
    if (numPartCols == 0) {
        return new CatalogTableImpl(
            tableSchema,
            new HashMap<String, String>() {{
                // creating a hive table needs explicit is_generic=false flag
                put(CatalogConfig.IS_GENERIC, String.valueOf(false));
            }},
            "");
    }
    String[] partCols = new String[numPartCols];
    System.arraycopy(tableSchema.getFieldNames(), tableSchema.getFieldNames().length - numPartCols, partCols, 0, numPartCols);
    return new CatalogTableImpl(
        tableSchema,
        Arrays.asList(partCols),
        new HashMap<String, String>() {{
            // creating a hive table needs explicit is_generic=false flag
            put(CatalogConfig.IS_GENERIC, String.valueOf(false));
        }},
        "");
}
Example #2
Source File: HiveCatalogDataTypeTest.java (from flink, Apache License 2.0)
private CatalogTable createCatalogTable(DataType[] types) {
    String[] colNames = new String[types.length];
    for (int i = 0; i < types.length; i++) {
        colNames[i] = String.format("%s_%d", types[i].toString().toLowerCase(), i);
    }
    TableSchema schema = TableSchema.builder()
        .fields(colNames, types)
        .build();
    return new CatalogTableImpl(
        schema,
        new HashMap<String, String>() {{
            put("is_streaming", "false");
            put(CatalogConfig.IS_GENERIC, String.valueOf(false));
        }},
        ""
    );
}
Example #3
Source File: HiveTableOutputFormat.java (from flink, Apache License 2.0)
public HiveTableOutputFormat(JobConf jobConf, ObjectPath tablePath, CatalogTable table,
        HiveTablePartition hiveTablePartition, Properties tableProperties, boolean overwrite) {
    super(jobConf.getCredentials());

    Preconditions.checkNotNull(table, "table cannot be null");
    Preconditions.checkNotNull(hiveTablePartition, "HiveTablePartition cannot be null");
    Preconditions.checkNotNull(tableProperties, "Table properties cannot be null");

    HadoopUtils.mergeHadoopConf(jobConf);
    this.jobConf = jobConf;
    this.tablePath = tablePath;
    this.partitionColumns = table.getPartitionKeys();
    TableSchema tableSchema = table.getSchema();
    this.fieldNames = tableSchema.getFieldNames();
    this.fieldTypes = tableSchema.getFieldDataTypes();
    this.hiveTablePartition = hiveTablePartition;
    this.tableProperties = tableProperties;
    this.overwrite = overwrite;
    isPartitioned = partitionColumns != null && !partitionColumns.isEmpty();
    isDynamicPartition = isPartitioned && partitionColumns.size() > hiveTablePartition.getPartitionSpec().size();
    hiveVersion = Preconditions.checkNotNull(jobConf.get(HiveCatalogValidator.CATALOG_HIVE_VERSION),
        "Hive version is not defined");
}
Example #4
Source File: TableFactoryUtil.java (from flink, Apache License 2.0)
/**
 * Creates a {@link TableSink} from a {@link CatalogTable}.
 *
 * <p>It considers {@link Catalog#getFactory()} if provided.
 */
@SuppressWarnings("unchecked")
public static <T> TableSink<T> findAndCreateTableSink(
        @Nullable Catalog catalog,
        ObjectIdentifier objectIdentifier,
        CatalogTable catalogTable,
        ReadableConfig configuration,
        boolean isStreamingMode) {
    TableSinkFactory.Context context = new TableSinkFactoryContextImpl(
        objectIdentifier,
        catalogTable,
        configuration,
        !isStreamingMode);
    if (catalog == null) {
        return findAndCreateTableSink(context);
    } else {
        return createTableSinkForCatalogTable(catalog, context)
            .orElseGet(() -> findAndCreateTableSink(context));
    }
}
Example #5
Source File: KafkaDynamicTableFactoryTestBase.java (from flink, Apache License 2.0)
@Test
public void testMissingStartupTimestamp() {
    // Construct table source using DDL and table source factory
    ObjectIdentifier objectIdentifier = ObjectIdentifier.of(
        "default",
        "default",
        "scanTable");
    final Map<String, String> modifiedOptions = getModifiedOptions(
        getFullSourceOptions(),
        options -> {
            options.put("scan.startup.mode", "timestamp");
        });
    CatalogTable catalogTable = createKafkaSourceCatalogTable(modifiedOptions);

    thrown.expect(ValidationException.class);
    thrown.expect(containsCause(new ValidationException("'scan.startup.timestamp-millis' "
        + "is required in 'timestamp' startup mode but missing.")));
    FactoryUtil.createTableSource(null,
        objectIdentifier,
        catalogTable,
        new Configuration(),
        Thread.currentThread().getContextClassLoader());
}
Example #6
Source File: HiveTableFactoryTest.java (from flink, Apache License 2.0)
@Test
public void testHiveTable() throws Exception {
    TableSchema schema = TableSchema.builder()
        .field("name", DataTypes.STRING())
        .field("age", DataTypes.INT())
        .build();

    Map<String, String> properties = new HashMap<>();

    catalog.createDatabase("mydb", new CatalogDatabaseImpl(new HashMap<>(), ""), true);
    ObjectPath path = new ObjectPath("mydb", "mytable");
    CatalogTable table = new CatalogTableImpl(schema, properties, "hive table");
    catalog.createTable(path, table, true);

    Optional<TableFactory> opt = catalog.getTableFactory();
    assertTrue(opt.isPresent());
    HiveTableFactory tableFactory = (HiveTableFactory) opt.get();
    TableSink tableSink = tableFactory.createTableSink(path, table);
    assertTrue(tableSink instanceof HiveTableSink);
    TableSource tableSource = tableFactory.createTableSource(path, table);
    assertTrue(tableSource instanceof HiveTableSource);
}
Example #7
Source File: SqlToOperationConverterTest.java (from flink, Apache License 2.0)
@Before
public void before() throws TableAlreadyExistException, DatabaseNotExistException {
    final ObjectPath path1 = new ObjectPath(catalogManager.getCurrentDatabase(), "t1");
    final ObjectPath path2 = new ObjectPath(catalogManager.getCurrentDatabase(), "t2");
    final TableSchema tableSchema = TableSchema.builder()
        .field("a", DataTypes.BIGINT())
        .field("b", DataTypes.VARCHAR(Integer.MAX_VALUE))
        .field("c", DataTypes.INT())
        .field("d", DataTypes.VARCHAR(Integer.MAX_VALUE))
        .build();
    Map<String, String> properties = new HashMap<>();
    properties.put("connector", "COLLECTION");
    final CatalogTable catalogTable = new CatalogTableImpl(tableSchema, properties, "");
    catalog.createTable(path1, catalogTable, true);
    catalog.createTable(path2, catalogTable, true);
}
Example #8
Source File: TableEnvironmentTest.java (from flink, Apache License 2.0)
private static void assertCatalogTable(CatalogTable table) {
    assertThat(
        table.getSchema(),
        equalTo(
            TableSchema.builder()
                .field("my_field_0", DataTypes.INT())
                .field("my_field_1", DataTypes.BOOLEAN())
                .field("my_part_1", DataTypes.BIGINT())
                .field("my_part_2", DataTypes.STRING())
                .build()));
    assertThat(
        table.getPartitionKeys(),
        equalTo(Arrays.asList("my_part_1", "my_part_2")));
    Map<String, String> properties = new HashMap<>();
    properties.put("update-mode", "append");
    properties.put("connector.property-version", "1");
    properties.put("format.type", "my_format");
    properties.put("format.property-version", "1");
    properties.put("connector.type", "table-source-factory-mock");
    assertThat(table.getProperties(), equalTo(properties));
}
Example #9
Source File: SqlToOperationConverter.java (from flink, Apache License 2.0)
private Operation convertAlterTableProperties(ObjectIdentifier tableIdentifier, CatalogTable oldTable,
        SqlAlterTableProperties alterTableProperties) {
    LinkedHashMap<String, String> partitionKVs = alterTableProperties.getPartitionKVs();
    // it's altering partitions
    if (partitionKVs != null) {
        CatalogPartitionSpec partitionSpec = new CatalogPartitionSpec(partitionKVs);
        CatalogPartition catalogPartition = catalogManager.getPartition(tableIdentifier, partitionSpec)
            .orElseThrow(() -> new ValidationException(String.format("Partition %s of table %s doesn't exist",
                partitionSpec.getPartitionSpec(), tableIdentifier)));
        Map<String, String> newProps = new HashMap<>(catalogPartition.getProperties());
        newProps.putAll(OperationConverterUtils.extractProperties(alterTableProperties.getPropertyList()));
        return new AlterPartitionPropertiesOperation(
            tableIdentifier,
            partitionSpec,
            new CatalogPartitionImpl(newProps, catalogPartition.getComment()));
    } else {
        // it's altering a table
        Map<String, String> newProperties = new HashMap<>(oldTable.getOptions());
        newProperties.putAll(OperationConverterUtils.extractProperties(alterTableProperties.getPropertyList()));
        return new AlterTablePropertiesOperation(tableIdentifier, oldTable.copy(newProperties));
    }
}
Example #10
Source File: KafkaDynamicTableFactoryTestBase.java (from flink, Apache License 2.0)
@Test
public void testInvalidSinkPartitioner() {
    ObjectIdentifier objectIdentifier = ObjectIdentifier.of(
        "default",
        "default",
        "sinkTable");

    final Map<String, String> modifiedOptions = getModifiedOptions(
        getFullSourceOptions(),
        options -> {
            options.put("sink.partitioner", "abc");
        });
    final CatalogTable sinkTable = createKafkaSinkCatalogTable(modifiedOptions);

    thrown.expect(ValidationException.class);
    thrown.expect(containsCause(new ValidationException("Could not find and instantiate partitioner class 'abc'")));
    FactoryUtil.createTableSink(
        null,
        objectIdentifier,
        sinkTable,
        new Configuration(),
        Thread.currentThread().getContextClassLoader());
}
Example #11
Source File: DatabaseCalciteSchema.java (from flink, Apache License 2.0)
private Table convertCatalogTable(ObjectPath tablePath, CatalogTable table) {
    TableSource<?> tableSource;
    Optional<TableFactory> tableFactory = catalog.getTableFactory();
    if (tableFactory.isPresent()) {
        TableFactory tf = tableFactory.get();
        if (tf instanceof TableSourceFactory) {
            tableSource = ((TableSourceFactory) tf).createTableSource(tablePath, table);
        } else {
            throw new TableException(String.format(
                "Cannot query a sink-only table. TableFactory provided by catalog %s must implement TableSourceFactory",
                catalog.getClass()));
        }
    } else {
        tableSource = TableFactoryUtil.findAndCreateTableSource(table);
    }

    if (!(tableSource instanceof StreamTableSource)) {
        throw new TableException("Catalog tables support only StreamTableSource and InputFormatTableSource");
    }

    return new TableSourceTable<>(
        tableSource,
        !((StreamTableSource<?>) tableSource).isBounded(),
        FlinkStatistic.UNKNOWN()
    );
}
Example #12
Source File: SqlToOperationConverterTest.java (from flink, Apache License 2.0)
@Test
public void testAlterTableAddUniqueConstraintEnforced() throws Exception {
    Catalog catalog = new GenericInMemoryCatalog("default", "default");
    catalogManager.registerCatalog("cat1", catalog);
    catalog.createDatabase("db1", new CatalogDatabaseImpl(new HashMap<>(), null), true);
    CatalogTable catalogTable = new CatalogTableImpl(
        TableSchema.builder()
            .field("a", DataTypes.STRING().notNull())
            .field("b", DataTypes.BIGINT().notNull())
            .field("c", DataTypes.BIGINT())
            .build(),
        new HashMap<>(),
        "tb1");
    catalogManager.setCurrentCatalog("cat1");
    catalogManager.setCurrentDatabase("db1");
    catalog.createTable(new ObjectPath("db1", "tb1"), catalogTable, true);
    // Test alter table add enforced
    thrown.expect(UnsupportedOperationException.class);
    thrown.expectMessage("UNIQUE constraint is not supported yet");
    parse("alter table tb1 add constraint ct1 unique(a, b)", SqlDialect.DEFAULT);
}
Example #13
Source File: KafkaDynamicTableFactoryTestBase.java (from flink, Apache License 2.0)
@Test
public void testTableSourceCommitOnCheckpointsDisabled() {
    // Construct table source using options and table source factory
    ObjectIdentifier objectIdentifier = ObjectIdentifier.of(
        "default",
        "default",
        "scanTable");
    Map<String, String> tableOptions = getFullSourceOptions();
    tableOptions.remove("properties.group.id");
    CatalogTable catalogTable = createKafkaSourceCatalogTable(tableOptions);

    final DynamicTableSource tableSource = FactoryUtil.createTableSource(null,
        objectIdentifier,
        catalogTable,
        new Configuration(),
        Thread.currentThread().getContextClassLoader());

    // Test commitOnCheckpoints flag should be false when do not set consumer group.
    assertThat(tableSource, instanceOf(KafkaDynamicSourceBase.class));
    ScanTableSource.ScanRuntimeProvider providerWithoutGroupId = ((KafkaDynamicSourceBase) tableSource)
        .getScanRuntimeProvider(ScanRuntimeProviderContext.INSTANCE);
    assertThat(providerWithoutGroupId, instanceOf(SourceFunctionProvider.class));
    final SourceFunctionProvider functionProviderWithoutGroupId = (SourceFunctionProvider) providerWithoutGroupId;
    final SourceFunction<RowData> function = functionProviderWithoutGroupId.createSourceFunction();
    assertFalse(((FlinkKafkaConsumerBase) function).getEnableCommitOnCheckpoints());
}
Example #14
Source File: HiveTableFactory.java (from flink, Apache License 2.0)
@Override
public TableSource<RowData> createTableSource(TableSourceFactory.Context context) {
    CatalogTable table = checkNotNull(context.getTable());
    Preconditions.checkArgument(table instanceof CatalogTableImpl);

    boolean isGeneric = Boolean.parseBoolean(table.getProperties().get(CatalogConfig.IS_GENERIC));

    if (!isGeneric) {
        return new HiveTableSource(
            new JobConf(hiveConf),
            context.getConfiguration(),
            context.getObjectIdentifier().toObjectPath(),
            table);
    } else {
        return TableFactoryUtil.findAndCreateTableSource(context);
    }
}
Example #15
Source File: SqlToOperationConverterTest.java (from flink, Apache License 2.0)
@Before
public void before() throws TableAlreadyExistException, DatabaseNotExistException {
    catalogManager.setCatalogTableSchemaResolver(new CatalogTableSchemaResolver(new ParserMock(), true));
    final ObjectPath path1 = new ObjectPath(catalogManager.getCurrentDatabase(), "t1");
    final ObjectPath path2 = new ObjectPath(catalogManager.getCurrentDatabase(), "t2");
    final TableSchema tableSchema = TableSchema.builder()
        .field("a", DataTypes.BIGINT())
        .field("b", DataTypes.VARCHAR(Integer.MAX_VALUE))
        .field("c", DataTypes.INT())
        .field("d", DataTypes.VARCHAR(Integer.MAX_VALUE))
        .build();
    Map<String, String> properties = new HashMap<>();
    properties.put("connector", "COLLECTION");
    final CatalogTable catalogTable = new CatalogTableImpl(tableSchema, properties, "");
    catalog.createTable(path1, catalogTable, true);
    catalog.createTable(path2, catalogTable, true);
}
Example #16
Source File: HiveTableInputFormat.java (from flink, Apache License 2.0)
public HiveTableInputFormat(
        JobConf jobConf,
        CatalogTable catalogTable,
        List<HiveTablePartition> partitions,
        int[] projectedFields,
        long limit,
        String hiveVersion,
        boolean useMapRedReader) {
    super(jobConf.getCredentials());
    this.partitionKeys = catalogTable.getPartitionKeys();
    this.fieldTypes = catalogTable.getSchema().getFieldDataTypes();
    this.fieldNames = catalogTable.getSchema().getFieldNames();
    this.limit = limit;
    this.hiveVersion = hiveVersion;
    checkNotNull(catalogTable, "catalogTable can not be null.");
    this.partitions = checkNotNull(partitions, "partitions can not be null.");
    this.jobConf = new JobConf(jobConf);
    int rowArity = catalogTable.getSchema().getFieldCount();
    selectedFields = projectedFields != null ? projectedFields : IntStream.range(0, rowArity).toArray();
    this.useMapRedReader = useMapRedReader;
}
Example #17
Source File: HiveTableSource.java (from flink, Apache License 2.0)
private HiveTableSource(
        JobConf jobConf,
        ReadableConfig flinkConf,
        ObjectPath tablePath,
        CatalogTable catalogTable,
        List<Map<String, String>> remainingPartitions,
        String hiveVersion,
        boolean partitionPruned,
        int[] projectedFields,
        boolean isLimitPushDown,
        long limit) {
    this.jobConf = Preconditions.checkNotNull(jobConf);
    this.flinkConf = Preconditions.checkNotNull(flinkConf);
    this.tablePath = Preconditions.checkNotNull(tablePath);
    this.catalogTable = Preconditions.checkNotNull(catalogTable);
    this.remainingPartitions = remainingPartitions;
    this.hiveVersion = hiveVersion;
    hiveShim = HiveShimLoader.loadHiveShim(hiveVersion);
    this.partitionPruned = partitionPruned;
    this.projectedFields = projectedFields;
    this.isLimitPushDown = isLimitPushDown;
    this.limit = limit;
}
Example #18
Source File: SqlToOperationConverterTest.java (from flink, Apache License 2.0)
@Test
public void testAlterTableAddUniqueConstraint() throws Exception {
    Catalog catalog = new GenericInMemoryCatalog("default", "default");
    catalogManager.registerCatalog("cat1", catalog);
    catalog.createDatabase("db1", new CatalogDatabaseImpl(new HashMap<>(), null), true);
    CatalogTable catalogTable = new CatalogTableImpl(
        TableSchema.builder()
            .field("a", DataTypes.STRING().notNull())
            .field("b", DataTypes.BIGINT().notNull())
            .build(),
        new HashMap<>(),
        "tb1");
    catalogManager.setCurrentCatalog("cat1");
    catalogManager.setCurrentDatabase("db1");
    catalog.createTable(new ObjectPath("db1", "tb1"), catalogTable, true);
    // Test alter add table constraint.
    thrown.expect(UnsupportedOperationException.class);
    thrown.expectMessage("UNIQUE constraint is not supported yet");
    parse("alter table tb1 add constraint ct1 unique(a, b) not enforced", SqlDialect.DEFAULT);
}
Example #19
Source File: HiveCatalogHiveMetadataTest.java (from flink, Apache License 2.0)
@Test
public void testCreateTableWithConstraints() throws Exception {
    Assume.assumeTrue(HiveVersionTestUtil.HIVE_310_OR_LATER);
    HiveCatalog hiveCatalog = (HiveCatalog) catalog;
    hiveCatalog.createDatabase(db1, createDb(), false);
    TableSchema.Builder builder = TableSchema.builder();
    builder.fields(
        new String[]{"x", "y", "z"},
        new DataType[]{DataTypes.INT().notNull(), DataTypes.TIMESTAMP(9).notNull(), DataTypes.BIGINT()});
    builder.primaryKey("pk_name", new String[]{"x"});
    hiveCatalog.createTable(path1, new CatalogTableImpl(builder.build(), getBatchTableProperties(), null), false);
    CatalogTable catalogTable = (CatalogTable) hiveCatalog.getTable(path1);
    assertTrue("PK not present", catalogTable.getSchema().getPrimaryKey().isPresent());
    UniqueConstraint pk = catalogTable.getSchema().getPrimaryKey().get();
    assertEquals("pk_name", pk.getName());
    assertEquals(Collections.singletonList("x"), pk.getColumns());
    assertFalse(catalogTable.getSchema().getFieldDataTypes()[0].getLogicalType().isNullable());
    assertFalse(catalogTable.getSchema().getFieldDataTypes()[1].getLogicalType().isNullable());
    assertTrue(catalogTable.getSchema().getFieldDataTypes()[2].getLogicalType().isNullable());

    hiveCatalog.dropDatabase(db1, false, true);
}
Example #20
Source File: HiveCatalogHiveMetadataTest.java (from flink, Apache License 2.0)
private void checkStatistics(int inputStat, int expectStat) throws Exception {
    catalog.dropTable(path1, true);

    Map<String, String> properties = new HashMap<>();
    properties.put(CatalogConfig.IS_GENERIC, "false");
    properties.put(StatsSetupConst.ROW_COUNT, String.valueOf(inputStat));
    properties.put(StatsSetupConst.NUM_FILES, String.valueOf(inputStat));
    properties.put(StatsSetupConst.TOTAL_SIZE, String.valueOf(inputStat));
    properties.put(StatsSetupConst.RAW_DATA_SIZE, String.valueOf(inputStat));
    CatalogTable catalogTable = new CatalogTableImpl(
        TableSchema.builder().field("f0", DataTypes.INT()).build(),
        properties,
        "");
    catalog.createTable(path1, catalogTable, false);

    CatalogTableStatistics statistics = catalog.getTableStatistics(path1);
    assertEquals(expectStat, statistics.getRowCount());
    assertEquals(expectStat, statistics.getFileCount());
    assertEquals(expectStat, statistics.getRawDataSize());
    assertEquals(expectStat, statistics.getTotalSize());
}
Example #21
Source File: KafkaDynamicTableFactoryTestBase.java (from flink, Apache License 2.0)
@Test
public void testMissingSpecificOffsets() {
    // Construct table source using DDL and table source factory
    ObjectIdentifier objectIdentifier = ObjectIdentifier.of(
        "default",
        "default",
        "scanTable");
    final Map<String, String> modifiedOptions = getModifiedOptions(
        getFullSourceOptions(),
        options -> {
            options.remove("scan.startup.specific-offsets");
        });
    CatalogTable catalogTable = createKafkaSourceCatalogTable(modifiedOptions);

    thrown.expect(ValidationException.class);
    thrown.expect(containsCause(new ValidationException("'scan.startup.specific-offsets' "
        + "is required in 'specific-offsets' startup mode but missing.")));
    FactoryUtil.createTableSource(null,
        objectIdentifier,
        catalogTable,
        new Configuration(),
        Thread.currentThread().getContextClassLoader());
}
Example #22
Source File: SqlToOperationConverterTest.java (from flink, Apache License 2.0)
@Before
public void before() throws TableAlreadyExistException, DatabaseNotExistException {
    catalogManager.setCatalogTableSchemaResolver(new CatalogTableSchemaResolver(parser, isStreamingMode));
    final ObjectPath path1 = new ObjectPath(catalogManager.getCurrentDatabase(), "t1");
    final ObjectPath path2 = new ObjectPath(catalogManager.getCurrentDatabase(), "t2");
    final TableSchema tableSchema = TableSchema.builder()
        .field("a", DataTypes.BIGINT())
        .field("b", DataTypes.VARCHAR(Integer.MAX_VALUE))
        .field("c", DataTypes.INT())
        .field("d", DataTypes.VARCHAR(Integer.MAX_VALUE))
        .build();
    Map<String, String> properties = new HashMap<>();
    properties.put("connector", "COLLECTION");
    final CatalogTable catalogTable = new CatalogTableImpl(tableSchema, properties, "");
    catalog.createTable(path1, catalogTable, true);
    catalog.createTable(path2, catalogTable, true);
}
Example #23
Source File: HiveBatchSource.java (from Alink, Apache License 2.0)
private HiveBatchSource(JobConf jobConf,
                        ObjectPath tablePath,
                        CatalogTable catalogTable,
                        List<Map<String, String>> remainingPartitions,
                        String hiveVersion,
                        boolean partitionPruned,
                        int[] projectedFields,
                        boolean isLimitPushDown,
                        long limit) {
    this.jobConf = Preconditions.checkNotNull(jobConf);
    this.tablePath = Preconditions.checkNotNull(tablePath);
    this.catalogTable = Preconditions.checkNotNull(catalogTable);
    this.remainingPartitions = remainingPartitions;
    this.hiveVersion = hiveVersion;
    hiveShim = HiveShimLoader.loadHiveShim(hiveVersion);
    this.partitionPruned = partitionPruned;
    this.projectedFields = projectedFields;
    this.isLimitPushDown = isLimitPushDown;
    this.limit = limit;
}
Example #24
Source File: KafkaDynamicTableFactoryTestBase.java (from flink, Apache License 2.0)
@Test
public void testInvalidScanStartupMode() {
    // Construct table source using DDL and table source factory
    ObjectIdentifier objectIdentifier = ObjectIdentifier.of(
        "default",
        "default",
        "scanTable");
    final Map<String, String> modifiedOptions = getModifiedOptions(
        getFullSourceOptions(),
        options -> {
            options.put("scan.startup.mode", "abc");
        });
    CatalogTable catalogTable = createKafkaSourceCatalogTable(modifiedOptions);

    thrown.expect(ValidationException.class);
    thrown.expect(containsCause(new ValidationException("Invalid value for option 'scan.startup.mode'. "
        + "Supported values are [earliest-offset, latest-offset, group-offsets, specific-offsets, timestamp], "
        + "but was: abc")));
    FactoryUtil.createTableSource(null,
        objectIdentifier,
        catalogTable,
        new Configuration(),
        Thread.currentThread().getContextClassLoader());
}
Example #25
Source File: HiveCatalogHiveMetadataTest.java (from flink, Apache License 2.0)
@Test
public void testAlterTableColumnStatistics() throws Exception {
    catalog.createDatabase(db1, createDb(), false);
    TableSchema tableSchema = TableSchema.builder()
        .field("first", DataTypes.STRING())
        .field("second", DataTypes.INT())
        .field("third", DataTypes.BOOLEAN())
        .field("fourth", DataTypes.DATE())
        .field("fifth", DataTypes.DOUBLE())
        .field("sixth", DataTypes.BIGINT())
        .field("seventh", DataTypes.BYTES())
        .build();
    CatalogTable catalogTable = new CatalogTableImpl(tableSchema, getBatchTableProperties(), TEST_COMMENT);
    catalog.createTable(path1, catalogTable, false);
    Map<String, CatalogColumnStatisticsDataBase> columnStatisticsDataBaseMap = new HashMap<>();
    columnStatisticsDataBaseMap.put("first", new CatalogColumnStatisticsDataString(10, 5.2, 3, 100));
    columnStatisticsDataBaseMap.put("second", new CatalogColumnStatisticsDataLong(0, 1000, 3, 0));
    columnStatisticsDataBaseMap.put("third", new CatalogColumnStatisticsDataBoolean(15, 20, 3));
    columnStatisticsDataBaseMap.put("fourth", new CatalogColumnStatisticsDataDate(new Date(71L), new Date(17923L), 1321, 0L));
    columnStatisticsDataBaseMap.put("fifth", new CatalogColumnStatisticsDataDouble(15.02, 20.01, 3, 10));
    columnStatisticsDataBaseMap.put("sixth", new CatalogColumnStatisticsDataLong(0, 20, 3, 2));
    columnStatisticsDataBaseMap.put("seventh", new CatalogColumnStatisticsDataBinary(150, 20, 3));
    CatalogColumnStatistics catalogColumnStatistics = new CatalogColumnStatistics(columnStatisticsDataBaseMap);
    catalog.alterTableColumnStatistics(path1, catalogColumnStatistics, false);

    checkEquals(catalogColumnStatistics, catalog.getTableColumnStatistics(path1));
}
Example #26
Source File: CreateTableOperation.java (from flink, Apache License 2.0)
public CreateTableOperation(
        ObjectIdentifier tableIdentifier,
        CatalogTable catalogTable,
        boolean ignoreIfExists,
        boolean isTemporary) {
    this.tableIdentifier = tableIdentifier;
    this.catalogTable = catalogTable;
    this.ignoreIfExists = ignoreIfExists;
    this.isTemporary = isTemporary;
}
Example #27
Source File: HiveTableSource.java (from flink, Apache License 2.0)
public HiveTableSource(
        JobConf jobConf, ReadableConfig flinkConf, ObjectPath tablePath, CatalogTable catalogTable) {
    this.jobConf = Preconditions.checkNotNull(jobConf);
    this.flinkConf = Preconditions.checkNotNull(flinkConf);
    this.tablePath = Preconditions.checkNotNull(tablePath);
    this.catalogTable = Preconditions.checkNotNull(catalogTable);
    this.hiveVersion = Preconditions.checkNotNull(jobConf.get(HiveCatalogValidator.CATALOG_HIVE_VERSION),
        "Hive version is not defined");
    hiveShim = HiveShimLoader.loadHiveShim(hiveVersion);
    partitionPruned = false;
}
Example #28
Source File: HiveDB.java (from Alink, Apache License 2.0)
private HiveBatchSource getHiveBatchSource(String tableName, Params parameter) throws Exception {
    ObjectPath objectPath = ObjectPath.fromString(dbName + "." + tableName);
    CatalogTable catalogTable = getCatalogTable(tableName);
    HiveBatchSource hiveTableSource =
        new HiveBatchSource(new JobConf(catalog.getHiveConf()), objectPath, catalogTable);

    String partitionSpecsStr = parameter.get(HiveSourceParams.PARTITIONS);
    if (!StringUtils.isNullOrWhitespaceOnly(partitionSpecsStr)) {
        String[] partitionSpecs = partitionSpecsStr.trim().split(",");
        List<Map<String, String>> selectedPartitions = getSelectedPartitions(partitionSpecs);
        hiveTableSource = (HiveBatchSource) hiveTableSource.applyPartitionPruning(selectedPartitions);
    }
    return hiveTableSource;
}
Example #29
Source File: HiveTableFactory.java (from flink, Apache License 2.0)
@Override
public TableSink<Row> createTableSink(ObjectPath tablePath, CatalogTable table) {
    Preconditions.checkNotNull(table);
    Preconditions.checkArgument(table instanceof CatalogTableImpl);

    boolean isGeneric = Boolean.valueOf(table.getProperties().get(CatalogConfig.IS_GENERIC));

    if (!isGeneric) {
        return createOutputFormatTableSink(tablePath, table);
    } else {
        return TableFactoryUtil.findAndCreateTableSink(table);
    }
}