Java Code Examples for org.apache.hadoop.hive.metastore.api.Table#setPartitionKeys()
The following examples show how to use org.apache.hadoop.hive.metastore.api.Table#setPartitionKeys(). Each example is drawn from an open source project; the source file, project, and license are noted above it.
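As a starting point, here is a minimal sketch of the pattern the examples share: partition keys are a list of FieldSchema entries (name, type, comment) set on the Table itself, separate from the data columns held in the table's StorageDescriptor. The class, database, and column names in this sketch are illustrative only, not taken from any of the projects below.

import java.util.Arrays;
import org.apache.hadoop.hive.metastore.api.FieldSchema;
import org.apache.hadoop.hive.metastore.api.Table;

public class PartitionKeysSketch {

  // Illustrative helper: builds a Table partitioned by year and month.
  static Table newPartitionedTable(String db, String name) {
    Table table = new Table();
    table.setDbName(db);
    table.setTableName(name);
    // Each partition key is a FieldSchema of (name, type, comment).
    // Partition columns are kept separate from the data columns that
    // would be set on the table's StorageeDescriptor via sd.setCols().
    table.setPartitionKeys(Arrays.asList(
        new FieldSchema("year", "string", ""),
        new FieldSchema("month", "string", "")));
    return table;
  }
}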
Example 1
Source File: TestUtils.java From waggle-dance with Apache License 2.0
static Table createPartitionedTable(HiveMetaStoreClient metaStoreClient, String database, String table, File location)
    throws Exception {
  Table hiveTable = new Table();
  hiveTable.setDbName(database);
  hiveTable.setTableName(table);
  hiveTable.setTableType(TableType.EXTERNAL_TABLE.name());
  hiveTable.putToParameters("EXTERNAL", "TRUE");
  hiveTable.setPartitionKeys(PARTITION_COLUMNS);

  StorageDescriptor sd = new StorageDescriptor();
  sd.setCols(DATA_COLUMNS);
  sd.setLocation(location.toURI().toString());
  sd.setParameters(new HashMap<>());
  sd.setSerdeInfo(new SerDeInfo());
  hiveTable.setSd(sd);

  metaStoreClient.createTable(hiveTable);
  return hiveTable;
}
Example 2
Source File: HiveAvroToOrcConverterTest.java From incubator-gobblin with Apache License 2.0
@Test
public void dropReplacedPartitionsTest() throws Exception {
  Table table = ConvertibleHiveDatasetTest.getTestTable("dbName", "tableName");
  table.setTableType("VIRTUAL_VIEW");
  table.setPartitionKeys(ImmutableList.of(
      new FieldSchema("year", "string", ""),
      new FieldSchema("month", "string", "")));

  Partition part = new Partition();
  part.setParameters(ImmutableMap.of("gobblin.replaced.partitions", "2015,12|2016,01"));

  SchemaAwareHiveTable hiveTable = new SchemaAwareHiveTable(table, null);
  SchemaAwareHivePartition partition = new SchemaAwareHivePartition(table, part, null);

  QueryBasedHiveConversionEntity conversionEntity =
      new QueryBasedHiveConversionEntity(null, hiveTable, Optional.of(partition));
  List<ImmutableMap<String, String>> expected = ImmutableList.of(
      ImmutableMap.of("year", "2015", "month", "12"),
      ImmutableMap.of("year", "2016", "month", "01"));
  Assert.assertEquals(AbstractAvroToOrcConverter.getDropPartitionsDDLInfo(conversionEntity), expected);

  // Make sure that a partition itself is not dropped
  Partition replacedSelf = new Partition();
  replacedSelf.setParameters(ImmutableMap.of("gobblin.replaced.partitions", "2015,12|2016,01|2016,02"));
  replacedSelf.setValues(ImmutableList.of("2016", "02"));

  conversionEntity = new QueryBasedHiveConversionEntity(null, hiveTable,
      Optional.of(new SchemaAwareHivePartition(table, replacedSelf, null)));
  Assert.assertEquals(AbstractAvroToOrcConverter.getDropPartitionsDDLInfo(conversionEntity), expected);
}
Example 3
Source File: TestUtils.java From circus-train with Apache License 2.0
private static Table createView(
    HiveMetaStoreClient metaStoreClient,
    String database,
    String view,
    String table,
    List<FieldSchema> partitionCols)
    throws TException {
  Table hiveView = new Table();
  hiveView.setDbName(database);
  hiveView.setTableName(view);
  hiveView.setTableType(TableType.VIRTUAL_VIEW.name());
  hiveView.setViewOriginalText(hql(database, table));
  hiveView.setViewExpandedText(expandHql(database, table, DATA_COLUMNS, partitionCols));
  hiveView.setPartitionKeys(partitionCols);

  StorageDescriptor sd = new StorageDescriptor();
  sd.setCols(DATA_COLUMNS);
  sd.setParameters(new HashMap<String, String>());
  sd.setSerdeInfo(new SerDeInfo());
  hiveView.setSd(sd);

  metaStoreClient.createTable(hiveView);
  return hiveView;
}
Example 4
Source File: DestructiveReplicaTest.java From circus-train with Apache License 2.0
@Before
public void setUp() {
  SourceTable sourceTable = new SourceTable();
  sourceTable.setDatabaseName(DATABASE);
  sourceTable.setTableName(TABLE);
  tableReplication.setSourceTable(sourceTable);

  ReplicaTable replicaTable = new ReplicaTable();
  replicaTable.setDatabaseName(DATABASE);
  replicaTable.setTableName(REPLICA_TABLE);
  tableReplication.setReplicaTable(replicaTable);

  when(replicaMetaStoreClientSupplier.get()).thenReturn(client);
  replica = new DestructiveReplica(replicaMetaStoreClientSupplier, cleanupLocationManager, tableReplication);

  table = new Table();
  table.setDbName(DATABASE);
  table.setTableName(REPLICA_TABLE);
  table.setPartitionKeys(Lists.newArrayList(new FieldSchema("part1", "string", "")));
  Map<String, String> parameters = new HashMap<>();
  parameters.put(CircusTrainTableParameter.SOURCE_TABLE.parameterName(), DATABASE + "." + TABLE);
  parameters.put(REPLICATION_EVENT.parameterName(), EVENT_ID);
  table.setParameters(parameters);

  StorageDescriptor sd1 = new StorageDescriptor();
  sd1.setLocation(tableLocation.toString());
  table.setSd(sd1);
}
Example 5
Source File: HiveUtils.java From kite with Apache License 2.0
static Table createEmptyTable(String namespace, String name) {
  Table table = new Table();
  table.setDbName(namespace);
  table.setTableName(name);
  table.setPartitionKeys(new ArrayList<FieldSchema>());
  table.setParameters(new HashMap<String, String>());

  StorageDescriptor sd = new StorageDescriptor();
  sd.setSerdeInfo(new SerDeInfo());
  sd.setNumBuckets(-1);
  sd.setBucketCols(new ArrayList<String>());
  sd.setCols(new ArrayList<FieldSchema>());
  sd.setParameters(new HashMap<String, String>());
  sd.setSortCols(new ArrayList<Order>());
  sd.getSerdeInfo().setParameters(new HashMap<String, String>());

  SkewedInfo skewInfo = new SkewedInfo();
  skewInfo.setSkewedColNames(new ArrayList<String>());
  skewInfo.setSkewedColValues(new ArrayList<List<String>>());
  skewInfo.setSkewedColValueLocationMaps(new HashMap<List<String>, String>());
  sd.setSkewedInfo(skewInfo);

  table.setSd(sd);
  return table;
}
Example 6
Source File: ReplicaTest.java From circus-train with Apache License 2.0
private Table newTable() {
  Table table = new Table();
  table.setDbName(DB_NAME);
  table.setTableName(TABLE_NAME);
  table.setTableType(TableType.EXTERNAL_TABLE.name());

  StorageDescriptor sd = new StorageDescriptor();
  sd.setLocation(tableLocation);
  table.setSd(sd);

  HashMap<String, String> parameters = new HashMap<>();
  parameters.put(StatsSetupConst.ROW_COUNT, "1");
  table.setParameters(parameters);

  table.setPartitionKeys(PARTITIONS);
  return table;
}
Example 7
Source File: HiveDifferencesTest.java From circus-train with Apache License 2.0
private static Table newTable(String databaseName, String tableName, String location) {
  Table table = new Table();
  table.setDbName(databaseName);
  table.setTableName(tableName);
  table.setParameters(new HashMap<String, String>());
  table.setPartitionKeys(Arrays.asList(new FieldSchema("a", "string", null)));

  StorageDescriptor sd = new StorageDescriptor();
  sd.setLocation(location);
  table.setSd(sd);
  return table;
}
Example 8
Source File: HiveMetadataFetcherTest.java From pxf with Apache License 2.0
@Test
public void getTableMetadata() throws Exception {
  fetcher = new HiveMetadataFetcher(context, mockConfigurationFactory, fakeHiveClientWrapper);
  String tableName = "cause";

  // mock hive table returned from hive client
  List<FieldSchema> fields = new ArrayList<>();
  fields.add(new FieldSchema("field1", "string", null));
  fields.add(new FieldSchema("field2", "int", null));
  StorageDescriptor sd = new StorageDescriptor();
  sd.setCols(fields);
  sd.setInputFormat("org.apache.hadoop.mapred.TextInputFormat");
  Table hiveTable = new Table();
  hiveTable.setTableType("MANAGED_TABLE");
  hiveTable.setSd(sd);
  hiveTable.setPartitionKeys(new ArrayList<>());
  when(mockHiveClient.getTable("default", tableName)).thenReturn(hiveTable);

  // Get metadata
  metadataList = fetcher.getMetadata(tableName);
  Metadata metadata = metadataList.get(0);

  assertEquals("default.cause", metadata.getItem().toString());

  List<Metadata.Field> resultFields = metadata.getFields();
  assertNotNull(resultFields);
  assertEquals(2, resultFields.size());
  Metadata.Field field = resultFields.get(0);
  assertEquals("field1", field.getName());
  assertEquals("text", field.getType().getTypeName()); // converted type
  field = resultFields.get(1);
  assertEquals("field2", field.getName());
  assertEquals("int4", field.getType().getTypeName());
}
Example 9
Source File: HiveUtils.java From kite with Apache License 2.0
private static List<FieldSchema> getPartCols(Table table) {
  List<FieldSchema> partKeys = table.getPartitionKeys();
  if (partKeys == null) {
    partKeys = new ArrayList<FieldSchema>();
    table.setPartitionKeys(partKeys);
  }
  return partKeys;
}
Example 10
Source File: HiveEntityFactory.java From circus-train with Apache License 2.0
public static Table newTable(String name, String dbName, List<FieldSchema> partitionKeys, StorageDescriptor sd) {
  Table table = new Table();
  table.setTableName(name);
  table.setDbName(dbName);
  table.setSd(sd);
  table.setPartitionKeys(partitionKeys);
  table.setTableType(TableType.EXTERNAL_TABLE.name());
  table.setParameters(new HashMap<String, String>());
  return table;
}
Example 11
Source File: AbstractMetastoreTestWithStaticConfiguration.java From incubator-sentry with Apache License 2.0
public Table createMetastoreTableWithPartition(HiveMetaStoreClient client, String dbName,
    String tabName, List<FieldSchema> cols, List<FieldSchema> partionVals) throws Exception {
  Table tbl = makeMetastoreTableObject(client, dbName, tabName, cols);
  tbl.setPartitionKeys(partionVals);
  client.createTable(tbl);
  return client.getTable(dbName, tabName);
}
Example 12
Source File: ComparisonToolIntegrationTest.java From circus-train with Apache License 2.0
private void createReplicaTable() throws Exception {
  File partitionEurope = new File(replicaTableUri, "local_date=2000-01-01");
  File partitionUk = new File(partitionEurope, "local_hour=0");
  File dataFileUk = new File(partitionUk, PART_00000);
  FileUtils.writeStringToFile(dataFileUk, "1\tadam\tlondon\tuk\n2\tsusan\tglasgow\tuk\n");

  File partitionAsia = new File(replicaTableUri, "local_date=2000-01-02");
  File partitionChina = new File(partitionAsia, "local_hour=0");
  File dataFileChina = new File(partitionChina, PART_00000);
  String data = "1\tchun\tbeijing\tchina\n2\tshanghai\tmilan\titaly\n";
  FileUtils.writeStringToFile(dataFileChina, data);

  HiveMetaStoreClient replicaClient = catalog.client();

  Table replica = new Table();
  replica.setDbName(DATABASE);
  replica.setTableName(REPLICA_TABLE);
  replica.setTableType(TableType.EXTERNAL_TABLE.name());
  Map<String, String> parameters = new HashMap<>();
  parameters.put("comment", "comment replica");
  replica.setParameters(parameters);
  List<FieldSchema> partitionColumns = Arrays.asList(
      new FieldSchema("local_date", "string", ""),
      new FieldSchema("local_hour", "string", ""));
  replica.setPartitionKeys(partitionColumns);

  List<FieldSchema> dataColumns = Arrays.asList(
      new FieldSchema("id", "bigint", ""),
      new FieldSchema("name", "string", ""),
      new FieldSchema("city", "string", ""),
      new FieldSchema("country", "string", ""));

  StorageDescriptor sd = new StorageDescriptor();
  sd.setCols(dataColumns);
  sd.setLocation(replicaTableUri.toURI().toString());
  sd.setParameters(new HashMap<String, String>());
  sd.setSerdeInfo(new SerDeInfo());
  replica.setSd(sd);

  replicaClient.createTable(replica);
  LOG.info(">>>> Partitions added: {}",
      replicaClient.add_partitions(Arrays.asList(
          newPartition(REPLICA_TABLE, sd, Arrays.asList("2000-01-01", "0"), partitionUk),
          newPartition(REPLICA_TABLE, sd, Arrays.asList("2000-01-02", "0"), partitionChina))));
}
Example 13
Source File: HiveDifferencesIntegrationTest.java From circus-train with Apache License 2.0
private void createTable(
    String databaseName,
    String tableName,
    File tableLocation,
    String sourceTable,
    String sourceLocation,
    boolean addChecksum)
    throws Exception {
  File partition0 = createPartitionData("part=0", tableLocation, Arrays.asList("1\tadam", "2\tsusan"));
  File partition1 = createPartitionData("part=1", tableLocation, Arrays.asList("3\tchun", "4\tkim"));

  Table table = new Table();
  table.setDbName(databaseName);
  table.setTableName(tableName);
  table.setTableType(TableType.EXTERNAL_TABLE.name());
  table.setParameters(new HashMap<String, String>());
  if (sourceTable != null) {
    table.getParameters().put(CircusTrainTableParameter.SOURCE_TABLE.parameterName(), sourceTable);
  }
  if (sourceLocation != null) {
    table.getParameters().put(CircusTrainTableParameter.SOURCE_LOCATION.parameterName(), sourceLocation);
  }

  List<FieldSchema> partitionColumns = Arrays.asList(PARTITION_COL);
  table.setPartitionKeys(partitionColumns);

  List<FieldSchema> dataColumns = Arrays.asList(FOO_COL, BAR_COL);

  StorageDescriptor sd = new StorageDescriptor();
  sd.setCols(dataColumns);
  sd.setLocation(tableLocation.toURI().toString());
  sd.setParameters(new HashMap<String, String>());
  sd.setSerdeInfo(new SerDeInfo());
  table.setSd(sd);

  HiveMetaStoreClient client = catalog.client();
  client.createTable(table);
  LOG.info(">>>> Partitions added: {}",
      client.add_partitions(Arrays.asList(
          newPartition(databaseName, tableName, sd, Arrays.asList("0"), partition0, sourceTable,
              sourceLocation + "part=0", addChecksum),
          newPartition(databaseName, tableName, sd, Arrays.asList("1"), partition1, sourceTable,
              sourceLocation + "part=1", addChecksum))));
}
Example 14
Source File: ComparisonToolIntegrationTest.java From circus-train with Apache License 2.0
private void createSourceTable() throws Exception {
  File partitionEurope = new File(sourceTableUri, "local_date=2000-01-01");
  File partitionUk = new File(partitionEurope, "local_hour=0");
  File dataFileUk = new File(partitionUk, PART_00000);
  FileUtils.writeStringToFile(dataFileUk, "1\tadam\tlondon\n2\tsusan\tglasgow\n");

  File partitionAsia = new File(sourceTableUri, "local_date=2000-01-02");
  File partitionChina = new File(partitionAsia, "local_hour=0");
  File dataFileChina = new File(partitionChina, PART_00000);
  String data = "1\tchun\tbeijing\n2\tshanghai\tmilan\n";
  FileUtils.writeStringToFile(dataFileChina, data);

  HiveMetaStoreClient sourceClient = catalog.client();

  Table source = new Table();
  source.setDbName(DATABASE);
  source.setTableName(SOURCE_TABLE);
  source.setTableType(TableType.EXTERNAL_TABLE.name());
  Map<String, String> parameters = new HashMap<>();
  parameters.put("comment", "comment source");
  source.setParameters(parameters);
  List<FieldSchema> partitionColumns = Arrays.asList(
      new FieldSchema("local_date", "string", ""),
      new FieldSchema("local_hour", "string", ""));
  source.setPartitionKeys(partitionColumns);

  List<FieldSchema> dataColumns = Arrays.asList(
      new FieldSchema("id", "bigint", ""),
      new FieldSchema("name", "string", ""),
      new FieldSchema("city", "string", ""));

  StorageDescriptor sd = new StorageDescriptor();
  sd.setCols(dataColumns);
  sd.setLocation(sourceTableUri.toURI().toString());
  sd.setParameters(new HashMap<String, String>());
  sd.setSerdeInfo(new SerDeInfo());
  source.setSd(sd);

  sourceClient.createTable(source);
  LOG.info(">>>> Partitions added: {}",
      sourceClient.add_partitions(Arrays.asList(
          newPartition(SOURCE_TABLE, sd, Arrays.asList("2000-01-01", "0"), partitionUk),
          newPartition(SOURCE_TABLE, sd, Arrays.asList("2000-01-02", "0"), partitionChina))));
}
Example 15
Source File: HiveTableUtil.java From flink with Apache License 2.0
public static Table instantiateHiveTable(ObjectPath tablePath, CatalogBaseTable table, HiveConf hiveConf) {
  if (!(table instanceof CatalogTableImpl) && !(table instanceof CatalogViewImpl)) {
    throw new CatalogException(
        "HiveCatalog only supports CatalogTableImpl and CatalogViewImpl");
  }
  // let Hive set default parameters for us, e.g. serialization.format
  Table hiveTable = org.apache.hadoop.hive.ql.metadata.Table.getEmptyTable(
      tablePath.getDatabaseName(), tablePath.getObjectName());
  hiveTable.setCreateTime((int) (System.currentTimeMillis() / 1000));

  Map<String, String> properties = new HashMap<>(table.getProperties());
  // Table comment
  if (table.getComment() != null) {
    properties.put(HiveCatalogConfig.COMMENT, table.getComment());
  }

  boolean isGeneric = HiveCatalog.isGenericForCreate(properties);

  // Hive table's StorageDescriptor
  StorageDescriptor sd = hiveTable.getSd();
  HiveTableUtil.setDefaultStorageFormat(sd, hiveConf);

  if (isGeneric) {
    DescriptorProperties tableSchemaProps = new DescriptorProperties(true);
    tableSchemaProps.putTableSchema(Schema.SCHEMA, table.getSchema());

    if (table instanceof CatalogTable) {
      tableSchemaProps.putPartitionKeys(((CatalogTable) table).getPartitionKeys());
    }

    properties.putAll(tableSchemaProps.asMap());
    properties = maskFlinkProperties(properties);
    hiveTable.setParameters(properties);
  } else {
    HiveTableUtil.initiateTableFromProperties(hiveTable, properties, hiveConf);
    List<FieldSchema> allColumns = HiveTableUtil.createHiveColumns(table.getSchema());
    // Table columns and partition keys
    if (table instanceof CatalogTableImpl) {
      CatalogTable catalogTable = (CatalogTableImpl) table;
      if (catalogTable.isPartitioned()) {
        int partitionKeySize = catalogTable.getPartitionKeys().size();
        List<FieldSchema> regularColumns = allColumns.subList(0, allColumns.size() - partitionKeySize);
        List<FieldSchema> partitionColumns =
            allColumns.subList(allColumns.size() - partitionKeySize, allColumns.size());
        sd.setCols(regularColumns);
        hiveTable.setPartitionKeys(partitionColumns);
      } else {
        sd.setCols(allColumns);
        hiveTable.setPartitionKeys(new ArrayList<>());
      }
    } else {
      sd.setCols(allColumns);
    }
    // Table properties
    hiveTable.getParameters().putAll(properties);
  }

  if (table instanceof CatalogViewImpl) {
    // TODO: [FLINK-12398] Support partitioned view in catalog API
    hiveTable.setPartitionKeys(new ArrayList<>());

    CatalogView view = (CatalogView) table;
    hiveTable.setViewOriginalText(view.getOriginalQuery());
    hiveTable.setViewExpandedText(view.getExpandedQuery());
    hiveTable.setTableType(TableType.VIRTUAL_VIEW.name());
  }

  return hiveTable;
}
Example 16
Source File: HiveMetadataFetcherTest.java From pxf with Apache License 2.0
@Test
public void getTableMetadataWithIncompatibleTables() throws Exception {
  fetcher = new HiveMetadataFetcher(context, mockConfigurationFactory, fakeHiveClientWrapper);

  String tablePattern = "*";
  String dbPattern = "*";
  String dbName = "default";
  String pattern = dbPattern + "." + tablePattern;

  String tableName1 = "viewtable";
  // mock hive table returned from hive client
  Table hiveTable1 = new Table();
  hiveTable1.setTableType("VIRTUAL_VIEW");
  when(mockHiveClient.getTable(dbName, tableName1)).thenReturn(hiveTable1);

  String tableName2 = "regulartable";
  // mock hive table returned from hive client
  List<FieldSchema> fields = new ArrayList<>();
  fields.add(new FieldSchema("field1", "string", null));
  fields.add(new FieldSchema("field2", "int", null));
  StorageDescriptor sd = new StorageDescriptor();
  sd.setCols(fields);
  sd.setInputFormat("org.apache.hadoop.mapred.TextInputFormat");
  Table hiveTable2 = new Table();
  hiveTable2.setTableType("MANAGED_TABLE");
  hiveTable2.setSd(sd);
  hiveTable2.setPartitionKeys(new ArrayList<>());
  when(mockHiveClient.getTable(dbName, tableName2)).thenReturn(hiveTable2);

  // Mock get databases and tables return from hive client
  List<String> tableNames = new ArrayList<>(Arrays.asList(tableName1, tableName2));
  List<String> dbNames = new ArrayList<>(Collections.singletonList(dbName));
  when(mockHiveClient.getDatabases(dbPattern)).thenReturn(dbNames);
  when(mockHiveClient.getTables(dbName, tablePattern)).thenReturn(tableNames);

  // Get metadata
  metadataList = fetcher.getMetadata(pattern);

  assertEquals(1, metadataList.size());
  Metadata metadata = metadataList.get(0);
  assertEquals(dbName + "." + tableName2, metadata.getItem().toString());

  List<Metadata.Field> resultFields = metadata.getFields();
  assertNotNull(resultFields);
  assertEquals(2, resultFields.size());
  Metadata.Field field = resultFields.get(0);
  assertEquals("field1", field.getName());
  assertEquals("text", field.getType().getTypeName()); // converted type
  field = resultFields.get(1);
  assertEquals("field2", field.getName());
  assertEquals("int4", field.getType().getTypeName());
}
Example 17
Source File: HiveMetadataFetcherTest.java From pxf with Apache License 2.0
@Test
public void getTableMetadataWithMultipleTables() throws Exception {
  fetcher = new HiveMetadataFetcher(context, mockConfigurationFactory, fakeHiveClientWrapper);

  String tablePattern = "*";
  String dbPattern = "*";
  String dbName = "default";
  String tableNameBase = "regulartable";
  String pattern = dbPattern + "." + tablePattern;

  List<String> dbNames = new ArrayList<>(Collections.singletonList(dbName));
  List<String> tableNames = new ArrayList<>();

  // Prepare for tables
  List<FieldSchema> fields = new ArrayList<>();
  fields.add(new FieldSchema("field1", "string", null));
  fields.add(new FieldSchema("field2", "int", null));
  StorageDescriptor sd = new StorageDescriptor();
  sd.setCols(fields);
  sd.setInputFormat("org.apache.hadoop.mapred.TextInputFormat");

  // Mock hive tables returned from hive client
  for (int index = 1; index <= 2; index++) {
    String tableName = tableNameBase + index;
    tableNames.add(tableName);
    Table hiveTable = new Table();
    hiveTable.setTableType("MANAGED_TABLE");
    hiveTable.setSd(sd);
    hiveTable.setPartitionKeys(new ArrayList<>());
    when(mockHiveClient.getTable(dbName, tableName)).thenReturn(hiveTable);
  }

  // Mock database and table names return from hive client
  when(mockHiveClient.getDatabases(dbPattern)).thenReturn(dbNames);
  when(mockHiveClient.getTables(dbName, tablePattern)).thenReturn(tableNames);

  // Get metadata
  metadataList = fetcher.getMetadata(pattern);

  assertEquals(2, metadataList.size());
  for (int index = 1; index <= 2; index++) {
    Metadata metadata = metadataList.get(index - 1);
    assertEquals(dbName + "." + tableNameBase + index, metadata.getItem().toString());

    List<Metadata.Field> resultFields = metadata.getFields();
    assertNotNull(resultFields);
    assertEquals(2, resultFields.size());
    Metadata.Field field = resultFields.get(0);
    assertEquals("field1", field.getName());
    assertEquals("text", field.getType().getTypeName()); // converted type
    field = resultFields.get(1);
    assertEquals("field2", field.getName());
    assertEquals("int4", field.getType().getTypeName());
  }
}
Example 18
Source File: HiveCatalog.java From flink with Apache License 2.0
private static Table instantiateHiveTable(ObjectPath tablePath, CatalogBaseTable table) {
  // let Hive set default parameters for us, e.g. serialization.format
  Table hiveTable = org.apache.hadoop.hive.ql.metadata.Table.getEmptyTable(
      tablePath.getDatabaseName(), tablePath.getObjectName());
  hiveTable.setCreateTime((int) (System.currentTimeMillis() / 1000));

  Map<String, String> properties = new HashMap<>(table.getProperties());
  // Table comment
  properties.put(HiveCatalogConfig.COMMENT, table.getComment());
  boolean isGeneric = Boolean.valueOf(properties.get(CatalogConfig.IS_GENERIC));
  if (isGeneric) {
    properties = maskFlinkProperties(properties);
  }
  // Table properties
  hiveTable.setParameters(properties);

  // Hive table's StorageDescriptor
  StorageDescriptor sd = hiveTable.getSd();
  setStorageFormat(sd, properties);

  List<FieldSchema> allColumns = HiveTableUtil.createHiveColumns(table.getSchema());

  // Table columns and partition keys
  if (table instanceof CatalogTableImpl) {
    CatalogTable catalogTable = (CatalogTableImpl) table;
    if (catalogTable.isPartitioned()) {
      int partitionKeySize = catalogTable.getPartitionKeys().size();
      List<FieldSchema> regularColumns = allColumns.subList(0, allColumns.size() - partitionKeySize);
      List<FieldSchema> partitionColumns =
          allColumns.subList(allColumns.size() - partitionKeySize, allColumns.size());
      sd.setCols(regularColumns);
      hiveTable.setPartitionKeys(partitionColumns);
    } else {
      sd.setCols(allColumns);
      hiveTable.setPartitionKeys(new ArrayList<>());
    }
  } else if (table instanceof CatalogViewImpl) {
    CatalogView view = (CatalogViewImpl) table;
    // TODO: [FLINK-12398] Support partitioned view in catalog API
    sd.setCols(allColumns);
    hiveTable.setPartitionKeys(new ArrayList<>());
    hiveTable.setViewOriginalText(view.getOriginalQuery());
    hiveTable.setViewExpandedText(view.getExpandedQuery());
    hiveTable.setTableType(TableType.VIRTUAL_VIEW.name());
  } else {
    throw new CatalogException(
        "HiveCatalog only supports CatalogTableImpl and CatalogViewImpl");
  }

  return hiveTable;
}
Example 19
Source File: HiveConvertersImpl.java From metacat with Apache License 2.0
/**
 * {@inheritDoc}
 */
@Override
public Table metacatToHiveTable(final TableDto dto) {
  final Table table = new Table();
  final QualifiedName name = dto.getName();
  if (name != null) {
    table.setTableName(name.getTableName());
    table.setDbName(name.getDatabaseName());
  }

  final StorageDto storageDto = dto.getSerde();
  if (storageDto != null) {
    table.setOwner(storageDto.getOwner());
  }

  final AuditDto auditDto = dto.getAudit();
  if (auditDto != null && auditDto.getCreatedDate() != null) {
    table.setCreateTime(dateToEpochSeconds(auditDto.getCreatedDate()));
  }

  Map<String, String> params = new HashMap<>();
  if (dto.getMetadata() != null) {
    params = dto.getMetadata();
  }
  table.setParameters(params);
  updateTableTypeAndViewInfo(dto, table);
  table.setSd(fromStorageDto(storageDto, table.getTableName()));

  final List<FieldDto> fields = dto.getFields();
  if (fields == null) {
    table.setPartitionKeys(Collections.emptyList());
    table.getSd().setCols(Collections.emptyList());
  } else {
    final List<FieldSchema> nonPartitionFields = Lists.newArrayListWithCapacity(fields.size());
    final List<FieldSchema> partitionFields = Lists.newArrayListWithCapacity(fields.size());
    for (FieldDto fieldDto : fields) {
      final FieldSchema f = metacatToHiveField(fieldDto);
      if (fieldDto.isPartition_key()) {
        partitionFields.add(f);
      } else {
        nonPartitionFields.add(f);
      }
    }
    table.setPartitionKeys(partitionFields);
    table.getSd().setCols(nonPartitionFields);
  }
  return table;
}
Example 20
Source File: SubmarineMetaStoreTest.java From submarine with Apache License 2.0
@Before
public void createDatabase() throws InvalidObjectException, MetaException {
  listTables();

  Database database = new Database();
  database.setName("testdb");
  database.setDescription("testdb");
  database.setLocationUri("hdfs://mycluster/user/hive/warehouse/testdb.db");
  Map<String, String> map = new HashMap<>();
  map.put("key", "value");
  database.setParameters(map);
  database.setOwnerName("root");
  database.setOwnerType(PrincipalType.USER);
  submarineMetaStore.createDatabase(database);
  assertEquals(1, submarineMetaStore.getDatabaseCount());

  Table table = new Table();
  table.setTableName("testtable");
  table.setDbName("testdb");
  table.setOwner("root");
  // Divide before casting so the epoch-seconds value fits in an int
  table.setCreateTime((int) (new Date().getTime() / 1000));
  table.setLastAccessTime((int) (new Date().getTime() / 1000));
  table.setRetention(0);

  StorageDescriptor sd = new StorageDescriptor();
  List<FieldSchema> fieldSchemas = new ArrayList<>();
  FieldSchema fieldSchema = new FieldSchema();
  fieldSchema.setName("a");
  fieldSchema.setType("int");
  fieldSchema.setComment("a");
  fieldSchemas.add(fieldSchema);
  sd.setCols(fieldSchemas);
  sd.setLocation("hdfs://mycluster/user/hive/warehouse/testdb.db/testtable");
  sd.setInputFormat("org.apache.hadoop.mapred.TextInputFormat");
  sd.setOutputFormat("org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat");
  sd.setCompressed(false);
  sd.setNumBuckets(-1);

  SerDeInfo serdeInfo = new SerDeInfo();
  serdeInfo.setName("test");
  serdeInfo.setSerializationLib("org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe");
  Map<String, String> parametersMap = new HashMap<>();
  parametersMap.put("serialization.format", "|");
  parametersMap.put("field.delim", "|");
  serdeInfo.setParameters(parametersMap);
  sd.setSerdeInfo(serdeInfo);
  table.setSd(sd);

  List<FieldSchema> partitionKeys = new ArrayList<>();
  table.setPartitionKeys(partitionKeys);
  Map<String, String> parameters = new HashMap<>();
  table.setParameters(parameters);
  table.setViewOriginalText("");
  table.setViewExpandedText("");
  table.setTableType("MANAGED_TABLE");
  submarineMetaStore.createTable(table);

  Table tableTest = submarineMetaStore.getTable("testdb", "testtable");
  assertEquals("testtable", tableTest.getTableName());
  int tableCount = submarineMetaStore.getTableCount();
  assertEquals(1, tableCount);
}