Java Code Examples for org.apache.hadoop.hive.metastore.api.Table#getTableName()
The following examples show how to use org.apache.hadoop.hive.metastore.api.Table#getTableName().
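Before the project examples, here is a minimal sketch of the method in isolation. It is not taken from any of the projects below; it assumes an already-connected IMetaStoreClient, and the database and table names ("sales_db", "orders") are hypothetical placeholders.

import org.apache.hadoop.hive.metastore.IMetaStoreClient;
import org.apache.hadoop.hive.metastore.api.Table;

public class GetTableNameSketch {

    // Fetch a table from the metastore and read its identifiers back.
    public static String qualifiedName(IMetaStoreClient client) throws Exception {
        Table table = client.getTable("sales_db", "orders");
        // getDbName() and getTableName() return the identifiers stored in the metastore
        return table.getDbName() + "." + table.getTableName();
    }
}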
Example 1
Source File: MetastoreClientTableIntegrationTest.java From aws-glue-data-catalog-client-for-apache-hive-metastore with Apache License 2.0 | 6 votes |
@Test
public void alterTableValid() throws Exception {
    // TODO: add test for alter Table cascade.
    // if change is related with column and cascade is turned on, it will also change table's partition
    String newType = "boolean";
    Table newHiveTable = CatalogToHiveConverter.convertTable(getTestTable(), hiveTable.getDbName());
    // changing table name is not supported
    newHiveTable.setTableName(hiveTable.getTableName());
    Path oldDBPath = new Path(hiveDB.getLocationUri());
    Path oldTablePath = new Path(hiveTable.getSd().getLocation());
    Path newTablePath = new Path(oldDBPath, newHiveTable.getTableName());
    when(wh.getDatabasePath(hiveDB)).thenReturn(oldDBPath);
    when(wh.getFs(oldTablePath)).thenReturn(new RawLocalFileSystem());
    when(wh.getFs(newTablePath)).thenReturn(new RawLocalFileSystem());
    newHiveTable.setTableType(newType);
    metastoreClient.createTable(hiveTable);
    metastoreClient.alter_table(newHiveTable.getDbName(), hiveTable.getTableName(), newHiveTable);
    Table result = metastoreClient.getTable(hiveDB.getName(), newHiveTable.getTableName());
    assertEquals(newType, result.getTableType());
}
Example 2
Source File: HiveMetaStoreBasedRegister.java From incubator-gobblin with Apache License 2.0 | 6 votes |
private void createOrAlterTable(IMetaStoreClient client, Table table, HiveSpec spec) throws TException, IOException {
    String dbName = table.getDbName();
    String tableName = table.getTableName();
    boolean tableExistenceInCache;
    if (this.optimizedChecks) {
        try {
            this.tableAndDbExistenceCache.get(dbName + ":" + tableName, new Callable<Boolean>() {
                @Override
                public Boolean call() throws Exception {
                    return ensureHiveTableExistenceBeforeAlternation(tableName, dbName, client, table, spec);
                }
            });
        } catch (ExecutionException ee) {
            throw new IOException("Table existence checking throwing execution exception.");
        }
    } else {
        this.ensureHiveTableExistenceBeforeAlternation(tableName, dbName, client, table, spec);
    }
}
Example 3
Source File: LocalHiveMetastoreTestUtils.java From incubator-gobblin with Apache License 2.0 | 6 votes |
public Partition addTestPartition(Table tbl, List<String> values, int createTime) throws Exception {
    StorageDescriptor partitionSd = new StorageDescriptor();
    if (StringUtils.isNotBlank(tbl.getSd().getLocation())) {
        partitionSd.setLocation(tbl.getSd().getLocation() + values);
    } else {
        partitionSd.setLocation("/tmp/" + tbl.getTableName() + "/part1");
    }
    partitionSd.setSerdeInfo(
        new SerDeInfo("name", "serializationLib", ImmutableMap.of(HiveAvroSerDeManager.SCHEMA_URL, "/tmp/dummy")));
    partitionSd.setCols(tbl.getPartitionKeys());
    Partition partition =
        new Partition(values, tbl.getDbName(), tbl.getTableName(), 1, 1, partitionSd, new HashMap<String, String>());
    partition.setCreateTime(createTime);
    return this.getLocalMetastoreClient().add_partition(partition);
}
Example 4
Source File: MetacatHMSHandler.java From metacat with Apache License 2.0 | 6 votes |
private List<Partition> dropPartitionsCoreNoTxn(
        final RawStore ms, final Table tbl, final List<List<String>> partsValues)
        throws MetaException, NoSuchObjectException, InvalidObjectException, InvalidInputException {
    final List<Partition> deletedPartitions = new ArrayList<Partition>();
    Partition part = null;
    final String dbName = tbl.getDbName();
    final String tblName = tbl.getTableName();
    for (List<String> partValues : partsValues) {
        part = ms.getPartition(dbName, tblName, partValues);
        if (part == null) {
            throw new NoSuchObjectException("Partition doesn't exist. " + partValues);
        }
        if (!ms.dropPartition(dbName, tblName, partValues)) {
            throw new MetaException("Unable to drop partition");
        }
        deletedPartitions.add(part);
    }
    return deletedPartitions;
}
Example 5
Source File: CopyPartitionsOperation.java From circus-train with Apache License 2.0 | 6 votes |
/**
 * Copies partitions from oldTable to newTable, partitions copied are modified to take the schema of newTable
 */
public void execute(CloseableMetaStoreClient client, Table oldTable, Table newTable) throws TException {
    int count = 0;
    String databaseName = newTable.getDbName();
    String tableName = newTable.getTableName();
    PartitionIterator partitionIterator = new PartitionIterator(client, oldTable, partitionBatchSize);
    while (partitionIterator.hasNext()) {
        List<Partition> batch = new ArrayList<>();
        for (int i = 0; i < partitionBatchSize && partitionIterator.hasNext(); i++) {
            Partition partition = partitionIterator.next();
            count++;
            Partition copy = new Partition(partition);
            copy.setDbName(databaseName);
            copy.setTableName(tableName);
            StorageDescriptor sd = new StorageDescriptor(partition.getSd());
            sd.setCols(newTable.getSd().getCols());
            copy.setSd(sd);
            batch.add(copy);
        }
        LOG.info("Copying batch of size {} to {}.{}", batch.size(), databaseName, tableName);
        client.add_partitions(batch);
    }
    LOG.info("Copied {} partitions to {}.{}", count, databaseName, tableName);
}
Example 6
Source File: RenameTableOperation.java From circus-train with Apache License 2.0 | 6 votes |
/**
 * <p>
 * NOTE: assumes both `from` and `to` exist
 * </p>
 * Renames tables 'from' table into 'to' table, at the end of the operation 'from' will be gone and 'to' will be
 * renamed.
 */
public void execute(CloseableMetaStoreClient client, Table from, Table to) throws Exception {
    LOG.info("Renaming table {}.{} to {}.{}", from.getDbName(), from.getTableName(), to.getDbName(),
        to.getTableName());
    Table fromTable = client.getTable(from.getDbName(), from.getTableName());
    Table toTable = client.getTable(to.getDbName(), to.getTableName());
    String fromTableName = fromTable.getTableName();
    String toTableName = toTable.getTableName();
    String toDelete = toTableName + DELETE_ME;
    try {
        fromTable.setTableName(toTableName);
        toTable.setTableName(toDelete);
        client.alter_table(toTable.getDbName(), toTableName, toTable);
        client.alter_table(fromTable.getDbName(), fromTableName, fromTable);
    } finally {
        dropTableService.dropTable(client, toTable.getDbName(), toDelete);
    }
}
Example 7
Source File: AlterTableService.java From circus-train with Apache License 2.0 | 6 votes |
public void alterTable(CloseableMetaStoreClient client, Table oldTable, Table newTable) throws Exception {
    List<FieldSchema> oldColumns = oldTable.getSd().getCols();
    List<FieldSchema> newColumns = newTable.getSd().getCols();
    if (hasAnyChangedColumns(oldColumns, newColumns)) {
        LOG.info("Found columns that have changed type, attempting to recreate target table with the new columns."
            + "Old columns: {}, new columns: {}", oldColumns, newColumns);
        Table tempTable = new Table(newTable);
        String tempName = newTable.getTableName() + "_temp";
        tempTable.setTableName(tempName);
        try {
            client.createTable(tempTable);
            copyPartitionsOperation.execute(client, newTable, tempTable);
            renameTableOperation.execute(client, tempTable, newTable);
        } finally {
            dropTableService.dropTable(client, tempTable.getDbName(), tempName);
        }
    } else {
        client.alter_table(newTable.getDbName(), newTable.getTableName(), newTable);
    }
}
Example 8
Source File: HiveCatalog.java From flink with Apache License 2.0 | 6 votes |
private Partition instantiateHivePartition(Table hiveTable, CatalogPartitionSpec partitionSpec, CatalogPartition catalogPartition)
        throws PartitionSpecInvalidException {
    List<String> partCols = getFieldNames(hiveTable.getPartitionKeys());
    List<String> partValues = getOrderedFullPartitionValues(
        partitionSpec, partCols, new ObjectPath(hiveTable.getDbName(), hiveTable.getTableName()));
    // validate partition values
    for (int i = 0; i < partCols.size(); i++) {
        if (StringUtils.isNullOrWhitespaceOnly(partValues.get(i))) {
            throw new PartitionSpecInvalidException(getName(), partCols,
                new ObjectPath(hiveTable.getDbName(), hiveTable.getTableName()), partitionSpec);
        }
    }
    // TODO: handle GenericCatalogPartition
    StorageDescriptor sd = hiveTable.getSd().deepCopy();
    sd.setLocation(catalogPartition.getProperties().remove(HiveCatalogConfig.PARTITION_LOCATION));
    Map<String, String> properties = new HashMap<>(catalogPartition.getProperties());
    properties.put(HiveCatalogConfig.COMMENT, catalogPartition.getComment());
    return HiveTableUtil.createHivePartition(
        hiveTable.getDbName(), hiveTable.getTableName(), partValues, sd, properties);
}
Example 9
Source File: InMemoryThriftMetastore.java From presto with Apache License 2.0 | 6 votes |
@Override
public synchronized void alterTable(HiveIdentity identity, String databaseName, String tableName, Table newTable) {
    SchemaTableName oldName = new SchemaTableName(databaseName, tableName);
    SchemaTableName newName = new SchemaTableName(newTable.getDbName(), newTable.getTableName());

    // if the name did not change, this is a simple schema change
    if (oldName.equals(newName)) {
        if (relations.replace(oldName, newTable) == null) {
            throw new TableNotFoundException(oldName);
        }
        return;
    }

    // remove old table definition and add the new one
    Table table = relations.get(oldName);
    if (table == null) {
        throw new TableNotFoundException(oldName);
    }

    if (relations.putIfAbsent(newName, newTable) != null) {
        throw new TableAlreadyExistsException(newName);
    }
    relations.remove(oldName);
}
Example 10
Source File: MetastoreClientUtils.java From aws-glue-data-catalog-client-for-apache-hive-metastore with Apache License 2.0 | 6 votes |
/**
 * Taken from HiveMetaStore#create_table_core
 * https://github.com/apache/hive/blob/rel/release-2.3.0/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java#L1370-L1383
 */
public static void validateTableObject(Table table, Configuration conf) throws InvalidObjectException {
    checkNotNull(table, "table cannot be null");
    checkNotNull(table.getSd(), "Table#StorageDescriptor cannot be null");

    if (!hiveShims.validateTableName(table.getTableName(), conf)) {
        throw new InvalidObjectException(table.getTableName() + " is not a valid object name");
    }
    String validate = MetaStoreUtils.validateTblColumns(table.getSd().getCols());
    if (validate != null) {
        throw new InvalidObjectException("Invalid column " + validate);
    }
    if (table.getPartitionKeys() != null) {
        validate = MetaStoreUtils.validateTblColumns(table.getPartitionKeys());
        if (validate != null) {
            throw new InvalidObjectException("Invalid partition column " + validate);
        }
    }
}
Example 11
Source File: HiveStatsUtil.java From flink with Apache License 2.0 | 5 votes |
/**
 * Create columnStatistics from the given Hive column stats of a hive table.
 */
public static ColumnStatistics createTableColumnStats(
        Table hiveTable,
        Map<String, CatalogColumnStatisticsDataBase> colStats) {
    ColumnStatisticsDesc desc = new ColumnStatisticsDesc(true, hiveTable.getDbName(), hiveTable.getTableName());
    return createHiveColumnStatistics(colStats, hiveTable.getSd(), desc);
}
Example 12
Source File: FederatedHMSHandlerTest.java From waggle-dance with Apache License 2.0 | 5 votes |
@Test
public void get_table_req() throws TException {
    Table table = new Table();
    table.setDbName(DB_P);
    table.setTableName("table");
    GetTableRequest request = new GetTableRequest(table.getDbName(), table.getTableName());
    GetTableResult response = new GetTableResult(table);
    when(primaryClient.get_table_req(request)).thenReturn(response);
    when(primaryMapping.transformInboundGetTableRequest(request)).thenReturn(request);
    when(primaryMapping.transformOutboundGetTableResult(response)).thenReturn(response);
    GetTableResult result = handler.get_table_req(request);
    assertThat(result.getTable().getDbName(), is(DB_P));
    assertThat(result.getTable().getTableName(), is("table"));
}
Example 13
Source File: MetacatHMSHandler.java From metacat with Apache License 2.0 | 5 votes |
private List<Partition> addPartitionsCoreNoTxn(
        final RawStore ms, final Table tbl, final List<Partition> parts, final boolean ifNotExists,
        final Map<PartValEqWrapper, Boolean> addedPartitions, final List<Partition> existingParts)
        throws MetaException, InvalidObjectException, AlreadyExistsException, TException {
    logInfo("add_partitions");
    final String dbName = tbl.getDbName();
    final String tblName = tbl.getTableName();
    final List<Partition> result = new ArrayList<Partition>();
    for (Partition part : parts) {
        if (!part.getTableName().equals(tblName) || !part.getDbName().equals(dbName)) {
            throw new MetaException("Partition does not belong to target table "
                + dbName + "." + tblName + ": " + part);
        }
        final boolean shouldAdd = startAddPartition(ms, part, ifNotExists);
        if (!shouldAdd) {
            existingParts.add(part);
            LOG.info("Not adding partition " + part + " as it already exists");
            continue;
        }
        final boolean madeDir = createLocationForAddedPartition(tbl, part);
        if (addedPartitions.put(new PartValEqWrapper(part), madeDir) != null) {
            // Technically, for ifNotExists case, we could insert one and discard the other
            // because the first one now "exists", but it seems better to report the problem
            // upstream as such a command doesn't make sense.
            throw new MetaException("Duplicate partitions in the list: " + part);
        }
        initializeAddedPartition(tbl, part, madeDir);
        result.add(part);
    }
    return result;
}
Example 14
Source File: HiveCatalog.java From flink with Apache License 2.0 | 5 votes |
private Partition instantiateHivePartition(Table hiveTable, CatalogPartitionSpec partitionSpec, CatalogPartition catalogPartition)
        throws PartitionSpecInvalidException {
    List<String> partCols = getFieldNames(hiveTable.getPartitionKeys());
    List<String> partValues = getOrderedFullPartitionValues(
        partitionSpec, partCols, new ObjectPath(hiveTable.getDbName(), hiveTable.getTableName()));
    // validate partition values
    for (int i = 0; i < partCols.size(); i++) {
        if (StringUtils.isNullOrWhitespaceOnly(partValues.get(i))) {
            throw new PartitionSpecInvalidException(getName(), partCols,
                new ObjectPath(hiveTable.getDbName(), hiveTable.getTableName()), partitionSpec);
        }
    }
    // TODO: handle GenericCatalogPartition
    StorageDescriptor sd = hiveTable.getSd().deepCopy();
    sd.setLocation(catalogPartition.getProperties().remove(SqlCreateHiveTable.TABLE_LOCATION_URI));
    Map<String, String> properties = new HashMap<>(catalogPartition.getProperties());
    String comment = catalogPartition.getComment();
    if (comment != null) {
        properties.put(HiveCatalogConfig.COMMENT, comment);
    }
    return HiveTableUtil.createHivePartition(
        hiveTable.getDbName(), hiveTable.getTableName(), partValues, sd, properties);
}
Example 15
Source File: HiveStatsUtil.java From flink with Apache License 2.0 | 5 votes |
/**
 * Create columnStatistics from the given Hive column stats of a hive table.
 */
public static ColumnStatistics createTableColumnStats(
        Table hiveTable,
        Map<String, CatalogColumnStatisticsDataBase> colStats,
        String hiveVersion) {
    ColumnStatisticsDesc desc = new ColumnStatisticsDesc(true, hiveTable.getDbName(), hiveTable.getTableName());
    return createHiveColumnStatistics(colStats, hiveTable.getSd(), desc, hiveVersion);
}
Example 16
Source File: InMemoryThriftMetastore.java From presto with Apache License 2.0 | 5 votes |
@Override
public synchronized void createTable(HiveIdentity identity, Table table) {
    TableType tableType = TableType.valueOf(table.getTableType());
    checkArgument(EnumSet.of(MANAGED_TABLE, EXTERNAL_TABLE, VIRTUAL_VIEW).contains(tableType), "Invalid table type: %s", tableType);

    if (tableType == VIRTUAL_VIEW) {
        checkArgument(table.getSd().getLocation() == null, "Storage location for view must be null");
    } else {
        File directory = new File(new Path(table.getSd().getLocation()).toUri());
        checkArgument(directory.exists(), "Table directory does not exist");
        if (tableType == MANAGED_TABLE) {
            checkArgument(isParentDir(directory, baseDirectory), "Table directory must be inside of the metastore base directory");
        }
    }

    SchemaTableName schemaTableName = new SchemaTableName(table.getDbName(), table.getTableName());
    Table tableCopy = table.deepCopy();

    if (relations.putIfAbsent(schemaTableName, tableCopy) != null) {
        throw new TableAlreadyExistsException(schemaTableName);
    }

    if (tableType == VIRTUAL_VIEW) {
        views.put(schemaTableName, tableCopy);
    }

    PrincipalPrivilegeSet privileges = table.getPrivileges();
    if (privileges != null) {
        throw new UnsupportedOperationException();
    }
}
Example 17
Source File: HiveMetaStoreBasedRegister.java From incubator-gobblin with Apache License 2.0 | 5 votes |
@Deprecated
/**
 * @deprecated Please use {@link #createOrAlterTable(IMetaStoreClient, Table, HiveSpec)} instead.
 */
private boolean createTableIfNotExists(IMetaStoreClient client, Table table, HiveTable hiveTable) throws IOException {
    String dbName = table.getDbName();
    String tableName = table.getTableName();
    try (AutoCloseableHiveLock lock = this.locks.getTableLock(dbName, tableName)) {
        boolean tableExists;
        try (Timer.Context context = this.metricContext.timer(TABLE_EXISTS).time()) {
            tableExists = client.tableExists(table.getDbName(), table.getTableName());
        }
        if (tableExists) {
            return false;
        }
        try (Timer.Context context = this.metricContext.timer(CREATE_HIVE_TABLE).time()) {
            client.createTable(getTableWithCreateTimeNow(table));
        }
        log.info(String.format("Created Hive table %s in db %s", tableName, dbName));
        HiveMetaStoreEventHelper.submitSuccessfulTableCreation(this.eventSubmitter, hiveTable);
        return true;
    } catch (TException e) {
        HiveMetaStoreEventHelper.submitFailedTableCreation(eventSubmitter, hiveTable, e);
        throw new IOException(String.format("Error in creating or altering Hive table %s in db %s",
            table.getTableName(), table.getDbName()), e);
    }
}
Example 18
Source File: HiveCatalog.java From flink with Apache License 2.0 | 4 votes |
private CatalogBaseTable instantiateCatalogTable(Table hiveTable, HiveConf hiveConf) {
    boolean isView = TableType.valueOf(hiveTable.getTableType()) == TableType.VIRTUAL_VIEW;

    // Table properties
    Map<String, String> properties = hiveTable.getParameters();

    boolean isGeneric = isGenericForGet(hiveTable.getParameters());

    TableSchema tableSchema;
    // Partition keys
    List<String> partitionKeys = new ArrayList<>();

    if (isGeneric) {
        properties = retrieveFlinkProperties(properties);
        DescriptorProperties tableSchemaProps = new DescriptorProperties(true);
        tableSchemaProps.putProperties(properties);
        ObjectPath tablePath = new ObjectPath(hiveTable.getDbName(), hiveTable.getTableName());
        tableSchema = tableSchemaProps.getOptionalTableSchema(Schema.SCHEMA)
            .orElseThrow(() -> new CatalogException("Failed to get table schema from properties for generic table " + tablePath));
        partitionKeys = tableSchemaProps.getPartitionKeys();
        // remove the schema from properties
        properties = CatalogTableImpl.removeRedundant(properties, tableSchema, partitionKeys);
    } else {
        properties.put(CatalogConfig.IS_GENERIC, String.valueOf(false));
        // Table schema
        List<FieldSchema> fields = getNonPartitionFields(hiveConf, hiveTable);
        Set<String> notNullColumns = client.getNotNullColumns(hiveConf, hiveTable.getDbName(), hiveTable.getTableName());
        Optional<UniqueConstraint> primaryKey = isView ? Optional.empty() :
            client.getPrimaryKey(hiveTable.getDbName(), hiveTable.getTableName(), HiveTableUtil.relyConstraint((byte) 0));
        // PK columns cannot be null
        primaryKey.ifPresent(pk -> notNullColumns.addAll(pk.getColumns()));
        tableSchema = HiveTableUtil.createTableSchema(fields, hiveTable.getPartitionKeys(), notNullColumns, primaryKey.orElse(null));
        if (!hiveTable.getPartitionKeys().isEmpty()) {
            partitionKeys = getFieldNames(hiveTable.getPartitionKeys());
        }
    }

    String comment = properties.remove(HiveCatalogConfig.COMMENT);

    if (isView) {
        return new CatalogViewImpl(
            hiveTable.getViewOriginalText(),
            hiveTable.getViewExpandedText(),
            tableSchema,
            properties,
            comment);
    } else {
        return new CatalogTableImpl(tableSchema, partitionKeys, properties, comment);
    }
}
Example 19
Source File: HiveConfigClientUtils.java From incubator-gobblin with Apache License 2.0 | 2 votes |
/**
 * Get the dataset uri for a hive db and table. The uri is relative to the store uri.
 * @param table the hive table for which a config client uri needs to be built
 */
public static String getDatasetUri(Table table) {
    return HIVE_DATASETS_CONFIG_PREFIX + table.getDbName() + Path.SEPARATOR + table.getTableName();
}