Java Code Examples for org.apache.hadoop.hive.metastore.TableType#valueOf()
The following examples show how to use org.apache.hadoop.hive.metastore.TableType#valueOf().
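TableType is the metastore's enum of table kinds (MANAGED_TABLE, EXTERNAL_TABLE, VIRTUAL_VIEW, INDEX_TABLE; newer Hive releases also define MATERIALIZED_VIEW), and valueOf() is the standard compiler-generated enum lookup: it maps the string stored in Table#getTableType() back to a constant and throws IllegalArgumentException when no constant matches. A minimal sketch of the call, assuming only the stock enum constants (the strings below are illustrative, not taken from the examples):

import org.apache.hadoop.hive.metastore.TableType;

public class TableTypeValueOfDemo {
    public static void main(String[] args) {
        // The metastore stores the table type as a plain string; valueOf()
        // turns it back into the enum constant.
        TableType type = TableType.valueOf("EXTERNAL_TABLE");
        System.out.println(type == TableType.EXTERNAL_TABLE); // prints: true

        // Like every enum's valueOf(), the lookup is strict: an unknown
        // name raises IllegalArgumentException rather than returning null.
        try {
            TableType.valueOf("NOT_A_TABLE_TYPE");
        } catch (IllegalArgumentException e) {
            System.out.println("unknown table type: " + e.getMessage());
        }
    }
}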
Example 1
Source File: HiveClientImpl.java, from dremio-oss (Apache License 2.0)
@Override
public Table getTable(final String dbName, final String tableName, boolean ignoreAuthzErrors) throws TException {
    Table table = getTableWithoutTableTypeChecking(dbName, tableName, ignoreAuthzErrors);
    if (table == null) {
        return null;
    }
    // Map the metastore's string table type onto the TableType enum.
    TableType type = TableType.valueOf(table.getTableType());
    switch (type) {
        case EXTERNAL_TABLE:
        case MANAGED_TABLE:
            return table;
        case VIRTUAL_VIEW:
            throw UserException.unsupportedError()
                .message("Hive views are not supported")
                .build(NOPLogger.NOP_LOGGER);
        case INDEX_TABLE:
        default:
            return null;
    }
}
Example 2
Source File: HiveClientImpl.java, from dremio-oss (Apache License 2.0)
@Override
public Table getTable(final String dbName, final String tableName, boolean ignoreAuthzErrors) throws TException {
    Table table = getTableWithoutTableTypeChecking(dbName, tableName, ignoreAuthzErrors);
    if (table == null) {
        return null;
    }
    // Only plain tables are returned; views are rejected outright and any
    // other table type is silently dropped.
    TableType type = TableType.valueOf(table.getTableType());
    switch (type) {
        case EXTERNAL_TABLE:
        case MANAGED_TABLE:
            return table;
        case VIRTUAL_VIEW:
            throw UserException.unsupportedError()
                .message("Hive views are not supported")
                .build(NOPLogger.NOP_LOGGER);
        default:
            return null;
    }
}
Example 3
Source File: InMemoryThriftMetastore.java, from presto (Apache License 2.0)
@Override
public synchronized void createTable(HiveIdentity identity, Table table)
{
    // Reject anything other than the three supported table kinds up front.
    TableType tableType = TableType.valueOf(table.getTableType());
    checkArgument(EnumSet.of(MANAGED_TABLE, EXTERNAL_TABLE, VIRTUAL_VIEW).contains(tableType),
            "Invalid table type: %s", tableType);

    if (tableType == VIRTUAL_VIEW) {
        checkArgument(table.getSd().getLocation() == null, "Storage location for view must be null");
    }
    else {
        File directory = new File(new Path(table.getSd().getLocation()).toUri());
        checkArgument(directory.exists(), "Table directory does not exist");
        if (tableType == MANAGED_TABLE) {
            checkArgument(isParentDir(directory, baseDirectory),
                    "Table directory must be inside of the metastore base directory");
        }
    }

    SchemaTableName schemaTableName = new SchemaTableName(table.getDbName(), table.getTableName());
    Table tableCopy = table.deepCopy();

    if (relations.putIfAbsent(schemaTableName, tableCopy) != null) {
        throw new TableAlreadyExistsException(schemaTableName);
    }

    // Views are additionally indexed in their own map.
    if (tableType == VIRTUAL_VIEW) {
        views.put(schemaTableName, tableCopy);
    }

    PrincipalPrivilegeSet privileges = table.getPrivileges();
    if (privileges != null) {
        throw new UnsupportedOperationException();
    }
}
Example 4
Source File: HiveClientWrapper.java, from pxf (Apache License 2.0)
public Table getHiveTable(IMetaStoreClient client, Metadata.Item itemName) throws Exception {
    Table tbl = client.getTable(itemName.getPath(), itemName.getName());
    String tblType = tbl.getTableType();

    LOG.debug("Item: {}.{}, type: {}", itemName.getPath(), itemName.getName(), tblType);

    // Views cannot be read by PXF, so fail fast when the stored type
    // string resolves to VIRTUAL_VIEW.
    if (TableType.valueOf(tblType) == TableType.VIRTUAL_VIEW) {
        throw new UnsupportedOperationException("Hive views are not supported by PXF");
    }

    return tbl;
}
Example 5
Source File: HiveCatalog.java, from flink (Apache License 2.0)
private static CatalogBaseTable instantiateCatalogTable(Table hiveTable, HiveConf hiveConf) {
    boolean isView = TableType.valueOf(hiveTable.getTableType()) == TableType.VIRTUAL_VIEW;

    // Table properties
    Map<String, String> properties = hiveTable.getParameters();

    boolean isGeneric = Boolean.valueOf(properties.get(CatalogConfig.IS_GENERIC));
    if (isGeneric) {
        properties = retrieveFlinkProperties(properties);
    }

    String comment = properties.remove(HiveCatalogConfig.COMMENT);

    // Table schema
    List<FieldSchema> fields;
    if (org.apache.hadoop.hive.ql.metadata.Table.hasMetastoreBasedSchema(hiveConf,
            hiveTable.getSd().getSerdeInfo().getSerializationLib())) {
        // get schema from metastore
        fields = hiveTable.getSd().getCols();
    } else {
        // get schema from deserializer
        try {
            fields = MetaStoreUtils.getFieldsFromDeserializer(hiveTable.getTableName(),
                    MetaStoreUtils.getDeserializer(hiveConf, hiveTable, true));
        } catch (SerDeException | MetaException e) {
            throw new CatalogException("Failed to get Hive table schema from deserializer", e);
        }
    }
    TableSchema tableSchema = HiveTableUtil.createTableSchema(fields, hiveTable.getPartitionKeys());

    // Partition keys
    List<String> partitionKeys = new ArrayList<>();
    if (!hiveTable.getPartitionKeys().isEmpty()) {
        partitionKeys = getFieldNames(hiveTable.getPartitionKeys());
    }

    if (isView) {
        return new CatalogViewImpl(
                hiveTable.getViewOriginalText(),
                hiveTable.getViewExpandedText(),
                tableSchema,
                properties,
                comment);
    } else {
        return new CatalogTableImpl(tableSchema, partitionKeys, properties, comment);
    }
}
Example 6
Source File: HiveCatalog.java, from flink (Apache License 2.0)
private CatalogBaseTable instantiateCatalogTable(Table hiveTable, HiveConf hiveConf) {
    boolean isView = TableType.valueOf(hiveTable.getTableType()) == TableType.VIRTUAL_VIEW;

    // Table properties
    Map<String, String> properties = hiveTable.getParameters();

    boolean isGeneric = isGenericForGet(hiveTable.getParameters());

    TableSchema tableSchema;
    // Partition keys
    List<String> partitionKeys = new ArrayList<>();

    if (isGeneric) {
        properties = retrieveFlinkProperties(properties);
        DescriptorProperties tableSchemaProps = new DescriptorProperties(true);
        tableSchemaProps.putProperties(properties);
        ObjectPath tablePath = new ObjectPath(hiveTable.getDbName(), hiveTable.getTableName());
        tableSchema = tableSchemaProps.getOptionalTableSchema(Schema.SCHEMA)
                .orElseThrow(() -> new CatalogException(
                        "Failed to get table schema from properties for generic table " + tablePath));
        partitionKeys = tableSchemaProps.getPartitionKeys();
        // remove the schema from properties
        properties = CatalogTableImpl.removeRedundant(properties, tableSchema, partitionKeys);
    } else {
        properties.put(CatalogConfig.IS_GENERIC, String.valueOf(false));
        // Table schema
        List<FieldSchema> fields = getNonPartitionFields(hiveConf, hiveTable);
        Set<String> notNullColumns = client.getNotNullColumns(
                hiveConf, hiveTable.getDbName(), hiveTable.getTableName());
        Optional<UniqueConstraint> primaryKey = isView ?
                Optional.empty() :
                client.getPrimaryKey(hiveTable.getDbName(), hiveTable.getTableName(),
                        HiveTableUtil.relyConstraint((byte) 0));
        // PK columns cannot be null
        primaryKey.ifPresent(pk -> notNullColumns.addAll(pk.getColumns()));
        tableSchema = HiveTableUtil.createTableSchema(fields, hiveTable.getPartitionKeys(),
                notNullColumns, primaryKey.orElse(null));
        if (!hiveTable.getPartitionKeys().isEmpty()) {
            partitionKeys = getFieldNames(hiveTable.getPartitionKeys());
        }
    }

    String comment = properties.remove(HiveCatalogConfig.COMMENT);

    if (isView) {
        return new CatalogViewImpl(
                hiveTable.getViewOriginalText(),
                hiveTable.getViewExpandedText(),
                tableSchema,
                properties,
                comment);
    } else {
        return new CatalogTableImpl(tableSchema, partitionKeys, properties, comment);
    }
}
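One caveat common to all six examples: valueOf() fails hard on strings it does not recognize, so code that may read metastore entries written by a newer Hive release sometimes guards the lookup. A minimal defensive sketch; parseTableType is a hypothetical helper, not part of the Hive API:

import java.util.Optional;
import org.apache.hadoop.hive.metastore.TableType;

final class TableTypes {
    private TableTypes() {}

    // Hypothetical helper: returns empty instead of throwing when the
    // stored type string has no matching TableType constant.
    static Optional<TableType> parseTableType(String stored) {
        if (stored == null) {
            return Optional.empty();
        }
        try {
            return Optional.of(TableType.valueOf(stored));
        } catch (IllegalArgumentException e) {
            return Optional.empty();
        }
    }
}

Returning Optional.empty() lets the caller decide whether an unrecognized type should be skipped or reported, instead of propagating an IllegalArgumentException from deep inside table-listing code.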