Java Code Examples for org.apache.hadoop.hive.metastore.api.Table#getParameters()
The following examples show how to use
org.apache.hadoop.hive.metastore.api.Table#getParameters(). The method returns the table's parameter map: a Map<String, String> of free-form key-value properties stored with the table in the Hive metastore. Each example is taken from an open-source project; the source file, project, and license are noted above the code.
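Before the examples, here is a minimal, self-contained sketch of the pattern most of them share: getParameters() may return null on a freshly constructed Table, so callers defensively copy the map before mutating it and write it back with setParameters(). The class name and property names below are hypothetical, chosen only for illustration.

import java.util.HashMap;
import java.util.Map;

import org.apache.hadoop.hive.metastore.api.Table;

public class TableParameterSketch {

    // Adds or overwrites a single table property, tolerating a null parameter map.
    static void setProperty(Table table, String key, String value) {
        Map<String, String> parameters = table.getParameters();
        // getParameters() returns null if no parameters were ever set on this Table
        Map<String, String> copy = parameters == null
                ? new HashMap<>()
                : new HashMap<>(parameters);
        copy.put(key, value);
        table.setParameters(copy);
    }

    public static void main(String[] args) {
        Table table = new Table();
        setProperty(table, "comment", "example table"); // hypothetical property
        System.out.println(table.getParameters());      // prints {comment=example table}
    }
}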
Example 1
Source File: HiveCatalogTest.java From flink with Apache License 2.0

@Test
public void testCreateHiveTable() {
    Map<String, String> map = new HashMap<>(new FileSystem().path("/test_path").toProperties());
    map.put(CatalogConfig.IS_GENERIC, String.valueOf(false));

    Table hiveTable = HiveTableUtil.instantiateHiveTable(
        new ObjectPath("test", "test"),
        new CatalogTableImpl(schema, map, null),
        HiveTestUtils.createHiveConf());

    Map<String, String> prop = hiveTable.getParameters();
    assertEquals(prop.remove(CatalogConfig.IS_GENERIC), String.valueOf(false));
    assertTrue(prop.keySet().stream().noneMatch(k -> k.startsWith(CatalogConfig.FLINK_PROPERTY_PREFIX)));
}
Example 2
Source File: HiveTableOperations.java From iceberg with Apache License 2.0

private void setParameters(String newMetadataLocation, Table tbl) {
    Map<String, String> parameters = tbl.getParameters();
    if (parameters == null) {
        parameters = new HashMap<>();
    }
    parameters.put(TABLE_TYPE_PROP, ICEBERG_TABLE_TYPE_VALUE.toUpperCase(Locale.ENGLISH));
    parameters.put(METADATA_LOCATION_PROP, newMetadataLocation);
    if (currentMetadataLocation() != null && !currentMetadataLocation().isEmpty()) {
        parameters.put(PREVIOUS_METADATA_LOCATION_PROP, currentMetadataLocation());
    }
    tbl.setParameters(parameters);
}
Example 3
Source File: DropTableService.java From circus-train with Apache License 2.0

/**
 * Removes all parameters from a table before dropping the table.
 */
private void removeTableParamsAndDrop(
        CloseableMetaStoreClient client,
        Table table,
        String databaseName,
        String tableName) throws TException {
    Map<String, String> tableParameters = table.getParameters();
    if (tableParameters != null && !tableParameters.isEmpty()) {
        if (isExternal(tableParameters)) {
            table.setParameters(Collections.singletonMap(EXTERNAL_KEY, IS_EXTERNAL));
        } else {
            table.setParameters(Collections.emptyMap());
        }
        client.alter_table(databaseName, tableName, table);
    }
    log.info("Dropping replica table '{}.{}'.", databaseName, tableName);
    client.dropTable(databaseName, tableName, false, true);
}
Example 4
Source File: HiveTablesTest.java From iceberg with Apache License 2.0

@Test
public void testCreate() throws TException {
    // Table should be created in hive metastore
    final Table table = metastoreClient.getTable(DB_NAME, TABLE_NAME);

    // check parameters are in expected state
    final Map<String, String> parameters = table.getParameters();
    Assert.assertNotNull(parameters);
    Assert.assertTrue(ICEBERG_TABLE_TYPE_VALUE.equalsIgnoreCase(parameters.get(TABLE_TYPE_PROP)));
    Assert.assertTrue(ICEBERG_TABLE_TYPE_VALUE.equalsIgnoreCase(table.getTableType()));

    // Ensure the table is pointing to empty location
    Assert.assertEquals(getTableLocation(TABLE_NAME), table.getSd().getLocation());

    // Ensure it is stored as unpartitioned table in hive.
    Assert.assertEquals(0, table.getPartitionKeysSize());

    // Only 1 snapshotFile Should exist and no manifests should exist
    Assert.assertEquals(1, metadataVersionFiles(TABLE_NAME).size());
    Assert.assertEquals(0, manifestFiles(TABLE_NAME).size());

    final com.netflix.iceberg.Table icebergTable = new HiveTables(hiveConf).load(DB_NAME, TABLE_NAME);
    // Iceberg schema should match the loaded table
    Assert.assertEquals(schema.asStruct(), icebergTable.schema().asStruct());
}
Example 5
Source File: TableParametersTransformationTest.java From circus-train with Apache License 2.0

@Test
public void typicalOverride() {
    transformation.tableReplicationStart(createEventTableReplication(OVERRIDE_KEY, OVERRIDE_VALUE), EVENT_ID);
    Table transformedTable = transformation.transform(table);
    Map<String, String> tableParameters = transformedTable.getParameters();
    assertThat(tableParameters.size(), is(1));
    assertThat(tableParameters.get(OVERRIDE_KEY), is(OVERRIDE_VALUE));
}
Example 6
Source File: HiveCatalogTest.java From flink with Apache License 2.0

@Test
public void testCreateGenericTable() {
    Table hiveTable = HiveTableUtil.instantiateHiveTable(
        new ObjectPath("test", "test"),
        new CatalogTableImpl(schema, new FileSystem().path("/test_path").toProperties(), null),
        HiveTestUtils.createHiveConf());

    Map<String, String> prop = hiveTable.getParameters();
    assertEquals(prop.remove(CatalogConfig.IS_GENERIC), String.valueOf("true"));
    assertTrue(prop.keySet().stream().allMatch(k -> k.startsWith(CatalogConfig.FLINK_PROPERTY_PREFIX)));
}
Example 7
Source File: HiveConnectorFastPartitionService.java From metacat with Apache License 2.0

private void createLocationForPartitions(final QualifiedName tableQName,
                                         final List<PartitionInfo> partitionInfos,
                                         final Table table) {
    final boolean doFileSystemCalls = Boolean.parseBoolean(getContext().getConfiguration()
        .getOrDefault("hive.metastore.use.fs.calls", "true"))
        || (table.getParameters() != null && Boolean.parseBoolean(table.getParameters()
        .getOrDefault("hive.metastore.use.fs.calls", "false")));
    partitionInfos.forEach(partitionInfo ->
        createLocationForPartition(tableQName, partitionInfo, table, doFileSystemCalls));
}
Example 8
Source File: HiveConnectorFastPartitionService.java From metacat with Apache License 2.0

protected void addUpdateDropPartitions(final QualifiedName tableQName,
                                       final Table table,
                                       final List<String> partitionNames,
                                       final List<PartitionInfo> addedPartitionInfos,
                                       final List<PartitionHolder> existingPartitionHolders,
                                       final Set<String> deletePartitionNames) {
    final boolean useHiveFastServiceForSavePartitions = Boolean.parseBoolean(getContext().getConfiguration()
        .getOrDefault("hive.use.embedded.sql.save.partitions", "false"))
        || (table.getParameters() != null && Boolean.parseBoolean(table.getParameters()
        .getOrDefault("hive.use.embedded.sql.save.partitions", "false")));
    if (useHiveFastServiceForSavePartitions) {
        final long start = registry.clock().wallTime();
        try {
            if (!existingPartitionHolders.isEmpty()) {
                final List<PartitionInfo> existingPartitionInfos = existingPartitionHolders.stream()
                    .map(PartitionHolder::getPartitionInfo).collect(Collectors.toList());
                copyTableSdToPartitionInfosSd(existingPartitionInfos, table);
                createLocationForPartitions(tableQName, existingPartitionInfos, table);
            }
            copyTableSdToPartitionInfosSd(addedPartitionInfos, table);
            createLocationForPartitions(tableQName, addedPartitionInfos, table);
        } finally {
            registry.timer(registry
                .createId(HiveMetrics.TagCreatePartitionLocations.getMetricName()).withTags(tableQName.parts()))
                .record(registry.clock().wallTime() - start, TimeUnit.MILLISECONDS);
        }
        directSqlSavePartition.addUpdateDropPartitions(tableQName, table, addedPartitionInfos,
            existingPartitionHolders, deletePartitionNames);
    } else {
        super.addUpdateDropPartitions(tableQName, table, partitionNames, addedPartitionInfos,
            existingPartitionHolders, deletePartitionNames);
    }
}
Example 9
Source File: TableParametersTransformationTest.java From circus-train with Apache License 2.0

@Test
public void typicalTwoReplicationsSecondOverride() {
    transformation.tableReplicationStart(createEventTableReplication(Collections.EMPTY_MAP), EVENT_ID);
    transformation.transform(table);
    assertThat(table.getParameters().size(), is(1));
    assertThat(table.getParameters().get(KEY), is(VALUE));

    transformation.tableReplicationStart(createEventTableReplication(SECOND_OVERRIDE_KEY, SECOND_OVERRIDE_VALUE),
        EVENT_ID);
    Table transformedTable = transformation.transform(new Table());
    Map<String, String> tableParameters = transformedTable.getParameters();
    assertThat(tableParameters.size(), is(1));
    assertThat(tableParameters.get(SECOND_OVERRIDE_KEY), is(SECOND_OVERRIDE_VALUE));
}
Example 10
Source File: TableParametersTransformationTest.java From circus-train with Apache License 2.0

@Test
public void typicalTwoReplicationsFirstOverride() {
    transformation.tableReplicationStart(createEventTableReplication(OVERRIDE_KEY, OVERRIDE_VALUE), EVENT_ID);
    transformation.transform(table);
    assertThat(table.getParameters().size(), is(1));
    assertThat(table.getParameters().get(OVERRIDE_KEY), is(OVERRIDE_VALUE));

    transformation.tableReplicationStart(createEventTableReplication(Collections.EMPTY_MAP), EVENT_ID);
    Table transformedTable = transformation.transform(new Table());
    Map<String, String> tableParameters = transformedTable.getParameters();
    assertThat(tableParameters.size(), is(1));
    assertThat(tableParameters.get(KEY), is(VALUE));
}
Example 11
Source File: TableParametersTransformationTest.java From circus-train with Apache License 2.0

@Test
public void typicalTwoReplicationsBothOverride() {
    transformation.tableReplicationStart(createEventTableReplication(OVERRIDE_KEY, OVERRIDE_VALUE), EVENT_ID);
    transformation.transform(table);
    assertThat(table.getParameters().size(), is(1));
    assertThat(table.getParameters().get(OVERRIDE_KEY), is(OVERRIDE_VALUE));

    transformation.tableReplicationStart(createEventTableReplication(SECOND_OVERRIDE_KEY, SECOND_OVERRIDE_VALUE),
        EVENT_ID);
    Table transformedTable = transformation.transform(new Table());
    Map<String, String> tableParameters = transformedTable.getParameters();
    assertThat(tableParameters.size(), is(1));
    assertThat(tableParameters.get(SECOND_OVERRIDE_KEY), is(SECOND_OVERRIDE_VALUE));
}
Example 12
Source File: AvroHiveTableStrategyTest.java From data-highway with Apache License 2.0

@Test
public void newHiveTable() throws URISyntaxException {
    when(uriResolver.resolve(schema1, TABLE, 1))
        .thenReturn(new URI("https://s3.amazonaws.com/road-schema-bucket/roads/table/schemas/1/table_v1.avsc"));
    doReturn(Instant.ofEpochSecond(1526462225L)).when(clock).instant();

    Table result = underTest.newHiveTable(DATABASE, TABLE, PARTITION_COLUMN, LOCATION, schema1, 1);

    assertThat(result.getDbName(), is(DATABASE));
    assertThat(result.getTableName(), is(TABLE));
    assertThat(result.getTableType(), is(TableType.EXTERNAL_TABLE.toString()));
    Map<String, String> parameters = result.getParameters();
    assertThat(parameters.get("EXTERNAL"), is("TRUE"));
    assertThat(parameters.get("data-highway.version"), is(DataHighwayVersion.VERSION));
    assertThat(parameters.get("data-highway.last-revision"), is("2018-05-16T09:17:05Z"));
    assertThat(parameters.get(AvroHiveTableStrategy.AVRO_SCHEMA_URL),
        is("https://s3.amazonaws.com/road-schema-bucket/roads/table/schemas/1/table_v1.avsc"));
    assertThat(parameters.get(AvroHiveTableStrategy.AVRO_SCHEMA_VERSION), is("1"));
    List<FieldSchema> partitionKeys = result.getPartitionKeys();
    assertThat(partitionKeys.size(), is(1));
    assertThat(partitionKeys.get(0), is(new FieldSchema(PARTITION_COLUMN, "string", null)));
    StorageDescriptor storageDescriptor = result.getSd();
    assertThat(storageDescriptor.getInputFormat(), is(AvroStorageDescriptorFactory.AVRO_INPUT_FORMAT));
    assertThat(storageDescriptor.getOutputFormat(), is(AvroStorageDescriptorFactory.AVRO_OUTPUT_FORMAT));
    assertThat(storageDescriptor.getLocation(), is(LOCATION));
    assertThat(storageDescriptor.getCols().size(), is(0));
    SerDeInfo serdeInfo = storageDescriptor.getSerdeInfo();
    assertThat(serdeInfo.getSerializationLib(), is(AvroStorageDescriptorFactory.AVRO_SERDE));
}
Example 13
Source File: TableParametersTransformationTest.java From circus-train with Apache License 2.0

@Test
public void transformationParametersOverwriteTableParameters() {
    Map<String, String> parameters = new HashMap<>();
    parameters.put(KEY, "old_value");
    table.setParameters(parameters);
    Table transformedTable = transformation.transform(table);
    Map<String, String> tableParameters = transformedTable.getParameters();
    assertThat(tableParameters.size(), is(1));
    assertThat(tableParameters.get(KEY), is(VALUE));
}
Example 14
Source File: TableParametersTransformationTest.java From circus-train with Apache License 2.0

@Test
public void typicalWithTableParameters() {
    Map<String, String> parameters = new HashMap<>();
    parameters.put("old_key", "old_value");
    table.setParameters(parameters);
    Table transformedTable = transformation.transform(table);
    Map<String, String> tableParameters = transformedTable.getParameters();
    assertThat(tableParameters.size(), is(2));
    assertThat(tableParameters.get("old_key"), is("old_value"));
    assertThat(tableParameters.get(KEY), is(VALUE));
}
Example 15
Source File: TableParametersTransformationTest.java From circus-train with Apache License 2.0

@Test
public void typical() {
    Table transformedTable = transformation.transform(table);
    Map<String, String> tableParameters = transformedTable.getParameters();
    assertThat(tableParameters.size(), is(1));
    assertThat(tableParameters.get(KEY), is(VALUE));
}
Example 16
Source File: TableParametersTransformation.java From circus-train with Apache License 2.0

private Map<String, String> mergeTableParameters(Map<String, String> tableParameters, Table table) {
    Map<String, String> parameters;
    if (table.getParameters() != null) {
        parameters = new LinkedHashMap<>(table.getParameters());
    } else {
        parameters = new LinkedHashMap<>();
    }
    parameters.putAll(tableParameters);
    return parameters;
}
Example 17
Source File: HiveUtils.java From kite with Apache License 2.0

static DatasetDescriptor descriptorForTable(Configuration conf, Table table) {
    final DatasetDescriptor.Builder builder = new DatasetDescriptor.Builder();

    Format format;
    final String serializationLib = table.getSd().getSerdeInfo().getSerializationLib();
    if (SERDE_TO_FORMAT.containsKey(serializationLib)) {
        format = SERDE_TO_FORMAT.get(serializationLib);
        builder.format(format);
    } else {
        // TODO: should this use an "unknown" format? others fail in open()
        throw new UnknownFormatException("Unknown format for serde:" + serializationLib);
    }

    final Path dataLocation = new Path(table.getSd().getLocation());
    final FileSystem fs = fsForPath(conf, dataLocation);
    builder.location(fs.makeQualified(dataLocation));

    // custom properties
    Map<String, String> properties = table.getParameters();
    String namesProperty = coalesce(
        properties.get(CUSTOM_PROPERTIES_PROPERTY_NAME),
        properties.get(OLD_CUSTOM_PROPERTIES_PROPERTY_NAME));
    if (namesProperty != null) {
        for (String property : NAME_SPLITTER.split(namesProperty)) {
            builder.property(property, properties.get(property));
        }
    }

    PartitionStrategy partitionStrategy = null;
    if (isPartitioned(table)) {
        String partitionProperty = coalesce(
            properties.get(PARTITION_EXPRESSION_PROPERTY_NAME),
            properties.get(OLD_PARTITION_EXPRESSION_PROPERTY_NAME));
        if (partitionProperty != null) {
            partitionStrategy = Accessor.getDefault().fromExpression(partitionProperty);
        } else {
            // build a partition strategy for the table from the Hive strategy
            partitionStrategy = fromPartitionColumns(getPartCols(table));
        }
        builder.partitionStrategy(partitionStrategy);
    }

    String schemaUrlString = properties.get(AVRO_SCHEMA_URL_PROPERTY_NAME);
    if (schemaUrlString != null) {
        try {
            // URI.create is safe because this library wrote the URI
            builder.schemaUri(URI.create(schemaUrlString));
        } catch (IOException e) {
            throw new DatasetIOException("Could not read schema", e);
        }
    } else {
        String schemaLiteral = properties.get(AVRO_SCHEMA_LITERAL_PROPERTY_NAME);
        if (schemaLiteral != null) {
            builder.schemaLiteral(schemaLiteral);
        } else {
            builder.schema(HiveSchemaConverter.convertTable(
                table.getTableName(), table.getSd().getCols(), partitionStrategy));
        }
    }

    String compressionType = properties.get(COMPRESSION_TYPE_PROPERTY_NAME);
    if (compressionType != null) {
        builder.compressionType(compressionType);
    }

    try {
        return builder.build();
    } catch (IllegalStateException ex) {
        throw new DatasetException("Cannot find schema: missing metadata");
    }
}
Example 18
Source File: HiveCatalog.java From flink with Apache License 2.0

private static CatalogBaseTable instantiateCatalogTable(Table hiveTable, HiveConf hiveConf) {
    boolean isView = TableType.valueOf(hiveTable.getTableType()) == TableType.VIRTUAL_VIEW;

    // Table properties
    Map<String, String> properties = hiveTable.getParameters();

    boolean isGeneric = Boolean.valueOf(properties.get(CatalogConfig.IS_GENERIC));
    if (isGeneric) {
        properties = retrieveFlinkProperties(properties);
    }

    String comment = properties.remove(HiveCatalogConfig.COMMENT);

    // Table schema
    List<FieldSchema> fields;
    if (org.apache.hadoop.hive.ql.metadata.Table.hasMetastoreBasedSchema(hiveConf,
            hiveTable.getSd().getSerdeInfo().getSerializationLib())) {
        // get schema from metastore
        fields = hiveTable.getSd().getCols();
    } else {
        // get schema from deserializer
        try {
            fields = MetaStoreUtils.getFieldsFromDeserializer(hiveTable.getTableName(),
                MetaStoreUtils.getDeserializer(hiveConf, hiveTable, true));
        } catch (SerDeException | MetaException e) {
            throw new CatalogException("Failed to get Hive table schema from deserializer", e);
        }
    }
    TableSchema tableSchema = HiveTableUtil.createTableSchema(fields, hiveTable.getPartitionKeys());

    // Partition keys
    List<String> partitionKeys = new ArrayList<>();
    if (!hiveTable.getPartitionKeys().isEmpty()) {
        partitionKeys = getFieldNames(hiveTable.getPartitionKeys());
    }

    if (isView) {
        return new CatalogViewImpl(
            hiveTable.getViewOriginalText(),
            hiveTable.getViewExpandedText(),
            tableSchema,
            properties,
            comment);
    } else {
        return new CatalogTableImpl(tableSchema, partitionKeys, properties, comment);
    }
}
Example 19
Source File: HiveCatalog.java From flink with Apache License 2.0

private CatalogBaseTable instantiateCatalogTable(Table hiveTable, HiveConf hiveConf) {
    boolean isView = TableType.valueOf(hiveTable.getTableType()) == TableType.VIRTUAL_VIEW;

    // Table properties
    Map<String, String> properties = hiveTable.getParameters();

    boolean isGeneric = isGenericForGet(hiveTable.getParameters());

    TableSchema tableSchema;
    // Partition keys
    List<String> partitionKeys = new ArrayList<>();

    if (isGeneric) {
        properties = retrieveFlinkProperties(properties);
        DescriptorProperties tableSchemaProps = new DescriptorProperties(true);
        tableSchemaProps.putProperties(properties);
        ObjectPath tablePath = new ObjectPath(hiveTable.getDbName(), hiveTable.getTableName());
        tableSchema = tableSchemaProps.getOptionalTableSchema(Schema.SCHEMA)
            .orElseThrow(() -> new CatalogException(
                "Failed to get table schema from properties for generic table " + tablePath));
        partitionKeys = tableSchemaProps.getPartitionKeys();
        // remove the schema from properties
        properties = CatalogTableImpl.removeRedundant(properties, tableSchema, partitionKeys);
    } else {
        properties.put(CatalogConfig.IS_GENERIC, String.valueOf(false));
        // Table schema
        List<FieldSchema> fields = getNonPartitionFields(hiveConf, hiveTable);
        Set<String> notNullColumns = client.getNotNullColumns(
            hiveConf, hiveTable.getDbName(), hiveTable.getTableName());
        Optional<UniqueConstraint> primaryKey = isView
            ? Optional.empty()
            : client.getPrimaryKey(hiveTable.getDbName(), hiveTable.getTableName(),
                HiveTableUtil.relyConstraint((byte) 0));
        // PK columns cannot be null
        primaryKey.ifPresent(pk -> notNullColumns.addAll(pk.getColumns()));
        tableSchema = HiveTableUtil.createTableSchema(
            fields, hiveTable.getPartitionKeys(), notNullColumns, primaryKey.orElse(null));
        if (!hiveTable.getPartitionKeys().isEmpty()) {
            partitionKeys = getFieldNames(hiveTable.getPartitionKeys());
        }
    }

    String comment = properties.remove(HiveCatalogConfig.COMMENT);

    if (isView) {
        return new CatalogViewImpl(
            hiveTable.getViewOriginalText(),
            hiveTable.getViewExpandedText(),
            tableSchema,
            properties,
            comment);
    } else {
        return new CatalogTableImpl(tableSchema, partitionKeys, properties, comment);
    }
}
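Taken together, the examples follow a read-modify-persist cycle: fetch the Table from the metastore, copy and mutate the map returned by getParameters(), write it back with setParameters(), and persist via alter_table() as in Example 3. The sketch below shows that cycle end to end, assuming a reachable metastore; the database, table, and property names are hypothetical.

import java.util.HashMap;
import java.util.Map;

import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
import org.apache.hadoop.hive.metastore.api.Table;
import org.apache.thrift.TException;

public class UpdateTableParameters {

    public static void main(String[] args) throws TException {
        HiveMetaStoreClient client = new HiveMetaStoreClient(new HiveConf());
        try {
            // Read the current table definition and its parameters
            Table table = client.getTable("my_db", "my_table");
            Map<String, String> parameters = table.getParameters() == null
                    ? new HashMap<>()
                    : new HashMap<>(table.getParameters());

            parameters.put("last_audited", "2020-01-01"); // hypothetical property
            table.setParameters(parameters);

            // Persist the modified parameters back to the metastore
            client.alter_table("my_db", "my_table", table);
        } finally {
            client.close();
        }
    }
}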