Java Code Examples for org.apache.hadoop.hive.metastore.HiveMetaStoreClient#getTable()
The following examples show how to use org.apache.hadoop.hive.metastore.HiveMetaStoreClient#getTable().
Each example is taken from an open-source project; the source file, project, and license are noted above each snippet.
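All of the examples share the same basic pattern: obtain a HiveMetaStoreClient connected to a metastore, call getTable(database, tableName) to fetch the Thrift Table descriptor, and treat NoSuchObjectException as "table does not exist". The following is a minimal sketch of that pattern; the Thrift URI, database, and table names are placeholders and are not taken from any of the projects below.

import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
import org.apache.hadoop.hive.metastore.api.Table;

public class GetTableSketch {
  public static void main(String[] args) throws Exception {
    HiveConf conf = new HiveConf();
    // Placeholder URI; point this at a real metastore Thrift endpoint.
    conf.setVar(HiveConf.ConfVars.METASTOREURIS, "thrift://localhost:9083");

    HiveMetaStoreClient client = new HiveMetaStoreClient(conf);
    try {
      // getTable(database, tableName) returns the table's Thrift descriptor.
      Table table = client.getTable("my_database", "my_table");
      System.out.println("Location: " + table.getSd().getLocation());
    } catch (NoSuchObjectException e) {
      // Thrown when the table (or its database) does not exist.
      System.out.println("Table not found");
    } finally {
      client.close();
    }
  }
}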
Example 1
Source File: CircusTrainParquetSchemaEvolutionIntegrationTest.java From circus-train with Apache License 2.0 | 6 votes |
private void assertTable(HiveMetaStoreClient client, Schema schema, String database, String table, List<String> expectedData)
    throws Exception {
  assertThat(client.getAllTables(database).size(), is(1));

  Table hiveTable = client.getTable(database, table);
  List<FieldSchema> cols = hiveTable.getSd().getCols();
  assertThat(cols.size(), is(schema.getFields().size()));
  assertColumnSchema(schema, cols);

  PartitionIterator partitionIterator = new PartitionIterator(client, hiveTable, (short) 1000);
  List<Partition> partitions = new ArrayList<>();
  while (partitionIterator.hasNext()) {
    Partition partition = partitionIterator.next();
    assertColumnSchema(schema, partition.getSd().getCols());
    partitions.add(partition);
  }
  assertThat(partitions.size(), is(2));

  List<String> data = shell.executeQuery("select * from " + database + "." + table);
  assertThat(data.size(), is(expectedData.size()));
  assertThat(data.containsAll(expectedData), is(true));
}
Example 2
Source File: WaggleDanceIntegrationTest.java From waggle-dance with Apache License 2.0 | 6 votes |
@Test
public void typical() throws Exception {
  runner = WaggleDanceRunner
      .builder(configLocation)
      .primary("primary", localServer.getThriftConnectionUri(), READ_ONLY)
      .federate(SECONDARY_METASTORE_NAME, remoteServer.getThriftConnectionUri(), REMOTE_DATABASE)
      .build();

  runWaggleDance(runner);
  HiveMetaStoreClient proxy = getWaggleDanceClient();

  // Local table
  Table localTable = localServer.client().getTable(LOCAL_DATABASE, LOCAL_TABLE);
  Table waggledLocalTable = proxy.getTable(LOCAL_DATABASE, LOCAL_TABLE);
  assertThat(waggledLocalTable, is(localTable));

  // Remote table
  String waggledRemoteDbName = REMOTE_DATABASE;
  assertTypicalRemoteTable(proxy, waggledRemoteDbName);
}
Example 3
Source File: WaggleDanceIntegrationTest.java From waggle-dance with Apache License 2.0 | 6 votes |
@Test
public void usePrefix() throws Exception {
  runner = WaggleDanceRunner
      .builder(configLocation)
      .databaseResolution(DatabaseResolution.PREFIXED)
      .primary("primary", localServer.getThriftConnectionUri(), READ_ONLY)
      .federate(SECONDARY_METASTORE_NAME, remoteServer.getThriftConnectionUri())
      .build();

  runWaggleDance(runner);
  HiveMetaStoreClient proxy = getWaggleDanceClient();

  // Local table
  Table localTable = localServer.client().getTable(LOCAL_DATABASE, LOCAL_TABLE);
  Table waggledLocalTable = proxy.getTable(LOCAL_DATABASE, LOCAL_TABLE);
  assertThat(waggledLocalTable, is(localTable));

  // Remote table
  String waggledRemoteDbName = PREFIXED_REMOTE_DATABASE;
  assertTypicalRemoteTable(proxy, waggledRemoteDbName);
}
Example 4
Source File: WaggleDanceIntegrationTest.java From waggle-dance with Apache License 2.0 | 6 votes |
@Test
public void alterTableOnFederatedIsNotAllowedUsingManual() throws Exception {
  String[] mappableDatabases = new String[] { REMOTE_DATABASE };
  String[] writableDatabaseWhitelist = new String[] { REMOTE_DATABASE };
  runner = WaggleDanceRunner
      .builder(configLocation)
      .databaseResolution(DatabaseResolution.MANUAL)
      .primary("primary", localServer.getThriftConnectionUri(),
          AccessControlType.READ_AND_WRITE_AND_CREATE_ON_DATABASE_WHITELIST)
      .federate("doesNotMatterManualMode", remoteServer.getThriftConnectionUri(),
          AccessControlType.READ_AND_WRITE_ON_DATABASE_WHITELIST, mappableDatabases, writableDatabaseWhitelist)
      .build();

  runWaggleDance(runner);
  HiveMetaStoreClient proxy = getWaggleDanceClient();

  Table table = proxy.getTable(REMOTE_DATABASE, REMOTE_TABLE);
  Table newTable = new Table(table);
  newTable.setTableName("new_remote_table");
  proxy.alter_table_with_environmentContext(REMOTE_DATABASE, REMOTE_TABLE, newTable, null);
  assertThat(proxy.tableExists(REMOTE_DATABASE, "new_remote_table"), is(true));
}
Example 5
Source File: HiveServer2CoreTest.java From beeju with Apache License 2.0 | 6 votes |
@Test
public void dropTable() throws Exception {
  HiveServer2Core server = setupServer();
  String tableName = "my_table";
  createUnpartitionedTable(DATABASE, tableName, server);

  try (Connection connection = DriverManager.getConnection(server.getJdbcConnectionUrl());
      Statement statement = connection.createStatement()) {
    String dropHql = String.format("DROP TABLE %s.%s", DATABASE, tableName);
    statement.execute(dropHql);
  }

  HiveMetaStoreClient client = server.getCore().newClient();
  try {
    client.getTable(DATABASE, tableName);
    fail(String.format("Table %s.%s was not deleted", DATABASE, tableName));
  } catch (NoSuchObjectException e) {
    // expected
  } finally {
    client.close();
  }
  server.shutdown();
}
Example 6
Source File: WaggleDanceIntegrationTest.java From waggle-dance with Apache License 2.0 | 5 votes |
@Test
public void federatedWritesSucceedIfReadAndWriteOnDatabaseWhiteListIsConfigured() throws Exception {
  runner = WaggleDanceRunner
      .builder(configLocation)
      .databaseResolution(DatabaseResolution.PREFIXED)
      .primary("primary", localServer.getThriftConnectionUri(), AccessControlType.READ_ONLY)
      .federate(SECONDARY_METASTORE_NAME, remoteServer.getThriftConnectionUri(),
          READ_AND_WRITE_ON_DATABASE_WHITELIST, new String[] { REMOTE_DATABASE }, new String[] { REMOTE_DATABASE })
      .build();

  runWaggleDance(runner);
  HiveMetaStoreClient proxy = getWaggleDanceClient();
  String waggledRemoteDbName = PREFIXED_REMOTE_DATABASE;
  assertTypicalRemoteTable(proxy, waggledRemoteDbName);

  // get succeeds
  proxy.getTable(waggledRemoteDbName, REMOTE_TABLE);
  // drop table
  proxy.dropTable(waggledRemoteDbName, REMOTE_TABLE);
  try {
    // get fails
    proxy.getTable(waggledRemoteDbName, REMOTE_TABLE);
    fail("Should get NoSuchObjectException");
  } catch (NoSuchObjectException e) {
    // Federated table should be allowed to drop, so it now no longer exists and we get an appropriate exception
  }
}
Example 7
Source File: HiveTestUtils.java From kite with Apache License 2.0 | 5 votes |
public static void assertTableIsManaged(HiveMetaStoreClient client, String db, String name)
    throws MetaException, TException {
  final Table table = client.getTable(db, name);
  Assert.assertTrue("Table should be managed db:" + db + " table:" + table,
      !MetaStoreUtils.isExternalTable(table)
          && !MetaStoreUtils.isIndexTable(table)
          && TableType.MANAGED_TABLE.toString().equals(table.getTableType()));
}
Example 8
Source File: HiveTestUtils.java From kite with Apache License 2.0 | 5 votes |
public static void assertTableIsExternal(HiveMetaStoreClient client, String db, String name)
    throws MetaException, TException {
  final Table table = client.getTable(db, name);
  Assert.assertTrue("Table should be external db:" + db + " table:" + table,
      MetaStoreUtils.isExternalTable(table)
          && TableType.EXTERNAL_TABLE.toString().equals(table.getTableType()));
}
Example 9
Source File: AbstractMetastoreTestWithStaticConfiguration.java From incubator-sentry with Apache License 2.0 | 5 votes |
public Table createMetastoreTableWithPartition(HiveMetaStoreClient client, String dbName,
    String tabName, List<FieldSchema> cols, List<FieldSchema> partionVals) throws Exception {
  Table tbl = makeMetastoreTableObject(client, dbName, tabName, cols);
  tbl.setPartitionKeys(partionVals);
  client.createTable(tbl);
  return client.getTable(dbName, tabName);
}
Example 10
Source File: TestMetastoreEndToEnd.java From incubator-sentry with Apache License 2.0 | 5 votes |
/**
 * Verify alter table privileges
 * @throws Exception
 */
@Test
public void testAlterTablePrivileges() throws Exception {
  HiveMetaStoreClient client = context.getMetaStoreClient(ADMIN1);
  createMetastoreTable(client, dbName, tabName1,
      Lists.newArrayList(new FieldSchema("col1", "int", "")));
  client.close();

  // verify group1 users with DDL privileges can alter tables in db_1
  client = context.getMetaStoreClient(USER1_1);
  Table metaTable2 = client.getTable(dbName, tabName1);
  metaTable2.getSd().setCols(
      Lists.newArrayList(new FieldSchema("col2", "double", "")));
  client.alter_table(dbName, tabName1, metaTable2);
  Table metaTable3 = client.getTable(dbName, tabName1);
  assertEquals(metaTable2.getSd().getCols(), metaTable3.getSd().getCols());

  // verify group2 users with DDL privileges can alter tables in db_1
  client = context.getMetaStoreClient(USER2_1);
  metaTable2 = client.getTable(dbName, tabName1);
  metaTable2.getSd().setCols(
      Lists.newArrayList(new FieldSchema("col3", "string", "")));
  client.alter_table(dbName, tabName1, metaTable2);
  metaTable3 = client.getTable(dbName, tabName1);
  assertEquals(metaTable2.getSd().getCols(), metaTable3.getSd().getCols());

  // verify group3 users can't alter tables in db_1
  client = context.getMetaStoreClient(USER3_1);
  metaTable2 = client.getTable(dbName, tabName1);
  metaTable2.getSd().setCols(
      Lists.newArrayList(new FieldSchema("col3", "string", "")));
  try {
    client.alter_table(dbName, tabName1, metaTable2);
    fail("alter table should have failed for non-privilege user");
  } catch (MetaException e) {
    Context.verifyMetastoreAuthException(e);
  }
  client.close();
}
Example 11
Source File: WaggleDanceIntegrationTest.java From waggle-dance with Apache License 2.0 | 5 votes |
@Test
public void federatedWritesFailIfReadAndWriteOnDatabaseWhiteListDoesNotIncludeDb() throws Exception {
  runner = WaggleDanceRunner
      .builder(configLocation)
      .databaseResolution(DatabaseResolution.PREFIXED)
      .primary("primary", localServer.getThriftConnectionUri(), AccessControlType.READ_ONLY)
      .federate(SECONDARY_METASTORE_NAME, remoteServer.getThriftConnectionUri(),
          READ_AND_WRITE_ON_DATABASE_WHITELIST, new String[] { REMOTE_DATABASE }, new String[] { "mismatch" })
      .build();

  runWaggleDance(runner);
  HiveMetaStoreClient proxy = getWaggleDanceClient();
  String waggledRemoteDbName = PREFIXED_REMOTE_DATABASE;
  assertTypicalRemoteTable(proxy, waggledRemoteDbName);

  // get succeeds
  proxy.getTable(waggledRemoteDbName, REMOTE_TABLE);
  try {
    // drop fails
    proxy.dropTable(waggledRemoteDbName, REMOTE_TABLE);
    fail("Should get MetaException");
  } catch (MetaException e) {
    // Federated table should not be allowed to drop
  }
}
Example 12
Source File: WaggleDanceIntegrationTest.java From waggle-dance with Apache License 2.0 | 5 votes |
@Test
public void federatedWritesFailIfReadAndWriteOnDatabaseWhiteListIsNotConfigured() throws Exception {
  runner = WaggleDanceRunner
      .builder(configLocation)
      .databaseResolution(DatabaseResolution.PREFIXED)
      .primary("primary", localServer.getThriftConnectionUri(), AccessControlType.READ_ONLY)
      .federate(SECONDARY_METASTORE_NAME, remoteServer.getThriftConnectionUri())
      .build();

  runWaggleDance(runner);
  HiveMetaStoreClient proxy = getWaggleDanceClient();
  String waggledRemoteDbName = PREFIXED_REMOTE_DATABASE;
  assertTypicalRemoteTable(proxy, waggledRemoteDbName);

  // get succeeds
  proxy.getTable(waggledRemoteDbName, REMOTE_TABLE);
  try {
    // drop fails
    proxy.dropTable(waggledRemoteDbName, REMOTE_TABLE);
    fail("Should get MetaException");
  } catch (MetaException e) {
    // Federated table should not be allowed to drop
  }
}
Example 13
Source File: WaggleDanceIntegrationTest.java From waggle-dance with Apache License 2.0 | 5 votes |
@Test
public void readWriteCreateAllowedPrefixed() throws Exception {
  String writableDatabase = "writable_db";
  localServer.createDatabase(writableDatabase);
  runner = WaggleDanceRunner
      .builder(configLocation)
      .databaseResolution(DatabaseResolution.PREFIXED)
      .primary("primary", localServer.getThriftConnectionUri(), AccessControlType.READ_AND_WRITE_AND_CREATE)
      .build();

  runWaggleDance(runner);
  HiveMetaStoreClient proxy = getWaggleDanceClient();

  // create rights
  proxy.createDatabase(new Database("newDB", "", new File(localWarehouseUri, "newDB").toURI().toString(), null));
  Database newDB = proxy.getDatabase("newDB");
  assertNotNull(newDB);

  // read rights
  Table localTable = localServer.client().getTable(LOCAL_DATABASE, LOCAL_TABLE);
  Table waggledLocalTable = proxy.getTable(LOCAL_DATABASE, LOCAL_TABLE);
  assertThat(waggledLocalTable, is(localTable));

  // write rights
  proxy.dropTable(LOCAL_DATABASE, LOCAL_TABLE);
  try {
    proxy.getTable(LOCAL_DATABASE, LOCAL_TABLE);
    fail("Should get NoSuchObjectException");
  } catch (NoSuchObjectException e) {
    // Local table should be allowed to drop, so it now no longer exists and we get an appropriate exception
  }
}
Example 14
Source File: WaggleDanceIntegrationTest.java From waggle-dance with Apache License 2.0 | 5 votes |
@Test
public void readWriteCreateAllowed() throws Exception {
  String writableDatabase = "writable_db";
  localServer.createDatabase(writableDatabase);
  runner = WaggleDanceRunner
      .builder(configLocation)
      .primary("primary", localServer.getThriftConnectionUri(), AccessControlType.READ_AND_WRITE_AND_CREATE)
      .build();

  runWaggleDance(runner);
  HiveMetaStoreClient proxy = getWaggleDanceClient();

  // create rights
  proxy.createDatabase(new Database("newDB", "", new File(localWarehouseUri, "newDB").toURI().toString(), null));
  Database newDB = proxy.getDatabase("newDB");
  assertNotNull(newDB);

  // read rights
  Table localTable = localServer.client().getTable(LOCAL_DATABASE, LOCAL_TABLE);
  Table waggledLocalTable = proxy.getTable(LOCAL_DATABASE, LOCAL_TABLE);
  assertThat(waggledLocalTable, is(localTable));

  // write rights
  proxy.dropTable(LOCAL_DATABASE, LOCAL_TABLE);
  try {
    proxy.getTable(LOCAL_DATABASE, LOCAL_TABLE);
    fail("Should get NoSuchObjectException");
  } catch (NoSuchObjectException e) {
    // Local table should be allowed to drop, so it now no longer exists and we get an appropriate exception
  }
}
Example 15
Source File: WaggleDanceIntegrationTest.java From waggle-dance with Apache License 2.0 | 5 votes |
@Test
public void typicalWithGraphite() throws Exception {
  runner = WaggleDanceRunner
      .builder(configLocation)
      .primary("primary", localServer.getThriftConnectionUri(), READ_ONLY)
      .federate("remote", remoteServer.getThriftConnectionUri(), REMOTE_DATABASE)
      .graphite("localhost", graphite.port(), "graphitePrefix", 1000)
      .build();

  runWaggleDance(runner);
  HiveMetaStoreClient proxy = getWaggleDanceClient();

  // Execute a couple of requests
  proxy.getAllDatabases();
  proxy.getTable(LOCAL_DATABASE, LOCAL_TABLE);
  proxy.getAllDatabases();
  proxy.getTable(REMOTE_DATABASE, REMOTE_TABLE);
  runner.stop();

  Set<String> metrics = new TreeSet<>(Arrays.asList(new String(graphite.getOutput()).split("\n")));
  assertMetric(metrics,
      "graphitePrefix.counter.com.hotels.bdp.waggledance.server.FederatedHMSHandler.get_all_databases.all.calls.count 2");
  assertMetric(metrics,
      "graphitePrefix.counter.com.hotels.bdp.waggledance.server.FederatedHMSHandler.get_all_databases.all.success.count 2");
  assertMetric(metrics,
      "graphitePrefix.counter.com.hotels.bdp.waggledance.server.FederatedHMSHandler.get_table_req.primary.calls.count 1");
  assertMetric(metrics,
      "graphitePrefix.counter.com.hotels.bdp.waggledance.server.FederatedHMSHandler.get_table_req.primary.success.count 1");
  assertMetric(metrics,
      "graphitePrefix.counter.com.hotels.bdp.waggledance.server.FederatedHMSHandler.get_table_req.remote.calls.count 1");
  assertMetric(metrics,
      "graphitePrefix.counter.com.hotels.bdp.waggledance.server.FederatedHMSHandler.get_table_req.remote.success.count 1");
}
Example 16
Source File: WaggleDanceIntegrationTest.java From waggle-dance with Apache License 2.0 | 5 votes |
private void assertTypicalRemoteTable(HiveMetaStoreClient proxy, String waggledRemoteDbName) throws TException {
  Table remoteTable = remoteServer.client().getTable(REMOTE_DATABASE, REMOTE_TABLE);

  Table waggledRemoteTable = proxy.getTable(waggledRemoteDbName, REMOTE_TABLE);
  assertThat(waggledRemoteTable.getDbName(), is(waggledRemoteDbName));
  assertThat(waggledRemoteTable.getTableName(), is(remoteTable.getTableName()));
  assertThat(waggledRemoteTable.getSd(), is(remoteTable.getSd()));
  assertThat(waggledRemoteTable.getParameters(), is(remoteTable.getParameters()));
  assertThat(waggledRemoteTable.getPartitionKeys(), is(remoteTable.getPartitionKeys()));

  List<String> partitionNames = Arrays.asList("continent=Europe/country=UK", "continent=Asia/country=China");
  List<Partition> remotePartitions = remoteServer
      .client()
      .getPartitionsByNames(REMOTE_DATABASE, REMOTE_TABLE, partitionNames);
  List<Partition> waggledRemotePartitions = proxy
      .getPartitionsByNames(waggledRemoteDbName, REMOTE_TABLE, partitionNames);
  assertThat(waggledRemotePartitions.size(), is(2));
  for (int i = 0; i < waggledRemotePartitions.size(); ++i) {
    Partition remotePartition = remotePartitions.get(i);
    Partition waggledRemotePartition = waggledRemotePartitions.get(i);
    assertThat(remotePartition.getDbName(), is(REMOTE_DATABASE));
    assertThat(waggledRemotePartition.getDbName(), is(waggledRemoteDbName));
    assertThat(waggledRemotePartition.getTableName(), is(remotePartition.getTableName()));
    assertThat(waggledRemotePartition.getCreateTime(), is(remotePartition.getCreateTime()));
    assertThat(waggledRemotePartition.getParameters(), is(remotePartition.getParameters()));
    assertThat(waggledRemotePartition.getPrivileges(), is(remotePartition.getPrivileges()));
    assertThat(waggledRemotePartition.getSd(), is(remotePartition.getSd()));
    assertThat(waggledRemotePartition.getValues(), is(remotePartition.getValues()));
  }
}
Example 17
Source File: WaggleDanceIntegrationTest.java From waggle-dance with Apache License 2.0 | 5 votes |
@Test
public void usePrimaryPrefix() throws Exception {
  String primaryPrefix = "primary_";
  runner = WaggleDanceRunner
      .builder(configLocation)
      .databaseResolution(DatabaseResolution.PREFIXED)
      .primary("primary", localServer.getThriftConnectionUri(), READ_ONLY)
      .withPrimaryPrefix(primaryPrefix)
      .federate(SECONDARY_METASTORE_NAME, remoteServer.getThriftConnectionUri())
      .build();

  runWaggleDance(runner);
  HiveMetaStoreClient proxy = getWaggleDanceClient();

  // Local table
  String prefixedLocalDbName = primaryPrefix + LOCAL_DATABASE;
  Table localTable = proxy.getTable(prefixedLocalDbName, LOCAL_TABLE);
  assertThat(localTable.getDbName(), is(prefixedLocalDbName));

  // fetch without prefix works and result is prefixed
  Table localTable2 = proxy.getTable(LOCAL_DATABASE, LOCAL_TABLE);
  assertThat(localTable2.getDbName(), is(prefixedLocalDbName));

  // Remote table
  String prefixedRemoteDbName = PREFIXED_REMOTE_DATABASE;
  assertTypicalRemoteTable(proxy, prefixedRemoteDbName);
}
Example 18
Source File: HiveServer2CoreTest.java From beeju with Apache License 2.0 | 5 votes |
@Test
public void createTable() throws Exception {
  HiveServer2Core server = setupServer();
  String tableName = "my_test_table";

  try (Connection connection = DriverManager.getConnection(server.getJdbcConnectionUrl());
      Statement statement = connection.createStatement()) {
    String createHql = new StringBuilder()
        .append("CREATE TABLE `" + DATABASE + "." + tableName + "`(`id` int, `name` string) ")
        .append("ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe' ")
        .append("STORED AS INPUTFORMAT 'org.apache.hadoop.mapred.TextInputFormat' ")
        .append("OUTPUTFORMAT 'org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat'")
        .toString();
    statement.execute(createHql);
  }

  HiveMetaStoreClient client = server.getCore().newClient();
  Table table = client.getTable(DATABASE, tableName);
  client.close();

  assertThat(table.getDbName(), is(DATABASE));
  assertThat(table.getTableName(), is(tableName));
  assertThat(table.getSd().getCols(),
      is(Arrays.asList(new FieldSchema("id", "int", null), new FieldSchema("name", "string", null))));
  assertThat(table.getSd().getInputFormat(), is("org.apache.hadoop.mapred.TextInputFormat"));
  assertThat(table.getSd().getOutputFormat(), is("org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat"));
  assertThat(table.getSd().getSerdeInfo().getSerializationLib(),
      is("org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe"));
  server.shutdown();
}
Example 19
Source File: TestMetastoreEndToEnd.java From incubator-sentry with Apache License 2.0 | 4 votes |
/**
 * Verify URI privileges for alter table
 * @throws Exception
 */
@Test
public void testUriPartitionPrivileges() throws Exception {
  String tabName1 = "tab1";
  String newPath1 = "fooTab1";
  String newPath2 = "fooTab2";
  ArrayList<String> partVals1 = Lists.newArrayList("part1");
  ArrayList<String> partVals2 = Lists.newArrayList("part2");
  ArrayList<String> partVals3 = Lists.newArrayList("part2");
  String tabDir1 = hiveServer.getProperty(HiveServerFactory.WAREHOUSE_DIR) + File.separator + newPath1;
  String tabDir2 = hiveServer.getProperty(HiveServerFactory.WAREHOUSE_DIR) + File.separator + newPath2;

  policyFile.addRolesToGroup(USERGROUP1, uri_role)
      .addRolesToGroup(USERGROUP2, db_all_role)
      .addPermissionsToRole(uri_role, "server=server1->URI=" + tabDir1)
      .addPermissionsToRole(uri_role, "server=server1->URI=" + tabDir2);
  writePolicyFile(policyFile);

  // user with URI privileges should be able to alter partition to set that specific location
  HiveMetaStoreClient client = context.getMetaStoreClient(USER1_1);
  Table tbl1 = createMetastoreTableWithPartition(client, dbName, tabName1,
      Lists.newArrayList(new FieldSchema("col1", "int", "")),
      Lists.newArrayList(new FieldSchema("part_col1", "string", "")));
  addPartition(client, dbName, tabName1, partVals1, tbl1);
  addPartitionWithLocation(client, dbName, tabName1, partVals2, tbl1, tabDir1);
  client.close();

  // user without URI privileges should NOT be able to alter partition to set
  // that specific location
  client = context.getMetaStoreClient(USER2_1);
  try {
    tbl1 = client.getTable(dbName, tabName1);
    addPartitionWithLocation(client, dbName, tabName1, partVals3, tbl1, tabDir2);
    fail("Add partition with location should have failed");
  } catch (MetaException e) {
    Context.verifyMetastoreAuthException(e);
  }
  client.close();
}
Example 20
Source File: TestMetastoreEndToEnd.java From incubator-sentry with Apache License 2.0 | 4 votes |
/**
 * Verify alter partition privileges
 * TODO: We seem to have a bit of an inconsistency with Alter partition. It's only
 * allowed with SERVER privilege. If we allow add/drop partition with DB
 * level privilege, then this should also be at the same level.
 * @throws Exception
 */
@Test
public void testAlterSetLocationPrivileges() throws Exception {
  String newPath1 = "fooTab1";
  ArrayList<String> partVals1 = Lists.newArrayList("part1");
  ArrayList<String> partVals2 = Lists.newArrayList("part2");
  String tabDir1 = hiveServer.getProperty(HiveServerFactory.WAREHOUSE_DIR) + File.separator + newPath1;

  policyFile.addRolesToGroup(USERGROUP1, uri_role)
      .addRolesToGroup(USERGROUP2, uri_role)
      .addPermissionsToRole(uri_role, "server=server1->URI=" + tabDir1);
  writePolicyFile(policyFile);

  HiveMetaStoreClient client = context.getMetaStoreClient(ADMIN1);
  Table tbl1 = createMetastoreTableWithPartition(client, dbName, tabName1,
      Lists.newArrayList(new FieldSchema("col1", "int", "")),
      Lists.newArrayList(new FieldSchema("part_col1", "string", "")));
  addPartition(client, dbName, tabName1, partVals1, tbl1);
  tbl1 = client.getTable(dbName, tabName1);
  addPartition(client, dbName, tabName1, partVals2, tbl1);
  client.close();

  // user with DB and URI privileges should be able to alter partition set location
  client = context.getMetaStoreClient(USER1_1);
  Partition newPartition = client.getPartition(dbName, tabName1, partVals1);
  newPartition.getSd().setLocation(tabDir1);
  client.alter_partition(dbName, tabName1, newPartition);
  client.close();

  // user with Table and URI privileges should be able to alter partition set location
  client = context.getMetaStoreClient(USER2_1);
  newPartition = client.getPartition(dbName, tabName1, partVals2);
  newPartition.getSd().setLocation(tabDir1);
  client.alter_partition(dbName, tabName1, newPartition);
  client.close();

  policyFile.addRolesToGroup(USERGROUP3, db_all_role);
  writePolicyFile(policyFile);

  // user without URI privileges should not be able to alter partition set location
  client = context.getMetaStoreClient(USER3_1);
  newPartition = client.getPartition(dbName, tabName1, partVals2);
  newPartition.getSd().setLocation(tabDir1);
  try {
    client.alter_partition(dbName, tabName1, newPartition);
    fail("alter partition with location should have failed");
  } catch (MetaException e) {
    Context.verifyMetastoreAuthException(e);
  }
  client.close();
}